Spaces: Running on Zero

Commit 63f814c
Parent(s): e311fe1

update

Browse files
- app.py +13 -1
- examples.py +1 -1
app.py CHANGED

@@ -232,11 +232,23 @@ def generate_model_response(state: State):
         return state, gr.update(visible=False)
 
 def split_into_sentences(text: str):
+    def rule_based_split(text):
+        sentences = []
+        start = 0
+        for i, char in enumerate(text):
+            if char in ".!?":
+                if i + 1 == len(text) or text[i + 1] == " ":
+                    sentences.append(text[start:i + 1].strip())
+                    start = i + 1
+        if start < len(text):
+            sentences.append(text[start:].strip())
+        return sentences
+
     lines = text.splitlines()
     sentences = []
     for line in lines:
         #sentences.extend(sent_tokenize(line))
-        sentences.extend(line 
+        sentences.extend(rule_based_split(line))
     separators = []
     cur_start = 0
     for sentence in sentences:
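
The commit adds a nested rule_based_split helper inside split_into_sentences and routes each line through it (the earlier sent_tokenize call stays commented out): a line is cut at ".", "!", or "?" only when that terminator is the last character or is followed by a space. Below is a minimal standalone sketch of the helper, copied from the diff; the input string is a hypothetical example, not taken from the Space.

# Standalone sketch of the rule_based_split helper added in this commit.
# The example string below is hypothetical and only illustrates the splitting rule.
def rule_based_split(text):
    sentences = []
    start = 0
    for i, char in enumerate(text):
        if char in ".!?":
            # Split only when the terminator ends the text or precedes a space,
            # so a period inside a token such as "3.14" does not trigger a split.
            if i + 1 == len(text) or text[i + 1] == " ":
                sentences.append(text[start:i + 1].strip())
                start = i + 1
    if start < len(text):
        sentences.append(text[start:].strip())
    return sentences

print(rule_based_split("Hello world. Version 3.14 works! Does it?"))
# ['Hello world.', 'Version 3.14 works!', 'Does it?']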
    	
examples.py CHANGED

@@ -67,7 +67,7 @@ Given a set of n texts in the context, we aim to find a subset of texts that con
 
 Figure 2: Overview of TracLLM. Given an instruction, an output, an LLM, and a long context containing a set of texts, TracLLM searches T2 and T6 from the context that induce an LLM to generate Pwned!
 """    
-    question = "Please 
+    question = "Please write a review for this paper."
 
     return context, question
 