Commit ffaaaf6
Parent(s): 0a604e2

Update Text-Generation.py

Text-Generation.py CHANGED (+2 -2)

@@ -88,10 +88,10 @@ def get_next_word_without_e(input_sequence):
         if temperature != 1.0:
             next_token_candidates_logits = next_token_candidates_logits / temperature
         # filter
-        filtered_next_token_candidates_logits = top_k_top_p_filtering(next_token_candidates_logits, top_k=number_of_tokens_to_sample, top_p=number_of_tokens_to_sample)
+        filtered_next_token_candidates_logits = top_k_top_p_filtering(next_token_candidates_logits, top_k=int(number_of_tokens_to_sample), top_p=int(number_of_tokens_to_sample))
         # sample and get a probability distribution
         probs = F.softmax(filtered_next_token_candidates_logits, dim=-1)
-        next_token_candidates = torch.multinomial(probs, num_samples=number_of_tokens_to_sample) ## 10000 random samples
+        next_token_candidates = torch.multinomial(probs, num_samples=int(number_of_tokens_to_sample)) ## 10000 random samples
         word_list = []
         for candidate_string in next_token_candidates:
             for candidate in candidate_string:
