sabssag committed on
Commit 5933d22 · verified · Parent: 1c861fc

Update app.py

Files changed (1): app.py (+1, -2)
app.py CHANGED
@@ -20,13 +20,12 @@ def generate_text(text):
     # Generate text
     output = model.generate(
         input_ids=encoded_input['input_ids'],
-        max_length=100,  # Specify the max length for the generated text
+        max_length=200,  # Specify the max length for the generated text
         num_return_sequences=1,  # Number of sequences to generate
         no_repeat_ngram_size=2,  # Avoid repeating n-grams of length 2
         top_k=50,  # Limits the sampling pool to top_k tokens
         top_p=0.95,  # Cumulative probability threshold for nucleus sampling
         temperature=0.7,  # Controls the randomness of predictions
-        do_sample=True,  # Enable sampling
         attention_mask=encoded_input['attention_mask'],  # Correct attention mask
         pad_token_id=tokenizer.eos_token_id  # Use the end-of-sequence token as padding
     )
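
For context, here is a minimal sketch of generate_text as it stands after this commit. Only the generate(...) call appears in the diff, so the checkpoint name ("gpt2"), the tokenizer/model loading, and the decode step are assumptions added for a runnable example, not part of the commit:

from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumed setup: a GPT-2-style causal LM; the diff does not show which checkpoint app.py uses.
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

def generate_text(text):
    # Tokenize the prompt; returns both input_ids and attention_mask
    encoded_input = tokenizer(text, return_tensors="pt")
    # Generate text (parameters as of this commit)
    output = model.generate(
        input_ids=encoded_input['input_ids'],
        max_length=200,  # Max length of the generated text
        num_return_sequences=1,  # Number of sequences to generate
        no_repeat_ngram_size=2,  # Avoid repeating n-grams of length 2
        top_k=50,  # Limits the sampling pool to top_k tokens
        top_p=0.95,  # Cumulative probability threshold for nucleus sampling
        temperature=0.7,  # Controls the randomness of predictions
        attention_mask=encoded_input['attention_mask'],
        pad_token_id=tokenizer.eos_token_id  # Use the end-of-sequence token as padding
    )
    # Assumed: decode the first (and only) returned sequence to a string
    return tokenizer.decode(output[0], skip_special_tokens=True)

print(generate_text("Once upon a time"))

One behavioral note on this change: with do_sample=True removed, transformers' generate defaults to do_sample=False, i.e. greedy decoding, so the top_k, top_p, and temperature arguments no longer affect the output (recent transformers versions emit a warning when sampling parameters are passed without do_sample=True). If sampling is still intended, do_sample=True would need to be kept alongside the new max_length=200.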