dashi6174 committed
Commit d219eb3 · Parent: 71b2e07

The default max_tokens of 215 is too small; answers are often cut off. Modify it to 512 to address this issue. (#845)

api/db/db_models.py CHANGED

@@ -759,7 +759,7 @@ class Dialog(DataBaseModel):
         help_text="English|Chinese")
     llm_id = CharField(max_length=128, null=False, help_text="default llm ID")
     llm_setting = JSONField(null=False, default={"temperature": 0.1, "top_p": 0.3, "frequency_penalty": 0.7,
-                                                 "presence_penalty": 0.4, "max_tokens": 215})
+                                                 "presence_penalty": 0.4, "max_tokens": 512})
     prompt_type = CharField(
         max_length=16,
         null=False,
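Note that a peewee column default like this applies only when a new row is inserted: Dialog rows created before this commit still carry "max_tokens": 215 in their stored llm_setting. If existing dialogs should pick up the new limit as well, a one-off backfill along these lines would be needed. This is a minimal sketch, not part of the commit: the import path follows the diff above, while the helper name and the old/new values passed as parameters are illustrative.

    # Hypothetical one-off backfill; not part of this commit.
    from api.db.db_models import Dialog  # model shown in the diff above

    def backfill_max_tokens(old: int = 215, new: int = 512) -> int:
        """Bump max_tokens on dialogs that still use the old default."""
        updated = 0
        for dialog in Dialog.select():
            setting = dialog.llm_setting or {}
            if setting.get("max_tokens") == old:
                setting["max_tokens"] = new
                dialog.llm_setting = setting
                dialog.save()  # persist the modified row
                updated += 1
        return updated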
web/src/constants/knowledge.ts CHANGED

@@ -31,14 +31,14 @@ export const settledModelVariableMap = {
     top_p: 0.3,
     frequency_penalty: 0.7,
     presence_penalty: 0.4,
-    max_tokens: 215,
+    max_tokens: 512,
   },
   [ModelVariableType.Balance]: {
     temperature: 0.5,
     top_p: 0.5,
     frequency_penalty: 0.7,
     presence_penalty: 0.4,
-    max_tokens: 215,
+    max_tokens: 512,
   },
 };
 