hmm
model/custom_agent.py  CHANGED  (+12 -3)
@@ -106,7 +106,7 @@ class CustomHfAgent(Agent):
     ### https://github.com/huggingface/transformers/blob/main/src/transformers/tools/prompts.py -> run chat_template.txt
     ### https://huggingface.co/datasets/huggingface-tools/default-prompts/blob/main/chat_prompt_template.txt
     ###
-
+    def format_prompt(self, task, chat_mode=False):

         checkpoint = "bigcode/starcoder"
         tokenizer = AutoTokenizer.from_pretrained(checkpoint, token = self.token)
@@ -118,6 +118,7 @@ class CustomHfAgent(Agent):

         if chat_mode:
             if self.chat_history is None:
+                print("no histroy yet ")
                 prompt = self.chat_prompt_template.replace("<<all_tools>>", description)
                 prompt += CHAT_MESSAGE_PROMPT.replace("<<task>>", task)

@@ -127,8 +128,11 @@ class CustomHfAgent(Agent):
                         "content": prompt,
                     }
                 ]
-
+                print("tokenized "+tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False))
+                # prompt = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
             else:
+                print(" chat histroy ")
+                print(self.chat_history)
                 prompt = self.chat_history
                 prompt += CHAT_MESSAGE_PROMPT.replace("<<task>>", task)
                 messages = [
@@ -137,9 +141,12 @@ class CustomHfAgent(Agent):
                         "content": prompt,
                     }
                 ]
+                print("tokenized "+tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False))
+
                 # prompt = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
                 ## prompt
         else:
+            print("else block not chat mode ")
             prompt = self.run_prompt_template.replace("<<all_tools>>", description)
             prompt = prompt.replace("<<prompt>>", task)
             messages = [
@@ -148,6 +155,8 @@ class CustomHfAgent(Agent):
                     "content": prompt,
                 }
             ]
+            print("tokenized "+tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False))
+
             # prompt = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
             print("formatted propmpt ---- " + prompt)
-            return prompt
+        return prompt
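The debug prints added above all go through tokenizer.apply_chat_template, the standard transformers API for rendering a messages list into a model-specific prompt string. A minimal self-contained sketch of that call, assuming a checkpoint whose tokenizer ships a chat template (the model name below is an illustrative stand-in, not this repo's bigcode/starcoder, whose tokenizer predates chat templates and may fall back to a default template on older transformers releases):

from transformers import AutoTokenizer

# Illustrative stand-in checkpoint; any tokenizer that defines a chat
# template behaves the same way.
tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")

# Each message needs a "role" key as well as the "content" key visible
# in the hunks above.
messages = [
    {"role": "user", "content": "Generate an image of a boat."},
]

# tokenize=False returns the rendered prompt string instead of token ids;
# add_generation_prompt=True appends the marker that opens the
# assistant's turn before generation.
prompt = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, tokenize=False
)
print(prompt)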
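The template side of format_prompt is plain literal substitution on the <<all_tools>>, <<task>>, and <<prompt>> placeholders from the prompt files linked in the comments. A hedged sketch of the three code paths, with made-up template and tool-description strings standing in for the real ones:

# All template strings here are illustrative stand-ins for the files
# linked above (prompts.py / chat_prompt_template.txt).
CHAT_PROMPT_TEMPLATE = "You can use the following tools:\n<<all_tools>>\n"
CHAT_MESSAGE_PROMPT = "Human: <<task>>\n\nAssistant: "
RUN_PROMPT_TEMPLATE = "Tools:\n<<all_tools>>\nTask: <<prompt>>\n"

description = "- image_generator: creates an image from a text description"

# First chat turn: substitute the tool list, then append the task.
prompt = CHAT_PROMPT_TEMPLATE.replace("<<all_tools>>", description)
prompt += CHAT_MESSAGE_PROMPT.replace("<<task>>", "Draw a boat")

# Follow-up turn: continue from the accumulated history instead.
chat_history = prompt
prompt = chat_history + CHAT_MESSAGE_PROMPT.replace("<<task>>", "Make it red")

# run() mode: one template carries both placeholders.
run_prompt = RUN_PROMPT_TEMPLATE.replace("<<all_tools>>", description)
run_prompt = run_prompt.replace("<<prompt>>", "Draw a boat")
print(run_prompt)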