Hugging Face Space — commit "Update app.py" (Space currently reports a build error).
Diff of app.py (CHANGED) follows.
|
@@ -59,6 +59,9 @@ I am confident that I can leverage my expertise to assist you in developing and
|
|
| 59 |
|
| 60 |
return summary, next_step
|
| 61 |
|
|
|
|
|
|
|
|
|
|
| 62 |
def save_agent_to_file(agent):
|
| 63 |
"""Saves the agent's prompt to a file."""
|
| 64 |
if not os.path.exists(AGENT_DIRECTORY):
|
|
@@ -91,8 +94,8 @@ def chat_interface_with_agent(input_text, agent_name):
|
|
| 91 |
|
| 92 |
model_name = "Bin12345/AutoCoder_S_6.7B"
|
| 93 |
try:
|
| 94 |
-
model = AutoModelForCausalLM.from_pretrained(model_name)
|
| 95 |
-
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
| 96 |
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
|
| 97 |
except EnvironmentError as e:
|
| 98 |
return f"Error loading model: {e}"
|
|
@@ -143,33 +146,33 @@ def code_editor_interface(code):
|
|
| 143 |
|
| 144 |
# Text summarization tool
|
| 145 |
def summarize_text(text):
    """Summarize *text* with a Hugging Face summarization pipeline.

    No model is pinned, so the pipeline's default summarization model is
    used. Returns only the summary string.
    """
    summarization = pipeline("summarization")
    results = summarization(text, max_length=130, min_length=30, do_sample=False)
    return results[0]["summary_text"]
|
| 149 |
|
| 150 |
# Sentiment analysis tool
|
| 151 |
def sentiment_analysis(text):
    """Classify the sentiment of *text* and return its label string."""
    # Default sentiment model (none pinned here).
    classifier = pipeline("sentiment-analysis")
    prediction = classifier(text)
    return prediction[0]["label"]
|
| 155 |
|
| 156 |
# Text translation tool (code translation)
|
| 157 |
def translate_code(code, source_language, target_language):
    """Translate *code* from *source_language* to *target_language*.

    Fixes the original implementation, which hard-coded the
    English→Spanish model and ignored *source_language* entirely: the
    Helsinki-NLP opus-mt model name is now derived from both language
    codes (e.g. 'en', 'es'), so the parameters actually take effect.
    """
    # Use a Hugging Face translation model instead of OpenAI.
    # Helsinki-NLP publishes one opus-mt model per language pair.
    model_name = f"Helsinki-NLP/opus-mt-{source_language}-{target_language}"
    translator = pipeline("translation", model=model_name)
    # opus-mt models are single-pair, so the previous spurious
    # target_lang kwarg is not needed (and is not a pipeline parameter).
    translated_code = translator(code)[0]['translation_text']
    return translated_code
|
| 162 |
|
| 163 |
def generate_code(code_idea):
    """Generate source code for *code_idea* via the StarCoder model."""
    # Hugging Face code-generation model instead of OpenAI.
    coder = pipeline('text-generation', model='bigcode/starcoder')
    candidates = coder(code_idea, max_length=1000, num_return_sequences=1)
    return candidates[0]['generated_text']
|
| 168 |
|
| 169 |
def chat_interface(input_text):
    """Handles general chat interactions with the user."""
    # DialoGPT stands in for a bespoke chat model / custom logic.
    dialog = pipeline("text-generation", model="microsoft/DialoGPT-medium")
    replies = dialog(input_text, max_length=50, num_return_sequences=1)
    return replies[0]['generated_text']
|
| 175 |
|
|
|
|
| 59 |
|
| 60 |
return summary, next_step
|
| 61 |
|
| 62 |
+
def load_hf_token():
    """Return the Hugging Face API token.

    Reads the HF_TOKEN environment variable first so real credentials
    never have to be committed to the repository; falls back to the
    original placeholder string when the variable is unset, keeping the
    previous behavior for existing deployments.
    """
    return os.environ.get("HF_TOKEN", 'YOUR_HF_TOKEN')
|
| 64 |
+
|
| 65 |
def save_agent_to_file(agent):
|
| 66 |
"""Saves the agent's prompt to a file."""
|
| 67 |
if not os.path.exists(AGENT_DIRECTORY):
|
|
|
|
| 94 |
|
| 95 |
model_name = "Bin12345/AutoCoder_S_6.7B"
|
| 96 |
try:
|
| 97 |
+
model = AutoModelForCausalLM.from_pretrained(model_name, use_auth_token=load_hf_token())
|
| 98 |
+
tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=load_hf_token())
|
| 99 |
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
|
| 100 |
except EnvironmentError as e:
|
| 101 |
return f"Error loading model: {e}"
|
|
|
|
| 146 |
|
| 147 |
# Text summarization tool
|
| 148 |
def summarize_text(text):
    """Produce an abstractive summary of *text* using the t5-base model."""
    # NOTE(review): use_auth_token is deprecated in newer transformers
    # releases in favour of `token` — confirm the pinned version.
    model = pipeline("summarization", model="t5-base", use_auth_token=load_hf_token())
    output = model(text, max_length=130, min_length=30, do_sample=False)
    return output[0]["summary_text"]
|
| 152 |
|
| 153 |
# Sentiment analysis tool
|
| 154 |
def sentiment_analysis(text):
    """Return the sentiment label predicted for *text*."""
    classifier = pipeline(
        "sentiment-analysis",
        model="cardiffnlp/twitter-roberta-base-sentiment",
        use_auth_token=load_hf_token(),
    )
    return classifier(text)[0]["label"]
|
| 158 |
|
| 159 |
# Text translation tool (code translation)
|
| 160 |
def translate_code(code, source_language, target_language):
    """Translate *code* from *source_language* to *target_language*.

    Fixes the original implementation, which hard-coded the
    English→Spanish model and ignored *source_language* entirely: the
    Helsinki-NLP opus-mt model name is now derived from both language
    codes (e.g. 'en', 'es'), so the parameters actually take effect.
    """
    # Use a Hugging Face translation model instead of OpenAI.
    # Helsinki-NLP publishes one opus-mt model per language pair.
    model_name = f"Helsinki-NLP/opus-mt-{source_language}-{target_language}"
    translator = pipeline("translation", model=model_name, use_auth_token=load_hf_token())
    # opus-mt models are single-pair, so the previous spurious
    # target_lang kwarg is not needed (and is not a pipeline parameter).
    translated_code = translator(code)[0]['translation_text']
    return translated_code
|
| 165 |
|
| 166 |
def generate_code(code_idea):
    """Generate source code for *code_idea* via the StarCoder model."""
    # Hugging Face code-generation model instead of OpenAI.
    coder = pipeline('text-generation', model='bigcode/starcoder', use_auth_token=load_hf_token())
    candidates = coder(code_idea, max_length=1000, num_return_sequences=1)
    return candidates[0]['generated_text']
|
| 171 |
|
| 172 |
def chat_interface(input_text):
    """Handles general chat interactions with the user."""
    # Use a Hugging Face chatbot model or your own logic
    bot = pipeline("text-generation", model="microsoft/DialoGPT-medium", use_auth_token=load_hf_token())
    candidates = bot(input_text, max_length=50, num_return_sequences=1)
    return candidates[0]['generated_text']
|
| 178 |
|