fschwartzer committed on
Commit
ba48fb7
·
verified ·
1 Parent(s): 767b3b4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -7
app.py CHANGED
@@ -1,10 +1,9 @@
1
  import pandas as pd
2
  import gradio as gr
3
- from transformers import AutoTokenizer, AutoModelForCausalLM
4
 
5
- model_name = "meta-llama/Llama-2-7b-hf"
6
- tokenizer = AutoTokenizer.from_pretrained(model_name)
7
- model = AutoModelForCausalLM.from_pretrained(model_name)
8
 
9
  # Dados iniciais
10
  data = {
@@ -32,9 +31,12 @@ def get_gpt_response(query):
32
  {csv_data}
33
 
34
  """
35
- input_ids = tokenizer(query, return_tensors="pt").input_ids
36
- output = model.generate(input_ids, max_new_tokens=100)
37
- return tokenizer.decode(output[0], skip_special_tokens=True)
 
 
 
38
 
39
  def ask_question(pergunta):
40
  resposta = get_gpt_response(pergunta)
 
1
  import pandas as pd
2
  import gradio as gr
3
+ from transformers import GPT2LMHeadModel, GPT2Tokenizer
4
 
5
+ tokenizer = GPT2Tokenizer.from_pretrained("distilgpt2")
6
+ model = GPT2LMHeadModel.from_pretrained("distilgpt2")
 
7
 
8
  # Dados iniciais
9
  data = {
 
31
  {csv_data}
32
 
33
  """
34
+ input_ids = tokenizer.encode(query, return_tensors='pt')
35
+ max_length = input_ids.shape[1] + 100
36
+ generated_ids = model.generate(input_ids, max_length=max_length)
37
+ generated_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
38
+ return generated_text
39
+
40
 
41
  def ask_question(pergunta):
42
  resposta = get_gpt_response(pergunta)