XA-vito committed
Commit 33c462c (verified)
Parent: c283661

Update app.py

Files changed (1):
  app.py  +3 -3
app.py CHANGED
@@ -5,7 +5,7 @@ import numpy as np
 import requests
 import torch
 import os
-from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
+from transformers import AutoModelForCausalLM, AutoTokenizer
 from langchain.memory import ConversationBufferMemory
 
 # Language model configuration
@@ -18,16 +18,16 @@ if not HF_TOKEN:
 
 print("🔄 Loading language model...")
 tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, token=HF_TOKEN)
-bnb_config = BitsAndBytesConfig(load_in_8bit=True)
+
 
 model = AutoModelForCausalLM.from_pretrained(
     MODEL_NAME,
     torch_dtype=torch.float16,
-    quantization_config=bnb_config,
     device_map="auto",
     token=HF_TOKEN
 )
 
+
 # Conversational memory
 memory = ConversationBufferMemory()
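
In short, the commit drops the bitsandbytes 8-bit path (BitsAndBytesConfig(load_in_8bit=True) and the matching quantization_config argument) and loads the weights directly in float16. Below is a minimal sketch of the loading code as it stands after this commit; MODEL_NAME and HF_TOKEN are defined earlier in app.py, so the placeholder values here are assumptions for illustration only.

import os
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from langchain.memory import ConversationBufferMemory

# Hypothetical placeholders; the real values are set earlier in app.py.
MODEL_NAME = "your-org/your-model"   # illustrative model id, not the repo's actual one
HF_TOKEN = os.getenv("HF_TOKEN")     # assumed to be read from the environment

print("🔄 Loading language model...")
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, token=HF_TOKEN)

# With the 8-bit quantization path removed, weights are loaded in float16 and
# placed across available devices by device_map="auto".
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    torch_dtype=torch.float16,
    device_map="auto",
    token=HF_TOKEN,
)

# Conversational memory
memory = ConversationBufferMemory()

The trade-off is memory: float16 weights take roughly two bytes per parameter versus about one byte in the 8-bit path, so the model needs roughly twice the GPU memory, in exchange for not requiring bitsandbytes at load time.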