Kaveh committed
Commit 6b614e8 · unverified · 1 Parent(s): e19df82

Update app.py

Files changed (1): app.py (+5 -6)
app.py CHANGED
@@ -11,10 +11,10 @@ from nltk.tokenize import sent_tokenize
 import torch
 
 # Set the cache path for Transformers
-cache_dir = '/tmp/transformers_cache'
-os.environ['TRANSFORMERS_CACHE'] = cache_dir
-os.environ['HF_HOME'] = cache_dir
-os.makedirs(cache_dir, exist_ok=True)
+#cache_dir = '/tmp/transformers_cache'
+#os.environ['TRANSFORMERS_CACHE'] = cache_dir
+#os.environ['HF_HOME'] = cache_dir
+#os.makedirs(cache_dir, exist_ok=True)
 
 # Set the nltk path
 try:
@@ -73,10 +73,9 @@ def load_persian_model():
     global model, tokenizer
     try:
         logger.info(f"Loading Persian model: {MODEL_NAME}")
-        tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, cache_dir=cache_dir)
+        tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
         model = AutoModelForSeq2SeqLM.from_pretrained(
             MODEL_NAME,
-            cache_dir=cache_dir,
             torch_dtype=torch.float32
         )
         model.eval()
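
The practical effect of this commit is that app.py no longer pins the Hugging Face cache to /tmp/transformers_cache; with the overrides commented out and the cache_dir arguments removed, Transformers resolves its cache from its defaults (HF_HOME, or ~/.cache/huggingface when unset). A minimal sketch of the resulting loading path, assuming a hypothetical model id since the real MODEL_NAME is defined outside the lines shown in this diff:

# Minimal sketch, assuming a hypothetical model id; the actual MODEL_NAME
# used by app.py is defined outside the lines shown in this diff.
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

MODEL_NAME = "username/persian-seq2seq-model"  # hypothetical placeholder

# With no cache_dir override, downloads go to the default Hugging Face
# cache location (HF_HOME, or ~/.cache/huggingface when unset).
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSeq2SeqLM.from_pretrained(
    MODEL_NAME,
    torch_dtype=torch.float32,
)
model.eval()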