Use phi4
main.py CHANGED
@@ -11,7 +11,7 @@ assert token is not None, "Hugging Face token is missing. Please set the 'HUGGIN
 
 
 #Load pre-trained tokenizer and model (Works)
-model_name = "
+model_name = "microsoft/Phi-4-mini-instruct"
 tokenizer = AutoTokenizer.from_pretrained(model_name, token=token)
 tokenizer.pad_token = tokenizer.eos_token
 model = AutoModelForCausalLM.from_pretrained(
@@ -23,7 +23,8 @@ model = AutoModelForCausalLM.from_pretrained(
 
 
 # Example usage: Generate text
-prompt = "
+prompt = "<|system|>You are a helpful assistant<|end|><|user|>What is the capital of france?<|end|><|assistant|>"
+
 
 inputs = tokenizer(prompt, return_tensors="pt", padding=True, return_attention_mask=True ).to(model.device)
 outputs = model.generate(
@@ -64,7 +65,7 @@ async def generate_text(item: Item):
 
 # logging.info("Response generated")
 
-inp =f"
+inp = f"<|system|>You are a helpful assistant<|end|><|user|> {item.prompt} <|end|><|assistant|>"
 inputs = tokenizer(inp, return_tensors="pt", padding=True, return_attention_mask=True ).to(model.device)
 
 
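
For readers following the change, here is a minimal, self-contained sketch of the load-and-generate flow that main.py builds up to after this commit. The HF_TOKEN variable name, the max_new_tokens value, the torch_dtype choice, and the use of tokenizer.apply_chat_template (instead of hand-assembling the <|system|>...<|end|><|assistant|> string as in the diff) are illustrative assumptions, not part of the committed code.

# Minimal sketch of the flow after this commit (assumptions noted above).
import os

from transformers import AutoModelForCausalLM, AutoTokenizer

token = os.getenv("HF_TOKEN")  # hypothetical env var name; the Space reads its own variable
model_name = "microsoft/Phi-4-mini-instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name, token=token)
tokenizer.pad_token = tokenizer.eos_token
model = AutoModelForCausalLM.from_pretrained(model_name, token=token, torch_dtype="auto")

# Build the Phi-4 chat markup from the tokenizer's own template instead of
# concatenating <|system|>/<|user|>/<|assistant|> tags by hand.
messages = [
    {"role": "system", "content": "You are a helpful assistant"},
    {"role": "user", "content": "What is the capital of France?"},
]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

inputs = tokenizer(prompt, return_tensors="pt", padding=True, return_attention_mask=True).to(model.device)
outputs = model.generate(**inputs, max_new_tokens=128, pad_token_id=tokenizer.eos_token_id)

# Strip the prompt tokens so only the newly generated answer is printed.
answer = tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True)
print(answer)

Using the tokenizer's chat template keeps the special tokens in sync with the model's expected format, so the markup does not have to be updated by hand if the model (and therefore its template) changes again.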