Qwen3-4B balances accuracy with quick response times. Model files:
https://huggingface.co/Qwen/Qwen3-4B/tree/main
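The weights can be fetched ahead of time so that the local path used in the script below resolves. A minimal sketch, assuming `huggingface_hub` is installed and that you want the snapshot in the current directory (neither step is part of the original card):

```python
# Hypothetical download step (not in the original card): fetch the Qwen3-4B
# weights from the Hub into the current directory so that model_name = "./"
# in the script below points at a complete local snapshot.
from huggingface_hub import snapshot_download

snapshot_download(repo_id="Qwen/Qwen3-4B", local_dir="./")
```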
```python
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

model_name = "./"  # Local snapshot directory; use "Qwen/Qwen3-4B" to load from the Hub

# Load tokenizer and model
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",   # Picks float16 or bfloat16 depending on the GPU
    device_map="auto",    # Automatically places weights on GPU/CPU
    trust_remote_code=True
)
model.eval()

# Inference function
def ask_qwen(prompt: str, max_new_tokens=128):
    # "/no_think" and enable_thinking=False both disable Qwen3's step-by-step
    # thinking mode, trading reasoning traces for fast replies
    messages = [{"role": "user", "content": prompt + " /no_think"}]
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
        enable_thinking=False
    )
    inputs = tokenizer([text], return_tensors="pt").to(model.device)
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=max_new_tokens,
            temperature=0.7,
            top_p=0.8,
            top_k=20,
            min_p=0.0,
            do_sample=True
        )
    # Drop the prompt tokens; decode only the newly generated ones
    generated = outputs[0][inputs["input_ids"].shape[-1]:]
    return tokenizer.decode(generated, skip_special_tokens=True).strip()

# Continuous loop for user prompts
if __name__ == "__main__":
    print("Qwen3-4B Chat Running... Type 'exit' to quit.")
    while True:
        prompt = input("\nYou: ")
        if prompt.lower().strip() in ['exit', 'quit']:
            print("Exiting Qwen chat.")
            break
        try:
            response = ask_qwen(prompt)
            print(f"Qwen: {response}")
        except Exception as e:
            print(f"⚠️ Error: {e}")
```