from transformers import AutoModelForCausalLM, AutoTokenizer


def main():
    """Run a minimal interactive chat loop on top of a tiny causal LM.

    Reads lines from stdin, generates a continuation with distilgpt2, and
    prints it. Typing 'quit' (or EOF / Ctrl-C) exits the loop.
    """
    # NOTE: distilgpt2 is a small *base* language model, not instruction- or
    # chat-tuned — replies are free-form text continuations, not answers.
    model_name = "distilgpt2"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)

    print("Hello, I'm your baby AI! Type 'quit' to exit.")
    while True:
        try:
            user_input = input("You: ")
        except (EOFError, KeyboardInterrupt):
            # Ctrl-D / Ctrl-C: exit cleanly instead of a traceback.
            break
        if user_input.lower() == "quit":
            break
        if not user_input.strip():
            # Nothing to generate from an empty prompt.
            continue

        # tokenizer(...) (vs .encode) also returns an attention_mask, which
        # generate() wants explicitly when pad_token == eos_token.
        inputs = tokenizer(user_input + tokenizer.eos_token, return_tensors="pt")
        outputs = model.generate(
            **inputs,
            # max_new_tokens bounds only the reply; the original max_length=100
            # counted the prompt too, so long prompts truncated the reply.
            max_new_tokens=100,
            pad_token_id=tokenizer.eos_token_id,
        )
        # generate() returns prompt + continuation. Decode only the newly
        # generated tokens; the original decoded the whole sequence, so the
        # "AI" reply started by echoing the user's own prompt.
        prompt_len = inputs["input_ids"].shape[-1]
        response = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)
        print("AI:", response)


if __name__ == "__main__":
    main()