Martico2432 committed on
Commit e3f2d37 · verified · 1 Parent(s): f40d789

Upload example_use.ipynb


A Jupyter notebook for testing the model

Files changed (1)
  1. example_use.ipynb +1 -0
example_use.ipynb ADDED
@@ -0,0 +1 @@
 
 
+ {"metadata":{"kernelspec":{"name":"python3","display_name":"Python 3","language":"python"},"language_info":{"name":"python","version":"3.11.13","mimetype":"text/x-python","codemirror_mode":{"name":"ipython","version":3},"pygments_lexer":"ipython3","nbconvert_exporter":"python","file_extension":".py"},"colab":{"machine_shape":"hm","gpuType":"T4"},"accelerator":"GPU","kaggle":{"accelerator":"nvidiaTeslaT4","dataSources":[],"dockerImageVersionId":31090,"isInternetEnabled":true,"language":"python","sourceType":"notebook","isGpuEnabled":true}},"nbformat_minor":4,"nbformat":4,"cells":[{"cell_type":"code","source":"!pip install transformers accelerate peft ipywidgets --quiet","metadata":{"colab_type":"code","trusted":true,"execution":{"iopub.status.busy":"2025-09-02T15:01:16.077038Z","iopub.execute_input":"2025-09-02T15:01:16.077698Z","iopub.status.idle":"2025-09-02T15:02:47.248403Z","shell.execute_reply.started":"2025-09-02T15:01:16.077672Z","shell.execute_reply":"2025-09-02T15:02:47.247630Z"}},"outputs":[{"name":"stdout","text":"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m363.4/363.4 MB\u001b[0m \u001b[31m2.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0mm0:02\u001b[0mm\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m13.8/13.8 MB\u001b[0m \u001b[31m9.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m0:00:01\u001b[0m00:01\u001b[0m\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m24.6/24.6 MB\u001b[0m \u001b[31m69.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m:00:01\u001b[0m00:01\u001b[0m\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m883.7/883.7 kB\u001b[0m \u001b[31m52.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m664.8/664.8 MB\u001b[0m \u001b[31m2.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m:00:01\u001b[0m00:01\u001b[0m\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m211.5/211.5 MB\u001b[0m \u001b[31m7.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m:00:01\u001b[0m00:01\u001b[0m\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m56.3/56.3 MB\u001b[0m \u001b[31m30.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m:00:01\u001b[0m00:01\u001b[0m\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m127.9/127.9 MB\u001b[0m \u001b[31m13.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m00:01\u001b[0m\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m207.5/207.5 MB\u001b[0m \u001b[31m6.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m:00:01\u001b[0m00:01\u001b[0m\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m21.1/21.1 MB\u001b[0m \u001b[31m18.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m:00:01\u001b[0m0:01\u001b[0m\n\u001b[?25h","output_type":"stream"}],"execution_count":5},{"cell_type":"markdown","source":"## Local Inference on GPU \nModel page: https://huggingface.co/Martico2432/mcollm1-2b\n\n⚠️ If the generated code snippets do not work, please open an issue on either the [model repo](https://huggingface.co/Martico2432/mcollm1-2b)\n\t\t\tand/or on [huggingface.js](https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/src/model-libraries-snippets.ts) πŸ™","metadata":{"colab_type":"text"}},{"cell_type":"code","source":"import 
Cell 4 — load the base model and apply the LoRA adapter:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

# Base model repo for gemma-2b-it
base_model_name = "google/gemma-2b-it"

# LoRA adapter repo or local path
lora_adapter_path = "Martico2432/mcollm1-2b"

# Load tokenizer and base model
tokenizer = AutoTokenizer.from_pretrained(base_model_name)

# Half precision when CUDA is available, full precision on CPU
device = "cuda" if torch.cuda.is_available() else "cpu"
base_model = AutoModelForCausalLM.from_pretrained(
    base_model_name,
    dtype=torch.float16 if device == "cuda" else torch.float32,  # `torch_dtype` is deprecated
)
base_model.to(device)

# Load the LoRA adapter on top of the base model
model = PeftModel.from_pretrained(base_model, lora_adapter_path)
model.to(device)
model.eval()
```

(Output: tokenizer and checkpoint-shard download progress bars, then the printed module tree.)
The module tree shows rank-16 LoRA matrices (dropout 0.05) wrapping q_proj, k_proj, v_proj, and o_proj in each of the 18 GemmaDecoderLayers; the embeddings, MLP blocks, and lm_head are untouched.

Cell 5 — generation helper that decodes only the newly generated tokens:

```python
def generate_response(prompt, max_new_tokens=256, temperature=0.7):
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=max_new_tokens,
            temperature=temperature,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,
            eos_token_id=tokenizer.eos_token_id,
        )
    # Slice off prompt tokens before decoding
    generated_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
    response = tokenizer.decode(generated_tokens, skip_special_tokens=True)
    return response.strip()
```
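As a quick single-turn smoke test (not part of the notebook), the helper can be called directly with Gemma's turn markers, which the chat cell below builds the same way; the question itself is just an illustration:

```python
prompt = "<start_of_turn>user\nIn one sentence, what does a LoRA adapter do?<end_of_turn>\n<start_of_turn>model\n"
print(generate_response(prompt, max_new_tokens=64))
```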
return_tensors=\"pt\").to(device)\n with torch.no_grad():\n outputs = model.generate(\n **inputs,\n max_length=inputs['input_ids'].shape[-1] + max_length,\n temperature=temperature,\n do_sample=True,\n pad_token_id=tokenizer.eos_token_id,\n eos_token_id=tokenizer.eos_token_id,\n )\n # Slice off prompt tokens before decoding\n generated_tokens = outputs[0][inputs['input_ids'].shape[-1]:]\n response = tokenizer.decode(generated_tokens, skip_special_tokens=True)\n return response.strip()","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-09-02T15:11:15.243174Z","iopub.execute_input":"2025-09-02T15:11:15.243905Z","iopub.status.idle":"2025-09-02T15:11:15.248535Z","shell.execute_reply.started":"2025-09-02T15:11:15.243882Z","shell.execute_reply":"2025-09-02T15:11:15.247872Z"}},"outputs":[],"execution_count":13},{"cell_type":"code","source":"import ipywidgets as widgets\nfrom IPython.display import display, Markdown, clear_output\n\nchat_history = []\n\ninput_box = widgets.Textarea(\n placeholder=\"Type your message here...\",\n layout=widgets.Layout(width=\"100%\", height=\"80px\")\n)\nsend_button = widgets.Button(description=\"Send\", button_style=\"success\")\noutput_area = widgets.Output(layout=widgets.Layout(border=\"1px solid black\", padding=\"10px\", height=\"400px\", overflow_y=\"auto\"))\n\ndef update_chat_display():\n with output_area:\n clear_output()\n for i, (user_msg, bot_msg) in enumerate(chat_history):\n display(Markdown(f\"**πŸ™‹β€β™‚οΈ User:** {user_msg}\"))\n display(Markdown(f\"**πŸ€– Model:** {bot_msg}\"))\n display(Markdown(\"---\"))\n\ndef on_send_clicked(b):\n user_input = input_box.value.strip()\n if not user_input:\n return\n chat_history.append((user_input, \"...\"))\n update_chat_display()\n input_box.value = \"\"\n \n # Build prompt with conversation history for context\n prompt = \"\"\n for u, r in chat_history[:-1]:\n prompt += f\"<start_of_turn>user\\n{u}<end_of_turn>\\n<start_of_turn>model\\n{r}<end_of_turn>\\n\"\n prompt += f\"<start_of_turn>user\\n{user_input}<end_of_turn>\\n<start_of_turn>model\\n\"\n \n response = generate_response(prompt)\n chat_history[-1] = (user_input, response)\n update_chat_display()\n\nsend_button.on_click(on_send_clicked)\n\ndisplay(input_box, send_button, output_area)","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-09-02T15:11:16.470162Z","iopub.execute_input":"2025-09-02T15:11:16.470731Z","iopub.status.idle":"2025-09-02T15:11:16.486551Z","shell.execute_reply.started":"2025-09-02T15:11:16.470708Z","shell.execute_reply":"2025-09-02T15:11:16.485698Z"}},"outputs":[{"output_type":"display_data","data":{"text/plain":"Textarea(value='', layout=Layout(height='80px', width='100%'), placeholder='Type your message here...')","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"4f9a5f10e6134be28324bb962f6736c0"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"Button(button_style='success', description='Send', style=ButtonStyle())","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"4e99692a5a35450b85a33d9514c24e39"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"Output(layout=Layout(border_bottom='1px solid black', border_left='1px solid black', border_right='1px solid b…","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"6c77afe27c4442169c06fc6711df6f28"}},"metadata":{}}],"execution_count":14}]}