diff --git "a/Llama3_1_(8B)_Alpaca.ipynb" "b/Llama3_1_(8B)_Alpaca.ipynb"
new file mode 100644--- /dev/null
+++ "b/Llama3_1_(8B)_Alpaca.ipynb"
@@ -0,0 +1,3651 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "graLDQDUjCMy"
+ },
+ "source": [
+ "To run this, press \"*Runtime*\" and press \"*Run all*\" on a **free** Tesla T4 Google Colab instance!\n",
+ "
\n",
+ "

\n",
+ "

\n",
+ "

Join Discord if you need help + β
Star us on Github β\n",
+ "
\n",
+ "\n",
+ "To install Unsloth on your own computer, follow the installation instructions on our Github page [here](https://docs.unsloth.ai/get-started/installing-+-updating).\n",
+ "\n",
+ "You will learn how to do [data prep](#Data), how to [train](#Train), how to [run the model](#Inference), & [how to save it](#Save)\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "XaKl-AiijCM1"
+ },
+ "source": [
+ "### News"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "pRlV_7u9jCM2"
+ },
+ "source": [
+ "**Read our [Gemma 3 blog](https://unsloth.ai/blog/gemma3) for what's new in Unsloth and our [Reasoning blog](https://unsloth.ai/blog/r1-reasoning) on how to train reasoning models.**\n",
+ "\n",
+ "Visit our docs for all our [model uploads](https://docs.unsloth.ai/get-started/all-our-models) and [notebooks](https://docs.unsloth.ai/get-started/unsloth-notebooks).\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "HoT-XZ67jCM3"
+ },
+ "source": [
+ "### Installation"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "g2unDd0HjCM4"
+ },
+ "outputs": [],
+ "source": [
+ "# %%capture\n",
+ "# import os\n",
+ "# if \"COLAB_\" not in \"\".join(os.environ.keys()):\n",
+ "# !pip install unsloth\n",
+ "# else:\n",
+ "# # Do this only in Colab notebooks! Otherwise use pip install unsloth\n",
+ "# !pip install --no-deps bitsandbytes accelerate xformers==0.0.29.post3 peft trl==0.15.2 triton cut_cross_entropy unsloth_zoo\n",
+ "# !pip install sentencepiece protobuf datasets huggingface_hub hf_transfer\n",
+ "# !pip install --no-deps unsloth"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "RGNyjArHjCM5"
+ },
+ "source": [
+ "### Unsloth"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 333,
+ "referenced_widgets": [
+ "6358f3dd281e450e9d862fb819431e94",
+ "d3c86a31c63c4bcc8044d869c307d650",
+ "0498617989a345f2833f9191fc8d0c0b",
+ "5d30d40769ad435c8e5367254b3ce416",
+ "1c7db46fc15f4b0a869d2541d4fdb3e9",
+ "fcd62c3342a7435c8fa582f46e25a7fe",
+ "cd549bc1372e429cb466cf91c4cb14f6",
+ "9208c1d37e7b49659d386896a86936ec",
+ "bec4bd89dc6d472db216b376055d486c",
+ "8bf81c3194ba4a70b13138d9dc5bd32c",
+ "5a76aeeee3ca4fbd8e5fc50fa7febce0",
+ "047a514f9797432098ea1ca08791f17d",
+ "31f86416918f4369836a15e411c76004",
+ "e6ffc8ffcdd242a18444c113c65afc3a",
+ "c971bc042e67481d88ea618946c31557",
+ "4648982d046046e4af427d6effa7552f",
+ "874a8e03473c498e913bfeb2cae2284f",
+ "6649c127f38a4a878446c6f7cf155057",
+ "57a7d8220c86425592ae87c157b56485",
+ "147e5a626c7848c1adf46cf559f858bd",
+ "19f9fe73153b46bc960e8e7e60a705ee",
+ "226ae004bd1a486da39d316e0e047750",
+ "da1fdb9955774c95a33bd3ba03a5fc5b",
+ "0d6542b46c464d8b9013ccf0a28a9572",
+ "162cbc90157a42bd86c40cd76a75d031",
+ "f383dc65ca65440eb77740c383fb2c4d",
+ "4dd3cf69544d44c785485cde9e1cbadf",
+ "07aabd90699847fe8ed89aa98644d29b",
+ "96c889ea82384c36baaff99470cde678",
+ "8afea3489b2b4a278279dc5a05d1b1b2",
+ "4c1991173ffd46a5af8f3eff48e00755",
+ "b34006bb60fe47868586bc33fabde306",
+ "327a0ab3d5f748c1ae6b93d851ef1f5f",
+ "3b4de26403a24f8f8cad26c594d0c485",
+ "06a6ff819b9441a4ad436c75e7837c86",
+ "7b69ea48666d4fb3a81bbbf6aaf0d8a2",
+ "8f1a3e026f3c46afb57db907f2940a8d",
+ "d681fbb2c17c4bd09a3a794daf57f4b2",
+ "f98c013f6f924237a99ec520f35ebcde",
+ "c48fb808314a436080c1cde975e2e1cd",
+ "86f733ba965b4237889cd89d28b63d61",
+ "faa00b65663441c880758c5abb09a519",
+ "052ea223c8f740249ddbde6ac90df588",
+ "d0ad350985de47f09d98f57dd98a9b00",
+ "d9387aa9d6384146a03f63f52839ef3a",
+ "e5fc60f9587e4ac1b38b520e26575220",
+ "5420518b6b0e47bd8df8e9023325c74d",
+ "95bfadbb334246b3b8c6fbf004c513f1",
+ "06cabd4f473e47c0b72dea34a3d53912",
+ "280d9b3120c540f98c7e00065e7c8750",
+ "df9e42625d714c5da22fa90ed1fb5491",
+ "7cc5e6af64c64127bd9df959dc6a3345",
+ "515c241f259b498796088d2261099ff1",
+ "c34232310c05418aa06aac3a9127319c",
+ "ec38a625ae68438c99066889f866336d"
+ ]
+ },
+ "id": "QmUBVEnvCDJv",
+ "outputId": "feb6263a-e44a-4d43-bea8-a1ff28569b37"
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "π¦₯ Unsloth: Will patch your computer to enable 2x faster free finetuning.\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "/home/darth/.pyenv/versions/unsloth/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
+ " from .autonotebook import tqdm as notebook_tqdm\n",
+ "WARNING[XFORMERS]: xFormers can't load C++/CUDA extensions. xFormers was built for:\n",
+ " PyTorch 2.6.0+cu124 with CUDA 1204 (you have 2.7.0+cu126)\n",
+ " Python 3.10.16 (you have 3.10.12)\n",
+ " Please reinstall xformers (see https://github.com/facebookresearch/xformers#installing-xformers)\n",
+ " Memory-efficient attention, SwiGLU, sparse and more won't be available.\n",
+ " Set XFORMERS_MORE_DETAILS=1 for more details\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Unsloth: Failed to patch SmolVLMForConditionalGeneration forward function.\n",
+ "π¦₯ Unsloth Zoo will now patch everything to make training faster!\n",
+ "==((====))== Unsloth 2025.4.1: Fast Llama patching. Transformers: 4.51.3.\n",
+ " \\\\ /| NVIDIA GeForce RTX 3060. Num GPUs = 1. Max memory: 11.633 GB. Platform: Linux.\n",
+ "O^O/ \\_/ \\ Torch: 2.7.0+cu126. CUDA: 8.6. CUDA Toolkit: 12.6. Triton: 3.3.0\n",
+ "\\ / Bfloat16 = TRUE. FA [Xformers = None. FA2 = False]\n",
+ " \"-____-\" Free license: http://github.com/unslothai/unsloth\n",
+ "Unsloth: Fast downloading is enabled - ignore downloading bars which are red colored!\n"
+ ]
+ }
+ ],
+ "source": [
+ "from unsloth import FastLanguageModel\n",
+ "import torch\n",
+ "max_seq_length = 4096 # Choose any! We auto support RoPE Scaling internally!\n",
+ "dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+\n",
+ "load_in_4bit = True # Use 4bit quantization to reduce memory usage. Can be False.\n",
+ "\n",
+ "# 4bit pre quantized models we support for 4x faster downloading + no OOMs.\n",
+ "fourbit_models = [\n",
+ " \"unsloth/Meta-Llama-3.1-8B-bnb-4bit\", # Llama-3.1 15 trillion tokens model 2x faster!\n",
+ " \"unsloth/Meta-Llama-3.1-8B-Instruct-bnb-4bit\",\n",
+ " \"unsloth/Meta-Llama-3.1-70B-bnb-4bit\",\n",
+ " \"unsloth/Meta-Llama-3.1-405B-bnb-4bit\", # We also uploaded 4bit for 405b!\n",
+ " \"unsloth/Mistral-Nemo-Base-2407-bnb-4bit\", # New Mistral 12b 2x faster!\n",
+ " \"unsloth/Mistral-Nemo-Instruct-2407-bnb-4bit\",\n",
+ " \"unsloth/mistral-7b-v0.3-bnb-4bit\", # Mistral v3 2x faster!\n",
+ " \"unsloth/mistral-7b-instruct-v0.3-bnb-4bit\",\n",
+ " \"unsloth/Phi-3.5-mini-instruct\", # Phi-3.5 2x faster!\n",
+ " \"unsloth/Phi-3-medium-4k-instruct\",\n",
+ " \"unsloth/gemma-2-9b-bnb-4bit\",\n",
+ " \"unsloth/gemma-2-27b-bnb-4bit\", # Gemma 2x faster!\n",
+ "] # More models at https://huggingface.co/unsloth\n",
+ "\n",
+ "model, tokenizer = FastLanguageModel.from_pretrained(\n",
+ " model_name = \"unsloth/Meta-Llama-3.1-8B\",\n",
+ " max_seq_length = max_seq_length,\n",
+ " dtype = dtype,\n",
+ " load_in_4bit = load_in_4bit,\n",
+ " # token = \"hf_...\", # use one if using gated models like meta-llama/Llama-2-7b-hf\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "SXd9bTZd1aaL"
+ },
+ "source": [
+ "We now add LoRA adapters so we only need to update 1 to 10% of all parameters!"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "count 162653.000000\n",
+ "mean 246.336317\n",
+ "std 388.429023\n",
+ "min 11.000000\n",
+ "25% 71.000000\n",
+ "50% 150.000000\n",
+ "75% 289.000000\n",
+ "max 21548.000000\n",
+ "Name: token_length, dtype: float64\n"
+ ]
+ }
+ ],
+ "source": [
+ "# from transformers import AutoTokenizer\n",
+ "# import pandas as pd\n",
+ "\n",
+ "# # Load your dataset\n",
+ "# df = pd.read_csv(\"all_combined_data.csv\")\n",
+ "\n",
+ "\n",
+ "# # Load tokenizer\n",
+ "# tokenizer = AutoTokenizer.from_pretrained(\"meta-llama/Llama-3.1-8B\")\n",
+ "\n",
+ "# # Calculate token lengths\n",
+ "# df['token_length'] = df['Input'].apply(lambda x: len(tokenizer.encode(x)))\n",
+ "\n",
+ "# # Analyze token length distribution \n",
+ "# token_length_stats = df['token_length'].describe()\n",
+ "# print(token_length_stats)\n"
+ ]
+ },
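+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The (commented-out) analysis above motivates the `max_seq_length = 4096` choice: the mean input is ~246 tokens and the 75th percentile is 289, so 4096 covers nearly every example, while the rare outliers (up to 21,548 tokens) are simply truncated."
+   ]
+  },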
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "6bZsfBuZDeCL",
+ "outputId": "133f4cfb-60fd-496b-c53d-3a9b51811004"
+ },
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Unsloth 2025.4.1 patched 32 layers with 32 QKV layers, 32 O layers and 32 MLP layers.\n"
+ ]
+ }
+ ],
+ "source": [
+ "model = FastLanguageModel.get_peft_model(\n",
+ " model,\n",
+ " r = 16, # Choose any number > 0 ! Suggested 8, 16, 32, 64, 128\n",
+ " target_modules = [\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\",\n",
+ " \"gate_proj\", \"up_proj\", \"down_proj\",],\n",
+ " lora_alpha = 16,\n",
+ " lora_dropout = 0, # Supports any, but = 0 is optimized\n",
+ " bias = \"none\", # Supports any, but = \"none\" is optimized\n",
+ " # [NEW] \"unsloth\" uses 30% less VRAM, fits 2x larger batch sizes!\n",
+ " use_gradient_checkpointing = \"unsloth\", # True or \"unsloth\" for very long context\n",
+ " random_state = 3407,\n",
+ " use_rslora = False, # We support rank stabilized LoRA\n",
+ " loftq_config = None, # And LoftQ\n",
+ ")"
+ ]
+ },
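+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As an optional sanity check (not part of the original run), PEFT models expose `print_trainable_parameters()`; the training log further below reports 41,943,040 of 8B parameters (0.52%) as trainable, and this should agree."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Show how many parameters the LoRA adapters leave trainable.\n",
+    "model.print_trainable_parameters()"
+   ]
+  },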
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "vITh0KVJ10qX"
+ },
+ "source": [
+ "\n",
+ "### Data Prep\n",
+ "We now use the Alpaca dataset from [yahma](https://huggingface.co/datasets/yahma/alpaca-cleaned), which is a filtered version of 52K of the original [Alpaca dataset](https://crfm.stanford.edu/2023/03/13/alpaca.html). You can replace this code section with your own data prep.\n",
+ "\n",
+ "**[NOTE]** To train only on completions (ignoring the user's input) read TRL's docs [here](https://huggingface.co/docs/trl/sft_trainer#train-on-completions-only).\n",
+ "\n",
+ "**[NOTE]** Remember to add the **EOS_TOKEN** to the tokenized output!! Otherwise you'll get infinite generations!\n",
+ "\n",
+ "If you want to use the `llama-3` template for ShareGPT datasets, try our conversational [notebook](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3_(8B)-Alpaca.ipynb)\n",
+ "\n",
+ "For text completions like novel writing, try this [notebook](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Mistral_(7B)-Text_Completion.ipynb)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "vkB_i8OIqRn5",
+ "outputId": "b609590e-25bf-4fab-c187-7e60933aa1f9"
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "--2025-04-28 02:49:32-- https://huggingface.co/datasets/skywalker290/SE/resolve/main/all_combined_data.csv\n",
+ "Resolving huggingface.co (huggingface.co)... 54.192.171.56, 54.192.171.40, 54.192.171.38, ...\n",
+ "Connecting to huggingface.co (huggingface.co)|54.192.171.56|:443... connected.\n",
+ "HTTP request sent, awaiting response... 302 Found\n",
+ "Location: https://cdn-lfs-us-1.hf.co/repos/d6/94/d6944363a9254f1af670b6f4e8862c0627a2f69d48d4bebfa02bae242eef7e2e/29cfef6c46dcd2826d4ca866c576fb5f806be5fa79d292c6f5b9f7542c117053?response-content-disposition=inline%3B+filename*%3DUTF-8%27%27all_combined_data.csv%3B+filename%3D%22all_combined_data.csv%22%3B&response-content-type=text%2Fcsv&Expires=1745792373&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTc0NTc5MjM3M319LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy11cy0xLmhmLmNvL3JlcG9zL2Q2Lzk0L2Q2OTQ0MzYzYTkyNTRmMWFmNjcwYjZmNGU4ODYyYzA2MjdhMmY2OWQ0OGQ0YmViZmEwMmJhZTI0MmVlZjdlMmUvMjljZmVmNmM0NmRjZDI4MjZkNGNhODY2YzU3NmZiNWY4MDZiZTVmYTc5ZDI5MmM2ZjViOWY3NTQyYzExNzA1Mz9yZXNwb25zZS1jb250ZW50LWRpc3Bvc2l0aW9uPSomcmVzcG9uc2UtY29udGVudC10eXBlPSoifV19&Signature=WH3P%7EM9D9xkhNqAGvyekLY66RG%7EHWxhLi-3ptFld7YlbYlP3galQ0jh0vWo0dXBsE6X90n0U3OTxS9Q%7Es1SRSD3Oo0ZfH%7EkhwVMxWrDNUAFQM50k47c5Ac2lcgexgWTg6ih1dsDA-V3VfYjE9RWXJIeoXLgV1gD8%7EluwEBEpqPTXM7JuzHp7odZRf0IBMtcoUnAmqV2YK6lEohJVKsigflRbug3wq7GhogP1euHJ%7El1qqcr7C4Mp5cqt6XdAb9m090dSK31GPkF%7Eb1gCLjuU0EyJeeMwh1WZQwkooB8PubtIKuRnTlm6oVEudBb8gX9QHf6e5DFWwXgJwiv7LPH6zA__&Key-Pair-Id=K24J24Z295AEI9 [following]\n",
+ "--2025-04-28 02:49:33-- https://cdn-lfs-us-1.hf.co/repos/d6/94/d6944363a9254f1af670b6f4e8862c0627a2f69d48d4bebfa02bae242eef7e2e/29cfef6c46dcd2826d4ca866c576fb5f806be5fa79d292c6f5b9f7542c117053?response-content-disposition=inline%3B+filename*%3DUTF-8%27%27all_combined_data.csv%3B+filename%3D%22all_combined_data.csv%22%3B&response-content-type=text%2Fcsv&Expires=1745792373&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTc0NTc5MjM3M319LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy11cy0xLmhmLmNvL3JlcG9zL2Q2Lzk0L2Q2OTQ0MzYzYTkyNTRmMWFmNjcwYjZmNGU4ODYyYzA2MjdhMmY2OWQ0OGQ0YmViZmEwMmJhZTI0MmVlZjdlMmUvMjljZmVmNmM0NmRjZDI4MjZkNGNhODY2YzU3NmZiNWY4MDZiZTVmYTc5ZDI5MmM2ZjViOWY3NTQyYzExNzA1Mz9yZXNwb25zZS1jb250ZW50LWRpc3Bvc2l0aW9uPSomcmVzcG9uc2UtY29udGVudC10eXBlPSoifV19&Signature=WH3P%7EM9D9xkhNqAGvyekLY66RG%7EHWxhLi-3ptFld7YlbYlP3galQ0jh0vWo0dXBsE6X90n0U3OTxS9Q%7Es1SRSD3Oo0ZfH%7EkhwVMxWrDNUAFQM50k47c5Ac2lcgexgWTg6ih1dsDA-V3VfYjE9RWXJIeoXLgV1gD8%7EluwEBEpqPTXM7JuzHp7odZRf0IBMtcoUnAmqV2YK6lEohJVKsigflRbug3wq7GhogP1euHJ%7El1qqcr7C4Mp5cqt6XdAb9m090dSK31GPkF%7Eb1gCLjuU0EyJeeMwh1WZQwkooB8PubtIKuRnTlm6oVEudBb8gX9QHf6e5DFWwXgJwiv7LPH6zA__&Key-Pair-Id=K24J24Z295AEI9\n",
+ "Resolving cdn-lfs-us-1.hf.co (cdn-lfs-us-1.hf.co)... 18.164.217.50, 18.164.217.71, 18.164.217.92, ...\n",
+ "Connecting to cdn-lfs-us-1.hf.co (cdn-lfs-us-1.hf.co)|18.164.217.50|:443... connected.\n",
+ "HTTP request sent, awaiting response... 200 OK\n",
+ "Length: 167805959 (160M) [text/csv]\n",
+ "Saving to: βall_combined_data.csv.1β\n",
+ "\n",
+ "all_combined_data.c 100%[===================>] 160.03M 11.2MB/s in 14s \n",
+ "\n",
+ "2025-04-28 02:49:47 (11.0 MB/s) - βall_combined_data.csv.1β saved [167805959/167805959]\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "!wget https://huggingface.co/datasets/skywalker290/SE/resolve/main/all_combined_data.csv"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {
+ "id": "3Z6dFIXVqd2a"
+ },
+ "outputs": [],
+ "source": [
+ "import pandas as pd\n",
+ "dataset = pd.read_csv('all_combined_data.csv')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {
+ "id": "lTBBZTVoqpMQ"
+ },
+ "outputs": [],
+ "source": [
+ "import ast\n",
+ "\n",
+ "def clean_tags(tag_string):\n",
+ " # Convert the string to a list\n",
+ " tag_list = ast.literal_eval(tag_string)\n",
+ " # Join the list into a comma-separated string\n",
+ " return ', '.join(tag_list)\n",
+ "\n",
+ "# Apply the function to the 'Tags' column\n",
+ "dataset['Tags'] = dataset['Tags'].apply(clean_tags)"
+ ]
+ },
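+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "A quick sanity check with made-up values (not from the dataset): `clean_tags` should flatten a stringified list and pass an already-cleaned string through unchanged."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Illustrative inputs only.\n",
+    "print(clean_tags(\"['python', 'pandas']\"))  # -> python, pandas\n",
+    "print(clean_tags('python, pandas'))         # unchanged -> python, pandas"
+   ]
+  },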
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "dataset.to_csv('all_combined_data.csv', index=False)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {
+ "id": "XSi1mcj8qaBu"
+ },
+ "outputs": [],
+ "source": [
+ "\n",
+ "\n",
+ "def instruction(text):\n",
+ " instruct = f\"\"\"\n",
+ " Task:\n",
+ " Given a Stack Exchange question (title and body), generate 3 to 10 relevant tags that best describe the topics, technologies, or concepts involved.\n",
+ "\n",
+ " Guidelines:\n",
+ " - Tags must be lowercase.\n",
+ " - Tags must be comma-separated (no extra spaces after commas).\n",
+ " - Tags should be focused, concise, and relevant.\n",
+ " - Only use existing or plausible Stack Exchange tags (no imaginary tags).\n",
+ " - Do not invent new tags.\n",
+ " - Do not repeat tags.\n",
+ " - Always return at least 3 tags.\n",
+ "\n",
+ " Question:\n",
+ " {text}\n",
+ "\n",
+ " Example:\n",
+ " Question:\n",
+ " Title: How can I merge two dictionaries in Python?\n",
+ " Body: I have two dictionaries in Python and I want to combine them into a single dictionary. What is the best way to do this efficiently in Python 3?\n",
+ "\n",
+ " Your Output (tags):\n",
+ " python, dictionary, merge, python-3.x\n",
+ " \"\"\"\n",
+ " return instruct;\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "def convert_to_conversation(sample):\n",
+ " conversation = [\n",
+ " {\n",
+ " \"role\": \"user\",\n",
+ " \"content\": [\n",
+ " {\"type\": \"text\", \"text\": instruction(sample['Input'])}\n",
+ " ],\n",
+ " },\n",
+ " {\n",
+ " \"role\": \"assistant\",\n",
+ " \"content\": [{\"type\": \"text\", \"text\": sample['Tags']}],\n",
+ " },\n",
+ " ]\n",
+ " return {\"messages\": conversation}\n",
+ "\n",
+ "\n",
+ "pass\n",
+ "\n",
+ "converted_dataset = []\n",
+ "for i in range(len(dataset)):\n",
+ " sample = {'Input':dataset['Input'][i],'Tags':clean_tags(dataset['Tags'][i])}\n",
+ " converted_dataset.append(convert_to_conversation(sample))\n",
+ "\n",
+ "\n",
+ "# converted_dataset = [convert_to_conversation(sample) for sample in dataset]"
+ ]
+ },
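+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Peek at one converted record to confirm the chat structure:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "converted_dataset[0]"
+   ]
+  },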
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {
+ "id": "LjY75GoYUCB8"
+ },
+ "outputs": [],
+ "source": [
+ "alpaca_prompt = \"\"\"Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n",
+ "\n",
+ "### Instruction:\n",
+ "{}\n",
+ "\n",
+ "### Input:\n",
+ "{}\n",
+ "\n",
+ "### Response:\n",
+ "{}\"\"\"\n",
+ "\n",
+ "# instruction = \"\"\"\n",
+ "# ### Task:\n",
+ "# Given a Stack Exchange question (title and body), generate 3 to 7 relevant tags that best describe the topics, technologies, or concepts involved.\n",
+ "\n",
+ "# ### Input:\n",
+ "# {Input}\n",
+ "\n",
+ "# ### Response:\n",
+ "# {output}\n",
+ "# \"\"\"\n",
+ "\n",
+ "# instruction = f\"\"\"\n",
+ "# ### Task:\n",
+ "# Given a Stack Exchange question (title and body), generate 3 to 7 relevant tags that best describe the topics, technologies, or concepts involved.\n",
+ "\n",
+ "# ### Guidelines:\n",
+ "# - Tags must be lowercase.\n",
+ "# - Tags must be comma-separated (no extra spaces after commas).\n",
+ "# - Tags should be focused, concise, and relevant.\n",
+ "# - Only use existing or plausible Stack Exchange tags (no imaginary tags).\n",
+ "# - Do not invent new tags.\n",
+ "# - Do not repeat tags.\n",
+ "# - Always return at least 3 tags.\n",
+ "\n",
+ "# ### Input:\n",
+ "# {text}\n",
+ "\n",
+ "# ### Response:\n",
+ "# {tags}\n",
+ "# \"\"\"\n",
+ "\n",
+ "import ast\n",
+ "\n",
+ "def clean_tags(tag_string):\n",
+ " # Convert the string to a list\n",
+ " tag_list = ast.literal_eval(tag_string)\n",
+ " # Join the list into a comma-separated string\n",
+ " return ', '.join(tag_list)\n",
+ "\n",
+ "Task = \"Given a Stack Exchange question (title and body), generate 3 to 7 relevant tags that best describe the topics, technologies, or concepts involved.\"\n",
+ "\n",
+ "EOS_TOKEN = tokenizer.eos_token # Must add EOS_TOKEN\n",
+ "\n",
+ "def formatting_prompts_func(row):\n",
+ " Input = row[\"Input\"]\n",
+ " output = row[\"Tags\"]\n",
+ "\n",
+ " # instruction = f\"\"\"\n",
+ " # ### Task:\n",
+ " # Given a Stack Exchange question (title and body), generate 3 to 7 relevant tags that best describe the topics, technologies, or concepts involved.\n",
+ "\n",
+ " # ### Input:\n",
+ " # {Input}\n",
+ "\n",
+ " # ### Response:\n",
+ " # {output}\n",
+ " # \"\"\"\n",
+ "\n",
+ " text = alpaca_prompt.format(Task,Input,output) + EOS_TOKEN\n",
+ " return text\n",
+ "\n",
+ "import pandas as pd\n",
+ "dataset = pd.read_csv('all_combined_data.csv')\n",
+ "dataset['Tags'] = dataset['Tags'].apply(clean_tags)\n",
+ "\n",
+ "# Apply row-wise\n",
+ "dataset[\"text\"] = dataset.apply(formatting_prompts_func, axis=1)\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "\"Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\\n\\n### Instruction:\\nGiven a Stack Exchange question (title and body), generate 3 to 7 relevant tags that best describe the topics, technologies, or concepts involved.\\n\\n### Input:\\nTitle: What is the effective differencial effective of this circuit Body: I'm trying to work out, in general terms, the effective capacitance of this circuit (see diagram: http://i.stack.imgur.com/BS85b.png). \\n\\nWhat is the effective capacitance of this circuit and will the ...\\r\\n \\n\\n### Response:\\nelectronics<|end_of_text|>\""
+ ]
+ },
+ "execution_count": 4,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "dataset[\"text\"][0]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "idAEIeSQ3xdS"
+ },
+ "source": [
+ "\n",
+ "### Train the model\n",
+ "Now let's use Huggingface TRL's `SFTTrainer`! More docs here: [TRL SFT docs](https://huggingface.co/docs/trl/sft_trainer). We do 60 steps to speed things up, but you can set `num_train_epochs=1` for a full run, and turn off `max_steps=None`. We also support TRL's `DPOTrainer`!"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 49,
+ "referenced_widgets": [
+ "3bf71d4085424a89b9ccf8372c94d887",
+ "a4e3dfc277534401a831ca51f8a8c838",
+ "c8501d4f4a80481d90511872b84e1225",
+ "2772654a3eb0467cb3aba0b6e812a134",
+ "c0975cebc67743739b6c760c851c324f",
+ "a6e1e666100240be89148555d199073d",
+ "98138a6fc06644998373e4034f821ec3",
+ "52509ddf39fe49c7a24ea28828e10ef9",
+ "8147cbf8ef4b4b64a1420ad01bdf6ade",
+ "cf5d0dd9220641edb3a230e246d523f5",
+ "0bff2a1b0f724e16aad83b74dc82769e"
+ ]
+ },
+ "id": "95_Nn-89DhsL",
+ "outputId": "155b6aa2-9d5c-40f5-a2f2-6cbe58aa30df"
+ },
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Map (num_proc=2): 100%|ββββββββββ| 130122/130122 [01:26<00:00, 1507.42 examples/s]\n",
+ "Map (num_proc=2): 100%|ββββββββββ| 32531/32531 [00:21<00:00, 1524.63 examples/s]\n"
+ ]
+ }
+ ],
+ "source": [
+ "from datasets import Dataset, DatasetDict\n",
+ "from trl import SFTTrainer\n",
+ "from transformers import TrainingArguments\n",
+ "from sklearn.metrics import accuracy_score, hamming_loss\n",
+ "from unsloth import is_bfloat16_supported\n",
+ "from sklearn.model_selection import train_test_split\n",
+ "\n",
+ "# Function for sequential split\n",
+ "\n",
+ "\n",
+ "\n",
+ "# Split the dataset\n",
+ "train_df, test_df = train_test_split(dataset, test_size=0.2, random_state=42)\n",
+ "\n",
+ "# 2. Convert to Huggingface Dataset\n",
+ "train_dataset = Dataset.from_pandas(train_df)\n",
+ "eval_dataset = Dataset.from_pandas(test_df)\n",
+ "\n",
+ "# Tokenize function\n",
+ "def tokenize_function(example):\n",
+ " return tokenizer(\n",
+ " example[\"text\"], \n",
+ " padding=\"max_length\", # <-- important\n",
+ " truncation=True, # <-- important\n",
+ " max_length=max_seq_length,\n",
+ " )\n",
+ "\n",
+ "# Apply tokenization to datasets\n",
+ "train_dataset = train_dataset.map(tokenize_function, batched=True, num_proc=2, remove_columns=train_dataset.column_names)\n",
+ "eval_dataset = eval_dataset.map(tokenize_function, batched=True, num_proc=2, remove_columns=eval_dataset.column_names)\n",
+ "\n",
+ "\n",
+ "\n",
+ "# Compute metrics\n",
+ "def compute_metrics(eval_pred):\n",
+ " predictions, labels = eval_pred\n",
+ " if predictions.ndim > 1:\n",
+ " preds = predictions.argmax(axis=-1)\n",
+ " else:\n",
+ " preds = predictions\n",
+ " return {\n",
+ " \"accuracy\": accuracy_score(labels, preds),\n",
+ " \"hamming_loss\": hamming_loss(labels, preds)\n",
+ " }\n",
+ "\n",
+ "# Trainer\n",
+ "trainer = SFTTrainer(\n",
+ " model = model,\n",
+ " tokenizer = tokenizer,\n",
+ " train_dataset = train_dataset,\n",
+ " eval_dataset = eval_dataset,\n",
+ " dataset_text_field = \"text\",\n",
+ " max_seq_length = max_seq_length,\n",
+ " dataset_num_proc = 2,\n",
+ " packing = False,\n",
+ " args = TrainingArguments(\n",
+ " output_dir = \"outputs\",\n",
+ " per_device_train_batch_size = 2,\n",
+ " gradient_accumulation_steps = 4,\n",
+ " # evaluation_strategy = \"steps\",\n",
+ " eval_steps = 10,\n",
+ " warmup_steps = 5,\n",
+ " max_steps = 60,\n",
+ " learning_rate = 2e-4,\n",
+ " fp16 = not is_bfloat16_supported(),\n",
+ " bf16 = is_bfloat16_supported(),\n",
+ " logging_steps = 1,\n",
+ " optim = \"adamw_8bit\",\n",
+ " weight_decay = 0.01,\n",
+ " lr_scheduler_type = \"linear\",\n",
+ " seed = 3407,\n",
+ " report_to = \"none\",\n",
+ " remove_unused_columns=False, # This line is important\n",
+ "\n",
+ " ),\n",
+ " compute_metrics = compute_metrics,\n",
+ ")\n"
+ ]
+ },
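+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Note that `eval_steps = 10` has no effect while the evaluation strategy stays commented out above. A minimal sketch of enabling periodic evaluation, assuming this transformers version accepts `eval_strategy` (older releases named it `evaluation_strategy`); the other values mirror the trainer cell above:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Sketch: rebuild TrainingArguments with periodic evaluation enabled.\n",
+    "# Evaluation materialises logits for compute_metrics, which adds memory pressure.\n",
+    "from transformers import TrainingArguments\n",
+    "\n",
+    "args_with_eval = TrainingArguments(\n",
+    "    output_dir = \"outputs\",\n",
+    "    per_device_train_batch_size = 2,\n",
+    "    gradient_accumulation_steps = 4,\n",
+    "    eval_strategy = \"steps\",  # makes eval_steps actually fire\n",
+    "    eval_steps = 10,\n",
+    "    warmup_steps = 5,\n",
+    "    max_steps = 60,\n",
+    "    learning_rate = 2e-4,\n",
+    "    logging_steps = 1,\n",
+    "    optim = \"adamw_8bit\",\n",
+    "    weight_decay = 0.01,\n",
+    "    lr_scheduler_type = \"linear\",\n",
+    "    seed = 3407,\n",
+    "    report_to = \"none\",\n",
+    "    remove_unused_columns = False,\n",
+    ")"
+   ]
+  },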
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {
+ "cellView": "form",
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "2ejIt2xSNKKp",
+ "outputId": "dee483c8-1f49-4a9e-a97f-80175ec36ffe"
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "GPU = NVIDIA GeForce RTX 3060. Max memory = 11.633 GB.\n",
+ "5.748 GB of memory reserved.\n"
+ ]
+ }
+ ],
+ "source": [
+ "# @title Show current memory stats\n",
+ "gpu_stats = torch.cuda.get_device_properties(0)\n",
+ "start_gpu_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)\n",
+ "max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3)\n",
+ "print(f\"GPU = {gpu_stats.name}. Max memory = {max_memory} GB.\")\n",
+ "print(f\"{start_gpu_memory} GB of memory reserved.\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 1000
+ },
+ "id": "yqxqAZ7KJ4oL",
+ "outputId": "8db828a9-45b5-4b95-b61b-539e5f4326f6"
+ },
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "==((====))== Unsloth - 2x faster free finetuning | Num GPUs used = 1\n",
+ " \\\\ /| Num examples = 130,122 | Num Epochs = 1 | Total steps = 60\n",
+ "O^O/ \\_/ \\ Batch size per device = 2 | Gradient accumulation steps = 4\n",
+ "\\ / Data Parallel GPUs = 1 | Total batch size (2 x 4 x 1) = 8\n",
+ " \"-____-\" Trainable parameters = 41,943,040/8,000,000,000 (0.52% trained)\n"
+ ]
+ },
+ {
+ "ename": "OutOfMemoryError",
+ "evalue": "CUDA out of memory. Tried to allocate 3.94 GiB. GPU 0 has a total capacity of 11.63 GiB of which 1.40 GiB is free. Including non-PyTorch memory, this process has 10.14 GiB memory in use. Of the allocated memory 9.94 GiB is allocated by PyTorch, and 47.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)",
+ "output_type": "error",
+ "traceback": [
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+ "\u001b[0;31mOutOfMemoryError\u001b[0m Traceback (most recent call last)",
+ "Cell \u001b[0;32mIn[7], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m trainer_stats \u001b[38;5;241m=\u001b[39m \u001b[43mtrainer\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mtrain\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n",
+ "File \u001b[0;32m~/.pyenv/versions/unsloth/lib/python3.10/site-packages/transformers/trainer.py:2245\u001b[0m, in \u001b[0;36mTrainer.train\u001b[0;34m(self, resume_from_checkpoint, trial, ignore_keys_for_eval, **kwargs)\u001b[0m\n\u001b[1;32m 2243\u001b[0m hf_hub_utils\u001b[38;5;241m.\u001b[39menable_progress_bars()\n\u001b[1;32m 2244\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 2245\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43minner_training_loop\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 2246\u001b[0m \u001b[43m \u001b[49m\u001b[43margs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2247\u001b[0m \u001b[43m \u001b[49m\u001b[43mresume_from_checkpoint\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mresume_from_checkpoint\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2248\u001b[0m \u001b[43m \u001b[49m\u001b[43mtrial\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtrial\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2249\u001b[0m \u001b[43m \u001b[49m\u001b[43mignore_keys_for_eval\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mignore_keys_for_eval\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2250\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n",
+ "File \u001b[0;32m:314\u001b[0m, in \u001b[0;36m_fast_inner_training_loop\u001b[0;34m(self, batch_size, args, resume_from_checkpoint, trial, ignore_keys_for_eval)\u001b[0m\n",
+ "File \u001b[0;32m:31\u001b[0m, in \u001b[0;36m_unsloth_training_step\u001b[0;34m(self, model, inputs, num_items_in_batch)\u001b[0m\n",
+ "File \u001b[0;32m~/#/SEQuestionClassifier/unsloth_compiled_cache/UnslothSFTTrainer.py:748\u001b[0m, in \u001b[0;36m_UnslothSFTTrainer.compute_loss\u001b[0;34m(self, model, inputs, return_outputs, num_items_in_batch)\u001b[0m\n\u001b[1;32m 747\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21mcompute_loss\u001b[39m(\u001b[38;5;28mself\u001b[39m, model, inputs, return_outputs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mFalse\u001b[39;00m, num_items_in_batch \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m):\n\u001b[0;32m--> 748\u001b[0m outputs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43msuper\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcompute_loss\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 749\u001b[0m \u001b[43m \u001b[49m\u001b[43mmodel\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 750\u001b[0m \u001b[43m \u001b[49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 751\u001b[0m \u001b[43m \u001b[49m\u001b[43mreturn_outputs\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m \u001b[49m\u001b[43mreturn_outputs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 752\u001b[0m \u001b[43m \u001b[49m\u001b[43mnum_items_in_batch\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m \u001b[49m\u001b[43mnum_items_in_batch\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 753\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 754\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m outputs\n",
+ "File \u001b[0;32m~/.pyenv/versions/unsloth/lib/python3.10/site-packages/unsloth/models/_utils.py:1029\u001b[0m, in \u001b[0;36m_unsloth_pre_compute_loss\u001b[0;34m(self, model, inputs, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1023\u001b[0m logger\u001b[38;5;241m.\u001b[39mwarning_once(\n\u001b[1;32m 1024\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mUnsloth: Not an error, but \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mname\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m does not accept `num_items_in_batch`.\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;124m\"\u001b[39m\\\n\u001b[1;32m 1025\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mUsing gradient accumulation will be very slightly less accurate.\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;124m\"\u001b[39m\\\n\u001b[1;32m 1026\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mRead more on gradient accumulation issues here: https://unsloth.ai/blog/gradient\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 1027\u001b[0m )\n\u001b[1;32m 1028\u001b[0m \u001b[38;5;28;01mpass\u001b[39;00m\n\u001b[0;32m-> 1029\u001b[0m outputs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_old_compute_loss\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmodel\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1030\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m outputs\n",
+ "File \u001b[0;32m~/.pyenv/versions/unsloth/lib/python3.10/site-packages/transformers/trainer.py:3801\u001b[0m, in \u001b[0;36mTrainer.compute_loss\u001b[0;34m(self, model, inputs, return_outputs, num_items_in_batch)\u001b[0m\n\u001b[1;32m 3799\u001b[0m loss_kwargs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mnum_items_in_batch\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m num_items_in_batch\n\u001b[1;32m 3800\u001b[0m inputs \u001b[38;5;241m=\u001b[39m {\u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39minputs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mloss_kwargs}\n\u001b[0;32m-> 3801\u001b[0m outputs \u001b[38;5;241m=\u001b[39m \u001b[43mmodel\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43minputs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 3802\u001b[0m \u001b[38;5;66;03m# Save past state if it exists\u001b[39;00m\n\u001b[1;32m 3803\u001b[0m \u001b[38;5;66;03m# TODO: this needs to be fixed and made cleaner later.\u001b[39;00m\n\u001b[1;32m 3804\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39margs\u001b[38;5;241m.\u001b[39mpast_index \u001b[38;5;241m>\u001b[39m\u001b[38;5;241m=\u001b[39m \u001b[38;5;241m0\u001b[39m:\n",
+ "File \u001b[0;32m~/.pyenv/versions/unsloth/lib/python3.10/site-packages/torch/nn/modules/module.py:1751\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1749\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs) \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m 1750\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1751\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
+ "File \u001b[0;32m~/.pyenv/versions/unsloth/lib/python3.10/site-packages/torch/nn/modules/module.py:1762\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1757\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1758\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1759\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1760\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1761\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1762\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1764\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[1;32m 1765\u001b[0m called_always_called_hooks \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mset\u001b[39m()\n",
+ "File \u001b[0;32m~/.pyenv/versions/unsloth/lib/python3.10/site-packages/accelerate/utils/operations.py:814\u001b[0m, in \u001b[0;36mconvert_outputs_to_fp32..forward\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 813\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21mforward\u001b[39m(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs):\n\u001b[0;32m--> 814\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mmodel_forward\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
+ "File \u001b[0;32m~/.pyenv/versions/unsloth/lib/python3.10/site-packages/accelerate/utils/operations.py:802\u001b[0m, in \u001b[0;36mConvertOutputsToFp32.__call__\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 801\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21m__call__\u001b[39m(\u001b[38;5;28mself\u001b[39m, \u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs):\n\u001b[0;32m--> 802\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mconvert_to_fp32\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mmodel_forward\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\n",
+ "File \u001b[0;32m~/.pyenv/versions/unsloth/lib/python3.10/site-packages/accelerate/utils/operations.py:781\u001b[0m, in \u001b[0;36mconvert_to_fp32\u001b[0;34m(tensor)\u001b[0m\n\u001b[1;32m 775\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21m_is_fp16_bf16_tensor\u001b[39m(tensor):\n\u001b[1;32m 776\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m (is_torch_tensor(tensor) \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mhasattr\u001b[39m(tensor, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mdtype\u001b[39m\u001b[38;5;124m\"\u001b[39m)) \u001b[38;5;129;01mand\u001b[39;00m tensor\u001b[38;5;241m.\u001b[39mdtype \u001b[38;5;129;01min\u001b[39;00m (\n\u001b[1;32m 777\u001b[0m torch\u001b[38;5;241m.\u001b[39mfloat16,\n\u001b[1;32m 778\u001b[0m torch\u001b[38;5;241m.\u001b[39mbfloat16,\n\u001b[1;32m 779\u001b[0m )\n\u001b[0;32m--> 781\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mrecursively_apply\u001b[49m\u001b[43m(\u001b[49m\u001b[43m_convert_to_fp32\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtensor\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtest_type\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m_is_fp16_bf16_tensor\u001b[49m\u001b[43m)\u001b[49m\n",
+ "File \u001b[0;32m~/.pyenv/versions/unsloth/lib/python3.10/site-packages/accelerate/utils/operations.py:118\u001b[0m, in \u001b[0;36mrecursively_apply\u001b[0;34m(func, data, test_type, error_on_other_type, *args, **kwargs)\u001b[0m\n\u001b[1;32m 107\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m honor_type(\n\u001b[1;32m 108\u001b[0m data,\n\u001b[1;32m 109\u001b[0m (\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 114\u001b[0m ),\n\u001b[1;32m 115\u001b[0m )\n\u001b[1;32m 116\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(data, Mapping):\n\u001b[1;32m 117\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mtype\u001b[39m(data)(\n\u001b[0;32m--> 118\u001b[0m {\n\u001b[1;32m 119\u001b[0m k: recursively_apply(\n\u001b[1;32m 120\u001b[0m func, v, \u001b[38;5;241m*\u001b[39margs, test_type\u001b[38;5;241m=\u001b[39mtest_type, error_on_other_type\u001b[38;5;241m=\u001b[39merror_on_other_type, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs\n\u001b[1;32m 121\u001b[0m )\n\u001b[1;32m 122\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m k, v \u001b[38;5;129;01min\u001b[39;00m data\u001b[38;5;241m.\u001b[39mitems()\n\u001b[1;32m 123\u001b[0m }\n\u001b[1;32m 124\u001b[0m )\n\u001b[1;32m 125\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m test_type(data):\n\u001b[1;32m 126\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m func(data, \u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n",
+ "File \u001b[0;32m~/.pyenv/versions/unsloth/lib/python3.10/site-packages/accelerate/utils/operations.py:119\u001b[0m, in \u001b[0;36m\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m 107\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m honor_type(\n\u001b[1;32m 108\u001b[0m data,\n\u001b[1;32m 109\u001b[0m (\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 114\u001b[0m ),\n\u001b[1;32m 115\u001b[0m )\n\u001b[1;32m 116\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(data, Mapping):\n\u001b[1;32m 117\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mtype\u001b[39m(data)(\n\u001b[1;32m 118\u001b[0m {\n\u001b[0;32m--> 119\u001b[0m k: \u001b[43mrecursively_apply\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 120\u001b[0m \u001b[43m \u001b[49m\u001b[43mfunc\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mv\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtest_type\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtest_type\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43merror_on_other_type\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43merror_on_other_type\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\n\u001b[1;32m 121\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 122\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m k, v \u001b[38;5;129;01min\u001b[39;00m data\u001b[38;5;241m.\u001b[39mitems()\n\u001b[1;32m 123\u001b[0m }\n\u001b[1;32m 124\u001b[0m )\n\u001b[1;32m 125\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m test_type(data):\n\u001b[1;32m 126\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m func(data, \u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n",
+ "File \u001b[0;32m~/.pyenv/versions/unsloth/lib/python3.10/site-packages/accelerate/utils/operations.py:126\u001b[0m, in \u001b[0;36mrecursively_apply\u001b[0;34m(func, data, test_type, error_on_other_type, *args, **kwargs)\u001b[0m\n\u001b[1;32m 117\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mtype\u001b[39m(data)(\n\u001b[1;32m 118\u001b[0m {\n\u001b[1;32m 119\u001b[0m k: recursively_apply(\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 123\u001b[0m }\n\u001b[1;32m 124\u001b[0m )\n\u001b[1;32m 125\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m test_type(data):\n\u001b[0;32m--> 126\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[43mdata\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 127\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m error_on_other_type:\n\u001b[1;32m 128\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mTypeError\u001b[39;00m(\n\u001b[1;32m 129\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mUnsupported types (\u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mtype\u001b[39m(data)\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m) passed to `\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mfunc\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__name__\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m`. Only nested list/tuple/dicts of \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 130\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mobjects that are valid for `\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mtest_type\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__name__\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m` should be passed.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 131\u001b[0m )\n",
+ "File \u001b[0;32m~/.pyenv/versions/unsloth/lib/python3.10/site-packages/accelerate/utils/operations.py:773\u001b[0m, in \u001b[0;36mconvert_to_fp32.._convert_to_fp32\u001b[0;34m(tensor)\u001b[0m\n\u001b[1;32m 772\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21m_convert_to_fp32\u001b[39m(tensor):\n\u001b[0;32m--> 773\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mtensor\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfloat\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n",
+ "\u001b[0;31mOutOfMemoryError\u001b[0m: CUDA out of memory. Tried to allocate 3.94 GiB. GPU 0 has a total capacity of 11.63 GiB of which 1.40 GiB is free. Including non-PyTorch memory, this process has 10.14 GiB memory in use. Of the allocated memory 9.94 GiB is allocated by PyTorch, and 47.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)"
+ ]
+ }
+ ],
+ "source": [
+ "trainer_stats = trainer.train()"
+ ]
+ },
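+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The run above went out of memory on the 12 GB RTX 3060. A few mitigations, sketched with illustrative (untuned) values: set the allocator flag the error message itself suggests, shrink the micro-batch, and stop padding every example to 4096 tokens."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Sketch of memory-saving tweaks; the numbers are illustrative, not tuned.\n",
+    "import os\n",
+    "os.environ[\"PYTORCH_CUDA_ALLOC_CONF\"] = \"expandable_segments:True\"  # from the OOM hint;\n",
+    "# must be set before CUDA is first initialised to take effect.\n",
+    "\n",
+    "# 1) Smaller micro-batch, same effective batch of 8:\n",
+    "#    per_device_train_batch_size = 1, gradient_accumulation_steps = 8\n",
+    "# 2) Shorter context: the mean input is ~246 tokens, so max_length = 1024\n",
+    "#    still covers the vast majority of examples.\n",
+    "# 3) Truncation-only tokenization (no \"max_length\" padding) so batches are\n",
+    "#    padded dynamically to the longest example in each batch:\n",
+    "def tokenize_function(example):\n",
+    "    return tokenizer(\n",
+    "        example[\"text\"],\n",
+    "        truncation=True,\n",
+    "        max_length=1024,\n",
+    "    )"
+   ]
+  },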
+ {
+ "cell_type": "code",
+ "execution_count": 34,
+ "metadata": {},
+ "outputs": [
+ {
+ "ename": "ValueError",
+ "evalue": "You should supply an encoding or a list of encodings to this method that includes input_ids, but you provided ['Input', 'Tags', 'text', '__index_level_0__']",
+ "output_type": "error",
+ "traceback": [
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+ "\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)",
+ "Cell \u001b[0;32mIn[34], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[43mtrainer\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mevaluate\u001b[49m\u001b[43m(\u001b[49m\u001b[43meval_dataset\u001b[49m\u001b[43m)\u001b[49m\n",
+ "File \u001b[0;32m~/.pyenv/versions/unsloth/lib/python3.10/site-packages/transformers/trainer.py:4154\u001b[0m, in \u001b[0;36mTrainer.evaluate\u001b[0;34m(self, eval_dataset, ignore_keys, metric_key_prefix)\u001b[0m\n\u001b[1;32m 4151\u001b[0m start_time \u001b[38;5;241m=\u001b[39m time\u001b[38;5;241m.\u001b[39mtime()\n\u001b[1;32m 4153\u001b[0m eval_loop \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mprediction_loop \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39margs\u001b[38;5;241m.\u001b[39muse_legacy_prediction_loop \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mevaluation_loop\n\u001b[0;32m-> 4154\u001b[0m output \u001b[38;5;241m=\u001b[39m \u001b[43meval_loop\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 4155\u001b[0m \u001b[43m \u001b[49m\u001b[43meval_dataloader\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 4156\u001b[0m \u001b[43m \u001b[49m\u001b[43mdescription\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mEvaluation\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 4157\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;66;43;03m# No point gathering the predictions if there are no metrics, otherwise we defer to\u001b[39;49;00m\n\u001b[1;32m 4158\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;66;43;03m# self.args.prediction_loss_only\u001b[39;49;00m\n\u001b[1;32m 4159\u001b[0m \u001b[43m \u001b[49m\u001b[43mprediction_loss_only\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mif\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcompute_metrics\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01mis\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43;01melse\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[1;32m 4160\u001b[0m \u001b[43m \u001b[49m\u001b[43mignore_keys\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mignore_keys\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 4161\u001b[0m \u001b[43m \u001b[49m\u001b[43mmetric_key_prefix\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mmetric_key_prefix\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 4162\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 4164\u001b[0m total_batch_size \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39margs\u001b[38;5;241m.\u001b[39meval_batch_size \u001b[38;5;241m*\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39margs\u001b[38;5;241m.\u001b[39mworld_size\n\u001b[1;32m 4165\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mmetric_key_prefix\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m_jit_compilation_time\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;129;01min\u001b[39;00m output\u001b[38;5;241m.\u001b[39mmetrics:\n",
+ "File \u001b[0;32m~/.pyenv/versions/unsloth/lib/python3.10/site-packages/transformers/trainer.py:4338\u001b[0m, in \u001b[0;36mTrainer.evaluation_loop\u001b[0;34m(self, dataloader, description, prediction_loss_only, ignore_keys, metric_key_prefix)\u001b[0m\n\u001b[1;32m 4335\u001b[0m observed_num_examples \u001b[38;5;241m=\u001b[39m \u001b[38;5;241m0\u001b[39m\n\u001b[1;32m 4337\u001b[0m \u001b[38;5;66;03m# Main evaluation loop\u001b[39;00m\n\u001b[0;32m-> 4338\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m step, inputs \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28menumerate\u001b[39m(dataloader):\n\u001b[1;32m 4339\u001b[0m \u001b[38;5;66;03m# Update the observed num examples\u001b[39;00m\n\u001b[1;32m 4340\u001b[0m observed_batch_size \u001b[38;5;241m=\u001b[39m find_batch_size(inputs)\n\u001b[1;32m 4341\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m observed_batch_size \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n",
+ "File \u001b[0;32m~/.pyenv/versions/unsloth/lib/python3.10/site-packages/accelerate/data_loader.py:566\u001b[0m, in \u001b[0;36mDataLoaderShard.__iter__\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 564\u001b[0m \u001b[38;5;66;03m# We iterate one batch ahead to check when we are at the end\u001b[39;00m\n\u001b[1;32m 565\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m--> 566\u001b[0m current_batch \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mnext\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mdataloader_iter\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 567\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mStopIteration\u001b[39;00m:\n\u001b[1;32m 568\u001b[0m \u001b[38;5;28;01myield\u001b[39;00m\n",
+ "File \u001b[0;32m~/.pyenv/versions/unsloth/lib/python3.10/site-packages/torch/utils/data/dataloader.py:733\u001b[0m, in \u001b[0;36m_BaseDataLoaderIter.__next__\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 730\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_sampler_iter \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 731\u001b[0m \u001b[38;5;66;03m# TODO(https://github.com/pytorch/pytorch/issues/76750)\u001b[39;00m\n\u001b[1;32m 732\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_reset() \u001b[38;5;66;03m# type: ignore[call-arg]\u001b[39;00m\n\u001b[0;32m--> 733\u001b[0m data \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_next_data\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 734\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_num_yielded \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m \u001b[38;5;241m1\u001b[39m\n\u001b[1;32m 735\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m (\n\u001b[1;32m 736\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_dataset_kind \u001b[38;5;241m==\u001b[39m _DatasetKind\u001b[38;5;241m.\u001b[39mIterable\n\u001b[1;32m 737\u001b[0m \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_IterableDataset_len_called \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[1;32m 738\u001b[0m \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_num_yielded \u001b[38;5;241m>\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_IterableDataset_len_called\n\u001b[1;32m 739\u001b[0m ):\n",
+ "File \u001b[0;32m~/.pyenv/versions/unsloth/lib/python3.10/site-packages/torch/utils/data/dataloader.py:789\u001b[0m, in \u001b[0;36m_SingleProcessDataLoaderIter._next_data\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 787\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21m_next_data\u001b[39m(\u001b[38;5;28mself\u001b[39m):\n\u001b[1;32m 788\u001b[0m index \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_next_index() \u001b[38;5;66;03m# may raise StopIteration\u001b[39;00m\n\u001b[0;32m--> 789\u001b[0m data \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_dataset_fetcher\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfetch\u001b[49m\u001b[43m(\u001b[49m\u001b[43mindex\u001b[49m\u001b[43m)\u001b[49m \u001b[38;5;66;03m# may raise StopIteration\u001b[39;00m\n\u001b[1;32m 790\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_pin_memory:\n\u001b[1;32m 791\u001b[0m data \u001b[38;5;241m=\u001b[39m _utils\u001b[38;5;241m.\u001b[39mpin_memory\u001b[38;5;241m.\u001b[39mpin_memory(data, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_pin_memory_device)\n",
+ "File \u001b[0;32m~/.pyenv/versions/unsloth/lib/python3.10/site-packages/torch/utils/data/_utils/fetch.py:55\u001b[0m, in \u001b[0;36m_MapDatasetFetcher.fetch\u001b[0;34m(self, possibly_batched_index)\u001b[0m\n\u001b[1;32m 53\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 54\u001b[0m data \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdataset[possibly_batched_index]\n\u001b[0;32m---> 55\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcollate_fn\u001b[49m\u001b[43m(\u001b[49m\u001b[43mdata\u001b[49m\u001b[43m)\u001b[49m\n",
+ "File \u001b[0;32m~/.pyenv/versions/unsloth/lib/python3.10/site-packages/transformers/data/data_collator.py:46\u001b[0m, in \u001b[0;36mDataCollatorMixin.__call__\u001b[0;34m(self, features, return_tensors)\u001b[0m\n\u001b[1;32m 44\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mtf_call(features)\n\u001b[1;32m 45\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m return_tensors \u001b[38;5;241m==\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mpt\u001b[39m\u001b[38;5;124m\"\u001b[39m:\n\u001b[0;32m---> 46\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mtorch_call\u001b[49m\u001b[43m(\u001b[49m\u001b[43mfeatures\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 47\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m return_tensors \u001b[38;5;241m==\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mnp\u001b[39m\u001b[38;5;124m\"\u001b[39m:\n\u001b[1;32m 48\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mnumpy_call(features)\n",
+ "File \u001b[0;32m~/.pyenv/versions/unsloth/lib/python3.10/site-packages/transformers/data/data_collator.py:1013\u001b[0m, in \u001b[0;36mDataCollatorForLanguageModeling.torch_call\u001b[0;34m(self, examples)\u001b[0m\n\u001b[1;32m 1010\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mcreate_rng()\n\u001b[1;32m 1012\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(examples[\u001b[38;5;241m0\u001b[39m], Mapping):\n\u001b[0;32m-> 1013\u001b[0m batch \u001b[38;5;241m=\u001b[39m \u001b[43mpad_without_fast_tokenizer_warning\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1014\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mtokenizer\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mexamples\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mreturn_tensors\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mpt\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mpad_to_multiple_of\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpad_to_multiple_of\u001b[49m\n\u001b[1;32m 1015\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1016\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 1017\u001b[0m batch \u001b[38;5;241m=\u001b[39m {\n\u001b[1;32m 1018\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124minput_ids\u001b[39m\u001b[38;5;124m\"\u001b[39m: _torch_collate_batch(examples, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mtokenizer, pad_to_multiple_of\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mpad_to_multiple_of)\n\u001b[1;32m 1019\u001b[0m }\n",
+ "File \u001b[0;32m~/.pyenv/versions/unsloth/lib/python3.10/site-packages/transformers/data/data_collator.py:67\u001b[0m, in \u001b[0;36mpad_without_fast_tokenizer_warning\u001b[0;34m(tokenizer, *pad_args, **pad_kwargs)\u001b[0m\n\u001b[1;32m 64\u001b[0m tokenizer\u001b[38;5;241m.\u001b[39mdeprecation_warnings[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mAsking-to-pad-a-fast-tokenizer\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mTrue\u001b[39;00m\n\u001b[1;32m 66\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m---> 67\u001b[0m padded \u001b[38;5;241m=\u001b[39m \u001b[43mtokenizer\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpad\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mpad_args\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mpad_kwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 68\u001b[0m \u001b[38;5;28;01mfinally\u001b[39;00m:\n\u001b[1;32m 69\u001b[0m \u001b[38;5;66;03m# Restore the state of the warning.\u001b[39;00m\n\u001b[1;32m 70\u001b[0m tokenizer\u001b[38;5;241m.\u001b[39mdeprecation_warnings[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mAsking-to-pad-a-fast-tokenizer\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m warning_state\n",
+ "File \u001b[0;32m~/.pyenv/versions/unsloth/lib/python3.10/site-packages/transformers/tokenization_utils_base.py:3324\u001b[0m, in \u001b[0;36mPreTrainedTokenizerBase.pad\u001b[0;34m(self, encoded_inputs, padding, max_length, pad_to_multiple_of, padding_side, return_attention_mask, return_tensors, verbose)\u001b[0m\n\u001b[1;32m 3322\u001b[0m \u001b[38;5;66;03m# The model's main input name, usually `input_ids`, has been passed for padding\u001b[39;00m\n\u001b[1;32m 3323\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mmodel_input_names[\u001b[38;5;241m0\u001b[39m] \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;129;01min\u001b[39;00m encoded_inputs:\n\u001b[0;32m-> 3324\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[1;32m 3325\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mYou should supply an encoding or a list of encodings to this method \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 3326\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mthat includes \u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mmodel_input_names[\u001b[38;5;241m0\u001b[39m]\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m, but you provided \u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mlist\u001b[39m(encoded_inputs\u001b[38;5;241m.\u001b[39mkeys())\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 3327\u001b[0m )\n\u001b[1;32m 3329\u001b[0m required_input \u001b[38;5;241m=\u001b[39m encoded_inputs[\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mmodel_input_names[\u001b[38;5;241m0\u001b[39m]]\n\u001b[1;32m 3331\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m required_input \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mor\u001b[39;00m (\u001b[38;5;28misinstance\u001b[39m(required_input, Sized) \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(required_input) \u001b[38;5;241m==\u001b[39m \u001b[38;5;241m0\u001b[39m):\n",
+ "\u001b[0;31mValueError\u001b[0m: You should supply an encoding or a list of encodings to this method that includes input_ids, but you provided ['Input', 'Tags', 'text', '__index_level_0__']"
+ ]
+ }
+ ],
+ "source": [
+ "trainer.evaluate(eval_dataset)\n",
+ "\n"
+ ]
+ },
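+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The `ValueError` above means the collator received raw string columns (`Input`, `Tags`, `text`, ...) instead of token IDs. A minimal fix - assuming `eval_dataset` carries the same `text` column as the training data - is to tokenize it and drop the raw columns before evaluating:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# A sketch, not a verified run: tokenize eval_dataset so the collator finds input_ids.\n",
+ "def tokenize_fn(examples):\n",
+ "    return tokenizer(examples[\"text\"], truncation = True, max_length = max_seq_length)\n",
+ "\n",
+ "tokenized_eval = eval_dataset.map(\n",
+ "    tokenize_fn,\n",
+ "    batched = True,\n",
+ "    remove_columns = eval_dataset.column_names,  # drops Input, Tags, text, __index_level_0__\n",
+ ")\n",
+ "trainer.evaluate(tokenized_eval)"
+ ]
+ },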
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "cellView": "form",
+ "id": "pCqnaKmlO1U9"
+ },
+ "outputs": [],
+ "source": [
+ "# @title Show final memory and time stats\n",
+ "used_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)\n",
+ "used_memory_for_lora = round(used_memory - start_gpu_memory, 3)\n",
+ "used_percentage = round(used_memory / max_memory * 100, 3)\n",
+ "lora_percentage = round(used_memory_for_lora / max_memory * 100, 3)\n",
+ "print(f\"{trainer_stats.metrics['train_runtime']} seconds used for training.\")\n",
+ "print(\n",
+ " f\"{round(trainer_stats.metrics['train_runtime']/60, 2)} minutes used for training.\"\n",
+ ")\n",
+ "print(f\"Peak reserved memory = {used_memory} GB.\")\n",
+ "print(f\"Peak reserved memory for training = {used_memory_for_lora} GB.\")\n",
+ "print(f\"Peak reserved memory % of max memory = {used_percentage} %.\")\n",
+ "print(f\"Peak reserved memory for training % of max memory = {lora_percentage} %.\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "ekOmTR1hSNcr"
+ },
+ "source": [
+ "\n",
+ "### Inference\n",
+ "Let's run the model! You can change the instruction and input - leave the output blank!\n",
+ "\n",
+ "**[NEW] Try 2x faster inference in a free Colab for Llama-3.1 8b Instruct [here](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Unsloth_Studio.ipynb)**"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 36,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "\"\\n ### Task:\\n Given a Stack Exchange question (title and body), generate 3 to 7 relevant tags that best describe the topics, technologies, or concepts involved.\\n\\n ### Guidelines:\\n - Tags must be lowercase.\\n - Tags must be comma-separated (no extra spaces after commas).\\n - Tags should be focused, concise, and relevant.\\n - Only use existing or plausible Stack Exchange tags (no imaginary tags).\\n - Do not invent new tags.\\n - Do not repeat tags.\\n - Always return at least 3 tags.\\n\\n ### Input:\\n Title: What kind of battery does WALLβ’E run on? Body: WALLβ’E has been working for several hundred years and one of the keys to this is his ability to use parts of broken robots to replace his own broken parts.\\n\\nNow this won't work with the electrical ...\\r\\n \\n\\n ### Response:\\n ['scifi']\\n <|end_of_text|>\""
+ ]
+ },
+ "execution_count": 36,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "train_dataset[0][\"text\"]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 37,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "text = \"\"\"\n",
+ "\\n ### Task:\\n Given a Stack Exchange question (title and body), generate 3 to 7 relevant tags that best describe the topics, technologies, or concepts involved.\\n\\n ### Guidelines:\\n - Tags must be lowercase.\\n - Tags must be comma-separated (no extra spaces after commas).\\n - Tags should be focused, concise, and relevant.\\n - Only use existing or plausible Stack Exchange tags (no imaginary tags).\\n - Do not invent new tags.\\n - Do not repeat tags.\\n - Always return at least 3 tags.\\n\\n ### Input:\\n Title: What kind of battery does WALLβ’E run on? Body: WALLβ’E has been working for several hundred years and one of the keys to this is his ability to use parts of broken robots to replace his own broken parts.\\n\\nNow this won't work with the electrical ...\\r\\n \\n\\n ### Response:\\n <|end_of_text|>\n",
+ "\"\"\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 42,
+ "metadata": {
+ "id": "kR3gIAX-SM2q"
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[\"<|begin_of_text|>### Task:\\n Given a Stack Exchange question (title and body), generate 3 to 7 relevant tags that best describe the topics, technologies, or concepts involved.\\n\\n ### Guidelines:\\n - Tags must be lowercase.\\n - Tags must be comma-separated (no extra spaces after commas).\\n - Tags should be focused, concise, and relevant.\\n - Only use existing or plausible Stack Exchange tags (no imaginary tags).\\n - Do not invent new tags.\\n - Do not repeat tags.\\n - Always return at least 3 tags.\\n\\n ### Input:\\n Title: What kind of battery does WALLβ’E run on? Body: WALLβ’E has been working for several hundred years and one of the keys to this is his ability to use parts of broken robots to replace his own broken parts.\\n\\nNow this won't work with the electrical...\\r\\n \\n\\n ### Response:\\n Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return\"]"
+ ]
+ },
+ "execution_count": 42,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# alpaca_prompt = Copied from above\n",
+ "FastLanguageModel.for_inference(model) # Enable native 2x faster inference\n",
+ "inputs = tokenizer(\n",
+ "[\n",
+ " text,\n",
+ "], return_tensors = \"pt\").to(\"cuda\")\n",
+ "\n",
+ "outputs = model.generate(**inputs, max_new_tokens = 64, use_cache = True)\n",
+ "tokenizer.batch_decode(outputs)"
+ ]
+ },
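+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The repeated `Return` tokens above usually point at the training setup or prompt format, but `model.generate` also accepts standard sampling arguments that can reduce degenerate loops. A sketch with illustrative (not tuned) values:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Standard transformers generate() arguments - illustrative values, not tuned.\n",
+ "outputs = model.generate(\n",
+ "    **inputs,\n",
+ "    max_new_tokens = 64,\n",
+ "    do_sample = True,            # sample instead of greedy decoding\n",
+ "    temperature = 0.7,\n",
+ "    repetition_penalty = 1.2,    # discourages loops like the one above\n",
+ "    use_cache = True,\n",
+ ")\n",
+ "tokenizer.batch_decode(outputs)"
+ ]
+ },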
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "CrSvZObor0lY"
+ },
+ "source": [
+ " You can also use a `TextStreamer` for continuous inference - so you can see the generation token by token, instead of waiting the whole time!"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 40,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "text = \"### Task:\\n Given a Stack Exchange question (title and body), generate 3 to 7 relevant tags that best describe the topics, technologies, or concepts involved.\\n\\n ### Guidelines:\\n - Tags must be lowercase.\\n - Tags must be comma-separated (no extra spaces after commas).\\n - Tags should be focused, concise, and relevant.\\n - Only use existing or plausible Stack Exchange tags (no imaginary tags).\\n - Do not invent new tags.\\n - Do not repeat tags.\\n - Always return at least 3 tags.\\n\\n ### Input:\\n Title: What kind of battery does WALLβ’E run on? Body: WALLβ’E has been working for several hundred years and one of the keys to this is his ability to use parts of broken robots to replace his own broken parts.\\n\\nNow this won't work with the electrical...\\r\\n \\n\\n ### Response:\\n\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 39,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "'Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\\n\\n### Instruction:\\nContinue the fibonnaci sequence.\\n\\n### Input:\\n1, 1, 2, 3, 5, 8\\n\\n### Response:\\n'"
+ ]
+ },
+ "execution_count": 39,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "alpaca_prompt.format(\n",
+ " \"Continue the fibonnaci sequence.\", # instruction\n",
+ " \"1, 1, 2, 3, 5, 8\", # input\n",
+ " \"\", # output - leave this blank for generation!\n",
+ " )"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 41,
+ "metadata": {
+ "id": "e2pEuRb1r2Vg"
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "<|begin_of_text|>### Task:\n",
+ " Given a Stack Exchange question (title and body), generate 3 to 7 relevant tags that best describe the topics, technologies, or concepts involved.\n",
+ "\n",
+ " ### Guidelines:\n",
+ " - Tags must be lowercase.\n",
+ " - Tags must be comma-separated (no extra spaces after commas).\n",
+ " - Tags should be focused, concise, and relevant.\n",
+ " - Only use existing or plausible Stack Exchange tags (no imaginary tags).\n",
+ " - Do not invent new tags.\n",
+ " - Do not repeat tags.\n",
+ " - Always return at least 3 tags.\n",
+ "\n",
+ " ### Input:\n",
+ " Title: What kind of battery does WALLβ’E run on? Body: WALLβ’E has been working for several hundred years and one of the keys to this is his ability to use parts of broken robots to replace his own broken parts.\n",
+ "\n",
+ "Now this won't work with the electrical...\n",
+ " \n",
+ "\n",
+ " ### Response:\n",
+ " Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return Return\n"
+ ]
+ }
+ ],
+ "source": [
+ "# alpaca_prompt = Copied from above\n",
+ "FastLanguageModel.for_inference(model) # Enable native 2x faster inference\n",
+ "inputs = tokenizer(\n",
+ "[\n",
+ " text,\n",
+ "], return_tensors = \"pt\").to(\"cuda\")\n",
+ "\n",
+ "from transformers import TextStreamer\n",
+ "text_streamer = TextStreamer(tokenizer)\n",
+ "_ = model.generate(**inputs, streamer = text_streamer, max_new_tokens = 128)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "uMuVrWbjAzhc"
+ },
+ "source": [
+ "\n",
+ "### Saving, loading finetuned models\n",
+ "To save the final model as LoRA adapters, either use Huggingface's `push_to_hub` for an online save or `save_pretrained` for a local save.\n",
+ "\n",
+ "**[NOTE]** This ONLY saves the LoRA adapters, and not the full model. To save to 16bit or GGUF, scroll down!"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "upcOlWe7A1vc"
+ },
+ "outputs": [],
+ "source": [
+ "model.save_pretrained(\"lora_model\") # Local saving\n",
+ "tokenizer.save_pretrained(\"lora_model\")\n",
+ "# model.push_to_hub(\"your_name/lora_model\", token = \"...\") # Online saving\n",
+ "# tokenizer.push_to_hub(\"your_name/lora_model\", token = \"...\") # Online saving"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "AEEcJ4qfC7Lp"
+ },
+ "source": [
+ "Now if you want to load the LoRA adapters we just saved for inference, set `False` to `True`:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "MKX_XKs_BNZR"
+ },
+ "outputs": [],
+ "source": [
+ "if False:\n",
+ " from unsloth import FastLanguageModel\n",
+ " model, tokenizer = FastLanguageModel.from_pretrained(\n",
+ " model_name = \"lora_model\", # YOUR MODEL YOU USED FOR TRAINING\n",
+ " max_seq_length = max_seq_length,\n",
+ " dtype = dtype,\n",
+ " load_in_4bit = load_in_4bit,\n",
+ " )\n",
+ " FastLanguageModel.for_inference(model) # Enable native 2x faster inference\n",
+ "\n",
+ "# alpaca_prompt = You MUST copy from above!\n",
+ "\n",
+ "inputs = tokenizer(\n",
+ "[\n",
+ " alpaca_prompt.format(\n",
+ " \"What is a famous tall tower in Paris?\", # instruction\n",
+ " \"\", # input\n",
+ " \"\", # output - leave this blank for generation!\n",
+ " )\n",
+ "], return_tensors = \"pt\").to(\"cuda\")\n",
+ "\n",
+ "from transformers import TextStreamer\n",
+ "text_streamer = TextStreamer(tokenizer)\n",
+ "_ = model.generate(**inputs, streamer = text_streamer, max_new_tokens = 128)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "QQMjaNrjsU5_"
+ },
+ "source": [
+ "You can also use Hugging Face's `AutoModelForPeftCausalLM`. Only use this if you do not have `unsloth` installed. It can be hopelessly slow, since `4bit` model downloading is not supported, and Unsloth's **inference is 2x faster**."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "yFfaXG0WsQuE"
+ },
+ "outputs": [],
+ "source": [
+ "if False:\n",
+ " # I highly do NOT suggest - use Unsloth if possible\n",
+ " from peft import AutoPeftModelForCausalLM\n",
+ " from transformers import AutoTokenizer\n",
+ " model = AutoPeftModelForCausalLM.from_pretrained(\n",
+ " \"lora_model\", # YOUR MODEL YOU USED FOR TRAINING\n",
+ " load_in_4bit = load_in_4bit,\n",
+ " )\n",
+ " tokenizer = AutoTokenizer.from_pretrained(\"lora_model\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "f422JgM9sdVT"
+ },
+ "source": [
+ "### Saving to float16 for VLLM\n",
+ "\n",
+ "We also support saving to `float16` directly. Select `merged_16bit` for float16 or `merged_4bit` for int4. We also allow `lora` adapters as a fallback. Use `push_to_hub_merged` to upload to your Hugging Face account! You can go to https://huggingface.co/settings/tokens for your personal tokens."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "iHjt_SMYsd3P"
+ },
+ "outputs": [],
+ "source": [
+ "# Merge to 16bit\n",
+ "if False: model.save_pretrained_merged(\"model\", tokenizer, save_method = \"merged_16bit\",)\n",
+ "if False: model.push_to_hub_merged(\"hf/model\", tokenizer, save_method = \"merged_16bit\", token = \"\")\n",
+ "\n",
+ "# Merge to 4bit\n",
+ "if False: model.save_pretrained_merged(\"model\", tokenizer, save_method = \"merged_4bit\",)\n",
+ "if False: model.push_to_hub_merged(\"hf/model\", tokenizer, save_method = \"merged_4bit\", token = \"\")\n",
+ "\n",
+ "# Just LoRA adapters\n",
+ "if False: model.save_pretrained_merged(\"model\", tokenizer, save_method = \"lora\",)\n",
+ "if False: model.push_to_hub_merged(\"hf/model\", tokenizer, save_method = \"lora\", token = \"\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "TCv4vXHd61i7"
+ },
+ "source": [
+ "### GGUF / llama.cpp Conversion\n",
+ "To save to `GGUF` / `llama.cpp`, we support it natively now! We clone `llama.cpp` and we default save it to `q8_0`. We allow all methods like `q4_k_m`. Use `save_pretrained_gguf` for local saving and `push_to_hub_gguf` for uploading to HF.\n",
+ "\n",
+ "Some supported quant methods (full list on our [Wiki page](https://github.com/unslothai/unsloth/wiki#gguf-quantization-options)):\n",
+ "* `q8_0` - Fast conversion. High resource use, but generally acceptable.\n",
+ "* `q4_k_m` - Recommended. Uses Q6_K for half of the attention.wv and feed_forward.w2 tensors, else Q4_K.\n",
+ "* `q5_k_m` - Recommended. Uses Q6_K for half of the attention.wv and feed_forward.w2 tensors, else Q5_K.\n",
+ "\n",
+ "[**NEW**] To finetune and auto export to Ollama, try our [Ollama notebook](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3_(8B)-Ollama.ipynb)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "FqfebeAdT073"
+ },
+ "outputs": [],
+ "source": [
+ "# Save to 8bit Q8_0\n",
+ "if False: model.save_pretrained_gguf(\"model\", tokenizer,)\n",
+ "# Remember to go to https://huggingface.co/settings/tokens for a token!\n",
+ "# And change hf to your username!\n",
+ "if False: model.push_to_hub_gguf(\"hf/model\", tokenizer, token = \"\")\n",
+ "\n",
+ "# Save to 16bit GGUF\n",
+ "if False: model.save_pretrained_gguf(\"model\", tokenizer, quantization_method = \"f16\")\n",
+ "if False: model.push_to_hub_gguf(\"hf/model\", tokenizer, quantization_method = \"f16\", token = \"\")\n",
+ "\n",
+ "# Save to q4_k_m GGUF\n",
+ "if False: model.save_pretrained_gguf(\"model\", tokenizer, quantization_method = \"q4_k_m\")\n",
+ "if False: model.push_to_hub_gguf(\"hf/model\", tokenizer, quantization_method = \"q4_k_m\", token = \"\")\n",
+ "\n",
+ "# Save to multiple GGUF options - much faster if you want multiple!\n",
+ "if False:\n",
+ " model.push_to_hub_gguf(\n",
+ " \"hf/model\", # Change hf to your username!\n",
+ " tokenizer,\n",
+ " quantization_method = [\"q4_k_m\", \"q8_0\", \"q5_k_m\",],\n",
+ " token = \"\",\n",
+ " )"
+ ]
+ },
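+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "One way to smoke-test an exported GGUF is from a terminal with `llama.cpp`'s CLI. The binary and file names below are assumptions - adjust them to your build and export paths:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# A sketch, run from a shell (not verified here). `llama-cli` ships with recent\n",
+ "# llama.cpp builds; -m is the model path, -p the prompt, -n the max new tokens.\n",
+ "# ./llama.cpp/llama-cli -m model-unsloth-Q4_K_M.gguf -p \"### Task: ...\" -n 64"
+ ]
+ },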
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "rfhsLp6ZjCNC"
+ },
+ "source": [
+ "Now, use the `model-unsloth.gguf` file or `model-unsloth-Q4_K_M.gguf` file in llama.cpp or a UI based system like Jan or Open WebUI. You can install Jan [here](https://github.com/janhq/jan) and Open WebUI [here](https://github.com/open-webui/open-webui)\n",
+ "\n",
+ "And we're done! If you have any questions on Unsloth, we have a [Discord](https://discord.gg/unsloth) channel! If you find any bugs or want to keep updated with the latest LLM stuff, or need help, join projects etc, feel free to join our Discord!\n",
+ "\n",
+ "Some other links:\n",
+ "1. Train your own reasoning model - Llama GRPO notebook [Free Colab](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.1_(8B)-GRPO.ipynb)\n",
+ "2. Saving finetunes to Ollama. [Free notebook](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3_(8B)-Ollama.ipynb)\n",
+ "3. Llama 3.2 Vision finetuning - Radiography use case. [Free Colab](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.2_(11B)-Vision.ipynb)\n",
+ "6. See notebooks for DPO, ORPO, Continued pretraining, conversational finetuning and more on our [documentation](https://docs.unsloth.ai/get-started/unsloth-notebooks)!\n",
+ "\n",
+ "\n",
+ "

\n",
+ "

\n",
+ "

\n",
+ "\n",
+ " Join Discord if you need help + βοΈ
Star us on Github βοΈ\n",
+ "
\n"
+ ]
+ }
+ ],
+ "metadata": {
+ "accelerator": "GPU",
+ "colab": {
+ "gpuType": "T4",
+ "provenance": []
+ },
+ "kernelspec": {
+ "display_name": "major02",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.12"
+ },
+ "widgets": {
+ "application/vnd.jupyter.widget-state+json": {
+ "047a514f9797432098ea1ca08791f17d": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "HBoxModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HBoxModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HBoxView",
+ "box_style": "",
+ "children": [
+ "IPY_MODEL_31f86416918f4369836a15e411c76004",
+ "IPY_MODEL_e6ffc8ffcdd242a18444c113c65afc3a",
+ "IPY_MODEL_c971bc042e67481d88ea618946c31557"
+ ],
+ "layout": "IPY_MODEL_4648982d046046e4af427d6effa7552f"
+ }
+ },
+ "047e3f535c244ca98e954bb3eec36842": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "HBoxModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HBoxModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HBoxView",
+ "box_style": "",
+ "children": [
+ "IPY_MODEL_8ecdc33c1cfe42a7b43b36296f5c80cc",
+ "IPY_MODEL_d8b08814df4d4f7c8a28704d5b1d6f97",
+ "IPY_MODEL_7b2a51645df941cc9911b996b3aef06b"
+ ],
+ "layout": "IPY_MODEL_f4466db8776a45ae8026eb621affcfa8"
+ }
+ },
+ "0498617989a345f2833f9191fc8d0c0b": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "FloatProgressModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "FloatProgressModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "ProgressView",
+ "bar_style": "danger",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_9208c1d37e7b49659d386896a86936ec",
+ "max": 5964186429,
+ "min": 0,
+ "orientation": "horizontal",
+ "style": "IPY_MODEL_bec4bd89dc6d472db216b376055d486c",
+ "value": 5964185861
+ }
+ },
+ "052ea223c8f740249ddbde6ac90df588": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "06a6ff819b9441a4ad436c75e7837c86": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "HTMLModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_f98c013f6f924237a99ec520f35ebcde",
+ "placeholder": "β",
+ "style": "IPY_MODEL_c48fb808314a436080c1cde975e2e1cd",
+ "value": "special_tokens_map.json:β100%"
+ }
+ },
+ "06cabd4f473e47c0b72dea34a3d53912": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "07aabd90699847fe8ed89aa98644d29b": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "0bff2a1b0f724e16aad83b74dc82769e": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "DescriptionStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "0d6542b46c464d8b9013ccf0a28a9572": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "HTMLModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_07aabd90699847fe8ed89aa98644d29b",
+ "placeholder": "β",
+ "style": "IPY_MODEL_96c889ea82384c36baaff99470cde678",
+ "value": "tokenizer_config.json:β100%"
+ }
+ },
+ "0da95cbd0ed745ef9002d11298b6295c": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "ProgressStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "ProgressStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "bar_color": null,
+ "description_width": ""
+ }
+ },
+ "147e5a626c7848c1adf46cf559f858bd": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "ProgressStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "ProgressStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "bar_color": null,
+ "description_width": ""
+ }
+ },
+ "162cbc90157a42bd86c40cd76a75d031": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "FloatProgressModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "FloatProgressModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "ProgressView",
+ "bar_style": "success",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_8afea3489b2b4a278279dc5a05d1b1b2",
+ "max": 50642,
+ "min": 0,
+ "orientation": "horizontal",
+ "style": "IPY_MODEL_4c1991173ffd46a5af8f3eff48e00755",
+ "value": 50642
+ }
+ },
+ "19f9fe73153b46bc960e8e7e60a705ee": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "1c47fd0322f44242b31425aaf759262f": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "1c7db46fc15f4b0a869d2541d4fdb3e9": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "226ae004bd1a486da39d316e0e047750": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "DescriptionStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "2772654a3eb0467cb3aba0b6e812a134": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "HTMLModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_cf5d0dd9220641edb3a230e246d523f5",
+ "placeholder": "β",
+ "style": "IPY_MODEL_0bff2a1b0f724e16aad83b74dc82769e",
+ "value": "β1000/162653β[00:10<28:29,β94.54βexamples/s]"
+ }
+ },
+ "280d9b3120c540f98c7e00065e7c8750": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "2a0b9d8479ba4470a7e23f0e590779f1": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "DescriptionStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "2de170c9830549cab477265a9020a6ff": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "31f86416918f4369836a15e411c76004": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "HTMLModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_874a8e03473c498e913bfeb2cae2284f",
+ "placeholder": "β",
+ "style": "IPY_MODEL_6649c127f38a4a878446c6f7cf155057",
+ "value": "generation_config.json:β100%"
+ }
+ },
+ "327a0ab3d5f748c1ae6b93d851ef1f5f": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "DescriptionStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "39f85329cf6440c4960046e7aee87462": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "3b4de26403a24f8f8cad26c594d0c485": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "HBoxModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HBoxModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HBoxView",
+ "box_style": "",
+ "children": [
+ "IPY_MODEL_06a6ff819b9441a4ad436c75e7837c86",
+ "IPY_MODEL_7b69ea48666d4fb3a81bbbf6aaf0d8a2",
+ "IPY_MODEL_8f1a3e026f3c46afb57db907f2940a8d"
+ ],
+ "layout": "IPY_MODEL_d681fbb2c17c4bd09a3a794daf57f4b2"
+ }
+ },
+ "3bf71d4085424a89b9ccf8372c94d887": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "HBoxModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HBoxModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HBoxView",
+ "box_style": "",
+ "children": [
+ "IPY_MODEL_a4e3dfc277534401a831ca51f8a8c838",
+ "IPY_MODEL_c8501d4f4a80481d90511872b84e1225",
+ "IPY_MODEL_2772654a3eb0467cb3aba0b6e812a134"
+ ],
+ "layout": "IPY_MODEL_c0975cebc67743739b6c760c851c324f"
+ }
+ },
+ "4648982d046046e4af427d6effa7552f": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "4c1991173ffd46a5af8f3eff48e00755": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "ProgressStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "ProgressStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "bar_color": null,
+ "description_width": ""
+ }
+ },
+ "4dd3cf69544d44c785485cde9e1cbadf": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "515c241f259b498796088d2261099ff1": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "ProgressStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "ProgressStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "bar_color": null,
+ "description_width": ""
+ }
+ },
+ "52509ddf39fe49c7a24ea28828e10ef9": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "5420518b6b0e47bd8df8e9023325c74d": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "FloatProgressModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "FloatProgressModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "ProgressView",
+ "bar_style": "success",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_7cc5e6af64c64127bd9df959dc6a3345",
+ "max": 17209920,
+ "min": 0,
+ "orientation": "horizontal",
+ "style": "IPY_MODEL_515c241f259b498796088d2261099ff1",
+ "value": 17209920
+ }
+ },
+ "57a7d8220c86425592ae87c157b56485": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "5a76aeeee3ca4fbd8e5fc50fa7febce0": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "DescriptionStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "5d30d40769ad435c8e5367254b3ce416": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "HTMLModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_8bf81c3194ba4a70b13138d9dc5bd32c",
+ "placeholder": "β",
+ "style": "IPY_MODEL_5a76aeeee3ca4fbd8e5fc50fa7febce0",
+ "value": "β5.96G/5.96Gβ[00:57<00:00,β318MB/s]"
+ }
+ },
+ "6358f3dd281e450e9d862fb819431e94": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "HBoxModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HBoxModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HBoxView",
+ "box_style": "",
+ "children": [
+ "IPY_MODEL_d3c86a31c63c4bcc8044d869c307d650",
+ "IPY_MODEL_0498617989a345f2833f9191fc8d0c0b",
+ "IPY_MODEL_5d30d40769ad435c8e5367254b3ce416"
+ ],
+ "layout": "IPY_MODEL_1c7db46fc15f4b0a869d2541d4fdb3e9"
+ }
+ },
+ "6649c127f38a4a878446c6f7cf155057": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "DescriptionStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "7b2a51645df941cc9911b996b3aef06b": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "HTMLModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_1c47fd0322f44242b31425aaf759262f",
+ "placeholder": "β",
+ "style": "IPY_MODEL_2a0b9d8479ba4470a7e23f0e590779f1",
+ "value": "β51760/51760β[00:00<00:00,β71020.08βexamples/s]"
+ }
+ },
+ "7b69ea48666d4fb3a81bbbf6aaf0d8a2": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "FloatProgressModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "FloatProgressModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "ProgressView",
+ "bar_style": "success",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_86f733ba965b4237889cd89d28b63d61",
+ "max": 459,
+ "min": 0,
+ "orientation": "horizontal",
+ "style": "IPY_MODEL_faa00b65663441c880758c5abb09a519",
+ "value": 459
+ }
+ },
+ "7cc5e6af64c64127bd9df959dc6a3345": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "8147cbf8ef4b4b64a1420ad01bdf6ade": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "ProgressStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "ProgressStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "bar_color": null,
+ "description_width": ""
+ }
+ },
+ "86f733ba965b4237889cd89d28b63d61": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "874a8e03473c498e913bfeb2cae2284f": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "8afea3489b2b4a278279dc5a05d1b1b2": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "8bf81c3194ba4a70b13138d9dc5bd32c": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "8ecdc33c1cfe42a7b43b36296f5c80cc": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "HTMLModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_39f85329cf6440c4960046e7aee87462",
+ "placeholder": "β",
+ "style": "IPY_MODEL_cd5199c24cd941e9b02c8444f740e0dc",
+ "value": "Map:β100%"
+ }
+ },
+ "8f1a3e026f3c46afb57db907f2940a8d": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "HTMLModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_052ea223c8f740249ddbde6ac90df588",
+ "placeholder": "β",
+ "style": "IPY_MODEL_d0ad350985de47f09d98f57dd98a9b00",
+ "value": "β459/459β[00:00<00:00,β50.6kB/s]"
+ }
+ },
+ "9208c1d37e7b49659d386896a86936ec": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "95bfadbb334246b3b8c6fbf004c513f1": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "HTMLModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_c34232310c05418aa06aac3a9127319c",
+ "placeholder": "β",
+ "style": "IPY_MODEL_ec38a625ae68438c99066889f866336d",
+ "value": "β17.2M/17.2Mβ[00:00<00:00,β63.2MB/s]"
+ }
+ },
+ "96c889ea82384c36baaff99470cde678": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "DescriptionStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "98138a6fc06644998373e4034f821ec3": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "DescriptionStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "a4e3dfc277534401a831ca51f8a8c838": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "HTMLModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_a6e1e666100240be89148555d199073d",
+ "placeholder": "β",
+ "style": "IPY_MODEL_98138a6fc06644998373e4034f821ec3",
+ "value": "Unsloth:βTokenizingβ["text"]β(num_proc=2):βββ1%"
+ }
+ },
+ "a6e1e666100240be89148555d199073d": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "b34006bb60fe47868586bc33fabde306": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "bec4bd89dc6d472db216b376055d486c": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "ProgressStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "ProgressStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "bar_color": null,
+ "description_width": ""
+ }
+ },
+ "c0975cebc67743739b6c760c851c324f": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "c34232310c05418aa06aac3a9127319c": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "c48fb808314a436080c1cde975e2e1cd": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "DescriptionStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "c8501d4f4a80481d90511872b84e1225": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "FloatProgressModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "FloatProgressModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "ProgressView",
+ "bar_style": "",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_52509ddf39fe49c7a24ea28828e10ef9",
+ "max": 162653,
+ "min": 0,
+ "orientation": "horizontal",
+ "style": "IPY_MODEL_8147cbf8ef4b4b64a1420ad01bdf6ade",
+ "value": 1000
+ }
+ },
+ "c971bc042e67481d88ea618946c31557": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "HTMLModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_19f9fe73153b46bc960e8e7e60a705ee",
+ "placeholder": "β",
+ "style": "IPY_MODEL_226ae004bd1a486da39d316e0e047750",
+ "value": "β235/235β[00:00<00:00,β14.1kB/s]"
+ }
+ },
+ "cd5199c24cd941e9b02c8444f740e0dc": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "DescriptionStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "cd549bc1372e429cb466cf91c4cb14f6": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "DescriptionStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "cf5d0dd9220641edb3a230e246d523f5": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "d0ad350985de47f09d98f57dd98a9b00": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "DescriptionStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "d3c86a31c63c4bcc8044d869c307d650": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "HTMLModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_fcd62c3342a7435c8fa582f46e25a7fe",
+ "placeholder": "β",
+ "style": "IPY_MODEL_cd549bc1372e429cb466cf91c4cb14f6",
+ "value": "model.safetensors:β100%"
+ }
+ },
+ "d681fbb2c17c4bd09a3a794daf57f4b2": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "d8b08814df4d4f7c8a28704d5b1d6f97": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "FloatProgressModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "FloatProgressModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "ProgressView",
+ "bar_style": "success",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_2de170c9830549cab477265a9020a6ff",
+ "max": 51760,
+ "min": 0,
+ "orientation": "horizontal",
+ "style": "IPY_MODEL_0da95cbd0ed745ef9002d11298b6295c",
+ "value": 51760
+ }
+ },
+ "d9387aa9d6384146a03f63f52839ef3a": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "HBoxModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HBoxModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HBoxView",
+ "box_style": "",
+ "children": [
+ "IPY_MODEL_e5fc60f9587e4ac1b38b520e26575220",
+ "IPY_MODEL_5420518b6b0e47bd8df8e9023325c74d",
+ "IPY_MODEL_95bfadbb334246b3b8c6fbf004c513f1"
+ ],
+ "layout": "IPY_MODEL_06cabd4f473e47c0b72dea34a3d53912"
+ }
+ },
+ "da1fdb9955774c95a33bd3ba03a5fc5b": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "HBoxModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HBoxModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HBoxView",
+ "box_style": "",
+ "children": [
+ "IPY_MODEL_0d6542b46c464d8b9013ccf0a28a9572",
+ "IPY_MODEL_162cbc90157a42bd86c40cd76a75d031",
+ "IPY_MODEL_f383dc65ca65440eb77740c383fb2c4d"
+ ],
+ "layout": "IPY_MODEL_4dd3cf69544d44c785485cde9e1cbadf"
+ }
+ },
+ "df9e42625d714c5da22fa90ed1fb5491": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "DescriptionStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "e5fc60f9587e4ac1b38b520e26575220": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "HTMLModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_280d9b3120c540f98c7e00065e7c8750",
+ "placeholder": "β",
+ "style": "IPY_MODEL_df9e42625d714c5da22fa90ed1fb5491",
+ "value": "tokenizer.json:β100%"
+ }
+ },
+ "e6ffc8ffcdd242a18444c113c65afc3a": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "FloatProgressModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "FloatProgressModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "ProgressView",
+ "bar_style": "success",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_57a7d8220c86425592ae87c157b56485",
+ "max": 235,
+ "min": 0,
+ "orientation": "horizontal",
+ "style": "IPY_MODEL_147e5a626c7848c1adf46cf559f858bd",
+ "value": 235
+ }
+ },
+ "ec38a625ae68438c99066889f866336d": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "DescriptionStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "f383dc65ca65440eb77740c383fb2c4d": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "HTMLModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_b34006bb60fe47868586bc33fabde306",
+ "placeholder": "β",
+ "style": "IPY_MODEL_327a0ab3d5f748c1ae6b93d851ef1f5f",
+ "value": "β50.6k/50.6kβ[00:00<00:00,β3.15MB/s]"
+ }
+ },
+ "f4466db8776a45ae8026eb621affcfa8": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "f98c013f6f924237a99ec520f35ebcde": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "faa00b65663441c880758c5abb09a519": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "ProgressStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "ProgressStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "bar_color": null,
+ "description_width": ""
+ }
+ },
+ "fcd62c3342a7435c8fa582f46e25a7fe": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ }
+ }
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}