{ "nbformat": 4, "nbformat_minor": 0, "metadata": { "colab": { "provenance": [], "gpuType": "T4" }, "kernelspec": { "name": "python3", "display_name": "Python 3" }, "language_info": { "name": "python" }, "accelerator": "GPU" }, "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": { "id": "m7rU-pjX3Y1O" }, "outputs": [], "source": [ "%%capture\n", "%pip install gradio transformers accelerate numpy requests\n", "%pip install torch torchvision av hf_xet qwen-vl-utils\n", "%pip install pillow huggingface_hub opencv-python spaces" ] }, { "cell_type": "code", "source": [ "from huggingface_hub import notebook_login, HfApi\n", "notebook_login()" ], "metadata": { "id": "dZUVag_jJMck" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "import os\n", "import random\n", "import uuid\n", "import json\n", "import time\n", "import asyncio\n", "import re\n", "from threading import Thread\n", "\n", "import gradio as gr\n", "import spaces\n", "import torch\n", "import numpy as np\n", "from PIL import Image\n", "import cv2\n", "\n", "from transformers import (\n", " AutoProcessor,\n", " Gemma3ForConditionalGeneration,\n", " Qwen2VLForConditionalGeneration,\n", " TextIteratorStreamer,\n", ")\n", "from transformers.image_utils import load_image\n", "\n", "# Constants\n", "MAX_MAX_NEW_TOKENS = 2048\n", "DEFAULT_MAX_NEW_TOKENS = 1024\n", "MAX_INPUT_TOKEN_LENGTH = int(os.getenv(\"MAX_INPUT_TOKEN_LENGTH\", \"4096\"))\n", "MAX_SEED = np.iinfo(np.int32).max\n", "\n", "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n", "\n", "# Helper function to return a progress bar HTML snippet.\n", "def progress_bar_html(label: str) -> str:\n", " return f'''\n", "