{ "nbformat": 4, "nbformat_minor": 0, "metadata": { "colab": { "provenance": [], "gpuType": "T4" }, "kernelspec": { "name": "python3", "display_name": "Python 3" }, "language_info": { "name": "python" }, "accelerator": "GPU" }, "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": { "id": "m7rU-pjX3Y1O" }, "outputs": [], "source": [ "%%capture\n", "!pip install gradio transformers accelerate numpy requests\n", "!pip install torch torchvision av hf_xet qwen-vl-utils\n", "!pip install pillow huggingface_hub opencv-python spaces" ] }, { "cell_type": "code", "source": [ "from huggingface_hub import notebook_login, HfApi\n", "notebook_login()" ], "metadata": { "id": "dZUVag_jJMck" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "import os\n", "import random\n", "import uuid\n", "import json\n", "import time\n", "import asyncio\n", "import re\n", "from threading import Thread\n", "\n", "import gradio as gr\n", "import spaces\n", "import torch\n", "import numpy as np\n", "from PIL import Image\n", "import cv2\n", "\n", "from transformers import (\n", " AutoProcessor,\n", " Gemma3ForConditionalGeneration,\n", " Qwen2VLForConditionalGeneration,\n", " TextIteratorStreamer,\n", ")\n", "from transformers.image_utils import load_image\n", "\n", "# Constants\n", "MAX_MAX_NEW_TOKENS = 2048\n", "DEFAULT_MAX_NEW_TOKENS = 1024\n", "MAX_INPUT_TOKEN_LENGTH = int(os.getenv(\"MAX_INPUT_TOKEN_LENGTH\", \"4096\"))\n", "MAX_SEED = np.iinfo(np.int32).max\n", "\n", "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n", "\n", "# Helper function to return a progress bar HTML snippet.\n", "def progress_bar_html(label: str) -> str:\n", " return f'''\n", "
<div style=\"display: flex; align-items: center; gap: 10px;\">\n", " <span style=\"font-size: 14px;\">{label}</span>\n", " <div style=\"width: 110px; height: 6px; background-color: #e0e0e0; border-radius: 3px;\">\n", " <div style=\"width: 100%; height: 100%; background-color: #4caf50; border-radius: 3px;\"></div>\n", " </div>\n</div>\n", "
\n", "\n", " '''\n", "\n", "# Qwen2-VL (for optional image inference)\n", "\n", "MODEL_ID_VL = \"prithivMLmods/Qwen2-VL-OCR-2B-Instruct\"\n", "processor = AutoProcessor.from_pretrained(MODEL_ID_VL, trust_remote_code=True)\n", "model_m = Qwen2VLForConditionalGeneration.from_pretrained(\n", " MODEL_ID_VL,\n", " trust_remote_code=True,\n", " torch_dtype=torch.float16\n", ").to(\"cuda\").eval()\n", "\n", "def clean_chat_history(chat_history):\n", " cleaned = []\n", " for msg in chat_history:\n", " if isinstance(msg, dict) and isinstance(msg.get(\"content\"), str):\n", " cleaned.append(msg)\n", " return cleaned\n", "\n", "bad_words = json.loads(os.getenv('BAD_WORDS', \"[]\"))\n", "bad_words_negative = json.loads(os.getenv('BAD_WORDS_NEGATIVE', \"[]\"))\n", "default_negative = os.getenv(\"default_negative\", \"\")\n", "\n", "def check_text(prompt, negative=\"\"):\n", " for i in bad_words:\n", " if i in prompt:\n", " return True\n", " for i in bad_words_negative:\n", " if i in negative:\n", " return True\n", " return False\n", "\n", "def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:\n", " if randomize_seed:\n", " seed = random.randint(0, MAX_SEED)\n", " return seed\n", "\n", "CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv(\"CACHE_EXAMPLES\", \"0\") == \"1\"\n", "MAX_IMAGE_SIZE = int(os.getenv(\"MAX_IMAGE_SIZE\", \"2048\"))\n", "USE_TORCH_COMPILE = os.getenv(\"USE_TORCH_COMPILE\", \"0\") == \"1\"\n", "ENABLE_CPU_OFFLOAD = os.getenv(\"ENABLE_CPU_OFFLOAD\", \"0\") == \"1\"\n", "\n", "dtype = torch.float16 if device.type == \"cuda\" else torch.float32\n", "\n", "\n", "# Gemma3 Model (default for text, image, & video inference)\n", "\n", "gemma3_model_id = \"google/gemma-3-4b-it\" # alternative: google/gemma-3-12b-it\n", "gemma3_model = Gemma3ForConditionalGeneration.from_pretrained(\n", " gemma3_model_id, device_map=\"auto\"\n", ").eval()\n", "gemma3_processor = AutoProcessor.from_pretrained(gemma3_model_id)\n", "\n", "# VIDEO PROCESSING HELPER\n", "\n", "def downsample_video(video_path):\n", " vidcap = cv2.VideoCapture(video_path)\n", " total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))\n", " fps = vidcap.get(cv2.CAP_PROP_FPS)\n", " frames = []\n", " # Sample 10 evenly spaced frames.\n", " frame_indices = np.linspace(0, total_frames - 1, 10, dtype=int)\n", " for i in frame_indices:\n", " vidcap.set(cv2.CAP_PROP_POS_FRAMES, i)\n", " success, image = vidcap.read()\n", " if success:\n", " # Convert from BGR to RGB and then to PIL Image.\n", " image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n", " pil_image = Image.fromarray(image)\n", " timestamp = round(i / fps, 2)\n", " frames.append((pil_image, timestamp))\n", " vidcap.release()\n", " return frames\n", "\n", "# MAIN GENERATION FUNCTION\n", "\n", "@spaces.GPU\n", "def generate(\n", " input_dict: dict,\n", " chat_history: list[dict],\n", " max_new_tokens: int = DEFAULT_MAX_NEW_TOKENS,\n", " temperature: float = 0.6,\n", " top_p: float = 0.9,\n", " top_k: int = 50,\n", " repetition_penalty: float = 1.2,\n", "):\n", " text = input_dict[\"text\"]\n", " files = input_dict.get(\"files\", [])\n", " lower_text = text.lower().strip()\n", "\n", " # ----- Qwen2-VL branch (triggered with @qwen2-vl) -----\n", " if lower_text.startswith(\"@qwen2-vl\"):\n", " prompt_clean = re.sub(r\"@qwen2-vl\", \"\", text, flags=re.IGNORECASE).strip().strip('\"')\n", " if files:\n", " images = [load_image(f) for f in files]\n", " messages = [{\n", " \"role\": \"user\",\n", " \"content\": [\n", " *[{\"type\": \"image\", \"image\": image} for image in 
images],\n", " {\"type\": \"text\", \"text\": prompt_clean},\n", " ]\n", " }]\n", " prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n", " inputs = processor(text=[prompt], images=images, return_tensors=\"pt\", padding=True).to(\"cuda\")\n", " else:\n", " messages = [\n", " {\"role\": \"system\", \"content\": [{\"type\": \"text\", \"text\": \"You are a helpful assistant.\"}]},\n", " {\"role\": \"user\", \"content\": [{\"type\": \"text\", \"text\": prompt_clean}]}\n", " ]\n", " inputs = processor.apply_chat_template(\n", " messages, add_generation_prompt=True, tokenize=True,\n", " return_dict=True, return_tensors=\"pt\"\n", " ).to(\"cuda\", dtype=torch.float16)\n", " streamer = TextIteratorStreamer(processor.tokenizer, timeout=20.0, skip_prompt=True, skip_special_tokens=True)\n", " generation_kwargs = {\n", " **inputs,\n", " \"streamer\": streamer,\n", " \"max_new_tokens\": max_new_tokens,\n", " \"do_sample\": True,\n", " \"temperature\": temperature,\n", " \"top_p\": top_p,\n", " \"top_k\": top_k,\n", " \"repetition_penalty\": repetition_penalty,\n", " }\n", " thread = Thread(target=model_m.generate, kwargs=generation_kwargs)\n", " thread.start()\n", " buffer = \"\"\n", " yield progress_bar_html(\"Processing with Qwen2VL\")\n", " for new_text in streamer:\n", " buffer += new_text\n", " buffer = buffer.replace(\"<|im_end|>\", \"\")\n", " time.sleep(0.01)\n", " yield buffer\n", " return\n", "\n", " # ----- Default branch: Gemma3 (for text, image, & video inference) -----\n", " if files:\n", " # Check if any provided file is a video based on extension.\n", " video_extensions = (\".mp4\", \".mov\", \".avi\", \".mkv\", \".webm\")\n", " if any(str(f).lower().endswith(video_extensions) for f in files):\n", " # Video inference branch.\n", " prompt_clean = re.sub(r\"@video-infer\", \"\", text, flags=re.IGNORECASE).strip().strip('\"')\n", " video_path = files[0]\n", " frames = downsample_video(video_path)\n", " messages = [\n", " {\"role\": \"system\", \"content\": [{\"type\": \"text\", \"text\": \"You are a helpful assistant.\"}]},\n", " {\"role\": \"user\", \"content\": [{\"type\": \"text\", \"text\": prompt_clean}]}\n", " ]\n", " # Append each frame (with its timestamp) to the conversation.\n", " for frame in frames:\n", " image, timestamp = frame\n", " image_path = f\"video_frame_{uuid.uuid4().hex}.png\"\n", " image.save(image_path)\n", " messages[1][\"content\"].append({\"type\": \"text\", \"text\": f\"Frame {timestamp}:\"})\n", " messages[1][\"content\"].append({\"type\": \"image\", \"url\": image_path})\n", " inputs = gemma3_processor.apply_chat_template(\n", " messages, add_generation_prompt=True, tokenize=True,\n", " return_dict=True, return_tensors=\"pt\"\n", " ).to(gemma3_model.device, dtype=torch.bfloat16)\n", " streamer = TextIteratorStreamer(gemma3_processor.tokenizer, timeout=20.0, skip_prompt=True, skip_special_tokens=True)\n", " generation_kwargs = {\n", " **inputs,\n", " \"streamer\": streamer,\n", " \"max_new_tokens\": max_new_tokens,\n", " \"do_sample\": True,\n", " \"temperature\": temperature,\n", " \"top_p\": top_p,\n", " \"top_k\": top_k,\n", " \"repetition_penalty\": repetition_penalty,\n", " }\n", " thread = Thread(target=gemma3_model.generate, kwargs=generation_kwargs)\n", " thread.start()\n", " buffer = \"\"\n", " yield progress_bar_html(\"Processing video with Gemma3\")\n", " for new_text in streamer:\n", " buffer += new_text\n", " time.sleep(0.01)\n", " yield buffer\n", " return\n", " else:\n", " # Image inference branch.\n", 
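" # Strip the optional @gemma3 tag; untagged image prompts are passed to Gemma3 unchanged.\n",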
" prompt_clean = re.sub(r\"@gemma3\", \"\", text, flags=re.IGNORECASE).strip().strip('\"')\n", " images = [load_image(f) for f in files]\n", " messages = [{\n", " \"role\": \"user\",\n", " \"content\": [\n", " *[{\"type\": \"image\", \"image\": image} for image in images],\n", " {\"type\": \"text\", \"text\": prompt_clean},\n", " ]\n", " }]\n", " inputs = gemma3_processor.apply_chat_template(\n", " messages, tokenize=True, add_generation_prompt=True,\n", " return_dict=True, return_tensors=\"pt\"\n", " ).to(gemma3_model.device, dtype=torch.bfloat16)\n", " streamer = TextIteratorStreamer(gemma3_processor.tokenizer, timeout=20.0, skip_prompt=True, skip_special_tokens=True)\n", " generation_kwargs = {\n", " **inputs,\n", " \"streamer\": streamer,\n", " \"max_new_tokens\": max_new_tokens,\n", " \"do_sample\": True,\n", " \"temperature\": temperature,\n", " \"top_p\": top_p,\n", " \"top_k\": top_k,\n", " \"repetition_penalty\": repetition_penalty,\n", " }\n", " thread = Thread(target=gemma3_model.generate, kwargs=generation_kwargs)\n", " thread.start()\n", " buffer = \"\"\n", " yield progress_bar_html(\"Processing with Gemma3\")\n", " for new_text in streamer:\n", " buffer += new_text\n", " time.sleep(0.01)\n", " yield buffer\n", " return\n", " else:\n", " # Text-only inference branch.\n", " messages = [\n", " {\"role\": \"system\", \"content\": [{\"type\": \"text\", \"text\": \"You are a helpful assistant.\"}]},\n", " {\"role\": \"user\", \"content\": [{\"type\": \"text\", \"text\": text}]}\n", " ]\n", " inputs = gemma3_processor.apply_chat_template(\n", " messages, add_generation_prompt=True, tokenize=True,\n", " return_dict=True, return_tensors=\"pt\"\n", " ).to(gemma3_model.device, dtype=torch.bfloat16)\n", " streamer = TextIteratorStreamer(gemma3_processor.tokenizer, timeout=20.0, skip_prompt=True, skip_special_tokens=True)\n", " generation_kwargs = {\n", " **inputs,\n", " \"streamer\": streamer,\n", " \"max_new_tokens\": max_new_tokens,\n", " \"do_sample\": True,\n", " \"temperature\": temperature,\n", " \"top_p\": top_p,\n", " \"top_k\": top_k,\n", " \"repetition_penalty\": repetition_penalty,\n", " }\n", " thread = Thread(target=gemma3_model.generate, kwargs=generation_kwargs)\n", " thread.start()\n", " outputs = []\n", " for new_text in streamer:\n", " outputs.append(new_text)\n", " yield \"\".join(outputs)\n", " final_response = \"\".join(outputs)\n", " yield final_response\n", "\n", "\n", "# Gradio Interface\n", "\n", "demo = gr.ChatInterface(\n", " fn=generate,\n", " additional_inputs=[\n", " gr.Slider(label=\"Max new tokens\", minimum=1, maximum=MAX_MAX_NEW_TOKENS, step=1, value=DEFAULT_MAX_NEW_TOKENS),\n", " gr.Slider(label=\"Temperature\", minimum=0.1, maximum=4.0, step=0.1, value=0.6),\n", " gr.Slider(label=\"Top-p (nucleus sampling)\", minimum=0.05, maximum=1.0, step=0.05, value=0.9),\n", " gr.Slider(label=\"Top-k\", minimum=1, maximum=1000, step=1, value=50),\n", " gr.Slider(label=\"Repetition penalty\", minimum=1.0, maximum=2.0, step=0.05, value=1.2),\n", " ],\n", " type=\"messages\",\n", " description=\"# **Gemma 3 Multimodal** \\n`Use @qwen2-vl to switch to Qwen2-VL OCR for image inference and @video-infer for video input`\",\n", " fill_height=True,\n", " textbox=gr.MultimodalTextbox(label=\"Query Input\", file_types=[\"image\", \"video\"], file_count=\"multiple\", placeholder=\"Tag with @qwen2-vl for Qwen2-VL inference if needed.\"),\n", " stop_btn=\"Stop Generation\",\n", " multimodal=True,\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " 
demo.queue(max_size=20).launch(share=True)" ], "metadata": { "id": "kW4MjaOs3c9E" }, "execution_count": null, "outputs": [] } ] }