Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- groundingLMM/gradio-dev/demo/Echocardiogram-Segmentation/img1.jpg +0 -0
- groundingLMM/gradio-dev/demo/Echocardiogram-Segmentation/requirements.txt +7 -0
- groundingLMM/gradio-dev/demo/Echocardiogram-Segmentation/run.ipynb +1 -0
- groundingLMM/gradio-dev/demo/Echocardiogram-Segmentation/run.py +88 -0
- groundingLMM/gradio-dev/demo/all_demos/run.py +35 -0
- groundingLMM/gradio-dev/demo/altair_plot/requirements.txt +2 -0
- groundingLMM/gradio-dev/demo/altair_plot/run.ipynb +1 -0
- groundingLMM/gradio-dev/demo/altair_plot/run.py +140 -0
- groundingLMM/gradio-dev/demo/blocks_component_shortcut/run.ipynb +1 -0
- groundingLMM/gradio-dev/demo/blocks_gpt/run.ipynb +1 -0
- groundingLMM/gradio-dev/demo/blocks_gpt/run.py +16 -0
- groundingLMM/gradio-dev/demo/blocks_xray/run.ipynb +1 -0
- groundingLMM/gradio-dev/demo/blocks_xray/run.py +61 -0
- groundingLMM/gradio-dev/demo/calculator/run.ipynb +1 -0
- groundingLMM/gradio-dev/demo/calculator/run.py +33 -0
- groundingLMM/gradio-dev/demo/calculator_live/run.ipynb +1 -0
- groundingLMM/gradio-dev/demo/calculator_live/run.py +24 -0
- groundingLMM/gradio-dev/demo/cancel_events/run.ipynb +1 -0
- groundingLMM/gradio-dev/demo/cancel_events/run.py +49 -0
- groundingLMM/gradio-dev/demo/chicago-bikeshare-dashboard/requirements.txt +3 -0
- groundingLMM/gradio-dev/demo/chicago-bikeshare-dashboard/run.ipynb +1 -0
- groundingLMM/gradio-dev/demo/chicago-bikeshare-dashboard/run.py +91 -0
- groundingLMM/gradio-dev/demo/colorpicker_component/run.ipynb +1 -0
- groundingLMM/gradio-dev/demo/colorpicker_component/run.py +8 -0
- groundingLMM/gradio-dev/demo/depth_estimation/DESCRIPTION.md +1 -0
- groundingLMM/gradio-dev/demo/depth_estimation/packages.txt +1 -0
- groundingLMM/gradio-dev/demo/depth_estimation/requirements.txt +6 -0
- groundingLMM/gradio-dev/demo/depth_estimation/run.ipynb +1 -0
- groundingLMM/gradio-dev/demo/depth_estimation/run.py +117 -0
- groundingLMM/gradio-dev/demo/diffusers_with_batching/requirements.txt +3 -0
- groundingLMM/gradio-dev/demo/diffusers_with_batching/run.ipynb +1 -0
- groundingLMM/gradio-dev/demo/diffusers_with_batching/run.py +22 -0
- groundingLMM/gradio-dev/demo/fake_diffusion/DESCRIPTION.md +1 -0
- groundingLMM/gradio-dev/demo/fake_diffusion/requirements.txt +1 -0
- groundingLMM/gradio-dev/demo/fake_diffusion/run.ipynb +1 -0
- groundingLMM/gradio-dev/demo/fake_diffusion/run.py +20 -0
- groundingLMM/gradio-dev/demo/gender_sentence_custom_interpretation/run.ipynb +1 -0
- groundingLMM/gradio-dev/demo/gender_sentence_custom_interpretation/run.py +46 -0
- groundingLMM/gradio-dev/demo/gender_sentence_custom_interpretation/screenshot.gif +0 -0
- groundingLMM/gradio-dev/demo/hello_world_3/run.ipynb +1 -0
- groundingLMM/gradio-dev/demo/hello_world_3/run.py +15 -0
- groundingLMM/gradio-dev/demo/highlightedtext_component/run.ipynb +1 -0
- groundingLMM/gradio-dev/demo/highlightedtext_component/run.py +8 -0
- groundingLMM/gradio-dev/demo/image_classifier/requirements.txt +2 -0
- groundingLMM/gradio-dev/demo/image_classifier/run.ipynb +1 -0
- groundingLMM/gradio-dev/demo/image_classifier/run.py +36 -0
- groundingLMM/gradio-dev/demo/image_mod/run.ipynb +1 -0
- groundingLMM/gradio-dev/demo/image_mod/run.py +23 -0
- groundingLMM/gradio-dev/demo/interface_parallel_load/run.ipynb +1 -0
- groundingLMM/gradio-dev/demo/interface_parallel_load/run.py +10 -0
groundingLMM/gradio-dev/demo/Echocardiogram-Segmentation/img1.jpg
ADDED
(binary image file)
groundingLMM/gradio-dev/demo/Echocardiogram-Segmentation/requirements.txt
ADDED
@@ -0,0 +1,7 @@
-f https://download.pytorch.org/whl/torch_stable.html
numpy
matplotlib
wget
torch
torchvision
groundingLMM/gradio-dev/demo/Echocardiogram-Segmentation/run.ipynb
ADDED
@@ -0,0 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: Echocardiogram-Segmentation"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio -f https://download.pytorch.org/whl/torch_stable.html numpy matplotlib wget torch torchvision "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/Echocardiogram-Segmentation/img1.jpg\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/Echocardiogram-Segmentation/img2.jpg"]}, {"cell_type": "code", "execution_count": null, "id": 44380577570523278879349135829904343037, "metadata": {}, "outputs": [], "source": ["import os\n", "import numpy as np\n", "import torch\n", "import torchvision\n", "import wget \n", "\n", "\n", "destination_folder = \"output\"\n", "destination_for_weights = \"weights\"\n", "\n", "if os.path.exists(destination_for_weights):\n", " print(\"The weights are at\", destination_for_weights)\n", "else:\n", " print(\"Creating folder at \", destination_for_weights, \" to store weights\")\n", " os.mkdir(destination_for_weights)\n", " \n", "segmentationWeightsURL = 'https://github.com/douyang/EchoNetDynamic/releases/download/v1.0.0/deeplabv3_resnet50_random.pt'\n", "\n", "if not os.path.exists(os.path.join(destination_for_weights, os.path.basename(segmentationWeightsURL))):\n", " print(\"Downloading Segmentation Weights, \", segmentationWeightsURL,\" to \",os.path.join(destination_for_weights, os.path.basename(segmentationWeightsURL)))\n", " filename = wget.download(segmentationWeightsURL, out = destination_for_weights)\n", "else:\n", " print(\"Segmentation Weights already present\")\n", "\n", "torch.cuda.empty_cache()\n", "\n", "def collate_fn(x):\n", " x, f = zip(*x)\n", " i = list(map(lambda t: t.shape[1], x))\n", " x = torch.as_tensor(np.swapaxes(np.concatenate(x, 1), 0, 1))\n", " return x, f, i\n", "\n", "model = torchvision.models.segmentation.deeplabv3_resnet50(pretrained=False, aux_loss=False)\n", "model.classifier[-1] = torch.nn.Conv2d(model.classifier[-1].in_channels, 1, kernel_size=model.classifier[-1].kernel_size)\n", "\n", "print(\"loading weights from \", os.path.join(destination_for_weights, \"deeplabv3_resnet50_random\"))\n", "\n", "if torch.cuda.is_available():\n", " print(\"cuda is available, original weights\")\n", " device = torch.device(\"cuda\")\n", " model = torch.nn.DataParallel(model)\n", " model.to(device)\n", " checkpoint = torch.load(os.path.join(destination_for_weights, os.path.basename(segmentationWeightsURL)))\n", " model.load_state_dict(checkpoint['state_dict'])\n", "else:\n", " print(\"cuda is not available, cpu weights\")\n", " device = torch.device(\"cpu\")\n", " checkpoint = torch.load(os.path.join(destination_for_weights, os.path.basename(segmentationWeightsURL)), map_location = \"cpu\")\n", " state_dict_cpu = {k[7:]: v for (k, v) in checkpoint['state_dict'].items()}\n", " model.load_state_dict(state_dict_cpu)\n", "\n", "model.eval()\n", "\n", "def segment(input):\n", " inp = input\n", " x = inp.transpose([2, 0, 1]) # channels-first\n", " x = np.expand_dims(x, axis=0) # adding a batch dimension \n", " \n", " mean = x.mean(axis=(0, 2, 3))\n", " std = x.std(axis=(0, 2, 3))\n", " x = x - mean.reshape(1, 3, 1, 1)\n", 
" x = x / std.reshape(1, 3, 1, 1)\n", " \n", " with torch.no_grad():\n", " x = torch.from_numpy(x).type('torch.FloatTensor').to(device)\n", " output = model(x) \n", " \n", " y = output['out'].numpy()\n", " y = y.squeeze()\n", " \n", " out = y>0 \n", " \n", " mask = inp.copy()\n", " mask[out] = np.array([0, 0, 255])\n", " \n", " return mask\n", "\n", "import gradio as gr\n", "\n", "i = gr.Image(shape=(112, 112), label=\"Echocardiogram\")\n", "o = gr.Image(label=\"Segmentation Mask\")\n", "\n", "examples = [[\"img1.jpg\"], [\"img2.jpg\"]]\n", "title = None #\"Left Ventricle Segmentation\"\n", "description = \"This semantic segmentation model identifies the left ventricle in echocardiogram images.\"\n", "# videos. Accurate evaluation of the motion and size of the left ventricle is crucial for the assessment of cardiac function and ejection fraction. In this interface, the user inputs apical-4-chamber images from echocardiography videos and the model will output a prediction of the localization of the left ventricle in blue. This model was trained on the publicly released EchoNet-Dynamic dataset of 10k echocardiogram videos with 20k expert annotations of the left ventricle and published as part of \u2018Video-based AI for beat-to-beat assessment of cardiac function\u2019 by Ouyang et al. in Nature, 2020.\"\n", "thumbnail = \"https://raw.githubusercontent.com/gradio-app/hub-echonet/master/thumbnail.png\"\n", "gr.Interface(segment, i, o, examples=examples, allow_flagging=False, analytics_enabled=False, thumbnail=thumbnail, cache_examples=False).launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
groundingLMM/gradio-dev/demo/Echocardiogram-Segmentation/run.py
ADDED
@@ -0,0 +1,88 @@
import os
import numpy as np
import torch
import torchvision
import wget


destination_folder = "output"
destination_for_weights = "weights"

if os.path.exists(destination_for_weights):
    print("The weights are at", destination_for_weights)
else:
    print("Creating folder at ", destination_for_weights, " to store weights")
    os.mkdir(destination_for_weights)

segmentationWeightsURL = 'https://github.com/douyang/EchoNetDynamic/releases/download/v1.0.0/deeplabv3_resnet50_random.pt'

if not os.path.exists(os.path.join(destination_for_weights, os.path.basename(segmentationWeightsURL))):
    print("Downloading Segmentation Weights, ", segmentationWeightsURL, " to ", os.path.join(destination_for_weights, os.path.basename(segmentationWeightsURL)))
    filename = wget.download(segmentationWeightsURL, out=destination_for_weights)
else:
    print("Segmentation Weights already present")

torch.cuda.empty_cache()

def collate_fn(x):
    x, f = zip(*x)
    i = list(map(lambda t: t.shape[1], x))
    x = torch.as_tensor(np.swapaxes(np.concatenate(x, 1), 0, 1))
    return x, f, i

model = torchvision.models.segmentation.deeplabv3_resnet50(pretrained=False, aux_loss=False)
model.classifier[-1] = torch.nn.Conv2d(model.classifier[-1].in_channels, 1, kernel_size=model.classifier[-1].kernel_size)

print("loading weights from ", os.path.join(destination_for_weights, "deeplabv3_resnet50_random"))

if torch.cuda.is_available():
    print("cuda is available, original weights")
    device = torch.device("cuda")
    model = torch.nn.DataParallel(model)
    model.to(device)
    checkpoint = torch.load(os.path.join(destination_for_weights, os.path.basename(segmentationWeightsURL)))
    model.load_state_dict(checkpoint['state_dict'])
else:
    print("cuda is not available, cpu weights")
    device = torch.device("cpu")
    checkpoint = torch.load(os.path.join(destination_for_weights, os.path.basename(segmentationWeightsURL)), map_location="cpu")
    state_dict_cpu = {k[7:]: v for (k, v) in checkpoint['state_dict'].items()}
    model.load_state_dict(state_dict_cpu)

model.eval()

def segment(input):
    inp = input
    x = inp.transpose([2, 0, 1])  # channels-first
    x = np.expand_dims(x, axis=0)  # adding a batch dimension

    mean = x.mean(axis=(0, 2, 3))
    std = x.std(axis=(0, 2, 3))
    x = x - mean.reshape(1, 3, 1, 1)
    x = x / std.reshape(1, 3, 1, 1)

    with torch.no_grad():
        x = torch.from_numpy(x).type('torch.FloatTensor').to(device)
        output = model(x)

    y = output['out'].numpy()
    y = y.squeeze()

    out = y > 0

    mask = inp.copy()
    mask[out] = np.array([0, 0, 255])

    return mask

import gradio as gr

i = gr.Image(shape=(112, 112), label="Echocardiogram")
o = gr.Image(label="Segmentation Mask")

examples = [["img1.jpg"], ["img2.jpg"]]
title = None  # "Left Ventricle Segmentation"
description = "This semantic segmentation model identifies the left ventricle in echocardiogram images."
# videos. Accurate evaluation of the motion and size of the left ventricle is crucial for the assessment of cardiac function and ejection fraction. In this interface, the user inputs apical-4-chamber images from echocardiography videos and the model will output a prediction of the localization of the left ventricle in blue. This model was trained on the publicly released EchoNet-Dynamic dataset of 10k echocardiogram videos with 20k expert annotations of the left ventricle and published as part of ‘Video-based AI for beat-to-beat assessment of cardiac function’ by Ouyang et al. in Nature, 2020."
thumbnail = "https://raw.githubusercontent.com/gradio-app/hub-echonet/master/thumbnail.png"
gr.Interface(segment, i, o, examples=examples, allow_flagging=False, analytics_enabled=False, thumbnail=thumbnail, cache_examples=False).launch()
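A quick way to sanity-check the segment function outside the Gradio Interface is to call it on one of the bundled example images. The following is a hedged sketch, assuming it runs after the definitions in run.py above, on a CPU machine, with img1.jpg present in the working directory:

# Hedged sketch: assumes segment() and the loaded model from run.py above are in scope,
# and that img1.jpg (one of the demo's example images) is in the working directory.
from PIL import Image
import numpy as np

img = np.array(Image.open("img1.jpg").convert("RGB").resize((112, 112)))
mask = segment(img)            # same function the Interface wires to the Image input
print(mask.shape, mask.dtype)  # expect (112, 112, 3); left-ventricle pixels tinted blue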
groundingLMM/gradio-dev/demo/all_demos/run.py
ADDED
@@ -0,0 +1,35 @@
import importlib
import gradio as gr
import os
import sys
import copy
import pathlib

os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"

demo_dir = pathlib.Path(__file__).parent / "demos"


all_demos = []
demo_module = None
for p in sorted(os.listdir("./demos")):
    old_path = copy.deepcopy(sys.path)
    sys.path = [os.path.join(demo_dir, p)] + sys.path
    try:  # Some demos may not be runnable because of 429 timeouts, etc.
        if demo_module is None:
            demo_module = importlib.import_module(f"run")
        else:
            demo_module = importlib.reload(demo_module)
        all_demos.append((p, demo_module.demo))
    except Exception as e:
        p = p + " ❌"
        with gr.Blocks() as demo:
            gr.Markdown(f"Error loading demo: {e}")
        all_demos.append((p, demo))

with gr.Blocks() as mega_demo:
    for demo_name, demo in all_demos:
        with gr.Tab(demo_name):
            demo.render()

mega_demo.queue().launch()
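The loader above imports each demos/<name>/run.py as a module called run and expects it to expose a module-level Blocks (or Interface) object named demo, which it then renders into a tab. A minimal sketch of what one such subfolder could contain (a hypothetical demos/hello/run.py, not part of this commit):

# Hypothetical demos/hello/run.py — the minimal shape the all_demos loader expects:
# a module-level `demo` attribute that supports .render().
import gradio as gr

with gr.Blocks() as demo:
    name = gr.Textbox(label="Name")
    greeting = gr.Textbox(label="Greeting")
    name.change(lambda n: f"Hello, {n}!", name, greeting)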
groundingLMM/gradio-dev/demo/altair_plot/requirements.txt
ADDED
@@ -0,0 +1,2 @@
altair
vega_datasets
groundingLMM/gradio-dev/demo/altair_plot/run.ipynb
ADDED
@@ -0,0 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: altair_plot"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio altair vega_datasets"]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import altair as alt\n", "import gradio as gr\n", "import numpy as np\n", "import pandas as pd\n", "from vega_datasets import data\n", "\n", "\n", "def make_plot(plot_type):\n", " if plot_type == \"scatter_plot\":\n", " cars = data.cars()\n", " return alt.Chart(cars).mark_point().encode(\n", " x='Horsepower',\n", " y='Miles_per_Gallon',\n", " color='Origin',\n", " )\n", " elif plot_type == \"heatmap\":\n", " # Compute x^2 + y^2 across a 2D grid\n", " x, y = np.meshgrid(range(-5, 5), range(-5, 5))\n", " z = x ** 2 + y ** 2\n", "\n", " # Convert this grid to columnar data expected by Altair\n", " source = pd.DataFrame({'x': x.ravel(),\n", " 'y': y.ravel(),\n", " 'z': z.ravel()})\n", " return alt.Chart(source).mark_rect().encode(\n", " x='x:O',\n", " y='y:O',\n", " color='z:Q'\n", " )\n", " elif plot_type == \"us_map\":\n", " states = alt.topo_feature(data.us_10m.url, 'states')\n", " source = data.income.url\n", "\n", " return alt.Chart(source).mark_geoshape().encode(\n", " shape='geo:G',\n", " color='pct:Q',\n", " tooltip=['name:N', 'pct:Q'],\n", " facet=alt.Facet('group:N', columns=2),\n", " ).transform_lookup(\n", " lookup='id',\n", " from_=alt.LookupData(data=states, key='id'),\n", " as_='geo'\n", " ).properties(\n", " width=300,\n", " height=175,\n", " ).project(\n", " type='albersUsa'\n", " )\n", " elif plot_type == \"interactive_barplot\":\n", " source = data.movies.url\n", "\n", " pts = alt.selection(type=\"single\", encodings=['x'])\n", "\n", " rect = alt.Chart(data.movies.url).mark_rect().encode(\n", " alt.X('IMDB_Rating:Q', bin=True),\n", " alt.Y('Rotten_Tomatoes_Rating:Q', bin=True),\n", " alt.Color('count()',\n", " scale=alt.Scale(scheme='greenblue'),\n", " legend=alt.Legend(title='Total Records')\n", " )\n", " )\n", "\n", " circ = rect.mark_point().encode(\n", " alt.ColorValue('grey'),\n", " alt.Size('count()',\n", " legend=alt.Legend(title='Records in Selection')\n", " )\n", " ).transform_filter(\n", " pts\n", " )\n", "\n", " bar = alt.Chart(source).mark_bar().encode(\n", " x='Major_Genre:N',\n", " y='count()',\n", " color=alt.condition(pts, alt.ColorValue(\"steelblue\"), alt.ColorValue(\"grey\"))\n", " ).properties(\n", " width=550,\n", " height=200\n", " ).add_selection(pts)\n", "\n", " plot = alt.vconcat(\n", " rect + circ,\n", " bar\n", " ).resolve_legend(\n", " color=\"independent\",\n", " size=\"independent\"\n", " )\n", " return plot\n", " elif plot_type == \"radial\":\n", " source = pd.DataFrame({\"values\": [12, 23, 47, 6, 52, 19]})\n", "\n", " base = alt.Chart(source).encode(\n", " theta=alt.Theta(\"values:Q\", stack=True),\n", " radius=alt.Radius(\"values\", scale=alt.Scale(type=\"sqrt\", zero=True, rangeMin=20)),\n", " color=\"values:N\",\n", " )\n", "\n", " c1 = base.mark_arc(innerRadius=20, stroke=\"#fff\")\n", "\n", " c2 = base.mark_text(radiusOffset=10).encode(text=\"values:Q\")\n", "\n", " return c1 + c2\n", " elif plot_type == \"multiline\":\n", " source = data.stocks()\n", "\n", " highlight = alt.selection(type='single', on='mouseover',\n", " fields=['symbol'], nearest=True)\n", "\n", " base = 
alt.Chart(source).encode(\n", " x='date:T',\n", " y='price:Q',\n", " color='symbol:N'\n", " )\n", "\n", " points = base.mark_circle().encode(\n", " opacity=alt.value(0)\n", " ).add_selection(\n", " highlight\n", " ).properties(\n", " width=600\n", " )\n", "\n", " lines = base.mark_line().encode(\n", " size=alt.condition(~highlight, alt.value(1), alt.value(3))\n", " )\n", "\n", " return points + lines\n", "\n", "\n", "with gr.Blocks() as demo:\n", " button = gr.Radio(label=\"Plot type\",\n", " choices=['scatter_plot', 'heatmap', 'us_map',\n", " 'interactive_barplot', \"radial\", \"multiline\"], value='scatter_plot')\n", " plot = gr.Plot(label=\"Plot\")\n", " button.change(make_plot, inputs=button, outputs=[plot])\n", " demo.load(make_plot, inputs=[button], outputs=[plot])\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
groundingLMM/gradio-dev/demo/altair_plot/run.py
ADDED
@@ -0,0 +1,140 @@
import altair as alt
import gradio as gr
import numpy as np
import pandas as pd
from vega_datasets import data


def make_plot(plot_type):
    if plot_type == "scatter_plot":
        cars = data.cars()
        return alt.Chart(cars).mark_point().encode(
            x='Horsepower',
            y='Miles_per_Gallon',
            color='Origin',
        )
    elif plot_type == "heatmap":
        # Compute x^2 + y^2 across a 2D grid
        x, y = np.meshgrid(range(-5, 5), range(-5, 5))
        z = x ** 2 + y ** 2

        # Convert this grid to columnar data expected by Altair
        source = pd.DataFrame({'x': x.ravel(),
                               'y': y.ravel(),
                               'z': z.ravel()})
        return alt.Chart(source).mark_rect().encode(
            x='x:O',
            y='y:O',
            color='z:Q'
        )
    elif plot_type == "us_map":
        states = alt.topo_feature(data.us_10m.url, 'states')
        source = data.income.url

        return alt.Chart(source).mark_geoshape().encode(
            shape='geo:G',
            color='pct:Q',
            tooltip=['name:N', 'pct:Q'],
            facet=alt.Facet('group:N', columns=2),
        ).transform_lookup(
            lookup='id',
            from_=alt.LookupData(data=states, key='id'),
            as_='geo'
        ).properties(
            width=300,
            height=175,
        ).project(
            type='albersUsa'
        )
    elif plot_type == "interactive_barplot":
        source = data.movies.url

        pts = alt.selection(type="single", encodings=['x'])

        rect = alt.Chart(data.movies.url).mark_rect().encode(
            alt.X('IMDB_Rating:Q', bin=True),
            alt.Y('Rotten_Tomatoes_Rating:Q', bin=True),
            alt.Color('count()',
                      scale=alt.Scale(scheme='greenblue'),
                      legend=alt.Legend(title='Total Records')
                      )
        )

        circ = rect.mark_point().encode(
            alt.ColorValue('grey'),
            alt.Size('count()',
                     legend=alt.Legend(title='Records in Selection')
                     )
        ).transform_filter(
            pts
        )

        bar = alt.Chart(source).mark_bar().encode(
            x='Major_Genre:N',
            y='count()',
            color=alt.condition(pts, alt.ColorValue("steelblue"), alt.ColorValue("grey"))
        ).properties(
            width=550,
            height=200
        ).add_selection(pts)

        plot = alt.vconcat(
            rect + circ,
            bar
        ).resolve_legend(
            color="independent",
            size="independent"
        )
        return plot
    elif plot_type == "radial":
        source = pd.DataFrame({"values": [12, 23, 47, 6, 52, 19]})

        base = alt.Chart(source).encode(
            theta=alt.Theta("values:Q", stack=True),
            radius=alt.Radius("values", scale=alt.Scale(type="sqrt", zero=True, rangeMin=20)),
            color="values:N",
        )

        c1 = base.mark_arc(innerRadius=20, stroke="#fff")

        c2 = base.mark_text(radiusOffset=10).encode(text="values:Q")

        return c1 + c2
    elif plot_type == "multiline":
        source = data.stocks()

        highlight = alt.selection(type='single', on='mouseover',
                                  fields=['symbol'], nearest=True)

        base = alt.Chart(source).encode(
            x='date:T',
            y='price:Q',
            color='symbol:N'
        )

        points = base.mark_circle().encode(
            opacity=alt.value(0)
        ).add_selection(
            highlight
        ).properties(
            width=600
        )

        lines = base.mark_line().encode(
            size=alt.condition(~highlight, alt.value(1), alt.value(3))
        )

        return points + lines


with gr.Blocks() as demo:
    button = gr.Radio(label="Plot type",
                      choices=['scatter_plot', 'heatmap', 'us_map',
                               'interactive_barplot', "radial", "multiline"], value='scatter_plot')
    plot = gr.Plot(label="Plot")
    button.change(make_plot, inputs=button, outputs=[plot])
    demo.load(make_plot, inputs=[button], outputs=[plot])


if __name__ == "__main__":
    demo.launch()
groundingLMM/gradio-dev/demo/blocks_component_shortcut/run.ipynb
ADDED
@@ -0,0 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: blocks_component_shortcut"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "\n", "def greet(str):\n", " return str\n", "\n", "\n", "with gr.Blocks() as demo:\n", " \"\"\"\n", " You can make use of str shortcuts you use in Interface within Blocks as well.\n", " \n", " Interface shortcut example:\n", " Interface(greet, \"textarea\", \"textarea\")\n", " \n", " You can use \n", " 1. gr.component()\n", " 2. gr.templates.Template()\n", " 3. gr.Template()\n", " All the templates are listed in gradio/templates.py\n", " \"\"\"\n", " with gr.Row():\n", " text1 = gr.component(\"textarea\")\n", " text2 = gr.TextArea()\n", " text3 = gr.templates.TextArea()\n", " text1.blur(greet, text1, text2)\n", " text2.blur(greet, text2, text3)\n", " text3.blur(greet, text3, text1)\n", " button = gr.component(\"button\")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
groundingLMM/gradio-dev/demo/blocks_gpt/run.ipynb
ADDED
@@ -0,0 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: blocks_gpt"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "api = gr.load(\"huggingface/EleutherAI/gpt-j-6B\")\n", "\n", "def complete_with_gpt(text):\n", " # Use the last 50 characters of the text as context\n", " return text[:-50] + api(text[-50:])\n", "\n", "with gr.Blocks() as demo:\n", " textbox = gr.Textbox(placeholder=\"Type here and press enter...\", lines=4)\n", " btn = gr.Button(\"Generate\")\n", " \n", " btn.click(complete_with_gpt, textbox, textbox)\n", " \n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
groundingLMM/gradio-dev/demo/blocks_gpt/run.py
ADDED
@@ -0,0 +1,16 @@
import gradio as gr

api = gr.load("huggingface/EleutherAI/gpt-j-6B")

def complete_with_gpt(text):
    # Use the last 50 characters of the text as context
    return text[:-50] + api(text[-50:])

with gr.Blocks() as demo:
    textbox = gr.Textbox(placeholder="Type here and press enter...", lines=4)
    btn = gr.Button("Generate")

    btn.click(complete_with_gpt, textbox, textbox)

if __name__ == "__main__":
    demo.launch()
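Because gr.load returns a callable wrapper around the hosted model, the prediction function can also be exercised directly, without launching the Blocks UI. A hedged sketch, assuming the Hugging Face Inference API is reachable and EleutherAI/gpt-j-6B is still being served there:

# Hedged sketch: direct call to the prediction function defined in run.py above.
# Assumes network access to the Hugging Face Inference API; the prompt text is arbitrary.
print(complete_with_gpt("The quick brown fox jumps over the lazy dog. The dog"))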
groundingLMM/gradio-dev/demo/blocks_xray/run.ipynb
ADDED
@@ -0,0 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: blocks_xray"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import random\n", "import time\n", "\n", "def xray_model(diseases, img):\n", " time.sleep(4)\n", " return [{disease: random.random() for disease in diseases}]\n", "\n", "\n", "def ct_model(diseases, img):\n", " time.sleep(3)\n", " return [{disease: 0.1 for disease in diseases}]\n", "\n", "with gr.Blocks() as demo:\n", " gr.Markdown(\n", " \"\"\"\n", "# Detect Disease From Scan\n", "With this model you can lorem ipsum\n", "- ipsum 1\n", "- ipsum 2\n", "\"\"\"\n", " )\n", " disease = gr.CheckboxGroup(\n", " info=\"Select the diseases you want to scan for.\",\n", " choices=[\"Covid\", \"Malaria\", \"Lung Cancer\"], label=\"Disease to Scan For\"\n", " )\n", " slider = gr.Slider(0, 100)\n", "\n", " with gr.Tab(\"X-ray\") as x_tab:\n", " with gr.Row():\n", " xray_scan = gr.Image()\n", " xray_results = gr.JSON()\n", " xray_run = gr.Button(\"Run\")\n", " xray_run.click(\n", " xray_model,\n", " inputs=[disease, xray_scan],\n", " outputs=xray_results,\n", " api_name=\"xray_model\"\n", " )\n", "\n", " with gr.Tab(\"CT Scan\"):\n", " with gr.Row():\n", " ct_scan = gr.Image()\n", " ct_results = gr.JSON()\n", " ct_run = gr.Button(\"Run\")\n", " ct_run.click(\n", " ct_model,\n", " inputs=[disease, ct_scan],\n", " outputs=ct_results,\n", " api_name=\"ct_model\"\n", " )\n", "\n", " upload_btn = gr.Button(\"Upload Results\", variant=\"primary\")\n", " upload_btn.click(\n", " lambda ct, xr: time.sleep(5),\n", " inputs=[ct_results, xray_results],\n", " outputs=[],\n", " )\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
groundingLMM/gradio-dev/demo/blocks_xray/run.py
ADDED
@@ -0,0 +1,61 @@
import gradio as gr
import random
import time

def xray_model(diseases, img):
    time.sleep(4)
    return [{disease: random.random() for disease in diseases}]


def ct_model(diseases, img):
    time.sleep(3)
    return [{disease: 0.1 for disease in diseases}]

with gr.Blocks() as demo:
    gr.Markdown(
        """
# Detect Disease From Scan
With this model you can lorem ipsum
- ipsum 1
- ipsum 2
"""
    )
    disease = gr.CheckboxGroup(
        info="Select the diseases you want to scan for.",
        choices=["Covid", "Malaria", "Lung Cancer"], label="Disease to Scan For"
    )
    slider = gr.Slider(0, 100)

    with gr.Tab("X-ray") as x_tab:
        with gr.Row():
            xray_scan = gr.Image()
            xray_results = gr.JSON()
        xray_run = gr.Button("Run")
        xray_run.click(
            xray_model,
            inputs=[disease, xray_scan],
            outputs=xray_results,
            api_name="xray_model"
        )

    with gr.Tab("CT Scan"):
        with gr.Row():
            ct_scan = gr.Image()
            ct_results = gr.JSON()
        ct_run = gr.Button("Run")
        ct_run.click(
            ct_model,
            inputs=[disease, ct_scan],
            outputs=ct_results,
            api_name="ct_model"
        )

    upload_btn = gr.Button("Upload Results", variant="primary")
    upload_btn.click(
        lambda ct, xr: time.sleep(5),
        inputs=[ct_results, xray_results],
        outputs=[],
    )

if __name__ == "__main__":
    demo.launch()
groundingLMM/gradio-dev/demo/calculator/run.ipynb
ADDED
@@ -0,0 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: calculator"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('examples')\n", "!wget -q -O examples/log.csv https://github.com/gradio-app/gradio/raw/main/demo/calculator/examples/log.csv"]}, {"cell_type": "code", "execution_count": null, "id": 44380577570523278879349135829904343037, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "def calculator(num1, operation, num2):\n", " if operation == \"add\":\n", " return num1 + num2\n", " elif operation == \"subtract\":\n", " return num1 - num2\n", " elif operation == \"multiply\":\n", " return num1 * num2\n", " elif operation == \"divide\":\n", " if num2 == 0:\n", " raise gr.Error(\"Cannot divide by zero!\")\n", " return num1 / num2\n", "\n", "demo = gr.Interface(\n", " calculator,\n", " [\n", " \"number\", \n", " gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]),\n", " \"number\"\n", " ],\n", " \"number\",\n", " examples=[\n", " [5, \"add\", 3],\n", " [4, \"divide\", 2],\n", " [-4, \"multiply\", 2.5],\n", " [0, \"subtract\", 1.2],\n", " ],\n", " title=\"Toy Calculator\",\n", " description=\"Here's a sample toy calculator. Allows you to calculate things like $2+2=4$\",\n", ")\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
groundingLMM/gradio-dev/demo/calculator/run.py
ADDED
@@ -0,0 +1,33 @@
import gradio as gr

def calculator(num1, operation, num2):
    if operation == "add":
        return num1 + num2
    elif operation == "subtract":
        return num1 - num2
    elif operation == "multiply":
        return num1 * num2
    elif operation == "divide":
        if num2 == 0:
            raise gr.Error("Cannot divide by zero!")
        return num1 / num2

demo = gr.Interface(
    calculator,
    [
        "number",
        gr.Radio(["add", "subtract", "multiply", "divide"]),
        "number"
    ],
    "number",
    examples=[
        [5, "add", 3],
        [4, "divide", 2],
        [-4, "multiply", 2.5],
        [0, "subtract", 1.2],
    ],
    title="Toy Calculator",
    description="Here's a sample toy calculator. Allows you to calculate things like $2+2=4$",
)
if __name__ == "__main__":
    demo.launch()
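The calculator function itself is plain Python, so it can be checked before the Interface is launched. A small sketch, assuming it runs after the definitions in run.py above:

# Quick checks of the prediction function, independent of the UI.
assert calculator(5, "add", 3) == 8
assert calculator(4, "divide", 2) == 2
assert calculator(-4, "multiply", 2.5) == -10.0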
groundingLMM/gradio-dev/demo/calculator_live/run.ipynb
ADDED
@@ -0,0 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: calculator_live"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "def calculator(num1, operation, num2):\n", " if operation == \"add\":\n", " return num1 + num2\n", " elif operation == \"subtract\":\n", " return num1 - num2\n", " elif operation == \"multiply\":\n", " return num1 * num2\n", " elif operation == \"divide\":\n", " return num1 / num2\n", "\n", "demo = gr.Interface(\n", " calculator,\n", " [\n", " \"number\",\n", " gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]),\n", " \"number\"\n", " ],\n", " \"number\",\n", " live=True,\n", ")\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
groundingLMM/gradio-dev/demo/calculator_live/run.py
ADDED
@@ -0,0 +1,24 @@
import gradio as gr

def calculator(num1, operation, num2):
    if operation == "add":
        return num1 + num2
    elif operation == "subtract":
        return num1 - num2
    elif operation == "multiply":
        return num1 * num2
    elif operation == "divide":
        return num1 / num2

demo = gr.Interface(
    calculator,
    [
        "number",
        gr.Radio(["add", "subtract", "multiply", "divide"]),
        "number"
    ],
    "number",
    live=True,
)
if __name__ == "__main__":
    demo.launch()
groundingLMM/gradio-dev/demo/cancel_events/run.ipynb
ADDED
@@ -0,0 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: cancel_events"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import time\n", "import gradio as gr\n", "\n", "\n", "def fake_diffusion(steps):\n", " for i in range(steps):\n", " print(f\"Current step: {i}\")\n", " time.sleep(1)\n", " yield str(i)\n", "\n", "\n", "def long_prediction(*args, **kwargs):\n", " time.sleep(10)\n", " return 42\n", "\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " with gr.Column():\n", " n = gr.Slider(1, 10, value=9, step=1, label=\"Number Steps\")\n", " run = gr.Button()\n", " output = gr.Textbox(label=\"Iterative Output\")\n", " stop = gr.Button(value=\"Stop Iterating\")\n", " with gr.Column():\n", " textbox = gr.Textbox(label=\"Prompt\")\n", " prediction = gr.Number(label=\"Expensive Calculation\")\n", " run_pred = gr.Button(value=\"Run Expensive Calculation\")\n", " with gr.Column():\n", " cancel_on_change = gr.Textbox(label=\"Cancel Iteration and Expensive Calculation on Change\")\n", " cancel_on_submit = gr.Textbox(label=\"Cancel Iteration and Expensive Calculation on Submit\")\n", " echo = gr.Textbox(label=\"Echo\")\n", " with gr.Row():\n", " with gr.Column():\n", " image = gr.Image(source=\"webcam\", tool=\"editor\", label=\"Cancel on edit\", interactive=True)\n", " with gr.Column():\n", " video = gr.Video(source=\"webcam\", label=\"Cancel on play\", interactive=True)\n", "\n", " click_event = run.click(fake_diffusion, n, output)\n", " stop.click(fn=None, inputs=None, outputs=None, cancels=[click_event])\n", " pred_event = run_pred.click(fn=long_prediction, inputs=[textbox], outputs=prediction)\n", "\n", " cancel_on_change.change(None, None, None, cancels=[click_event, pred_event])\n", " cancel_on_submit.submit(lambda s: s, cancel_on_submit, echo, cancels=[click_event, pred_event])\n", " image.edit(None, None, None, cancels=[click_event, pred_event])\n", " video.play(None, None, None, cancels=[click_event, pred_event])\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.queue(concurrency_count=2, max_size=20).launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
groundingLMM/gradio-dev/demo/cancel_events/run.py
ADDED
@@ -0,0 +1,49 @@
import time
import gradio as gr


def fake_diffusion(steps):
    for i in range(steps):
        print(f"Current step: {i}")
        time.sleep(1)
        yield str(i)


def long_prediction(*args, **kwargs):
    time.sleep(10)
    return 42


with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            n = gr.Slider(1, 10, value=9, step=1, label="Number Steps")
            run = gr.Button()
            output = gr.Textbox(label="Iterative Output")
            stop = gr.Button(value="Stop Iterating")
        with gr.Column():
            textbox = gr.Textbox(label="Prompt")
            prediction = gr.Number(label="Expensive Calculation")
            run_pred = gr.Button(value="Run Expensive Calculation")
        with gr.Column():
            cancel_on_change = gr.Textbox(label="Cancel Iteration and Expensive Calculation on Change")
            cancel_on_submit = gr.Textbox(label="Cancel Iteration and Expensive Calculation on Submit")
            echo = gr.Textbox(label="Echo")
    with gr.Row():
        with gr.Column():
            image = gr.Image(source="webcam", tool="editor", label="Cancel on edit", interactive=True)
        with gr.Column():
            video = gr.Video(source="webcam", label="Cancel on play", interactive=True)

    click_event = run.click(fake_diffusion, n, output)
    stop.click(fn=None, inputs=None, outputs=None, cancels=[click_event])
    pred_event = run_pred.click(fn=long_prediction, inputs=[textbox], outputs=prediction)

    cancel_on_change.change(None, None, None, cancels=[click_event, pred_event])
    cancel_on_submit.submit(lambda s: s, cancel_on_submit, echo, cancels=[click_event, pred_event])
    image.edit(None, None, None, cancels=[click_event, pred_event])
    video.play(None, None, None, cancels=[click_event, pred_event])


if __name__ == "__main__":
    demo.queue(concurrency_count=2, max_size=20).launch()
groundingLMM/gradio-dev/demo/chicago-bikeshare-dashboard/requirements.txt
ADDED
@@ -0,0 +1,3 @@
psycopg2
matplotlib
SQLAlchemy
groundingLMM/gradio-dev/demo/chicago-bikeshare-dashboard/run.ipynb
ADDED
@@ -0,0 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: chicago-bikeshare-dashboard"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio psycopg2 matplotlib SQLAlchemy "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import os\n", "import gradio as gr\n", "import pandas as pd\n", "\n", "DB_USER = os.getenv(\"DB_USER\")\n", "DB_PASSWORD = os.getenv(\"DB_PASSWORD\")\n", "DB_HOST = os.getenv(\"DB_HOST\")\n", "PORT = 8080\n", "DB_NAME = \"bikeshare\"\n", "\n", "connection_string = (\n", " f\"postgresql://{DB_USER}:{DB_PASSWORD}@{DB_HOST}?port={PORT}&dbname={DB_NAME}\"\n", ")\n", "\n", "\n", "def get_count_ride_type():\n", " df = pd.read_sql(\n", " \"\"\"\n", " SELECT COUNT(ride_id) as n, rideable_type\n", " FROM rides\n", " GROUP BY rideable_type\n", " ORDER BY n DESC\n", " \"\"\",\n", " con=connection_string,\n", " )\n", " return df\n", "\n", "\n", "def get_most_popular_stations():\n", "\n", " df = pd.read_sql(\n", " \"\"\"\n", " SELECT COUNT(ride_id) as n, MAX(start_station_name) as station\n", " FROM RIDES\n", " WHERE start_station_name is NOT NULL\n", " GROUP BY start_station_id\n", " ORDER BY n DESC\n", " LIMIT 5\n", " \"\"\",\n", " con=connection_string,\n", " )\n", " return df\n", "\n", "\n", "with gr.Blocks() as demo:\n", " gr.Markdown(\n", " \"\"\"\n", " # Chicago Bike Share Dashboard\n", " \n", " This demo pulls Chicago bike share data for March 2022 from a postgresql database hosted on AWS.\n", " This demo uses psycopg2 but any postgresql client library (SQLAlchemy)\n", " is compatible with gradio.\n", " \n", " Connection credentials are handled by environment variables\n", " defined as secrets in the Space.\n", "\n", " If data were added to the database, the plots in this demo would update\n", " whenever the webpage is reloaded.\n", " \n", " This demo serves as a starting point for your database-connected apps!\n", " \"\"\"\n", " )\n", " with gr.Row():\n", " bike_type = gr.BarPlot(\n", " x=\"rideable_type\",\n", " y='n',\n", " title=\"Number of rides per bicycle type\",\n", " y_title=\"Number of Rides\",\n", " x_title=\"Bicycle Type\",\n", " vertical=False,\n", " tooltip=['rideable_type', \"n\"],\n", " height=300,\n", " width=300,\n", " )\n", " station = gr.BarPlot(\n", " x='station',\n", " y='n',\n", " title=\"Most Popular Stations\",\n", " y_title=\"Number of Rides\",\n", " x_title=\"Station Name\",\n", " vertical=False,\n", " tooltip=['station', 'n'],\n", " height=300,\n", " width=300\n", " )\n", "\n", " demo.load(get_count_ride_type, inputs=None, outputs=bike_type)\n", " demo.load(get_most_popular_stations, inputs=None, outputs=station)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
groundingLMM/gradio-dev/demo/chicago-bikeshare-dashboard/run.py
ADDED
@@ -0,0 +1,91 @@
import os
import gradio as gr
import pandas as pd

DB_USER = os.getenv("DB_USER")
DB_PASSWORD = os.getenv("DB_PASSWORD")
DB_HOST = os.getenv("DB_HOST")
PORT = 8080
DB_NAME = "bikeshare"

connection_string = (
    f"postgresql://{DB_USER}:{DB_PASSWORD}@{DB_HOST}?port={PORT}&dbname={DB_NAME}"
)


def get_count_ride_type():
    df = pd.read_sql(
        """
        SELECT COUNT(ride_id) as n, rideable_type
        FROM rides
        GROUP BY rideable_type
        ORDER BY n DESC
        """,
        con=connection_string,
    )
    return df


def get_most_popular_stations():

    df = pd.read_sql(
        """
        SELECT COUNT(ride_id) as n, MAX(start_station_name) as station
        FROM RIDES
        WHERE start_station_name is NOT NULL
        GROUP BY start_station_id
        ORDER BY n DESC
        LIMIT 5
        """,
        con=connection_string,
    )
    return df


with gr.Blocks() as demo:
    gr.Markdown(
        """
        # Chicago Bike Share Dashboard

        This demo pulls Chicago bike share data for March 2022 from a postgresql database hosted on AWS.
        This demo uses psycopg2 but any postgresql client library (SQLAlchemy)
        is compatible with gradio.

        Connection credentials are handled by environment variables
        defined as secrets in the Space.

        If data were added to the database, the plots in this demo would update
        whenever the webpage is reloaded.

        This demo serves as a starting point for your database-connected apps!
        """
    )
    with gr.Row():
        bike_type = gr.BarPlot(
            x="rideable_type",
            y='n',
            title="Number of rides per bicycle type",
            y_title="Number of Rides",
            x_title="Bicycle Type",
            vertical=False,
            tooltip=['rideable_type', "n"],
            height=300,
            width=300,
        )
        station = gr.BarPlot(
            x='station',
            y='n',
            title="Most Popular Stations",
            y_title="Number of Rides",
            x_title="Station Name",
            vertical=False,
            tooltip=['station', 'n'],
            height=300,
            width=300
        )

    demo.load(get_count_ride_type, inputs=None, outputs=bike_type)
    demo.load(get_most_popular_stations, inputs=None, outputs=station)

if __name__ == "__main__":
    demo.launch()
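The connection string above is assembled from the DB_USER, DB_PASSWORD, and DB_HOST environment variables; on Spaces these come from repository secrets, but for a local run they would need to exist before the script executes. A hedged sketch with placeholder values (not real credentials, and not part of this commit):

# Hedged sketch for local testing: define the variables the dashboard expects
# before importing run.py. Host, user, and password below are placeholders.
import os

os.environ["DB_USER"] = "bikeshare_reader"
os.environ["DB_PASSWORD"] = "change-me"
os.environ["DB_HOST"] = "mydb.example.amazonaws.com"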
groundingLMM/gradio-dev/demo/colorpicker_component/run.ipynb
ADDED
@@ -0,0 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: colorpicker_component"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr \n", "\n", "css = \"footer {display: none !important;} .gradio-container {min-height: 0px !important;}\"\n", "\n", "with gr.Blocks(css=css) as demo:\n", " gr.ColorPicker()\n", "\n", "demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
groundingLMM/gradio-dev/demo/colorpicker_component/run.py
ADDED
@@ -0,0 +1,8 @@
import gradio as gr

css = "footer {display: none !important;} .gradio-container {min-height: 0px !important;}"

with gr.Blocks(css=css) as demo:
    gr.ColorPicker()

demo.launch()
groundingLMM/gradio-dev/demo/depth_estimation/DESCRIPTION.md
ADDED
@@ -0,0 +1 @@
A demo for predicting the depth of an image and generating a 3D model of it.
groundingLMM/gradio-dev/demo/depth_estimation/packages.txt
ADDED
@@ -0,0 +1 @@
libgl1-mesa-glx
groundingLMM/gradio-dev/demo/depth_estimation/requirements.txt
ADDED
@@ -0,0 +1,6 @@
torch
git+https://github.com/nielsrogge/transformers.git@add_dpt_redesign#egg=transformers
numpy
Pillow
jinja2
open3d
groundingLMM/gradio-dev/demo/depth_estimation/run.ipynb
ADDED
@@ -0,0 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: depth_estimation\n", "### A demo for predicting the depth of an image and generating a 3D model of it.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio torch git+https://github.com/nielsrogge/transformers.git@add_dpt_redesign#egg=transformers numpy Pillow jinja2 open3d"]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('examples')\n", "!wget -q -O examples/1-jonathan-borba-CgWTqYxHEkg-unsplash.jpg https://github.com/gradio-app/gradio/raw/main/demo/depth_estimation/examples/1-jonathan-borba-CgWTqYxHEkg-unsplash.jpg\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/depth_estimation/packages.txt"]}, {"cell_type": "code", "execution_count": null, "id": 44380577570523278879349135829904343037, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from transformers import DPTFeatureExtractor, DPTForDepthEstimation\n", "import torch\n", "import numpy as np\n", "from PIL import Image\n", "import open3d as o3d\n", "from pathlib import Path\n", "\n", "feature_extractor = DPTFeatureExtractor.from_pretrained(\"Intel/dpt-large\")\n", "model = DPTForDepthEstimation.from_pretrained(\"Intel/dpt-large\")\n", "\n", "def process_image(image_path):\n", " image_path = Path(image_path)\n", " image_raw = Image.open(image_path)\n", " image = image_raw.resize(\n", " (800, int(800 * image_raw.size[1] / image_raw.size[0])),\n", " Image.Resampling.LANCZOS)\n", "\n", " # prepare image for the model\n", " encoding = feature_extractor(image, return_tensors=\"pt\")\n", "\n", " # forward pass\n", " with torch.no_grad():\n", " outputs = model(**encoding)\n", " predicted_depth = outputs.predicted_depth\n", "\n", " # interpolate to original size\n", " prediction = torch.nn.functional.interpolate(\n", " predicted_depth.unsqueeze(1),\n", " size=image.size[::-1],\n", " mode=\"bicubic\",\n", " align_corners=False,\n", " ).squeeze()\n", " output = prediction.cpu().numpy()\n", " depth_image = (output * 255 / np.max(output)).astype('uint8')\n", " try:\n", " gltf_path = create_3d_obj(np.array(image), depth_image, image_path)\n", " img = Image.fromarray(depth_image)\n", " return [img, gltf_path, gltf_path]\n", " except Exception:\n", " gltf_path = create_3d_obj(\n", " np.array(image), depth_image, image_path, depth=8)\n", " img = Image.fromarray(depth_image)\n", " return [img, gltf_path, gltf_path]\n", " except:\n", " print(\"Error reconstructing 3D model\")\n", " raise Exception(\"Error reconstructing 3D model\")\n", "\n", "\n", "def create_3d_obj(rgb_image, depth_image, image_path, depth=10):\n", " depth_o3d = o3d.geometry.Image(depth_image)\n", " image_o3d = o3d.geometry.Image(rgb_image)\n", " rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(\n", " image_o3d, depth_o3d, convert_rgb_to_intensity=False)\n", " w = int(depth_image.shape[1])\n", " h = int(depth_image.shape[0])\n", "\n", " camera_intrinsic = o3d.camera.PinholeCameraIntrinsic()\n", " camera_intrinsic.set_intrinsics(w, h, 500, 500, w/2, h/2)\n", "\n", " pcd = o3d.geometry.PointCloud.create_from_rgbd_image(\n", " rgbd_image, camera_intrinsic)\n", "\n", " print('normals')\n", " pcd.normals = o3d.utility.Vector3dVector(\n", " 
np.zeros((1, 3))) # invalidate existing normals\n", " pcd.estimate_normals(\n", " search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.01, max_nn=30))\n", " pcd.orient_normals_towards_camera_location(\n", " camera_location=np.array([0., 0., 1000.]))\n", " pcd.transform([[1, 0, 0, 0],\n", " [0, -1, 0, 0],\n", " [0, 0, -1, 0],\n", " [0, 0, 0, 1]])\n", " pcd.transform([[-1, 0, 0, 0],\n", " [0, 1, 0, 0],\n", " [0, 0, 1, 0],\n", " [0, 0, 0, 1]])\n", "\n", " print('run Poisson surface reconstruction')\n", " with o3d.utility.VerbosityContextManager(o3d.utility.VerbosityLevel.Debug):\n", " mesh_raw, densities = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(\n", " pcd, depth=depth, width=0, scale=1.1, linear_fit=True)\n", "\n", " voxel_size = max(mesh_raw.get_max_bound() - mesh_raw.get_min_bound()) / 256\n", " print(f'voxel_size = {voxel_size:e}')\n", " mesh = mesh_raw.simplify_vertex_clustering(\n", " voxel_size=voxel_size,\n", " contraction=o3d.geometry.SimplificationContraction.Average)\n", "\n", " # vertices_to_remove = densities < np.quantile(densities, 0.001)\n", " # mesh.remove_vertices_by_mask(vertices_to_remove)\n", " bbox = pcd.get_axis_aligned_bounding_box()\n", " mesh_crop = mesh.crop(bbox)\n", " gltf_path = f'./{image_path.stem}.gltf'\n", " o3d.io.write_triangle_mesh(\n", " gltf_path, mesh_crop, write_triangle_uvs=True)\n", " return gltf_path\n", "\n", "title = \"Demo: zero-shot depth estimation with DPT + 3D Point Cloud\"\n", "description = \"This demo is a variation from the original <a href='https://huggingface.co/spaces/nielsr/dpt-depth-estimation' target='_blank'>DPT Demo</a>. It uses the DPT model to predict the depth of an image and then uses 3D Point Cloud to create a 3D object.\"\n", "examples = [[\"examples/1-jonathan-borba-CgWTqYxHEkg-unsplash.jpg\"]]\n", "\n", "iface = gr.Interface(fn=process_image,\n", " inputs=[gr.Image(\n", " type=\"filepath\", label=\"Input Image\")],\n", " outputs=[gr.Image(label=\"predicted depth\", type=\"pil\"),\n", " gr.Model3D(label=\"3d mesh reconstruction\", clear_color=[\n", " 1.0, 1.0, 1.0, 1.0]),\n", " gr.File(label=\"3d gLTF\")],\n", " title=title,\n", " description=description,\n", " examples=examples,\n", " allow_flagging=\"never\",\n", " cache_examples=False)\n", "\n", "iface.launch(debug=True, enable_queue=False)"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
groundingLMM/gradio-dev/demo/depth_estimation/run.py
ADDED
@@ -0,0 +1,117 @@
import gradio as gr
from transformers import DPTFeatureExtractor, DPTForDepthEstimation
import torch
import numpy as np
from PIL import Image
import open3d as o3d
from pathlib import Path

feature_extractor = DPTFeatureExtractor.from_pretrained("Intel/dpt-large")
model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large")

def process_image(image_path):
    image_path = Path(image_path)
    image_raw = Image.open(image_path)
    image = image_raw.resize(
        (800, int(800 * image_raw.size[1] / image_raw.size[0])),
        Image.Resampling.LANCZOS)

    # prepare image for the model
    encoding = feature_extractor(image, return_tensors="pt")

    # forward pass
    with torch.no_grad():
        outputs = model(**encoding)
        predicted_depth = outputs.predicted_depth

    # interpolate to original size
    prediction = torch.nn.functional.interpolate(
        predicted_depth.unsqueeze(1),
        size=image.size[::-1],
        mode="bicubic",
        align_corners=False,
    ).squeeze()
    output = prediction.cpu().numpy()
    depth_image = (output * 255 / np.max(output)).astype('uint8')
    try:
        gltf_path = create_3d_obj(np.array(image), depth_image, image_path)
        img = Image.fromarray(depth_image)
        return [img, gltf_path, gltf_path]
    except Exception:
        gltf_path = create_3d_obj(
            np.array(image), depth_image, image_path, depth=8)
        img = Image.fromarray(depth_image)
        return [img, gltf_path, gltf_path]
    except:
        print("Error reconstructing 3D model")
        raise Exception("Error reconstructing 3D model")


def create_3d_obj(rgb_image, depth_image, image_path, depth=10):
    depth_o3d = o3d.geometry.Image(depth_image)
    image_o3d = o3d.geometry.Image(rgb_image)
    rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(
        image_o3d, depth_o3d, convert_rgb_to_intensity=False)
    w = int(depth_image.shape[1])
    h = int(depth_image.shape[0])

    camera_intrinsic = o3d.camera.PinholeCameraIntrinsic()
    camera_intrinsic.set_intrinsics(w, h, 500, 500, w/2, h/2)

    pcd = o3d.geometry.PointCloud.create_from_rgbd_image(
        rgbd_image, camera_intrinsic)

    print('normals')
    pcd.normals = o3d.utility.Vector3dVector(
        np.zeros((1, 3)))  # invalidate existing normals
    pcd.estimate_normals(
        search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.01, max_nn=30))
    pcd.orient_normals_towards_camera_location(
        camera_location=np.array([0., 0., 1000.]))
    pcd.transform([[1, 0, 0, 0],
                   [0, -1, 0, 0],
                   [0, 0, -1, 0],
                   [0, 0, 0, 1]])
    pcd.transform([[-1, 0, 0, 0],
                   [0, 1, 0, 0],
                   [0, 0, 1, 0],
                   [0, 0, 0, 1]])

    print('run Poisson surface reconstruction')
    with o3d.utility.VerbosityContextManager(o3d.utility.VerbosityLevel.Debug):
        mesh_raw, densities = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(
            pcd, depth=depth, width=0, scale=1.1, linear_fit=True)

    voxel_size = max(mesh_raw.get_max_bound() - mesh_raw.get_min_bound()) / 256
    print(f'voxel_size = {voxel_size:e}')
    mesh = mesh_raw.simplify_vertex_clustering(
        voxel_size=voxel_size,
        contraction=o3d.geometry.SimplificationContraction.Average)

    # vertices_to_remove = densities < np.quantile(densities, 0.001)
    # mesh.remove_vertices_by_mask(vertices_to_remove)
    bbox = pcd.get_axis_aligned_bounding_box()
    mesh_crop = mesh.crop(bbox)
    gltf_path = f'./{image_path.stem}.gltf'
    o3d.io.write_triangle_mesh(
        gltf_path, mesh_crop, write_triangle_uvs=True)
    return gltf_path

title = "Demo: zero-shot depth estimation with DPT + 3D Point Cloud"
description = "This demo is a variation from the original <a href='https://huggingface.co/spaces/nielsr/dpt-depth-estimation' target='_blank'>DPT Demo</a>. It uses the DPT model to predict the depth of an image and then uses 3D Point Cloud to create a 3D object."
examples = [["examples/1-jonathan-borba-CgWTqYxHEkg-unsplash.jpg"]]

iface = gr.Interface(fn=process_image,
                     inputs=[gr.Image(
                         type="filepath", label="Input Image")],
                     outputs=[gr.Image(label="predicted depth", type="pil"),
                              gr.Model3D(label="3d mesh reconstruction", clear_color=[
                                  1.0, 1.0, 1.0, 1.0]),
                              gr.File(label="3d gLTF")],
                     title=title,
                     description=description,
                     examples=examples,
                     allow_flagging="never",
                     cache_examples=False)

iface.launch(debug=True, enable_queue=False)
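For context, the Interface above is a thin wrapper around process_image. A minimal usage sketch (not one of the repo files, and assuming the definitions above are in the session, the example image has been downloaded, and the Intel/dpt-large weights are available locally):

# Sketch only: calls process_image directly, outside the Gradio UI.
depth_map, mesh_path, gltf_file = process_image(
    "examples/1-jonathan-borba-CgWTqYxHEkg-unsplash.jpg")
depth_map.save("depth.png")   # greyscale PIL image of the predicted depth
print(mesh_path)              # path of the reconstructed .gltf mesh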
groundingLMM/gradio-dev/demo/diffusers_with_batching/requirements.txt
ADDED
@@ -0,0 +1,3 @@
torch
transformers
diffusers
groundingLMM/gradio-dev/demo/diffusers_with_batching/run.ipynb
ADDED
@@ -0,0 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: diffusers_with_batching"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio torch transformers diffusers"]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import torch\n", "from diffusers import DiffusionPipeline\n", "import gradio as gr\n", "\n", "generator = DiffusionPipeline.from_pretrained(\"CompVis/ldm-text2im-large-256\")\n", "# move to GPU if available\n", "if torch.cuda.is_available():\n", " generator = generator.to(\"cuda\")\n", "\n", "def generate(prompts):\n", " images = generator(list(prompts)).images\n", " return [images]\n", "\n", "demo = gr.Interface(generate, \n", " \"textbox\", \n", " \"image\", \n", " batch=True, \n", " max_batch_size=4 # Set the batch size based on your CPU/GPU memory\n", ").queue()\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
groundingLMM/gradio-dev/demo/diffusers_with_batching/run.py
ADDED
@@ -0,0 +1,22 @@
import torch
from diffusers import DiffusionPipeline
import gradio as gr

generator = DiffusionPipeline.from_pretrained("CompVis/ldm-text2im-large-256")
# move to GPU if available
if torch.cuda.is_available():
    generator = generator.to("cuda")

def generate(prompts):
    images = generator(list(prompts)).images
    return [images]

demo = gr.Interface(generate,
                    "textbox",
                    "image",
                    batch=True,
                    max_batch_size=4  # Set the batch size based on your CPU/GPU memory
).queue()

if __name__ == "__main__":
    demo.launch()
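A note on the batch=True / max_batch_size pair above: Gradio then calls generate with a list of prompts and expects one list per output component, which is why the function wraps its result in an outer list. A rough sketch of that contract (not part of the repo files):

# Gradio invokes the function with up to max_batch_size prompts at once:
outputs = generate(["a painting of a cat", "a photograph of a dog"])
# outputs == [[image_for_cat, image_for_dog]] -- a single output component
# holding one generated image per prompt, in the same order as the inputs.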
groundingLMM/gradio-dev/demo/fake_diffusion/DESCRIPTION.md
ADDED
@@ -0,0 +1 @@
This demo uses a fake model to showcase iterative output. The Image output will update every time a generator is returned until the final image.
groundingLMM/gradio-dev/demo/fake_diffusion/requirements.txt
ADDED
@@ -0,0 +1 @@
numpy
groundingLMM/gradio-dev/demo/fake_diffusion/run.ipynb
ADDED
@@ -0,0 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: fake_diffusion\n", "### This demo uses a fake model to showcase iterative output. The Image output will update every time a generator is returned until the final image.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio numpy "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import numpy as np\n", "import time\n", "\n", "# define core fn, which returns a generator {steps} times before returning the image\n", "def fake_diffusion(steps):\n", " for _ in range(steps):\n", " time.sleep(1)\n", " image = np.random.random((600, 600, 3))\n", " yield image\n", " image = \"https://gradio-builds.s3.amazonaws.com/diffusion_image/cute_dog.jpg\"\n", " yield image\n", "\n", "\n", "demo = gr.Interface(fake_diffusion, inputs=gr.Slider(1, 10, 3), outputs=\"image\")\n", "\n", "# define queue - required for generators\n", "demo.queue()\n", "\n", "demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
groundingLMM/gradio-dev/demo/fake_diffusion/run.py
ADDED
@@ -0,0 +1,20 @@
import gradio as gr
import numpy as np
import time

# define core fn, which returns a generator {steps} times before returning the image
def fake_diffusion(steps):
    for _ in range(steps):
        time.sleep(1)
        image = np.random.random((600, 600, 3))
        yield image
    image = "https://gradio-builds.s3.amazonaws.com/diffusion_image/cute_dog.jpg"
    yield image


demo = gr.Interface(fake_diffusion, inputs=gr.Slider(1, 10, 3), outputs="image")

# define queue - required for generators
demo.queue()

demo.launch()
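Since fake_diffusion is an ordinary Python generator, the iterative behaviour described in DESCRIPTION.md can be checked without launching the UI. A small sketch (not part of the repo files):

frames = list(fake_diffusion(3))      # takes ~3 s because of the sleep calls
assert len(frames) == 4               # 3 random intermediate frames + the final URL
assert frames[0].shape == (600, 600, 3)
assert isinstance(frames[-1], str)    # the cute_dog.jpg URL yielded last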
groundingLMM/gradio-dev/demo/gender_sentence_custom_interpretation/run.ipynb
ADDED
@@ -0,0 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: gender_sentence_custom_interpretation"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import re\n", "\n", "import gradio as gr\n", "\n", "male_words, female_words = [\"he\", \"his\", \"him\"], [\"she\", \"hers\", \"her\"]\n", "\n", "\n", "def gender_of_sentence(sentence):\n", " male_count = len([word for word in sentence.split() if word.lower() in male_words])\n", " female_count = len(\n", " [word for word in sentence.split() if word.lower() in female_words]\n", " )\n", " total = max(male_count + female_count, 1)\n", " return {\"male\": male_count / total, \"female\": female_count / total}\n", "\n", "\n", "# Number of arguments to interpretation function must\n", "# match number of inputs to prediction function\n", "def interpret_gender(sentence):\n", " result = gender_of_sentence(sentence)\n", " is_male = result[\"male\"] > result[\"female\"]\n", " interpretation = []\n", " for word in re.split(\"( )\", sentence):\n", " score = 0\n", " token = word.lower()\n", " if (is_male and token in male_words) or (not is_male and token in female_words):\n", " score = 1\n", " elif (is_male and token in female_words) or (\n", " not is_male and token in male_words\n", " ):\n", " score = -1\n", " interpretation.append((word, score))\n", " # Output must be a list of lists containing the same number of elements as inputs\n", " # Each element corresponds to the interpretation scores for the given input\n", " return [interpretation]\n", "\n", "\n", "demo = gr.Interface(\n", " fn=gender_of_sentence,\n", " inputs=gr.Textbox(value=\"She went to his house to get her keys.\"),\n", " outputs=\"label\",\n", " interpretation=interpret_gender,\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
groundingLMM/gradio-dev/demo/gender_sentence_custom_interpretation/run.py
ADDED
@@ -0,0 +1,46 @@
import re

import gradio as gr

male_words, female_words = ["he", "his", "him"], ["she", "hers", "her"]


def gender_of_sentence(sentence):
    male_count = len([word for word in sentence.split() if word.lower() in male_words])
    female_count = len(
        [word for word in sentence.split() if word.lower() in female_words]
    )
    total = max(male_count + female_count, 1)
    return {"male": male_count / total, "female": female_count / total}


# Number of arguments to interpretation function must
# match number of inputs to prediction function
def interpret_gender(sentence):
    result = gender_of_sentence(sentence)
    is_male = result["male"] > result["female"]
    interpretation = []
    for word in re.split("( )", sentence):
        score = 0
        token = word.lower()
        if (is_male and token in male_words) or (not is_male and token in female_words):
            score = 1
        elif (is_male and token in female_words) or (
            not is_male and token in male_words
        ):
            score = -1
        interpretation.append((word, score))
    # Output must be a list of lists containing the same number of elements as inputs
    # Each element corresponds to the interpretation scores for the given input
    return [interpretation]


demo = gr.Interface(
    fn=gender_of_sentence,
    inputs=gr.Textbox(value="She went to his house to get her keys."),
    outputs="label",
    interpretation=interpret_gender,
)

if __name__ == "__main__":
    demo.launch()
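With the default textbox value the label output is driven purely by pronoun counts. A worked example (not part of the repo files):

print(gender_of_sentence("She went to his house to get her keys."))
# one male word ("his") vs. two female words ("She", "her"),
# so the result is roughly {'male': 0.33, 'female': 0.67}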
groundingLMM/gradio-dev/demo/gender_sentence_custom_interpretation/screenshot.gif
ADDED
groundingLMM/gradio-dev/demo/hello_world_3/run.ipynb
ADDED
@@ -0,0 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: hello_world_3"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "def greet(name, is_morning, temperature):\n", " salutation = \"Good morning\" if is_morning else \"Good evening\"\n", " greeting = f\"{salutation} {name}. It is {temperature} degrees today\"\n", " celsius = (temperature - 32) * 5 / 9\n", " return greeting, round(celsius, 2)\n", "\n", "demo = gr.Interface(\n", " fn=greet,\n", " inputs=[\"text\", \"checkbox\", gr.Slider(0, 100)],\n", " outputs=[\"text\", \"number\"],\n", ")\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
groundingLMM/gradio-dev/demo/hello_world_3/run.py
ADDED
@@ -0,0 +1,15 @@
import gradio as gr

def greet(name, is_morning, temperature):
    salutation = "Good morning" if is_morning else "Good evening"
    greeting = f"{salutation} {name}. It is {temperature} degrees today"
    celsius = (temperature - 32) * 5 / 9
    return greeting, round(celsius, 2)

demo = gr.Interface(
    fn=greet,
    inputs=["text", "checkbox", gr.Slider(0, 100)],
    outputs=["text", "number"],
)
if __name__ == "__main__":
    demo.launch()
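The wrapped function can be called directly, which makes the Fahrenheit-to-Celsius conversion easy to sanity-check; for example (not part of the repo files):

print(greet("Alice", True, 72))
# -> ('Good morning Alice. It is 72 degrees today', 22.22)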
groundingLMM/gradio-dev/demo/highlightedtext_component/run.ipynb
ADDED
@@ -0,0 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: highlightedtext_component"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr \n", "\n", "css = \"footer {display: none !important;} .gradio-container {min-height: 0px !important;}\"\n", "\n", "with gr.Blocks(css=css) as demo:\n", " gr.HighlightedText(value=[(\"Text\",\"Label 1\"),(\"to be\",\"Label 2\"),(\"highlighted\",\"Label 3\")])\n", "\n", "demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
groundingLMM/gradio-dev/demo/highlightedtext_component/run.py
ADDED
@@ -0,0 +1,8 @@
import gradio as gr

css = "footer {display: none !important;} .gradio-container {min-height: 0px !important;}"

with gr.Blocks(css=css) as demo:
    gr.HighlightedText(value=[("Text","Label 1"),("to be","Label 2"),("highlighted","Label 3")])

demo.launch()
groundingLMM/gradio-dev/demo/image_classifier/requirements.txt
ADDED
@@ -0,0 +1,2 @@
numpy
tensorflow
groundingLMM/gradio-dev/demo/image_classifier/run.ipynb
ADDED
@@ -0,0 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: image_classifier"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio numpy tensorflow"]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/imagenet_labels.json https://github.com/gradio-app/gradio/raw/main/demo/image_classifier/files/imagenet_labels.json\n", "os.mkdir('images')\n", "!wget -q -O images/cheetah1.jpg https://github.com/gradio-app/gradio/raw/main/demo/image_classifier/images/cheetah1.jpg\n", "!wget -q -O images/lion.jpg https://github.com/gradio-app/gradio/raw/main/demo/image_classifier/images/lion.jpg"]}, {"cell_type": "code", "execution_count": null, "id": 44380577570523278879349135829904343037, "metadata": {}, "outputs": [], "source": ["import os\n", "import requests\n", "import tensorflow as tf\n", "\n", "import gradio as gr\n", "\n", "inception_net = tf.keras.applications.MobileNetV2() # load the model\n", "\n", "# Download human-readable labels for ImageNet.\n", "response = requests.get(\"https://git.io/JJkYN\")\n", "labels = response.text.split(\"\\n\")\n", "\n", "\n", "def classify_image(inp):\n", " inp = inp.reshape((-1, 224, 224, 3))\n", " inp = tf.keras.applications.mobilenet_v2.preprocess_input(inp)\n", " prediction = inception_net.predict(inp).flatten()\n", " return {labels[i]: float(prediction[i]) for i in range(1000)}\n", "\n", "\n", "image = gr.Image(shape=(224, 224))\n", "label = gr.Label(num_top_classes=3)\n", "\n", "demo = gr.Interface(\n", " fn=classify_image,\n", " inputs=image,\n", " outputs=label,\n", " examples=[\n", " os.path.join(os.path.abspath(''), \"images/cheetah1.jpg\"),\n", " os.path.join(os.path.abspath(''), \"images/lion.jpg\")\n", " ]\n", " )\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n", "\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
groundingLMM/gradio-dev/demo/image_classifier/run.py
ADDED
@@ -0,0 +1,36 @@
import os
import requests
import tensorflow as tf

import gradio as gr

inception_net = tf.keras.applications.MobileNetV2()  # load the model

# Download human-readable labels for ImageNet.
response = requests.get("https://git.io/JJkYN")
labels = response.text.split("\n")


def classify_image(inp):
    inp = inp.reshape((-1, 224, 224, 3))
    inp = tf.keras.applications.mobilenet_v2.preprocess_input(inp)
    prediction = inception_net.predict(inp).flatten()
    return {labels[i]: float(prediction[i]) for i in range(1000)}


image = gr.Image(shape=(224, 224))
label = gr.Label(num_top_classes=3)

demo = gr.Interface(
    fn=classify_image,
    inputs=image,
    outputs=label,
    examples=[
        os.path.join(os.path.dirname(__file__), "images/cheetah1.jpg"),
        os.path.join(os.path.dirname(__file__), "images/lion.jpg")
    ]
)

if __name__ == "__main__":
    demo.launch()

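Because the input component is gr.Image(shape=(224, 224)), classify_image receives a 224x224x3 uint8 numpy array. A sketch of reproducing that call manually (not part of the repo files, assuming the example images above were downloaded):

import numpy as np
from PIL import Image

arr = np.array(Image.open("images/cheetah1.jpg").resize((224, 224)))
scores = classify_image(arr)                              # {label: probability} for 1000 classes
print(sorted(scores, key=scores.get, reverse=True)[:3])   # top-3 predicted labels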
groundingLMM/gradio-dev/demo/image_mod/run.ipynb
ADDED
@@ -0,0 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: image_mod"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('images')\n", "!wget -q -O images/cheetah1.jpg https://github.com/gradio-app/gradio/raw/main/demo/image_mod/images/cheetah1.jpg\n", "!wget -q -O images/lion.jpg https://github.com/gradio-app/gradio/raw/main/demo/image_mod/images/lion.jpg\n", "!wget -q -O images/logo.png https://github.com/gradio-app/gradio/raw/main/demo/image_mod/images/logo.png\n", "!wget -q -O images/tower.jpg https://github.com/gradio-app/gradio/raw/main/demo/image_mod/images/tower.jpg"]}, {"cell_type": "code", "execution_count": null, "id": 44380577570523278879349135829904343037, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import os\n", "\n", "\n", "def image_mod(image):\n", " return image.rotate(45)\n", "\n", "\n", "demo = gr.Interface(\n", " image_mod,\n", " gr.Image(type=\"pil\"),\n", " \"image\",\n", " flagging_options=[\"blurry\", \"incorrect\", \"other\"],\n", " examples=[\n", " os.path.join(os.path.abspath(''), \"images/cheetah1.jpg\"),\n", " os.path.join(os.path.abspath(''), \"images/lion.jpg\"),\n", " os.path.join(os.path.abspath(''), \"images/logo.png\"),\n", " os.path.join(os.path.abspath(''), \"images/tower.jpg\"),\n", " ],\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
groundingLMM/gradio-dev/demo/image_mod/run.py
ADDED
@@ -0,0 +1,23 @@
import gradio as gr
import os


def image_mod(image):
    return image.rotate(45)


demo = gr.Interface(
    image_mod,
    gr.Image(type="pil"),
    "image",
    flagging_options=["blurry", "incorrect", "other"],
    examples=[
        os.path.join(os.path.dirname(__file__), "images/cheetah1.jpg"),
        os.path.join(os.path.dirname(__file__), "images/lion.jpg"),
        os.path.join(os.path.dirname(__file__), "images/logo.png"),
        os.path.join(os.path.dirname(__file__), "images/tower.jpg"),
    ],
)

if __name__ == "__main__":
    demo.launch()
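Since the input is declared as type="pil", image_mod receives and returns PIL images; a minimal sketch (not part of the repo files, assuming the example images above were downloaded):

from PIL import Image

rotated = image_mod(Image.open("images/cheetah1.jpg"))
rotated.save("cheetah1_rotated.jpg")   # the 45-degree rotated copy shown in the UI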
groundingLMM/gradio-dev/demo/interface_parallel_load/run.ipynb
ADDED
@@ -0,0 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: interface_parallel_load"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "generator1 = gr.load(\"huggingface/gpt2\")\n", "generator2 = gr.load(\"huggingface/EleutherAI/gpt-neo-2.7B\")\n", "generator3 = gr.load(\"huggingface/EleutherAI/gpt-j-6B\")\n", "\n", "demo = gr.Parallel(generator1, generator2, generator3)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
groundingLMM/gradio-dev/demo/interface_parallel_load/run.py
ADDED
@@ -0,0 +1,10 @@
import gradio as gr

generator1 = gr.load("huggingface/gpt2")
generator2 = gr.load("huggingface/EleutherAI/gpt-neo-2.7B")
generator3 = gr.load("huggingface/EleutherAI/gpt-j-6B")

demo = gr.Parallel(generator1, generator2, generator3)

if __name__ == "__main__":
    demo.launch()