tuandunghcmut committed on
Commit 4f5b7df · verified · 1 Parent(s): 9a25cd6

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. .gitattributes +88 -0
  2. Emu/Emu1/assets/generalist.png +3 -0
  3. Emu/Emu1/data/yt-sb-1b/video2dataset-1.1.0/benchmark/benchmark_vids.parquet +3 -0
  4. Emu/Emu1/data/yt-sb-1b/video2dataset-1.1.0/tests/test_files/test_audio.mp3 +3 -0
  5. Emu/Emu1/data/yt-sb-1b/video2dataset-1.1.0/tests/test_files/test_video.mp4 +3 -0
  6. Emu/Emu1/examples/AppleVR.mp4 +3 -0
  7. Emu/Emu1/examples/dog.png +3 -0
  8. Emu/Emu1/examples/oil_sunflower.jpg +3 -0
  9. Emu/Emu1/examples/panda.png +3 -0
  10. Emu/Emu1/examples/sunflower.png +3 -0
  11. Emu/Emu1/models/llama_config/tokenizer.model +3 -0
  12. groundingLMM/GLaMM-FullScope/.ipynb_checkpoints/tokenizer-checkpoint.model +3 -0
  13. groundingLMM/GLaMM-FullScope/pytorch_model-00001-of-00002.bin +3 -0
  14. groundingLMM/GLaMM-FullScope/pytorch_model-00002-of-00002.bin +3 -0
  15. groundingLMM/GLaMM-FullScope/tokenizer.model +3 -0
  16. groundingLMM/GranD/level_1_inference/9_ov_sam/ext/open_clip/bpe_simple_vocab_16e6.txt.gz +3 -0
  17. groundingLMM/GranD/level_2_inference/5_label_assignment/eva_clip/bpe_simple_vocab_16e6.txt.gz +3 -0
  18. groundingLMM/LLaVA/images/demo_cli.gif +3 -0
  19. groundingLMM/LLaVA/images/llava_example_cmp.png +3 -0
  20. groundingLMM/LLaVA/images/llava_logo.png +3 -0
  21. groundingLMM/LLaVA/images/llava_v1_5_radar.jpg +3 -0
  22. groundingLMM/gradio-dev/GLaMM-FullScope/pytorch_model-00001-of-00002.bin +3 -0
  23. groundingLMM/gradio-dev/GLaMM-FullScope/pytorch_model-00002-of-00002.bin +3 -0
  24. groundingLMM/gradio-dev/GLaMM-FullScope/tokenizer.model +3 -0
  25. groundingLMM/gradio-dev/box_demo.gif +3 -0
  26. groundingLMM/gradio-dev/demo/Echocardiogram-Segmentation/img2.jpg +3 -0
  27. groundingLMM/gradio-dev/demo/all_demos/tmp.zip +3 -0
  28. groundingLMM/gradio-dev/demo/animeganv2/groot.jpeg +3 -0
  29. groundingLMM/gradio-dev/demo/blocks_component_shortcut/run.py +31 -0
  30. groundingLMM/gradio-dev/demo/blocks_flipper/screenshot.gif +3 -0
  31. groundingLMM/gradio-dev/demo/blocks_layout/run.ipynb +1 -0
  32. groundingLMM/gradio-dev/demo/blocks_layout/run.py +39 -0
  33. groundingLMM/gradio-dev/demo/blocks_multiple_event_triggers/requirements.txt +2 -0
  34. groundingLMM/gradio-dev/demo/blocks_multiple_event_triggers/run.ipynb +1 -0
  35. groundingLMM/gradio-dev/demo/blocks_multiple_event_triggers/run.py +39 -0
  36. groundingLMM/gradio-dev/demo/blocks_neural_instrument_coding/run.ipynb +1 -0
  37. groundingLMM/gradio-dev/demo/blocks_scroll/run.ipynb +1 -0
  38. groundingLMM/gradio-dev/demo/blocks_scroll/run.py +24 -0
  39. groundingLMM/gradio-dev/demo/blocks_speech_text_sentiment/requirements.txt +2 -0
  40. groundingLMM/gradio-dev/demo/blocks_speech_text_sentiment/run.ipynb +1 -0
  41. groundingLMM/gradio-dev/demo/blocks_speech_text_sentiment/run.py +32 -0
  42. groundingLMM/gradio-dev/demo/blocks_webcam/run.ipynb +1 -0
  43. groundingLMM/gradio-dev/demo/blocks_webcam/run.py +13 -0
  44. groundingLMM/gradio-dev/demo/button_component/run.ipynb +1 -0
  45. groundingLMM/gradio-dev/demo/button_component/run.py +8 -0
  46. groundingLMM/gradio-dev/demo/calculator/screenshot.gif +3 -0
  47. groundingLMM/gradio-dev/demo/calculator_live/screenshot.gif +3 -0
  48. groundingLMM/gradio-dev/demo/chatbot_multimodal/run.ipynb +1 -0
  49. groundingLMM/gradio-dev/demo/chatbot_multimodal/run.py +41 -0
  50. groundingLMM/gradio-dev/demo/chatbot_streaming/run.ipynb +1 -0
.gitattributes CHANGED
@@ -252,3 +252,91 @@ Emu/Emu2/examples/emu.png filter=lfs diff=lfs merge=lfs -text
  Emu/Emu2/examples/shapes.jpeg filter=lfs diff=lfs merge=lfs -text
  Emu/Emu2/examples/dog.jpg filter=lfs diff=lfs merge=lfs -text
  Emu/Emu1/assets/Emu.png filter=lfs diff=lfs merge=lfs -text
+ Emu/Emu1/assets/generalist.png filter=lfs diff=lfs merge=lfs -text
+ Emu/Emu1/examples/sunflower.png filter=lfs diff=lfs merge=lfs -text
+ Emu/Emu1/examples/panda.png filter=lfs diff=lfs merge=lfs -text
+ Emu/Emu1/examples/dog.png filter=lfs diff=lfs merge=lfs -text
+ Emu/Emu1/examples/AppleVR.mp4 filter=lfs diff=lfs merge=lfs -text
+ Emu/Emu1/examples/oil_sunflower.jpg filter=lfs diff=lfs merge=lfs -text
+ Emu/Emu1/data/yt-sb-1b/video2dataset-1.1.0/tests/test_files/test_audio.mp3 filter=lfs diff=lfs merge=lfs -text
+ Emu/Emu1/data/yt-sb-1b/video2dataset-1.1.0/tests/test_files/test_video.mp4 filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/gradio-dev/box_demo.gif filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/mmcv/docs/en/_static/wechat_qrcode.jpg filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/mmcv/docs/en/_static/progress.gif filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/mmcv/docs/en/_static/flow_warp_diff.png filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/mmcv/docs/en/_static/flow_warp.png filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/mmcv/docs/en/_static/flow_raw_images.png filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/mmcv/docs/en/_static/zhihu_qrcode.jpg filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/mmcv/docs/en/_static/community/3.png filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/mmcv/build/temp.linux-x86_64-cpython-310/.ninja_deps filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/mmcv/build/temp.linux-x86_64-cpython-310/mmcv/ops/csrc/pytorch/cuda/cudabind.o filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/mmcv/tests/data/palette.gif filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/mmcv/tests/data/uint16-5channel.tif filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/mmcv/tests/data/test.mp4 filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/LLaVA/images/llava_v1_5_radar.jpg filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/LLaVA/images/llava_logo.png filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/LLaVA/images/llava_example_cmp.png filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/LLaVA/images/demo_cli.gif filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/images/logos/face.png filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/images/demo_resources/surfer.jpg filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/images/demo_resources/mansion.jpg filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/images/demo_resources/beetle.jpg filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/images/demo_resources/tokyo.jpg filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/images/demo_resources/balloon.jpg filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/images/demo_resources/church.jpg filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/images/demo_resources/joker.png filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/images/demo_resources/old.jpg filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/images/demo_resources/joker_old.jpg filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/images/demo_resources/snow.png filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/images/demo_resources/paddle.jpg filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/images/demo_resources/yacht.jpg filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/images/demo_resources/elephant.jpg filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/images/demo_resources/japan.jpg filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/images/tables/ReferSeg_Table.png filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/images/tables/GCG_Table.png filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/images/glamm/dataset_pipeline.png filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/images/glamm/grand_sample_1.png filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/images/glamm/model_arch.png filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/images/glamm/results_2_downstream.png filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/images/glamm/grand_sample_2.png filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/images/glamm/results_3_refseg.png filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/images/glamm/results_5_conv.png filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/images/glamm/results_6_cap.png filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/images/glamm/results_4_conv.png filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/images/glamm/results_4_regcap.png filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/images/glamm/grand_f_samples.png filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/images/glamm/results_1_gcg.png filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/images/glamm/results_7_gcg_combined.png filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/gradio-dev/readme_files/header-image.jpg filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/gradio-dev/test/test_files/video_sample.ogg filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/gradio-dev/test/test_files/playable_but_bad_container.mkv filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/gradio-dev/test/test_files/video_sample.webm filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/gradio-dev/test/test_files/video_sample.mp4 filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/gradio-dev/test/golden/image_mod/cheetah1.png filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/gradio-dev/website/homepage/src/assets/img/meta-image.png filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/gradio-dev/website/homepage/src/assets/img/header-image.jpg filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/gradio-dev/js/workbench/src/assets/cantina.wav filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/gradio-dev/js/app/public/static/img/Duck.glb filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/gradio-dev/guides/assets/hf_demo.mp4 filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/gradio-dev/guides/assets/annotated.png filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/gradio-dev/guides/assets/flagging-callback-hf.png filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/gradio-dev/demo/webcam/screenshot.png filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/gradio-dev/demo/sepia_filter/screenshot.gif filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/gradio-dev/demo/sales_projections/screenshot.gif filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/gradio-dev/demo/hello_world_2/screenshot.gif filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/gradio-dev/demo/image_classifier_interpretation/screenshot.gif filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/gradio-dev/demo/image_classifier_interpretation/screenshot.png filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/gradio-dev/demo/hello_world/screenshot.gif filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/gradio-dev/demo/spectogram/screenshot.png filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/gradio-dev/demo/stock_forecast/screenshot.png filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/gradio-dev/demo/color_picker/screenshot.gif filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/gradio-dev/demo/blocks_flipper/screenshot.gif filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/gradio-dev/demo/animeganv2/groot.jpeg filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/gradio-dev/demo/hello_world_3/screenshot.gif filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/gradio-dev/demo/calculator_live/screenshot.gif filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/gradio-dev/demo/image_mod/screenshot.png filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/gradio-dev/demo/Echocardiogram-Segmentation/img2.jpg filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/gradio-dev/demo/image_classifier/screenshot.gif filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/gradio-dev/demo/image_classifier/screenshot.png filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/gradio-dev/demo/calculator/screenshot.gif filter=lfs diff=lfs merge=lfs -text
+ groundingLMM/gradio-dev/demo/video_identity/screenshot.png filter=lfs diff=lfs merge=lfs -text
Emu/Emu1/assets/generalist.png ADDED

Git LFS Details

  • SHA256: 6738ca596ad02203e0846ce0dbf5ba5e48cb3d72deea90ee6ac8fa14cd6c2b63
  • Pointer size: 132 Bytes
  • Size of remote file: 3.27 MB
Emu/Emu1/data/yt-sb-1b/video2dataset-1.1.0/benchmark/benchmark_vids.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9494b8e91351aad8dda22712dfdd2611d483eabc8ed23429445405ec9ec9ab24
+ size 48934
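Most of the binary files in this commit are stored through Git LFS, so the diffs above and below show three-line pointer stubs rather than the payload itself: a spec version, the SHA-256 object id, and the byte size of the remote file. As a minimal illustrative sketch only (the helper name and the example call are hypothetical, not part of the committed files), such a pointer can be parsed in a few lines of Python:

from pathlib import Path

def parse_lfs_pointer(path):
    # Read a Git LFS pointer stub and return its version, oid, and size.
    # Hypothetical helper for illustration; not part of this commit.
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return {
        "version": fields["version"],
        "oid": fields["oid"].removeprefix("sha256:"),  # bare hex digest
        "size": int(fields["size"]),                   # payload size in bytes
    }

# Example (hypothetical local checkout of this repo):
# parse_lfs_pointer("Emu/Emu1/examples/AppleVR.mp4")
# -> {'version': 'https://git-lfs.github.com/spec/v1', 'oid': '9869d2...', 'size': 3469070}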
Emu/Emu1/data/yt-sb-1b/video2dataset-1.1.0/tests/test_files/test_audio.mp3 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ed92db32c3a6826502ffd600e778d01ae6317b36fb1fe1c7bbffb0f0870059ea
+ size 2406693
Emu/Emu1/data/yt-sb-1b/video2dataset-1.1.0/tests/test_files/test_video.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3075ae64193919325c7c8a33117f8d2bc98579f76c1920b8ebe6abeff857c48f
+ size 4090211
Emu/Emu1/examples/AppleVR.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9869d2309cbcba84064813e56e3fe746ed41d35ecf55e39904acf83448ecec7f
+ size 3469070
Emu/Emu1/examples/dog.png ADDED

Git LFS Details

  • SHA256: b623861fc2176f6ad6d596417ed82a1cefd3f264b288fcdb26fa58dc55895183
  • Pointer size: 132 Bytes
  • Size of remote file: 1.28 MB
Emu/Emu1/examples/oil_sunflower.jpg ADDED

Git LFS Details

  • SHA256: c244728525468b6a50d90827c281ee6b1a7b65e2f3488b4e70f2001ee7d24d67
  • Pointer size: 131 Bytes
  • Size of remote file: 201 kB
Emu/Emu1/examples/panda.png ADDED

Git LFS Details

  • SHA256: e5c335f0b46bd4f0aed771e1aecfa5ce2d067b186e76fdb81407805d73853ede
  • Pointer size: 131 Bytes
  • Size of remote file: 285 kB
Emu/Emu1/examples/sunflower.png ADDED

Git LFS Details

  • SHA256: b0f836deadeac13e53e8b19c1c5e173f6df43c29eed13f854e65628076b4df47
  • Pointer size: 131 Bytes
  • Size of remote file: 918 kB
Emu/Emu1/models/llama_config/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
groundingLMM/GLaMM-FullScope/.ipynb_checkpoints/tokenizer-checkpoint.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1a8f238a200be6c23fbba0f9a999ab4fe3c09ca303b29805e68cf6659bfb7d89
+ size 131
groundingLMM/GLaMM-FullScope/pytorch_model-00001-of-00002.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:508524edd1e9b3630560aced18a0fb3bb59a46e2d8b640bd184dcd02e5254d75
+ size 135
groundingLMM/GLaMM-FullScope/pytorch_model-00002-of-00002.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fccfa2b9cdc0f12382ec6454c80e50fd426a9b24b25792db64f13bb295e6a08e
+ size 135
groundingLMM/GLaMM-FullScope/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1a8f238a200be6c23fbba0f9a999ab4fe3c09ca303b29805e68cf6659bfb7d89
+ size 131
groundingLMM/GranD/level_1_inference/9_ov_sam/ext/open_clip/bpe_simple_vocab_16e6.txt.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:924691ac288e54409236115652ad4aa250f48203de50a9e4722a6ecd48d6804a
+ size 1356917
groundingLMM/GranD/level_2_inference/5_label_assignment/eva_clip/bpe_simple_vocab_16e6.txt.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:924691ac288e54409236115652ad4aa250f48203de50a9e4722a6ecd48d6804a
+ size 1356917
groundingLMM/LLaVA/images/demo_cli.gif ADDED

Git LFS Details

  • SHA256: 09227563f4fe04f077587eeb7b7c33ace2fbb8830e6cc9cfce03a25a57c43bfe
  • Pointer size: 133 Bytes
  • Size of remote file: 10 MB
groundingLMM/LLaVA/images/llava_example_cmp.png ADDED

Git LFS Details

  • SHA256: 722b358a3ea285deedda3c9d278390f8f5cbb0b5e924465248f458c4d7aa1c51
  • Pointer size: 131 Bytes
  • Size of remote file: 324 kB
groundingLMM/LLaVA/images/llava_logo.png ADDED

Git LFS Details

  • SHA256: 49324ed27269d31bd05f94fec215555ada39e4e75e6d6b69aa3896ed90bce759
  • Pointer size: 131 Bytes
  • Size of remote file: 268 kB
groundingLMM/LLaVA/images/llava_v1_5_radar.jpg ADDED

Git LFS Details

  • SHA256: 1f6f7f4385da27b0fbc9eb466a04db20013f53f0ab5e28cd4483e0b59d17def0
  • Pointer size: 131 Bytes
  • Size of remote file: 103 kB
groundingLMM/gradio-dev/GLaMM-FullScope/pytorch_model-00001-of-00002.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:508524edd1e9b3630560aced18a0fb3bb59a46e2d8b640bd184dcd02e5254d75
+ size 135
groundingLMM/gradio-dev/GLaMM-FullScope/pytorch_model-00002-of-00002.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fccfa2b9cdc0f12382ec6454c80e50fd426a9b24b25792db64f13bb295e6a08e
+ size 135
groundingLMM/gradio-dev/GLaMM-FullScope/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1a8f238a200be6c23fbba0f9a999ab4fe3c09ca303b29805e68cf6659bfb7d89
+ size 131
groundingLMM/gradio-dev/box_demo.gif ADDED

Git LFS Details

  • SHA256: 22af0c7ee6f0e1f112003fd422eab3f6bd5ec2e2a0be4c592009024e7714cb7b
  • Pointer size: 131 Bytes
  • Size of remote file: 367 kB
groundingLMM/gradio-dev/demo/Echocardiogram-Segmentation/img2.jpg ADDED

Git LFS Details

  • SHA256: cb163302574d298051b63aa6adec389dbe1c7dc94814ef3a05ebfbd697173ef3
  • Pointer size: 131 Bytes
  • Size of remote file: 106 kB
groundingLMM/gradio-dev/demo/all_demos/tmp.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c3f385135e9c9b37cd546c01acb64d4a4aae431450c86a75a649e9e8cf5a538d
+ size 184378
groundingLMM/gradio-dev/demo/animeganv2/groot.jpeg ADDED

Git LFS Details

  • SHA256: fff4d05d30088da0f073ae4ab21d22d11cbd56cfab711f3c359faabbf8577076
  • Pointer size: 131 Bytes
  • Size of remote file: 350 kB
groundingLMM/gradio-dev/demo/blocks_component_shortcut/run.py ADDED
@@ -0,0 +1,31 @@
+ import gradio as gr
+
+
+ def greet(str):
+     return str
+
+
+ with gr.Blocks() as demo:
+     """
+     You can make use of str shortcuts you use in Interface within Blocks as well.
+
+     Interface shortcut example:
+     Interface(greet, "textarea", "textarea")
+
+     You can use
+     1. gr.component()
+     2. gr.templates.Template()
+     3. gr.Template()
+     All the templates are listed in gradio/templates.py
+     """
+     with gr.Row():
+         text1 = gr.component("textarea")
+         text2 = gr.TextArea()
+         text3 = gr.templates.TextArea()
+         text1.blur(greet, text1, text2)
+         text2.blur(greet, text2, text3)
+         text3.blur(greet, text3, text1)
+         button = gr.component("button")
+
+ if __name__ == "__main__":
+     demo.launch()
groundingLMM/gradio-dev/demo/blocks_flipper/screenshot.gif ADDED

Git LFS Details

  • SHA256: 21b814857d694e576b3e6db4cabe069f56e7386f7a1fabc6be81431c7176d700
  • Pointer size: 132 Bytes
  • Size of remote file: 1.11 MB
groundingLMM/gradio-dev/demo/blocks_layout/run.ipynb ADDED
@@ -0,0 +1 @@
+ {"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: blocks_layout"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "\n", "demo = gr.Blocks()\n", "\n", "with demo:\n", " with gr.Row():\n", " gr.Image(interactive=True, scale=2)\n", " gr.Image()\n", " with gr.Row():\n", " gr.Textbox(label=\"Text\")\n", " gr.Number(label=\"Count\", scale=2)\n", " gr.Radio(choices=[\"One\", \"Two\"])\n", " with gr.Row():\n", " gr.Button(\"500\", scale=0, min_width=500)\n", " gr.Button(\"A\").style(full_width=False)\n", " gr.Button(\"grow\")\n", " with gr.Row():\n", " gr.Textbox()\n", " gr.Textbox()\n", " gr.Button() \n", " with gr.Row():\n", " with gr.Row():\n", " with gr.Column():\n", " gr.Textbox(label=\"Text\")\n", " gr.Number(label=\"Count\")\n", " gr.Radio(choices=[\"One\", \"Two\"])\n", " gr.Image()\n", " with gr.Column():\n", " gr.Image(interactive=True)\n", " gr.Image()\n", " gr.Image()\n", " gr.Textbox(label=\"Text\")\n", " gr.Number(label=\"Count\")\n", " gr.Radio(choices=[\"One\", \"Two\"])\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
groundingLMM/gradio-dev/demo/blocks_layout/run.py ADDED
@@ -0,0 +1,39 @@
+ import gradio as gr
+
+
+ demo = gr.Blocks()
+
+ with demo:
+     with gr.Row():
+         gr.Image(interactive=True, scale=2)
+         gr.Image()
+     with gr.Row():
+         gr.Textbox(label="Text")
+         gr.Number(label="Count", scale=2)
+         gr.Radio(choices=["One", "Two"])
+     with gr.Row():
+         gr.Button("500", scale=0, min_width=500)
+         gr.Button("A").style(full_width=False)
+         gr.Button("grow")
+     with gr.Row():
+         gr.Textbox()
+         gr.Textbox()
+         gr.Button()
+     with gr.Row():
+         with gr.Row():
+             with gr.Column():
+                 gr.Textbox(label="Text")
+                 gr.Number(label="Count")
+                 gr.Radio(choices=["One", "Two"])
+                 gr.Image()
+             with gr.Column():
+                 gr.Image(interactive=True)
+                 gr.Image()
+                 gr.Image()
+                 gr.Textbox(label="Text")
+                 gr.Number(label="Count")
+                 gr.Radio(choices=["One", "Two"])
+
+
+ if __name__ == "__main__":
+     demo.launch()
groundingLMM/gradio-dev/demo/blocks_multiple_event_triggers/requirements.txt ADDED
@@ -0,0 +1,2 @@
+ plotly
+ pypistats
groundingLMM/gradio-dev/demo/blocks_multiple_event_triggers/run.ipynb ADDED
@@ -0,0 +1 @@
+ {"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: blocks_multiple_event_triggers"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio plotly pypistats"]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import pypistats\n", "from datetime import date\n", "from dateutil.relativedelta import relativedelta\n", "import pandas as pd\n", "\n", "def get_plot(lib, time):\n", " data = pypistats.overall(lib, total=True, format=\"pandas\")\n", " data = data.groupby(\"category\").get_group(\"with_mirrors\").sort_values(\"date\")\n", " start_date = date.today() - relativedelta(months=int(time.split(\" \")[0]))\n", " data = data[(data['date'] > str(start_date))]\n", " data.date = pd.to_datetime(pd.to_datetime(data.date))\n", " return gr.LinePlot.update(value=data, x=\"date\", y=\"downloads\",\n", " tooltip=['date', 'downloads'],\n", " title=f\"Pypi downloads of {lib} over last {time}\",\n", " overlay_point=True,\n", " height=400,\n", " width=900)\n", "\n", "\n", "with gr.Blocks() as demo:\n", " gr.Markdown(\n", " \"\"\"\n", " ## Pypi Download Stats \ud83d\udcc8\n", " See live download stats for all of Hugging Face's open-source libraries \ud83e\udd17\n", " \"\"\")\n", " with gr.Row():\n", " lib = gr.Dropdown([\"transformers\", \"datasets\", \"huggingface-hub\", \"gradio\", \"accelerate\"],\n", " value=\"gradio\", label=\"Library\")\n", " time = gr.Dropdown([\"3 months\", \"6 months\", \"9 months\", \"12 months\"],\n", " value=\"3 months\", label=\"Downloads over the last...\")\n", "\n", " plt = gr.LinePlot()\n", " # You can add multiple event triggers in 2 lines like this\n", " for event in [lib.change, time.change, demo.load]:\n", " event(get_plot, [lib, time], [plt])\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
groundingLMM/gradio-dev/demo/blocks_multiple_event_triggers/run.py ADDED
@@ -0,0 +1,39 @@
+ import gradio as gr
+ import pypistats
+ from datetime import date
+ from dateutil.relativedelta import relativedelta
+ import pandas as pd
+
+ def get_plot(lib, time):
+     data = pypistats.overall(lib, total=True, format="pandas")
+     data = data.groupby("category").get_group("with_mirrors").sort_values("date")
+     start_date = date.today() - relativedelta(months=int(time.split(" ")[0]))
+     data = data[(data['date'] > str(start_date))]
+     data.date = pd.to_datetime(pd.to_datetime(data.date))
+     return gr.LinePlot.update(value=data, x="date", y="downloads",
+                               tooltip=['date', 'downloads'],
+                               title=f"Pypi downloads of {lib} over last {time}",
+                               overlay_point=True,
+                               height=400,
+                               width=900)
+
+
+ with gr.Blocks() as demo:
+     gr.Markdown(
+         """
+         ## Pypi Download Stats 📈
+         See live download stats for all of Hugging Face's open-source libraries 🤗
+         """)
+     with gr.Row():
+         lib = gr.Dropdown(["transformers", "datasets", "huggingface-hub", "gradio", "accelerate"],
+                           value="gradio", label="Library")
+         time = gr.Dropdown(["3 months", "6 months", "9 months", "12 months"],
+                            value="3 months", label="Downloads over the last...")
+
+     plt = gr.LinePlot()
+     # You can add multiple event triggers in 2 lines like this
+     for event in [lib.change, time.change, demo.load]:
+         event(get_plot, [lib, time], [plt])
+
+ if __name__ == "__main__":
+     demo.launch()
groundingLMM/gradio-dev/demo/blocks_neural_instrument_coding/run.ipynb ADDED
@@ -0,0 +1 @@
+ {"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: blocks_neural_instrument_coding"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/blocks_neural_instrument_coding/flute.wav\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/blocks_neural_instrument_coding/new-sax-1.mp3\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/blocks_neural_instrument_coding/new-sax-1.wav\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/blocks_neural_instrument_coding/new-sax.wav\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/blocks_neural_instrument_coding/sax.wav\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/blocks_neural_instrument_coding/sax2.wav\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/blocks_neural_instrument_coding/trombone.wav"]}, {"cell_type": "code", "execution_count": null, "id": 44380577570523278879349135829904343037, "metadata": {}, "outputs": [], "source": ["# A Blocks implementation of https://erlj.notion.site/Neural-Instrument-Cloning-from-very-few-samples-2cf41d8b630842ee8c7eb55036a1bfd6\n", "\n", "import datetime\n", "import os\n", "import random\n", "\n", "import gradio as gr\n", "from gradio.components import Markdown as m\n", "\n", "\n", "def get_time():\n", " now = datetime.datetime.now()\n", " return now.strftime(\"%m/%d/%Y, %H:%M:%S\")\n", "\n", "\n", "def generate_recording():\n", " return random.choice([\"new-sax-1.mp3\", \"new-sax-1.wav\"])\n", "\n", "\n", "def reconstruct(audio):\n", " return random.choice([\"new-sax-1.mp3\", \"new-sax-1.wav\"])\n", "\n", "\n", "io1 = gr.Interface(\n", " lambda x, y, z: os.path.join(os.path.abspath(''),\"sax.wav\"),\n", " [\n", " gr.Slider(label=\"pitch\"),\n", " gr.Slider(label=\"loudness\"),\n", " gr.Audio(label=\"base audio file (optional)\"),\n", " ],\n", " gr.Audio(),\n", ")\n", "\n", "io2 = gr.Interface(\n", " lambda x, y, z: os.path.join(os.path.abspath(''),\"flute.wav\"),\n", " [\n", " gr.Slider(label=\"pitch\"),\n", " gr.Slider(label=\"loudness\"),\n", " gr.Audio(label=\"base audio file (optional)\"),\n", " ],\n", " gr.Audio(),\n", ")\n", "\n", "io3 = gr.Interface(\n", " lambda x, y, z: os.path.join(os.path.abspath(''),\"trombone.wav\"),\n", " [\n", " gr.Slider(label=\"pitch\"),\n", " gr.Slider(label=\"loudness\"),\n", " gr.Audio(label=\"base audio file (optional)\"),\n", " ],\n", " gr.Audio(),\n", ")\n", "\n", "io4 = gr.Interface(\n", " lambda x, y, z: os.path.join(os.path.abspath(''),\"sax2.wav\"),\n", " [\n", " gr.Slider(label=\"pitch\"),\n", " gr.Slider(label=\"loudness\"),\n", " gr.Audio(label=\"base audio file (optional)\"),\n", " ],\n", " gr.Audio(),\n", ")\n", "\n", "demo = gr.Blocks(title=\"Neural Instrument Cloning\")\n", "\n", "with demo.clear():\n", " m(\n", " \"\"\"\n", " ## Neural Instrument Cloning from Very Few Samples\n", " <center><img src=\"https://media.istockphoto.com/photos/brass-trombone-picture-id490455809?k=20&m=490455809&s=612x612&w=0&h=l9KJvH_25z0QTLggHrcH_MsR4gPLH7uXwDPUAZ_C5zk=\" width=\"400px\"></center>\"\"\"\n", " )\n", " m(\n", " \"\"\"\n", " This Blocks 
implementation is an adaptation [a report written](https://erlj.notion.site/Neural-Instrument-Cloning-from-very-few-samples-2cf41d8b630842ee8c7eb55036a1bfd6) by Nicolas Jonason and Bob L.T. Sturm.\n", " \n", " I've implemented it in Blocks to show off some cool features, such as embedding live ML demos. More on that ahead...\n", " \n", " ### What does this machine learning model do?\n", " It combines techniques from neural voice cloning with musical instrument synthesis. This makes it possible to produce neural instrument synthesisers from just seconds of target instrument audio.\n", " \n", " ### Audio Examples\n", " Here are some **real** 16 second saxophone recordings:\n", " \"\"\"\n", " )\n", " gr.Audio(os.path.join(os.path.abspath(''),\"sax.wav\"), label=\"Here is a real 16 second saxophone recording:\")\n", " gr.Audio(os.path.join(os.path.abspath(''),\"sax.wav\"))\n", "\n", " m(\n", " \"\"\"\\n\n", " Here is a **generated** saxophone recordings:\"\"\"\n", " )\n", " a = gr.Audio(os.path.join(os.path.abspath(''),\"new-sax.wav\"))\n", "\n", " gr.Button(\"Generate a new saxophone recording\")\n", "\n", " m(\n", " \"\"\"\n", " ### Inputs to the model\n", " The inputs to the model are:\n", " * pitch\n", " * loudness\n", " * base audio file\n", " \"\"\"\n", " )\n", "\n", " m(\n", " \"\"\"\n", " Try the model live!\n", " \"\"\"\n", " )\n", "\n", " gr.TabbedInterface(\n", " [io1, io2, io3, io4], [\"Saxophone\", \"Flute\", \"Trombone\", \"Another Saxophone\"]\n", " )\n", "\n", " m(\n", " \"\"\"\n", " ### Using the model for cloning\n", " You can also use this model a different way, to simply clone the audio file and reconstruct it \n", " using machine learning. Here, we'll show a demo of that below:\n", " \"\"\"\n", " )\n", "\n", " a2 = gr.Audio()\n", " a2.change(reconstruct, a2, a2)\n", "\n", " m(\n", " \"\"\"\n", " Thanks for reading this! As you may have realized, all of the \"models\" in this demo are fake. They are just designed to show you what is possible using Blocks \ud83e\udd17.\n", " \n", " For details of the model, read the [original report here](https://erlj.notion.site/Neural-Instrument-Cloning-from-very-few-samples-2cf41d8b630842ee8c7eb55036a1bfd6).\n", " \n", " *Details for nerds*: this report was \"launched\" on:\n", " \"\"\"\n", " )\n", "\n", " t = gr.Textbox(label=\"timestamp\")\n", "\n", " demo.load(get_time, [], t)\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
groundingLMM/gradio-dev/demo/blocks_scroll/run.ipynb ADDED
@@ -0,0 +1 @@
+ {"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: blocks_scroll"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "\n", "demo = gr.Blocks()\n", "\n", "with demo:\n", " inp = gr.Textbox(placeholder=\"Enter text.\")\n", " scroll_btn = gr.Button(\"Scroll\")\n", " no_scroll_btn = gr.Button(\"No Scroll\")\n", " big_block = gr.HTML(\"\"\"\n", " <div style='height: 800px; width: 100px; background-color: pink;'></div>\n", " \"\"\")\n", " out = gr.Textbox()\n", " \n", " scroll_btn.click(lambda x: x, \n", " inputs=inp, \n", " outputs=out,\n", " scroll_to_output=True)\n", " no_scroll_btn.click(lambda x: x, \n", " inputs=inp, \n", " outputs=out)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
groundingLMM/gradio-dev/demo/blocks_scroll/run.py ADDED
@@ -0,0 +1,24 @@
+ import gradio as gr
+
+
+ demo = gr.Blocks()
+
+ with demo:
+     inp = gr.Textbox(placeholder="Enter text.")
+     scroll_btn = gr.Button("Scroll")
+     no_scroll_btn = gr.Button("No Scroll")
+     big_block = gr.HTML("""
+     <div style='height: 800px; width: 100px; background-color: pink;'></div>
+     """)
+     out = gr.Textbox()
+
+     scroll_btn.click(lambda x: x,
+                      inputs=inp,
+                      outputs=out,
+                      scroll_to_output=True)
+     no_scroll_btn.click(lambda x: x,
+                         inputs=inp,
+                         outputs=out)
+
+ if __name__ == "__main__":
+     demo.launch()
groundingLMM/gradio-dev/demo/blocks_speech_text_sentiment/requirements.txt ADDED
@@ -0,0 +1,2 @@
+ torch
+ transformers
groundingLMM/gradio-dev/demo/blocks_speech_text_sentiment/run.ipynb ADDED
@@ -0,0 +1 @@
+ {"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: blocks_speech_text_sentiment"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio torch transformers"]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["from transformers import pipeline\n", "\n", "import gradio as gr\n", "\n", "asr = pipeline(\"automatic-speech-recognition\", \"facebook/wav2vec2-base-960h\")\n", "classifier = pipeline(\"text-classification\")\n", "\n", "\n", "def speech_to_text(speech):\n", " text = asr(speech)[\"text\"]\n", " return text\n", "\n", "\n", "def text_to_sentiment(text):\n", " return classifier(text)[0][\"label\"]\n", "\n", "\n", "demo = gr.Blocks()\n", "\n", "with demo:\n", " audio_file = gr.Audio(type=\"filepath\")\n", " text = gr.Textbox()\n", " label = gr.Label()\n", "\n", " b1 = gr.Button(\"Recognize Speech\")\n", " b2 = gr.Button(\"Classify Sentiment\")\n", "\n", " b1.click(speech_to_text, inputs=audio_file, outputs=text)\n", " b2.click(text_to_sentiment, inputs=text, outputs=label)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
groundingLMM/gradio-dev/demo/blocks_speech_text_sentiment/run.py ADDED
@@ -0,0 +1,32 @@
+ from transformers import pipeline
+
+ import gradio as gr
+
+ asr = pipeline("automatic-speech-recognition", "facebook/wav2vec2-base-960h")
+ classifier = pipeline("text-classification")
+
+
+ def speech_to_text(speech):
+     text = asr(speech)["text"]
+     return text
+
+
+ def text_to_sentiment(text):
+     return classifier(text)[0]["label"]
+
+
+ demo = gr.Blocks()
+
+ with demo:
+     audio_file = gr.Audio(type="filepath")
+     text = gr.Textbox()
+     label = gr.Label()
+
+     b1 = gr.Button("Recognize Speech")
+     b2 = gr.Button("Classify Sentiment")
+
+     b1.click(speech_to_text, inputs=audio_file, outputs=text)
+     b2.click(text_to_sentiment, inputs=text, outputs=label)
+
+ if __name__ == "__main__":
+     demo.launch()
groundingLMM/gradio-dev/demo/blocks_webcam/run.ipynb ADDED
@@ -0,0 +1 @@
+ {"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: blocks_webcam"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import numpy as np\n", "\n", "import gradio as gr\n", "\n", "\n", "def snap(image):\n", " return np.flipud(image)\n", "\n", "\n", "demo = gr.Interface(snap, \"webcam\", \"image\")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
groundingLMM/gradio-dev/demo/blocks_webcam/run.py ADDED
@@ -0,0 +1,13 @@
+ import numpy as np
+
+ import gradio as gr
+
+
+ def snap(image):
+     return np.flipud(image)
+
+
+ demo = gr.Interface(snap, "webcam", "image")
+
+ if __name__ == "__main__":
+     demo.launch()
groundingLMM/gradio-dev/demo/button_component/run.ipynb ADDED
@@ -0,0 +1 @@
+ {"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: button_component"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "css = \"footer {display: none !important;} .gradio-container {min-height: 0px !important;}\"\n", "\n", "with gr.Blocks(css=css) as demo:\n", " gr.Button()\n", " \n", "demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
groundingLMM/gradio-dev/demo/button_component/run.py ADDED
@@ -0,0 +1,8 @@
+ import gradio as gr
+
+ css = "footer {display: none !important;} .gradio-container {min-height: 0px !important;}"
+
+ with gr.Blocks(css=css) as demo:
+     gr.Button()
+
+ demo.launch()
groundingLMM/gradio-dev/demo/calculator/screenshot.gif ADDED

Git LFS Details

  • SHA256: 3698fb03b6507ff954de47559f6830dfff88aa66487d2029a9bcf1c2f3762e08
  • Pointer size: 132 Bytes
  • Size of remote file: 5.72 MB
groundingLMM/gradio-dev/demo/calculator_live/screenshot.gif ADDED

Git LFS Details

  • SHA256: aa2424ecfa662e71fdb73f35809fb06f2ce9cbf0be7ffea3a72c05f0513063e4
  • Pointer size: 131 Bytes
  • Size of remote file: 776 kB
groundingLMM/gradio-dev/demo/chatbot_multimodal/run.ipynb ADDED
@@ -0,0 +1 @@
+ {"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: chatbot_multimodal"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "\n", "def add_text(history, text):\n", " history = history + [(text, None)]\n", " return history, gr.update(value=\"\", interactive=False)\n", "\n", "\n", "def add_file(history, file):\n", " history = history + [((file.name,), None)]\n", " return history\n", "\n", "\n", "def bot(history):\n", " response = \"**That's cool!**\"\n", " history[-1][1] = response\n", " return history\n", "\n", "\n", "with gr.Blocks() as demo:\n", " chatbot = gr.Chatbot([], elem_id=\"chatbot\").style(height=750)\n", "\n", " with gr.Row():\n", " with gr.Column(scale=0.85):\n", " txt = gr.Textbox(\n", " show_label=False,\n", " placeholder=\"Enter text and press enter, or upload an image\",\n", " ).style(container=False)\n", " with gr.Column(scale=0.15, min_width=0):\n", " btn = gr.UploadButton(\"\ud83d\udcc1\", file_types=[\"image\", \"video\", \"audio\"])\n", "\n", " txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(\n", " bot, chatbot, chatbot\n", " )\n", " txt_msg.then(lambda: gr.update(interactive=True), None, [txt], queue=False)\n", " file_msg = btn.upload(add_file, [chatbot, btn], [chatbot], queue=False).then(\n", " bot, chatbot, chatbot\n", " )\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
groundingLMM/gradio-dev/demo/chatbot_multimodal/run.py ADDED
@@ -0,0 +1,41 @@
+ import gradio as gr
+
+
+ def add_text(history, text):
+     history = history + [(text, None)]
+     return history, gr.update(value="", interactive=False)
+
+
+ def add_file(history, file):
+     history = history + [((file.name,), None)]
+     return history
+
+
+ def bot(history):
+     response = "**That's cool!**"
+     history[-1][1] = response
+     return history
+
+
+ with gr.Blocks() as demo:
+     chatbot = gr.Chatbot([], elem_id="chatbot").style(height=750)
+
+     with gr.Row():
+         with gr.Column(scale=0.85):
+             txt = gr.Textbox(
+                 show_label=False,
+                 placeholder="Enter text and press enter, or upload an image",
+             ).style(container=False)
+         with gr.Column(scale=0.15, min_width=0):
+             btn = gr.UploadButton("📁", file_types=["image", "video", "audio"])
+
+     txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(
+         bot, chatbot, chatbot
+     )
+     txt_msg.then(lambda: gr.update(interactive=True), None, [txt], queue=False)
+     file_msg = btn.upload(add_file, [chatbot, btn], [chatbot], queue=False).then(
+         bot, chatbot, chatbot
+     )
+
+ if __name__ == "__main__":
+     demo.launch()
groundingLMM/gradio-dev/demo/chatbot_streaming/run.ipynb ADDED
@@ -0,0 +1 @@
+ {"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: chatbot_streaming"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import random\n", "import time\n", "\n", "with gr.Blocks() as demo:\n", " chatbot = gr.Chatbot()\n", " msg = gr.Textbox()\n", " clear = gr.ClearButton([msg, chatbot])\n", "\n", " def user(user_message, history):\n", " return gr.update(value=\"\", interactive=False), history + [[user_message, None]]\n", "\n", " def bot(history):\n", " bot_message = random.choice([\"How are you?\", \"I love you\", \"I'm very hungry\"])\n", " history[-1][1] = \"\"\n", " for character in bot_message:\n", " history[-1][1] += character\n", " time.sleep(0.05)\n", " yield history\n", "\n", " response = msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(\n", " bot, chatbot, chatbot\n", " )\n", " response.then(lambda: gr.update(interactive=True), None, [msg], queue=False)\n", "\n", "demo.queue()\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}