# Gradio web-UI module: "Instruments" tab (natural-language-guided synthesis).
import gradio as gr
import numpy as np
from model.DiffSynthSampler import DiffSynthSampler
from tools import safe_int, read_wav_to_numpy
from webUI.natural_language_guided.utils import latent_representation_to_Gradio_image, \
    encodeBatch2GradioOutput_STFT, add_instrument
from webUI.natural_language_guided_4.utils import resize_image_to_aspect_ratio
def get_instruments_module(gradioWebUI, virtual_instruments_state):
    """Build the "Instruments" tab of the Gradio web UI.

    Args:
        gradioWebUI: Top-level web-UI object. Unused in this tab; presumably
            kept so the signature matches the other ``get_*_module`` builders
            — TODO confirm against the sibling modules.
        virtual_instruments_state: Shared Gradio state whose value is a dict
            with a ``"virtual_instruments"`` mapping of instrument name ->
            instrument record; each record carries a ``"signal"`` entry that
            unpacks to ``(sample_rate, waveform)`` (see ``select_instrument``).
    """
    with gr.Tab("instruments"):  # bug fix: label typo, was "intruments"
        gr.Markdown("Use neural networks to select random sounds using your favorite instrument!")
        with gr.Row(variant="panel"):
            with gr.Column(scale=1):
                input_text = gr.Textbox(label="input")

                # NOTE(review): show_split is never registered on any event, so
                # the per-letter textboxes and the merge handler are currently
                # dead code. It looks like it was written for a dynamic-render
                # pattern (e.g. gr.render on input_text) — confirm the intended
                # wiring before removing or activating it.
                def show_split(text):
                    textboxes = []
                    if len(text) == 0:
                        gr.Markdown("## No Input Provided")
                    else:
                        for letter in text:
                            textboxes.append(gr.Textbox(letter, interactive=True))

                    def merge(*splitted_texts):
                        # Re-assemble the per-letter textbox values into one string.
                        return "".join(splitted_texts)

                    submit_botton.click(merge, inputs=textboxes, outputs=merged_textbox)

                submit_botton = gr.Button("submit")
                merged_textbox = gr.Textbox(placeholder="placeholder", interactive=False)

            with gr.Column(scale=1):
                # Bug fix: the original read `virtual_instruments` and
                # `instrument_names` from the locals of an uncalled helper
                # (`check_instruments`) and wired the dropdown to an audio
                # component that was only created afterwards — both raised
                # NameError at build time. Read the registry from the shared
                # state up front and create the player before wiring.
                virtual_instruments = virtual_instruments_state.value["virtual_instruments"]
                instrument_names = list(virtual_instruments.keys())

                instrument_dropdown = gr.Dropdown(
                    instrument_names, label="instrument", info="info placeholder"
                )
                selected_instrument_audio = gr.Audio(type="numpy", label="Play", scale=1,
                                                     interactive=False)

                def select_instrument(instrument):
                    # Return the stored (sample_rate, waveform) pair so the
                    # Audio component can play the selected instrument.
                    print(f"instrument: {instrument}")
                    sr, signal = virtual_instruments[instrument]["signal"]
                    return {selected_instrument_audio: (sr, signal)}

                instrument_dropdown.select(select_instrument,
                                           inputs=instrument_dropdown,
                                           outputs=selected_instrument_audio)