Update app.py
app.py CHANGED
@@ -3,74 +3,120 @@ import logging
 import gradio as gr
 from gradio_client import Client, handle_file

-#
+# Logging setup
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)

 token = os.getenv("HUGGINGFACE_TOKEN")

 def create_dubsync_interface():
-    …
-    gr.
-    …
-    )
-    …
+    client = Client("Tamiloneto8/Test1", hf_token=token, verbose=True)
+
+    target_langs = ["Assamese", "Bengali", "Gujarati", "Hindi", "Kannada",
+                    "Malayalam", "Marathi", "Odia", "Punjabi", "Tamil", "Telugu"]
+
+    css = """
+    .gradio-container { max-width: 1200px; margin: auto; }
+    .main-header { text-align: center; font-size: 2.5em; margin-bottom: 1em; }
+    """
+
+    with gr.Blocks(title="DubSync - AI Audio Dubbing", css=css) as demo:
+        gr.HTML("<h1 class='main-header'>🎬 DubSync - AI Audio Dubbing Pipeline</h1>")
+        gr.Markdown("Transform your audio into another Indian language with full editing control.")
+
+        # Step 1: upload and process
+        with gr.Row():
+            with gr.Column(scale=1):
+                audio_input = gr.Audio(sources=["upload"], type="filepath", label="🎵 Upload Audio File")
+                lang_dropdown = gr.Dropdown(target_langs, label="Target Language")
+                process_btn = gr.Button("🎯 Start Processing", variant="primary")
+
+        status_box = gr.Textbox(label="Processing Status", interactive=False)
+        chunk_info_state = gr.State([])
+        current_index_state = gr.State(0)
+
+        # Step 2: per-chunk review and editing
+        with gr.Row(visible=False) as edit_section:
+            with gr.Column():
+                with gr.Row():
+                    prev_btn = gr.Button("◀️ Previous")
+                    chunk_counter = gr.Textbox(interactive=False)
+                    next_btn = gr.Button("Next ▶️")
+
+                original_audio = gr.Audio(label="Original Chunk Audio", type="filepath")
+                transcription_text = gr.Textbox(label="Transcription (edit if needed)", lines=2)
+                translation_text = gr.Textbox(label="Translation (edit if needed)", lines=2)
+
+                with gr.Row():
+                    gen_btn = gr.Button("Generate Dubbed Chunk")
+                    final_btn = gr.Button("Finalize Chunk", variant="secondary")
+
+                dubbed_audio = gr.Audio(label="Dubbed Chunk Audio", type="filepath")
+                progress_text = gr.Textbox(label="Progress", interactive=False)
+
+        # Step 3: merge
+        merge_btn = gr.Button("Merge All Finalized Chunks", variant="primary", visible=False)
+        final_output = gr.Textbox(label="Final Results", interactive=False)
+        full_audio = gr.Audio(label="Final Dubbed Audio", type="filepath", interactive=False)
+
+        def step1_fn(audio_file, target_language):
+            hf_audio = handle_file(audio_file)
+            logger.info("Calling /process_audio_pipeline_step1")
+            out = client.predict(audio_file=hf_audio, target_lang=target_language,
+                                 api_name="/process_audio_pipeline_step1")
+            logger.info("Step1 result: %s", out)
+
+            status_text = out[0]
+            chunks = out[1]  # assuming this is the chunk info list
+            first = chunks[0] if chunks else {}
+            return (status_text, chunks, 0,
+                    f"Chunk 1 of {len(chunks)}",
+                    first.get('original_audio'), first.get('transcription'),
+                    first.get('translation'), first.get('dubbed_audio'),
+                    f"0 of {len(chunks)} chunks finalized",
+                    gr.update(visible=True), gr.update(visible=True))
+
+        def navigate_fn(idx, chunks, trans, tr, direction):
+            # Save any edits to the current chunk before moving
+            if chunks and 0 <= idx < len(chunks):
+                chunks[idx]['transcription'] = trans
+                chunks[idx]['translation'] = tr
+            if not chunks:
+                return idx, "No chunks", None, "", "", None, "0 of 0 chunks finalized"
+            new_idx = max(0, min(len(chunks) - 1, idx + direction))
+            chunk = chunks[new_idx]
+            finalized = sum(1 for c in chunks if c.get('finalized', False))
+            return (new_idx, f"Chunk {new_idx + 1} of {len(chunks)}",
+                    chunk.get('original_audio'), chunk.get('transcription'),
+                    chunk.get('translation'), chunk.get('dubbed_audio'),
+                    f"{finalized} of {len(chunks)} chunks finalized")
+
+        def gen_chunk_fn(idx, chunks, trans, tr):
+            logger.info("Calling /generate_dubbed_chunk")
+            dubbed = client.predict(transcription=trans, translation=tr,
+                                    api_name="/generate_dubbed_chunk")
+            chunks[idx]['dubbed_audio'] = dubbed
+            return dubbed, chunks
+
+        def finalize_fn(idx, chunks):
+            if chunks and chunks[idx].get('dubbed_audio'):
+                chunks[idx]['finalized'] = True
+            finalized = sum(1 for c in chunks if c.get('finalized', False))
+            return chunks, f"{finalized} of {len(chunks)} chunks finalized"
+
+        def merge_fn(chunks):
+            logger.info("Calling /merge_audio_files")
+            out = client.predict(api_name="/merge_audio_files")
+            return out[0], out[1]
+
+        # Bind events
+        process_btn.click(step1_fn, [audio_input, lang_dropdown],
+                          [status_box, chunk_info_state, current_index_state,
+                           chunk_counter, original_audio, transcription_text, translation_text,
+                           dubbed_audio, progress_text, edit_section, merge_btn])
+        prev_btn.click(lambda idx, chunks, trans, tr: navigate_fn(idx, chunks, trans, tr, -1),
+                       [current_index_state, chunk_info_state, transcription_text, translation_text],
+                       [current_index_state, chunk_counter, original_audio, transcription_text,
+                        translation_text, dubbed_audio, progress_text])
+        next_btn.click(lambda idx, chunks, trans, tr: navigate_fn(idx, chunks, trans, tr, 1),
+                       [current_index_state, chunk_info_state, transcription_text, translation_text],
+                       [current_index_state, chunk_counter, original_audio, transcription_text,
+                        translation_text, dubbed_audio, progress_text])
+
+        gen_btn.click(gen_chunk_fn, [current_index_state, chunk_info_state, transcription_text, translation_text],
+                      [dubbed_audio, chunk_info_state])
+        final_btn.click(finalize_fn, [current_index_state, chunk_info_state],
+                        [chunk_info_state, progress_text])
+        merge_btn.click(merge_fn, [chunk_info_state], [final_output, full_audio])
+
+    return demo

 if __name__ == "__main__":
-    …
-        interface.launch(show_error=True, share=False, server_name="0.0.0.0", server_port=7860)
-    else:
-        logger.error("Failed to create interface")
+    ui = create_dubsync_interface()
+    ui.launch(show_error=True, share=False, server_name="0.0.0.0", server_port=7860)
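As a quick check of the backend before relying on the UI above, here is a minimal sketch of calling the Space's step-1 endpoint directly with gradio_client. The Space name, endpoint name, and keyword arguments come from the diff; the local file path and the assumption that the result is a (status, chunk list) pair are illustrative only.

# Sketch: smoke-test the backend Space outside the Blocks UI.
# Assumes HUGGINGFACE_TOKEN is set and that /process_audio_pipeline_step1
# returns (status_text, chunk_info_list), as step1_fn above expects.
import os
from gradio_client import Client, handle_file

client = Client("Tamiloneto8/Test1", hf_token=os.getenv("HUGGINGFACE_TOKEN"))
result = client.predict(
    audio_file=handle_file("sample.wav"),  # hypothetical local test file
    target_lang="Hindi",
    api_name="/process_audio_pipeline_step1",
)
status_text, chunks = result[0], result[1]
print(status_text)
print(f"{len(chunks)} chunks returned")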
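The Previous/Next bindings above bake a constant direction into navigate_fn with a lambda, since Gradio has no literal-input helper. functools.partial is an equivalent option; below is a self-contained sketch of that pattern using a hypothetical step counter rather than the DubSync components.

# Sketch: passing a fixed extra argument to a Gradio click handler via functools.partial.
from functools import partial
import gradio as gr

def step(index, direction):
    # Move the index by the amount baked in with partial().
    return index + direction

with gr.Blocks() as demo:
    index_box = gr.Number(value=0, label="Index")
    prev_b = gr.Button("Previous")
    next_b = gr.Button("Next")
    prev_b.click(partial(step, direction=-1), index_box, index_box)
    next_b.click(partial(step, direction=1), index_box, index_box)

if __name__ == "__main__":
    demo.launch()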
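step1_fn reveals the hidden editing row and the merge button by listing them as outputs and returning gr.update(visible=True) for each. A self-contained sketch of that show-on-success pattern, with a hypothetical placeholder row rather than the DubSync layout:

# Sketch: revealing a hidden layout block from an event handler with gr.update(visible=True).
import gradio as gr

def reveal():
    return "Processing complete", gr.update(visible=True)

with gr.Blocks() as demo:
    start_btn = gr.Button("Start Processing")
    status = gr.Textbox(label="Status", interactive=False)
    with gr.Row(visible=False) as hidden_row:
        gr.Markdown("This section appears only after processing finishes.")
    start_btn.click(reveal, None, [status, hidden_row])

if __name__ == "__main__":
    demo.launch()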