import torch
from transformers import pipeline
import gradio as gr

# Use the first GPU if available, otherwise fall back to the CPU.
device = 0 if torch.cuda.is_available() else -1

# Load the fine-tuned GPT-2 lyrics model as a text-generation pipeline.
pipe = pipeline("text-generation", model="harishchaurasia/gpt2-lyrics-model-fine-tuned", device=device)


def generate_lyrics(prompt, max_tokens, temperature):
    # Cast the slider value to int, and enable sampling so that temperature
    # actually affects the output (greedy decoding ignores it).
    result = pipe(prompt, max_new_tokens=int(max_tokens), temperature=temperature, do_sample=True)
    return result[0]["generated_text"]
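
# A minimal sketch of calling the generator directly (outside the Gradio UI),
# e.g. for a quick smoke test; the prompt below is just an illustrative value:
#
#     print(generate_lyrics("I feel the music", max_tokens=50, temperature=1.0))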

# Build the Gradio interface: a prompt textbox plus sliders for length and randomness.
iface = gr.Interface(
    fn=generate_lyrics,
    inputs=[
        gr.Textbox(label="Enter a lyric prompt", placeholder="e.g., I feel the music..."),
        gr.Slider(10, 200, value=50, step=1, label="Max Tokens"),
        gr.Slider(0.1, 1.5, value=1.0, label="Temperature"),
    ],
    outputs=gr.Textbox(label="Generated Lyrics"),
    title="🎵 GPT-2 Lyrics Generator",
    description="Fine-tuned on 400K+ lyrics. Give it a prompt and get some musical magic!",
)

if __name__ == "__main__":
    iface.launch()
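
# Once the app is running, it can also be queried programmatically. A rough
# sketch using gradio_client (assuming the default local URL and endpoint name):
#
#     from gradio_client import Client
#     client = Client("http://127.0.0.1:7860")
#     print(client.predict("I feel the music", 50, 1.0, api_name="/predict"))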