# Musica / app.py
import streamlit as st
from PIL import Image, ImageDraw
import numpy as np
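
# The imports below are needed by the code in this file but were missing from
# the original; the exact source packages (e.g. fuzzywuzzy/thefuzz for
# `process.extract`, midi2audio for FluidSynth, IPython for audio playback)
# are assumptions inferred from the calls used further down.
import random

import torch
import matplotlib.pyplot as plt

from fuzzywuzzy import process        # fuzzy title search
from midi2audio import FluidSynth     # MIDI -> WAV rendering via a SoundFont
from IPython.display import display, Audio

import TMIDIX                         # Tegridy MIDI processing tools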
#@title Standard/Simple Continuation
#@markdown Text-To-Music Settings
#@markdown NOTE: You can enter any desired title or artist, or both
enter_desired_song_title = "Family Guy" #@param {type:"string"}
enter_desired_artist = "TV Themes" #@param {type:"string"}
#@markdown Generation Settings
number_of_tokens_to_generate = 426 #@param {type:"slider", min:30, max:2046, step:33}
number_of_batches_to_generate = 4 #@param {type:"slider", min:1, max:16, step:1}
temperature = 0.9 #@param {type:"slider", min:0.1, max:1, step:0.1}
allow_model_to_stop_generation_if_needed = False #@param {type:"boolean"}
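# NOTE: `AUX_DATA` (per-title [title, velocity_map, seed_tokens] records) and the
# Euterpe X `model` are assumed to be loaded elsewhere before this code runs;
# they are not defined in this file.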
print('=' * 70)
print('Euterpe X TTM Model Generator')
print('=' * 70)
print('Searching titles...Please wait...')
random.shuffle(AUX_DATA)
titles_index = []
for A in AUX_DATA:
    titles_index.append(A[0])
search_string = ''
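# The AUX_DATA titles appear to follow a 'Title --- Artist' format, so the query
# mirrors it when both fields are provided.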
if enter_desired_song_title != '' and enter_desired_artist != '':
    search_string = enter_desired_song_title + ' --- ' + enter_desired_artist
else:
    search_string = enter_desired_song_title + enter_desired_artist
search_match = process.extract(query=search_string, choices=titles_index, limit=1)
search_index = titles_index.index(search_match[0][0])
print('Done!')
print('=' * 70)
print('Selected title:', AUX_DATA[search_index][0])
print('=' * 70)
if allow_model_to_stop_generation_if_needed:
    min_stop_token = 3343  # end-of-song token id used for early stopping
else:
    min_stop_token = None
# Velocities
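# Default per-channel velocities; any non-zero entries in the selected title's
# velocity map below override these defaults.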
velocities_map = [80, 80, 70, 100, 90, 80, 100, 100, 100, 90, 110, 100]
vel_map = AUX_DATA[search_index][1]
for i in range(12):
    if vel_map[i] != 0:
        velocities_map[i] = vel_map[i]
# Loading data...
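# `outy` is the selected title's seed (prime) token sequence; `block_marker`
# converts its delta-time tokens (< 256, in 8 ms steps) to seconds and marks
# where the generated continuation starts on the plot below.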
outy = AUX_DATA[search_index][2][3:]
block_marker = sum([(y * 8) for y in outy if y < 256]) / 1000
inp = [outy] * number_of_batches_to_generate
inp = torch.LongTensor(inp).cuda()
out = model.module.generate(inp,
                            number_of_tokens_to_generate,
                            temperature=temperature,
                            return_prime=True,
                            eos_token=min_stop_token,
                            verbose=True)
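# With return_prime=True the output contains the seed followed by the generated
# continuation; convert to plain Python lists, one token sequence per batch item.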
out0 = out.tolist()
print('=' * 70)
print('Done!')
print('=' * 70)
#======================================================================
print('Rendering results...')
for i in range(number_of_batches_to_generate):

    print('=' * 70)
    print('Batch #', i)
    print('=' * 70)

    out1 = out0[i]

    print('Sample INTs', out1[:12])
    print('=' * 70)

    if len(out) != 0:

        song = out1
        song_f = []

        time = 0
        dur = 0
        channel = 0
        pitch = 0
        vel = 90

        # Decode the token stream into ['note', time, dur, channel, pitch, vel]
        # events. Ranges follow from the arithmetic below:
        #   1-255                         -> delta-time (8 ms steps)
        #   256 .. 256+12*128-1           -> duration ((value-256) % 128, 30 ms steps)
        #   256+12*128 .. 256+2*12*128-1  -> channel/pitch; the note is emitted here
        for ss in song:

            if ss > 0 and ss < 256:
                time += ss * 8

            if ss >= 256 and ss < 256 + (12 * 128):
                dur = ((ss - 256) % 128) * 30

            if ss >= 256 + (12 * 128) and ss < 256 + (12 * 128) + (12 * 128):
                channel = (ss - (256 + (12 * 128))) // 128
                pitch = (ss - (256 + (12 * 128))) % 128
                vel = velocities_map[channel]

                song_f.append(['note', time, dur, channel, pitch, vel])

        # Write the decoded events to a MIDI file.
        detailed_stats = TMIDIX.Tegridy_SONG_to_MIDI_Converter(song_f,
                                                               output_signature='Euterpe X',
                                                               output_file_name='/content/Euterpe-X-Music-Composition_' + str(i),
                                                               track_name='Project Los Angeles',
                                                               list_of_MIDI_patches=[0, 24, 32, 40, 42, 46, 56, 71, 73, 0, 53, 19, 0, 0, 0, 0],
                                                               number_of_ticks_per_quarter=500)

    print('=' * 70)
    print('Displaying resulting composition...')
    print('=' * 70)

    fname = '/content/Euterpe-X-Music-Composition_' + str(i)

    # Build a simple piano-roll scatter plot: x = note start time (seconds),
    # y = pitch, color = channel.
    x = []
    y = []
    c = []

    colors = ['red', 'yellow', 'green', 'cyan', 'blue', 'pink', 'orange', 'purple', 'gray', 'white', 'gold', 'silver']

    for s in song_f:
        x.append(s[1] / 1000)
        y.append(s[4])
        c.append(colors[s[3]])

    # Render the written MIDI file to WAV and play it back.
    FluidSynth("/usr/share/sounds/sf2/FluidR3_GM.sf2", 16000).midi_to_audio(str(fname + '.mid'), str(fname + '.wav'))
    display(Audio(str(fname + '.wav'), rate=16000))

    plt.figure(figsize=(14, 5))
    ax = plt.axes(title=fname)
    ax.set_facecolor('black')
    plt.scatter(x, y, c=c)
    ax.axvline(x=block_marker, c='w')  # marks the end of the seed/prime section
    plt.xlabel("Time")
    plt.ylabel("Pitch")
    plt.show()