Commit 48ca759
Parent(s): 3e02820

Update app.py
app.py  CHANGED

@@ -158,6 +158,7 @@ def infer(
         en_bert = en_bert.to(device).unsqueeze(0)
         x_tst_lengths = torch.LongTensor([phones.size(0)]).to(device)
         emo = emo.to(device).unsqueeze(0)
+        print(emo)
         del phones
         speakers = torch.LongTensor([hps.data.spk2id[sid]]).to(device)
         audio = (
@@ -203,7 +204,7 @@ if __name__ == "__main__":
     emotional_model = EmotionModel.from_pretrained(emotional_model_name).to(device)
     hps = utils.get_hparams_from_file('Data/BanGDream/configs/config.json')
     net_g = get_net_g(
-        model_path='Data/
+        model_path='Data/BangDream/models/G_49000.pth', version="2.1", device=device, hps=hps
     )
     speaker_ids = hps.data.spk2id
     speakers = list(speaker_ids.keys())
@@ -222,14 +223,14 @@ if __name__ == "__main__":
                                 with gr.Row():
                                     gr.Markdown(
                                         '<div align="center">'
-                                        f'<img style="width:auto;height:400px;" src="file/image/{name}.png">'
+                                        f'<img style="width:auto;height:400px;" src="https://mahiruoshi-bangdream-bert-vits2.hf.space/file/image/{name}.png">'
                                         '</div>'
                                     )
                                 length_scale = gr.Slider(
                                         minimum=0.1, maximum=2, value=1, step=0.01, label="语速调节"
                                     )
                                 emotion = gr.Slider(
-                                    minimum
+                                    minimum=-10, maximum=10, value=0, step=0.1, label="Emotion"
                                 )
                                 with gr.Accordion(label="参数设定", open=False):
                                     sdp_ratio = gr.Slider(

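The image change swaps the relative file/image/{name}.png path for the Space's absolute URL, presumably so the <img> tag resolves when the Markdown is rendered outside the app's own origin. A small sketch of the string being built; the speaker name below is an illustrative placeholder, not a value taken from the repo.

# Sketch of the f-string on the new image line; only the URL comes from the diff.
SPACE_URL = "https://mahiruoshi-bangdream-bert-vits2.hf.space"
name = "some_speaker"  # hypothetical speaker/character name
html = (
    '<div align="center">'
    f'<img style="width:auto;height:400px;" src="{SPACE_URL}/file/image/{name}.png">'
    '</div>'
)
print(html)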