	Upload app.py
app.py    CHANGED
@@ -170,7 +170,7 @@ def resize_img(input_image, max_side=640, min_side=512, size=None,
 @spaces.GPU
 def dress_process(garm_img, face_img, pose_img, prompt, cloth_guidance_scale, caption_guidance_scale,
                   face_guidance_scale,self_guidance_scale, cross_guidance_scale,if_ipa, if_post, if_control, denoise_steps, seed=42):
-    image_face_fusion = pipeline('face_fusion_torch', model='damo/cv_unet_face_fusion_torch',model_revision='v1.0.0')
+    # image_face_fusion = pipeline('face_fusion_torch', model='damo/cv_unet_face_fusion_torch',model_revision='v1.0.0')
     if prompt is None:
         prompt = "a photography of a model"
     prompt = prompt + ', best quality, high quality'
@@ -242,15 +242,15 @@ def dress_process(garm_img, face_img, pose_img, prompt, cloth_guidance_scale, ca
         num_inference_steps=denoise_steps,
     ).images
 
-    if if_post and if_ipa:
-
-        output_array = np.array(output[0])
-
-        bgr_array = cv2.cvtColor(output_array, cv2.COLOR_RGB2BGR)
-
-        bgr_image = Image.fromarray(bgr_array)
-        result = image_face_fusion(dict(template=bgr_image, user=Image.fromarray(face_image.astype('uint8'))))
-        return result[OutputKeys.OUTPUT_IMG]
+    # if if_post and if_ipa:
+    #
+    #     output_array = np.array(output[0])
+    #
+    #     bgr_array = cv2.cvtColor(output_array, cv2.COLOR_RGB2BGR)
+    #
+    #     bgr_image = Image.fromarray(bgr_array)
+    #     result = image_face_fusion(dict(template=bgr_image, user=Image.fromarray(face_image.astype('uint8'))))
+    #     return result[OutputKeys.OUTPUT_IMG]
     return output[0]
 
 
@@ -298,8 +298,8 @@ with image_blocks as demo:
                     outputs=imgs,
                     examples=face_list_path
                 )
-                with gr.Row():
-                    is_checked_postprocess = gr.Checkbox(label="Yes", info="Use postprocess ", value=False)
+                # with gr.Row():
+                #     is_checked_postprocess = gr.Checkbox(label="Yes", info="Use postprocess ", value=False)
 
         with gr.Column():
             pose_img = gr.Image(label="Pose", sources='upload', type="pil")
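For reference, the code this commit disables performed a ModelScope face-fusion pass over the generated image; the "Use postprocess" checkbox that drove it is commented out in the same change, so dress_process now returns output[0] directly. Below is a minimal, self-contained sketch of that post-processing step, assuming the modelscope package and the 'damo/cv_unet_face_fusion_torch' model named in the diff are available locally; face_image stands in for the user's face as a NumPy RGB array, as used in the commented-out block.

import cv2
import numpy as np
from PIL import Image
from modelscope.pipelines import pipeline
from modelscope.outputs import OutputKeys

# Load the ModelScope face-fusion pipeline that the removed line 173 created.
image_face_fusion = pipeline('face_fusion_torch',
                             model='damo/cv_unet_face_fusion_torch',
                             model_revision='v1.0.0')

def fuse_face(generated: Image.Image, face_image: np.ndarray):
    # The disabled block converts the generated RGB output to BGR before
    # handing it to the fusion pipeline.
    output_array = np.array(generated)
    bgr_array = cv2.cvtColor(output_array, cv2.COLOR_RGB2BGR)
    bgr_image = Image.fromarray(bgr_array)
    # Fuse the user's face ("user") into the generated frame ("template")
    # and return the fused image from the pipeline's output dict.
    result = image_face_fusion(dict(template=bgr_image,
                                    user=Image.fromarray(face_image.astype('uint8'))))
    return result[OutputKeys.OUTPUT_IMG]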