# VectorizeAnything/demo/generation.py
import json
import os
import time
import gradio as gr
import requests
from demo.log import logger
from demo.util import download_svgs, upload_np_2_oss, download_images
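# API_KEY_GENERATION must be set in the environment; it is sent as a Bearer token
# in every request below.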
API_KEY = os.getenv("API_KEY_GENERATION")
def convert_bool_to_str(value):
    """Encode a boolean as the "True"/"False" string expected by the API parameters."""
    return "True" if value else "False"
def call_generation(input_path,
preprocess,
simplify,
optimize,
mode,
subsample_ratio,
speckle_removal,
sorting_method,
sorting_order,
use_gpu):
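    # Flow: upload the input image to OSS, create an async vectorization task on
    # DashScope, poll it until it succeeds or fails, then download the result SVGs.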
## generate image name based on time stamp
time_str = time.strftime("%Y%m%d%H%M%S", time.localtime())
img_name = f"upload_{time_str}.png"
svg_name = f"result_{time_str}"
BATCH_SIZE = 1
if simplify:
BATCH_SIZE += 1
if optimize:
BATCH_SIZE += 1
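    # One SVG is returned for the base result, plus one extra for each enabled
    # post-processing step (simplify, optimize); BATCH_SIZE tracks that count.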
img_url = upload_np_2_oss(input_path, name=img_name)
simplify = convert_bool_to_str(simplify)
optimize = convert_bool_to_str(optimize)
speckle_removal = convert_bool_to_str(speckle_removal)
use_gpu = convert_bool_to_str(use_gpu)
headers = {
"Content-Type": "application/json",
"Accept": "application/json",
"Authorization": f"Bearer {API_KEY}",
"X-DashScope-Async": "enable",
}
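    # "X-DashScope-Async": "enable" makes the service return a task_id immediately;
    # results are collected later by polling the /tasks/{task_id} endpoint.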
data = {
"model": "pre-vectorize_anything-2333",
"input": {
"base_image_url": img_url
},
"parameters":{
"preprocess": preprocess,
"mode": mode,
"simplify": simplify,
"optimize": optimize,
"sorting_method": sorting_method,
"sorting_order": sorting_order,
"subsample_ratio": subsample_ratio,
"speckle_removal": speckle_removal,
"use_GPU": use_gpu
}
}
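    # Boolean options are sent as "True"/"False" strings (see convert_bool_to_str);
    # the parameter keys (e.g. use_GPU) follow the service's expected naming.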
url_create_task = 'https://poc-dashscope.aliyuncs.com/api/v1/services/vision/image-process/process'
all_res_ = []
REPEAT = 1
for _ in range(REPEAT):
try:
            logger.debug(f"Create task payload: {json.dumps(data)}")
            res_ = requests.post(url_create_task, data=json.dumps(data), headers=headers, timeout=60)
all_res_.append(res_)
except requests.Timeout:
            # Timeouts are surfaced to the user directly (no retry/backoff here).
            raise gr.Error("Network error: the request failed, please try again.")
all_image_data = []
for res_ in all_res_:
        response_code = res_.status_code
        if response_code == 200:
res = json.loads(res_.content.decode())
request_id = res['request_id']
task_id = res['output']['task_id']
logger.info(f"task_id: {task_id}: Create Vectorization I2V request success. Params: {data}")
            # Poll the async task until it finishes (the loop exits via break or raise).
            while True:
# url_query = f'https://dashscope.aliyuncs.com/api/v1/tasks/{task_id}'
url_query = f'https://poc-dashscope.aliyuncs.com/api/v1/tasks/{task_id}'
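                # Poll until task_status becomes SUCCEEDED or FAILED; on success,
                # res['output']['output_img'] holds the URLs of the generated files.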
try:
res_ = requests.post(url_query, headers=headers, timeout=60)
except requests.Timeout:
                    # Timeouts are surfaced to the user directly (no retry/backoff here).
                    raise gr.Error("Network error: the request failed, please try again.")
                response_code = res_.status_code
                if response_code == 200:
res = json.loads(res_.content.decode())
if "SUCCEEDED" == res['output']['task_status']:
logger.info(f"task_id: {task_id}: Generation task query success.")
results = res['output']
img_urls = results['output_img']
logger.info(f"task_id: {task_id}: {res}")
break
elif "FAILED" != res['output']['task_status']:
logger.debug(f"task_id: {task_id}: query result...")
time.sleep(1)
else:
                        raise gr.Error('Failed to get results from the Generation task.')
else:
                    logger.error(f'task_id: {task_id}: Failed to query task result: {res_.content}')
                    raise gr.Error("Failed to query task result.")
logger.info(f"task_id: {task_id}: download generated images.")
img_data = download_svgs(img_urls, BATCH_SIZE, svg_name)
logger.info(f"task_id: {task_id}: Generate done.")
all_image_data += img_data
else:
            logger.error(f'Failed to create Generation task: {res_.content}')
            raise gr.Error("Failed to create Generation task.")
    if len(all_image_data) != REPEAT * BATCH_SIZE:
        raise gr.Error("Generation failed.")
return all_image_data[-1:]
def call_generation_t2v(prompt,
num_imgs,
image_resolution_h,
image_resolution_w,
details,
style,
vectorize,
preprocess,
simplify,
optimize,
mode,
subsample_ratio,
speckle_removal,
sorting_method,
sorting_order,
use_gpu):
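    # Flow: submit the prompt to the async text-to-image service, poll the task
    # until it finishes, then download either the vectorized SVGs or the raw images.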
## generate image name based on time stamp
time_str = time.strftime("%Y%m%d%H%M%S", time.localtime())
# img_name = f"upload_{time_str}.png"
svg_name = f"result_{time_str}"
generate_img_name = f"generate_{time_str}"
    BATCH_SIZE = 1
    start_ind = 0
    if simplify:
        BATCH_SIZE += 1
        start_ind += 1
    if optimize:
        BATCH_SIZE += 1
        start_ind += 1
    BATCH_SIZE *= num_imgs
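    # Each generated image yields (1 + simplify + optimize) downloadable outputs, so
    # BATCH_SIZE is the total download count and start_ind points at the last
    # (presumably most processed) variant within each per-image group.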
# img_url = upload_np_2_oss(input_path, name=img_name)
# simplify = convert_bool_to_str(simplify)
# optimize = convert_bool_to_str(optimize)
# speckle_removal = convert_bool_to_str(speckle_removal)
# use_gpu = convert_bool_to_str(use_gpu)
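    # Unlike call_generation, this request passes the boolean options through as-is;
    # the "True"/"False" string conversion above is left commented out.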
headers = {
"Content-Type": "application/json",
"Accept": "application/json",
"Authorization": f"Bearer {API_KEY}",
"X-DashScope-Async": "enable",
}
data = {
"model": "pre-vectorize_anything_t2v-2352",
"input": {
"prompt": prompt
},
"parameters":{
"num_imgs" : num_imgs,
"image_resolution_h": image_resolution_h,
"image_resolution_w": image_resolution_w,
"details" : details,
"style" : style,
"vectorize" : vectorize,
"preprocess": preprocess,
"mode": mode,
"simplify": simplify,
"optimize": optimize,
"sorting_method": sorting_method,
"sorting_order": sorting_order,
"subsample_ratio": subsample_ratio,
"speckle_removal": speckle_removal,
"use_GPU": use_gpu
}
}
url_create_task = 'https://poc-dashscope.aliyuncs.com/api/v1/services/aigc/text2image/image-synthesis'
all_res_ = []
REPEAT = 1
for _ in range(REPEAT):
try:
            logger.debug(f"Create task payload: {json.dumps(data)}")
            res_ = requests.post(url_create_task, data=json.dumps(data), headers=headers, timeout=120)
all_res_.append(res_)
except requests.Timeout:
            # Timeouts are surfaced to the user directly (no retry/backoff here).
            raise gr.Error("Network error: the request failed, please try again.")
all_image_data = []
for res_ in all_res_:
        response_code = res_.status_code
        if response_code == 200:
res = json.loads(res_.content.decode())
request_id = res['request_id']
task_id = res['output']['task_id']
logger.info(f"task_id: {task_id}: Create Vectorize T2V request success. Params: {data}")
            # Poll the async task until it finishes (the loop exits via break or raise).
            while True:
# url_query = f'https://dashscope.aliyuncs.com/api/v1/tasks/{task_id}'
url_query = f'https://poc-dashscope.aliyuncs.com/api/v1/tasks/{task_id}'
try:
res_ = requests.post(url_query, headers=headers, timeout=120)
except requests.Timeout:
                    # Timeouts are surfaced to the user directly (no retry/backoff here).
                    raise gr.Error("Network error: the request failed, please try again.")
                response_code = res_.status_code
                if response_code == 200:
res = json.loads(res_.content.decode())
if "SUCCEEDED" == res['output']['task_status']:
logger.info(f"task_id: {task_id}: Generation task query success.")
results = res['output']
img_urls = results['output_img']
logger.info(f"task_id: {task_id}: {res}")
break
elif "FAILED" != res['output']['task_status']:
logger.debug(f"task_id: {task_id}: query result...")
time.sleep(1)
else:
                        raise gr.Error('Failed to get results from the Generation task.')
else:
                    logger.error(f'task_id: {task_id}: Failed to query task result: {res_.content}')
                    raise gr.Error("Failed to query task result.")
logger.info(f"task_id: {task_id}: download generated images.")
if vectorize:
img_data = download_svgs(img_urls, BATCH_SIZE, svg_name)
else:
img_data = download_images(img_urls, num_imgs, generate_img_name)
logger.info(f"task_id: {task_id}: Generate done.")
all_image_data += img_data
else:
            logger.error(f'Failed to create Generation task: {res_.content}')
            raise gr.Error("Failed to create Generation task.")
    if vectorize:
        if len(all_image_data) != REPEAT * BATCH_SIZE:
            raise gr.Error("Generation failed.")
    else:
        if len(all_image_data) != REPEAT * num_imgs:
            raise gr.Error("Generation failed.")
return all_image_data[start_ind::BATCH_SIZE//num_imgs]
if __name__ == "__main__":
    # call_generation() requires explicit arguments; see the hypothetical example below.
    pass
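    # Hypothetical smoke test (placeholder argument values, not from the original
    # code; adjust the path and options to match the demo UI):
    # call_generation(
    #     "input.png",     # input_path: local image uploaded to OSS
    #     True,            # preprocess
    #     False,           # simplify
    #     False,           # optimize
    #     "color",         # mode (assumed option name)
    #     4,               # subsample_ratio
    #     True,            # speckle_removal
    #     "stack",         # sorting_method (assumed option name)
    #     "ascending",     # sorting_order (assumed option name)
    #     False,           # use_gpu
    # )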