update
- app.py +7 -8
- app_mnnsr.py +1 -3
- mnnsr.py +25 -15
- pth2onnx.py +6 -5
app.py CHANGED

@@ -282,7 +282,7 @@ with gr.Blocks() as demo:
             # Add the fp16 and try_run checkboxes
             fp16 = gr.Checkbox(label="FP16", value=False)
             onnxsim = gr.Checkbox(label="ONNX export simplify model", value=False)
-            opset = gr.Number(label="ONNX export opset version", value=
+            opset = gr.Number(label="ONNX export opset version, suggest 9/11/13/16/17/18", value=13, precision=0)
             try_run = gr.Checkbox(label="MNNSR test", value=False)
             convert_btn = gr.Button("Run")
         with gr.Column():

@@ -329,19 +329,18 @@ with gr.Blocks() as demo:
             )

             if not model_input:
-                log +=
+                log += 'Model file download failed\n'
                 print_log(task_counter, f'Model file download', 'failed')
                 yield None, None, log, None
                 return

-            log += f'
+            log += f'Model file downloaded to: {model_input}\n'
             print_log(task_counter, f'Model file downloaded to: {model_input}', 'done')
             yield None, None, log, None
         elif input_type == model_type_opt[1] and file_input:
             model_input = file_input
         else:
-
-            log = '\nPlease select an input type and provide a valid input!'
+            log = 'Please select an input type and provide a valid input!\n'
             yield None, None, log, None
             return

@@ -361,10 +360,10 @@ with gr.Blocks() as demo:
         if mnn_path:
             if try_run:
                 print_log(task_counter, f'Test model: {mnn_path}', 'start')
-                processed_image_np = modelTest_for_gradio(mnn_path, "./sample.jpg", int(tilesize), 0)
+                processed_image_np, load_time, infer_time = modelTest_for_gradio(mnn_path, "./sample.jpg", int(tilesize), 0)
                 processed_image_pil = Image.fromarray(cv2.cvtColor(processed_image_np, cv2.COLOR_BGR2RGB))
                 # processed_image_pil = Image.fromarray(processed_image_np)
-                yield onnx_path, mnn_path, log+process_log, processed_image_pil
+                yield onnx_path, mnn_path, log+process_log+f"MNNSR model load took {load_time:.4f} s, inference ({tilesize} px) took {infer_time:.4f} s", processed_image_pil
             else:
                 yield onnx_path, mnn_path, log+process_log, None
             return

@@ -373,7 +372,7 @@ with gr.Blocks() as demo:
         process_model,
         inputs=[input_type, url_input, file_input, tilesize, fp16, onnxsim, opset, try_run],
         outputs=[onnx_output, mnn_output, log_box, img_output],
-        api_name="
+        api_name="convert_nmm_model"
     )

     # Move the examples to the bottom and wrap them in a Column component
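Because the convert endpoint now carries an explicit api_name, it can be called programmatically with gradio_client. The sketch below is an assumption-laden illustration, not part of this commit: the Space id and model URL are placeholders, the "URL" choice is an assumed option label, and the positional arguments simply mirror the inputs= list above (input_type, url_input, file_input, tilesize, fp16, onnxsim, opset, try_run).

# Minimal sketch: call the /convert_nmm_model endpoint via gradio_client.
# "user/space-name" and the model URL are placeholders (assumptions).
from gradio_client import Client

client = Client("user/space-name")           # placeholder Space id
result = client.predict(
    "URL",                                   # input_type (assumed option label)
    "https://example.com/model-x4.pth",      # url_input (placeholder)
    None,                                    # file_input
    64,                                      # tilesize
    False,                                   # fp16
    False,                                   # onnxsim
    13,                                      # opset
    True,                                    # try_run
    api_name="/convert_nmm_model",
)
onnx_file, mnn_file, log_text, preview = result   # matches the four outputs above
print(log_text)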
app_mnnsr.py CHANGED

@@ -7,10 +7,8 @@ from PIL import Image
 from mnnsr import modelTest_for_gradio

 def gradio_interface(modelPath, input_image):
-    processed_image_np = modelTest_for_gradio(modelPath, input_image)
-
+    processed_image_np, load_time, infer_time = modelTest_for_gradio(modelPath, input_image)
     processed_image_pil = Image.fromarray(cv2.cvtColor(processed_image_np, cv2.COLOR_BGR2RGB))
-
     return processed_image_pil

 # Create the Gradio interface
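Note that gradio_interface now unpacks the two timing values but still returns only the image. If the timings should also appear in this standalone UI, one option (a sketch, not part of this commit, reusing only functions already shown above) is to return a formatted string as a second output:

# Sketch (assumed extension, not in this commit): also surface the timings.
def gradio_interface_with_timing(modelPath, input_image):
    processed_image_np, load_time, infer_time = modelTest_for_gradio(modelPath, input_image)
    processed_image_pil = Image.fromarray(cv2.cvtColor(processed_image_np, cv2.COLOR_BGR2RGB))
    timing = f"load {load_time:.4f} s, inference {infer_time:.4f} s"
    return processed_image_pil, timing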
mnnsr.py CHANGED

@@ -4,6 +4,7 @@ import MNN
 import numpy as np
 import cv2
 from PIL import Image
+import time


 # Copy of the necessary functions from the original modelTest function

@@ -64,11 +65,14 @@ def modelTest_for_gradio(modelPath, image_path, tilesize = 0, backend = 3):
     model_name = os.path.basename(modelPath)
     if "-Grayscale" in model_name:
         model_channel = 1
-    elif "-4ch" in model_name:
+    elif "-4ch" in model_name or "RGBA" in model_name:
         model_channel = 4
     else:
         model_channel = 3

+    # Record the model load start time
+    load_start_time = time.time()
+    # Load the model (within the timed region)
     net = MNN.Interpreter(modelPath)
     # set 9 for Session_Backend_Auto, Let BackGround Tuning
     net.setSessionMode(9)

@@ -79,8 +83,14 @@ def modelTest_for_gradio(modelPath, image_path, tilesize = 0, backend = 3):
     config['backend'] = backend
     #config['precision'] = "low"
     session = net.createSession(config)
-
     print("Run on backendtype: %d \n" % net.getSessionInfo(session, 2))
+    inputTensor = net.getSessionInput(session)
+    net.resizeTensor(inputTensor, (1, model_channel, tilesize, tilesize))
+    net.resizeSession(session)
+    # Compute the model load time
+    load_time = time.time() - load_start_time
+    print(f"Load mnn model: {load_time:.4f} sec")
+
     # Read the image
     image = cv2.imread(image_path)
     if image.ndim == 2:

@@ -93,6 +103,9 @@ def modelTest_for_gradio(modelPath, image_path, tilesize = 0, backend = 3):

     image = cv2.resize(image, (tilesize, tilesize))

+    # Record the inference start time
+    infer_start_time = time.time()
+
     # Handle mismatched channel counts
     if image_channel == 3:
         if model_channel == 1:

@@ -119,23 +132,17 @@ def modelTest_for_gradio(modelPath, image_path, tilesize = 0, backend = 3):
     # display(Image(data=cv2.imencode('.jpg', image)[1].tobytes()))
     # print("image.shape=", image.shape)
     image = image/255.0
-    #preprocess it
     if model_channel>=3:
         image = image.transpose((2, 0, 1))
-    #change numpy data type as np.float32 to match tensor's format
     image = image.astype(np.float32)

     tmp_input = MNN.Tensor((1, model_channel, tilesize, tilesize), MNN.Halide_Type_Float, image, MNN.Tensor_DimensionType_Caffe)
-
-
-
-    net.resizeTensor(inputTensor, (1, model_channel, tilesize, tilesize))
-    net.resizeSession(session)
-    inputTensor.copyFrom(tmp_input)
-    # infer
+    inputTensor.copyFrom(tmp_input)
+
+    # Run inference
     net.runSession(session)
+
     outputTensor = net.getSessionOutput(session)
-    # output
     outputShape = outputTensor.getShape()
     print("outputShape",outputShape)
     outputHost = createTensor(outputTensor)

@@ -143,8 +150,11 @@ def modelTest_for_gradio(modelPath, image_path, tilesize = 0, backend = 3):

     outimage = process_image(outputHost.getData(), outputShape[2], outputShape[3], outputShape[1], color='RGB')

-    #
-
+    # Compute the inference time
+    infer_time = time.time() - infer_start_time
+    print(f"Infer latency time: {infer_time:.4f} sec")
+    # Return the image, load time, and inference time
+    return outimage, load_time, infer_time

 # def gradio_interface(modelPath, input_image):
 #     processed_image_np = modelTest_for_gradio(modelPath, input_image)
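With the new return signature, modelTest_for_gradio can also be driven directly from a script. A minimal sketch, assuming placeholder paths for the .mnn model and the test image (the output is handled as a BGR array, as app.py does):

# Minimal sketch: run a converted model directly and report the new timings.
# "model-x4.mnn" and "sample.jpg" are placeholder paths (assumptions).
import cv2
from mnnsr import modelTest_for_gradio

out_bgr, load_time, infer_time = modelTest_for_gradio("model-x4.mnn", "sample.jpg", tilesize=64, backend=0)
cv2.imwrite("sample_sr.jpg", out_bgr)   # treated as BGR, as in app.py
print(f"load {load_time:.4f} s, inference {infer_time:.4f} s")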
pth2onnx.py CHANGED

@@ -70,7 +70,7 @@ def convert_pth_to_onnx(pth_path: str, onnx_path: str=None, channel:int=0, tiles
     filename = os.path.basename(pth_path).upper()
     pattern = f'(^|[_-])({scale}X|X{scale})([_-]|$)'
     if re.search(pattern, filename):
-        print(f'
+        print(f'File name contains scale info: {filename} ')
     else:
         base_path = f"{base_path}-x{scale}"
     # print("final use_fp16", str(use_fp16) )

@@ -81,8 +81,9 @@ def convert_pth_to_onnx(pth_path: str, onnx_path: str=None, channel:int=0, tiles
     elif output_folder:
         onnx_path = os.path.join(output_folder, onnx_path)

-    print(f"output_folder: {output_folder}, onnx_path: {onnx_path}")
+    # print(f"output_folder: {output_folder}, onnx_path: {onnx_path}")

+    print(f"ONNX model exporting...")
     try:
         # Export the model
         torch.onnx.export(

@@ -99,7 +100,7 @@ def convert_pth_to_onnx(pth_path: str, onnx_path: str=None, channel:int=0, tiles
                 "output": {0: "batch_size", 2: "height", 3: "width"},# Batch, H, W can vary
             }
         )
-        print(f"ONNX export successful: {onnx_path}")
+        print(f"ONNX model export successful: {onnx_path}")

         # Optional: Simplify the ONNX model
         if simplify_model:

@@ -115,11 +116,11 @@ def convert_pth_to_onnx(pth_path: str, onnx_path: str=None, channel:int=0, tiles
             return onnx_path

         os.remove(onnx_path)
-        print(f"
+        print(f"ONNX model has unexpected file size ({file_size} bytes), deleted invalid file")
         return ""

     except Exception as e:
-        print(f"
+        print(f"ONNX model export error: {e}")
         return ""

 if __name__ == "__main__":
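Since convert_pth_to_onnx returns the ONNX path on success and an empty string on failure, callers can branch on the return value. A minimal sketch, assuming a placeholder checkpoint name and the default keyword arguments:

# Minimal sketch: convert a checkpoint and check the result.
# "RealESRGAN-x4.pth" is a placeholder path; other arguments use their defaults.
from pth2onnx import convert_pth_to_onnx

onnx_path = convert_pth_to_onnx("RealESRGAN-x4.pth")
if onnx_path:
    print("exported:", onnx_path)
else:
    print("export failed, see log above")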