import gzip

import msgpack
import numpy as np
import onnxruntime
from flask import Flask, Response, request

app = Flask(__name__)

# Load the ONNX model once at startup; inference runs on the CPU.
ort_session = onnxruntime.InferenceSession(
    "super_resolution.onnx", providers=["CPUExecutionProvider"]
)

@app.route('/')
def hello_world():
    # Simple liveness check.
    return 'hello world'

@app.route('/post', methods=['POST'])
def register():
    # The client sends a gzip-compressed msgpack payload whose
    # "spectral_data_bin" field holds a raw uint16 spectral frame.
    payload = msgpack.unpackb(gzip.decompress(request.data))

    # Rebuild the frame and add the batch/channel axes the model expects.
    frame = np.frombuffer(payload["spectral_data_bin"], dtype=np.uint16).reshape(1, 1, 224, 512)

    # Scale the 12-bit sensor values to [0, 1] and run ONNX inference.
    ort_inputs = {ort_session.get_inputs()[0].name: frame.astype(np.float32) / 4095}
    output = ort_session.run(None, ort_inputs)[0]

    # Map the two normalised regression outputs back to their physical ranges:
    # temperature in [1516, 1692] and carbon content in [0.062, 0.915].
    result = {
        "temp": float(output[0, 0] * (1692 - 1516) + 1516),
        "C": float(output[0, 1] * (0.915 - 0.062) + 0.062),
    }

    # Reply the same way the request arrived: msgpack-encoded, then gzipped.
    response = Response()
    response.data = gzip.compress(msgpack.packb(result))
    return response

if __name__ == '__main__':
    app.run(debug=True, host="0.0.0.0", port=22111)
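
# -----------------------------------------------------------------------------
# Minimal client sketch (kept as a comment so it never runs with the server).
# It only illustrates the framing this endpoint expects -- gzip-compressed
# msgpack in both directions. The random 224x512 frame is placeholder data,
# not a real spectrum, and the host/port simply mirror app.run() above.
#
#   import gzip
#   import msgpack
#   import numpy as np
#   import requests
#
#   frame = np.random.randint(0, 4096, size=(224, 512), dtype=np.uint16)
#   body = gzip.compress(msgpack.packb({"spectral_data_bin": frame.tobytes()}))
#   r = requests.post("http://127.0.0.1:22111/post", data=body)
#   print(msgpack.unpackb(gzip.decompress(r.content)))   # {'temp': ..., 'C': ...}
# -----------------------------------------------------------------------------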