Skip to content

Commit 70f02c4

Browse files
[Misc][Bugfix] Fix potential bug with half vector initialization (#456)
* add proxy for GitHub download of onnxruntime * update code to fix a potential bug with half-vector initialization
1 parent cd89b65 commit 70f02c4

File tree

3 files changed

+6
-6
lines changed

3 files changed

+6
-6
lines changed

cmake/onnxruntime.cmake

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@ set(OnnxRuntime_DIR ${THIRD_PARTY_PATH}/onnxruntime)
33
# download from GitHub if the OnnxRuntime library does not exist
44
if (NOT EXISTS ${OnnxRuntime_DIR})
55
set(OnnxRuntime_Filename "onnxruntime-linux-x64-${OnnxRuntime_Version}.tgz")
6-
set(OnnxRuntime_URL https://github.com/microsoft/onnxruntime/releases/download/v1.17.1/${OnnxRuntime_Filename})
6+
set(OnnxRuntime_URL https://ghfast.top/https://github.com/microsoft/onnxruntime/releases/download/v1.17.1/${OnnxRuntime_Filename})
77
message("[Lite.AI.Toolkit][I] Downloading onnxruntime library: ${OnnxRuntime_URL}")
88
download_and_decompress(${OnnxRuntime_URL} ${OnnxRuntime_Filename} ${OnnxRuntime_DIR})
99
else()

lite/trt/sd/trt_unet.cpp

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -105,7 +105,7 @@ void TRTUNet::inference(const std::vector<std::vector<float>> &clip_output, std:
105105
std::transform(combined_embedding.begin(), combined_embedding.end(), combined_embedding_fp16.begin(),[](float f) { return __float2half(f);});
106106

107107

108-
std::vector<half> latents_fp16(latents.size(),0);
108+
std::vector<half> latents_fp16(latents.size(), __float2half(0));
109109
std::transform(latents.begin(), latents.end(), latents_fp16.begin(),[](float f) { return __float2half(f);});
110110

111111

@@ -191,7 +191,7 @@ void TRTUNet::inference(const std::vector<std::vector<float>> &clip_output, std:
191191
scheduler.step(noise_pred,noise_pred_dims, latents_fp32, noise_pred_dims,
192192
pred_sample, t);
193193

194-
std::vector<half> pred_sample_fp16(pred_sample.size(),0);
194+
std::vector<half> pred_sample_fp16(pred_sample.size(), __float2half(0));
195195
std::transform(pred_sample.begin(), pred_sample.end(),
196196
pred_sample_fp16.begin(),[](float f) { return __float2half(f);});
197197

@@ -283,7 +283,7 @@ void TRTUNet::inference(const std::vector<std::vector<float>> &clip_output, cons
283283
std::transform(combined_embedding.begin(), combined_embedding.end(), combined_embedding_fp16.begin(),[](float f) { return __float2half(f);});
284284

285285

286-
std::vector<half> latents_fp16( 2 * latents.size(),0);
286+
std::vector<half> latents_fp16( 2 * latents.size(), __float2half(0));
287287

288288
// auto latents_0 = load_binary_file("/home/lite.ai.toolkit/tensor_data.bin");
289289
// latents.insert(latents_0.end(), latents_0.begin(), latents_0.end());
@@ -378,7 +378,7 @@ void TRTUNet::inference(const std::vector<std::vector<float>> &clip_output, cons
378378
scheduler.step(noise_pred,noise_pred_dims, latents_fp32, noise_pred_dims,
379379
pred_sample, t);
380380

381-
std::vector<half> pred_sample_fp16(pred_sample.size(),0);
381+
std::vector<half> pred_sample_fp16(pred_sample.size(), __float2half(0));
382382
std::transform(pred_sample.begin(), pred_sample.end(),
383383
pred_sample_fp16.begin(),[](float f) { return __float2half(f);});
384384

lite/trt/sd/trt_vae_encoder.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,7 @@ TRTVaeEncoder::~TRTVaeEncoder() {
4646
}
4747

4848
void TRTVaeEncoder::inference(const std::vector<float> &input_images, std::vector<float> &output_latents) {
49-
std::vector<half > vae_encoder_input(input_images.size(),0);
49+
std::vector<half > vae_encoder_input(input_images.size(), __float2half(0));
5050
std::transform(input_images.begin(),input_images.end(),vae_encoder_input.begin(),
5151
[](float x){return __float2half(x);});
5252

0 commit comments

Comments
 (0)