
Commit bba280b

Merge pull request #152 from intel/update-branch
chores: update default model precision in text to image generation microservice (#406)
2 parents b10ae25 + e74e59b commit bba280b

File tree: 6 files changed (+11, -14 lines)

usecases/ai/microservices/text-to-image/flux-dev/backend/server.py

Lines changed: 1 addition & 2 deletions
@@ -2,7 +2,6 @@
 # SPDX-License-Identifier: Apache-2.0

 import time
-import openvino.runtime as ov_runtime
 import openvino_genai as ov_genai
 import openvino as ov
 import torch
@@ -105,7 +104,7 @@ def convert_models(self):
     @staticmethod
     def get_device(user_device=None):
         try:
-            ov_core = ov_runtime.Core()
+            ov_core = ov.Core()
             available_devices = [device.upper() for device in ov_core.available_devices]  # Normalize device names
             print(f"Available devices: {available_devices}")
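Aside from the precision change, each server.py drops the openvino.runtime import in favor of the top-level openvino namespace, which is the direction recent OpenVINO releases recommend. A minimal standalone sketch of the same device-probe pattern, assuming only the ov.Core() calls visible in this diff (the CPU fallback is an assumption, not the project's actual logic):

import openvino as ov

def pick_device(user_device=None):
    core = ov.Core()
    available = [d.upper() for d in core.available_devices]  # normalize names, e.g. ["CPU", "GPU"]
    print(f"Available devices: {available}")
    if user_device and user_device.upper() in available:
        return user_device.upper()
    return "CPU"  # assumed fallback; the real get_device() fallback is not shown in this diff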

usecases/ai/microservices/text-to-image/flux-schnell/backend/client.py

Lines changed: 3 additions & 3 deletions
@@ -98,9 +98,9 @@ def main():
         "/pipeline/run",
         {
             "prompt": "A raccoon trapped inside a glass jar full of colorful candies, the background is steamy with vivid colors",
-            "width": 512,  # Additional parameter: width
-            "height": 512,  # Additional parameter: height
-            "num_inference_steps": 5  # Additional parameter: num_inference_steps
+            "width": 256,  # Additional parameter: width
+            "height": 256,  # Additional parameter: height
+            "num_inference_steps": 1  # Additional parameter: num_inference_steps
         },
     )
     if not response:
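The client sample now requests a 256x256 image with a single inference step instead of 512x512 with 5 steps. A hedged sketch of an equivalent standalone request; the base URL and port are assumptions, and the real client.py wraps the call in its own helper:

import requests

payload = {
    "prompt": "A raccoon trapped inside a glass jar full of colorful candies, "
              "the background is steamy with vivid colors",
    "width": 256,                # new default width
    "height": 256,               # new default height
    "num_inference_steps": 1,    # new default step count
}
# "http://localhost:8100" is an assumed base URL for the microservice.
response = requests.post("http://localhost:8100/pipeline/run", json=payload)
print(response.status_code)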

usecases/ai/microservices/text-to-image/flux-schnell/backend/server.py

Lines changed: 2 additions & 3 deletions
@@ -2,7 +2,6 @@
 # SPDX-License-Identifier: Apache-2.0

 import time
-import openvino.runtime as ov_runtime
 import openvino_genai as ov_genai
 import openvino as ov
 import torch
@@ -97,15 +96,15 @@ def convert_models(self):
         if not self.model_dir.exists():
             print(f"Downloading model: {self.model_name} to {self.model_dir}...")
             additional_args = {}
-            additional_args.update({"weight-format": "int4", "group-size": "64", "ratio": "1.0"})
+            additional_args.update({"weight-format": "int8", "group-size": "64", "ratio": "1.0"})
             optimum_cli(self.model_name, self.model_dir, additional_args=additional_args)
             # optimum_cli(self.model_name, self.model_dir)
             print("Model conversion completed.")

     @staticmethod
     def get_device(user_device=None):
         try:
-            ov_core = ov_runtime.Core()
+            ov_core = ov.Core()
             available_devices = [device.upper() for device in ov_core.available_devices]  # Normalize device names
             print(f"Available devices: {available_devices}")
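This is the precision change the commit title refers to: the flux-schnell weights are now exported with int8 weight compression instead of int4. If the repository's optimum_cli() helper forwards these keys as optimum-cli export openvino flags, the new default would correspond roughly to the sketch below; the model id and output path are placeholders, not values taken from this diff:

import subprocess

model_name = "black-forest-labs/FLUX.1-schnell"  # assumed model id for flux-schnell
model_dir = "./models/flux-schnell"              # assumed output directory

# Sketch of an int8 export; the project's optimum_cli() wrapper is not shown here,
# so this mirrors the flags from additional_args rather than the actual helper code.
subprocess.run(
    [
        "optimum-cli", "export", "openvino",
        "--model", model_name,
        "--weight-format", "int8",  # changed from int4 in this commit
        "--group-size", "64",
        "--ratio", "1.0",
        model_dir,
    ],
    check=True,
)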

usecases/ai/microservices/text-to-image/stable-diffusion-v3.5/backend/server.py

Lines changed: 1 addition & 2 deletions
@@ -2,7 +2,6 @@
 # SPDX-License-Identifier: Apache-2.0

 import time
-import openvino.runtime as ov_runtime
 import openvino_genai as ov_genai
 import openvino as ov
 import torch
@@ -102,7 +101,7 @@ def convert_models(self):
     @staticmethod
     def get_device(user_device=None):
         try:
-            ov_core = ov_runtime.Core()
+            ov_core = ov.Core()
             available_devices = [device.upper() for device in ov_core.available_devices]  # Normalize device names
             print(f"Available devices: {available_devices}")

usecases/ai/microservices/text-to-image/stable-diffusion-v3/backend/server.py

Lines changed: 2 additions & 2 deletions
@@ -8,7 +8,7 @@
 from pydantic import BaseModel

 import torch
-import openvino.runtime as ov_runtime
+import openvino as ov

 from sd3_helper import (
     get_pipeline_options,
@@ -106,7 +106,7 @@ def convert_models(self):
     @staticmethod
     def get_device(user_device=None):
         try:
-            ov_core = ov_runtime.Core()
+            ov_core = ov.Core()
             available_devices = [device.upper() for device in ov_core.available_devices]  # Normalize device names
             print(f"Available devices: {available_devices}")

usecases/ai/microservices/text-to-image/stable-diffusion-xl/backend/server.py

Lines changed: 2 additions & 2 deletions
@@ -2,7 +2,7 @@
 # SPDX-License-Identifier: Apache-2.0

 import time
-import openvino.runtime as ov_runtime
+import openvino as ov
 import random
 import torch
 import gc
@@ -105,7 +105,7 @@ def convert_models(self):
     @staticmethod
     def get_device(user_device=None):
         try:
-            ov_core = ov_runtime.Core()
+            ov_core = ov.Core()
             available_devices = [device.upper() for device in ov_core.available_devices]  # Normalize device names
             print(f"Available devices: {available_devices}")
