We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent a5ee957 commit 0376f0d — Copy full SHA for 0376f0d
app.py
@@ -25,10 +25,21 @@ def inference(model_inputs:dict) -> dict:
25
prompt = model_inputs.get('prompt', None)
26
if prompt == None:
27
return {'message': "No prompt provided"}
28
+
29
+ width = model_inputs.get('width', 512);
30
+ height = model_inputs.get('height', 512);
31
+ num_inference_steps = model_inputs.get('num_inference_steps', 50);
32
+ guidance_scale = model_inputs.get('guidance_scale', 7.5);
33
34
# Run the model
35
with autocast("cuda"):
- image = model(prompt)["sample"][0]
36
+ image = model(
37
+ prompt,
38
+ width,
39
+ height,
40
+ num_inference_steps,
41
+ guidance_scale
42
+ )["sample"][0]
43
44
buffered = BytesIO()
45
image.save(buffered,format="JPEG")
0 commit comments