generated from runpod-workers/worker-template
/
handler.py
35 lines (26 loc) · 984 Bytes
/
handler.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
""" Example handler file. """
import runpod
from diffusers import AutoPipelineForText2Image
import torch
import base64
import io
import time
# If your handler runs inference on a model, load the model here.
# You will want models to be loaded into memory before starting serverless.
# Load the model at import time so the serverless worker starts with a warm
# pipeline instead of paying the multi-second load cost on the first request.
try:
    pipe = AutoPipelineForText2Image.from_pretrained(
        "stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16"
    )
    pipe.to("cuda")
except RuntimeError as err:
    # Surface the failure reason before exiting — the original bare quit()
    # discarded the exception, leaving no clue why startup failed. quit() is
    # also a site-module convenience intended for interactive sessions;
    # raising SystemExit is the script-safe equivalent.
    print(f"Failed to load model pipeline: {err}")
    raise SystemExit(1) from err
def handler(job):
    """Process one serverless job: generate an image from a text prompt.

    Expects ``job["input"]["prompt"]`` to hold the prompt string.

    Returns:
        str: the generated PNG image, base64-encoded — on success.
        dict: ``{"error": ...}`` — when the prompt is missing (previously
        this raised a bare KeyError back to the platform).
    """
    job_input = job.get("input") or {}
    prompt = job_input.get("prompt")
    if not prompt:
        # Fail fast with a structured error instead of crashing on KeyError.
        return {"error": "Missing required field: input.prompt"}

    time_start = time.time()
    # SDXL-Turbo is distilled for single-step, guidance-free inference,
    # hence num_inference_steps=1 and guidance_scale=0.0.
    image = pipe(prompt=prompt, num_inference_steps=1, guidance_scale=0.0).images[0]
    print(f"Time taken: {time.time() - time_start}")

    # Serialize the PIL image to PNG bytes in memory, then base64-encode
    # so the result is JSON-safe for the serverless response.
    buffer = io.BytesIO()
    image.save(buffer, format="PNG")
    image_bytes = buffer.getvalue()
    return base64.b64encode(image_bytes).decode('utf-8')
runpod.serverless.start({"handler": handler})