Stable Diffusion
A text-to-image foundation model that can be adapted for a wide range of image generation tasks.
Deploy Stable Diffusion behind an API endpoint in seconds.
Example usage
The model accepts a prompt: text describing the image you want to generate. Output images tend to get better as you add more descriptive words to the prompt. The output JSON object contains a key called output, which holds the generated image as a base64-encoded string.
import requests
import os
import base64
from PIL import Image
from io import BytesIO

# Replace the empty string with your model id below
model_id = ""
baseten_api_key = os.environ["BASETEN_API_KEY"]
BASE64_PREAMBLE = "data:image/png;base64,"

# Convert a base64 string (with or without the data URI preamble) into a PIL Image
def b64_to_pil(b64_str):
    return Image.open(BytesIO(base64.b64decode(b64_str.replace(BASE64_PREAMBLE, ""))))

data = {
    "prompt": "an oil painting of a Japanese garden in autumn, with a bridge over a koi pond",
    "negative_prompt": "blurry, low quality",
    "steps": 50
}

# Call the model endpoint
res = requests.post(
    f"https://model-{model_id}.api.baseten.co/production/predict",
    headers={"Authorization": f"Api-Key {baseten_api_key}"},
    json=data
)

# Get the response from the model
res = res.json()

# Save the base64 string to a PNG image
img = b64_to_pil(res.get("output"))
img.show()
img.save("sd-v2-1.png")
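
The snippet above assumes the request succeeds. In practice it can help to check the HTTP status before parsing the body. A minimal sketch of the same call with basic error handling, reusing model_id, baseten_api_key, and data from the example; the timeout value is an assumption, not a documented requirement:

res = requests.post(
    f"https://model-{model_id}.api.baseten.co/production/predict",
    headers={"Authorization": f"Api-Key {baseten_api_key}"},
    json=data,
    timeout=120,  # assumed timeout; generation at 50 steps can take a while
)
res.raise_for_status()  # surface 4xx/5xx errors instead of failing later on res.json()
res = res.json()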
A successful call returns a JSON object like the following:

{
  "output": "iVBORw0KGgoAAAANSUhEUgAABAAAAAQACAIAAA..."
}
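
If you prefer not to use Pillow, the base64 string in output can also be decoded and written directly to disk. A minimal sketch, assuming res is the parsed JSON response from the example above:

import base64

# Assumes `res` is the parsed JSON response from the example above
b64_str = res.get("output", "")

# Drop the data URI preamble if present, then write the raw PNG bytes to disk
b64_str = b64_str.replace("data:image/png;base64,", "")
with open("sd-v2-1.png", "wb") as f:
    f.write(base64.b64decode(b64_str))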