Before — LiteLLM (self-hosted proxy)
# config.yaml
# model_list:
#   - model_name: gpt-5.4
#     litellm_params:
#       model: openai/gpt-5.4
#       api_key: os.environ/OPENAI_API_KEY
import openai
client = openai.OpenAI(
base_url="http://your-litellm-proxy:4000",
api_key="sk-litellm-...",
)
r = client.chat.completions.create(
model="gpt-5.4",
messages=[{"role": "user", "content": "Hello"}],
)
After — AIgateway
from openai import OpenAI
client = OpenAI(
base_url="https://api.aigateway.sh/v1",
api_key="sk-aig-...",
)
r = client.chat.completions.create(
model="openai/gpt-5.4",
messages=[{"role": "user", "content": "Hello"}],
)