2025-07-11 13:52:19 +00:00

120 lines
3.3 KiB
Python

"""
Example usage of the TextGen client.
"""
import asyncio
import logging
from typing import AsyncGenerator
from .client import TextGenClient
# Configure logging: timestamped, INFO-level records for the whole process.
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
# Module-level logger named after this module (standard logging practice).
logger = logging.getLogger(__name__)
async def chat_example():
    """Demonstrate the chat completion API in non-streaming and streaming modes.

    Sends the same message list twice — once plain and once with
    ``stream=True`` — printing streamed chunks as they arrive, and always
    closes the client when done.
    """
    client = TextGenClient()
    try:
        # Simple chat example
        messages = [{"role": "user", "content": "Hello! Who are you?"}]

        # Non-streaming: the full reply arrives as a single value.
        logger.info("Sending chat request (non-streaming)...")
        response = await client.simple_chat(
            messages=messages, temperature=0.7, max_tokens=500, mode="instruct"
        )
        # Lazy %-style args avoid formatting when the level is disabled.
        logger.info("Response: %s", response)

        # Streaming: the client is expected to return an async generator.
        logger.info("Sending chat request (streaming)...")
        stream_response = await client.simple_chat(
            messages=messages,
            temperature=0.7,
            max_tokens=500,
            stream=True,
            mode="instruct",
        )
        # Check if the response is a stream
        if isinstance(stream_response, AsyncGenerator):
            logger.info("Streaming response:")
            async for chunk in stream_response:
                print(chunk, end="", flush=True)
            print()  # final newline after the streamed text
        else:
            logger.info("Expected stream but got: %s", stream_response)
    except Exception as e:
        # logger.exception keeps the same message but also records the
        # traceback, which logger.error(f"...") silently dropped.
        logger.exception("Error: %s", e)
    finally:
        # Always release the client's underlying resources.
        await client.close()
async def completion_example():
    """Demonstrate the text completion API in non-streaming and streaming modes.

    Sends the same prompt twice — once plain and once with ``stream=True`` —
    printing streamed chunks as they arrive, and always closes the client.
    """
    client = TextGenClient()
    try:
        prompt = "This is a cake recipe:\n\n1."

        # Non-streaming: the full completion arrives as a single value.
        logger.info("Sending completion request (non-streaming)...")
        response = await client.simple_completion(
            prompt=prompt, temperature=0.7, max_tokens=200
        )
        # Lazy %-style args avoid formatting when the level is disabled.
        logger.info("Response: %s", response)

        # Streaming: the client is expected to return an async generator.
        logger.info("Sending completion request (streaming)...")
        stream_response = await client.simple_completion(
            prompt=prompt, temperature=0.7, max_tokens=200, stream=True
        )
        # Check if the response is a stream
        if isinstance(stream_response, AsyncGenerator):
            logger.info("Streaming response:")
            async for chunk in stream_response:
                print(chunk, end="", flush=True)
            print()  # final newline after the streamed text
        else:
            logger.info("Expected stream but got: %s", stream_response)
    except Exception as e:
        # logger.exception keeps the same message but also records the
        # traceback, which logger.error(f"...") silently dropped.
        logger.exception("Error: %s", e)
    finally:
        # Always release the client's underlying resources.
        await client.close()
async def list_models_example():
    """List the models the server exposes, logging each model's id."""
    client = TextGenClient()
    try:
        logger.info("Listing available models...")
        models = await client.list_models()
        for model in models:
            # Lazy %-style args avoid formatting when the level is disabled.
            logger.info("Model: %s", model.id)
    except Exception as e:
        # logger.exception keeps the same message but also records the
        # traceback, which logger.error(f"...") silently dropped.
        logger.exception("Error: %s", e)
    finally:
        # Always release the client's underlying resources.
        await client.close()
async def main():
    """Run every example coroutine in sequence."""
    logger.info("Running TextGen client examples")
    # Same order as before: models first, then chat, then completion.
    for example in (list_models_example, chat_example, completion_example):
        await example()
if __name__ == "__main__":
    # Entry point: run all examples under a fresh asyncio event loop.
    asyncio.run(main())