Python

Python Examples

Use QuietStack with Python — no SDK installation required, just HTTP requests

No SDK Installation Required!

These examples only use Python's built-in libraries and the popular requests library. Just install: pip install requests (or pip install aiohttp for async examples)

Basic Requests Example

Simple example using the popular requests library

import requests

# Basic chat completion with QuietStack
# Basic chat completion with QuietStack
def ask_ai(prompt):
    """Send *prompt* to the QuietStack proxy and return the parsed JSON.

    The returned dict carries the AI text under 'response' and the
    blockchain verification data under 'verification'.
    Raises requests.HTTPError on a 4xx/5xx response.
    """
    response = requests.post(
        'https://api.quietstack.ai/v1/proxy',
        headers={
            'Authorization': 'Bearer qs_live_YOUR_API_KEY',
            'Content-Type': 'application/json'
        },
        json={
            'provider': 'openai',
            'model': 'gpt-3.5-turbo',
            'messages': [{
                'role': 'user',
                'content': prompt
            }]
        },
        timeout=30  # don't hang forever on a stalled connection
    )
    # Surface HTTP errors explicitly instead of parsing an error body
    response.raise_for_status()

    data = response.json()
    return data

# Usage
result = ask_ai('What is the capital of France?')
# 'response' holds the AI text; 'verification' holds the blockchain proof
print('AI Response:', result['response'])
print('Blockchain Hash:', result['verification']['hash'])

Async Example

Asynchronous requests using aiohttp for better performance

import aiohttp
import asyncio
import json

async def ask_ai_async(prompt):
    """Async version using aiohttp.

    Returns the parsed JSON response dict; raises
    aiohttp.ClientResponseError on a 4xx/5xx status.
    """
    # 30 s cap to match the sync examples; applies to the whole request
    timeout = aiohttp.ClientTimeout(total=30)
    async with aiohttp.ClientSession(timeout=timeout) as session:
        async with session.post(
            'https://api.quietstack.ai/v1/proxy',
            headers={
                'Authorization': 'Bearer qs_live_YOUR_API_KEY',
                'Content-Type': 'application/json'
            },
            json={
                'provider': 'openai',
                'model': 'gpt-4',
                'messages': [{
                    'role': 'user',
                    'content': prompt
                }]
            }
        ) as response:
            # Fail loudly on HTTP errors instead of returning an error body
            response.raise_for_status()
            return await response.json()

# Usage
async def main():
    """Demo coroutine: ask one question and print the verified reply."""
    reply = await ask_ai_async('Explain quantum computing')
    print('Response:', reply['response'])
    print('Verification:', reply['verification']['blockchain_tx'])

# Start an event loop and run the demo coroutine to completion
asyncio.run(main())

Client Class

Reusable client class for organized code

import requests
import os
from datetime import datetime

class QuietStackClient:
    """Minimal reusable client for the QuietStack proxy API."""

    def __init__(self, api_key=None):
        """Store credentials, falling back to the QUIETSTACK_API_KEY env var.

        Raises ValueError when no key is supplied and none is in the
        environment.
        """
        self.api_key = api_key if api_key else os.getenv('QUIETSTACK_API_KEY')
        self.base_url = 'https://api.quietstack.ai/v1'

        if not self.api_key:
            raise ValueError("API key is required")

    def chat(self, prompt, provider='openai', model='gpt-3.5-turbo'):
        """POST *prompt* to the proxy endpoint and return the parsed JSON.

        On any request failure (network error, timeout, HTTP 4xx/5xx)
        the error is printed and None is returned.
        """
        try:
            resp = requests.post(
                f'{self.base_url}/proxy',
                headers={
                    'Authorization': f'Bearer {self.api_key}',
                    'Content-Type': 'application/json',
                },
                json={
                    'provider': provider,
                    'model': model,
                    'messages': [{'role': 'user', 'content': prompt}],
                },
                timeout=30,
            )
            resp.raise_for_status()
            return resp.json()

        except requests.exceptions.RequestException as e:
            print(f"Error calling QuietStack API: {e}")
            return None

    def verify_transaction(self, tx_hash):
        """Verify a blockchain transaction"""
        # Placeholder: a real implementation would call a verification endpoint
        print(f"Verifying transaction: {tx_hash}")
        return True

# Usage
client = QuietStackClient()
result = client.chat('What are the benefits of blockchain verification?')

# chat() returns None on failure, so guard before indexing the payload.
# (The original had doc-generator-mangled f-strings: \{...\} / \$\{...\}
# instead of plain {...} braces — fixed below.)
if result:
    print(f"Response: {result['response']}")
    print(f"Cost: ${result['usage']['cost']}")
    print(f"Verified on blockchain: {result['verification']['hash']}")

Flask Web App

Flask endpoint that integrates with QuietStack

from flask import Flask, request, jsonify
import requests
import os

app = Flask(__name__)

@app.route('/chat', methods=['POST'])
def chat_endpoint():
    """Proxy a user message to QuietStack and return the verified reply.

    Expects a JSON body like {"message": "..."}; responds with the AI
    text plus blockchain verification metadata, or a JSON error payload
    (400 for a missing/invalid body, 500 for upstream failures).
    """
    try:
        # get_json(silent=True) returns None instead of raising on a
        # missing or malformed JSON body, so we can answer 400 here
        # rather than falling through to the generic 500 handler.
        body = request.get_json(silent=True) or {}
        user_message = body.get('message')
        if not user_message:
            return jsonify({'error': 'Message is required'}), 400

        # Call QuietStack API (timeout so a stalled upstream can't hang us)
        response = requests.post(
            'https://api.quietstack.ai/v1/proxy',
            headers={
                'Authorization': f'Bearer {os.getenv("QUIETSTACK_API_KEY")}',
                'Content-Type': 'application/json'
            },
            json={
                'provider': 'openai',
                'model': 'gpt-3.5-turbo',
                'messages': [{
                    'role': 'user',
                    'content': user_message
                }]
            },
            timeout=30
        )

        if response.status_code == 200:
            data = response.json()
            return jsonify({
                'response': data['response'],
                'verified': True,
                'blockchain_hash': data['verification']['hash'],
                'cost': data['usage']['cost']
            })
        else:
            return jsonify({'error': 'API request failed'}), 500

    except Exception as e:
        # Boundary handler: report the failure as JSON, not an HTML 500 page
        return jsonify({'error': str(e)}), 500

if __name__ == '__main__':
    app.run(debug=True)

# Test with:
# curl -X POST http://localhost:5000/chat \
#      -H "Content-Type: application/json" \
#      -d '{"message": "Hello, world!"}'

Response Processing

Simulate streaming of an AI response word by word and handle the blockchain verification data attached to the final chunk

import requests
import json

def stream_ai_response(prompt):
    """Yield an AI response word by word, simulating a streaming API.

    Each yielded dict has 'chunk' (a text fragment), 'done' (True only on
    the final fragment), and 'verification' (blockchain data on the final
    fragment, None otherwise). Raises requests.HTTPError on a 4xx/5xx
    response instead of silently yielding nothing.
    """

    # Note: This is a conceptual example
    # Actual streaming would depend on QuietStack's streaming support

    response = requests.post(
        'https://api.quietstack.ai/v1/proxy',
        headers={
            'Authorization': 'Bearer qs_live_YOUR_API_KEY',
            'Content-Type': 'application/json'
        },
        json={
            'provider': 'openai',
            'model': 'gpt-3.5-turbo',
            'messages': [{
                'role': 'user',
                'content': prompt
            }],
            'stream': False  # Set to True when streaming is supported
        },
        stream=False,
        timeout=30  # avoid hanging indefinitely on a stalled connection
    )
    # Fail loudly on HTTP errors; the original yielded nothing on non-200
    response.raise_for_status()

    data = response.json()

    # Simulate streaming by yielding one word at a time
    words = data['response'].split(' ')
    last = len(words) - 1  # hoisted: index of the final chunk
    for i, word in enumerate(words):
        yield {
            'chunk': word + (' ' if i < last else ''),
            'done': i == last,
            'verification': data['verification'] if i == last else None
        }

# Usage
# Print each chunk as it arrives, then show the verification hash once
# the final chunk (done=True) has been received.
for chunk in stream_ai_response('Tell me about Python'):
    print(chunk['chunk'], end='', flush=True)
    if chunk['done']:
        print(f"\n\nVerified: {chunk['verification']['hash']}")

Environment Setup

Keep your API key secure using environment variables

1. Create a .env file:

QUIETSTACK_API_KEY=qs_live_your_actual_api_key_here

2. Load in Python:

import os
from dotenv import load_dotenv  # pip install python-dotenv

# Load variables from .env into the process environment, then read the key
load_dotenv()
api_key = os.getenv('QUIETSTACK_API_KEY')

Ready to try it out?

Get your API key and test these examples in our playground