Python SDK
Official Python SDK for the Inspira API.
Installation
pip
bash
pip install inspira-sdk
Poetry
bash
poetry add inspira-sdk
Requirements
- Python 3.8 or higher
- requests library (installed automatically)
- aiohttp for async support (optional; see the install command below)
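aiohttp is only required for the async client. A typical install that covers both sync and async usage looks like this (plain pip shown here; whether the package also publishes an async extra is not documented):
bash
pip install inspira-sdk aiohttp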
Quick Start
Initialize Client
python
from inspira import InspiraClient
# Basic initialization
client = InspiraClient(
    api_key="inspira_your_api_key_here"
)

# With configuration
client = InspiraClient(
    api_key="inspira_your_api_key_here",
    base_url="https://app.inspirahub.net/api/rest",
    timeout=30,
    max_retries=3
)
Async Support
python
from inspira import AsyncInspiraClient
import asyncio
async def main():
    client = AsyncInspiraClient(api_key="inspira_your_api_key_here")
    response = await client.chat.complete({
        "messages": [{"role": "user", "content": "Hello!"}]
    })
    print(response.message)

asyncio.run(main())
Core Features
Chat Completions
python
# Simple chat
response = client.chat.complete(
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "What is quantum computing?"}
    ]
)
print(response.message)

# Advanced options
response = client.chat.complete(
    messages=messages,
    temperature=0.7,
    max_tokens=1000,
    top_p=0.9,
    presence_penalty=0.1,
    frequency_penalty=0.1
)

# Streaming responses
for chunk in client.chat.stream(messages=messages):
    print(chunk.content, end="", flush=True)
Image Generation
python
# Generate single image
image = client.images.generate(
    prompt="A futuristic city at sunset",
    size="1024x1024",
    quality="high",
    style="photorealistic"
)
print(f"Image URL: {image.url}")

# Generate multiple variations
images = client.images.generate(
    prompt="Abstract art",
    n=4,
    size="512x512"
)
for i, img in enumerate(images):
    print(f"Image {i+1}: {img.url}")

# Download image
client.images.download(image.url, "output/city.png")
Video Generation
python
# Generate video
video = client.videos.generate(
    prompt="A spaceship landing on Mars",
    duration=5,
    resolution="1080p",
    fps=30
)

# Poll for completion
import time

while True:
    status = client.videos.get_status(video.id)
    if status.complete:
        print(f"Video ready: {status.url}")
        break
    print(f"Progress: {status.progress}%")
    time.sleep(2)

# Download video
client.videos.download(video.id, "output/spaceship.mp4")
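The loop above polls until the job reports completion and would wait forever if generation stalls. A small wrapper with a timeout is a safer pattern; this is only a sketch using the status fields shown above (wait_for_video is a local helper, not an SDK method):
python
import time

def wait_for_video(client, video_id, timeout=300, interval=2):
    """Poll videos.get_status until the job completes or the timeout elapses."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        status = client.videos.get_status(video_id)
        if status.complete:
            return status
        time.sleep(interval)
    raise TimeoutError(f"Video {video_id} not ready after {timeout} seconds")

status = wait_for_video(client, video.id)
print(f"Video ready: {status.url}")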
Book Grading
python
# Grade written content
result = client.education.grade_book(
    content=essay_text,
    grade_level="high school",
    subject="english",
    rubric={
        "grammar": 25,
        "content": 40,
        "structure": 20,
        "creativity": 15
    }
)
print(f"Overall Grade: {result.overall_grade}")
print(f"Letter Grade: {result.letter_grade}")
print(f"\nFeedback: {result.feedback}")

# Detailed analysis
for category in result.categories:
    print(f"\n{category.name}:")
    print(f" Score: {category.score}/{category.max_score}")
    print(f" Comments: {category.comments}")
EduFi Tutoring
US Education System
python
# AP Course Assistant
ap_response = client.edufi.us.ap(
    message="Explain the Krebs cycle for AP Biology",
    conversation_history=[]
)
print(ap_response.response)

# College Prep Assistant
college_response = client.edufi.us.college_prep(
    message="Help me prepare for SAT math",
    subject="mathematics",
    conversation_history=[]
)

# Maintain conversation context
history = [
    {"role": "user", "content": "What topics should I focus on?"},
    {"role": "assistant", "content": college_response.response}
]
followup = client.edufi.us.college_prep(
    message="Can you give me practice problems?",
    conversation_history=history
)
UK Education System
python
# GCSE Assistant
gcse_response = client.edufi.uk.gcse(
    message="Explain photosynthesis for GCSE Biology",
    conversation_history=[]
)

# A-Level Assistant
a_level_response = client.edufi.uk.a_level(
    message="Discuss themes in Shakespeare's Hamlet",
    subject="english literature",
    conversation_history=[]
)

# College Prep
uk_college = client.edufi.uk.college_prep(
    message="Oxford interview preparation tips",
    subject="physics",
    conversation_history=[]
)
European Education System
python
# European Baccalaureate
bac_response = client.edufi.europe.baccalaureate(
    message="Expliquez la Révolution française",
    language="fr",
    conversation_history=[]
)

# National Exams
exam_response = client.edufi.europe.national_exams(
    message="Prepare me for Abitur mathematics",
    country="Germany",
    conversation_history=[]
)

# University Prep
uni_response = client.edufi.europe.university_prep(
    message="Engineering entrance exam preparation",
    country="Germany",
    conversation_history=[]
)
Global Education
python
# International Baccalaureate
# "global" is a reserved word in Python, so the namespace cannot be written as
# client.edufi.global; access it via getattr (or whatever attribute name your
# SDK version exposes for this namespace)
edufi_global = getattr(client.edufi, "global")
ib_response = edufi_global.ib(
    message="Explain Theory of Knowledge concepts",
    subject="TOK",
    conversation_history=[]
)

# Language Preparation
lang_response = edufi_global.language_prep(
    message="Help me prepare for IELTS speaking",
    target_score=7.5,
    exam_type="IELTS",
    conversation_history=[]
)
Detection Tools
Image Analysis
python
# Deepfake Detection
deepfake_result = client.detection.deepfake(
    image_url="https://example.com/image.jpg",
    sensitivity="high"
)
print(f"Deepfake probability: {deepfake_result.probability}%")
print(f"Confidence: {deepfake_result.confidence}")
print(f"Analysis: {deepfake_result.analysis}")

# Explicit Content Detection
explicit_result = client.detection.explicit_content(
    image_url=image_url,
    categories=["violence", "adult", "gore"]
)
for detection in explicit_result.detections:
    print(f"{detection.category}: {detection.probability}%")
    if detection.detected:
        print(f" Warning: {detection.message}")

# Label Detection
labels = client.detection.labels(
    image_url=image_url,
    max_labels=10,
    min_confidence=0.7
)
print("Detected objects:")
for label in labels:
    print(f"- {label.name} ({label.confidence:.1%})")
    if label.bounding_box:
        print(f" Location: {label.bounding_box}")

# Logo Detection
logos = client.detection.logos(
    image_url=image_url,
    brands=["Nike", "Apple", "Google"]
)
for logo in logos.detected:
    print(f"Found {logo.brand} logo")
    print(f" Confidence: {logo.confidence:.1%}")
    print(f" Location: {logo.location}")
Writing Tools
Text Analysis
python
# AI Detection
ai_detection = client.writing.detect_ai(
    text="Text to analyze for AI generation...",
    model="advanced"
)
print(f"AI Generated: {'Yes' if ai_detection.is_ai else 'No'}")
print(f"Confidence: {ai_detection.confidence}%")
print(f"Analysis: {ai_detection.analysis}")

# Grammar Checking
grammar = client.writing.check_grammar(
    text="Text with erors to check",
    language="en",
    style="formal"
)
print(f"Found {len(grammar.corrections)} issues:")
for correction in grammar.corrections:
    print(f"\nIssue: {correction.type}")
    print(f"Position: {correction.offset}")
    print(f"Error: '{correction.text}'")
    print(f"Suggestion: '{correction.suggestion}'")
    print(f"Explanation: {correction.message}")

# Text Rephrasing
rephrased = client.writing.rephrase(
    text="This is the original text that needs rephrasing",
    tone="professional",
    length="similar"
)
print(f"Original: {rephrased.original}")
print(f"Rephrased: {rephrased.text}")
print(f"Tone: {rephrased.tone}")

# Summarization
summary = client.writing.summarize(
    text=long_article,
    length="medium",
    format="bullet_points"
)
print("Summary:")
for point in summary.points:
    print(f"• {point}")
print(f"\nReduction: {summary.reduction_percentage}%")
Blockchain Tools
Smart Contract Operations
python
# Audit Smart Contract
audit = client.blockchain.audit(
    contract_code=solidity_code,
    blockchain="ethereum",
    audit_level="comprehensive"
)
print(f"Security Score: {audit.security_score}/100")
print(f"Gas Efficiency: {audit.gas_efficiency}/100")
print("\nVulnerabilities found:")
for vuln in audit.vulnerabilities:
    print(f"\n[{vuln.severity}] {vuln.title}")
    print(f"Line {vuln.line}: {vuln.description}")
    print(f"Recommendation: {vuln.recommendation}")

# Generate Contract Code
contract = client.blockchain.generate_contract(
    description="ERC-20 token with burn function",
    blockchain="ethereum",
    features=["mintable", "burnable", "pausable"],
    security_level="high"
)
print(f"Contract generated: {contract.name}")
print(f"Gas estimate: {contract.gas_estimate}")
print("\nCode:")
print(contract.code)

# Save contract
with open("contracts/MyToken.sol", "w") as f:
    f.write(contract.code)

# Launch Advisor
launch_advice = client.blockchain.launch_advisor(
    project_type="DeFi",
    stage="pre-launch",
    budget="medium",
    target_chains=["ethereum", "polygon"]
)
print("Launch Recommendations:")
for i, rec in enumerate(launch_advice.recommendations, 1):
    print(f"\n{i}. [{rec.priority}] {rec.action}")
    print(f" Timeline: {rec.timeline}")
    print(f" Cost: {rec.estimated_cost}")

# Marketing Advisor
marketing_plan = client.blockchain.marketing_advisor(
    project_name="MyDeFi Protocol",
    target_audience="DeFi enthusiasts",
    budget="$50,000",
    duration="3 months"
)
print(f"Marketing Strategy: {marketing_plan.strategy}")
print("\nChannels:")
for channel in marketing_plan.channels:
    print(f"- {channel.name}: ${channel.budget}")
    print(f" ROI: {channel.expected_roi}")
Advanced Features
Error Handling
python
import time

from inspira.exceptions import (
    InspiraError,
    AuthenticationError,
    InsufficientCreditsError,
    RateLimitError,
    ValidationError
)

try:
    result = client.images.generate(prompt="Beautiful sunset")
except InsufficientCreditsError as e:
    print(f"Need {e.required} credits, have {e.available}")
    print(f"Purchase URL: {e.purchase_url}")
except RateLimitError as e:
    print(f"Rate limited. Retry after {e.retry_after} seconds")
    time.sleep(e.retry_after)
    # Retry request
except ValidationError as e:
    print(f"Invalid {e.field}: {e.message}")
except InspiraError as e:
    print(f"API Error [{e.code}]: {e.message}")
    print(f"Request ID: {e.request_id}")
Context Managers
python
# Automatic resource cleanup
with InspiraClient(api_key=api_key) as client:
    response = client.chat.complete(messages=messages)
# Client automatically closed after use

# Batch operations with context
with client.batch() as batch:
    batch.add(client.images.generate, prompt="Sunset")
    batch.add(client.images.generate, prompt="Ocean")
    batch.add(client.images.generate, prompt="Forest")
    results = batch.execute()  # Executes all in parallel
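The objects returned by batch.execute() are not shown above; assuming results come back in the order the calls were added (an assumption) and that each entry is the same image object images.generate normally returns, iterating over them looks like this:
python
for prompt, image in zip(["Sunset", "Ocean", "Forest"], results):
    print(f"{prompt}: {image.url}")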
Callbacks and Events
python
# Progress callbacks
def on_progress(progress):
    print(f"Progress: {progress.percentage}%")
    print(f"Stage: {progress.stage}")

video = client.videos.generate(
    prompt="Nature documentary scene",
    on_progress=on_progress
)

# Event handlers
client.on("credits_low", lambda e: print(f"Low credits: {e.balance}"))
client.on("rate_limit", lambda e: print(f"Rate limited: {e.endpoint}"))

# Custom logging
import logging

logging.basicConfig(level=logging.INFO)
client = InspiraClient(
    api_key=api_key,
    logger=logging.getLogger("inspira")
)
Async Operations
python
import asyncio
from inspira import AsyncInspiraClient
async def generate_images_async(prompts):
    async with AsyncInspiraClient(api_key=api_key) as client:
        tasks = [
            client.images.generate(prompt=prompt)
            for prompt in prompts
        ]
        results = await asyncio.gather(*tasks)
        return results

# Run async function
prompts = ["Sunset", "Ocean", "Mountains", "Forest"]
images = asyncio.run(generate_images_async(prompts))
Credit Management
python
# Check balance
balance = client.credits.get_balance()
print(f"Current balance: {balance.credits} credits")
print(f"Currency: {balance.currency}")

# Get usage history
usage = client.credits.get_usage(
    start_date="2025-01-01",
    end_date="2025-01-31",
    limit=100
)
print(f"Total usage: {usage.total_credits} credits")
print(f"Total requests: {usage.total_requests}")

# Usage by endpoint
for endpoint in usage.by_endpoint:
    print(f"{endpoint.name}: {endpoint.credits} credits ({endpoint.count} requests)")

# Set up alerts
client.credits.set_alert(
    threshold=50,
    webhook_url="https://your-app.com/low-credits"
)

# Purchase credits
order = client.credits.purchase(
    amount=100,
    currency="USDT"
)
print(f"Order ID: {order.id}")
print(f"Payment address: {order.payment_address}")
print(f"Amount: {order.amount} {order.currency}")
print(f"Credits to receive: {order.credits}")
Retry and Backoff
python
from inspira.retry import RetryConfig

# Configure retry behavior
client = InspiraClient(
    api_key=api_key,
    retry_config=RetryConfig(
        max_retries=3,
        retry_delay=1.0,
        exponential_backoff=True,
        retry_on=[429, 502, 503, 504],
        on_retry=lambda attempt, error: print(f"Retry {attempt}: {error}")
    )
)

# Or use decorator
from inspira.decorators import retry

@retry(max_attempts=3, delay=1.0)
def generate_image_with_retry(prompt):
    return client.images.generate(prompt=prompt)
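Calling the decorated function then behaves like a normal call, with retries applied transparently on failure:
python
image = generate_image_with_retry("Beautiful sunset")
print(image.url)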
Caching
python
from inspira.cache import MemoryCache, RedisCache

# In-memory cache
client = InspiraClient(
    api_key=api_key,
    cache=MemoryCache(max_size=100, ttl=300)
)

# Redis cache
client = InspiraClient(
    api_key=api_key,
    cache=RedisCache(
        host="localhost",
        port=6379,
        ttl=3600
    )
)

# Cached requests
response1 = client.chat.complete(messages=messages)  # API call
response2 = client.chat.complete(messages=messages)  # From cache
Testing
Mock Client
python
from inspira.testing import MockInspiraClient

# Create mock client
mock_client = MockInspiraClient()

# Configure responses
mock_client.set_response(
    "chat.complete",
    {"message": "Mocked response", "tokens": 10}
)
mock_client.set_response(
    "images.generate",
    {"url": "https://example.com/mock.jpg", "id": "img_123"}
)

# Use in tests
def test_my_function():
    result = my_function(mock_client)
    assert result.url == "https://example.com/mock.jpg"

    # Verify calls
    assert mock_client.call_count("images.generate") == 1
    assert mock_client.last_call("images.generate").prompt == "Test prompt"
Fixtures
python
import pytest
from inspira.testing import create_fixtures

# Auto-generate fixtures
inspira_fixtures = create_fixtures()

@pytest.fixture
def client():
    return inspira_fixtures.client()

@pytest.fixture
def mock_image():
    return inspira_fixtures.image()

def test_image_download(client, mock_image, tmp_path):
    output_path = tmp_path / "test.jpg"
    client.images.download(mock_image.url, output_path)
    assert output_path.exists()
Configuration
Environment Variables
bash
# .env file
INSPIRA_API_KEY=inspira_your_api_key
INSPIRA_BASE_URL=https://app.inspirahub.net/api/rest
INSPIRA_TIMEOUT=30
INSPIRA_MAX_RETRIES=3
INSPIRA_LOG_LEVEL=INFO
python
# Load from environment
import os
from dotenv import load_dotenv

load_dotenv()
client = InspiraClient(
    api_key=os.getenv("INSPIRA_API_KEY"),
    base_url=os.getenv("INSPIRA_BASE_URL"),
    timeout=int(os.getenv("INSPIRA_TIMEOUT", 30))
)
Configuration File
yaml
# config.yaml
api:
  key: ${INSPIRA_API_KEY}
  base_url: https://app.inspirahub.net/api/rest
  timeout: 30
retry:
  max_attempts: 3
  delay: 1.0
  exponential_backoff: true
cache:
  type: redis
  host: localhost
  port: 6379
  ttl: 3600
python
# Load configuration
import yaml

with open("config.yaml") as f:
    config = yaml.safe_load(f)

client = InspiraClient(**config["api"])
Best Practices
Resource Management
python
# Always close clients
client = InspiraClient(api_key=api_key)
try:
    # Use client
    response = client.chat.complete(messages=messages)
finally:
    client.close()

# Or use context manager
with InspiraClient(api_key=api_key) as client:
    response = client.chat.complete(messages=messages)
Logging
python
import logging

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)

# Enable debug logging
client = InspiraClient(
    api_key=api_key,
    debug=True
)

# Custom logger
logger = logging.getLogger("my_app")
client = InspiraClient(
    api_key=api_key,
    logger=logger
)