Error Handling

The Clio SDK provides comprehensive error handling to help you build resilient automation workflows. This guide covers error handling strategies and best practices.

The Clio SDK follows a graceful degradation approach:

  • Your automation tests should continue running even if monitoring fails
  • Detailed error information is available when needed
  • Production systems should handle errors silently
  • Development environments should show full error details

Use raise_on_error=True during development to see detailed errors:

from clio import ClioMonitor
# Development - see all errors
monitor = ClioMonitor(
api_key="clio_your_key",
raise_on_error=True # Raises exceptions for debugging
)
try:
await monitor.start_run(context, "Debug Test")
except Exception as e:
print(f"Full error details: {e}")
import traceback
traceback.print_exc()

Use raise_on_error=False (default) for graceful degradation:

# Production - continue even if monitoring fails
monitor = ClioMonitor(
api_key="clio_your_key",
raise_on_error=False # Default: log errors but don't raise
)
# Monitoring errors won't interrupt your automation
await monitor.start_run(context, "Production Test")
# Test continues even if monitoring fails
from clio.exceptions import (
    ClioAuthError,
    ClioUploadError,
    ClioRateLimitError,
    ClioConfigError,
    ClioError,
)


async def robust_automation():
    """Run the automation with a dedicated handler for each Clio error type.

    Returns True on success, False when a configuration error stops the
    run; non-Clio exceptions are re-raised to the caller.
    """
    try:
        monitor = ClioMonitor(api_key=os.getenv("CLIO_API_KEY"))
        await monitor.start_run(context, "Robust Test")

        # Your automation code here
        page = await context.new_page()
        await page.goto("https://example.com")
        await context.close()
    except ClioAuthError as e:
        print(f"❌ Authentication failed: {e}")
        print("💡 Check your API key and try again")
        # Continue with test, just without monitoring
    except ClioRateLimitError as e:
        print(f"⏳ Rate limit exceeded: {e}")
        print("💡 Wait or upgrade your plan")
        # Continue with test
    except ClioUploadError as e:
        print(f"📤 Upload failed: {e}")
        print("💡 Check network connection")
        # Test completed, just upload failed
    except ClioConfigError as e:
        print(f"⚙️ Configuration error: {e}")
        print("💡 Fix configuration and retry")
        return False  # Config errors should stop execution
    except ClioError as e:
        print(f"❌ Clio SDK error: {e}")
        # Other Clio errors - continue with test
    except Exception as e:
        print(f"💥 Unexpected error: {e}")
        # Non-Clio errors should be handled by your application
        raise

    print("✅ Test completed")
    return True

This graceful-degradation pattern is best for most scenarios — the test continues even if monitoring fails:

async def test_with_optional_monitoring():
    """Run the automation whether or not monitoring can be enabled.

    Every monitoring failure is reported and swallowed; the Playwright
    test itself runs and asserts regardless.
    """
    monitor = None

    # Attempt to initialise monitoring; any failure simply disables it.
    try:
        monitor = ClioMonitor(
            api_key=os.getenv("CLIO_API_KEY"),
            raise_on_error=False,  # Silent mode
        )
        print("📹 Monitoring enabled")
    except Exception as e:
        print(f"⚠️ Monitoring unavailable: {e}")
        print("🔄 Continuing without monitoring...")

    async with async_playwright() as p:
        browser = await p.chromium.launch()
        context = await browser.new_context(record_video_dir="./videos")

        # Best-effort start of the monitored run.
        if monitor:
            try:
                await monitor.start_run(context, "Optional Monitoring Test")
            except Exception as e:
                print(f"⚠️ Could not start monitoring: {e}")
                # Continue anyway

        # The test logic runs regardless of monitoring status.
        page = await context.new_page()
        await page.goto("https://example.com")

        title = await page.title()
        assert "Example Domain" in title

        await context.close()
        print("✅ Test passed")

For scenarios where monitoring is required:

async def test_requiring_monitoring():
    """Fail fast when Clio monitoring cannot be enabled.

    Returns False when configuration or authentication prevents
    monitoring; True once the monitored test completes.
    """
    try:
        monitor = ClioMonitor(
            api_key=os.getenv("CLIO_API_KEY"),
            raise_on_error=True,  # Fail fast
        )
    except ClioConfigError as e:
        print(f"❌ Configuration error - cannot proceed: {e}")
        return False
    except ClioAuthError as e:
        print(f"❌ Authentication failed - cannot proceed: {e}")
        return False

    # Monitoring is required for this test
    async with async_playwright() as p:
        browser = await p.chromium.launch()
        context = await browser.new_context(record_video_dir="./videos")
        await monitor.start_run(context, "Required Monitoring Test")

        # Test continues knowing monitoring is active
        page = await context.new_page()
        await page.goto("https://example.com")
        await context.close()

    return True

For handling transient errors:

import asyncio
from clio.exceptions import ClioUploadError, ClioRateLimitError


async def test_with_retries():
    """Retry the monitored test when transient Clio errors occur.

    Upload and rate-limit failures are retried up to three times with
    exponential backoff (1s, 2s); any other exception aborts at once.
    """
    max_retries = 3
    for attempt in range(max_retries):
        try:
            monitor = ClioMonitor(api_key=os.getenv("CLIO_API_KEY"))
            async with async_playwright() as p:
                browser = await p.chromium.launch()
                context = await browser.new_context(record_video_dir="./videos")
                await monitor.start_run(context, f"Retry Test - Attempt {attempt + 1}")

                # Your test logic
                page = await context.new_page()
                await page.goto("https://example.com")
                await context.close()

            print("✅ Test completed successfully")
            return True
        except (ClioUploadError, ClioRateLimitError) as e:
            print(f"⚠️ Attempt {attempt + 1} failed: {e}")
            if attempt < max_retries - 1:
                wait_time = 2 ** attempt  # Exponential backoff
                print(f"⏳ Waiting {wait_time} seconds before retry...")
                await asyncio.sleep(wait_time)
            else:
                print("❌ All retry attempts failed")
                return False
        except Exception as e:
            print(f"❌ Non-retryable error: {e}")
            return False
    return False
import logging
# Fix: the original snippet imported only ClioError, so the except
# clauses below would raise NameError for ClioAuthError/ClioUploadError
# the moment a Clio exception propagated.
from clio.exceptions import ClioAuthError, ClioUploadError, ClioError

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)


async def test_with_logging():
    """Run a monitored test with comprehensive error logging.

    Clio errors are logged (auth failures additionally trigger an
    alert); non-Clio errors are logged with a traceback and re-raised.
    """
    try:
        monitor = ClioMonitor(
            api_key=os.getenv("CLIO_API_KEY"),
            raise_on_error=False  # Log errors instead of raising
        )
        logger.info("🎬 Starting monitored automation")
        async with async_playwright() as p:
            browser = await p.chromium.launch()
            context = await browser.new_context(record_video_dir="./videos")
            await monitor.start_run(context, "Logged Test")

            # Your test logic with logging
            logger.info("🌐 Navigating to target site")
            page = await context.new_page()
            await page.goto("https://example.com")
            logger.info("✅ Navigation completed successfully")

            await context.close()
            logger.info("📤 Upload process initiated")
        logger.info("🎉 Test completed successfully")
    except ClioAuthError as e:
        logger.error(f"Authentication failed: {e}")
        # Send alert to monitoring system
        send_alert("clio_auth_failure", str(e))
    except ClioUploadError as e:
        logger.warning(f"Upload failed: {e}")
        # Upload failure is not critical - test still passed
    except ClioError as e:
        logger.error(f"Clio SDK error: {e}")
        # Log for debugging but don't fail the test
    except Exception as e:
        logger.error(f"Unexpected error: {e}", exc_info=True)
        raise  # Re-raise non-Clio errors


def send_alert(alert_type, message):
    """Send alert to monitoring system."""
    # Implement your alerting logic
    print(f"ALERT [{alert_type}]: {message}")
from functools import wraps


def handle_clio_errors(continue_on_error=True):
    """Decorator factory that shields an async test from Clio SDK errors.

    Args:
        continue_on_error: When True (default), auth and generic Clio
            errors are printed and swallowed; when False they are
            re-raised. Upload errors are always swallowed because an
            upload failure does not invalidate the test result.
    """
    def decorator(func):
        @wraps(func)
        async def wrapper(*args, **kwargs):
            try:
                return await func(*args, **kwargs)
            except ClioAuthError as e:
                print(f"❌ Authentication error: {e}")
                if not continue_on_error:
                    raise
            except ClioUploadError as e:
                print(f"⚠️ Upload error: {e}")
                # Upload errors don't affect test results
            except ClioError as e:
                print(f"❌ Clio error: {e}")
                if not continue_on_error:
                    raise
            # Fix: the original also had `except Exception as e: raise`,
            # a no-op (unhandled exceptions propagate anyway) with an
            # unused binding; non-Clio errors still reach the caller.
        return wrapper
    return decorator


# Usage
@handle_clio_errors(continue_on_error=True)
async def my_test():
    monitor = ClioMonitor(api_key=os.getenv("CLIO_API_KEY"))
    # Test code here
async def test_with_error_context():
    """Capture context information when errors occur.

    On failure, the error plus test context is logged and dumped to a
    JSON file so the run can be diagnosed later.
    """
    # NOTE(review): assumes `logger`, `datetime`, `json` and the Clio
    # imports from the earlier examples are in scope — confirm when
    # copying this snippet standalone.
    context_info = {
        "test_name": "Error Context Test",
        "timestamp": datetime.now().isoformat(),
        "environment": os.getenv("ENVIRONMENT", "unknown")
    }
    try:
        monitor = ClioMonitor(api_key=os.getenv("CLIO_API_KEY"))
        async with async_playwright() as p:
            browser = await p.chromium.launch()
            context = await browser.new_context(record_video_dir="./videos")
            # Add context info to monitoring
            await monitor.start_run(
                context=context,
                automation_name=context_info["test_name"],
                success_criteria="Test error handling with context"
            )
            # Your test logic
            page = await context.new_page()
            await page.goto("https://example.com")
            await context.close()
    except Exception as e:
        # Log error with context
        error_details = {
            "error": str(e),
            "error_type": type(e).__name__,
            "context": context_info
        }
        logger.error(f"Test failed with context: {error_details}")
        # Save error context for debugging. Fix: the ISO timestamp
        # contains colons, which are invalid in Windows filenames, so
        # they are replaced before building the path.
        safe_stamp = context_info["timestamp"].replace(":", "-")
        with open(f"error_{safe_stamp}.json", "w") as f:
            json.dump(error_details, f, indent=2)
        # Don't re-raise Clio errors in production
        if not isinstance(e, ClioError) or os.getenv("ENVIRONMENT") == "development":
            raise
def create_debug_monitor():
    """Build a ClioMonitor tuned for the current environment.

    With DEBUG=true: verbose SDK logging, raised errors, a long timeout
    and a single attempt so failures surface immediately. Otherwise:
    silent errors, a short timeout and three attempts.
    """
    debug_enabled = os.getenv("DEBUG", "false").lower() == "true"

    if not debug_enabled:
        return ClioMonitor(
            api_key=os.getenv("CLIO_API_KEY"),
            raise_on_error=False,  # Silent in production
            timeout=60,
            retry_attempts=3,
        )

    # Enable debug logging
    logging.getLogger("clio").setLevel(logging.DEBUG)
    return ClioMonitor(
        api_key=os.getenv("CLIO_API_KEY"),
        raise_on_error=True,  # See all errors
        timeout=300,  # Longer timeout for debugging
        retry_attempts=1,  # Fail fast in debug mode
    )


# Usage
monitor = create_debug_monitor()
import pytest
from unittest.mock import patch
# Fix: ClioUploadError is raised in the second test below but was not
# imported in the original snippet.
from clio.exceptions import ClioAuthError, ClioUploadError

# NOTE(review): these asserts require test_with_optional_monitoring to
# return True on success — verify the helper actually returns a value.


@pytest.mark.asyncio
async def test_auth_error_handling():
    """Test handling of authentication errors."""
    with patch('clio.client.ClioMonitor.start_run') as mock_start_run:
        mock_start_run.side_effect = ClioAuthError("Invalid API key")
        # Test should handle auth error gracefully
        result = await test_with_optional_monitoring()
        # Test should still pass even with auth error
        assert result is True


@pytest.mark.asyncio
async def test_upload_error_handling():
    """Test handling of upload errors."""
    with patch('clio.uploader.Uploader.upload_files') as mock_upload:
        mock_upload.side_effect = ClioUploadError("Network timeout")
        # Upload error shouldn't fail the test
        result = await test_with_optional_monitoring()
        assert result is True
# Development
monitor = ClioMonitor(api_key="...", raise_on_error=True)
# Production
monitor = ClioMonitor(api_key="...", raise_on_error=False)
# Good - handle specific errors
except ClioAuthError:
handle_auth_error()
except ClioUploadError:
handle_upload_error()
# Avoid - too broad
except Exception:
# Might catch unrelated errors
except ClioAuthError:
print("❌ Authentication failed. Please check your API key.")
print("💡 Visit https://cliomonitoring.com to generate a new key")
except ClioRateLimitError:
print("⏳ Monthly upload limit reached.")
print("💡 Upgrade your plan or wait until next month")
# Include context but mask sensitive data
logger.error(f"Upload failed for test '{test_name}': {error}")
# Don't log full exception details in production
if os.getenv("ENVIRONMENT") == "development":
logger.exception("Full error details:")
import time  # Fix: required below but missing from the original snippet


class ClioCircuitBreaker:
    """Circuit breaker that disables monitoring after repeated failures.

    States:
        closed    - monitoring is attempted normally.
        open      - too many failures; skip monitoring until `timeout`
                    seconds have passed since the last failure.
        half-open - timeout elapsed; allow one trial attempt. A success
                    closes the circuit, another failure re-opens it.
    """

    def __init__(self, failure_threshold=5, timeout=300):
        # Consecutive failures recorded since the last success.
        self.failure_count = 0
        # Failures needed before the circuit opens.
        self.failure_threshold = failure_threshold
        # Seconds to wait (while open) before allowing a trial attempt.
        self.timeout = timeout
        self.last_failure_time = None
        self.state = "closed"  # closed, open, half-open

    def should_attempt_monitoring(self):
        """Return True if a monitoring attempt is currently allowed."""
        if self.state == "closed":
            return True
        elif self.state == "open":
            if time.time() - self.last_failure_time > self.timeout:
                self.state = "half-open"
                return True
            return False
        else:  # half-open
            return True

    def record_success(self):
        """Reset the breaker after a successful monitoring attempt."""
        self.failure_count = 0
        self.state = "closed"

    def record_failure(self):
        """Count a failure and open the circuit at the threshold."""
        self.failure_count += 1
        self.last_failure_time = time.time()
        if self.failure_count >= self.failure_threshold:
            self.state = "open"
# Usage
circuit_breaker = ClioCircuitBreaker()

if circuit_breaker.should_attempt_monitoring():
    try:
        monitor = ClioMonitor(api_key=os.getenv("CLIO_API_KEY"))
        # ... monitoring code ...
        circuit_breaker.record_success()
    except ClioError:
        circuit_breaker.record_failure()

Proper error handling ensures your automation tests remain reliable while still benefiting from Clio monitoring when it’s available.