# Error Handling
The Clio SDK provides comprehensive error handling to help you build resilient automation workflows. This guide covers error handling strategies and best practices.
## Error Handling Philosophy
The Clio SDK follows a graceful degradation approach (a minimal configuration sketch follows this list):
- Your automation tests should continue running even if monitoring fails
- Detailed error information is available when needed
- Production systems should handle errors silently
- Development environments should show full error details
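A minimal sketch of how this philosophy maps onto configuration, assuming an `ENVIRONMENT` variable distinguishes deployments (the variable name is illustrative; `raise_on_error` is covered in detail in the next section):

```python
import os

from clio import ClioMonitor

# Show full errors in development; degrade gracefully everywhere else.
# ENVIRONMENT is an assumed convention - adapt it to your project.
is_development = os.getenv("ENVIRONMENT") == "development"

monitor = ClioMonitor(
    api_key=os.getenv("CLIO_API_KEY"),
    raise_on_error=is_development,
)
```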
## Configuration-Based Error Handling
### Development Mode
Use `raise_on_error=True` during development to see detailed errors:
```python
from clio import ClioMonitor

# Development - see all errors
monitor = ClioMonitor(
    api_key="clio_your_key",
    raise_on_error=True  # Raises exceptions for debugging
)

# `context` is assumed to be an existing Playwright BrowserContext
try:
    await monitor.start_run(context, "Debug Test")
except Exception as e:
    print(f"Full error details: {e}")
    import traceback
    traceback.print_exc()
```

### Production Mode
Use `raise_on_error=False` (default) for graceful degradation:
```python
# Production - continue even if monitoring fails
monitor = ClioMonitor(
    api_key="clio_your_key",
    raise_on_error=False  # Default: log errors but don't raise
)

# Monitoring errors won't interrupt your automation
await monitor.start_run(context, "Production Test")
# Test continues even if monitoring fails
```

## Exception Types and Handling
### Specific Exception Handling

```python
import os

from clio import ClioMonitor
from clio.exceptions import (
    ClioAuthError,
    ClioUploadError,
    ClioRateLimitError,
    ClioConfigError,
    ClioError,
)

async def robust_automation():
    try:
        monitor = ClioMonitor(api_key=os.getenv("CLIO_API_KEY"))

        # `context` is assumed to be a Playwright BrowserContext created elsewhere
        await monitor.start_run(context, "Robust Test")

        # Your automation code here
        page = await context.new_page()
        await page.goto("https://example.com")

        await context.close()
    except ClioAuthError as e:
        print(f"❌ Authentication failed: {e}")
        print("💡 Check your API key and try again")
        # Continue with test, just without monitoring
    except ClioRateLimitError as e:
        print(f"⏳ Rate limit exceeded: {e}")
        print("💡 Wait or upgrade your plan")
        # Continue with test
    except ClioUploadError as e:
        print(f"📤 Upload failed: {e}")
        print("💡 Check network connection")
        # Test completed, just upload failed
    except ClioConfigError as e:
        print(f"⚙️ Configuration error: {e}")
        print("💡 Fix configuration and retry")
        return False  # Config errors should stop execution
    except ClioError as e:
        print(f"❌ Clio SDK error: {e}")
        # Other Clio errors - continue with test
    except Exception as e:
        print(f"💥 Unexpected error: {e}")
        # Non-Clio errors should be handled by your application
        raise

    print("✅ Test completed")
    return True
```
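As the handlers above illustrate, each exception type corresponds to a distinct failure class. This summary is inferred from the examples in this guide rather than from a formal API reference:

- `ClioConfigError` - invalid SDK configuration; usually worth stopping execution
- `ClioAuthError` - bad or missing API key; the test can continue without monitoring
- `ClioRateLimitError` - plan limits reached; wait, retry later, or upgrade
- `ClioUploadError` - recording upload failed; the test itself still completed
- `ClioError` - the general SDK error, useful as a catch-all for the cases above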
## Error Handling Patterns

### Pattern 1: Continue on Monitoring Failure
Best for most scenarios - test continues even if monitoring fails:
```python
import os

from playwright.async_api import async_playwright

from clio import ClioMonitor

async def test_with_optional_monitoring():
    """Test that continues even if monitoring fails"""
    monitor = None

    # Try to initialize monitoring
    try:
        monitor = ClioMonitor(
            api_key=os.getenv("CLIO_API_KEY"),
            raise_on_error=False  # Silent mode
        )
        print("📹 Monitoring enabled")
    except Exception as e:
        print(f"⚠️ Monitoring unavailable: {e}")
        print("🔄 Continuing without monitoring...")

    async with async_playwright() as p:
        browser = await p.chromium.launch()
        context = await browser.new_context(record_video_dir="./videos")

        # Try to start monitoring
        if monitor:
            try:
                await monitor.start_run(context, "Optional Monitoring Test")
            except Exception as e:
                print(f"⚠️ Could not start monitoring: {e}")
                # Continue anyway

        # Run your test regardless of monitoring status
        page = await context.new_page()
        await page.goto("https://example.com")

        # Verify your test logic
        title = await page.title()
        assert "Example Domain" in title

        await context.close()
        print("✅ Test passed")
        return True  # The mock tests later in this guide assert on this value
```

### Pattern 2: Fail Fast on Critical Errors
For scenarios where monitoring is required:
```python
async def test_requiring_monitoring():
    """Test that fails if monitoring cannot be enabled"""
    try:
        monitor = ClioMonitor(
            api_key=os.getenv("CLIO_API_KEY"),
            raise_on_error=True  # Fail fast
        )
    except ClioConfigError as e:
        print(f"❌ Configuration error - cannot proceed: {e}")
        return False
    except ClioAuthError as e:
        print(f"❌ Authentication failed - cannot proceed: {e}")
        return False

    # Monitoring is required for this test
    async with async_playwright() as p:
        browser = await p.chromium.launch()
        context = await browser.new_context(record_video_dir="./videos")

        await monitor.start_run(context, "Required Monitoring Test")

        # Test continues knowing monitoring is active
        page = await context.new_page()
        await page.goto("https://example.com")

        await context.close()
        return True
```

### Pattern 3: Retry Logic
For handling transient errors:
```python
import asyncio

from clio.exceptions import ClioUploadError, ClioRateLimitError

async def test_with_retries():
    """Test with retry logic for transient errors"""
    max_retries = 3

    for attempt in range(max_retries):
        try:
            monitor = ClioMonitor(api_key=os.getenv("CLIO_API_KEY"))

            async with async_playwright() as p:
                browser = await p.chromium.launch()
                context = await browser.new_context(record_video_dir="./videos")

                await monitor.start_run(context, f"Retry Test - Attempt {attempt + 1}")

                # Your test logic
                page = await context.new_page()
                await page.goto("https://example.com")

                await context.close()

            print("✅ Test completed successfully")
            return True
        except (ClioUploadError, ClioRateLimitError) as e:
            print(f"⚠️ Attempt {attempt + 1} failed: {e}")

            if attempt < max_retries - 1:
                wait_time = 2 ** attempt  # Exponential backoff
                print(f"⏳ Waiting {wait_time} seconds before retry...")
                await asyncio.sleep(wait_time)
            else:
                print("❌ All retry attempts failed")
                return False
        except Exception as e:
            print(f"❌ Non-retryable error: {e}")
            return False

    return False
```

## Logging Integration
### Structured Error Logging

```python
import logging
import os

from clio import ClioMonitor
from clio.exceptions import ClioAuthError, ClioUploadError, ClioError

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

async def test_with_logging():
    """Test with comprehensive error logging"""
    try:
        monitor = ClioMonitor(
            api_key=os.getenv("CLIO_API_KEY"),
            raise_on_error=False  # Log errors instead of raising
        )

        logger.info("🎬 Starting monitored automation")

        async with async_playwright() as p:
            browser = await p.chromium.launch()
            context = await browser.new_context(record_video_dir="./videos")

            await monitor.start_run(context, "Logged Test")

            # Your test logic with logging
            logger.info("🌐 Navigating to target site")
            page = await context.new_page()
            await page.goto("https://example.com")

            logger.info("✅ Navigation completed successfully")

            await context.close()
            logger.info("📤 Upload process initiated")

        logger.info("🎉 Test completed successfully")
    except ClioAuthError as e:
        logger.error(f"Authentication failed: {e}")
        # Send alert to monitoring system
        send_alert("clio_auth_failure", str(e))
    except ClioUploadError as e:
        logger.warning(f"Upload failed: {e}")
        # Upload failure is not critical - test still passed
    except ClioError as e:
        logger.error(f"Clio SDK error: {e}")
        # Log for debugging but don't fail the test
    except Exception as e:
        logger.error(f"Unexpected error: {e}", exc_info=True)
        raise  # Re-raise non-Clio errors

def send_alert(alert_type, message):
    """Send alert to monitoring system"""
    # Implement your alerting logic
    print(f"ALERT [{alert_type}]: {message}")
```

### Custom Error Handler

```python
from functools import wraps

from clio.exceptions import ClioAuthError, ClioUploadError, ClioError

def handle_clio_errors(continue_on_error=True):
    """Decorator for handling Clio errors"""
    def decorator(func):
        @wraps(func)
        async def wrapper(*args, **kwargs):
            try:
                return await func(*args, **kwargs)
            except ClioAuthError as e:
                print(f"❌ Authentication error: {e}")
                if not continue_on_error:
                    raise
            except ClioUploadError as e:
                print(f"⚠️ Upload error: {e}")
                # Upload errors don't affect test results
            except ClioError as e:
                print(f"❌ Clio error: {e}")
                if not continue_on_error:
                    raise
            except Exception:
                # Non-Clio errors are always re-raised
                raise
        return wrapper
    return decorator

# Usage
@handle_clio_errors(continue_on_error=True)
async def my_test():
    monitor = ClioMonitor(api_key=os.getenv("CLIO_API_KEY"))
    # Test code here
```
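If monitoring is mandatory for a particular test, the same decorator can be applied with `continue_on_error=False`, so authentication and other Clio errors propagate instead of being swallowed (a usage sketch built on the decorator above; the test name is illustrative):

```python
# Strict variant: Clio errors (other than upload errors) fail the test
@handle_clio_errors(continue_on_error=False)
async def my_strict_test():
    monitor = ClioMonitor(api_key=os.getenv("CLIO_API_KEY"))
    # Test code here
```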
## Error Context and Debugging

### Capture Error Context

```python
import json
import logging
import os
from datetime import datetime

from clio.exceptions import ClioError

logger = logging.getLogger(__name__)

async def test_with_error_context():
    """Capture context information when errors occur"""
    context_info = {
        "test_name": "Error Context Test",
        "timestamp": datetime.now().isoformat(),
        "environment": os.getenv("ENVIRONMENT", "unknown")
    }

    try:
        monitor = ClioMonitor(api_key=os.getenv("CLIO_API_KEY"))

        async with async_playwright() as p:
            browser = await p.chromium.launch()
            context = await browser.new_context(record_video_dir="./videos")

            # Add context info to monitoring
            await monitor.start_run(
                context=context,
                automation_name=context_info["test_name"],
                success_criteria="Test error handling with context"
            )

            # Your test logic
            page = await context.new_page()
            await page.goto("https://example.com")

            await context.close()
    except Exception as e:
        # Log error with context
        error_details = {
            "error": str(e),
            "error_type": type(e).__name__,
            "context": context_info
        }

        logger.error(f"Test failed with context: {error_details}")

        # Save error context for debugging (":" is replaced so the
        # timestamp forms a valid filename on every platform)
        safe_timestamp = context_info["timestamp"].replace(":", "-")
        with open(f"error_{safe_timestamp}.json", "w") as f:
            json.dump(error_details, f, indent=2)

        # Don't re-raise Clio errors in production
        if not isinstance(e, ClioError) or os.getenv("ENVIRONMENT") == "development":
            raise
```

### Debug Mode

```python
def create_debug_monitor():
    """Create monitor with debug configuration"""
    debug_enabled = os.getenv("DEBUG", "false").lower() == "true"

    if debug_enabled:
        # Enable debug logging
        logging.getLogger("clio").setLevel(logging.DEBUG)

        return ClioMonitor(
            api_key=os.getenv("CLIO_API_KEY"),
            raise_on_error=True,  # See all errors
            timeout=300,  # Longer timeout for debugging
            retry_attempts=1  # Fail fast in debug mode
        )
    else:
        return ClioMonitor(
            api_key=os.getenv("CLIO_API_KEY"),
            raise_on_error=False,  # Silent in production
            timeout=60,
            retry_attempts=3
        )

# Usage
monitor = create_debug_monitor()
```

## Testing Error Scenarios
### Mock Error Conditions

```python
import pytest
from unittest.mock import patch

from clio.exceptions import ClioAuthError, ClioUploadError

@pytest.mark.asyncio
async def test_auth_error_handling():
    """Test handling of authentication errors"""
    with patch('clio.client.ClioMonitor.start_run') as mock_start_run:
        mock_start_run.side_effect = ClioAuthError("Invalid API key")

        # Test should handle auth error gracefully
        result = await test_with_optional_monitoring()

        # Test should still pass even with auth error
        assert result is True

@pytest.mark.asyncio
async def test_upload_error_handling():
    """Test handling of upload errors"""
    with patch('clio.uploader.Uploader.upload_files') as mock_upload:
        mock_upload.side_effect = ClioUploadError("Network timeout")

        # Upload error shouldn't fail the test
        result = await test_with_optional_monitoring()
        assert result is True
```

## Best Practices
### 1. Use Appropriate Error Modes

```python
# Development
monitor = ClioMonitor(api_key="...", raise_on_error=True)

# Production
monitor = ClioMonitor(api_key="...", raise_on_error=False)
```

### 2. Handle Specific Exceptions

```python
# Good - handle specific errors
except ClioAuthError:
    handle_auth_error()
except ClioUploadError:
    handle_upload_error()

# Avoid - too broad
except Exception:
    ...  # Might catch unrelated errors
```

### 3. Provide User-Friendly Messages

```python
except ClioAuthError:
    print("❌ Authentication failed. Please check your API key.")
    print("💡 Visit https://cliomonitoring.com to generate a new key")
except ClioRateLimitError:
    print("⏳ Monthly upload limit reached.")
    print("💡 Upgrade your plan or wait until next month")
```

### 4. Log Errors Appropriately

```python
# Include context but mask sensitive data
logger.error(f"Upload failed for test '{test_name}': {error}")

# Don't log full exception details in production
if os.getenv("ENVIRONMENT") == "development":
    logger.exception("Full error details:")
```

### 5. Implement Circuit Breaker Pattern

```python
import time

class ClioCircuitBreaker:
    def __init__(self, failure_threshold=5, timeout=300):
        self.failure_count = 0
        self.failure_threshold = failure_threshold
        self.timeout = timeout
        self.last_failure_time = None
        self.state = "closed"  # closed, open, half-open

    def should_attempt_monitoring(self):
        if self.state == "closed":
            return True
        elif self.state == "open":
            if time.time() - self.last_failure_time > self.timeout:
                self.state = "half-open"
                return True
            return False
        else:  # half-open
            return True

    def record_success(self):
        self.failure_count = 0
        self.state = "closed"

    def record_failure(self):
        self.failure_count += 1
        self.last_failure_time = time.time()
        if self.failure_count >= self.failure_threshold:
            self.state = "open"

# Usage
circuit_breaker = ClioCircuitBreaker()

if circuit_breaker.should_attempt_monitoring():
    try:
        monitor = ClioMonitor(api_key=os.getenv("CLIO_API_KEY"))
        # ... monitoring code ...
        circuit_breaker.record_success()
    except ClioError:
        circuit_breaker.record_failure()
```

Proper error handling ensures your automation tests remain reliable while still benefiting from Clio monitoring when it’s available.