import reducto
from reducto import Reducto

try:
    # An invalid key fails on the first authenticated call, not at construction.
    client = Reducto(api_key="invalid_key")
    result = client.parse.run(input=upload.file_id)
except reducto.AuthenticationError as e:
    print(f"Authentication failed: {e}")
    print("Check your API key in Studio")
import time

import reducto
from reducto import Reducto

try:
    result = client.parse.run(input=upload.file_id)
except reducto.RateLimitError as e:
    print(f"Rate limit exceeded: {e}")
    # The SDK retries rate-limit responses on its own, but a manual
    # back-off like this also works if you want explicit control.
    time.sleep(1)
    # Retry the request here.
import reducto
from reducto import Reducto

try:
    result = client.parse.run(input=upload.file_id)
except reducto.APITimeoutError as e:
    print(f"Request timed out: {e}")
    print("Consider using async methods for long-running operations")
import reducto
from reducto import Reducto

try:
    result = client.parse.run(input=upload.file_id)
except reducto.APIStatusError as e:
    # Every non-2xx response surfaces both the code and the raw response.
    print(f"Status code: {e.status_code}")
    print(f"Response: {e.response}")
    # Inspect the response body when one is available.
    if hasattr(e.response, 'text'):
        print(f"Response body: {e.response.text}")
The SDK automatically retries certain errors by default (2 times with exponential backoff):
Connection errors (network issues)
408 Request Timeout
409 Conflict
429 Rate Limit
>=500 Internal Server errors
You can configure retry behavior:
from reducto import Reducto

# Configure the default for all requests
client = Reducto(
    max_retries=0,  # Disable retries (default is 2)
)

# Or configure per-request
client.with_options(max_retries=5).parse.run(
    input=upload.file_id
)
For large documents like lengthy PDFs or spreadsheets with many sheets, we recommend using the async endpoint. The async endpoint lets you submit a job and poll for results without holding an open connection, which is ideal for documents that take longer to process. If you need to use the sync endpoint with a custom timeout, set max_retries=0 to disable automatic retries. Otherwise, if your request times out while the server is still processing, the SDK will retry and create additional processing jobs for the same document.
from reducto import Reducto

# If using sync with a custom timeout, disable retries so a timed-out
# request is not resubmitted while the server is still processing it.
client = Reducto(timeout=300.0, max_retries=0)
from pathlib import Path

import reducto
from reducto import Reducto


def parse_with_comprehensive_error_handling(client, upload):
    """Parse an uploaded document, logging and re-raising every SDK error.

    Args:
        client: A configured ``Reducto`` client.
        upload: An upload result whose ``file_id`` identifies the document.

    Returns:
        The parse result from ``client.parse.run``.

    Raises:
        reducto.APIError subclasses (and any unexpected exception) are
        always re-raised after being reported, so callers keep full control.
    """
    try:
        result = client.parse.run(input=upload.file_id)
        return result
    except reducto.AuthenticationError as e:
        print(f"Authentication failed: {e}")
        print("Check your API key in Studio")
        raise  # Don't retry auth errors
    except reducto.RateLimitError as e:
        print(f"Rate limited: {e}")
        # SDK will automatically retry, but you can also handle manually
        raise
    except reducto.APITimeoutError as e:
        # NOTE: APITimeoutError subclasses APIConnectionError in
        # Stainless-generated SDKs, so it must be caught BEFORE
        # APIConnectionError or this handler is unreachable.
        print(f"Request timed out: {e}")
        print("Consider using async methods or increasing timeout")
        raise
    except reducto.APIConnectionError as e:
        print(f"Connection failed: {e}")
        print(f"Underlying error: {e.__cause__}")
        raise
    except reducto.APIStatusError as e:
        print(f"API error {e.status_code}: {e}")
        if e.status_code >= 500:
            print("Server error - may be transient")
        raise
    except Exception as e:
        print(f"Unexpected error: {e}")
        raise


# Use the function
client = Reducto()
upload = client.upload(file=Path("document.pdf"))
result = parse_with_comprehensive_error_handling(client, upload)