Troubleshooting - Black-Lights/planetscope-py GitHub Wiki

Troubleshooting

Comprehensive troubleshooting guide for planetscope-py users covering installation, authentication, API communication, performance optimization, and advanced debugging techniques.

Quick Navigation

| Issue Category | Common Problems | Quick Solutions |
|---|---|---|
| Installation Issues | ImportError, dependency conflicts, permission errors | `pip install -e .`, virtual environments |
| Authentication Issues | No API key, invalid key, network errors | Set `PL_API_KEY`, check `~/.planet.json` |
| Validation Errors | Invalid geometry, ROI too large, date format | Fix polygon closure, reduce area, correct dates |
| Planet API Communication | Rate limits, no results, circuit breaker | Automatic rate limiting, adjust search criteria |
| Performance Issues | Slow searches, high memory usage | Batch processing, filter scenes early |
| Configuration Issues | Config not loading, environment variables | Validate JSON, check variable names |
| Development Issues | Test failures, import errors, type checking | Install dev requirements, development mode |
| Advanced Troubleshooting | Debug logging, profiling, system requirements | Enable debug mode, performance monitoring |
| Emergency Recovery | Complete system reset, corrupted configs | Fresh install, reset all settings |

Quick Diagnostics

Run the Health Check

First, run this comprehensive diagnostic script:

#!/usr/bin/env python3
"""planetscope-py health check and diagnostics"""

import sys
import os
import platform
from datetime import datetime

def run_diagnostics():
    """Run a full planetscope-py environment health check.

    Checks, in order: Python version, package imports, required
    dependencies, configuration, authentication, core utilities, Planet
    API integration, and (when psutil is available) system resources.

    Returns:
        bool: True when all mandatory checks pass, False otherwise.
        Authentication and system-resource problems only warn; they do
        not fail the overall check.
    """
    print("planetscope-py Health Check")
    print("=" * 50)
    print(f"Timestamp: {datetime.now().isoformat()}")
    print(f"Platform: {platform.system()} {platform.release()}")
    print()

    # 1. Check Python version
    print(f"Python version: {sys.version}")
    if sys.version_info < (3, 10):
        print("ERROR: Python 3.10+ required")
        return False
    else:
        print("SUCCESS: Python version OK")

    # 2. Test imports
    try:
        import planetscope_py as psp
        print(f"SUCCESS: planetscope-py v{psp.__version__} imported")

        from planetscope_py import PlanetAuth, PlanetScopeConfig, PlanetScopeQuery, MetadataProcessor
        from planetscope_py.utils import validate_geometry
        from planetscope_py.rate_limiter import RateLimiter
        print("SUCCESS: Core components imported")

    except ImportError as e:
        print(f"ERROR: Import error: {e}")
        return False

    # 3. Test dependencies
    missing_deps = []
    dependencies = {
        'requests': 'HTTP client',
        'shapely': 'Geometry processing',
        'pyproj': 'Coordinate transformations',
        'numpy': 'Numerical computing',
        'pandas': 'Data analysis'
    }

    for dep, desc in dependencies.items():
        try:
            module = __import__(dep)
            version = getattr(module, '__version__', 'unknown')
            print(f"SUCCESS: {dep} {version} ({desc})")
        except ImportError:
            print(f"ERROR: {dep} missing ({desc})")
            missing_deps.append(dep)

    if missing_deps:
        print(f"Install missing dependencies: pip install {' '.join(missing_deps)}")
        return False

    # 4. Test configuration
    try:
        config = PlanetScopeConfig()
        print(f"SUCCESS: Configuration loaded from {config.base_url}")
        print(f"  Rate limits: {config.rate_limits}")
        print(f"  Timeouts: {config.timeouts}")
    except Exception as e:
        print(f"ERROR: Configuration error: {e}")
        return False

    # 5. Test authentication (non-fatal: a missing key only warns)
    try:
        auth = PlanetAuth()
        if auth.api_key:
            key_preview = f"{auth.api_key[:8]}...{auth.api_key[-4:]}" if len(auth.api_key) > 12 else "****"
            print(f"SUCCESS: API key configured: {key_preview}")

            # Test API connection
            session = auth.get_session()
            response = session.get("https://api.planet.com/data/v1/", timeout=10)
            print(f"SUCCESS: API connection: HTTP {response.status_code}")

        else:
            print("WARNING: No API key configured")
            print("  Set PL_API_KEY environment variable or create ~/.planet.json")

    except Exception as e:
        print(f"WARNING: Authentication issue: {e}")
        print("  This is OK if you haven't set up your API key yet")

    # 6. Test core functionality
    try:
        # Test utilities
        from planetscope_py.utils import validate_date_range, calculate_area_km2
        start, end = validate_date_range("2025-01-01", "2025-12-31")

        # GeoJSON Polygon coordinates are a list of linear rings; each
        # ring must close by repeating its first coordinate at the end.
        test_geom = {
            "type": "Polygon",
            "coordinates": [[[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]]]
        }
        area = calculate_area_km2(test_geom)

        print("SUCCESS: Core utilities working")
        print(f"  Date validation: {start} to {end}")
        print(f"  Area calculation: {area:.2f} km²")

    except Exception as e:
        print(f"ERROR: Utility error: {e}")
        return False

    # 7. Test Planet API integration
    try:
        query = PlanetScopeQuery()
        print("SUCCESS: Query system initialized")

        processor = MetadataProcessor()
        print("SUCCESS: Metadata processor initialized")

        rate_limiter = RateLimiter()
        status = rate_limiter.get_current_rate_status()
        print(f"SUCCESS: Rate limiter active ({len(status)} endpoints)")

    except Exception as e:
        print(f"ERROR: API integration error: {e}")
        return False

    # 8. System resources check (optional; requires psutil)
    try:
        import psutil
        memory = psutil.virtual_memory()
        disk = psutil.disk_usage('/')

        print("SUCCESS: System resources")
        print(f"  Memory: {memory.total // (1024**3)} GB total, {memory.available // (1024**3)} GB available")
        print(f"  Disk: {disk.free // (1024**3)} GB free")

        if memory.available < 1024**3:  # Less than 1GB
            print("WARNING: Low available memory")
        if disk.free < 5 * 1024**3:  # Less than 5GB
            print("WARNING: Low disk space")

    except ImportError:
        print("INFO: psutil not available for system monitoring")
    except Exception as e:
        print(f"WARNING: System check error: {e}")

    print("\nHealth check complete!")
    return True

if __name__ == "__main__":
    # Run the diagnostics and exit non-zero on failure so CI can gate on it.
    passed = run_diagnostics()
    verdict = "PASS" if passed else "FAIL"
    print(f"\nResult: {verdict}")
    if not passed:
        print("See troubleshooting guide: https://github.com/Black-Lights/planetscope-py/wiki/Troubleshooting")
    sys.exit(0 if passed else 1)

Installation Issues

Problem: ImportError when importing planetscope_py

Error:

ImportError: No module named 'planetscope_py'

Solutions:

  1. Check installation:
pip list | grep planetscope
# Should show: planetscope-py x.x.x
  1. Reinstall package:
pip uninstall planetscope-py -y
pip install planetscope-py
  1. Virtual environment issues:
# Check which Python/pip you're using
which python
which pip

# Activate virtual environment
source venv/bin/activate  # Linux/macOS
venv\Scripts\activate     # Windows

# Verify you're in the right environment
pip list | grep planetscope
  1. Development installation:
git clone https://github.com/Black-Lights/planetscope-py.git
cd planetscope-py
pip install -e .

# Verify installation
python -c "import planetscope_py; print(f'Installed: {planetscope_py.__version__}')"

Problem: Shapely installation fails

Error:

ERROR: Failed building wheel for shapely
Microsoft Visual C++ 14.0 is required

Solutions:

  1. Install system dependencies:
# Ubuntu/Debian
sudo apt-get update
sudo apt-get install libgeos-dev libproj-dev

# CentOS/RHEL
sudo yum install geos-devel proj-devel

# macOS
brew install geos proj

# Windows - use conda
conda install -c conda-forge shapely
  1. Use conda-forge (recommended for Windows):
# Create conda environment
conda create -n planetscope python=3.10
conda activate planetscope

# Install geospatial packages
conda install -c conda-forge shapely pyproj geopandas

# Install planetscope-py
pip install planetscope-py
  1. Pre-compiled wheels:
pip install --upgrade pip setuptools wheel
pip install shapely --no-cache-dir --force-reinstall

Problem: Permission errors during installation

Error:

PermissionError: [Errno 13] Permission denied: '/usr/local/lib/python3.x/site-packages'

Solutions:

  1. Use user installation:
pip install --user planetscope-py
  1. Use virtual environment (recommended):
python -m venv planetscope_env
source planetscope_env/bin/activate  # Linux/macOS
planetscope_env\Scripts\activate     # Windows

pip install planetscope-py
  1. Fix permissions (Linux/macOS):
sudo chown -R $USER:$USER ~/.local/
pip install --user planetscope-py

Problem: Package conflicts

Error:

ERROR: pip's dependency resolver does not currently take into account all the packages that are installed

Solutions:

  1. Create fresh environment:
python -m venv fresh_env
source fresh_env/bin/activate
pip install --upgrade pip
pip install planetscope-py
  1. Use pip-tools for dependency management:
pip install pip-tools
echo "planetscope-py" > requirements.in
pip-compile requirements.in
pip install -r requirements.txt
  1. Force reinstall with no dependencies:
pip install --force-reinstall --no-deps planetscope-py
pip install requests shapely pyproj numpy pandas

Authentication Issues

Problem: "No Planet API key found"

Error:

AuthenticationError: No Planet API key found. Please set PL_API_KEY environment variable, create ~/.planet.json config file, or pass api_key parameter.

Solutions:

  1. Set environment variable (Linux/macOS):
export PL_API_KEY="PLAKxxxxxxxxxxxxxxxxxxxxx"

# Make permanent by adding to shell profile
echo 'export PL_API_KEY="PLAKxxxxxxxxxxxxxxxxxxxxx"' >> ~/.bashrc
source ~/.bashrc
  1. Set environment variable (Windows):
REM Command Prompt
set PL_API_KEY=PLAKxxxxxxxxxxxxxxxxxxxxx

REM PowerShell
$env:PL_API_KEY="PLAKxxxxxxxxxxxxxxxxxxxxx"

REM Permanent (Command Prompt as Administrator)
setx PL_API_KEY "PLAKxxxxxxxxxxxxxxxxxxxxx"
  1. Create config file:
# Create config file
echo '{"api_key": "PLAKxxxxxxxxxxxxxxxxxxxxx"}' > ~/.planet.json

# Set secure permissions
chmod 600 ~/.planet.json

# Verify
cat ~/.planet.json
  1. Pass directly in code:
from planetscope_py import PlanetAuth
auth = PlanetAuth(api_key="PLAKxxxxxxxxxxxxxxxxxxxxx")

# Or with PlanetScopeQuery
from planetscope_py import PlanetScopeQuery
query = PlanetScopeQuery(api_key="PLAKxxxxxxxxxxxxxxxxxxxxx")

Problem: "Invalid Planet API key"

Error:

AuthenticationError: Invalid Planet API key

Solutions:

  1. Verify API key format:
api_key = "your_key_here"
print(f"Key length: {len(api_key)}")
print(f"Starts with PLAK: {api_key.startswith('PLAK')}")
print(f"Key preview: {api_key[:8]}...{api_key[-4:]}")

# Valid format: PLAKxxxxxxxxxxxxxxxxxxxxxxxx (32 characters total)
  1. Test manually with curl:
curl -u "PLAKxxxxxxxxxxxxxxxxxxxxx:" https://api.planet.com/data/v1/
# Should return JSON with API information
  1. Check Planet account:

    • Visit Planet Account Settings
    • Verify your API key is active and not expired
    • Check subscription status
    • Regenerate key if needed
  2. Debug authentication step by step:

import os
import requests

# Check environment
api_key = os.environ.get('PL_API_KEY')
print(f"Environment key: {'SET' if api_key else 'NOT SET'}")

# Check config file
from pathlib import Path
config_path = Path.home() / ".planet.json"
print(f"Config file exists: {config_path.exists()}")

if config_path.exists():
    import json
    with open(config_path) as f:
        config = json.load(f)
        print(f"Config has api_key: {'api_key' in config}")

# Test direct authentication
if api_key:
    response = requests.get(
        "https://api.planet.com/data/v1/",
        auth=(api_key, "")
    )
    print(f"Direct auth test: {response.status_code}")

Problem: Network connectivity issues

Error:

requests.exceptions.ConnectionError: Failed to establish a new connection

Solutions:

  1. Basic connectivity test:
# Test DNS resolution
nslookup api.planet.com

# Test basic connectivity
ping api.planet.com

# Test HTTPS connection
curl -I https://api.planet.com/data/v1/
  1. Check firewall and proxy:
import requests
import os

# Test direct connection
try:
    response = requests.get("https://api.planet.com/data/v1/", timeout=10)
    print(f"Direct connection: {response.status_code}")
except Exception as e:
    print(f"Direct connection failed: {e}")

# Test with proxy (if behind corporate firewall)
proxies = {
    'http': os.environ.get('HTTP_PROXY'),
    'https': os.environ.get('HTTPS_PROXY')
}

if any(proxies.values()):
    try:
        response = requests.get(
            "https://api.planet.com/data/v1/", 
            proxies=proxies, 
            timeout=10
        )
        print(f"Proxy connection: {response.status_code}")
    except Exception as e:
        print(f"Proxy connection failed: {e}")
  1. Configure proxy for planetscope-py:
import os

# Set proxy environment variables
os.environ['HTTP_PROXY'] = 'http://proxy.company.com:8080'
os.environ['HTTPS_PROXY'] = 'http://proxy.company.com:8080'

# Now use planetscope-py normally
from planetscope_py import PlanetAuth
auth = PlanetAuth()

Problem: SSL/TLS certificate errors

Error:

requests.exceptions.SSLError: [SSL: CERTIFICATE_VERIFY_FAILED]

Solutions:

  1. Update certificates:
# macOS
/Applications/Python\ 3.x/Install\ Certificates.command

# Ubuntu/Debian
sudo apt-get update && sudo apt-get install ca-certificates

# CentOS/RHEL
sudo yum update ca-certificates
  1. Check system time:
# Ensure system clock is correct (SSL certificates are time-sensitive)
date
# If incorrect, update system time
  1. Test SSL connection:
# Test SSL handshake
openssl s_client -connect api.planet.com:443 -servername api.planet.com

# Should show certificate chain and "Verify return code: 0 (ok)"
  1. Python SSL debugging:
import ssl
import socket

# Check SSL context
context = ssl.create_default_context()
print(f"SSL protocol: {context.protocol}")
print(f"Check hostname: {context.check_hostname}")
print(f"Verify mode: {context.verify_mode}")

# Test connection
try:
    with socket.create_connection(('api.planet.com', 443), timeout=10) as sock:
        with context.wrap_socket(sock, server_hostname='api.planet.com') as ssock:
            print(f"SSL connection successful")
            print(f"Protocol: {ssock.version()}")
            cert = ssock.getpeercert()
            print(f"Certificate subject: {cert['subject']}")
except Exception as e:
    print(f"SSL connection failed: {e}")

Validation Errors

Problem: Invalid geometry errors

Error:

ValidationError: Invalid geometry: Polygon coordinates must form a closed ring

Solutions:

  1. Fix polygon closure:
# Bad: Not closed (first point != last point)
bad_coords = [
    [-122.5, 37.7], [-122.3, 37.7], 
    [-122.3, 37.8], [-122.5, 37.8]
]

# Good: Closed (first point == last point)
good_coords = [
    [-122.5, 37.7], [-122.3, 37.7], 
    [-122.3, 37.8], [-122.5, 37.8], [-122.5, 37.7]
]

geometry = {
    "type": "Polygon",
    "coordinates": [good_coords]
}
  1. Fix coordinate order and bounds:
# NOTE: the original snippet was mangled by wiki-link markup; the lists
# below are restored to valid Python (closed rings, list-of-pairs form).

# Bad: Invalid longitude/latitude values
bad_coords = [[200, 40], [-120, 40], [-120, 50], [200, 50], [200, 40]]

# Good: Valid coordinates (longitude: -180 to 180, latitude: -90 to 90)
good_coords = [[-120, 40], [-110, 40], [-110, 50], [-120, 50], [-120, 40]]

# Bad: Wrong coordinate order (lat, lon instead of lon, lat)
wrong_order = [[37.7, -122.5], [37.7, -122.3], [37.8, -122.3], [37.8, -122.5], [37.7, -122.5]]

# Good: Correct order [longitude, latitude]
correct_order = [[-122.5, 37.7], [-122.3, 37.7], [-122.3, 37.8], [-122.5, 37.8], [-122.5, 37.7]]
  1. Validate geometry before use:
from planetscope_py.utils import validate_geometry
from planetscope_py.exceptions import ValidationError

def safe_geometry_validation(geometry):
    """Validate a GeoJSON geometry, printing hints instead of raising.

    Returns the validated geometry on success, or None when validation
    fails (after printing the error and any applicable suggestions).
    """
    try:
        result = validate_geometry(geometry)
    except ValidationError as err:
        print(f"ERROR: Geometry validation failed: {err.message}")
        print(f"Details: {err.details}")

        # Offer targeted hints for the most common failure modes.
        lowered = err.message.lower()
        if "closed" in lowered:
            print("Suggestion: Ensure first and last coordinates are identical")
        elif "coordinate" in lowered:
            print("Suggestion: Check coordinate order [longitude, latitude]")
            print("Suggestion: Ensure longitude is between -180 and 180")
            print("Suggestion: Ensure latitude is between -90 and 90")

        return None

    print("SUCCESS: Geometry is valid")
    return result

# Test your geometry
test_geom = {
    "type": "Polygon",
    "coordinates": [your_coordinates]
}

validated = safe_geometry_validation(test_geom)

Problem: ROI area too large

Error:

ValidationError: ROI area too large: 50000.00 km² exceeds maximum allowed area of 25000.00 km²

Solutions:

  1. Check area before validation:
from planetscope_py.utils import calculate_area_km2

# Calculate area first
area_km2 = calculate_area_km2(your_geometry)
print(f"Geometry area: {area_km2:.2f} km²")

# Default limit is 25,000 km²
if area_km2 > 25000:
    print(f"Area too large by {area_km2 - 25000:.2f} km²")
    print("Consider splitting into smaller regions")
  1. Split large geometries:
from shapely.geometry import shape
import numpy as np

def split_large_geometry(geometry, max_area_km2=20000):
    """Split large geometry into smaller pieces.

    Overlays a regular grid on the polygon's bounding box and returns
    the grid cells that intersect the original geometry. Note that the
    full (unclipped) grid cells are returned, so pieces may extend
    beyond the input polygon's outline.

    Args:
        geometry: GeoJSON geometry dict; only "Polygon" is supported.
        max_area_km2: Target maximum area per piece, in km².

    Returns:
        list: GeoJSON Polygon dicts (the input itself if small enough).

    Raises:
        ValueError: If a non-Polygon geometry needs splitting.
    """
    
    # NOTE(review): assumes calculate_area_km2 is in scope from the earlier
    # `from planetscope_py.utils import calculate_area_km2` snippet — confirm.
    area = calculate_area_km2(geometry)
    if area <= max_area_km2:
        return [geometry]  # No need to split
    
    # Simple grid-based splitting for polygons
    if geometry['type'] == 'Polygon':
        geom = shape(geometry)
        bounds = geom.bounds  # (minx, miny, maxx, maxy)
        
        # Calculate grid size: enough cells for ~max_area_km2 each,
        # arranged as a square grid.
        pieces_needed = int(np.ceil(area / max_area_km2))
        grid_size = int(np.ceil(np.sqrt(pieces_needed)))
        
        # Create grid
        x_step = (bounds[2] - bounds[0]) / grid_size
        y_step = (bounds[3] - bounds[1]) / grid_size
        
        pieces = []
        for i in range(grid_size):
            for j in range(grid_size):
                minx = bounds[0] + i * x_step
                miny = bounds[1] + j * y_step
                maxx = minx + x_step
                maxy = miny + y_step
                
                # Create grid cell (closed ring: first point repeated last)
                cell_coords = [
                    [minx, miny], [maxx, miny], 
                    [maxx, maxy], [minx, maxy], [minx, miny]
                ]
                
                cell_geom = {
                    "type": "Polygon",
                    "coordinates": [cell_coords]
                }
                
                # Check if cell intersects with original geometry
                from shapely.geometry import Polygon
                cell_shape = Polygon(cell_coords)
                if geom.intersects(cell_shape):
                    intersection = geom.intersection(cell_shape)
                    if intersection.area > 0:
                        # Keep only cells with a real (non-degenerate) overlap.
                        pieces.append(cell_geom)
        
        return pieces
    
    else:
        raise ValueError("Splitting only supported for Polygon geometries")

# Usage
large_geometry = your_large_polygon
pieces = split_large_geometry(large_geometry)
print(f"Split into {len(pieces)} pieces")

for i, piece in enumerate(pieces):
    area = calculate_area_km2(piece)
    print(f"Piece {i+1}: {area:.2f} km²")
  1. Increase area limit (if justified):
from planetscope_py import PlanetScopeConfig

# Create custom configuration with higher limit
config = PlanetScopeConfig()
config.set('max_roi_area_km2', 50000)  # Increase to 50,000 km²

# Use with PlanetScopeQuery
from planetscope_py import PlanetScopeQuery
query = PlanetScopeQuery(config=config.to_dict())

# Note: Very large areas may cause slow API responses or timeouts

Problem: Invalid date format

Error:

ValidationError: Invalid date format. Expected YYYY-MM-DD or YYYY-MM-DDTHH:MM:SS

Solutions:

  1. Use correct date formats:
from planetscope_py.utils import validate_date_range

# Good formats
start, end = validate_date_range("2025-01-01", "2025-12-31")
start, end = validate_date_range("2025-01-01T00:00:00Z", "2025-12-31T23:59:59Z")

# Also acceptable
start, end = validate_date_range("2025-01-01T00:00:00", "2025-12-31T23:59:59")

# Bad formats (will cause errors)
# "2025-13-01"     # Invalid month
# "01-01-2025"     # Wrong order  
# "2025/01/01"     # Wrong separator
# "Jan 1, 2025"    # Text format
  1. Convert from different formats:
from datetime import datetime

# Convert from different formats
def convert_to_planet_date(date_input):
    """Normalize assorted date representations to Planet API format.

    datetime objects become 'YYYY-MM-DDTHH:MM:SS'; strings in any of
    the recognized layouts become 'YYYY-MM-DD'.

    Raises:
        ValueError: If the string cannot be parsed, or the input type
            is neither str nor datetime.
    """
    if isinstance(date_input, datetime):
        return date_input.strftime("%Y-%m-%dT%H:%M:%S")

    if not isinstance(date_input, str):
        raise ValueError(f"Unsupported date type: {type(date_input)}")

    # Attempt each known layout in priority order; first match wins.
    known_layouts = (
        "%Y-%m-%d",
        "%Y-%m-%dT%H:%M:%S",
        "%Y-%m-%dT%H:%M:%SZ",
        "%m/%d/%Y",
        "%d/%m/%Y",
        "%Y/%m/%d",
    )
    for layout in known_layouts:
        try:
            parsed = datetime.strptime(date_input, layout)
        except ValueError:
            continue
        return parsed.strftime("%Y-%m-%d")

    raise ValueError(f"Unable to parse date: {date_input}")

# Examples
dates = ["01/15/2025", "15/01/2025", "2025/01/15", "Jan 1, 2025"]
for date_str in dates:
    try:
        converted = convert_to_planet_date(date_str)
        print(f"{date_str} -> {converted}")
    except ValueError as e:
        print(f"{date_str} -> ERROR: {e}")
  1. Validate date ranges:
from datetime import datetime, timedelta

def validate_date_range_logic(start_date, end_date):
    """Sanity-check a date range and return it as datetime objects.

    Accepts ISO-format strings (optionally with a trailing 'Z') or
    datetime instances. Raises ValueError when start is after end, and
    prints advisory warnings for suspicious ranges.
    """
    def _as_datetime(value):
        # Strings may carry a UTC 'Z' suffix that fromisoformat rejects.
        if isinstance(value, str):
            return datetime.fromisoformat(value.replace('Z', ''))
        return value

    start_dt = _as_datetime(start_date)
    end_dt = _as_datetime(end_date)

    # Order check: an inverted range is a hard error.
    if start_dt > end_dt:
        raise ValueError(f"Start date {start_dt} is after end date {end_dt}")

    # Soft warnings for ranges that usually indicate a mistake.
    span = end_dt - start_dt
    if span.days > 366:
        print(f"WARNING: Large date range ({span.days} days)")
        print("Consider smaller ranges for better performance")

    if span.days < 1:
        print(f"WARNING: Very short date range ({span.days} days)")

    if start_dt > datetime.now():
        print(f"WARNING: Start date is in the future")

    return start_dt, end_dt

# Test your dates
start, end = validate_date_range_logic("2025-01-01", "2025-12-31")
print(f"Validated range: {start.date()} to {end.date()}")

Planet API Communication Issues

Problem: Rate limit exceeded

Error:

RateLimitError: Rate limit exceeded for search endpoint. Retry after 60 seconds.

Solutions:

  1. Check rate limit status:
from planetscope_py import PlanetScopeQuery

query = PlanetScopeQuery()
status = query.rate_limiter.get_current_rate_status()

print("Rate Limit Status:")
for endpoint, info in status.items():
    capacity_used = info['capacity_used']
    status_icon = "🔴" if capacity_used > 0.9 else "🟡" if capacity_used > 0.7 else "🟢"
    print(f"  {endpoint}: {capacity_used:.1%} capacity used")
    print(f"    Current rate: {info['current_rate']:.1f}/{info['limit']} req/min")
    
    if capacity_used > 0.8:
        print(f"    WARNING: High usage on {endpoint} endpoint")
  1. Implement proper retry logic:
import time
from planetscope_py.exceptions import RateLimitError

def search_with_retry(query, geometry, start_date, end_date, max_retries=3):
    """Run query.search_scenes, retrying automatically on rate limits.

    Sleeps for the server-suggested interval (default 60s) between
    attempts; re-raises after max_retries rate-limit failures. Any
    other exception is reported and re-raised immediately.
    """
    attempt = 0
    while attempt < max_retries:
        try:
            return query.search_scenes(
                geometry=geometry,
                start_date=start_date,
                end_date=end_date
            )
        except RateLimitError as e:
            # Last attempt: give up and propagate the rate-limit error.
            if attempt == max_retries - 1:
                print(f"Failed after {max_retries} attempts")
                raise
            retry_after = getattr(e, 'retry_after', 60)
            print(f"Rate limited. Waiting {retry_after}s before retry {attempt + 1}/{max_retries}")
            time.sleep(retry_after)
        except Exception as e:
            print(f"Non-rate-limit error: {e}")
            raise
        attempt += 1
    return None

# Usage
results = search_with_retry(query, geometry, "2025-01-01", "2025-01-31")
  1. Monitor and optimize request patterns:
def optimize_batch_requests(query, geometries, start_date, end_date):
    """Search a batch of geometries while respecting rate-limit headroom.

    Chooses a longer inter-request pause when the search endpoint is
    already busy, waits for capacity before each call, and returns one
    record per geometry describing success or failure.
    """
    outcomes = []
    total = len(geometries)

    # Pick the inter-request pause based on current endpoint load.
    initial = query.rate_limiter.get_current_rate_status()
    used = initial.get('search', {}).get('capacity_used', 0)
    if used > 0.7:
        print("High capacity usage detected. Adding delays...")
        pause = 2.0
    else:
        pause = 0.5

    for idx, geom in enumerate(geometries):
        print(f"Processing geometry {idx+1}/{total}")

        try:
            # Block until the search endpoint has headroom.
            query.rate_limiter.wait_for_capacity("search", required_capacity=0.8)

            found = query.search_scenes(
                geometry=geom,
                start_date=start_date,
                end_date=end_date
            )

            outcomes.append({
                'geometry_index': idx,
                'success': True,
                'result': found,
                'scene_count': len(found['features'])
            })
            print(f"  Found {len(found['features'])} scenes")

        except Exception as exc:
            outcomes.append({
                'geometry_index': idx,
                'success': False,
                'error': str(exc)
            })
            print(f"  ERROR: {exc}")

        # Pause between requests (never after the final one), and report
        # the endpoint load every fifth request.
        if idx < total - 1:
            time.sleep(pause)

            if (idx + 1) % 5 == 0:
                snapshot = query.rate_limiter.get_current_rate_status()
                used = snapshot.get('search', {}).get('capacity_used', 0)
                print(f"  Rate limit capacity: {used:.1%}")

    return outcomes

# Usage
batch_results = optimize_batch_requests(query, geometries, "2025-01-01", "2025-01-31")

# Summary
successful = [r for r in batch_results if r['success']]
failed = [r for r in batch_results if not r['success']]
print(f"\nBatch complete: {len(successful)}/{len(batch_results)} successful")

Problem: No search results returned

Error:

No scenes found matching the specified criteria

Solutions:

  1. Debug search parameters:
def debug_search_parameters(query, geometry, start_date, end_date, **kwargs):
    """Diagnose why a scene search returned no results.

    Prints an analysis of the geometry, date range, and filter
    arguments, then reruns the search with fully relaxed criteria
    (all cloud cover, PSScene only) to show what imagery is available.

    Args:
        query: PlanetScopeQuery instance used to run the searches.
        geometry: GeoJSON geometry dict (Point or Polygon expected).
        start_date: ISO-format start date string.
        end_date: ISO-format end date string.
        **kwargs: The original filter arguments being debugged
            (cloud_cover_max, sun_elevation_min, item_types).

    Returns:
        The relaxed search response (dict with a 'features' list).
    """
    
    from planetscope_py.utils import calculate_area_km2
    
    print("Search Parameter Analysis")
    print("=" * 40)
    
    # Geometry analysis
    area = calculate_area_km2(geometry)
    print(f"Search area: {area:.2f} km²")
    
    if geometry['type'] == 'Point':
        coords = geometry['coordinates']
        # GeoJSON order is [longitude, latitude]
        print(f"Search location: {coords[1]:.4f}°N, {coords[0]:.4f}°E")
    elif geometry['type'] == 'Polygon':
        from shapely.geometry import shape
        geom = shape(geometry)
        centroid = geom.centroid
        print(f"Polygon centroid: {centroid.y:.4f}°N, {centroid.x:.4f}°E")
        print(f"Polygon bounds: {geom.bounds}")
    
    # Date range analysis ('Z' suffix stripped for fromisoformat)
    from datetime import datetime
    start_dt = datetime.fromisoformat(start_date.replace('Z', ''))
    end_dt = datetime.fromisoformat(end_date.replace('Z', ''))
    days = (end_dt - start_dt).days
    
    print(f"Date range: {start_date} to {end_date} ({days} days)")
    
    # Check if date range is reasonable
    now = datetime.now()
    if start_dt > now:
        print("WARNING: Start date is in the future")
    if end_dt > now:
        print("INFO: End date includes future dates")
    
    # Filter analysis: summarize which optional filters the caller set
    filters_applied = []
    if 'cloud_cover_max' in kwargs:
        filters_applied.append(f"cloud_cover <= {kwargs['cloud_cover_max']}")
    if 'sun_elevation_min' in kwargs:
        filters_applied.append(f"sun_elevation >= {kwargs['sun_elevation_min']}")
    if 'item_types' in kwargs:
        filters_applied.append(f"item_types = {kwargs['item_types']}")
    
    print(f"Filters applied: {', '.join(filters_applied) if filters_applied else 'None'}")
    
    # Try relaxed search: same region and dates, all filters loosened
    print("\nTrying relaxed search...")
    relaxed_results = query.search_scenes(
        geometry=geometry,
        start_date=start_date,
        end_date=end_date,
        cloud_cover_max=1.0,  # Allow all cloud cover
        item_types=["PSScene"]
    )
    
    relaxed_count = len(relaxed_results['features'])
    print(f"Relaxed search found: {relaxed_count} scenes")
    
    if relaxed_count > 0:
        print("Suggestion: Your filters may be too restrictive")
        
        # Analyze why scenes were filtered out: compare the caller's
        # thresholds against what is actually available in the region
        if 'cloud_cover_max' in kwargs:
            cloud_covers = [s['properties'].get('cloud_cover', 1.0) 
                          for s in relaxed_results['features']]
            min_cloud = min(cloud_covers)
            max_cloud = max(cloud_covers)
            print(f"  Available cloud cover range: {min_cloud:.3f} to {max_cloud:.3f}")
            print(f"  Your filter: <= {kwargs['cloud_cover_max']}")
            
        if 'sun_elevation_min' in kwargs:
            sun_elevations = [s['properties'].get('sun_elevation', 0) 
                            for s in relaxed_results['features']]
            min_sun = min(sun_elevations)
            max_sun = max(sun_elevations)
            print(f"  Available sun elevation range: {min_sun:.1f}° to {max_sun:.1f}°")
            print(f"  Your filter: >= {kwargs['sun_elevation_min']}°")
    
    else:
        print("No scenes available even with relaxed criteria")
        print("Suggestions:")
        print("  - Try a different date range")
        print("  - Check if location has PlanetScope coverage")
        print("  - Verify geometry coordinates are correct")
    
    return relaxed_results

# Usage
debug_results = debug_search_parameters(
    query, geometry, "2025-01-01", "2025-01-31",
    cloud_cover_max=0.1, sun_elevation_min=45
)
  1. Progressive filter relaxation:
def progressive_search(query, geometry, start_date, end_date):
    """Search with increasingly permissive quality criteria.

    Walks a ladder of preset filter levels, from publication-quality
    down to "anything available", stopping at the first level that
    yields at least one scene.

    Returns:
        ``(results, config)`` for the first successful level, or
        ``(None, None)`` when every level comes up empty or errors out.
    """
    # Ordered from most to least restrictive.
    quality_levels = [
        {"name": "Strict (Publication Quality)", "cloud_cover_max": 0.05, "sun_elevation_min": 45},
        {"name": "High Quality", "cloud_cover_max": 0.15, "sun_elevation_min": 35},
        {"name": "Good Quality", "cloud_cover_max": 0.3, "sun_elevation_min": 25},
        {"name": "Acceptable", "cloud_cover_max": 0.5, "sun_elevation_min": 15},
        {"name": "Any Available", "cloud_cover_max": 1.0, "sun_elevation_min": 0},
    ]

    for level in quality_levels:
        print(f"\nTrying {level['name']} criteria...")
        print(f"  Cloud cover: <= {level['cloud_cover_max']}")
        print(f"  Sun elevation: >= {level['sun_elevation_min']}°")

        try:
            found = query.search_scenes(
                geometry=geometry,
                start_date=start_date,
                end_date=end_date,
                cloud_cover_max=level["cloud_cover_max"],
                sun_elevation_min=level["sun_elevation_min"],
                item_types=["PSScene"],
            )

            n_scenes = len(found["features"])
            print(f"  Found: {n_scenes} scenes")

            if n_scenes > 0:
                print(f"SUCCESS: Found scenes with {level['name']} criteria")
                return found, level

        except Exception as exc:
            print(f"  ERROR: {exc}")

    print("\nNo scenes found with any criteria")
    return None, None

# Usage
results, used_config = progressive_search(query, geometry, "2025-01-01", "2025-01-31")
if results:
    print(f"Best available quality: {used_config['name']}")

Problem: Circuit breaker activated

Error:

APIError: Circuit breaker is OPEN - too many recent failures detected

Solutions:

  1. Check circuit breaker status:
def check_circuit_breaker_status(query):
    """Inspect and print the circuit breaker state attached to *query*.

    Returns the circuit breaker object when the query's session is a
    RetryableSession, otherwise prints a notice and returns ``None``.
    """
    from planetscope_py.rate_limiter import RetryableSession

    session = getattr(query, 'session', None)
    if not isinstance(session, RetryableSession):
        print("No circuit breaker found")
        return None

    breaker = session.circuit_breaker

    print("Circuit Breaker Status")
    print("=" * 30)
    print(f"State: {breaker.state}")
    print(f"Failure count: {breaker.failure_count}")
    print(f"Failure threshold: {breaker.failure_threshold}")

    last_failure = getattr(breaker, 'last_failure_time', None)
    if last_failure:
        import time
        elapsed = time.time() - last_failure
        print(f"Time since last failure: {elapsed:.1f}s")
        print(f"Recovery timeout: {breaker.recovery_timeout}s")

        if breaker.state == "OPEN":
            remaining = breaker.recovery_timeout - elapsed
            if remaining > 0:
                print(f"Time until recovery attempt: {remaining:.1f}s")
            else:
                print("Ready for recovery attempt")

    return breaker

# Check status
cb = check_circuit_breaker_status(query)
  2. Wait for circuit breaker recovery:
def wait_for_circuit_breaker_recovery(query, max_wait_time=300):
    """Poll the circuit breaker until it closes or the wait times out.

    Args:
        query: PlanetScopeQuery whose session may carry a circuit breaker.
        max_wait_time: Maximum number of seconds to wait before giving up.

    Returns:
        True when the breaker is closed or absent, False on timeout.

    NOTE(review): many circuit breaker implementations only transition
    OPEN -> HALF_OPEN when a request is actually attempted; purely
    passive polling like this may never observe recovery — confirm
    against planetscope_py.rate_limiter before relying on this helper.
    """
    import time

    cb = check_circuit_breaker_status(query)
    if cb is None:
        # Fixed: the original printed "already closed" here, which was
        # misleading — there is simply no breaker to wait for.
        print("No circuit breaker found - nothing to wait for")
        return True
    if cb.state == "CLOSED":
        print("Circuit breaker is already closed")
        return True

    print(f"Waiting for circuit breaker recovery (max {max_wait_time}s)...")

    start_time = time.time()
    while time.time() - start_time < max_wait_time:
        if cb.state == "CLOSED":
            print("Circuit breaker recovered!")
            return True
        elif cb.state == "HALF_OPEN":
            print("Circuit breaker is testing recovery...")

        time.sleep(10)  # Check every 10 seconds

        # Update status
        elapsed = time.time() - start_time
        print(f"Still waiting... ({elapsed:.0f}s elapsed)")

    print(f"Circuit breaker did not recover within {max_wait_time}s")
    return False

# Usage
if wait_for_circuit_breaker_recovery(query):
    # Try your operation again
    results = query.search_scenes(...)
  3. Manual circuit breaker reset:
def reset_circuit_breaker(query):
    """Manually reset the query session's circuit breaker, then verify.

    After resetting, fires a lightweight GET against the Planet Data API
    root to confirm the connection is actually healthy again.

    Returns:
        True when the breaker was reset and the probe request succeeded,
        False otherwise.
    """
    from planetscope_py.rate_limiter import RetryableSession

    session_obj = getattr(query, 'session', None)
    if not isinstance(session_obj, RetryableSession):
        print("No circuit breaker to reset")
        return False

    breaker = session_obj.circuit_breaker
    print(f"Circuit breaker state before reset: {breaker.state}")
    breaker.reset()
    print(f"Circuit breaker state after reset: {breaker.state}")

    # Probe the API root to make sure the underlying problem is gone.
    try:
        probe = query.auth.get_session().get(
            "https://api.planet.com/data/v1/", timeout=10
        )
        print(f"Test request successful: {probe.status_code}")
        return True
    except Exception as exc:
        print(f"Test request failed: {exc}")
        return False

# Usage
if reset_circuit_breaker(query):
    print("Circuit breaker reset successful")
else:
    print("Circuit breaker reset failed or underlying issue persists")

Problem: Timeout errors

Error:

requests.exceptions.ReadTimeout: HTTPSConnectionPool read timed out

Solutions:

  1. Increase timeout values:
from planetscope_py import PlanetScopeConfig

# Create config with longer timeouts
config = PlanetScopeConfig()
config.set('timeouts', {
    'connect': 30.0,    # 30 seconds to establish connection
    'read': 180.0       # 3 minutes to read response
})

# Use with query
query = PlanetScopeQuery(config=config.to_dict())
  2. Test network latency:
import requests
import time

def test_network_latency(url="https://api.planet.com/data/v1/", num_tests=5):
    """Measure round-trip latency to the Planet API.

    Issues *num_tests* GET requests against *url*, printing per-request
    timings and a summary.

    Returns:
        List of measured latencies in milliseconds (failed requests are
        omitted from the list).
    """
    latencies = []

    for attempt in range(1, num_tests + 1):
        try:
            t0 = time.time()
            response = requests.get(url, timeout=30)
            elapsed_ms = (time.time() - t0) * 1000  # Convert to milliseconds
            latencies.append(elapsed_ms)
            print(f"Test {attempt}: {elapsed_ms:.0f}ms (Status: {response.status_code})")
        except Exception as e:
            print(f"Test {attempt}: FAILED - {e}")

    if latencies:
        avg_latency = sum(latencies) / len(latencies)

        print(f"\nLatency Summary:")
        print(f"  Average: {avg_latency:.0f}ms")
        print(f"  Range: {min(latencies):.0f}ms - {max(latencies):.0f}ms")

        if avg_latency > 5000:  # > 5 seconds
            print("WARNING: High latency detected")
            print("Consider increasing timeout values")
        elif avg_latency > 2000:  # > 2 seconds
            print("INFO: Moderate latency detected")

    return latencies

# Test your connection
latencies = test_network_latency()
  3. Implement progressive timeout strategy:
def search_with_progressive_timeout(query, geometry, start_date, end_date):
    """Try search with progressively longer timeouts.

    Retries the same scene search with read timeouts of 30s, 60s, 120s,
    then 300s, returning the first successful result set.

    Args:
        query: Existing PlanetScopeQuery; its API key is reused for the
            temporary per-timeout queries.
        geometry: GeoJSON geometry for the search area.
        start_date: Search window start date string.
        end_date: Search window end date string.

    Returns:
        The search results dict on success, or None when every attempt
        fails.

    NOTE(review): relies on module-level ``time``, ``requests`` and
    ``PlanetScopeQuery`` imports from the surrounding script — confirm
    they are in scope before reusing this snippet standalone.
    """
    
    timeouts = [30, 60, 120, 300]  # 30s, 1m, 2m, 5m
    
    for timeout in timeouts:
        print(f"Trying with {timeout}s timeout...")
        
        # Create temporary config with this read timeout (connect stays 30s)
        from planetscope_py import PlanetScopeConfig
        config = PlanetScopeConfig()
        config.set('timeouts', {'connect': 30, 'read': timeout})
        
        # Create temporary query with this timeout
        temp_query = PlanetScopeQuery(
            api_key=query.auth.api_key,
            config=config.to_dict()
        )
        
        try:
            start_time = time.time()
            results = temp_query.search_scenes(
                geometry=geometry,
                start_date=start_date,
                end_date=end_date
            )
            end_time = time.time()
            
            actual_time = end_time - start_time
            print(f"SUCCESS: Completed in {actual_time:.1f}s")
            return results
            
        except requests.exceptions.ReadTimeout:
            # A longer timeout might still succeed — try the next one.
            print(f"TIMEOUT: Exceeded {timeout}s limit")
            continue
        except Exception as e:
            # Non-timeout failures won't be fixed by waiting longer.
            print(f"ERROR: {e}")
            break
    
    print("All timeout attempts failed")
    return None

# Usage
results = search_with_progressive_timeout(query, geometry, "2025-01-01", "2025-01-31")

Performance Issues

Problem: Slow scene searches

Symptoms:

  • Searches taking >30 seconds consistently
  • High memory usage during searches
  • System becoming unresponsive

Solutions:

  1. Profile search performance:
import time
import cProfile
import pstats
from io import StringIO

def profile_search_performance(query, geometry, start_date, end_date):
    """Profile search performance to identify bottlenecks.

    Runs the search exactly once under cProfile while also measuring
    wall-clock time, then prints a summary plus the top cumulative-time
    functions. (The previous version issued the same API search twice —
    once for timing, once for profiling — doubling quota usage.)

    Args:
        query: PlanetScopeQuery used to perform the search.
        geometry: GeoJSON geometry for the search area.
        start_date: Search window start date string.
        end_date: Search window end date string.

    Returns:
        ``(results, total_time)`` — the search results dict and the
        elapsed wall-clock time in seconds.
    """
    profiler = cProfile.Profile()

    # Time and profile a SINGLE search run.
    start_time = time.time()
    profiler.enable()
    try:
        results = query.search_scenes(
            geometry=geometry,
            start_date=start_date,
            end_date=end_date
        )
    finally:
        profiler.disable()
    total_time = time.time() - start_time

    scene_count = len(results['features'])

    print(f"Search Performance Summary")
    print(f"=" * 30)
    print(f"Total time: {total_time:.2f}s")
    print(f"Scenes found: {scene_count}")
    if scene_count > 0:
        print(f"Time per scene: {total_time/scene_count:.3f}s")
    else:
        # Keep the label so the summary stays readable with zero scenes.
        print("Time per scene: N/A")

    # Analyze and report the top cumulative-time consumers.
    s = StringIO()
    ps = pstats.Stats(profiler, stream=s).sort_stats('cumulative')
    ps.print_stats(10)  # Top 10 functions

    print(f"\nTop Time Consumers:")
    print(s.getvalue())

    return results, total_time

# Usage
results, search_time = profile_search_performance(query, geometry, "2025-01-01", "2025-01-31")
  2. Optimize geometry complexity:
def optimize_geometry_for_performance(geometry, tolerance=0.001):
    """Simplify geometry to improve search performance.

    Simplifies Polygon/MultiPolygon geometries with shapely and keeps
    the simplified version only when it changes the computed area by
    less than 1%; otherwise the original geometry is returned.

    Args:
        geometry: GeoJSON geometry dict.
        tolerance: Simplification tolerance (degrees for lon/lat data).

    Returns:
        A GeoJSON geometry dict — the simplified geometry when it is an
        acceptable approximation, the original otherwise.
    """
    from shapely.geometry import shape, mapping
    from planetscope_py.utils import calculate_area_km2

    original_area = calculate_area_km2(geometry)

    if geometry['type'] in ['Polygon', 'MultiPolygon']:
        # Simplify geometry
        geom = shape(geometry)
        simplified = geom.simplify(tolerance, preserve_topology=True)

        # Convert back to GeoJSON via mapping(), which emits the ring
        # nesting a Polygon requires. The previous hand-rolled
        # conversion produced a flat coordinate list (missing the outer
        # ring level) and silently dropped interior rings and
        # MultiPolygon parts.
        simplified_geojson = dict(mapping(simplified))

        simplified_area = calculate_area_km2(simplified_geojson)
        area_diff = abs(original_area - simplified_area)

        print(f"Geometry Optimization:")
        print(f"  Original area: {original_area:.2f} km²")
        print(f"  Simplified area: {simplified_area:.2f} km²")
        print(f"  Area difference: {area_diff:.2f} km² ({area_diff/original_area*100:.1f}%)")

        if area_diff / original_area < 0.01:  # Less than 1% difference
            print("  Using simplified geometry")
            return simplified_geojson
        else:
            print("  Simplification changed area too much, using original")
            return geometry

    return geometry

# Usage
optimized_geom = optimize_geometry_for_performance(complex_polygon)
results = query.search_scenes(optimized_geom, "2025-01-01", "2025-01-31")
  3. Implement search caching:
import hashlib
import json
import pickle
import os
from pathlib import Path

class SearchCache:
    """Cache search results to avoid repeated API calls.

    Results are keyed by an MD5 hash of the search parameters and stored
    as pickle files under *cache_dir*. Cached entries expire after 24
    hours and are deleted lazily on the next lookup.
    """
    
    def __init__(self, cache_dir="~/.planetscope_cache"):
        """Create (if necessary) the cache directory, including parents."""
        self.cache_dir = Path(cache_dir).expanduser()
        # parents=True so a nested cache path doesn't raise FileNotFoundError.
        self.cache_dir.mkdir(parents=True, exist_ok=True)
        
    def _generate_cache_key(self, geometry, start_date, end_date, **kwargs):
        """Generate a unique, deterministic cache key for search parameters."""
        cache_data = {
            'geometry': geometry,
            'start_date': start_date,
            'end_date': end_date,
            **kwargs
        }
        
        # sort_keys makes the hash stable regardless of kwargs order.
        # MD5 is fine here: the hash is a cache key, not a security boundary.
        cache_str = json.dumps(cache_data, sort_keys=True)
        cache_hash = hashlib.md5(cache_str.encode()).hexdigest()
        return cache_hash
    
    def get_cached_results(self, geometry, start_date, end_date, **kwargs):
        """Return cached results when present and younger than 24h, else None."""
        cache_key = self._generate_cache_key(geometry, start_date, end_date, **kwargs)
        cache_file = self.cache_dir / f"{cache_key}.pkl"
        
        if cache_file.exists():
            try:
                # Safe to unpickle: these files are only ever written by
                # cache_results() below on this machine.
                with open(cache_file, 'rb') as f:
                    cached_data = pickle.load(f)
                
                # Check if cache is still valid (less than 24 hours old)
                import time
                cache_age = time.time() - cache_file.stat().st_mtime
                if cache_age < 86400:  # 24 hours
                    print(f"Using cached results ({cache_age/3600:.1f}h old)")
                    return cached_data
                else:
                    print("Cache expired, will fetch fresh results")
                    cache_file.unlink()  # Remove expired cache
                    
            except Exception as e:
                # Treat unreadable cache entries as misses.
                print(f"Cache read error: {e}")
        
        return None
    
    def cache_results(self, results, geometry, start_date, end_date, **kwargs):
        """Persist search results for later retrieval; failures are non-fatal."""
        cache_key = self._generate_cache_key(geometry, start_date, end_date, **kwargs)
        cache_file = self.cache_dir / f"{cache_key}.pkl"
        
        try:
            with open(cache_file, 'wb') as f:
                pickle.dump(results, f)
            print(f"Results cached for future use")
        except Exception as e:
            print(f"Cache write error: {e}")

def cached_search(query, geometry, start_date, end_date, **kwargs):
    """Search with caching support.

    Serves results from a local SearchCache when a fresh-enough entry
    exists; otherwise performs the real search and stores the results
    for next time.
    """
    cache = SearchCache()

    # Serve from cache when possible.
    hit = cache.get_cached_results(geometry, start_date, end_date, **kwargs)
    if hit:
        return hit

    # Cache miss — hit the API and remember the answer.
    print("Fetching fresh results from API...")
    results = query.search_scenes(
        geometry=geometry,
        start_date=start_date,
        end_date=end_date,
        **kwargs
    )

    cache.cache_results(results, geometry, start_date, end_date, **kwargs)
    return results

# Usage
results = cached_search(query, geometry, "2025-01-01", "2025-01-31", cloud_cover_max=0.2)

Problem: High memory usage

Symptoms:

  • Out of memory errors with large scene collections
  • System swap usage increasing
  • Slow performance due to memory pressure

Solutions:

  1. Monitor memory usage:
import psutil
import os
import gc

class MemoryMonitor:
    """Monitor this process's memory usage during operations.

    Records the resident set size (RSS) at construction time and reports
    growth relative to that baseline.
    """

    def __init__(self):
        self.process = psutil.Process(os.getpid())
        # Baseline RSS against which later growth is measured.
        self.initial_memory = self.get_memory_usage()

    def get_memory_usage(self):
        """Return the current RSS of this process in megabytes."""
        return self.process.memory_info().rss / 1024 / 1024

    def report(self, operation=""):
        """Print current usage and growth since construction; return usage in MB."""
        usage_mb = self.get_memory_usage()
        delta_mb = usage_mb - self.initial_memory

        label = f" ({operation})" if operation else ""
        print(f"Memory usage{label}: {usage_mb:.1f} MB (+{delta_mb:.1f} MB)")

        if delta_mb > 1000:  # > 1GB increase
            print("WARNING: High memory usage detected")

        return usage_mb

    def force_cleanup(self):
        """Run a full garbage collection pass and report the resulting usage."""
        freed = gc.collect()
        print(f"Garbage collected {freed} objects")
        return self.report("after cleanup")

# Usage
monitor = MemoryMonitor()

# Monitor during operations
results = query.search_scenes(geometry, "2025-01-01", "2025-01-31")
monitor.report("after search")

assessment = processor.assess_coverage_quality(scenes=results['features'])
monitor.report("after processing")

# Force cleanup if needed
monitor.force_cleanup()
  2. Process scenes in batches:
def process_scenes_in_batches(processor, scenes, target_geometry, batch_size=50):
    """Process large scene collections in smaller batches.

    Splits *scenes* into slices of *batch_size*, runs the processor's
    coverage-quality assessment on each slice, and merges the per-batch
    results into one aggregated assessment so the whole collection never
    has to be processed in memory at once.

    Args:
        processor: Object exposing ``assess_coverage_quality`` and
            ``extract_scene_metadata`` (e.g. a planetscope-py metadata
            processor).
        scenes: Sequence of scene feature dicts.
        target_geometry: GeoJSON geometry forwarded to each batch
            assessment.
        batch_size: Number of scenes per batch.

    Returns:
        Aggregated assessment dict with 'total_scenes',
        'quality_analysis' and 'recommendations' keys.

    NOTE(review): relies on ``MemoryMonitor`` and a module-level
    ``import gc`` from the surrounding script being in scope.
    """
    
    total_scenes = len(scenes)
    print(f"Processing {total_scenes} scenes in batches of {batch_size}")
    
    # Initialize aggregated results. The per-metric 'values' lists are
    # replaced by summary statistics (or removed if empty) at the end.
    aggregated_assessment = {
        'total_scenes': 0,
        'quality_analysis': {
            'suitability_distribution': {},
            'cloud_cover': {'values': []},
            'sun_elevation': {'values': []},
            'overall_quality': {'values': []}
        },
        'recommendations': []
    }
    
    monitor = MemoryMonitor()
    
    for i in range(0, total_scenes, batch_size):
        batch = scenes[i:i+batch_size]
        batch_num = i // batch_size + 1
        # Ceiling division: last batch may be smaller than batch_size.
        total_batches = (total_scenes + batch_size - 1) // batch_size
        
        print(f"\nProcessing batch {batch_num}/{total_batches} ({len(batch)} scenes)")
        
        try:
            # Process batch
            batch_assessment = processor.assess_coverage_quality(
                scenes=batch,
                target_geometry=target_geometry
            )
            
            # Aggregate results
            aggregated_assessment['total_scenes'] += batch_assessment['total_scenes']
            
            # Aggregate quality distribution (summing per-category counts)
            batch_quality = batch_assessment['quality_analysis']['suitability_distribution']
            for quality, count in batch_quality.items():
                if quality in aggregated_assessment['quality_analysis']['suitability_distribution']:
                    aggregated_assessment['quality_analysis']['suitability_distribution'][quality] += count
                else:
                    aggregated_assessment['quality_analysis']['suitability_distribution'][quality] = count
            
            # Collect raw values for later statistics
            for scene in batch:
                try:
                    metadata = processor.extract_scene_metadata(scene)
                    if metadata.get('cloud_cover') is not None:
                        aggregated_assessment['quality_analysis']['cloud_cover']['values'].append(metadata['cloud_cover'])
                    if metadata.get('sun_elevation') is not None:
                        aggregated_assessment['quality_analysis']['sun_elevation']['values'].append(metadata['sun_elevation'])
                    if metadata.get('overall_quality') is not None:
                        aggregated_assessment['quality_analysis']['overall_quality']['values'].append(metadata['overall_quality'])
                except Exception as e:
                    # A single bad scene should not abort the batch.
                    print(f"Warning: Failed to extract metadata from scene: {e}")
            
            # Clean up batch data so each iteration's peak memory stays flat
            del batch, batch_assessment
            gc.collect()
            
            monitor.report(f"batch {batch_num}")
            
        except Exception as e:
            # A failed batch is skipped; remaining batches still run.
            print(f"Error processing batch {batch_num}: {e}")
            continue
    
    # Calculate final statistics from the collected raw values
    import statistics
    
    for metric in ['cloud_cover', 'sun_elevation', 'overall_quality']:
        values = aggregated_assessment['quality_analysis'][metric]['values']
        if values:
            aggregated_assessment['quality_analysis'][metric] = {
                'mean': statistics.mean(values),
                'median': statistics.median(values),
                'min': min(values),
                'max': max(values),
                'std': statistics.stdev(values) if len(values) > 1 else 0.0
            }
        else:
            # No data for this metric — drop it rather than report empties.
            del aggregated_assessment['quality_analysis'][metric]
    
    print(f"\nProcessing complete: {aggregated_assessment['total_scenes']} scenes processed")
    return aggregated_assessment

# Usage
large_scene_collection = results['features']  # Assume this is large (1000+ scenes)
assessment = process_scenes_in_batches(processor, large_scene_collection, geometry)
  3. Implement streaming processing:
def stream_process_scenes(processor, scenes, target_geometry):
    """Stream-process scenes without building aggregate structures in memory.

    Iterates *scenes* one at a time, extracting metadata and folding it
    into running statistics so peak memory stays flat regardless of the
    collection size.

    Args:
        processor: Object exposing ``extract_scene_metadata(scene)``.
        scenes: Iterable of scene feature dicts.
        target_geometry: Kept for interface parity with the batch
            variant; not used by the streaming statistics — confirm
            before removing.

    Returns:
        Assessment dict with 'total_scenes' and 'quality_analysis' keys.

    NOTE(review): relies on ``MemoryMonitor`` and a module-level
    ``import gc`` from the surrounding script being in scope.
    """
    import statistics

    # Running statistics collectors
    scene_count = 0
    quality_counts = {}
    cloud_covers = []
    sun_elevations = []
    overall_qualities = []

    monitor = MemoryMonitor()

    # Process scenes one by one. (The original wrapped `scenes` in a
    # custom SceneIterator class, which added nothing over plain
    # iteration; any iterable already yields items lazily.)
    for scene in scenes:
        try:
            metadata = processor.extract_scene_metadata(scene)
            scene_count += 1

            # Collect statistics
            suitability = metadata.get('suitability', 'unknown')
            quality_counts[suitability] = quality_counts.get(suitability, 0) + 1

            if metadata.get('cloud_cover') is not None:
                cloud_covers.append(metadata['cloud_cover'])
            if metadata.get('sun_elevation') is not None:
                sun_elevations.append(metadata['sun_elevation'])
            if metadata.get('overall_quality') is not None:
                overall_qualities.append(metadata['overall_quality'])

            # Report progress periodically
            if scene_count % 100 == 0:
                print(f"Processed {scene_count} scenes...")
                monitor.report()
                gc.collect()  # Force cleanup every 100 scenes

        # Fixed: in the original this except clause was mis-indented
        # inside the `if` block above, which made the whole function a
        # SyntaxError.
        except Exception as e:
            print(f"Warning: Failed to process scene {scene_count}: {e}")
            continue

    # Calculate final statistics
    assessment = {
        'total_scenes': scene_count,
        'quality_analysis': {
            'suitability_distribution': quality_counts
        }
    }

    if cloud_covers:
        assessment['quality_analysis']['cloud_cover'] = {
            'mean': statistics.mean(cloud_covers),
            'median': statistics.median(cloud_covers),
            'min': min(cloud_covers),
            'max': max(cloud_covers),
            'std': statistics.stdev(cloud_covers) if len(cloud_covers) > 1 else 0.0
        }

    if sun_elevations:
        assessment['quality_analysis']['sun_elevation'] = {
            'mean': statistics.mean(sun_elevations),
            'median': statistics.median(sun_elevations),
            'min': min(sun_elevations),
            'max': max(sun_elevations)
        }

    if overall_qualities:
        assessment['quality_analysis']['overall_quality'] = {
            'mean': statistics.mean(overall_qualities),
            'median': statistics.median(overall_qualities),
            'min': min(overall_qualities),
            'max': max(overall_qualities)
        }

    print(f"\nStreaming processing complete: {scene_count} scenes")
    monitor.report("final")

    return assessment

# Usage for very large collections
assessment = stream_process_scenes(processor, large_scene_collection, geometry)

Configuration Issues

Problem: Configuration file not loading

Error:

ConfigurationError: Failed to load configuration from ~/.planet.json

Solutions:

  1. Check file existence and permissions:
# Check if file exists
ls -la ~/.planet.json

# Check file contents
cat ~/.planet.json

# Fix permissions (should be readable by user only)
chmod 600 ~/.planet.json
  2. Validate JSON syntax:
import json
from pathlib import Path

def validate_config_file(config_path="~/.planet.json"):
    """Validate a Planet configuration file.

    Checks existence, permissions, JSON syntax and the ``api_key``
    field, printing diagnostics along the way.

    Returns:
        True when the file is usable, False otherwise.
    """
    cfg = Path(config_path).expanduser()

    print(f"Validating config file: {cfg}")

    # A missing file is unrecoverable — tell the user how to create one.
    if not cfg.exists():
        print("ERROR: Config file does not exist")
        print("Create with: echo '{\"api_key\": \"your_key_here\"}' > ~/.planet.json")
        return False

    # Loose permissions only warn; they do not block validation.
    import stat
    if stat.filemode(cfg.stat().st_mode) != '-rw-------':
        print("WARNING: Config file permissions are not secure")
        print("Fix with: chmod 600 ~/.planet.json")

    try:
        data = json.loads(cfg.read_text())

        print("SUCCESS: JSON syntax is valid")

        if 'api_key' not in data:
            print("ERROR: 'api_key' field is missing")
            return False

        key = data['api_key']
        if not isinstance(key, str):
            print("ERROR: 'api_key' must be a string")
            return False

        if not key.startswith('PLAK'):
            print("WARNING: API key doesn't start with 'PLAK'")

        if len(key) != 32:
            print("WARNING: API key length is not 32 characters")

        print(f"SUCCESS: Config file is valid")
        print(f"API key: {key[:8]}...{key[-4:]}")

        return True

    except json.JSONDecodeError as e:
        print(f"ERROR: Invalid JSON syntax: {e}")
        print("Fix the JSON syntax or recreate the file")
        return False

    except Exception as e:
        print(f"ERROR: {e}")
        return False

# Usage
if validate_config_file():
    print("Config file is ready to use")
else:
    print("Config file needs to be fixed")
  3. Create valid configuration file:
def create_config_file(api_key, config_path="~/.planet.json"):
    """Create a valid Planet configuration file.

    Writes ``{"api_key": ...}`` to *config_path* and restricts the file
    to owner read/write (mode 600).

    Args:
        api_key: Planet API key to store.
        config_path: Destination path (``~`` is expanded).

    Returns:
        True on success, False when the file cannot be written.

    Raises:
        ValueError: If *api_key* is empty or not a string.
    """
    import json
    from pathlib import Path
    import os

    target = Path(config_path).expanduser()

    # Validate the key before touching the filesystem.
    if not api_key or not isinstance(api_key, str):
        raise ValueError("API key must be a non-empty string")

    if not api_key.startswith('PLAK'):
        print("WARNING: API key doesn't start with 'PLAK' - verify this is correct")

    payload = {"api_key": api_key}

    try:
        with open(target, 'w') as fh:
            json.dump(payload, fh, indent=2)
        os.chmod(target, 0o600)  # Owner read/write only.
    except Exception as exc:
        print(f"ERROR: Failed to create config file: {exc}")
        return False

    print(f"SUCCESS: Created config file at {target}")
    print("File permissions set to 600 (owner read/write only)")
    return True

# Usage
# create_config_file("PLAKyour_api_key_here")

Problem: Environment variables not recognized

Solutions:

  1. Check environment variable setup:
import os

def check_environment_variables():
    """Report the state of all planetscope-py related environment variables.

    Prints one line per variable (API key values are masked) plus the
    current shell, and returns a dict mapping each variable name to its
    value (None when unset).
    """
    tracked = {
        'PL_API_KEY': 'Planet API key',
        'PLANETSCOPE_LOG_LEVEL': 'Logging level',
        'PLANETSCOPE_CONFIG_PATH': 'Custom config file path',
        'HTTP_PROXY': 'HTTP proxy server',
        'HTTPS_PROXY': 'HTTPS proxy server'
    }

    print("Environment Variables Check")
    print("=" * 40)

    for name, description in tracked.items():
        value = os.environ.get(name)

        if not value:
            print(f"✗ {name}: NOT SET ({description})")
        elif 'API_KEY' in name:
            # Never echo the full API key.
            shown = f"{value[:8]}...{value[-4:]}" if len(value) > 12 else "****"
            print(f"✓ {name}: {shown} ({description})")
        else:
            print(f"✓ {name}: {value} ({description})")

    print(f"\nCurrent shell: {os.environ.get('SHELL', 'unknown')}")

    return {name: os.environ.get(name) for name in tracked}

# Usage
env_status = check_environment_variables()
  2. Set environment variables correctly:
# Linux/macOS - Temporary (current session only)
export PL_API_KEY="PLAKyour_api_key_here"
export PLANETSCOPE_LOG_LEVEL="INFO"

# Linux/macOS - Permanent (add to shell profile)
echo 'export PL_API_KEY="PLAKyour_api_key_here"' >> ~/.bashrc
echo 'export PLANETSCOPE_LOG_LEVEL="INFO"' >> ~/.bashrc
source ~/.bashrc

# Verify
echo $PL_API_KEY
REM Windows Command Prompt - Temporary
set PL_API_KEY=PLAKyour_api_key_here
set PLANETSCOPE_LOG_LEVEL=INFO

REM Windows Command Prompt - Permanent (run as Administrator)
setx PL_API_KEY "PLAKyour_api_key_here"
setx PLANETSCOPE_LOG_LEVEL "INFO"

REM Verify
echo %PL_API_KEY%
# Windows PowerShell - Temporary
$env:PL_API_KEY="PLAKyour_api_key_here"
$env:PLANETSCOPE_LOG_LEVEL="INFO"

# Windows PowerShell - Permanent
[Environment]::SetEnvironmentVariable("PL_API_KEY", "PLAKyour_api_key_here", "User")
[Environment]::SetEnvironmentVariable("PLANETSCOPE_LOG_LEVEL", "INFO", "User")

# Verify
$env:PL_API_KEY
  3. Debug environment variable loading:
def debug_environment_loading():
    """Debug how planetscope-py loads environment variables.

    Prints whether PL_API_KEY is visible in the raw environment, whether
    PlanetAuth picks it up, and whether PlanetScopeConfig loads — useful
    for pinning down the layer at which credential discovery fails.
    """
    
    import os
    from planetscope_py import PlanetAuth, PlanetScopeConfig
    
    print("Environment Variable Loading Debug")
    print("=" * 45)
    
    # Check raw environment
    api_key_env = os.environ.get('PL_API_KEY')
    print(f"Raw PL_API_KEY from environment: {'SET' if api_key_env else 'NOT SET'}")
    
    if api_key_env:
        # Only reveal the edges of the key, never the full value.
        print(f"Length: {len(api_key_env)}")
        print(f"First 8 chars: {api_key_env[:8]}")
        print(f"Last 4 chars: {api_key_env[-4:]}")
    
    # Test PlanetAuth loading
    try:
        auth = PlanetAuth()
        print(f"PlanetAuth detected API key: {'YES' if auth.api_key else 'NO'}")
        
        if auth.api_key:
            # NOTE(review): assumes PlanetAuth prefers the environment
            # variable over the config file — confirm in auth.py.
            print(f"Source: environment variable")
        else:
            print("Will try config file next...")
            
    except Exception as e:
        print(f"PlanetAuth error: {e}")
    
    # Test config loading
    try:
        config = PlanetScopeConfig()
        print(f"PlanetScopeConfig loaded: YES")
        print(f"Base URL: {config.base_url}")
    except Exception as e:
        print(f"PlanetScopeConfig error: {e}")

# Usage
debug_environment_loading()

Development Issues

Problem: Tests failing

Error:

FAILED tests/test_query.py::test_search_scenes - AssertionError: Expected 200, got 401

Solutions:

  1. Run tests with verbose output:
# Basic test run
python -m pytest tests/ -v

# Extra verbose with full output
python -m pytest tests/ -vvv --tb=long

# Show local variables in tracebacks
python -m pytest tests/ -v --tb=long --showlocals

# Run specific test file
python -m pytest tests/test_query.py -v

# Run specific test function
python -m pytest tests/test_query.py::test_search_scenes -v
  2. Check test environment setup:
# Ensure you're in the right directory
pwd  # Should be in planetscope-py root

# Check Python environment
which python
python --version

# Install test dependencies
pip install -r requirements-dev.txt

# Install package in development mode
pip install -e .
  3. Run tests with different markers:
# Skip network-dependent tests
python -m pytest tests/ -v -m "not network"

# Run only unit tests (no integration tests)
python -m pytest tests/ -v -m "unit"

# Run only fast tests
python -m pytest tests/ -v -m "not slow"

# Run with coverage report
python -m pytest tests/ -v --cov=planetscope_py --cov-report=html
  4. Debug specific test failures:
# Run single failing test with maximum verbosity
python -m pytest tests/test_query.py::test_search_scenes -vvv --tb=long

# Drop into debugger on failure
python -m pytest tests/test_query.py::test_search_scenes --pdb

# Stop on first failure
python -m pytest tests/ -x

# Show stdout/stderr during tests
python -m pytest tests/ -s
  5. Handle authentication in tests:
# For tests that require API key
import os
import pytest

def test_api_function():
    """Integration test that skips itself when no Planet API key is configured."""
    key = os.getenv('PL_API_KEY')
    if not key:
        pytest.skip("PL_API_KEY not set - skipping API test")

    # Your test code here
    from planetscope_py import PlanetScopeQuery
    query = PlanetScopeQuery(api_key=key)
    # ... rest of test
  1. Run tests in clean environment:
# Create clean test environment
python -m venv test_env
source test_env/bin/activate  # Linux/macOS
test_env\Scripts\activate     # Windows

# Install package and dependencies
pip install -e .
pip install -r requirements-dev.txt

# Run tests
python -m pytest tests/ -v

Problem: Import errors in development

Error:

ModuleNotFoundError: No module named 'planetscope_py'

Solutions:

  1. Install in development mode:
# From project root directory
pip install -e .

# Verify installation
python -c "import planetscope_py; print(f'Version: {planetscope_py.__version__}')"
  1. Check PYTHONPATH:
# Add current directory to PYTHONPATH
export PYTHONPATH="${PYTHONPATH}:$(pwd)"

# Verify
python -c "import sys; print('\n'.join(sys.path))"

# Test import
python -c "import planetscope_py; print('Import successful')"
  1. Verify package structure:
# Check directory structure
ls -la planetscope_py/

# Should contain:
# __init__.py
# auth.py
# config.py
# query.py
# metadata.py
# etc.

# Check __init__.py imports
cat planetscope_py/__init__.py
  1. Debug import issues:
import sys
import os

def debug_import_issues():
    """Print diagnostic information for import-path problems.

    Dumps interpreter details and sys.path, checks whether a local
    planetscope_py package directory is present under the current working
    directory, then attempts the import itself and prints suggestions on
    failure.
    """
    
    print("Import Debug Information")
    print("=" * 30)
    
    print(f"Current working directory: {os.getcwd()}")
    print(f"Python executable: {sys.executable}")
    print(f"Python version: {sys.version}")
    
    print("\nPython path:")
    for index, entry in enumerate(sys.path):
        print(f"  {index}: {entry}")
    
    # Look for a local source checkout of the package.
    import pathlib
    candidate = pathlib.Path.cwd() / "planetscope_py"
    
    print(f"\nPackage directory check:")
    print(f"  Looking for: {candidate}")
    print(f"  Exists: {candidate.exists()}")
    
    if candidate.exists():
        print(f"  Contents: {list(candidate.iterdir())}")
        print(f"  __init__.py exists: {(candidate / '__init__.py').exists()}")
    
    # Finally, attempt the import itself.
    try:
        import planetscope_py
    except ImportError as exc:
        print(f"\n✗ Import failed: {exc}")
        
        # Most common fixes, in order of likelihood.
        print("\nSuggestions:")
        print("  1. Run 'pip install -e .' from project root")
        print("  2. Add project root to PYTHONPATH")
        print("  3. Check __init__.py exists in planetscope_py/")
    else:
        print(f"\n✓ Import successful")
        print(f"  Module file: {planetscope_py.__file__}")
        print(f"  Version: {getattr(planetscope_py, '__version__', 'unknown')}")

# Usage
debug_import_issues()

Problem: Type checking errors

Error:

mypy: error: Cannot find implementation or library stub for module 'planetscope_py'

Solutions:

  1. Install type stubs:
# Install common type stubs
pip install types-requests types-setuptools types-PyYAML

# For specific packages
pip install types-beautifulsoup4 types-Pillow
  1. Configure mypy:
# Create mypy.ini or add to pyproject.toml
[mypy]
python_version = 3.10
warn_return_any = True
warn_unused_configs = True
disallow_untyped_defs = True

# Ignore missing imports for third-party packages without stubs
[mypy-shapely.*]
ignore_missing_imports = True

[mypy-pyproj.*]
ignore_missing_imports = True

[mypy-pandas.*]
ignore_missing_imports = True
  1. Run mypy with options:
# Ignore missing imports
mypy planetscope_py/ --ignore-missing-imports

# Check specific file
mypy planetscope_py/auth.py --ignore-missing-imports

# Generate report
mypy planetscope_py/ --html-report mypy_report/
  1. Add type annotations gradually:
# Example of adding type hints
from typing import Optional, Dict, List, Union
from datetime import datetime

def example_function(
    geometry: Dict,
    start_date: Union[str, datetime],
    api_key: Optional[str] = None,
) -> Dict:
    """Illustrate fully annotated parameters and return type.

    Args:
        geometry: GeoJSON-style geometry dictionary.
        start_date: Search start as ISO string or datetime.
        api_key: Optional Planet API key override.

    Returns:
        An empty dict (placeholder body for documentation purposes).
    """
    # Placeholder: a real implementation would use the arguments.
    return {}

Advanced Troubleshooting

Debug Logging

Enable comprehensive logging to understand what's happening:

import logging
import sys

def setup_debug_logging():
    """Enable DEBUG-level logging to stdout and to 'planetscope_debug.log'."""
    
    # Root logger: echo to stdout and append everything to a debug file.
    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        handlers=[
            logging.StreamHandler(sys.stdout),
            logging.FileHandler('planetscope_debug.log')
        ]
    )
    
    # Raise verbosity on each library logger of interest.
    for logger_name in (
        'planetscope_py',
        'planetscope_py.auth',
        'planetscope_py.query',
        'planetscope_py.metadata',
        'planetscope_py.rate_limiter',
        'requests.packages.urllib3',
    ):
        logging.getLogger(logger_name).setLevel(logging.DEBUG)
        print(f"Enabled debug logging for: {logger_name}")
    
    print("Debug logging enabled. Check 'planetscope_debug.log' for detailed logs.")

# Usage
setup_debug_logging()

# Now run your planetscope-py operations
from planetscope_py import PlanetScopeQuery
query = PlanetScopeQuery()  # Will show detailed debug output

System Requirements Check

import sys
import platform
import subprocess

def comprehensive_system_check():
    """Comprehensive system requirements check.

    Verifies Python version, platform details, memory, disk space,
    network reachability of the Planet API, git availability, and key
    package versions.  Only an unsupported Python version aborts the
    check (returns False); all other problems are reported but tolerated.

    Returns:
        bool: False if Python < 3.10, True otherwise.
    """
    
    print("Comprehensive System Check")
    print("=" * 50)
    
    # Python version (hard requirement: 3.10+)
    python_version = sys.version_info
    print(f"Python: {python_version.major}.{python_version.minor}.{python_version.micro}")
    
    if python_version < (3, 10):
        print("❌ ERROR: Python 3.10+ required")
        return False
    else:
        print("✅ Python version OK")
    
    # Platform information
    print(f"Platform: {platform.system()} {platform.release()}")
    print(f"Architecture: {platform.machine()}")
    print(f"Processor: {platform.processor()}")
    
    # Memory check (psutil is optional; skip gracefully when absent)
    try:
        import psutil
        memory = psutil.virtual_memory()
        memory_gb = memory.total / (1024**3)
        available_gb = memory.available / (1024**3)
        
        print(f"Memory: {memory_gb:.1f} GB total, {available_gb:.1f} GB available")
        
        if memory_gb < 4:
            print("⚠️ WARNING: Less than 4GB RAM - may impact performance")
        else:
            print("✅ Memory OK")
            
    except ImportError:
        print("ℹ️ psutil not available - cannot check memory")
    
    # Disk space ('/' resolves to the current drive root on Windows)
    try:
        import shutil
        disk_usage = shutil.disk_usage('/')
        free_gb = disk_usage.free / (1024**3)
        
        print(f"Disk space: {free_gb:.1f} GB free")
        
        if free_gb < 5:
            print("⚠️ WARNING: Less than 5GB free disk space")
        else:
            print("✅ Disk space OK")
            
    except Exception as e:
        print(f"ℹ️ Cannot check disk space: {e}")
    
    # Network connectivity: raw TCP first, then a full API round-trip
    print("\nNetwork Connectivity:")
    
    try:
        import socket
        socket.create_connection(("api.planet.com", 443), timeout=10)
        print("✅ Can reach api.planet.com:443")
    except Exception as e:
        print(f"❌ Cannot reach api.planet.com:443 - {e}")
    
    try:
        import requests
        response = requests.get("https://api.planet.com/data/v1/", timeout=10)
        print(f"✅ Planet API responds: HTTP {response.status_code}")
    except Exception as e:
        print(f"❌ Planet API error: {e}")
    
    # Git version (for development)
    try:
        git_version = subprocess.check_output(['git', '--version'],
                                              stderr=subprocess.DEVNULL).decode().strip()
        print(f"Git: {git_version}")
    except (OSError, subprocess.SubprocessError):
        # Bug fix: the original bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit.  OSError covers a missing git
        # binary; SubprocessError covers a non-zero exit.
        print("Git: Not available")
    
    # Package versions
    print("\nKey Package Versions:")
    packages = ['requests', 'shapely', 'pyproj', 'numpy', 'pandas']
    
    for package in packages:
        try:
            module = __import__(package)
            version = getattr(module, '__version__', 'unknown')
            print(f"  {package}: {version}")
        except ImportError:
            print(f"  {package}: NOT INSTALLED")
    
    return True

# Usage
system_ok = comprehensive_system_check()

Performance Profiling

import cProfile
import pstats
import io
import time

class PerformanceProfiler:
    """Profile performance of planetscope-py operations.

    Wraps cProfile so a single callable can be profiled and its wall-clock
    duration, success flag, and formatted statistics returned together.
    """
    
    def __init__(self):
        self.profiler = cProfile.Profile()
        # Bug fix: initialize timestamps here so stop_profiling() cannot
        # raise AttributeError if it is ever called before start_profiling().
        self.start_time = 0.0
        self.end_time = 0.0
        
    def start_profiling(self):
        """Start collecting profile data and record the wall-clock start."""
        self.profiler.enable()
        self.start_time = time.time()
        
    def stop_profiling(self):
        """Stop collecting profile data.

        Returns:
            float: Elapsed wall-clock seconds since start_profiling().
        """
        self.profiler.disable()
        self.end_time = time.time()
        return self.end_time - self.start_time
    
    def get_stats(self, sort_by='cumulative', limit=20):
        """Return formatted profiling statistics.

        Args:
            sort_by: pstats sort key (e.g. 'cumulative', 'tottime').
            limit: Maximum number of rows to include.

        Returns:
            str: Human-readable statistics table.
        """
        buffer = io.StringIO()
        stats = pstats.Stats(self.profiler, stream=buffer)
        stats.sort_stats(sort_by)
        stats.print_stats(limit)
        return buffer.getvalue()
    
    def profile_operation(self, operation_func, *args, **kwargs):
        """Profile a single call to *operation_func*.

        Exceptions raised by the callable are captured, not propagated.

        Returns:
            dict with keys 'result', 'success', 'error', 'duration', 'stats'.
        """
        self.start_profiling()
        
        try:
            result = operation_func(*args, **kwargs)
            success = True
            error = None
        except Exception as e:
            result = None
            success = False
            error = str(e)
        
        # Stop timing after the try/except so failed calls are timed too.
        duration = self.stop_profiling()
        
        return {
            'result': result,
            'success': success,
            'error': error,
            'duration': duration,
            'stats': self.get_stats()
        }

# Usage
profiler = PerformanceProfiler()

def test_search():
    """Run a small representative scene search (Milan, Jan 2025) for profiling."""
    from planetscope_py import PlanetScopeQuery

    milan_point = {"type": "Point", "coordinates": [9.19, 45.46]}
    return PlanetScopeQuery().search_scenes(milan_point, "2025-01-01", "2025-01-31")

# Profile the search
profile_result = profiler.profile_operation(test_search)

print(f"Operation took: {profile_result['duration']:.2f} seconds")
print(f"Success: {profile_result['success']}")

if profile_result['success']:
    print("Performance breakdown:")
    print(profile_result['stats'])
else:
    print(f"Error: {profile_result['error']}")

Network Diagnostics

import socket
import time
import requests
from urllib.parse import urlparse

def network_diagnostics(url="https://api.planet.com/data/v1/"):
    """Comprehensive network diagnostics for Planet API.

    Runs, in order: DNS resolution, TCP connect, SSL handshake (HTTPS
    only), and a full HTTP request, timing each stage in milliseconds.

    Args:
        url: Endpoint to test; defaults to the Planet Data API root.

    Returns:
        bool: True if every stage succeeds, False at the first failure.
    """
    
    print("Network Diagnostics")
    print("=" * 30)
    
    parsed_url = urlparse(url)
    hostname = parsed_url.hostname
    port = parsed_url.port or (443 if parsed_url.scheme == 'https' else 80)
    
    print(f"Target: {hostname}:{port}")
    
    # DNS resolution
    try:
        start_time = time.time()
        ip_address = socket.gethostbyname(hostname)
        dns_time = (time.time() - start_time) * 1000
        
        print(f"✅ DNS Resolution: {ip_address} ({dns_time:.1f}ms)")
    except Exception as e:
        print(f"❌ DNS Resolution failed: {e}")
        return False
    
    # TCP connection
    try:
        start_time = time.time()
        sock = socket.create_connection((hostname, port), timeout=10)
        tcp_time = (time.time() - start_time) * 1000
        sock.close()
        
        print(f"✅ TCP Connection: {tcp_time:.1f}ms")
    except Exception as e:
        print(f"❌ TCP Connection failed: {e}")
        return False
    
    # SSL handshake (for HTTPS)
    if parsed_url.scheme == 'https':
        try:
            import ssl
            context = ssl.create_default_context()
            
            start_time = time.time()
            with socket.create_connection((hostname, port), timeout=10) as sock:
                with context.wrap_socket(sock, server_hostname=hostname) as ssock:
                    ssl_time = (time.time() - start_time) * 1000
                    cert = ssock.getpeercert()
            
            print(f"✅ SSL Handshake: {ssl_time:.1f}ms")
            # Bug fix: the original printed cert['subject'][0][0][1], which
            # assumes the common name is the first RDN of the subject and
            # raised IndexError/KeyError on differently ordered certificates
            # (misreported as an SSL handshake failure).  Build a dict of
            # subject attributes and look commonName up explicitly.
            subject = dict(rdn[0] for rdn in cert.get('subject', ()))
            print(f"   Certificate: {subject.get('commonName', 'unknown')}")
            
        except Exception as e:
            print(f"❌ SSL Handshake failed: {e}")
            return False
    
    # HTTP request
    try:
        start_time = time.time()
        response = requests.get(url, timeout=30)
        http_time = (time.time() - start_time) * 1000
        
        print(f"✅ HTTP Request: {response.status_code} ({http_time:.1f}ms)")
        print(f"   Response size: {len(response.content)} bytes")
        
        # Surface the most useful response headers (rate-limit info, etc.)
        relevant_headers = ['server', 'content-type', 'x-ratelimit-limit']
        for header in relevant_headers:
            if header in response.headers:
                print(f"   {header}: {response.headers[header]}")
                
    except Exception as e:
        print(f"❌ HTTP Request failed: {e}")
        return False
    
    print("✅ All network tests passed")
    return True

# Usage
network_ok = network_diagnostics()

Emergency Recovery Procedures

Complete Library Reset

If you're experiencing persistent issues across multiple components:

#!/bin/bash
# Complete planetscope-py reset script

echo "Starting planetscope-py emergency reset..."

# 1. Complete uninstall
echo "Step 1: Uninstalling planetscope-py..."
pip uninstall planetscope-py -y
pip cache purge

# 2. Clean Python cache
echo "Step 2: Cleaning Python cache..."
find . -type d -name "__pycache__" -exec rm -rf {} + 2>/dev/null
find . -name "*.pyc" -delete 2>/dev/null

# 3. Clear any conda packages (if using conda)
if command -v conda &> /dev/null; then
    echo "Step 3: Cleaning conda cache..."
    conda clean --all -y
fi

# 4. Reset virtual environment (optional)
read -p "Do you want to recreate virtual environment? (y/n): " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]; then
    echo "Step 4: Recreating virtual environment..."
    deactivate 2>/dev/null || true
    rm -rf venv
    python -m venv venv
    source venv/bin/activate
fi

# 5. Fresh install
echo "Step 5: Fresh installation..."
pip install --upgrade pip setuptools wheel
pip install planetscope-py

# 6. Verify installation
echo "Step 6: Verifying installation..."
python -c "import planetscope_py; print(f'✅ planetscope-py v{planetscope_py.__version__} installed successfully')" || echo "❌ Installation verification failed"

echo "Emergency reset complete!"

Reset All Configuration

import os
import shutil
from pathlib import Path

def reset_all_configuration():
    """Reset all planetscope-py configuration to defaults.

    Clears the known environment variables, deletes configuration files
    in the user's home directory, and removes cache directories.

    Returns:
        bool: Always True once the reset finishes.
    """
    import tempfile  # local import keeps the snippet self-contained
    
    print("Resetting all planetscope-py configuration...")
    
    # Clear environment variables
    env_vars_to_clear = [
        'PL_API_KEY',
        'PLANETSCOPE_LOG_LEVEL',
        'PLANETSCOPE_CONFIG_PATH',
        'PLANETSCOPE_CACHE_DIR'
    ]
    
    for var in env_vars_to_clear:
        if var in os.environ:
            del os.environ[var]
            print(f"Cleared environment variable: {var}")
    
    # Remove config files
    config_files = [
        Path.home() / '.planet.json',
        Path.home() / '.planetscope.json',
        Path.home() / '.config' / 'planetscope.json'
    ]
    
    for config_file in config_files:
        if config_file.exists():
            config_file.unlink()
            print(f"Removed config file: {config_file}")
    
    # Remove cache directories.
    # Bug fix: the hard-coded Path('/tmp') does not exist on Windows;
    # tempfile.gettempdir() resolves the platform temp directory and is
    # still '/tmp' on typical Unix systems, so behavior is preserved there.
    cache_dirs = [
        Path.home() / '.planetscope_cache',
        Path.home() / '.cache' / 'planetscope',
        Path(tempfile.gettempdir()) / 'planetscope_cache'
    ]
    
    for cache_dir in cache_dirs:
        if cache_dir.exists():
            shutil.rmtree(cache_dir)
            print(f"Removed cache directory: {cache_dir}")
    
    print("Configuration reset complete!")
    print("\nNext steps:")
    print("1. Set PL_API_KEY environment variable or create ~/.planet.json")
    print("2. Test basic functionality with planetscope-py")
    
    return True

# Usage
reset_all_configuration()

Manual Recovery Steps

If automated reset fails, follow these manual steps:

# 1. Stop all Python processes
pkill -f python

# 2. Remove package completely
pip freeze | grep planetscope | xargs pip uninstall -y

# 3. Clear Python cache manually
find ~/.cache -name "*planetscope*" -type d -exec rm -rf {} +
find /tmp -name "*planetscope*" -type d -exec rm -rf {} +

# 4. Remove config files
rm -f ~/.planet.json ~/.planetscope.json
rm -rf ~/.config/planetscope

# 5. Clear environment
unset PL_API_KEY PLANETSCOPE_LOG_LEVEL PLANETSCOPE_CONFIG_PATH

# 6. Fresh Python environment
python -m venv fresh_planetscope_env
source fresh_planetscope_env/bin/activate
pip install --upgrade pip
pip install planetscope-py

Common Error Solutions Quick Reference

Error Code Lookup

Error Code Problem Quick Fix
ImportError Module not found pip install planetscope-py
AuthenticationError No API key Set PL_API_KEY environment variable
ValidationError Invalid geometry Check polygon closure and coordinates
RateLimitError Too many requests Wait and retry with backoff
ConnectionError Network issues Check internet connection and firewall
SSLError Certificate problems Update certificates and system time
TimeoutError Request timeout Increase timeout values in config
CircuitBreakerOpen Too many failures Wait for automatic recovery

Platform-Specific Issues

Windows

Problem: SSL certificate verification fails

# Update certificates
pip install --upgrade certifi

# Set certificate bundle location
set SSL_CERT_FILE=C:\Path\To\Python\Lib\site-packages\certifi\cacert.pem

Problem: Shapely installation fails

# Use conda instead of pip
conda install -c conda-forge shapely pyproj geopandas
pip install planetscope-py --no-deps

macOS

Problem: Permission denied errors

# Fix ownership of Python installation
sudo chown -R $(whoami) /usr/local/lib/python3.*/site-packages/

# Use Homebrew Python
brew install [email protected]
/usr/local/opt/[email protected]/bin/pip3 install planetscope-py

Problem: SSL certificate verification fails

# Run certificate update command
/Applications/Python\ 3.*/Install\ Certificates.command

Linux

Problem: Missing system dependencies

# Ubuntu/Debian
sudo apt-get update
sudo apt-get install python3-dev libgeos-dev libproj-dev build-essential

# CentOS/RHEL
sudo yum groupinstall "Development Tools"
sudo yum install geos-devel proj-devel

Problem: Permission issues with user installation

# Fix user directory permissions
chmod 755 ~/.local
chmod -R 755 ~/.local/lib/python*/site-packages/
pip install --user --force-reinstall planetscope-py

Performance Optimization

Memory Usage Optimization

For large-scale operations, implement these memory-saving strategies:

def memory_efficient_batch_processing(query, geometries, batch_size=10,
                                      start_date="2025-01-01",
                                      end_date="2025-01-31",
                                      cloud_cover_max=0.3):
    """Process large geometry collections with minimal memory usage.

    Args:
        query: Object exposing ``search_scenes(geometry=..., start_date=...,
            end_date=..., cloud_cover_max=...)`` (e.g. PlanetScopeQuery).
        geometries: Sequence of GeoJSON geometry dicts to search.
        batch_size: Geometries processed between forced GC passes.
        start_date: Search window start (generalized from the previously
            hard-coded value; default unchanged).
        end_date: Search window end (default unchanged).
        cloud_cover_max: Maximum cloud-cover fraction (default unchanged).

    Returns:
        list of per-geometry summaries: on success
        {'geometry_index', 'scene_count', 'total_area_km2'}, on failure
        {'geometry_index', 'error'}.
    """
    
    import gc
    from contextlib import contextmanager
    
    @contextmanager
    def memory_monitor():
        """Report RSS change for the enclosed work; degrade gracefully without psutil."""
        process = None
        start_memory = 0.0
        try:
            # Bug fix: the original imported psutil inside the try *before*
            # the yield, so a missing psutil raised ImportError out of the
            # with-statement instead of simply skipping the measurement.
            try:
                import psutil
                process = psutil.Process()
                start_memory = process.memory_info().rss / 1024 / 1024
            except ImportError:
                pass
            yield
        finally:
            if process is not None:
                end_memory = process.memory_info().rss / 1024 / 1024
                print(f"Memory change: {end_memory - start_memory:.1f} MB")
            gc.collect()
    
    results = []
    total_geometries = len(geometries)
    
    for i in range(0, total_geometries, batch_size):
        batch = geometries[i:i + batch_size]
        batch_num = i // batch_size + 1
        total_batches = (total_geometries + batch_size - 1) // batch_size
        
        print(f"Processing batch {batch_num}/{total_batches}")
        
        with memory_monitor():
            for j, geometry in enumerate(batch):
                try:
                    result = query.search_scenes(
                        geometry=geometry,
                        start_date=start_date,
                        end_date=end_date,
                        cloud_cover_max=cloud_cover_max
                    )
                    
                    # Keep only small summary values so the full API
                    # response can be freed immediately.
                    essential_result = {
                        'geometry_index': i + j,
                        'scene_count': len(result['features']),
                        'total_area_km2': sum(
                            scene['properties'].get('pixel_resolution', 3) ** 2 * 
                            scene['properties'].get('pixel_count', 0) / 1e6
                            for scene in result['features']
                        )
                    }
                    
                    results.append(essential_result)
                    
                    # Drop the (potentially large) raw response right away.
                    del result
                    
                except Exception as e:
                    print(f"Error processing geometry {i + j}: {e}")
                    results.append({
                        'geometry_index': i + j,
                        'error': str(e)
                    })
        
        # Force garbage collection between batches
        gc.collect()
    
    return results

Network Performance Optimization

class OptimizedPlanetQuery:
    """Optimized query session with connection pooling and retries.

    Use as a context manager so the pooled HTTP session is always closed:

        with OptimizedPlanetQuery(api_key="...") as q:
            response = q.session.get(url)
    """
    
    def __init__(self, api_key=None):
        """Create a pooled requests session, optionally with basic auth.

        Args:
            api_key: Planet API key, used as the basic-auth username.
        """
        import requests
        from requests.adapters import HTTPAdapter
        from urllib3.util.retry import Retry
        
        self.session = requests.Session()
        
        # Retry transient failures (rate limits and 5xx) on idempotent verbs.
        # Bug fix: urllib3 renamed `method_whitelist` to `allowed_methods`
        # in 1.26 and removed the old name in 2.0, so the original keyword
        # raises TypeError on current installations.
        retry_strategy = Retry(
            total=3,
            status_forcelist=[429, 500, 502, 503, 504],
            allowed_methods=["HEAD", "GET", "OPTIONS"],
            backoff_factor=1
        )
        
        adapter = HTTPAdapter(
            pool_connections=10,
            pool_maxsize=10,
            max_retries=retry_strategy,
            pool_block=False
        )
        
        self.session.mount("http://", adapter)
        self.session.mount("https://", adapter)
        
        # Set optimized headers
        self.session.headers.update({
            'User-Agent': 'planetscope-py/1.0',
            'Accept-Encoding': 'gzip, deflate',
            'Connection': 'keep-alive'
        })
        
        if api_key:
            # Planet API takes the key as basic-auth username, empty password.
            self.session.auth = (api_key, '')
    
    def __enter__(self):
        return self
    
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Always release pooled connections, even when an exception occurred.
        self.session.close()

# Usage (assumes a search_scenes method built on self.session has been added
# to OptimizedPlanetQuery; the class above only provides the pooled session)
with OptimizedPlanetQuery(api_key="your_key") as optimized_query:
    # Perform multiple requests with connection reuse
    for geometry in geometries:
        result = optimized_query.search_scenes(geometry, "2025-01-01", "2025-01-31")

Troubleshooting Workflow

Step-by-Step Diagnostic Process

When encountering issues, follow this systematic approach:

  1. Initial Assessment

    # Run the health check script from earlier in this guide
    python health_check.py
    
  2. Identify Error Category

  3. Collect Debug Information

    # Enable debug logging
    import logging
    logging.basicConfig(level=logging.DEBUG)
    
    # Run problematic operation
    # Review logs for specific error details
    
  4. Try Progressive Solutions

    • Start with simplest solution (restart, reinstall)
    • Progress to more complex fixes (environment reset)
    • Use emergency recovery as last resort
  5. Verify Fix

    # Test basic functionality
    from planetscope_py import PlanetScopeQuery
    query = PlanetScopeQuery()
    
    test_geometry = {"type": "Point", "coordinates": [9.19, 45.46]}
    results = query.search_scenes(test_geometry, "2025-01-01", "2025-01-31")
    print(f"Test successful: Found {len(results['features'])} scenes")
    

Getting Help

Before Seeking Help

  1. Check this troubleshooting guide for your specific error
  2. Search existing issues on GitHub
  3. Run the diagnostic script and collect the output
  4. Try the progressive solutions listed for your error type

When Reporting Issues

Include the following information:

  1. Environment Details

    # Collect system information
    python -c "
    import sys, platform
    print(f'Python: {sys.version}')
    print(f'Platform: {platform.platform()}')
    print(f'Architecture: {platform.architecture()}')
    
    import planetscope_py
    print(f'planetscope-py: {planetscope_py.__version__}')
    "
    
  2. Error Details

    • Complete error message and traceback
    • Steps to reproduce the error
    • Expected vs actual behavior
  3. Configuration

    • Authentication method used
    • Any custom configuration
    • Network environment (proxy, firewall, etc.)

Community Resources

Professional Support

For enterprise users or complex deployment scenarios:

  • Email: [email protected]
  • Priority Support: Available for commercial users
  • Custom Integration: Development services available

Maintenance and Updates

Keeping planetscope-py Updated

# Check for updates
pip list --outdated | grep planetscope

# Update to latest version
pip install --upgrade planetscope-py

# Update with dependencies
pip install --upgrade planetscope-py --upgrade-strategy eager

Monitoring for Issues

Set up monitoring for production deployments:

def setup_production_monitoring():
    """Setup monitoring for production planetscope-py usage.

    Attaches a rotating file handler (10MB x 5 backups) to the
    'planetscope_py' logger and builds a status-logging callback.

    Returns:
        A zero-argument callable that logs current memory utilisation
        (a no-op if psutil is unavailable); schedule it with your own
        scheduling system.  The original version returned None, so
        callers that ignore the return value are unaffected.
    """
    
    import logging
    import logging.handlers
    
    # Setup rotating log files
    handler = logging.handlers.RotatingFileHandler(
        'planetscope_production.log',
        maxBytes=10*1024*1024,  # 10MB
        backupCount=5
    )
    
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    )
    handler.setFormatter(formatter)
    
    # Configure planetscope-py loggers
    logger = logging.getLogger('planetscope_py')
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    
    # Bug fix: the original imported psutil unconditionally, so the whole
    # setup failed with ImportError on hosts without psutil installed.
    try:
        import psutil
    except ImportError:
        psutil = None
        logger.warning("psutil not installed - system status logging disabled")
    
    def log_system_status():
        """Log current memory utilisation (no-op without psutil)."""
        if psutil is not None:
            memory = psutil.virtual_memory()
            logger.info(f"System status - Memory: {memory.percent}% used")
    
    # Schedule periodic status logging
    # (Implementation depends on your scheduling system)
    return log_system_status

Breaking Changes and Migration

When updating between major versions, check the migration guide:

def check_version_compatibility():
    """Report whether the installed planetscope-py supports the latest API features."""
    
    import planetscope_py
    
    version_string = planetscope_py.__version__
    print(f"Current version: {version_string}")
    
    # Anything before major version 2 predates the latest API features.
    if int(version_string.split('.')[0]) >= 2:
        print("✅ Compatible with latest API features")
        return
    print("⚠️ Consider upgrading for latest features")
    print("   Check migration guide for breaking changes")

# Usage
check_version_compatibility()

This completes the comprehensive troubleshooting guide for planetscope-py. The guide covers all major error categories, provides systematic diagnostic procedures, and includes recovery mechanisms for various failure scenarios.

For the most up-to-date troubleshooting information, always check the project's GitHub repository and documentation wiki.