Performance Optimization - jvPalma/dotrun GitHub Wiki
Performance Optimization
Skill Level: Advanced
Optimize DotRun scripts and collections for better performance and user experience.
Script Performance
Caching Strategies
Implement intelligent caching to avoid expensive operations:
#!/usr/bin/env bash
### DOC
# Script with intelligent caching
### DOC
set -euo pipefail

# Cache lives under the DotRun config dir; DR_CONFIG is provided by dotrun.
CACHE_DIR="$DR_CONFIG/cache"
CACHE_TTL=3600 # default TTL: 1 hour

# cache_key <string>
# Hash an arbitrary string into a filesystem-safe hex cache key.
cache_key() {
  local input="$1"
  printf '%s' "$input" | sha256sum | cut -d' ' -f1
}

# is_cache_valid <cache-file> <ttl-seconds>
# Returns 0 when the cache file exists and is younger than the TTL.
is_cache_valid() {
  local cache_file="$1"
  local ttl="$2"
  if [ ! -f "$cache_file" ]; then
    return 1
  fi
  local file_age
  # NOTE(review): 'stat -c %Y' is GNU coreutils; BSD/macOS needs 'stat -f %m'.
  file_age=$(($(date +%s) - $(stat -c %Y "$cache_file")))
  [ "$file_age" -lt "$ttl" ]
}

# cached_execution <key> <command> [ttl-seconds]
# Serve the cached stdout of <command> while the TTL is valid; otherwise
# run the command, stream its output, and cache it for next time.
cached_execution() {
  local key="$1" # renamed from 'cache_key' to avoid shadowing the function
  local command="$2"
  local ttl="${3:-$CACHE_TTL}"
  local cache_file="$CACHE_DIR/$key"
  mkdir -p "$CACHE_DIR"
  if is_cache_valid "$cache_file" "$ttl"; then
    echo "📋 Using cached result..."
    cat "$cache_file"
    return 0
  fi
  echo "🔄 Executing and caching result..."
  # Write to a temp file first so a failing command never leaves a
  # partial file behind that would later be served as a valid cache hit.
  local tmp_file
  tmp_file=$(mktemp "$CACHE_DIR/.tmp.XXXXXX")
  if eval "$command" | tee "$tmp_file"; then
    mv -- "$tmp_file" "$cache_file"
  else
    local rc=$?
    rm -f -- "$tmp_file"
    return "$rc"
  fi
}

main() {
  # ${1:-} so a missing operation prints usage instead of a set -u error.
  local operation="${1:-}"
  case "$operation" in
    expensive-query)
      local query_params="$2"
      local key
      key=$(cache_key "$operation:$query_params")
      cached_execution "$key" "dr database/expensive-query '$query_params'" 1800
      ;;
    build-dependencies)
      local key
      # Key on the hash of package.json so dependency changes bust the cache.
      key=$(cache_key "$operation:$(sha256sum <package.json)")
      cached_execution "$key" "npm install" 86400
      ;;
    *)
      echo "Usage: dr cached <expensive-query|build-dependencies> [params]" >&2
      exit 1
      ;;
  esac
}

main "$@"
Parallel Execution
Execute independent tasks in parallel:
#!/usr/bin/env bash
### DOC
# Parallel task execution with job control
### DOC
set -euo pipefail

declare -a PIDS=()  # PIDs of currently running background tasks
declare -a TASKS=() # queued tasks, each stored as "name:command"
MAX_PARALLEL=${MAX_PARALLEL:-4}
FAILED=0 # count of tasks that exited non-zero

# add_task <name> <command> — queue a task for later execution.
add_task() {
  local task_name="$1"
  local task_command="$2"
  TASKS+=("$task_name:$task_command")
}

# Block until fewer than MAX_PARALLEL tasks are running, reaping finished
# jobs and recording their exit status as they complete.
wait_for_slot() {
  while ((${#PIDS[@]} >= MAX_PARALLEL)); do
    local i
    for i in "${!PIDS[@]}"; do
      if ! kill -0 "${PIDS[$i]}" 2>/dev/null; then
        local exit_code=0
        # '|| exit_code=$?' keeps a failing job from aborting the whole
        # script via 'set -e' while still capturing its status.
        wait "${PIDS[$i]}" || exit_code=$?
        if [ "$exit_code" -ne 0 ]; then
          echo "❌ Task failed with exit code $exit_code" >&2
          FAILED=$((FAILED + 1))
        fi
        unset "PIDS[$i]"
      fi
    done
    PIDS=("${PIDS[@]}") # Reindex array (requires bash >= 4.4 when empty under set -u)
    sleep 0.1
  done
}

# run_task <"name:command"> — start a queued task in the background and
# remember its PID.
run_task() {
  local task_info="$1"
  local task_name="${task_info%%:*}"
  local task_command="${task_info#*:}"
  echo "🚀 Starting: $task_name"
  {
    eval "$task_command"
    echo "✅ Completed: $task_name"
  } &
  PIDS+=($!)
}

main() {
  # Add tasks
  add_task "Frontend Build" "dr frontend/build"
  add_task "Backend Build" "dr backend/build"
  add_task "Database Migration" "dr database/migrate"
  add_task "Asset Processing" "dr assets/process"
  add_task "Documentation" "dr docs/generate"
  echo "📋 Running ${#TASKS[@]} tasks with max $MAX_PARALLEL parallel..."
  # Execute tasks, throttled to MAX_PARALLEL concurrent jobs
  local task
  for task in "${TASKS[@]}"; do
    wait_for_slot
    run_task "$task"
  done
  # Barrier: wait for the remaining jobs, capturing failures instead of
  # letting 'set -e' abort on the first non-zero wait status.
  local pid
  for pid in "${PIDS[@]}"; do
    wait "$pid" || FAILED=$((FAILED + 1))
  done
  if [ "$FAILED" -gt 0 ]; then
    echo "❌ $FAILED task(s) failed" >&2
    return 1
  fi
  echo "✅ All tasks completed"
}

main "$@"
Resource Management
Adaptive Resource Usage
Adjust script behavior based on available system resources:
#!/usr/bin/env bash
### DOC
# Resource-aware script execution
### DOC
set -euo pipefail

# check_system_resources <min-memory-gb> <min-disk-gb> <min-cpu-cores>
# Returns 0 when the machine meets all three minimums, 1 otherwise.
check_system_resources() {
  local min_memory_gb="$1"
  local min_disk_gb="$2"
  local min_cpu_cores="$3"
  # Check available memory
  # NOTE(review): 'free' is Linux-only; column 7 is the "available" field.
  local available_memory_gb
  available_memory_gb=$(free -g | awk '/^Mem:/{print $7}')
  if [ "$available_memory_gb" -lt "$min_memory_gb" ]; then
    echo "❌ Insufficient memory: ${available_memory_gb}GB available, ${min_memory_gb}GB required"
    return 1
  fi
  # Check available disk space on the current filesystem
  local available_disk_gb
  available_disk_gb=$(df -BG . | awk 'NR==2{print $4}' | sed 's/G//')
  if [ "$available_disk_gb" -lt "$min_disk_gb" ]; then
    echo "❌ Insufficient disk space: ${available_disk_gb}GB available, ${min_disk_gb}GB required"
    return 1
  fi
  # Check CPU cores
  local available_cores
  available_cores=$(nproc)
  if [ "$available_cores" -lt "$min_cpu_cores" ]; then
    echo "❌ Insufficient CPU cores: ${available_cores} available, ${min_cpu_cores} required"
    return 1
  fi
  echo "✅ System resources sufficient"
  return 0
}

# adaptive_parallelism <cpu-intensive|memory-intensive|io-intensive|*>
# Print a suggested parallel-job count for the given workload type.
# Always prints at least 1 so callers like 'make -j' get a sane value.
adaptive_parallelism() {
  local task_type="$1"
  local available_cores
  available_cores=$(nproc)
  local available_memory_gb
  available_memory_gb=$(free -g | awk '/^Mem:/{print $7}')
  local jobs
  case "$task_type" in
    cpu-intensive)
      jobs=$((available_cores - 1)) # Leave one core free
      ;;
    memory-intensive)
      jobs=$((available_memory_gb / 2)) # One task per 2GB
      ;;
    io-intensive)
      jobs=$((available_cores * 2)) # I/O can handle more parallelism
      ;;
    *)
      jobs=$available_cores
      ;;
  esac
  # Clamp: never suggest 0 jobs (single-core box, <2GB free, etc.)
  echo $((jobs > 0 ? jobs : 1))
}

main() {
  # ${1:-} so a missing operation prints usage instead of a set -u error.
  local operation="${1:-}"
  case "$operation" in
    resource-check)
      check_system_resources "${2:-4}" "${3:-10}" "${4:-2}"
      ;;
    adaptive-build)
      local parallelism
      parallelism=$(adaptive_parallelism "cpu-intensive")
      echo "🏗️ Building with $parallelism parallel jobs..."
      make -j"$parallelism"
      ;;
    *)
      echo "Usage: dr resources <resource-check|adaptive-build> [args]" >&2
      exit 1
      ;;
  esac
}

main "$@"
Collection Performance
Optimizing Collection Structure
- Logical organization: Group related scripts together
- Minimal dependencies: Reduce external tool requirements
- Efficient completion: Optimize tab completion performance
- Regular cleanup: Remove unused or deprecated scripts
Collection Caching
Implement collection-level caching for faster operations:
#!/usr/bin/env bash
### DOC
# Collection update with intelligent caching
### DOC
set -euo pipefail

# Timestamps of the last update for each collection live here.
COLLECTION_CACHE="$DR_CONFIG/cache/collections"

# should_update_collection <name>
# Returns 0 (should update) when no timestamp is cached or the cached
# timestamp is older than COLLECTION_UPDATE_INTERVAL seconds.
should_update_collection() {
  local collection_name="$1"
  local cache_file="$COLLECTION_CACHE/$collection_name.last_update"
  local update_interval="${COLLECTION_UPDATE_INTERVAL:-86400}" # 24 hours
  if [ ! -f "$cache_file" ]; then
    return 0 # No cache, should update
  fi
  local last_update
  last_update=$(<"$cache_file")
  local current_time
  current_time=$(date +%s)
  if [ $((current_time - last_update)) -gt "$update_interval" ]; then
    return 0 # Cache expired, should update
  fi
  return 1 # Cache still valid
}

# update_collection_cache <name> — record "now" as the last update time.
update_collection_cache() {
  local collection_name="$1"
  mkdir -p "$COLLECTION_CACHE"
  date +%s >"$COLLECTION_CACHE/$collection_name.last_update"
}

main() {
  # ':?' aborts with a usage hint instead of a bare set -u error.
  local collection_name="${1:?collection name required}"
  if should_update_collection "$collection_name"; then
    echo "🔄 Updating collection: $collection_name"
    dr collections update "$collection_name"
    update_collection_cache "$collection_name"
  else
    echo "✅ Collection $collection_name is up to date"
  fi
}

main "$@"
DotRun System Performance
Startup Optimization
- Cold start: ~50ms (script discovery + resolution)
- Warm start: ~10ms (cached resolution)
- Tab completion: ~100ms (full script enumeration)
Performance Monitoring
Monitor script execution performance:
#!/usr/bin/env bash
### DOC
# Performance monitoring for scripts
### DOC
set -euo pipefail

PERF_LOG="$DR_CONFIG/performance.log"

# measure_execution <script-name> [args...]
# Run a DotRun script, append "timestamp,script,duration,exit_code" as a
# CSV line to the performance log, and propagate the script's exit status.
measure_execution() {
  local script_name="$1"
  shift
  local start_time
  # NOTE(review): '%s.%3N' (sub-second) is GNU date; BSD date prints a literal N.
  start_time=$(date +%s.%3N)
  # '|| exit_code=$?' captures a failure without tripping 'set -e';
  # the original 'local exit_code=$?' never ran because -e exited first.
  local exit_code=0
  dr "$script_name" "$@" || exit_code=$?
  local end_time
  end_time=$(date +%s.%3N)
  local duration
  duration=$(echo "$end_time - $start_time" | bc)
  # Log performance data
  echo "$(date -u +%Y-%m-%dT%H:%M:%SZ),$script_name,$duration,$exit_code" >>"$PERF_LOG"
  echo "⏱️ Execution time: ${duration}s"
  return "$exit_code"
}

# Summarize the performance log: slowest scripts and execution counts.
analyze_performance() {
  if [ ! -f "$PERF_LOG" ]; then
    echo "No performance data available"
    return
  fi
  echo "📊 Performance Analysis"
  echo "====================="
  echo
  echo "Slowest scripts:"
  # Field 3 of the CSV is the duration in seconds.
  sort -t',' -k3 -nr "$PERF_LOG" | head -10 | while IFS=, read -r timestamp script duration exit_code; do
    echo " $script: ${duration}s"
  done
  echo
  echo "Most frequently executed:"
  awk -F',' '{print $2}' "$PERF_LOG" | sort | uniq -c | sort -nr | head -10
}

main() {
  case "${1:-}" in
    measure)
      shift
      measure_execution "$@"
      ;;
    analyze)
      analyze_performance
      ;;
    *)
      echo "Usage: dr perf <measure|analyze> [script-name] [args...]" >&2
      exit 1
      ;;
  esac
}

main "$@"
Optimization Guidelines
Script-Level Optimizations
- Minimize external commands: Use bash built-ins when possible
- Cache expensive operations: Store results for reuse
- Use efficient tools: Prefer `ripgrep` over `grep`, `fd` over `find`
- Batch operations: Group related operations together
- Early exits: Return early when possible
System-Level Optimizations
- Collection management: Remove unused collections
- Script organization: Keep frequently used scripts easily accessible
- Dependency optimization: Minimize required external tools
- Regular maintenance: Clean up old cache files and logs
Team-Level Optimizations
- Collection structure: Organize for team efficiency
- Documentation quality: Reduce time spent understanding scripts
- Standardization: Consistent patterns reduce cognitive load
- Automation: Automate routine maintenance tasks
Next Steps
- Advanced Script Patterns - Complex script development techniques
- CI/CD Integration - Optimize automation pipelines
- Security Patterns - Secure performance optimization
Performance optimization should balance speed with maintainability and security. Focus on optimizing the most frequently used scripts and workflows first.