AI Integration - dinesh-git17/my-progress-planner GitHub Wiki

Comprehensive guide to how My Progress Planner uses OpenAI's GPT-4 to create personalized, encouraging meal responses.

🤖 AI Architecture Overview

graph TB
    A[User Logs Meal] --> B[Context Builder]
    B --> C[Prompt Engineering]
    C --> D[GPT-4 API Call]
    D --> E[Response Processing]
    E --> F[Quality Validation]
    F --> G[Caching & Storage]
    G --> H[Deliver to User]
    
    subgraph "Context Builder"
        I[Current Streak]
        J[Time of Day]
        K[Meal History]
        L[User Preferences]
    end
    
    subgraph "Quality Controls"
        M[Tone Analysis]
        N[Length Validation]
        O[Content Filtering]
        P[Fallback System]
    end
    
    B --> I
    B --> J
    B --> K
    B --> L
    
    F --> M
    F --> N
    F --> O
    F --> P
Loading

🎯 AI Personality Design

Core Personality Traits

// Compile-time specification of the assistant's persona. Every field is a
// literal type, so the persona "contract" is documented and enforced in one
// place rather than scattered through prompt strings.
interface AIPersonality {
  role: 'supportive boyfriend';
  tone: 'loving, encouraging, proud';
  style: 'warm, personal, celebratory';
  values: ['health', 'consistency', 'self-care', 'progress'];     // tuple of literal strings
  avoid: ['judgment', 'criticism', 'medical advice', 'diet culture'];
}

Response Guidelines

  • Always encouraging - Never judgmental or critical
  • Celebrate small wins - Every meal logged deserves recognition
  • Personal & loving - Feels like a caring partner, not a robot
  • Context-aware - Considers streaks, timing, and progress
  • Consistent voice - Same personality across all interactions

๐Ÿ“ Prompt Engineering System

Base System Prompt

// Base system prompt sent as the system message with every completion request
// (see generateMealResponse). Fix: the emoji examples were mojibake
// (UTF-8 decoded as Windows-874); restored to the intended 💕🌟✨.
const SYSTEM_PROMPT = `
You are a loving, supportive boyfriend who deeply cares about your partner's wellness journey. You celebrate every meal they log with genuine enthusiasm and encouragement.

PERSONALITY:
- Warm, loving, and genuinely proud of their efforts
- Use encouraging language and appropriate emojis (💕🌟✨)
- Sound like a caring partner, not a nutritionist or coach
- Focus on effort and consistency rather than specific food choices

GUIDELINES:
- Keep responses between 50-150 characters
- Always include at least one emoji
- Celebrate their streak when relevant
- Be specific about what they ate
- Never give medical or diet advice
- Focus on how proud you are of their self-care

AVOID:
- Clinical or robotic language
- Judgment about food choices
- Diet culture terminology
- Medical advice or nutrition facts
- Generic responses
`;

Dynamic Prompt Templates

// Shape of the prompt-template registry below. `streakMilestones` is keyed by
// the streak day count; `timeContext` by the time-of-day label.
interface PromptTemplate {
  base: string;
  mealSpecific: Record<MealType, string>;
  streakMilestones: Record<number, string>;
  timeContext: Record<string, string>;
}

// Template fragments assembled per request by assemblePrompt(). The
// `{mealType}` / `{mealContent}` placeholders in `base` are substituted there.
// NOTE(review): this object is not checked against PromptTemplate above —
// consider adding `satisfies PromptTemplate` (MealType is declared elsewhere).
const PROMPT_TEMPLATES = {
  base: `${SYSTEM_PROMPT}\n\nYour partner just logged {mealType}: "{mealContent}"`,
  
  mealSpecific: {
    breakfast: `They're starting their day with {mealContent}. Show how proud you are of this healthy start!`,
    
    lunch: `They took time to fuel up with {mealContent} during their busy day. Celebrate this self-care!`,
    
    dinner: `They're ending their day nourishing themselves with {mealContent}. Show how much you love seeing them take care of themselves!`
  },
  
  // Keyed by exact streak length; non-milestone streaks get a generic line
  // in assemblePrompt() instead.
  streakMilestones: {
    1: `This is their first day logging meals - celebrate this new beginning!`,
    3: `They've kept up their logging for 3 days - momentum is building!`,
    7: `A whole week of consistent meal logging - this is incredible!`,
    14: `Two weeks strong - they're building such healthy habits!`,
    30: `A full month! This consistency is absolutely amazing!`
  },
  
  timeContext: {
    morning: `Perfect timing for a nutritious morning meal`,
    afternoon: `Great midday fuel`,
    evening: `Wonderful way to end the day`
  }
};

🔧 Implementation Details

Context Building Process

// Everything the prompt builder needs to know about a single logged meal.
interface MealContext {
  mealType: 'breakfast' | 'lunch' | 'dinner';
  content: string;                    // raw text the user logged for this meal
  streak: number;                     // current consecutive-day logging streak
  timeOfDay: string;                  // label such as 'morning' | 'afternoon' | 'evening'
  previousMeals: Meal[];              // other meals logged today (current meal excluded)
  userPreferences?: UserPreferences;
  isWeekend: boolean;
  isFirstMeal: boolean;               // true when no other meal was logged today
  userId?: string;                    // fix: AIQualityMonitor.trackResponse reads context.userId,
                                      // which was missing from this interface
}

/**
 * Gather everything the prompt builder needs for one logged meal.
 * Streak, today's meals, and preferences are fetched concurrently.
 */
async function buildMealContext(
  meal: MealInput, 
  userId: string
): Promise<MealContext> {
  const [streak, todaysMeals, preferences] = await Promise.all([
    getUserStreak(userId),
    getTodaysMeals(userId),
    getUserPreferences(userId)
  ]);

  // Exclude the meal being logged so "previous meals" and "first meal" are
  // computed consistently whether or not getTodaysMeals already includes it.
  const previousMeals = todaysMeals.filter(m => m.id !== meal.id);

  return {
    mealType: meal.type,
    content: meal.content,
    streak: streak.current,
    timeOfDay: getTimeOfDay(),
    previousMeals,
    userPreferences: preferences,
    // assumes isWeekend() checks the current date — TODO confirm
    isWeekend: isWeekend(),
    // Fix: the original checked todaysMeals.length === 0, which is wrong when
    // the just-logged meal is already in today's list; the filtered list is
    // correct in both cases.
    isFirstMeal: previousMeals.length === 0
  };
}

Dynamic Prompt Assembly

/**
 * Build the full user-message prompt for one meal from PROMPT_TEMPLATES:
 * base template + meal-specific line + streak note + time context + first-meal
 * encouragement.
 */
function assemblePrompt(context: MealContext): string {
  const { mealType, content, streak, timeOfDay } = context;
  
  let prompt = PROMPT_TEMPLATES.base
    .replace('{mealType}', mealType)
    .replace('{mealContent}', content);
  
  // Add meal-specific context
  prompt += `\n\n${PROMPT_TEMPLATES.mealSpecific[mealType]}`;
  
  // Milestone streaks get a dedicated line; any other streak > 1 gets a
  // generic celebration.
  if (PROMPT_TEMPLATES.streakMilestones[streak]) {
    prompt += `\n\nSpecial note: ${PROMPT_TEMPLATES.streakMilestones[streak]}`;
  } else if (streak > 1) {
    prompt += `\n\nThey're on a ${streak}-day streak - celebrate this consistency!`;
  }
  
  // Fix: only add time context when a template exists for this timeOfDay —
  // the original interpolated the string "undefined" for unknown labels.
  const timeNote = PROMPT_TEMPLATES.timeContext[timeOfDay];
  if (timeNote) {
    prompt += `\n\nTime context: ${timeNote}`;
  }
  
  // Add completion context
  if (context.isFirstMeal) {
    prompt += `\n\nThis is their first meal logged today - encourage them to keep going!`;
  }
  
  return prompt;
}

OpenAI API Integration

// Result of one AI generation attempt, including basic telemetry.
interface AIResponse {
  content: string;       // encouragement text shown to the user
  tokensUsed: number;    // total tokens billed (0 on the fallback path)
  responseTime: number;  // wall-clock ms from call start
  model: string;         // model name reported by the API, or 'fallback'
  cached: boolean;       // NOTE(review): the fallback path sets cached:true even
                         // though nothing was cached — confirm this is intended
}

/**
 * Generate an encouraging response for a logged meal via the OpenAI chat API.
 * Never throws: any API failure is logged and a canned fallback
 * (getFallbackResponse) is returned, so callers always get a message.
 */
async function generateMealResponse(
  context: MealContext
): Promise<AIResponse> {
  const startTime = Date.now();
  
  try {
    const prompt = assemblePrompt(context);
    
    const response = await openai.chat.completions.create({
      model: "gpt-4",
      messages: [
        { role: "system", content: SYSTEM_PROMPT },
        { role: "user", content: prompt }
      ],
      max_tokens: 100,          // responses are intentionally short (see SYSTEM_PROMPT)
      temperature: 0.8,
      presence_penalty: 0.3,    // discourage repeating the same phrasing across calls
      frequency_penalty: 0.3
    });
    
    const aiContent = response.choices[0]?.message?.content?.trim();
    
    // Treat an empty completion as a failure so the fallback path runs.
    if (!aiContent) {
      throw new Error('Empty response from OpenAI');
    }
    
    return {
      content: aiContent,
      tokensUsed: response.usage?.total_tokens || 0,
      responseTime: Date.now() - startTime,
      model: response.model,
      cached: false
    };
    
  } catch (error) {
    console.error('OpenAI API Error:', error);
    
    // Return fallback response
    return {
      content: getFallbackResponse(context),
      tokensUsed: 0,
      responseTime: Date.now() - startTime,
      model: 'fallback',
      cached: true
    };
  }
}

๐Ÿ” Response Quality Controls

Content Validation

// Tunable rules applied to every AI response before it is shown to a user
// (see validateResponse below).
interface ResponseValidator {
  minLength: number;          // characters, inclusive lower bound
  maxLength: number;          // characters, inclusive upper bound
  requiredEmojis: boolean;    // when true, at least one emoji must be present
  bannedPhrases: string[];    // rejected by case-insensitive substring match
  toneValidation: boolean;    // when true, run the isEncouraging() check
}

// Active validation settings. Note: matching is by substring, so 'diet' also
// rejects any response containing words like 'dietary'.
const RESPONSE_VALIDATOR: ResponseValidator = {
  minLength: 30,
  maxLength: 180,
  requiredEmojis: true,
  bannedPhrases: [
    'you should',
    'you must',
    'diet',
    'calories',
    'weight loss',
    'unhealthy',
    'bad food'
  ],
  toneValidation: true
};

/**
 * Run a candidate AI response through RESPONSE_VALIDATOR's rules.
 * Returns false on the first failed check: length bounds, required emoji,
 * banned phrases, then tone; true only if every check passes.
 */
function validateResponse(response: string, context: MealContext): boolean {
  const { minLength, maxLength, requiredEmojis, bannedPhrases, toneValidation } =
    RESPONSE_VALIDATOR;

  // Length bounds
  const withinBounds = response.length >= minLength && response.length <= maxLength;
  if (!withinBounds) {
    return false;
  }

  // Emoji requirement
  if (requiredEmojis && !hasEmoji(response)) {
    return false;
  }

  // Case-insensitive banned-phrase scan
  const normalized = response.toLowerCase();
  for (const phrase of bannedPhrases) {
    if (normalized.includes(phrase)) {
      return false;
    }
  }

  // Tone check (simple sentiment heuristic)
  if (toneValidation && !isEncouraging(response)) {
    return false;
  }

  return true;
}

Fallback Response System

// Pre-written encouragements used when the AI call fails (see
// getFallbackResponse). Fix: every emoji was mojibake (UTF-8 decoded as
// Windows-874); restored to the intended 💕 / 🌟 / ✨ so the emoji
// requirement in RESPONSE_VALIDATOR is actually satisfied.
const FALLBACK_RESPONSES = {
  breakfast: [
    "What a perfect way to start your day! So proud of you for taking care of yourself 💕",
    "You're absolutely crushing this morning routine! I love seeing you nourish yourself 🌟",
    "Starting strong as always! Your consistency amazes me ✨",
    "This looks absolutely delicious! Way to fuel up for an amazing day ahead 💕"
  ],
  
  lunch: [
    "Taking time to fuel up during your busy day - you're amazing! 💕",
    "Perfect midday nourishment! I'm so proud of how you prioritize self-care 🌟",
    "You never forget to take care of yourself, and I love that about you ✨",
    "This looks so good! Your dedication to consistent meals inspires me 💕"
  ],
  
  dinner: [
    "Ending your day with such thoughtful nourishment - you're incredible! 💕",
    "What a beautiful way to complete your day! So proud of your consistency 🌟",
    "You've taken such good care of yourself today - I'm amazed by your dedication ✨",
    "Perfect way to wind down! Your healthy habits make me so proud 💕"
  ]
};

/**
 * Deterministic, pre-written encouragement used when the AI call fails or is
 * rate-limited. Picks a random line for the meal type and, for streaks of a
 * week or more, appends a streak celebration.
 */
function getFallbackResponse(context: MealContext): string {
  const responses = FALLBACK_RESPONSES[context.mealType];
  const randomIndex = Math.floor(Math.random() * responses.length);
  let response = responses[randomIndex];
  
  // Fix: the original spliced the streak text via replace('💕', ...), which
  // silently did nothing for templates ending in 🌟/✨ and produced awkward
  // mid-sentence grammar otherwise. Appending works for every template.
  if (context.streak >= 7) {
    response += ` And your ${context.streak}-day streak is absolutely amazing! 💕`;
  }
  
  return response;
}

📊 Response Caching Strategy

Cache Implementation

// One cached AI response plus the anonymized context used to build its key.
interface CachedResponse {
  content: string;
  context: Partial<MealContext>;  // subset only — no user-identifying data
  createdAt: Date;
  hitCount: number;               // number of cache hits served
}

/**
 * Small in-memory LRU cache for AI responses, keyed on meal type, lowercased
 * meal text, and whether the user is on a milestone (>= 7 day) streak.
 * Entries expire after `ttl` ms; the least-recently-used entry is evicted
 * when the cache is full.
 */
class AIResponseCache {
  private cache = new Map<string, CachedResponse>();
  private maxSize = 1000;
  private ttl = 24 * 60 * 60 * 1000; // 24 hours
  
  private generateCacheKey(context: MealContext): string {
    // Key contains no user-specific data so responses can be shared.
    return `${context.mealType}:${context.content.toLowerCase()}:${context.streak >= 7 ? 'streak' : 'normal'}`;
  }
  
  get(context: MealContext): string | null {
    const key = this.generateCacheKey(context);
    const cached = this.cache.get(key);
    
    if (!cached) return null;
    
    // Drop expired entries lazily on access.
    if (Date.now() - cached.createdAt.getTime() > this.ttl) {
      this.cache.delete(key);
      return null;
    }
    
    cached.hitCount++;
    // Fix: re-insert on hit so Map iteration order reflects recency — without
    // this, the eviction in set() was FIFO, not the LRU the comment claimed.
    this.cache.delete(key);
    this.cache.set(key, cached);
    return cached.content;
  }
  
  set(context: MealContext, content: string): void {
    const key = this.generateCacheKey(context);
    
    // Evict the least-recently-used entry (first in iteration order).
    if (this.cache.size >= this.maxSize) {
      const lruKey = this.cache.keys().next().value;
      if (lruKey !== undefined) {  // guard: keys() yields undefined on an empty Map
        this.cache.delete(lruKey);
      }
    }
    
    this.cache.set(key, {
      content,
      // Fix: the original stored streak as the string 'milestone'/'normal',
      // which does not type-check against Partial<MealContext> (streak: number).
      context: {
        mealType: context.mealType,
        streak: context.streak
      },
      createdAt: new Date(),
      hitCount: 0
    });
  }
}

🎨 Personalization Features

User Preference Integration

// Per-user knobs that tailor prompt construction (see personalizePrompt).
interface UserPreferences {
  preferredTone: 'enthusiastic' | 'gentle' | 'playful';
  emojiPreference: 'lots' | 'some' | 'minimal';   // 'some' is the default guidance
  mentionStreaks: boolean;
  celebrateMilestones: boolean;
  timeAwareness: boolean;
}

/**
 * Append per-user tone and emoji directives to an assembled prompt.
 * An unrecognized tone adds nothing; any emoji preference other than
 * 'lots'/'minimal' gets the default "2-3 emojis" guidance.
 */
function personalizePrompt(basePrompt: string, preferences: UserPreferences): string {
  // Dispatch tables replace the original switch statements; the appended
  // text is identical.
  const toneDirectives: Record<string, string> = {
    enthusiastic: 'Be extra enthusiastic and excited in your response!',
    gentle: 'Use gentle, calm, and soothing language.',
    playful: 'Be playful and fun while staying encouraging!'
  };
  const emojiDirectives: Record<string, string> = {
    lots: 'Use multiple relevant emojis to show excitement!',
    minimal: 'Use just one perfect emoji.'
  };

  const parts = [basePrompt];

  const toneNote = toneDirectives[preferences.preferredTone];
  if (toneNote) {
    parts.push(toneNote);
  }

  parts.push(emojiDirectives[preferences.emojiPreference] ?? 'Use 2-3 emojis appropriately.');

  return parts.join('\n\n');
}

Contextual Adaptations

/**
 * Append situational notes (early/late meals, weekend mornings, completing the
 * day's third meal) to an assembled prompt. Time-based notes depend on the
 * current wall-clock hour.
 */
function addContextualElements(prompt: string, context: MealContext): string {
  const notes: string[] = [];
  const currentHour = new Date().getHours();

  // Early breakfast: extra recognition for pre-7am self-care.
  if (context.mealType === 'breakfast' && currentHour < 7) {
    notes.push("Note: They're up early taking care of themselves - extra recognition for this dedication!");
  }

  // Late dinner: they still made time for a proper meal.
  if (context.mealType === 'dinner' && currentHour > 21) {
    notes.push('Note: Even with a busy day, they prioritized proper nutrition - celebrate this commitment!');
  }

  // Late weekend breakfast: logging even when the routine is optional.
  if (context.isWeekend && context.mealType === 'breakfast' && currentHour > 9) {
    notes.push('Note: Weekend morning - they could have skipped but chose self-care instead!');
  }

  // Two prior meals means this one completes the full day.
  if (context.previousMeals.length === 2) {
    notes.push('Special: This completes their full day of meal logging - huge celebration needed!');
  }

  return notes.reduce((assembled, note) => assembled + '\n\n' + note, prompt);
}

📈 Performance & Cost Optimization

Token Usage Optimization

// Token budget targets. Fields are literal number types, so this interface
// documents the budgets at the type level rather than holding runtime values.
interface TokenOptimization {
  maxPromptTokens: 500;
  maxResponseTokens: 100;
  targetResponseLength: 80;   // target characters for the user-visible text
}

/**
 * Shrink a prompt before sending it to the API: collapse blank lines and
 * repeated spaces, and hand off to extractEssentialContext when the result
 * is still over 2000 characters.
 */
function optimizePromptForTokens(prompt: string): string {
  let optimized = prompt
    .replace(/\n\s*\n/g, '\n')   // collapse blank lines
    // Fix: the original used /\s+/g, which also replaced every newline with a
    // space and flattened the prompt (defeating the blank-line pass above).
    // [^\S\n] matches whitespace except newlines.
    .replace(/[^\S\n]+/g, ' ')
    .trim();
  
  // Truncate if too long (keep essential context)
  if (optimized.length > 2000) {
    optimized = extractEssentialContext(optimized);
  }
  
  return optimized;
}

Rate Limiting & Cost Control

// Per-user usage counters tracked by AIRateLimiter.
interface AIUsageTracking {
  userId: string;
  dailyRequests: number;   // requests made today
  monthlyTokens: number;   // tokens consumed this calendar month
  lastRequest: Date;       // used to detect day/month rollover
}

/**
 * Per-user request/token budget enforcement for AI calls.
 * Fix: the original called getUserUsage/incrementUsage without defining them
 * anywhere; this version provides a working in-memory backend whose counters
 * reset when the local day / calendar month rolls over.
 */
class AIRateLimiter {
  private readonly DAILY_LIMIT = 50;            // requests per user per day
  private readonly MONTHLY_TOKEN_LIMIT = 10000; // tokens per user per month
  
  // In-memory usage store, keyed by user id.
  private usageByUser = new Map<string, AIUsageTracking>();
  
  // True when the user is under both the daily request and monthly token caps.
  async checkLimits(userId: string): Promise<boolean> {
    const usage = await this.getUserUsage(userId);
    
    if (usage.dailyRequests >= this.DAILY_LIMIT) {
      return false;
    }
    
    if (usage.monthlyTokens >= this.MONTHLY_TOKEN_LIMIT) {
      return false;
    }
    
    return true;
  }
  
  // Record one request and its token cost against the user's budgets.
  async updateUsage(userId: string, tokensUsed: number): Promise<void> {
    await this.incrementUsage(userId, tokensUsed);
  }
  
  // Fetch (or lazily create) the user's counters, resetting them across
  // day / month boundaries.
  private async getUserUsage(userId: string): Promise<AIUsageTracking> {
    const now = new Date();
    let usage = this.usageByUser.get(userId);
    
    if (!usage) {
      usage = { userId, dailyRequests: 0, monthlyTokens: 0, lastRequest: now };
      this.usageByUser.set(userId, usage);
      return usage;
    }
    
    if (usage.lastRequest.toDateString() !== now.toDateString()) {
      usage.dailyRequests = 0;
    }
    if (usage.lastRequest.getMonth() !== now.getMonth() ||
        usage.lastRequest.getFullYear() !== now.getFullYear()) {
      usage.monthlyTokens = 0;
    }
    return usage;
  }
  
  private async incrementUsage(userId: string, tokensUsed: number): Promise<void> {
    const usage = await this.getUserUsage(userId);
    usage.dailyRequests += 1;
    usage.monthlyTokens += tokensUsed;
    usage.lastRequest = new Date();
  }
}

🧪 Testing & Quality Assurance

Automated Response Testing

// Unit tests for AI response generation
// Jest-style tests for AI response generation.
// NOTE(review): these call generateMealResponse directly, which reaches the
// live OpenAI client unless it is mocked — confirm the test setup stubs it.
describe('AI Response Generation', () => {
  it('should generate appropriate breakfast responses', async () => {
    const context: MealContext = {
      mealType: 'breakfast',
      content: 'Oatmeal with berries',
      streak: 5,
      timeOfDay: 'morning',
      previousMeals: [],
      isWeekend: false,
      isFirstMeal: true
    };
    
    const response = await generateMealResponse(context);
    
    // SYSTEM_PROMPT instructs the model to mention the food and include
    // at least one emoji, within the 50-150 character guideline.
    expect(response.content).toMatch(/oatmeal|berries/i);
    // Fix: the emoji alternation was mojibake; restored to 💕 / 🌟 / ✨.
    expect(response.content).toMatch(/💕|🌟|✨/);
    expect(response.content.length).toBeGreaterThan(30);
    expect(response.content.length).toBeLessThan(180);
  });
  
  it('should celebrate streak milestones', async () => {
    const context: MealContext = {
      mealType: 'lunch',
      content: 'Chicken salad',
      streak: 7,
      timeOfDay: 'afternoon',
      previousMeals: [],
      isWeekend: false,
      isFirstMeal: false
    };
    
    const response = await generateMealResponse(context);
    
    // 7 is a streakMilestones key, so the prompt explicitly mentions the week.
    expect(response.content).toMatch(/7|week|streak/i);
  });
});

Response Quality Metrics

// Aggregate quality indicators produced by AIQualityMonitor.generateQualityReport().
interface QualityMetrics {
  averageResponseTime: number;
  fallbackRate: number;            // share of responses served by the fallback path
  userSatisfactionScore: number;
  toneConsistencyScore: number;
  uniquenessScore: number;
}

// Records per-response telemetry and aggregates it into quality reports.
// NOTE(review): logMetrics and the calculate…/analyze… helpers are not defined
// in this snippet — presumably implemented elsewhere; verify before use.
class AIQualityMonitor {
  /**
   * Record telemetry for a single AI response, optionally with user feedback.
   * NOTE(review): reads context.userId, but MealContext as declared in this
   * document has no userId field — confirm the context actually carries it.
   */
  async trackResponse(
    context: MealContext,
    response: AIResponse,
    userFeedback?: 'positive' | 'negative'
  ): Promise<void> {
    await this.logMetrics({
      userId: context.userId,
      responseTime: response.responseTime,
      tokensUsed: response.tokensUsed,
      // 'fallback' is the sentinel model name set by the error paths.
      wasFallback: response.model === 'fallback',
      userFeedback,
      timestamp: new Date()
    });
  }
  
  // Build a QualityMetrics summary by aggregating the logged telemetry.
  async generateQualityReport(): Promise<QualityMetrics> {
    // Aggregate metrics from logs
    return {
      averageResponseTime: await this.calculateAverageResponseTime(),
      fallbackRate: await this.calculateFallbackRate(),
      userSatisfactionScore: await this.calculateSatisfactionScore(),
      toneConsistencyScore: await this.analyzeToneConsistency(),
      uniquenessScore: await this.calculateUniqueness()
    };
  }
}

🔧 Configuration & Environment

Environment Variables

// AI configuration environment variables
// Central, typed view of all AI-related environment variables; loaded and
// validated at startup by AIConfigManager.
interface AIConfig {
  OPENAI_API_KEY: string;            // required — AIConfigManager throws if unset
  OPENAI_MODEL: string;              // Default: "gpt-4"
  AI_MAX_TOKENS: number;             // Default: 100
  AI_TEMPERATURE: number;            // Default: 0.8
  AI_ENABLE_CACHING: boolean;        // Default: true
  AI_CACHE_TTL_HOURS: number;        // Default: 24
  AI_RATE_LIMIT_PER_USER: number;    // Default: 50
  AI_FALLBACK_ENABLED: boolean;      // Default: true
  AI_RESPONSE_TIMEOUT_MS: number;    // Default: 10000
}

Configuration Management

/**
 * Loads and validates AI configuration from environment variables.
 * Throws at construction time on missing or non-numeric values, so
 * misconfiguration fails fast at startup rather than at request time.
 */
class AIConfigManager {
  private config: AIConfig;
  
  constructor() {
    this.config = {
      OPENAI_API_KEY: this.requireEnv('OPENAI_API_KEY'),
      OPENAI_MODEL: process.env.OPENAI_MODEL || 'gpt-4',
      AI_MAX_TOKENS: this.intEnv('AI_MAX_TOKENS', 100),
      AI_TEMPERATURE: this.floatEnv('AI_TEMPERATURE', 0.8),
      AI_ENABLE_CACHING: process.env.AI_ENABLE_CACHING !== 'false',
      AI_CACHE_TTL_HOURS: this.intEnv('AI_CACHE_TTL_HOURS', 24),
      AI_RATE_LIMIT_PER_USER: this.intEnv('AI_RATE_LIMIT_PER_USER', 50),
      AI_FALLBACK_ENABLED: process.env.AI_FALLBACK_ENABLED !== 'false',
      AI_RESPONSE_TIMEOUT_MS: this.intEnv('AI_RESPONSE_TIMEOUT_MS', 10000)
    };
    
    this.validateConfig();
  }
  
  private requireEnv(key: string): string {
    const value = process.env[key];
    if (!value) {
      throw new Error(`Required environment variable ${key} is not set`);
    }
    return value;
  }
  
  // Fix: the original parseInt/parseFloat calls could yield NaN, which slipped
  // past validateConfig (NaN fails every range comparison); non-numeric
  // values are now rejected explicitly.
  private intEnv(key: string, fallback: number): number {
    const raw = process.env[key];
    if (raw === undefined || raw === '') return fallback;
    const parsed = parseInt(raw, 10);
    if (Number.isNaN(parsed)) {
      throw new Error(`Environment variable ${key} must be an integer, got "${raw}"`);
    }
    return parsed;
  }
  
  private floatEnv(key: string, fallback: number): number {
    const raw = process.env[key];
    if (raw === undefined || raw === '') return fallback;
    const parsed = parseFloat(raw);
    if (Number.isNaN(parsed)) {
      throw new Error(`Environment variable ${key} must be a number, got "${raw}"`);
    }
    return parsed;
  }
  
  // Range checks beyond basic numeric parsing.
  private validateConfig(): void {
    if (this.config.AI_TEMPERATURE < 0 || this.config.AI_TEMPERATURE > 2) {
      throw new Error('AI_TEMPERATURE must be between 0 and 2');
    }
    
    if (this.config.AI_MAX_TOKENS < 10 || this.config.AI_MAX_TOKENS > 500) {
      throw new Error('AI_MAX_TOKENS must be between 10 and 500');
    }
  }
}

📊 Analytics & Insights

AI Usage Analytics

// Summary of AI usage over a reporting window (see generateAIInsights).
interface AIAnalytics {
  totalRequests: number;
  successRate: number;                           // fraction of requests with a non-null AI response
  averageTokensPerRequest: number;
  mostCommonMealTypes: Record<string, number>;   // meal type -> request count
  averageResponseTime: number;
  fallbackUsageRate: number;
  userSatisfactionTrend: number[];
}

/**
 * Aggregate the last 30 days of meal_ai_logs into an AIAnalytics summary.
 * NOTE(review): the query GROUPs BY meal_type, so each returned row holds
 * per-meal-type figures; processAnalyticsData presumably combines them into
 * the file-level totals — verify its implementation.
 */
async function generateAIInsights(): Promise<AIAnalytics> {
  const last30Days = new Date(Date.now() - 30 * 24 * 60 * 60 * 1000);
  
  // Parameterized query ($1) — the cutoff date is not string-interpolated.
  const analytics = await db.query(`
    SELECT 
      COUNT(*) as total_requests,
      AVG(CASE WHEN ai_response IS NOT NULL THEN 1 ELSE 0 END) as success_rate,
      AVG(tokens_used) as avg_tokens,
      meal_type,
      AVG(response_time_ms) as avg_response_time,
      COUNT(CASE WHEN model = 'fallback' THEN 1 END) as fallback_count
    FROM meal_ai_logs 
    WHERE created_at >= $1
    GROUP BY meal_type
  `, [last30Days]);
  
  return processAnalyticsData(analytics);
}

🚨 Error Handling & Monitoring

Comprehensive Error Tracking

// Structured record of a single AI failure, logged for monitoring.
interface AIError {
  type: 'api_error' | 'validation_error' | 'timeout_error' | 'rate_limit_error';
  message: string;
  context: MealContext;   // the meal context in flight when the error occurred
  timestamp: Date;
  resolved: boolean;      // always logged as false here; flipped by tooling elsewhere? — TODO confirm
}

/**
 * Centralized handling for AI request failures: categorizes the error,
 * records it, and returns a safe fallback so the user still receives an
 * encouraging message.
 */
class AIErrorHandler {
  async handleError(error: Error, context: MealContext): Promise<AIResponse> {
    const aiError: AIError = {
      type: this.categorizeError(error),
      message: error.message,
      context,
      timestamp: new Date(),
      resolved: false
    };
    
    // Log error for monitoring
    await this.logError(aiError);
    
    // Return appropriate fallback
    return {
      content: getFallbackResponse(context),
      tokensUsed: 0,
      responseTime: 0,
      model: 'fallback',
      cached: true
    };
  }
  
  // Fix: match case-insensitively — provider messages often read "Rate limit"
  // or "Timeout", which the original lowercase substring checks missed.
  private categorizeError(error: Error): AIError['type'] {
    const message = error.message.toLowerCase();
    if (message.includes('timeout')) return 'timeout_error';
    if (message.includes('rate limit')) return 'rate_limit_error';
    if (message.includes('validation')) return 'validation_error';
    return 'api_error';
  }
  
  // Minimal default sink. Fix: the original called logError without defining
  // it anywhere; replace with a real monitoring client in production.
  private async logError(error: AIError): Promise<void> {
    console.error('[AIErrorHandler]', error.type, error.message);
  }
}

📞 AI Integration Support

For questions about AI integration, prompt engineering, or response quality:


Our AI integration is designed to provide consistent, encouraging, and personalized responses that make users feel truly supported on their wellness journey. Every technical decision prioritizes the human experience of feeling cared for and celebrated. 🤖💕

โš ๏ธ **GitHub.com Fallback** โš ๏ธ