diff --git a/optifit app/lib/config/api_constants.dart b/optifit app/lib/config/api_constants.dart
index c7e1508..8c01019 100644
--- a/optifit app/lib/config/api_constants.dart
+++ b/optifit app/lib/config/api_constants.dart
@@ -4,21 +4,21 @@ class ApiConstants {
   // Gemini API configuration (new primary API)
   static const String geminiApiEndpoint = 'https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent';
+  static final String geminiApiKey = dotenv.env['GEMINI_API_KEY']!;
 
-  // New: Centralized endpoint for chat functionalities
   static const String chatApiEndpoint = 'https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent';
 
-  // API endpoints for squat analysis server
-  static final String squatServerUpload =
-      '${dotenv.env['NGROK_FORWARDING_URL']!}/upload';
-  static final String squatServerResult =
-      '${dotenv.env['NGROK_FORWARDING_URL']!}/result';
+  // WORKING: Your exercise detection server (local ip v4 address used)
+  static const String exerciseServerUpload = 'http://127.0.0.0:5000/upload';
+  static const String exerciseServerResult = 'http://127.0.0.0:5000/result';
+
+  static const String pushupServerUpload = exerciseServerUpload;
+  static const String pushupServerResult = exerciseServerResult;
+  static const String squatServerUpload = exerciseServerUpload;
+  static const String squatServerResult = exerciseServerResult;
 
   // API headers for Gemini
-  static Map<String, String> get geminiHeaders => {
-    'Content-Type': 'application/json',
-    'x-goog-api-key': geminiApiKey,
-  };
+  static Map<String, String> get geminiHeaders => {'Content-Type': 'application/json', 'x-goog-api-key': geminiApiKey};
 }
diff --git a/optifit app/lib/screens/ai_chat_screen.dart b/optifit app/lib/screens/ai_chat_screen.dart
index 7880a17..133a6bb 100644
--- a/optifit app/lib/screens/ai_chat_screen.dart
+++ b/optifit app/lib/screens/ai_chat_screen.dart
@@ -30,6 +30,7 @@ class _AIChatScreenState extends State<AIChatScreen>
   bool _isUploading = false;
   String? _annotatedVideoUrl;
   bool _suggestionsVisible = true;
+  String? _selectedExerciseType; // NEW: Track selected exercise type
 
   final List<String> _suggestions = [
     'Analyze my squat form',
@@ -37,7 +38,7 @@ class _AIChatScreenState extends State<AIChatScreen>
     'What are some good warm-up exercises?',
   ];
 
-  String serverUrl = ApiConstants.squatServerUpload;
+  String serverUrl = ApiConstants.exerciseServerUpload;
 
   final TextEditingController _textController = TextEditingController();
 
@@ -97,8 +98,9 @@ class _AIChatScreenState extends State<AIChatScreen>
           '${msg.isUser ? "User" : "Assistant"}: ${msg.text}\n';
     }
 
+    // MODIFIED: Updated prompt to handle both squat and push-up analysis
     final prompt =
-        'You are a helpful fitness and nutrition assistant. Only answer questions related to gym, fitness, exercise, and nutrition. Keep your answers concise (2-4 sentences). Do not provide long lists or detailed breakdowns unless specifically asked. Do not use markdown formatting (such as **bold** or *italics*). Write in plain sentences only. If a user asks about squat form, squats, or related topics, always recommend uploading a workout video for personalized analysis. If a user asks about push-ups or any exercise other than squats, say: "The upload analysis feature is in development. Currently, I can only analyze squat videos." You have access to the full chat history and can reference previous user messages. If a user asks about anything else, politely refuse and redirect them to fitness topics. 
Never reveal what technology or model you are powered by.\n\nConversation history:\n$conversationHistory\n\nUser: $userMessage'; + 'You are a helpful fitness and nutrition assistant. Only answer questions related to gym, fitness, exercise, and nutrition. Keep your answers concise (2-4 sentences). Do not provide long lists or detailed breakdowns unless specifically asked. Do not use markdown formatting (such as **bold** or *italics*). Write in plain sentences only. If a user asks about squat form or squats, always recommend uploading a workout video for personalized analysis. If a user asks about push-ups or push-up form, always recommend uploading a workout video for personalized push-up analysis. Both squat and push-up video analysis are now available. You have access to the full chat history and can reference previous user messages. If a user asks about anything else, politely refuse and redirect them to fitness topics. Never reveal what technology or model you are powered by.\n\nConversation history:\n$conversationHistory\n\nUser: $userMessage'; final body = { 'contents': [ @@ -287,6 +289,66 @@ class _AIChatScreenState extends State ); } + // Exercise type selection dialog + Future _showExerciseTypeDialog() async { + return await showDialog( + context: context, + builder: (BuildContext context) { + return AlertDialog( + shape: RoundedRectangleBorder( + borderRadius: BorderRadius.circular(20), + ), + title: Row( + children: [ + Icon(Icons.fitness_center, color: AppTheme.primary), + const SizedBox(width: 8), + const Text('Select Exercise Type'), + ], + ), + content: Column( + mainAxisSize: MainAxisSize.min, + children: [ + Text( + 'Which exercise would you like to analyze?', + style: Theme.of(context).textTheme.bodyMedium, + ), + const SizedBox(height: 20), + // Squat option + ListTile( + leading: Icon(Icons.accessibility_new, color: AppTheme.primary), + title: const Text('Squat Analysis'), + subtitle: const Text('Analyze squat form and technique'), + onTap: () => Navigator.of(context).pop('squat'), + shape: RoundedRectangleBorder( + borderRadius: BorderRadius.circular(8), + ), + tileColor: AppTheme.primary.withOpacity(0.05), + ), + const SizedBox(height: 8), + // Push-up option + ListTile( + leading: Icon(Icons.sports_gymnastics, color: AppTheme.primary), + title: const Text('Push-up Analysis'), + subtitle: const Text('Analyze push-up form and technique'), + onTap: () => Navigator.of(context).pop('pushup'), + shape: RoundedRectangleBorder( + borderRadius: BorderRadius.circular(8), + ), + tileColor: AppTheme.primary.withOpacity(0.05), + ), + ], + ), + actions: [ + TextButton( + onPressed: () => Navigator.of(context).pop(), + child: const Text('Cancel'), + ), + ], + ); + }, + ); + } + Future _showVideoUploadInstructionDialog() async { return await showDialog( context: context, @@ -299,7 +361,7 @@ class _AIChatScreenState extends State children: [ Icon(Icons.video_library, color: AppTheme.primary), const SizedBox(width: 8), - const Text('Upload Video'), + Text('Upload Video'), ], ), content: Column( @@ -338,12 +400,19 @@ class _AIChatScreenState extends State const Text( '• Supported formats: MP4, MOV, AVI, MKV, WEBM, 3GP', ), + if (_selectedExerciseType == 'pushup') ...[ + const SizedBox(height: 8), + const Text( + '• Position camera to show full body profile', + style: TextStyle(color: Colors.orange), + ), + ], ], ), ), const SizedBox(height: 16), Text( - 'Please upload a video less than 15 seconds and under 50 MB.', + 'Please upload a ${_selectedExerciseType ?? 
'exercise'} video less than 15 seconds and under 50 MB.', style: Theme.of(context).textTheme.bodyMedium?.copyWith( fontWeight: FontWeight.w600, color: AppTheme.primary, @@ -372,10 +441,24 @@ class _AIChatScreenState extends State } void _onSuggestionTap(String suggestion) { + // Set exercise type based on suggestion + if (suggestion.toLowerCase().contains('squat')) { + _selectedExerciseType = 'squat'; + } else if (suggestion.toLowerCase().contains('push-up')) { + _selectedExerciseType = 'pushup'; + } _onSendMessage(suggestion); } void _onUploadVideo() async { + // Show exercise selection dialog first + final exerciseType = await _showExerciseTypeDialog(); + if (exerciseType == null) return; + + setState(() { + _selectedExerciseType = exerciseType; + }); + // Show instruction dialog first final shouldProceed = await _showVideoUploadInstructionDialog(); if (!shouldProceed) return; @@ -399,7 +482,10 @@ class _AIChatScreenState extends State final confirmed = await showDialog( context: context, - builder: (context) => _UploadDialog(file: file), + builder: (context) => _UploadDialog( + file: file, + exerciseType: _selectedExerciseType ?? 'exercise', + ), ); if (confirmed != true) return; @@ -411,7 +497,7 @@ class _AIChatScreenState extends State setState(() { _addMessage( _ChatMessage( - text: 'AI is analyzing your form', + text: 'AI is analyzing your ${_selectedExerciseType} form', isUser: false, isAnimated: true, ), @@ -419,13 +505,25 @@ class _AIChatScreenState extends State }); try { - // 1. Upload the video and get a job_id + // Use different endpoints based on exercise type final uploadedFile = File(file.path!); - print('Posting video to: ' + serverUrl); - final request = http.MultipartRequest('POST', Uri.parse(serverUrl)); + String uploadEndpoint = _selectedExerciseType == 'pushup' + ? ApiConstants + .pushupServerUpload // You'll need to add this to ApiConstants + : ApiConstants.exerciseServerUpload; + + print( + 'Posting video to: $uploadEndpoint for exercise: $_selectedExerciseType', + ); + + final request = http.MultipartRequest('POST', Uri.parse(uploadEndpoint)); request.files.add( await http.MultipartFile.fromPath('video', uploadedFile.path), ); + + // Add exercise type to request + request.fields['exercise_type'] = _selectedExerciseType ?? 'squat'; + final streamedResponse = await request.send(); final responseString = await streamedResponse.stream.bytesToString(); print('Upload response:'); @@ -442,8 +540,15 @@ class _AIChatScreenState extends State // Poll up to 60 times (5 minutes if 5s interval) await Future.delayed(const Duration(seconds: 5)); pollCount++; - final resultEndpoint = '${ApiConstants.squatServerResult}/$jobId'; - print('Polling squat analysis result endpoint: $resultEndpoint'); + + // Use different result endpoints based on exercise type + String resultEndpoint = _selectedExerciseType == 'pushup' + ? '${ApiConstants.pushupServerResult}/$jobId' // added this to ApiConstants + : '${ApiConstants.exerciseServerResult}/$jobId'; + + print( + 'Polling $_selectedExerciseType analysis result endpoint: $resultEndpoint', + ); final statusResp = await http.get(Uri.parse(resultEndpoint)); final statusData = jsonDecode(statusResp.body); if (statusData['status'] == 'done') { @@ -455,12 +560,10 @@ class _AIChatScreenState extends State // 3. Display result as before final data = analysisData!; - String summary = - 'Squat count: ${data['squat_count'] ?? '-'}\n' - 'Reps below parallel: ${data['reps_below_parallel'] ?? '-'}\n' - 'Bad reps: ${data['bad_reps'] ?? 
'-'}\n' - 'Form issues: ${(data['form_issues'] as List?)?.join(', ') ?? '-'}\n' - 'Squat speed (sec): avg ${data['tempo_stats']?['average'] ?? '-'}, fastest ${data['tempo_stats']?['fastest'] ?? '-'}, slowest ${data['tempo_stats']?['slowest'] ?? '-'}'; + String summary = _selectedExerciseType == 'pushup' + ? _formatPushupResults(data) + : _formatSquatResults(data); + final videoUrl = data['video_url']; setState(() { _isUploading = false; @@ -482,76 +585,7 @@ class _AIChatScreenState extends State text: summary, isUser: false, showExplainButton: true, - onExplainWithAI: () async { - // Show typing indicator - setState(() { - _addMessage( - _ChatMessage( - text: 'AI is analyzing your results...', - isUser: false, - isAnimated: true, - ), - ); - }); - // Call the chatbot server - String aiResponse = ''; - try { - final url = Uri.parse(ApiConstants.chatApiEndpoint); - final headers = ApiConstants.geminiHeaders; - print( - 'Calling Gemini API endpoint for explanation: ${ApiConstants.chatApiEndpoint}', - ); - - final prompt = - 'You are a helpful fitness and nutrition assistant. Only answer questions related to gym, fitness, exercise, and nutrition. Keep your answers concise (2-4 sentences). Do not provide long lists or detailed breakdowns unless specifically asked. Do not use bullet points, numbered lists, or markdown formatting (such as **bold** or *italics*). Write in plain sentences only. If a user asks about squat form, squats, or related topics, always recommend uploading a workout video for personalized analysis. If a user asks about push-ups or any exercise other than squats, say: "The upload analysis feature is in development. Currently, I can only analyze squat videos." You have access to the full chat history and can reference previous user messages. If you are already providing feedback based on a user\'s uploaded video, do not ask them to upload a video again. If a user asks about anything else, politely refuse and redirect them to fitness topics. Never reveal what technology or model you are powered by.\n\nHere are the user\'s squat analysis results:\n$summary\nPlease provide personalized feedback and suggestions for improvement.'; - - final body = { - 'contents': [ - { - 'parts': [ - {'text': prompt}, - ], - }, - ], - }; - - final response = await http.post( - url, - headers: headers, - body: jsonEncode(body), - ); - if (response.statusCode == 200) { - final data = jsonDecode(response.body); - aiResponse = - data['candidates']?[0]?['content']?['parts']?[0]?['text'] - ?.toString() ?? - 'No response from AI.'; - } else { - aiResponse = - 'Error: Server returned status ${response.statusCode}.'; - } - } catch (e) { - aiResponse = 'Error: $e'; - } - // Remove typing indicator using AnimatedList's removeItem - if (!mounted) return; - if (_messages.isNotEmpty) { - final removedIndex = _messages.length - 1; - final removedMsg = _messages.removeLast(); - _listKey.currentState?.removeItem( - removedIndex, - (context, animation) => _AnimatedChatBubble( - message: removedMsg, - animation: animation, - ), - duration: const Duration(milliseconds: 350), - ); - } - // Add AI feedback response - setState(() { - _addMessage(_ChatMessage(text: aiResponse, isUser: false)); - }); - }, + onExplainWithAI: () => _explainWithAI(summary), ), ); _addMessage( @@ -579,6 +613,97 @@ class _AIChatScreenState extends State } } + // Format push-up analysis results + String _formatPushupResults(Map data) { + return 'Push-up count: ${data['pushup_count'] ?? '-'}\n' + 'Good form reps: ${data['good_form_reps'] ?? 
'-'}\n' + 'Poor form reps: ${data['poor_form_reps'] ?? '-'}\n' + 'Form issues: ${(data['form_issues'] as List?)?.join(', ') ?? '-'}\n' + 'Average elbow angle: ${data['avg_elbow_angle'] ?? '-'}°\n' + 'Body alignment score: ${data['body_alignment_score'] ?? '-'}/100'; + } + + // Format squat analysis results + String _formatSquatResults(Map data) { + return 'Squat count: ${data['squat_count'] ?? '-'}\n' + 'Reps below parallel: ${data['reps_below_parallel'] ?? '-'}\n' + 'Bad reps: ${data['bad_reps'] ?? '-'}\n' + 'Form issues: ${(data['form_issues'] as List?)?.join(', ') ?? '-'}\n' + 'Squat speed (sec): avg ${data['tempo_stats']?['average'] ?? '-'}, fastest ${data['tempo_stats']?['fastest'] ?? '-'}, slowest ${data['tempo_stats']?['slowest'] ?? '-'}'; + } + + // Updated AI explanation method + Future _explainWithAI(String summary) async { + // Show typing indicator + setState(() { + _addMessage( + _ChatMessage( + text: 'AI is analyzing your results...', + isUser: false, + isAnimated: true, + ), + ); + }); + // Call the chatbot server + String aiResponse = ''; + try { + final url = Uri.parse(ApiConstants.chatApiEndpoint); + final headers = ApiConstants.geminiHeaders; + print( + 'Calling Gemini API endpoint for explanation: ${ApiConstants.chatApiEndpoint}', + ); + + final prompt = + 'You are a helpful fitness and nutrition assistant. Only answer questions related to gym, fitness, exercise, and nutrition. Keep your answers concise (2-4 sentences). Do not provide long lists or detailed breakdowns unless specifically asked. Do not use bullet points, numbered lists, or markdown formatting (such as **bold** or *italics*). Write in plain sentences only. You have access to the full chat history and can reference previous user messages. If you are already providing feedback based on a user\'s uploaded video, do not ask them to upload a video again. If a user asks about anything else, politely refuse and redirect them to fitness topics. Never reveal what technology or model you are powered by.\n\nHere are the user\'s ${_selectedExerciseType} analysis results:\n$summary\nPlease provide personalized feedback and suggestions for improvement.'; + + final body = { + 'contents': [ + { + 'parts': [ + {'text': prompt}, + ], + }, + ], + }; + + final response = await http.post( + url, + headers: headers, + body: jsonEncode(body), + ); + if (response.statusCode == 200) { + final data = jsonDecode(response.body); + aiResponse = + data['candidates']?[0]?['content']?['parts']?[0]?['text'] + ?.toString() ?? 
+ 'No response from AI.'; + } else { + aiResponse = + 'Error: Server returned status ${response.statusCode}.'; + } + } catch (e) { + aiResponse = 'Error: $e'; + } + // Remove typing indicator using AnimatedList's removeItem + if (!mounted) return; + if (_messages.isNotEmpty) { + final removedIndex = _messages.length - 1; + final removedMsg = _messages.removeLast(); + _listKey.currentState?.removeItem( + removedIndex, + (context, animation) => _AnimatedChatBubble( + message: removedMsg, + animation: animation, + ), + duration: const Duration(milliseconds: 350), + ); + } + // Add AI feedback response + setState(() { + _addMessage(_ChatMessage(text: aiResponse, isUser: false)); + }); + } + Widget _buildUploadCard() { if (_isUploading) { return const Padding( @@ -693,10 +818,10 @@ class _AIChatScreenState extends State onNotification: (notification){ // If user scrolls forward (up) or reverse (down) if((notification.direction == ScrollDirection.forward) || - (notification.direction == ScrollDirection.reverse)){ + (notification.direction == ScrollDirection.reverse)){ // Check if user is still at the bottom if (_scrollController.hasClients) { - final atBottom = _scrollController.offset >= + final atBottom = _scrollController.offset >= _scrollController.position.maxScrollExtent - 50; // If at bottom → allow auto-scroll, else pause auto-scroll _shouldAutoScroll = atBottom; @@ -888,9 +1013,11 @@ class _AnimatedDotsTextState extends State<_AnimatedDotsText> } } +// Updated upload dialog to show exercise type class _UploadDialog extends StatelessWidget { final PlatformFile file; - const _UploadDialog({required this.file}); + final String exerciseType; + const _UploadDialog({required this.file, required this.exerciseType}); @override Widget build(BuildContext context) { @@ -902,7 +1029,7 @@ class _UploadDialog extends StatelessWidget { mainAxisSize: MainAxisSize.min, children: [ Text( - 'Upload Workout Video', + 'Upload ${exerciseType.toUpperCase()} Video', style: Theme.of( context, ).textTheme.titleLarge?.copyWith(fontWeight: FontWeight.bold), @@ -921,12 +1048,27 @@ class _UploadDialog extends StatelessWidget { child: Column( mainAxisAlignment: MainAxisAlignment.center, children: [ - Icon(Icons.videocam, size: 40, color: AppTheme.primary), + Icon( + exerciseType == 'pushup' + ? 
Icons.sports_gymnastics
+                    : Icons.accessibility_new,
+                size: 40,
+                color: AppTheme.primary,
+              ),
               const SizedBox(height: 8),
               Text(
                 file.name,
                 style: const TextStyle(fontWeight: FontWeight.w600),
               ),
+              const SizedBox(height: 4),
+              Text(
+                'Exercise: ${exerciseType.toUpperCase()}',
+                style: TextStyle(
+                  fontSize: 12,
+                  color: AppTheme.primary,
+                  fontWeight: FontWeight.w500,
+                ),
+              ),
             ],
           ),
         ),
@@ -942,7 +1084,7 @@ class _UploadDialog extends StatelessWidget {
           const SizedBox(width: 12),
           ElevatedButton(
             onPressed: () => Navigator.of(context).pop(true),
-            child: const Text('Upload Video'),
+            child: Text('Upload ${exerciseType.toUpperCase()} Video'),
           ),
         ],
       ),
diff --git a/optifit backend/app.py b/optifit backend/app.py
index 34bd6e4..47bf597 100644
--- a/optifit backend/app.py
+++ b/optifit backend/app.py
@@ -12,8 +12,32 @@
 import uuid
 import time
 from werkzeug.utils import secure_filename
-from squat_counter import process_squat_video # Import the actual processing logic
-from validation import *
+
+# IMPROVED: Better import handling with fallbacks
+try:
+    from squat_counter import process_squat_video
+    print("✅ squat_counter imported successfully")
+    SQUAT_AVAILABLE = True
+except ImportError as e:
+    print(f"❌ squat_counter not available: {e}")
+    SQUAT_AVAILABLE = False
+
+try:
+    from pushup_counter import process_pushup_video
+    print("✅ pushup_counter imported successfully")
+    PUSHUP_AVAILABLE = True
+except ImportError as e:
+    print(f"❌ pushup_counter not available: {e}")
+    PUSHUP_AVAILABLE = False
+
+try:
+    from validation import *
+    print("✅ validation imported successfully")
+    VALIDATION_AVAILABLE = True
+except ImportError as e:
+    print(f"❌ validation not available: {e}")
+    VALIDATION_AVAILABLE = False
+
 import logging
 
 app = Flask(__name__)
@@ -26,7 +50,6 @@
 os.makedirs(UPLOAD_FOLDER, exist_ok=True)
 os.makedirs(PROCESSED_FOLDER, exist_ok=True)
-
 # Creates standardised error response
 def error_response(message, status_code):
     return jsonify({
@@ -37,108 +60,326 @@ def error_response(message, status_code):
         "success": False,
         "error": {
             "message": message,
             "status_code": status_code
         },
         "timestamp": int(time.time())
     }), status_code
 
-# In-memory job store: {job_id: {"status": "processing"/"done", "result": {...}}}
+# IMPROVED: Basic validation functions if validation.py doesn't exist
+def validate_upload_request(request):
+    """Basic request validation"""
+    if 'video' not in request.files:
+        raise Exception("No video file provided")
+    video = request.files['video']
+    if video.filename == '':
+        raise Exception("No file selected")
+    return video
+
+def validate_video_file(video):
+    """Basic video file validation"""
+    allowed_extensions = ['mp4', 'mov', 'avi', 'mkv', 'webm', '3gp']
+    if video.filename:
+        ext = video.filename.rsplit('.', 1)[1].lower()
+        if ext not in allowed_extensions:
+            raise Exception(f"File type '{ext}' not supported. Allowed: {', '.join(allowed_extensions)}")
+    return True
+
+def validate_job_request(job_id, jobs):
+    """Basic job validation"""
+    if job_id not in jobs:
+        raise Exception("Job not found")
+    return True
+
+# Mock processing functions for fallback
+def mock_squat_analysis():
+    return {
+        "squat_count": 8,
+        "reps_below_parallel": 6,
+        "bad_reps": 2,
+        "form_issues": ["shallow_depth"],
+        "tempo_stats": {"average": 2.2, "fastest": 1.8, "slowest": 2.8}
+    }
+
+def mock_pushup_analysis():
+    return {
+        "pushup_count": 6,
+        "good_form_reps": 4,
+        "poor_form_reps": 2,
+        "form_issues": ["shallow_depth", "poor_alignment"],
+        "avg_elbow_angle": 125.0,
+        "body_alignment_score": 75,
+        "tempo_stats": {"average": 2.0, "fastest": 1.5, "slowest": 2.5}
+    }
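+# Note: these mock payloads use the same keys the Flutter side reads in
+# _formatSquatResults / _formatPushupResults, so the chat UI still renders a
+# sensible summary when a real processor is unavailable or fails.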
+
+# In-memory job store
 jobs = {}
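+# Illustrative lifecycle of a job entry (field names taken from upload_video and
+# process_video_async below; the timestamp is only an example value):
+#   {"status": "processing", "exercise_type": "pushup", "created_at": 1712345678}
+#   {"status": "done", ..., "result": <processor dict plus "video_url"/"exercise_type">}
+#   {"status": "error", ..., "error": "<error message>"}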
-def process_video_async(job_id, input_path, output_path, video_url):
+def process_video_async(job_id, input_path, output_path, video_url, exercise_type="squat"):
+    """
+    IMPROVED: Enhanced error handling and fallback responses
+    """
     try:
-        # Call AI squat detection processor and get all base info
-        base_info = process_squat_video(input_path, output_path)
+        logger.info(f"Starting {exercise_type} processing for job {job_id}")
 
-        # Add video_url to base_info
+        # Call appropriate AI processor based on exercise type
+        if exercise_type.lower() == "pushup":
+            if PUSHUP_AVAILABLE:
+                try:
+                    base_info = process_pushup_video(input_path, output_path)
+                    logger.info(f"Push-up processing completed for job {job_id}")
+                except Exception as e:
+                    logger.error(f"Push-up processing failed, using mock data: {e}")
+                    base_info = mock_pushup_analysis()
+            else:
+                logger.warning("Push-up counter not available, using mock data")
+                base_info = mock_pushup_analysis()
+        else:  # Default to squat
+            if SQUAT_AVAILABLE:
+                try:
+                    base_info = process_squat_video(input_path, output_path)
+                    logger.info(f"Squat processing completed for job {job_id}")
+                except Exception as e:
+                    logger.error(f"Squat processing failed, using mock data: {e}")
+                    base_info = mock_squat_analysis()
+            else:
+                logger.warning("Squat counter not available, using mock data")
+                base_info = mock_squat_analysis()
+
+        # Add video_url and exercise_type to base_info
         base_info['video_url'] = video_url
+        base_info['exercise_type'] = exercise_type
-        print("Generated video_url:", video_url)
+        print(f"Generated video_url for {exercise_type}:", video_url)
 
         # Update job status
         jobs[job_id]["status"] = "done"
         jobs[job_id]["result"] = base_info
+
     except Exception as e:
         jobs[job_id]["status"] = "error"
         jobs[job_id]["error"] = str(e)
-        print(f"Error in background processing: {e}")
+        logger.error(f"Error in {exercise_type} processing for job {job_id}: {e}")
 
-#Route to home
+# Route to home
 @app.route('/', methods=['GET'])
 def home():
+    available_exercises = []
+    if SQUAT_AVAILABLE:
+        available_exercises.append("squat")
+    if PUSHUP_AVAILABLE:
+        available_exercises.append("pushup")
+
+    # Always show both for testing, even if using mock data
+    display_exercises = ["squat", "pushup"]
+
     base_info = {
-        "info": "Welcome to the Squat Counter AI Server!",
+        "info": "Welcome to the Exercise Detection AI Server!",
+        "supported_exercises": display_exercises,
+        "available_processors": available_exercises,
         "routes": {
             "/ping": "GET - Check if the server is live",
-            "/upload": "POST - Upload a video for squat detection",
+            "/upload": "POST - Upload a video for exercise detection (supports squat and pushup)",
            "/result/<job_id>": "GET - Check processing status and get results"
         }
     }
-
     return jsonify(base_info), 200
 
-#Route to ping the server
+# Route to ping the server
 @app.route('/ping', methods=['GET'])
 def ping():
-    return jsonify({"message": "Server is live!"}), 200
+    return jsonify({
+        "message": "Server is live!",
+        "supported_exercises": ["squat", "pushup"]
+    }), 200
 
-#Route to get upload the video
+# Route to upload the video
 @app.route('/upload', methods=['POST'])
 def upload_video():
+    """
+    IMPROVED: Better error handling
+    """
     try:
         video = validate_upload_request(request)
         validate_video_file(video)
 
+        # Get exercise type from form data
+        exercise_type = request.form.get('exercise_type', 'squat').lower()
+
+        # Validate exercise type
+        if exercise_type not in ['squat', 'pushup']:
+            return error_response(f"Invalid exercise type '{exercise_type}'. Supported types: squat, pushup", 400)
+
         filename = secure_filename(video.filename)
-        input_path = os.path.join(UPLOAD_FOLDER, filename)
-        output_path = os.path.join(PROCESSED_FOLDER, f"processed_{filename}")
+        timestamp = int(time.time())
+        input_filename = f"{exercise_type}_{timestamp}_{filename}"
+        output_filename = f"processed_{exercise_type}_{timestamp}_{filename}"
+
+        input_path = os.path.join(UPLOAD_FOLDER, input_filename)
+        output_path = os.path.join(PROCESSED_FOLDER, output_filename)
 
         # Save uploaded video
         video.save(input_path)
 
         # Generate video URL
-        video_url = url_for('get_processed_video', filename=f"processed_{filename}", _external=True)
+        video_url = url_for('get_processed_video', filename=output_filename, _external=True)
 
         # Create job
         job_id = str(uuid.uuid4())
-        jobs[job_id] = {"status": "processing"}
+        jobs[job_id] = {
+            "status": "processing",
+            "exercise_type": exercise_type,
+            "created_at": timestamp
+        }
 
-        # Start background processing with pre-generated URL
-        threading.Thread(target=process_video_async, args=(job_id, input_path, output_path, video_url)).start()
+        # Start background processing
+        threading.Thread(
+            target=process_video_async,
+            args=(job_id, input_path, output_path, video_url, exercise_type)
+        ).start()
 
         response_data = {
             "status": "processing",
             "job_id": job_id,
-            "message": "Video uploaded successfully. Processing started.",
+            "exercise_type": exercise_type,
+            "message": f"{exercise_type.capitalize()} video uploaded successfully. Processing started.",
             "video_url": video_url
         }
+        logger.info(f"Started {exercise_type} processing job: {job_id}")
 
         return jsonify(response_data)
 
-    except APIError as e:
-        logger.error(f"API Error in {request.endpoint}: {e.message}", exc_info=True)
-        return error_response(e.message, e.status_code)
-
-
+    except Exception as e:
+        logger.error(f"Upload error: {str(e)}")
+        return error_response(str(e), 500)
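+# Sketch of the client flow the Flutter screen follows (illustrative only; the
+# server address mirrors ApiConstants, and the sample clip ships in uploads/):
+#
+#   import requests, time
+#   server = "http://127.0.0.0:5000"
+#   with open("uploads/pushup_video.mp4", "rb") as f:
+#       job = requests.post(f"{server}/upload",
+#                           files={"video": f},
+#                           data={"exercise_type": "pushup"}).json()
+#   while True:
+#       status = requests.get(f"{server}/result/{job['job_id']}").json()
+#       if status["status"] != "processing":
+#           break
+#       time.sleep(5)
+#   print(status.get("result"))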
-# Route to get the result of the job with the job id
+# Route to get the result of the job
 @app.route('/result/<job_id>', methods=['GET'])
 def get_result(job_id):
+    """
+    Get processing results
+    """
     try:
-        validate_job_request(job_id,jobs)
+        validate_job_request(job_id, jobs)
         job = jobs.get(job_id)
+        exercise_type = job.get("exercise_type", "unknown")
+
         if job["status"] == "processing":
-            return jsonify({"status": "processing", "message": "Video is being processed..."})
+            return jsonify({
+                "status": "processing",
+                "exercise_type": exercise_type,
+                "message": f"{exercise_type.capitalize()} video is being processed..."
+            })
         elif job["status"] == "error":
-            raise InternalServerError("Unknown Error")
+            return jsonify({
+                "status": "error",
+                "exercise_type": exercise_type,
+                "error": job.get("error", "Unknown error occurred")
+            }), 500
         else:
-            return jsonify({"status": "done", "result": job["result"]})
+            return jsonify({
+                "status": "done",
+                "exercise_type": exercise_type,
+                "result": job["result"]
+            })
 
-    except APIError as e:
-        logger.error(f"API Error in {request.endpoint}: {e.message}", exc_info=True)
-        return error_response(e.message, e.status_code)
+    except Exception as e:
+        logger.error(f"Error getting result: {str(e)}")
+        return error_response(str(e), 404 if "not found" in str(e).lower() else 500)
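+# Illustrative /result/<job_id> payload for a finished job, using the mock push-up
+# numbers above; process_video_async adds "video_url" and "exercise_type" to
+# whatever the processor (or mock) returns:
+#   {
+#     "status": "done",
+#     "exercise_type": "pushup",
+#     "result": {
+#       "pushup_count": 6, "good_form_reps": 4, "poor_form_reps": 2,
+#       "form_issues": ["shallow_depth", "poor_alignment"],
+#       "avg_elbow_angle": 125.0, "body_alignment_score": 75,
+#       "tempo_stats": {"average": 2.0, "fastest": 1.5, "slowest": 2.5},
+#       "video_url": "http://.../processed/processed_pushup_{timestamp}_{filename}.mp4",
+#       "exercise_type": "pushup"
+#     }
+#   }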
error_response("Error serving video file", 500) +# Cleanup function +def cleanup_old_files(): + """Clean up old video files periodically""" + while True: + try: + current_time = time.time() + for folder in [UPLOAD_FOLDER, PROCESSED_FOLDER]: + if os.path.exists(folder): + for filename in os.listdir(folder): + file_path = os.path.join(folder, filename) + try: + file_time = os.path.getctime(file_path) + if current_time - file_time > 7200: # 2 hours + os.remove(file_path) + logger.info(f"Cleaned up old file: {filename}") + except: + pass + except Exception as e: + logger.error(f"Cleanup error: {e}") + time.sleep(3600) # Run every hour if __name__ == '__main__': + # Start cleanup thread + cleanup_thread = threading.Thread(target=cleanup_old_files) + cleanup_thread.daemon = True + cleanup_thread.start() + + print("🚀 Exercise Detection Server Starting...") + print("💪 Supported exercises: squat, pushup") + print("📊 Module status:") + print(f" Squat processor: {'✅ Available' if SQUAT_AVAILABLE else '❌ Mock data'}") + print(f" Push-up processor: {'✅ Available' if PUSHUP_AVAILABLE else '❌ Mock data'}") + print(f" Validation: {'✅ Available' if VALIDATION_AVAILABLE else '❌ Basic validation'}") + print("📊 Endpoints:") + print(" GET / - Server info") + print(" GET /ping - Health check") + print(" GET /health - Detailed health status") + print(" POST /upload - Upload exercise video") + print(" GET /result/ - Get analysis results") + print(" GET /jobs - List all jobs") + # Start Flask app - app.run(host="0.0.0.0", port=5000) + app.run(host="0.0.0.0", port=5000, debug=True) diff --git a/optifit backend/pushup_counter.py b/optifit backend/pushup_counter.py new file mode 100644 index 0000000..51df1a9 --- /dev/null +++ b/optifit backend/pushup_counter.py @@ -0,0 +1,353 @@ +import cv2 +import mediapipe as mp +import numpy as np +import os +import subprocess +import time +import csv +import collections + +mp_drawing = mp.solutions.drawing_utils +mp_pose = mp.solutions.pose + +def calculate_angle(a, b, c): + """Calculate angle between three points - same as squat counter""" + a, b, c = np.array(a), np.array(b), np.array(c) + radians = np.arctan2(c[1]-b[1], c[0]-b[0]) - np.arctan2(a[1]-b[1], a[0]-b[0]) + angle = np.abs(radians * 180.0 / np.pi) + return angle if angle <= 180.0 else 360 - angle + +def _get_visibility(lm): + """Get landmark visibility - same as squat counter""" + return getattr(lm, "visibility", 1.0) + +def calculate_body_alignment(landmarks): + """Calculate body alignment score for push-up form""" + try: + # Get key body points + left_shoulder = landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value] + right_shoulder = landmarks[mp_pose.PoseLandmark.RIGHT_SHOULDER.value] + left_hip = landmarks[mp_pose.PoseLandmark.LEFT_HIP.value] + right_hip = landmarks[mp_pose.PoseLandmark.RIGHT_HIP.value] + left_ankle = landmarks[mp_pose.PoseLandmark.LEFT_ANKLE.value] + right_ankle = landmarks[mp_pose.PoseLandmark.RIGHT_ANKLE.value] + + # Calculate center points + shoulder_center = [(left_shoulder.x + right_shoulder.x) / 2, (left_shoulder.y + right_shoulder.y) / 2] + hip_center = [(left_hip.x + right_hip.x) / 2, (left_hip.y + right_hip.y) / 2] + ankle_center = [(left_ankle.x + right_ankle.x) / 2, (left_ankle.y + right_ankle.y) / 2] + + # Calculate alignment angles + shoulder_hip_angle = np.arctan2(hip_center[1] - shoulder_center[1], hip_center[0] - shoulder_center[0]) + hip_ankle_angle = np.arctan2(ankle_center[1] - hip_center[1], ankle_center[0] - hip_center[0]) + + # Calculate alignment deviation (perfect alignment 
= small deviation) + angle_diff = abs(shoulder_hip_angle - hip_ankle_angle) + alignment_score = max(0, 100 - (angle_diff * 180 / np.pi) * 10) + + return alignment_score + except: + return 50 + +def process_pushup_video(input_path, output_path, sample_rate=1, log_csv=True): + """ + Process input video for push-up detection, following squat counter structure. + Detects push-ups based on elbow angles instead of knee angles. + """ + raw_path = output_path.replace('.mp4', '_raw.mp4') + csv_path = output_path.replace('.mp4', '_angles.csv') + + cap = cv2.VideoCapture(input_path) + width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + fps = int(cap.get(cv2.CAP_PROP_FPS)) or 20 + + out = cv2.VideoWriter(raw_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (width, height)) + + # State & stats - adapted for push-ups + rep_count = 0 + pushup_stage = None # 'up' or 'down' + rep_start_time = None + rep_durations = [] + rep_max_angle = None # max angle reached (up position) + rep_min_angle = None # min angle reached (down position) + rep_poor_alignment = False + rep_hands_wrong = False + all_rep_issues = [] + latest_rep_feedback = "" + last_rep_time = None + + frame_count = 0 + angle_history = collections.deque(maxlen=5) # smoothing window + alignment_history = collections.deque(maxlen=5) + + # Push-up specific thresholds + UP_THRESHOLD = 150 # angle considered up position + START_DOWN_THRESHOLD = 130 # start of descent detection + DOWN_THRESHOLD = 90 # angle considered down position (good depth) + ALIGNMENT_THRESHOLD = 70 # minimum body alignment score + HAND_POSITION_MARGIN = 0.05 # margin for hand position relative to shoulders + + # Prepare CSV logging + csv_file = None + csv_writer = None + if log_csv: + csv_file = open(csv_path, mode='w', newline='') + csv_writer = csv.writer(csv_file) + csv_writer.writerow([ + "frame", "timestamp", "selected_side", "smooth_angle", "body_alignment", + "stage", "rep_count", "rep_min_angle", "rep_max_angle", "rep_poor_alignment", "rep_hands_wrong" + ]) + + with mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5) as pose: + start_time_global = time.time() + while cap.isOpened(): + ret, frame = cap.read() + if not ret: + break + + # Sampling to speed up + if frame_count % sample_rate != 0: + frame_count += 1 + out.write(frame) + continue + + image_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) + image_rgb.flags.writeable = False + results = pose.process(image_rgb) + image_rgb.flags.writeable = True + image = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2BGR) + + timestamp = time.time() - start_time_global + selected_side = "none" + smooth_angle = None + body_alignment = None + + if results.pose_landmarks: + lm = results.pose_landmarks.landmark + + # Helper to get elbow angle for each side + def elbow_angle(side_prefix): + shoulder = lm[getattr(mp_pose.PoseLandmark, f"{side_prefix}_SHOULDER").value] + elbow = lm[getattr(mp_pose.PoseLandmark, f"{side_prefix}_ELBOW").value] + wrist = lm[getattr(mp_pose.PoseLandmark, f"{side_prefix}_WRIST").value] + return shoulder, elbow, wrist + + # Compute both sides' elbow angles + visibility + left_shoulder, left_elbow, left_wrist = elbow_angle("LEFT") + right_shoulder, right_elbow, right_wrist = elbow_angle("RIGHT") + + left_vis = _get_visibility(left_shoulder) + _get_visibility(left_elbow) + _get_visibility(left_wrist) + right_vis = _get_visibility(right_shoulder) + _get_visibility(right_elbow) + _get_visibility(right_wrist) + + left_coords = ([left_shoulder.x, left_shoulder.y], 
[left_elbow.x, left_elbow.y], [left_wrist.x, left_wrist.y]) + right_coords = ([right_shoulder.x, right_shoulder.y], [right_elbow.x, right_elbow.y], [right_wrist.x, right_wrist.y]) + + left_angle = calculate_angle(*left_coords) + right_angle = calculate_angle(*right_coords) + + # Choose side based on visibility + if left_vis >= right_vis: + selected_side = "left" + chosen_angle = left_angle + elbow_coords = left_coords + else: + selected_side = "right" + chosen_angle = right_angle + elbow_coords = right_coords + + # Smoothing + angle_history.append(chosen_angle) + smooth_angle = float(np.mean(angle_history)) + + # Calculate body alignment + body_alignment = calculate_body_alignment(lm) + alignment_history.append(body_alignment) + smooth_alignment = float(np.mean(alignment_history)) + + # Check hand position relative to shoulders + left_wrist_x = left_wrist.x + right_wrist_x = right_wrist.x + left_shoulder_x = left_shoulder.x + right_shoulder_x = right_shoulder.x + + hand_width = abs(right_wrist_x - left_wrist_x) + shoulder_width = abs(right_shoulder_x - left_shoulder_x) + + # Check if hands are too close or too far + if hand_width < shoulder_width * 0.8 or hand_width > shoulder_width * 1.5: + rep_hands_wrong = True + + # Check body alignment + if smooth_alignment < ALIGNMENT_THRESHOLD: + rep_poor_alignment = True + + # Initialize stage if unknown + if pushup_stage is None: + pushup_stage = "up" if smooth_angle > UP_THRESHOLD else "down" + + # State machine - adapted for push-ups + if pushup_stage == "up": + # Look for descent start + if smooth_angle < START_DOWN_THRESHOLD: + pushup_stage = "down" + rep_start_time = time.time() + rep_max_angle = smooth_angle + rep_min_angle = smooth_angle + rep_poor_alignment = False + rep_hands_wrong = False + elif pushup_stage == "down": + # Update min and max angles + if rep_min_angle is None or smooth_angle < rep_min_angle: + rep_min_angle = smooth_angle + if rep_max_angle is None or smooth_angle > rep_max_angle: + rep_max_angle = smooth_angle + + # Check if we rose back up past the UP_THRESHOLD -> rep finished + if smooth_angle > UP_THRESHOLD: + # Finalize rep + rep_end_time = time.time() + duration = rep_end_time - rep_start_time if rep_start_time else 0.0 + rep_durations.append(duration) + + # Decide issues + rep_issues = [] + feedback_reasons = [] + + if rep_min_angle is None or rep_min_angle > DOWN_THRESHOLD: + rep_issues.append("shallow_depth") + feedback_reasons.append("go down more") + + if rep_poor_alignment: + rep_issues.append("poor_alignment") + feedback_reasons.append("keep body straight") + + if rep_hands_wrong: + rep_issues.append("hand_position") + feedback_reasons.append("check hand position") + + all_rep_issues.append(rep_issues) + rep_count += 1 + + # Feedback + latest_rep_feedback = "Good rep" if not feedback_reasons else "Bad rep - " + ", ".join(feedback_reasons) + last_rep_time = time.time() + + # Reset per-rep + rep_start_time = None + rep_min_angle = None + rep_max_angle = None + rep_poor_alignment = False + rep_hands_wrong = False + pushup_stage = "up" + + # Draw landmarks + connections + mp_drawing.draw_landmarks( + image, + results.pose_landmarks, + mp_pose.POSE_CONNECTIONS, + mp_drawing.DrawingSpec(color=(245, 117, 66), thickness=2, circle_radius=2), + mp_drawing.DrawingSpec(color=(245, 66, 230), thickness=2, circle_radius=2) + ) + + # Convert elbow keypoint to pixel coords for text placement + elbow_px = (int(elbow_coords[1][0] * width), int(elbow_coords[1][1] * height)) + cv2.circle(image, elbow_px, 6, (0, 255, 255), -1) + 
cv2.putText(image, f"{selected_side} elbow: {int(smooth_angle)}°", + (elbow_px[0] + 10, max(20, elbow_px[1] - 10)), + cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 0), 2, cv2.LINE_AA) + + else: + # No pose detected + cv2.putText(image, "No pose detected", (15, 80), + cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 0, 255), 2, cv2.LINE_AA) + + # Overlay debug info - adapted for push-ups + cv2.putText(image, f"Push-ups: {rep_count}", (width - 220, 40), + cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 200, 0), 3, cv2.LINE_AA) + if smooth_angle is not None: + cv2.putText(image, f"Elbow Angle: {int(smooth_angle)}", (15, height - 90), + cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2, cv2.LINE_AA) + if body_alignment is not None: + cv2.putText(image, f"Alignment: {int(body_alignment)}%", (15, height - 60), + cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2, cv2.LINE_AA) + cv2.putText(image, f"Stage: {pushup_stage}", (15, height - 30), + cv2.FONT_HERSHEY_SIMPLEX, 0.8, (200, 200, 200), 2, cv2.LINE_AA) + cv2.putText(image, f"Side: {selected_side}", (200, height - 30), + cv2.FONT_HERSHEY_SIMPLEX, 0.8, (200, 200, 200), 2, cv2.LINE_AA) + if rep_min_angle is not None: + cv2.putText(image, f"Min: {int(rep_min_angle)}", (350, height - 30), + cv2.FONT_HERSHEY_SIMPLEX, 0.8, (180, 180, 180), 2, cv2.LINE_AA) + + # Blinking rep feedback + if last_rep_time is not None: + elapsed = time.time() - last_rep_time + if elapsed < 0.9: + if latest_rep_feedback: + color = (0, 200, 0) if latest_rep_feedback.startswith("Good rep") else (0, 0, 255) + cv2.putText(image, latest_rep_feedback, (15, 40), + cv2.FONT_HERSHEY_SIMPLEX, 1.0, color, 3, cv2.LINE_AA) + + # CSV logging + if log_csv and csv_writer is not None: + csv_writer.writerow([ + frame_count, f"{timestamp:.3f}", selected_side, + f"{smooth_angle:.2f}" if smooth_angle is not None else "", + f"{body_alignment:.2f}" if body_alignment is not None else "", + pushup_stage if pushup_stage is not None else "", + rep_count, + f"{rep_min_angle:.2f}" if rep_min_angle is not None else "", + f"{rep_max_angle:.2f}" if rep_max_angle is not None else "", + rep_poor_alignment, + rep_hands_wrong + ]) + + out.write(image) + frame_count += 1 + + cap.release() + out.release() + if csv_file: + csv_file.close() + + # Encode to H.264 using ffmpeg + print("🎞 Converting push-up video to H.264 using ffmpeg...") + ffmpeg_cmd = [ + 'ffmpeg', '-y', + '-i', raw_path, + '-vcodec', 'libx264', + '-preset', 'fast', + '-crf', '23', + '-acodec', 'aac', + output_path + ] + subprocess.run(ffmpeg_cmd, check=True) + try: + os.remove(raw_path) + except: + pass + print("✅ H.264 conversion complete") + + # Compile aggregated stats - adapted for push-ups + tempo_stats = { + "average": round(float(np.mean(rep_durations)), 2) if rep_durations else 0.0, + "fastest": round(float(np.min(rep_durations)), 2) if rep_durations else 0.0, + "slowest": round(float(np.max(rep_durations)), 2) if rep_durations else 0.0 + } + + form_issues = list(set([issue for rep in all_rep_issues for issue in rep])) + + # Calculate good vs bad reps + good_form_reps = sum(1 for rep in all_rep_issues if len(rep) == 0) + poor_form_reps = rep_count - good_form_reps + + result = { + "pushup_count": rep_count, + "good_form_reps": good_form_reps, + "poor_form_reps": poor_form_reps, + "form_issues": form_issues, + "tempo_stats": tempo_stats, + "debug_csv": csv_path if log_csv else None + } + + return result diff --git a/optifit backend/uploads/pushup_video.mp4 b/optifit backend/uploads/pushup_video.mp4 new file mode 100644 index 0000000..a38239e Binary files /dev/null and 
b/optifit backend/uploads/pushup_video.mp4 differ
diff --git a/optifit backend/uploads/squat_video.mp4 b/optifit backend/uploads/squat_video.mp4
new file mode 100644
index 0000000..66ac951
Binary files /dev/null and b/optifit backend/uploads/squat_video.mp4 differ