vitorcalvi committed on
Commit 27d4540 · 1 Parent(s): 2afd7e1

Jaw Initial

Files changed (3)
  1. README.md +114 -14
  2. app.py +232 -0
  3. requirements.txt +4 -0
README.md CHANGED
@@ -1,14 +1,114 @@
- ---
- title: Hg Jaw Movement
- emoji: 👀
- colorFrom: pink
- colorTo: purple
- sdk: gradio
- sdk_version: 5.12.0
- app_file: app.py
- pinned: false
- license: apache-2.0
- short_description: Perform a comprehensive TMJ assessment using video analysis.
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ # JawTrack
+
+ JawTrack is a real-time jaw motion analysis system that uses computer vision to track and analyze jaw movements. Built with MediaPipe and OpenCV, it provides quantitative measurements for jaw motion assessment.
+
+ ## Features
+
+ - Real-time jaw motion tracking
+ - Video-based analysis
+ - Quantitative measurements:
+   - Jaw opening distance
+   - Lateral deviation
+   - Movement patterns
+ - Data visualization
+ - Assessment reports
+ - CSV data export
+
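The quantitative measurements come from MediaPipe Face Mesh landmarks tracked by the app.py added in this commit: landmarks 13 and 14 (upper and lower lip midpoints) for jaw opening, and 389 and 356 for the jaw sides. A minimal sketch of that calculation, with the helper name `jaw_metrics` chosen here purely for illustration:

```python
import numpy as np

def jaw_metrics(landmarks, frame_width, frame_height):
    """Pixel distances between MediaPipe Face Mesh landmarks (illustrative helper)."""
    upper_lip = np.array([landmarks[13].x, landmarks[13].y, landmarks[13].z])
    lower_lip = np.array([landmarks[14].x, landmarks[14].y, landmarks[14].z])
    left_jaw = np.array([landmarks[389].x, landmarks[389].y, landmarks[389].z])
    right_jaw = np.array([landmarks[356].x, landmarks[356].y, landmarks[356].z])

    # Landmark coordinates are normalized, so distances are scaled by the frame
    # size to get pixels; app.py later applies a calibration factor to report mm.
    jaw_opening_px = np.linalg.norm(upper_lip - lower_lip) * frame_height
    lateral_px = np.linalg.norm(left_jaw - right_jaw) * frame_width
    return jaw_opening_px, lateral_px
```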
+ ## Requirements
+
+ - Python 3.10+
+ - OpenCV
+ - MediaPipe
+ - Gradio
+ - NumPy
+ - Pandas
+ - Matplotlib
+
+ ## Installation
+
+ 1. Clone the repository:
+
+ ```bash
+ git clone https://github.com/yourusername/jawtrack.git
+ cd jawtrack
+ ```
+
+ 2. Create a virtual environment:
+
+ ```bash
+ python -m venv venv
+ source venv/bin/activate  # On Windows: venv\Scripts\activate
+ ```
+
+ 3. Install dependencies:
+
+ ```bash
+ pip install -r requirements.txt
+ ```
+
+ ## Usage
+
+ 1. Start the application:
+
+ ```bash
+ python app.py
+ ```
+
+ 2. Open your web browser and navigate to:
+
+ ```
+ http://localhost:7860
+ ```
+
+ 3. Upload a video or use the webcam for real-time analysis.
+
+ ## Development Setup
+
+ 1. Install development dependencies:
+
+ ```bash
+ pip install -r requirements-dev.txt
+ ```
+
+ 2. Run tests:
+
+ ```bash
+ pytest tests/
+ ```
+
+ ## Project Structure
+
+ ```
+ jawtrack/
+ ├── README.md
+ ├── requirements.txt
+ ├── setup.py
+ ├── jawtrack/
+ │   ├── core/
+ │   ├── analysis/
+ │   └── ui/
+ ├── tests/
+ └── examples/
+ ```
+
+ ## Contributing
+
+ 1. Fork the repository
+ 2. Create a feature branch
+ 3. Commit your changes
+ 4. Push to the branch
+ 5. Create a Pull Request
+
+ ## License
+
+ This project is licensed under the MIT License - see the LICENSE file for details.
+
+ ## Authors
+
+ - Your Name - Initial work
+
+ ## Acknowledgments
+
+ - MediaPipe team for face mesh implementation
+ - OpenCV community
+ - Gradio team for the web interface framework
app.py ADDED
@@ -0,0 +1,232 @@
+ import gradio as gr
+ import numpy as np
+ import pandas as pd
+ import matplotlib.pyplot as plt
+ from datetime import datetime
+ import json
+ from dataclasses import dataclass, asdict
+ from typing import List, Dict, Optional, Tuple
+ import cv2
+ import mediapipe as mp
+ import os
+ import tempfile
+
+ @dataclass
+ class AssessmentMeasurement:
+     timestamp: float
+     jaw_opening: float
+     lateral_deviation: float
+     frame_number: int
+     movement_type: str
+     quality_score: float
+
+ class JawAssessment:
+     def __init__(self):
+         self.measurements: List[AssessmentMeasurement] = []
+         self.current_movement: str = "baseline"
+         self.calibration_factor: float = 1.0
+         self.assessment_date = datetime.now()
+
+     def set_calibration(self, pixel_distance: float, real_distance: float = 20.0):
+         """Set calibration using known distance marker"""
+         self.calibration_factor = real_distance / pixel_distance
+
+     def add_measurement(self, jaw_opening: float, lateral_dev: float,
+                         frame_num: int, quality: float = 1.0):
+         """Add a new measurement to the assessment"""
+         measurement = AssessmentMeasurement(
+             timestamp=datetime.now().timestamp(),
+             jaw_opening=jaw_opening * self.calibration_factor,
+             lateral_deviation=lateral_dev * self.calibration_factor,
+             frame_number=frame_num,
+             movement_type=self.current_movement,
+             quality_score=quality
+         )
+         self.measurements.append(measurement)
+
+     def set_movement_type(self, movement: str):
+         """Set current movement being assessed"""
+         self.current_movement = movement
+
+     def get_analysis(self) -> Dict:
+         """Analyze collected measurements"""
+         if not self.measurements:
+             return {}
+
+         df = pd.DataFrame([asdict(m) for m in self.measurements])
+
+         analysis = {
+             'max_opening': df['jaw_opening'].max(),
+             'avg_lateral': df['lateral_deviation'].mean(),
+             'movement_range': df['jaw_opening'].max() - df['jaw_opening'].min(),
+             'quality_average': df['quality_score'].mean(),
+             'movement_counts': df['movement_type'].value_counts().to_dict(),
+             'timestamp': self.assessment_date.isoformat()
+         }
+
+         return analysis
+
+     def plot_movements(self) -> plt.Figure:
+         """Generate movement pattern plot"""
+         if not self.measurements:
+             fig, ax = plt.subplots(figsize=(10, 6))
+             ax.text(0.5, 0.5, 'No measurements available',
+                     ha='center', va='center')
+             return fig
+
+         df = pd.DataFrame([asdict(m) for m in self.measurements])
+
+         fig, ax = plt.subplots(figsize=(10, 6))
+         ax.plot(df['frame_number'], df['jaw_opening'],
+                 label='Jaw Opening', color='blue')
+         ax.plot(df['frame_number'], df['lateral_deviation'],
+                 label='Lateral Deviation', color='red')
+
+         ax.set_title('Jaw Movement Patterns')
+         ax.set_xlabel('Frame Number')
+         ax.set_ylabel('Distance (mm)')
+         ax.grid(True)
+         ax.legend()
+
+         return fig
+
+     def generate_report(self) -> str:
+         """Generate assessment report"""
+         analysis = self.get_analysis()
+
+         if not analysis:
+             return "No measurements available for report generation."
+
+         report = f"""
+ # Jaw Motion Assessment Report
+
+ Date: {self.assessment_date.strftime('%Y-%m-%d %H:%M:%S')}
+
+ ## Measurements
+ - Maximum Opening: {analysis.get('max_opening', 0):.1f} mm
+ - Average Lateral Deviation: {analysis.get('avg_lateral', 0):.1f} mm
+ - Movement Range: {analysis.get('movement_range', 0):.1f} mm
+ - Quality Score: {analysis.get('quality_average', 0):.1f}/10
+
+ ## Movement Analysis
+ """
+
+         for movement, count in analysis.get('movement_counts', {}).items():
+             report += f"- {movement}: {count} frames\n"
+
+         return report
+
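For orientation, a minimal sketch of exercising JawAssessment on its own; the measurement values below are made up for illustration, whereas in app.py they come from video frames:

```python
assessment = JawAssessment()
assessment.set_movement_type("maximum_opening")
for frame, opening_px in enumerate([10.0, 25.0, 42.0]):
    # Illustrative pixel values; lateral deviation held constant here.
    assessment.add_measurement(jaw_opening=opening_px, lateral_dev=3.0, frame_num=frame)

print(assessment.get_analysis()["max_opening"])  # 42.0 with the default calibration factor of 1.0
print(assessment.generate_report())
```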
+ def process_video(video_path: str, assessment: JawAssessment) -> Optional[str]:
+     """Process video and update assessment with measurements"""
+     try:
+         if not video_path:
+             return None
+
+         # Initialize MediaPipe Face Mesh
+         mp_face_mesh = mp.solutions.face_mesh.FaceMesh(
+             static_image_mode=False,
+             max_num_faces=1,
+             min_detection_confidence=0.5,
+             min_tracking_confidence=0.5
+         )
+
+         cap = cv2.VideoCapture(video_path)
+         if not cap.isOpened():
+             return None
+
+         # Get video properties
+         width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+         height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+         fps = int(cap.get(cv2.CAP_PROP_FPS))
+
+         # Create output video
+         output_path = tempfile.mktemp(suffix='.mp4')
+         fourcc = cv2.VideoWriter_fourcc(*'avc1')
+         out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
+
+         frame_count = 0
+         while cap.isOpened():
+             ret, frame = cap.read()
+             if not ret:
+                 break
+
+             # Convert BGR to RGB
+             rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+             results = mp_face_mesh.process(rgb_frame)
+
+             if results.multi_face_landmarks:
+                 landmarks = results.multi_face_landmarks[0].landmark
+
+                 # Get key points
+                 upper_lip = np.array([landmarks[13].x, landmarks[13].y, landmarks[13].z])
+                 lower_lip = np.array([landmarks[14].x, landmarks[14].y, landmarks[14].z])
+                 left_jaw = np.array([landmarks[389].x, landmarks[389].y, landmarks[389].z])
+                 right_jaw = np.array([landmarks[356].x, landmarks[356].y, landmarks[356].z])
+
+                 # Calculate measurements
+                 jaw_opening = np.linalg.norm(upper_lip - lower_lip) * height
+                 lateral_dev = np.linalg.norm(left_jaw - right_jaw) * width
+
+                 # Add to assessment
+                 assessment.add_measurement(jaw_opening, lateral_dev, frame_count)
+
+                 # Draw landmarks
+                 h, w = frame.shape[:2]
+                 for point in [upper_lip, lower_lip, left_jaw, right_jaw]:
+                     px = tuple(np.multiply(point[:2], [w, h]).astype(int))
+                     cv2.circle(frame, px, 2, (0, 255, 0), -1)
+
+                 # Add measurements to frame
+                 cv2.putText(frame, f"Opening: {jaw_opening:.1f}px", (10, 30),
+                             cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
+                 cv2.putText(frame, f"Lateral: {lateral_dev:.1f}px", (10, 60),
+                             cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
+
+             out.write(frame)
+             frame_count += 1
+
+         # Cleanup
+         cap.release()
+         out.release()
+         mp_face_mesh.close()
+
+         return output_path
+
+     except Exception as e:
+         print(f"Error processing video: {str(e)}")
+         return None
+
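A minimal sketch of driving process_video directly, assuming a hypothetical local clip sample.mp4 and a 20 mm reference marker that spans 80 px in the frame (both values are placeholders, not part of the commit):

```python
assessment = JawAssessment()
# A 20 mm marker measured as 80 px gives a calibration factor of 0.25 mm per pixel.
assessment.set_calibration(pixel_distance=80.0, real_distance=20.0)

annotated_path = process_video("sample.mp4", assessment)  # hypothetical input path
if annotated_path:
    print(assessment.generate_report())
```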
+ def process_assessment(video_path: str, movement: str) -> Tuple[Optional[str], str, plt.Figure]:
+     """Main assessment processing function"""
+     assessment = JawAssessment()
+     assessment.set_movement_type(movement)
+
+     processed_path = process_video(video_path, assessment)
+     report = assessment.generate_report()
+     plot = assessment.plot_movements()
+
+     return processed_path, report, plot
+
+ # Create Gradio interface
+ demo = gr.Interface(
+     fn=process_assessment,
+     inputs=[
+         gr.Video(label="Record Assessment"),
+         gr.Radio(
+             choices=["baseline", "maximum_opening", "lateral_left",
+                      "lateral_right", "combined"],
+             label="Movement Type",
+             value="baseline"
+         )
+     ],
+     outputs=[
+         gr.Video(label="Processed Recording"),
+         gr.Textbox(label="Analysis Report", lines=10),
+         gr.Plot(label="Movement Patterns")
+     ],
+     title="Jaw Motion Assessment",
+     description="Upload a video recording to analyze jaw movements."
+ )
+
+ if __name__ == "__main__":
+     demo.launch(share=True)
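demo.launch(share=True) serves the interface locally on port 7860 (the address the README's Usage section points to) and additionally requests a temporary public Gradio link. If only local access is wanted, a one-line change along these lines would do it (a sketch, not part of the commit):

```python
if __name__ == "__main__":
    # Local-only launch on the port referenced in the README.
    demo.launch(server_name="127.0.0.1", server_port=7860)
```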
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ gradio==4.19.2
+ mediapipe==0.10.9
+ numpy==1.26.4
+ opencv-python==4.9.0.80