lirony committed
Commit a6bdbe4 · 0 Parent(s)

Initial public commit

Files changed (50) (view limited to 50 files because the commit contains too many changes; see the raw diff for the full change set)
  1. .dockerignore +6 -0
  2. .gitattributes +38 -0
  3. .gitignore +6 -0
  4. Dockerfile +58 -0
  5. README.md +159 -0
  6. __init__.py +14 -0
  7. app.py +86 -0
  8. auth.py +77 -0
  9. cache.py +67 -0
  10. cache_archive.zip +3 -0
  11. evaluation.py +69 -0
  12. frontend/package-lock.json +0 -0
  13. frontend/package.json +31 -0
  14. frontend/public/assets/ai_headshot.svg +1 -0
  15. frontend/public/assets/alex.avif +0 -0
  16. frontend/public/assets/alex.mp4 +3 -0
  17. frontend/public/assets/alex_300.avif +0 -0
  18. frontend/public/assets/alex_fhir.json +246 -0
  19. frontend/public/assets/gemini.avif +0 -0
  20. frontend/public/assets/jason_fhir.json +167 -0
  21. frontend/public/assets/jordan.avif +0 -0
  22. frontend/public/assets/jordan.mp4 +3 -0
  23. frontend/public/assets/jordan_300.avif +0 -0
  24. frontend/public/assets/medgemma.avif +0 -0
  25. frontend/public/assets/patients_and_conditions.json +46 -0
  26. frontend/public/assets/sacha.avif +0 -0
  27. frontend/public/assets/sacha.mp4 +3 -0
  28. frontend/public/assets/sacha_150.avif +0 -0
  29. frontend/public/assets/sacha_fhir.json +266 -0
  30. frontend/public/assets/welcome_bottom_graphics.svg +1 -0
  31. frontend/public/assets/welcome_graphics.svg +1 -0
  32. frontend/public/assets/welcome_top_graphics.svg +1 -0
  33. frontend/public/index.html +29 -0
  34. frontend/src/App.js +88 -0
  35. frontend/src/components/DetailsPopup/DetailsPopup.css +46 -0
  36. frontend/src/components/DetailsPopup/DetailsPopup.js +76 -0
  37. frontend/src/components/Interview/Interview.css +282 -0
  38. frontend/src/components/Interview/Interview.js +491 -0
  39. frontend/src/components/PatientBuilder/PatientBuilder.css +205 -0
  40. frontend/src/components/PatientBuilder/PatientBuilder.js +235 -0
  41. frontend/src/components/PreloadImages.js +42 -0
  42. frontend/src/components/RolePlayDialogs/RolePlayDialogs.css +101 -0
  43. frontend/src/components/RolePlayDialogs/RolePlayDialogs.js +103 -0
  44. frontend/src/components/WelcomePage/WelcomePage.css +144 -0
  45. frontend/src/components/WelcomePage/WelcomePage.js +62 -0
  46. frontend/src/index.js +23 -0
  47. frontend/src/shared/Style.css +205 -0
  48. gemini.py +59 -0
  49. gemini_tts.py +212 -0
  50. interview_simulator.py +385 -0
.dockerignore ADDED
@@ -0,0 +1,6 @@
+ frontend/build
+ frontend/node_modules
+ env.list
+ __pycache__
+ **/__pycache__
+ .*
.gitattributes ADDED
@@ -0,0 +1,38 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ frontend/public/assets/jordan.mp4 filter=lfs diff=lfs merge=lfs -text
+ frontend/public/assets/sacha.mp4 filter=lfs diff=lfs merge=lfs -text
+ frontend/public/assets/alex.mp4 filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,6 @@
+ env.list
+ __pycache__
+ **/__pycache__
+ frontend/node_modules
+ frontend/build
+ .git
Dockerfile ADDED
@@ -0,0 +1,58 @@
+ # Copyright 2025 Google LLC
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Build React app
+ FROM node:24-slim AS frontend-build
+ WORKDIR /app/frontend
+ # Upgrade npm to the desired version
+ RUN npm install -g npm@11.4.x
+ COPY frontend/ ./
+ RUN npm install
+ RUN npm run build
+
+ # Python backend
+ FROM python:3.10-slim
+ WORKDIR /app
+
+ # Install ffmpeg for audio conversion
+ RUN apt-get update && apt-get install -y ffmpeg && rm -rf /var/lib/apt/lists/*
+
+ # Copy requirements.txt first for better caching
+ COPY requirements.txt ./
+ RUN pip install --no-cache-dir -r requirements.txt
+
+ # Copy Flask app
+ COPY *.py ./
+ COPY symptoms.json ./
+ COPY report_template.txt ./
+
+ # Copy built React app
+ COPY --from=frontend-build /app/frontend/build ./frontend/build
+ ENV FRONTEND_BUILD=/app/frontend/build
+
+ # Create cache directory and set permissions, then assign the env variable
+ RUN mkdir -p /cache && chmod 777 /cache
+ ENV CACHE_DIR=/cache
+
+ # If cache_archive.zip exists, extract it into /cache
+ COPY cache* /tmp/
+ RUN if [ -f /tmp/cache_archive.zip ]; then \
+     apt-get update && apt-get install -y unzip && \
+     unzip /tmp/cache_archive.zip -d /cache && \
+     rm /tmp/cache_archive.zip && \
+     chmod -R 777 /cache; \
+     fi
+
+ EXPOSE 7860
+ CMD ["gunicorn", "-b", "0.0.0.0:7860", "app:app", "--threads", "4", "--timeout", "300"]
README.md ADDED
@@ -0,0 +1,159 @@
+ ---
+ title: Appoint Ready - MedGemma Demo
+ emoji: 📋
+ colorFrom: blue
+ colorTo: gray
+ sdk: docker
+ models:
+ - google/medgemma-27b-text-it
+ pinned: false
+ license: apache-2.0
+ short_description: 'Simulated Pre-visit Intake Demo built using MedGemma'
+ ---
+
+ ## Table of Contents
+ - [Demo Description](#demo-description)
+ - [Technical Architecture](#technical-architecture)
+ - [Running the Demo Locally](#running-the-demo-locally)
+ - [Models used](#models-used)
+ - [Caching](#caching)
+ - [Disclaimer](#disclaimer)
+ - [Other Models and Demos](#other-models-and-demos)
+
+ # AppointReady: Simulated Pre-visit Intake Demo built using MedGemma
+
+ Healthcare providers often seek efficient ways to gather comprehensive patient information before appointments. This demo illustrates how MedGemma could be used in an application to streamline pre-visit information collection and utilization.
+
+ The demonstration first asks questions to gather pre-visit information.
+ After it has identified and collected relevant information, the demo application generates a pre-visit report using both collected and health record information (stored as FHIR resources for this demonstration). This type of intelligent pre-visit report can help providers be more efficient and effective while also providing an improved experience for patients compared to traditional intake forms.
+
+ At the conclusion of the demo, you can view an evaluation of the pre-visit report, which provides additional insights into the quality of the demonstrated capabilities. For this evaluation, MedGemma is provided the patient's reference diagnosis, allowing MedGemma to create a self-evaluation report highlighting strengths as well as areas for improvement.
+
+ ## Technical Architecture
+ This application is composed of several key components:
+
+ * **Frontend**: A web interface built with React that provides the user interface for the chat and report visualization.
+ * **Backend**: A Python server built with Gunicorn/Flask that handles the application logic. It communicates with the LLMs, manages the conversation flow, and generates the final pre-visit report.
+ * **APIs called**:
+   * **MedGemma**: Acts as the clinical assistant, asking relevant questions and summarizing information.
+   * **Gemini**: Role-plays as the patient, providing responses based on a predefined scenario.
+   * **Gemini TTS**: Generative text-to-speech model.
+ * **Deployment**: The entire application is containerized using Docker for easy deployment and scalability.
+
+ A high-level overview of the architecture:
+
+ [![](https://mermaid.ink/img/pako:eNqFk81u00AQx19ltVIlkJzIdpM69i1NShpEpQqnHMAcFnvTrGqvrfW6DUS5cUTiS-IKByRegQPileARmN1N7DgE4Yt3_vObnfHMeIXjPKE4wNeCFAs0G0ccwXN0hMZ0zjhFE5FXRWnUsnphsAhflVSgKZdUkFiynEfYIOpRvmcR_v35_Vd9jvDzxvlA5BDFEw28eVfb6N6pyO-Avl_joEb8r8ynJL5RfEjFLYtpuZt5csVZnAsOl2-P7eQpKW_Aqd9oWBQpi4kpf4fSn3XL6F3IsirVfl3tty-NCzW-VuyIxAuq6Q8_0SUVJSvh66TRW-TZ-eMxkUSzH18rEym7xYQSMsTDsqSy1OCnH7--v0UXNGHEQuezi0cWehj-p2FnS6iZkxQNpweb9oQKSZfDKSQwRwVCignNsnY5oDDOhpcKNWcExgFkNgsbBIwWtlNls2ajnHOqN2mza3rBOp1OvSBGrtdFueoZ893hmyg14U2IHrYSDwx2H9lMZV_eHYTxHbhLk2bQvN4FLW5bvKd3mo7-wwO9wxb8myzBgRQVtXBGRUaUiVcqJsJyQTPYrACOCZ2TKpVquGsIKwh_mufZNhJ-5OsFDuYkLcGqioRIOmYE1iSrVQGtpWKUV1zioNf39SU4WOElDhzH7_quNzhxB57XcwZuz8IvQe47XdexvZ5r9zzH953-2sKvdF67Ozju2_aJ43gD1_aOfW_9ByuAZrY?type=png)](https://mermaid.live/edit#pako:eNqFk81u00AQx19ltVIlkJzIdpM69i1NShpEpQqnHMAcFnvTrGqvrfW6DUS5cUTiS-IKByRegQPileARmN1N7DgE4Yt3_vObnfHMeIXjPKE4wNeCFAs0G0ccwXN0hMZ0zjhFE5FXRWnUsnphsAhflVSgKZdUkFiynEfYIOpRvmcR_v35_Vd9jvDzxvlA5BDFEw28eVfb6N6pyO-Avl_joEb8r8ynJL5RfEjFLYtpuZt5csVZnAsOl2-P7eQpKW_Aqd9oWBQpi4kpf4fSn3XL6F3IsirVfl3tty-NCzW-VuyIxAuq6Q8_0SUVJSvh66TRW-TZ-eMxkUSzH18rEym7xYQSMsTDsqSy1OCnH7--v0UXNGHEQuezi0cWehj-p2FnS6iZkxQNpweb9oQKSZfDKSQwRwVCignNsnY5oDDOhpcKNWcExgFkNgsbBIwWtlNls2ajnHOqN2mza3rBOp1OvSBGrtdFueoZ893hmyg14U2IHrYSDwx2H9lMZV_eHYTxHbhLk2bQvN4FLW5bvKd3mo7-wwO9wxb8myzBgRQVtXBGRUaUiVcqJsJyQTPYrACOCZ2TKpVquGsIKwh_mufZNhJ-5OsFDuYkLcGqioRIOmYE1iSrVQGtpWKUV1zioNf39SU4WOElDhzH7_quNzhxB57XcwZuz8IvQe47XdexvZ5r9zzH953-2sKvdF67Ozju2_aJ43gD1_aOfW_9ByuAZrY)
+
+ <!--```mermaid
+ graph TD
+ %% Define Groups
+ subgraph "User Interaction"
+ User["👤 User"]
+ Frontend["🌐 Frontend (Browser)"]
+ end
+
+ subgraph "Backend Services"
+ GUnicorn["GUnicorn"]
+ Flask["Flask Application"]
+ InterviewSimulation["🧠 Interview Simulation"]
+ Cache["💾 Persistent Cache"]
+ EHRData["📄 EHR Data"]
+ StaticAssets["🖼️ Media, HTML, JS"]
+ end
+
+ subgraph "External AI Services"
+ VertexAI["Vertex AI MedGemma"]
+ GeminiAPI["Gemini API"]
+ GeminiTTS["Gemini TTS API"]
+ end
+
+ %% Define Connections
+ User --- Frontend
+ Frontend --- GUnicorn
+ GUnicorn --- Flask
+ Flask --- InterviewSimulation
+ Flask --- EHRData
+ Flask --- StaticAssets
+ InterviewSimulation --- Cache
+ Cache --- VertexAI
+ Cache --- GeminiAPI
+ Cache --- GeminiTTS
+ ```-->
+
+ ## Running the Demo Locally
+
+ To run this demo on your own machine, you'll need to have Docker installed.
+
+ ### Prerequisites
+ * Docker
+ * Git
+ * A Google Cloud project with the Vertex AI API enabled.
+
+ ### Setup & Configuration
+ 1. **Clone the repository:**
+    ```bash
+    git clone https://huggingface.co/spaces/google/appoint-ready
+    cd appoint-ready
+    ```
+
+ 2. **Configure environment variables:**
+    This project uses an `env.list` file for configuration, which is passed to Docker. Create this file in the root directory.
+    ```ini
+    # env.list
+    GEMINI_API_KEY="your-gemini-token"
+    GENERATE_SPEECH=false or true
+    GCP_MEDGEMMA_ENDPOINT=medgemma vertex ai endpoint
+    GCP_MEDGEMMA_SERVICE_ACCOUNT_KEY="service-account-key json"
+    ```
+
+    * `GEMINI_API_KEY`: Key can be generated via [aistudio](https://aistudio.google.com/apikey).
+    * `GENERATE_SPEECH`: Whether the demo should generate speech that is not found in the cache. Default is false.
+    * `GCP_MEDGEMMA_ENDPOINT`: Deploy MedGemma via [Model Garden](https://console.cloud.google.com/vertex-ai/publishers/google/model-garden/medgemma) and use the resulting Vertex AI endpoint.
+    * `GCP_MEDGEMMA_SERVICE_ACCOUNT_KEY`: JSON key of a service account with access to the Vertex AI endpoint (see `auth.py`).
+
+ ### Execution
+ 1. **Build and run the Docker containers:**
+    ```bash
+    run_local.sh
+    ```
+
+ 2. **Access the application:**
+    Once the containers are running, you can access the demo in your web browser at `http://localhost:[PORT]` (e.g., `http://localhost:7860`).
+
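`run_local.sh` is referenced above but is not among the 50 files shown in this truncated view. Assuming it simply wraps `docker build` and `docker run` with the `env.list` file from the setup step, a minimal equivalent invocation might look like the sketch below (the actual script may differ):

```bash
# Hypothetical stand-in for run_local.sh: build the image and run it with env.list.
# Port 7860 matches the EXPOSE/gunicorn settings in the Dockerfile of this commit.
docker build -t appoint-ready .
docker run --rm -p 7860:7860 --env-file env.list appoint-ready
```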
+ ## Models used
+ This demo uses four models:
+
+ * MedGemma 27b-text-it: https://huggingface.co/google/medgemma-27b-text-it \
+   For this demo, MedGemma-27B was deployed via Model Garden (https://console.cloud.google.com/vertex-ai/publishers/google/model-garden/medgemma).
+ * Gemini: https://cloud.google.com/vertex-ai/generative-ai/docs/models/gemini/2-5-flash \
+   We use Gemini to role-play the patient while MedGemma plays the clinical assistant.
+ * Gemini TTS: https://ai.google.dev/gemini-api/docs/models#gemini-2.5-flash-preview-tts \
+   As an alternative, consider using Cloud Text-to-Speech: https://cloud.google.com/text-to-speech
+ * Veo 3: used to generate the patient avatar animations: https://gemini.google/overview/video-generation
+
+
+
+ ## Caching
+ This demo is functional, and results are persistently cached to reduce environmental impact.
+
+ ## Disclaimer
+ This demonstration is for illustrative purposes only and does not represent a finished or approved
+ product. It is not representative of compliance with any regulations or standards for
+ quality, safety or efficacy. Any real-world application would require additional development,
+ training, and adaptation. The experience highlighted in this demo shows MedGemma's baseline
+ capability for the displayed task and is intended to help developers and users explore possible
+ applications and inspire further development.
+
+ This is not an officially supported Google product. This project is not
+ eligible for the [Google Open Source Software Vulnerability Rewards
+ Program](https://bughunters.google.com/open-source-security).
+
+ ## Other Models and Demos
+ See other demos here: https://huggingface.co/collections/google/hai-def-concept-apps-6837acfccce400abe6ec26c1
+
+ MedGemma is fine-tunable; see the Colab here: https://github.com/Google-Health/medgemma/blob/main/notebooks/fine_tune_with_hugging_face.ipynb
+
+ ## Contacts
+
+ * This demo is part of Google's [Health AI Developer Foundations (HAI-DEF)](https://developers.google.com/health-ai-developer-foundations?referral=appoint-ready)
+ * Technical info - contact [@lirony](https://huggingface.co/lirony)
+ * Press only: press@google.com
__init__.py ADDED
@@ -0,0 +1,14 @@
+ # Copyright 2025 Google LLC
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
app.py ADDED
@@ -0,0 +1,86 @@
+ # Copyright 2025 Google LLC
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from evaluation import evaluate_report, evaluation_prompt
+ from flask import Flask, send_from_directory, request, jsonify, Response, stream_with_context, send_file
+ from flask_cors import CORS
+ import os, time, json, re
+ from gemini import gemini_get_text_response
+ from interview_simulator import stream_interview
+ from cache import create_cache_zip
+ from medgemma import medgemma_get_text_response
+
+ app = Flask(__name__, static_folder=os.environ.get("FRONTEND_BUILD", "frontend/build"), static_url_path="/")
+ CORS(app, resources={r"/api/*": {"origins": "http://localhost:3000"}})
+
+ @app.route("/")
+ def serve():
+     """Serves the main index.html file."""
+     return send_from_directory(app.static_folder, "index.html")
+
+
+ @app.route("/api/stream_conversation", methods=["GET"])
+ def stream_conversation():
+     """Streams the conversation with the interview simulator."""
+     patient = request.args.get("patient", "Patient")
+     condition = request.args.get("condition", "unknown condition")
+
+     def generate():
+         try:
+             for message in stream_interview(patient, condition):
+                 yield f"data: {message}\n\n"
+         except Exception as e:
+             yield f"data: Error: {str(e)}\n\n"
+             raise e
+
+     return Response(stream_with_context(generate()), mimetype="text/event-stream")
+
+ @app.route("/api/evaluate_report", methods=["POST"])
+ def evaluate_report_call():
+     """Evaluates the provided medical report."""
+     data = request.get_json()
+     report = data.get("report", "")
+     if not report:
+         return jsonify({"error": "Report is required"}), 400
+     condition = data.get("condition", "")
+     if not condition:
+         return jsonify({"error": "Condition is required"}), 400
+
+     evaluation_text = evaluate_report(report, condition)
+
+     return jsonify({"evaluation": evaluation_text})
+
+
+ @app.route("/api/download_cache")
+ def download_cache_zip():
+     """Creates a zip file of the cache and returns it for download."""
+     zip_filepath, error = create_cache_zip()
+     if error:
+         return jsonify({"error": error}), 500
+     if not os.path.isfile(zip_filepath):
+         return jsonify({"error": f"File not found: {zip_filepath}"}), 404
+     return send_file(zip_filepath, as_attachment=True)
+
+
+ @app.route("/<path:path>")
+ def static_proxy(path):
+     """Serves static files and defaults to index.html for unknown paths."""
+     file_path = os.path.join(app.static_folder, path)
+     if os.path.isfile(file_path):
+         return send_from_directory(app.static_folder, path)
+     else:
+         return send_from_directory(app.static_folder, "index.html")
+
+ if __name__ == "__main__":
+     app.run(host="0.0.0.0", port=7860, threaded=True)
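With the container running locally on port 7860, the routes defined in app.py above can be exercised directly. The following curl calls are a hedged sketch; the patient and condition values are illustrative placeholders taken from the bundled assets:

```bash
# Stream the simulated interview as server-sent events (mimetype text/event-stream).
curl -N "http://localhost:7860/api/stream_conversation?patient=Alex%20Sharma&condition=Flu"

# Ask MedGemma to evaluate a generated pre-visit report against the reference condition.
curl -X POST "http://localhost:7860/api/evaluate_report" \
  -H "Content-Type: application/json" \
  -d '{"report": "...", "condition": "Flu"}'

# Download a zip snapshot of the persistent cache.
curl -OJ "http://localhost:7860/api/download_cache"
```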
auth.py ADDED
@@ -0,0 +1,77 @@
+ # Copyright 2025 Google LLC
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import json
+ import datetime
+ from google.oauth2 import service_account
+ import google.auth.transport.requests
+
+ def create_credentials(secret_key_json) -> service_account.Credentials:
+     """Creates Google Cloud credentials from the provided service account key.
+
+     Returns:
+         service_account.Credentials: The created credentials object.
+
+     Raises:
+         ValueError: If the environment variable is not set or is empty, or if the
+             JSON format is invalid.
+     """
+
+     if not secret_key_json:
+         raise ValueError("Userdata variable 'GCP_MEDGEMMA_SERVICE_ACCOUNT_KEY' is not set or is empty.")
+     try:
+         service_account_info = json.loads(secret_key_json)
+     except (SyntaxError, ValueError) as e:
+         raise ValueError("Invalid service account key JSON format.") from e
+     return service_account.Credentials.from_service_account_info(
+         service_account_info,
+         scopes=['https://www.googleapis.com/auth/cloud-platform']
+     )
+
+ def refresh_credentials(credentials: service_account.Credentials) -> service_account.Credentials:
+     """Refreshes the provided Google Cloud credentials if they are about to expire
+     (within 5 minutes) or if they don't have an expiry time set.
+
+     Args:
+         credentials: The credentials object to refresh.
+
+     Returns:
+         service_account.Credentials: The refreshed credentials object.
+     """
+     if credentials.expiry:
+         expiry_time = credentials.expiry.replace(tzinfo=datetime.timezone.utc)
+         # Calculate the time remaining until expiration
+         time_remaining = expiry_time - datetime.datetime.now(datetime.timezone.utc)
+         # Check if the token is about to expire (e.g., within 5 minutes)
+         if time_remaining < datetime.timedelta(minutes=5):
+             request = google.auth.transport.requests.Request()
+             credentials.refresh(request)
+     else:
+         # If no expiry is set, always attempt to refresh (e.g., for certain credential types)
+         request = google.auth.transport.requests.Request()
+         credentials.refresh(request)
+     return credentials
+
+ def get_access_token_refresh_if_needed(credentials: service_account.Credentials) -> str:
+     """Gets the access token from the credentials, refreshing them if needed.
+
+     Args:
+         credentials: The credentials object.
+
+     Returns:
+         str: The access token.
+     """
+     credentials = refresh_credentials(credentials)
+     return credentials.token
+
cache.py ADDED
@@ -0,0 +1,67 @@
+ # Copyright 2025 Google LLC
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from diskcache import Cache
+ import os
+ import shutil
+ import tempfile
+ import zipfile
+ import logging
+
+ cache = Cache(os.environ.get("CACHE_DIR", "/cache"))
+ # Print cache statistics after loading
+ try:
+     item_count = len(cache)
+     size_bytes = cache.volume()
+     print(f"Cache loaded: {item_count} items, approx {size_bytes} bytes")
+ except Exception as e:
+     print(f"Could not retrieve cache statistics: {e}")
+
+ def create_cache_zip():
+     temp_dir = tempfile.gettempdir()
+     base_name = os.path.join(temp_dir, "cache_archive")  # A more descriptive name
+     archive_path = base_name + ".zip"
+     cache_directory = os.environ.get("CACHE_DIR", "/cache")
+
+     if not os.path.isdir(cache_directory):
+         logging.error(f"Cache directory not found at {cache_directory}")
+         return None, f"Cache directory not found on server: {cache_directory}"
+
+     logging.info("Forcing a cache checkpoint for safe backup...")
+     try:
+         # Open and immediately close a connection.
+         # This forces SQLite to perform a checkpoint, merging the .wal file
+         # into the main .db file, ensuring the on-disk files are consistent.
+         with Cache(cache_directory) as temp_cache:
+             temp_cache.close()
+
+         # Clean up temporary files before archiving.
+         tmp_path = os.path.join(cache_directory, 'tmp')
+         if os.path.isdir(tmp_path):
+             logging.info(f"Removing temporary cache directory: {tmp_path}")
+             shutil.rmtree(tmp_path)
+
+         logging.info(f"Checkpoint complete. Creating zip archive of {cache_directory} to {archive_path}")
+         with zipfile.ZipFile(archive_path, 'w', zipfile.ZIP_DEFLATED, compresslevel=9) as zipf:
+             for root, _, files in os.walk(cache_directory):
+                 for file in files:
+                     file_path = os.path.join(root, file)
+                     arcname = os.path.relpath(file_path, cache_directory)
+                     zipf.write(file_path, arcname)
+         logging.info("Zip archive created successfully.")
+         return archive_path, None
+
+     except Exception as e:
+         logging.error(f"Error creating zip archive of cache directory: {e}", exc_info=True)
+         return None, f"Error creating zip archive: {e}"
cache_archive.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7c4735cc77e6df31539abaa76ef2389252ab6057d875bba8a30b09fdaa5f84e0
+ size 6150757
evaluation.py ADDED
@@ -0,0 +1,69 @@
+ # Copyright 2025 Google LLC
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import re
+ from medgemma import medgemma_get_text_response
+
+
+ def evaluation_prompt(defacto_condition):
+     # Returns a detailed prompt for the LLM to evaluate a pre-visit report for a specific condition
+     return f"""
+ Your role is to evaluate the helpfulness of a pre-visit report, which is based on a pre-visit patient interview and existing health records.
+ The patient's de facto diagnosed condition is "{defacto_condition}", which was not known at the time of the interview.
+
+ List the specific elements in the pre-visit report text that are helpful or necessary for the PCP to diagnose the de facto diagnosed condition: "{defacto_condition}".
+
+ This includes pertinent positives or negatives.
+ List critical elements that are MISSING from the pre-visit report text that would have been helpful for the PCP to diagnose the de facto diagnosed condition.
+ This includes pertinent positives or negatives that were missing from the report.
+ (keep in mind that the condition "{defacto_condition}" was not known at the time)
+
+ The evaluation output should be in HTML format.
+
+ REPORT TEMPLATE START
+
+ <h3 class="helpful">Helpful Facts:</h3>
+
+ <h3 class="missing">What wasn't covered but would be helpful:</h3>
+
+ REPORT TEMPLATE END
+ """
+
+ def evaluate_report(report, condition):
+     """Evaluate the pre-visit report based on the condition using MedGemma LLM."""
+     evaluation_text = medgemma_get_text_response([
+         {
+             "role": "system",
+             "content": [
+                 {
+                     "type": "text",
+                     "text": f"{evaluation_prompt(condition)}"
+                 }
+             ]
+         },
+         {
+             "role": "user",
+             "content": [
+                 {
+                     "type": "text",
+                     "text": f"Here is the report text:\n{report}"
+                 }
+             ]
+         },
+     ])
+
+     # Remove any LLM "thinking" blocks (special tokens sometimes present in output)
+     evaluation_text = re.sub(r'<unused94>.*?<unused95>', '', evaluation_text, flags=re.DOTALL)
+
+     return evaluation_text
frontend/package-lock.json ADDED
The diff for this file is too large to render. See raw diff
 
frontend/package.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "name": "frontend",
+   "version": "0.1.0",
+   "private": true,
+   "dependencies": {
+     "diff": "^8.0.2",
+     "html-react-parser": "^5.2.5",
+     "marked": "^15.0.12",
+     "react": "^18.2.0",
+     "react-dom": "^18.2.0",
+     "react-scripts": "^5.0.1",
+     "@textea/json-viewer": "^3.2.1",
+     "@mui/material": "^5.15.20"
+   },
+   "scripts": {
+     "start": "cross-env NODE_OPTIONS=--openssl-legacy-provider react-scripts start",
+     "build": "react-scripts build"
+   },
+   "browserslist": {
+     "production": [
+       ">0.2%",
+       "not dead",
+       "not op_mini all"
+     ],
+     "development": [
+       "last 1 chrome version",
+       "last 1 firefox version",
+       "last 1 safari version"
+     ]
+   }
+ }
frontend/public/assets/ai_headshot.svg ADDED
frontend/public/assets/alex.avif ADDED
frontend/public/assets/alex.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ae46c52980e7d7a176245a8f42e90c7386af502e2efed87721937a0e3c9e53ae
+ size 1122871
frontend/public/assets/alex_300.avif ADDED
frontend/public/assets/alex_fhir.json ADDED
@@ -0,0 +1,246 @@
1
+ [
2
+ {
3
+ "resourceType": "Patient",
4
+ "id": "alex-sharma-63-female",
5
+ "meta": {
6
+ "profile": [
7
+ "http://hl7.org/fhir/R4/StructureDefinition/Patient"
8
+ ]
9
+ },
10
+ "text": {
11
+ "status": "generated",
12
+ "div": "<div xmlns=\"http://www.w3.org/1999/xhtml\"><p><b>Alex Sharma</b> (Female, 63)</p><p>Known Conditions: Diabetes</p></div>"
13
+ },
14
+ "identifier": [
15
+ {
16
+ "use": "usual",
17
+ "type": {
18
+ "coding": [
19
+ {
20
+ "system": "http://terminology.hl7.org/CodeSystem/v2-0203",
21
+ "code": "MR",
22
+ "display": "Medical record number"
23
+ }
24
+ ]
25
+ },
26
+ "system": "http://example.org/patients",
27
+ "value": "PAT-2023-001"
28
+ }
29
+ ],
30
+ "name": [
31
+ {
32
+ "use": "official",
33
+ "family": "Sharma",
34
+ "given": [
35
+ "Alex"
36
+ ]
37
+ }
38
+ ],
39
+ "gender": "female",
40
+ "birthDate": "1962-01-15",
41
+ "deceasedBoolean": false
42
+ },
43
+ {
44
+ "resourceType": "Encounter",
45
+ "id": "encounter-diabetes-followup",
46
+ "meta": {
47
+ "profile": [
48
+ "http://hl7.org/fhir/R4/StructureDefinition/Encounter"
49
+ ]
50
+ },
51
+ "text": {
52
+ "status": "generated",
53
+ "div": "<div xmlns=\"http://www.w3.org/1999/xhtml\"><p><b>Diabetes Follow-up Visit</b> for Alex Sharma on 2024-03-10</p></div>"
54
+ },
55
+ "status": "finished",
56
+ "class": {
57
+ "system": "http://terminology.hl7.org/CodeSystem/v3-ActCode",
58
+ "code": "AMB",
59
+ "display": "Ambulatory"
60
+ },
61
+ "type": [
62
+ {
63
+ "coding": [
64
+ {
65
+ "system": "http://terminology.hl7.org/CodeSystem/v3-ActCode",
66
+ "code": "FLD",
67
+ "display": "Field"
68
+ }
69
+ ],
70
+ "text": "Follow-up visit for Diabetes"
71
+ }
72
+ ],
73
+ "subject": {
74
+ "reference": "Patient/alex-sharma-63-female",
75
+ "display": "Alex Sharma"
76
+ },
77
+ "period": {
78
+ "start": "2024-03-10T10:00:00Z",
79
+ "end": "2024-03-10T10:45:00Z"
80
+ },
81
+ "serviceProvider": {
82
+ "reference": "Organization/example-org",
83
+ "display": "Example Medical Center"
84
+ }
85
+ },
86
+ {
87
+ "resourceType": "Condition",
88
+ "id": "condition-diabetes-mellitus",
89
+ "meta": {
90
+ "profile": [
91
+ "http://hl7.org/fhir/R4/StructureDefinition/Condition"
92
+ ]
93
+ },
94
+ "text": {
95
+ "status": "generated",
96
+ "div": "<div xmlns=\"http://www.w3.org/1999/xhtml\"><p><b>Diabetes Mellitus, Type 2</b> for Alex Sharma, diagnosed 2020-05-20</p></div>"
97
+ },
98
+ "clinicalStatus": {
99
+ "coding": [
100
+ {
101
+ "system": "http://terminology.hl7.org/CodeSystem/condition-clinical",
102
+ "code": "active",
103
+ "display": "Active"
104
+ }
105
+ ]
106
+ },
107
+ "verificationStatus": {
108
+ "coding": [
109
+ {
110
+ "system": "http://terminology.hl7.org/CodeSystem/condition-ver-status",
111
+ "code": "confirmed",
112
+ "display": "Confirmed"
113
+ }
114
+ ]
115
+ },
116
+ "category": [
117
+ {
118
+ "coding": [
119
+ {
120
+ "system": "http://terminology.hl7.org/CodeSystem/condition-category",
121
+ "code": "problem-list-item",
122
+ "display": "Problem List Item"
123
+ }
124
+ ]
125
+ }
126
+ ],
127
+ "severity": {
128
+ "coding": [
129
+ {
130
+ "system": "http://terminology.hl7.org/CodeSystem/condition-severity",
131
+ "code": "24484000",
132
+ "display": "Moderate"
133
+ }
134
+ ]
135
+ },
136
+ "code": {
137
+ "coding": [
138
+ {
139
+ "system": "http://snomed.info/sct",
140
+ "code": "44054006",
141
+ "display": "Diabetes mellitus"
142
+ }
143
+ ],
144
+ "text": "Diabetes Mellitus, Type 2"
145
+ },
146
+ "subject": {
147
+ "reference": "Patient/alex-sharma-63-female",
148
+ "display": "Alex Sharma"
149
+ },
150
+ "onsetDateTime": "2020-05-20"
151
+ },
152
+ {
153
+ "resourceType": "MedicationRequest",
154
+ "id": "medicationrequest-metformin",
155
+ "meta": {
156
+ "profile": [
157
+ "http://hl7.org/fhir/R4/StructureDefinition/MedicationRequest"
158
+ ]
159
+ },
160
+ "text": {
161
+ "status": "generated",
162
+ "div": "<div xmlns=\"http://www.w3.org/1999/xhtml\"><p><b>Metformin 500mg</b> for Alex Sharma, 1 tablet by mouth twice daily, ongoing</p></div>"
163
+ },
164
+ "status": "active",
165
+ "intent": "order",
166
+ "medicationCodeableConcept": {
167
+ "coding": [
168
+ {
169
+ "system": "http://www.nlm.nih.gov/research/umls/rxnorm",
170
+ "code": "6809",
171
+ "display": "Metformin"
172
+ }
173
+ ],
174
+ "text": "Metformin 500mg Tablet"
175
+ },
176
+ "subject": {
177
+ "reference": "Patient/alex-sharma-63-female",
178
+ "display": "Alex Sharma"
179
+ },
180
+ "encounter": {
181
+ "reference": "Encounter/encounter-diabetes-followup",
182
+ "display": "Diabetes Follow-up Visit"
183
+ },
184
+ "authoredOn": "2024-03-10T10:30:00Z",
185
+ "requester": {
186
+ "reference": "Practitioner/dr-smith",
187
+ "display": "Dr. Jane Smith"
188
+ },
189
+ "dosageInstruction": [
190
+ {
191
+ "sequence": 1,
192
+ "text": "One tablet by mouth twice daily",
193
+ "timing": {
194
+ "repeat": {
195
+ "frequency": 2,
196
+ "period": 1,
197
+ "periodUnit": "d"
198
+ }
199
+ },
200
+ "route": {
201
+ "coding": [
202
+ {
203
+ "system": "http://terminology.hl7.org/CodeSystem/v3-RouteOfAdministration",
204
+ "code": "PO",
205
+ "display": "Oral"
206
+ }
207
+ ]
208
+ },
209
+ "doseAndRate": [
210
+ {
211
+ "type": {
212
+ "coding": [
213
+ {
214
+ "system": "http://terminology.hl7.org/CodeSystem/dose-rate-type",
215
+ "code": "ordered",
216
+ "display": "Ordered"
217
+ }
218
+ ]
219
+ },
220
+ "doseQuantity": {
221
+ "value": 1,
222
+ "unit": "tablet",
223
+ "system": "http://unitsofmeasure.org",
224
+ "code": "{tablet}"
225
+ }
226
+ }
227
+ ]
228
+ }
229
+ ],
230
+ "dispenseRequest": {
231
+ "numberOfRepeatsAllowed": 3,
232
+ "quantity": {
233
+ "value": 60,
234
+ "unit": "tablet",
235
+ "system": "http://unitsofmeasure.org",
236
+ "code": "{tablet}"
237
+ },
238
+ "expectedSupplyDuration": {
239
+ "value": 30,
240
+ "unit": "days",
241
+ "system": "http://unitsofmeasure.org",
242
+ "code": "d"
243
+ }
244
+ }
245
+ }
246
+ ]
frontend/public/assets/gemini.avif ADDED
frontend/public/assets/jason_fhir.json ADDED
@@ -0,0 +1,167 @@
1
+ {
2
+ "resourceType": "Bundle",
3
+ "id": "Jordon-Dubois-Depression-Prozac-Encounter",
4
+ "type": "collection",
5
+ "entry": [
6
+ {
7
+ "fullUrl": "urn:uuid:patient-jordon-dubois",
8
+ "resource": {
9
+ "resourceType": "Patient",
10
+ "id": "jordon-dubois",
11
+ "name": [
12
+ {
13
+ "given": ["Jordon"],
14
+ "family": "Dubois"
15
+ }
16
+ ],
17
+ "gender": "male",
18
+ "birthDate": "1990-06-11"
19
+ }
20
+ },
21
+ {
22
+ "fullUrl": "urn:uuid:condition-depression",
23
+ "resource": {
24
+ "resourceType": "Condition",
25
+ "id": "depression-jordon-dubois",
26
+ "subject": {
27
+ "reference": "urn:uuid:patient-jordon-dubois"
28
+ },
29
+ "code": {
30
+ "coding": [
31
+ {
32
+ "system": "http://snomed.info/sct",
33
+ "code": "366053000",
34
+ "display": "Depressive disorder"
35
+ }
36
+ ],
37
+ "text": "Depression"
38
+ },
39
+ "clinicalStatus": {
40
+ "coding": [
41
+ {
42
+ "system": "http://terminology.hl7.org/CodeSystem/condition-clinical",
43
+ "code": "active",
44
+ "display": "Active"
45
+ }
46
+ ]
47
+ },
48
+ "verificationStatus": {
49
+ "coding": [
50
+ {
51
+ "system": "http://terminology.hl7.org/CodeSystem/condition-ver-status",
52
+ "code": "confirmed",
53
+ "display": "Confirmed"
54
+ }
55
+ ]
56
+ },
57
+ "recordedDate": "2024-01-15T10:00:00Z"
58
+ }
59
+ },
60
+ {
61
+ "fullUrl": "urn:uuid:medication-prozac",
62
+ "resource": {
63
+ "resourceType": "Medication",
64
+ "id": "prozac",
65
+ "code": {
66
+ "coding": [
67
+ {
68
+ "system": "http://www.nlm.nih.gov/research/umls/rxnorm",
69
+ "code": "100371",
70
+ "display": "Fluoxetine"
71
+ }
72
+ ],
73
+ "text": "Prozac"
74
+ }
75
+ }
76
+ },
77
+ {
78
+ "fullUrl": "urn:uuid:medicationrequest-prozac",
79
+ "resource": {
80
+ "resourceType": "MedicationRequest",
81
+ "id": "prozac-request-jordon-dubois",
82
+ "subject": {
83
+ "reference": "urn:uuid:patient-jordon-dubois"
84
+ },
85
+ "medicationReference": {
86
+ "reference": "urn:uuid:medication-prozac"
87
+ },
88
+ "requester": {
89
+ "display": "Dr. Smith"
90
+ },
91
+ "authoredOn": "2024-01-15T11:00:00Z",
92
+ "status": "active",
93
+ "intent": "order",
94
+ "dosageInstruction": [
95
+ {
96
+ "text": "20mg daily",
97
+ "timing": {
98
+ "repeat": {
99
+ "frequency": 1,
100
+ "period": 1,
101
+ "periodUnit": "d"
102
+ }
103
+ },
104
+ "route": {
105
+ "coding": [
106
+ {
107
+ "system": "http://terminology.hl7.org/CodeSystem/v3-RouteOfAdministration",
108
+ "code": "PO",
109
+ "display": "Oral"
110
+ }
111
+ ]
112
+ },
113
+ "doseAndRate": [
114
+ {
115
+ "type": {
116
+ "coding": [
117
+ {
118
+ "system": "http://terminology.hl7.org/CodeSystem/dose-rate-type",
119
+ "code": "ordered",
120
+ "display": "Ordered"
121
+ }
122
+ ]
123
+ },
124
+ "doseQuantity": {
125
+ "value": 20,
126
+ "unit": "mg",
127
+ "system": "http://unitsofmeasure.org",
128
+ "code": "mg"
129
+ }
130
+ }
131
+ ]
132
+ }
133
+ ],
134
+ "reasonReference": [
135
+ {
136
+ "reference": "urn:uuid:condition-depression"
137
+ }
138
+ ]
139
+ }
140
+ },
141
+ {
142
+ "fullUrl": "urn:uuid:encounter-depression",
143
+ "resource": {
144
+ "resourceType": "Encounter",
145
+ "id": "encounter-jordon-dubois-depression",
146
+ "status": "finished",
147
+ "class": {
148
+ "system": "http://terminology.hl7.org/CodeSystem/v3-ActCode",
149
+ "code": "AMB",
150
+ "display": "Ambulatory"
151
+ },
152
+ "subject": {
153
+ "reference": "urn:uuid:patient-jordon-dubois"
154
+ },
155
+ "period": {
156
+ "start": "2024-01-15T09:30:00Z",
157
+ "end": "2024-01-15T10:30:00Z"
158
+ },
159
+ "reasonReference": [
160
+ {
161
+ "reference": "urn:uuid:condition-depression"
162
+ }
163
+ ]
164
+ }
165
+ }
166
+ ]
167
+ }
frontend/public/assets/jordan.avif ADDED
frontend/public/assets/jordan.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a9774a57078508dbcd1626463b1deb84bd468d7b81111b568a966ef3baa56051
+ size 1248841
frontend/public/assets/jordan_300.avif ADDED
frontend/public/assets/medgemma.avif ADDED
frontend/public/assets/patients_and_conditions.json ADDED
@@ -0,0 +1,46 @@
+ {
+   "patients": [
+     {
+       "id": 1,
+       "name": "Jordon Dubois",
+       "gender": "Male",
+       "age": 35,
+       "existing_condition": "Depression",
+       "img": "/assets/jordan.avif",
+       "video": "/assets/jordan.mp4",
+       "headshot": "/assets/jordan_300.avif",
+       "fhirFile": "/assets/jason_fhir.json",
+       "voice": "Algenib"
+     },
+     {
+       "id": 2,
+       "name": "Alex Sharma",
+       "gender": "Female",
+       "age": 63,
+       "existing_condition": "Diabetes",
+       "img": "/assets/alex.avif",
+       "video": "/assets/alex.mp4",
+       "headshot": "/assets/alex_300.avif",
+       "fhirFile": "/assets/alex_fhir.json",
+       "voice": "Gacrux"
+     },
+     {
+       "id": 3,
+       "name": "Sacha Silva",
+       "gender": "Female",
+       "age": 24,
+       "existing_condition": "Asthma",
+       "img": "/assets/sacha.avif",
+       "video": "/assets/sacha.mp4",
+       "headshot": "/assets/sacha_150.avif",
+       "fhirFile": "/assets/sacha_fhir.json",
+       "voice": "Callirrhoe"
+     }
+   ],
+   "conditions": [
+     { "name": "Flu", "description": "A common and contagious respiratory illness caused by a virus that can lead to fever, body aches, and fatigue." },
+     { "name": "Malaria", "description": "A serious disease spread by mosquitoes that causes recurring fevers and chills due to a parasite infecting red blood cells." },
+     { "name": "Migraine", "description": "A type of severe headache often accompanied by throbbing pain, sensitivity to light and sound, and sometimes nausea." },
+     { "name": "Serotonin Syndrome", "description": "A potentially dangerous reaction caused by too much serotonin in the brain, often due to certain medications, leading to symptoms like agitation, rapid heart rate, and confusion." }
+   ]
+ }
frontend/public/assets/sacha.avif ADDED
frontend/public/assets/sacha.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c722df889fafc7646d27e8fc9d15c279df43a0be68991f122ed17e096745a867
+ size 751974
frontend/public/assets/sacha_150.avif ADDED
frontend/public/assets/sacha_fhir.json ADDED
@@ -0,0 +1,266 @@
1
+ {
2
+ "resourceType": "Bundle",
3
+ "type": "collection",
4
+ "entry": [
5
+ {
6
+ "resource": {
7
+ "resourceType": "Patient",
8
+ "id": "sacha-silva-patient",
9
+ "identifier": [
10
+ {
11
+ "system": "http://example.org/mrn",
12
+ "value": "1234567"
13
+ },
14
+ {
15
+ "system": "http://hl7.org/fhir/sid/us-ssn",
16
+ "value": "999-88-7777"
17
+ }
18
+ ],
19
+ "name": [
20
+ {
21
+ "family": "Silva",
22
+ "given": [
23
+ "Sacha"
24
+ ]
25
+ }
26
+ ],
27
+ "gender": "female",
28
+ "birthDate": "1993-10-27",
29
+ "address": [
30
+ {
31
+ "line": [
32
+ "123 Main Street"
33
+ ],
34
+ "city": "Anytown",
35
+ "state": "CA",
36
+ "postalCode": "91234",
37
+ "country": "US"
38
+ }
39
+ ],
40
+ "telecom": [
41
+ {
42
+ "system": "phone",
43
+ "value": "555-123-4567",
44
+ "use": "home"
45
+ },
46
+ {
47
+ "system": "email",
48
+ "value": "sacha.silva@example.com",
49
+ "use": "home"
50
+ }
51
+ ]
52
+ },
53
+ "request": {
54
+ "method": "PUT",
55
+ "url": "Patient/sacha-silva-patient"
56
+ }
57
+ },
58
+ {
59
+ "resource": {
60
+ "resourceType": "Condition",
61
+ "id": "asthma-condition",
62
+ "clinicalStatus": {
63
+ "coding": [
64
+ {
65
+ "system": "http://terminology.hl7.org/CodeSystem/condition-clinical",
66
+ "code": "active",
67
+ "display": "Active"
68
+ }
69
+ ]
70
+ },
71
+ "verificationStatus": {
72
+ "coding": [
73
+ {
74
+ "system": "http://terminology.hl7.org/CodeSystem/condition-ver-status",
75
+ "code": "confirmed",
76
+ "display": "Confirmed"
77
+ }
78
+ ]
79
+ },
80
+ "category": [
81
+ {
82
+ "coding": [
83
+ {
84
+ "system": "http://terminology.hl7.org/CodeSystem/condition-category",
85
+ "code": "problem-list-item",
86
+ "display": "Problem List Item"
87
+ }
88
+ ]
89
+ }
90
+ ],
91
+ "code": {
92
+ "coding": [
93
+ {
94
+ "system": "http://snomed.info/sct",
95
+ "code": "195967001",
96
+ "display": "Asthma"
97
+ }
98
+ ],
99
+ "text": "Asthma"
100
+ },
101
+ "subject": {
102
+ "reference": "Patient/sacha-silva-patient",
103
+ "display": "Sacha Silva"
104
+ },
105
+ "onsetDateTime": "2010-05-15"
106
+ },
107
+ "request": {
108
+ "method": "PUT",
109
+ "url": "Condition/asthma-condition"
110
+ }
111
+ },
112
+ {
113
+ "resource": {
114
+ "resourceType": "Encounter",
115
+ "id": "asthma-encounter-1",
116
+ "status": "finished",
117
+ "class": {
118
+ "system": "http://terminology.hl7.org/CodeSystem/v3-ActCode",
119
+ "code": "AMB",
120
+ "display": "Ambulatory"
121
+ },
122
+ "type": [
123
+ {
124
+ "coding": [
125
+ {
126
+ "system": "http://snomed.info/sct",
127
+ "code": "308335008",
128
+ "display": "Patient encounter procedure"
129
+ }
130
+ ],
131
+ "text": "Asthma Follow-up"
132
+ }
133
+ ],
134
+ "subject": {
135
+ "reference": "Patient/sacha-silva-patient",
136
+ "display": "Sacha Silva"
137
+ },
138
+ "period": {
139
+ "start": "2023-08-10T10:00:00-07:00",
140
+ "end": "2023-08-10T10:30:00-07:00"
141
+ },
142
+ "reasonCode": [
143
+ {
144
+ "coding": [
145
+ {
146
+ "system": "http://snomed.info/sct",
147
+ "code": "266253002",
148
+ "display": "Asthma exacerbation"
149
+ }
150
+ ],
151
+ "text": "Asthma Exacerbation"
152
+ }
153
+ ]
154
+ },
155
+ "request": {
156
+ "method": "PUT",
157
+ "url": "Encounter/asthma-encounter-1"
158
+ }
159
+ },
160
+ {
161
+ "resource": {
162
+ "resourceType": "MedicationRequest",
163
+ "id": "albuterol-mr",
164
+ "status": "active",
165
+ "intent": "order",
166
+ "medicationCodeableConcept": {
167
+ "coding": [
168
+ {
169
+ "system": "http://www.nlm.nih.gov/research/umls/rxnorm",
170
+ "code": "207182",
171
+ "display": "Albuterol 90 mcg/actuation Metered Dose Inhaler"
172
+ }
173
+ ],
174
+ "text": "Albuterol Inhaler"
175
+ },
176
+ "subject": {
177
+ "reference": "Patient/sacha-silva-patient",
178
+ "display": "Sacha Silva"
179
+ },
180
+ "encounter": {
181
+ "reference": "Encounter/asthma-encounter-1",
182
+ "display": "Asthma Follow-up"
183
+ },
184
+ "authoredOn": "2023-08-10T10:30:00-07:00",
185
+ "requester": {
186
+ "reference": "Practitioner/dr-jane-doe",
187
+ "display": "Jane Doe, MD"
188
+ },
189
+ "dosageInstruction": [
190
+ {
191
+ "sequence": 1,
192
+ "text": "2 puffs every 4-6 hours as needed for wheezing",
193
+ "timing": {
194
+ "repeat": {
195
+ "frequency": 4,
196
+ "period": 1,
197
+ "periodUnit": "h"
198
+ }
199
+ },
200
+ "doseQuantity": {
201
+ "value": 2,
202
+ "unit": "puff",
203
+ "system": "http://unitsofmeasure.org",
204
+ "code": "{puff}"
205
+ },
206
+ "asNeededCodeableConcept": {
207
+ "coding": [
208
+ {
209
+ "system": "http://snomed.info/sct",
210
+ "code": "267036007",
211
+ "display": "Wheezing"
212
+ }
213
+ ],
214
+ "text": "Wheezing"
215
+ }
216
+ }
217
+ ],
218
+ "dispenseRequest": {
219
+ "quantity": {
220
+ "value": 1,
221
+ "unit": "inhaler",
222
+ "system": "http://terminology.hl7.org/CodeSystem/v3-orderableDrugForm",
223
+ "code": "INH"
224
+ },
225
+ "expectedSupplyDuration": {
226
+ "value": 30,
227
+ "unit": "days",
228
+ "system": "http://unitsofmeasure.org",
229
+ "code": "d"
230
+ }
231
+ }
232
+ },
233
+ "request": {
234
+ "method": "PUT",
235
+ "url": "MedicationRequest/albuterol-mr"
236
+ }
237
+ },
238
+ {
239
+ "resource": {
240
+ "resourceType": "Practitioner",
241
+ "id": "dr-jane-doe",
242
+ "name": [
243
+ {
244
+ "family": "Doe",
245
+ "given": [
246
+ "Jane"
247
+ ],
248
+ "prefix": [
249
+ "Dr."
250
+ ]
251
+ }
252
+ ],
253
+ "identifier": [
254
+ {
255
+ "system": "http://example.org/npi",
256
+ "value": "1234567890"
257
+ }
258
+ ]
259
+ },
260
+ "request": {
261
+ "method": "PUT",
262
+ "url": "Practitioner/dr-jane-doe"
263
+ }
264
+ }
265
+ ]
266
+ }
frontend/public/assets/welcome_bottom_graphics.svg ADDED
frontend/public/assets/welcome_graphics.svg ADDED
frontend/public/assets/welcome_top_graphics.svg ADDED
frontend/public/index.html ADDED
@@ -0,0 +1,29 @@
+ <!DOCTYPE html>
+ <!--
+  Copyright 2025 Google LLC
+
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+ -->
+
+ <html lang="en">
+ <head>
+   <meta charset="utf-8" />
+   <title>AppointReady</title>
+   <meta name="viewport" content="width=device-width, initial-scale=1" />
+   <link href="https://fonts.googleapis.com/icon?family=Material+Icons" rel="stylesheet">
+ </head>
+ <body>
+   <noscript>You need to enable JavaScript to run this app.</noscript>
+   <div id="root"></div>
+ </body>
+ </html>
frontend/src/App.js ADDED
@@ -0,0 +1,88 @@
+ /**
+  * Copyright 2025 Google LLC
+  *
+  * Licensed under the Apache License, Version 2.0 (the "License");
+  * you may not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  * http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+
+ import React, { useState } from 'react';
+ import WelcomePage from './components/WelcomePage/WelcomePage';
+ import PatientBuilder from './components/PatientBuilder/PatientBuilder';
+ import RolePlayDialogs from './components/RolePlayDialogs/RolePlayDialogs';
+ import Interview from './components/Interview/Interview';
+ import PreloadImages from './components/PreloadImages';
+
+ const App = () => {
+   const [currentPage, setCurrentPage] = useState('welcome');
+   const [selectedPatient, setSelectedPatient] = useState(null);
+   const [selectedCondition, setSelectedCondition] = useState(null);
+
+   const handleSwitchPage = () => {
+     setCurrentPage('patientBuilder');
+   };
+
+   const handleSwitchToRolePlayDialogs = () => {
+     setCurrentPage('rolePlayDialogs');
+   };
+
+   const handleSwitchToInterview = () => {
+     setCurrentPage('interview');
+   };
+
+   const imageList = [
+     '/assets/gemini.avif',
+     '/assets/medgemma.avif',
+     '/assets/ai_headshot.svg',
+     '/assets/jordan_300.avif',
+     '/assets/alex_300.avif',
+     '/assets/sacha_150.avif',
+     '/assets/jordan.avif',
+     '/assets/alex.avif',
+     '/assets/sacha.avif'
+   ];
+
+   return (
+     <PreloadImages imageSources={imageList}>
+       {currentPage === 'welcome' ? (
+         <WelcomePage
+           onSwitchPage={handleSwitchPage}
+           setSelectedPatient={setSelectedPatient}
+           setSelectedCondition={setSelectedCondition}
+         />
+       ) : currentPage === 'patientBuilder' ? (
+         <PatientBuilder
+           selectedPatient={selectedPatient}
+           selectedCondition={selectedCondition}
+           setSelectedPatient={setSelectedPatient}
+           setSelectedCondition={setSelectedCondition}
+           onNext={handleSwitchToRolePlayDialogs}
+           onBack={() => setCurrentPage('welcome')} // Back to WelcomePage
+         />
+       ) : currentPage === 'rolePlayDialogs' ? (
+         <RolePlayDialogs
+           selectedPatient={selectedPatient}
+           selectedCondition={selectedCondition}
+           onStart={handleSwitchToInterview}
+           onBack={() => setCurrentPage('patientBuilder')} // Back to PatientBuilder
+         />
+       ) : currentPage === 'interview' ? (
+         <Interview
+           selectedPatient={selectedPatient}
+           selectedCondition={selectedCondition}
+           onBack={() => setCurrentPage('rolePlayDialogs')} // Back to RolePlayDialogs
+         />
+       ) : null}
+     </PreloadImages>
+   );
+ };
+
+ export default App;
frontend/src/components/DetailsPopup/DetailsPopup.css ADDED
@@ -0,0 +1,46 @@
+ /**
+  * Copyright 2025 Google LLC
+  *
+  * Licensed under the Apache License, Version 2.0 (the "License");
+  * you may not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  * http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+
+ .popup-close-button {
+   position: absolute;
+   top: 10px;
+   right: 15px;
+   background: transparent;
+   border: none;
+   font-size: 24px;
+   cursor: pointer;
+   color: #888;
+ }
+
+ .popup-close-button:hover {
+   color: #000;
+ }
+
+ .details-popup-content h4 {
+   margin-top: 20px;
+   margin-bottom: 10px;
+   color: #333;
+ }
+
+ .details-popup-content ul {
+   list-style-type: none;
+   padding-left: 0;
+ }
+
+ .details-popup-content li {
+   margin-bottom: 8px;
+   color: #555;
+ }
frontend/src/components/DetailsPopup/DetailsPopup.js ADDED
@@ -0,0 +1,76 @@
+ /**
+  * Copyright 2025 Google LLC
+  *
+  * Licensed under the Apache License, Version 2.0 (the "License");
+  * you may not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  * http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+
+ import React from 'react';
+ import './DetailsPopup.css';
+
+ const DetailsPopup = ({ isOpen, onClose }) => {
+   if (!isOpen) {
+     return null;
+   }
+
+   return (
+     <div className="popup-overlay" onClick={onClose}>
+       <div className="popup-content" onClick={(e) => e.stopPropagation()}>
+         <button className="popup-close-button" onClick={onClose}>&times;</button>
+         <h2 id="dialog-title" className="dialog-title-text">Details About This Demo</h2>
+         <p><b>The Model:</b> This demo features Google's MedGemma-27B, a Gemma 3-based model
+           fine-tuned for comprehending medical text. It demonstrates MedGemma's ability to
+           accelerate the development of AI-powered healthcare applications by offering advanced
+           interpretation of medical data.</p>
+         <p><b>Accessing and Using the Model:</b> Google's MedGemma-27B is available on <a
+           href="https://huggingface.co/google/medgemma-27b-text-it" target="_blank" rel="noopener noreferrer">HuggingFace<img
+           className="hf-logo"
+           src="https://huggingface.co/datasets/huggingface/brand-assets/resolve/main/hf-logo.svg" />
+           </a> and is easily deployable via&nbsp;
+           <a href="https://console.cloud.google.com/vertex-ai/publishers/google/model-garden/medgemma" target="_blank" rel="noopener noreferrer">Model
+           Garden <img className="hf-logo"
+           src="https://www.gstatic.com/cloud/images/icons/apple-icon.png" /></a>.
+           Learn more about using the model and its limitations on the <a
+           href="https://developers.google.com/health-ai-developer-foundations?referral=appoint-ready"
+           target="_blank" rel="noopener noreferrer">HAI-DEF
+           developer site</a>.
+         </p>
+         <p><b>Health AI Developer Foundations (HAI-DEF)</b> provides a collection of open-weight models and
+           companion resources to empower developers in building AI models for healthcare.</p>
+         <p><b>Share this Demo:</b> If you find this demonstration valuable, we encourage you to share it on
+           social media.
+           <small>
+             &nbsp;<a href="https://www.linkedin.com/shareArticle?mini=true&url=https://huggingface.co/spaces/google/appoint-ready&text=%23MedGemma%20%23MedGemmaDemo" target="_blank" rel="noopener noreferrer">LinkedIn</a>
+             &nbsp;<a href="http://www.twitter.com/share?url=https://huggingface.co/spaces/google/appoint-ready&hashtags=MedGemma,MedGemmaDemo" target="_blank" rel="noopener noreferrer">X/Tweet</a>
+           </small>
+         </p>
+         <p><b>Explore More Demos:</b> Discover additional demonstrations on HuggingFace Spaces or via Colabs:
+         </p>
+         <ul>
+           <li><a href="https://huggingface.co/collections/google/hai-def-concept-apps-6837acfccce400abe6ec26c1"
+             target="_blank" rel="noopener noreferrer">
+             Collection of concept apps <img className="hf-logo" src="https://huggingface.co/datasets/huggingface/brand-assets/resolve/main/hf-logo.svg" />
+             </a> built around HAI-DEF open models to inspire the community.</li>
+           <li><a href="https://github.com/Google-Health/medgemma/tree/main/notebooks/fine_tune_with_hugging_face.ipynb" target="_blank" rel="noopener noreferrer">
64
+ Finetune MedGemma Colab <img className="hf-logo"
65
+ src="https://upload.wikimedia.org/wikipedia/commons/d/d0/Google_Colaboratory_SVG_Logo.svg" /></a>
66
+ -
67
+ See an example of how to fine-tune this model.</li>
68
+ </ul>
69
+ For more technical details about this demo, please refer to the <a href="https://huggingface.co/spaces/google/appoint-ready/blob/main/README.md#table-of-contents" target="_blank" rel="noopener noreferrer">README</a> file in the repository.
70
+ <button className="popup-button" onClick={onClose}>Close</button>
71
+ </div>
72
+ </div>
73
+ );
74
+ };
75
+
76
+ export default DetailsPopup;
frontend/src/components/Interview/Interview.css ADDED
@@ -0,0 +1,282 @@
1
+ /**
2
+ * Copyright 2025 Google LLC
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+
18
+
19
+ .page.interview-page {
20
+ height: 100%;
21
+ max-height: 100%;
22
+ }
23
+
24
+ .interview-container {
25
+ padding: 20px;
26
+ font-family: Arial, sans-serif;
27
+ background-color: #f5f5f5;
28
+ }
29
+
30
+ .interview-split-container {
31
+ display: flex;
32
+ flex-direction: row;
33
+ height: 100%;
34
+ }
35
+
36
+ .interview-left-section {
37
+ border-right: 2px solid #e0e0e0;
38
+ display: flex;
39
+ flex-direction: row;
40
+ min-width: 550px;
41
+ max-width: 600px;
42
+ }
43
+
44
+ .toggle-icon {
45
+ vertical-align: text-top;
46
+ }
47
+
48
+ .interview-page .header2 span {
49
+ font-size: 14px;
50
+ font-weight: 100;
51
+ vertical-align: text-top;
52
+ animation: fadeIn 0.4s cubic-bezier(0.4, 0, 0.2, 1) forwards;
53
+ }
54
+
55
+ .interview-right-section {
56
+ display: flex;
57
+ flex-direction: column;
58
+ width: 60%;
59
+ min-width: 550px;
60
+ flex-grow: 2;
61
+ padding-left: 20px;
62
+ justify-content: space-between;
63
+ height: 100%;
64
+ gap: 5px;
65
+ }
66
+
67
+ .interview-header-panel {
68
+ flex: 0 0 320px;
69
+ display: flex;
70
+ flex-direction: column;
71
+ justify-content: flex-start;
72
+ padding: 32px 24px 0 0;
73
+ box-sizing: border-box;
74
+ background: #f5f5f5;
75
+ }
76
+
77
+ .interview-chat-panel {
78
+ flex: 1 1 0;
79
+ display: flex;
80
+ flex-direction: column;
81
+ justify-content: flex-start;
82
+ min-width: 0;
83
+ min-height: 0;
84
+ }
85
+
86
+ .chat-container {
87
+ flex: 1;
88
+ overflow-y: auto;
89
+ display: flex;
90
+ flex-direction: column;
91
+ gap: 10px;
92
+ width: 100%;
93
+ padding-right: 20px;
94
+ }
95
+
96
+ .chat-header {
97
+ display: flex;
98
+ justify-content: space-between;
99
+ gap: 10px;
100
+ margin-top: 20px;
101
+ margin: 20px 30px 0 30px;
102
+ width: -webkit-fill-available;
103
+ }
104
+
105
+ .chat-message-wrapper {
106
+ display: flex;
107
+ align-items: center;
108
+ gap: 10px;
109
+ }
110
+
111
+ /* Fade-in animation for new chat messages */
112
+ @keyframes fadeIn {
113
+ from { opacity: 0; }
114
+ to { opacity: 1; }
115
+ }
116
+
117
+ .chat-message-wrapper.fade-in {
118
+ animation: fadeIn 0.5s ease;
119
+ }
120
+
121
+ .chat-message-wrapper.patient {
122
+ align-self: end;
123
+ }
124
+
125
+ .chat-bubble {
126
+ padding: 10px 15px;
127
+ font-size: 16px;
128
+ line-height: 1.4;
129
+ flex: 1;
130
+
131
+ }
132
+
133
+ .patient .chat-bubble {
134
+ background-color: #eaeaea;
135
+ margin-right: 5px;
136
+ border-radius: 8px;
137
+ background: #F5F5F5;
138
+ }
139
+
140
+ .chat-avatar {
141
+ width: 30px;
142
+ height: 30px;
143
+ object-fit: cover;
144
+ border-radius: 50%;
145
+ background-color: #E8DEF8;
146
+ }
147
+
148
+ .interviewer .chat-avatar {
149
+ padding: 5px;
150
+ }
151
+
152
+ .patient .chat-avatar {
153
+ width: 40px;
154
+ height: 40px;
155
+ border-color: rgb(47, 95, 207);
156
+ }
157
+
158
+ .report-content {
159
+ padding: 20px;
160
+ overflow-y: auto;
161
+ flex: 1 1 0;
162
+ min-height: 0;
163
+ border-radius: 28px;
164
+ border: 2px solid #E9E9E9;
165
+ box-shadow: 0px 4px 4px 0px rgba(0, 0, 0, 0.25);
166
+ }
167
+
168
+ .report-content pre {
169
+ white-space: pre-wrap; /* CSS3 */
170
+ white-space: -moz-pre-wrap; /* Mozilla, since 1999 */
171
+ white-space: -pre-wrap; /* Opera 4-6 */
172
+ white-space: -o-pre-wrap; /* Opera 7 */
173
+ word-wrap: break-word; /* Internet Explorer 5.5+ */
174
+ }
175
+
176
+ .thinking .chat-bubble {
177
+ display: flex;
178
+ flex-direction: column;
179
+ gap: 10px;
180
+ background-color: #E8DEF8;
181
+ padding: 20px;
182
+ border-radius: 8px;
183
+ min-width: 40px;
184
+ min-height: 40px;
185
+ position: relative;
186
+ color: #555;
187
+ border: none;
188
+ font-weight: 100;
189
+ }
190
+
191
+ .thinking-header {
192
+ font-weight: 500;
193
+ }
194
+
195
+ .chat-waiting-indicator {
196
+ color: #888;
197
+ font-size: 20px;
198
+ text-align: center;
199
+ margin: 60px 0;
200
+ font-style: italic;
201
+ opacity: 0.8;
202
+ }
203
+
204
+ .evaluate-button {
205
+ background-color: #C8B3FD;
206
+ color: #4E3B7B;
207
+ border-radius: 8px;
208
+ border-style: none;
209
+ padding: 6px;
210
+ font-size: 16px;
211
+ }
212
+
213
+ @keyframes fadeInOpacity {
214
+ 0% { opacity: 0; font-size: 0; }
215
+ 20% { opacity: 0; font-size: 1em; }
216
+ 100% { opacity: 1; font-size: 1em; }
217
+ }
218
+
219
+ /* New keyframes to unset text color after a delay */
220
+ @keyframes unsetColor {
221
+ to { color: unset; }
222
+ }
223
+
224
+ .add {
225
+ color: green;
226
+ animation: fadeInOpacity 1s forwards, unsetColor 0s forwards 5s;
227
+ }
228
+
229
+ @keyframes removeAnim {
230
+ 0% { opacity: 1; font-size: 1em; }
231
+ 80% { opacity: 0; font-size: 1em; }
232
+ 99% { font-size: 0.2em; }
233
+ 100% { opacity: 0; font-size: 0; display: none; }
234
+ }
235
+
236
+ .remove {
237
+ color: red;
238
+ text-decoration: line-through;
239
+ animation: removeAnim 1s forwards 5s;
240
+ }
241
+
242
+ .warning-icon {
243
+ color: #444746;
244
+ }
245
+
246
+ .disclaimer-container {
247
+ border-radius: 8px;
248
+ background: #FEF7E0;
249
+ display: flex;
250
+ align-items: center;
251
+ gap: 20px;
252
+ padding: 13px;
253
+ font-size: 12px;
254
+ width: 100%;
255
+ }
256
+
257
+ .helpful {
258
+ border-radius: 14.272px;
259
+ background: #C4EED0;
260
+ mix-blend-mode: multiply;
261
+ display: inline-block;
262
+ padding: 0 5px;
263
+ }
264
+
265
+ .missing {
266
+ border-radius: 14.272px;
267
+ background: #FFE07C;
268
+ mix-blend-mode: multiply;
269
+ display: inline-block;
270
+ padding: 0 5px;
271
+ }
272
+
273
+ .evaluation-text {
274
+ font-style: italic;
275
+ padding-bottom: 30px;
276
+ }
277
+
278
+ .evaluation-text::after {
279
+ content: "***";
280
+ }
281
+
282
+
frontend/src/components/Interview/Interview.js ADDED
@@ -0,0 +1,491 @@
1
+ /**
2
+ * Copyright 2025 Google LLC
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ import React, { useState, useEffect, useRef } from "react";
18
+ import { marked } from "marked";
19
+ import parse from "html-react-parser";
20
+ import { diffArrays, diffWords } from "diff";
21
+ import "./Interview.css";
22
+ import DetailsPopup from "../DetailsPopup/DetailsPopup";
23
+
24
+ const Interview = ({ selectedPatient, selectedCondition, onBack }) => {
25
+ const [messages, setMessages] = useState([]);
26
+ const [isInterviewComplete, setIsInterviewComplete] = useState(false);
27
+ const [showEvaluation, setShowEvaluation] = useState(false);
28
+ const [isAudioEnabled, setIsAudioEnabled] = useState(true);
29
+ const [evaluation, setEvaluation] = useState('');
30
+ const [isFetchingEvaluation, setIsFetchingEvaluation] = useState(false);
31
+ const [currentReport, setCurrentReport] = useState("");
32
+ const [prevReport, setPrevReport] = useState("");
33
+ const [waitTime, setWaitTime] = useState(3000);
34
+ const [showEvaluationInfoPopup, setShowEvaluationInfoPopup] = useState(false);
35
+ const [isDetailsPopupOpen, setIsDetailsPopupOpen] = useState(false);
36
+ const chatContainerRef = useRef(null);
37
+ const reportContentRef = useRef(null);
38
+ const lastMessageRef = useRef(null);
39
+ const messageQueue = useRef([]);
40
+ const eventSourceRef = useRef(null);
41
+ const timeoutIdRef = useRef(null);
42
+
43
+ const currentPlayingAudio = useRef(null); // To keep track of the currently playing audio instance
44
+ const isAudioEnabledRef = useRef(isAudioEnabled);
45
+ useEffect(() => {
46
+ isAudioEnabledRef.current = isAudioEnabled;
47
+ }, [isAudioEnabled]);
48
+ const waitTimeRef = useRef(waitTime);
49
+ useEffect(() => {
50
+ waitTimeRef.current = waitTime;
51
+ }, [waitTime]);
52
+
53
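+ // Drains the incoming message queue one entry at a time: entries with audio advance when playback ends (or fails), text-only entries advance after the current waitTime delay.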
+ const processQueue = React.useCallback(() => {
54
+ if (timeoutIdRef.current) {
55
+ clearTimeout(timeoutIdRef.current);
56
+ }
57
+
58
+ if (messageQueue.current.length === 0) {
59
+ // The queue is empty, so the processing chain for this batch is done.
60
+ // Clear the timeout ref so a new message can start a new chain.
61
+ timeoutIdRef.current = null;
62
+ setIsInterviewComplete(
63
+ eventSourceRef.current && eventSourceRef.current.readyState === EventSource.CLOSED
64
+ );
65
+ return;
66
+ }
67
+
68
+ const nextMessage = messageQueue.current.shift();
69
+
70
+ setMessages((prev) => [...prev, nextMessage]);
71
+
72
+ if (nextMessage.audio && isAudioEnabledRef.current) {
73
+ if (currentPlayingAudio.current) {
74
+ currentPlayingAudio.current.pause();
75
+ currentPlayingAudio.current.src = '';
76
+ }
77
+ const audio = new Audio(nextMessage.audio);
78
+ currentPlayingAudio.current = audio;
79
+
80
+ audio.onended = () => {
81
+ currentPlayingAudio.current = null;
82
+ processQueue();
83
+ };
84
+ audio.onerror = (e) => {
85
+ console.error("Audio playback error:", e);
86
+ currentPlayingAudio.current = null;
87
+ processQueue();
88
+ };
89
+ audio.play().catch(e => {
90
+ console.error("Error playing audio automatically:", e);
91
+ currentPlayingAudio.current = null;
92
+ processQueue();
93
+ });
94
+ } else {
95
+ // For non-audio, schedule the next processing call with a fixed delay
96
+ // to simulate reading time. This will call processQueue again, which will
97
+ // handle an empty queue and stop the chain if needed.
98
+ timeoutIdRef.current = setTimeout(processQueue, waitTimeRef.current);
99
+ }
100
+ }, [setMessages, setIsInterviewComplete]);
101
+
102
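+ // Open a server-sent events stream for the simulated interview; each event is a chat turn or report update that is queued for paced display, and an 'end' event closes the stream.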
+ useEffect(() => {
103
+ if (!selectedPatient || !selectedCondition) return;
104
+
105
+ setMessages([]);
106
+ setIsInterviewComplete(false);
107
+ messageQueue.current = [];
108
+ if (currentPlayingAudio.current) {
109
+ currentPlayingAudio.current.pause();
110
+ currentPlayingAudio.current = null;
111
+ }
112
+ // Prepend base URL if running on localhost:3000
113
+ const baseURL =
114
+ window.location.origin === "http://localhost:3000"
115
+ ? "http://localhost:7860"
116
+ : "";
117
+ const url = `${baseURL}/api/stream_conversation?patient=${encodeURIComponent(
118
+ selectedPatient.name
119
+ )}&condition=${encodeURIComponent(selectedCondition)}`;
120
+ const eventSource = new EventSource(url);
121
+ eventSourceRef.current = eventSource;
122
+
123
+ eventSource.onmessage = (event) => {
124
+ try {
125
+ const data = JSON.parse(event.data);
126
+
127
+ // Check if the parsed object is our special 'end' signal
128
+ if (data && data.event === 'end') {
129
+ console.log("Server signaled end of stream. Closing connection.");
130
+ eventSource.close();
131
+ processQueue();
132
+ return;
133
+ }
134
+ messageQueue.current.push(data);
135
+ // Always call processQueue after pushing a message, unless audio or timeout is active
136
+ if (!currentPlayingAudio.current && !timeoutIdRef.current) {
137
+ processQueue();
138
+ }
139
+ } catch (error) {
140
+ console.warn("Could not parse message data. Data received:", event.data, "Error:", error);
141
+ }
142
+ };
143
+
144
+ eventSource.onerror = (err) => {
145
+ console.error("EventSource failed:", err);
146
+ eventSource.close();
147
+ };
148
+
149
+
150
+ return () => {
151
+ if (eventSourceRef.current) {
152
+ eventSourceRef.current.close();
153
+ eventSourceRef.current = null;
154
+ }
155
+ if (timeoutIdRef.current) {
156
+ clearTimeout(timeoutIdRef.current);
157
+ timeoutIdRef.current = null;
158
+ }
159
+ // Ensure any playing audio is stopped when component unmounts or dependencies change
160
+ if (currentPlayingAudio.current) {
161
+ currentPlayingAudio.current.pause();
162
+ currentPlayingAudio.current = null;
163
+ }
164
+ };
165
+ }, [selectedPatient, selectedCondition, processQueue]);
166
+
167
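+ // Re-run queue processing whenever the pacing changes so the new delay takes effect immediately.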
+ useEffect(() => {
168
+ processQueue();
169
+ }, [waitTime, processQueue]);
170
+
171
+ useEffect(() => {
172
+ // Prevent body scroll when Interview is shown
173
+ document.body.style.overflowY = "clip";
174
+ return () => {
175
+ document.body.style.overflowY = "unset";
176
+ };
177
+ }, []);
178
+
179
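+ // Auto-scroll to the newest chat message, but only when the user is already near the bottom; report messages are ignored here.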
+ useEffect(() => {
180
+ if (chatContainerRef.current) {
181
+ const container = chatContainerRef.current;
182
+ const lastMessage = messages[messages.length - 1];
183
+ if (lastMessage && lastMessage.speaker === "report") {
184
+ return;
185
+ }
186
+
187
+ const isNearBottom =
188
+ container.scrollHeight - container.scrollTop - container.clientHeight <
189
+ container.clientHeight;
190
+ if (isNearBottom && messages.length > 0) {
191
+ lastMessageRef.current.scrollIntoView({
192
+ behavior: "smooth",
193
+ block: "end",
194
+ });
195
+ }
196
+ }
197
+ }, [messages]);
198
+
199
+ // Update report on new messages
200
+ useEffect(() => {
201
+ const reportMessages = messages.filter((msg) => msg.speaker === "report");
202
+ if (reportMessages.length > 0) {
203
+ const latestReportMessageText =
204
+ reportMessages[reportMessages.length - 1].text;
205
+ const newReport = marked(latestReportMessageText.trim());
206
+ if (newReport !== currentReport) {
207
+ setPrevReport(currentReport);
208
+ setCurrentReport(newReport);
209
+ }
210
+ }
211
+ }, [messages, currentReport]);
212
+
213
+ // Updated diff function to tokenize HTML and use nested diffWords for text changes
214
+ const getDiffReport = () => {
215
+ // Tokenize HTML into tags and text parts
216
+ const tokenizeHTML = (html) => html.match(/(<[^>]+>|[^<]+)/g) || [];
217
+ const tokensPrev = tokenizeHTML(prevReport);
218
+ const tokensCurrent = tokenizeHTML(currentReport);
219
+ const diffParts = diffArrays(tokensPrev, tokensCurrent);
220
+
221
+ let result = "";
222
+ for (let i = 0; i < diffParts.length; i++) {
223
+ // If a removed part is immediately followed by an added part,
224
+ // and both are plain text (not an HTML tag), apply inner diffWords.
225
+ if (
226
+ diffParts[i].removed &&
227
+ i + 1 < diffParts.length &&
228
+ diffParts[i + 1].added
229
+ ) {
230
+ const removedText = diffParts[i].value.join("");
231
+ const addedText = diffParts[i + 1].value.join("");
232
+ // Check if both parts are not HTML tags
233
+ if (
234
+ (!/^<[^>]+>$/.test(removedText) && !/^<[^>]+>$/.test(addedText))
235
+ ) {
236
+ const innerDiff = diffWords(removedText, addedText);
237
+ const innerResult = innerDiff
238
+ .map((part) => {
239
+ if (part.added) {
240
+ return `<span class="add">${part.value}</span>`;
241
+ } else if (part.removed) {
242
+ return `<span class="remove">${part.value}</span>`;
243
+ }
244
+ return part.value;
245
+ })
246
+ .join("");
247
+ result += innerResult;
248
+ i++;
249
+ continue;
250
+ }
251
+ }
252
+ if (diffParts[i].added) {
253
+ result += `<span class="add">${diffParts[i].value.join("")}</span>`;
254
+ } else if (diffParts[i].removed) {
255
+ result += `<span class="remove">${diffParts[i].value.join("")}</span>`;
256
+ } else {
257
+ result += diffParts[i].value.join("");
258
+ }
259
+ }
260
+ return result;
261
+ };
262
+
263
+ // Fetch evaluation when showEvaluation is triggered
264
+ useEffect(() => {
265
+ if (!showEvaluation) return;
266
+ setIsFetchingEvaluation(true);
267
+ setEvaluation('');
268
+ // Get latest report
269
+ const reportMessages = messages.filter((msg) => msg.speaker === "report");
270
+ const report =
271
+ reportMessages.length > 0
272
+ ? marked(reportMessages[reportMessages.length - 1].text.trim())
273
+ : "<p>No report available.</p>";
274
+ // Prepend base URL if running on localhost:3000
275
+ const baseURL = window.location.origin === "http://localhost:3000" ? "http://localhost:7860" : "";
276
+ fetch(`${baseURL}/api/evaluate_report`, {
277
+ method: 'POST',
278
+ headers: { 'Content-Type': 'application/json' },
279
+ body: JSON.stringify({
280
+ report,
281
+ condition: selectedCondition
282
+ })
283
+ })
284
+ .then(response => response.json())
285
+ .then(data => {
286
+ setEvaluation(data.evaluation.replace('```html\n','').replace('\n```',''));
287
+ setIsFetchingEvaluation(false);
288
+ })
289
+ .catch(error => {
290
+ setEvaluation('Error fetching evaluation.');
291
+ setIsFetchingEvaluation(false);
292
+ });
293
+ }, [showEvaluation, messages, selectedCondition]);
294
+
295
+ // Scroll report-content to bottom when evaluate button appears
296
+ useEffect(() => {
297
+ if (isInterviewComplete && reportContentRef.current) {
298
+ reportContentRef.current.scrollTop = reportContentRef.current.scrollHeight;
299
+ }
300
+ }, [isInterviewComplete]);
301
+
302
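+ // Toggle the per-message delay between fast (1s) and normal (3s) pacing.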
+ const handleToggleWaitTime = () => {
303
+ setWaitTime((prev) => (prev === 1000 ? 3000 : 1000));
304
+ };
305
+
306
+ const handleToggleAudio = () => {
307
+ setIsAudioEnabled(prev => {
308
+ const isNowEnabled = !prev;
309
+ // If we are disabling audio and something is playing, stop it and continue the queue.
310
+ if (!isNowEnabled && currentPlayingAudio.current) {
311
+ currentPlayingAudio.current.pause();
312
+ currentPlayingAudio.current.src = '';
313
+ currentPlayingAudio.current = null;
314
+ }
315
+ return isNowEnabled;
316
+ });
317
+ };
318
+
319
+ const playAudio = (audioDataUrl) => {
320
+ if (audioDataUrl) {
321
+ const audio = new Audio(audioDataUrl);
322
+ audio.play().catch(e => {
323
+ console.error("Error playing audio:", e);
324
+ });
325
+ }
326
+ };
327
+
328
+ return (
329
+ <div className="page interview-page">
330
+ <div className="headerButtonsContainer">
331
+ <button className="back-button" onClick={onBack}>
332
+ <i className="material-icons back-button-icon">keyboard_arrow_left</i>
333
+ Back
334
+ </button>
335
+ <button className="details-button" onClick={() => setIsDetailsPopupOpen(true)}>
336
+ <i className="material-icons code-block-icon">code</i>&nbsp; Details
337
+ about this Demo
338
+ </button>
339
+ </div>
340
+ <div className="frame">
341
+ <div className="interview-split-container">
342
+ {/* Top: Interview Chat */}
343
+ <div className="interview-left-section">
344
+ {/* Right: Chat */}
345
+ <div className="interview-chat-panel">
346
+ <div className="header2">
347
+ Simulated Interview
348
+ &nbsp;
349
+ <i
350
+ className="material-icons toggle-icon"
351
+ style={{
352
+ cursor: "pointer",
353
+ color: isAudioEnabled ? "#1976d2" : "#888",
354
+ }}
355
+ title={`Click to ${
356
+ isAudioEnabled ? "disable" : "enable"
357
+ } audio`}
358
+ onClick={handleToggleAudio}
359
+ >
360
+ {isAudioEnabled ? "volume_up" : "volume_off"}
361
+ </i>
362
+ {isAudioEnabled && (<span>audio by Gemini TTS</span>)}
363
+ {!isAudioEnabled && (
364
+ <i
365
+ className="material-icons toggle-icon"
366
+ style={{
367
+ cursor: "pointer",
368
+ color: waitTime === 1000 ? "#1976d2" : "#888",
369
+ }}
370
+ title={`Click to ${
371
+ waitTime === 1000 ? "slow down" : "speed up"
372
+ } the interview`}
373
+ onClick={handleToggleWaitTime}
374
+ >
375
+ speed
376
+ </i>)}
377
+
378
+ </div>
379
+ <div className="chat-container" ref={chatContainerRef}>
380
+ {messages.length === 0 ? (
381
+ <div className="chat-waiting-indicator">
382
+ Waiting for the interview to start...
383
+ </div>
384
+ ) : (
385
+ messages
386
+ .filter((msg) => msg.speaker !== "report")
387
+ .map((msg, idx, filteredMessages) => (
388
+ <div
389
+ ref={idx === filteredMessages.length - 1 ? lastMessageRef : null}
390
+ className={`chat-message-wrapper ${msg.speaker}${idx === filteredMessages.length - 1 ? " fade-in" : ""}${msg.audio ? " has-audio" : ""}`}
391
+ key={idx}
392
+ >
393
+ {msg.speaker.includes("interviewer") && (
394
+ <img
395
+ className="chat-avatar"
396
+ src="assets/ai_headshot.svg"
397
+ alt="Interviewer"
398
+ />
399
+ )}
400
+ <div className={`chat-bubble ${msg.audio ? "with-audio" : ""}`}>
401
+ {msg.speaker.includes("thinking") && (
402
+ <div className="thinking-header">Thinking...</div>
403
+ )}
404
+ {msg.text}
405
+ </div>
406
+ {msg.speaker === "patient" && (
407
+ <img
408
+ className="chat-avatar"
409
+ src={selectedPatient.headshot}
410
+ alt={selectedPatient.name}
411
+ />
412
+ )}
413
+ </div>
414
+ ))
415
+ )}
416
+ </div>
417
+ </div>
418
+ </div>
419
+ {/* Right: Report Section */}
420
+ <div className="interview-right-section">
421
+ <div className="header2">Generated Report</div>
422
+ <div className="report-content" ref={reportContentRef}>
423
+ {/* Updated report rendering to show diff if available */}
424
+ <div
425
+ dangerouslySetInnerHTML={{
426
+ __html: prevReport ? getDiffReport() : currentReport,
427
+ }}
428
+ />
429
+ {isInterviewComplete && (
430
+ <button
431
+ className="evaluate-button"
432
+ onClick={() => setShowEvaluationInfoPopup(true)}
433
+ disabled={showEvaluation || showEvaluationInfoPopup}
434
+ ><i className="material-icons back-button-icon">keyboard_arrow_down</i>
435
+ View Report Evaluation
436
+ </button>
437
+ )}
438
+ <div className="evaluation-text">
439
+ {showEvaluation && (
440
+ isFetchingEvaluation
441
+ ? <div>Please wait...</div>
442
+ : parse(evaluation)
443
+ )}
444
+ </div>
445
+ </div>
446
+ <div className="disclaimer-container">
447
+ <i className="material-icons warning-icon">warning</i>
448
+ <div className="disclaimer-text">
449
+ This demonstration is for illustrative purposes of MedGemma’s baseline capabilities only. It does not represent a finished or approved product, is not intended to diagnose or suggest treatment of any disease or condition, and should not be used for medical advice.
450
+ </div>
451
+ </div>
452
+ </div>
453
+ </div>
454
+ </div>
455
+ {showEvaluationInfoPopup && (
456
+ <div className="popup-overlay">
457
+ <div className="popup-content">
458
+ <h2>About the Evaluation</h2>
459
+ <p>
460
+ Now we will ask MedGemma to evaluate its own performance at
461
+ generating this report. We will provide it with all the
462
+ information about {selectedPatient.name}, including their actual
463
+ diagnosis and aspects of condition history not included previously.
464
+ Using this new information, MedGemma will
465
+ highlight key facts it correctly included and identify other
466
+ information that would have been beneficial to add.
467
+ </p>
468
+ <p>
469
+ The purpose of this step is to provide non-medical users with a
470
+ sense of how well MedGemma did at this task. While the evaluation
471
+ is completed by MedGemma, the examples in this demo have also been
472
+ reviewed by clinicians for accuracy. Although MedGemma's evaluation
473
+ does not represent a consensus-based standard,
474
+ this illustration shows one approach developers could adopt
475
+ to evaluate quality and completeness.
476
+ </p>
477
+ <button className="popup-button" onClick={() => {
478
+ setShowEvaluationInfoPopup(false);
479
+ setShowEvaluation(true);
480
+ }}>Continue</button>
481
+ </div>
482
+ </div>)}
483
+ <DetailsPopup
484
+ isOpen={isDetailsPopupOpen}
485
+ onClose={() => setIsDetailsPopupOpen(false)}
486
+ />
487
+ </div>
488
+ );
489
+ };
490
+
491
+ export default Interview;
frontend/src/components/PatientBuilder/PatientBuilder.css ADDED
@@ -0,0 +1,205 @@
1
+ /**
2
+ * Copyright 2025 Google LLC
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #root {
18
+ display: flex;
19
+ justify-content: center;
20
+ }
21
+
22
+ .header2 {
23
+ font-size: 24px;
24
+ font-weight: bold;
25
+ margin-bottom: 10px;
26
+ }
27
+
28
+ .lighttext {
29
+ font-size: 15px;
30
+ }
31
+
32
+
33
+
34
+ .patient-builder-container {
35
+ font-family: Arial, sans-serif;
36
+ display: flex;
37
+ flex-direction: column;
38
+ position: relative;
39
+ gap: 20px;
40
+ width: min-content;
41
+ height: min-content;
42
+ }
43
+
44
+ .patient-list {
45
+ display: flex;
46
+ gap: 20px;
47
+ }
48
+
49
+ .patient-list {
50
+ justify-content: space-between;
51
+ }
52
+
53
+ .selection-section {
54
+ margin-bottom: 30px;
55
+ flex-direction: column;
56
+ width: 958px;
57
+ }
58
+
59
+ .condition-list {
60
+ align-items: stretch;
61
+ flex-direction: column;
62
+ gap: 10px;
63
+ margin-top: 10px;
64
+ display: grid;
65
+ grid-template-columns: 1fr 1fr;
66
+ }
67
+
68
+ .condition-card {
69
+ display: grid;
70
+ grid-template-columns: 100px 1fr;
71
+ align-items: center;
72
+ padding: 0 30px;
73
+ }
74
+
75
+ .condition-card {
76
+ background: #fff;
77
+ border: 2px solid #ddd;
78
+ border-radius: 10px;
79
+ padding: 10px;
80
+ cursor: pointer;
81
+ transition: transform 0.2s ease, border-color 0.2s ease;
82
+ box-shadow: 0 2px 5px rgba(0,0,0,0.1);
83
+ }
84
+
85
+ .patient-video-container {
86
+ position: relative;
87
+ cursor: pointer;
88
+ width: 300px;
89
+ height: 300px;
90
+ overflow: hidden;
91
+ border: 4px solid transparent;
92
+ border-radius: 12px;
93
+ transition: border-color 0.2s ease;
94
+ box-sizing: border-box;
95
+ }
96
+
97
+ .patient-video, .patient-img {
98
+ position: absolute;
99
+ top: 0;
100
+ left: 0;
101
+ width: 100%;
102
+ object-fit: cover;
103
+ border-radius: 8px;
104
+ transition: opacity 0.4s ease-in-out;
105
+ }
106
+
107
+ .ehr-label {
108
+ position: absolute;
109
+ bottom: 15px;
110
+ right: 10px;
111
+ border-radius: 4px;
112
+ border: 1px solid #C8B3FD;
113
+ background: #E8DEF8;
114
+ padding: 0 5px;
115
+ }
116
+
117
+ .patient-video-container:hover {
118
+ border-color: #aaa;
119
+ }
120
+
121
+ .condition-card:hover, .ehr-label:hover {
122
+ transform: scale(1.05);
123
+ border-color: #aaa;
124
+ }
125
+ .patient-video-container.selected {
126
+ border-color: #D0BCFF;
127
+ box-shadow: 0px 4px 4px 0px rgba(0, 0, 0, 0.25);
128
+ }
129
+
130
+ .condition-card.selected {
131
+ border: 4px solid #D0BCFF;
132
+ box-shadow: 0px 4px 4px 0px rgba(0, 0, 0, 0.25);
133
+ }
134
+
135
+ .go-button {
136
+ background: #0078D7;
137
+ color: #fff;
138
+ border: none;
139
+ padding: 10px 20px;
140
+ border-radius: 5px;
141
+ cursor: pointer;
142
+ font-size: 16px;
143
+ transition: background 0.3s ease;
144
+ }
145
+ .go-button:disabled {
146
+ background: #aaa;
147
+ cursor: not-allowed;
148
+ }
149
+ .go-button:hover:not(:disabled) {
150
+ background: #005fa3;
151
+ }
152
+ .patient-info .category-label {
153
+ font-size: 12px;
154
+ font-weight: bold;
155
+ }
156
+ .patient-info .category-value {
157
+ font-size: 16px;
158
+ font-weight: normal;
159
+ }
160
+
161
+ .patient-info {
162
+ display: flex;
163
+ flex-direction: column;
164
+ justify-content: center;
165
+ }
166
+
167
+ .condition-card.disabled {
168
+ pointer-events: none;
169
+ opacity: 0.3;
170
+ transition: opacity 0.2s ease-in-out 0.1s;
171
+ }
172
+
173
+ .patient-info-right {
174
+ display: flex;
175
+ flex-direction: column;
176
+ justify-content: center;
177
+ gap: 10px;
178
+ width: min-content;
179
+ }
180
+
181
+ .patient-details {
182
+ display: flex;
183
+ flex-direction: column;
184
+ gap: 10px;
185
+ margin-top: 20px;
186
+ align-items: center;
187
+ text-align: center;
188
+ }
189
+
190
+ .json-popup-content {
191
+ max-width: 80vw;
192
+ max-height: 80vh;
193
+ display: flex;
194
+ flex-direction: column;
195
+ width: 800px;
196
+ }
197
+
198
+ .json-viewer-container {
199
+ flex-grow: 1;
200
+ overflow: auto;
201
+ border-radius: 8px;
202
+ padding: 1rem;
203
+ margin-bottom: 1.5rem;
204
+ font-family: monospace;
205
+ }
frontend/src/components/PatientBuilder/PatientBuilder.js ADDED
@@ -0,0 +1,235 @@
1
+ /**
2
+ * Copyright 2025 Google LLC
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ import React, { useState, useEffect } from "react";
18
+ import "./PatientBuilder.css";
19
+ import { JsonViewer } from "@textea/json-viewer"; // updated import
20
+ import DetailsPopup from "../DetailsPopup/DetailsPopup";
21
+
22
+ // Global caching function to load patients & conditions once
23
+ let cachedPatientsAndConditions = null;
24
+ function getPatientsAndConditions() {
25
+ if (cachedPatientsAndConditions)
26
+ return Promise.resolve(cachedPatientsAndConditions);
27
+ return fetch("/assets/patients_and_conditions.json")
28
+ .then((response) => response.json())
29
+ .then((data) => {
30
+ cachedPatientsAndConditions = data;
31
+ return data;
32
+ });
33
+ }
34
+
35
+ const PatientBuilder = ({
36
+ selectedPatient,
37
+ selectedCondition,
38
+ setSelectedPatient,
39
+ setSelectedCondition,
40
+ onNext,
41
+ onBack,
42
+ }) => {
43
+ const [patients, setPatients] = useState([]);
44
+ const [conditions, setConditions] = useState([]);
45
+ const [hoveredPatient, setHoveredPatient] = useState(null);
46
+ const [isVideoLoading, setIsVideoLoading] = useState(false);
47
+
48
+ const [isPopupOpen, setIsPopupOpen] = useState(false);
49
+ const [popupJson, setPopupJson] = useState(null);
50
+ const [isDetailsPopupOpen, setIsDetailsPopupOpen] = useState(false);
51
+
52
+
53
+ useEffect(() => {
54
+ getPatientsAndConditions()
55
+ .then((data) => {
56
+ setPatients(data.patients);
57
+ setConditions(data.conditions);
58
+ })
59
+ .catch((error) =>
60
+ console.error("Error fetching patients and conditions:", error)
61
+ );
62
+ }, []);
63
+
64
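+ // The Serotonin Syndrome scenario only applies to the patient with existing depression, so clear it when an incompatible patient is selected.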
+ useEffect(() => {
65
+ if (
66
+ selectedPatient &&
67
+ selectedPatient.existing_condition.toLowerCase() !== "depression" &&
68
+ selectedCondition === "Serotonin Syndrome"
69
+ ) {
70
+ setSelectedCondition(null);
71
+ }
72
+ }, [selectedPatient]);
73
+
74
+ // When a new patient is selected, set the video to a loading state
75
+ // to ensure the placeholder image is shown.
76
+ useEffect(() => {
77
+ if (selectedPatient) {
78
+ setIsVideoLoading(true);
79
+ }
80
+ }, [selectedPatient]);
81
+
82
+ const handleGo = () => {
83
+ if (selectedPatient && selectedCondition) {
84
+ onNext();
85
+ }
86
+ };
87
+
88
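+ // Load the selected patient's synthetic FHIR bundle and show it in the JSON viewer popup.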
+ const openPopup = (patient) => {
89
+ if (patient && patient.fhirFile) {
90
+ fetch(patient.fhirFile)
91
+ .then((response) => response.json())
92
+ .then((json) => {
93
+ setPopupJson(json);
94
+ setIsPopupOpen(true);
95
+ })
96
+ .catch((error) => console.error("Error fetching FHIR JSON:", error));
97
+ }
98
+ };
99
+
100
+ const closePopup = () => {
101
+ setIsPopupOpen(false);
102
+ setPopupJson(null);
103
+ };
104
+
105
+ return (
106
+ <div className="patient-builder-container">
107
+ <div className="headerButtonsContainer">
108
+ <button className="back-button" onClick={onBack}>
109
+ <i className="material-icons back-button-icon">keyboard_arrow_left</i>
110
+ Back
111
+ </button>
112
+ <button className="details-button" onClick={() => setIsDetailsPopupOpen(true)}>
113
+ <i className="material-icons code-block-icon">code</i>&nbsp;
114
+ Details about this Demo
115
+ </button>
116
+ </div>
117
+ <div className="frame">
118
+ <div className="selection-section">
119
+ <div className="header2">Select a Patient</div>
120
+ <div className="patient-list">
121
+ {patients.map((patient) => {
122
+ const isSelected = selectedPatient && selectedPatient.id === patient.id;
123
+ return (
124
+ <div
125
+ key={patient.id}
126
+ className="patient-card"
127
+ >
128
+ <div
129
+ className={`patient-video-container ${isSelected ? "selected" : ""}`}
130
+ onClick={() => setSelectedPatient(patient)}
131
+ >
132
+ <img
133
+ src={patient.img}
134
+ className="patient-img"
135
+ alt={patient.name}
136
+ draggable="false"
137
+ onDragStart={(e) => e.preventDefault()}
138
+ style={{ opacity: isSelected && !isVideoLoading ? 0 : 1 }}
139
+ />
140
+ {isSelected && (
141
+ <video
142
+ key={patient.id}
143
+ src={patient.video}
144
+ className="patient-video"
145
+ autoPlay
146
+ muted
147
+ loop
148
+ onCanPlay={() => setIsVideoLoading(false)}
149
+ style={{ opacity: isVideoLoading ? 0 : 1 }}
150
+ />
151
+ )}
152
+ <div className="ehr-label" onClick={(e) => { e.stopPropagation(); openPopup(patient); }}>
153
+ Synthetic Health Record (FHIR)
154
+ </div>
155
+ </div>
156
+ <div className="patient-info">
157
+ <div className="category-value">
158
+ {patient.name}, {patient.age} years old, {patient.gender}
159
+ </div>
160
+ <div className="category-value">
161
+ Existing condition: {patient.existing_condition}
162
+ </div>
163
+ </div>
164
+ </div>
165
+ );
166
+ })}
167
+ </div>
168
+ </div>
169
+ <div className="selection-section">
170
+ <div className="header2">Explore a Condition</div>
171
+ <div className="lighttext">
172
+ In this demonstration, a persona, simulated using Gemini 2.5 Flash, will interact with an AI agent, built with MedGemma.
173
+ Neither the simulated persona nor the AI agent has been provided the diagnosis for the current condition (selected below).
174
+ The AI agent facilitates structured information-gathering, designed to usefully collect and summarize the patient's symptoms.
175
+ For the purposes of this demonstration, the AI agent also has access to elements of the patient's health record (provided as FHIR resources).
176
+ </div>
177
+ <div className="condition-list">
178
+ {conditions.map((cond) => {
179
+ const isDisabled =
180
+ cond.name === "Serotonin Syndrome" &&
181
+ selectedPatient &&
182
+ selectedPatient.existing_condition.toLowerCase() !== "depression";
183
+ return (
184
+ <div
185
+ key={cond.name}
186
+ className={`condition-card lighttext ${
187
+ selectedCondition === cond.name ? "selected" : ""
188
+ } ${isDisabled ? "disabled" : ""}`}
189
+ onClick={
190
+ !isDisabled
191
+ ? () => setSelectedCondition(cond.name)
192
+ : undefined
193
+ }
194
+ >
195
+ <div><strong>{cond.name}</strong></div>
196
+ <div>{cond.description}</div>
197
+ </div>
198
+ );
199
+ })}
200
+ </div>
201
+ </div>
202
+ <button
203
+ className="info-button"
204
+ onClick={handleGo}
205
+ disabled={!(selectedPatient && selectedCondition)}
206
+ >
207
+ Launch simulation
208
+ </button>
209
+ </div>
210
+ {isPopupOpen && (
211
+ <div className="popup-overlay" onClick={closePopup}>
212
+ <div
213
+ className="popup-content json-popup-content"
214
+ onClick={(e) => e.stopPropagation()}
215
+ >
216
+ <h2>Synthetic Electronic Health Record</h2>
217
+ <span>This is a sample of the patient’s electronic health record, shown in a standard (FHIR) format. This FHIR record, like the patient, was generated solely for the purposes of this demo.</span>
218
+ <div className="json-viewer-container">
219
+ <JsonViewer value={popupJson} theme="monokai" />
220
+ </div>
221
+ <button className="popup-button" onClick={closePopup}>
222
+ Close
223
+ </button>
224
+ </div>
225
+ </div>
226
+ )}
227
+ <DetailsPopup
228
+ isOpen={isDetailsPopupOpen}
229
+ onClose={() => setIsDetailsPopupOpen(false)}
230
+ />
231
+ </div>
232
+ );
233
+ };
234
+
235
+ export default PatientBuilder;
frontend/src/components/PreloadImages.js ADDED
@@ -0,0 +1,42 @@
1
+ /**
2
+ * Copyright 2025 Google LLC
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ import React, { useEffect, useState } from 'react';
18
+
19
+ const PreloadImages = ({ imageSources, children }) => {
20
+ const [loaded, setLoaded] = useState(false);
21
+
22
+ useEffect(() => {
23
+ let loadedCount = 0;
24
+ imageSources.forEach(src => {
25
+ const img = new Image();
26
+ img.src = src;
27
+ img.onload = () => {
28
+ loadedCount++;
29
+ if (loadedCount === imageSources.length) {
30
+ setLoaded(true);
31
+ }
32
+ };
+ // Also count failed loads, so a single broken asset cannot leave the app stuck on the loading screen.
+ img.onerror = img.onload;
33
+ });
34
+ }, [imageSources]);
35
+
36
+ if (!loaded) {
37
+ return <div>Loading images...</div>;
38
+ }
39
+ return <>{children}</>;
40
+ };
41
+
42
+ export default PreloadImages;
frontend/src/components/RolePlayDialogs/RolePlayDialogs.css ADDED
@@ -0,0 +1,101 @@
1
+ /**
2
+ * Copyright 2025 Google LLC
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ .frame.role-play-container {
18
+ display: grid;
19
+ justify-items: center;
20
+ align-content: center;
21
+ align-items: center;
22
+ grid-gap: 30px;
23
+ width: fit-content;
24
+ align-self: center;
25
+ }
26
+
27
+ .dialogs-container {
28
+ display: flex;
29
+ justify-content: space-between;
30
+ align-items: center;
31
+ gap: 20px;
32
+ margin-top: 50px;
33
+ }
34
+
35
+
36
+ .dialog-box {
37
+ border-radius: 5.667px;
38
+ border: 1.889px solid #E9E9E9;
39
+ background: #FFF;
40
+ display: flex;
41
+ flex-direction: column;
42
+ align-items: center;
43
+ width: 477px;
44
+ }
45
+
46
+ .dialog-title-text {
47
+ padding-top: 24px;
48
+ font-size: 1.6rem;
49
+ font-weight: 500;
50
+ color: #202124;
51
+ display: flex;
52
+ align-items: center;
53
+ gap: 10px;
54
+ }
55
+
56
+ .dialog-body-scrollable {
57
+ padding: 16px;
58
+ overflow-y: auto;
59
+ flex-grow: 1;
60
+ color: #3c4043;
61
+ line-height: 1.6;
62
+ }
63
+
64
+ .dialog-subtitle {
65
+ font-weight: 500;
66
+ margin-bottom: 8px;
67
+ }
68
+
69
+ .variable {
70
+ color: #e81ad7;
71
+ font-weight: bold;
72
+ }
73
+
74
+ .patient-avatar {
75
+ border-radius: 50%;
76
+ margin: 0 10px;
77
+ width: 90px;
78
+ height: 90px;
79
+ }
80
+
81
+ .ai-avatar {
82
+ border-radius: 50%;
83
+ width: 90px;
84
+ height: 90px;
85
+ background: #E8DEF8;
86
+ }
87
+
88
+ .report-notice {
89
+ width: 974px;
90
+ }
91
+
92
+ .highlight {
93
+ background-color: #E8DEF8;
94
+ font-weight: 700;
95
+ padding: 0 4px;
96
+ border-radius: 8px;
97
+ }
98
+
99
+ .role-play-container .info-button {
100
+ justify-self: flex-start;
101
+ }
frontend/src/components/RolePlayDialogs/RolePlayDialogs.js ADDED
@@ -0,0 +1,103 @@
1
+ /**
2
+ * Copyright 2025 Google LLC
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ import React, { useState } from "react";
18
+ import "./RolePlayDialogs.css";
19
+ import DetailsPopup from "../DetailsPopup/DetailsPopup";
20
+
21
+ const RolePlayDialogs = ({
22
+ selectedPatient,
23
+ selectedCondition,
24
+ onStart,
25
+ onBack,
26
+ }) => {
27
+ const [isDetailsPopupOpen, setIsDetailsPopupOpen] = useState(false);
28
+
29
+ return (
30
+ <div className="page">
31
+ <div className="headerButtonsContainer">
32
+ <button className="back-button" onClick={onBack}>
33
+ <i className="material-icons back-button-icon">keyboard_arrow_left</i>
34
+ Back
35
+ </button>
36
+ <button className="details-button" onClick={() => setIsDetailsPopupOpen(true)}>
37
+ <i className="material-icons code-block-icon">code</i>&nbsp; Details
38
+ about this Demo
39
+ </button>
40
+ </div>
41
+ <div className="frame role-play-container">
42
+ <div className="title-header">What’s happening in this simulation</div>
43
+ <div className="dialogs-container">
44
+ <div className="dialog-box">
45
+ <div className="dialog-title-text">Pre-visit AI agent</div>
46
+ <div className="dialog-subtitle">
47
+ Built with: <img src="assets/medgemma.avif" height="16px" />{" "}
48
+ 27b
49
+ </div>
50
+ <img
51
+ src="assets/ai_headshot.svg"
52
+ alt="AI Avatar"
53
+ className="ai-avatar"
54
+ />
55
+ <div className="dialog-body-scrollable">
56
+ In this demo, MedGemma functions as an AI agent designed to assist in pre-visit information
57
+ collection. It will interact with the patient agent to gather relevant data.
58
+ To provide additional context, MedGemma also has access to information from the patient's EHR (in FHIR format).
59
+ However, MedGemma is not provided the specific diagnosis ({selectedCondition}).
60
+ MedGemma's goal is to gather details about symptoms, relevant history,
61
+ and current concerns to generate a comprehensive pre-visit report.
62
+ </div>
63
+ </div>
64
+ <div className="dialog-box">
65
+ <div className="dialog-title-text">
66
+ Patient persona: {selectedPatient.name}
67
+ </div>
68
+ <div className="dialog-subtitle">
69
+ Simulated by:{" "}Gemini 2.5 Flash
70
+ </div>
71
+ <img
72
+ src={selectedPatient.headshot}
73
+ alt="Patient Avatar"
74
+ className="patient-avatar"
75
+ />
76
+ <div className="dialog-body-scrollable">
77
+ Gemini is provided a persona and information to play the role of the patient, {selectedPatient.name}.
78
+ In this simulation, the patient agent does not know their diagnosis,
79
+ but is experiencing related symptoms and concerns that can be shared during the interview.
80
+ To simulate a real-world situation with confounding information, additional information unrelated to the presenting condition has also been provided.
81
+ </div>
82
+ </div>
83
+ </div>
84
+ <div className="report-notice">
85
+ As the conversation develops, MedGemma <span className="highlight">creates and continually updates
86
+ a real-time pre-visit report</span> capturing relevant
87
+ information. Following pre-visit report generation, an evaluation is available. The purpose of this evaluation is to provide the viewer with insights into the quality of the output.
88
+ For this evaluation, MedGemma is provided the previously unknown reference diagnosis, and is prompted to generate a
89
+ <span className="highlight">self-evaluation that highlights strengths as well as opportunities where the conversation and report could have been improved.</span>
90
+ </div>
91
+ <button className="info-button" onClick={onStart}>
92
+ Start conversation
93
+ </button>
94
+ </div>
95
+ <DetailsPopup
96
+ isOpen={isDetailsPopupOpen}
97
+ onClose={() => setIsDetailsPopupOpen(false)}
98
+ />
99
+ </div>
100
+ );
101
+ };
102
+
103
+ export default RolePlayDialogs;
frontend/src/components/WelcomePage/WelcomePage.css ADDED
@@ -0,0 +1,144 @@
1
+ /**
2
+ * Copyright 2025 Google LLC
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ body:has(.welcome) {
18
+ background-color: white;
19
+ }
20
+
21
+ .info-page-container {
22
+ display: flex;
23
+ align-items: center;
24
+ justify-content: center;
25
+ gap: 40px;
26
+ padding: 40px;
27
+ max-width: 1300px;
28
+ margin: auto;
29
+ }
30
+
31
+ .info-content {
32
+ flex: 1;
33
+ min-width: 500px;
34
+ max-width: 1000px;
35
+ display: flex;
36
+ flex-direction: column;
37
+ gap: 20px;
38
+ font-size: 18px;
39
+ }
40
+
41
+ .info-header {
42
+ margin-bottom: 10px;
43
+ }
44
+
45
+ .title-header {
46
+ font-family: 'Google Sans', sans-serif;
47
+ font-size: 32px;
48
+ font-weight: 500;
49
+ font-style: normal;
50
+ }
51
+
52
+ .welcome .medgemma-logo {
53
+ width: 130px;
54
+ align-self: flex-end;
55
+ margin-top: 10px;
56
+ margin-right: 10px;
57
+ }
58
+
59
+ .info-button {
60
+ background-color: #C2E7FF;
61
+ }
62
+
63
+ .info-disclaimer-text {
64
+ font-family: 'Google Sans', sans-serif;
65
+ color: #333;
66
+ line-height: 1.5;
67
+ margin: 0;
68
+ font-size: 14px;
69
+ }
70
+
71
+ .info-disclaimer-title {
72
+ border-radius: 14.272px;
73
+ border: 1.359px solid #F1E161;
74
+ background: #F1E161;
75
+ mix-blend-mode: multiply;
76
+ padding: 0 5px;
77
+ }
78
+
79
+ .graphics {
80
+ position: relative;
81
+ min-width: 250px;
82
+ max-width: 450px;
83
+ flex: 0.5;
84
+ aspect-ratio: 1.2 / 1;
85
+ }
86
+
87
+ @media (max-width: 900px) {
88
+ .info-page-container {
89
+ flex-direction: column;
90
+ padding: 20px;
91
+ margin: 10px;
92
+ }
93
+
94
+ .info-content {
95
+ max-width: 100%;
96
+ align-items: center;
97
+ text-align: center;
98
+ }
99
+
100
+ .info-button {
101
+ align-self: center;
102
+ }
103
+
104
+ .info-header {
105
+ text-align: center;
106
+ }
107
+
108
+ .title-header {
109
+ font-size: 36px;
110
+ }
111
+
112
+ .info-text {
113
+ font-size: 16px;
114
+ }
115
+ .graphics {
116
+ min-width: 200px;
117
+ }
118
+ }
119
+
120
+
121
+
122
+ .graphics-top {
123
+ position: absolute;
124
+ top: 0;
125
+ left: 0;
126
+ z-index: 0;
127
+ width: 80%;
128
+ }
129
+
130
+ .graphics-bottom {
131
+ position: absolute;
132
+ bottom: 0;
133
+ right: 0;
134
+ z-index: 1;
135
+ opacity: 0;
136
+ animation: fadeIn 1s ease forwards;
137
+ width: 62%;
138
+ }
139
+
140
+ @keyframes fadeIn {
141
+ to {
142
+ opacity: 1;
143
+ }
144
+ }
frontend/src/components/WelcomePage/WelcomePage.js ADDED
@@ -0,0 +1,62 @@
1
+ /**
2
+ * Copyright 2025 Google LLC
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ import React from 'react';
18
+ import './WelcomePage.css';
19
+
20
+ const WelcomePage = ({ onSwitchPage }) => {
21
+ return (
22
+ <div className="welcome page">
23
+ <img src="/assets/medgemma.avif" alt="MedGemma Logo" className="medgemma-logo" />
24
+ <div className="info-page-container">
25
+ <div className="graphics">
26
+ <img className="graphics-top" src="/assets/welcome_top_graphics.svg" alt="Welcome top graphics" />
27
+ <img className="graphics-bottom" src="/assets/welcome_bottom_graphics.svg" alt="Welcome bottom graphics" />
28
+ </div>
29
+ <div className="info-content">
30
+ <div className="info-header">
31
+ <span className="title-header">Simulated Pre-visit Intake Demo</span>
32
+ </div>
33
+ <div className="info-text">
34
+ Healthcare providers often need to gather patient information before appointments.
35
+ This demo illustrates how MedGemma could be used in an application to streamline pre-visit information collection and utilization.
36
+ <br /><br/>
37
+ First, a pre-visit AI agent built with MedGemma asks questions to gather information.
38
+ After it has identified and collected relevant information, the demo application generates a pre-visit report.
39
+ <br /><br/>
40
+ This type of intelligent pre-visit report can help providers be more efficient and effective while also providing an improved experience
41
+ for patients relative to traditional intake forms.
42
+ <br /><br/>
43
+ Lastly, you can view an evaluation of the pre-visit report, which provides insight into the quality of the output.
44
+ For this evaluation, MedGemma is provided with the reference diagnosis, allowing a "self-evaluation" that highlights both strengths and what it could have done better.
45
+ </div>
46
+ <div className="info-disclaimer-text">
47
+ <span className="info-disclaimer-title">Disclaimer</span> This
48
+ demonstration is for illustrative purposes only and does not represent a finished or approved
49
+ product. It is not representative of compliance with any regulations or standards for
50
+ quality, safety or efficacy. Any real-world application would require additional development,
51
+ training, and adaptation. The experience highlighted in this demo shows MedGemma's baseline
52
+ capability for the displayed task and is intended to help developers and users explore possible
53
+ applications and inspire further development.
54
+ </div>
55
+ <button className="info-button" onClick={onSwitchPage}>Select Patient</button>
56
+ </div>
57
+ </div>
58
+ </div>
59
+ );
60
+ };
61
+
62
+ export default WelcomePage;
frontend/src/index.js ADDED
@@ -0,0 +1,23 @@
1
+ /**
2
+ * Copyright 2025 Google LLC
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ import React from 'react';
18
+ import ReactDOM from 'react-dom/client';
19
+ import App from './App';
20
+ import './shared/Style.css';
21
+
22
+ const root = ReactDOM.createRoot(document.getElementById('root'));
23
+ root.render(<App />);
frontend/src/shared/Style.css ADDED
@@ -0,0 +1,205 @@
1
+ /**
2
+ * Copyright 2025 Google LLC
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ * {
18
+ box-sizing: border-box;
19
+ }
20
+
21
+ html {
22
+ --image-fixed-width: 280px;
23
+ height: 100%;
24
+ }
25
+
26
+ #root {
27
+ height: 100%;
28
+ margin: auto;
29
+ width: 100%;
30
+ }
31
+
32
+ body {
33
+ font-family: "Google Sans Text", sans-serif;
34
+ line-height: 1.6;
35
+ background-color: #f4f4f4;
36
+ color: #333;
37
+ margin: 0;
38
+ display: flex;
39
+ flex-direction: column;
40
+ user-select: none;
41
+ height: 100%;
42
+ }
43
+
44
+ .page {
45
+ display: flex;
46
+ flex-direction: column;
47
+ width: 100%;
48
+ height: fit-content;
49
+ }
50
+
51
+ .headerButtonsContainer {
52
+ display: flex;
53
+ justify-content: space-between;
54
+ width: -webkit-fill-available;
55
+ padding: 20px;
56
+ }
57
+
58
+ .info-button, .back-button, .details-button {
59
+ font-family: 'Google Sans Text', sans-serif;
60
+ font-size: 14px;
61
+ font-weight: 500;
62
+ padding: 6px 12px;
63
+ border-radius: 100px;
64
+ cursor: pointer;
65
+ text-align: center;
66
+ transition: background-color 0.3s ease;
67
+ align-self: flex-start;
68
+ border-width: 1px;
69
+ }
70
+
71
+ .back-button, .details-button {
72
+ padding: 8px 12px;
73
+ z-index: 10;
74
+ color: black;
75
+ background-color: transparent;
76
+ display: inline-flex;
77
+ align-items: center;
78
+ border-radius: 100px;
79
+ border: 1px solid rgba(196, 199, 197);
80
+ }
81
+
82
+ .back-button-icon {
83
+ margin-right: 4px;
84
+ }
85
+
86
+ .back-button-icon {
87
+ font-size: 14px;
88
+ }
89
+
90
+ .code-block-icon {
91
+ background-color: rgba(0, 74, 119);
92
+ color: rgba(194, 231, 255);
93
+ font-size: 14px;
94
+ }
95
+
96
+ .details-button {
97
+ background-color: rgba(194, 231, 255);
98
+ color: rgba(0, 74, 119);
99
+ border: none;
100
+ }
101
+
102
+ .info-button {
103
+ background-color: #0B57D0;
104
+ color: white;
105
+ padding: 12px;
106
+ }
107
+
108
+ .info-button:disabled {
109
+ background: #aaa;
110
+ cursor: not-allowed;
111
+ }
112
+
113
+ .info-button:hover:not(:disabled) {
114
+ background: #005fa3;
115
+ }
116
+
117
+ .frame {
118
+ border-radius: 28px;
119
+ border: 2px solid #E9E9E9;
120
+ background: #FFF;
121
+ padding: 20px 50px;
122
+ align-items: center;
123
+ display: flex;
124
+ flex-direction: column;
125
+ flex: 1;
126
+ justify-content: space-around;
127
+ margin: 0 10px;
128
+ min-height: 0;
129
+ width: fit-content;
130
+ }
131
+
132
+ .popup-overlay {
133
+ position: fixed;
134
+ top: 0;
135
+ left: 0;
136
+ width: 100%;
137
+ height: 100%;
138
+ background-color: rgba(0, 0, 0, 0.6);
139
+ display: flex;
140
+ justify-content: center;
141
+ align-items: center;
142
+ z-index: 1000;
143
+ backdrop-filter: blur(5px);
144
+ }
145
+
146
+ .popup-content {
147
+ background: #ffffff;
148
+ padding: 2rem;
149
+ border-radius: 12px;
150
+ max-width: 800px;
151
+ width: 90%;
152
+ box-shadow: 0 10px 25px rgba(0, 0, 0, 0.1);
153
+ border: 1px solid #e0e0e0;
154
+ animation: popup-fade-in 0.3s ease-out;
155
+ }
156
+
157
+ @keyframes popup-fade-in {
158
+ from {
159
+ opacity: 0;
160
+ transform: scale(0.95);
161
+ }
162
+ to {
163
+ opacity: 1;
164
+ transform: scale(1);
165
+ }
166
+ }
167
+
168
+ .popup-content h2 {
169
+ font-size: 1.5rem;
170
+ font-weight: 600;
171
+ color: #333;
172
+ margin-top: 0;
173
+ margin-bottom: 1rem;
174
+ text-align: center;
175
+ }
176
+
177
+ .popup-content p {
178
+ font-size: 1rem;
179
+ line-height: 1.6;
180
+ color: #555;
181
+ text-align: left;
182
+ margin-bottom: 1.5rem;
183
+ }
184
+
185
+ .popup-button {
186
+ display: block;
187
+ padding: 12px 20px;
188
+ font-size: 1rem;
189
+ font-weight: 600;
190
+ color: #fff;
191
+ background-color: #1a73e8;
192
+ border: none;
193
+ border-radius: 8px;
194
+ cursor: pointer;
195
+ transition: background-color 0.2s ease;
196
+ }
197
+
198
+ .popup-button:hover {
199
+ background-color: #185abc;
200
+ }
201
+
202
+ .hf-logo {
203
+ vertical-align: middle;
204
+ width: 30px;
205
+ }
gemini.py ADDED
@@ -0,0 +1,59 @@
1
+ # Copyright 2025 Google LLC
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import os
16
+ import requests
17
+ from cache import cache  # shared cache instance defined in cache.py
18
+
19
+ GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY")
20
+
21
+ # Decorate the function to cache its results indefinitely.
22
+ @cache.memoize()
23
+ def gemini_get_text_response(prompt: str,
24
+ stop_sequences: list = None,
25
+ temperature: float = 0.1,
26
+ max_output_tokens: int = 4000,
27
+ top_p: float = 0.8,
28
+ top_k: int = 10):
29
+ """
30
+ Makes a text generation request to the Gemini API.
31
+ """
32
+
33
+ api_url = f"https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent?key={GEMINI_API_KEY}"
34
+ headers = {
35
+ 'Content-Type': 'application/json'
36
+ }
37
+
38
+ data = {
39
+ "contents": [
40
+ {
41
+ "parts": [
42
+ {
43
+ "text": prompt
44
+ }
45
+ ]
46
+ }
47
+ ],
48
+ "generationConfig": {
49
+ "stopSequences": stop_sequences or ["Title"],
50
+ "temperature": temperature,
51
+ "maxOutputTokens": max_output_tokens,
52
+ "topP": top_p,
53
+ "topK": top_k
54
+ }
55
+ }
56
+
57
+ response = requests.post(api_url, headers=headers, json=data)
58
+ response.raise_for_status() # Raise an exception for bad status codes
59
+ return response.json()["candidates"][0]["content"]["parts"][0]["text"]
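Below is a brief usage sketch (an editor's illustration, not part of the commit); it assumes GEMINI_API_KEY is set in the environment and that the cache module above is importable:

from gemini import gemini_get_text_response

# The first call hits the Gemini REST endpoint; identical repeat calls are
# served from the shared cache via the memoize decorator above.
answer = gemini_get_text_response(
    "Summarize in one sentence: the patient reports a dry cough for three days.",
    temperature=0.2,
    max_output_tokens=256,
)
print(answer)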
gemini_tts.py ADDED
@@ -0,0 +1,212 @@
1
+ # Copyright 2025 Google LLC
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import google.generativeai as genai
16
+ import os
17
+ import struct
18
+ import re
19
+ import logging
20
+ from cache import cache
21
+
22
+ # Add these imports for MP3 conversion
23
+ from pydub import AudioSegment
24
+ import io
25
+
26
+ # --- Constants ---
27
+ GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY")
28
+ GENERATE_SPEECH = os.environ.get("GENERATE_SPEECH", "false").lower() == "true"
29
+ TTS_MODEL = "gemini-2.5-flash-preview-tts"
30
+ DEFAULT_RAW_AUDIO_MIME = "audio/L16;rate=24000"
31
+
32
+ # --- Configuration ---
33
+ logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
34
+
35
+ genai.configure(api_key=GEMINI_API_KEY)
36
+
37
+ class TTSGenerationError(Exception):
38
+ """Custom exception for TTS generation failures."""
39
+ pass
40
+
41
+
42
+ # --- Helper functions for audio processing ---
43
+ def parse_audio_mime_type(mime_type: str) -> dict[str, int | None]:
44
+ """
45
+ Parses bits per sample and rate from an audio MIME type string.
46
+ e.g., "audio/L16;rate=24000" -> {"bits_per_sample": 16, "rate": 24000}
47
+ """
48
+ bits_per_sample = 16 # Default
49
+ rate = 24000 # Default
50
+
51
+ parts = mime_type.split(";")
52
+ for param in parts:
53
+ param = param.strip().lower()
54
+ if param.startswith("rate="):
55
+ try:
56
+ rate_str = param.split("=", 1)[1]
57
+ rate = int(rate_str)
58
+ except (ValueError, IndexError):
59
+ pass # Keep default if parsing fails
60
+ elif re.match(r"audio/l\d+", param): # Matches audio/L<digits>
61
+ try:
62
+ bits_str = param.split("l",1)[1]
63
+ bits_per_sample = int(bits_str)
64
+ except (ValueError, IndexError):
65
+ pass # Keep default
66
+ return {"bits_per_sample": bits_per_sample, "rate": rate}
67
+
68
+ def convert_to_wav(audio_data: bytes, mime_type: str) -> bytes:
69
+ """
70
+ Generates a WAV file header for the given raw audio data and parameters.
71
+ Assumes mono audio.
72
+ """
73
+ parameters = parse_audio_mime_type(mime_type)
74
+ bits_per_sample = parameters["bits_per_sample"]
75
+ sample_rate = parameters["rate"]
76
+ num_channels = 1 # Mono
77
+ data_size = len(audio_data)
78
+ bytes_per_sample = bits_per_sample // 8
79
+ block_align = num_channels * bytes_per_sample
80
+ byte_rate = sample_rate * block_align
81
+ chunk_size = 36 + data_size
82
+
83
+ header = struct.pack(
84
+ "<4sI4s4sIHHIIHH4sI",
85
+ b"RIFF", chunk_size, b"WAVE", b"fmt ",
86
+ 16, 1, num_channels, sample_rate, byte_rate, block_align,
87
+ bits_per_sample, b"data", data_size
88
+ )
89
+ return header + audio_data
90
+ # --- End of helper functions ---
91
+
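As a concrete illustration of the two helpers above, this editor's sketch (not part of the module) wraps 0.1 s of 16-bit mono silence in a WAV container; it assumes gemini_tts and its dependencies are importable:

from gemini_tts import parse_audio_mime_type, convert_to_wav

raw_pcm = b"\x00\x00" * 2400  # 2400 samples of 16-bit mono silence at 24 kHz (0.1 s)
print(parse_audio_mime_type("audio/L16;rate=24000"))  # {'bits_per_sample': 16, 'rate': 24000}
wav_bytes = convert_to_wav(raw_pcm, "audio/L16;rate=24000")
print(wav_bytes[:4], wav_bytes[8:12])  # b'RIFF' b'WAVE', followed by the 4800 PCM data bytes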
92
+ def _synthesize_gemini_tts_impl(text: str, gemini_voice_name: str) -> tuple[bytes, str]:
93
+ """
94
+ Synthesizes English text using the Gemini API via the google-genai library.
95
+ Returns a tuple: (processed_audio_data_bytes, final_mime_type).
96
+ Raises TTSGenerationError on failure.
97
+ """
98
+ if not GENERATE_SPEECH:
99
+ # This should ideally not be hit if the logic outside this function is correct,
100
+ # but as a safeguard, we raise an error.
101
+ raise TTSGenerationError(
102
+ "GENERATE_SPEECH is not set. Please set it in your environment variables to generate speech."
103
+ )
104
+
105
+ try:
106
+ model = genai.GenerativeModel(TTS_MODEL)
107
+
108
+ generation_config = {
109
+ "response_modalities": ["AUDIO"],
110
+ "speech_config": {
111
+ "voice_config": {
112
+ "prebuilt_voice_config": {
113
+ "voice_name": gemini_voice_name
114
+ }
115
+ }
116
+ }
117
+ }
118
+
119
+ response = model.generate_content(
120
+ contents=[text],
121
+ generation_config=generation_config,
122
+ )
123
+
124
+ audio_part = response.candidates[0].content.parts[0]
125
+ audio_data_bytes = audio_part.inline_data.data
126
+ final_mime_type = audio_part.inline_data.mime_type
127
+ except Exception as e:
128
+ error_message = f"An unexpected error occurred with google-genai: {e}"
129
+ logging.error(error_message)
130
+ raise TTSGenerationError(error_message) from e
131
+
132
+ if not audio_data_bytes:
133
+ error_message = "No audio data was successfully retrieved or decoded."
134
+ logging.error(error_message)
135
+ raise TTSGenerationError(error_message)
136
+
137
+ # --- Audio processing ---
138
+ if final_mime_type:
139
+ final_mime_type_lower = final_mime_type.lower()
140
+ needs_wav_conversion = any(p in final_mime_type_lower for p in ("audio/l16", "audio/l24", "audio/l8")) or \
141
+ not final_mime_type_lower.startswith(("audio/wav", "audio/mpeg", "audio/ogg", "audio/opus"))
142
+
143
+ if needs_wav_conversion:
144
+ processed_audio_data = convert_to_wav(audio_data_bytes, final_mime_type)
145
+ processed_audio_mime = "audio/wav"
146
+ else:
147
+ processed_audio_data = audio_data_bytes
148
+ processed_audio_mime = final_mime_type
149
+ else:
150
+ logging.warning("MIME type not determined. Assuming raw audio and attempting WAV conversion (defaulting to %s).", DEFAULT_RAW_AUDIO_MIME)
151
+ processed_audio_data = convert_to_wav(audio_data_bytes, DEFAULT_RAW_AUDIO_MIME)
152
+ processed_audio_mime = "audio/wav"
153
+
154
+ # --- MP3 compression ---
155
+ if processed_audio_data:
156
+ try:
157
+ # Load audio into an AudioSegment (treated as WAV here; other formats fall back below)
158
+ audio_segment = AudioSegment.from_file(io.BytesIO(processed_audio_data), format="wav")
159
+ mp3_buffer = io.BytesIO()
160
+ audio_segment.export(mp3_buffer, format="mp3")
161
+ mp3_bytes = mp3_buffer.getvalue()
162
+ return mp3_bytes, "audio/mpeg"
163
+ except Exception as e:
164
+ logging.warning("MP3 compression failed: %s. Falling back to WAV.", e)
165
+ # Fallback to WAV if MP3 conversion fails
166
+ return processed_audio_data, processed_audio_mime
167
+ else:
168
+ error_message = "Audio processing failed."
169
+ logging.error(error_message)
170
+ raise TTSGenerationError(error_message)
171
+
172
+ # Always create the memoized function first, so we can access its __cache_key__() method
173
+ _memoized_tts_func = cache.memoize()(_synthesize_gemini_tts_impl)
174
+
175
+ if GENERATE_SPEECH:
176
+ def synthesize_gemini_tts_with_error_handling(*args, **kwargs) -> tuple[bytes | None, str | None]:
177
+ """
178
+ A wrapper for the memoized TTS function that catches errors and returns (None, None).
179
+ This makes the audio generation more resilient to individual failures.
180
+ """
181
+ try:
182
+ # Attempt to get the audio from the cache or by generating it.
183
+ return _memoized_tts_func(*args, **kwargs)
184
+ except TTSGenerationError as e:
185
+ # If generation fails, log the error and return None, None.
186
+ logging.error("Handled TTS Generation Error: %s. Continuing without audio for this segment.", e)
187
+ return None, None
188
+
189
+ synthesize_gemini_tts = synthesize_gemini_tts_with_error_handling
190
+ else:
191
+ # When not generating speech, create a read-only function that only
192
+ # checks the cache and does not generate new audio.
193
+ def read_only_synthesize_gemini_tts(*args, **kwargs):
194
+ """
195
+ Checks cache for a result, but never calls the underlying TTS function.
196
+ This is a 'read-only' memoization check.
197
+ """
198
+ # Generate the cache key using the memoized function's key method.
199
+ key = _memoized_tts_func.__cache_key__(*args, **kwargs)
200
+
201
+ # Check the cache directly using the generated key.
202
+ _sentinel = object()
203
+ result = cache.get(key, default=_sentinel)
204
+
205
+ if result is not _sentinel:
206
+ return result # Cache hit
207
+
208
+ # Cache miss
209
+ logging.info("GENERATE_SPEECH is false and no cached result found for key: %s", key)
210
+ return None, None
211
+
212
+ synthesize_gemini_tts = read_only_synthesize_gemini_tts
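A short consumption sketch (editor's illustration, not part of the module): both code paths above return (None, None) when no audio is available, so callers should handle that case. The voice name matches INTERVIEWER_VOICE used in interview_simulator.py.

from gemini_tts import synthesize_gemini_tts

audio_bytes, mime_type = synthesize_gemini_tts("Hello, how are you feeling today?", "Aoede")
if audio_bytes is None:
    # Either GENERATE_SPEECH is false with no cached entry, or generation failed.
    print("No audio for this segment; continuing with text only.")
else:
    ext = "mp3" if mime_type == "audio/mpeg" else "wav"
    with open(f"greeting.{ext}", "wb") as f:
        f.write(audio_bytes)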
interview_simulator.py ADDED
@@ -0,0 +1,385 @@
1
+ # Copyright 2025 Google LLC
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import json
16
+ import re
17
+ import os
18
+ import base64
19
+
20
+ from gemini import gemini_get_text_response
21
+ from medgemma import medgemma_get_text_response
22
+ from gemini_tts import synthesize_gemini_tts
23
+
24
+ INTERVIEWER_VOICE = "Aoede"
25
+
26
+ def read_symptoms_json():
27
+ # Load the list of symptoms for each condition from a JSON file
28
+ with open("symptoms.json", 'r') as f:
29
+ return json.load(f)
30
+
31
+ def read_patient_and_conditions_json():
32
+ # Load all patient and condition data from the frontend assets
33
+ with open(os.path.join(os.environ.get("FRONTEND_BUILD", "frontend/build"), "assets", "patients_and_conditions.json"), 'r') as f:
34
+ return json.load(f)
35
+
36
+ def get_patient(patient_name):
37
+ """Helper function to locate a patient record by name. Raises StopIteration if not found."""
38
+ return next(p for p in PATIENTS if p["name"] == patient_name)
39
+
40
+ def read_fhir_json(patient):
41
+ # Load the FHIR (EHR) JSON file for a given patient
42
+ with open(os.path.join(os.environ.get("FRONTEND_BUILD", "frontend/build"), patient["fhirFile"].lstrip("/")), 'r') as f:
43
+ return json.load(f)
44
+
45
+ def get_ehr_summary_per_patient(patient_name):
46
+ # Returns a concise EHR summary for the patient, using LLM if not already cached
47
+ patient = get_patient(patient_name)
48
+ if patient.get("ehr_summary"):
49
+ return patient["ehr_summary"]
50
+ # Use MedGemma to summarize the EHR for the patient
51
+ ehr_summary = medgemma_get_text_response([
52
+ {
53
+ "role": "system",
54
+ "content": [
55
+ {
56
+ "type": "text",
57
+ "text": f"""You are a medical assistant summarizing the EHR (FHIR) records for the patient {patient_name}.
58
+ Provide a concise summary of the patient's medical history, including any existing conditions, medications, and relevant past treatments.
59
+ Do not include personal opinions or assumptions, only factual information."""
60
+ }
61
+ ]
62
+ },
63
+ {
64
+ "role": "user",
65
+ "content": [
66
+ {
67
+ "type": "text",
68
+ "text": json.dumps(read_fhir_json(patient))
69
+ }
70
+ ]
71
+ }
72
+ ])
73
+ patient["ehr_summary"] = ehr_summary
74
+ return ehr_summary
75
+
76
+ PATIENTS = read_patient_and_conditions_json()["patients"]
77
+ SYMPTOMS = read_symptoms_json()
78
+
79
+ def patient_roleplay_instructions(patient_name, condition_name, previous_answers):
80
+ """
81
+ Generates structured instructions for the LLM to roleplay as a patient, including persona, scenario, and symptom logic.
82
+ """
83
+ # This assumes SYMPTOMS is a globally available dictionary as in the user's example
84
+ patient = get_patient(patient_name)
85
+ symptoms = "\n".join(SYMPTOMS[condition_name])
86
+
87
+ return f"""
88
+ SYSTEM INSTRUCTION: Before the interview begins, silently review the optional symptoms and decide which ones you have.
89
+
90
+ ### Your Persona ###
91
+ - **Name:** {patient_name}
92
+ - **Age:** {patient["age"]}
93
+ - **Gender:** {patient["gender"]}
94
+ - **Your Role:** You are to act as this patient. Behave naturally and realistically.
95
+
96
+ ### Scenario ###
97
+ You are at home, participating in a remote pre-visit interview with a clinical assistant. You recently booked an appointment with your doctor because you've been feeling unwell. You are now answering the assistant's questions about your symptoms.
98
+
99
+ ### Your Medical History ###
100
+ You have a known history of **{patient["existing_condition"]}**. You should mention this if asked about your medical history, but you do not know if it is related to your current problem.
101
+
102
+ ### Your Current Symptoms ###
103
+ This is how you have been feeling. Base all your answers on these facts. Do not invent new symptoms.
104
+ ---
105
+ {symptoms}
106
+ ---
107
+
108
+ ### Critical Rules of Roleplay ###
109
+ - **Handle Optional Symptoms:** Your symptom list may contain optional symptoms (e.g., "I might have..."). Before the interview starts, you MUST silently decide 'yes' or 'no' for each optional symptom. A 50% chance for each is a good approach. Remember your choices and be consistent throughout the entire interview.
110
+ - **Act as the Patient:** Your entire response must be ONLY what the patient would say. Do not add external comments, notes, or clarifications (e.g., do not write "[I am now describing the headache]").
111
+ - **No Guessing:** You DO NOT know your diagnosis or the name of your condition. Do not guess or speculate about it.
112
+ - **Answer Only What Is Asked:** Do not volunteer your entire list of symptoms at once. Respond naturally to the specific question asked by the interviewer.
113
+
114
+ ### Your previous health history ###
115
+ {patient["ehr_summary"]}
116
+
117
+ ### Your previous answers ###
118
+ ---
119
+ {previous_answers}
120
+ ---
121
+ """
122
+
123
+ def interviewer_roleplay_instructions(patient_name):
124
+ # Returns detailed instructions for the LLM to roleplay as the interviewer/clinical assistant
125
+ return f"""
126
+ SYSTEM INSTRUCTION: Always think silently before responding.
127
+
128
+ ### Persona & Objective ###
129
+ You are a clinical assistant. Your objective is to interview a patient, {patient_name.split(" ")[0]}, and build a comprehensive and detailed report for their PCP.
130
+
131
+ ### Critical Rules ###
132
+ - **No Assessments:** You are NOT authorized to provide medical advice, diagnoses, or express any form of assessment to the patient.
133
+ - **Question Format:** Ask only ONE question at a time. Do not enumerate your questions.
134
+ - **Question Length:** Each question must be 20 words or less.
135
+ - **Question Limit:** You have a maximum of 20 questions.
136
+
137
+ ### Interview Strategy ###
138
+ - **Clinical Reasoning:** Based on the patient's responses and EHR, actively consider potential diagnoses.
139
+ - **Differentiate:** Formulate your questions strategically to help differentiate between these possibilities.
140
+ - **Probe Critical Clues:** When a patient's answer reveals a high-yield clue (e.g., recent travel, a key symptom like rapid breathing), ask one or two immediate follow-up questions to explore that clue in detail before moving to a new line of questioning.
141
+ - **Exhaustive Inquiry:** Your goal is to be thorough. Do not end the interview early. Use your full allowance of questions to explore the severity, character, timing, and context of all reported symptoms.
142
+ - **Fact-Finding:** Focus exclusively on gathering specific, objective information.
143
+
144
+ ### Context: Patient EHR ###
145
+ You MUST use the following EHR summary to inform and adapt your questioning. Do not ask for information already present here unless you need to clarify it.
146
+ EHR RECORD START
147
+ {get_ehr_summary_per_patient(patient_name)}
148
+ EHR RECORD END
149
+
150
+ ### Procedure ###
151
+ 1. **Start Interview:** Begin the conversation with this exact opening: "Thank you for booking an appointment with your primary doctor. I am an assistant here to ask a few questions to help your doctor prepare for your visit. To start, what is your main concern today?"
152
+ 2. **Conduct Interview:** Proceed with your questioning, following all rules and strategies above.
153
+ 3. **End Interview:** You MUST continue the interview until you have asked 20 questions OR the patient is unable to provide more information. When the interview is complete, you MUST conclude by printing this exact phrase: "Thank you for answering my questions. I have everything needed to prepare a report for your visit. End interview."
154
+ """
155
+
156
+ def report_writer_instructions(patient_name: str) -> str:
157
+ """
158
+ Generates the system prompt with clear instructions, role, and constraints for the LLM.
159
+ """
160
+ ehr_summary = get_ehr_summary_per_patient(patient_name)
161
+
162
+ return f"""<role>
163
+ You are a highly skilled medical assistant with expertise in clinical documentation.
164
+ </role>
165
+
166
+ <task>
167
+ Your task is to generate a concise yet clinically comprehensive medical intake report for a Primary Care Physician (PCP). This report will be based on a patient interview and their Electronic Health Record (EHR).
168
+ </task>
169
+
170
+ <guiding_principles>
171
+ To ensure the report is both brief and useful, you MUST adhere to the following two principles:
172
+
173
+ 1. **Principle of Brevity**:
174
+ * **Use Professional Language**: Rephrase conversational patient language into standard medical terminology (e.g., "it hurts when I breathe deep" becomes "reports pleuritic chest pain").
175
+ * **Omit Filler**: Do not include conversational filler, pleasantries, or repeated phrases from the interview.
176
+
177
+ 2. **Principle of Clinical Relevance (What is "Critical Information")**:
178
+ * **Prioritize the HPI**: The History of Present Illness is the most important section. Include key details like onset, duration, quality of symptoms, severity, timing, and modifying factors.
179
+ * **Include "Pertinent Negatives"**: This is critical. You MUST include symptoms the patient **denies** if they are relevant to the chief complaint. For example, if the chief complaint is a cough, denying "fever" or "shortness of breath" is critical information and must be included in the report.
180
+ * **Filter History**: Only include historical EHR data that could reasonably be related to the patient's current complaint. For a cough, a history of asthma or smoking is relevant; a past appendectomy is likely not.
181
+ </guiding_principles>
182
+
183
+ <instructions>
184
+ 1. **Primary Objective**: Synthesize the interview and EHR into a clear, organized report, strictly following the <guiding_principles>.
185
+ 2. **Content Focus**:
186
+ * **Main Concern**: State the patient's chief complaint.
187
+ * **Symptoms**: Detail the History of Present Illness, including pertinent negatives.
188
+ * **Relevant History**: Include only relevant information from the EHR.
189
+ 3. **Constraints**:
190
+ * **Factual Information Only**: Report only the facts. No assumptions.
191
+ * **No Diagnosis or Assessment**: Do not provide a diagnosis.
192
+ </instructions>
193
+
194
+ <ehr_data>
195
+ <ehr_record_start>
196
+ {ehr_summary}
197
+ <ehr_record_end>
198
+ </ehr_data>
199
+
200
+ <output_format>
201
+ The final output MUST be ONLY the full, updated Markdown medical report.
202
+ DO NOT include any introductory phrases, explanations, or any text other than the report itself.
203
+ </output_format>"""
204
+
205
+ def write_report(patient_name: str, interview_text: str, existing_report: str = None) -> str:
206
+ """
207
+ Constructs the full prompt, sends it to the LLM, and processes the response.
208
+ This function handles both the initial creation and subsequent updates of a report.
209
+ """
210
+ # Generate the detailed system instructions
211
+ instructions = report_writer_instructions(patient_name)
212
+
213
+ # If no existing report is provided, load the default template from report_template.txt.
214
+ if not existing_report:
215
+ with open("report_template.txt", 'r') as f:
216
+ existing_report = f.read()
217
+
218
+ # Construct the user prompt with the specific task and data
219
+ user_prompt = f"""<interview_start>
220
+ {interview_text}
221
+ <interview_end>
222
+
223
+ <previous_report>
224
+ {existing_report}
225
+ </previous_report>
226
+
227
+ <task_instructions>
228
+ Update the report in the `<previous_report>` tags using the new information from the `<interview_start>` section.
229
+ 1. **Integrate New Information**: Add new symptoms or details from the interview into the appropriate sections.
230
+ 2. **Update Existing Information**: If the interview provides more current information, replace outdated details.
231
+ 3. **Maintain Conciseness**: Remove any information that is no longer relevant.
232
+ 4. **Preserve Critical Data**: Do not remove essential historical data (like Hypertension) that could be vital for diagnosis, but ensure it is presented concisely under "Relevant Medical History".
233
+ 5. **Adhere to Section Titles**: Do not change the existing Markdown section titles.
234
+ </task_instructions>
235
+
236
+ Now, generate the complete and updated medical report based on all system and user instructions. Your response should be the Markdown text of the report only."""
237
+
238
+ # Assemble the full message payload for the LLM API
239
+ messages = [
240
+ {
241
+ "role": "system",
242
+ "content": [{"type": "text", "text": instructions}]
243
+ },
244
+ {
245
+ "role": "user",
246
+ "content": [{"type": "text", "text": user_prompt}]
247
+ }
248
+ ]
249
+
250
+ report = medgemma_get_text_response(messages)
251
+ cleaned_report = re.sub(r'<unused94>.*?<unused95>', '', report, flags=re.DOTALL)  # strip <unused94>...<unused95> thinking spans
252
+ cleaned_report = cleaned_report.strip()
253
+
254
+ # The LLM sometimes wraps the markdown report in a markdown code block.
255
+ # This regex checks if the entire string is a code block and extracts the content.
256
+ match = re.match(r'^\s*```(?:markdown)?\s*(.*?)\s*```\s*$', cleaned_report, re.DOTALL | re.IGNORECASE)
257
+ if match:
258
+ cleaned_report = match.group(1)
259
+
260
+ return cleaned_report.strip()
261
+
262
+
263
+
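To illustrate the fence-stripping step in write_report above, here is an editor's sketch with a hypothetical wrapped response (same regex as in the function):

import re

raw = "```markdown\n# Pre-visit Report\n**Main Concern:** Dry cough for three days.\n```"
match = re.match(r'^\s*```(?:markdown)?\s*(.*?)\s*```\s*$', raw, re.DOTALL | re.IGNORECASE)
cleaned = match.group(1) if match else raw.strip()
# cleaned == "# Pre-visit Report\n**Main Concern:** Dry cough for three days."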
264
+ def stream_interview(patient_name, condition_name):
265
+ print(f"Starting interview simulation for patient: {patient_name}, condition: {condition_name}")
266
+ # Prepare roleplay instructions and initial dialog (using existing helper functions)
267
+ interviewer_instructions = interviewer_roleplay_instructions(patient_name)
268
+
269
+ # Determine voices for TTS
270
+ patient = get_patient(patient_name)
271
+ patient_voice = patient["voice"]
272
+
273
+ dialog = [
274
+ {
275
+ "role": "system",
276
+ "content": [
277
+ {
278
+ "type": "text",
279
+ "text": interviewer_instructions
280
+ }
281
+ ]
282
+ },
283
+ {
284
+ "role": "user",
285
+ "content": [
286
+ {
287
+ "type": "text",
288
+ "text": "start interview"
289
+ }
290
+ ]
291
+ }
292
+ ]
293
+
294
+ write_report_text = ""
295
+ full_interview_q_a = ""
296
+ number_of_questions_limit = 30  # hard cap on loop iterations; allows headroom beyond the 20-question limit in the interviewer instructions
297
+ for i in range(number_of_questions_limit):
298
+ # Get the next interviewer question from MedGemma
299
+ interviewer_question_text = medgemma_get_text_response(
300
+ messages=dialog,
301
+ temperature=0.1,
302
+ max_tokens=2048,
303
+ stream=False
304
+ )
305
+ # Process optional "thinking" text (if present in the LLM output)
306
+ thinking_search = re.search('<unused94>(.+?)<unused95>', interviewer_question_text, re.DOTALL)
307
+ if thinking_search:
308
+ thinking_text = thinking_search.group(1)
309
+ interviewer_question_text = interviewer_question_text.replace(f'<unused94>{thinking_text}<unused95>', "")
310
+ if i == 0:
311
+ # Only yield the "thinking" summary for the first question
312
+ thinking_text = gemini_get_text_response(
313
+ f"""Provide a summary of up to 100 words containing only the reasoning and planning from this text,
314
+ do not include instructions, use first person: {thinking_text}""")
315
+ yield json.dumps({
316
+ "speaker": "interviewer thinking",
317
+ "text": thinking_text
318
+ })
319
+
320
+ # Clean up the text for TTS and display
321
+ clean_interviewer_text = interviewer_question_text.replace("End interview.", "").strip()
322
+
323
+ # Generate audio for the interviewer's question using Gemini TTS
324
+ audio_data, mime_type = synthesize_gemini_tts(f"Speak in a slightly upbeat and brisk manner, as a friendly clinician: {clean_interviewer_text}", INTERVIEWER_VOICE)
325
+ audio_b64 = None
326
+ if audio_data and mime_type:
327
+ audio_b64 = f"data:{mime_type};base64,{base64.b64encode(audio_data).decode('utf-8')}"
328
+
329
+ # Yield interviewer message (text and audio)
330
+ yield json.dumps({
331
+ "speaker": "interviewer",
332
+ "text": clean_interviewer_text,
333
+ "audio": audio_b64
334
+ })
335
+ dialog.append({
336
+ "role": "assistant",
337
+ "content": [{
338
+ "type": "text",
339
+ "text": interviewer_question_text
340
+ }]
341
+ })
342
+ if "End interview" in interviewer_question_text:
343
+ # End the interview loop if the LLM signals completion
344
+ break
345
+
346
+ # Get the patient's response from Gemini (roleplay LLM)
347
+ patient_response_text = gemini_get_text_response(f"""
348
+ {patient_roleplay_instructions(patient_name, condition_name, full_interview_q_a)}\n\n
349
+ Question: {interviewer_question_text}""")
350
+
351
+ # Generate audio for the patient's response
352
+ audio_data, mime_type = synthesize_gemini_tts(f"Say this in faster speed, using a sick tone: {patient_response_text}", patient_voice)
353
+ audio_b64 = None
354
+ if audio_data and mime_type:
355
+ audio_b64 = f"data:{mime_type};base64,{base64.b64encode(audio_data).decode('utf-8')}"
356
+
357
+ # Yield patient message (text and audio)
358
+ yield json.dumps({
359
+ "speaker": "patient",
360
+ "text": patient_response_text,
361
+ "audio": audio_b64
362
+ })
363
+ dialog.append({
364
+ "role": "user",
365
+ "content": [{
366
+ "type": "text",
367
+ "text": patient_response_text
368
+ }]
369
+ })
370
+ # Track the full Q&A for context in future LLM calls
371
+ most_recent_q_a = f"Q: {interviewer_question_text}\nA: {patient_response_text}\n"
372
+ full_interview_q_a_with_new_q_a = "PREVIOUS Q&A:\n" + full_interview_q_a + "\nNEW Q&A:\n" + most_recent_q_a
373
+ # Update the report after each Q&A
374
+ write_report_text = write_report(patient_name, full_interview_q_a_with_new_q_a, write_report_text)
375
+ full_interview_q_a += most_recent_q_a
376
+ yield json.dumps({
377
+ "speaker": "report",
378
+ "text": write_report_text
379
+ })
380
+
381
+ print(f"""Interview simulation completed for patient: {patient_name}, condition: {condition_name}.
382
+ Patient profile used:
383
+ {patient_roleplay_instructions(patient_name, condition_name, full_interview_q_a)}""")
384
+ # Add this at the end to signal end of stream
385
+ yield json.dumps({"event": "end"})
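Finally, an editor's sketch of driving the streaming generator above outside the web app; the patient and condition names below are placeholders and must match entries in patients_and_conditions.json and symptoms.json:

import json
from interview_simulator import stream_interview

final_report = ""
for chunk in stream_interview("Alex", "Pneumonia"):  # placeholder names
    event = json.loads(chunk)
    if event.get("event") == "end":
        break
    if event["speaker"] == "report":
        final_report = event["text"]  # keep only the latest report revision
    else:
        print(f'{event["speaker"]}: {event["text"][:80]}')

print(final_report)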