feat: Mise à jour à la version 1.1.14 et refonte du support Audio/TTS pour le Web

- Mise à jour de la version de l'application à `1.1.14` dans `app_version.dart` et `version.json`.
- Migration de `AudioFeedbackService` vers l'API Web native (`dart:js_interop`, `package:web`) pour corriger les problèmes d'autoplay et supprimer la dépendance `audioplayers`.
- Réécriture de `TextToSpeechService` utilisant `window.speechSynthesis` en remplacement de `flutter_tts` pour une meilleure compatibilité Web (notamment sous Linux).
- Suppression des dépendances obsolètes `audioplayers` et `flutter_tts` du `pubspec.yaml`.
- Ajout d'une gestion de file d'attente (`_scanQueue`) dans `EventPreparationPage` pour traiter les scans de codes-barres de manière séquentielle.
- Intégration d'un bouton de diagnostic (`AudioDiagnosticButton`) pour tester manuellement l'audio et la synthèse vocale.
- Ajout d'un script de test JavaScript `test_audio_tts.js` pour faciliter le débogage dans la console du navigateur.
- Ajout de directives de style et d'architecture Dart/Flutter dans `.github/agents/`.
This commit is contained in:
ElPoyo
2026-03-08 19:51:13 +01:00
parent 6d320bedc9
commit bc93f3fa9a
10 changed files with 1027 additions and 108 deletions

View File

@@ -1,6 +1,6 @@
/// Configuration de la version de l'application
class AppVersion {
static const String version = '1.1.7';
static const String version = '1.1.14';
/// Retourne la version complète de l'application
static String get fullVersion => 'v$version';

View File

@@ -1,54 +1,128 @@
import 'package:flutter/foundation.dart' show kIsWeb;
import 'package:flutter/services.dart';
import 'package:audioplayers/audioplayers.dart';
import 'dart:js_interop';
import 'package:web/web.dart' as web;
import 'package:em2rp/utils/debug_log.dart';
/// Service pour émettre des feedbacks sonores lors des interactions
/// Service pour émettre des feedbacks sonores lors des interactions (Web)
class AudioFeedbackService {
static final AudioPlayer _player = AudioPlayer();
static bool _isInitialized = false;
static bool _audioUnlocked = false;
/// Performs one-time setup of the Web audio service; later calls are no-ops.
static Future<void> _initialize() async {
  if (!_isInitialized) {
    try {
      DebugLog.info('[AudioFeedbackService] Initializing audio service for Web...');
      _isInitialized = true;
    } catch (e) {
      DebugLog.error('[AudioFeedbackService] Error initializing audio', e);
    }
  }
}
/// Unlocks browser audio playback; call from the first user interaction.
///
/// Browsers block programmatic audio until a user gesture (autoplay policy).
/// This plays a near-silent temporary clip to satisfy that policy and then
/// marks the service as unlocked. Returns immediately if already unlocked.
/// Failures are logged, never thrown.
static Future<void> unlockAudio() async {
if (_audioUnlocked) {
DebugLog.info('[AudioFeedbackService] Audio already unlocked');
return;
}
try {
if (!_isInitialized) await _initialize();
DebugLog.info('[AudioFeedbackService] Attempting to unlock audio...');
// Create a temporary audio element and play it at (almost) zero volume.
// NOTE(review): the doubled 'assets/assets/' prefix is presumably the
// Flutter web asset-serving path — confirm against the deployed bundle.
final tempAudio = web.HTMLAudioElement();
tempAudio.src = 'assets/assets/sounds/ok.mp3';
tempAudio.volume = 0.01; // Very low volume, but not exactly 0
tempAudio.preload = 'auto';
try {
await tempAudio.play().toDart;
// Give playback a moment to actually start before pausing.
await Future.delayed(const Duration(milliseconds: 100));
tempAudio.pause();
_audioUnlocked = true;
DebugLog.info('[AudioFeedbackService] ✓ Audio unlocked successfully');
} catch (e) {
// play() rejects when the call did not happen inside a user gesture.
DebugLog.warning('[AudioFeedbackService] ⚠ Could not unlock audio: $e');
DebugLog.warning('[AudioFeedbackService] User interaction may be required');
}
} catch (e) {
DebugLog.error('[AudioFeedbackService] Error unlocking audio', e);
}
}
/// Creates a fresh audio element and plays [assetPath] at [volume].
///
/// A new HTMLAudioElement is created on every call. If play() is rejected
/// and audio has not been unlocked yet, this attempts [unlockAudio] and
/// retries once with a second element. All errors are logged, never thrown.
static Future<void> _playSound(String assetPath, double volume) async {
try {
if (!_isInitialized) await _initialize();
DebugLog.info('[AudioFeedbackService] Attempting to play: $assetPath (volume: $volume)');
// Create a new audio element for each playback.
final audio = web.HTMLAudioElement();
audio.src = assetPath;
audio.volume = volume;
audio.preload = 'auto';
// Attach lifecycle listeners for debugging.
audio.onloadeddata = ((web.Event event) {
DebugLog.info('[AudioFeedbackService] Audio data loaded: $assetPath');
}.toJS);
audio.onerror = ((web.Event event) {
DebugLog.error('[AudioFeedbackService] ✗ Audio error for $assetPath: ${audio.error}');
}.toJS);
audio.onplay = ((web.Event event) {
DebugLog.info('[AudioFeedbackService] Audio started playing');
}.toJS);
audio.onended = ((web.Event event) {
DebugLog.info('[AudioFeedbackService] Audio finished playing');
}.toJS);
try {
// Attempt playback.
await audio.play().toDart;
DebugLog.info('[AudioFeedbackService] ✓ Sound played successfully');
} catch (e) {
DebugLog.error('[AudioFeedbackService] ✗ Play failed: $e');
// Likely an autoplay rejection: try to unlock, then retry once.
if (!_audioUnlocked) {
DebugLog.info('[AudioFeedbackService] Trying to unlock audio on error...');
_audioUnlocked = false; // Force the unlock attempt
await unlockAudio();
// Retry a single time after unlocking.
try {
final retryAudio = web.HTMLAudioElement();
retryAudio.src = assetPath;
retryAudio.volume = volume;
await retryAudio.play().toDart;
DebugLog.info('[AudioFeedbackService] ✓ Sound played on retry');
} catch (retryError) {
DebugLog.error('[AudioFeedbackService] ✗ Retry also failed: $retryError');
}
}
}
} catch (e) {
DebugLog.error('[AudioFeedbackService] Error in _playSound', e);
}
}
/// Jouer un son de succès
static Future<void> playSuccessBeep() async {
try {
if (kIsWeb) {
// Sur Web, utiliser le chemin absolu
await _player.play(UrlSource('assets/sounds/ok.mp3'));
} else {
// Sur mobile/desktop, utiliser AssetSource
await _player.play(AssetSource('sounds/ok.mp3'));
}
await HapticFeedback.lightImpact();
} catch (e) {
DebugLog.error('[AudioFeedbackService] Error playing success beep', e);
}
await _playSound('assets/assets/sounds/ok.mp3', 1.0);
}
/// Jouer un son d'erreur
static Future<void> playErrorBeep() async {
try {
if (kIsWeb) {
// Sur Web, utiliser le chemin absolu
await _player.play(UrlSource('assets/sounds/error.mp3'));
} else {
// Sur mobile/desktop, utiliser AssetSource
await _player.play(AssetSource('sounds/error.mp3'));
}
await HapticFeedback.heavyImpact();
} catch (e) {
DebugLog.error('[AudioFeedbackService] Error playing error beep', e);
}
await _playSound('assets/assets/sounds/error.mp3', 0.8);
}
/// Triggers a medium haptic vibration, if the platform supports it.
///
/// Best-effort: failures are logged and swallowed so a missing vibration
/// never interrupts the calling flow.
static Future<void> playHapticFeedback() async {
try {
await HapticFeedback.mediumImpact();
} catch (e) {
DebugLog.error('[AudioFeedbackService] Error playing haptic feedback', e);
}
}
/// Jouer un feedback complet (son + vibration)
/// Jouer un feedback complet (son uniquement, sans vibration)
static Future<void> playFullFeedback({bool isSuccess = true}) async {
if (isSuccess) {
await playSuccessBeep();
@@ -59,6 +133,12 @@ class AudioFeedbackService {
/// Nettoyer les ressources
static Future<void> dispose() async {
await _player.dispose();
try {
_isInitialized = false;
_audioUnlocked = false;
} catch (e) {
DebugLog.error('[AudioFeedbackService] Error disposing', e);
}
}
}

View File

@@ -1,60 +1,66 @@
import 'package:flutter_tts/flutter_tts.dart';
import 'dart:js_interop';
import 'package:web/web.dart' as web;
import 'package:em2rp/utils/debug_log.dart';
/// Service de synthèse vocale pour lire des textes à haute voix
/// Service de synthèse vocale pour lire des textes à haute voix (Web)
class TextToSpeechService {
static final FlutterTts _tts = FlutterTts();
static bool _isInitialized = false;
static bool _voicesLoaded = false;
static List<web.SpeechSynthesisVoice> _cachedVoices = [];
/// Initialiser le service TTS
static Future<void> initialize() async {
if (_isInitialized) return;
try {
await _tts.setLanguage('fr-FR');
await _tts.setSpeechRate(0.7); // Vitesse normale
await _tts.setVolume(1.0);
await _tts.setPitch(0.7); // Pitch plus bas pour une voix masculine
_isInitialized = true;
// Tenter de sélectionner une voix masculine si disponible
try {
final voices = await _tts.getVoices;
if (voices != null && voices is List) {
// Chercher une voix française masculine
final maleVoice = voices.firstWhere(
(voice) {
final voiceMap = voice as Map;
final name = voiceMap['name']?.toString().toLowerCase() ?? '';
final locale = voiceMap['locale']?.toString().toLowerCase() ?? '';
final synthesis = web.window.speechSynthesis;
// Rechercher des voix françaises masculines
return locale.startsWith('fr') &&
(name.contains('male') || name.contains('homme') ||
name.contains('thomas') || name.contains('paul'));
},
orElse: () => null,
);
// Essayer de charger les voix immédiatement
_cachedVoices = synthesis.getVoices().toDart;
if (maleVoice != null) {
final voiceMap = maleVoice as Map;
await _tts.setVoice({
'name': voiceMap['name'],
'locale': voiceMap['locale'],
});
DebugLog.info('[TextToSpeechService] Voix masculine sélectionnée: ${voiceMap['name']}');
}
}
} catch (e) {
DebugLog.info('[TextToSpeechService] Impossible de sélectionner une voix spécifique, utilisation de la voix par défaut');
if (_cachedVoices.isNotEmpty) {
_voicesLoaded = true;
DebugLog.info('[TextToSpeechService] Service initialized with ${_cachedVoices.length} voices');
return;
}
_isInitialized = true;
DebugLog.info('[TextToSpeechService] Service initialisé avec voix masculine');
// Sur certains navigateurs (Firefox notamment), les voix se chargent de manière asynchrone
DebugLog.info('[TextToSpeechService] Waiting for voices to load asynchronously...');
// Attendre l'événement voiceschanged (si supporté)
final voicesLoaded = await _waitForVoices(synthesis);
if (voicesLoaded) {
_cachedVoices = synthesis.getVoices().toDart;
_voicesLoaded = true;
DebugLog.info('[TextToSpeechService] ✓ Voices loaded asynchronously: ${_cachedVoices.length}');
} else {
DebugLog.warning('[TextToSpeechService] ⚠ No voices found after initialization');
}
} catch (e) {
DebugLog.error('[TextToSpeechService] Erreur lors de l\'initialisation', e);
}
}
/// Polls [synthesis] until its voice list is non-empty, with growing delays
/// (100 ms, 200 ms, ... 500 ms over five attempts).
///
/// Returns `true` as soon as at least one voice is reported, `false` when
/// all attempts are exhausted. Some browsers (notably Firefox) populate the
/// voice list asynchronously, hence the retry loop.
static Future<bool> _waitForVoices(web.SpeechSynthesis synthesis) async {
  var attempt = 0;
  while (attempt < 5) {
    // Back off a little longer on each retry.
    await Future.delayed(Duration(milliseconds: 100 * (attempt + 1)));
    if (synthesis.getVoices().toDart.isNotEmpty) {
      return true;
    }
    DebugLog.info('[TextToSpeechService] Attempt ${attempt + 1}/5: No voices yet');
    attempt++;
  }
  return false;
}
/// Lire un texte à haute voix
static Future<void> speak(String text) async {
if (!_isInitialized) {
@@ -62,12 +68,148 @@ class TextToSpeechService {
}
try {
// Arrêter toute lecture en cours
await _tts.stop();
final synthesis = web.window.speechSynthesis;
// Lire le nouveau texte
await _tts.speak(text);
DebugLog.info('[TextToSpeechService] Lecture: $text');
DebugLog.info('[TextToSpeechService] Speaking requested: "$text"');
// Arrêter toute lecture en cours
synthesis.cancel();
// Attendre un peu pour que le cancel soit effectif
await Future.delayed(const Duration(milliseconds: 50));
// Créer une nouvelle utterance
final utterance = web.SpeechSynthesisUtterance(text);
utterance.lang = 'fr-FR';
utterance.rate = 0.7;
utterance.pitch = 0.7;
utterance.volume = 1.0;
// Récupérer les voix (depuis le cache ou re-charger)
var voices = _cachedVoices;
// Si le cache est vide, essayer de recharger
if (voices.isEmpty) {
DebugLog.info('[TextToSpeechService] Cache empty, reloading voices...');
voices = synthesis.getVoices().toDart;
// Sur Firefox/Linux, les voix peuvent ne pas être disponibles immédiatement
if (voices.isEmpty && !_voicesLoaded) {
DebugLog.info('[TextToSpeechService] Waiting for voices with multiple attempts...');
// Essayer plusieurs fois avec des délais
for (int i = 0; i < 3; i++) {
await Future.delayed(Duration(milliseconds: 100 * (i + 1)));
voices = synthesis.getVoices().toDart;
if (voices.isNotEmpty) {
DebugLog.info('[TextToSpeechService] ✓ Voices loaded on attempt ${i + 1}');
break;
}
}
}
// Mettre à jour le cache
if (voices.isNotEmpty) {
_cachedVoices = voices;
_voicesLoaded = true;
}
}
DebugLog.info('[TextToSpeechService] Available voices: ${voices.length}');
if (voices.isNotEmpty) {
web.SpeechSynthesisVoice? selectedVoice;
// Lister TOUTES les voix françaises pour debug
final frenchVoices = <web.SpeechSynthesisVoice>[];
for (final voice in voices) {
final lang = voice.lang.toLowerCase();
if (lang.startsWith('fr')) {
frenchVoices.add(voice);
DebugLog.info('[TextToSpeechService] French: ${voice.name} (${voice.lang}) ${voice.localService ? 'LOCAL' : 'REMOTE'}');
}
}
if (frenchVoices.isEmpty) {
DebugLog.warning('[TextToSpeechService] ⚠ NO French voices found!');
DebugLog.info('[TextToSpeechService] Available languages:');
for (final voice in voices.take(5)) {
DebugLog.info('[TextToSpeechService] - ${voice.name} (${voice.lang})');
}
}
// Stratégie de sélection: préférer les voix LOCALES (plus fiables sur Linux)
for (final voice in frenchVoices) {
if (voice.localService) {
selectedVoice = voice;
DebugLog.info('[TextToSpeechService] ✓ Selected LOCAL French voice: ${voice.name}');
break;
}
}
// Si pas de voix locale, chercher une voix masculine
if (selectedVoice == null) {
for (final voice in frenchVoices) {
final name = voice.name.toLowerCase();
if (name.contains('male') ||
name.contains('homme') ||
name.contains('thomas') ||
name.contains('paul') ||
name.contains('bernard')) {
selectedVoice = voice;
DebugLog.info('[TextToSpeechService] Selected male voice: ${voice.name}');
break;
}
}
}
// Fallback: première voix française
selectedVoice ??= frenchVoices.isNotEmpty ? frenchVoices.first : null;
if (selectedVoice != null) {
utterance.voice = selectedVoice;
utterance.lang = selectedVoice.lang; // Utiliser la langue de la voix
DebugLog.info('[TextToSpeechService] Final voice: ${selectedVoice.name} (${selectedVoice.lang})');
} else {
DebugLog.warning('[TextToSpeechService] No French voice, using default with lang=fr-FR');
}
} else {
DebugLog.warning('[TextToSpeechService] ⚠ NO voices available at all!');
DebugLog.warning('[TextToSpeechService] On Linux: install speech-dispatcher and espeak-ng');
}
// Ajouter des événements pour le debug
utterance.onstart = (web.SpeechSynthesisEvent event) {
DebugLog.info('[TextToSpeechService] ✓ Speech started');
}.toJS;
utterance.onend = (web.SpeechSynthesisEvent event) {
DebugLog.info('[TextToSpeechService] ✓ Speech ended');
}.toJS;
utterance.onerror = (web.SpeechSynthesisErrorEvent event) {
DebugLog.error('[TextToSpeechService] ✗ Speech error: ${event.error}');
// Messages spécifiques pour aider au diagnostic
if (event.error == 'synthesis-failed') {
DebugLog.error('[TextToSpeechService] ⚠ SYNTHESIS FAILED - Common on Linux');
DebugLog.error('[TextToSpeechService] Possible causes:');
DebugLog.error('[TextToSpeechService] 1. speech-dispatcher not installed/running');
DebugLog.error('[TextToSpeechService] 2. espeak or espeak-ng not installed');
DebugLog.error('[TextToSpeechService] 3. No TTS engine configured');
DebugLog.error('[TextToSpeechService] Fix: sudo apt-get install speech-dispatcher espeak-ng');
DebugLog.error('[TextToSpeechService] Then restart browser');
} else if (event.error == 'network') {
DebugLog.error('[TextToSpeechService] Network error - online voice unavailable');
} else if (event.error == 'audio-busy') {
DebugLog.error('[TextToSpeechService] Audio system is busy');
}
}.toJS;
// Lire le texte
synthesis.speak(utterance);
DebugLog.info('[TextToSpeechService] Speech command sent');
} catch (e) {
DebugLog.error('[TextToSpeechService] Erreur lors de la lecture', e);
}
@@ -76,7 +218,7 @@ class TextToSpeechService {
/// Arrêter la lecture en cours
static Future<void> stop() async {
try {
await _tts.stop();
web.window.speechSynthesis.cancel();
} catch (e) {
DebugLog.error('[TextToSpeechService] Erreur lors de l\'arrêt', e);
}
@@ -85,8 +227,7 @@ class TextToSpeechService {
/// Vérifier si le service est en train de lire
static Future<bool> isSpeaking() async {
try {
// FlutterTts ne fournit pas directement cette info, on retourne false par défaut
return false;
return web.window.speechSynthesis.speaking;
} catch (e) {
return false;
}
@@ -95,10 +236,9 @@ class TextToSpeechService {
/// Nettoyer les ressources
static Future<void> dispose() async {
try {
await _tts.stop();
web.window.speechSynthesis.cancel();
} catch (e) {
DebugLog.error('[TextToSpeechService] Erreur lors du nettoyage', e);
}
}
}

View File

@@ -21,6 +21,7 @@ import 'package:em2rp/views/widgets/event_preparation/add_equipment_to_event_dia
import 'package:em2rp/utils/debug_log.dart';
import 'package:em2rp/views/widgets/equipment/missing_equipment_dialog.dart';
import 'package:em2rp/utils/colors.dart';
import 'package:em2rp/views/widgets/common/audio_diagnostic_button.dart';
/// Type d'étape de préparation
enum PreparationStep {
@@ -73,6 +74,10 @@ class _EventPreparationPageState extends State<EventPreparationPage> with Single
final TextEditingController _manualCodeController = TextEditingController();
final FocusNode _manualCodeFocusNode = FocusNode();
// 🆕 File d'attente pour traiter les scans séquentiellement
final List<String> _scanQueue = [];
bool _isProcessingQueue = false;
// Détermine l'étape actuelle selon le statut de l'événement
PreparationStep get _currentStep {
final prep = _currentEvent.preparationStatus ?? PreparationStatus.notStarted;
@@ -119,6 +124,9 @@ class _EventPreparationPageState extends State<EventPreparationPage> with Single
// Initialiser le service de synthèse vocale
TextToSpeechService.initialize();
// Initialiser et débloquer l'audio (pour éviter les problèmes d'autoplay)
AudioFeedbackService.unlockAudio();
// Vérification de sécurité et chargement après le premier frame
WidgetsBinding.instance.addPostFrameCallback((_) {
if (_isCurrentStepCompleted()) {
@@ -827,13 +835,33 @@ class _EventPreparationPageState extends State<EventPreparationPage> with Single
Future<void> _handleManualCodeEntry(String code) async {
if (code.trim().isEmpty) return;
await _handleScannedCode(code.trim());
// Ajouter le code à la file d'attente
_scanQueue.add(code.trim());
// Effacer le champ après traitement
// Effacer le champ immédiatement pour permettre le prochain scan
_manualCodeController.clear();
// Maintenir le focus sur le champ pour permettre une saisie continue
_manualCodeFocusNode.requestFocus();
// Démarrer le traitement de la file si pas déjà en cours
if (!_isProcessingQueue) {
_processQueue();
}
}
/// Drains the scan queue one code at a time.
///
/// Processes [_scanQueue] sequentially, awaiting [_handleScannedCode] for
/// each entry so scans never overlap. Codes appended while draining are
/// picked up by the same loop. Re-entrant calls return immediately while a
/// drain is already in progress.
Future<void> _processQueue() async {
  if (_isProcessingQueue) return;
  _isProcessingQueue = true;
  try {
    while (_scanQueue.isNotEmpty) {
      final code = _scanQueue.removeAt(0);
      await _handleScannedCode(code);
    }
  } finally {
    // BUGFIX: reset the flag even if a scan handler throws; otherwise the
    // queue would stay wedged and no further scans would ever be processed.
    _isProcessingQueue = false;
  }
}
/// Obtenir les quantités actuelles selon l'étape
@@ -1199,6 +1227,9 @@ class _EventPreparationPageState extends State<EventPreparationPage> with Single
appBar: AppBar(
title: Text(stepTitle),
backgroundColor: AppColors.bleuFonce,
actions: const [
AudioDiagnosticButton(),
],
),
body: Stack(
children: [

View File

@@ -0,0 +1,147 @@
import 'package:flutter/material.dart';
import 'package:web/web.dart' as web;
import 'package:em2rp/services/audio_feedback_service.dart';
import 'package:em2rp/services/text_to_speech_service.dart';
import 'package:em2rp/utils/debug_log.dart';
/// Diagnostic button for manually testing audio playback and text-to-speech.
///
/// Renders a popup menu (bug-report icon) with three actions: audio test,
/// TTS test, and both in sequence. Detailed traces go to [DebugLog]; a
/// SnackBar points the user at the browser console (F12) for results.
class AudioDiagnosticButton extends StatelessWidget {
const AudioDiagnosticButton({super.key});
/// Runs the audio test: unlocks audio, then plays the success and error
/// beeps with short pauses in between. Shows a green SnackBar on completion
/// or a red one if an exception is raised.
Future<void> _testAudio(BuildContext context) async {
try {
DebugLog.info('[AudioDiagnostic] ========== AUDIO TEST START ==========');
DebugLog.info('[AudioDiagnostic] User Agent: ${web.window.navigator.userAgent}');
DebugLog.info('[AudioDiagnostic] Platform: ${web.window.navigator.platform}');
// Unlock audio first (browsers require a user gesture for playback).
DebugLog.info('[AudioDiagnostic] Step 1: Unlocking audio...');
await AudioFeedbackService.unlockAudio();
await Future.delayed(const Duration(milliseconds: 500));
// Test the success sound.
DebugLog.info('[AudioDiagnostic] Step 2: Playing success beep...');
await AudioFeedbackService.playSuccessBeep();
await Future.delayed(const Duration(milliseconds: 1000));
// Test the error sound.
DebugLog.info('[AudioDiagnostic] Step 3: Playing error beep...');
await AudioFeedbackService.playErrorBeep();
DebugLog.info('[AudioDiagnostic] ========== AUDIO TEST END ==========');
// Guard: the widget may have been disposed during the awaits above.
if (context.mounted) {
ScaffoldMessenger.of(context).showSnackBar(
const SnackBar(
content: Text('Test audio terminé - Vérifiez la console (F12)'),
backgroundColor: Colors.green,
duration: Duration(seconds: 3),
),
);
}
} catch (e) {
DebugLog.error('[AudioDiagnostic] Error during audio test', e);
if (context.mounted) {
ScaffoldMessenger.of(context).showSnackBar(
SnackBar(
content: Text('Erreur audio: $e'),
backgroundColor: Colors.red,
),
);
}
}
}
/// Runs the text-to-speech test: initializes the TTS service and speaks a
/// short French test phrase. Outcome is reported via SnackBar; details are
/// written to the debug log.
Future<void> _testTTS(BuildContext context) async {
try {
DebugLog.info('[AudioDiagnostic] ========== TTS TEST START ==========');
DebugLog.info('[AudioDiagnostic] User Agent: ${web.window.navigator.userAgent}');
DebugLog.info('[AudioDiagnostic] Platform: ${web.window.navigator.platform}');
DebugLog.info('[AudioDiagnostic] Language: ${web.window.navigator.language}');
await TextToSpeechService.initialize();
await Future.delayed(const Duration(milliseconds: 500));
DebugLog.info('[AudioDiagnostic] Speaking test phrase...');
await TextToSpeechService.speak('Test de synthèse vocale. Un, deux, trois.');
DebugLog.info('[AudioDiagnostic] ========== TTS TEST END ==========');
// Guard: the widget may have been disposed during the awaits above.
if (context.mounted) {
ScaffoldMessenger.of(context).showSnackBar(
const SnackBar(
content: Text('Test TTS terminé - Vérifiez la console (F12)'),
backgroundColor: Colors.green,
duration: Duration(seconds: 3),
),
);
}
} catch (e) {
DebugLog.error('[AudioDiagnostic] Error during TTS test', e);
if (context.mounted) {
ScaffoldMessenger.of(context).showSnackBar(
SnackBar(
content: Text('Erreur TTS: $e'),
backgroundColor: Colors.red,
),
);
}
}
}
/// Builds the popup menu; each entry dispatches to the matching test above.
@override
Widget build(BuildContext context) {
return PopupMenuButton<String>(
icon: const Icon(Icons.bug_report, color: Colors.grey),
tooltip: 'Diagnostic Audio/TTS',
onSelected: (value) async {
switch (value) {
case 'audio':
await _testAudio(context);
break;
case 'tts':
await _testTTS(context);
break;
case 'both':
// Run both tests back-to-back with a pause between them.
await _testAudio(context);
await Future.delayed(const Duration(milliseconds: 1000));
await _testTTS(context);
break;
}
},
itemBuilder: (context) => [
const PopupMenuItem(
value: 'audio',
child: Row(
children: [
Icon(Icons.volume_up, size: 20),
SizedBox(width: 8),
Text('Test Audio'),
],
),
),
const PopupMenuItem(
value: 'tts',
child: Row(
children: [
Icon(Icons.record_voice_over, size: 20),
SizedBox(width: 8),
Text('Test TTS'),
],
),
),
const PopupMenuItem(
value: 'both',
child: Row(
children: [
Icon(Icons.play_circle, size: 20),
SizedBox(width: 8),
Text('Test Audio + TTS'),
],
),
),
],
);
}
}