Skip to content

Commit b45f587

Browse files
committed
add streamer_mode setting and write last wingman message to file
1 parent f09930a commit b45f587

8 files changed

+70
-23
lines changed

api/interface.py

+2-1
Original file line number | Diff line number | Diff line change
@@ -776,7 +776,8 @@ class SettingsConfig(BaseModel):
776776
voice_activation: VoiceActivationSettings
777777
wingman_pro: WingmanProSettings
778778
xvasynth: XVASynthSettings
779-
debug_mode: bool = False
779+
debug_mode: bool
780+
streamer_mode: bool
780781

781782

782783
class BenchmarkResult(BaseModel):

services/config_manager.py

+18
Original file line number | Diff line number | Diff line change
@@ -508,6 +508,24 @@ def get_wingmen_configs(self, config_dir: ConfigDirInfo):
508508
wingmen.append(wingman_file)
509509
return wingmen
510510

511+
def save_last_wingman_message(
512+
self,
513+
config_dir: ConfigDirInfo,
514+
wingman_file: WingmanConfigFileInfo,
515+
last_message: str,
516+
):
517+
message_file = wingman_file.file.replace(".yaml", ".last-message.txt")
518+
message_path = path.join(self.config_dir, config_dir.directory, message_file)
519+
try:
520+
with open(message_path, "w", encoding="utf-8") as file:
521+
file.write(last_message)
522+
return True
523+
except:
524+
self.printr.toast_error(
525+
f"Unable to save last message for Wingman '{wingman_file.name}'."
526+
)
527+
return False
528+
511529
def save_wingman_config(
512530
self,
513531
config_dir: ConfigDirInfo,

services/config_migration_service.py

+5-1
Original file line number | Diff line number | Diff line change
@@ -365,7 +365,11 @@ def migrate_settings(old: dict, new: dict) -> dict:
365365
self.log("- added new fasterwhisper settings and config")
366366

367367
old["voice_activation"]["stt_provider"] = "fasterwhisper"
368-
self.log("- set FasterWhisper as new default VA STT provider.")
368+
self.log("- set FasterWhisper as new default VA STT provider")
369+
370+
old["streamer_mode"] = False
371+
self.log("- added new property streamer_mode")
372+
369373
return old
370374

371375
def migrate_defaults(old: dict, new: dict) -> dict:

services/settings_service.py

+1
Original file line number | Diff line number | Diff line change
@@ -134,6 +134,7 @@ async def save_settings(self, settings: SettingsConfig):
134134
# rest
135135
self.config_manager.settings_config.wingman_pro = settings.wingman_pro
136136
self.config_manager.settings_config.debug_mode = settings.debug_mode
137+
self.config_manager.settings_config.streamer_mode = settings.streamer_mode
137138

138139
# save the config file
139140
self.config_manager.save_settings_config()

services/tower.py

+14
Original file line number | Diff line number | Diff line change
@@ -246,3 +246,17 @@ def save_wingman(self, wingman_name: str):
246246
source=LogSource.SYSTEM,
247247
)
248248
return False
249+
250+
def save_last_message(self, wingman_name: str, last_message: str):
251+
for wingman in self.wingmen:
252+
if wingman.name == wingman_name:
253+
for wingman_file in self.config_manager.get_wingmen_configs(
254+
self.config_dir
255+
):
256+
if wingman_file.name == wingman_name:
257+
return self.config_manager.save_last_wingman_message(
258+
config_dir=self.config_dir,
259+
wingman_file=wingman_file,
260+
last_message=last_message,
261+
)
262+
return False

templates/configs/settings.yaml

+1
Original file line number | Diff line number | Diff line change
@@ -1,5 +1,6 @@
11
debug_mode: false
22
audio: {}
3+
streamer_mode: false
34
voice_activation:
45
enabled: false
56
mute_toggle_key: "shift+x"

wingmen/open_ai_wingman.py

+25-20
Original file line number | Diff line number | Diff line change
@@ -66,17 +66,17 @@ def __init__(self, *args, **kwargs):
6666

6767
# validate will set these:
6868
self.openai: OpenAi | None = None
69-
self.mistral: OpenAi | None = None
70-
self.groq: OpenAi | None = None
71-
self.cerebras: OpenAi | None = None
72-
self.openrouter: OpenAi | None = None
69+
self.mistral: OpenAi | None = None
70+
self.groq: OpenAi | None = None
71+
self.cerebras: OpenAi | None = None
72+
self.openrouter: OpenAi | None = None
7373
self.openrouter_model_supports_tools = False
74-
self.local_llm: OpenAi | None = None
75-
self.openai_azure: OpenAiAzure | None = None
76-
self.elevenlabs: ElevenLabs | None = None
77-
self.wingman_pro: WingmanPro | None = None
78-
self.google: GoogleGenAI | None = None
79-
self.perplexity: OpenAi | None = None
74+
self.local_llm: OpenAi | None = None
75+
self.openai_azure: OpenAiAzure | None = None
76+
self.elevenlabs: ElevenLabs | None = None
77+
self.wingman_pro: WingmanPro | None = None
78+
self.google: GoogleGenAI | None = None
79+
self.perplexity: OpenAi | None = None
8080

8181
# tool queue
8282
self.pending_tool_calls = []
@@ -556,14 +556,16 @@ async def _transcribe(self, audio_input_wav: str) -> str | None:
556556
)
557557
printr.print(traceback.format_exc(), color=LogType.ERROR, server_only=True)
558558

559-
if not transcript:
560-
return None
561-
562-
# Wingman Pro might returns a serialized dict instead of a real Azure Speech transcription object
563-
if isinstance(transcript, dict):
564-
return transcript.get("_text")
559+
result = None
560+
if transcript:
561+
# Wingman Pro might returns a serialized dict instead of a real Azure Speech transcription object
562+
result = (
563+
transcript.get("_text")
564+
if isinstance(transcript, dict)
565+
else transcript.text
566+
)
565567

566-
return transcript.text
568+
return result
567569

568570
async def _get_response_for_transcript(
569571
self, transcript: str, benchmark: Benchmark
@@ -588,7 +590,9 @@ async def _get_response_for_transcript(
588590
if instant_response:
589591
await self.add_assistant_message(instant_response)
590592
benchmark.finish_snapshot()
591-
if instant_response == '.': # thats for the "The UI should not give a response" option in commands
593+
if (
594+
instant_response == "."
595+
): # thats for the "The UI should not give a response" option in commands
592596
instant_response = None
593597
return instant_response, instant_response, None, True
594598
benchmark.finish_snapshot()
@@ -896,7 +900,8 @@ async def add_forced_assistant_command_calls(self, commands: list[CommandConfig]
896900
if (
897901
self.config.features.conversation_provider == ConversationProvider.OPENAI
898902
) or (
899-
self.config.features.conversation_provider == ConversationProvider.WINGMAN_PRO
903+
self.config.features.conversation_provider
904+
== ConversationProvider.WINGMAN_PRO
900905
and "gpt" in self.config.wingman_pro.conversation_deployment.lower()
901906
):
902907
# generate tool calls in openai style
@@ -1312,7 +1317,7 @@ async def execute_command_by_function_call(
13121317
traceback.format_exc(), color=LogType.ERROR, server_only=True
13131318
)
13141319
function_response = (
1315-
"ERROR DURING PROCESSING" # hints to AI that there was an error
1320+
"ERROR DURING PROCESSING" # hints to AI that there was an error
13161321
)
13171322
instant_response = None
13181323
finally:

wingmen/wingman.py

+4-1
Original file line number | Diff line number | Diff line change
@@ -329,8 +329,11 @@ async def process(self, audio_input_wav: str = None, transcript: str = None):
329329
benchmark_result=benchmark_llm.finish(),
330330
)
331331

332-
# the last step in the chain. You'll probably want to play the response to the user as audio using a TTS provider or mechanism of your choice.
333332
if process_result:
333+
if self.settings.streamer_mode:
334+
self.tower.save_last_message(self.name, process_result)
335+
336+
# the last step in the chain. You'll probably want to play the response to the user as audio using a TTS provider or mechanism of your choice.
334337
await self.play_to_user(str(process_result), not interrupt)
335338
except Exception as e:
336339
await printr.print_async(

0 commit comments

Comments (0)