Remove voice chat, conversation history, and settings UI sections

Strip unused UI components: voice chat accordion, conversation history
panel, and settings accordion. Removes associated event handlers and
helper functions.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
cora-start
PeninsulaInd 2026-02-16 18:01:57 -06:00
parent 712829a610
commit 388c800bce
1 changed file with 2 additions and 63 deletions

View File

@@ -3,7 +3,6 @@
from __future__ import annotations
import logging
import tempfile
from pathlib import Path
from typing import TYPE_CHECKING
@@ -91,30 +90,6 @@ def create_ui(agent: Agent, config: Config, llm: LLMAdapter,
sources=["upload", "microphone"],
)
# -- Voice Chat Mode --
with gr.Accordion("Voice Chat", open=False, elem_classes=["contain"]):
gr.Markdown("Record audio and get a spoken response.")
voice_input = gr.Audio(sources=["microphone"], type="filepath", label="Speak")
voice_output = gr.Audio(type="filepath", label="Response", autoplay=True)
voice_status = gr.Textbox(label="Transcript", interactive=False)
# -- Accordion sections --
with gr.Accordion("Conversation History", open=False, elem_classes=["contain"]):
conv_list = gr.Dataframe(
headers=["ID", "Title", "Last Updated"],
label="Past Conversations",
interactive=False,
)
load_conv_btn = gr.Button("Load Selected")
with gr.Accordion("Settings", open=False, elem_classes=["contain"]):
gr.Markdown(
"Edit `identity/SOUL.md` to change the agent's personality.\n\n"
"Edit `identity/USER.md` to update your profile.\n\n"
"Edit `config.yaml` for advanced settings."
)
# -- Event handlers --
@@ -129,11 +104,7 @@ def create_ui(agent: Agent, config: Config, llm: LLMAdapter,
def on_new_chat():
agent.new_conversation()
return [], _load_conversations()
def _load_conversations():
convs = agent.db.list_conversations()
return [[c["id"], c["title"], c["updated_at"][:19]] for c in convs]
return []
def on_user_message(message, chat_history):
chat_history = chat_history or []
@@ -193,29 +164,6 @@ def create_ui(agent: Agent, config: Config, llm: LLMAdapter,
chat_history = chat_history + [{"role": "assistant", "content": f"Error: {e}"}]
yield chat_history, gr.update(value=None)
def on_voice_chat(audio_path):
"""Handle voice chat: transcribe -> respond -> TTS."""
if not audio_path:
return None, "No audio received."
try:
from .media import transcribe_audio, text_to_speech
# Transcribe
transcript = transcribe_audio(audio_path)
if not transcript:
return None, "Could not transcribe audio."
# Get response
response = agent.respond_to_prompt(transcript)
# TTS
output_path = config.data_dir / "generated" / "voice_response.mp3"
text_to_speech(response, output_path)
return str(output_path), f"You said: {transcript}\n\nResponse: {response}"
except Exception as e:
return None, f"Voice chat error: {e}"
def poll_pipeline_status():
"""Poll the DB for pipeline progress updates."""
status = agent.db.kv_get("pipeline:status")
@@ -243,7 +191,7 @@ def create_ui(agent: Agent, config: Config, llm: LLMAdapter,
model_dropdown.change(on_model_change, [model_dropdown], None)
refresh_btn.click(on_refresh_models, None, [model_dropdown])
new_chat_btn.click(on_new_chat, None, [chatbot, conv_list])
new_chat_btn.click(on_new_chat, None, [chatbot])
msg_input.submit(
on_user_message,
@@ -251,12 +199,6 @@ def create_ui(agent: Agent, config: Config, llm: LLMAdapter,
[chatbot, msg_input],
)
voice_input.stop_recording(
on_voice_chat,
[voice_input],
[voice_output, voice_status],
)
# Pipeline status polling timer (every 3 seconds)
status_timer = gr.Timer(3)
status_timer.tick(poll_pipeline_status, None, [pipeline_status])
@@ -267,7 +209,4 @@ def create_ui(agent: Agent, config: Config, llm: LLMAdapter,
timer = gr.Timer(10)
timer.tick(poll_notifications, None, [notification_display])
# Load conversation list on app start
app.load(_load_conversations, None, [conv_list])
return app, _CSS