# Sanad/core/brain.py
"""The Brain — central orchestrator for the Sanad robot assistant.
Responsibilities:
1. Owns the SkillRegistry, resolves callbacks at runtime.
2. Coordinates voice → motion → vision pipelines.
3. Executes skills (audio + motion + callback) with configurable sync modes.
4. Exposes a thread-safe API consumed by the FastAPI dashboard.
"""
from __future__ import annotations
import asyncio
import importlib
import time
from pathlib import Path
from typing import Any, Callable
from Project.Sanad.config import (
AUDIO_RECORDINGS_DIR,
MOTIONS_DIR,
MOTION_RECORDINGS_DIR,
)
from Project.Sanad.core.event_bus import bus
from Project.Sanad.core.logger import get_logger
from Project.Sanad.core.skill_registry import Skill, SkillRegistry
log = get_logger("brain")

# Whitelist of module path prefixes allowed for skill callbacks.
# Prevents arbitrary code execution via dashboard-editable skills.json.
from Project.Sanad.core.config_loader import section as _cfg_section

# Fallback prefixes used when the brain config section defines none.
_DEFAULT_CALLBACK_PREFIXES = [
    "Project.Sanad.motion.",
    "Project.Sanad.voice.",
    "motion.",
    "voice.",
]
_BRAIN_CFG = _cfg_section("core", "brain")
ALLOWED_CALLBACK_PREFIXES = tuple(
    _BRAIN_CFG.get("allowed_callback_prefixes", _DEFAULT_CALLBACK_PREFIXES)
)
class Brain:
    """Singleton-style manager that bridges all subsystems.

    Sub-modules (voice, audio, arm, macro recorder/player, live voice) are
    injected after construction via the ``attach_*`` methods so their imports
    stay lazy; the Brain only coordinates them and exposes an async API.
    """

    def __init__(self):
        self.registry = SkillRegistry()
        # Serialises access to _running_skill so only one skill runs at a time.
        self._lock = asyncio.Lock()
        # Sub-modules are injected after construction so imports stay lazy.
        self._voice = None       # voice.gemini_client.GeminiVoiceClient
        self._audio_mgr = None   # voice.audio_manager.AudioManager
        self._arm = None         # motion.arm_controller.ArmController
        self._macro_rec = None   # motion.macro_recorder.MacroRecorder
        self._macro_play = None  # motion.macro_player.MacroPlayer
        self._live_voice = None  # voice.live_voice_loop.LiveVoiceLoop
        self.gestural_speaking = False  # toggle: move while Gemini speaks
        self._running_skill: str | None = None

    # -- dependency injection --
    def attach_voice(self, client):
        """Attach the Gemini voice client."""
        self._voice = client
        log.info("Voice client attached")

    def attach_audio_manager(self, mgr):
        """Attach the audio playback manager."""
        self._audio_mgr = mgr
        log.info("Audio manager attached")

    def attach_arm(self, arm):
        """Attach the robot arm controller."""
        self._arm = arm
        log.info("Arm controller attached")

    def attach_macro_recorder(self, rec):
        """Attach the macro recorder."""
        self._macro_rec = rec

    def attach_macro_player(self, player):
        """Attach the macro player."""
        self._macro_play = player

    def attach_live_voice(self, lv):
        """Attach the live voice loop."""
        self._live_voice = lv
        log.info("LiveVoiceLoop attached")

    # -- callback resolution --
    def _resolve_callback(self, callback_str: str) -> Callable | None:
        """Resolve 'module.submodule:function_name' → callable.

        SECURITY: only modules under ALLOWED_CALLBACK_PREFIXES may be imported.
        Skill JSON is dashboard-editable and otherwise an arbitrary-import RCE.

        Examples:
            "Project.Sanad.motion.arm_controller:wave_hand"
            "motion.arm_controller:wave_hand"

        Returns None (after logging) for empty, malformed, non-whitelisted or
        unresolvable callbacks — callers treat None as "no callback".
        """
        if not callback_str:
            return None
        if ":" not in callback_str:
            log.error("Invalid callback (missing ':'): %s", callback_str)
            return None
        module_path, func_name = callback_str.rsplit(":", 1)
        # A module may match a prefix or equal the prefix sans trailing dot
        # (e.g. "motion." also whitelists the bare module "motion").
        allowed = any(
            module_path.startswith(prefix) or module_path == prefix.rstrip(".")
            for prefix in ALLOWED_CALLBACK_PREFIXES
        )
        if not allowed:
            log.error(
                "Callback %s rejected — module '%s' not in whitelist",
                callback_str, module_path,
            )
            return None
        try:
            mod = importlib.import_module(module_path)
            return getattr(mod, func_name)
        except Exception:
            log.exception("Cannot resolve callback '%s'", callback_str)
            return None

    # -- skill execution --
    async def execute_skill(self, skill_id: str) -> dict[str, Any]:
        """Run a skill: play audio + execute motion + fire callback.

        Raises:
            KeyError: unknown skill id.
            RuntimeError: skill is disabled, or another skill is running.

        Returns:
            Result dict with "ok", optional "error", per-stage outcomes
            and "elapsed_sec". Stage failures are captured, not raised.
        """
        skill = self.registry.get(skill_id)
        if skill is None:
            raise KeyError(f"Skill not found: {skill_id}")
        if not skill.enabled:
            raise RuntimeError(f"Skill '{skill_id}' is disabled.")
        async with self._lock:
            if self._running_skill:
                raise RuntimeError(f"Skill '{self._running_skill}' is already running.")
            self._running_skill = skill_id
        t0 = time.monotonic()
        result: dict[str, Any] = {"skill_id": skill_id, "ok": True}
        try:
            await bus.emit("skill.started", skill_id=skill_id)
            # Validate required attachments before partial execution
            if skill.audio_file and self._audio_mgr is None:
                raise RuntimeError("AudioManager not attached but skill requires audio")
            if skill.motion_file and self._arm is None:
                raise RuntimeError("ArmController not attached but skill requires motion")
            if skill.sync_mode == "parallel":
                await self._exec_parallel(skill, result)
            elif skill.sync_mode == "audio_first":
                await self._exec_audio_first(skill, result)
            elif skill.sync_mode == "motion_first":
                await self._exec_motion_first(skill, result)
            else:
                # Unknown sync_mode values fall back to parallel execution.
                await self._exec_parallel(skill, result)
            # Fire callback — run blocking callbacks in a thread to avoid stalling the loop
            cb = self._resolve_callback(skill.callback)
            if cb is not None:
                if asyncio.iscoroutinefunction(cb):
                    cb_result = await cb()
                else:
                    cb_result = await asyncio.to_thread(cb)
                # FIX: only None means "no return value"; falsy-but-real
                # results (0, False, "") used to be misreported as "ok".
                result["callback_result"] = "ok" if cb_result is None else str(cb_result)
        except Exception as exc:
            result["ok"] = False
            result["error"] = str(exc)
            log.exception("Skill %s failed", skill_id)
        finally:
            result["elapsed_sec"] = round(time.monotonic() - t0, 3)
            async with self._lock:
                self._running_skill = None
            await bus.emit("skill.finished", skill_id=skill_id, result=result)
        return result

    async def cancel_skill(self) -> dict[str, Any]:
        """Cancel any running skill — sends cancel to arm controller.

        Best-effort: hardware-side failures are logged/ignored. The running
        skill's own finally-block clears ``_running_skill``.
        """
        cancelled = self._running_skill
        if self._arm is not None and hasattr(self._arm, "cancel"):
            try:
                self._arm.cancel()
            except Exception:
                log.exception("arm.cancel() failed")
        if self._audio_mgr is not None and hasattr(self._audio_mgr, "stop_playback"):
            try:
                self._audio_mgr.stop_playback()
            except Exception:
                # Best-effort stop; playback may already have ended.
                pass
        return {"cancelled": cancelled}

    async def _exec_parallel(self, skill: Skill, result: dict):
        """Play audio and motion concurrently; wait for both to finish."""
        tasks = []
        if skill.audio_file:
            tasks.append(asyncio.create_task(self._play_audio(skill.audio_file, result)))
        if skill.motion_file:
            tasks.append(asyncio.create_task(self._play_motion(skill.motion_file, result)))
        if not tasks:
            return
        try:
            await asyncio.gather(*tasks)
        except Exception:
            # FIX: don't leave the sibling task orphaned when one branch
            # raises — cancel and drain before propagating.
            for t in tasks:
                t.cancel()
            await asyncio.gather(*tasks, return_exceptions=True)
            raise

    async def _exec_audio_first(self, skill: Skill, result: dict):
        """Play audio to completion, then run the motion."""
        if skill.audio_file:
            await self._play_audio(skill.audio_file, result)
        if skill.motion_file:
            await self._play_motion(skill.motion_file, result)

    async def _exec_motion_first(self, skill: Skill, result: dict):
        """Run the motion to completion, then play audio."""
        if skill.motion_file:
            await self._play_motion(skill.motion_file, result)
        if skill.audio_file:
            await self._play_audio(skill.audio_file, result)

    async def _play_audio(self, audio_file: str, result: dict):
        """Play a WAV file (relative paths resolve under AUDIO_RECORDINGS_DIR).

        Missing file / missing manager are recorded in ``result``, not raised.
        """
        path = Path(audio_file)
        if not path.is_absolute():
            path = AUDIO_RECORDINGS_DIR / path
        if not path.exists():
            result["audio_error"] = f"File not found: {path}"
            log.warning("Audio file missing: %s", path)
            return
        if self._audio_mgr is not None:
            # play_wav is blocking — run in a worker thread.
            await asyncio.to_thread(self._audio_mgr.play_wav, path)
            result["audio_played"] = str(path)
        else:
            result["audio_error"] = "AudioManager not attached"

    async def _play_motion(self, motion_file: str, result: dict):
        """Replay a motion file (relative paths resolve under MOTIONS_DIR).

        Missing file / missing controller are recorded in ``result``, not raised.
        """
        path = Path(motion_file)
        if not path.is_absolute():
            path = MOTIONS_DIR / path
        if not path.exists():
            result["motion_error"] = f"File not found: {path}"
            log.warning("Motion file missing: %s", path)
            return
        if self._arm is not None:
            # replay_file is blocking — run in a worker thread.
            await asyncio.to_thread(self._arm.replay_file, str(path))
            result["motion_played"] = str(path)
        else:
            result["motion_error"] = "ArmController not attached"

    # -- macro recording --
    async def start_macro_recording(self, name: str) -> dict[str, Any]:
        """Start recording a named macro. Raises RuntimeError if unattached."""
        if self._macro_rec is None:
            raise RuntimeError("MacroRecorder not attached.")
        return await asyncio.to_thread(self._macro_rec.start, name)

    async def stop_macro_recording(self) -> dict[str, Any]:
        """Stop the active macro recording. Raises RuntimeError if unattached."""
        if self._macro_rec is None:
            raise RuntimeError("MacroRecorder not attached.")
        return await asyncio.to_thread(self._macro_rec.stop)

    async def play_macro(self, name: str) -> dict[str, Any]:
        """Replay a named macro. Raises RuntimeError if unattached."""
        if self._macro_play is None:
            raise RuntimeError("MacroPlayer not attached.")
        return await asyncio.to_thread(self._macro_play.play, name)

    # -- gestural speaking toggle --
    def set_gestural_speaking(self, enabled: bool):
        """Toggle arm gestures while Gemini is speaking; broadcasts the change."""
        self.gestural_speaking = enabled
        bus.emit_sync("brain.gestural_speaking_changed", enabled=enabled)
        log.info("Gestural speaking: %s", "ON" if enabled else "OFF")

    # -- status --
    def status(self) -> dict[str, Any]:
        """Snapshot of attachment state and current activity for the dashboard."""
        return {
            "voice_attached": self._voice is not None,
            "arm_attached": self._arm is not None,
            "audio_manager_attached": self._audio_mgr is not None,
            "live_voice_attached": self._live_voice is not None,
            "gestural_speaking": self.gestural_speaking,
            "running_skill": self._running_skill,
            "total_skills": len(self.registry.list_skills()),
        }