// novafarma/src/systems/CinematicVoiceSystem.js
/**
 * CINEMATIC VOICE SYSTEM
 * Mrtva Dolina - a cinematic approach to dialogue
 *
 * Features:
 * - Emotional depth (breaths, pauses, emphasis)
 * - Reverb for flashbacks (Kai's memories)
 * - Ambient blending (wind, ruins)
 * - Typewriter sync (voice + text)
 * - Dynamic background audio (music ducks under dialogue)
 */
export class CinematicVoiceSystem {
    constructor(scene) {
        this.scene = scene;
        this.audioContext = null;
        this.currentVoice = null;
        this.isFlashback = false;

        // Voice parameters per character/emotion combination
        this.emotionalParams = {
            kai_confused: { rate: 0.9, pitch: 1.0, breathPauses: true, emphasis: 'low' },
            kai_determined: { rate: 1.0, pitch: 1.1, breathPauses: false, emphasis: 'strong' },
            ana_gentle: { rate: 0.95, pitch: 1.15, breathPauses: true, emphasis: 'soft' },
            ana_urgent: { rate: 1.1, pitch: 1.2, breathPauses: false, emphasis: 'strong' },
            zombie_scout_hungry: { rate: 0.7, pitch: 0.6, breathPauses: false, emphasis: 'guttural' },
            zombie_scout_happy: { rate: 0.8, pitch: 0.7, breathPauses: false, emphasis: 'friendly' }
        };

        // Ambient sounds
        this.ambientSounds = new Map();
        this.currentAmbient = null;

        this.initializeAudioContext();
        this.loadAmbientSounds();
    }
    initializeAudioContext() {
        try {
            // Note: browsers may create the context in a 'suspended' state
            // until the first user gesture (autoplay policy)
            this.audioContext = new (window.AudioContext || window.webkitAudioContext)();
            console.log('✅ CinematicVoiceSystem: Audio Context initialized');
        } catch (error) {
            console.error('❌ Failed to initialize Audio Context:', error);
        }
    }
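    /**
     * A minimal sketch (an assumption, not called anywhere in this file):
     * resume a suspended AudioContext on the first user gesture. Could be
     * called once from scene setup if flashback reverb is ever wired up.
     */
    resumeOnFirstGesture() {
        if (this.audioContext && this.audioContext.state === 'suspended') {
            const resume = () => this.audioContext.resume();
            window.addEventListener('pointerdown', resume, { once: true });
        }
    }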
    loadAmbientSounds() {
        // Define ambient sound layers (loaded lazily in setAmbient)
        const ambients = [
            { id: 'wind_ruins', file: 'assets/audio/ambient/wind_ruins.mp3', volume: 0.3 },
            { id: 'crackling_fire', file: 'assets/audio/ambient/fire.mp3', volume: 0.2 },
            { id: 'rain_outside', file: 'assets/audio/ambient/rain.mp3', volume: 0.4 },
            { id: 'rain_inside', file: 'assets/audio/ambient/rain_muffled.mp3', volume: 0.2 }
        ];

        ambients.forEach(ambient => {
            this.ambientSounds.set(ambient.id, {
                file: ambient.file,
                volume: ambient.volume,
                audio: null // created on first use
            });
        });
    }
    /**
     * Speak dialogue with cinematic voice
     * @param {string} text - Text to speak
     * @param {string} character - Character name (kai, ana, zombie_scout)
     * @param {string} emotion - Emotion type (confused, determined, gentle, urgent, hungry, happy)
     * @param {object} options - Additional options (typewriterElement, flashback, ambient, volume)
     * @returns {Promise<void>} resolves when speech ends (or errors)
     */
    async speak(text, character, emotion, options = {}) {
        const voiceKey = `${character}_${emotion}`;
        const params = this.emotionalParams[voiceKey] || this.emotionalParams.kai_confused;

        // Add breath pauses if enabled
        let processedText = text;
        if (params.breathPauses) {
            processedText = this.addBreathPauses(text);
        }

        // Add emphasis to key words
        processedText = this.addEmphasis(processedText, params.emphasis);

        // Set ambient if specified
        if (options.ambient) {
            this.setAmbient(options.ambient);
        }

        // Apply flashback effect if needed
        this.isFlashback = options.flashback || false;

        // Create speech synthesis utterance
        const utterance = new SpeechSynthesisUtterance(processedText);
        utterance.rate = params.rate;
        utterance.pitch = params.pitch;
        utterance.volume = options.volume ?? 0.8; // '??' so an explicit 0 is honoured

        // Select voice based on character
        const voice = this.selectVoice(character);
        if (voice) {
            utterance.voice = voice;
        }

        // Sync with typewriter if provided. Pass the processed text so the
        // boundary charIndex matches what the engine is actually speaking.
        if (options.typewriterElement) {
            this.syncWithTypewriter(utterance, options.typewriterElement, processedText);
        }

        // Duck background music under the dialogue
        if (this.scene.sound && this.scene.sound.get('background_music')) {
            this.duckMusic(0.3, 500); // lower to 30% over 500ms
        }

        // Attach handlers before speaking so no event can be missed
        const done = new Promise((resolve) => {
            const finish = () => {
                // Restore music volume
                if (this.scene.sound && this.scene.sound.get('background_music')) {
                    this.duckMusic(1.0, 800); // restore to 100% over 800ms
                }
                resolve();
            };
            utterance.onend = finish;
            utterance.onerror = finish; // resolve on error too, so callers never hang
        });

        // Apply reverb for flashbacks, otherwise standard speech
        if (this.isFlashback && this.audioContext) {
            await this.applyReverbEffect(utterance);
        } else {
            window.speechSynthesis.speak(utterance);
        }

        return done;
    }
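    /*
     * Usage sketch (a hypothetical call site, not part of this file):
     *
     *   const voice = new CinematicVoiceSystem(this);
     *   await voice.speak('Kje sem? Kaj se je zgodilo?', 'kai', 'confused', {
     *       typewriterElement: document.getElementById('dialogue-text'),
     *       ambient: 'wind_ruins'
     *   });
     *   // resolves once the line has been spoken and music is restored
     */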
    /**
     * Add natural breath pauses to text.
     * Caveat: these are SSML tags, and the plain Web Speech API does not
     * parse SSML in most browsers - the tags may be ignored or even read
     * aloud. Treat this as aspirational markup for a future TTS backend.
     */
    addBreathPauses(text) {
        // Add slight pauses after commas and periods
        return text
            .replace(/,/g, ',<break time="200ms"/>')
            .replace(/\./g, '.<break time="400ms"/>');
    }
    /**
     * Add emphasis to key words (same SSML caveat as addBreathPauses).
     */
    addEmphasis(text, emphasisType) {
        if (emphasisType === 'strong') {
            // Emphasize Slovenian question words and important terms
            const keywords = ['kje', 'kaj', 'kdo', 'zakaj', 'kako', 'Ana', 'Kai', 'spomin'];
            keywords.forEach(word => {
                const regex = new RegExp(`\\b${word}\\b`, 'gi');
                // '$&' keeps the matched casing (the original re-inserted the
                // lowercase keyword, silently turning 'Kje' into 'kje')
                text = text.replace(regex, '<emphasis level="strong">$&</emphasis>');
            });
        } else if (emphasisType === 'soft') {
            // Soft emphasis for gentle speech: wrap capitalised words
            const regex = /([A-ZČŠŽ][a-zčšž]+)/g;
            text = text.replace(regex, '<prosody volume="soft">$1</prosody>');
        }
        return text;
    }
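    /*
     * Worked example of addEmphasis with 'strong':
     *
     *   addEmphasis('Kje je Ana?', 'strong')
     *   // => '<emphasis level="strong">Kje</emphasis> je
     *   //     <emphasis level="strong">Ana</emphasis>?'
     *
     * (Again: these tags only take effect on an SSML-aware TTS backend.)
     */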
    /**
     * Select appropriate voice for character.
     * Note: getVoices() can return an empty list until the browser fires
     * 'voiceschanged'; callers get null in that case and the utterance
     * falls back to the platform default voice.
     */
    selectVoice(character) {
        const voices = window.speechSynthesis.getVoices();
        if (voices.length === 0) return null;

        // Prefer Slovenian voices, fall back to similar languages
        const preferredLangs = ['sl-SI', 'hr-HR', 'sr-RS', 'en-US'];
        const matchesLang = (v) =>
            preferredLangs.some(lang => v.lang.startsWith(lang.split('-')[0]));

        if (character === 'kai') {
            // Male voice ('female' also contains 'male', so exclude it explicitly)
            return voices.find(v => {
                const name = v.name.toLowerCase();
                return matchesLang(v) && name.includes('male') && !name.includes('female');
            }) || voices[0];
        } else if (character === 'ana') {
            // Female voice
            return voices.find(v =>
                matchesLang(v) && v.name.toLowerCase().includes('female')
            ) || voices[1] || voices[0];
        } else if (character === 'zombie_scout') {
            // Deep, gravelly voice
            return voices.find(v =>
                v.name.toLowerCase().includes('deep') ||
                v.name.toLowerCase().includes('bass')
            ) || voices[0];
        }
        return voices[0];
    }
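    /**
     * Sketch (an assumption, not referenced elsewhere in this file): wait
     * for the voice list to load before the first speak() call, since
     * getVoices() is often empty until 'voiceschanged' fires.
     */
    static waitForVoices() {
        return new Promise((resolve) => {
            const voices = window.speechSynthesis.getVoices();
            if (voices.length > 0) return resolve(voices);
            window.speechSynthesis.addEventListener(
                'voiceschanged',
                () => resolve(window.speechSynthesis.getVoices()),
                { once: true }
            );
        });
    }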
    /**
     * Sync voice with typewriter text animation.
     * @param {SpeechSynthesisUtterance} utterance
     * @param {HTMLElement} element - target element for the revealed text
     * @param {string} fullText - the utterance's own text (including any
     *        SSML tags), so boundary charIndex values line up
     */
    syncWithTypewriter(utterance, element, fullText) {
        utterance.onboundary = (event) => {
            // Reveal text as speech progresses; strip SSML tags so they
            // never show up in the displayed dialogue
            const charIndex = event.charIndex;
            if (element && charIndex < fullText.length) {
                element.textContent = fullText
                    .substring(0, charIndex + 1)
                    .replace(/<[^>]+>/g, '');
            }
        };
    }
    /**
     * Apply reverb effect for flashback sequences.
     * Currently a placeholder: SpeechSynthesis output cannot be routed
     * through Web Audio nodes, so the impulse response is built but left
     * unconnected until proper audio streaming is implemented.
     */
    async applyReverbEffect(utterance) {
        if (!this.audioContext) return;
        try {
            // Create convolver for reverb
            const convolver = this.audioContext.createConvolver();
            const reverbTime = 2.0; // seconds of reverb tail

            // Generate a synthetic impulse response (decaying white noise)
            const sampleRate = this.audioContext.sampleRate;
            const length = sampleRate * reverbTime;
            const impulse = this.audioContext.createBuffer(2, length, sampleRate);
            for (let channel = 0; channel < 2; channel++) {
                const channelData = impulse.getChannelData(channel);
                for (let i = 0; i < length; i++) {
                    channelData[i] = (Math.random() * 2 - 1) * Math.pow(1 - i / length, 2);
                }
            }
            convolver.buffer = impulse;

            // Note: SpeechSynthesis doesn't support Web Audio routing, so the
            // convolver above is not connected to anything yet
            console.log('🎙️ Reverb effect would be applied here (requires audio streaming)');

            // Fallback: just speak with the modified parameters for now
            window.speechSynthesis.speak(utterance);
        } catch (error) {
            console.error('❌ Reverb effect failed:', error);
            window.speechSynthesis.speak(utterance);
        }
    }
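    /*
     * Sketch of the routing this placeholder anticipates. It assumes the
     * spoken line is available as a decoded AudioBuffer (e.g. from a
     * server-side TTS - an assumption, nothing in this file provides one):
     *
     *   const source = this.audioContext.createBufferSource();
     *   source.buffer = speechBuffer;                  // decoded TTS audio
     *   source.connect(convolver);                     // wet path
     *   convolver.connect(this.audioContext.destination);
     *   source.connect(this.audioContext.destination); // dry path
     *   source.start();
     */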
    /**
     * Duck/restore background music volume
     */
    duckMusic(targetVolume, duration) {
        const music = this.scene.sound.get('background_music');
        if (!music) return;
        this.scene.tweens.add({
            targets: music,
            volume: targetVolume,
            duration: duration,
            ease: 'Sine.easeInOut'
        });
    }
    /**
     * Set ambient background sound
     */
    setAmbient(ambientId) {
        // Stop current ambient
        if (this.currentAmbient && this.currentAmbient.audio) {
            this.currentAmbient.audio.pause();
        }

        // Start new ambient (audio element is created lazily)
        const ambient = this.ambientSounds.get(ambientId);
        if (ambient) {
            if (!ambient.audio) {
                ambient.audio = new Audio(ambient.file);
                ambient.audio.loop = true;
                ambient.audio.volume = ambient.volume;
            }
            ambient.audio.play().catch(err => {
                console.warn('Ambient sound play failed:', err);
            });
            this.currentAmbient = ambient;
        }
    }
    /**
     * Blend voice with ambient levels. Note: voiceVolume is accepted for
     * symmetry but unused here - voice volume is set in speak().
     */
    blendWithAmbient(voiceVolume = 0.8, ambientVolume = 0.3) {
        if (this.currentAmbient && this.currentAmbient.audio) {
            this.currentAmbient.audio.volume = ambientVolume;
        }
    }
    /**
     * Stop all audio
     */
    stopAll() {
        window.speechSynthesis.cancel();
        if (this.currentAmbient && this.currentAmbient.audio) {
            this.currentAmbient.audio.pause();
        }
    }
    /**
     * ZOMBIE SCOUT SPECIFIC SOUNDS
     * (dialogue strings are intentionally kept in Slovenian game text)
     */
    zombieScoutHunger() {
        const hungerLines = [
            'Braaaaains...',
            'Možgaaaaani...',        // "Braaaaains..." in Slovenian
            'Hrrrngh... lačen...',   // "Hrrrngh... hungry..."
            '*zombie groan*'
        ];
        const randomLine = hungerLines[Math.floor(Math.random() * hungerLines.length)];
        this.speak(randomLine, 'zombie_scout', 'hungry', {
            volume: 0.6,
            ambient: 'wind_ruins'
        });
    }

    zombieScoutDiscovery() {
        const discoveryLines = [
            '*tiho godrnjanje*',       // "*quiet grumbling*"
            'Hrrm! Tukaj!',            // "Hrrm! Over here!"
            '*zadovoljno zavijanje*'   // "*satisfied howling*"
        ];
        const randomLine = discoveryLines[Math.floor(Math.random() * discoveryLines.length)];
        this.speak(randomLine, 'zombie_scout', 'happy', {
            volume: 0.7
        });
    }
    /**
     * Play zombie scout footstep with gear sounds
     */
    zombieScoutFootstep() {
        // Composite sound: footstep + gear rattle. Use sound.play() so a
        // new sound object isn't leaked on every step, and the scene clock
        // instead of setTimeout so the delay respects pause/timescale.
        this.scene.sound.play('zombie_footstep', { volume: 0.4 });
        this.scene.time.delayedCall(50, () => {
            this.scene.sound.play('gear_rattle', { volume: 0.2 }); // slight delay for realism
        });
    }
    destroy() {
        this.stopAll();
        if (this.audioContext) {
            this.audioContext.close();
            this.audioContext = null; // prevent reuse of a closed context
        }
    }
}
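
/*
 * Usage sketch (hypothetical Phaser scene; the scene and asset keys below
 * are assumptions for illustration, not part of this module):
 *
 *   import { CinematicVoiceSystem } from './systems/CinematicVoiceSystem.js';
 *
 *   class DialogueScene extends Phaser.Scene {
 *       create() {
 *           this.voice = new CinematicVoiceSystem(this);
 *           this.voice.speak('Ana? Si tu?', 'kai', 'confused', {
 *               ambient: 'rain_inside'
 *           });
 *       }
 *       shutdown() {
 *           this.voice.destroy();
 *       }
 *   }
 */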