# 감정 임베딩 설계와 벡터 공간 정의

## 감정 벡터 공간 아키텍처

### 다차원 감정 모델

```python
import numpy as np
from dataclasses import dataclass


@dataclass
class EmotionSpace:
    """Unified emotion space: Russell's Circumplex Model + Plutchik's wheel.

    Three core dimensions (valence / arousal / dominance) are extended with
    five additional dimensions, giving an 8-D emotion vector.
    """

    # Core dimensions (3D)
    valence: float    # -1 (negative) to +1 (positive)
    arousal: float    # 0 (calm) to 1 (excited)
    dominance: float  # 0 (submissive) to 1 (dominant)

    # Extended dimensions (additional 5D)
    anticipation: float       # 0 to 1 (expectancy)
    trust: float              # 0 to 1
    surprise: float           # 0 to 1
    social_connection: float  # -1 (isolated) to +1 (connected)
    certainty: float          # 0 (uncertain) to 1 (certain)

    def to_vector(self) -> np.ndarray:
        """Build the 8-dimensional emotion vector."""
        return np.array([
            self.valence, self.arousal, self.dominance,
            self.anticipation, self.trust, self.surprise,
            self.social_connection, self.certainty,
        ])
```

### 기본 감정 임베딩

```python
class BasicEmotionEmbeddings:
    """Ekman's six basic emotions plus extensions (anticipation, trust).

    Each emotion carries a prototype 8-D vector (same axis order as
    ``EmotionSpace.to_vector``), a display color, and the intensity range
    within which the emotion is typically expressed.
    """

    EMOTIONS = {
        'joy': {
            'vector': [0.9, 0.7, 0.7, 0.6, 0.8, 0.1, 0.8, 0.8],
            'color': '#FFD700',  # Gold
            'intensity_range': (0.3, 1.0),
        },
        'sadness': {
            'vector': [-0.8, 0.2, 0.2, 0.1, 0.3, 0.0, -0.3, 0.4],
            'color': '#4169E1',  # Royal Blue
            'intensity_range': (0.2, 0.9),
        },
        'anger': {
            'vector': [-0.7, 0.9, 0.8, 0.2, 0.1, 0.2, -0.2, 0.7],
            'color': '#DC143C',  # Crimson
            'intensity_range': (0.3, 1.0),
        },
        'fear': {
            'vector': [-0.8, 0.8, 0.1, 0.7, 0.1, 0.7, -0.1, 0.1],
            'color': '#8B008B',  # Dark Magenta
            'intensity_range': (0.4, 1.0),
        },
        'disgust': {
            'vector': [-0.9, 0.5, 0.6, 0.0, 0.0, 0.3, -0.4, 0.8],
            'color': '#556B2F',  # Dark Olive Green
            'intensity_range': (0.3, 0.9),
        },
        'surprise': {
            'vector': [0.1, 0.8, 0.3, 0.1, 0.4, 1.0, 0.2, 0.0],
            'color': '#FF69B4',  # Hot Pink
            'intensity_range': (0.5, 1.0),
        },
        'anticipation': {
            'vector': [0.3, 0.6, 0.5, 1.0, 0.5, 0.4, 0.4, 0.3],
            'color': '#FFA500',  # Orange
            'intensity_range': (0.2, 0.8),
        },
        'trust': {
            'vector': [0.7, 0.3, 0.4, 0.3, 1.0, 0.0, 0.9, 0.9],
            'color': '#32CD32',  # Lime Green
            'intensity_range': (0.1, 1.0),
        },
    }

    def get_embedding(self, emotion_name, intensity=0.5):
        """Build an embedding vector from an emotion name and intensity.

        ``intensity`` in [0, 1] is mapped linearly into the emotion's own
        intensity range before scaling the prototype vector.
        Returns ``None`` for an unknown emotion name (callers check this).
        """
        if emotion_name not in self.EMOTIONS:
            return None
        spec = self.EMOTIONS[emotion_name]
        base_vector = np.array(spec['vector'])
        min_int, max_int = spec['intensity_range']
        # Rescale intensity into the emotion-specific range.
        adjusted_intensity = min_int + (max_int - min_int) * intensity
        return base_vector * adjusted_intensity
```

## 복합 감정 모델링

### 감정 블렌딩 알고리즘

```python
class EmotionBlending:
    """Combine basic-emotion embeddings into complex (blended) emotions."""

    def __init__(self):
        self.basic_emotions = BasicEmotionEmbeddings()

    def blend_emotions(self, emotion_mix):
        """Mix several basic emotions into one composite vector.

        emotion_mix: e.g. ``{'joy': 0.6, 'surprise': 0.4}``.
        Unknown emotion names are skipped, and the weights are normalized
        over the *resolved* emotions only, so an unresolvable entry does
        not silently dilute the blend.
        """
        resolved = []  # (vector, weight) pairs for known emotions
        for emotion, weight in emotion_mix.items():
            emotion_vector = self.basic_emotions.get_embedding(emotion, weight)
            if emotion_vector is not None:
                resolved.append((emotion_vector, weight))

        total_weight = sum(weight for _, weight in resolved)
        if total_weight == 0:
            return np.zeros(8)

        blended_vector = np.zeros(8)
        for vector, weight in resolved:
            blended_vector += vector * (weight / total_weight)

        return self.normalize_vector(blended_vector)

    def create_complex_emotions(self):
        """Define a library of named complex emotions as blends."""
        complex_emotions = {
            'nostalgia': self.blend_emotions({'joy': 0.4, 'sadness': 0.6}),
            'bittersweetness': self.blend_emotions({'joy': 0.5, 'sadness': 0.5}),
            'anxiety': self.blend_emotions({'fear': 0.7, 'anticipation': 0.3}),
            'contempt': self.blend_emotions({'disgust': 0.6, 'anger': 0.4}),
            'awe': self.blend_emotions({'surprise': 0.5, 'fear': 0.2, 'joy': 0.3}),
            'guilt': self.blend_emotions({'sadness': 0.5, 'fear': 0.3, 'disgust': 0.2}),
            # NOTE: 'dominance' is a *dimension*, not a basic emotion; it is
            # skipped by blend_emotions, so pride reduces to scaled joy.
            # TODO: express pride via basic emotions (e.g. joy + trust).
            'pride': self.blend_emotions({'joy': 0.7, 'dominance': 0.3}),
            'shame': self.blend_emotions({'sadness': 0.4, 'fear': 0.3, 'disgust': 0.3}),
        }
        return complex_emotions

    def normalize_vector(self, vector):
        """L2-normalize; a zero vector is returned unchanged."""
        norm = np.linalg.norm(vector)
        if norm == 0:
            return vector
        return vector / norm
```

## 시간적 감정 역학

### 감정 전이 모델

```python
class EmotionDynamics:
    """Markov-chain model of transitions between the 8 basic emotions."""

    # Fixed index order shared by the transition matrix rows/columns.
    EMOTION_ORDER = [
        'joy', 'sadness', 'anger', 'fear',
        'disgust', 'surprise', 'anticipation', 'trust',
    ]

    def __init__(self):
        self.emotion_history = []
        self.transition_matrix = self.build_transition_matrix()

    def build_transition_matrix(self):
        """Build the 8x8 emotion transition-probability matrix.

        Rows: current emotion, columns: next emotion, in EMOTION_ORDER.
        Every row sums to 1 so it is a valid probability distribution.
        The numbers are illustrative defaults, intended to be re-estimated
        from observed data.
        """
        transitions = np.array([
            # joy ->    [joy,  sad,  ang,  fear, disg, surp, anti, trust]
            [0.60, 0.10, 0.05, 0.05, 0.02, 0.08, 0.05, 0.05],
            # sadness ->
            [0.15, 0.50, 0.10, 0.10, 0.05, 0.02, 0.03, 0.05],
            # anger ->
            [0.05, 0.15, 0.50, 0.10, 0.10, 0.02, 0.03, 0.05],
            # fear ->
            [0.05, 0.15, 0.10, 0.50, 0.05, 0.05, 0.05, 0.05],
            # disgust ->
            [0.05, 0.10, 0.20, 0.05, 0.45, 0.05, 0.05, 0.05],
            # surprise -> (short-lived, disperses quickly)
            [0.20, 0.10, 0.10, 0.15, 0.05, 0.20, 0.10, 0.10],
            # anticipation ->
            [0.25, 0.05, 0.05, 0.10, 0.02, 0.15, 0.30, 0.08],
            # trust -> (stable, high self-transition)
            [0.20, 0.05, 0.02, 0.03, 0.02, 0.05, 0.13, 0.50],
        ])
        return transitions

    def emotion_to_index(self, emotion):
        """Map an emotion name to its row/column index."""
        return self.EMOTION_ORDER.index(emotion)

    def index_to_emotion(self, index):
        """Map a row/column index back to the emotion name."""
        return self.EMOTION_ORDER[index]

    def apply_context(self, transition_probs, context_modifier):
        """Reweight transition probabilities by per-emotion context factors.

        context_modifier: ``{emotion_name: multiplicative_factor}``.
        The result is renormalized to sum to 1.
        """
        probs = transition_probs.copy()
        for emotion, factor in context_modifier.items():
            probs[self.emotion_to_index(emotion)] *= factor
        total = probs.sum()
        if total > 0:
            probs = probs / total
        return probs

    def predict_next_emotion(self, current_emotion, context_modifier=None):
        """Sample the next emotion given the current one."""
        current_idx = self.emotion_to_index(current_emotion)
        transition_probs = self.transition_matrix[current_idx]
        if context_modifier:
            transition_probs = self.apply_context(transition_probs, context_modifier)
        # Defensive renormalization: np.random.choice requires p to sum to 1.
        transition_probs = transition_probs / transition_probs.sum()
        next_emotion_idx = np.random.choice(8, p=transition_probs)
        return self.index_to_emotion(next_emotion_idx)

    def emotion_trajectory(self, initial_emotion, steps=10):
        """Simulate a trajectory of emotion changes over ``steps`` steps."""
        trajectory = [initial_emotion]
        current = initial_emotion
        for _ in range(steps):
            current = self.predict_next_emotion(current)
            trajectory.append(current)
        return trajectory
```

### 감정 반감기와 지속성

```python
class EmotionPersistence:
    """Exponential decay of emotion intensity over time."""

    def __init__(self):
        # Half-life of each emotion, in seconds.
        self.half_lives = {
            'surprise': 30,       # fades quickly
            'anger': 300,         # 5 minutes
            'fear': 600,          # 10 minutes
            'joy': 1800,          # 30 minutes
            'sadness': 3600,      # 1 hour
            'disgust': 1200,      # 20 minutes
            'anticipation': 900,  # 15 minutes
            'trust': 7200,        # 2 hours
        }

    def decay_function(self, emotion, time_elapsed):
        """Exponential decay: intensity remaining after ``time_elapsed`` s.

        Unknown emotions default to a 30-minute half-life.
        """
        half_life = self.half_lives.get(emotion, 1800)
        decay_rate = np.log(2) / half_life
        intensity = np.exp(-decay_rate * time_elapsed)
        return intensity

    def emotional_residue(self, past_emotions, current_time):
        """Aggregate the present-day influence of past emotion events.

        past_emotions: iterable of dicts with 'timestamp', 'emotion',
        and 'vector' (8-D) keys.
        """
        residue = np.zeros(8)
        for emotion_event in past_emotions:
            time_diff = current_time - emotion_event['timestamp']
            intensity = self.decay_function(emotion_event['emotion'], time_diff)
            if intensity > 0.01:  # ignore contributions below 1%
                residue += emotion_event['vector'] * intensity
        return residue
```

## 감정-인지 상호작용

### 감정이 의사결정에 미치는 영향

```python
class EmotionCognitionInterface:
    """Model how emotional state modulates cognitive functions."""

    def __init__(self):
        # Per-cognitive-function emotion weights: positive boosts,
        # negative suppresses that function.
        self.emotion_weights = {
            'risk_assessment': {
                'fear': -0.8,         # risk aversion
                'anger': 0.3,         # risk taking
                'joy': 0.2,           # optimistic appraisal
                'anticipation': 0.4,  # opportunity seeking
            },
            'creativity': {
                'joy': 0.7,
                'surprise': 0.6,
                'sadness': 0.3,       # reflective creativity
                'anger': -0.2,
            },
            'attention_focus': {
                'fear': 0.9,          # heightened focus
                'anger': 0.7,
                'joy': -0.1,          # diffuse attention
                'sadness': 0.4,       # detail focus
            },
        }

    def modulate_decision(self, base_decision, emotional_state):
        """Adjust decision scores according to the current emotional state.

        base_decision: dict of cognitive-function -> score. Functions not
        present in base_decision are left untouched.
        emotional_state: dict of emotion name -> intensity in [0, 1].
        """
        modulated = base_decision.copy()
        for cognitive_function, emotion_effects in self.emotion_weights.items():
            if cognitive_function not in modulated:
                continue  # caller did not supply a score for this function
            for emotion, weight in emotion_effects.items():
                emotion_intensity = emotional_state.get(emotion, 0)
                modulated[cognitive_function] *= (1 + weight * emotion_intensity)
        return modulated

    def apply_rationality_filter(self, decision):
        """Rationality check applied under strong emotion.

        Currently a pass-through hook; plug in domain-specific sanity
        checks (e.g. clamping extreme scores) here.
        """
        return decision

    def emotional_bias_correction(self, decision, emotion_intensity):
        """Correct for emotional bias when emotion intensity is high."""
        if emotion_intensity > 0.7:
            # Under strong emotion, strengthen the rationality check.
            return self.apply_rationality_filter(decision)
        return decision
```

## 감정 임베딩 학습

### 자기지도 학습 모델

```python
class EmotionEmbeddingLearning:
    """Learn a higher-dimensional emotion embedding via self-supervision."""

    def __init__(self, embedding_dim=128):
        self.embedding_dim = embedding_dim
        self.emotion_encoder = self.build_encoder()

    def build_encoder(self):
        """Emotion encoder network: 8-D basic vector -> unit embedding."""
        import tensorflow as tf

        model = tf.keras.Sequential([
            tf.keras.layers.Input(shape=(8,)),  # 8-D basic emotion vector
            tf.keras.layers.Dense(64, activation='relu'),
            tf.keras.layers.Dense(128, activation='relu'),
            tf.keras.layers.Dense(self.embedding_dim, activation='tanh'),
            # Project onto the unit hypersphere so cosine distance is valid.
            tf.keras.layers.Lambda(lambda x: tf.nn.l2_normalize(x, axis=1)),
        ])
        return model

    def contrastive_learning(self, anchor, positive, negative):
        """Triplet-loss contrastive learning to refine the embedding."""
        import tensorflow as tf  # local import: tf is not module-level

        anchor_emb = self.emotion_encoder(anchor)
        positive_emb = self.emotion_encoder(positive)
        negative_emb = self.emotion_encoder(negative)

        # Triplet loss with margin 0.2.
        pos_dist = tf.reduce_sum(tf.square(anchor_emb - positive_emb), axis=1)
        neg_dist = tf.reduce_sum(tf.square(anchor_emb - negative_emb), axis=1)
        loss = tf.maximum(pos_dist - neg_dist + 0.2, 0)
        return tf.reduce_mean(loss)
```

## 감정 시각화

### 감정 공간 매핑

```python
class EmotionVisualization:
    """Color mapping and trajectory plotting for emotion vectors."""

    def __init__(self):
        self.color_map = self.create_color_map()

    def create_color_map(self):
        """Reference colors per emotion dimension."""
        return {
            'valence': {
                'positive': '#FFD700',  # gold
                'negative': '#4B0082',  # indigo
            },
            'arousal': {
                'high': '#FF0000',  # red
                'low': '#0000FF',   # blue
            },
        }

    def emotion_to_color(self, emotion_vector):
        """Convert an emotion vector to an RGB hex color.

        Channels are clamped to 0-255 so out-of-range inputs still
        produce a well-formed '#rrggbb' string.
        """
        valence = emotion_vector[0]    # -1 to 1
        arousal = emotion_vector[1]    # 0 to 1
        dominance = emotion_vector[2]  # 0 to 1

        # RGB mapping: valence -> red, arousal -> green, dominance -> blue.
        r = max(0, min(255, int(128 + valence * 127)))
        g = max(0, min(255, int(arousal * 255)))
        b = max(0, min(255, int(dominance * 255)))
        return f'#{r:02x}{g:02x}{b:02x}'

    def plot_emotion_trajectory(self, emotion_sequence):
        """Plot an emotion trajectory in 2D (V-A) and 3D (V-A-D) space."""
        import matplotlib.pyplot as plt
        from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 (registers 3d proj)

        fig = plt.figure(figsize=(12, 5))

        # 2D plot (Valence-Arousal)
        ax1 = fig.add_subplot(121)
        valences = [e[0] for e in emotion_sequence]
        arousals = [e[1] for e in emotion_sequence]
        ax1.plot(valences, arousals, 'o-', markersize=8)
        ax1.set_xlabel('Valence')
        ax1.set_ylabel('Arousal')
        ax1.set_title('Emotion Trajectory (2D)')
        ax1.grid(True, alpha=0.3)

        # 3D plot (Valence-Arousal-Dominance)
        ax2 = fig.add_subplot(122, projection='3d')
        dominances = [e[2] for e in emotion_sequence]
        ax2.plot(valences, arousals, dominances, 'o-', markersize=8)
        ax2.set_xlabel('Valence')
        ax2.set_ylabel('Arousal')
        ax2.set_zlabel('Dominance')
        ax2.set_title('Emotion Trajectory (3D)')

        plt.tight_layout()
        return fig
```

## 감정 메트릭

### 감정 다양성과 안정성 측정

```python
class EmotionMetrics:
    """Diversity, stability, and bias metrics over emotion histories."""

    def emotional_entropy(self, emotion_distribution):
        """Shannon entropy of an emotion distribution (diversity measure).

        Returns 0.0 for an empty or all-zero distribution.
        """
        probs = np.array(list(emotion_distribution.values()), dtype=float)
        total = probs.sum()
        if probs.size == 0 or total == 0:
            return 0.0
        probs = probs / total
        # +1e-10 guards log(0) for zero-probability entries.
        entropy = -np.sum(probs * np.log(probs + 1e-10))
        return entropy

    def emotional_stability(self, emotion_history):
        """Stability index: 1 / (1 + std of step-to-step vector change).

        Fewer than two samples is treated as perfectly stable (1.0).
        """
        if len(emotion_history) < 2:
            return 1.0
        changes = []
        for i in range(1, len(emotion_history)):
            prev = emotion_history[i - 1]
            curr = emotion_history[i]
            changes.append(np.linalg.norm(prev - curr))
        # Lower variability in the change magnitudes -> more stable.
        stability = 1 / (1 + np.std(changes))
        return stability

    def emotional_valence_bias(self, emotion_history):
        """Measure positive/negative bias of the mean valence.

        Returns (label, mean_valence) where label is 'positive_bias',
        'negative_bias', or 'neutral' with a +/-0.2 dead zone.
        """
        valences = [e[0] for e in emotion_history]
        mean_valence = np.mean(valences)
        if mean_valence > 0.2:
            return 'positive_bias', mean_valence
        elif mean_valence < -0.2:
            return 'negative_bias', mean_valence
        else:
            return 'neutral', mean_valence
```