Upload 4 files
- 2Gv37_AutoLR/__init__.py +0 -0
- 2Gv37_AutoLR/emoairy.py +208 -0
- 2Gv37_AutoLR/emocats.py +208 -0
- 2Gv37_AutoLR/emosens.py +191 -0
2Gv37_AutoLR/__init__.py
ADDED
File without changes
2Gv37_AutoLR/emoairy.py
ADDED
@@ -0,0 +1,208 @@
import torch
from torch.optim import Optimizer
import math
from collections import deque

"""
EmoAiry v3.7.0 (260101) shadow-system v3.1 -moment v3.1 emoDrive v3.6 emoPulse v3.7
Inherits EmoFact v3.6; aims for full automation via the emoPulse mechanism (fine-tunable via emoScope)
"""

class EmoAiry(Optimizer):
    # Class definition & initialization
    def __init__(self, params,
                 lr=1.0,
                 eps=1e-8,
                 betas=(0.9, 0.995),
                 weight_decay=0.01,
                 use_shadow: bool = False,
                 writer=None):
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        super().__init__(params, defaults)
        self._init_lr = lr
        self.should_stop = False  # Initialize the stop flag
        self.use_shadow = use_shadow  # 🔸Store the shadow-usage flag
        self.writer = writer  # Receives the dynamic LR, emotion scalar, etc. for logging
        self.emoScope = 20.0 * lr  # "Breadth of view", not a learning speed
        self.noise_est = 0.0
        self.d_est = 0.0

    # Emotion EMA update (tension and calm)
    def _update_ema(self, state, loss_val):
        ema = state.setdefault('ema', {})
        ema['short'] = 0.3 * loss_val + 0.7 * ema.get('short', loss_val)
        ema['medium'] = 0.05 * loss_val + 0.95 * ema.get('medium', loss_val)
        ema['long'] = 0.01 * loss_val + 0.99 * ema.get('long', loss_val)
        return ema

    # Emotion scalar generation (EMA difference, smooth nonlinear scalar; tanh(diff) stays bounded within ±1.0)
    # Coefficient "1": rescales the EMA difference (sensitivity coefficient); normally 1 (adjustable per task, not recommended)
    # scale_base: corrects the gap between the loss and the EMA (the ema(long) denominator normalizes to an "improvement rate", independent of the loss type)
    # 1e-5 (default) / 1e-6 (higher sensitivity) / 1e-4 (higher stability): keeps the denominator away from zero
    # Trauma-like reactions and habituation improve stability (ema-medium senses stability and sudden change via trust)
    def _compute_scalar(self, ema):
        scale_base_l = max(ema['long'], 1e-5)
        scale_base_m = max(ema['medium'], 1e-5)
        diff_l = (ema['long'] - ema['short']) / scale_base_l
        diff_m = (ema['long'] - ema['short']) / scale_base_m
        # If long is quiet enough, always prefer long
        if abs(diff_l) < 0.05:
            return math.tanh(diff_l)
        # Only when long is not quiet, conditionally adopt medium's quietness
        if abs(diff_m) * scale_base_m < abs(diff_l) * scale_base_l:
            return math.tanh(1 * diff_m)
        else:
            return math.tanh(1 * diff_l)

    # Early-stop only (emotion scalar for the quietness check)
    def _early_scalar(self, ema):
        scale_base_l = max(ema['long'], 1e-5)
        diff = (ema['long'] - ema['short']) / scale_base_l
        return math.tanh(1 * diff)

    # Suppression rule as in the paper: strong suppression on sudden change, slight deceleration on worsening, no intervention in calm phases, to stabilize convergence
    # The case split is currently redundant, but it is kept in this form so it is easy to customize later
    def _decide_coeff(self, scalar):  # Ranges such as B <= x <= A are also possible
        if abs(scalar) > 0.75:
            return 1.0 - abs(scalar)  # Emergency | hard braking | tanh 0.97 (0.03)
        elif abs(scalar) > 0.50:
            return 1.0 - abs(scalar)  # Sudden change | strong suppression | tanh 0.55 (0.45)
        elif abs(scalar) > 0.25:
            return 1.0 - abs(scalar)  # Worsening | slight deceleration | tanh 0.26 (0.74)
        else:
            return 1.0  # Calm | no intervention | otherwise (always returns 1.0)

    # (Important) use_shadow=False at present, because the dynamic filter effect learned from the shadow-effect is approximated directly
    # All functionality works without shadow; when reviewing the code, please treat the shadow path as out of scope

    # Shadow mixing ratio, 3-stage layout; adjustable per task. Tune the start value, range, and step width using the following as a guide
    # return start + ((scalar) - threshold) / range * width is also possible (for special uses)
    def _decide_ratio(self, scalar):
        if not self.use_shadow:
            return 0.0  # 🔸Always return a ratio of 0 when use_shadow = False
        if abs(scalar) > 0.625:
            return 1.0 - abs(scalar)  # Sudden change | strong suppression | tanh 0.73 (0.27)
        else:
            return 0.0  # A return < 0 would be leap-only (no write-back, history update only)

    # Loss acquisition (loss_val is turned into a number and used for the emotion decision; parameters without gradients (no update needed) are skipped)
    @torch.no_grad()
    def step(self, closure=None):
        loss = closure() if closure is not None else None
        loss_val = loss.item() if loss is not None else 0.0

        # EMA update and scalar generation (the scalar derived from the EMA differences decides the spike ratio etc.)
        ema = self._update_ema(self.state, loss_val)
        early_scalar = self._early_scalar(ema)
        scalar = self._compute_scalar(ema)
        coeff = self._decide_coeff(scalar)
        ratio = self._decide_ratio(scalar)
        trust = math.copysign((1.0 - abs(scalar)), scalar)
        emoDpt = 8.0 * abs(trust)

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue

                grad = p.grad
                state = self.state[p]

                # The dynamic LR correction adjusts shadow formation by trust (trust is used as a positive value; it never goes negative here)
                # shadow: only when needed (a dynamic history update that lets the spiked parts of p pull up to 10% toward the current value)
                # Mixing ratio: computed only when the scalar exceeds the threshold (screening for trustworthy emotion signals)
                # On sudden change, the emotion mechanism mixes in shadow to suppress strongly (hard braking secures stability)
                # The new shadow-system cooperates with the dynamic LR and trust, and also exhibits selective sparsity
                if self.use_shadow:
                    if 'shadow' not in state:  # 🔸shadow = False (default)
                        state['shadow'] = p.clone()
                    if ratio > 0:  # Write-back and history update (strong suppression on sudden change, weaker history update)
                        p.mul_(1 - ratio).add_(state['shadow'], alpha=abs(trust))
                    else:  # No write-back, history update only: 10% × trust
                        leap_ratio = 0.1 * abs(trust)
                        state['shadow'].lerp_(p, leap_ratio)

                # emoDrive operating range (Turbo & Trust LR system)
                if 0.25 < abs(scalar) < 0.5:
                    emoDrive = emoDpt * (1.0 + 0.1 * trust)  # Acceleration/deceleration zone correction
                elif abs(scalar) > 0.75:
                    emoDrive = coeff  # Emergency | hard braking | tanh 0.97 (0.03)
                else:
                    emoDrive = 1.0  # No-intervention zone

                # emoPulse (estimates D / noise from the loss time series and generates a fully automatic LR)
                # noise_estimate: EMA of the loss fluctuation (instability)
                self.noise_est = 0.8 * self.noise_est + 0.2 * abs(trust)
                noise = max(self.noise_est, 1e-10)  # Lower bound 1e-10
                # d_estimate: EMA of the loss improvement trend (a proxy for the distance D)
                self.d_est = 0.9 * self.d_est + 0.1 * max(trust, 0.0)  # Keep it non-negative
                # Upper bound; if learning is oddly slow or too fast, an emoScope around 5.0-20.0 works well / reference value 20.0
                d = min(self.d_est, self.emoScope)

                # --- Start Gradient Update Logic ---
                # For tensors with 2 or more dimensions, use the variance-based AB approximation
                if grad.dim() >= 2:
                    # Mean of squares over rows and columns (a lightweight approximation of the variance)
                    r_sq = torch.mean(grad * grad, dim=tuple(range(1, grad.dim())), keepdim=True).add_(group['eps'])
                    c_sq = torch.mean(grad * grad, dim=0, keepdim=True).add_(group['eps'])

                    # Build an approximate gradient matrix from the variance information
                    # Form what is treated as the AB matrix directly and compute the update term from it
                    # A = sqrt(r_sq), B = sqrt(c_sq); reproduce the AB approximation and smooth it with an EMA
                    beta1, beta2 = group['betas']
                    state.setdefault('exp_avg_r', torch.zeros_like(r_sq)).mul_(beta1).add_(torch.sqrt(r_sq), alpha=1 - beta1)
                    state.setdefault('exp_avg_c', torch.zeros_like(c_sq)).mul_(beta1).add_(torch.sqrt(c_sq), alpha=1 - beta1)

                    # Normalize by the product of the square roots of the reconstructed approximation
                    denom = torch.sqrt(state['exp_avg_r'] * state['exp_avg_c']).add_(group['eps'])

                    # Compute the final update term
                    # update_term = grad / denom  # sign() improves the balance with the first-order vectors
                    update_term = torch.sign(grad / denom)

                # Gradient correction for 1-D (vector) parameters
                else:
                    beta1, beta2 = group['betas']
                    exp_avg_sq = state.setdefault('exp_avg_sq', torch.zeros_like(p))
                    exp_avg_sq.mul_(beta1).addcmul_(grad, grad, value=(1 - beta2))
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    # update_term = grad / denom  # sign() improves the balance with the second moment
                    update_term = torch.sign(grad / denom)

                # Final parameter update (decoupled weight decay is also applied)
                # Fully automatic LR / a safety clip around 0.3-0.5 is also fine (emoPulse = step_size)
                # step_size = group['lr']
                emoPulse = min((d / noise), 1e-3)
                p.add_(p, alpha=-group['weight_decay'] * emoPulse)
                p.add_(update_term, alpha=-emoPulse * emoDrive)
                # --- End Gradient Update Logic ---

        # Signals externally that the emotion mechanism has settled and training is "sufficiently stable" (not an automatic stopping logic)
        # Scalar record for early stop (managed in a shared buffer / keeps at most 32 entries / activity assessment)
        hist = self.state.setdefault('scalar_hist', deque(maxlen=32))
        hist.append(scalar)

        # Early-stop decision (a signal of quietness)
        # When 32 steps' worth of scalars satisfy the quietness condition, the flag should_stop simply becomes True
        if len(hist) >= 32:
            avg_abs = sum(abs(s) for s in hist) / len(hist)
            mean = sum(hist) / len(hist)
            var = sum((s - mean)**2 for s in hist) / len(hist)
            if avg_abs < 0.05 and var < 0.005:
                self.should_stop = True  # 💡 Can be read from outside to decide

        # Logging to TensorBoard (appended at the end of step())
        if hasattr(self, 'writer') and self.writer is not None:
            self._step_count = getattr(self, "_step_count", 0) + 1
            self.writer.add_scalar("emoLR/base", emoPulse, self._step_count)
            self.writer.add_scalar("emoLR/Turbo", emoPulse * emoDrive, self._step_count)
            self.writer.add_scalar("emostate/emoDrive", emoDrive, self._step_count)
            self.writer.add_scalar("emostate/scalar", scalar, self._step_count)

        return

"""
https://github.com/muooon/EmoSens
Airy is inspired by Adafactor, and emofact,
and its VRAM-friendly design is something everyone loves.
"""
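For orientation, a minimal usage sketch (not part of the uploaded files; `model`, `loss_fn`, and the random batch are placeholders, and the import assumes emoairy.py is importable from the working directory). Because step() runs under torch.no_grad(), the loss is computed and backpropagated outside and only handed in through the closure so the emotion EMAs can read its value:

# Minimal usage sketch (illustrative only; not part of the upload)
import torch
from emoairy import EmoAiry  # assumes emoairy.py is on the Python path

model = torch.nn.Linear(16, 1)                   # placeholder model
loss_fn = torch.nn.MSELoss()
optimizer = EmoAiry(model.parameters(), lr=1.0)  # lr mainly scales emoScope; emoPulse sets the step size

for step_idx in range(1000):
    x, y = torch.randn(8, 16), torch.randn(8, 1)  # placeholder batch
    loss = loss_fn(model(x), y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step(lambda: loss)   # the closure only reports the loss value to the emotion EMAs
    if optimizer.should_stop:      # quietness flag; stopping remains the caller's decision
        break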
2Gv37_AutoLR/emocats.py
ADDED
@@ -0,0 +1,208 @@
import torch
from torch.optim import Optimizer
import math
from typing import Tuple, Callable, Union
from collections import deque

"""
EmoCats v3.7.0 (260101) shadow-system v3.1 -moment v3.1 emoDrive v3.6 emoPulse v3.7
Inherits EmoLynx v3.6; aims for full automation via the emoPulse mechanism (fine-tunable via emoScope)
"""

# Helper function (Lynx)
def exists(val):
    return val is not None

class EmoCats(Optimizer):
    # Class definition & initialization; adds Lynx-style betas and compatibility (beta1/beta2 for Lynx)
    def __init__(self, params:
                 Union[list, torch.nn.Module],
                 lr=1.0,
                 eps=1e-8,
                 betas=(0.9, 0.995),
                 weight_decay=0.01,
                 use_shadow: bool = False,
                 writer=None):
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        super().__init__(params, defaults)
        # Saved for the Lynx-style weight decay
        self._init_lr = lr
        self.should_stop = False  # Initialize the stop flag
        self.use_shadow = use_shadow  # 🔸Store the shadow-usage flag
        self.writer = writer  # Receives the dynamic LR, emotion scalar, etc. for logging
        self.emoScope = 20.0 * lr  # "Breadth of view", not a learning speed
        self.noise_est = 0.0
        self.d_est = 0.0

    # Emotion EMA update (tension and calm)
    def _update_ema(self, state, loss_val):
        ema = state.setdefault('ema', {})
        ema['short'] = 0.3 * loss_val + 0.7 * ema.get('short', loss_val)
        ema['medium'] = 0.05 * loss_val + 0.95 * ema.get('medium', loss_val)
        ema['long'] = 0.01 * loss_val + 0.99 * ema.get('long', loss_val)
        return ema

    # Emotion scalar generation (EMA difference, smooth nonlinear scalar; tanh(diff) stays bounded within ±1.0)
    # Coefficient "1": rescales the EMA difference (sensitivity coefficient); normally 1 (adjustable per task, not recommended)
    # scale_base: corrects the gap between the loss and the EMA (the ema(long) denominator normalizes to an "improvement rate", independent of the loss type)
    # 1e-5 (default) / 1e-6 (higher sensitivity) / 1e-4 (higher stability): keeps the denominator away from zero
    # Trauma-like reactions and habituation improve stability (ema-medium senses stability and sudden change via trust)
    def _compute_scalar(self, ema):
        scale_base_l = max(ema['long'], 1e-5)
        scale_base_m = max(ema['medium'], 1e-5)
        diff_l = (ema['long'] - ema['short']) / scale_base_l
        diff_m = (ema['long'] - ema['short']) / scale_base_m
        # If long is quiet enough, always prefer long
        if abs(diff_l) < 0.05:
            return math.tanh(diff_l)
        # Only when long is not quiet, conditionally adopt medium's quietness
        if abs(diff_m) * scale_base_m < abs(diff_l) * scale_base_l:
            return math.tanh(1 * diff_m)
        else:
            return math.tanh(1 * diff_l)

    # Early-stop only (emotion scalar for the quietness check)
    def _early_scalar(self, ema):
        scale_base_l = max(ema['long'], 1e-5)
        diff = (ema['long'] - ema['short']) / scale_base_l
        return math.tanh(1 * diff)

    # Suppression rule as in the paper: strong suppression on sudden change, slight deceleration on worsening, no intervention in calm phases, to stabilize convergence
    # The case split is currently redundant, but it is kept in this form so it is easy to customize later
    def _decide_coeff(self, scalar):  # Ranges such as B <= x <= A are also possible
        if abs(scalar) > 0.75:
            return 1.0 - abs(scalar)  # Emergency | hard braking | tanh 0.97 (0.03)
        elif abs(scalar) > 0.50:
            return 1.0 - abs(scalar)  # Sudden change | strong suppression | tanh 0.55 (0.45)
        elif abs(scalar) > 0.25:
            return 1.0 - abs(scalar)  # Worsening | slight deceleration | tanh 0.26 (0.74)
        else:
            return 1.0  # Calm | no intervention | otherwise (always returns 1.0)

    # (Important) use_shadow=False at present, because the dynamic filter effect learned from the shadow-effect is approximated directly
    # All functionality works without shadow; when reviewing the code, please treat the shadow path as out of scope

    # Shadow mixing ratio, 3-stage layout; adjustable per task. Tune the start value, range, and step width using the following as a guide
    # return start + ((scalar) - threshold) / range * width is also possible (for special uses)
    def _decide_ratio(self, scalar):
        if not self.use_shadow:
            return 0.0  # 🔸Always return a ratio of 0 when use_shadow = False
        if abs(scalar) > 0.625:
            return 1.0 - abs(scalar)  # Sudden change | strong suppression | tanh 0.73 (0.27)
        else:
            return 0.0  # A return < 0 would be leap-only (no write-back, history update only)

    # Loss acquisition (loss_val is turned into a number and used for the emotion decision; parameters without gradients (no update needed) are skipped)
    @torch.no_grad()
    def step(self, closure: Callable | None = None):  # Type hint added for the closure
        loss = None
        if exists(closure):  # Use the exists helper for consistency
            with torch.enable_grad():
                loss = closure()
        loss_val = loss.item() if loss is not None else 0.0

        # EMA update and scalar generation (the scalar derived from the EMA differences decides the spike ratio etc.)
        ema = self._update_ema(self.state, loss_val)
        early_scalar = self._early_scalar(ema)
        scalar = self._compute_scalar(ema)
        coeff = self._decide_coeff(scalar)
        ratio = self._decide_ratio(scalar)
        trust = math.copysign((1.0 - abs(scalar)), scalar)
        emoDpt = 8.0 * abs(trust)

        for group in self.param_groups:
            # Extract the common Lynx parameters
            lr, wd, beta1, beta2 = group['lr'], group['weight_decay'], *group['betas']

            # Separate the weight-decay handling (from Lynx)
            _wd_actual = wd

            for p in filter(lambda p: exists(p.grad), group['params']):  # Filter on the gradient check

                grad = p.grad  # Use the gradient directly (no ".data" needed for the computation)
                state = self.state[p]

                # The dynamic LR correction adjusts shadow formation by trust (trust is used as a positive value; it never goes negative here)
                # shadow: only when needed (a dynamic history update that lets the spiked parts of p pull up to 10% toward the current value)
                # Mixing ratio: computed only when the scalar exceeds the threshold (screening for trustworthy emotion signals)
                # On sudden change, the emotion mechanism mixes in shadow to suppress strongly (hard braking secures stability)
                # The new shadow-system cooperates with the dynamic LR and trust, and also exhibits selective sparsity
                if self.use_shadow:
                    if 'shadow' not in state:  # 🔸shadow = False (default)
                        state['shadow'] = p.clone()
                    if ratio > 0:  # Write-back and history update (strong suppression on sudden change, weaker history update)
                        p.mul_(1 - ratio).add_(state['shadow'], alpha=abs(trust))
                    else:  # No write-back, history update only: 10% × trust
                        leap_ratio = 0.1 * abs(trust)
                        state['shadow'].lerp_(p, leap_ratio)

                # emoDrive operating range (Turbo & Trust LR system)
                if 0.25 < abs(scalar) < 0.5:
                    emoDrive = emoDpt * (1.0 + 0.1 * trust)  # Acceleration/deceleration zone correction
                elif abs(scalar) > 0.75:
                    emoDrive = coeff  # Emergency | hard braking | tanh 0.97 (0.03)
                else:
                    emoDrive = 1.0  # No-intervention zone

                # emoPulse (estimates D / noise from the loss time series and generates a fully automatic LR)
                # noise_estimate: EMA of the loss fluctuation (instability)
                self.noise_est = 0.8 * self.noise_est + 0.2 * abs(trust)
                noise = max(self.noise_est, 1e-10)  # Lower bound 1e-10
                # d_estimate: EMA of the loss improvement trend (a proxy for the distance D)
                self.d_est = 0.9 * self.d_est + 0.1 * max(trust, 0.0)  # Keep it non-negative
                # Upper bound; if learning is oddly slow or too fast, an emoScope around 5.0-20.0 works well / reference value 20.0
                d = min(self.d_est, self.emoScope)

                # --- Start Gradient Update Logic ---
                # Lynx initialization (exp_avg)
                if 'exp_avg' not in state:
                    state['exp_avg'] = torch.zeros_like(p)
                exp_avg = state['exp_avg']

                # Stepweight decay (from Lynx): p = p * (1 - lr * wd)
                # Uses _wd_actual with decoupled weight decay in mind (EmoNavi applies wd at the end)
                emoPulse = min((d / noise), 1e-3)
                p.mul_(1 - emoPulse * _wd_actual)
                beta1, beta2 = group['betas']

                # Gradient blending
                # m_t = beta1 * exp_avg_prev + (1 - beta1) * grad
                blended_grad = grad.mul(1 - beta1).add_(exp_avg, alpha=beta1)

                # p: p = p - lr * sign(blended_grad)
                p.add_(blended_grad.sign_(), alpha=-emoPulse * emoDrive)

                # exp_avg = beta2 * exp_avg + (1 - beta2) * grad
                exp_avg.mul_(beta2).add_(grad, alpha=1 - beta2)
                # --- End Gradient Update Logic ---

        # Signals externally that the emotion mechanism has settled and training is "sufficiently stable" (not an automatic stopping logic)
        # Scalar record for early stop (managed in a shared buffer / keeps at most 32 entries / activity assessment)
        hist = self.state.setdefault('scalar_hist', deque(maxlen=32))
        hist.append(scalar)

        # Early-stop decision (a signal of quietness)
        # When 32 steps' worth of scalars satisfy the quietness condition, the flag should_stop simply becomes True
        if len(hist) >= 32:
            avg_abs = sum(abs(s) for s in hist) / len(hist)
            mean = sum(hist) / len(hist)
            var = sum((s - mean)**2 for s in hist) / len(hist)
            if avg_abs < 0.05 and var < 0.005:
                self.should_stop = True  # 💡 Can be read from outside to decide

        # Logging to TensorBoard (appended at the end of step())
        if hasattr(self, 'writer') and self.writer is not None:
            self._step_count = getattr(self, "_step_count", 0) + 1
            self.writer.add_scalar("emoLR/base", emoPulse, self._step_count)
            self.writer.add_scalar("emoLR/Turbo", emoPulse * emoDrive, self._step_count)
            self.writer.add_scalar("emostate/emoDrive", emoDrive, self._step_count)
            self.writer.add_scalar("emostate/scalar", scalar, self._step_count)

        return

"""
https://github.com/muooon/EmoSens
Cats was developed with inspiration from Lion, Tiger, and emolynx,
which we deeply respect for their lightweight and intelligent design.
Cats also integrates EmoNAVI to enhance its capabilities.
"""
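The emoPulse rule is identical across these files; the snippet below restates it as a standalone function using the same constants as the code above, purely to make the effective step size easy to inspect (the function name and the example trust value are illustrative, not part of the upload):

# Standalone rendering of the shared emoPulse rule (illustrative only)
def emo_pulse(noise_est, d_est, trust, emo_scope=20.0):
    noise_est = 0.8 * noise_est + 0.2 * abs(trust)   # EMA of the loss fluctuation (instability)
    d_est = 0.9 * d_est + 0.1 * max(trust, 0.0)      # EMA of the improvement trend (proxy for D)
    noise = max(noise_est, 1e-10)                    # lower bound 1e-10
    d = min(d_est, emo_scope)                        # emoScope caps the "breadth of view"
    emo_pulse_lr = min(d / noise, 1e-3)              # automatic LR, safety-clipped at 1e-3
    return emo_pulse_lr, noise_est, d_est

lr, n_est, d_est = emo_pulse(noise_est=0.0, d_est=0.0, trust=0.2)
print(lr)  # 0.001: the 1e-3 clip binds here because d / noise = 0.02 / 0.04 = 0.5 exceeds it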
2Gv37_AutoLR/emosens.py
ADDED
@@ -0,0 +1,191 @@
import torch
from torch.optim import Optimizer
import math
from collections import deque

"""
EmoSens v3.7.0 (260101) shadow-system v3.1 -moment v3.1 emoDrive v3.6 emoPulse v3.7
Inherits EmoNavi v3.6; aims for full automation via the emoPulse mechanism (fine-tunable via emoScope)
"""

class EmoSens(Optimizer):
    # Class definition & initialization
    def __init__(self, params,
                 lr=1.0,
                 eps=1e-8,
                 betas=(0.9, 0.995),
                 weight_decay=0.01,
                 use_shadow: bool = False,
                 writer=None):
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        super().__init__(params, defaults)
        self._init_lr = lr
        self.should_stop = False  # Initialize the stop flag
        self.use_shadow = use_shadow  # 🔸Store the shadow-usage flag
        self.writer = writer  # Receives the dynamic LR, emotion scalar, etc. for logging
        self.emoScope = 20.0 * lr  # "Breadth of view", not a learning speed
        self.noise_est = 0.0
        self.d_est = 0.0

    # Emotion EMA update (tension and calm)
    def _update_ema(self, state, loss_val):
        ema = state.setdefault('ema', {})
        ema['short'] = 0.3 * loss_val + 0.7 * ema.get('short', loss_val)
        ema['medium'] = 0.05 * loss_val + 0.95 * ema.get('medium', loss_val)
        ema['long'] = 0.01 * loss_val + 0.99 * ema.get('long', loss_val)
        return ema

    # Emotion scalar generation (EMA difference, smooth nonlinear scalar; tanh(diff) stays bounded within ±1.0)
    # Coefficient "1": rescales the EMA difference (sensitivity coefficient); normally 1 (adjustable per task, not recommended)
    # scale_base: corrects the gap between the loss and the EMA (the ema(long) denominator normalizes to an "improvement rate", independent of the loss type)
    # 1e-5 (default) / 1e-6 (higher sensitivity) / 1e-4 (higher stability): keeps the denominator away from zero
    # Trauma-like reactions and habituation improve stability (ema-medium senses stability and sudden change via trust)
    def _compute_scalar(self, ema):
        scale_base_l = max(ema['long'], 1e-5)
        scale_base_m = max(ema['medium'], 1e-5)
        diff_l = (ema['long'] - ema['short']) / scale_base_l
        diff_m = (ema['long'] - ema['short']) / scale_base_m
        # If long is quiet enough, always prefer long
        if abs(diff_l) < 0.05:
            return math.tanh(diff_l)
        # Only when long is not quiet, conditionally adopt medium's quietness
        if abs(diff_m) * scale_base_m < abs(diff_l) * scale_base_l:
            return math.tanh(1 * diff_m)
        else:
            return math.tanh(1 * diff_l)

    # Early-stop only (emotion scalar for the quietness check)
    def _early_scalar(self, ema):
        scale_base_l = max(ema['long'], 1e-5)
        diff = (ema['long'] - ema['short']) / scale_base_l
        return math.tanh(1 * diff)

    # Suppression rule as in the paper: strong suppression on sudden change, slight deceleration on worsening, no intervention in calm phases, to stabilize convergence
    # The case split is currently redundant, but it is kept in this form so it is easy to customize later
    def _decide_coeff(self, scalar):  # Ranges such as B <= x <= A are also possible
        if abs(scalar) > 0.75:
            return 1.0 - abs(scalar)  # Emergency | hard braking | tanh 0.97 (0.03)
        elif abs(scalar) > 0.50:
            return 1.0 - abs(scalar)  # Sudden change | strong suppression | tanh 0.55 (0.45)
        elif abs(scalar) > 0.25:
            return 1.0 - abs(scalar)  # Worsening | slight deceleration | tanh 0.26 (0.74)
        else:
            return 1.0  # Calm | no intervention | otherwise (always returns 1.0)

    # (Important) use_shadow=False at present, because the dynamic filter effect learned from the shadow-effect is approximated directly
    # All functionality works without shadow; when reviewing the code, please treat the shadow path as out of scope

    # Shadow mixing ratio, 3-stage layout; adjustable per task. Tune the start value, range, and step width using the following as a guide
    # return start + ((scalar) - threshold) / range * width is also possible (for special uses)
    def _decide_ratio(self, scalar):
        if not self.use_shadow:
            return 0.0  # 🔸Always return a ratio of 0 when use_shadow = False
        if abs(scalar) > 0.625:
            return 1.0 - abs(scalar)  # Sudden change | strong suppression | tanh 0.73 (0.27)
        else:
            return 0.0  # A return < 0 would be leap-only (no write-back, history update only)

    # Loss acquisition (loss_val is turned into a number and used for the emotion decision; parameters without gradients (no update needed) are skipped)
    @torch.no_grad()
    def step(self, closure=None):
        loss = closure() if closure is not None else None
        loss_val = loss.item() if loss is not None else 0.0

        # EMA update and scalar generation (the scalar derived from the EMA differences decides the spike ratio etc.)
        ema = self._update_ema(self.state, loss_val)
        early_scalar = self._early_scalar(ema)
        scalar = self._compute_scalar(ema)
        coeff = self._decide_coeff(scalar)
        ratio = self._decide_ratio(scalar)
        trust = math.copysign((1.0 - abs(scalar)), scalar)
        emoDpt = 8.0 * abs(trust)

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue

                grad = p.grad
                state = self.state[p]

                # The dynamic LR correction adjusts shadow formation by trust (trust is used as a positive value; it never goes negative here)
                # shadow: only when needed (a dynamic history update that lets the spiked parts of p pull up to 10% toward the current value)
                # Mixing ratio: computed only when the scalar exceeds the threshold (screening for trustworthy emotion signals)
                # On sudden change, the emotion mechanism mixes in shadow to suppress strongly (hard braking secures stability)
                # The new shadow-system cooperates with the dynamic LR and trust, and also exhibits selective sparsity
                if self.use_shadow:
                    if 'shadow' not in state:  # 🔸shadow = False (default)
                        state['shadow'] = p.clone()
                    if ratio > 0:  # Write-back and history update (strong suppression on sudden change, weaker history update)
                        p.mul_(1 - ratio).add_(state['shadow'], alpha=abs(trust))
                    else:  # No write-back, history update only: 10% × trust
                        leap_ratio = 0.1 * abs(trust)
                        state['shadow'].lerp_(p, leap_ratio)

                # emoDrive operating range (Turbo & Trust LR system)
                if 0.25 < abs(scalar) < 0.5:
                    emoDrive = emoDpt * (1.0 + 0.1 * trust)  # Acceleration/deceleration zone correction
                elif abs(scalar) > 0.75:
                    emoDrive = coeff  # Emergency | hard braking | tanh 0.97 (0.03)
                else:
                    emoDrive = 1.0  # No-intervention zone

                # emoPulse (estimates D / noise from the loss time series and generates a fully automatic LR)
                # noise_estimate: EMA of the loss fluctuation (instability)
                self.noise_est = 0.8 * self.noise_est + 0.2 * abs(trust)
                noise = max(self.noise_est, 1e-10)  # Lower bound 1e-10
                # d_estimate: EMA of the loss improvement trend (a proxy for the distance D)
                self.d_est = 0.9 * self.d_est + 0.1 * max(trust, 0.0)  # Keep it non-negative
                # Upper bound; if learning is oddly slow or too fast, an emoScope around 5.0-20.0 works well / reference value 20.0
                d = min(self.d_est, self.emoScope)

                # --- Start Gradient Update Logic ---
                # Gradient correction using first and second moments (close to a decoupled weight decay structure)
                exp_avg = state.setdefault('exp_avg', torch.zeros_like(p))
                exp_avg_sq = state.setdefault('exp_avg_sq', torch.zeros_like(p))
                beta1, beta2 = group['betas']

                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                denom = torch.sign(exp_avg_sq.sqrt().add_(group['eps']))

                # step_size = group['lr']
                # Fully automatic LR / a safety clip around 0.3-0.5 is also fine (emoPulse = step_size)
                direction = torch.sign(exp_avg)
                emoPulse = min((d / noise), 1e-3)
                # step_size = min(step_size, 1.0)

                if group['weight_decay']:
                    p.add_(p, alpha=-group['weight_decay'] * emoPulse)
                p.addcdiv_(direction, denom, value=-emoPulse * emoDrive)
                # --- End Gradient Update Logic ---

        # Signals externally that the emotion mechanism has settled and training is "sufficiently stable" (not an automatic stopping logic)
        # Scalar record for early stop (managed in a shared buffer / keeps at most 32 entries / activity assessment)
        hist = self.state.setdefault('scalar_hist', deque(maxlen=32))
        hist.append(early_scalar)

        # Early-stop decision (a signal of quietness)
        # When 32 steps' worth of scalars satisfy the quietness condition, the flag should_stop simply becomes True
        if len(hist) >= 32:
            avg_abs = sum(abs(s) for s in hist) / len(hist)
            mean = sum(hist) / len(hist)
            var = sum((s - mean)**2 for s in hist) / len(hist)
            if avg_abs < 0.05 and var < 0.005:
                self.should_stop = True  # 💡 Can be read from outside to decide

        # Logging to TensorBoard (appended at the end of step())
        if hasattr(self, 'writer') and self.writer is not None:
            self._step_count = getattr(self, "_step_count", 0) + 1
            self.writer.add_scalar("emoLR/base", emoPulse, self._step_count)
            self.writer.add_scalar("emoLR/Turbo", emoPulse * emoDrive, self._step_count)
            self.writer.add_scalar("emostate/emoDrive", emoDrive, self._step_count)
            self.writer.add_scalar("emostate/scalar", scalar, self._step_count)

        return

"""
https://github.com/muooon/EmoSens
An emotion-driven optimizer that feels loss and navigates accordingly.
Don't think. Feel. Don't stop. Keep running. Believe in what's beyond.
"""
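All three optimizers accept an optional writer. Below is a sketch of wiring it to TensorBoard (the model and log directory are placeholders, and the import assumes emosens.py is importable from the working directory); the tags match those logged at the end of step():

# Optional TensorBoard wiring (illustrative only)
import torch
from torch.utils.tensorboard import SummaryWriter
from emosens import EmoSens  # assumes emosens.py is on the Python path

model = torch.nn.Linear(16, 1)                  # placeholder model
writer = SummaryWriter(log_dir="runs/emosens")  # placeholder log directory
optimizer = EmoSens(model.parameters(), use_shadow=False, writer=writer)
# Each step() call then logs emoLR/base, emoLR/Turbo, emostate/emoDrive, and emostate/scalar
# against an internal step counter; close the writer once training is done.
writer.close()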