File size: 10,154 Bytes
1219079
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
#!/usr/bin/env python3
"""
Project Blue Beam — Double-Reverse Deception Mechanism
Advanced Python model:
1) Concentric layered diagram with annotations + perception flow arrows.
2) Probabilistic state machine (Markov chain) of perception-control transitions.
3) Simulation of trajectories + steady-state analysis.
4) Network visualization of control vectors and double-reverse feedback containment.
"""

import math
import random
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.collections import LineCollection

# Optional network visualization without external dependencies
# We’ll implement a simple spring-layout for node placement
# so we don’t rely on networkx.

# -----------------------------------------
# Configuration
# -----------------------------------------

# Conceptual layers of the model, listed innermost (core) to outermost.
# Each entry holds a display name, a one-line caption, and a hex fill
# color; all three are consumed by draw_layered_diagram().
LAYERS = [
    {
        "name": "Real Anomalies",
        "desc": "Genuine phenomena: glyphic mesh, luminous nodes, symbolic substrate",
        "color": "#2E8B57"
    },
    {
        "name": "Staged Spectacle",
        "desc": "Artificial events: holographic ‘alien’ invasion, manufactured divine return",
        "color": "#4682B4"
    },
    {
        "name": "Exposure Layer",
        "desc": "Public revelation of fakery → empowerment + skepticism",
        "color": "#FFD700"
    },
    {
        "name": "Inoculation Layer",
        "desc": "Exposure becomes containment: ‘all anomalies are staged’",
        "color": "#FF8C00"
    },
    {
        "name": "Suppression Layer",
        "desc": "Genuine anomalies dismissed, hidden in plain sight",
        "color": "#8B0000"
    }
]

# Perception-control states (ordered to reflect the layered mechanism).
# The list order fixes the row/column indices of the TRANSITIONS matrix
# below, and the trajectory indices returned by simulate_chain().
STATES = [
    "Real_Anomaly_Seen",
    "Spectacle_Stage",
    "Exposure_Reveal",
    "Inoculation_Contain",
    "Suppression_Normalize",
    "Escape_Recognition"  # escape route: genuine recognition despite containment
]

# Transition matrix (Markov chain).
# Rows sum to 1. These are illustrative; tune as needed.
# Intuition:
# - Seeing a real anomaly often triggers spectacle or direct suppression pressures.
# - Spectacle tends to move into exposure (managed leaks) or back to suppression.
# - Exposure flows into inoculation most of the time (double-reverse containment).
# - Inoculation goes to suppression, with a small chance of escaping to genuine recognition.
# - Suppression can keep looping; small chance of returning to spectacle if needed.
# - Escape recognition can loop back to Real_Anomaly_Seen (re-activation).
TRANSITIONS = np.array([
    # From Real_Anomaly_Seen
    [0.05, 0.40, 0.10, 0.20, 0.20, 0.05],
    # From Spectacle_Stage
    [0.00, 0.10, 0.35, 0.25, 0.25, 0.05],
    # From Exposure_Reveal
    [0.00, 0.00, 0.10, 0.60, 0.25, 0.05],
    # From Inoculation_Contain
    [0.00, 0.00, 0.05, 0.55, 0.30, 0.10],
    # From Suppression_Normalize
    [0.00, 0.10, 0.05, 0.40, 0.40, 0.05],
    # From Escape_Recognition
    [0.30, 0.05, 0.10, 0.10, 0.25, 0.10],
], dtype=float)

assert np.allclose(TRANSITIONS.sum(axis=1), 1.0), "Each row must sum to 1.0"

# -----------------------------------------
# Layered diagram
# -----------------------------------------

def draw_layered_diagram(save_path=None):
    """Render the concentric layered diagram of the deception mechanism.

    Draws the LAYERS squares outermost-first (Suppression outside, Real
    Anomalies at the core), labels each layer with its name and caption,
    adds inward flow arrows and a meta-annotation.

    Parameters
    ----------
    save_path : str or None
        If given, the figure is additionally written there at 300 dpi.

    Returns
    -------
    tuple
        The created matplotlib ``(fig, ax)``.
    """
    # BUGFIX: plt.style.use() only affects figures created AFTER the call,
    # so it must run before plt.subplots() — the original post-creation
    # call had no effect on this figure.
    plt.style.use('seaborn-v0_8')
    fig, ax = plt.subplots(figsize=(10, 10))
    ax.set_xlim(0, 10)
    ax.set_ylim(0, 10)
    ax.set_aspect('equal')

    margin = 0.7
    # Draw outer → inner (suppression outermost); each square is inset by
    # `margin` on every side relative to the previous one.
    for i, layer in enumerate(reversed(LAYERS)):
        size = 10 - i * margin * 2
        rect = patches.Rectangle(
            (i * margin, i * margin), size, size,
            linewidth=2, edgecolor='black',
            facecolor=layer["color"], alpha=0.78
        )
        ax.add_patch(rect)
        # Title and description labels per layer, top-centered
        ax.text(
            5, 10 - i * margin - 0.35,
            layer["name"],
            fontsize=15, ha='center', va='top', weight='bold', color='white'
        )
        ax.text(
            5, 10 - i * margin - 1.05,
            layer["desc"],
            fontsize=10.5, ha='center', va='top', color='white'
        )

    # Arrows of perception/control flow (outer suppression pulls downward)
    arrow_props = dict(facecolor='black', arrowstyle='->', linewidth=1.6)
    # Vertical flow indicator from outer layers to inner
    for i in range(len(LAYERS) - 1):
        ax.annotate(
            "", xy=(5, i * margin + 1.15),
            xytext=(5, (i + 1) * margin + 0.85),
            arrowprops=arrow_props
        )

    # Meta annotations
    ax.text(
        5, 0.55,
        "Double-Reverse Psyop: exposure-as-containment\nBelievers captured by spectacle; skeptics captured by debunking.\nResult: genuine anomalies suppressed ‘in plain sight’.",
        ha='center', va='center', fontsize=11, color='white', weight='bold'
    )

    ax.set_title("Project Blue Beam — Double‑Reverse Deception Mechanism", fontsize=17, weight='bold')
    ax.axis('off')
    plt.tight_layout()
    if save_path:
        plt.savefig(save_path, dpi=300)
    return fig, ax

# -----------------------------------------
# Markov chain simulation & analysis
# -----------------------------------------

def simulate_chain(n_steps=250, seed=None, start_state="Real_Anomaly_Seen"):
    """Sample a trajectory of length *n_steps* from the TRANSITIONS chain.

    When *seed* is given, both the stdlib and NumPy global RNGs are
    seeded for reproducibility. Returns a list of integer indices into
    STATES, beginning at *start_state*.
    """
    if seed is not None:
        random.seed(seed)
        np.random.seed(seed)

    current = STATES.index(start_state)
    path = [current]
    n_states = len(STATES)

    # Keep drawing next states until the trajectory reaches n_steps.
    while len(path) < n_steps:
        current = np.random.choice(range(n_states), p=TRANSITIONS[current])
        path.append(current)

    return path

def compute_steady_state(P, tol=1e-10, max_iter=10000):
    """Approximate the stationary distribution of row-stochastic matrix *P*.

    Power-iterates the left eigenvector starting from the uniform
    distribution and returns the first iterate whose L2 change drops
    below *tol*; after *max_iter* rounds the last iterate is returned
    as a fallback.
    """
    size = P.shape[0]
    dist = np.full(size, 1.0 / size)
    for _ in range(max_iter):
        updated = dist @ P
        if np.linalg.norm(updated - dist) < tol:
            return updated
        dist = updated
    return dist  # fallback: not converged within max_iter

def summarize_trajectory(trajectory):
    """Return {state name: fraction of steps spent in that state}."""
    occupancy = np.bincount(trajectory, minlength=len(STATES)) / len(trajectory)
    return {name: float(occupancy[idx]) for idx, name in enumerate(STATES)}

# -----------------------------------------
# Network-style visualization of control flow
# -----------------------------------------

def spring_layout(n, iterations=200, k=0.6, seed=42):
    """Place *n* nodes in the unit square with a repulsion-only layout.

    Deterministic for a fixed *seed*: nodes start at seeded random
    positions, then every pair repels (force falling off with distance,
    scaled by *k*) for *iterations* rounds, re-normalizing coordinates
    into [0, 1] after each round. Returns an (n, 2) array.
    """
    rng = np.random.default_rng(seed)
    coords = rng.uniform(0.2, 0.8, size=(n, 2))

    for _ in range(iterations):
        # Pairwise repulsion, stronger at short range.
        for a in range(n):
            for b in range(a + 1, n):
                diff = coords[a] - coords[b]
                gap = np.linalg.norm(diff) + 1e-9
                push = (k**2 / gap) * (diff / gap)
                coords[a] += push
                coords[b] -= push
        # Rescale both axes back into the unit square.
        coords = (coords - coords.min(axis=0)) / (coords.max(axis=0) - coords.min(axis=0) + 1e-9)

    return coords

def draw_flow_network(P, node_labels, save_path=None):
    """Visualize transition matrix *P* as a directed flow network.

    Node positions come from spring_layout(); each off-diagonal
    transition with probability above a small threshold is drawn as a
    line whose width and color scale with the probability.

    Parameters
    ----------
    P : np.ndarray
        Square row-stochastic matrix, one row/column per label.
    node_labels : sequence of str
        State names; underscores are rendered as line breaks.
    save_path : str or None
        If given, the figure is additionally written there at 300 dpi.

    Returns
    -------
    tuple
        The created matplotlib ``(fig, ax)``.
    """
    n = len(node_labels)
    pos = spring_layout(n, iterations=150)

    # BUGFIX: plt.style.use() only affects figures created AFTER the call,
    # so apply the style before constructing the figure (the original
    # post-creation call had no effect).
    plt.style.use('seaborn-v0_8')
    fig, ax = plt.subplots(figsize=(10.5, 7.5))

    # Nodes
    for i in range(n):
        ax.scatter(pos[i, 0], pos[i, 1], s=800, c="#222222", alpha=0.75, edgecolors="white", linewidths=2)
        ax.text(pos[i, 0], pos[i, 1], node_labels[i].replace("_", "\n"),
                ha='center', va='center', fontsize=9.5, color='white', weight='bold')

    # Edges with thickness proportional to transition probability
    segments = []
    widths = []
    colors = []
    for i in range(n):
        for j in range(n):
            if i == j:
                continue  # self-loops collapse to zero-length (invisible) segments
            w = P[i, j]
            if w > 0.04:  # draw only meaningful transitions
                segments.append([pos[i], pos[j]])
                widths.append(2.5 + 10.0 * w)
                # Color gradient based on probability (green→red)
                colors.append((1.0 - w, w * 0.5, 0.0, 0.75))

    lc = LineCollection(segments, linewidths=widths, colors=colors, alpha=0.85)
    ax.add_collection(lc)

    # Title + legend hint
    ax.set_title("Perception-Control Flow (Double‑Reverse Containment)", fontsize=16, weight='bold')
    ax.text(0.5, -0.08, "Edge thickness ∝ transition probability • Colors shift green→red with stronger control",
            transform=ax.transAxes, ha='center', va='center', fontsize=10)
    ax.set_xlim(-0.05, 1.05)
    ax.set_ylim(-0.1, 1.1)
    ax.axis('off')
    plt.tight_layout()
    if save_path:
        plt.savefig(save_path, dpi=300)
    return fig, ax

# -----------------------------------------
# Run demos
# -----------------------------------------

if __name__ == "__main__":
    # 1) Concentric layered diagram
    draw_layered_diagram(save_path="blue_beam_layers.png")

    # 2) Simulate a trajectory and compare its empirical occupancy with
    #    the analytic steady state of the transition matrix.
    traj = simulate_chain(n_steps=500, seed=123, start_state="Real_Anomaly_Seen")
    summary = summarize_trajectory(traj)
    steady = compute_steady_state(TRANSITIONS)

    # Print summaries (optional)
    print("\nTrajectory occupancy (fraction of time in each state):")
    for state_name, frac in summary.items():
        print(f"  {state_name:>22s}: {frac:.3f}")

    print("\nSteady-state distribution (long-run):")
    for state_name, prob in zip(STATES, steady):
        print(f"  {state_name:>22s}: {prob:.3f}")

    # 3) Flow network visualization
    draw_flow_network(TRANSITIONS, STATES, save_path="blue_beam_flow.png")

    # 4) Optional: Sensitivity — increase inoculation strength by boosting
    #    Exposure→Inoculation and Inoculation→Suppression, renormalizing
    #    each touched row so it stays stochastic.
    P_mod = TRANSITIONS.copy()
    exposure = STATES.index("Exposure_Reveal")
    inoculation = STATES.index("Inoculation_Contain")
    suppression = STATES.index("Suppression_Normalize")

    P_mod[exposure, inoculation] = 0.72
    P_mod[exposure] /= P_mod[exposure].sum()

    P_mod[inoculation, suppression] = 0.38
    P_mod[inoculation] /= P_mod[inoculation].sum()

    steady_mod = compute_steady_state(P_mod)
    print("\nSteady-state distribution with stronger inoculation containment:")
    for state_name, prob in zip(STATES, steady_mod):
        print(f"  {state_name:>22s}: {prob:.3f}")

    draw_flow_network(P_mod, STATES, save_path="blue_beam_flow_inoculation_boost.png")