#!/usr/bin/env python3
"""
Project Blue Beam — Double-Reverse Deception Mechanism
Advanced Python model:
1) Concentric layered diagram with annotations + perception flow arrows.
2) Probabilistic state machine (Markov chain) of perception-control transitions.
3) Simulation of trajectories + steady-state analysis.
4) Network visualization of control vectors and double-reverse feedback containment.
"""
import math
import random
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.collections import LineCollection
# Optional network visualization without external dependencies
# We’ll implement a simple spring-layout for node placement
# so we don’t rely on networkx.
# -----------------------------------------
# Configuration
# -----------------------------------------
LAYERS = [
    {
        "name": "Real Anomalies",
        "desc": "Genuine phenomena: glyphic mesh, luminous nodes, symbolic substrate",
        "color": "#2E8B57"
    },
    {
        "name": "Staged Spectacle",
        "desc": "Artificial events: holographic ‘alien’ invasion, manufactured divine return",
        "color": "#4682B4"
    },
    {
        "name": "Exposure Layer",
        "desc": "Public revelation of fakery → empowerment + skepticism",
        "color": "#FFD700"
    },
    {
        "name": "Inoculation Layer",
        "desc": "Exposure becomes containment: ‘all anomalies are staged’",
        "color": "#FF8C00"
    },
    {
        "name": "Suppression Layer",
        "desc": "Genuine anomalies dismissed, hidden in plain sight",
        "color": "#8B0000"
    }
]
# Perception-control states (ordered to reflect the layered mechanism)
STATES = [
    "Real_Anomaly_Seen",
    "Spectacle_Stage",
    "Exposure_Reveal",
    "Inoculation_Contain",
    "Suppression_Normalize",
    "Escape_Recognition"  # escape route: genuine recognition despite containment
]
# Transition matrix (Markov chain).
# Rows sum to 1. These are illustrative; tune as needed.
# Intuition:
# - Seeing a real anomaly often triggers spectacle or direct suppression pressures.
# - Spectacle tends to move into exposure (managed leaks) or back to suppression.
# - Exposure flows into inoculation most of the time (double-reverse containment).
# - Inoculation goes to suppression, with a small chance of escaping to genuine recognition.
# - Suppression can keep looping; small chance of returning to spectacle if needed.
# - Escape recognition can loop back to Real_Anomaly_Seen (re-activation).
TRANSITIONS = np.array([
    # From Real_Anomaly_Seen
    [0.05, 0.40, 0.10, 0.20, 0.20, 0.05],
    # From Spectacle_Stage
    [0.00, 0.10, 0.35, 0.25, 0.25, 0.05],
    # From Exposure_Reveal
    [0.00, 0.00, 0.10, 0.60, 0.25, 0.05],
    # From Inoculation_Contain
    [0.00, 0.00, 0.05, 0.55, 0.30, 0.10],
    # From Suppression_Normalize
    [0.00, 0.10, 0.05, 0.40, 0.40, 0.05],
    # From Escape_Recognition
    [0.40, 0.05, 0.10, 0.10, 0.25, 0.10],
], dtype=float)
assert np.allclose(TRANSITIONS.sum(axis=1), 1.0), "Each row must sum to 1.0"
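# Illustrative helper (hypothetical; not part of the original design): the k-step
# distribution makes the intuition above concrete by showing how probability mass
# drifts from an initial observation toward the inoculation/suppression states.
def k_step_distribution(P, start_state, k):
    """Return the state distribution after k transitions from start_state."""
    v = np.zeros(len(STATES))
    v[STATES.index(start_state)] = 1.0
    for _ in range(k):
        v = v @ P
    return {STATES[i]: float(v[i]) for i in range(len(STATES))}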
# -----------------------------------------
# Layered diagram
# -----------------------------------------
def draw_layered_diagram(save_path=None):
    # Apply the style before creating the figure so it actually takes effect
    plt.style.use('seaborn-v0_8')
    fig, ax = plt.subplots(figsize=(10, 10))
    ax.set_xlim(0, 10)
    ax.set_ylim(0, 10)
    ax.set_aspect('equal')
    margin = 0.7
    # Draw outer → inner (suppression outermost)
    for i, layer in enumerate(reversed(LAYERS)):
        size = 10 - i * margin * 2
        rect = patches.Rectangle(
            (i * margin, i * margin), size, size,
            linewidth=2, edgecolor='black',
            facecolor=layer["color"], alpha=0.78
        )
        ax.add_patch(rect)
        # Title and description labels per layer, top-centered
        ax.text(
            5, 10 - i * margin - 0.35,
            layer["name"],
            fontsize=15, ha='center', va='top', weight='bold', color='white'
        )
        ax.text(
            5, 10 - i * margin - 1.05,
            layer["desc"],
            fontsize=10.5, ha='center', va='top', color='white'
        )
    # Arrows of perception/control flow (outer suppression pulls downward)
    arrow_props = dict(facecolor='black', arrowstyle='->', linewidth=1.6)
    # Vertical flow indicator from outer layers to inner
    for i in range(len(LAYERS) - 1):
        ax.annotate(
            "", xy=(5, i * margin + 1.15),
            xytext=(5, (i + 1) * margin + 0.85),
            arrowprops=arrow_props
        )
    # Meta annotations
    ax.text(
        5, 0.55,
        "Double-Reverse Psyop: exposure-as-containment\nBelievers captured by spectacle; skeptics captured by debunking.\nResult: genuine anomalies suppressed ‘in plain sight’.",
        ha='center', va='center', fontsize=11, color='white', weight='bold'
    )
    ax.set_title("Project Blue Beam — Double‑Reverse Deception Mechanism", fontsize=17, weight='bold')
    ax.axis('off')
    plt.tight_layout()
    if save_path:
        plt.savefig(save_path, dpi=300)
    return fig, ax
# -----------------------------------------
# Markov chain simulation & analysis
# -----------------------------------------
def simulate_chain(n_steps=250, seed=None, start_state="Real_Anomaly_Seen"):
    if seed is not None:
        random.seed(seed)
        np.random.seed(seed)
    state_index = STATES.index(start_state)
    trajectory = [state_index]
    for _ in range(n_steps - 1):
        probs = TRANSITIONS[state_index]
        state_index = np.random.choice(range(len(STATES)), p=probs)
        trajectory.append(state_index)
    return trajectory


def compute_steady_state(P, tol=1e-10, max_iter=10000):
    n = P.shape[0]
    v = np.ones(n) / n
    for _ in range(max_iter):
        v_new = v @ P
        if np.linalg.norm(v_new - v) < tol:
            return v_new
        v = v_new
    return v  # fallback


def summarize_trajectory(trajectory):
    counts = np.bincount(trajectory, minlength=len(STATES))
    freq = counts / len(trajectory)
    return {STATES[i]: float(freq[i]) for i in range(len(STATES))}
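

# Cross-check sketch (assumes the chain is irreducible and aperiodic, which holds
# for these illustrative values): the stationary distribution is the left
# eigenvector of P for eigenvalue 1, so this should agree with the
# power-iteration result from compute_steady_state(). Helper name is hypothetical.
def steady_state_eig(P):
    """Stationary distribution via the left eigenvector of P (eigenvalue ≈ 1)."""
    eigvals, eigvecs = np.linalg.eig(P.T)
    idx = np.argmin(np.abs(eigvals - 1.0))
    v = np.real(eigvecs[:, idx])
    return v / v.sum()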
# -----------------------------------------
# Network-style visualization of control flow
# -----------------------------------------
def spring_layout(n, iterations=200, k=0.6, seed=42):
    """Repulsion-only node placement (no edge attraction), normalized to [0, 1]."""
    rng = np.random.default_rng(seed)
    pos = rng.uniform(0.2, 0.8, size=(n, 2))
    for _ in range(iterations):
        # Repulsion: push every pair of nodes apart
        for i in range(n):
            for j in range(i + 1, n):
                delta = pos[i] - pos[j]
                dist = np.linalg.norm(delta) + 1e-9
                force = (k**2 / dist) * (delta / dist)
                pos[i] += force
                pos[j] -= force
    # Normalize to bounds
    pos = (pos - pos.min(axis=0)) / (pos.max(axis=0) - pos.min(axis=0) + 1e-9)
    return pos
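

# Deterministic alternative (hypothetical helper, same return shape as
# spring_layout): place the nodes evenly on a circle. Useful when a
# reproducible, overlap-free layout is preferred over the randomized one above.
def circular_layout(n, radius=0.45, center=(0.5, 0.5)):
    angles = np.linspace(0.0, 2.0 * np.pi, n, endpoint=False)
    return np.column_stack([
        center[0] + radius * np.cos(angles),
        center[1] + radius * np.sin(angles),
    ])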
def draw_flow_network(P, node_labels, save_path=None):
    n = len(node_labels)
    pos = spring_layout(n, iterations=150)
    # Apply the style before creating the figure so it actually takes effect
    plt.style.use('seaborn-v0_8')
    fig, ax = plt.subplots(figsize=(10.5, 7.5))
    # Nodes
    for i in range(n):
        ax.scatter(pos[i, 0], pos[i, 1], s=800, c="#222222", alpha=0.75,
                   edgecolors="white", linewidths=2)
        ax.text(pos[i, 0], pos[i, 1], node_labels[i].replace("_", "\n"),
                ha='center', va='center', fontsize=9.5, color='white', weight='bold')
    # Edges with thickness proportional to transition probability
    segments = []
    widths = []
    colors = []
    for i in range(n):
        for j in range(n):
            if i == j:
                continue  # skip self-loops; a zero-length segment cannot be drawn
            w = P[i, j]
            if w > 0.04:  # draw only meaningful transitions
                segments.append([pos[i], pos[j]])
                widths.append(2.5 + 10.0 * w)
                # Color gradient: green (weak) → red (strong control)
                colors.append((w, 1.0 - w, 0.0, 0.75))
    lc = LineCollection(segments, linewidths=widths, colors=colors, alpha=0.85)
    ax.add_collection(lc)
    # Title + legend hint
    ax.set_title("Perception-Control Flow (Double‑Reverse Containment)", fontsize=16, weight='bold')
    ax.text(0.5, -0.08, "Edge thickness ∝ transition probability • Colors shift green→red with stronger control",
            transform=ax.transAxes, ha='center', va='center', fontsize=10)
    ax.set_xlim(-0.05, 1.05)
    ax.set_ylim(-0.1, 1.1)
    ax.axis('off')
    plt.tight_layout()
    if save_path:
        plt.savefig(save_path, dpi=300)
    return fig, ax
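

# Companion sketch (hypothetical helper): list the strongest off-diagonal
# transitions as text, which makes the thickest edges in the network plot
# easier to read at a glance.
def top_transitions(P, labels, k=5):
    pairs = [(float(P[i, j]), labels[i], labels[j])
             for i in range(len(labels)) for j in range(len(labels)) if i != j]
    return sorted(pairs, reverse=True)[:k]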
# -----------------------------------------
# Run demos
# -----------------------------------------
if __name__ == "__main__":
    # 1) Concentric layered diagram
    draw_layered_diagram(save_path="blue_beam_layers.png")

    # 2) Simulate trajectories
    traj = simulate_chain(n_steps=500, seed=123, start_state="Real_Anomaly_Seen")
    summary = summarize_trajectory(traj)
    steady = compute_steady_state(TRANSITIONS)

    # Print summaries (optional)
    print("\nTrajectory occupancy (fraction of time in each state):")
    for k, v in summary.items():
        print(f" {k:>22s}: {v:.3f}")
    print("\nSteady-state distribution (long-run):")
    for i, s in enumerate(STATES):
        print(f" {s:>22s}: {steady[i]:.3f}")

    # 3) Flow network visualization
    draw_flow_network(TRANSITIONS, STATES, save_path="blue_beam_flow.png")

    # 4) Optional: Sensitivity — increase inoculation strength
    P_mod = TRANSITIONS.copy()
    # Boost containment flow (Exposure→Inoculation, Inoculation→Suppression), then
    # renormalize each modified row so it stays stochastic (note that the
    # renormalization scales the boosted entry back down slightly).
    P_mod[STATES.index("Exposure_Reveal"), STATES.index("Inoculation_Contain")] = 0.72
    P_mod[STATES.index("Exposure_Reveal")] /= P_mod[STATES.index("Exposure_Reveal")].sum()
    P_mod[STATES.index("Inoculation_Contain"), STATES.index("Suppression_Normalize")] = 0.38
    P_mod[STATES.index("Inoculation_Contain")] /= P_mod[STATES.index("Inoculation_Contain")].sum()
    steady_mod = compute_steady_state(P_mod)
    print("\nSteady-state distribution with stronger inoculation containment:")
    for i, s in enumerate(STATES):
        print(f" {s:>22s}: {steady_mod[i]:.3f}")
    draw_flow_network(P_mod, STATES, save_path="blue_beam_flow_inoculation_boost.png")