Wuhuwill committed
Commit ad97ed5 · verified · 1 Parent(s): 72c68e4

Add Analysis configuration parameters

Files changed (1)
  1. coupling_analysis_config.json +33 -0
coupling_analysis_config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "model_config": {
+     "model_name": "meta-llama/Llama-2-7b-hf",
+     "target_layers": [
+       "model.layers.28",
+       "model.layers.29",
+       "model.layers.30",
+       "model.layers.31"
+     ],
+     "target_component": "mlp.down_proj",
+     "layer_selection_reason": "Last 4 layers chosen for semantic richness and memory optimization"
+   },
+   "coupling_analysis": {
+     "method": "gradient_cosine_similarity",
+     "gradient_computation": "∇_θ log P(answer|question)",
+     "normalization": "L2 normalization",
+     "high_coupling_threshold": 0.4,
+     "batch_size": 2000,
+     "memory_optimization": true
+   },
+   "dataset_processing": {
+     "source_dataset": "hotpotqa",
+     "total_samples": 97852,
+     "format": "cloze_style_questions",
+     "question_template": "Given the context: {context}, the answer to '{question}' is [MASK]."
+   },
+   "hardware_specs": {
+     "gpu": "NVIDIA A40",
+     "vram": "46GB",
+     "gpu_memory_allocated": "~21GB during analysis",
+     "gpu_memory_reserved": "~43GB during analysis"
+   }
+ }
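
For context on what these parameters are meant to drive, the sketch below shows one way the configured coupling analysis could be carried out: per-sample gradients ∇_θ log P(answer|question) are taken with respect to the mlp.down_proj weights of layers 28-31, L2-normalized, and compared by cosine similarity, with pairs scoring above 0.4 treated as highly coupled. This is a minimal illustration assuming the Hugging Face transformers API, not the repository's actual analysis code; the helper names, the toy sample pair, and the handling of the [MASK] slot are all assumptions.

import json

import torch
import torch.nn.functional as F
from transformers import AutoModelForCausalLM, AutoTokenizer

cfg = json.load(open("coupling_analysis_config.json"))
model_name = cfg["model_config"]["model_name"]            # meta-llama/Llama-2-7b-hf
target_layers = cfg["model_config"]["target_layers"]      # model.layers.28 .. 31
component = cfg["model_config"]["target_component"]       # mlp.down_proj
template = cfg["dataset_processing"]["question_template"]
threshold = cfg["coupling_analysis"]["high_coupling_threshold"]

device = "cuda" if torch.cuda.is_available() else "cpu"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name, torch_dtype=torch.float16 if device == "cuda" else torch.float32
).to(device)
model.eval()

# Restrict gradient computation to the mlp.down_proj weights of the four target layers.
target_params = []
for name, param in model.named_parameters():
    if any(name.startswith(f"{layer}.{component}") for layer in target_layers):
        param.requires_grad_(True)
        target_params.append(param)
    else:
        param.requires_grad_(False)

def sample_gradient(context, question, answer):
    # ∇_θ log P(answer | question), restricted to the target parameters and
    # flattened into one vector. The [MASK] handling is an assumption: the
    # answer tokens are placed where the mask sits and only they are scored.
    prompt = template.format(context=context, question=question).split("[MASK]")[0].rstrip()
    prompt_ids = tokenizer(prompt, return_tensors="pt").input_ids
    answer_ids = tokenizer(" " + answer, add_special_tokens=False, return_tensors="pt").input_ids
    input_ids = torch.cat([prompt_ids, answer_ids], dim=1).to(device)
    labels = input_ids.clone()
    labels[:, : prompt_ids.shape[1]] = -100       # compute loss only on the answer tokens
    model.zero_grad(set_to_none=True)
    out = model(input_ids=input_ids, labels=labels)
    out.loss.backward()                           # loss ∝ -log P(answer | question)
    return torch.cat([p.grad.detach().flatten().float() for p in target_params])

def coupling(sample_a, sample_b):
    # Cosine similarity of the two L2-normalized per-sample gradients.
    g_a = F.normalize(sample_gradient(*sample_a), dim=0)
    g_b = F.normalize(sample_gradient(*sample_b), dim=0)
    return torch.dot(g_a, g_b).item()

score = coupling(
    ("Paris is the capital of France.", "What is the capital of France?", "Paris"),
    ("The capital city of France is Paris.", "Which city is the capital of France?", "Paris"),
)
print(f"coupling score: {score:.3f}  (high coupling: {score > threshold})")

A full run over the configured 97,852 HotpotQA samples would apply this pairwise in batches of 2,000, which is where the memory_optimization flag and the reported ~21GB/~43GB A40 memory figures come in.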