-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathViT_demostration.py
More file actions
224 lines (184 loc) · 6.74 KB
/
ViT_demostration.py
File metadata and controls
224 lines (184 loc) · 6.74 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 2 19:07:59 2025
@author: jindongfeng
"""
import random
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
import glob
# --- 1. Configuration & Paths ---
# NOTE(review): absolute, machine-specific paths — consider CLI args or env vars.
TEST_DATA_PATH = "/Users/jindongfeng/Desktop/BU/EC523/test_data"  # ImageFolder-style test-set root (one subfolder per class)
CHECKPOINT_DIR = "/Users/jindongfeng/Desktop/BU/EC523/checkpoints/"  # directory holding *.pth checkpoint files
MODEL_DIR = "/Users/jindongfeng/Desktop/BU/EC523/"  # directory containing vit_model.py (added to sys.path below)
# Target Specific Epoch
TARGET_EPOCH = "214"  # substring matched against checkpoint filenames in section 6
# Hyperparameters
# NOTE(review): these must match the configuration the checkpoint was trained
# with, or load_state_dict will fail on shape mismatches — TODO confirm.
IMG_SIZE = 224   # input resolution; images are resized to IMG_SIZE x IMG_SIZE
BATCH_SIZE = 32  # evaluation batch size
PATCH_SIZE = 16  # ViT patch size
EMBED_DIM = 256  # transformer embedding dimension
DEPTH = 4        # number of transformer encoder blocks
NUM_HEADS = 8    # attention heads per block
MLP_DIM = 512    # hidden width of each block's MLP
MEAN = [0.5, 0.5, 0.5]  # per-channel normalization mean (also used to un-normalize for display)
STD = [0.5, 0.5, 0.5]   # per-channel normalization std
# --- 2. Device Setup ---
# Prefer Apple-Silicon MPS, then CUDA, and fall back to CPU.
if torch.backends.mps.is_available():
    device, banner = torch.device("mps"), "Using Device: MPS (Apple Silicon)"
elif torch.cuda.is_available():
    device, banner = torch.device("cuda"), "Using Device: CUDA"
else:
    device, banner = torch.device("cpu"), "Using Device: CPU"
print(banner)
# --- 3. Dynamic Model Import ---
# Make the folder containing vit_model.py importable, then pull in the class.
sys.path.append(MODEL_DIR)
try:
    from vit_model import ViT_S16
except ImportError:
    # Abort early: nothing below can run without the model definition.
    print(f"Error: Could not import vit_model.py from {MODEL_DIR}")
    sys.exit(1)
else:
    print("Successfully imported ViT_S16.")
# --- 4. Data Preparation ---
# Preprocessing pipeline: resize to the ViT input resolution, convert to a
# tensor, and normalize with the same statistics used at training time.
_pipeline = [
    transforms.Resize((IMG_SIZE, IMG_SIZE)),
    transforms.ToTensor(),
    transforms.Normalize(mean=MEAN, std=STD),
]
data_transform = transforms.Compose(_pipeline)
# Fail fast if the dataset directory is missing.
if not os.path.exists(TEST_DATA_PATH):
    print(f"Error: Dataset path not found at {TEST_DATA_PATH}")
    sys.exit(1)
def is_valid_file(path):
    """Return True if *path* names a usable image file.

    Rejects hidden files (leading '.'), which also covers macOS AppleDouble
    '._*' metadata files, then accepts only known image extensions
    (case-insensitive).
    """
    name = os.path.basename(path)
    if name.startswith("."):  # hidden or "._" resource-fork entry
        return False
    return name.lower().endswith(('.jpg', '.jpeg', '.png', '.bmp', '.tiff'))
print("Loading Test Dataset...")
# Class labels are inferred from subfolder names; invalid/hidden files are
# filtered out by is_valid_file.
val_dataset = datasets.ImageFolder(
    root=TEST_DATA_PATH, transform=data_transform, is_valid_file=is_valid_file
)
# No shuffling: evaluation order does not matter and determinism aids debugging.
val_loader = DataLoader(val_dataset, batch_size=BATCH_SIZE, shuffle=False)
class_names = val_dataset.classes
num_classes = len(class_names)
print(f"Found {num_classes} classes.")
# --- 5. Helper Function: Evaluate ---
def evaluate_model(model, loader, device):
    """Compute top-1 accuracy (percent) of `model` over `loader`.

    Args:
        model: torch.nn.Module producing (batch, num_classes) logits.
        loader: iterable yielding (images, labels) batches.
        device: torch.device to move each batch to before the forward pass.

    Returns:
        float: accuracy as a percentage in [0, 100]; 0.0 for an empty loader.
    """
    model.eval()  # disable dropout / use inference-mode normalization stats
    correct = 0
    total = 0
    with torch.no_grad():  # evaluation needs no autograd graph
        for images, labels in loader:
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            # Use the tensor directly: the legacy `.data` attribute silently
            # detaches from autograd and is discouraged in modern PyTorch.
            predicted = outputs.argmax(dim=1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    if total == 0:  # guard against an empty dataset (avoids ZeroDivisionError)
        return 0.0
    return 100 * correct / total
# --- 6. Load Specific Checkpoint (214) ---
print(f"\nSearching for checkpoint 214 in {CHECKPOINT_DIR}...")
# Match any .pth file whose name contains the target epoch number.
pattern = os.path.join(CHECKPOINT_DIR, f"*{TARGET_EPOCH}*.pth")
checkpoint_files = glob.glob(pattern)
if not checkpoint_files:
    print(f"Error: No checkpoint found containing '{TARGET_EPOCH}'")
    sys.exit(1)
target_cp_path = checkpoint_files[0]  # first match wins
print(f"Loading Checkpoint: {os.path.basename(target_cp_path)}")
# Initialize Model with the same hyperparameters the checkpoint was trained with.
model_kwargs = dict(
    img_size=IMG_SIZE,
    patch_size=PATCH_SIZE,
    in_chans=3,
    num_classes=num_classes,
    embed_dim=EMBED_DIM,
    depth=DEPTH,
    num_heads=NUM_HEADS,
    mlp_dim=MLP_DIM,
    dropout=0.1,
)
model = ViT_S16(**model_kwargs)
model.to(device)
# Load Weights.  Checkpoints may be either a raw state dict or a wrapper dict
# keyed by 'model_state_dict' / 'state_dict' — handle all three shapes.
# NOTE(review): torch.load unpickles arbitrary data; only load trusted files.
try:
    checkpoint = torch.load(target_cp_path, map_location=device)
    if isinstance(checkpoint, dict) and 'model_state_dict' in checkpoint:
        state = checkpoint['model_state_dict']
    elif isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        state = checkpoint['state_dict']
    else:
        state = checkpoint
    model.load_state_dict(state)
    print("Weights loaded successfully.")
except RuntimeError as e:
    # Typically a shape mismatch between checkpoint and model hyperparameters.
    print(f"Error loading weights: {e}")
    sys.exit(1)
# --- 7. Evaluate and Print Results ---
acc = evaluate_model(model, val_loader, device)
divider = "=" * 50
print("\n" + divider)
print(f"TARGET CHECKPOINT: {os.path.basename(target_cp_path)}")
print(f"ACCURACY ON TEST DATA: {acc:.2f}%")
print(divider + "\n")
# --- 8. Visualization ---
print("Generating visualization...")
model.eval()
correct_pool = []
wrong_pool = []
pool_limit = 50  # Collect up to 50 of each type before stopping scan
with torch.no_grad():
    for images, labels in val_loader:
        images, labels = images.to(device), labels.to(device)
        preds = model(images).max(1)[1]
        for img_tensor, true_t, pred_t in zip(images, labels, preds):
            true_idx, pred_idx = true_t.item(), pred_t.item()
            sample = (img_tensor.cpu(), true_idx, pred_idx)
            # Route into the matching pool, capped at pool_limit each.
            bucket = wrong_pool if true_idx != pred_idx else correct_pool
            if len(bucket) < pool_limit:
                bucket.append(sample)
        # Stop scanning once both pools offer plenty of options.
        if len(wrong_pool) >= pool_limit and len(correct_pool) >= pool_limit:
            break
print(f"Pool Collected: {len(correct_pool)} correct samples, {len(wrong_pool)} wrong samples.")
# Construct the plot batch (aim for 4 images total)
plot_list = []
# 1. Guarantee at least one wrong sample (if available)
if wrong_pool:
    plot_list.append(random.choice(wrong_pool))
# 2. Guarantee at least one correct sample (if available)
if correct_pool:
    plot_list.append(random.choice(correct_pool))
# 3. Fill the remaining slots from what is left, EXCLUDING the samples already
# picked — the original random.choice over the combined pool could plot the
# same image twice. Identity comparison is used because the tuples contain
# tensors, for which `==` compares elementwise instead of yielding a bool.
remaining_pool = [s for s in correct_pool + wrong_pool
                  if not any(s is chosen for chosen in plot_list)]
random.shuffle(remaining_pool)
while len(plot_list) < 4 and remaining_pool:
    plot_list.append(remaining_pool.pop())
# Plotting Function
def imshow(img, ax):
    """Un-normalize a CHW image tensor and render it on a matplotlib axis."""
    arr = img.permute(1, 2, 0).numpy()     # CHW -> HWC layout for matplotlib
    arr = np.clip(arr * STD + MEAN, 0, 1)  # invert Normalize(mean=MEAN, std=STD)
    ax.imshow(arr)
    ax.axis('off')
if not plot_list:
    print("Could not collect any samples. Is dataset empty?")
else:
    fig, axes = plt.subplots(1, len(plot_list), figsize=(12, 5))
    if len(plot_list) == 1:
        axes = [axes]  # plt.subplots returns a bare Axes for a single column
    fig.suptitle(f"Predictions from Epoch {TARGET_EPOCH} (Random Selection)", fontsize=14)
    for ax, (img, true_idx, pred_idx) in zip(axes, plot_list):
        true_label = class_names[true_idx]
        pred_label = class_names[pred_idx]
        # Green title for a correct prediction, red for a mistake.
        title_color = 'green' if true_idx == pred_idx else 'red'
        imshow(img, ax)
        ax.set_title(f"True: {true_label}\nPred: {pred_label}", color=title_color, fontweight='bold')
    plt.tight_layout()
    plt.show()