Changes from all commits · 63 commits
fe94a07
Added recognition branch for tasks
shakes76 Sep 21, 2025
71d617b
Initial setup for Project 7 - 3D Improved UNet (Abhya)
abhyagarg22 Oct 28, 2025
da04024
Added HipMRIDataset loader for 3D MRI volumes
abhyagarg22 Oct 28, 2025
6290210
Implemented Improved 3D UNet model for 3D MRI segmentation
abhyagarg22 Oct 28, 2025
a62078f
Added and tested training script for Improved 3D UNet
abhyagarg22 Oct 28, 2025
5062a57
Added and tested predict.py for Improved 3D UNet inference
abhyagarg22 Oct 28, 2025
38e9dc6
Modified files to implement Improved 3D U-Net for Prostate MRI segmen…
abhyagarg22 Oct 30, 2025
d890c75
Removed generated artifacts and compiled files from repository
abhyagarg22 Oct 30, 2025
b3e4d60
Update predict.py
abhyagarg22 Oct 30, 2025
d532752
Updated DATA_DIR to use real dataset folder
abhyagarg22 Oct 30, 2025
96b897c
Updated dataset path to correct location for training on Rangpur cluster
abhyagarg22 Oct 30, 2025
001519f
Updated train.py and dataset.py to use real MRI and label paths on Ra…
abhyagarg22 Oct 30, 2025
e8f04d8
Removed tracked binary and model output files
abhyagarg22 Oct 30, 2025
74b6e9e
Fix gradient reset and finalize training script
abhyagarg22 Nov 1, 2025
24b6cf0
Fix CUDA loss crash: normalized labels, BCEWithLogitsLoss, lower LR
abhyagarg22 Nov 1, 2025
821d3d1
Fix label/output shape mismatch for BCE loss
abhyagarg22 Nov 1, 2025
c2c413e
fix: added label_dir to predict.py dataset initialization
abhyagarg22 Nov 1, 2025
f7b5814
Fix tuple unpacking in predict.py to correctly extract image tensor b…
abhyagarg22 Nov 1, 2025
52d216f
Increase learning rate to 0.001 and train for 20 epochs
abhyagarg22 Nov 1, 2025
41f13db
Fixed epochs
abhyagarg22 Nov 1, 2025
f7b57b0
Add Dice coefficient metric display during training
abhyagarg22 Nov 1, 2025
75e1312
Fix label binarization and duplicate sigmoid to improve Dice score
abhyagarg22 Nov 1, 2025
e424902
Delete recognition/Prostate3D_ImprovedUNet_Abhya/README.md
abhyagarg22 Nov 2, 2025
858809c
Delete README.md
abhyagarg22 Nov 2, 2025
b7d0cba
Update README.md
abhyagarg22 Nov 2, 2025
047602c
fix: correct over-segmentation and update predict.py for better mask …
abhyagarg22 Nov 5, 2025
888f6b9
Merge branch 'topic-recognition' of https://github.com/abhyagarg22/Pa…
abhyagarg22 Nov 5, 2025
d3aecb1
fix: correct label preprocessing to use prostate class (label == 5)
abhyagarg22 Nov 5, 2025
afb71a1
Fix loss weighting for prostate class imbalance and enable fine-tuning
abhyagarg22 Nov 5, 2025
19d0ca6
Added average dice coefficient
abhyagarg22 Nov 5, 2025
e1cfdf2
Fixed train.py and dataset.py
abhyagarg22 Nov 5, 2025
b232733
fix: save smooth probability map instead of binary mask in predict.py
abhyagarg22 Nov 5, 2025
0da7cb0
fix: apply softmax-based inference for multiclass segmentation in pre…
abhyagarg22 Nov 5, 2025
3debd7c
fix: remove hard thresholding in predict.py inference
abhyagarg22 Nov 5, 2025
dbbe761
fix: rescale sigmoid output for better visualization and precision
abhyagarg22 Nov 5, 2025
c1e4cbc
fix(predict.py): ensure sigmoid applied to model output for single-ch…
abhyagarg22 Nov 5, 2025
982918a
fix(dataset): correctly match MRI and label files by patient-week ID
abhyagarg22 Nov 5, 2025
1d3ca6f
Add validation split and fix training dataloader in train.py
abhyagarg22 Nov 5, 2025
b8fd7f7
Improve training stability and fix underfitting (z-score normalizatio…
abhyagarg22 Nov 5, 2025
450297f
Added Dice + Focal combined loss to fix flat predictions and improve …
abhyagarg22 Nov 5, 2025
0b0d9be
fix: ensure sigmoid normalization before saving predictions for visua…
abhyagarg22 Nov 5, 2025
18b7db6
add: multi-MRI prediction loop for UNet3D inference
abhyagarg22 Nov 5, 2025
7762c37
Updated ImprovedUNet3D to 6-class segmentation, switched to CrossEntr…
abhyagarg22 Nov 5, 2025
296b167
Refactor: multi-class support + proper affine saving
abhyagarg22 Nov 5, 2025
4ef53d5
Increase epoch
abhyagarg22 Nov 5, 2025
e17bc99
Adjust epoch due to runtime error
abhyagarg22 Nov 5, 2025
89d6134
Increase LR to 0.002, weight_decay to 1e-4, add flip augmentation
abhyagarg22 Nov 6, 2025
8f268f7
Removed verbose
abhyagarg22 Nov 6, 2025
1907520
Optimize 3D UNet training with mixed precision and DataLoader improve…
abhyagarg22 Nov 6, 2025
b87495c
Fix train.py
abhyagarg22 Nov 6, 2025
b6a7492
Reduce 3D MRI volume depth for memory optimization
abhyagarg22 Nov 6, 2025
9fec5e8
Print only final training Dice coefficient and remove validation output
abhyagarg22 Nov 6, 2025
0a977e0
Improve training stability
abhyagarg22 Nov 6, 2025
875706b
Add crop flag to HipMRIDataset for full-volume inference
abhyagarg22 Nov 6, 2025
291e360
Update README.md_final
abhyagarg22 Nov 6, 2025
228bc9b
Add Training script in README.md
abhyagarg22 Nov 6, 2025
2912ff4
Add files via upload
abhyagarg22 Nov 6, 2025
f4f8114
Add visualization files
abhyagarg22 Nov 6, 2025
65e8a47
Add files via upload
abhyagarg22 Nov 6, 2025
e536fca
Organize READMEs: move project README and add recognition + root READ…
abhyagarg22 Nov 22, 2025
84800ca
Fix README.md
abhyagarg22 Nov 22, 2025
ee2e1fd
Added dice score output log again as png
abhyagarg22 Nov 22, 2025
2867405
Merge branch 'topic-recognition' of https://github.com/abhyagarg22/Pa…
abhyagarg22 Nov 22, 2025
446 changes: 446 additions & 0 deletions recognition/Prostate3D_ImprovedUNet_Abhya/README.md

Large diffs are not rendered by default.

73 changes: 73 additions & 0 deletions recognition/Prostate3D_ImprovedUNet_Abhya/dataset.py
@@ -0,0 +1,73 @@
# dataset.py — Project 7 (Abhya)
# Loads 3D MRI volumes and returns them as PyTorch tensors

import os
import torch
import nibabel as nib
from torch.utils.data import Dataset
import numpy as np

class HipMRIDataset(Dataset):
    def __init__(self, image_dir, label_dir, transform=None, crop=True, augment=True):
        self.crop = crop
        self.augment = augment  # random flips on/off (disable for validation/inference)
        self.image_dir = image_dir
        self.label_dir = label_dir
        self.transform = transform

# Match images and labels based on patient ID prefix
self.image_files = sorted([
f for f in os.listdir(image_dir) if f.endswith('.nii.gz')
])
self.label_files = sorted([
f for f in os.listdir(label_dir) if f.endswith('.nii.gz')
])

# Filter to keep only those with matching IDs
self.pairs = []
for img in self.image_files:
pid = "_".join(img.split("_")[:2]) # e.g. "D031"
match = next((l for l in self.label_files if l.startswith(pid)), None)
if match:
self.pairs.append((img, match))

def __len__(self):
return len(self.pairs)

def __getitem__(self, idx):
img_name, label_name = self.pairs[idx]
img_path = os.path.join(self.image_dir, img_name)
label_path = os.path.join(self.label_dir, label_name)

# Load MRI and label
image = nib.load(img_path).get_fdata()
label = nib.load(label_path).get_fdata()

        # Normalise: z-score, clip outliers at ±3 SD, then rescale to [0, 1]
        image = (image - np.mean(image)) / (np.std(image) + 1e-8)
        image = np.clip(image, -3, 3)
        image = (image - image.min()) / (image.max() - image.min() + 1e-8)

        # Random flip augmentation (skipped when augment=False, e.g. at inference)
        if self.augment:
            if np.random.rand() > 0.5:
                image = np.flip(image, axis=1).copy()
                label = np.flip(label, axis=1).copy()
            if np.random.rand() > 0.5:
                image = np.flip(image, axis=2).copy()
                label = np.flip(label, axis=2).copy()

        # Crop the central 64 slices along the last axis to reduce memory use,
        # applied to image and label on the same axis so they stay aligned
        if self.crop:
            centre = image.shape[2] // 2
            image = image[:, :, centre - 32 : centre + 32]
            label = label[:, :, centre - 32 : centre + 32]

        # image: add channel dimension -> (1, H, W, D)
        image = np.expand_dims(image, axis=0)

# label: KEEP classes 0..5
label = label.astype(np.int64)

image = torch.tensor(image, dtype=torch.float32)
label = torch.tensor(label, dtype=torch.long)
if self.transform:
image = self.transform(image)

return image, label
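A quick way to sanity-check the loader is to instantiate it directly and inspect one sample; a minimal sketch, assuming the same Rangpur paths used in train.py:

# check_dataset.py — minimal sketch (paths assumed to match train.py)
from dataset import HipMRIDataset

ds = HipMRIDataset(
    "/home/groups/comp3710/HipMRI_Study_open/semantic_MRs",
    "/home/groups/comp3710/HipMRI_Study_open/semantic_labels_only",
    crop=True,
)
print(f"{len(ds)} matched image/label pairs")

img, lbl = ds[0]
print(img.shape, img.dtype)              # e.g. torch.Size([1, H, W, 64]), torch.float32
print(lbl.shape, lbl.dtype)              # matching spatial shape, torch.int64
print(lbl.unique())                      # expected classes: 0..5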

72 changes: 72 additions & 0 deletions recognition/Prostate3D_ImprovedUNet_Abhya/modules.py
@@ -0,0 +1,72 @@
# modules.py — Project 7 (Abhya)
# Improved 3D U-Net for prostate MRI segmentation
# Adds residual connections and dropout for better performance

import torch
import torch.nn as nn

class ResidualBlock3D(nn.Module):
"""3D Residual block with BatchNorm and Dropout"""
def __init__(self, in_ch, out_ch, dropout=0.3):
super().__init__()
self.conv1 = nn.Conv3d(in_ch, out_ch, kernel_size=3, padding=1)
self.bn1 = nn.BatchNorm3d(out_ch)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv3d(out_ch, out_ch, kernel_size=3, padding=1)
self.bn2 = nn.BatchNorm3d(out_ch)
self.dropout = nn.Dropout3d(dropout)

# Shortcut for residual connection
self.shortcut = (
nn.Conv3d(in_ch, out_ch, kernel_size=1)
if in_ch != out_ch else nn.Identity()
)

def forward(self, x):
identity = self.shortcut(x)
out = self.relu(self.bn1(self.conv1(x)))
out = self.dropout(out)
out = self.bn2(self.conv2(out))
out += identity
return self.relu(out)


class ImprovedUNet3D(nn.Module):
"""Improved 3D U-Net with residual encoder-decoder blocks"""
def __init__(self, in_channels=1, out_channels=6, base_filters=32):
super().__init__()

# Encoder
self.enc1 = ResidualBlock3D(in_channels, base_filters)
self.pool1 = nn.MaxPool3d(2)
self.enc2 = ResidualBlock3D(base_filters, base_filters * 2)
self.pool2 = nn.MaxPool3d(2)
self.enc3 = ResidualBlock3D(base_filters * 2, base_filters * 4)
self.pool3 = nn.MaxPool3d(2)

# Bottleneck
self.bottleneck = ResidualBlock3D(base_filters * 4, base_filters * 8)

# Decoder
self.up3 = nn.ConvTranspose3d(base_filters * 8, base_filters * 4, 2, 2)
self.dec3 = ResidualBlock3D(base_filters * 8, base_filters * 4)
self.up2 = nn.ConvTranspose3d(base_filters * 4, base_filters * 2, 2, 2)
self.dec2 = ResidualBlock3D(base_filters * 4, base_filters * 2)
self.up1 = nn.ConvTranspose3d(base_filters * 2, base_filters, 2, 2)
self.dec1 = ResidualBlock3D(base_filters * 2, base_filters)

# Final output
self.final_conv = nn.Conv3d(base_filters, out_channels, 1)

def forward(self, x):
e1 = self.enc1(x)
e2 = self.enc2(self.pool1(e1))
e3 = self.enc3(self.pool2(e2))
b = self.bottleneck(self.pool3(e3))

d3 = self.dec3(torch.cat([self.up3(b), e3], dim=1))
d2 = self.dec2(torch.cat([self.up2(d3), e2], dim=1))
d1 = self.dec1(torch.cat([self.up1(d2), e1], dim=1))

return self.final_conv(d1)
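The forward pass halves each spatial dimension three times and the decoder restores it, so inputs whose depth, height and width are divisible by 8 pass through cleanly. A minimal shape check with a dummy volume:

# Shape check for ImprovedUNet3D (dummy input, every spatial dim divisible by 8)
import torch
from modules import ImprovedUNet3D

model = ImprovedUNet3D(in_channels=1, out_channels=6, base_filters=32)
x = torch.randn(1, 1, 64, 64, 64)        # (batch, channel, D, H, W)
with torch.no_grad():
    logits = model(x)
print(logits.shape)                      # expected: torch.Size([1, 6, 64, 64, 64])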

64 changes: 64 additions & 0 deletions recognition/Prostate3D_ImprovedUNet_Abhya/predict.py
@@ -0,0 +1,64 @@
# predict.py — Project 7 (Abhya)
# Inference script for trained Improved 3D U-Net model

import torch
import nibabel as nib
import numpy as np
from modules import ImprovedUNet3D
from dataset import HipMRIDataset
import os

MODEL_PATH = 'models/improved_unet3d.pth'
DATA_DIR = "/home/groups/comp3710/HipMRI_Study_open/semantic_MRs" # change to test data path
LABEL_DIR = "/home/groups/comp3710/HipMRI_Study_open/semantic_labels_only"
SAVE_DIR = 'predictions'
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

os.makedirs(SAVE_DIR, exist_ok=True)

# Load model
model = ImprovedUNet3D().to(DEVICE)
model.load_state_dict(torch.load(MODEL_PATH, map_location=DEVICE))
model.eval()

# Load data (no cropping or random augmentation at inference time)
dataset = HipMRIDataset(DATA_DIR, LABEL_DIR, crop=False, augment=False)

if len(dataset) == 0:
    raise RuntimeError(f"No MRI files found in {DATA_DIR}")

# --- Loop through all MRIs ---
for idx, (img_name, _) in enumerate(dataset.pairs, 1):
print(f"[{idx}/{len(dataset)}] Predicting: {img_name}")

img, _ = dataset[idx - 1]
img = img.unsqueeze(0).to(DEVICE)

with torch.no_grad():
logits = model(img) # [1, 6, D, H, W]
pred = torch.argmax(logits, dim=1) # [1, D, H, W]
pred_np = pred.squeeze(0).cpu().numpy().astype(np.uint8)

    # ---- Save prediction aligned to the original MRI (reuse its affine) ----
img_path = os.path.join(DATA_DIR, img_name)
affine = nib.load(img_path).affine # copy affine from original MRI

base_name = img_name.replace("_LFOV.nii.gz", "")
save_path = os.path.join(SAVE_DIR, f"{base_name}_prediction.nii.gz") # save as .nii.gz
nib.save(nib.Nifti1Image(pred_np, affine), save_path) # aligned save
print(f" Saved aligned file: {save_path}\n")


print("All predictions completed successfully. Files saved in:", SAVE_DIR)






172 changes: 172 additions & 0 deletions recognition/Prostate3D_ImprovedUNet_Abhya/train.py
@@ -0,0 +1,172 @@
# train.py — Project 7 (Abhya)
# Training script for Improved 3D UNet model on Rangpur GPU cluster

import os
import torch
from torch import nn, optim
from torch.utils.data import DataLoader, random_split
from dataset import HipMRIDataset
from modules import ImprovedUNet3D
from torch.cuda.amp import autocast, GradScaler
import torch.nn.functional as F



# ----------------------------
# CONFIGURATION
# ----------------------------
MRI_DIR = "/home/groups/comp3710/HipMRI_Study_open/semantic_MRs"
LABEL_DIR = "/home/groups/comp3710/HipMRI_Study_open/semantic_labels_only"

EPOCHS = 15 # Increase if GPU allows
BATCH_SIZE = 1
LR = 0.001
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

print(f"Using device: {DEVICE}")
if DEVICE.type == "cuda":
print("CUDA available — training on GPU")
else:
print("CUDA not available — training on CPU")

# ----------------------------
# DATASET & DATALOADER
# ----------------------------
dataset = HipMRIDataset(MRI_DIR, LABEL_DIR, transform=None, crop=True)
if len(dataset) == 0:
raise RuntimeError(f"No .nii.gz files found in {MRI_DIR}. Please check dataset path.")

# 80% train, 20% val
train_size = int(0.8 * len(dataset))
val_size = len(dataset) - train_size
train_set, val_set = random_split(dataset, [train_size, val_size])

train_loader = DataLoader(train_set, batch_size=BATCH_SIZE, shuffle=True, num_workers=4, pin_memory=True)
val_loader = DataLoader(val_set, batch_size=1, shuffle=False, num_workers=2, pin_memory=True)


# ----------------------------
# MODEL, LOSS, OPTIMIZER
# ----------------------------
model = ImprovedUNet3D().to(DEVICE)

# Combined Dice + CrossEntropy loss (handles heavy class imbalance better than CE alone)
class DiceCELoss(nn.Module):
def __init__(self):
super().__init__()
self.ce = nn.CrossEntropyLoss()

def forward(self, pred, target):
ce_loss = self.ce(pred, target)

# Dice loss
pred_soft = torch.softmax(pred, dim=1)
dice_loss = 0
for c in range(pred.shape[1]):
pred_c = pred_soft[:, c]
target_c = (target == c).float()
intersection = (pred_c * target_c).sum()
union = pred_c.sum() + target_c.sum()
dice_loss += 1 - (2 * intersection + 1e-6) / (union + 1e-6)
dice_loss /= pred.shape[1]

return ce_loss + dice_loss

criterion = DiceCELoss()
optimizer = optim.Adam(model.parameters(), lr=LR, weight_decay=1e-4)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(
optimizer, mode='max', factor=0.5, patience=3
)
def multiclass_dice(pred, target, num_classes=6, eps=1e-6):
"""Compute mean Dice coefficient across all classes."""
dice_scores = []
for c in range(num_classes):
pred_c = (pred == c).float()
target_c = (target == c).float()
intersection = (pred_c * target_c).sum()
union = pred_c.sum() + target_c.sum()
dice = (2 * intersection + eps) / (union + eps)
dice_scores.append(dice)
return torch.mean(torch.stack(dice_scores))


# ----------------------------
# TRAINING LOOP
# ----------------------------
scaler = GradScaler(enabled=(DEVICE.type == "cuda"))
for epoch in range(EPOCHS):
model.train()
epoch_loss = 0.0
epoch_dice = 0.0

for batch_idx, (img, label) in enumerate(train_loader):
img = img.to(DEVICE)
label = label.to(DEVICE).long() # important for CrossEntropyLoss

optimizer.zero_grad()
with autocast(enabled=(DEVICE.type == "cuda")):
output = model(img)
if output.shape[2:] != label.shape[1:]:
label = F.interpolate(
label.unsqueeze(1).float(),
size=output.shape[2:],
mode="nearest"
).squeeze(1).long()
loss = criterion(output, label)

scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
        torch.cuda.empty_cache()  # release cached GPU memory each step (adds overhead, but helps avoid OOM)


with torch.no_grad():
preds = torch.argmax(output, dim=1)
dice = multiclass_dice(preds, label)

epoch_loss += loss.item()
epoch_dice += dice.item()

print(f"Epoch [{epoch+1}/{EPOCHS}] Batch [{batch_idx+1}/{len(train_loader)}] "
f"Loss: {loss.item():.4f} | Dice: {dice.item():.4f}")

# summary for the epoch
avg_loss = epoch_loss / len(train_loader)
avg_dice = epoch_dice / len(train_loader)
print(f"\n Epoch [{epoch+1}/{EPOCHS}] Train Loss: {avg_loss:.4f} | Train Dice: {avg_dice:.4f}")

# ---- VALIDATION ----
model.eval()
val_dice = 0.0
with torch.no_grad():
for img, label in val_loader:
img = img.to(DEVICE)
label = label.to(DEVICE).long()
output = model(img)

if output.shape[2:] != label.shape[1:]:
label = F.interpolate(
label.unsqueeze(1).float(),
size=output.shape[2:],
mode="nearest"
).squeeze(1).long()

preds = torch.argmax(output, dim=1)
val_dice += multiclass_dice(preds, label).item()
    val_dice_avg = val_dice / len(val_loader)
    scheduler.step(val_dice_avg)

    # Manual exponential decay applied every epoch, in addition to ReduceLROnPlateau above
    for g in optimizer.param_groups:
        g['lr'] = max(g['lr'] * 0.8, 1e-5)

# Print final Dice after last epoch
if epoch == EPOCHS - 1:
print(f"\n Final Training Dice Coefficient: {avg_dice:.4f}")



# ----------------------------
# SAVE MODEL
# ----------------------------
os.makedirs("models", exist_ok=True)
torch.save(model.state_dict(), "models/improved_unet3d.pth")
print("Training complete! Model saved to models/improved_unet3d.pth")