import torch
from PIL import Image
from torchvision import transforms
from architecture import ResNetLungCancer
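# use the GPU if one is available, otherwise fall back to the CPU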
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
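# load the trained weights into the 4-class model and move it to the selected device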
model = ResNetLungCancer(num_classes=4)
model.load_state_dict(torch.load('Model/lung_cancer_detection_model.pth', map_location=device))
model = model.to(device)
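# switch to evaluation mode so dropout and batch norm behave deterministically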
model.eval()
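# standard ImageNet-style preprocessing: resize, center crop, tensor conversion, normalization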
preprocess = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
# load the input image from a local file
image_path = "Data/test/large.cell.carcinoma/000108.png"
image = Image.open(image_path).convert('RGB')
# preprocess the image
input_tensor = preprocess(image).unsqueeze(0).to(device) # add batch dimension and move to device
# get model predictions
with torch.no_grad():
    output = model(input_tensor)
    predicted_class = torch.argmax(output, dim=1).item()
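# map the predicted index to a human-readable label (order must match the training labels)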
class_names = ['Adenocarcinoma', 'Large Cell Carcinoma', 'Normal', 'Squamous Cell Carcinoma']
print(f"Predicted class: {class_names[predicted_class]}") |