import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import os
import sys
import numpy as np
import time
import yaml
import cv2
import matplotlib.pyplot as plt
"""
Author: Jenö Faist, Paul Judis
"""
"""
With this File you can use a Video of a 3D Print to Tets your Model.
By Pressing "f" you mark the current frame as a Fail Print
else the print is considered good
"""
"""
Depending on the Video you must crop it so there a no black lines in it.
You can find out the cropping by just using try and error.
(TODO) IMPLEMENT ADAPTIVE CROPING
"""
def crop_frame(image):
    # Crop values found by trial and error; crops used for other videos:
    # image[:, 250:1920-250, :], image[:, 470:1920-700, :], image[170:1080-500, 650:1920-650, :]
    cropped_image = image[200:1080-200, 620:1920-740, :]
    return cropped_image
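# Hedged sketch for the (TODO) above: an adaptive crop that finds the bounding box of
# non-black pixels instead of using hard-coded indices. The helper name adaptive_crop_frame
# and the threshold of 10 are assumptions, not part of the original pipeline; verify the
# result on your own videos before relying on it.
def adaptive_crop_frame(image, threshold=10):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    mask = gray > threshold                       # True where the pixel is not (near-)black
    rows = np.where(mask.any(axis=1))[0]          # row indices that contain non-black pixels
    cols = np.where(mask.any(axis=0))[0]          # column indices that contain non-black pixels
    if rows.size == 0 or cols.size == 0:
        return image                              # completely black frame: leave it unchanged
    return image[rows[0]:rows[-1] + 1, cols[0]:cols[-1] + 1, :]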
if __name__ == '__main__':
absolutepath = os.path.dirname(__file__)
video_PATH = absolutepath+'/TEST_VIDEOS/test_4.mp4'
model_save_PATH = absolutepath+'/COMPLETE_MODELS/3D_DEC_MODEL_MIXR18_18E_64B.pt'
print("STARTING CNN VIDEO TEST")
print("--------------------")
print("Cuda Version: " + torch.version.cuda)
print("Cuda: "+str(torch.cuda.is_available()))
print("GPU: "+str(torch.cuda.get_device_name()))
print("Video PATH: "+video_PATH)
print("Model PATH: "+model_save_PATH)
print("--------------------")
print("Loading Model...")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
"""
Specifing which Model we using for testing the Data this!
THIS MUST BE CHANGED DEPENDING OF WHICH MODEL YOU ARE USING !!!
"""
model = torchvision.models.resnet18(weights='DEFAULT')
num_ftrs = model.fc.in_features
model.fc = nn.Linear(num_ftrs, 2)
    try:
        # map_location lets the checkpoint load on CPU-only machines as well
        model.load_state_dict(torch.load(model_save_PATH, map_location=device))
        print("[✓] Model Loaded [✓]")
    except Exception:
        print("[!?] No Model Found [?!]")
        exit(1)
    model.eval()
model = model.to(device)
print("Loading VIDEO")
    cap = cv2.VideoCapture(video_PATH)
    # cv2.VideoCapture does not raise on a missing file, so check isOpened() instead of try/except
    if cap.isOpened():
        print("[✓] Video Loaded [✓]")
    else:
        print("[!?] Video couldn't be found [?!]")
        exit(1)
print("--------------------")
print("STARTING TEST THE VIDEO 3D PRINT")
print("--------------------")
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Resize((256,256)),
])
    running_accuracy = 0
count = 0
VIDEO_ON = True
    while VIDEO_ON:
        ret, frame = cap.read()
        if not ret:
            # end of the video (or a read error): stop the test loop
            break
        count += 1
        frame = crop_frame(frame)
        input_tensor = transform(frame)
        # keep a HWC copy of the preprocessed frame for display
        img = input_tensor.squeeze().numpy()
        img = np.transpose(img, (1, 2, 0))
        input_tensor = input_tensor.to(device)
        with torch.no_grad():
            output = model(input_tensor.unsqueeze(0))
        _, preds = torch.max(output, 1)
        cv2.imshow('TEST MODEL PRESS "F" TO MARK AS FAIL', img)
        k = cv2.waitKey(20)
        FAILED_PRINT = False
        if k == ord("f"):
            FAILED_PRINT = True
        # count a frame as correct when the model prediction matches the user label
        # (1 = failed print, 0 = good print)
        if preds[0].item() == 1 and FAILED_PRINT:
            running_accuracy += 1
        elif preds[0].item() == 0 and not FAILED_PRINT:
            running_accuracy += 1
"""
JUST FOR VISUALS
"""
sym = "NO"
sym_2 = "NO"
if(preds[0].item() == 1):
sym = "YES"
if FAILED_PRINT:
sym_2 = "YES"
sys.stdout.write("\033[K")
print("CNN FAIL DETECTED: " + sym + " USER FAIL DETECTED: " + sym_2 + " CURRENT ACCURACY: "+str(int(100*(running_accrucay/count)))+"%", end='\r')
if k == ord('q'):
VIDEO_ON = False
    sys.stdout.write("\033[K")
    # max(count, 1) avoids a division by zero if no frame could be read
    print("TESTING VIDEO FINISHED ACCURACY: " + str(int(100 * (running_accuracy / max(count, 1)))) + "%")
cap.release()
cv2.destroyAllWindows()