diff --git a/annotation-tool.py b/annotation-tool.py
index 3bd01522..c1e73921 100644
--- a/annotation-tool.py
+++ b/annotation-tool.py
@@ -52,7 +52,7 @@ def distanceTo(self,pt):
 
 	def shiftPts(self):
 		if self.pts.shape[1] > 1:
-			idx = range(1,self.pts.shape[1]) + [0]
+			idx = list(range(1,self.pts.shape[1])) + [0]
 			self.pts = self.pts[...,idx]
 
 	def getSquare(self):
@@ -227,7 +227,7 @@ def displayAllShapes(disp,shapes,selected,typing_mode):
 if __name__ == '__main__':
 
 	if len(sys.argv) < 4:
-		print __doc__
+		print(__doc__)
 		sys.exit()
 
 	maxW = int(sys.argv[1])
@@ -343,19 +343,19 @@ def displayAllShapes(disp,shapes,selected,typing_mode):
 				typing_mode = True
 
 		if key == key_append_vertex:
-			print 'Append vertex'
+			print('Append vertex')
 			shapes[selected].appendSide(disp.getMouseCenterRelative())
 
 		if key == key_remove_last_vertex:
-			print 'Remove last vertex'
+			print('Remove last vertex')
 			shapes[selected].removeLast()
 
 		if key == key_change_closest_vertex:
-			print 'Change closest vertex'
+			print('Change closest vertex')
 			shapes[selected].changeClosest(disp.getMouseCenterRelative())
 
 		if key in key_delete_selected_shape:
-			print 'Delete closest vertex'
+			print('Delete closest vertex')
 			del shapes[selected]
 			pt = disp.getMouseCenterRelative()
 			selected = selectClosest(shapes,pt)
@@ -364,12 +364,12 @@ def displayAllShapes(disp,shapes,selected,typing_mode):
 			shapes[selected].shiftPts()
 
 		if key == key_create_new_shape:
-			print 'Create new shape'
+			print('Create new shape')
 			shapes.append(ShapeDisplay())
 			selected = len(shapes)-1
 
 		if key == key_select_closest_shape:
-			print 'Select closest'
+			print('Select closest')
 			pt = disp.getMouseCenterRelative()
 			selected = selectClosest(shapes,pt)
 
diff --git a/create-model.py b/create-model.py
index b41d968a..48f80c6a 100644
--- a/create-model.py
+++ b/create-model.py
@@ -83,7 +83,7 @@ def create_model_mobnet():
 	backbone_layers = {'backbone_' + layer.name: layer for layer in backbone.layers}
 	for layer in model.layers:
 		if layer.name in backbone_layers:
-			print 'setting ' + layer.name
+			print(('setting ' + layer.name))
 			layer.set_weights(backbone_layers[layer.name].get_weights())
 
 	return model
@@ -98,10 +98,10 @@ def create_model_mobnet():
 
 	modelf = getattr(sys.modules[__name__],'create_model_' + sys.argv[1])
 
-	print 'Creating model %s' % sys.argv[1]
+	print(('Creating model %s' % sys.argv[1]))
 	model = modelf()
-	print 'Finished'
+	print('Finished')
 
-	print 'Saving at %s' % sys.argv[2]
+	print(('Saving at %s' % sys.argv[2]))
 	save_model(model,sys.argv[2])
diff --git a/darknet/examples/detector-scipy-opencv.py b/darknet/examples/detector-scipy-opencv.py
index 3bfc5913..30da786c 100644
--- a/darknet/examples/detector-scipy-opencv.py
+++ b/darknet/examples/detector-scipy-opencv.py
@@ -39,18 +39,18 @@ def detect2(net, meta, image, thresh=.5, hier_thresh=.5, nms=.45):
 net = dn.load_net("cfg/tiny-yolo.cfg", "tiny-yolo.weights", 0)
 meta = dn.load_meta("cfg/coco.data")
 r = dn.detect(net, meta, "data/dog.jpg")
-print r
+print(r)
 
 # scipy
 arr= imread('data/dog.jpg')
 im = array_to_image(arr)
 r = detect2(net, meta, im)
-print r
+print(r)
 
 # OpenCV
 arr = cv2.imread('data/dog.jpg')
 im = array_to_image(arr)
 dn.rgbgr_image(im)
 r = detect2(net, meta, im)
-print r
+print(r)
 
diff --git a/darknet/examples/detector.py b/darknet/examples/detector.py
index 40bb365e..b6100951 100644
--- a/darknet/examples/detector.py
+++ b/darknet/examples/detector.py
@@ -13,15 +13,15 @@
 net = dn.load_net("cfg/yolo-thor.cfg", "/home/pjreddie/backup/yolo-thor_final.weights", 0)
 meta = dn.load_meta("cfg/thor.data")
 r = dn.detect(net, meta, "data/bedroom.jpg")
-print r
+print(r)
 
 # And then down here you could detect a lot more images like:
 r = dn.detect(net, meta, "data/eagle.jpg")
-print r
+print(r)
 r = dn.detect(net, meta, "data/giraffe.jpg")
-print r
+print(r)
 r = dn.detect(net, meta, "data/horses.jpg")
-print r
+print(r)
 r = dn.detect(net, meta, "data/person.jpg")
-print r
+print(r)
 
diff --git a/darknet/python/darknet.py b/darknet/python/darknet.py
index c0cab3ab..0ccaf85b 100644
--- a/darknet/python/darknet.py
+++ b/darknet/python/darknet.py
@@ -123,6 +123,7 @@ def classify(net, meta, im):
     return res
 
 def detect(net, meta, image, thresh=.5, hier_thresh=.5, nms=.45):
+    image = bytes(image, encoding='utf-8')
     im = load_image(image, 0, 0)
     num = c_int(0)
     pnum = pointer(num)
@@ -152,6 +153,6 @@ def detect(net, meta, image, thresh=.5, hier_thresh=.5, nms=.45):
 
     net = load_net("cfg/tiny-yolo.cfg", "tiny-yolo.weights", 0)
     meta = load_meta("cfg/coco.data")
    r = detect(net, meta, "data/dog.jpg")
-    print r
+    print(r)
 
diff --git a/darknet/python/proverbot.py b/darknet/python/proverbot.py
index 095aae8f..23dc53cc 100644
--- a/darknet/python/proverbot.py
+++ b/darknet/python/proverbot.py
@@ -1,4 +1,4 @@
-from darknet import *
+from .darknet import *
 
 def predict_tactic(net, s):
     prob = 0
@@ -34,4 +34,4 @@ def predict_tactics(net, s, n):
 
 net = load_net("cfg/coq.test.cfg", "/home/pjreddie/backup/coq.backup", 0)
 t = predict_tactics(net, "+++++\n", 10)
-print t
+print(t)
diff --git a/get-networks.sh b/get-networks.sh
old mode 100644
new mode 100755
index fb5f116b..723ce1e2
--- a/get-networks.sh
+++ b/get-networks.sh
@@ -6,15 +6,15 @@
 mkdir data/lp-detector -p
 mkdir data/ocr -p
 mkdir data/vehicle-detector -p
-wget -c -N www.inf.ufrgs.br/~smsilva/alpr-unconstrained/data/lp-detector/wpod-net_update1.h5 -P data/lp-detector/
-wget -c -N www.inf.ufrgs.br/~smsilva/alpr-unconstrained/data/lp-detector/wpod-net_update1.json -P data/lp-detector/
+wget -c -N http://sergiomsilva.com/data/eccv2018/lp-detector/wpod-net_update1.h5 -P data/lp-detector/
+wget -c -N http://sergiomsilva.com/data/eccv2018/lp-detector/wpod-net_update1.json -P data/lp-detector/
 
-wget -c -N www.inf.ufrgs.br/~smsilva/alpr-unconstrained/data/ocr/ocr-net.cfg -P data/ocr/
-wget -c -N www.inf.ufrgs.br/~smsilva/alpr-unconstrained/data/ocr/ocr-net.names -P data/ocr/
-wget -c -N www.inf.ufrgs.br/~smsilva/alpr-unconstrained/data/ocr/ocr-net.weights -P data/ocr/
-wget -c -N www.inf.ufrgs.br/~smsilva/alpr-unconstrained/data/ocr/ocr-net.data -P data/ocr/
+wget -c -N http://sergiomsilva.com/data/eccv2018/ocr/ocr-net.cfg -P data/ocr/
+wget -c -N http://sergiomsilva.com/data/eccv2018/ocr/ocr-net.names -P data/ocr/
+wget -c -N http://sergiomsilva.com/data/eccv2018/ocr/ocr-net.weights -P data/ocr/
+wget -c -N http://sergiomsilva.com/data/eccv2018/ocr/ocr-net.data -P data/ocr/
 
-wget -c -N www.inf.ufrgs.br/~smsilva/alpr-unconstrained/data/vehicle-detector/yolo-voc.cfg -P data/vehicle-detector/
-wget -c -N www.inf.ufrgs.br/~smsilva/alpr-unconstrained/data/vehicle-detector/voc.data -P data/vehicle-detector/
-wget -c -N www.inf.ufrgs.br/~smsilva/alpr-unconstrained/data/vehicle-detector/yolo-voc.weights -P data/vehicle-detector/
-wget -c -N www.inf.ufrgs.br/~smsilva/alpr-unconstrained/data/vehicle-detector/voc.names -P data/vehicle-detector/
+wget -c -N http://sergiomsilva.com/data/eccv2018/vehicle-detector/yolo-voc.cfg -P data/vehicle-detector/
+wget -c -N http://sergiomsilva.com/data/eccv2018/vehicle-detector/voc.data -P data/vehicle-detector/
+wget -c -N http://sergiomsilva.com/data/eccv2018/vehicle-detector/yolo-voc.weights -P data/vehicle-detector/
+wget -c -N http://sergiomsilva.com/data/eccv2018/vehicle-detector/voc.names -P data/vehicle-detector/
diff --git a/license-plate-detection.py b/license-plate-detection.py
index 600dcc67..54dbf173 100644
--- a/license-plate-detection.py
+++ b/license-plate-detection.py
@@ -29,11 +29,11 @@ def adjust_pts(pts,lroi):
 
 		imgs_paths = glob('%s/*car.png' % input_dir)
 
-		print 'Searching for license plates using WPOD-NET'
+		print('Searching for license plates using WPOD-NET')
 
 		for i,img_path in enumerate(imgs_paths):
 
-			print '\t Processing %s' % img_path
+			print(('\t Processing %s' % img_path))
 
 			bname = splitext(basename(img_path))[0]
 			Ivehicle = cv2.imread(img_path)
@@ -41,7 +41,7 @@ def adjust_pts(pts,lroi):
 			ratio = float(max(Ivehicle.shape[:2]))/min(Ivehicle.shape[:2])
 			side = int(ratio*288.)
 			bound_dim = min(side + (side%(2**4)),608)
-			print "\t\tBound dim: %d, ratio: %f" % (bound_dim,ratio)
+			print(("\t\tBound dim: %d, ratio: %f" % (bound_dim,ratio)))
 
 			Llp,LlpImgs,_ = detect_lp(wpod_net,im2single(Ivehicle),bound_dim,2**4,(240,80),lp_threshold)
diff --git a/license-plate-ocr.py b/license-plate-ocr.py
index 754fa50d..33505b9f 100644
--- a/license-plate-ocr.py
+++ b/license-plate-ocr.py
@@ -21,20 +21,20 @@
 
 		ocr_threshold = .4
 
-		ocr_weights = 'data/ocr/ocr-net.weights'
-		ocr_netcfg = 'data/ocr/ocr-net.cfg'
-		ocr_dataset = 'data/ocr/ocr-net.data'
+		ocr_weights = b'data/ocr/ocr-net.weights'
+		ocr_netcfg = b'data/ocr/ocr-net.cfg'
+		ocr_dataset = b'data/ocr/ocr-net.data'
 
 		ocr_net = dn.load_net(ocr_netcfg, ocr_weights, 0)
 		ocr_meta = dn.load_meta(ocr_dataset)
 
 		imgs_paths = sorted(glob('%s/*lp.png' % output_dir))
 
-		print 'Performing OCR...'
+		print('Performing OCR...')
 
 		for i,img_path in enumerate(imgs_paths):
 
-			print '\tScanning %s' % img_path
+			print(('\tScanning %s' % img_path))
 
 			bname = basename(splitext(img_path)[0])
 
@@ -51,11 +51,11 @@
 
 				with open('%s/%s_str.txt' % (output_dir,bname),'w') as f:
 					f.write(lp_str + '\n')
 
-				print '\t\tLP: %s' % lp_str
+				print(('\t\tLP: %s' % lp_str))
 
 			else:
 
-				print 'No characters found'
+				print('No characters found')
 
 	except:
 		traceback.print_exc()
diff --git a/src/keras_utils.py b/src/keras_utils.py
index 94792078..614a997f 100644
--- a/src/keras_utils.py
+++ b/src/keras_utils.py
@@ -24,7 +24,7 @@ def save_model(model,path,verbose=0):
 	with open('%s.json' % path,'w') as json_file:
 		json_file.write(model_json)
 	model.save_weights('%s.h5' % path)
-	if verbose: print 'Saved to %s' % path
+	if verbose: print(('Saved to %s' % path))
 
 def load_model(path,custom_objects={},verbose=0):
 	from keras.models import model_from_json
@@ -34,7 +34,7 @@ def load_model(path,custom_objects={},verbose=0):
 		model_json = json_file.read()
 	model = model_from_json(model_json, custom_objects=custom_objects)
 	model.load_weights('%s.h5' % path)
-	if verbose: print 'Loaded from %s' % path
+	if verbose: print(('Loaded from %s' % path))
 	return model
 
diff --git a/train-detector.py b/train-detector.py
index 97a3adf5..1ac2aa0e 100644
--- a/train-detector.py
+++ b/train-detector.py
@@ -76,7 +76,7 @@ def process_data_item(data_item,dim,model_stride):
 	opt = getattr(keras.optimizers,args.optimizer)(lr=args.learning_rate)
 	model.compile(loss=loss, optimizer=opt)
 
-	print 'Checking input directory...'
+	print('Checking input directory...')
 	Files = image_files_from_folder(train_dir)
 
 	Data = []
@@ -87,7 +87,7 @@ def process_data_item(data_item,dim,model_stride):
 			I = cv2.imread(file)
 			Data.append([I,L[0]])
 
-	print '%d images with labels found' % len(Data)
+	print(('%d images with labels found' % len(Data)))
 
 	dg = DataGenerator(	data=Data, \
 				process_data_item_func=lambda x: process_data_item(x,dim,model_stride),\
@@ -106,20 +106,20 @@ def process_data_item(data_item,dim,model_stride):
 
 	for it in range(iterations):
 
-		print 'Iter. %d (of %d)' % (it+1,iterations)
+		print(('Iter. %d (of %d)' % (it+1,iterations)))
 
 		Xtrain,Ytrain = dg.get_batch(batch_size)
 		train_loss = model.train_on_batch(Xtrain,Ytrain)
 
-		print '\tLoss: %f' % train_loss
+		print(('\tLoss: %f' % train_loss))
 
 		# Save model every 1000 iterations
 		if (it+1) % 1000 == 0:
-			print 'Saving model (%s)' % model_path_backup
+			print(('Saving model (%s)' % model_path_backup))
 			save_model(model,model_path_backup)
 
-	print 'Stopping data generator'
+	print('Stopping data generator')
 	dg.stop()
 
-	print 'Saving model (%s)' % model_path_final
+	print(('Saving model (%s)' % model_path_final))
 	save_model(model,model_path_final)
diff --git a/vehicle-detection.py b/vehicle-detection.py
index 957a7bb3..fe6bfc4d 100644
--- a/vehicle-detection.py
+++ b/vehicle-detection.py
@@ -21,10 +21,16 @@
 
 		vehicle_threshold = .5
 
-		vehicle_weights = 'data/vehicle-detector/yolo-voc.weights'
-		vehicle_netcfg = 'data/vehicle-detector/yolo-voc.cfg'
-		vehicle_dataset = 'data/vehicle-detector/voc.data'
+		base=b'/media/raghav/e34065bb-bd49-4111-ba3a-96160e27ffd0/raghu/cctv/darknet/'
+		vehicle_weights = base + b'yolov4.weights'
+		vehicle_netcfg = base + b'cfg/yolov4.cfg'
+		vehicle_dataset = b'data/vehicle-detector/coco.data'
+		vehicle_weights = b'data/vehicle-detector/yolo-voc.weights'
+		vehicle_netcfg = b'data/vehicle-detector/yolo-voc.cfg'
+		vehicle_dataset = b'data/vehicle-detector/voc.data'
+
+		# import pdb; pdb.set_trace()
 
 		vehicle_net = dn.load_net(vehicle_netcfg, vehicle_weights, 0)
 		vehicle_meta = dn.load_meta(vehicle_dataset)
 
@@ -34,19 +40,19 @@
 
 		if not isdir(output_dir):
 			makedirs(output_dir)
 
-		print 'Searching for vehicles using YOLO...'
+		print('Searching for vehicles using YOLO...')
 
 		for i,img_path in enumerate(imgs_paths):
 
-			print '\tScanning %s' % img_path
+			print(('\tScanning %s' % img_path))
 
 			bname = basename(splitext(img_path)[0])
 
 			R,_ = detect(vehicle_net, vehicle_meta, img_path ,thresh=vehicle_threshold)
 
-			R = [r for r in R if r[0] in ['car','bus']]
+			R = [r for r in R if r[0] in [b'car',b'bus']]
 
-			print '\t\t%d cars found' % len(R)
+			print(('\t\t%d cars found' % len(R)))
 
 			if len(R):