diff --git a/example/assets/models/yolo11n.torchscript b/example/assets/models/yolo11n.torchscript
new file mode 100644
index 0000000..757dc72
Binary files /dev/null and b/example/assets/models/yolo11n.torchscript differ
diff --git a/example/lib/run_model_by_image_demo.dart b/example/lib/run_model_by_image_demo.dart
index 35efef1..4bc09cd 100644
--- a/example/lib/run_model_by_image_demo.dart
+++ b/example/lib/run_model_by_image_demo.dart
@@ -18,7 +18,7 @@ class _RunModelByImageDemoState extends State {
   //CustomModel? _customModel;
   late ModelObjectDetection _objectModel;
   late ModelObjectDetection _objectModelYoloV8;
-
+  late ModelObjectDetection _objectModelYoloV11;
   String? textToShow;
   List? _prediction;
   File? _image;
@@ -37,6 +37,7 @@ class _RunModelByImageDemoState extends State {
     //String pathCustomModel = "assets/models/custom_model.ptl";
     String pathObjectDetectionModel = "assets/models/yolov5s.torchscript";
     String pathObjectDetectionModelYolov8 = "assets/models/yolov8s.torchscript";
+    String pathObjectDetectionModelYolov11 = "assets/models/yolo11n.torchscript";
     try {
       _imageModel = await PytorchLite.loadClassificationModel(
           pathImageModel, 224, 224, 1000,
@@ -49,6 +50,14 @@ class _RunModelByImageDemoState extends State {
           pathObjectDetectionModelYolov8, 80, 640, 640,
           labelPath: "assets/labels/labels_objectDetection_Coco.txt",
           objectDetectionModelType: ObjectDetectionModelType.yolov8);
+      _objectModelYoloV11 = await PytorchLite.loadObjectDetectionModel(
+          pathObjectDetectionModelYolov11,
+          80,
+          640,
+          640,
+          labelPath: "assets/labels/labels_objectDetection_Coco.txt",
+          objectDetectionModelType: ObjectDetectionModelType.yolov8
+      );
     } catch (e) {
       if (e is PlatformException) {
         print("only supported for android, Error is $e");
@@ -157,6 +166,41 @@ class _RunModelByImageDemoState extends State {
     });
   }
 
+  Future runObjectDetectionYoloV11() async {
+    //pick a random image
+
+    final XFile? image = await _picker.pickImage(source: ImageSource.gallery);
+    Stopwatch stopwatch = Stopwatch()..start();
+
+    objDetect = await _objectModelYoloV11.getImagePrediction(
+        await File(image!.path).readAsBytes(),
+        minimumScore: 0.1,
+        iOUThreshold: 0.3);
+    textToShow = inferenceTimeAsString(stopwatch);
+
+    print('object executed in ${stopwatch.elapsed.inMilliseconds} ms');
+    for (var element in objDetect) {
+      print({
+        "score": element?.score,
+        "className": element?.className,
+        "class": element?.classIndex,
+        "rect": {
+          "left": element?.rect.left,
+          "top": element?.rect.top,
+          "width": element?.rect.width,
+          "height": element?.rect.height,
+          "right": element?.rect.right,
+          "bottom": element?.rect.bottom,
+        },
+      });
+    }
+
+    setState(() {
+      //this.objDetect = objDetect;
+      _image = File(image.path);
+    });
+  }
+
   String inferenceTimeAsString(Stopwatch stopwatch) =>
       "Inference Took ${stopwatch.elapsed.inMilliseconds} ms";
 
@@ -295,6 +339,18 @@ class _RunModelByImageDemoState extends State {
             ),
           ),
         ),
+        TextButton(
+          onPressed: runObjectDetectionYoloV11,
+          style: TextButton.styleFrom(
+            backgroundColor: Colors.blue,
+          ),
+          child: const Text(
+            "Run object detection YoloV11 with labels",
+            style: TextStyle(
+              color: Colors.white,
+            ),
+          ),
+        ),
         TextButton(
           onPressed: runObjectDetectionWithoutLabels,
           style: TextButton.styleFrom(