Skip to content

Commit

Permalink
2.9 Cpp Lint and Cppcheck
Browse files Browse the repository at this point in the history
  • Loading branch information
Achuthankrishna committed Nov 1, 2023
1 parent 40a0e8d commit ddcfc2b
Show file tree
Hide file tree
Showing 442 changed files with 129,739 additions and 134 deletions.
124 changes: 63 additions & 61 deletions app/detector.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -10,15 +10,17 @@
* @copyright Copyright (c) 2023
*
*/
#include "../include/camera.hpp"
#include "../include/detector.hpp"
#include "../include/tracker.hpp"
#include <cstdlib>
#include <cstdio>
#include <fstream>
#include <iostream>
#include <sstream>
#include <opencv2/dnn.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include "../include/camera.hpp"
#include "../include/tracker.hpp"


/**
Expand All @@ -27,14 +29,15 @@
* @param model_Cfg Path to the model configuration file.
* @param model_Wts Path to the model weights file.
*/
void Detector::load_model(std::string model_Cfg, std::string model_Wts, std::string c_path) {
network=cv::dnn::readNetFromDarknet(model_Cfg,model_Wts);
void Detector::load_model(std::string model_Cfg,
std::string model_Wts, std::string c_path) {
network = cv::dnn::readNetFromDarknet(model_Cfg, model_Wts);

if (network.empty()) {
std::cerr << "Failed to load the neural network model." << std::endl;
return;
}
std::cout<<model_Wts;
std::cout << model_Wts;
network.setPreferableBackend(cv::dnn::DNN_BACKEND_OPENCV);
network.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);

Expand All @@ -44,7 +47,8 @@ void Detector::load_model(std::string model_Cfg, std::string model_Wts, std::str
}


std::pair<cv::Mat, std::vector<Detector::bbox>> Detector::detector(const cv::Mat& cv_frame) {
std::pair<cv::Mat, std::vector<Detector::bbox>>
Detector::detector(const cv::Mat& cv_frame) {
std::vector<Detector::bbox> allBoundingBoxes;
cv::Mat frame;

Expand All @@ -53,74 +57,69 @@ std::pair<cv::Mat, std::vector<Detector::bbox>> Detector::detector(const cv::Mat
frame = cv_frame.clone(); // Use the provided frame if not empty

cv::Mat blob;
cv::dnn::blobFromImage(frame, blob, 1. / 255, cv::Size(Width, Height), cv::Scalar(), true, false);
cv::dnn::blobFromImage(frame, blob, 1. / 255, cv::Size(Width, Height),
cv::Scalar(), true, false);
network.setInput(blob);
std::vector<cv::Mat> output;
network.forward(output, network.getUnconnectedOutLayersNames());

std::vector<Detector::bbox> bbox = processing(frame, output);

allBoundingBoxes.insert(allBoundingBoxes.end(), bbox.begin(), bbox.end());
}
else {
allBoundingBoxes.insert(allBoundingBoxes.end(),
bbox.begin(), bbox.end()); } else {
std::cout << "Opening Frame";
cv::VideoCapture cap;
cv::namedWindow("Camera Output", cv::WINDOW_NORMAL);
cap.open(0);
cv::VideoWriter videoWriter("./human1.mp4", cv::VideoWriter::fourcc('a', 'v', 'c', '1'), 5, cv::Size(640, 480), true);

if (!cap.open(0)) { // Open the default camera (change the index if needed)
cv::VideoWriter videoWriter("./human1.mp4",
cv::VideoWriter::fourcc('a', 'v', 'c', '1'),
5, cv::Size(640, 480), true);
if (!cap.open(0)) {
std::cout << "Error opening camera" << std::endl;
return std::make_pair(cv::Mat(), std::vector<Detector::bbox>());
}
else {
} else {
while (cap.isOpened()) {
cap.read(frame); // Capture a frame from the camera
cap.read(frame);

if (frame.empty()) {
std::cout << "Frame is empty. Exiting." << std::endl;
break;
}

cv::Mat blob;
cv::dnn::blobFromImage(frame, blob, 1. / 255, cv::Size(Width, Height), cv::Scalar(), true, false);
cv::dnn::blobFromImage(frame, blob, 1. / 255,
cv::Size(Width, Height),
cv::Scalar(), true, false);
network.setInput(blob);
std::vector<cv::Mat> output;

try {
network.forward(output, network.getUnconnectedOutLayersNames());
network.forward(output,
network.getUnconnectedOutLayersNames());
} catch (const cv::Exception& e) {
std::cerr << "Error during network forward pass: " << e.what() << std::endl;
std::cerr << "Error during forward pass: "
<< e.what() << std::endl;
break;
}

std::vector<Detector::bbox> bbox = processing(frame, output);


allBoundingBoxes.insert(allBoundingBoxes.end(), bbox.begin(), bbox.end());
allBoundingBoxes.insert(allBoundingBoxes.end(),
bbox.begin(), bbox.end());
// if (std::get<0>(bbox[0])==0){
// Tracker tracker;

// // // // Call the humanTrack function using the "tracker" instance

// tracker.getPredictions(frame,bbox);
// std::vector<cv::Rect_<double>> regions = tracker.boundingBox(frame, bbox);
// std::vector<cv::Rect_<double>> regions =
// tracker.boundingBox(frame, bbox);
// tracker.humanTrack(frame,regions);
// }


// Rest of your processing code

cv::imshow("Camera Output", frame);
videoWriter.write(frame);

char key = cv::waitKey(10);
if (key == 'q') {
break; // Press 'q' to exit the loop
break;
}
}

cap.release(); // Release the camera
cap.release();
}
}

Expand All @@ -132,7 +131,7 @@ std::pair<cv::Mat, std::vector<Detector::bbox>> Detector::detector(const cv::Mat
float Detector::calculate_distance(int box_h, int frame_h) {
int focal_l = 16;
int sensor_h = 25;
int averageHeight=180;
int averageHeight = 180;
// Conversion from pixels to millimeters
double height_mm = (sensor_h * box_h) / frame_h;
// Calculation of the human's distance from the camera
Expand All @@ -148,7 +147,9 @@ float Detector::calculate_distance(int box_h, int frame_h) {
* @param frame Input frame.
* @param output Output vector for results.
*/
std::vector<std::tuple<int, float, cv::Rect>> Detector::processing(cv::Mat& frame, const std::vector<cv::Mat> & output) {
std::vector<std::tuple<int, float, cv::Rect>>
Detector::processing(cv::Mat& frame,
const std::vector<cv::Mat> & output) {
std::vector<int> detectedClassIds;
std::vector<float> detectedConfidences;
std::vector<cv::Rect> detectedBoxes;
Expand Down Expand Up @@ -178,20 +179,22 @@ std::vector<std::tuple<int, float, cv::Rect>> Detector::processing(cv::Mat& fram
}

std::vector<int> indices;
cv::dnn::NMSBoxes(detectedBoxes, detectedConfidences, confidenceThreshold, nmsThreshold,
indices);
cv::dnn::NMSBoxes(detectedBoxes, detectedConfidences, confidenceThreshold,
nmsThreshold, indices);
unsigned int personid = 1;
for (auto idx : indices) {
cv::Rect box = detectedBoxes[idx];
float z_axis = calculate_distance(box.height, frame.rows);
drawboxes(detectedClassIds[idx], detectedConfidences[idx], box.x, box.x + box.width,
box.y + box.height, frame, classes, personid, z_axis,box.y);
// transform.camera_robot_array(z_axis, box, frame);
std::cout << "Distance from person " << personid
<< " is: " << z_axis << "m" << std::endl;
drawboxes(detectedClassIds[idx], detectedConfidences[idx],
box.x, box.x + box.width,
box.y + box.height, frame,
classes, personid, z_axis, box.y);
std::cout << "Distance from person " <<
personid << " is: " << z_axis << "m" << std::endl;
personid++;
bbox current_bbox;
current_bbox = std::make_tuple(detectedClassIds[idx], detectedConfidences[idx], box);
current_bbox = std::make_tuple(detectedClassIds[idx],
detectedConfidences[idx], box);
bboxes.push_back(current_bbox);
}
return bboxes;
Expand All @@ -207,9 +210,8 @@ std::vector<std::tuple<int, float, cv::Rect>> Detector::processing(cv::Mat& fram

std::vector<std::string> Detector::ClassNames(const cv::dnn::Net& network) {
std::vector<std::string> names;

// Get the indices of the output layers, i.e., the layers with unconnected outputs
std::vector<int> outLayers = network.getUnconnectedOutLayers();
std::vector<int> outLayers =
network.getUnconnectedOutLayers();

// Get the names of all the layers in the network
std::vector<std::string> layersNames = network.getLayerNames();
Expand All @@ -236,27 +238,26 @@ std::vector<std::string> Detector::ClassNames(const cv::dnn::Net& network) {


void Detector::drawboxes(int classID, float con, int left, int right,
int bottom, cv::Mat& frame,const std::vector<std::string> &classes,
int pid, float z,int top) {
int bottom, cv::Mat& frame,
const std::vector<std::string> &classes,
int pid, float z, int top) {
if (classID != 0) {
return; // Skip non-human detections
return;
}


// Check if this pid already has a color assigned
cv::Scalar color;
auto colorIt = colorMap.find(pid);
if (colorIt != colorMap.end()) {
color = colorIt->second;
} else {
// Generate a unique color for the bounding box
cv::RNG rng(pid); // Initialize RNG with pid for consistent color generation
color = cv::Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255));
colorMap[pid] = color; // Store the color for future use
cv::RNG rng(pid);
color = cv::Scalar(rng.uniform(0, 255),
rng.uniform(0, 255), rng.uniform(0, 255));
colorMap[pid] = color;
}

// Draw a rectangle displaying the bounding box
rectangle(frame, cv::Point(left, top), cv::Point(right, bottom), color, 3);
rectangle(frame, cv::Point(left, top),
cv::Point(right, bottom), color, 3);


// Get the label for the class name and its confidence
Expand All @@ -269,11 +270,12 @@ void Detector::drawboxes(int classID, float con, int left, int right,
// Draw a rectangle displaying the bounding box
rectangle(frame, cv::Point(left, top), cv::Point(right, bottom), color, 3);

// Draw the label on top of the bounding box
int baseLine;
cv::Size labelSize = getTextSize(label, cv::FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);
cv::Size labelSize = getTextSize(label,
cv::FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);
int textBottom = std::max(top, labelSize.height);
putText(frame, label, cv::Point(left, textBottom), cv::FONT_HERSHEY_SIMPLEX, 0.75,
putText(frame, label, cv::Point(left, textBottom),
cv::FONT_HERSHEY_SIMPLEX, 0.75,
cv::Scalar(0, 0, 0), 1);
}

10 changes: 1 addition & 9 deletions app/main.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -10,16 +10,8 @@
* @copyright Copyright (c) 2023
*
*/


#include "../include/robot.hpp"


/**
 * @brief Entry point: constructs a Robot and runs its human-detection
 *        pipeline (detection, distance estimation and tracking).
 * @return 0 on normal termination.
 */
int main() {
  Robot robot;
  robot.humanDetection();
  return 0;
}
28 changes: 11 additions & 17 deletions app/robot.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -15,26 +15,20 @@
#include "../include/camera.hpp"
#include "../include/detector.hpp"
#include "../include/tracker.hpp"


void Robot::humanDetection()
{
float data[16] = {0, 0, -1, 0, 1, 0, 0, 0.5, 0, -1, 0, 0.5, 0, 0, 0, 1};
void Robot::humanDetection() {
// float data[16] = {0, 0, -1, 0, 1, 0, 0, 0.5, 0, -1, 0, 0.5, 0, 0, 0, 1};
cv::Mat emptyFrame;
cv::VideoWriter videoWriter("./human1.mp4", cv::VideoWriter::fourcc('a', 'v', 'c', '1'), 8, cv::Size(640, 480), true);
std::vector<Detector::bbox> bboxs;

cv::VideoWriter videoWriter("./human1.mp4",
cv::VideoWriter::fourcc('a', 'v', 'c', '1'),
8, cv::Size(640, 480), true);
Detector detector;
detector.load_model(detector.model_Cfg,detector.model_Wts,detector.c_path);
std::pair<cv::Mat, std::vector<Detector::bbox>> result=detector.detector(emptyFrame);
detector.load_model(detector.model_Cfg,
detector.model_Wts, detector.c_path);
std::pair<cv::Mat , std::vector<Detector::bbox>>
result = detector.detector(emptyFrame);
cv::Mat frame = result.first;
print(frame);
// videoWriter.write(frame);

std::vector<Detector::bbox> bbox = result.second;
Tracker tracker;
tracker.getPredictions(frame,bbox);
tracker.boundingBox(frame,bbox);

// return 0;
}
tracker.getPredictions(frame, bbox);
tracker.boundingBox(frame, bbox); }
30 changes: 19 additions & 11 deletions app/tracker.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -23,13 +23,16 @@
* @param cv_frame
* @param get_boxes
*/
/**
 * @brief Seeds a CSRT tracker for every detected bounding box and assigns
 *        each tracked person a display colour.
 * @param cv_frame  Frame the trackers are initialised on.
 * @param get_boxes Detections as (classId, confidence, rect) tuples.
 */
void Tracker::getPredictions(cv::Mat& cv_frame,
    std::vector<Detector::bbox> get_boxes) {
  // Assign to the class member instead of declaring a new local: a local
  // cv::Ptr here would shadow the member that humanTrack() dereferences,
  // silently discarding every tracker added below.
  // (NOTE(review): assumes `trackers` is a Tracker member — confirm in header.)
  trackers = cv::MultiTracker::create();
  // Seed the RNG once, outside the loop: re-constructing cv::RNG(0) on
  // every iteration would hand every person the identical colour.
  cv::RNG rng(0);
  for (const Detector::bbox& get_box : get_boxes) {
    trackers->add(cv::TrackerCSRT::create(),
                  cv_frame, cv::Rect2d(std::get<2>(get_box)));
    colors.push_back(cv::Scalar(rng.uniform(0, 255),
                                rng.uniform(0, 255), rng.uniform(0, 255)));
  }
}

Expand All @@ -48,16 +51,19 @@ std::vector<float> Tracker::getCoordinates() {
* @param get_boxes
* @return std::vector<cv::Rect>
*/
std::vector<cv::Rect_<double>> Tracker::boundingBox(const cv::Mat& cv_frame,
const std::vector<Detector::bbox> get_boxes) {
std::vector<cv::Rect_<double>>
Tracker::boundingBox(const cv::Mat& cv_frame,
const std::vector<Detector::bbox> &get_boxes) {
std::vector<cv::Rect_<double>> trackedRegions;


for (unsigned i = 0; i < get_boxes.size(); i++) {
std::cout << "ID: " << std::get<0>(get_boxes[i]) << " Confidence: " << std::get<1>(get_boxes[i]) << std::endl;
std::cout << "ID: " << std::get<0>(get_boxes[i]) <<
" Confidence: " << std::get<1>(get_boxes[i]) << std::endl;
cv::rectangle(cv_frame, std::get<2>(get_boxes[i]), colors[i], 2, 1);
std::string label = "Person " + std::to_string(i + 1);
cv::putText(cv_frame, label, cv::Point(std::get<2>(get_boxes[i]).x, std::get<2>(get_boxes[i]).y),
cv::putText(cv_frame, label,
cv::Point(std::get<2>(get_boxes[i]).x, std::get<2>(get_boxes[i]).y),
cv::FONT_HERSHEY_SIMPLEX, 0.75, colors[i], 2);

// Add the tracked region to the output vector
Expand All @@ -73,13 +79,15 @@ std::vector<cv::Rect_<double>> Tracker::boundingBox(const cv::Mat& cv_frame,
* @param cv_frame
* @return std::vector<cv::Rect>
*/

std::vector<Detector::bbox> Tracker::humanTrack(const cv::Mat& cv_frame,std::vector<cv::Rect_<double>> regions) {
std::vector<Detector::bbox>
Tracker::humanTrack(const cv::Mat& cv_frame,
std::vector<cv::Rect_<double>> regions) {
// std::vector<cv::Rect> boundingBox=boundingBox()
trackers->update(cv_frame,regions);
trackers->update(cv_frame, regions);
std::vector<Detector::bbox> updt;
std::vector<cv::Rect2d> objects_list = trackers->getObjects();
std::for_each(objects_list.begin(),objects_list.end(),[&updt](cv::Rect2d tracked_object){
std::for_each(objects_list.begin(), objects_list.end(),
[&updt](cv::Rect2d tracked_object){
updt.push_back({0, 100.0, cv::Rect(tracked_object)});
});
return updt;
Expand Down
Loading

0 comments on commit ddcfc2b

Please sign in to comment.