01
Preface
In the previous two articles on flame detection, the final results were not very good. To improve flame detection, more flame data was collected and the network was switched from the earlier yolov3-tiny to MobileNetV2-YOLOv3. Running on a Raspberry Pi with the NCNN inference framework, the result is much better than before, as shown in the figure:
02
Implementation Steps
Training under darknet
If you need the cfg and model files used for training, please contact the author; the general darknet training commands are sketched below.
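For reference, training itself follows darknet's standard command-line workflow. The file names below are placeholders for illustration only, not the author's actual files; obj.data is assumed to list the train/valid image lists, the class-names file, and the backup directory for saved weights:

# placeholder file names for illustration only
./darknet detector train obj.data mobilenetV2-yolov3.cfg
# test a single image with the trained weights (darknet saves <cfg-name>_final.weights in the backup directory)
./darknet detector test obj.data mobilenetV2-yolov3.cfg backup/mobilenetV2-yolov3_final.weights test.jpg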
Deploying NCNN on the Raspberry Pi
The official documentation provides build instructions for the Raspberry Pi, and the project builds successfully when you follow them. The dependencies can be installed as follows:
sudo apt-get install git cmake
sudo apt-get install -y gfortran
sudo apt-get install -y libprotobuf-dev libleveldb-dev libsnappy-dev libopencv-dev libhdf5-serial-dev protobuf-compiler
sudo apt-get install --no-install-recommends libboost-all-dev
sudo apt-get install -y libgflags-dev libgoogle-glog-dev liblmdb-dev libatlas-base-dev
Then download NCNN:
git clone https://github.com/Tencent/ncnn.git
cd ncnn
Edit the CMakeLists.txt file and add the examples and benchmark subdirectories:
add_subdirectory(examples)
add_subdirectory(benchmark)
add_subdirectory(tools)
Then build according to the official documentation. The pi3 toolchain provided upstream can be used directly on Raspbian on a Raspberry Pi 4, and the latest version of NCNN uses OpenMP automatically:
cd <ncnn-root-dir>
mkdir -p build
cd build
cmake -DCMAKE_TOOLCHAIN_FILE=../toolchains/pi3.toolchain.cmake -DPI3=ON ..
make -j4
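Since benchmark was added to the build above, an optional sanity check is to run benchncnn on the Pi. The invocation below (loop count, thread count, power-save mode, GPU device) is a typical one, but the argument list may vary slightly between NCNN versions, and the bundled .param files live in the source benchmark directory:

# run from the source benchmark directory so the bundled .param files are found
cd <ncnn-root-dir>/benchmark
../build/benchmark/benchncnn 8 4 0 -1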
Model conversion
cd <ncnn-root-dir>
cd build
cd tools/darknet
./darknet2ncnn mobilenetV2-yolov3.cfg mobilenetV2-yolov3.weights mobilenetV2-yolov3.param mobilenetV2-yolov3.bin 1
cd <ncnn-root-dir>
cd build
cd examples
./mobilenetV2-yolov3
Partial code
#include <opencv2/opencv.hpp>
#include <sys/time.h>
#include <stdio.h>
#include <vector>
#include "net.h"

struct Object
{
    cv::Rect_<float> rect;
    int label;
    float prob;
};

double what_time_is_it_now()
{
    struct timeval time;
    if (gettimeofday(&time, NULL))
    {
        return 0;
    }
    return (double)time.tv_sec + (double)time.tv_usec * .000001;
}

ncnn::Net MobileNetV2_yolov3;

static int detect_MobileNetV2_yolov3(const cv::Mat& bgr, std::vector<Object>& objects)
{
    double time;
    MobileNetV2_yolov3.opt.use_vulkan_compute = true;
    const int target_size = 320;

    time = what_time_is_it_now();
    int img_w = bgr.cols;
    int img_h = bgr.rows;

    // convert the BGR frame to RGB and resize to the network input size
    ncnn::Mat in = ncnn::Mat::from_pixels_resize(bgr.data, ncnn::Mat::PIXEL_BGR2RGB, bgr.cols, bgr.rows, target_size, target_size);

    // normalize pixel values to [0, 1]
    const float mean_vals[3] = {0, 0, 0};
    const float norm_vals[3] = {1 / 255.f, 1 / 255.f, 1 / 255.f};
    in.substract_mean_normalize(mean_vals, norm_vals);

    ncnn::Extractor ex = MobileNetV2_yolov3.create_extractor();
    ex.set_num_threads(4);
    ex.input("data", in);

    ncnn::Mat out;
    ex.extract("output", out);
    printf("Predicted in %f seconds.\n", what_time_is_it_now() - time);
    printf("%d %d %d\n", out.w, out.h, out.c);

    // each output row is: label, prob, x1, y1, x2, y2 (normalized coordinates)
    objects.clear();
    for (int i = 0; i < out.h; i++)
    {
        const float* values = out.row(i);

        Object object;
        object.label = values[0];
        object.prob = values[1];
        object.rect.x = values[2] * img_w;
        object.rect.y = values[3] * img_h;
        object.rect.width = values[4] * img_w - object.rect.x;
        object.rect.height = values[5] * img_h - object.rect.y;

        objects.push_back(object);
    }

    return 0;
}

void draw_objects(cv::Mat& image, const std::vector<Object>& objects)
{
    static const char* class_names[] = {"background", "fire"};

    for (size_t i = 0; i < objects.size(); i++)
    {
        const Object& obj = objects[i];

        fprintf(stderr, "%d = %.5f at %.2f %.2f %.2f x %.2f\n", obj.label, obj.prob,
                obj.rect.x, obj.rect.y, obj.rect.width, obj.rect.height);

        cv::rectangle(image, obj.rect, cv::Scalar(255, 0, 0));

        char text[256];
        sprintf(text, "%s %.1f%%", class_names[obj.label], obj.prob * 100);

        int baseLine = 0;
        cv::Size label_size = cv::getTextSize(text, cv::FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);

        // keep the label box inside the image
        int x = obj.rect.x;
        int y = obj.rect.y - label_size.height - baseLine;
        if (y < 0)
            y = 0;
        if (x + label_size.width > image.cols)
            x = image.cols - label_size.width;

        cv::rectangle(image, cv::Rect(cv::Point(x, y), cv::Size(label_size.width, label_size.height + baseLine)),
                      cv::Scalar(255, 255, 255), -1);

        cv::putText(image, text, cv::Point(x, y + label_size.height),
                    cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 0, 0));
    }
}

int main(int argc, char** argv)
{
    MobileNetV2_yolov3.load_param("MobileNetV2-YOLOv3-Lite.param");
    MobileNetV2_yolov3.load_model("MobileNetV2-YOLOv3-Lite.bin");

    cv::VideoCapture cap(0);
    if (!cap.isOpened())
    {
        printf("capture err");
        return -1;
    }

    cv::Mat cv_img;
    std::vector<Object> objects;
    while (true)
    {
        if (!cap.read(cv_img))
        {
            printf("cv_img err");
            return -1;
        }
        detect_MobileNetV2_yolov3(cv_img, objects);
        draw_objects(cv_img, objects);
        cv::imshow("video", cv_img);
        cv::waitKey(1);
    }
    cap.release();
    return 0;
}
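The code above is built as part of the NCNN examples directory. As a rough alternative, it can also be compiled directly with g++; everything below is a sketch under assumptions, not the author's build steps: fire_detect.cpp is a placeholder name for the source file, /usr/local is an assumed NCNN install prefix, and the pkg-config package may be opencv or opencv4 depending on the OpenCV version installed:

# sketch only: adjust paths and package names to your setup
g++ -O2 -fopenmp fire_detect.cpp -o fire_detect \
    -I/usr/local/include/ncnn -L/usr/local/lib -lncnn \
    `pkg-config --cflags --libs opencv`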
/ The End /
The test results so far are fairly satisfactory, but each frame takes about 0.3 s to process, which is not yet real-time. The next goals are to reach real-time detection and to try other inference frameworks such as MNN and TNN.
Reply "火焰數(shù)據(jù)" (flame data) to the official account to get the flame dataset.
Recommended reading
Raspberry Pi series (1): color recognition (red) based on OpenCV + Python
Image transformations: OpenCV implementations on the Raspberry Pi and Android