NCNN Environment Setup, and an OpenCV + ncnn Development Environment Template

Posted on 2020-01-08 · Categories: ncnn, opencv

NCNN Environment Setup

Get ncnn

https://github.com/Tencent/ncnn

Build ncnn

cd ncnn
mkdir -p build
cd build
cmake ..
make -j4
make install

Install onnx-simplifier

pip install onnx-simplifier

Usage

# Simplify the onnx model
python -m onnxsim model.onnx model-sim.onnx

# Convert onnx to ncnn
# Go into build/tools/onnx; if the build succeeded, the onnx2ncnn conversion tool is there
./onnx2ncnn model-sim.onnx model.param model.bin

# Convert caffe to ncnn
./caffe2ncnn mobilenet_yolo_deploy.prototxt mobilenet_yolo_deploy_iter_80000.caffemodel mobilenet_yolo.param mobilenet_yolo.bin
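
As a quick sanity check that the conversion produced usable files, the generated .param/.bin pair can be loaded back with ncnn's C++ API. The sketch below is a minimal example only: the file names match the conversion commands above, while the input size (224x224) and the blob names "data" and "output" are assumptions that depend on your network.

#include <cstdio>

#include "net.h" // ncnn

int main()
{
    ncnn::Net net;

    // load_param/load_model return 0 on success
    if (net.load_param("model.param") != 0 || net.load_model("model.bin") != 0)
    {
        fprintf(stderr, "failed to load converted model\n");
        return -1;
    }

    // run a dummy all-zero input through the graph just to confirm it executes;
    // the blob names here are placeholders, check your .param file for the real ones
    ncnn::Mat in(224, 224, 3);
    in.fill(0.f);

    ncnn::Extractor ex = net.create_extractor();
    ex.input("data", in);

    ncnn::Mat out;
    ex.extract("output", out);

    printf("output shape: %d x %d x %d\n", out.w, out.h, out.c);
    return 0;
}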

OpenCV + ncnn Development Environment Template

Detailed OpenCV build steps:

https://www.cnblogs.com/raina/p/11365854.html

sudo apt-get install build-essential
sudo apt-get install cmake git libgtk2.0-dev pkg-config libavcodec-dev libavformat-dev libswscale-dev
sudo apt-get install python-dev python-numpy libtbb2 libtbb-dev libjpeg-dev libpng-dev libtiff-dev libjasper-dev libdc1394-22-dev # packages needed for image processing
sudo apt-get install libavcodec-dev libavformat-dev libswscale-dev libv4l-dev liblapacke-dev
sudo apt-get install libxvidcore-dev libx264-dev # packages needed for video processing
sudo apt-get install libatlas-base-dev gfortran # optimizes some OpenCV functionality
sudo apt-get install ffmpeg

## If the following error appears:
## E: Unable to locate package libjasper-dev

sudo apt-get install software-properties-common
sudo add-apt-repository "deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu-ports/ xenial main multiverse restricted universe"
sudo apt update
sudo apt install libjasper1 libjasper-dev


cd /xxxxx/xxxxx/opencv-4.1.1
sudo mkdir build
cd build

sudo cmake -D CMAKE_BUILD_TYPE=RELEASE \
-D CMAKE_INSTALL_PREFIX=/usr/local \
-D OPENCV_EXTRA_MODULES_PATH=/home/[xxxxxxx]/opencv_contrib-4.1.1/modules \
-D WITH_TBB=ON \
-D WITH_V4L=ON \
-D WITH_GTK=ON \
-D WITH_OPENGL=ON \
-D WITH_VTK=ON \
-D OPENCV_GENERATE_PKGCONFIG=YES ..

## Alternative: the same cmake command with Qt support enabled
# sudo cmake -D CMAKE_BUILD_TYPE=RELEASE \
# -D CMAKE_INSTALL_PREFIX=/usr/local \
# -D OPENCV_EXTRA_MODULES_PATH=/home/[xxxxxxx]/opencv_contrib-4.1.1/modules \
# -D CMAKE_PREFIX_PATH=/home/[xxxxxxx]/Qt5.13.2/5.13.2/gcc_64/lib/cmake/Qt5 \
# -D WITH_TBB=ON \
# -D WITH_V4L=ON \
# -D WITH_QT=ON \
# -D WITH_GTK=ON \
# -D WITH_OPENGL=ON \
# -D WITH_VTK=ON \
# -D OPENCV_GENERATE_PKGCONFIG=YES ..

sudo apt-get install cmake-qt-gui
## go into OpenCV's build directory and run:
sudo cmake-gui ..
## to work around failures downloading files such as boostdesc_bgm.i

sudo make -j4

sudo make install

sudo sh -c 'echo "/usr/local/lib" > /etc/ld.so.conf.d/opencv.conf'
sudo ldconfig

/opt/Qt5.13.0/5.13.0/gcc_64/lib/cmake/Qt5 is where my Qt5Config.cmake lives; change it to your own path. If you do not need OpenCV's Qt GUI support, you can delete the two lines "-D WITH_QT=ON \" and "-D CMAKE_PREFIX_PATH=/opt/Qt5.13.0/5.13.0/gcc_64/lib/cmake/Qt5 \".
Also, if the path to Qt5Config.cmake is not specified, cmake may report an error like the following:

CMake Error at cmake/OpenCVFindLibsGUI.cmake:18 (find_package):
  Could not find a package configuration file provided by "Qt5" with any of
  the following names:

    Qt5Config.cmake
    qt5-config.cmake

  Add the installation prefix of "Qt5" to CMAKE_PREFIX_PATH or set "Qt5_DIR"
  to a directory containing one of the above files. If "Qt5" provides a
  separate development package or SDK, be sure it has been installed.

Configure the OpenCV environment:
sudo gedit /etc/ld.so.conf.d/opencv.conf
Add the following line at the end of the file:

/usr/local/lib

Apply the configuration:

sudo ldconfig
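
To confirm that OpenCV is installed and visible to the compiler and linker, a minimal version check can be built against the freshly installed library. This is only a sketch: it prints the version reported by the headers and opens no windows.

#include <cstdio>

#include <opencv2/core.hpp>

int main()
{
    // CV_VERSION comes from the OpenCV headers picked up at compile time;
    // if this builds, links and runs, the install and ldconfig steps worked
    std::printf("OpenCV version: %s\n", CV_VERSION);
    return 0;
}

Since OPENCV_GENERATE_PKGCONFIG=YES was passed above, this can be built with something like g++ check.cpp -o check $(pkg-config --cflags --libs opencv4).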

CMakeLists.txt

# Minimum CMake version required
cmake_minimum_required(VERSION 3.4.1)

project(ncnnOpencv)

# Set the C++ standard
set(CMAKE_CXX_STANDARD 11)

# Path to the ncnn source tree; replace with your own
set(NCNN_DIR /home/ray/ncnn)

# Set the ncnn link library and include directory
set(NCNN_LIBS ${NCNN_DIR}/build/install/lib/libncnn.a)
set(NCNN_INCLUDE_DIRS ${NCNN_DIR}/build/install/include/ncnn)

# Configure OpenCV
find_package(OpenCV REQUIRED)
include_directories(${OpenCV_INCLUDE_DIRS})

include_directories(${NCNN_INCLUDE_DIRS})

# Configure OpenMP (the ncnn static library uses OpenMP for multi-threading,
# so its compile and link flags are needed here)
find_package(OpenMP REQUIRED)
if(OPENMP_FOUND)
    message("OPENMP FOUND")
    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}")
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}")
    set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${OpenMP_EXE_LINKER_FLAGS}")
endif()

# Link the executable against ncnn and OpenCV
add_executable(ncnnOpencv Main.cpp)
target_link_libraries(ncnnOpencv ${NCNN_LIBS})
target_link_libraries(ncnnOpencv ${OpenCV_LIBS})

Main.cpp

#include <iostream>
#include <fstream>
#include <algorithm>
#include <opencv2/opencv.hpp>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/videoio.hpp>

#include <stdio.h>
#include <vector>

#include "platform.h"
#include "net.h"
#if NCNN_VULKAN
#include "gpu.h"
#endif // NCNN_VULKAN

using namespace std;
using namespace cv;

struct Object
{
    cv::Rect_<float> rect;
    int label;
    float prob;
};

static int detect_yolov3(const cv::Mat& bgr, std::vector<Object>& objects)
{
    ncnn::Net yolov3;

#if NCNN_VULKAN
    yolov3.opt.use_vulkan_compute = true;
#endif // NCNN_VULKAN

    // original pretrained model from https://github.com/eric612/MobileNet-YOLO
    // param : https://drive.google.com/open?id=1V9oKHP6G6XvXZqhZbzNKL6FI_clRWdC-
    // bin : https://drive.google.com/open?id=1DBcuFCr-856z3FRQznWL_S5h-Aj3RawA
    // the ncnn model https://github.com/nihui/ncnn-assets/tree/master/models
    yolov3.load_param("mobilenet_yolo.param");
    yolov3.load_model("mobilenet_yolo.bin");

    const int target_size = 352;
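    // the detection network used here takes a fixed 352x352 input; the frame is resized to this size below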

    int img_w = bgr.cols;
    int img_h = bgr.rows;

    ncnn::Mat in = ncnn::Mat::from_pixels_resize(bgr.data, ncnn::Mat::PIXEL_BGR, bgr.cols, bgr.rows, target_size, target_size);

    const float mean_vals[3] = {127.5f, 127.5f, 127.5f};
    const float norm_vals[3] = {0.007843f, 0.007843f, 0.007843f};
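    // 0.007843 is roughly 1/127.5, so mean subtraction plus scaling maps pixel values from [0,255] to about [-1,1]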
    in.substract_mean_normalize(mean_vals, norm_vals);

    ncnn::Extractor ex = yolov3.create_extractor();
    ex.set_num_threads(4);

    ex.input("data", in);

    ncnn::Mat out;
    ex.extract("detection_out", out);

//     printf("%d %d %d\n", out.w, out.h, out.c);
    objects.clear();
    for (int i=0; i<out.h; i++)
    {
        const float* values = out.row(i);
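        // each row of detection_out is [label, confidence, xmin, ymin, xmax, ymax], with coordinates normalized to [0,1]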

        Object object;
        object.label = values[0];
        object.prob = values[1];
        object.rect.x = values[2] * img_w;
        object.rect.y = values[3] * img_h;
        object.rect.width = values[4] * img_w - object.rect.x;
        object.rect.height = values[5] * img_h - object.rect.y;

        objects.push_back(object);
    }

    return 0;
}

static void draw_objects(const cv::Mat& bgr, const std::vector<Object>& objects)
{
    static const char* class_names[] = {"background",
        "aeroplane", "bicycle", "bird", "boat",
        "bottle", "bus", "car", "cat", "chair",
        "cow", "diningtable", "dog", "horse",
        "motorbike", "person", "pottedplant",
        "sheep", "sofa", "train", "tvmonitor"};

    cv::Mat image = bgr.clone();

    for (size_t i = 0; i < objects.size(); i++)
    {
        const Object& obj = objects[i];

        fprintf(stderr, "%d = %.5f at %.2f %.2f %.2f x %.2f\n", obj.label, obj.prob,
                obj.rect.x, obj.rect.y, obj.rect.width, obj.rect.height);

        cv::rectangle(image, obj.rect, cv::Scalar(255, 0, 0));

        char text[256];
        sprintf(text, "%s %.1f%%", class_names[obj.label], obj.prob * 100);

        int baseLine = 0;
        cv::Size label_size = cv::getTextSize(text, cv::FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);

        int x = obj.rect.x;
        int y = obj.rect.y - label_size.height - baseLine;
        if (y < 0)
            y = 0;
        if (x + label_size.width > image.cols)
            x = image.cols - label_size.width;

        cv::rectangle(image, cv::Rect(cv::Point(x, y),
                                    cv::Size(label_size.width, label_size.height + baseLine)),
                    cv::Scalar(255, 255, 255), -1);

        cv::putText(image, text, cv::Point(x, y + label_size.height),
                    cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 0, 0));
    }

    cv::imshow("image", image);
    // cv::waitKey(0);
}

void drawText(Mat & image)
{
    putText(image, "Hello OpenCV",
            Point(20, 50),
            FONT_HERSHEY_COMPLEX, 1, // font face and scale
            Scalar(255, 255, 255), // white
            1, LINE_AA); // line thickness and type
}

int main()
{

    Mat image;
    VideoCapture capture;
    capture.open(0);
    if(capture.isOpened())
    {
        cout << "Capture is opened" << endl;
        for(;;)
        {
            capture >> image;
            if(image.empty())
                break;
            // drawText(image);

            std::vector<Object> objects;
            detect_yolov3(image, objects);

            draw_objects(image, objects);

            // imshow("Sample", image);
            if(waitKey(10) >= 0)
                break;
        }
    }
    else
    {
        cout << "No capture" << endl;
        image = Mat::zeros(480, 640, CV_8UC1);
        drawText(image);
        imshow("Sample", image);
        waitKey(0);
    }

    return 0;
}

Run cmake and make (the resulting ncnnOpencv executable looks for mobilenet_yolo.param and mobilenet_yolo.bin in its working directory, since they are loaded by relative path):

mkdir build
cd build
cmake ..
make

c_cpp_properties.json (VS Code C/C++ IntelliSense configuration)

{
    "configurations": [
        {
            "name": "Linux",
            "includePath": [
                "${workspaceFolder}/**",
                "/usr/local/include/opencv4",
                "/home/ray/ncnn/build/install/include/ncnn"
            ],
            "defines": [],
            "compilerPath": "/usr/bin/gcc",
            "cStandard": "c11",
            "cppStandard": "c++17",
            "intelliSenseMode": "clang-x64"
        }
    ],
    "version": 4
}

Full template code:

https://github.com/radiumray/ncnnCVCompil
