Siehe Github MakeMagazinDE
Version für OpenCV (weil OpenCV keine Blöcke lesen kann?)
/*********
Rui Santos
Complete project details at https://RandomNerdTutorials.com/esp32-cam-video-streaming-web-server-camera-home-assistant/
IMPORTANT!!!
- Select Board "AI Thinker ESP32-CAM"
- GPIO 0 must be connected to GND to upload a sketch
- After connecting GPIO 0 to GND, press the ESP32-CAM on-board RESET button to put your board in flashing mode
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files.
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
*********/
#include "esp_camera.h"
#include <WiFi.h>
#include "esp_timer.h"
#include "img_converters.h"
#include "Arduino.h"
#include "fb_gfx.h"
#include "soc/soc.h" //disable brownout problems
#include "soc/rtc_cntl_reg.h" //disable brownout problems
#include "esp_http_server.h"
//Replace with your network credentials
const char* ssid = "maketest";
const char* password = "maketest";
#define PART_BOUNDARY "123456789000000000000987654321"
// This project was tested with the AI Thinker Model, M5STACK PSRAM Model and M5STACK WITHOUT PSRAM
#define CAMERA_MODEL_AI_THINKER
//#define CAMERA_MODEL_M5STACK_PSRAM
//#define CAMERA_MODEL_M5STACK_WITHOUT_PSRAM
// Not tested with this model
//#define CAMERA_MODEL_WROVER_KIT
// GPIO pin map for the selected board; exactly one CAMERA_MODEL_* must be
// defined above (Y2..Y9 = parallel data lines, SIOD/SIOC = SCCB/I2C bus).
#if defined(CAMERA_MODEL_WROVER_KIT)
#define PWDN_GPIO_NUM -1
#define RESET_GPIO_NUM -1
#define XCLK_GPIO_NUM 21
#define SIOD_GPIO_NUM 26
#define SIOC_GPIO_NUM 27
#define Y9_GPIO_NUM 35
#define Y8_GPIO_NUM 34
#define Y7_GPIO_NUM 39
#define Y6_GPIO_NUM 36
#define Y5_GPIO_NUM 19
#define Y4_GPIO_NUM 18
#define Y3_GPIO_NUM 5
#define Y2_GPIO_NUM 4
#define VSYNC_GPIO_NUM 25
#define HREF_GPIO_NUM 23
#define PCLK_GPIO_NUM 22
#elif defined(CAMERA_MODEL_M5STACK_PSRAM)
#define PWDN_GPIO_NUM -1
#define RESET_GPIO_NUM 15
#define XCLK_GPIO_NUM 27
#define SIOD_GPIO_NUM 25
#define SIOC_GPIO_NUM 23
#define Y9_GPIO_NUM 19
#define Y8_GPIO_NUM 36
#define Y7_GPIO_NUM 18
#define Y6_GPIO_NUM 39
#define Y5_GPIO_NUM 5
#define Y4_GPIO_NUM 34
#define Y3_GPIO_NUM 35
#define Y2_GPIO_NUM 32
#define VSYNC_GPIO_NUM 22
#define HREF_GPIO_NUM 26
#define PCLK_GPIO_NUM 21
#elif defined(CAMERA_MODEL_M5STACK_WITHOUT_PSRAM)
#define PWDN_GPIO_NUM -1
#define RESET_GPIO_NUM 15
#define XCLK_GPIO_NUM 27
#define SIOD_GPIO_NUM 25
#define SIOC_GPIO_NUM 23
#define Y9_GPIO_NUM 19
#define Y8_GPIO_NUM 36
#define Y7_GPIO_NUM 18
#define Y6_GPIO_NUM 39
#define Y5_GPIO_NUM 5
#define Y4_GPIO_NUM 34
#define Y3_GPIO_NUM 35
#define Y2_GPIO_NUM 17
#define VSYNC_GPIO_NUM 22
#define HREF_GPIO_NUM 26
#define PCLK_GPIO_NUM 21
#elif defined(CAMERA_MODEL_AI_THINKER)
// AI Thinker ESP32-CAM: -1 means the line (here: RESET) is not wired.
#define PWDN_GPIO_NUM 32
#define RESET_GPIO_NUM -1
#define XCLK_GPIO_NUM 0
#define SIOD_GPIO_NUM 26
#define SIOC_GPIO_NUM 27
#define Y9_GPIO_NUM 35
#define Y8_GPIO_NUM 34
#define Y7_GPIO_NUM 39
#define Y6_GPIO_NUM 36
#define Y5_GPIO_NUM 21
#define Y4_GPIO_NUM 19
#define Y3_GPIO_NUM 18
#define Y2_GPIO_NUM 5
#define VSYNC_GPIO_NUM 25
#define HREF_GPIO_NUM 23
#define PCLK_GPIO_NUM 22
#else
#error "Camera model not selected"
#endif
// MJPEG-over-HTTP: each frame is one part of a multipart/x-mixed-replace
// response; parts are separated by PART_BOUNDARY and each carries its own
// Content-Length header (filled in via the %u in _STREAM_PART).
static const char* _STREAM_CONTENT_TYPE = "multipart/x-mixed-replace;boundary=" PART_BOUNDARY;
static const char* _STREAM_BOUNDARY = "\r\n--" PART_BOUNDARY "\r\n";
static const char* _STREAM_PART = "Content-Type: image/jpeg\r\nContent-Length: %u\r\n\r\n";
// Handle of the HTTP server instance started in startCameraServer().
httpd_handle_t stream_httpd = NULL;
// HTTP handler for "/": captures camera frames in an endless loop and sends
// them to the client as an MJPEG multipart stream. Returns only when the
// client disconnects or an error occurs.
static esp_err_t stream_handler(httpd_req_t *req){
  camera_fb_t * fb = NULL;
  esp_err_t res = ESP_OK;
  size_t _jpg_buf_len = 0;
  uint8_t * _jpg_buf = NULL;
  // FIX: was "char * part_buf[64]" -- an array of 64 POINTERS misused as a
  // character buffer. snprintf() needs a plain 64-byte char array.
  char part_buf[64];
  res = httpd_resp_set_type(req, _STREAM_CONTENT_TYPE);
  if(res != ESP_OK){
    return res;
  }
  while(true){
    fb = esp_camera_fb_get();
    if (!fb) {
      Serial.println("Camera capture failed");
      res = ESP_FAIL;
    } else {
      // FIX: removed the "fb->width > 400" gate (a leftover from the
      // face-detection example). With the gate, a small frame left
      // _jpg_buf pointing at stale data from the previous iteration.
      if(fb->format != PIXFORMAT_JPEG){
        // Sensor delivered a raw format: compress to JPEG (quality 80).
        bool jpeg_converted = frame2jpg(fb, 80, &_jpg_buf, &_jpg_buf_len);
        esp_camera_fb_return(fb);
        fb = NULL;
        if(!jpeg_converted){
          Serial.println("JPEG compression failed");
          res = ESP_FAIL;
        }
      } else {
        // Frame buffer already holds JPEG data; send it as-is.
        _jpg_buf_len = fb->len;
        _jpg_buf = fb->buf;
      }
    }
    // Send part header, JPEG payload, then the boundary for the next part.
    if(res == ESP_OK){
      size_t hlen = snprintf(part_buf, sizeof(part_buf), _STREAM_PART, _jpg_buf_len);
      res = httpd_resp_send_chunk(req, part_buf, hlen);
    }
    if(res == ESP_OK){
      res = httpd_resp_send_chunk(req, (const char *)_jpg_buf, _jpg_buf_len);
    }
    if(res == ESP_OK){
      res = httpd_resp_send_chunk(req, _STREAM_BOUNDARY, strlen(_STREAM_BOUNDARY));
    }
    // Release the frame: either return the driver buffer or free the buffer
    // that frame2jpg() allocated.
    if(fb){
      esp_camera_fb_return(fb);
      fb = NULL;
      _jpg_buf = NULL;
    } else if(_jpg_buf){
      free(_jpg_buf);
      _jpg_buf = NULL;
    }
    if(res != ESP_OK){
      break; // client closed the connection or a send failed
    }
  }
  return res;
}
// Bring up the HTTP server on port 80 and attach the MJPEG stream handler
// to the root URI ("/").
void startCameraServer(){
  httpd_config_t config = HTTPD_DEFAULT_CONFIG();
  config.server_port = 80;

  httpd_uri_t index_uri = {
    .uri = "/",
    .method = HTTP_GET,
    .handler = stream_handler,
    .user_ctx = NULL
  };

  if (httpd_start(&stream_httpd, &config) != ESP_OK) {
    return; // server failed to start; nothing to register
  }
  httpd_register_uri_handler(stream_httpd, &index_uri);
}
// One-time initialization: disable the brownout detector, configure and
// initialize the camera, join the WLAN, then start the streaming server.
void setup() {
  WRITE_PERI_REG(RTC_CNTL_BROWN_OUT_REG, 0); //disable brownout detector
  Serial.begin(115200);
  Serial.setDebugOutput(false);

  // FIX: zero-initialize the struct so fields that are not set explicitly
  // below hold a defined value instead of stack garbage.
  camera_config_t config = {};
  config.ledc_channel = LEDC_CHANNEL_0;
  config.ledc_timer = LEDC_TIMER_0;
  config.pin_d0 = Y2_GPIO_NUM;
  config.pin_d1 = Y3_GPIO_NUM;
  config.pin_d2 = Y4_GPIO_NUM;
  config.pin_d3 = Y5_GPIO_NUM;
  config.pin_d4 = Y6_GPIO_NUM;
  config.pin_d5 = Y7_GPIO_NUM;
  config.pin_d6 = Y8_GPIO_NUM;
  config.pin_d7 = Y9_GPIO_NUM;
  config.pin_xclk = XCLK_GPIO_NUM;
  config.pin_pclk = PCLK_GPIO_NUM;
  config.pin_vsync = VSYNC_GPIO_NUM;
  config.pin_href = HREF_GPIO_NUM;
  config.pin_sscb_sda = SIOD_GPIO_NUM;
  config.pin_sscb_scl = SIOC_GPIO_NUM;
  config.pin_pwdn = PWDN_GPIO_NUM;
  config.pin_reset = RESET_GPIO_NUM;
  config.xclk_freq_hz = 20000000;
  config.pixel_format = PIXFORMAT_JPEG;
  // With PSRAM: full sensor resolution and double buffering; without PSRAM
  // only SVGA fits into internal RAM. jpeg_quality: 0-63, lower = better.
  if(psramFound()){
    config.frame_size = FRAMESIZE_UXGA;
    config.jpeg_quality = 10;
    config.fb_count = 2;
  } else {
    config.frame_size = FRAMESIZE_SVGA;
    config.jpeg_quality = 12;
    config.fb_count = 1;
  }

  // Camera init
  esp_err_t err = esp_camera_init(&config);
  if (err != ESP_OK) {
    Serial.printf("Camera init failed with error 0x%x", err);
    return;
  }

  // Wi-Fi connection (blocks until associated)
  WiFi.begin(ssid, password);
  while (WiFi.status() != WL_CONNECTED) {
    delay(500);
    Serial.print(".");
  }
  Serial.println("");
  Serial.println("WiFi connected");
  Serial.print("Camera Stream Ready! Go to: http://");
  Serial.println(WiFi.localIP()); // FIX: was print(); terminate the line

  // Start streaming web server
  startCameraServer();
}
// Nothing to do here: streaming is served by the esp_http_server task.
void loop() {
delay(1); // brief delay to yield the CPU
}
Version für Browser
#include <esp32cam.h>
#include <WebServer.h>
#include <WiFi.h>
// WLAN credentials of the access point the camera joins.
const char* WIFI_SSID = "my-ssid";
const char* WIFI_PASS = "my-pass";
// HTTP server on port 80 plus the two resolutions offered by the endpoints.
WebServer server(80);
static auto loRes = esp32cam::Resolution::find(320, 240);
static auto hiRes = esp32cam::Resolution::find(800, 600);
// GET /cam.bmp: capture at low resolution, convert the frame to BMP in
// place, and stream it to the client. Sends 503 on capture/convert failure.
void handleBmp()
{
  // BMP frames are uncompressed and large, so drop to 320x240 first.
  if (!esp32cam::Camera.changeResolution(loRes)) {
    Serial.println("SET-LO-RES FAIL");
  }

  auto frame = esp32cam::capture();
  if (!frame) {
    Serial.println("CAPTURE FAIL");
    server.send(503, "", "");
    return;
  }
  Serial.printf("CAPTURE OK %dx%d %db\n", frame->getWidth(), frame->getHeight(),
                static_cast<int>(frame->size()));

  bool converted = frame->toBmp();
  if (!converted) {
    Serial.println("CONVERT FAIL");
    server.send(503, "", "");
    return;
  }
  Serial.printf("CONVERT OK %dx%d %db\n", frame->getWidth(), frame->getHeight(),
                static_cast<int>(frame->size()));

  // Write the converted frame straight to the client socket.
  server.setContentLength(frame->size());
  server.send(200, "image/bmp");
  auto client = server.client();
  frame->writeTo(client);
}
// Shared helper: capture one frame at the current resolution and deliver it
// to the client as image/jpeg. Sends 503 when the capture fails.
void serveJpg()
{
  auto frame = esp32cam::capture();
  if (!frame) {
    Serial.println("CAPTURE FAIL");
    server.send(503, "", "");
    return;
  }
  Serial.printf("CAPTURE OK %dx%d %db\n", frame->getWidth(), frame->getHeight(),
                static_cast<int>(frame->size()));

  // The sensor already delivers JPEG, so the frame goes out unmodified.
  server.setContentLength(frame->size());
  server.send(200, "image/jpeg");
  auto client = server.client();
  frame->writeTo(client);
}
// GET /cam-lo.jpg: switch to 320x240, then reuse the common JPEG sender.
void handleJpgLo()
{
  bool ok = esp32cam::Camera.changeResolution(loRes);
  if (!ok) {
    Serial.println("SET-LO-RES FAIL");
  }
  serveJpg();
}
// GET /cam-hi.jpg: switch to 800x600, then reuse the common JPEG sender.
void handleJpgHi()
{
  bool ok = esp32cam::Camera.changeResolution(hiRes);
  if (!ok) {
    Serial.println("SET-HI-RES FAIL");
  }
  serveJpg();
}
// GET /cam.jpg is just an alias: redirect (302) to the high-res endpoint.
void handleJpg()
{
  server.sendHeader("Location", "/cam-hi.jpg");
  server.send(302, "", "");
}
// GET /cam.mjpeg: stream MJPEG at high resolution until the client
// disconnects, then log the achieved frame rate.
void handleMjpeg()
{
  if (!esp32cam::Camera.changeResolution(hiRes)) {
    Serial.println("SET-HI-RES FAIL");
  }

  Serial.println("STREAM BEGIN");
  WiFiClient client = server.client();
  auto startTime = millis();
  // streamMjpeg() blocks until the client goes away; it returns the number
  // of frames delivered, or <= 0 on error.
  int res = esp32cam::Camera.streamMjpeg(client);
  if (res <= 0) {
    Serial.printf("STREAM ERROR %d\n", res);
    return;
  }
  auto duration = millis() - startTime;
  if (duration == 0) {
    duration = 1; // FIX: avoid division by zero for extremely short streams
  }
  Serial.printf("STREAM END %dfrm %0.2ffps\n", res, 1000.0 * res / duration);
}
void setup()
{
Serial.begin(115200);
Serial.println();
{
using namespace esp32cam;
Config cfg;
cfg.setPins(pins::AiThinker);
cfg.setResolution(hiRes);
cfg.setBufferCount(2);
cfg.setJpeg(80);
bool ok = Camera.begin(cfg);
Serial.println(ok ? "CAMERA OK" : "CAMERA FAIL");
}
WiFi.persistent(false);
WiFi.mode(WIFI_STA);
WiFi.begin(WIFI_SSID, WIFI_PASS);
while (WiFi.status() != WL_CONNECTED) {
delay(500);
}
Serial.print("http://");
Serial.println(WiFi.localIP());
Serial.println(" /cam.bmp");
Serial.println(" /cam-lo.jpg");
Serial.println(" /cam-hi.jpg");
Serial.println(" /cam.mjpeg");
server.on("/cam.bmp", handleBmp);
server.on("/cam-lo.jpg", handleJpgLo);
server.on("/cam-hi.jpg", handleJpgHi);
server.on("/cam.jpg", handleJpg);
server.on("/cam.mjpeg", handleMjpeg);
server.begin();
}
// Main loop: dispatch pending HTTP requests to the registered handlers.
void loop()
{
server.handleClient();
}
Dazu folgender OpenCV-Code
# Continuously fetch single JPEG frames from the ESP32-CAM's /cam-hi.jpg
# endpoint and display them with OpenCV. Requires Python 3.
#
# FIX: "import urllib" alone does NOT make urllib.request available in
# Python 3 -- the submodule must be imported explicitly. (This is why the
# original only "worked with Python 3" by accident, if at all.)
import urllib.request

import cv2
import numpy as np

# URL of the high-resolution still-image endpoint served by the ESP32-CAM.
url = 'http://192.168.8.100/cam-hi.jpg'

while True:
    # Fetch one JPEG over HTTP and decode it into a BGR image.
    imgResp = urllib.request.urlopen(url)
    imgNp = np.array(bytearray(imgResp.read()), dtype=np.uint8)
    img = cv2.imdecode(imgNp, -1)
    # all the opencv processing is done here
    cv2.imshow('test', img)
    # Quit on 'q'; waitKey(10) also services the GUI event loop.
    if ord('q') == cv2.waitKey(10):
        exit(0)
Funktioniert nur mit Python3 und nicht mit Python2!
catkin_ws einrichten: bashrc um source "/root/catkin_ws/devel/setup.bash" erweitern
cd src git clone https://github.com/dabmake/esp32ros.git apt-get install ros-melodic-image-transport
sudo apt-get install libcurl4-openssl-dev libcurlpp-dev
Code:
#include "curl/curl.h" // has to go before opencv headers
#include <stdio.h>
#include <stdlib.h>
#include <ros/ros.h>
#include <image_transport/image_transport.h>
#include <opencv2/highgui/highgui.hpp>
#include <cv_bridge/cv_bridge.h>
#include <iostream>
#include <vector>
#include <camera_info_manager/camera_info_manager.h>
using namespace std;
#include <opencv2/opencv.hpp>
// libcurl write callback: append the received chunk to the byte vector
// passed via userdata. Returning the full byte count signals libcurl that
// all data was consumed; anything less aborts the transfer.
size_t write_data(char *ptr, size_t size, size_t nmemb, void *userdata)
{
    auto *sink = static_cast<std::vector<unsigned char> *>(userdata);
    const size_t total = size * nmemb;
    sink->insert(sink->end(), ptr, ptr + total);
    return total;
}
int main(int argc , char** argv)
{
ros::init(argc, argv , "esp32cam");
ros::NodeHandle nh;
const std::string cname="esp32cam";
const std::string url="file://${ROS_HOME}/esp32cam.yaml";
ros::Publisher pubCameraInfo = nh.advertise<sensor_msgs::CameraInfo>("esp32cam/camera_info", 1);
camera_info_manager::CameraInfoManager cinfo(nh,cname,url);
sensor_msgs::CameraInfo msgCameraInfo;
/*http://ros-developer.com/2018/12/14/how-to-use-image_geometry-and-camera_info_manager-in-ros/
https://answers.ros.org/question/236976/how-to-publish-camera_info-manually/
https://answers.ros.org/question/33929/camera-calibration-parser-in-python/
*/
image_transport::ImageTransport it(nh);
image_transport::Publisher pub = it.advertise("esp32cam/image", 1);
sensor_msgs::ImagePtr msg;
ros::Rate loop_rate(5);
while (nh.ok()) {
vector<uchar> stream;
CURL *curl = curl_easy_init();
curl_easy_setopt(curl, CURLOPT_URL, "http://192.168.2.165/cam-hi.jpg"); //the img url
curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, write_data); // pass the writefunction
curl_easy_setopt(curl, CURLOPT_WRITEDATA, &stream); // pass the stream ptr to the writefunction
curl_easy_setopt(curl, CURLOPT_TIMEOUT, 10); // timeout if curl_easy hangs,
CURLcode res = curl_easy_perform(curl); // start curl
if (res != CURLE_OK) {
fprintf(stderr, "curl_easy_perform() failed: %s\n", curl_easy_strerror(res));
}
else {
cv::Mat image = cv::imdecode(stream, -1);
if (!image.empty()){
msg = cv_bridge::CvImage(std_msgs::Header(), "bgr8", image).toImageMsg();
pub.publish(msg);
}
}
curl_easy_cleanup(curl); // cleanup
msgCameraInfo=cinfo.getCameraInfo();
pubCameraInfo.publish(msgCameraInfo);
ros::spinOnce();
loop_rate.sleep();
}
}
Die Datei package.xml muss angepasst respektive erweitert werden:
<?xml version="1.0"?>
<package format="2">
<name>ros-esp32cam</name>
<version>0.0.0</version>
<description>The ros-esp32cam package</description>
<!-- One maintainer tag required, multiple allowed, one person per tag -->
<!-- Example: -->
<!-- <maintainer email="jane.doe@example.com">Jane Doe</maintainer> -->
<maintainer email="root@todo.todo">root</maintainer>
<!-- One license tag required, multiple allowed, one license per tag -->
<!-- Commonly used license strings: -->
<!-- BSD, MIT, Boost Software License, GPLv2, GPLv3, LGPLv2.1, LGPLv3 -->
<license>TODO</license>
<buildtool_depend>catkin</buildtool_depend>
<build_depend>cv_bridge</build_depend>
<build_depend>rospy</build_depend>
<build_depend>roscpp</build_depend>
<build_depend>sensor_msgs</build_depend>
<build_depend>std_msgs</build_depend>
<build_depend>image_transport</build_depend>
<build_depend>camera_info_manager</build_depend>
<build_export_depend>cv_bridge</build_export_depend>
<build_export_depend>rospy</build_export_depend>
<build_export_depend>roscpp</build_export_depend>
<build_export_depend>sensor_msgs</build_export_depend>
<build_export_depend>std_msgs</build_export_depend>
<build_export_depend>image_transport</build_export_depend>
<build_export_depend>camera_info_manager</build_export_depend>
<exec_depend>cv_bridge</exec_depend>
<exec_depend>rospy</exec_depend>
<exec_depend>roscpp</exec_depend>
<exec_depend>sensor_msgs</exec_depend>
<exec_depend>std_msgs</exec_depend>
<exec_depend>image_transport</exec_depend>
<exec_depend>camera_info_manager</exec_depend>
<!-- The export tag contains other, unspecified, tags -->
<export>
<!-- Other tools can request additional information be placed here -->
</export>
</package>
Nicht zu vergessen die CMake-Datei (CMakeLists.txt)
cmake_minimum_required(VERSION 2.8.3)
project(ros-esp32cam)
## Compile as C++11, supported in ROS Kinetic and newer
# add_compile_options(-std=c++11)
## Find catkin macros and libraries
## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
## is used, also find other catkin packages
find_package(catkin REQUIRED COMPONENTS
roscpp
cv_bridge
rospy
sensor_msgs
std_msgs
image_transport
camera_info_manager
)
find_package(OpenCV REQUIRED)
###################################
## catkin specific configuration ##
###################################
## The catkin_package macro generates cmake config files for your package
## Declare things to be passed to dependent projects
## INCLUDE_DIRS: uncomment this if your package contains header files
## LIBRARIES: libraries you create in this project that dependent projects also need
## CATKIN_DEPENDS: catkin_packages dependent projects also need
## DEPENDS: system dependencies of this project that dependent projects also need
catkin_package(
#INCLUDE_DIRS include
LIBRARIES ros-esp32cam
CATKIN_DEPENDS cv_bridge roscpp rospy sensor_msgs std_msgs image_transport camera_info_manager
DEPENDS system_lib
)
###########
## Build ##
###########
## Specify additional locations of header files
## Your package locations should be listed before other locations
include_directories(
# include
${catkin_INCLUDE_DIRS}
${OpenCV_INCLUDE_DIRS}
)
## Declare a C++ library
# add_library(${PROJECT_NAME}
# src/${PROJECT_NAME}/ros-esp32cam.cpp
# )
## Add cmake target dependencies of the library
## as an example, code may need to be generated before libraries
## either from message generation or dynamic reconfigure
# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
## Declare a C++ executable
## With catkin_make all packages are built within a single CMake context
## The recommended prefix ensures that target names across packages don't collide
add_executable(${PROJECT_NAME}_node src/ros-esp32cam_node.cpp)
add_executable(espcam_node src/espcam-node.cpp)
add_executable(esp32cam_node src/esp32cam-node.cpp)
## Specify libraries to link a library or executable target against
target_link_libraries(${PROJECT_NAME}_node
${catkin_LIBRARIES}
${OpenCV_LIBS}
curl
)
target_link_libraries(espcam_node
${catkin_LIBRARIES}
${OpenCV_LIBS}
curl
)
target_link_libraries(esp32cam_node
${catkin_LIBRARIES}
${OpenCV_LIBS}
curl
)
mit dem Befehle catkin_make
im Verzeichnis catkin_ws übersetzt man alle drei Dateien
root@NAS:~/catkin_ws# catkin_make
Base path: /root/catkin_ws
Source space: /root/catkin_ws/src
Build space: /root/catkin_ws/build
Devel space: /root/catkin_ws/devel
Install space: /root/catkin_ws/install
####
#### Running command: "make cmake_check_build_system" in "/root/catkin_ws/build"
####
####
#### Running command: "make -j4 -l4" in "/root/catkin_ws/build"
####
[ 33%] Built target espcam_node
[ 66%] Built target esp32cam_node
[100%] Built target ros-esp32cam_node
Die Struktur unter catkin_ws/src sieht folgendermaßen aus: Es gibt ein Paket esp32ros und ein Unterpaket ros-esp32cam für die Camera
root@NAS:~/catkin_ws/src# tree
.
├── CMakeLists.txt -> /opt/ros/melodic/share/catkin/cmake/toplevel.cmake
└── esp32ros
└── ros-esp32cam
├── CMakeLists.txt
├── package.xml
└── src
├── esp32cam-node.backup
├── esp32cam-node.cpp
├── esp32cam.yaml
├── espcam-node.cpp
├── espcam-node.old
└── ros-esp32cam_node.cpp
Die Datei esp32cam-node veröffentlicht das Bild der ESP32CAM als esp32cam/image nebst der Kalibrierungsdaten über Sensormessages und den Publisher esp32cam/camera_info.
espcam-node.cpp veröffentlicht nur das Bild ohne Info. ros-esp32cam_node.cpp lädt nur ein einziges Bild und zeigt es auf dem Desktop an. Das funktioniert natürlich nicht auf Headless-Systemen.
Die Datei esp32cam.yaml ist die Kalibrierungsdatei für die ESP32CAM, die mit den Kalibrierungstools erstellt wurde, um Verzerrungen ausmessen zu können. Damit ist später eine genauere Entfernungsmessung mit der Kamera möglich.
image_width: 800
image_height: 600
camera_name: narrow_stereo
camera_matrix:
  rows: 3
  cols: 3
  data: [ 835.85408, 0. , 421.64641,
          0. , 838.37779, 356.81538,
          0. , 0. , 1. ]
# FIX: camera_info_manager expects the key "distortion_model"; the original
# key "camera_model" is what caused the "Camera calibration file did not
# specify distortion model" warning when the node loaded this file.
distortion_model: plumb_bob
distortion_coefficients:
  rows: 1
  cols: 5
  data: [0.150221, -0.423095, 0.017521, -0.005664, 0.000000]
rectification_matrix:
  rows: 3
  cols: 3
  data: [ 1., 0., 0.,
          0., 1., 0.,
          0., 0., 1.]
projection_matrix:
  rows: 3
  cols: 4
  # FIX: the data entry was missing entirely; values restored from the
  # P matrix printed by the calibration run.
  data: [ 850.735413, 0., 418.904340, 0.,
          0., 847.227478, 363.773924, 0.,
          0., 0., 1., 0.]
Damit man die Kalibrierungsdaten veröffentlichen kann, benötigt man den camera info manager:
sudo apt-get install ros-melodic-camera-info-manager
unter devel sieht es so aus
root@NAS:~/catkin_ws/devel# tree
.
├── _setup_util.py
├── cmake.lock
├── env.sh
├── lib
│ ├── pkgconfig
│ │ └── ros-esp32cam.pc
│ └── ros-esp32cam
│ ├── esp32cam_node
│ ├── espcam_node
│ └── ros-esp32cam_node
├── local_setup.bash
├── local_setup.sh
├── local_setup.zsh
├── setup.bash
├── setup.sh
├── setup.zsh
└── share
└── ros-esp32cam
└── cmake
├── ros-esp32camConfig-version.cmake
└── ros-esp32camConfig.cmake
Unter build sieht es zu unübersichtlich aus, als dass es sich lohnt, das hier abzubilden.
Auf Systemen ohne Display vorher noch alles auskommentieren, was das Bild anzeigt (imshow etc.)
diesem Tutorial folgend: http://wiki.ros.org/camera_calibration/Tutorials/MonocularCalibration
Zuerst druckt man das Schwarzweiß-Muster aus (Checkerboard als PDF)
Die Kantenlänge der Quadrate beträgt 25mm, auf meinem Drucker.
Nun die Software installieren:
ACHTUNG: funktioniert unter Melodic ohne Änderungen, unter Noetic bzw. Ubuntu Focal Fossa ist Python 2 standardmäßig nicht installiert. Im Header von cameracalibrator.py steht allerdings ein Shebang #!/usr/bin/python.
Das Problem lässt sich durch einen symbolischen Link lösen
sudo ln -s /usr/bin/python3 /usr/bin/python
Dann kann man das Kalibratorpaket installieren. Entweder über
rosdep install camera_calibration
Oder über den Paketmanager
sudo apt-get install ros-noetic-camera-calibration
Die Kalibrierung startet man mit
root@raspberrypi:~# rosrun camera_calibration cameracalibrator.py --size 8x6 --square 0.025 image:=/esp32cam/image camera:=camera
Allerdings gibt es eine Fehlermeldung, da standardmäßig kein Infomanager für die Kamera läuft.
Waiting for Service /camera/ser_camera_info ...
Service not found
Entweder man lässt camera:=camera weg oder stellt einfach --no-service-check
hinten dran
Wenn das Kalibriertool läuft, hält man das ausgedruckte Schwarzweiß-Muster (A4 auf Plastikdeckel geklebt) vor die Kamera und
root@raspberrypi:~# rosrun camera_calibration cameracalibrator.py --size 8x6 --square 0.025 image:=/esp32cam/image camera:=camera --no-service-check
(display:405): GLib-GObject-CRITICAL **: 07:11:26.494: g_object_unref: assertion 'G_IS_OBJECT (object)' failed
*** Added sample 1, p_x = 0.461, p_y = 0.397, p_size = 0.482, skew = 0.485
*** Added sample 2, p_x = 0.420, p_y = 0.420, p_size = 0.503, skew = 0.338
*** Added sample 3, p_x = 0.343, p_y = 0.352, p_size = 0.520, skew = 0.266
*** Added sample 4, p_x = 0.416, p_y = 0.558, p_size = 0.420, skew = 0.021
*** Added sample 5, p_x = 0.551, p_y = 0.465, p_size = 0.350, skew = 0.023
*** Added sample 6, p_x = 0.662, p_y = 0.501, p_size = 0.401, skew = 0.069
*** Added sample 7, p_x = 0.587, p_y = 0.685, p_size = 0.536, skew = 0.048
*** Added sample 8, p_x = 0.646, p_y = 0.565, p_size = 0.595, skew = 0.087
*** Added sample 9, p_x = 0.683, p_y = 0.433, p_size = 0.617, skew = 0.141
*** Added sample 10, p_x = 0.464, p_y = 0.473, p_size = 0.653, skew = 0.041
*** Added sample 11, p_x = 0.390, p_y = 0.372, p_size = 0.633, skew = 0.049
*** Added sample 12, p_x = 0.818, p_y = 0.442, p_size = 0.321, skew = 0.168
*** Added sample 13, p_x = 0.278, p_y = 0.385, p_size = 0.402, skew = 0.148
*** Added sample 14, p_x = 0.364, p_y = 0.426, p_size = 0.370, skew = 0.227
*** Added sample 15, p_x = 0.750, p_y = 0.362, p_size = 0.412, skew = 0.077
*** Added sample 16, p_x = 0.888, p_y = 0.346, p_size = 0.426, skew = 0.144
*** Added sample 17, p_x = 0.606, p_y = 0.347, p_size = 0.429, skew = 0.031
*** Added sample 18, p_x = 0.257, p_y = 0.314, p_size = 0.449, skew = 0.075
*** Added sample 19, p_x = 0.478, p_y = 0.230, p_size = 0.475, skew = 0.039
*** Added sample 20, p_x = 0.464, p_y = 0.117, p_size = 0.533, skew = 0.106
*** Added sample 21, p_x = 0.757, p_y = 0.685, p_size = 0.369, skew = 0.032
*** Added sample 22, p_x = 0.812, p_y = 0.711, p_size = 0.501, skew = 0.081
*** Added sample 23, p_x = 0.756, p_y = 0.142, p_size = 0.526, skew = 0.133
*** Added sample 24, p_x = 0.292, p_y = 0.410, p_size = 0.550, skew = 0.050
*** Added sample 25, p_x = 0.319, p_y = 0.534, p_size = 0.509, skew = 0.035
*** Added sample 26, p_x = 0.117, p_y = 0.463, p_size = 0.495, skew = 0.043
*** Added sample 27, p_x = 0.123, p_y = 0.198, p_size = 0.465, skew = 0.079
*** Added sample 28, p_x = 0.230, p_y = 0.045, p_size = 0.444, skew = 0.067
*** Added sample 29, p_x = 0.265, p_y = 0.142, p_size = 0.483, skew = 0.036
*** Added sample 30, p_x = 0.346, p_y = 0.726, p_size = 0.448, skew = 0.068
*** Added sample 31, p_x = 0.505, p_y = 0.600, p_size = 0.464, skew = 0.135
*** Added sample 32, p_x = 0.357, p_y = 0.000, p_size = 0.541, skew = 0.034
*** Added sample 33, p_x = 0.257, p_y = 0.734, p_size = 0.604, skew = 0.058
*** Added sample 34, p_x = 0.179, p_y = 0.522, p_size = 0.574, skew = 0.008
*** Added sample 35, p_x = 0.403, p_y = 0.734, p_size = 0.530, skew = 0.348
*** Added sample 36, p_x = 0.654, p_y = 0.657, p_size = 0.542, skew = 0.278
*** Added sample 37, p_x = 0.575, p_y = 0.501, p_size = 0.486, skew = 0.237
*** Added sample 38, p_x = 0.525, p_y = 0.382, p_size = 0.494, skew = 0.273
*** Added sample 39, p_x = 0.334, p_y = 0.662, p_size = 0.567, skew = 0.110
*** Added sample 40, p_x = 0.528, p_y = 0.743, p_size = 0.394, skew = 0.030
*** Added sample 41, p_x = 0.614, p_y = 0.721, p_size = 0.291, skew = 0.045
**** Calibrating ****
*** Added sample 42, p_x = 0.660, p_y = 0.725, p_size = 0.410, skew = 0.128
D = [0.15022134806040208, -0.4230954242051678, 0.01752132842705994, -0.005663731092300748, 0.0]
K = [835.8540789959028, 0.0, 421.64641207176396, 0.0, 838.3777899657703, 356.8153829294532, 0.0, 0.0, 1.0]
R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
P = [850.7354125976562, 0.0, 418.90434027338415, 0.0, 0.0, 847.2274780273438, 363.77392418121417, 0.0, 0.0, 0.0, 1.0, 0.0]
None
# oST version 5.0 parameters
[image]
width
800
height
600
[narrow_stereo]
camera matrix
835.854079 0.000000 421.646412
0.000000 838.377790 356.815383
0.000000 0.000000 1.000000
distortion
0.150221 -0.423095 0.017521 -0.005664 0.000000
rectification
1.000000 0.000000 0.000000
0.000000 1.000000 0.000000
0.000000 0.000000 1.000000
projection
850.735413 0.000000 418.904340 0.000000
0.000000 847.227478 363.773924 0.000000
0.000000 0.000000 1.000000 0.000000
('Wrote calibration data to', '/tmp/calibrationdata.tar.gz')
('Wrote calibration data to', '/tmp/calibrationdata.tar.gz')
('Wrote calibration data to', '/tmp/calibrationdata.tar.gz')
^Croot@raspberrypi:~# ls /tmp/calibrationdata.tar.gz
/tmp/calibrationdata.tar.gz
root@raspberrypi:~# ls -al /tmp/calibrationdata.tar.gz
-rw-r--r-- 1 root root 4523291 Aug 15 07:25 /tmp/calibrationdata.tar.gz
So sieht das Bild aus.
Das Kalibrierungstool hat das Muster erkannt. Manchmal ist es zu dunkel dafür.
Nun muss man das Bild von oben nach unten und von links nach rechts sowie von vorne nach hinten bewegen. Sobald das Tool jeweils genug Daten für eine Achse hat, geht der Balken (x,y, Skew) auf grün. Sind alle auf grün oder gelb, wird der Kalibrierbutton ebenfalls grün und man kann ihn anklicken.
Mit Save kann man anschließend die Daten speichern. Dazu legt das Tool unter /tmp ein Archiv ab.
In dem TAR-Archiv sind die Kalibrierungsbilder gespeichert sowie die Dateien ost.yaml und ost.txt. ost.yaml benennt man in esp32cam.yaml um und speichert sie im Verzeichnis $ROS_HOME. Das ist im Pfad des Users (root/dab) der Ordner .ros
root@NAS:~/.ros# pwd
/root/.ros
root@NAS:~/.ros# ls
esp32cam.yaml roscore-11311.pid rospack_cache_02946445166995464026
log rosdep rospack_cache_14834868803710052660
Mit dem Befehl
root@NAS:~/catkin_ws# rosrun ros-esp32cam esp32cam_node
[ INFO] [1604006321.532070249]: camera calibration URL: file:///root/.ros/esp32cam.yaml
[ WARN] [1604006321.546284988]: Camera calibration file did not specify distortion model, assumin
````
startet man den Node, der das Bild und die Infodaten published.
Auf anderen Systemen kann man dann mit
````bash
rosrun image_view image_view image:=/esp32cam/image
den Stream anschauen
Mit Apriltags kann man per Monocular-Kameras Entfernungen zu Tags sowie deren Pose dazu ermitteln
verschiedene Arten von Tags hier 36h11 zum Ausdrucken https://www.dotproduct3d.com/uploads/8/5/1/1/85115558/apriltags1-20.pdf weitere vorgenerierte Tags https://april.eecs.umich.edu/software/apriltag.html
https://doc.rc-visard.com/latest/de/tagdetect.html
topics sudo apt-get install ros-melodic-apriltag-ros oder sudo apt-get install ros-noetic-apriltag-ros sudo apt-get install vim (für rosed)
Zum Paket Apriltags gehören folgende Dateien:
root@NAS:~/.ros# rosed apriltag_ros
AnalyzeSingleImage.srv apriltag_ros_single_image_client_node
AprilTagDetection.msg apriltag_ros_single_image_server_node
AprilTagDetectionArray.msg continuous_detection.launch
analyze_image nodelet_plugins.xml
apriltag_ros-msg-extras.cmake package.xml
apriltag_ros-msg-paths.cmake settings.yaml
apriltag_rosConfig-version.cmake single_image_client.launch
apriltag_rosConfig.cma
Zum Betrieb müssen wir die Datei tags.yaml anpassen und zwar Aufruf mit
rosed apriltag_ros tags.yaml
und folgenden Code einfügen
standalone_tags:
[
{id: 0, size: 0.173, name: robot1},
{id: 1, size: 0.173, name: robot2}
]
Die Größe bezieht sich auf die Seitenlänge und ist mit dem Lineal ausgemessen: 173mm bzw 17,3cm. Dem Tag mit der Id0 ordnen wir den Namen robot1 zu
Zusätzlich muss man den Launchfile continuous_detection.launch anpassen, genauer gesagt die Namen für die Kamera und Topics. Aus den Argumenten camera_name und image_topic wird dann
$(arg camera_name)/$(arg image_topic)
um das Topic esp32cam/image zu abonnieren
<launch>
  <arg name="launch_prefix" default="" /> <!-- set to value="gdbserver localhost:10000" for remote debugging -->
  <arg name="node_namespace" default="apriltag_ros_continuous_node" />
  <arg name="camera_name" default="/esp32cam" />
  <arg name="camera_frame" default="camera" />
  <arg name="image_topic" default="image" />

  <!-- Set parameters -->
  <rosparam command="load" file="$(find apriltag_ros)/config/settings.yaml" ns="$(arg node_namespace)" />
  <rosparam command="load" file="$(find apriltag_ros)/config/tags.yaml" ns="$(arg node_namespace)" />

  <node pkg="apriltag_ros" type="apriltag_ros_continuous_node" name="$(arg node_namespace)" clear_params="true" output="screen" launch-prefix="$(arg launch_prefix)" >
    <!-- Remap topics from those used in code to those on the ROS network -->
    <remap from="image_rect" to="$(arg camera_name)/$(arg image_topic)" />
    <remap from="camera_info" to="$(arg camera_name)/camera_info" />

    <param name="camera_frame" type="str" value="$(arg camera_frame)" />
    <param name="publish_tag_detections_image" type="bool" value="true" /> <!-- default: false -->
  </node>
</launch>
Nun startet man den Node mit
roslaunch apriltag_ros continuous_detection.launch
auf einem anderen PC mit Grafik kann man sich die Bilder mit erkannten Tags anzeigen lassen:
rosrun image_view image_view image:=/tag_detections_image
Sofern der Node ein Tag erkannt hat, baut er in die Bilder die entsprechenden Informationen ein und veröffentlicht sie in dem Topic.
Parallel dazu kann man sich noch das Topic tag_detections anschauen.
header:
seq: 1781
stamp:
secs: 0
nsecs: 0
frame_id: ''
detections:
-
id: [0]
size: [0.173]
pose:
header:
seq: 4743
stamp:
secs: 0
nsecs: 0
frame_id: ''
pose:
pose:
position:
x: 0.007003948813539688
y: -0.028094134008198143
z: 0.6128322247027292
orientation:
x: 0.9941473774180957
y: -0.01149891131653592
z: 0.08722172284509422
w: 0.06269878846668715
covariance: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
---
z entspricht dem Abstand von der Kamera, ca 61cm.
Altes S5 Mini mit IP Cam
sudo apt-get install ros-melodic-video-stream-opencv
<?xml version="1.0"?>
<launch>
<!-- launch video stream -->
<include file="$(find video_stream_opencv)/launch/camera.launch" >
<!-- node name and ros graph name -->
<arg name="camera_name" value="S5" />
<!-- url of the video stream -->
<arg name="video_stream_provider" value="http://192.168.2.197:8080/video" />
<!-- set camera fps to (probably does nothing on a mjpeg stream)-->
<arg name="set_camera_fps" value="30"/>
<!-- set buffer queue size of frame capturing to -->
<arg name="buffer_queue_size" value="100" />
<!-- throttling the querying of frames to -->
<arg name="fps" value="30" />
<!-- setting frame_id -->
<arg name="frame_id" value="axis_optical_frame" />
<!-- camera info loading, take care as it needs the "file:///" at the start , e.g.:
"file:///$(find your_camera_package)/config/your_camera.yaml" -->
<arg name="camera_info_url" value="" />
<!-- flip the image horizontally (mirror it) -->
<arg name="flip_horizontal" value="false" />
<!-- flip the image vertically -->
<arg name="flip_vertical" value="false" />
<!-- visualize on an image_view window the stream generated -->
<arg name="visualize" value="false" />
</include>
</launch>
roslaunch video_stream_opencv mjpg_stream.launch