spacebox/demo/camera/camera.cpp

/* /\ +------------------------------------------------------+
* ____/ \____ /| - Open source game framework licensed to freely use, |
* \ / / | copy, modify and sell without restriction |
* +--\ ^__^ /--+ | |
* | ~/ \~ | | - created for <https://foam.shampoo.ooo> |
* | ~~~~~~~~~~~~ | +------------------------------------------------------+
* | SPACE ~~~~~ | /
* | ~~~~~~~ BOX |/
* +--------------+
*
* Camera example by frank at shampoo.ooo
*
* This is an example program that uses the external OpenCV library to display camera input full screen. It can
* be used for testing SPACEBOX with OpenCV.
*/
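/* The demo reads its settings through `configuration()`. Judging by the JSON-style lookups below, a config along
* these lines would exercise every option the demo uses. The key names are taken from the code; the values, and the
* assumption that the config file is JSON, are illustrative only:
*
*   {
*     "scan": {
*       "camera-device-id": 0,
*       "sharpen": true,
*       "brighten": true,
*       "brightness-addition": 30,
*       "contrast-multiplication": 1.2
*     },
*     "display": {
*       "camera-resolution": [1280, 720]
*     }
*   }
*/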
/* OpenCV */
#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"
/* SPACEBOX */
#include "Game.hpp"
#include "Model.hpp"
#include "Connection.hpp"
#include "VBO.hpp"
class Camera : public Game
{
public:
cv::VideoCapture capture;
sb::VAO vao;
sb::VBO vbo;
sb::Plane camera_view;
GLuint flat_program;
/* Written by the capture thread and read and reset by the main thread, so use an atomic flag */
std::atomic<bool> new_frame_available {false};
std::map<std::string, std::map<std::string, GLuint>> uniform;
cv::Mat camera_frame, blurred, low_contrast_mask;
/* Open camera on connection and close on disconnection. */
sb::Connection<> camera_switch {
std::bind(&Camera::open, this),
std::bind(&Camera::close, this)
};
Camera()
{
load_gl_context();
/* Add a texture to the camera Plane for storing frame image data */
camera_view.texture(sb::Texture());
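/* `cv::Mat` frames store the top row first while GL texture space puts the first row at v = 0, so the plane is
* flipped vertically here, presumably to keep the camera image upright on screen */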
glm::mat4 flip = glm::mat4(1);
flip[1][1] = -1;
camera_view.transformation(flip);
camera_switch.connect();
}
/* Create the GL context via the base class and load vertices, UV data, and shaders */
void load_gl_context()
{
Game::load_gl_context();
/* Generate a vertex array object ID, bind it as current (requirement of OpenGL) */
vao.generate();
vao.bind();
/* Generate an ID for the vertex buffer object that will hold all vertex data. Since one buffer is used for all
* attributes, the attributes are copied in one after the other. */
vbo.generate();
vbo.bind();
/* Load flat shader programs */
GLuint vertex_shader = load_shader("shaders/flat.vert", GL_VERTEX_SHADER);
GLuint fragment_shader = load_shader("shaders/flat.frag", GL_FRAGMENT_SHADER);
flat_program = glCreateProgram();
glAttachShader(flat_program, vertex_shader);
glAttachShader(flat_program, fragment_shader);
sb::Plane::position->bind(0, flat_program, "in_position");
sb::Plane::uv->bind(1, flat_program, "vertex_uv");
sb::Log::gl_errors("after loading shaders");
/* Fill VBO with attribute data */
vbo.allocate(camera_view.size(), GL_STATIC_DRAW);
vbo.add(*camera_view.position);
vbo.add(*camera_view.uv);
sb::Log::gl_errors("after filling VBO");
/* link shaders */
link_shader(flat_program);
glUseProgram(flat_program);
sb::Log::gl_errors("after linking");
/* store uniform locations after linking */
uniform["flat"]["texture"] = glGetUniformLocation(flat_program, "base_texture");
uniform["flat"]["time"] = glGetUniformLocation(flat_program, "time");
uniform["flat"]["scroll"] = glGetUniformLocation(flat_program, "scroll");
uniform["flat"]["blend"] = glGetUniformLocation(flat_program, "blend_min_hsv");
uniform["flat"]["transformation"] = glGetUniformLocation(flat_program, "transformation");
sb::Log::gl_errors("after uniform locations");
/* setup GL parameters that won't change for the duration of the program */
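/* Standard alpha blending: the incoming fragment is weighted by its alpha and the existing pixel by one minus that alpha */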
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glEnable(GL_BLEND);
glActiveTexture(GL_TEXTURE0);
glDisable(GL_DEPTH_TEST);
glClearColor(0, 0, 0, 1);
}
void open()
{
std::ostringstream message;
/* Open the OpenCV capture using the device ID from the config. Device ID #0 is typically the default attached camera. */
int device_id = configuration()["scan"]["camera-device-id"];
capture.open(device_id);
if (capture.isOpened())
{
message << "Opened and initialized " << capture.get(cv::CAP_PROP_FRAME_WIDTH) << "x" <<
capture.get(cv::CAP_PROP_FRAME_HEIGHT) << ", " << capture.get(cv::CAP_PROP_FPS) <<
"fps video capture device ID #" << device_id << " using " << capture.getBackendName();
/* Check the config for a requested camera resolution, and if there is one, try applying it to the `cv::VideoCapture`. The
* requested resolution may not be available, and if so, `cv::VideoCapture` will choose a resolution. If the resulting
* resolution is different from the config value, print the resolution the capture device actually chose as well. */
if (configuration()["display"].contains("camera-resolution"))
{
capture.set(cv::CAP_PROP_FRAME_WIDTH, configuration()["display"]["camera-resolution"][0]);
capture.set(cv::CAP_PROP_FRAME_HEIGHT, configuration()["display"]["camera-resolution"][1]);
message << std::endl << "Requested resolution " << configuration()["display"]["camera-resolution"];
if (capture.get(cv::CAP_PROP_FRAME_WIDTH) != configuration()["display"]["camera-resolution"][0] ||
capture.get(cv::CAP_PROP_FRAME_HEIGHT) != configuration()["display"]["camera-resolution"][1])
{
message << " (but got " << capture.get(cv::CAP_PROP_FRAME_WIDTH) << "x" << capture.get(cv::CAP_PROP_FRAME_HEIGHT) << ")";
}
}
/* Request packed 24-bit RGB frames by building the 'RGB3' FOURCC manually (equivalent to
* cv::VideoWriter::fourcc('R', 'G', 'B', '3')). This is necessary for Android. */
capture.set(cv::CAP_PROP_FOURCC, (('R' & 0x000000FF) | (('G' << 8) & 0x0000FF00) | (('B' << 16) & 0x00FF0000) | (('3' << 24) & 0xFF000000)));
/* Generate a texture the size of the camera's resolution. Using GL_RGB8 (instead of GL_RGBA8) seems to be necessary on Android. */
camera_view.texture().generate({capture.get(cv::CAP_PROP_FRAME_WIDTH), capture.get(cv::CAP_PROP_FRAME_HEIGHT)}, GL_RGB8);
/* Create and detach a thread which will read frame data; it exits on its own once the camera switch is
* disconnected or the capture is closed */
std::thread camera_thread(&Camera::capture_frame, this);
camera_thread.detach();
}
else
{
message << "failed to open video capture device ID #" << device_id;
}
sb::Log::log(message);
}
void close()
{
capture.release();
}
/*!
* Read pixels from the camera into a `cv::Mat` and pre-process them if pre-processing methods are enabled.
*
* This function is meant to be launched in a separate thread, where it will run continuously. Set `new_frame_available` to `false` before
* loading camera frame data into the `cv::Mat` object at `camera_frame`, then set it back to `true` to indicate new frame data is available
* in `camera_frame`.
*/
void capture_frame()
{
/* When `camera_switch` is disconnected or the capture is closed, this loop exits and the thread automatically finishes execution. */
while (camera_switch && capture.isOpened())
{
/* The frame data in the `cv::Mat` at `camera_frame` is about to be modified by the rest of
* this loop iteration, so even if there is data stored there that hasn't been read yet, it should not
* be read by the main thread until this is set back to `true` at the end of the iteration. */
new_frame_available = false;
/* Load camera frame data into `cv::Mat` */
capture >> camera_frame;
/* Pre-process image for improved scan results */
if (!camera_frame.empty())
{
if (configuration()["scan"]["sharpen"])
{
/* Sharpen image for barcode detection */
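/* Unsharp masking: subtract a Gaussian-blurred copy from the frame to estimate edge detail, then add `amount`
* times that detail back in. Pixels whose local difference falls below `threshold` are restored from the
* original, so flat regions and sensor noise are not amplified. */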
float sigma = 1.0f, threshold = 5.0f, amount = 1.0f;
cv::GaussianBlur(camera_frame, blurred, cv::Size(), sigma, sigma);
low_contrast_mask = cv::abs(camera_frame - blurred) < threshold;
cv::Mat sharpened = camera_frame * (1.0f + amount) + blurred * (-amount);
/* Keep the original pixels wherever the difference from the blurred copy is below the threshold */
camera_frame.copyTo(sharpened, low_contrast_mask);
camera_frame = sharpened;
}
if (configuration()["scan"]["brighten"])
{
/* Brightness and contrast adjustment, see
* https://docs.opencv.org/2.4.13.7/doc/tutorials/core/basic_linear_transform/basic_linear_transform.html */
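/* `convertTo` applies g(i, j) = contrast * f(i, j) + brightness to every channel with saturation; -1 keeps the source depth */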
int brightness = configuration()["scan"]["brightness-addition"];
float contrast = configuration()["scan"]["contrast-multiplication"];
if (brightness != 0 || contrast != 1.0)
{
camera_frame.convertTo(camera_frame, -1, contrast, brightness);
}
}
/* Finished writing the new frame into `camera_frame`, so it is safe for the main thread to read. */
new_frame_available = true;
}
sb::Log::gl_errors("in capture, after capturing frame");
std::this_thread::sleep_for(std::chrono::milliseconds(50));
}
}
/* Update parameters and draw the screen */
void update()
{
sb::Log::gl_errors("at beginning of update");
/* Time in seconds the game has been running for */
float time_seconds = SDL_GetTicks() / 1000.0f;
if (new_frame_available)
{
/* Fill camera view texture memory */
camera_view.texture().bind();
/* Pixels from cv::VideoCapture are BGR on Linux, but not on Android (?) */
#if defined(__ANDROID__) || defined(ANDROID)
GLuint pixel_format = GL_RGB;
#else
GLuint pixel_format = GL_BGR;
#endif
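/* Uploading with the pixel format that matches the `cv::Mat` layout avoids converting the frame with `cv::cvtColor` on the CPU */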
camera_view.texture().load(camera_frame.ptr(), {camera_frame.cols, camera_frame.rows}, pixel_format, GL_UNSIGNED_BYTE);
/* Frame data has been processed, so there is not a new frame available anymore. */
new_frame_available = false;
}
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
/* flat shader uniforms for BG: time, texture ID, disabled HSV blend, scroll on */
glUniform1f(uniform["flat"]["time"], time_seconds);
glUniform1i(uniform["flat"]["texture"], 0);
glUniform3f(uniform["flat"]["blend"], 0.0f, 0.0f, 1.0f);
glUniform1i(uniform["flat"]["scroll"], true);
glUniformMatrix4fv(uniform["flat"]["transformation"], 1, GL_FALSE, &camera_view.transformation()[0][0]);
/* only draw if camera is enabled and open */
if (camera_switch && capture.isOpened())
{
/* bind texture for drawing */
camera_view.texture().bind();
camera_view.enable();
/* Draw the rectangle's vertices, sampling its texture with the UV coordinates */
glDrawArrays(GL_TRIANGLES, 0, camera_view.attributes("position")->count());
}
SDL_GL_SwapWindow(window());
sb::Log::gl_errors("at end of update");
}
};
/* Launch the mainloop */
int main()
{
Camera camera;
camera.run();
camera.quit();
return 0;
}