add 3d world view

frank 2021-07-21 02:11:02 -04:00
parent e2eec46317
commit fb7f5390be
5 changed files with 165 additions and 73 deletions


@@ -38,7 +38,7 @@
{
"json-save": true,
"json-save-directory": "local/scans",
"barcode": "4809010272331",
"barcode": "8001250120076",
"capture-device": "/dev/video0"
},
"api":
@@ -54,5 +54,9 @@
"edamam-enabled": true,
"open-food-enabled": true,
"best-buy-enabled": true
},
"pudding":
{
"rotation-speed": 0.005
}
}
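The new pudding section is read later in this commit, in Pudding::update(), via get_configuration()["pudding"]["rotation-speed"].get<float>(). A minimal standalone sketch of pulling that value out of the config, assuming an nlohmann::json-style parser (suggested by the .get<float>() call) and an illustrative file path:

#include <nlohmann/json.hpp>
#include <fstream>
#include <iostream>

int main()
{
    /* "local/config.json" is a placeholder path; the real loader lives in the app's configuration class */
    std::ifstream config_file("local/config.json");
    nlohmann::json config = nlohmann::json::parse(config_file);
    /* per-frame rotation applied to the 3D pudding model */
    float rotation_speed = config["pudding"]["rotation-speed"].get<float>();
    std::cout << "pudding rotation speed: " << rotation_speed << std::endl;
}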

@@ -1 +1 @@
Subproject commit 569e203409993ea1d34bff910b80f72adb47b690
Subproject commit 14759a1c79096f0c84ba1b48b7cb794b1c51f439


@@ -45,43 +45,80 @@ void Pudding::load_gl_context()
GLuint vao;
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
/* 2D vertices for the video capture texture that are a single plane spanning the screen */
std::array<glm::vec2, 6> rectangle_vertices = {
{
/* 2D vertices for any texture that is a plane spanning the screen */
std::array<glm::vec2, 6> rectangle_vertices = {{
{-1.0f, 1.0f}, {1.0f, 1.0f}, {-1.0f, -1.0f},
{1.0f, 1.0f}, {1.0f, -1.0f}, {-1.0f, -1.0f}
}};
/* UV map for mapping video capture texture to video capture vertices */
std::array<glm::vec2, 6> rectangle_uv = {
{
/* UV map for mapping a texture onto a plane */
std::array<glm::vec2, 6> rectangle_uv = {{
{0.0f, 1.0f}, {1.0f, 1.0f}, {0.0f, 0.0f},
{1.0f, 1.0f}, {1.0f, 0.0f}, {0.0f, 0.0f}
}};
/* generate one vertex buffer object to hold the camera texture and UV vertices */
/* 3D vertices for a cube */
std::array<glm::vec3, 36> cube_vertices_as_triangles = {{
{1, 1, 1}, {-1, 1, 1}, {-1,-1, 1},
{-1,-1, 1}, {1,-1, 1}, {1, 1, 1},
{1, 1, 1}, {1,-1, 1}, {1,-1,-1},
{1,-1,-1}, {1, 1,-1}, {1, 1, 1},
{1, 1, 1}, {1, 1,-1}, {-1, 1,-1},
{-1, 1,-1}, {-1, 1, 1}, {1, 1, 1},
{-1, 1, 1}, {-1, 1,-1}, {-1,-1,-1},
{-1,-1,-1}, {-1,-1, 1}, {-1, 1, 1},
{-1,-1,-1}, {1,-1,-1}, {1,-1, 1},
{1,-1, 1}, {-1,-1, 1}, {-1,-1,-1},
{1,-1,-1}, {-1,-1,-1}, {-1, 1,-1},
{-1, 1,-1}, {1, 1,-1}, {1,-1,-1}
}};
/* 3D vertices for a cube as line segments */
std::array<glm::vec3, 24> cube_vertices = {{
// front
{-1.0f, 1.0f, 1.0f}, {1.0f, 1.0f, 1.0f}, {1.0f, 1.0f, 1.0f}, {1.0f, -1.0f, 1.0f},
{1.0f, -1.0f, 1.0f}, {-1.0f, -1.0f, 1.0f}, {-1.0f, -1.0f, 1.0f}, {-1.0f, 1.0f, 1.0f},
//right
{1.0f, 1.0f, 1.0f}, {1.0f, 1.0f, -1.0f}, {1.0f, 1.0f, -1.0f}, {1.0f, -1.0f, -1.0f},
{1.0f, -1.0f, -1.0f}, {1.0f, -1.0f, 1.0f},
//back
{1.0f, 1.0f, -1.0f}, {-1.0f, 1.0f, -1.0f}, {-1.0f, -1.0f, -1.0f}, {1.0f, -1.0f, -1.0f},
{-1.0f, -1.0f, -1.0f}, {-1.0f, 1.0f, -1.0f},
//left
{-1.0f, 1.0f, -1.0f}, {-1.0f, 1.0f, 1.0f}, {-1.0f, -1.0f, 1.0f}, {-1.0f, -1.0f, -1.0f}
}};
/* Generate one vertex buffer object to hold the rectangle vertices, rectangle UV map, and cube vertices. Since
 * we're using one buffer, each block of data is copied in after the previous one, and the same byte offset is
 * passed to the matching vertex attribute pointer. */
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
/* allocate space for vertices and UV, copy vertices in at initialization */
GLsizeiptr vbo_size = (rectangle_vertices.size() + rectangle_uv.size()) * sizeof(glm::vec2);
/* allocate space for the rectangle vertices, UV map, and cube vertices, and copy the rectangle vertices in at initialization */
GLsizeiptr vbo_size = (rectangle_vertices.size() + rectangle_uv.size()) * sizeof(glm::vec2) + cube_vertices.size() * sizeof(glm::vec3);
glBufferData(GL_ARRAY_BUFFER, vbo_size, rectangle_vertices.data(), GL_STATIC_DRAW);
/* specify the location and data format of the vertex attributes as consecutive 2D float coords */
/* specify the rectangle vertex attributes as consecutive 2D float coords */
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 0, nullptr);
/* enable index 0 on currently bound VAO */
glEnableVertexAttribArray(0);
/* copy UV data into the VBO, offset to after the vertex data */
glBufferSubData(GL_ARRAY_BUFFER, rectangle_vertices.size() * sizeof(glm::vec2),
rectangle_uv.size() * sizeof(glm::vec2), rectangle_uv.data());
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, reinterpret_cast<GLvoid*>(rectangle_vertices.size() * sizeof(glm::vec2)));
glEnableVertexAttribArray(1);
/* copy rectangle UV data into the VBO, offset to after the vertex data, set up vertex attributes */
GLintptr offset = rectangle_vertices.size() * sizeof(glm::vec2);
glBufferSubData(GL_ARRAY_BUFFER, offset, rectangle_uv.size() * sizeof(glm::vec2), rectangle_uv.data());
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, reinterpret_cast<GLvoid*>(offset));
/* copy cube vertices into VBO, offset to after the rectangle UV, and set up vertex attributes for 3D */
offset = (rectangle_vertices.size() + rectangle_uv.size()) * sizeof(glm::vec2);
glBufferSubData(GL_ARRAY_BUFFER, offset, cube_vertices.size() * sizeof(glm::vec3), cube_vertices.data());
glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, 0, reinterpret_cast<GLvoid*>(offset));
/* Load two shader programs, one for rendering the flat objects, and one for rendering the 3D model. Load, configure,
* and link the flat shader program first. */
GLuint vertex_shader = load_shader("src/flat.vert", GL_VERTEX_SHADER);
GLuint fragment_shader = load_shader("src/flat.frag", GL_FRAGMENT_SHADER);
world_program = glCreateProgram();
glAttachShader(world_program, vertex_shader);
glAttachShader(world_program, fragment_shader);
glBindAttribLocation(world_program, 0, "in_Position");
glBindAttribLocation(world_program, 1, "vertexUV");
link_shader(world_program);
/* tell GL to use our shader */
glUseProgram(world_program);
flat_program = glCreateProgram();
glAttachShader(flat_program, vertex_shader);
glAttachShader(flat_program, fragment_shader);
glBindAttribLocation(flat_program, 0, "in_Position");
glBindAttribLocation(flat_program, 1, "vertexUV");
link_shader(flat_program);
/* load, configure and link the 3D world program */
vertex_shader = load_shader("src/mvp.vert", GL_VERTEX_SHADER);
mvp_program = glCreateProgram();
glAttachShader(mvp_program, vertex_shader);
glBindAttribLocation(mvp_program, 2, "in_Position");
link_shader(mvp_program);
mvp_id = glGetUniformLocation(mvp_program, "MVP");
log_gl_errors();
}
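All vertex data now shares a single VBO, filled by glBufferSubData at increasing offsets and exposed through three vertex attribute pointers. A minimal standalone sketch of the resulting byte layout, with array sizes taken from the hunk above:

#include <glm/glm.hpp>
#include <cstddef>
#include <iostream>

int main()
{
    /* layout of the shared VBO: [6 rectangle vertices][6 rectangle UVs][24 cube line vertices] */
    std::size_t uv_offset = 6 * sizeof(glm::vec2);                /* attribute 1; attribute 0 starts at 0 */
    std::size_t cube_offset = uv_offset + 6 * sizeof(glm::vec2);  /* attribute 2 */
    std::size_t vbo_size = cube_offset + 24 * sizeof(glm::vec3);
    /* prints 48, 96 and 384 with the usual 4-byte float */
    std::cout << "UV offset " << uv_offset << ", cube offset " << cube_offset
              << ", total VBO size " << vbo_size << " bytes" << std::endl;
}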
@@ -491,62 +528,100 @@ void Pudding::update()
message << "read new barcode from config " << current_barcode;
log(message.str());
}
/* just need to set these once since we're drawing one texture to each viewport */
GLint base_texture_location = glGetUniformLocation(world_program, "baseTexture");
glUniform1i(base_texture_location, 0);
glActiveTexture(GL_TEXTURE0);
/* draw the current item image to the left half of the screen if items are available */
Box video_box = get_window_box();
if (items.size() > 0)
/* viewport box will be used to tell GL where to draw */
Box viewport_box = get_window_box();
/* shrink viewport if item texture or camera will be displayed */
if (items.size() > 0 || capture.isOpened())
{
/* viewport width is based on whether we're displaying camera in addition to item */
float width = capture.isOpened() ? get_window_box().get_w() / 2.0f : get_window_box().get_w();
/* will display over entire viewport, so we draw the generic rectangle vertices */
glViewport(0, 0, width, get_window_box().get_h());
glBindTexture(GL_TEXTURE_2D, *get_current_item().get_active_image_texture().get());
glDrawArrays(GL_TRIANGLES, 0, 6);
log_gl_errors();
/* set camera display to right half of the screen */
video_box.set_left(get_window_box().get_center_x(), true);
viewport_box.set_right(get_window_box().get_center_x(), true);
}
/* draw the camera if the camera has been opened, fullscreen if there aren't any items, or on the right otherwise */
if (capture.isOpened())
glViewport(viewport_box.get_x(), viewport_box.get_y(), viewport_box.get_w(), viewport_box.get_h());
glClearColor(0, 1.0f, 0, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
/* draw pudding model using MVP shader */
glUseProgram(mvp_program);
/* calculate the transformation matrix for displaying pudding in viewport */
model = glm::rotate(model, weight(get_configuration()["pudding"]["rotation-speed"].get<float>()), Y_UNIT_NORMAL_3D);
projection = glm::perspective(glm::radians(45.0f), viewport_box.aspect(), 0.1f, 100.0f);
mvp = projection * VIEW_MATRIX * model;
/* pass the mvp matrix to the shader */
glUniformMatrix4fv(mvp_id, 1, GL_FALSE, &mvp[0][0]);
/* disable rectangle vertices and UV and enable pudding vertices */
glDisableVertexAttribArray(0);
glDisableVertexAttribArray(1);
glEnableVertexAttribArray(2);
/* draw pudding model as line segments (24 vertices, 12 cube edges) */
glDrawArrays(GL_LINES, 0, 24);
/* only do more drawing if items are downloaded or camera is enabled */
if (items.size() > 0 || capture.isOpened())
{
capture.read(capture_frame);
if (!capture_frame.empty())
/* switch to flat shader for item and camera */
glUseProgram(flat_program);
/* disable pudding vertices and enable rectangle */
glDisableVertexAttribArray(2);
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
/* just need to set these once since we're drawing one texture per viewport */
GLint base_texture_location = glGetUniformLocation(flat_program, "baseTexture");
glUniform1i(base_texture_location, 0);
glActiveTexture(GL_TEXTURE0);
/* move viewport to the right side of screen */
viewport_box.set_left(get_window_box().get_center_x());
/* draw the current item image if items have been downloaded */
if (items.size() > 0)
{
/* rotate the opencv matrix 180 to work with opengl coords */
cv::flip(capture_frame, capture_frame, -1);
glViewport(video_box.get_left(), 0, video_box.get_w(), video_box.get_h());
/* bind texture, binding it to accept pixel data and to GLSL sampler */
glBindTexture(GL_TEXTURE_2D, video_capture_texture_id);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, capture_frame.cols, capture_frame.rows, GL_BGR, GL_UNSIGNED_BYTE, capture_frame.ptr());
glDrawArrays(GL_TRIANGLES, 0, 6);
log_gl_errors();
/* convert to gray and scan with zbar */
cv::cvtColor(capture_frame, capture_frame, cv::COLOR_BGR2GRAY);
zbar::Image query_image(capture_frame.cols, capture_frame.rows, "Y800", static_cast<void*>(capture_frame.data),
capture_frame.cols * capture_frame.rows);
int result = image_scanner.scan(query_image);
if (result > 0)
/* shrink viewport to half size if camera will also be displayed */
if (capture.isOpened())
{
for (zbar::Image::SymbolIterator symbol = query_image.symbol_begin(); symbol != query_image.symbol_end(); ++symbol)
{
std::stringstream message;
message << "camera scanned " << symbol->get_type_name() << " symbol " << symbol->get_data();
log(message.str());
current_camera_barcode = symbol->get_data();
current_barcode = current_camera_barcode;
}
viewport_box.set_top(get_window_box().get_center_y(), true);
}
query_image.set_data(nullptr, 0);
glViewport(viewport_box.get_x(), viewport_box.get_y(), viewport_box.get_w(), viewport_box.get_h());
glBindTexture(GL_TEXTURE_2D, *get_current_item().get_active_image_texture().get());
/* draws rectangle vertices and rectangle texture using UV coords */
glDrawArrays(GL_TRIANGLES, 0, 6);
}
else
/* draw the camera if the camera has been opened */
if (capture.isOpened())
{
debug("video capture device frame empty");
capture.read(capture_frame);
viewport_box.set_top(get_window_box().get_top());
if (!capture_frame.empty())
{
/* rotate the opencv matrix 180 to work with opengl coords */
cv::flip(capture_frame, capture_frame, -1);
glViewport(viewport_box.get_x(), viewport_box.get_y(), viewport_box.get_w(), viewport_box.get_h());
/* bind texture, binding it to accept pixel data and to GLSL sampler */
glBindTexture(GL_TEXTURE_2D, video_capture_texture_id);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, capture_frame.cols, capture_frame.rows, GL_BGR, GL_UNSIGNED_BYTE, capture_frame.ptr());
/* draws rectangle vertices and rectangle texture using UV coords */
glDrawArrays(GL_TRIANGLES, 0, 6);
/* convert to gray and scan with zbar */
cv::cvtColor(capture_frame, capture_frame, cv::COLOR_BGR2GRAY);
zbar::Image query_image(capture_frame.cols, capture_frame.rows, "Y800", static_cast<void*>(capture_frame.data),
capture_frame.cols * capture_frame.rows);
int result = image_scanner.scan(query_image);
if (result > 0)
{
for (zbar::Image::SymbolIterator symbol = query_image.symbol_begin(); symbol != query_image.symbol_end(); ++symbol)
{
std::stringstream message;
message << "camera scanned " << symbol->get_type_name() << " symbol " << symbol->get_data();
log(message.str());
current_camera_barcode = symbol->get_data();
current_barcode = current_camera_barcode;
}
}
query_image.set_data(nullptr, 0);
}
else
{
debug("video capture device frame empty");
}
}
}
SDL_GL_SwapWindow(get_window());
log_gl_errors();
/* add a new item if a new barcode was scanned or entered */
if (current_barcode != previous_barcode)
{
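The matrix work update() does for the pudding model each frame amounts to: rotate the model a little, rebuild the projection for the current viewport, and multiply into one MVP. A minimal standalone GLM sketch of that math; the project's weight() frame-timing helper is not reproduced here, so the raw 0.005 rotation-speed from the config stands in for it, and the aspect ratio is an example value:

#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <iostream>

int main()
{
    /* fixed camera at (4, 3, 3) looking at the origin, matching VIEW_MATRIX in the header */
    const glm::vec3 origin(0.0f), y_axis(0.0f, 1.0f, 0.0f);
    const glm::mat4 view = glm::lookAt(glm::vec3(4.0f, 3.0f, 3.0f), origin, y_axis);
    glm::mat4 model(1.0f);
    float aspect = 640.0f / 720.0f;   /* example: left half of a 1280x720 window */
    for (int frame = 0; frame < 3; frame++)
    {
        /* accumulate a small rotation around Y; update() scales this step with weight() */
        model = glm::rotate(model, 0.005f, y_axis);
        glm::mat4 projection = glm::perspective(glm::radians(45.0f), aspect, 0.1f, 100.0f);
        glm::mat4 mvp = projection * view * model;
        /* where the cube corner at (1, 1, 1) ends up in clip space */
        glm::vec4 corner = mvp * glm::vec4(1.0f, 1.0f, 1.0f, 1.0f);
        std::cout << "frame " << frame << ": (" << corner.x << ", " << corner.y << ", "
                  << corner.z << ", " << corner.w << ")" << std::endl;
    }
}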


@@ -34,13 +34,17 @@ private:
const std::string BEST_BUY_API_URL_2 = ")?format=json&apiKey=";
const std::string NUTRONIX_NOT_FOUND = "resource not found";
const std::string GIANTBOMB_API_URL = "https://www.giantbomb.com/api/release/?api_key=";
const glm::vec3 ZERO_VECTOR_3D = glm::vec3(0, 0, 0);
const glm::vec3 Y_UNIT_NORMAL_3D = glm::vec3(0, 1, 0);
const glm::mat4 VIEW_MATRIX = glm::lookAt(glm::vec3(4, 3, 3), ZERO_VECTOR_3D, Y_UNIT_NORMAL_3D);
std::string current_barcode, previous_barcode, current_config_barcode, current_camera_barcode;
std::vector<Item> items;
int current_item_index = 0;
cv::VideoCapture capture;
cv::Mat capture_frame;
zbar::ImageScanner image_scanner;
GLuint vbo, world_program, video_capture_texture_id;
GLuint vbo, flat_program, mvp_program, video_capture_texture_id, mvp_id;
glm::mat4 projection, model = glm::mat4(1.0f), mvp;
void load_gl_context();
void initialize_camera();

src/mvp.vert (new file)

@@ -0,0 +1,9 @@
#version 130
in vec3 in_Position;
uniform mat4 MVP;
void main(void)
{
gl_Position = MVP * vec4(in_Position, 1);
}
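In this commit mvp_program is linked with only this vertex shader attached. If a dedicated fragment shader is wanted for the wireframe later, a minimal hypothetical pairing could look like the sketch below; the GLEW header, the flat white output color, and the compile_wireframe_fragment_shader() helper are all assumptions rather than part of this commit, and the project's own load_shader()/link_shader() helpers read shader source from files instead:

#include <GL/glew.h>

/* hypothetical flat-color fragment shader to pair with src/mvp.vert */
static const char* WIREFRAME_FRAGMENT_SOURCE = R"(
#version 130
out vec4 fragColor;
void main(void)
{
    fragColor = vec4(1.0, 1.0, 1.0, 1.0);
}
)";

/* hypothetical helper: compile the source above; the caller would attach the result
 * to mvp_program before link_shader(mvp_program) */
GLuint compile_wireframe_fragment_shader()
{
    GLuint shader = glCreateShader(GL_FRAGMENT_SHADER);
    glShaderSource(shader, 1, &WIREFRAME_FRAGMENT_SOURCE, nullptr);
    glCompileShader(shader);
    return shader;
}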