toggle camera

This commit is contained in:
frank 2021-07-07 21:56:04 -04:00
parent a3a6d61d20
commit e2eec46317
3 changed files with 75 additions and 34 deletions

View File

@ -10,7 +10,8 @@
"keys":
{
"print-video-memory-size": ["CTRL", "v"],
"print-frame-length-history": ["CTRL", "SHIFT", "h"]
"print-frame-length-history": ["CTRL", "SHIFT", "h"],
"toggle-camera": ["CTRL", "c"]
},
"recording":
{
@ -37,7 +38,7 @@
{
"json-save": true,
"json-save-directory": "local/scans",
"barcode": "",
"barcode": "4809010272331",
"capture-device": "/dev/video0"
},
"api":

View File

@ -30,27 +30,14 @@ Pudding::Pudding()
{
/* subscribe to command events */
get_delegate().subscribe(&Pudding::respond, this);
/* initialize an opencv capture device for getting images from an attached camera */
int device_id = 0;
capture.open(device_id);
std::stringstream message;
if (capture.isOpened())
{
message << "opened and initialized " << capture.get(cv::CAP_PROP_FRAME_WIDTH) << "x" <<
capture.get(cv::CAP_PROP_FRAME_HEIGHT) << ", " << capture.get(cv::CAP_PROP_FPS) <<
"fps video capture device ID #" << device_id << " using " << capture.getBackendName();
}
else
{
message << "failed to open video capture device ID #" << device_id;
}
log(message.str());
/* initialize a zbar image scanner for reading barcodes of any format */
image_scanner.set_config(zbar::ZBAR_NONE, zbar::ZBAR_CFG_ENABLE, 1);
/* use gl context so we can draw 3D pudding */
/* use gl context so we can draw 3D */
load_gl_context();
initialize_camera();
}
/* Create GL context via super class and load vertices, UV data, and shaders */
void Pudding::load_gl_context()
{
super::load_gl_context();
@ -93,21 +80,39 @@ void Pudding::load_gl_context()
glBindAttribLocation(world_program, 0, "in_Position");
glBindAttribLocation(world_program, 1, "vertexUV");
link_shader(world_program);
/* generate the texture that will store the video frame, allocate storage for a video frame, bind and edit texture properties */
glGenTextures(1, &video_capture_texture_id);
glBindTexture(GL_TEXTURE_2D, video_capture_texture_id);
glTexStorage2D(GL_TEXTURE_2D, 1, GL_RGB8, capture.get(cv::CAP_PROP_FRAME_WIDTH), capture.get(cv::CAP_PROP_FRAME_HEIGHT));
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
/* just need to set these once since we're drawing one texture to each viewport */
GLint base_texture_location = glGetUniformLocation(world_program, "baseTexture");
glUniform1i(base_texture_location, 0);
glActiveTexture(GL_TEXTURE0);
/* tell GL to use our shader */
glUseProgram(world_program);
log_gl_errors();
}
/* Try to create cv::VideoCapture object using device ID #0. If successful, this will create a GL texture for displaying
 * the frames, so it must be called after GL context has been created.
 *
 * Called once from the constructor and again each time the "toggle-camera" command re-enables the camera
 * after capture.release(). Logs one summary line either way; does not throw on failure to open the device.
 */
void Pudding::initialize_camera()
{
/* initialize an opencv capture device for getting images from an attached camera */
/* NOTE(review): device ID is hard-coded to 0 here while the config file carries a "capture-device" path
 * ("/dev/video0") — confirm whether this function should read the configured device instead */
int device_id = 0;
capture.open(device_id);
std::stringstream message;
if (capture.isOpened())
{
/* report the negotiated resolution, frame rate, and backend (e.g. V4L2) for this device */
message << "opened and initialized " << capture.get(cv::CAP_PROP_FRAME_WIDTH) << "x" <<
capture.get(cv::CAP_PROP_FRAME_HEIGHT) << ", " << capture.get(cv::CAP_PROP_FPS) <<
"fps video capture device ID #" << device_id << " using " << capture.getBackendName();
/* generate the texture that will store the video frame, allocate storage for a video frame, bind and edit texture properties */
/* NOTE(review): on every successful re-open a fresh texture name is generated, but the previously
 * generated name stored in video_capture_texture_id is never glDeleteTextures'd — toggling the camera
 * off and on appears to leak one GL texture per cycle; confirm and free the old name first */
glGenTextures(1, &video_capture_texture_id);
glBindTexture(GL_TEXTURE_2D, video_capture_texture_id);
/* glTexStorage2D allocates immutable storage sized to the reported frame dimensions; frame data is
 * presumably uploaded later via glTexSubImage2D each frame — upload site not visible in this chunk */
glTexStorage2D(GL_TEXTURE_2D, 1, GL_RGB8, capture.get(cv::CAP_PROP_FRAME_WIDTH), capture.get(cv::CAP_PROP_FRAME_HEIGHT));
/* nearest filtering: the frame is drawn 1:1-ish to a viewport, so no interpolation is needed */
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
}
else
{
/* opening can fail if the device is absent or busy; the app continues without camera display */
message << "failed to open video capture device ID #" << device_id;
}
log(message.str());
}
/* Respond to command events */
void Pudding::respond(SDL_Event& event)
{
@ -127,6 +132,17 @@ void Pudding::respond(SDL_Event& event)
{
get_current_item().increment_image_index(-1);
}
else if (get_delegate().compare(event, "toggle-camera"))
{
if (capture.isOpened())
{
capture.release();
}
else
{
initialize_camera();
}
}
}
/* Build an Item object by submitting the upc parameter to multiple APIs and taking
@ -153,9 +169,18 @@ void Pudding::add_item(const std::string& upc)
{
incorporate_best_buy_api(item);
}
items.push_back(item);
/* set item index to end so newest item will display */
current_item_index = items.size() - 1;
if (item.get_image_textures().size() > 0)
{
items.push_back(item);
/* set item index to end so newest item will display */
current_item_index = items.size() - 1;
}
else
{
std::ostringstream message;
message << "discarding item, no images found for " << upc;
log(message.str());
}
}
/* Look for item upc in the Open Food API, and use the result to fill out item properties if found
@ -182,7 +207,7 @@ void Pudding::incorporate_open_food_api(Item& item)
}
else
{
log("no results from Open Food API");
log("no results from Open Food");
}
}
@ -217,7 +242,7 @@ void Pudding::incorporate_nutronix_api(Item& item)
}
else
{
log("no results from Nutronix API");
log("no results from Nutronix");
}
}
@ -248,6 +273,10 @@ void Pudding::incorporate_edamam_api(Item& item)
}
save_item_json(json, item, "Edamam_API");
}
else
{
log("no results from Edamam");
}
}
/* Submit a query to the Best Buy API and insert relevant results into supplied Item object
@ -280,6 +309,10 @@ void Pudding::incorporate_best_buy_api(Item& item)
item.set_product_name(product.value("name", ""));
save_item_json(json, item, "Best_Buy_API");
}
else
{
log("no results from Best Buy");
}
}
/* Write submitted JSON to file, creating parent directories if necessary, and using item and
@ -458,12 +491,18 @@ void Pudding::update()
message << "read new barcode from config " << current_barcode;
log(message.str());
}
/* just need to set these once since we're drawing one texture to each viewport */
GLint base_texture_location = glGetUniformLocation(world_program, "baseTexture");
glUniform1i(base_texture_location, 0);
glActiveTexture(GL_TEXTURE0);
/* draw the current item image to the left half of the screen if items are available */
Box video_box = get_window_box();
if (items.size() > 0)
{
/* viewport width is based on whether we're displaying camera in addition to item */
float width = capture.isOpened() ? get_window_box().get_w() / 2.0f : get_window_box().get_w();
/* will display over entire viewport, so we draw the generic rectangle vertices */
glViewport(0, 0, get_window_box().get_w() / 2.0f, get_window_box().get_h());
glViewport(0, 0, width, get_window_box().get_h());
glBindTexture(GL_TEXTURE_2D, *get_current_item().get_active_image_texture().get());
glDrawArrays(GL_TRIANGLES, 0, 6);
log_gl_errors();

View File

@ -43,6 +43,7 @@ private:
GLuint vbo, world_program, video_capture_texture_id;
void load_gl_context();
void initialize_camera();
void incorporate_open_food_api(Item&);
void incorporate_nutronix_api(Item&);
void incorporate_edamam_api(Item&);