/* gunkiss/src/Pudding.cpp — excerpt of a larger file (~1516 lines, 63 KiB, C++) */

/* _______________ ,--------------------------------------------------------.
//`````````````\\ \ \
//~~~~~~~~~~~~~~~\\ \ by @ohsqueezy & @sleepin \
//=================\\ \ [ohsqueezy.itch.io] [sleepin.itch.io] \
// \\ \ \
// \\ \ zlib licensed code at [git.nugget.fun/nugget/gunkiss] \
// ☆ GUNKISS ☆ \\ \ \
//_________________________\\ `--------------------------------------------------------'
Generate a custom pudding from food product UPC codes and help a pair of rats take over the video game industry, using
their extraterrestrial ability to turn trash into performance enhancing drug puddings that enable business professionals
to predict the stock market with supernatural accuracy.
*/
#include "Pudding.hpp"
/* Entry point: construct the Pudding application, run its mainloop until the player quits,
 * then release its resources before returning to the OS. */
int main()
{
    Pudding pudding;
    pudding.run();
    pudding.quit();
    return 0;
}
#ifdef __EMSCRIPTEN__
/* Raise the flag the mainloop polls to learn a new camera frame is ready.  Presumably called
 * from the JavaScript side after the browser camera produces a frame — confirm in the JS glue. */
void flag_frame()
{
    new_frame_available = true;
}
/* Record the offset into the Emscripten heap where frame pixel data was written, so the C++
 * side can locate it.  NOTE(review): assumes the caller (JS) keeps that region valid until
 * the frame is consumed — confirm. */
void set_heap_offset(int offset)
{
    emscripten_heap_offset = offset;
}
#endif
/* Initialize a Pudding instance: subscribe to input events, configure the barcode scanner,
 * build the 3D pudding mesh from configuration values, create the GL context, and set up
 * the camera view, background tiles, buttons, and mouse cursor. */
Pudding::Pudding()
{
    /* subscribe to command events (generic commands plus raw mouse motion and button events) */
    get_delegate().subscribe(&Pudding::respond, this);
    get_delegate().subscribe(&Pudding::respond, this, SDL_MOUSEMOTION);
    get_delegate().subscribe(&Pudding::respond, this, SDL_MOUSEBUTTONDOWN);
    /* initialize a zbar image scanner for reading barcodes of any format */
    image_scanner.set_config(zbar::ZBAR_NONE, zbar::ZBAR_CFG_ENABLE, 1);
    /* set up pudding model using dimensions from the "pudding" section of the configuration */
    nlohmann::json pudding = configuration()["pudding"];
    load_pudding_model(pudding["top-radius"], pudding["base-radius"], pudding["ring-vertex-count"], pudding["layer-count"],
                       pudding["y-range"][0], pudding["y-range"][1], pudding["gradient-position"]);
    /* loading GL context instead of SDL context for 3D */
    load_gl_context();
    /* Add a texture to the camera Plane for storing frame image data */
    camera_view.texture(sb::Texture());
    /* negate the Y axis of the camera view's transformation — presumably to compensate for
     * camera frames arriving with a top-left origin while GL uses bottom-left; confirm */
    glm::mat4 flip = glm::mat4(1);
    flip[1][1] = -1;
    camera_view.transformation(flip);
    /* Load background tiles */
    load_tiles();
    /* Load button graphics and create button objects */
    load_pads();
    /* Load a pointer cursor from the system library that will be freed automatically */
    poke = std::shared_ptr<SDL_Cursor>(SDL_CreateSystemCursor(SDL_SYSTEM_CURSOR_HAND), SDL_FreeCursor);
}
/* Assign vertices, colors and texture UV coordinates to the pudding model */
void Pudding::load_pudding_model(float top_radius, float base_radius, int ring_vertex_count, int layer_count, float min_y,
float max_y, float gradient_position)
{
std::size_t ii;
const glm::vec3 *layer_top_color, *layer_bottom_color;
const glm::vec2 *start_vertex, *end_vertex;
float layer_top_y, layer_top_percent, layer_base_y, layer_base_percent, u_step = 1.0f / ring_vertex_count, ring_start_vertex_u;
std::vector<glm::vec2> layer_top_ring, layer_base_ring;
layer_top_ring.reserve(ring_vertex_count);
layer_base_ring.reserve(ring_vertex_count);
/* y coordinates of each ring of vertices in the pudding */
const std::map<float, float> y_coords = sb::range_percent_count(max_y, min_y, layer_count + 1);
/* loop through layers by looking at each layer's top and bottom rings simultaneously */
for (
auto layer_top_entry = y_coords.begin(), layer_base_entry = ++y_coords.begin();
layer_base_entry != y_coords.end();
layer_top_entry++, layer_base_entry++
)
{
layer_top_y = layer_top_entry->second;
layer_top_percent = layer_top_entry->first;
layer_base_y = layer_base_entry->second;
layer_base_percent = layer_base_entry->first;
layer_top_ring.clear();
layer_base_ring.clear();
sb::points_on_circle(layer_top_ring, ring_vertex_count, layer_top_percent * (base_radius - top_radius) + top_radius);
sb::points_on_circle(layer_base_ring, ring_vertex_count, layer_base_percent * (base_radius - top_radius) + top_radius);
/* layers above gradient position are brown, layers below are yellow, and the layer that contains gradient positon
* is a gradient from brown to yellow */
if (layer_top_percent <= gradient_position && layer_base_percent > gradient_position)
{
layer_top_color = &PUDDING_BROWN;
layer_bottom_color = &PUDDING_YELLOW;
}
else if (layer_top_percent <= gradient_position)
{
layer_top_color = &PUDDING_BROWN;
layer_bottom_color = &PUDDING_BROWN;
}
else
{
layer_top_color = &PUDDING_YELLOW;
layer_bottom_color = &PUDDING_YELLOW;
}
/* u coordinate will increase toward 1.0f as we go around the ring */
ring_start_vertex_u = 0.0f;
for (ii = 0; ii < layer_top_ring.size(); ii++)
{
/* triangle that includes top two vertices and first base vertex */
start_vertex = &layer_top_ring[ii];
end_vertex = &layer_top_ring[(ii + 1) % layer_top_ring.size()];
pudding_model["position"]->add(start_vertex->x, layer_top_y, start_vertex->y);
pudding_model["uv"]->add(ring_start_vertex_u, layer_top_percent);
pudding_model["position"]->add(end_vertex->x, layer_top_y, end_vertex->y);
pudding_model["uv"]->add(ring_start_vertex_u + u_step, layer_top_percent);
pudding_model["color"]->extend(*layer_top_color, 2);
pudding_model["position"]->add(layer_base_ring[ii].x, layer_base_y, layer_base_ring[ii].y);
pudding_model["uv"]->add(ring_start_vertex_u, layer_base_percent);
pudding_model["color"]->add(*layer_bottom_color);
/* triangle that includes bottom two vertices and second top vertex */
start_vertex = &layer_base_ring[ii];
pudding_model["position"]->add(start_vertex->x, layer_base_y, start_vertex->y);
pudding_model["uv"]->add(ring_start_vertex_u, layer_base_percent);
pudding_model["color"]->add(*layer_bottom_color);
pudding_model["position"]->add(end_vertex->x, layer_top_y, end_vertex->y);
pudding_model["uv"]->add(ring_start_vertex_u + u_step, layer_top_percent);
pudding_model["color"]->add(*layer_top_color);
end_vertex = &layer_base_ring[(ii + 1) % layer_base_ring.size()];
pudding_model["position"]->add(end_vertex->x, layer_base_y, end_vertex->y);
pudding_model["uv"]->add(ring_start_vertex_u + u_step, layer_base_percent);
pudding_model["color"]->add(*layer_bottom_color);
ring_start_vertex_u += u_step;
}
}
pudding_triangle_vertex_count = pudding_model["position"]->count();
/* process the top and bottom of pudding, filling each face with a triangle fan */
float y = max_y;
const glm::vec3* face_color = &PUDDING_BROWN;
Box texture_box = Box({0, 0}, {1, 1});
for (float radius : {top_radius, base_radius})
{
/* first point in a GL_TRIANGLE_FAN is the center */
pudding_model["position"]->add(0.0f, y, 0.0f);
pudding_model["uv"]->add(0.0f, 0.0f);
layer_top_ring.clear();
sb::points_on_circle(layer_top_ring, ring_vertex_count, radius);
/* loop through points on the face */
for (ii = 0; ii < layer_top_ring.size(); ii++)
{
start_vertex = &layer_top_ring[ii];
/* for GL_TRIANGLE_FAN we just need to add an outer vertex */
pudding_model["position"]->add(start_vertex->x, y, start_vertex->y);
pudding_model["uv"]->add(*start_vertex);
/* connect the ring on the last vertex */
if (ii == layer_top_ring.size() - 1)
{
end_vertex = &layer_top_ring[(ii + 1) % layer_top_ring.size()];
pudding_model["position"]->add(end_vertex->x, y, end_vertex->y);
pudding_model["uv"]->add(*end_vertex);
}
}
/* single color for the entire layer_top_ring */
pudding_model["color"]->extend(*face_color, layer_top_ring.size() + 2);
y = min_y;
face_color = &PUDDING_YELLOW;
}
pudding_fan_vertex_count = (pudding_model["position"]->count() - pudding_triangle_vertex_count) / 2;
}
/* Create GL context via super class and load vertices, UV data, and shaders.  Builds two GL
 * programs — "flat" for 2D planes and "mvp" for the 3D pudding — binds their vertex
 * attributes, uploads all attribute data into one VBO, links, and caches uniform locations. */
void Pudding::load_gl_context()
{
    super::load_gl_context();
    /* Generate a vertex array object ID, bind it as current (requirement of OpenGL) */
    vao.generate();
    vao.bind();
    /* Generate ID for the vertex buffer object that will hold all vertex data. Using one buffer for all attributes, data
     * will be copied in one after the other. */
    vbo.generate();
    vbo.bind();
    /* Load two shader programs, one for rendering the flat objects, and one for rendering the 3D model. Load and configure
     * the flat shader program first. */
    GLuint vertex_shader = load_shader("src/shaders/flat.vert", GL_VERTEX_SHADER);
    GLuint fragment_shader = load_shader("src/shaders/flat.frag", GL_FRAGMENT_SHADER);
    flat_program = glCreateProgram();
    glAttachShader(flat_program, vertex_shader);
    glAttachShader(flat_program, fragment_shader);
    /* attribute locations 0-1 belong to the flat program; binding must precede linking */
    Plane::position->bind(0, flat_program, "in_position");
    Plane::uv->bind(1, flat_program, "vertex_uv");
    /* load, configure and link the 3D world program */
    vertex_shader = load_shader("src/shaders/mvp.vert", GL_VERTEX_SHADER);
    fragment_shader = load_shader("src/shaders/mvp.frag", GL_FRAGMENT_SHADER);
    mvp_program = glCreateProgram();
    glAttachShader(mvp_program, vertex_shader);
    glAttachShader(mvp_program, fragment_shader);
    /* attribute locations 2-4 carry the pudding mesh attributes for the MVP program */
    pudding_model.attributes("position")->bind(2, mvp_program, "vertex_position");
    pudding_model.attributes("uv")->bind(3, mvp_program, "vertex_uv");
    pudding_model.attributes("color")->bind(4, mvp_program, "vertex_color");
    sb::Log::gl_errors("after loading shaders");
    /* Fill VBO with attribute data: one allocation sized for the background planes plus the
     * pudding model, then each attribute list copied in sequentially */
    vbo.allocate(background.size() + pudding_model.size(), GL_STATIC_DRAW);
    vbo.add(*Plane::position);
    vbo.add(*Plane::uv);
    vbo.add(*pudding_model.attributes("uv"));
    vbo.add(*pudding_model.attributes("position"));
    vbo.add(*pudding_model.attributes("color"));
    sb::Log::gl_errors("after filling VBO");
    /* link shaders */
    link_shader(flat_program);
    link_shader(mvp_program);
    sb::Log::gl_errors("after linking");
    /* store uniform locations after linking (lookups are only valid on a linked program) */
    uniform["flat"]["texture"] = glGetUniformLocation(flat_program, "base_texture");
    uniform["flat"]["time"] = glGetUniformLocation(flat_program, "time");
    uniform["flat"]["scroll"] = glGetUniformLocation(flat_program, "scroll");
    uniform["flat"]["blend"] = glGetUniformLocation(flat_program, "blend_min_hsv");
    uniform["flat"]["transformation"] = glGetUniformLocation(flat_program, "transformation");
    uniform["mvp"]["mvp"] = glGetUniformLocation(mvp_program, "mvp");
    uniform["mvp"]["time"] = glGetUniformLocation(mvp_program, "time");
    uniform["mvp"]["effect"] = glGetUniformLocation(mvp_program, "effect");
    uniform["mvp"]["uv transformation"] = glGetUniformLocation(mvp_program, "uv_transformation");
    uniform["mvp"]["coordinate bound"] = glGetUniformLocation(mvp_program, "coordinate_bound");
    uniform["mvp"]["pudding texture"] = glGetUniformLocation(mvp_program, "pudding_texture");
    /* enable alpha rendering */
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
    glEnable(GL_BLEND);
    sb::Log::gl_errors("after uniform locations");
}
/* Read every jpg in the folder at tile path into a GL texture and associate with the background object. */
void Pudding::load_tiles()
{
for (fs::path path : sb::glob(configuration()["resource"]["tile-path"].get<fs::path>() / ".*.jpg"))
{
sb::Texture texture {path};
texture.load();
background.texture(texture, path);
}
}
/* Load every png in the button path as a Texture and add to a map. */
void Pudding::load_pads()
{
for (fs::path path : sb::glob(configuration()["resource"]["button-path"].get<fs::path>() / ".*.png"))
{
labels[path.stem()] = sb::Texture(path);
labels[path.stem()].load();
}
nlohmann::json interface = configuration()["interface"];
camera_button.texture(labels["scan"]);
camera_button.translation({interface["main-button-single-x"], interface["main-button-y"]});
camera_button.scale(interface["main-button-scale"], window_box().aspect());
inventory_button.texture(labels["inventory"]);
inventory_button.translation({interface["main-button-double-x"], interface["main-button-y"]});
inventory_button.scale(interface["main-button-scale"], window_box().aspect());
previous_button.texture(labels["arrow"]);
previous_button.translation(glm::vec2({-1, 1}) * interface["arrow-button-location"].get<glm::vec2>());
previous_button.scale(interface["arrow-button-scale"], window_box().aspect());
next_button.texture(labels["arrow"]);
next_button.translation(interface["arrow-button-location"]);
next_button.scale(interface["arrow-button-scale"], window_box().aspect());
next_button.rotation(glm::radians(180.0f));
}
/* Start camera capture.  On native builds, open the configured OpenCV capture device, apply
 * the requested resolution from config if present, size the camera view's texture to the
 * capture resolution, and launch a detached thread that reads frames via capture_frame.  On
 * Emscripten builds, delegate to the JavaScript open_camera() helper. */
void Pudding::open_camera()
{
#ifndef __EMSCRIPTEN__
    /* Open the OpenCV capture, using device ID #0 to get the default attached camera. */
    int device_id = configuration()["scan"]["camera-device-id"];
    capture.open(device_id);
    std::ostringstream message;
    if (capture.isOpened())
    {
        message << "Opened and initialized " << capture.get(cv::CAP_PROP_FRAME_WIDTH) << "x" <<
            capture.get(cv::CAP_PROP_FRAME_HEIGHT) << ", " << capture.get(cv::CAP_PROP_FPS) <<
            "fps video capture device ID #" << device_id << " using " << capture.getBackendName();
        /* Check config for a requested camera resolution, and if there is one, try applying it to the `cv::VideoCapture`. The
         * requested resolution may not be available, and if so, `cv::VideoCapture` will choose a resolution. If the resulting
         * resolution is different from the config value, print the resolution the capture device was set to instead. */
        if (configuration()["display"].contains("camera-resolution"))
        {
            capture.set(cv::CAP_PROP_FRAME_WIDTH, configuration()["display"]["camera-resolution"][0]);
            capture.set(cv::CAP_PROP_FRAME_HEIGHT, configuration()["display"]["camera-resolution"][1]);
            message << std::endl << "Changed resolution to " << configuration()["display"]["camera-resolution"];
            if (capture.get(cv::CAP_PROP_FRAME_WIDTH) != configuration()["display"]["camera-resolution"][0] ||
                capture.get(cv::CAP_PROP_FRAME_HEIGHT) != configuration()["display"]["camera-resolution"][1])
            {
                message << " (but got " << capture.get(cv::CAP_PROP_FRAME_WIDTH) << "x" << capture.get(cv::CAP_PROP_FRAME_HEIGHT) << ")";
            }
        }
        /* Generate a texture the size of the camera's resolution. */
        camera_view.texture().generate({capture.get(cv::CAP_PROP_FRAME_WIDTH), capture.get(cv::CAP_PROP_FRAME_HEIGHT)});
        /* Create and detach a thread which will read frame data.  NOTE(review): a detached
         * thread cannot be joined at shutdown — presumably it exits when close_camera()
         * releases the capture; confirm there is no race on the capture object. */
        std::thread camera_thread(&Pudding::capture_frame, this);
        camera_thread.detach();
    }
    else
    {
        message << "failed to open video capture device ID #" << device_id;
    }
    sb::Log::log(message);
#else
    emscripten_run_script("open_camera()");
#endif
}
/* Stop camera capture.  On native builds, release the OpenCV capture device (presumably
 * letting the detached capture thread wind down — confirm in capture_frame).  On Emscripten
 * builds, delegate to the JavaScript close_camera() helper and clear the frame flag. */
void Pudding::close_camera()
{
#ifndef __EMSCRIPTEN__
    capture.release();
#else
    emscripten_run_script("close_camera()");
    new_frame_available = false;
#endif
}
/* Respond to command events and raw mouse events.
 *
 * Commands: "up"/"down" cycle the item carousel, "left"/"right" cycle the current item's
 * textures, "toggle-camera"/"toggle-item" flip the camera and item display states, "effect"
 * advances the shader effect (wrapping at EFFECT_COUNT), and "tile" advances the background.
 * Mouse motion swaps in the pokey-finger cursor over any clickable region; mouse clicks open
 * or close the camera and inventory views or page through item textures via the arrows. */
void Pudding::respond(SDL_Event& event)
{
    if (get_delegate().compare(event, "up"))
    {
        item_carousel.next(items);
    }
    else if (get_delegate().compare(event, "right"))
    {
        if (items.size() > 0)
        {
            current_item().next_texture();
        }
    }
    else if (get_delegate().compare(event, "down"))
    {
        item_carousel.previous(items);
    }
    else if (get_delegate().compare(event, "left"))
    {
        if (items.size() > 0)
        {
            current_item().previous_texture();
        }
    }
    else if (get_delegate().compare(event, "toggle-camera"))
    {
        camera_switch.toggle();
    }
    else if (get_delegate().compare(event, "toggle-item"))
    {
        show_item = !show_item;
    }
    /* The effect command switches the active effect to the next in the list, wrapping around at the end */
    else if (get_delegate().compare(event, "effect"))
    {
        /* was `effect_id = ++effect_id % EFFECT_COUNT`, which modifies effect_id twice in one
         * expression (undefined behavior before C++17); this form is equivalent and well defined */
        effect_id = (effect_id + 1) % EFFECT_COUNT;
        glUseProgram(mvp_program);
        glUniform1i(uniform["mvp"]["effect"], effect_id);
    }
    else if (get_delegate().compare(event, "tile"))
    {
        background.next();
    }
    /* Mouse interface */
    else if (event.type == SDL_MOUSEMOTION || event.type == SDL_MOUSEBUTTONDOWN)
    {
        /* Get the secondary window viewport dimensions in NDC and pixel resolution for sizing the arrow buttons and transforming
         * the mouse coordinates. */
        Box viewport_ndc = sb::Display::ndc;
        /* Drag viewport completely closed to the bottom of the screen */
        viewport_ndc.top(viewport_ndc.bottom(), true);
        nlohmann::json interface = configuration()["interface"];
        /* Drag viewport back up the height of the pop-up window */
        viewport_ndc.drag_top(interface["pop-up-viewport-height"]);
        /* Get the viewport in pixel resolution to size the buttons to be square inside the viewport */
        Box viewport_pixel = get_display().ndc_to_pixel(viewport_ndc);
        /* Get mouse coordinates in NDC and pixel resolution in both main window and secondary */
        glm::vec2 mouse_pixel = event.type == SDL_MOUSEBUTTONDOWN ? glm::vec2{event.button.x, event.button.y} :
            glm::vec2{event.motion.x, event.motion.y};
        glm::vec2 mouse_ndc {
            float(mouse_pixel.x) / window_box().width() * 2.0f - 1.0f, (1.0f - float(mouse_pixel.y) / window_box().height()) * 2.0f - 1.0f
        };
        glm::vec2 mouse_viewport_ndc {
            mouse_ndc.x, (1.0f - (float(mouse_pixel.y) - float(viewport_pixel.top())) / viewport_pixel.height()) * 2.0f - 1.0f
        };
        /* each flag is true when its widget is visible in the current state and under the mouse */
        bool over_camera_button = !camera_switch && !item_display_active() && camera_button.collide(mouse_ndc),
            over_inventory_button = items.size() > 0 && !item_display_active() && !camera_switch && inventory_button.collide(mouse_ndc),
            over_close_area = (camera_switch || item_display_active()) && get_display().ndc_subsection(main_viewport).collide(mouse_ndc),
            over_previous_button = item_display_active() && previous_button.collide(mouse_viewport_ndc),
            over_next_button = item_display_active() && next_button.collide(mouse_viewport_ndc);
        /* Check for collisions with anything clickable */
        if (over_camera_button || over_inventory_button || over_close_area || over_previous_button || over_next_button)
        {
            /* Set cursor to pokey finger */
            if (SDL_GetCursor() != poke.get())
            {
                SDL_SetCursor(poke.get());
            }
            /* Respond to a click */
            if (event.type == SDL_MOUSEBUTTONDOWN)
            {
                if (over_camera_button || over_inventory_button || over_close_area)
                {
                    /* Reset cursor to default arrow */
                    SDL_SetCursor(SDL_GetDefaultCursor());
                    if (over_camera_button)
                    {
                        camera_switch.connect();
#ifndef __EMSCRIPTEN__
                        /* If the camera did not open, this failed, so unflip the switch */
                        if (!capture.isOpened())
                        {
                            camera_switch.disconnect();
                        }
#endif
                    }
                    else if (over_inventory_button)
                    {
                        show_item = true;
                        /* Scale buttons according to viewport that is going to open */
                        next_button.scale(interface["arrow-button-scale"], viewport_pixel.aspect());
                        previous_button.scale(interface["arrow-button-scale"], viewport_pixel.aspect());
                    }
                    else if (over_close_area)
                    {
                        camera_switch.disconnect();
                        show_item = false;
                    }
                }
                else
                {
                    /* Handle arrow buttons: advance or rewind the current item's texture,
                     * rolling over to the neighboring item at either end */
                    if (over_next_button)
                    {
                        if (current_item().at_last())
                        {
                            item_carousel.next(items);
                            current_item().to_first();
                        }
                        else
                        {
                            current_item().next_texture();
                        }
                    }
                    else
                    {
                        /* only over_previous_button can be set here */
                        if (current_item().at_first())
                        {
                            item_carousel.previous(items);
                            current_item().to_last();
                        }
                        else
                        {
                            current_item().previous_texture();
                        }
                    }
                }
            }
        }
        else if (SDL_GetCursor() == poke.get())
        {
            SDL_SetCursor(SDL_GetDefaultCursor());
        }
    }
}
/* Build an Item object by submitting the upc parameter to multiple APIs and taking
* relevant results from each. Result JSON will be saved if saving is enabled in the global
* configuration
*/
void Pudding::add_item(const std::string& upc)
{
/* Store the UPC code in the incoming item object */
incoming_item.upc(upc);
if (configuration()["api"]["open-food-enabled"])
{
web_get_bytes(OPEN_FOOD_API_URL + upc, std::bind(&Pudding::incorporate_open_api, this, std::placeholders::_1, std::placeholders::_2));
}
if (configuration()["api"]["open-products-enabled"])
{
web_get_bytes(OPEN_PRODUCTS_API_URL + upc, std::bind(&Pudding::incorporate_open_api, this, std::placeholders::_1, std::placeholders::_2));
}
if (configuration()["api"]["nutritionix-enabled"])
{
/* Nutritionix requires API keys in headers for validation */
web_get_bytes(NUTRITIONIX_API_URL + upc, std::bind(&Pudding::incorporate_nutritionix_api, this, std::placeholders::_1, std::placeholders::_2), {
"x-app-id", configuration()["api"]["nutritionix-app-id"].get<std::string>(),
"x-app-key", configuration()["api"]["nutritionix-app-key"].get<std::string>()
});
}
if (configuration()["api"]["edamam-enabled"])
{
/* Build API request by concatenating URL and query string */
std::stringstream url;
url << "https://api.edamam.com/api/food-database/v2/parser?upc=" << upc << "&app_id=" <<
configuration()["api"]["edamam-app-id"].get<std::string>() << "&app_key=" <<
configuration()["api"]["edamam-app-key"].get<std::string>();
web_get_bytes(url.str(), std::bind(&Pudding::incorporate_edamam_api, this, std::placeholders::_1, std::placeholders::_2));
}
if (configuration()["api"]["best-buy-enabled"])
{
/* Build API request by concatenating URL and query string */
std::stringstream url;
url << BEST_BUY_API_URL_1 << upc << BEST_BUY_API_URL_2 << configuration()["api"]["best-buy-api-key"].get<std::string>();
web_get_bytes(url.str(), std::bind(&Pudding::incorporate_best_buy_api, this, std::placeholders::_1, std::placeholders::_2));
}
if (configuration()["api"]["google-books-enabled"])
{
std::stringstream url;
url << GOOGLE_BOOKS_API_URL << upc << "&key=" << configuration()["api"]["google-books-api-key"].get<std::string>();
web_get_bytes(url.str(), std::bind(&Pudding::incorporate_google_books_api, this, std::placeholders::_1, std::placeholders::_2));
}
}
/* Incorporate an Open Food Facts / Open Products Facts response into the incoming item:
 * parse the bytes as JSON, and when a product is present, request its image (if any), copy
 * the brand and product names, and save the JSON if saving is enabled.  The url parameter
 * identifies the request and is unused here. */
void Pudding::incorporate_open_api(const std::vector<std::uint8_t>& json_bytes, const std::string& url)
{
    std::ostringstream message;
    /* report size in kilobytes: bytes / 1000 (was erroneously divided by 100) */
    message << "Processing " << (json_bytes.size() / 1000.0) << "KB from the Open Food/Products API";
    sb::Log::log(message);
    /* Use the nlohmann library to parse the raw JSON byte data retrieved from a web request. */
    nlohmann::json json = nlohmann::json::parse(json_bytes);
    std::stringstream json_formatted;
    json_formatted << std::setw(4) << json << std::endl;
    sb::Log::log(json_formatted.str(), sb::Log::DEBUG);
    /* Test that should determine if an Open Food API response is not empty */
    if (json.value("status", 0) && json.contains("product"))
    {
        /* one stream logged exactly once below; previously the image branch declared a
         * shadowing stream, causing an empty extra line to be logged when an image was found */
        std::ostringstream found_message;
        if (json["product"].value("image_url", "") != "")
        {
            std::string image_url = json["product"]["image_url"];
            found_message << "Found image URL for item " << incoming_item << " from Open API at " << image_url;
            web_get_bytes(image_url, std::bind(&Pudding::store_web_image, this, std::placeholders::_1, std::placeholders::_2));
        }
        else
        {
            found_message << "No images found at Open API for " << incoming_item;
        }
        sb::Log::log(found_message);
        incoming_item.brand_name(json["product"].value("brands", ""));
        incoming_item.product_name(json["product"].value("product_name", ""));
        save_item_json(json, incoming_item, "Open_Food_and_Products_API");
    }
    else
    {
        sb::Log::log("No item found in JSON from Open API");
    }
}
/* Incorporate a Nutritionix API response into the incoming item: parse the bytes as JSON,
 * and when a food entry is present, request its thumbnail (if any), copy the brand and food
 * names, and save the JSON if saving is enabled.  The url parameter is unused here. */
void Pudding::incorporate_nutritionix_api(const std::vector<std::uint8_t>& json_bytes, const std::string& url)
{
    std::ostringstream message;
    /* report size in kilobytes: bytes / 1000 (was erroneously divided by 100) */
    message << "Processing " << (json_bytes.size() / 1000.0) << "KB from the Nutritionix API";
    sb::Log::log(message);
    /* Use the nlohmann library to parse the raw JSON byte data retrieved from a web request. */
    nlohmann::json json = nlohmann::json::parse(json_bytes);
    std::stringstream json_formatted;
    json_formatted << std::setw(4) << json << std::endl;
    sb::Log::log(json_formatted.str(), sb::Log::DEBUG);
    /* test that should determine if a Nutritionix response is not empty; additionally require
     * a non-empty "foods" array so the [0] access below cannot hit a missing or empty list */
    if (!(json.contains("message") && json["message"] == NUTRITIONIX_NOT_FOUND) &&
        json.contains("foods") && !json["foods"].empty())
    {
        nlohmann::json food = json["foods"][0];
        std::ostringstream found_message;
        if (food.contains("photo") && food["photo"].value("thumb", "") != "")
        {
            std::string image_url = food["photo"]["thumb"];
            found_message << "Found image URL for item " << incoming_item << " from Nutritionix at " << image_url;
            web_get_bytes(image_url, std::bind(&Pudding::store_web_image, this, std::placeholders::_1, std::placeholders::_2));
        }
        else
        {
            found_message << "No images found at Nutritionix for " << incoming_item;
        }
        sb::Log::log(found_message);
        incoming_item.brand_name(food.value("brand_name", ""));
        incoming_item.product_name(food.value("food_name", ""));
        save_item_json(json, incoming_item, "Nutritionix_API");
    }
    else
    {
        sb::Log::log("no results from Nutritionix");
    }
}
/* Incorporate an Edamam API response into the incoming item: parse the bytes as JSON, and
 * when food data is present in the first hint, request its image (if any), copy the label as
 * the product name, and save the JSON if saving is enabled.  The url parameter is unused. */
void Pudding::incorporate_edamam_api(const std::vector<std::uint8_t>& json_bytes, const std::string& url)
{
    std::ostringstream message;
    /* report size in kilobytes: bytes / 1000 (was erroneously divided by 100) */
    message << "Processing " << (json_bytes.size() / 1000.0) << "KB from the Edamam API";
    sb::Log::log(message);
    /* Use the nlohmann library to parse the raw JSON byte data retrieved from a web request. */
    nlohmann::json json = nlohmann::json::parse(json_bytes);
    std::stringstream json_formatted;
    json_formatted << std::setw(4) << json << std::endl;
    sb::Log::log(json_formatted.str(), sb::Log::DEBUG);
    /* test that should determine if a Edamam response has food data */
    if (json.contains("hints") && json["hints"][0].contains("food"))
    {
        nlohmann::json food = json["hints"][0]["food"];
        std::ostringstream found_message;
        if (food.value("image", "") != "")
        {
            std::string image_url = food["image"];
            found_message << "Found URL to image for item " << incoming_item << " from Edamam at " << image_url;
            web_get_bytes(image_url, std::bind(&Pudding::store_web_image, this, std::placeholders::_1, std::placeholders::_2));
        }
        else
        {
            found_message << "No images found at Edamam for " << incoming_item;
        }
        sb::Log::log(found_message);
        incoming_item.product_name(food.value("label", ""));
        save_item_json(json, incoming_item, "Edamam_API");
    }
    else
    {
        sb::Log::log("no results from Edamam");
    }
}
/* Incorporate a Best Buy API response into the incoming item: parse the bytes as JSON, and
 * when at least one product was returned, request its box art and screenshot images (if
 * present), copy the product name, and save the JSON if saving is enabled.  The url
 * parameter is unused here. */
void Pudding::incorporate_best_buy_api(const std::vector<std::uint8_t>& json_bytes, const std::string& url)
{
    std::ostringstream message;
    /* report size in kilobytes: bytes / 1000 (was erroneously divided by 100) */
    message << "Processing " << (json_bytes.size() / 1000.0) << "KB from the Best Buy API";
    sb::Log::log(message);
    /* Use the nlohmann library to parse the raw JSON byte data retrieved from a web request. */
    nlohmann::json json = nlohmann::json::parse(json_bytes);
    std::stringstream json_formatted;
    json_formatted << std::setw(4) << json << std::endl;
    sb::Log::log(json_formatted.str(), sb::Log::DEBUG);
    /* test that should determine if a Best Buy response has a result */
    if (json.contains("total") && json["total"].get<int>() > 0)
    {
        nlohmann::json product = json["products"][0];
        /* look up image (for games this is box art) and "alternate views image" (for games this is a screen shot) */
        for (std::string key : {"alternateViewsImage", "image"})
        {
            std::ostringstream found_message;
            if (product.value(key, "") != "")
            {
                std::string image_url = product[key];
                found_message << "Found URL to image for item " << incoming_item << " from Best Buy at " << image_url;
                web_get_bytes(image_url, std::bind(&Pudding::store_web_image, this, std::placeholders::_1, std::placeholders::_2));
            }
            else
            {
                found_message << "No images found at Best Buy for " << incoming_item;
            }
            sb::Log::log(found_message);
        }
        incoming_item.product_name(product.value("name", ""));
        save_item_json(json, incoming_item, "Best_Buy_API");
    }
    else
    {
        sb::Log::log("no results from Best Buy");
    }
}
/* Incorporate a Google Books API response into the incoming item: parse the bytes as JSON,
 * and when a volume was found, request its thumbnail (if any), copy the first author as the
 * brand name and the title as the product name, and save the JSON if saving is enabled.
 * The url parameter is unused here. */
void Pudding::incorporate_google_books_api(const std::vector<std::uint8_t>& json_bytes, const std::string& url)
{
    std::ostringstream message;
    /* report size in kilobytes: bytes / 1000 (was erroneously divided by 100) */
    message << "Processing " << (json_bytes.size() / 1000.0) << "KB from the Google Books API";
    sb::Log::log(message);
    /* Use the nlohmann library to parse the raw JSON byte data retrieved from a web request. */
    nlohmann::json json = nlohmann::json::parse(json_bytes);
    std::stringstream json_formatted;
    json_formatted << std::setw(4) << json << std::endl;
    sb::Log::log(json_formatted.str(), sb::Log::DEBUG);
    /* test that should determine if a Google Books API response is not empty */
    if (json.value<int>("totalItems", 0) > 0 && json.contains("items") && json["items"][0].contains("volumeInfo"))
    {
        /* book specific section of the JSON */
        json = json["items"][0]["volumeInfo"];
        /* get the image data */
        std::ostringstream found_message;
        if (json.contains("imageLinks") && json["imageLinks"].value("thumbnail", "") != "")
        {
            std::string image_url = json["imageLinks"]["thumbnail"];
            found_message << "Found URL to image for item " << incoming_item << " from Google Books at " << image_url;
            web_get_bytes(image_url, std::bind(&Pudding::store_web_image, this, std::placeholders::_1, std::placeholders::_2));
        }
        else
        {
            found_message << "No images found at Google Books for " << incoming_item;
        }
        sb::Log::log(found_message);
        if (json.contains("authors"))
        {
            incoming_item.brand_name(json["authors"][0]);
        }
        incoming_item.product_name(json.value("title", ""));
        save_item_json(json, incoming_item, "Google_Books_API");
    }
    else
    {
        sb::Log::log("no results from Google Books API");
    }
}
/* Write submitted JSON to file, creating parent directories if necessary, and using item and
 * api_name to determine file name prefix.  Does nothing except log a warning when JSON
 * saving is disabled in the configuration. */
void Pudding::save_item_json(const nlohmann::json& json, const Item& item, const std::string& api_name) const
{
    if (configuration()["scan"]["json-save"])
    {
        fs::path path = configuration()["scan"]["json-save-directory"];
        if (!fs::exists(path))
        {
            fs::create_directories(path);
        }
        /* file name prefix is "<API>_<full item name>" or "<API>_Unknown" for a nameless item */
        std::string prefix = api_name;
        if (item.full_name() != "")
        {
            prefix += "_" + item.full_name();
        }
        else
        {
            prefix += "_Unknown";
        }
        /* sanitize the prefix for use as a file name; the predicate takes unsigned char
         * because calling std::isalnum with a plain (possibly negative) char is undefined
         * behavior */
        std::replace_if(prefix.begin(), prefix.end(), [](unsigned char c) { return !std::isalnum(c); }, '_');
        path /= prefix + "_" + item.upc() + ".json";
        std::ofstream out(path);
        out << std::setw(4) << json << std::endl;
        sb::Log::log("Saved JSON to " + path.string());
    }
    else
    {
        SDL_LogWarn(SDL_LOG_CATEGORY_CUSTOM, "not saving JSON, saving disabled by configuration");
    }
}
/* Fetch the contents of url and deliver the received bytes to callback, tracking the
 * transfer in a new Request appended to the requests vector.  Optional headers are given as
 * a flat list of alternating name/value strings.  On Emscripten builds the fetch is
 * asynchronous (and proxied through CORS anywhere); on native builds cURL performs the
 * transfer synchronously and the callback runs before this function returns.
 * NOTE(review): the Request is allocated with new and only tracked in the requests vector —
 * confirm it is freed elsewhere when the request completes. */
void Pudding::web_get_bytes(std::string url, const web_callback& callback, const std::vector<std::string>& headers)
{
    std::stringstream message;
    message << "Fetching data from " << url;
    sb::Log::log(message.str());
    /* Add a request object to the end of the vector of launched requests. */
    Request* request = new Request(callback, url);
    requests.push_back(request);
#if defined(__EMSCRIPTEN__)
    /* Use the CORS anywhere proxy */
    url = CORS_ANYWHERE_PROXY_URL + url;
    /* Create a fetch attributes object. Set the callback that will be called when response data is received. Attach the user
     * submitted callback to the userData attribute. Set the headers. */
    emscripten_fetch_attr_t attr;
    emscripten_fetch_attr_init(&attr);
    strcpy(attr.requestMethod, "GET");
    attr.attributes = EMSCRIPTEN_FETCH_LOAD_TO_MEMORY;
    attr.onsuccess = fetch_success;
    attr.onerror = fetch_error;
    attr.userData = request;
    /* Copy headers into a null-terminated vector of C strings for Emscripten.
     * NOTE(review): the vector and strings are never freed — presumably intentional because
     * the fetch is asynchronous and needs them alive; confirm and consider pooling. */
    if (!headers.empty())
    {
        std::vector<const char*>* emscripten_formatted_headers = new std::vector<const char*>();
        for (const std::string& component : headers)
        {
            const std::string* component_c = new std::string(component.c_str());
            emscripten_formatted_headers->push_back(component_c->c_str());
        }
        emscripten_formatted_headers->push_back(nullptr);
        /* log the original header strings; the formatted vector ends with a nullptr entry,
         * and streaming a null const char* into an ostream is undefined behavior */
        std::ostringstream message;
        message << "Headers are";
        for (const std::string& component : headers)
        {
            message << " " << component;
        }
        sb::Log::log(message);
        attr.requestHeaders = emscripten_formatted_headers->data();
    }
    emscripten_fetch(&attr, url.c_str());
#else
    CURL *curl;
    CURLcode result;
    result = curl_global_init(CURL_GLOBAL_DEFAULT);
    if (result != CURLE_OK)
    {
        std::cout << "cURL initialization failed " << curl_easy_strerror(result) << std::endl;
    }
    else
    {
        curl = curl_easy_init();
        if (curl)
        {
            curl_easy_setopt(curl, CURLOPT_URL, url.c_str());
            curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, curl_write_response);
            curl_easy_setopt(curl, CURLOPT_WRITEDATA, request);
            curl_easy_setopt(curl, CURLOPT_USERAGENT, configuration()["api"]["user-agent"].get<std::string>().c_str());
            /* Pass submitted headers to cURL */
            struct curl_slist* list = nullptr;
            if (headers.size() > 0)
            {
                /* cURL expects headers as a list of "name: value" pair strings, so combine every two components of the headers list
                 * into a single string */
                for (std::size_t ii = 0; ii < headers.size(); ii += 2)
                {
                    std::string pair = headers[ii] + ": " + headers[ii + 1];
                    list = curl_slist_append(list, pair.c_str());
                }
            }
            curl_easy_setopt(curl, CURLOPT_HTTPHEADER, list);
            /* synchronous transfer; curl_write_response accumulates bytes into request */
            result = curl_easy_perform(curl);
            curl_slist_free_all(list);
            if (result != CURLE_OK)
            {
                std::cout << "cURL request failed " << curl_easy_strerror(result) << std::endl;
            }
        }
        else
        {
            std::cout << "cURL initialization failed" << std::endl;
        }
        /* curl_easy_cleanup is documented as a no-op when passed a null handle */
        curl_easy_cleanup(curl);
    }
    curl_global_cleanup();
    /* Call the user supplied callback */
    request->respond();
#endif
}
#if defined(__EMSCRIPTEN__)
/* Emscripten Fetch API success callback: copy the downloaded bytes into the originating
 * Request object (stashed in userData by web_get_bytes), run the user supplied callback,
 * and free the fetch handle. */
void Pudding::fetch_success(emscripten_fetch_t* fetch)
{
    std::stringstream bytes_message;
    bytes_message << "Found " << fetch->numBytes << " bytes using Emscripten Fetch API";
    sb::Log::log(bytes_message.str());
    /* Store the bytes in the request object */
    Request* request = reinterpret_cast<Request*>(fetch->userData);
    request->store(reinterpret_cast<const std::uint8_t*>(fetch->data), fetch->numBytes);
    /* Call the user supplied callback */
    request->respond();
    emscripten_fetch_close(fetch);
}
void Pudding::fetch_error(emscripten_fetch_t* fetch)
{
    /* Log the URL that failed along with the HTTP status code */
    std::ostringstream error_message;
    error_message << "Failed fetching " << fetch->url << " with status code " << fetch->status;
    sb::Log::log(error_message);
    /* No data will arrive, so mark the originating Request finished and release the fetch handle */
    Request* failed_request = reinterpret_cast<Request*>(fetch->userData);
    failed_request->mark_finished();
    emscripten_fetch_close(fetch);
}
#else
std::size_t Pudding::curl_write_response(std::uint8_t* buffer, std::size_t size, std::size_t count, Request* request)
{
    /* cURL delivers the payload as an element size and an element count */
    const std::size_t total_bytes = size * count;
    std::ostringstream received_message;
    received_message << "Found " << total_bytes << " bytes using cURL ";
    sb::Log::log(received_message.str());
    /* Append the bytes to the response buffer held by the request object */
    request->store(buffer, total_bytes);
    /* Returning any value other than the number of bytes received would signal an error to cURL */
    return total_bytes;
}
#endif
/* Build a Texture from downloaded image bytes and attach it to the incoming item, using the source URL as the
 * texture's name. Logs a warning instead when the bytes cannot be wrapped in an RW ops or no texture could be
 * generated from them. */
void Pudding::store_web_image(const std::vector<std::uint8_t>& image, const std::string& url)
{
    /* Get a Texture by passing the bytes through an RW ops which will enable the Texture object to load a Surface */
    sb::Texture texture;
    SDL_RWops* rw = SDL_RWFromConstMem(image.data(), image.size());
    /* SDL_RWFromConstMem returns nullptr for an empty or invalid buffer; skip loading rather than pass a null
     * RW ops to the texture or to SDL_RWclose */
    if (rw != nullptr)
    {
        texture.load(rw);
        SDL_RWclose(rw);
    }
    std::ostringstream message;
    sb::Log::Level message_level;
    if (texture.generated())
    {
        message << "Loaded an image from " << url << " and attached it to " << incoming_item << " at " << &incoming_item;
        message_level = sb::Log::INFO;
        /* Use the URL as the name for the texture */
        incoming_item.texture(texture, url);
    }
    else
    {
        message << "Could not generate texture from " << url;
        message_level = sb::Log::WARN;
    }
    sb::Log::log(message, message_level);
}
/* Call GL's delete texture function, and print a debug statement for testing. This is defined as a static member
 * function and uses the SDL logging function instead of the inherited logging functions from Node since the object
 * may not be allocated at destruction time (?)
 */
void Pudding::destroy_texture(GLuint* texture_id)
{
    /* not sure why SDL_Log works here but SDL_LogDebug and SDL_LogInfo don't */
    std::ostringstream destroy_message;
    destroy_message << "destroying texture ID " << *texture_id;
    sb::Log::log(destroy_message);
    /* Release the GL texture object itself */
    glDeleteTextures(1, texture_id);
}
/* Return the item currently selected in the inventory.
 *
 * Throws std::out_of_range (re-thrown from the carousel) when the inventory is empty, after logging the error,
 * since there is no Item reference that could meaningfully be returned. */
Item& Pudding::current_item()
{
    try
    {
        return *item_carousel.current(items);
    }
    catch (const std::out_of_range& exception)
    {
        std::ostringstream message;
        message << "Out of range exception: " << exception.what() << " (Attempting to retrieve an item from empty inventory)";
        sb::Log::log(message);
        /* The original code fell off the end of a function returning a reference here, which is undefined
         * behavior; re-throw so callers see the exception instead of a garbage reference */
        throw;
    }
}
/* Returns true if item display is toggled on and there is at least one item to display */
bool Pudding::item_display_active() const
{
    return show_item && !items.empty();
}
/*!
 * Read pixels from the camera into a `cv::Mat` and pre-process them if pre-processing methods are enabled.
 *
 * For a Linux build: This function is meant to be launched in a separate thread, where it will run continuously. Set `new_frame_available`
 * to `false` before loading camera frame data into the `cv::Mat` object at `camera_frame`, then set it back to `true` to indicate new frame
 * data is available in `camera_frame`.
 *
 * For an Emscripten build: This method actually does not capture frame data since that is done by associating the cv::Mat at Pudding::camera_frame
 * with the data in the Emscripten heap memory. However, pre-processing is done in this function, so it should be called synchronously once per
 * frame.
 *
 * NOTE(review): the main thread reads `new_frame_available` while this thread writes it -- presumably it is
 * declared atomic (or equivalent); verify the declaration in the header.
 */
void Pudding::capture_frame()
{
    /* Emscripten builds will call this function from the main thread, so don't run continuously */
    #ifndef __EMSCRIPTEN__
    /* When the camera button is switched off, this thread will automatically finish execution. */
    while (camera_switch)
    {
    #endif
        /* The frame data in the `cv::Mat` at `pudding->camera_frame` is about to be modified by the rest of
         * this function, so even if there is data stored there that hasn't been read yet, it should not
         * be read by the main thread until this is set to `true`at the end of the function. */
        new_frame_available = false;
        /* If requests are running, a barcode is currently being scanned and the camera image doesn't need to be scanned. */
        if (requests.empty())
        {
            /* Emscripten loads frame data differently, so disable this part */
            #ifndef __EMSCRIPTEN__
            /* Load camera frame data into `cv::Mat` */
            capture >> camera_frame;
            #endif
            /* Pre-process image for improved scan results */
            if (!camera_frame.empty())
            {
                if (configuration()["scan"]["sharpen"])
                {
                    /* Sharpen image for barcode detection using an unsharp mask: subtract a Gaussian blur,
                     * masking out low-contrast pixels so noise isn't amplified */
                    float sigma = 1.0f, threshold = 5.0f, amount = 1.0f;
                    cv::GaussianBlur(camera_frame, blurred, cv::Size(), sigma, sigma);
                    low_contrast_mask = cv::abs(camera_frame - blurred) < threshold;
                    camera_frame = camera_frame * (1.0f + amount) + blurred * (-amount);
                    /* NOTE(review): this copies camera_frame onto itself under the mask, which is a no-op --
                     * unsharp masking usually copies the pre-sharpened original over the low-contrast areas;
                     * confirm this is intended */
                    camera_frame.copyTo(camera_frame, low_contrast_mask);
                }
                if (configuration()["scan"]["brighten"])
                {
                    /* Brightness and contrast adjustment, see
                     * https://docs.opencv.org/2.4.13.7/doc/tutorials/core/basic_linear_transform/basic_linear_transform.html */
                    int brightness = configuration()["scan"]["brightness-addition"];
                    float contrast = configuration()["scan"]["contrast-multiplication"];
                    /* Skip the conversion entirely when the config requests the identity transform */
                    if (brightness != 0 || contrast != 1.0)
                    {
                        camera_frame.convertTo(camera_frame, -1, contrast, brightness);
                    }
                }
                /* Finished loading into `cv::Mat`, so it is new data that is safe to read. */
                new_frame_available = true;
            }
            sb::Log::gl_errors("in capture, after capturing frame");
        }
    #ifndef __EMSCRIPTEN__
        /* Throttle the capture loop to roughly 20 reads per second */
        std::this_thread::sleep_for(std::chrono::milliseconds(50));
    }
    #endif
}
/* Update parameters and draw the screen.
 *
 * Runs once per frame: ingests any new camera frame (scanning it for barcodes when scanning is enabled),
 * then draws the background, the rotating pudding model, and, depending on state, the item viewer, the
 * camera view, or the main menu buttons. Finally it launches web requests for newly scanned barcodes,
 * reaps finished requests, and promotes a fully downloaded incoming item into the inventory. */
void Pudding::update()
{
    /* The Emscripten pixel data memory address has been set in JS, so reassociate the cv::Mat with the data. Emscripten frame data will be stored
     * on the heap at the given offset. Once this memory is associated with the cv::Mat, the matrix will update as the memory is filled with pixel
     * data. */
    #ifdef __EMSCRIPTEN__
    if (emscripten_heap_offset != -1)
    {
        /* Convert the address of frame RGBA pixel data on the Emscripten heap into an unsigned 8-bit pointer and point a cv::Mat to
         * the data. The memory is managed in the JS code, so don't free it. */
        std::uint8_t* emscripten_camera_pixels = reinterpret_cast<std::uint8_t*>(emscripten_heap_offset);
        camera_frame = cv::Mat(240, 320, CV_8UC4, emscripten_camera_pixels);
        emscripten_heap_offset = -1;
    }
    #endif
    sb::Log::gl_errors("at beginning of update");
    /* Time in seconds the game has running for */
    float time_seconds = SDL_GetTicks() / 1000.0f;
    /* if the config is set to refresh automatically, there may be a new barcode available */
    if (current_config_barcode != configuration()["scan"]["barcode"])
    {
        current_config_barcode = configuration()["scan"]["barcode"];
        current_barcode = current_config_barcode;
        std::ostringstream message;
        message << "read new barcode from config " << current_barcode;
        sb::Log::log(message);
    }
    /* If new frame data is available and requests aren't running, pass the frame through barcode scanning and copy it into texture memory for display. */
    if (new_frame_available && requests.empty())
    {
        #ifdef __EMSCRIPTEN__
        /* Emscripten builds call the capture frame image pre-processing synchronously */
        capture_frame();
        /* Pixels from Emscripten are RGBA */
        GLenum pixel_format = GL_RGBA;
        #else
        /* Pixels from cv::VideoCapture are BGR */
        GLenum pixel_format = GL_BGR;
        #endif
        if (configuration()["scan"]["enabled"])
        {
            std::vector<cv::Point> contours;
            /* Check the original camera frame and the pre-processed ones for barcodes */
            /* NOTE(review): contrasted_frame and sharpened_frame are not written anywhere in this file as
             * shown -- capture_frame pre-processes camera_frame in place -- so they may always be empty here;
             * confirm they are populated elsewhere */
            for (const cv::Mat& frame : {camera_frame, contrasted_frame, sharpened_frame})
            {
                /* Pre-processing may be disabled for this frame */
                if (!frame.empty())
                {
                    /* Pass to OpenCV barcode module */
                    barcode_detector->detectAndDecode(frame, barcode_info, barcode_type, barcode_corners);
                    /* If there are corners detected, a barcode was detected, so draw the border. */
                    if (!barcode_corners.empty())
                    {
                        if (!barcode_info[0].empty())
                        {
                            std::cout << "BAR[" << 0 << "] @ " << cv::Mat(barcode_corners).reshape(2, 1) << ": " <<
                                "TYPE: " << barcode_type[0] << " INFO: " << barcode_info[0] << std::endl;
                        }
                        contours = barcode_corners;
                        /* If there is barcode info and type, there is a code to store */
                        if (barcode_info.size() && barcode_type[0] != cv::barcode::NONE && barcode_type[0] != cv::barcode::EAN_8)
                        {
                            std::ostringstream message;
                            message << "camera scanned " << barcode_info[0] << " [" << barcode_type[0] << "]" << std::endl;
                            sb::Log::log(message);
                            current_barcode = barcode_info[0];
                            break;
                        }
                    }
                }
            }
            /* A barcode was detected, so draw the most recently stored corners */
            if (!contours.empty())
            {
                /* Draw the border of the barcode */
                std::vector<std::vector<cv::Point>> input;
                input.push_back(contours);
                /* Indicate whether the barcode is decodable or not using the border color property */
                std::vector<std::uint8_t> color_components;
                if (barcode_type[0] != cv::barcode::NONE && barcode_type[0] != cv::barcode::EAN_8)
                {
                    color_components = configuration()["scan"]["contour-color-decodable"].get<std::vector<std::uint8_t>>();
                }
                else
                {
                    color_components = configuration()["scan"]["contour-color-undecodable"].get<std::vector<std::uint8_t>>();
                }
                cv::Scalar color = cv::Scalar(color_components[0], color_components[1], color_components[2], 255);
                /* The config colors are RGB, so swap red and blue when the frame is BGR */
                if (pixel_format == GL_BGR)
                {
                    std::uint8_t save = color[0];
                    color[0] = color[2];
                    color[2] = save;
                }
                cv::drawContours(camera_frame, input, 0, color, 2);
                /* NOTE(review): cv::FILLED is passed where cv::circle expects a lineType argument
                 * (e.g. cv::LINE_8); verify this is intended */
                for (const cv::Point& point : contours)
                {
                    cv::circle(camera_frame, point, 3, color, cv::FILLED, cv::FILLED);
                }
            }
        }
        /* Fill camera view texture memory */
        camera_view.texture().bind();
        camera_view.texture().load(const_cast<cv::Mat&>(camera_frame).ptr(), {camera_frame.cols, camera_frame.rows}, pixel_format, GL_UNSIGNED_BYTE);
        /* Frame data has been processed, so there is not a new frame available anymore. */
        new_frame_available = false;
    }
    /* viewport box will be used to tell GL where to draw */
    viewport = window_box(true);
    /* shrink viewport if item texture or camera will be displayed */
    if (item_display_active() || camera_switch)
    {
        viewport.drag_bottom(0.5f * configuration()["interface"]["pop-up-viewport-height"].get<float>() * viewport.height());
    }
    /* Save the main viewport dimensions */
    main_viewport = viewport;
    sb::Log::gl_errors("before viewport");
    glViewport(viewport);
    glDisable(GL_DEPTH_TEST);
    glClearColor(0, 0, 0, 1);
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    /* switch to flat shader for background */
    glUseProgram(flat_program);
    /* flat shader uniforms for BG: time, texture ID, disabled HSV blend, scroll on */
    glActiveTexture(GL_TEXTURE0);
    glUniform1f(uniform["flat"]["time"], time_seconds);
    glUniform1i(uniform["flat"]["texture"], 0);
    glUniform3f(uniform["flat"]["blend"], 0.0f, 0.0f, 1.0f);
    glUniform1i(uniform["flat"]["scroll"], true);
    /* identity transformation; the temporary matrix lives until the end of this call expression */
    glUniformMatrix4fv(uniform["flat"]["transformation"], 1, GL_FALSE, &glm::mat4(1)[0][0]);
    /* disable pudding attributes and enable background attributes */
    pudding_model.disable();
    background.enable();
    background.current().bind();
    /* draws bg vertices and texture */
    glDrawArrays(GL_TRIANGLES, 0, background.attributes("position")->count());
    /* turn off scrolling */
    glUniform1i(uniform["flat"]["scroll"], false);
    sb::Log::gl_errors("after background, before pudding");
    /* draw pudding model using MVP shader */
    glUseProgram(mvp_program);
    /* calculate the transformation matrix for displaying pudding in viewport */
    model = glm::rotate(model, weight(configuration()["pudding"]["rotation-speed"].get<float>()), Y_UNIT_NORMAL_3D);
    projection = glm::perspective(
        glm::radians(40.0f * 1 / viewport.aspect()), viewport.aspect(), 0.1f, 100.0f);
    mvp = projection * VIEW_MATRIX * model;
    /* uniforms */
    glUniform1f(uniform["mvp"]["time"], time_seconds);
    glUniformMatrix4fv(uniform["mvp"]["mvp"], 1, GL_FALSE, &mvp[0][0]);
    /* disable bg attributes and enable pudding attributes */
    background.disable();
    pudding_model.attributes("position")->enable();
    GLenum side_mode, top_mode;
    /* With no items scanned yet, draw the pudding as a wireframe; otherwise draw it solid and textured */
    if (items.size() == 0)
    {
        // glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
        // pudding_model.attributes("color")->enable();
        side_mode = GL_LINES;
        top_mode = GL_LINES;
    }
    else
    {
        // glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
        // pudding_model.attributes("color")->enable();
        side_mode = GL_TRIANGLES;
        top_mode = GL_TRIANGLE_FAN;
        pudding_model.attributes("uv")->enable();
        glUniform1i(uniform["mvp"]["pudding texture"], 0);
        glActiveTexture(GL_TEXTURE0);
        current_item().current_texture().bind();
    }
    /* draw pudding model */
    glEnable(GL_DEPTH_TEST);
    /* draw the sides of the pudding */
    glDrawArrays(side_mode, 0, pudding_triangle_vertex_count);
    sb::Log::gl_errors("after pudding sides, before pudding top/bottom");
    /* enable squircling and draw the top and bottom of pudding */
    glUniform1i(uniform["mvp"]["uv transformation"], UV_SQUIRCLE);
    glUniform1f(uniform["mvp"]["coordinate bound"], configuration()["pudding"]["top-radius"]);
    glDrawArrays(top_mode, pudding_triangle_vertex_count, pudding_fan_vertex_count);
    glUniform1f(uniform["mvp"]["coordinate bound"], configuration()["pudding"]["base-radius"]);
    glDrawArrays(top_mode, pudding_triangle_vertex_count + pudding_fan_vertex_count, pudding_fan_vertex_count);
    /* disable squircling for all other drawing */
    glUniform1i(uniform["mvp"]["uv transformation"], UV_NONE);
    /* regular fill mode enabled for all other drawing */
    // glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
    sb::Log::gl_errors("after pudding, before item or camera view");
    /* only do more drawing if items are downloaded or camera is enabled */
    if (item_display_active() || camera_switch)
    {
        /* switch to flat shader for item and camera */
        glUseProgram(flat_program);
        pudding_model.disable();
        glDisable(GL_DEPTH_TEST);
        /* just need to set these once since we're drawing one texture per viewport */
        glUniform1i(uniform["flat"]["texture"], 0);
        glActiveTexture(GL_TEXTURE0);
        /* move viewport to the bottom of screen */
        viewport.top(viewport.bottom(), true);
        viewport.bottom(window_box(true).bottom(), true);
        /* reset blend to display the original texture colors */
        glUniform3f(uniform["flat"]["blend"], 0.0f, 0.0f, 1.0f);
        /* draw the current item image if we're supposed to */
        if (item_display_active())
        {
            /* shrink viewport to half size if camera will also be displayed */
            if (camera_switch)
            {
                viewport.left(viewport.cx(), true);
            }
            glViewport(viewport);
            current_item().current_texture().bind();
            current_item().view().enable();
            /* draws rectangle vertices and rectangle texture using UV coords */
            glDrawArrays(GL_TRIANGLES, 0, current_item().view().attributes("position")->count());
            current_item().view().disable();
            /* Draw arrows for cycling through items in inventory */
            if (items.size() > 1 || current_item().texture_count() > 1)
            {
                next_button.draw(uniform["flat"]["transformation"]);
                previous_button.draw(uniform["flat"]["transformation"]);
            }
        }
        /* draw the camera view if the camera button has been switched on */
        if (camera_switch)
        {
            viewport.left(window_box(true).left());
            glViewport(viewport);
            /* bind texture for drawing */
            glUniformMatrix4fv(uniform["flat"]["transformation"], 1, GL_FALSE, &camera_view.transformation()[0][0]);
            camera_view.texture().bind();
            camera_view.enable();
            /* draws rectangle vertices and rectangle texture using UV coords */
            glDrawArrays(GL_TRIANGLES, 0, camera_view.attributes("position")->count());
        }
    }
    else
    {
        /* Draw the camera button if neither the camera or inventory is displayed */
        glUseProgram(flat_program);
        camera_button.draw(uniform["flat"]["transformation"]);
        /* And the inventory button if there are items scanned into the inventory */
        if (items.size() > 0)
        {
            inventory_button.draw(uniform["flat"]["transformation"]);
        }
    }
    SDL_GL_SwapWindow(window());
    sb::Log::gl_errors("at end of update");
    /* Launch requests if a new barcode was scanned or entered */
    if (camera_switch && current_barcode != previous_barcode)
    {
        add_item(current_barcode);
        previous_barcode = current_barcode;
    }
    /* Delete and erase finished requests from the vector using iterators to erase while reading the vector */
    for (auto iter = requests.begin(); iter != requests.end();)
    {
        if ((*iter)->finished())
        {
            std::ostringstream message;
            message << "Freeing and removing request object for " << (*iter)->url();
            sb::Log::log(message);
            /* Free the heap allocated Request */
            delete *iter;
            /* Get a iterator that points to the next request, which may have been moved after erase */
            iter = requests.erase(iter);
        }
        else
        {
            /* Only increment the iterator when there was no erase */
            iter++;
        }
    }
    /* If requests are finished processing and the incoming item has a texture, add the item to the item list and create a new incoming item. */
    if (requests.empty() && incoming_item.texture_count() > 0)
    {
        std::ostringstream message;
        message << "Adding item " << incoming_item.full_name() << " to inventory";
        sb::Log::log(message);
        items.push_back(incoming_item);
        /* Set item index to end so newest item will display. */
        item_carousel.end(items);
        /* Move the camera button away from center to make room for inventory button if this is the first item added. */
        if (items.size() == 1)
        {
            const nlohmann::json& interface = configuration()["interface"];
            camera_button.translation({-1.0f * interface["main-button-double-x"].get<float>(), interface["main-button-y"]});
        }
        incoming_item = Item();
    }
}
Request::Request(const web_callback& callback, const std::string& url) : callback(callback), request_url(url) {}
/* Append a buffer of downloaded bytes to the in-memory response and log the running total.
 *
 * @param buffer pointer to the incoming bytes
 * @param size number of bytes to append from the buffer */
void Request::store(const std::uint8_t* buffer, const std::size_t& size)
{
    response.insert(response.end(), buffer, buffer + size);
    std::stringstream store_message;
    /* Report the total in kilobytes: 1000 bytes per KB (the previous divisor of 100 overstated the size tenfold) */
    store_message << "Have " << (response.size() / 1000.0) << "KB of data in memory";
    sb::Log::log(store_message.str());
}
/* Return the URL this request was created to fetch. */
const std::string& Request::url() const
{
    return request_url;
}
void Request::respond()
{
callback(response, url());
mark_finished();
}
/* Flag this request as finished so the update loop will free and erase it. */
void Request::mark_finished()
{
    is_finished = true;
}
/* Return whether this request has finished, either by responding successfully or by failing. */
const bool& Request::finished() const
{
    return is_finished;
}
/* Construct a Pad using a texture, a translation, a scale, and a callback function. A Pad is a Plane which can be clicked
 * to launch an arbitrary user function. It can be sized and placed by setting the translation and scale values. The translation
 * is relative to (0.0, 0.0), and the scale is relative to the Plane, which has opposite corners at (-1.0, -1.0) and (1.0, 1.0).
 * The texture is the graphic that displays in the Pad location. The callback must be a function that doesn't return a value or
 * accept any arguments. */
Pad::Pad(sb::Texture texture, glm::vec2 translation, float scale, float ratio, std::function<void()> on_connect, float rotation)
{
    this->texture(texture);
    this->translation(translation);
    this->scale(scale, ratio);
    /* Only apply a rotation when one was actually requested */
    if (rotation != 0.0f)
    {
        this->rotation(rotation);
    }
    this->on_connect(on_connect);
    /* Screen coordinates grow downward, so flip the collision box's Y axis to match */
    collision_box.invert_y(true);
}
/* Set angle in radians the pad will be rotated. The pad will be rotated around its center. The collision box will not
 * change, so the box will not contain the entire pad if the angle is not a multiple of pi/2. The pad's transformation
 * matrix will automatically be set to incorporate this rotation transformation.
 *
 * @param angle rotation in radians around the pad's center */
void Pad::rotation(float angle)
{
    rotation_angle = angle;
    /* Rebuild the full transformation matrix so the new angle takes effect immediately */
    transform();
}
/* Set the scale using a factor and ratio that will transform the pad in the X and Y dimensions. The ratio will determine
 * how much each axis is scaled. If the ratio is above one, the X-axis's scale will be divided by the ratio. If the ratio
 * is below one, the Y-axis's scale will be multiplied by the aspect ratio. If the aspect ratio of the window is given,
 * this will force the pad to display as a square, and the ratio will be relative to the shorter axis. The collision box
 * will be scaled by the same factors. The pad's transformation matrix will automatically be set to incorporate this
 * scale transformation.
 *
 * @param factor uniform scale applied to both axes before the ratio adjustment
 * @param ratio aspect ratio used to shrink one axis (see above) */
void Pad::scale(float factor, float ratio)
{
    scale_factor = factor;
    scale_ratio = ratio;
    /* Rebuild the full transformation matrix so the new scale takes effect immediately */
    transform();
}
/* Set a translation for the pad object in the X and Y dimension using a 2d vector. The collision box will be moved by the
 * same translation. The pad's transformation matrix will automatically be set to incorporate this translation
 * transformation.
 *
 * @param translation offset from (0.0, 0.0) in the X and Y dimensions */
void Pad::translation(const glm::vec2& translation)
{
    translation_vector = translation;
    /* Rebuild the full transformation matrix so the new translation takes effect immediately */
    transform();
}
/* Set the transformation matrix for the pad object by applying the scale to the translation and the rotation to the
* resulting matrix, meaning the transformations will be applied to the pad in the order of: translate, scale, and
* rotate. The collision box will be scaled and moved to fit around the position coordinates that would result from
* applying this transformation to the position coordinates. */
void Pad::transform()
{
glm::vec3 scale { scale_factor, scale_factor, 1.0f };
if (scale_ratio > 1.0f)
{
scale.x /= scale_ratio;
}
else if (scale_ratio < 1.0f)
{
scale.y *= scale_ratio;
}
collision_box.size(2.0f * glm::vec2{scale.x, scale.y}, true);
collision_box.center(translation_vector);
Model::transformation(glm::translate(glm::vec3{translation_vector.x, translation_vector.y, 0.0f}) *
glm::scale(scale) * glm::rotate(rotation_angle, ROTATION_AXIS));
}
/* Set the function that will run when a pad object is clicked.
 *
 * @param on_connect zero-argument callback stored in the pad's connection */
void Pad::on_connect(std::function<void()> on_connect)
{
    connection.on_connect(on_connect);
}
/* Returns true if the point at position collides with the pad's collision box.
 *
 * @param position point to test, in the same coordinate space as the collision box */
bool Pad::collide(const glm::vec2& position) const
{
    return collision_box.collide(position);
}
/* Draw the pad with the currently bound flat shader program: upload the pad's transformation into the given
 * uniform, bind the pad's texture, and draw the plane's triangles. Attributes are disabled again afterward so
 * the caller's attribute state is left clean.
 *
 * @param uniform_id location of the shader's transformation matrix uniform */
void Pad::draw(GLuint uniform_id)
{
    glUniformMatrix4fv(uniform_id, 1, GL_FALSE, &transformation()[0][0]);
    texture().bind();
    enable();
    glDrawArrays(GL_TRIANGLES, 0, attributes("position")->count());
    disable();
}
/* Convenience overload of glViewport that accepts a Box, forwarding its bottom-left corner and dimensions
 * to the standard four-argument GL call. */
void glViewport(Box box)
{
    const auto corner_x = box.left();
    const auto corner_y = box.bottom();
    glViewport(corner_x, corner_y, box.width(), box.height());
}
#ifdef __EMSCRIPTEN__
/* This will bind the global functions to Emscripten so the camera pixel data can be transferred.
 * JS calls set_heap_offset with the address of fresh RGBA pixels on the Emscripten heap and flag_frame
 * to signal that a new frame is ready for the update loop. */
EMSCRIPTEN_BINDINGS(my_module)
{
    function("flag_frame", &flag_frame);
    function("set_heap_offset", &set_heap_offset);
}
#endif