YobeSDK 0.2.0
IDListener_demo.cpp

This is an example of how to use the Yobe IDListener.

#include "util/demo_utils.hpp"
#include <fstream>
#include <iostream>
#include <memory>
#include <vector>
constexpr auto ENV_VAR_LICENSE = "YOBE_LICENSE";
constexpr auto TEMPLATE_01 = "../audio_files/IDListener/user_1_template_01.wav";
constexpr auto TEMPLATE_02 = "../audio_files/IDListener/user_1_template_02.wav";
constexpr auto TEMPLATE_03 = "../audio_files/IDListener/user_1_template_03.wav";
// These files are in the audio_files folder if you want to experiment.
constexpr auto TEMPLATE_LONG = "../audio_files/IDListener/user_1_template_40s.wav";
constexpr auto TEMPLATE_DATA = "../audio_files/IDListener/user_1_template_data.dat";
std::vector<double> YobeProcessing(const std::string& license, std::vector<double> input_buffer);
std::shared_ptr<Yobe::BiometricTemplate> GetTemplate(std::shared_ptr<Yobe::IDListener> id_listener);
std::ofstream log_stream;
int main(int argc, char* argv[]) {
    if (argc != 2) {
        std::cout << "cpp demo requires a .wav file as input.\n";
    } else {
        // Print out the settings that the IDListener expects.
        std::cout << "Just checking to see if the Yobe parameters match the audio file.\n";
        std::cout << "Expected sampling rate: " << Yobe::Info::SamplingRate() << '\n';
        std::cout << "Expected buffer size in seconds: " << Yobe::Info::AudioBufferTime() << '\n';
        std::cout << "Number of expected input channels: " << Yobe::Info::InputChannels() << '\n';
        std::cout << "Number of expected output channels: " << Yobe::Info::OutputChannels() << "\n\n";
        const std::string file_path(argv[1]);
        // Preparing the input buffer.
        const auto input_buffer = DemoUtil::ReadAudioFile(file_path);
        std::cout << '\n';
        // Read the license from the environment; bail out early so we never construct a std::string from a null pointer.
        const char* license = std::getenv(ENV_VAR_LICENSE);
        if (license == nullptr) {
            std::cout << "Please set the " << ENV_VAR_LICENSE << " environment variable.\n";
            return 1;
        }
        // All of the Yobe processing happens in this function.
        const auto processed_audio = YobeProcessing(license, input_buffer);
        // Writing the processed data to a .wav file.
        DemoUtil::WriteAudioFile(file_path, processed_audio);
    }
    return 0;
}
std::vector<double> YobeProcessing(const std::string& license, std::vector<double> input_buffer) {
    auto id_listener = Yobe::Create::NewIDListener();
    if (id_listener == nullptr) {
        std::cout << "The library you are using probably does not include biometrics." << std::endl;
        return {};
    }
    // Here we set up our logging callback.
    log_stream.open("IDListener_demo.log");
    Yobe::Info::RegisterCallback([](const char* mess) { log_stream << mess << '\n'; });
    // Init the IDListener.
    auto init_status = id_listener->Init(license.c_str(), "../../init_data");
    if (init_status != Yobe::Status::YOBE_OK) {
        std::cout << "Init returned: " << Yobe::Info::StdError(init_status) << '\n';
        return {};
    }
    // Calculate the input buffer size that you are going to pass in to ProcessBuffer.
    const auto input_size = Yobe::Info::InputBufferSize();
    // Prepare an output buffer for collecting the output from the IDListener.
    std::vector<double> output_buffer;
    // This scratch buffer is passed to ProcessBuffer and comes back filled with the processed data.
    std::vector<double> scratch_buffer;
    uint32_t out_buffer_size = 0;
    const auto total_input_samples = input_buffer.size();
    auto voice_template = GetTemplate(id_listener);
    id_listener->SelectUser(voice_template);
std::cout << "Yobe has started processing.\n";
for (size_t input_index = 0; input_index < total_input_samples; input_index += input_size) {
// this is padding for the last buffer. If the last buffer is to small we pad it with zeros
// so that we don't miss any information in the signal.
if (input_index + input_size > total_input_samples) {
std::vector<double> pad_buffer(input_index + input_index - total_input_samples, 0.0);
input_buffer.insert(input_buffer.end(), pad_buffer.begin(), pad_buffer.end());
}
// Here we are processing the audio a buffer at a time.
status = id_listener->ProcessBuffer(&input_buffer[input_index], scratch_buffer, input_size);
log_stream << "Yobe::ProcessBuffer: " << Yobe::Info::StdError(status) << "\n";
// Now we check the status to make sure that the audio got processed.
std::cout << "ProcessBuffer returned: " << Yobe::Info::StdError(status) << '\n';
} else if (!scratch_buffer.empty()) {
// Now we collect our scratch buffer into are output buffer
output_buffer.insert(output_buffer.end(), scratch_buffer.begin(), scratch_buffer.end());
}
}
    // Here we are cleaning up and deiniting the IDListener.
    auto deinit_status = id_listener->Deinit();
    if (deinit_status != Yobe::Status::YOBE_STOPPED) {
        std::cout << "There was an error while deiniting the IDListener.\n";
    }
    std::cout << "IDListener has finished processing.\n";
    // Closing the log stream.
    log_stream.close();
    return output_buffer;
}
std::shared_ptr<Yobe::BiometricTemplate> GetTemplate(std::shared_ptr<Yobe::IDListener> id_listener) {
    std::cout << "Now registering template.\n";
    std::vector<std::shared_ptr<Yobe::BiometricTemplate>> voiceTemplates{
        id_listener->RegisterTemplate(TEMPLATE_01),
        id_listener->RegisterTemplate(TEMPLATE_02),
        id_listener->RegisterTemplate(TEMPLATE_03)};
    return id_listener->MergeUserTemplates(voiceTemplates);
}
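
As noted near the top of the file, a longer 40-second enrollment recording (TEMPLATE_LONG) is also provided for experimentation. Below is a minimal variation of GetTemplate that uses it; it assumes RegisterTemplate handles a longer recording the same way it handles the three short ones.

// Sketch: build the user template from the single 40-second recording instead of
// merging three short ones. Assumes RegisterTemplate accepts the longer file as-is.
std::shared_ptr<Yobe::BiometricTemplate> GetTemplateFromLongRecording(std::shared_ptr<Yobe::IDListener> id_listener) {
    std::cout << "Now registering the long template.\n";
    return id_listener->RegisterTemplate(TEMPLATE_LONG);
}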
YOBE_SDK_API std::shared_ptr< IDListener > NewIDListener()
Creates a new instance of IDListener.
YOBE_SDK_API int32_t InputChannels()
Returns the number of input channels required for processing.
YOBE_SDK_API double AudioBufferTime()
Returns the processing audio buffer length in seconds.
YOBE_SDK_API void RegisterCallback(std::function< void(const char *)> log_callback)
Registers a callback function to receive Yobe logging information.
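The demo above sends log messages to a file through a global std::ofstream, but any callable with this signature works. A minimal sketch that mirrors messages to std::cerr instead (the "[Yobe]" prefix is just illustrative):

// Sketch: forward Yobe log messages to standard error instead of a log file.
Yobe::Info::RegisterCallback([](const char* message) {
    std::cerr << "[Yobe] " << message << '\n';
});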
YOBE_SDK_API uint32_t InputBufferSize()
Returns the input buffer size in samples.
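This is the step size used by the demo's processing loop. A short sketch of how the number of ProcessBuffer calls and the zero padding for the final buffer can be computed up front, following the same padding approach as the demo (input_buffer is the vector returned by DemoUtil::ReadAudioFile):

// Sketch: work out how many fixed-size buffers will be processed and how many
// zeros the last buffer needs, mirroring the padding logic in the loop above.
const auto input_size = Yobe::Info::InputBufferSize();
const auto total_samples = input_buffer.size();
const auto num_buffers = (total_samples + input_size - 1) / input_size; // round up
const auto padding = num_buffers * input_size - total_samples;          // zeros appended to the final buffer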
YOBE_SDK_API const char * StdError(Status status)
Translates a Yobe Status code into a more readable string.
YOBE_SDK_API uint32_t SamplingRate(bool output_sampling_rate=true)
Returns the expected sampling rate of the input/output buffers.
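Because of the output_sampling_rate flag, the input and output rates can be queried separately. A small sketch that prints both, assuming that passing false returns the input rate:

// Sketch: the default argument reports the output rate; false is assumed to report the input rate.
std::cout << "Output sampling rate: " << Yobe::Info::SamplingRate() << '\n';
std::cout << "Input sampling rate:  " << Yobe::Info::SamplingRate(false) << '\n';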
YOBE_SDK_API int32_t OutputChannels()
Returns the number of processing output channels.
@ YOBE_STOPPED
This means that the engine successfully stopped.
@ YOBE_UNKNOWN
An unknown error has occurred.
@ YOBE_OK
The function executed successfully.
@ NEEDS_MORE_DATA
The algorithm needs more data before it can start processing the audio.
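
Taken together, these codes suggest how a streaming loop should react: YOBE_OK and NEEDS_MORE_DATA mean "keep feeding buffers", while anything else is a failure. A hedged sketch of such a check, using only the statuses and StdError documented here (whether ProcessBuffer itself returns NEEDS_MORE_DATA is an assumption):

// Sketch: decide whether to keep streaming based on a returned status.
// YOBE_OK         -> output is valid, keep going.
// NEEDS_MORE_DATA -> nothing to collect yet, keep feeding buffers (assumed non-fatal).
// anything else   -> report it and stop.
bool ContinueProcessing(Yobe::Status status) {
    if (status == Yobe::Status::YOBE_OK || status == Yobe::Status::NEEDS_MORE_DATA) {
        return true;
    }
    std::cout << "Stopping, got status: " << Yobe::Info::StdError(status) << '\n';
    return false;
}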