guivi

About the blog

In this blog I will keep track of projects I develop throughout this year and maybe in the future. For now it is just a testing ground for developing the blog itself, but I hope that as I add more material it will become a good place for me to keep information.

A small program to stop the screen saver or screen blanking by moving the cursor.

C/C++ Programming, Uncategorised Posted on 04 Sep, 2020 08:07:31
#include <iostream>
#include <windows.h>

// Returns true as soon as there is pending console input (such as a key
// press), or false once timeout_ms milliseconds elapse with no input.
bool iskeypressed(unsigned timeout_ms = 0)
{
    return WaitForSingleObject(
        GetStdHandle(STD_INPUT_HANDLE),
        timeout_ms
    ) == WAIT_OBJECT_0;
}

int main()
{
    POINT cursor;
    GetCursorPos(&cursor);

    std::cout << "Press any key to stop the program!\n";

    // Nudge the cursor 10 pixels to the right and back again, so the
    // system never considers the session idle.
    while (!iskeypressed(500)) {
        cursor.x += 10;
        SetCursorPos(cursor.x, cursor.y);
        Sleep(1000);

        cursor.x -= 10;
        SetCursorPos(cursor.x, cursor.y);
        Sleep(500);
    }

    std::cout << "Bye, Bye!\n";
    return 0;
}
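
To try it, build it as a console application; for example with a MinGW g++ toolchain (main.cpp is just whatever you named the file above):

$ g++ main.cpp -o keepawake.exe

With the Visual Studio toolchain, cl main.cpp user32.lib should work as well.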


Simple Server Socket C++

C/C++ Programming, Linux Posted on 01 Jun, 2020 12:01:41

This is a simple server written in C++; the original code can be found in the book “The Definitive Guide to Linux Network Programming”.

#include <stdio.h>
#include <stdlib.h>
#include <cstring>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netdb.h>
#include <unistd.h>

const char APRESSMESSAGE[] = "APRESS - For Professionals, by Professionals!\n";

int main(int argc, char *argv[])
{
	int simpleSocket = 0;
	int simplePort = 0;
	int returnStatus = 0;
	struct sockaddr_in simpleServer;
	
	// make sure we have a port number
	if (argc != 2){
		fprintf(stderr, "Usage: %s <port>\n", argv[0]);
		exit(1);
	}
	
	// Create streaming socket
	simpleSocket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
	if (simpleSocket == -1){
		fprintf(stderr, "Could not create a socket!\n");
		exit(1);
	}
	else {
		fprintf(stderr, "Socket created!\n");
	}
	// retrieve the port number for listening
	simplePort = atoi(argv[1]);
	
	// Set up the address structure
	//use INADDR_ANY
	std::memset(&simpleServer, '\0',sizeof(simpleServer));
	simpleServer.sin_family = AF_INET;
	simpleServer.sin_addr.s_addr = htonl(INADDR_ANY);
	simpleServer.sin_port = htons(simplePort);
	
	returnStatus = bind(simpleSocket, (struct sockaddr*)&simpleServer, sizeof(simpleServer));
	
	if (returnStatus == 0){
		fprintf(stderr, "Bind Completed!\n");
	}
	else {
		fprintf(stderr, "Could not bid the address!\n");
		close(simpleSocket);
		exit(1);
	}
	// Listen on the socket for connection
	returnStatus = listen(simpleSocket, 5);
	
	if (returnStatus == -1){
		fprintf(stderr, "Cannot listen to socket\n!");
		close(simpleSocket);
		exit(1);
	}
	
	while(1) {
		struct sockaddr_in clientName = { 0};
		int simpleChildSocket = 0;
		socklen_t clientNameLength = sizeof(clientName);
		
		simpleChildSocket = accept(simpleSocket, (struct sockaddr *)&clientName, &clientNameLength);
		if (simpleChildSocket == -1){
			fprintf(stderr, "Cannot accept connections!\n");
			close(simpleSocket);
			exit(1);
		}
		
		write(simpleChildSocket, APRESSMESSAGE, strlen(APRESSMESSAGE));
		close(simpleChildSocket);
	}
	close(simpleSocket);
	return 0;
}
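
To test it, compile the server and connect to it with a tool such as netcat (the file name and port here are just examples):

$ g++ server.cpp -o server
$ ./server 8888

Then, from another terminal:

$ nc localhost 8888
APRESS - For Professionals, by Professionals!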


MFC C++

C/C++ Programming Posted on 04 Mar, 2020 10:23:27

Use DYNAMIC_DOWNCAST to get ribbon objects, as in the sketch below.
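
A minimal sketch, assuming a ribbon-based MFC application with a CMFCRibbonBar member m_wndRibbonBar and a button whose command ID is ID_MY_BUTTON (both names are placeholders):

	// Look the element up by command ID, then downcast it safely;
	// DYNAMIC_DOWNCAST yields NULL if the element is not of that class.
	CMFCRibbonBaseElement* pElem = m_wndRibbonBar.FindByID(ID_MY_BUTTON);
	CMFCRibbonButton* pButton = DYNAMIC_DOWNCAST(CMFCRibbonButton, pElem);
	if (pButton != NULL)
		pButton->SetText(_T("New label"));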



OpenCV Aruco Marker Creator

C/C++ Programming Posted on 15 Nov, 2019 16:25:24

This is a link to generate Aruco markers, controlling the size of the marker in mm:

http://chev.me/arucogen/
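
Markers can also be generated directly with OpenCV’s aruco contrib module. A minimal sketch; the dictionary, marker ID and pixel size are arbitrary choices, and the API shown is the one used up to OpenCV 4.6 (newer versions renamed drawMarker to generateImageMarker):

#include <opencv2/aruco.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
	// 6x6-bit dictionary with 250 markers; render marker ID 23 at 200x200 px.
	cv::Ptr<cv::aruco::Dictionary> dict =
		cv::aruco::getPredefinedDictionary(cv::aruco::DICT_6X6_250);
	cv::Mat markerImage;
	cv::aruco::drawMarker(dict, 23, 200, markerImage, 1);
	cv::imwrite("marker23.png", markerImage);
	return 0;
}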



SimplestColorBalance

C/C++ Programming Posted on 14 Aug, 2019 17:11:06

// Needs <algorithm> for std::min, <cmath> for floor/ceil, and the OpenCV core module.
#include <algorithm>
#include <cmath>
#include <opencv2/core.hpp>

void SimplestColorBalance(cv::Mat inArray, cv::Mat &outArray, int percent)
{
	if (percent <= 0)
		percent = 5;

	inArray.convertTo(inArray, CV_32F);

	int rows = inArray.rows;
	int cols = inArray.cols;
	int chnls = inArray.channels();
	double halfPercent = percent / 200.0;

	std::vector<cv::Mat> channels;
	std::vector<cv::Mat> results;

	if (chnls == 3) {
		channels.reserve(3);
		cv::split(inArray, channels);
	}
	else {
		channels.resize(1); // resize, not reserve: channels[0] must exist before copyTo
		inArray.copyTo(channels[0]);
	}

	for (int i = 0; i < chnls; i++) {
		// Flatten and sort the channel to find the low/high percentile values.
		cv::Mat flat;
		channels[i].reshape(1, 1).copyTo(flat);
		cv::sort(flat, flat, cv::SORT_ASCENDING);

		int lowIdx = (int)std::floor(flat.cols * halfPercent);
		int topIdx = std::min((int)std::ceil(flat.cols * (1.0 - halfPercent)), flat.cols - 1);
		double lowVal = flat.at<float>(0, lowIdx);
		double topVal = flat.at<float>(0, topIdx);

		// Saturate everything below lowVal and above topVal...
		cv::Mat channel = channels[i];
		for (int m = 0; m < rows; m++) {
			for (int n = 0; n < cols; n++) {
				if (channel.at<float>(m, n) < lowVal)
					channel.at<float>(m, n) = lowVal;
				if (channel.at<float>(m, n) > topVal)
					channel.at<float>(m, n) = topVal;
			}
		}

		// ...then stretch the remaining range over the full [0, 255] interval.
		cv::normalize(channel, channel, 0, 255, cv::NORM_MINMAX);
		results.push_back(channel);
	}

	cv::merge(results, outArray);
	outArray.convertTo(outArray, CV_8U);
}
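
A minimal usage sketch, assuming the function above is in the same file; input.jpg is a placeholder path:

#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>

int main()
{
	cv::Mat in = cv::imread("input.jpg");
	if (in.empty())
		return 1;
	cv::Mat out;
	SimplestColorBalance(in, out, 5); // clip the bottom and top 2.5% of values
	cv::imshow("balanced", out);
	cv::waitKey(0);
	return 0;
}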



MFC VfW

C/C++ Programming Posted on 19 Jul, 2019 16:49:05

http://www.orgler.it/webcam.htm



Adding and making a .PC file for pkg-config

C/C++ Programming Posted on 27 Mar, 2019 10:04:49


Package OpenCV not found? Let’s Find It.

OpenCV has been refined over the years and installing it has become much more user-friendly now. It has been ported to many platforms and you don’t need an IDE to use it. There are many different ways in which you can install it and use it in your existing code. But sometimes, when people try to do different things with it, they run into problems. This post deals with one such problem. When you write code that uses OpenCV, you need to tell your compiler where it is, because that location contains all the libraries and binaries necessary for the header files to work in your code. If you don’t specify this, you will encounter the following error:

Package opencv was not found in the pkg-config search path.
Perhaps you should add the directory containing `opencv.pc'
to the PKG_CONFIG_PATH environment variable
No package 'opencv' found

Where do we go from here?

The reason for the error is that when you try to compile your code from the command line, you need to tell it where to find OpenCV related stuff. People generally use pkg-config to take care of this because it’s easy to handle and it looks clean. Imagine doing the ‘-l’ library-includes every time you want to compile some code. Tedious! Just open your terminal and type the following:

$ export PKG_CONFIG_PATH=/usr/local/lib/pkgconfig

This is the path to opencv.pc by default. If you have already installed OpenCV, you should already have opencv.pc somewhere on your machine. So in case you don’t have the opencv.pc file in that path, just search for it and assign its path to the PKG_CONFIG_PATH variable:

$ export PKG_CONFIG_PATH=/path/to/the/file

And you are done! It’s better to add this line to your bashrc file so that you don’t have to do it every single time you reopen your terminal.
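
For example, appending rather than overwriting, in case PKG_CONFIG_PATH is already set elsewhere:

$ echo 'export PKG_CONFIG_PATH=/usr/local/lib/pkgconfig:$PKG_CONFIG_PATH' >> ~/.bashrc
$ source ~/.bashrc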

I can’t find my opencv.pc file, what do I do?

If you cannot find the opencv.pc file, don’t worry! Just create a new file called opencv.pc in /usr/local/lib/pkgconfig and add the following content to that file:

prefix=/usr
exec_prefix=${prefix}
includedir=${prefix}/include
libdir=${exec_prefix}/lib

Name: opencv
Description: The opencv library
Version: 2.x.x
Cflags: -I${includedir}/opencv -I${includedir}/opencv2
Libs: -L${libdir} -lopencv_calib3d -lopencv_imgproc -lopencv_contrib -lopencv_legacy -l

After creating this file, make sure the PKG_CONFIG_PATH variable contains this path and you are good to go.
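
You can check that pkg-config now finds the package; the first command should print your installed version:

$ pkg-config --modversion opencv
$ pkg-config --cflags --libs opencv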

How do I run my OpenCV code now?

Now that all that installing stuff is out of the way, let’s see how we can run your OpenCV code:

Install OpenCV on your system.
Unzip the zip file.
Open the terminal and go to that folder.
Type the following in your terminal to compile:

$ g++ main.cpp MyFile.h MyFile.cpp `pkg-config opencv --cflags --libs` -o main.o

Type the following in your terminal to run (you may need to chmod 777 main.o):

$ ./main.o

————————————————————————————————-

source: https://prateekvjoshi.com/2013/10/18/package-opencv-not-found-lets-find-it/



Neural Network With Eigen Lib

C/C++ Programming Posted on 19 Mar, 2019 11:15:03

Here is code for a C-style neural-network XOR example. The only dependency is the Eigen library, a very lightweight linear-algebra library.

// MakeYourOwnNeuralNetwork.cpp : Defines the entry point for the console application.
//

#ifdef __linux__
// nothing platform-specific needed on Linux
#elif defined(_WIN32)
#include "stdafx.h" // Visual Studio precompiled header
#else
#endif

#include <cstdio>
#include <iostream>
#include <vector>
#include <random>
#include <cmath>
#include <chrono>
#include <Eigen/Dense>
#include <Eigen/Core>

using namespace Eigen;

/*

Quick diagram of how the layers are organized.

      |Layer zero |Layer one
  i1 -|--- N1 ----|--- N1 ----- O1
      | \______/  | \______/
      | /      \  | /      \
  i2 -|--- N2 ----|--- N2 ----- O2
      |           |

Error[0] is the error vector of the last/output layer.
Error[n] is the error of the input layer, where n is the number of layers; in the case above n = 1.

*/

//#define _WEIGHT_DEBUG
//#define _SOMA_DEBUG
//#define _ACTIVATION_DEBUG
//#define _ERROR_DEBUG

void forward_calculation(std::vector<MatrixXd> weights, std::vector<VectorXd> &somas, std::vector<VectorXd> &activations, VectorXd inputs)
{
	/*
	 * Forward feeding through the network.
	 */
	for (int i = 0; i < (int)weights.size(); i++) { // For every layer
		if (i == 0) // Calculate the forward summation for the input layer
			somas[i] = (weights[i] * inputs);
		else // Calculate the forward summation for every other layer
			somas[i] = (weights[i] * activations[i - 1]);

		// Calculate the sigmoid activation of the layer
		activations[i] = 1 / (1 + (-somas[i].array()).array().exp());

#ifdef _WEIGHT_DEBUG
		std::cout << "weights : " << i << "\n" << weights[i] << std::endl;
#endif
#ifdef _SOMA_DEBUG
		std::cout << "Soma : " << i << "\n" << somas[i] << std::endl;
#endif
#ifdef _ACTIVATION_DEBUG
		std::cout << "Activations : " << i << "\n" << activations[i] << std::endl;
#endif
	}
}

void backpropagation(std::vector<MatrixXd> weights, std::vector<VectorXd> activations, std::vector<VectorXd> &errors, VectorXd targets)
{
	/*
	 * Calculate the errors, starting from the output layer.
	 */
	int err_count = 0;
	for (int i = (int)weights.size() - 1; i >= 0; i--) { // For every layer starting from the last one

		if (i == (int)weights.size() - 1) { // For the last layer the error comes from the targets; only (target - activation) is needed for the gradient-descent derivative
			VectorXd e(activations[i].size());
			e = (targets - activations[i]);
			errors[0] = e;
		}
		else { // Calculate the error of all other layers: E = W^t * E.
			VectorXd e(activations[i].size());
			e = weights[i + 1].transpose() * errors[err_count - 1];
			errors[err_count] = e;
		}
#ifdef _ERROR_DEBUG
		std::cout << "errors: " << err_count << "\n" << errors[err_count] << std::endl;
#endif
		err_count++;
	}
}

void gradient_descent(std::vector<MatrixXd> &weights, std::vector<VectorXd> activations, std::vector<VectorXd> errors, VectorXd inputs, VectorXd targets, double learning_rate)
{
	for (int i = (int)weights.size() - 1; i >= 0; i--) { // For every layer in the network, starting from the last/output layer.
		MatrixXd delta;
		Eigen::Index n = activations[i].size();

		if (i != 0) { // Delta (gradient-descent step) for every layer but the input layer: D = (El o Ol o (1 - Ol)) * O^t(l-1), where o is the Hadamard (element-wise) product.
			delta = -((VectorXd)(errors[(weights.size() - 1) - i].array() * activations[i].array() * (VectorXd::Ones(n) - activations[i]).array())) * activations[i - 1].transpose();
		}
		else { // Delta (gradient-descent step) for the input layer: D = (El o Ol o (1 - Ol)) * I^t
			delta = -((VectorXd)(errors[(weights.size() - 1) - i].array() * activations[i].array() * (VectorXd::Ones(n) - activations[i]).array())) * inputs.transpose();
		}

		weights[i] = weights[i] - (learning_rate * delta); // Update the weights: Wn = W - (learning_rate * D)
	}
}

int main()
{
	Eigen::initParallel();

	//std::cout << "Number of threads: " << Eigen::nbThreads() << std::endl;
	//getchar();
	/*
	 * Random-number generator used to initialize the weights.
	 */
	std::default_random_engine generator;
	generator.seed((unsigned)std::chrono::system_clock::now().time_since_epoch().count());
	std::normal_distribution<double> distribution(0, 1);

	/*
	 * Network description.
	 */
	int number_of_inputs = 2;
	int number_of_layers = 2;
	int *number_neurons_in_layer = new int[number_of_layers];

	number_neurons_in_layer[0] = 10;
	number_neurons_in_layer[1] = 2;

	/*
	 * Network and training information.
	 */
	std::vector<VectorXd> inputs;
	std::vector<VectorXd> targets;
	std::vector<MatrixXd> weights;
	std::vector<VectorXd> somas;
	std::vector<VectorXd> activations;
	std::vector<VectorXd> errors;
	double totalError = 10;

	/*
	 * Setting the training patterns.
	 */
	inputs.push_back(Vector2d(0.99, 0.99));
	inputs.push_back(Vector2d(0.99, 0.01));
	inputs.push_back(Vector2d(0.01, 0.99));
	inputs.push_back(Vector2d(0.01, 0.01));

	targets.push_back(Vector2d(0.01, 0.01));
	targets.push_back(Vector2d(0.01, 0.99));
	targets.push_back(Vector2d(0.99, 0.01));
	targets.push_back(Vector2d(0.01, 0.01));

	/*
	 * Initialize the network with normally distributed random weights.
	 */
	for (int i = 0; i < number_of_layers; i++) {
		MatrixXd t;
		if (i == 0)
			t = MatrixXd(number_neurons_in_layer[i], number_of_inputs);
		else
			t = MatrixXd(number_neurons_in_layer[i], number_neurons_in_layer[i - 1]);

		for (int c = 0; c < t.cols(); c++) // c/r instead of i/j, so the outer loop variable is not shadowed
			for (int r = 0; r < t.rows(); r++)
				t(r, c) = distribution(generator);

		weights.push_back(t);

		if (i == 0)
			somas.push_back((-1) * (weights[i] * inputs[0]));
		else
			somas.push_back((-1) * (weights[i] * activations[activations.size() - 1]));

		VectorXd tt = 1 + somas[i].array().exp();
		activations.push_back(tt.array().inverse());
#ifdef _MYDEBUG2
		std::cout << "weights : " << i << "\n" << weights[i] << std::endl;
		std::cout << "Soma : " << i << "\n" << somas[i] << std::endl;
		std::cout << "Activations : " << i << "\n" << activations[i] << std::endl;
#endif
	}

	/*
	 * Initialize Errors
	 */
	for (int i = (int)weights.size(); i > 0; i--) {

		if (i == (int)weights.size()) {
			VectorXd e(activations[i - 1].size());
			for (int o = 0; o < activations[i - 1].size(); o++) {
				e(o) = (targets[0](o) - activations[i - 1](o));
			}
			errors.push_back(e);
		}
		else {
			VectorXd e(activations[i - 1].size());
			e = weights[i].transpose() * errors[errors.size() - 1];
			errors.push_back(e);
		}
	}

	FILE* ofile;
#ifdef _WIN32
	fopen_s(&ofile, "errors.txt", "w+"); // MSVC-only secure variant
#else
	ofile = fopen("errors.txt", "w+");
#endif
	if (ofile == NULL)
		return 1;

	double learning_rate = 0.1;
	int batch = 10;
	do {
		totalError = 0;

		for (int i = 0; i < (int)targets.size(); i++) {
			for (int k = 0; k < batch; k++) {
				forward_calculation(weights, somas, activations, inputs[i]);
				backpropagation(weights, activations, errors, targets[i]);
				totalError += fabs(errors[0].sum());
				gradient_descent(weights, activations, errors, inputs[i], targets[i], learning_rate);
			}
		}
		std::cout << "Total Error: " << totalError << std::endl;
		fprintf(ofile, "%lf\n", totalError);
	} while (totalError / batch > 0.1);

	fclose(ofile);

	/*
	 * Interactive test: type an input-pattern index (0-3) to feed it through
	 * the trained network; any other number quits.
	 */
	while (true) {
		int selected;
		std::cin >> selected;
		if (selected >= 0 && selected <= 3) {
			forward_calculation(weights, somas, activations, inputs[selected]);
			std::cout << "Selected: " << selected << std::endl;
			for (int i = 0; i < inputs[selected].size(); i++)
				std::cout << "In: " << inputs[selected](i) << " ; Output: " << activations[activations.size() - 1](i) << std::endl;
		}
		else break;
	}

	delete[] number_neurons_in_layer;
	return 0;
}
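
A possible way to build it on Linux, assuming the Eigen headers live under /usr/include/eigen3 (the usual location for distribution packages; adjust the path to your install):

$ g++ -O2 -I/usr/include/eigen3 MakeYourOwnNeuralNetwork.cpp -o xor_nn
$ ./xor_nn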


