Algorithms_in_C++ 1.0.0
Set of algorithms implemented in C++.
Loading...
Searching...
No Matches
machine_learning Namespace Reference

A* search algorithm More...

Classes

class  adaline
 

Functions

int save_u_matrix (const char *fname, const std::vector< std::vector< std::valarray< double > > > &W)
 
double update_weights (const std::valarray< double > &X, std::vector< std::vector< std::valarray< double > > > *W, std::vector< std::valarray< double > > *D, double alpha, int R)
 
void kohonen_som (const std::vector< std::valarray< double > > &X, std::vector< std::vector< std::valarray< double > > > *W, double alpha_min)
 
void update_weights (const std::valarray< double > &x, std::vector< std::valarray< double > > *W, std::valarray< double > *D, double alpha, int R)
 
void kohonen_som_tracer (const std::vector< std::valarray< double > > &X, std::vector< std::valarray< double > > *W, double alpha_min)
 
template<typename T >
std::ostream & operator<< (std::ostream &out, std::vector< std::valarray< T > > const &A)
 
template<typename T >
std::ostream & operator<< (std::ostream &out, const std::pair< T, T > &A)
 
template<typename T >
std::ostream & operator<< (std::ostream &out, const std::valarray< T > &A)
 
template<typename T >
std::valarray< T > insert_element (const std::valarray< T > &A, const T &ele)
 
template<typename T >
std::valarray< T > pop_front (const std::valarray< T > &A)
 
template<typename T >
std::valarray< T > pop_back (const std::valarray< T > &A)
 
template<typename T >
void equal_shuffle (std::vector< std::vector< std::valarray< T > > > &A, std::vector< std::vector< std::valarray< T > > > &B)
 
template<typename T >
void uniform_random_initialization (std::vector< std::valarray< T > > &A, const std::pair< size_t, size_t > &shape, const T &low, const T &high)
 
template<typename T >
void unit_matrix_initialization (std::vector< std::valarray< T > > &A, const std::pair< size_t, size_t > &shape)
 
template<typename T >
void zeroes_initialization (std::vector< std::valarray< T > > &A, const std::pair< size_t, size_t > &shape)
 
template<typename T >
T sum (const std::vector< std::valarray< T > > &A)
 
template<typename T >
std::pair< size_t, size_t > get_shape (const std::vector< std::valarray< T > > &A)
 
template<typename T >
std::vector< std::vector< std::valarray< T > > > minmax_scaler (const std::vector< std::vector< std::valarray< T > > > &A, const T &low, const T &high)
 
template<typename T >
size_t argmax (const std::vector< std::valarray< T > > &A)
 
template<typename T >
std::vector< std::valarray< T > > apply_function (const std::vector< std::valarray< T > > &A, T(*func)(const T &))
 
template<typename T >
std::vector< std::valarray< T > > operator* (const std::vector< std::valarray< T > > &A, const T &val)
 
template<typename T >
std::vector< std::valarray< T > > operator/ (const std::vector< std::valarray< T > > &A, const T &val)
 
template<typename T >
std::vector< std::valarray< T > > transpose (const std::vector< std::valarray< T > > &A)
 
template<typename T >
std::vector< std::valarray< T > > operator+ (const std::vector< std::valarray< T > > &A, const std::vector< std::valarray< T > > &B)
 
template<typename T >
std::vector< std::valarray< T > > operator- (const std::vector< std::valarray< T > > &A, const std::vector< std::valarray< T > > &B)
 
template<typename T >
std::vector< std::valarray< T > > multiply (const std::vector< std::valarray< T > > &A, const std::vector< std::valarray< T > > &B)
 
template<typename T >
std::vector< std::valarray< T > > hadamard_product (const std::vector< std::valarray< T > > &A, const std::vector< std::valarray< T > > &B)
 

Variables

constexpr double MIN_DISTANCE = 1e-4
 

Detailed Description

A* search algorithm

Machine Learning algorithms.

for std::vector

Machine learning algorithms.

A* is an informed search algorithm, or a best-first search, meaning that it is formulated in terms of weighted graphs: starting from a specific starting node of a graph (initial state), it aims to find a path to the given goal node having the smallest cost (least distance travelled, shortest time, etc.). It evaluates by maintaining a tree of paths originating at the start node and extending those paths one edge at a time until it reaches the final state. The weighted edges (or cost) is evaluated on two factors, G score (cost required from starting node or initial state to current state) and H score (cost required from current state to final state). The F(state), then is evaluated as: F(state) = G(state) + H(state).

To solve the given search with shortest cost or path possible is to inspect values having minimum F(state).

Author
Ashish Daulatabad for std::reverse function for std::array, representing EightPuzzle board for assert for std::function STL for IO operations for std::map STL for std::shared_ptr for std::set STL for std::vector STL

Machine learning algorithms

for std::transform and std::sort for assert for std::pow and std::sqrt for std::cout for std::accumulate for std::unordered_map

Machine learning algorithms

Function Documentation

◆ apply_function()

template<typename T >
std::vector< std::valarray< T > > machine_learning::apply_function ( const std::vector< std::valarray< T > > & A,
T(*)(const T &) func )

Function which applies supplied function to every element of 2D vector

Template Parameters
Ttypename of the vector
Parameters
A2D vector on which function will be applied
funcFunction to be applied
Returns
new resultant vector
330 {
332 A; // New vector to store resultant vector
333 for (auto &b : B) { // For every row in vector
334 b = b.apply(func); // Apply function to that row
335 }
336 return B; // Return new resultant 2D vector
337}

◆ argmax()

template<typename T >
size_t machine_learning::argmax ( const std::vector< std::valarray< T > > & A)

Function to get index of maximum element in 2D vector

Template Parameters
Ttypename of the vector
Parameters
A2D vector for which maximum index is required
Returns
index of maximum element
307 {
308 const auto shape = get_shape(A);
309 // As this function is used on predicted (or target) vector, shape should be
310 // (1, X)
311 if (shape.first != 1) {
312 std::cerr << "ERROR (" << __func__ << ") : ";
313 std::cerr << "Supplied vector is ineligible for argmax" << std::endl;
314 std::exit(EXIT_FAILURE);
315 }
316 // Return distance of max element from first element (i.e. index)
317 return std::distance(std::begin(A[0]),
318 std::max_element(std::begin(A[0]), std::end(A[0])));
319}
T begin(T... args)
T distance(T... args)
T end(T... args)
T endl(T... args)
T exit(T... args)
T max_element(T... args)
std::pair< size_t, size_t > get_shape(const std::vector< std::valarray< T > > &A)
Definition vector_ops.hpp:247
Here is the call graph for this function:

◆ equal_shuffle()

template<typename T >
void machine_learning::equal_shuffle ( std::vector< std::vector< std::valarray< T > > > & A,
std::vector< std::vector< std::valarray< T > > > & B )

Function to equally shuffle two 3D vectors (used for shuffling training data)

Template Parameters
Ttypename of the vector
Parameters
AFirst 3D vector
BSecond 3D vector
137 {
138 // If two vectors have different sizes
139 if (A.size() != B.size()) {
140 std::cerr << "ERROR (" << __func__ << ") : ";
142 << "Can not equally shuffle two vectors with different sizes: ";
143 std::cerr << A.size() << " and " << B.size() << std::endl;
144 std::exit(EXIT_FAILURE);
145 }
146 for (size_t i = 0; i < A.size(); i++) { // For every element in A and B
147 // Generating random index < size of A and B
148 std::srand(std::chrono::system_clock::now().time_since_epoch().count());
149 size_t random_index = std::rand() % A.size();
150 // Swap elements in both A and B with same random index
151 std::swap(A[i], A[random_index]);
152 std::swap(B[i], B[random_index]);
153 }
154 return;
155}
T rand(T... args)
T size(T... args)
T srand(T... args)
T swap(T... args)
Here is the call graph for this function:

◆ get_shape()

template<typename T >
std::pair< size_t, size_t > machine_learning::get_shape ( const std::vector< std::valarray< T > > & A)

Function to get shape of given 2D vector

Template Parameters
Ttypename of the vector
Parameters
A2D vector for which shape is required
Returns
shape as pair
247 {
248 const size_t sub_size = (*A.begin()).size();
249 for (const auto &a : A) {
250 // If supplied vector don't have same shape in all rows
251 if (a.size() != sub_size) {
252 std::cerr << "ERROR (" << __func__ << ") : ";
253 std::cerr << "Supplied vector is not 2D Matrix" << std::endl;
254 std::exit(EXIT_FAILURE);
255 }
256 }
257 return std::make_pair(A.size(), sub_size); // Return shape as pair
258}
T make_pair(T... args)
Here is the call graph for this function:

◆ hadamard_product()

template<typename T >
std::vector< std::valarray< T > > machine_learning::hadamard_product ( const std::vector< std::valarray< T > > & A,
const std::vector< std::valarray< T > > & B )

Function to get hadamard product of two 2D vectors

Template Parameters
Ttypename of the vector
Parameters
AFirst 2D vector
BSecond 2D vector
Returns
new resultant vector
496 {
497 const auto shape_a = get_shape(A);
498 const auto shape_b = get_shape(B);
499 // If vectors are not eligible for hadamard product
500 if (shape_a.first != shape_b.first || shape_a.second != shape_b.second) {
501 std::cerr << "ERROR (" << __func__ << ") : ";
502 std::cerr << "Vectors have different shapes ";
503 std::cerr << shape_a << " and " << shape_b << std::endl;
504 std::exit(EXIT_FAILURE);
505 }
506 std::vector<std::valarray<T>> C; // Vector to store result
507 for (size_t i = 0; i < A.size(); i++) {
508 C.push_back(A[i] * B[i]); // Elementwise multiplication
509 }
510 return C; // Return new resultant 2D vector
511}
T push_back(T... args)
Here is the call graph for this function:

◆ insert_element()

template<typename T >
std::valarray< T > machine_learning::insert_element ( const std::valarray< T > & A,
const T & ele )

Function to insert element into 1D vector

Template Parameters
Ttypename of the 1D vector and the element
Parameters
A1D vector in which element will to be inserted
eleelement to be inserted
Returns
new resultant vector
85 {
86 std::valarray<T> B; // New 1D vector to store resultant vector
87 B.resize(A.size() + 1); // Resizing it accordingly
88 for (size_t i = 0; i < A.size(); i++) { // For every element in A
89 B[i] = A[i]; // Copy element in B
90 }
91 B[B.size() - 1] = ele; // Inserting new element in last position
92 return B; // Return resultant vector
93}

◆ kohonen_som()

void machine_learning::kohonen_som ( const std::vector< std::valarray< double > > & X,
std::vector< std::vector< std::valarray< double > > > * W,
double alpha_min )

Apply incremental algorithm with updating neighborhood and learning rates on all samples in the given dataset.

Parameters
[in]Xdata set
[in,out]Wweights matrix
[in]alpha_minterminal value of alpha
271 {
272 size_t num_samples = X.size(); // number of rows
273 // size_t num_features = X[0].size(); // number of columns
274 size_t num_out = W->size(); // output matrix size
275 size_t R = num_out >> 2, iter = 0;
276 double alpha = 1.f;
277
279 for (int i = 0; i < num_out; i++) D[i] = std::valarray<double>(num_out);
280
281 double dmin = 1.f; // average minimum distance of all samples
282 double past_dmin = 1.f; // average minimum distance of all samples
283 double dmin_ratio = 1.f; // change per step
284
285 // Loop alpha from 1 to alpha_min
286 for (; alpha > 0 && dmin_ratio > 1e-5; alpha -= 1e-4, iter++) {
287 // Loop for each sample pattern in the data set
288 for (int sample = 0; sample < num_samples; sample++) {
289 // update weights for the current input pattern sample
290 dmin += update_weights(X[sample], W, &D, alpha, R);
291 }
292
293 // every 100th iteration, reduce the neighborhood range
294 if (iter % 300 == 0 && R > 1) {
295 R--;
296 }
297
298 dmin /= num_samples;
299
300 // termination condition variable -> % change in minimum distance
301 dmin_ratio = (past_dmin - dmin) / past_dmin;
302 if (dmin_ratio < 0) {
303 dmin_ratio = 1.f;
304 }
305 past_dmin = dmin;
306
307 std::cout << "iter: " << iter << "\t alpha: " << alpha << "\t R: " << R
308 << "\t d_min: " << dmin_ratio << "\r";
309 }
310
311 std::cout << "\n";
312}
double update_weights(const std::valarray< double > &X, std::vector< std::vector< std::valarray< double > > > *W, std::vector< std::valarray< double > > *D, double alpha, int R)
Definition kohonen_som_topology.cpp:200
Here is the call graph for this function:

◆ kohonen_som_tracer()

void machine_learning::kohonen_som_tracer ( const std::vector< std::valarray< double > > & X,
std::vector< std::valarray< double > > * W,
double alpha_min )

Apply incremental algorithm with updating neighborhood and learning rates on all samples in the given dataset.

Parameters
[in]Xdata set
[in,out]Wweights matrix
[in]alpha_minterminal value of alpha
151 {
152 int num_samples = X.size(); // number of rows
153 // int num_features = X[0].size(); // number of columns
154 int num_out = W->size(); // number of rows
155 int R = num_out >> 2, iter = 0;
156 double alpha = 1.f;
157
158 std::valarray<double> D(num_out);
159
160 // Loop alpha from 1 to alpha_min
161 do {
162 // Loop for each sample pattern in the data set
163 for (int sample = 0; sample < num_samples; sample++) {
164 // update weights for the current input pattern sample
165 update_weights(X[sample], W, &D, alpha, R);
166 }
167
168 // every 10th iteration, reduce the neighborhood range
169 if (iter % 10 == 0 && R > 1) {
170 R--;
171 }
172
173 alpha -= 0.01;
174 iter++;
175 } while (alpha > alpha_min);
176}
Here is the call graph for this function:

◆ minmax_scaler()

template<typename T >
std::vector< std::vector< std::valarray< T > > > machine_learning::minmax_scaler ( const std::vector< std::vector< std::valarray< T > > > & A,
const T & low,
const T & high )

Function to scale given 3D vector using min-max scaler

Template Parameters
Ttypename of the vector
Parameters
A3D vector which will be scaled
lownew minimum value
highnew maximum value
Returns
new scaled 3D vector
271 {
273 A; // Copying into new vector B
274 const auto shape = get_shape(B[0]); // Storing shape of B's every element
275 // As this function is used for scaling training data vector should be of
276 // shape (1, X)
277 if (shape.first != 1) {
278 std::cerr << "ERROR (" << __func__ << ") : ";
280 << "Supplied vector is not supported for minmax scaling, shape: ";
281 std::cerr << shape << std::endl;
282 std::exit(EXIT_FAILURE);
283 }
284 for (size_t i = 0; i < shape.second; i++) {
285 T min = B[0][0][i], max = B[0][0][i];
286 for (size_t j = 0; j < B.size(); j++) {
287 // Updating minimum and maximum values
288 min = std::min(min, B[j][0][i]);
289 max = std::max(max, B[j][0][i]);
290 }
291 for (size_t j = 0; j < B.size(); j++) {
292 // Applying min-max scaler formula
293 B[j][0][i] =
294 ((B[j][0][i] - min) / (max - min)) * (high - low) + low;
295 }
296 }
297 return B; // Return new resultant 3D vector
298}
T max(T... args)
T min(T... args)
Here is the call graph for this function:

◆ multiply()

template<typename T >
std::vector< std::valarray< T > > machine_learning::multiply ( const std::vector< std::valarray< T > > & A,
const std::vector< std::valarray< T > > & B )

Function to multiply two 2D vectors

Template Parameters
Ttypename of the vector
Parameters
AFirst 2D vector
BSecond 2D vector
Returns
new resultant vector
461 {
462 const auto shape_a = get_shape(A);
463 const auto shape_b = get_shape(B);
464 // If vectors are not eligible for multiplication
465 if (shape_a.second != shape_b.first) {
466 std::cerr << "ERROR (" << __func__ << ") : ";
467 std::cerr << "Vectors are not eligible for multiplication ";
468 std::cerr << shape_a << " and " << shape_b << std::endl;
469 std::exit(EXIT_FAILURE);
470 }
471 std::vector<std::valarray<T>> C; // Vector to store result
472 // Normal matrix multiplication
473 for (size_t i = 0; i < shape_a.first; i++) {
475 row.resize(shape_b.second);
476 for (size_t j = 0; j < shape_b.second; j++) {
477 for (size_t k = 0; k < shape_a.second; k++) {
478 row[j] += A[i][k] * B[k][j];
479 }
480 }
481 C.push_back(row);
482 }
483 return C; // Return new resultant 2D vector
484}
Here is the call graph for this function:

◆ operator*()

template<typename T >
std::vector< std::valarray< T > > machine_learning::operator* ( const std::vector< std::valarray< T > > & A,
const T & val )

Overloaded operator "*" to multiply given 2D vector with scaler

Template Parameters
Ttypename of both vector and the scaler
Parameters
A2D vector to which scaler will be multiplied
valScaler value which will be multiplied
Returns
new resultant vector
348 {
350 A; // New vector to store resultant vector
351 for (auto &b : B) { // For every row in vector
352 b = b * val; // Multiply row with scaler
353 }
354 return B; // Return new resultant 2D vector
355}

◆ operator+()

template<typename T >
std::vector< std::valarray< T > > machine_learning::operator+ ( const std::vector< std::valarray< T > > & A,
const std::vector< std::valarray< T > > & B )

Overloaded operator "+" to add two 2D vectors

Template Parameters
Ttypename of the vector
Parameters
AFirst 2D vector
BSecond 2D vector
Returns
new resultant vector
408 {
409 const auto shape_a = get_shape(A);
410 const auto shape_b = get_shape(B);
411 // If vectors don't have equal shape
412 if (shape_a.first != shape_b.first || shape_a.second != shape_b.second) {
413 std::cerr << "ERROR (" << __func__ << ") : ";
414 std::cerr << "Supplied vectors have different shapes ";
415 std::cerr << shape_a << " and " << shape_b << std::endl;
416 std::exit(EXIT_FAILURE);
417 }
419 for (size_t i = 0; i < A.size(); i++) { // For every row
420 C.push_back(A[i] + B[i]); // Elementwise addition
421 }
422 return C; // Return new resultant 2D vector
423}
Here is the call graph for this function:

◆ operator-()

template<typename T >
std::vector< std::valarray< T > > machine_learning::operator- ( const std::vector< std::valarray< T > > & A,
const std::vector< std::valarray< T > > & B )

Overloaded operator "-" to subtract two 2D vectors

Template Parameters
Ttypename of the vector
Parameters
AFirst 2D vector
BSecond 2D vector
Returns
new resultant vector
435 {
436 const auto shape_a = get_shape(A);
437 const auto shape_b = get_shape(B);
438 // If vectors don't have equal shape
439 if (shape_a.first != shape_b.first || shape_a.second != shape_b.second) {
440 std::cerr << "ERROR (" << __func__ << ") : ";
441 std::cerr << "Supplied vectors have different shapes ";
442 std::cerr << shape_a << " and " << shape_b << std::endl;
443 std::exit(EXIT_FAILURE);
444 }
445 std::vector<std::valarray<T>> C; // Vector to store result
446 for (size_t i = 0; i < A.size(); i++) { // For every row
447 C.push_back(A[i] - B[i]); // Elementwise subtraction
448 }
449 return C; // Return new resultant 2D vector
450}
Here is the call graph for this function:

◆ operator/()

template<typename T >
std::vector< std::valarray< T > > machine_learning::operator/ ( const std::vector< std::valarray< T > > & A,
const T & val )

Overloaded operator "/" to divide given 2D vector with scaler

Template Parameters
Ttypename of the vector and the scaler
Parameters
A2D vector to which scaler will be divided
valScaler value which will be divided
Returns
new resultant vector
366 {
368 A; // New vector to store resultant vector
369 for (auto &b : B) { // For every row in vector
370 b = b / val; // Divide row with scaler
371 }
372 return B; // Return new resultant 2D vector
373}

◆ operator<<() [1/3]

template<typename T >
std::ostream & machine_learning::operator<< ( std::ostream & out,
const std::pair< T, T > & A )

Overloaded operator "<<" to print a pair

Template Parameters
Ttypename of the pair
Parameters
outstd::ostream to output
APair to be printed
52 {
53 // Setting output precision to 4 in case of floating point numbers
54 out.precision(4);
55 // printing pair in the form (p, q)
56 std::cout << "(" << A.first << ", " << A.second << ")";
57 return out;
58}
T precision(T... args)
Here is the call graph for this function:

◆ operator<<() [2/3]

template<typename T >
std::ostream & machine_learning::operator<< ( std::ostream & out,
const std::valarray< T > & A )

Overloaded operator "<<" to print a 1D vector

Template Parameters
Ttypename of the vector
Parameters
outstd::ostream to output
A1D vector to be printed
67 {
68 // Setting output precision to 4 in case of floating point numbers
69 out.precision(4);
70 for (const auto &a : A) { // For every element in the vector.
71 std::cout << a << ' '; // Print element
72 }
74 return out;
75}
Here is the call graph for this function:

◆ operator<<() [3/3]

template<typename T >
std::ostream & machine_learning::operator<< ( std::ostream & out,
std::vector< std::valarray< T > > const & A )

Overloaded operator "<<" to print 2D vector

Template Parameters
Ttypename of the vector
Parameters
outstd::ostream to output
A2D vector to be printed
33 {
34 // Setting output precision to 4 in case of floating point numbers
35 out.precision(4);
36 for (const auto &a : A) { // For each row in A
37 for (const auto &x : a) { // For each element in row
38 std::cout << x << ' '; // print element
39 }
41 }
42 return out;
43}
Here is the call graph for this function:

◆ pop_back()

template<typename T >
std::valarray< T > machine_learning::pop_back ( const std::valarray< T > & A)

Function to remove last element from 1D vector

Template Parameters
Ttypename of the vector
Parameters
A1D vector from which last element will be removed
Returns
new resultant vector
119 {
120 std::valarray<T> B; // New 1D vector to store resultant vector
121 B.resize(A.size() - 1); // Resizing it accordingly
122 for (size_t i = 0; i < A.size() - 1;
123 i++) { // For every (except last) element in A
124 B[i] = A[i]; // Copy element in B
125 }
126 return B; // Return resultant vector
127}

◆ pop_front()

template<typename T >
std::valarray< T > machine_learning::pop_front ( const std::valarray< T > & A)

Function to remove first element from 1D vector

Template Parameters
Ttypename of the vector
Parameters
A1D vector from which first element will be removed
Returns
new resultant vector
102 {
103 std::valarray<T> B; // New 1D vector to store resultant vector
104 B.resize(A.size() - 1); // Resizing it accordingly
105 for (size_t i = 1; i < A.size();
106 i++) { // // For every (except first) element in A
107 B[i - 1] = A[i]; // Copy element in B with left shifted position
108 }
109 return B; // Return resultant vector
110}

◆ save_u_matrix()

int machine_learning::save_u_matrix ( const char * fname,
const std::vector< std::vector< std::valarray< double > > > & W )

Create the distance matrix or U-matrix from the trained 3D weights matrix and save to disk.

Parameters
[in]fnamefilename to save in (gets overwritten without confirmation)
[in]Wmodel matrix to save
Returns
0 if all ok
-1 if file creation failed
143 {
144 std::ofstream fp(fname);
145 if (!fp) { // error with fopen
146 std::cerr << "File error (" << fname << "): " << std::strerror(errno)
147 << std::endl;
148 return -1;
149 }
150
151 // neighborhood range
152 unsigned int R = 1;
153
154 for (int i = 0; i < W.size(); i++) { // for each x
155 for (int j = 0; j < W[0].size(); j++) { // for each y
156 double distance = 0.f;
157
158 int from_x = std::max<int>(0, i - R);
159 int to_x = std::min<int>(W.size(), i + R + 1);
160 int from_y = std::max<int>(0, j - R);
161 int to_y = std::min<int>(W[0].size(), j + R + 1);
162 int l = 0, m = 0;
163#ifdef _OPENMP
164#pragma omp parallel for reduction(+ : distance)
165#endif
166 for (l = from_x; l < to_x; l++) { // scan neighborhood in x
167 for (m = from_y; m < to_y; m++) { // scan neighborhood in y
168 auto d = W[i][j] - W[l][m];
169 double d2 = std::pow(d, 2).sum();
170 distance += std::sqrt(d2);
171 // distance += d2;
172 }
173 }
174
175 distance /= R * R; // mean distance from neighbors
176 fp << distance; // print the mean separation
177 if (j < W[0].size() - 1) { // if not the last column
178 fp << ','; // suffix comma
179 }
180 }
181 if (i < W.size() - 1) { // if not the last row
182 fp << '\n'; // start a new line
183 }
184 }
185
186 fp.close();
187 return 0;
188}
T pow(T... args)
T sqrt(T... args)
T strerror(T... args)
Here is the call graph for this function:

◆ sum()

template<typename T >
T machine_learning::sum ( const std::vector< std::valarray< T > > & A)

Function to get sum of all elements in 2D vector

Template Parameters
Ttypename of the vector
Parameters
A2D vector for which sum is required
Returns
returns sum of all elements of 2D vector
232 {
233 T cur_sum = 0; // Initially sum is zero
234 for (const auto &a : A) { // For every row in A
235 cur_sum += a.sum(); // Add sum of that row to current sum
236 }
237 return cur_sum; // Return sum
238}

◆ transpose()

template<typename T >
std::vector< std::valarray< T > > machine_learning::transpose ( const std::vector< std::valarray< T > > & A)

Function to get transpose of 2D vector

Template Parameters
Ttypename of the vector
Parameters
A2D vector which will be transposed
Returns
new resultant vector
383 {
384 const auto shape = get_shape(A); // Current shape of vector
385 std::vector<std::valarray<T>> B; // New vector to store result
386 // Storing transpose values of A in B
387 for (size_t j = 0; j < shape.second; j++) {
389 row.resize(shape.first);
390 for (size_t i = 0; i < shape.first; i++) {
391 row[i] = A[i][j];
392 }
393 B.push_back(row);
394 }
395 return B; // Return new resultant 2D vector
396}
Here is the call graph for this function:

◆ uniform_random_initialization()

template<typename T >
void machine_learning::uniform_random_initialization ( std::vector< std::valarray< T > > & A,
const std::pair< size_t, size_t > & shape,
const T & low,
const T & high )

Function to initialize given 2D vector using uniform random initialization

Template Parameters
Ttypename of the vector
Parameters
A2D vector to be initialized
shaperequired shape
lowlower limit on value
highupper limit on value
168 {
169 A.clear(); // Making A empty
170 // Uniform distribution in range [low, high]
172 std::chrono::system_clock::now().time_since_epoch().count());
173 std::uniform_real_distribution<T> distribution(low, high);
174 for (size_t i = 0; i < shape.first; i++) { // For every row
176 row; // Making empty row which will be inserted in vector
177 row.resize(shape.second);
178 for (auto &r : row) { // For every element in row
179 r = distribution(generator); // copy random number
180 }
181 A.push_back(row); // Insert new row in vector
182 }
183 return;
184}
T clear(T... args)
Here is the call graph for this function:

◆ unit_matrix_initialization()

template<typename T >
void machine_learning::unit_matrix_initialization ( std::vector< std::valarray< T > > & A,
const std::pair< size_t, size_t > & shape )

Function to Initialize 2D vector as unit matrix

Template Parameters
Ttypename of the vector
Parameters
A2D vector to be initialized
shaperequired shape
194 {
195 A.clear(); // Making A empty
196 for (size_t i = 0; i < shape.first; i++) {
198 row; // Making empty row which will be inserted in vector
199 row.resize(shape.second);
200 row[i] = T(1); // Insert 1 at ith position
201 A.push_back(row); // Insert new row in vector
202 }
203 return;
204}

◆ update_weights() [1/2]

void machine_learning::update_weights ( const std::valarray< double > & x,
std::vector< std::valarray< double > > * W,
std::valarray< double > * D,
double alpha,
int R )

Update weights of the SOM using Kohonen algorithm

Parameters
[in]Xdata point
[in,out]Wweights matrix
[in,out]Dtemporary vector to store distances
[in]alphalearning rate \(0<\alpha\le1\)
[in]Rneighborhood range
105 {
106 int j = 0, k = 0;
107 int num_out = W->size(); // number of SOM output nodes
108 // int num_features = x.size(); // number of data features
109
110#ifdef _OPENMP
111#pragma omp for
112#endif
113 // step 1: for each output point
114 for (j = 0; j < num_out; j++) {
115 // compute Euclidian distance of each output
116 // point from the current sample
117 (*D)[j] = (((*W)[j] - x) * ((*W)[j] - x)).sum();
118 }
119
120 // step 2: get closest node i.e., node with smallest Euclidean distance to
121 // the current pattern
122 auto result = std::min_element(std::begin(*D), std::end(*D));
123 // double d_min = *result;
124 int d_min_idx = std::distance(std::begin(*D), result);
125
126 // step 3a: get the neighborhood range
127 int from_node = std::max(0, d_min_idx - R);
128 int to_node = std::min(num_out, d_min_idx + R + 1);
129
130 // step 3b: update the weights of nodes in the
131 // neighborhood
132#ifdef _OPENMP
133#pragma omp for
134#endif
135 for (j = from_node; j < to_node; j++) {
136 // update weights of nodes in the neighborhood
137 (*W)[j] += alpha * (x - (*W)[j]);
138 }
139}
T min_element(T... args)
Here is the call graph for this function:

◆ update_weights() [2/2]

double machine_learning::update_weights ( const std::valarray< double > & X,
std::vector< std::vector< std::valarray< double > > > * W,
std::vector< std::valarray< double > > * D,
double alpha,
int R )

Update weights of the SOM using Kohonen algorithm

Parameters
[in]Xdata point - N features
[in,out]Wweights matrix - PxQxN
[in,out]Dtemporary vector to store distances PxQ
[in]alphalearning rate \(0<\alpha\le1\)
[in]Rneighborhood range
Returns
minimum distance of sample and trained weights
203 {
204 int x = 0, y = 0;
205 int num_out_x = static_cast<int>(W->size()); // output nodes - in X
206 int num_out_y = static_cast<int>(W[0][0].size()); // output nodes - in Y
207 // int num_features = static_cast<int>(W[0][0][0].size()); // features =
208 // in Z
209 double d_min = 0.f;
210
211#ifdef _OPENMP
212#pragma omp for
213#endif
214 // step 1: for each output point
215 for (x = 0; x < num_out_x; x++) {
216 for (y = 0; y < num_out_y; y++) {
217 (*D)[x][y] = 0.f;
218 // compute Euclidian distance of each output
219 // point from the current sample
220 auto d = ((*W)[x][y] - X);
221 (*D)[x][y] = (d * d).sum();
222 (*D)[x][y] = std::sqrt((*D)[x][y]);
223 }
224 }
225
226 // step 2: get closest node i.e., node with smallest Euclidean distance
227 // to the current pattern
228 int d_min_x = 0, d_min_y = 0;
229 get_min_2d(*D, &d_min, &d_min_x, &d_min_y);
230
231 // step 3a: get the neighborhood range
232 int from_x = std::max(0, d_min_x - R);
233 int to_x = std::min(num_out_x, d_min_x + R + 1);
234 int from_y = std::max(0, d_min_y - R);
235 int to_y = std::min(num_out_y, d_min_y + R + 1);
236
237 // step 3b: update the weights of nodes in the
238 // neighborhood
239#ifdef _OPENMP
240#pragma omp for
241#endif
242 for (x = from_x; x < to_x; x++) {
243 for (y = from_y; y < to_y; y++) {
244 /* you can enable the following normalization if needed.
245 personally, I found it detrimental to convergence */
246 // const double s2pi = sqrt(2.f * M_PI);
247 // double normalize = 1.f / (alpha * s2pi);
248
249 /* apply scaling inversely proportional to distance from the
250 current node */
251 double d2 =
252 (d_min_x - x) * (d_min_x - x) + (d_min_y - y) * (d_min_y - y);
253 double scale_factor = std::exp(-d2 / (2.f * alpha * alpha));
254
255 (*W)[x][y] += (X - (*W)[x][y]) * alpha * scale_factor;
256 }
257 }
258 return d_min;
259}
T exp(T... args)
void get_min_2d(const std::vector< std::valarray< double > > &X, double *val, int *x_idx, int *y_idx)
Definition kohonen_som_topology.cpp:105
T sum(const std::vector< std::valarray< T > > &A)
Definition vector_ops.hpp:232
Here is the call graph for this function:

◆ zeroes_initialization()

template<typename T >
void machine_learning::zeroes_initialization ( std::vector< std::valarray< T > > & A,
const std::pair< size_t, size_t > & shape )

Function to Initialize 2D vector as zeroes

Template Parameters
Ttypename of the vector
Parameters
A2D vector to be initialized
shaperequired shape
214 {
215 A.clear(); // Making A empty
216 for (size_t i = 0; i < shape.first; i++) {
218 row; // Making empty row which will be inserted in vector
219 row.resize(shape.second); // By default all elements are zero
220 A.push_back(row); // Insert new row in vector
221 }
222 return;
223}

Variable Documentation

◆ MIN_DISTANCE

constexpr double machine_learning::MIN_DISTANCE = 1e-4
constexpr

Minimum average distance of image nodes