Algorithms_in_C++ 1.0.0
Set of algorithms implemented in C++.
machine_learning::adaline Class Reference

Public Member Functions

 adaline (int num_features, const double eta=0.01f, const double accuracy=1e-5)
 
int predict (const std::vector< double > &x, double *out=nullptr)
 
double fit (const std::vector< double > &x, const int &y)
 
template<size_t N>
void fit (std::array< std::vector< double >, N > const &X, std::array< int, N > const &Y)
 
int activation (double x)
 

Private Member Functions

bool check_size_match (const std::vector< double > &x)
 

Private Attributes

const double eta
 learning rate of the algorithm
 
const double accuracy
 model fit convergence accuracy
 
std::vector< double > weights
 weights of the neural network
 

Friends

std::ostream & operator<< (std::ostream &out, const adaline &ada)
 

Constructor & Destructor Documentation

◆ adaline()

machine_learning::adaline::adaline ( int num_features,
const double eta = 0.01f,
const double accuracy = 1e-5 )
inline explicit

Default constructor

Parameters
[in]	num_features	number of features present
[in]	eta	learning rate (optional, default=0.01)
[in]	accuracy	convergence accuracy (optional, default= \(1\times10^{-5}\))
    if (eta <= 0) {
        std::cerr << "learning rate should be positive and nonzero"
                  << std::endl;
        std::exit(EXIT_FAILURE);
    }

    weights = std::vector<double>(
        num_features +
        1);  // additional weight is for the constant bias term

    // initialize every weight to 1 (random initialization in the
    // range [-50, 49] is left commented out below)
    for (double &weight : weights) weight = 1.f;
    // weights[i] = (static_cast<double>(std::rand() % 100) - 50);
}
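
A minimal construction sketch (not part of the library source), assuming the adaline class definition from adaline_learning.cpp is available in the same translation unit:

// hypothetical example: a model with 3 input features,
// learning rate 0.1 and convergence accuracy 1e-6
machine_learning::adaline model(3, 0.1, 1e-6);

// or rely on the defaults (eta = 0.01, accuracy = 1e-5)
machine_learning::adaline default_model(3);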

Member Function Documentation

◆ activation()

int machine_learning::adaline::activation ( double x)
inline

Defines activation function as Heaviside's step function.

\[ f(x) = \begin{cases} -1 & \forall x \le 0\\ 1 & \forall x > 0 \end{cases} \]

Parameters
x	input value to apply activation on
Returns
activation output
{ return x > 0 ? 1 : -1; }
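
A brief illustration of the quantizer (illustrative values only), assuming model is an adaline instance as in the constructor sketch above:

int a = model.activation(0.7);   // a ==  1, since 0.7 > 0
int b = model.activation(-3.2);  // b == -1, since -3.2 <= 0
int c = model.activation(0.0);   // c == -1, the boundary lies in the x <= 0 branch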

◆ check_size_match()

bool machine_learning::adaline::check_size_match ( const std::vector< double > & x)
inline private

Convenience function to check whether the input feature vector size matches the model's feature dimension (the weight vector size minus the bias term).

Parameters
[in]	x	feature vector to check
Returns
true size matches
false size does not match
{
    if (x.size() != (weights.size() - 1)) {
        std::cerr << __func__ << ": "
                  << "Number of features in x does not match the feature "
                     "dimension in model!"
                  << std::endl;
        return false;
    }
    return true;
}

◆ fit() [1/2]

double machine_learning::adaline::fit ( const std::vector< double > & x,
const int & y )
inline

Update the weights of the model using supervised learning for one feature vector

Parameters
[in]	x	feature vector
[in]	y	known output value
Returns
correction factor
{
    if (!check_size_match(x)) {
        return 0;
    }

    /* output of the model with current weights */
    int p = predict(x);
    int prediction_error = y - p;  // error in estimation
    double correction_factor = eta * prediction_error;

    /* update each weight, the last weight is the bias term */
    for (int i = 0; i < x.size(); i++) {
        weights[i] += correction_factor * x[i];
    }
    weights[x.size()] += correction_factor;  // update bias

    return correction_factor;
}
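
In symbols, a single call performs the following update, where \(\hat{y}\) is the thresholded prediction returned by predict(x) and the last weight \(w_n\) acts as the bias term:

\[ \Delta = \eta\,(y - \hat{y}), \qquad w_i \leftarrow w_i + \Delta\, x_i \quad (i = 0,\dots,n-1), \qquad w_n \leftarrow w_n + \Delta \]

The returned value is the correction factor \(\Delta\) itself, which is zero whenever the sample is already classified correctly.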

◆ fit() [2/2]

template<size_t N>
void machine_learning::adaline::fit ( std::array< std::vector< double >, N > const & X,
std::array< int, N > const & Y )
inline

Update the weights of the model using supervised learning for an array of vectors.

Parameters
[in]	X	array of feature vectors
[in]	Y	known output value for each feature vector
{
    double avg_pred_error = 1.f;

    int iter = 0;
    for (iter = 0; (iter < MAX_ITER) && (avg_pred_error > accuracy);
         iter++) {
        avg_pred_error = 0.f;

        // perform fit for each sample
        for (int i = 0; i < N; i++) {
            double err = fit(X[i], Y[i]);
            avg_pred_error += std::abs(err);
        }
        avg_pred_error /= N;

        // Print updates every 200th iteration
        // if (iter % 100 == 0)
        std::cout << "\tIter " << iter << ": Training weights: " << *this
                  << "\tAvg error: " << avg_pred_error << std::endl;
    }

    if (iter < MAX_ITER) {
        std::cout << "Converged after " << iter << " iterations."
                  << std::endl;
    } else {
        std::cout << "Did not converge after " << iter << " iterations."
                  << std::endl;
    }
}
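
A hedged usage sketch for this overload; the sample data below is made up for illustration, and it assumes the adaline definition plus the <array> and <vector> headers are available in the translation unit:

std::array<std::vector<double>, 4> X{
    {{1.0, 1.0}, {2.0, 1.5}, {-1.0, -0.5}, {-2.0, -1.0}}};
std::array<int, 4> Y{{1, 1, -1, -1}};

machine_learning::adaline model(2);  // two features per sample
model.fit(X, Y);  // iterates until MAX_ITER or the average error drops to accuracy

int label = model.predict({1.5, 1.0});  // classify an unseen sample

Note that the labels are expected to be +1 or -1, since the prediction error is computed against the quantized output of activation().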

◆ predict()

int machine_learning::adaline::predict ( const std::vector< double > & x,
double * out = nullptr )
inline

Predict the output of the model for a given set of features.

Parameters
[in]	x	input vector
[out]	out	optional argument to return the neuron output before applying the activation function (nullptr to ignore)
Returns
model prediction output
{
    if (!check_size_match(x)) {
        return 0;
    }

    double y = weights.back();  // assign bias value

    // for (int i = 0; i < x.size(); i++) y += x[i] * weights[i];
    y = std::inner_product(x.begin(), x.end(), weights.begin(), y);

    if (out != nullptr) {  // if out variable is provided
        *out = y;
    }

    return activation(y);  // quantizer: apply ADALINE threshold function
}
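
A small sketch of the optional out parameter, assuming model is a trained adaline instance as in the fit() example above:

double raw = 0.0;
int label = model.predict({1.5, 1.0}, &raw);
// label is the quantized output (+1 or -1);
// raw now holds the linear response w·x + bias before thresholding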

Friends And Related Symbol Documentation

◆ operator<<

std::ostream & operator<< ( std::ostream & out,
const adaline & ada )
friend

Operator to print the weights of the model

{
    out << "<";
    for (int i = 0; i < ada.weights.size(); i++) {
        out << ada.weights[i];
        if (i < ada.weights.size() - 1) {
            out << ", ";
        }
    }
    out << ">";
    return out;
}
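
For reference, streaming a model prints its weight vector with the bias as the last entry; for example, a freshly constructed two-feature model would print as follows:

std::cout << model << std::endl;  // prints <1, 1, 1>, since all weights start at 1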

The documentation for this class was generated from the following file: adaline_learning.cpp