TheAlgorithms/C++ 1.0.0
All the algorithms implemented in C++
Loading...
Searching...
No Matches
ordinary_least_squares_regressor.cpp File Reference

Linear regression example using Ordinary least squares More...

#include <cassert>
#include <cmath>
#include <iomanip>
#include <iostream>
#include <vector>
Include dependency graph for ordinary_least_squares_regressor.cpp:

Go to the source code of this file.

Functions

template<typename T>
std::ostream & operator<< (std::ostream &out, std::vector< std::vector< T > > const &v)
template<typename T>
std::ostream & operator<< (std::ostream &out, std::vector< T > const &v)
template<typename T>
bool is_square (std::vector< std::vector< T > > const &A)
template<typename T>
std::vector< std::vector< T > > operator* (std::vector< std::vector< T > > const &A, std::vector< std::vector< T > > const &B)
template<typename T>
std::vector< T > operator* (std::vector< std::vector< T > > const &A, std::vector< T > const &B)
template<typename T>
std::vector< float > operator* (float const scalar, std::vector< T > const &A)
template<typename T>
std::vector< float > operator* (std::vector< T > const &A, float const scalar)
template<typename T>
std::vector< float > operator/ (std::vector< T > const &A, float const scalar)
template<typename T>
std::vector< T > operator- (std::vector< T > const &A, std::vector< T > const &B)
template<typename T>
std::vector< T > operator+ (std::vector< T > const &A, std::vector< T > const &B)
template<typename T>
std::vector< std::vector< float > > get_inverse (std::vector< std::vector< T > > const &A)
template<typename T>
std::vector< std::vector< T > > get_transpose (std::vector< std::vector< T > > const &A)
template<typename T>
std::vector< float > fit_OLS_regressor (std::vector< std::vector< T > > const &X, std::vector< T > const &Y)
template<typename T>
std::vector< float > predict_OLS_regressor (std::vector< std::vector< T > > const &X, std::vector< float > const &beta)
void ols_test ()
int main ()

Detailed Description

Linear regression example using Ordinary least squares

Program that gets the number of data samples and number of features per sample along with output per sample. It applies OLS regression to compute the regression output for additional test data samples.

Author
Krishna Vedala

Definition in file ordinary_least_squares_regressor.cpp.

Function Documentation

◆ fit_OLS_regressor()

template<typename T>
std::vector< float > fit_OLS_regressor ( std::vector< std::vector< T > > const & X,
std::vector< T > const & Y )

Perform Ordinary Least Squares curve fit. This operation is defined as

\[\beta = \left(X^T X\right)^{-1} X^T Y\]

Parameters
Xfeature matrix with rows representing sample vector of features
Yknown regression value for each sample
Returns
fitted regression model polynomial coefficients

Definition at line 321 of file ordinary_least_squares_regressor.cpp.

322 {
323 // NxF
324 std::vector<std::vector<T>> X2 = X;
325 for (size_t i = 0; i < X2.size(); i++) {
326 // add Y-intercept -> Nx(F+1)
327 X2[i].push_back(1);
328 }
329 // (F+1)xN
330 std::vector<std::vector<T>> Xt = get_transpose(X2);
331 // (F+1)x(F+1)
332 std::vector<std::vector<T>> tmp = get_inverse(Xt * X2);
333 // (F+1)xN
334 std::vector<std::vector<float>> out = tmp * Xt;
335 // cout << endl
336 // << "Projection matrix: " << X2 * out << endl;
337
338 // Fx1,1 -> (F+1)^th element is the independent constant
339 return out * Y;
340}
std::vector< std::vector< T > > get_transpose(std::vector< std::vector< T > > const &A)
std::vector< std::vector< float > > get_inverse(std::vector< std::vector< T > > const &A)

◆ get_inverse()

template<typename T>
std::vector< std::vector< float > > get_inverse ( std::vector< std::vector< T > > const & A)

Get matrix inverse using row transformations. The given matrix must be square and non-singular.

Returns
inverse matrix

Definition at line 226 of file ordinary_least_squares_regressor.cpp.

227 {
228 // Assuming A is square matrix
229 size_t N = A.size();
230
231 std::vector<std::vector<float>> inverse(N);
232 for (size_t row = 0; row < N; row++) {
233 // preallocate a resultant identity matrix
234 inverse[row] = std::vector<float>(N);
235 for (size_t col = 0; col < N; col++) {
236 inverse[row][col] = (row == col) ? 1.f : 0.f;
237 }
238 }
239
240 if (!is_square(A)) {
241 std::cerr << "A must be a square matrix!" << std::endl;
242 return inverse;
243 }
244
245 // preallocate a temporary matrix identical to A
246 std::vector<std::vector<float>> temp(N);
247 for (size_t row = 0; row < N; row++) {
248 std::vector<float> v(N);
249 for (size_t col = 0; col < N; col++) {
250 v[col] = static_cast<float>(A[row][col]);
251 }
252 temp[row] = v;
253 }
254
255 // start transformations
256 for (size_t row = 0; row < N; row++) {
257 for (size_t row2 = row; row2 < N && temp[row][row] == 0; row2++) {
258 // this to ensure diagonal elements are not 0
259 temp[row] = temp[row] + temp[row2];
260 inverse[row] = inverse[row] + inverse[row2];
261 }
262
263 for (size_t col2 = row; col2 < N && temp[row][row] == 0; col2++) {
264 // this to further ensure diagonal elements are not 0
265 for (size_t row2 = 0; row2 < N; row2++) {
266 temp[row2][row] = temp[row2][row] + temp[row2][col2];
267 inverse[row2][row] = inverse[row2][row] + inverse[row2][col2];
268 }
269 }
270
271 if (temp[row][row] == 0) {
272 // Probably a low-rank matrix and hence singular
273 std::cerr << "Low-rank matrix, no inverse!" << std::endl;
274 return inverse;
275 }
276
277 // set diagonal to 1
278 auto divisor = static_cast<float>(temp[row][row]);
279 temp[row] = temp[row] / divisor;
280 inverse[row] = inverse[row] / divisor;
281 // Row transformations
282 for (size_t row2 = 0; row2 < N; row2++) {
283 if (row2 == row) {
284 continue;
285 }
286 float factor = temp[row2][row];
287 temp[row2] = temp[row2] - factor * temp[row];
288 inverse[row2] = inverse[row2] - factor * inverse[row];
289 }
290 }
291
292 return inverse;
293}
bool is_square(std::vector< std::vector< T > > const &A)

◆ get_transpose()

template<typename T>
std::vector< std::vector< T > > get_transpose ( std::vector< std::vector< T > > const & A)

matrix transpose

Returns
resultant matrix

Definition at line 300 of file ordinary_least_squares_regressor.cpp.

301 {
302 std::vector<std::vector<T>> result(A[0].size());
303
304 for (size_t row = 0; row < A[0].size(); row++) {
305 std::vector<T> v(A.size());
306 for (size_t col = 0; col < A.size(); col++) v[col] = A[col][row];
307
308 result[row] = v;
309 }
310 return result;
311}
uint64_t result(uint64_t n)

◆ is_square()

template<typename T>
bool is_square ( std::vector< std::vector< T > > const & A)
inline

function to check if given matrix is a square matrix

Returns
true if the given matrix is square, false otherwise

Definition at line 59 of file ordinary_least_squares_regressor.cpp.

59 {
60 // Assuming A is square matrix
61 size_t N = A.size();
62 for (size_t i = 0; i < N; i++) {
63 if (A[i].size() != N) {
64 return false;
65 }
66 }
67 return true;
68}

◆ main()

int main ( void )

main function

Definition at line 421 of file ordinary_least_squares_regressor.cpp.

421 {
422 ols_test();
423
424 size_t N = 0, F = 0;
425
426 std::cout << "Enter number of features: ";
427 // number of features = columns
428 std::cin >> F;
429 std::cout << "Enter number of samples: ";
430 // number of samples = rows
431 std::cin >> N;
432
433 std::vector<std::vector<float>> data(N);
434 std::vector<float> Y(N);
435
436 std::cout
437 << "Enter training data. Per sample, provide features and one output."
438 << std::endl;
439
440 for (size_t rows = 0; rows < N; rows++) {
441 std::vector<float> v(F);
442 std::cout << "Sample# " << rows + 1 << ": ";
443 for (size_t cols = 0; cols < F; cols++) {
444 // get the F features
445 std::cin >> v[cols];
446 }
447 data[rows] = v;
448 // get the corresponding output
449 std::cin >> Y[rows];
450 }
451
452 std::vector<float> beta = fit_OLS_regressor(data, Y);
453 std::cout << std::endl << std::endl << "beta:" << beta << std::endl;
454
455 size_t T = 0;
456 std::cout << "Enter number of test samples: ";
457 // number of test sample inputs
458 std::cin >> T;
459 std::vector<std::vector<float>> data2(T);
460 // vector<float> Y2(T);
461
462 for (size_t rows = 0; rows < T; rows++) {
463 std::cout << "Sample# " << rows + 1 << ": ";
464 std::vector<float> v(F);
465 for (size_t cols = 0; cols < F; cols++) std::cin >> v[cols];
466 data2[rows] = v;
467 }
468
469 std::vector<float> out = predict_OLS_regressor(data2, beta);
470 for (size_t rows = 0; rows < T; rows++) std::cout << out[rows] << std::endl;
471
472 return 0;
473}
int data[MAX]
test data
std::vector< float > fit_OLS_regressor(std::vector< std::vector< T > > const &X, std::vector< T > const &Y)
std::vector< float > predict_OLS_regressor(std::vector< std::vector< T > > const &X, std::vector< float > const &beta)

◆ ols_test()

void ols_test ( )

Self test checks

Definition at line 369 of file ordinary_least_squares_regressor.cpp.

369 {
370 /* test function = x^2 -5 */
371 std::cout << "Test 1 (quadratic function)....";
372 // create training data set with features = x, x^2, x^3
373 std::vector<std::vector<float>> data1(
374 {{-5, 25, -125}, {-1, 1, -1}, {0, 0, 0}, {1, 1, 1}, {6, 36, 216}});
375 // create corresponding outputs
376 std::vector<float> Y1({20, -4, -5, -4, 31});
377 // perform regression modelling
378 std::vector<float> beta1 = fit_OLS_regressor(data1, Y1);
379 // create test data set with same features = x, x^2, x^3
380 std::vector<std::vector<float>> test_data1(
381 {{-2, 4, -8}, {2, 4, 8}, {-10, 100, -1000}, {10, 100, 1000}});
382 // expected regression outputs
383 std::vector<float> expected1({-1, -1, 95, 95});
384 // predicted regression outputs
385 std::vector<float> out1 = predict_OLS_regressor(test_data1, beta1);
386 // compare predicted results are within +-0.01 limit of expected
387 for (size_t rows = 0; rows < out1.size(); rows++) {
388 assert(std::abs(out1[rows] - expected1[rows]) < 0.01);
389 }
390 std::cout << "passed\n";
391
392 /* test function = x^3 + x^2 - 100 */
393 std::cout << "Test 2 (cubic function)....";
394 // create training data set with features = x, x^2, x^3
395 std::vector<std::vector<float>> data2(
396 {{-5, 25, -125}, {-1, 1, -1}, {0, 0, 0}, {1, 1, 1}, {6, 36, 216}});
397 // create corresponding outputs
398 std::vector<float> Y2({-200, -100, -100, 98, 152});
399 // perform regression modelling
400 std::vector<float> beta2 = fit_OLS_regressor(data2, Y2);
401 // create test data set with same features = x, x^2, x^3
402 std::vector<std::vector<float>> test_data2(
403 {{-2, 4, -8}, {2, 4, 8}, {-10, 100, -1000}, {10, 100, 1000}});
404 // expected regression outputs
405 std::vector<float> expected2({-104, -88, -1000, 1000});
406 // predicted regression outputs
407 std::vector<float> out2 = predict_OLS_regressor(test_data2, beta2);
408 // compare predicted results are within +-0.01 limit of expected
409 for (size_t rows = 0; rows < out2.size(); rows++) {
410 assert(std::abs(out2[rows] - expected2[rows]) < 0.01);
411 }
412 std::cout << "passed\n";
413
414 std::cout << std::endl; // ensure test results are displayed on screen
415 // (flush stdout)
416}

◆ operator*() [1/4]

template<typename T>
std::vector< float > operator* ( float const scalar,
std::vector< T > const & A )

pre-multiplication of a vector by a scalar

Returns
resultant vector

Definition at line 138 of file ordinary_least_squares_regressor.cpp.

138 {
139 // Number of rows in A
140 size_t N_A = A.size();
141
142 std::vector<float> result(N_A);
143
144 for (size_t row = 0; row < N_A; row++) {
145 result[row] += A[row] * static_cast<float>(scalar);
146 }
147
148 return result;
149}

◆ operator*() [2/4]

template<typename T>
std::vector< std::vector< T > > operator* ( std::vector< std::vector< T > > const & A,
std::vector< std::vector< T > > const & B )

Matrix multiplication such that if A is size (mxn) and B is of size (pxq) then the multiplication is defined only when n = p and the resultant matrix is of size (mxq)

Returns
resultant matrix

Definition at line 78 of file ordinary_least_squares_regressor.cpp.

79 {
80 // Number of rows in A
81 size_t N_A = A.size();
82 // Number of columns in B
83 size_t N_B = B[0].size();
84
85 std::vector<std::vector<T>> result(N_A);
86
87 if (A[0].size() != B.size()) {
88 std::cerr << "Number of columns in A != Number of rows in B ("
89 << A[0].size() << ", " << B.size() << ")" << std::endl;
90 return result;
91 }
92
93 for (size_t row = 0; row < N_A; row++) {
94 std::vector<T> v(N_B);
95 for (size_t col = 0; col < N_B; col++) {
96 v[col] = static_cast<T>(0);
97 for (size_t j = 0; j < B.size(); j++) {
98 v[col] += A[row][j] * B[j][col];
99 }
100 }
101 result[row] = v;
102 }
103
104 return result;
105}

◆ operator*() [3/4]

template<typename T>
std::vector< T > operator* ( std::vector< std::vector< T > > const & A,
std::vector< T > const & B )

multiplication of a matrix with a column vector

Returns
resultant vector

Definition at line 112 of file ordinary_least_squares_regressor.cpp.

113 {
114 // Number of rows in A
115 size_t N_A = A.size();
116
117 std::vector<T> result(N_A);
118
119 if (A[0].size() != B.size()) {
120 std::cerr << "Number of columns in A != Number of rows in B ("
121 << A[0].size() << ", " << B.size() << ")" << std::endl;
122 return result;
123 }
124
125 for (size_t row = 0; row < N_A; row++) {
126 result[row] = static_cast<T>(0);
127 for (size_t j = 0; j < B.size(); j++) result[row] += A[row][j] * B[j];
128 }
129
130 return result;
131}

◆ operator*() [4/4]

template<typename T>
std::vector< float > operator* ( std::vector< T > const & A,
float const scalar )

post-multiplication of a vector by a scalar

Returns
resultant vector

Definition at line 156 of file ordinary_least_squares_regressor.cpp.

156 {
157 // Number of rows in A
158 size_t N_A = A.size();
159
160 std::vector<float> result(N_A);
161
162 for (size_t row = 0; row < N_A; row++) {
163 result[row] = A[row] * static_cast<float>(scalar);
164 }
165
166 return result;
167}

◆ operator+()

template<typename T>
std::vector< T > operator+ ( std::vector< T > const & A,
std::vector< T > const & B )

addition of two vectors of identical lengths

Returns
resultant vector

Definition at line 204 of file ordinary_least_squares_regressor.cpp.

204 {
205 // Number of rows in A
206 size_t N = A.size();
207
208 std::vector<T> result(N);
209
210 if (B.size() != N) {
211 std::cerr << "Vector dimensions shouldbe identical!" << std::endl;
212 return A;
213 }
214
215 for (size_t row = 0; row < N; row++) result[row] = A[row] + B[row];
216
217 return result;
218}

◆ operator-()

template<typename T>
std::vector< T > operator- ( std::vector< T > const & A,
std::vector< T > const & B )

subtraction of two vectors of identical lengths

Returns
resultant vector

Definition at line 183 of file ordinary_least_squares_regressor.cpp.

183 {
184 // Number of rows in A
185 size_t N = A.size();
186
187 std::vector<T> result(N);
188
189 if (B.size() != N) {
190 std::cerr << "Vector dimensions shouldbe identical!" << std::endl;
191 return A;
192 }
193
194 for (size_t row = 0; row < N; row++) result[row] = A[row] - B[row];
195
196 return result;
197}

◆ operator/()

template<typename T>
std::vector< float > operator/ ( std::vector< T > const & A,
float const scalar )

division of a vector by a scalar

Returns
resultant vector

Definition at line 174 of file ordinary_least_squares_regressor.cpp.

174 {
175 return (1.f / scalar) * A;
176}

◆ operator<<() [1/2]

template<typename T>
std::ostream & operator<< ( std::ostream & out,
std::vector< std::vector< T > > const & v )

operator to print a matrix

Definition at line 22 of file ordinary_least_squares_regressor.cpp.

23 {
24 const int width = 10;
25 const char separator = ' ';
26
27 for (size_t row = 0; row < v.size(); row++) {
28 for (size_t col = 0; col < v[row].size(); col++) {
29 out << std::left << std::setw(width) << std::setfill(separator)
30 << v[row][col];
31 }
32 out << std::endl;
33 }
34
35 return out;
36}

◆ operator<<() [2/2]

template<typename T>
std::ostream & operator<< ( std::ostream & out,
std::vector< T > const & v )

operator to print a vector

Definition at line 42 of file ordinary_least_squares_regressor.cpp.

42 {
43 const int width = 15;
44 const char separator = ' ';
45
46 for (size_t row = 0; row < v.size(); row++) {
47 out << std::left << std::setw(width) << std::setfill(separator)
48 << v[row];
49 }
50
51 return out;
52}

◆ predict_OLS_regressor()

template<typename T>
std::vector< float > predict_OLS_regressor ( std::vector< std::vector< T > > const & X,
std::vector< float > const & beta )

Given data and OLS model coefficients, predict regression estimates. This operation is defined as

\[y_{\text{row}=i} = \sum_{j=\text{columns}}\beta_j\cdot X_{i,j}\]

Parameters
Xfeature matrix with rows representing sample vector of features
betafitted regression model
Returns
vector with regression values for each sample

Definition at line 352 of file ordinary_least_squares_regressor.cpp.

354 {
355 std::vector<float> result(X.size());
356
357 for (size_t rows = 0; rows < X.size(); rows++) {
358 // -> start with constant term
359 result[rows] = beta[X[0].size()];
360 for (size_t cols = 0; cols < X[0].size(); cols++) {
361 result[rows] += beta[cols] * X[rows][cols];
362 }
363 }
364 // Nx1
365 return result;
366}