-
Notifications
You must be signed in to change notification settings - Fork 7
/
70_neural_net.cpp
91 lines (65 loc) · 2.47 KB
/
70_neural_net.cpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
#include <cmath>             // std::exp — used by the sigmoid layer below
#include <iostream>
#include <range/v3/all.hpp> // get everything
namespace rng = ranges::v3; // easy access
namespace view = rng::view; // easy access
using namespace std;
int main() {
// A tiny two-layer feed-forward neural network built from lazy range-v3
// views. Each "layer" is a closure over views; no arithmetic happens until
// the results are materialized into std::vector (or printed) at the end.
// To build a neural network we first need some basic linear algebra facilities.
// `matrix_view` constructs a range of ranges (2D range) over a given flat range,
// splitting it into consecutive rows of `num_cols` elements each.
auto matrix_view = [](auto const& range, size_t num_cols) {
return range | view::chunk(num_cols);
};
// `sum` is an eager algorithm that does what it says: fold the range with +,
// starting from a value-initialized element (0.0 for doubles).
auto sum = [](auto const& range) {
using value_t = rng::range_value_type_t<decltype(range)>;
return rng::accumulate(range, value_t{});
};
// `add` adds two ranges lazily, element-wise. NOTE(review): zip_with stops
// at the shorter of the two ranges — extra elements are silently dropped.
auto add = [](auto const& r1, auto const& r2) {
return view::zip_with( std::plus<>{}, r1, r2 );
};
// `dot` computes the dot product of two ranges (eager, via `sum`).
auto dot = [sum](auto const& r1, auto const& r2) {
return sum( view::zip_with( std::multiplies<>{}, r1, r2 ));
};
// `dot_mat` computes the matrix-vector product of a 2D-range (range of ranges)
// and a single range: lazily, one dot product per row.
auto dot_mat = [dot](auto const& mat, auto const& vec) {
return mat | view::transform([dot,&vec](auto const row){
return dot(row,vec);
});
};
// Now let's build a little neural network layers...
// `scale_add` mixes the input neurons with the given weights and adds a bias;
// it returns the affine map x |-> W*x + b as a closure (weights/biases are
// captured by value via [=], so the returned layer is self-contained).
auto scale_add = [add,dot_mat](auto const& weights, auto const& biases) {
return [=](auto const x) {
return add(dot_mat(weights,x), biases);
};
};
// `sigmoid` is a lazy view that does the nonlinear mapping of the outputs:
// x |-> 1 / (1 + e^-x), applied element-wise.
auto sigmoid = view::transform([](auto const& x) {
// decltype(x) is a const reference here; `one` binds to the temporary 1.
// (lifetime-extended), giving a constant of the element's value type.
decltype(x) one = 1.;
return one / (one + std::exp(-x));
});
// weights and biases for two layers, just some random numbers
// W1 is read as a 2x3 matrix below (matrix_view(W1,3)), mapping 3 inputs
// to 2 hidden neurons; W2 as 2x2, mapping 2 hidden neurons to 2 outputs.
auto W1 = { -2., -.2, .8
, 1., 0., -1. };
// NOTE(review): layer 1 produces only 2 outputs, but b1 has 3 entries;
// zip_with truncation means the third bias (3.) is never used — confirm
// the intended shapes (b1 should probably have 2 elements).
auto b1 = { 1., 2., 3. };
auto W2 = { 1., -0.3
, -1., 0.9 };
auto b2 = { -1., 0.2 };
// Now, construct the actual network: two affine layers, each followed by
// the sigmoid nonlinearity, composed via eager copies into vectors.
auto network =
[ =
, layer1 = scale_add(matrix_view(W1,3), b1)
, layer2 = scale_add(matrix_view(W2,2), b2)
]
(auto const& l0)
{
// Piping into rng::copy materializes each lazy layer into a vector,
// forcing evaluation before the next layer consumes it.
std::vector<double> l1 = layer1(l0) | sigmoid | rng::copy;
std::vector<double> l2 = layer2(l1) | sigmoid | rng::copy;
return l2;
};
// Run the network with some input and print the result (one value per line)
auto input = {1., 0.3, 0.8};
rng::copy( network(input), rng::ostream_iterator(cout, "\n"));
}