class Num::NN::NetworkInfo(T)
inherits Reference
#
Constructors#
.new(context : Num::Grad::Context(T))
#
(context : Num::Grad::Context(T))
This should always be initialized with an empty array of layers that can be tapped and yielded by Network creation
Methods#
#conv2d(n : Int32, kh : Int32, kw : Int32)
#
(n : Int32, kh : Int32, kw : Int32)
Convolution layer for a neural network
Arguments#
- n : Int32 - Number of filters to apply
- kh : Int32 - Filter height
- kw : Int32 - Filter width
#dropout(prob : Float = 0.5_f32)
#
(prob : Float = 0.5_f32)
Adds a dropout layer for a neural network
Arguments#
- prob :
Float
- Probability of a neuron being dropped out
#input(shape : Array(Int))
#
(shape : Array(Int))
Adds an input layer to a Network. This is simply a wrapper
around the input Tensor, in order to allow layers further along
in the network to infer input shapes
Arguments#
- shape :
Array(Int)
- Shape of input data
#linear(output_size : Int)
#
(output_size : Int)
Add a linear layer to the Network. Since activation functions are just treated as additional layers, this simply requires the dimensions of the transformation.
Dimensions should be NUM_FEATURES
x NUM_OUTPUTS
, so
if the data set is 100x10, with 200 neurons in the hidden layers,
the dimensions of the layer would be 10, 100; the 200 will be handled
dynamically.
Arguments#
- output_size :
Int
- The number of outputs in the linear layer
Examples#
net = Num::NN::Network.new(ctx) do
linear 2, 3
end
#maxpool(kernel : Tuple(Int, Int), padding : Tuple(Int, Int), stride : Tuple(Int, Int))
#
(kernel : Tuple(Int, Int), padding : Tuple(Int, Int), stride : Tuple(Int, Int))
Maxpool layer for a neural network
Arguments#
- kernel : Tuple(Int, Int) - Kernel height and width
- padding : Tuple(Int, Int) - Padding height and width
- stride : Tuple(Int, Int) - Stride height and width
#mse_loss
#
Uses Mean Squared Error to compute the loss for the Network
Examples#
net = Num::NN::Network.new(ctx) do
linear 2, 3
sigmoid
linear 3, 1
sgd 0.7
mse_loss
end
#relu
#
Add a ReLU layer to the Network. Activation functions are handled the same way as other layers, but do not change the dimensions of the input
Examples#
net = Num::NN::Network.new(ctx) do
linear 2, 3
relu
end
#sgd(learning_rate : Float64 = 0.01)
#
(learning_rate : Float64 = 0.01)
Add an SGD optimizer to the Network.
Arguments#
- learning_rate :
Float64
- Learning rate for all layers in the Network
Examples#
net = Num::NN::Network.new(ctx) do
linear 2, 3
sigmoid
linear 3, 1
sgd 0.7
end
#sigmoid
#
Add a Sigmoid layer to the Network. Activation functions are handled the same way as other layers, but do not change the dimensions of the input
Examples#
net = Num::NN::Network.new(ctx) do
linear 2, 3
sigmoid
end
#sigmoid_cross_entropy_loss
#
Uses Sigmoid Cross Entropy to compute the loss for the Network
Examples#
net = Num::NN::Network.new(ctx) do
linear 2, 3
sigmoid
linear 3, 1
sgd 0.7
sigmoid_cross_entropy_loss
end
#softmax_cross_entropy_loss
#
Specifies Softmax Cross Entropy as the method of loss to be used with the Network