class Num::NN::NetworkInfo(T) inherits Reference #

Constructors#

.new(context : Num::Grad::Context(T)) #

This should always be initialized with an empty array of layers, which is then tapped and yielded during Network creation

Methods#

#adam(*args) #

Adds an Adam optimizer to the Network
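
Examples#
A minimal sketch, assuming adam's splat arguments can be omitted to fall back on default hyperparameters:
net = Num::NN::Network.new(ctx) do
  input [2]
  linear 3
  sigmoid
  linear 1
  adam
  mse_loss
end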

#conv2d(n : Int32, kh : Int32, kw : Int32) #

Adds a convolution layer to a neural network

Arguments#
  • n : Int32 - Number of filters to apply
  • kh : Int32 - Filter height
  • kw : Int32 - Filter width
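Examples#
A minimal sketch, assuming a channels-first input declared beforehand so the layer can infer its input shape:
net = Num::NN::Network.new(ctx) do
  input [1, 28, 28]
  conv2d 20, 5, 5
end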

#dropout(prob : Float = 0.5_f32) #

Adds a dropout layer for a neural network

Arguments#
  • prob : Float - Probability of a neuron being dropped out
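Examples#
A minimal sketch that drops each neuron with probability 0.25 during training:
net = Num::NN::Network.new(ctx) do
  input [2]
  linear 3
  relu
  dropout 0.25
end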

#elu #

Adds an ELU layer to the network
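
Examples#
A minimal sketch, used like the other activation layers on this page:
net = Num::NN::Network.new(ctx) do
  input [2]
  linear 3
  elu
end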

#flatten #

Adds a Flattening layer to a neural network
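
Examples#
A minimal sketch that flattens a convolutional feature map before a linear layer:
net = Num::NN::Network.new(ctx) do
  input [1, 28, 28]
  conv2d 20, 5, 5
  relu
  flatten
  linear 10
end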

#input(shape : Array(Int)) #

Adds an input layer to a Network. This is simply a wrapper around the input Tensor that allows layers further along in the network to infer input shapes

Arguments#
  • shape : Array(Int) - Shape of the input data
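Examples#
A minimal sketch; the layers that follow infer their input sizes from this shape:
net = Num::NN::Network.new(ctx) do
  input [2]
  linear 3
end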

#leaky_relu #

Adds a Leaky ReLU layer to a network.
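
Examples#
A minimal sketch, following the same pattern as relu below:
net = Num::NN::Network.new(ctx) do
  input [2]
  linear 3
  leaky_relu
end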

#linear(output_size : Int) #

Adds a linear layer to the Network. Since activation functions are treated as additional layers, this simply requires the output size of the transformation; the input size is inferred from the previous layer.

For example, if a dataset is 100x10 (100 samples, 10 features) and the hidden layer should contain 200 neurons, the layer is declared as linear 200: the input size of 10 is inferred, and the batch size of 100 is handled dynamically.

Arguments#
  • output_size : Int - The number of outputs in the linear layer
Examples#
net = Num::NN::Network.new(ctx) do
  input [2]
  linear 3
end

#maxpool(kernel : Tuple(Int, Int), padding : Tuple(Int, Int), stride : Tuple(Int, Int)) #

Adds a maxpool layer to a neural network

Arguments#
  • kernel : Tuple(Int, Int) - Kernel height and width
  • padding : Tuple(Int, Int) - Padding height and width
  • stride : Tuple(Int, Int) - Stride height and width
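Examples#
A minimal sketch that pools 2x2 windows with no padding and a stride of 2 (parentheses keep the tuple literals from being parsed as blocks):
net = Num::NN::Network.new(ctx) do
  input [1, 28, 28]
  conv2d 20, 5, 5
  maxpool({2, 2}, {0, 0}, {2, 2})
end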

#mse_loss #

Uses Mean Squared Error to compute the loss for the Network

Examples#
net = Num::NN::Network.new(ctx) do
  input [2]
  linear 3
  sigmoid
  linear 1
  sgd 0.7
  mse_loss
end

#optimizer : Num::NN::Optimizer(T) #

Returns the optimizer assigned to the Network

#relu #

Adds a ReLU layer to the Network. Activation functions are handled the same way as other layers, but do not change the dimensions of the input

Examples#
net = Num::NN::Network.new(ctx) do
  input [2]
  linear 3
  relu
end

#sgd(learning_rate : Float64 = 0.01) #

Adds an SGD optimizer to the Network.

Arguments#
  • learning_rate : Float64 - Learning rate for all layers in the Network
Examples#
net = Num::NN::Network.new(ctx) do
  input [2]
  linear 3
  sigmoid
  linear 1
  sgd 0.7
end

#sigmoid #

Adds a Sigmoid layer to the Network. Activation functions are handled the same way as other layers, but do not change the dimensions of the input

Examples#
net = Num::NN::Network.new(ctx) do
  input [2]
  linear 3
  sigmoid
end

#sigmoid_cross_entropy_loss #

Uses Sigmoid Cross Entropy to compute the loss for the Network

Examples#
net = Num::NN::Network.new(ctx) do
  input [2]
  linear 3
  sigmoid
  linear 1
  sgd 0.7
  sigmoid_cross_entropy_loss
end

#softmax_cross_entropy_loss #

Specifies Softmax Cross Entropy as the method of loss to be used with the Network
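
Examples#
A minimal sketch, mirroring the other loss methods in this class:
net = Num::NN::Network.new(ctx) do
  input [784]
  linear 10
  sgd 0.01
  softmax_cross_entropy_loss
end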

Macros#

method_missing(call) #
