class Tensor(T, S)
inherits Reference
#
Included modules
Enumerable
Constructors#
.new(data : S, shape : Array(Int32), strides : Array(Int32), offset : Int32, flags : Num::ArrayFlags, dtype : T.class = T)
#
(data : S, shape : Array(Int32), strides : Array(Int32), offset : Int32, flags : Num::ArrayFlags, dtype : T.class = T)
Initialize a Tensor from a storage instance, a shape, strides, an offset, flags, and a data type. This should primarily be used by internal methods, since it assumes the passed shape and strides correspond to the storage provided.
The dtype is required to infer T without having it explicitly provided
.new(data : S, shape : Array(Int), order : Num::OrderType = Num::RowMajor, dtype : T.class = T)
#
(data : S, shape : Array(Int), order : Num::OrderType = Num::RowMajor, dtype : T.class = T)
Initialize a Tensor from a storage instance, a shape, an order, and a data type. This should primarily be used by internal methods, since it assumes the contiguity of the storage.
The dtype is required to infer T without having it explicitly provided
.new(data : S, shape : Array(Int32), strides : Array(Int32), offset : Int32, dtype : T.class = T)
#
(data : S, shape : Array(Int32), strides : Array(Int32), offset : Int32, dtype : T.class = T)
Initialize a Tensor from a storage instance, a shape, strides, an offset, and a data type. This should primarily be used by internal methods, since it assumes the passed shape and strides correspond to the storage provided.
The dtype is required to infer T without having it explicitly provided
.new(shape : Array(Int), order : Num::OrderType = Num::RowMajor)
#
(shape : Array(Int), order : Num::OrderType = Num::RowMajor)
Initializes a Tensor onto a device with a provided shape and memory layout.
Examples#
a = Tensor(Float32).new([3, 3, 2], device: OCL(Float32)) # => GPU Tensor
b = Tensor(Float32).new([2, 3, 4]) # => CPU Tensor
.new(shape : Array(Int), value : T, device = CPU(T), order : Num::OrderType = Num::RowMajor)
#
(shape : Array(Int), value : T, device = CPU(T), order : Num::OrderType = Num::RowMajor)
Initializes a Tensor onto a device with a provided shape and memory layout, containing a specified value.
Examples#
a = Tensor.new([2, 2], 3.5) # => CPU Tensor filled with 3.5
.new(shape : Array(Int), order : Num::OrderType = Num::RowMajor, device = CPU, &block : Int32 -> T)
#
(shape : Array(Int), order : Num::OrderType = Num::RowMajor, device = CPU, &block : Int32 -> T)
Creates a Tensor from a block onto a specified device. The type of the Tensor is inferred from the return type of the block
Examples#
a = Tensor.new([3, 3, 2]) { |i| i } # => Int32 Tensor stored on a CPU
.new(m : Int, n : Int, device = CPU, &block : Int32, Int32 -> T)
#
(m : Int, n : Int, device = CPU, &block : Int32, Int32 -> T)
Creates a matrix Tensor from a block onto a specified device. The type of the Tensor is inferred from the return type of the block
Examples#
a = Tensor.new(3, 3) { |i, j| i / j } # => Float64 Tensor stored on a CPU
Class methods#
.beta(shape : Array(Int), a : Float, b : Float)
#
(shape : Array(Int), a : Float, b : Float)
Generates a Tensor containing a beta-distribution collection of values
Arguments#
- shape :
Array(Int)
- Shape of outputTensor
- a :
Float
- Shape parameter of distribution - b :
Float
- Shape parameter of distribution
Examples#
Num::Rand.set_seed(0)
a = Tensor(Float32, CPU(Float32)).beta([5], 0.1, 0.5)
puts a # => [0.000463782, 0.40858 , 1.67573e-07, 0.143055, 3.08452e-08]
.binomial(shape : Array(Int), n : Int, prob : Float) : Tensor(T, S)
#
(shape : Array(Int), n : Int, prob : Float) : Tensor(T, S)
Draw samples from a binomial distribution. Samples are drawn from a binomial distribution with specified parameters, n trials and prob probability of success where n is an integer >= 0 and prob is in the interval [0, 1].
Arguments#
- shape :
Array(Int)
- Shape of outputTensor
- n :
Int
- Number of trials - prob :
Float
- Probability of success on a single trial
Examples#
Num::Rand.set_seed(0)
a = Tensor(Float32, CPU(Float32)).binomial([5], 50, 0.5)
puts a # => [23, 30, 22, 18, 28]
.chisq(shape : Array(Int), df : Float)
#
(shape : Array(Int), df : Float)
Generates a Tensor containing chi-square distributed values
Arguments#
- shape :
Array(Int)
- Shape of outputTensor
- df :
Float
- Degrees of freedom
Examples#
Num::Rand.set_seed(0)
a = Tensor(Float32, CPU(Float32)).chisq([5], 30.0)
puts a # => [32.2738, 27.2351, 26.0258, 22.136 , 31.9774]
.exp(shape : Array(Int), scale : Float = 1.0)
#
(shape : Array(Int), scale : Float = 1.0)
Generates a Tensor containing exponentially distributed values
Arguments#
- shape :
Array(Int)
- Shape of outputTensor
- scale :
Float
- Scale of the distribution
Examples#
Num::Rand.set_seed(0)
a = Tensor(Float32, CPU(Float32)).exp([5])
puts a # => [0.697832 , 0.710307 , 1.35733 , 0.0423776, 0.209743 ]
.eye(m : Int, n : Int? = nil, offset : Int = 0)
#
(m : Int, n : Int? = nil, offset : Int = 0)
Return a two-dimensional Tensor
with ones along the diagonal,
and zeros elsewhere
Arguments#
- m :
Int
- Number of rows in the returnedTensor
- n :
Int?
- Number of columns in the Tensor, defaults to m if nil - offset :
Int
- Indicates which diagonal to fill with ones
Examples#
Tensor(Int8, CPU(Int8)).eye(3, offset: -1)
# [[0, 0, 0],
# [1, 0, 0],
# [0, 1, 0]]
Tensor(Int8, CPU(Int8)).eye(2)
# [[1, 0],
# [0, 1]]
.from_array(a : Array, device = CPU)
#
(a : Array, device = CPU)
Creates a Tensor from a standard library array onto a specified device. The type of Tensor is inferred from the innermost element type, and the Array's shape must be uniform along all subarrays.
Examples#
a = [[1, 2], [3, 4], [5, 6]]
Tensor.from_array(a, device: OCL) # => [3, 2] Tensor stored on a GPU
.from_npy(path : String) : Tensor(T, S)
#
(path : String) : Tensor(T, S)
Reads a .npy file and returns a Tensor
of the specified type.
If the ndarray is stored in a different type inside the file, it will be converted.
Arguments#
- path :
String
- Filename of npy file to load
Note
Only integer, unsigned integer and float ndarrays are supported at the moment. NOTE: Only little endian files can be read due to laziness by the num.cr developer
.from_slice(s : Slice, device = CPU, shape : Array(Int32)? = nil)
#
(s : Slice, device = CPU, shape : Array(Int32)? = nil)
Creates a Tensor from a standard library slice onto a specified device. The type of Tensor is inferred from the element type, and alternative shapes can be provided.
Examples#
s = Slice.new(200) { |i| (i + 10).to_u8 }
Tensor.from_slice(s, device: OCL) # => [200] Tensor stored on a GPU
.fsned(shape : Array(Int), df1 : Float, df2 : Float) : Tensor(T, S)
#
(shape : Array(Int), df1 : Float, df2 : Float) : Tensor(T, S)
Generates a Tensor containing f-snedecor distributed values
Arguments#
- shape :
Array(Int)
- Shape of outputTensor
- df1 :
Float
- Degrees of freedom of the underlying chi-square distribution, numerator side; usually mentioned as m. - df2 :
Float
- Degrees of freedom of the underlying chi-square distribution, denominator side; usually mentioned as n.
Examples#
Num::Rand.set_seed(0)
a = Tensor(Float32, CPU(Float32)).fsned([5], 30.0, 50.0)
puts a # => [1.15436 , 1.08983 , 0.971573, 1.75811 , 2.06518 ]
.full(shape : Array(Int), value : Number) : Tensor(T, S)
#
(shape : Array(Int), value : Number) : Tensor(T, S)
Creates a Tensor
of a provided shape, filled with a value. The generic type
is inferred from the value
Arguments#
- shape :
Array(Int)
- shape of returnedTensor
Examples#
t = Tensor(Int8, CPU(Int8)).full([3], 1) # => [1, 1, 1]
.full_like(t : Tensor, value : Number) : Tensor(T, S)
#
(t : Tensor, value : Number) : Tensor(T, S)
Creates a Tensor
filled with a value, sharing the shape of another
provided Tensor
Arguments#
Examples#
t = Tensor(Int8, CPU(Int8)).new([3]) &.to_f
u = Tensor(Int8, CPU(Int8)).full_like(t, 3) # => [3, 3, 3]
.gamma(t_shape : Array(Int), shape : Float, scale : Float = 1.0) : Tensor(T, S)
#
(t_shape : Array(Int), shape : Float, scale : Float = 1.0) : Tensor(T, S)
Generate a gamma-distributed, pseudo-random Tensor
Arguments#
- t_shape :
Array(Int)
- Shape of outputTensor
- shape :
Float
- shape parameter of the distribution; usually mentioned as k - scale : Float - scale parameter of the distribution; usually mentioned as θ
Examples#
Num::Rand.set_seed(0)
a = Tensor(Float32, CPU(Float32)).gamma([5], 0.5)
puts a # => [0.169394 , 0.0336937, 0.921517 , 0.0210972, 0.0487926]
.geometric_space(start : T, stop : T, num : Int = 50, endpoint : Bool = true, device = CPU)
#
(start : T, stop : T, num : Int = 50, endpoint : Bool = true, device = CPU)
Return numbers spaced evenly on a log scale (a geometric progression).
This is similar to logspace
, but with endpoints specified directly.
Each output sample is a constant multiple of the previous.
Arguments#
- start :
T
- Start of interval - stop :
T
- End of interval - num :
Int
- Number of samples - endpoint :
Bool
- Indicates if endpoint should be included in the results - device :
Num::Storage
Examples#
Tensor.geometric_space(1_f32, 1000_f32, 4) # => [1, 10, 100, 1000]
.identity(n : Int)
#
(n : Int)
Returns an identity Tensor
with ones along the diagonal,
and zeros elsewhere
Arguments#
- n :
Int
- Number of rows and columns in output
Examples#
Tensor(Int8, CPU(Int8)).identity(2)
# [[1, 0],
# [0, 1]]
.laplace(shape : Array(Int), loc : Float = 0.0, scale : Float = 1.0) : Tensor(T, S)
#
(shape : Array(Int), loc : Float = 0.0, scale : Float = 1.0) : Tensor(T, S)
Generate a laplace-distributed, pseudo-random Tensor
Arguments#
- shape :
Array(Int)
- Shape of outputTensor
- loc :
Float
- Centrality parameter, or mean of the distribution; usually mentioned as μ - scale : Float - scale parameter of the distribution; usually mentioned as b
Examples#
Num::Rand.set_seed(0)
a = Tensor(Float32, CPU(Float32)).laplace([5], 0.5)
puts a # => [0.305384 , 0.601509 , 0.247952 , -3.34791 , -0.502075]
.linear_space(start : T, stop : T, num : Int = 50, endpoint = true, device = CPU)
#
(start : T, stop : T, num : Int = 50, endpoint = true, device = CPU)
Return evenly spaced numbers over a specified interval.
Returns num
evenly spaced samples, calculated over the
interval [start
, stop
].
The endpoint of the interval can optionally be excluded.
Arguments#
- start :
T
- Start of interval - stop :
T
- End of interval - num :
Int
- Number of samples - endpoint :
Bool
- Indicates if endpoint of the interval should be included in the results - device :
Num::Storage
- Backend for theTensor
Examples#
Tensor.linear_space(0_f32, 1_f32, 5) # => [0.0, 0.25, 0.5, 0.75, 1.0]
Tensor.linear_space(0_f32, 1_f32, 5, endpoint: false) # => [0.0, 0.2, 0.4, 0.6, 0.8]
.logarithmic_space(start : T, stop : T, num = 50, endpoint = true, base : T = T.new(10.0), device = CPU)
#
(start : T, stop : T, num = 50, endpoint = true, base : T = T.new(10.0), device = CPU)
Return numbers spaced evenly on a log scale.
In linear space, the sequence starts at base ** start
(base
to the power of start
) and ends with base ** stop
(see endpoint
below).
Arguments#
- start :
T
- Start of interval - stop :
T
- End of interval - num :
Int
- Number of samples - endpoint :
Bool
- Indicates if endpoint should be included in the results - device :
Num::Storage
- Backend for theTensor
Examples#
Tensor.logarithmic_space(2.0, 3.0, num: 4)
# [100 , 215.443, 464.159, 1000 ]
.lognormal(shape : Array(Int), loc : Float = 0.0, sigma : Float = 1.0) : Tensor(T, S)
#
(shape : Array(Int), loc : Float = 0.0, sigma : Float = 1.0) : Tensor(T, S)
Generate a log-normal-distributed, pseudo-random Tensor
Arguments#
- shape :
Array(Int)
- Shape of outputTensor
- loc :
Float
- centrality parameter, or mean of the underlying normal distribution; usually mentioned as μ - sigma :
Float
- scale parameter, or standard deviation of the underlying normal distribution; usually mentioned as σ
Examples#
Num::Rand.set_seed(0)
a = Tensor(Float32, CPU(Float32)).lognormal([5], 0.5)
puts a # => [1.41285 , 5.00594 , 0.766401, 1.61069 , 2.29073 ]
.multinomial(input : Tensor(T, S), num_samples : Int32)
#
(input : Tensor(T, S), num_samples : Int32)
Draw samples from a multinomial distribution.
Returns a Tensor where each row contains num_samples
samples from the multinomial distribution
located in the corresponding row of Tensor input
.
The rows of input
do not need to be normalized, but must sum to a positive number.
If input
is a vector (1-D Tensor), returns a vector of length num_samples
If input
is a matrix (2-D Tensor), returns a matrix where each row contains num_samples
samples, with shape (m x num_samples
).
Arguments#
- input :
Tensor
- Tensor containing probabilities of different outcomes - num_samples :
Int
- Number of samples to draw from the multinomial distribution
Examples#
Num::Rand.set_seed(0)
input = [[0.5, 0.5], [0.5, 0.5]].to_tensor
a = Tensor.multinomial(input, 5)
puts a # => [[0, 1, 1, 0, 1], [1, 0, 1, 1, 0]]
input2 = [0.5, 0.5, 0.5, 0.5].to_tensor
b = Tensor.multinomial(input2, 6)
puts b # => [3, 2, 1, 1, 0, 2]
.normal(shape : Array(Int), loc = 0.0, sigma = 1.0) : Tensor(T, S)
#
(shape : Array(Int), loc = 0.0, sigma = 1.0) : Tensor(T, S)
Generate a Tensor containing a normal-distribution collection of values
Arguments#
- shape :
Array(Int)
- Shape ofTensor
to create - loc :
Float
- Centrality parameter - sigma :
Float
- Standard deviation
Examples#
Num::Rand.set_seed(0)
a = Tensor(Float32, CPU(Float32)).normal([5], 0.5)
puts a # => [0.345609, 1.61063 , -0.26605, 0.476662, 0.828871]
.ones(shape : Array(Int)) : Tensor(T, S)
#
(shape : Array(Int)) : Tensor(T, S)
Creates a Tensor
of a provided shape, filled with 1. The generic type
must be specified.
Arguments#
- shape :
Array(Int)
- shape of returnedTensor
Examples#
t = Tensor(Int8, CPU(Int8)).ones([3]) # => [1, 1, 1]
.ones_like(t : Tensor) : Tensor(T, S)
#
(t : Tensor) : Tensor(T, S)
Creates a Tensor
filled with 1, sharing the shape of another
provided Tensor
Arguments#
Examples#
t = Tensor(Int8, CPU(Int8)).new([3]) &.to_f
u = Tensor(Int8, CPU(Int8)).ones_like(t) # => [1, 1, 1]
.poisson(shape : Array(Int), lam : Float = 1.0) : Tensor(T, S)
#
(shape : Array(Int), lam : Float = 1.0) : Tensor(T, S)
Generate a poisson-distributed, pseudo-random Tensor(Int64)
Arguments#
- shape :
Array(Int)
- Shape of outputTensor
- lam :
Float
- Separation parameter of the distribution; usually mentioned as λ
Examples#
Num::Rand.set_seed(0)
a = Tensor(Int64, CPU(Int64)).poisson([5])
puts a # => [1, 0, 1, 0, 3]
.rand(shape : Array(Int))
#
(shape : Array(Int))
Generate random floating point values between 0 and 1
Arguments#
- shape :
Array(Int)
Shape ofTensor
to generate
Examples#
Num::Rand.set_seed(0)
a = Tensor(Float32, CPU(Float32)).rand([5])
puts a # => [0.411575 , 0.548264 , 0.388604 , 0.0106621, 0.183558 ]
.random(r : Range(U, U), shape : Array(Int), device = CPU) forall U
#
(r : Range(U, U), shape : Array(Int), device = CPU) forall U
Creates a Tensor
sampled from a provided range, with a given
shape.
The generic types of the Tensor
are inferred from the endpoints
of the range
Arguments#
- r :
Range(U, U)
- Range of values to sample between - shape :
Array(Int)
- Shape of returnedTensor
Examples#
Num::Rand.set_seed(0)
t = Tensor.random(0...10, [2, 2])
t
# [[8, 4],
# [7, 4]]
.range(start : T, stop : T, step : T, device = CPU)
#
(start : T, stop : T, step : T, device = CPU)
Creates a flat Tensor
containing a monotonically increasing
or decreasing range. The generic type is inferred from
the inputs to the method
Arguments#
- start :
T
- Beginning value for the range - stop :
T
- End value for the range - step :
T
- Offset between values of the range
Examples#
Tensor.range(0, 5, 2) # => [0, 2, 4]
Tensor.range(5, 0, -1) # => [5, 4, 3, 2, 1]
Tensor.range(0.0, 3.5, 0.7) # => [0 , 0.7, 1.4, 2.1, 2.8]
.range(start : T, stop : T, device = CPU)
#
(start : T, stop : T, device = CPU)
Creates a flat Tensor
containing a monotonically increasing
or decreasing range. The generic type is inferred from
the inputs to the method
Arguments#
- start :
T
- Beginning value for the range - stop :
T
- End value for the range - step :
T
- Offset between values of the range
Examples#
Tensor.range(0, 5, 2) # => [0, 2, 4]
Tensor.range(5, 0, -1) # => [5, 4, 3, 2, 1]
Tensor.range(0.0, 3.5, 0.7) # => [0 , 0.7, 1.4, 2.1, 2.8]
.range(stop : T, device = CPU)
#
(stop : T, device = CPU)
Creates a flat Tensor
containing a monotonically increasing
or decreasing range. The generic type is inferred from
the inputs to the method
Arguments#
- start :
T
- Beginning value for the range - stop :
T
- End value for the range - step :
T
- Offset between values of the range
Examples#
Tensor.range(0, 5, 2) # => [0, 2, 4]
Tensor.range(5, 0, -1) # => [5, 4, 3, 2, 1]
Tensor.range(0.0, 3.5, 0.7) # => [0 , 0.7, 1.4, 2.1, 2.8]
.t_student(shape : Array(Int), df : Float) : Tensor(T, S)
#
(shape : Array(Int), df : Float) : Tensor(T, S)
Generate a t-student-distributed, pseudo-random Tensor
Arguments#
- shape :
Array(Int)
- Shape of outputTensor
- df :
Float
- degrees of freedom of the distribution; usually mentioned as n
Examples#
Num::Rand.set_seed(0)
a = Tensor(Float32, CPU(Float32)).t_student([5], 30.0)
puts a # => [-0.148853, -0.803994, 0.353089 , -1.25613 , -0.141144]
.zeros(shape : Array(Int)) : Tensor(T, S)
#
(shape : Array(Int)) : Tensor(T, S)
Creates a Tensor
of a provided shape, filled with 0. The generic type
must be specified.
Arguments#
- shape :
Array(Int)
- shape of returnedTensor
Examples#
t = Tensor(Int8).zeros([3]) # => [0, 0, 0]
.zeros_like(t : Tensor) : Tensor(T, S)
#
(t : Tensor) : Tensor(T, S)
Creates a Tensor
filled with 0, sharing the shape of another
provided Tensor
Arguments#
Examples#
t = Tensor(Int8, CPU(Int8)).new([3]) &.to_f
u = Tensor(Int8, CPU(Int8)).zeros_like(t) # => [0, 0, 0]
Methods#
#!=(other)
#
(other)
Implements the != operator for two Tensor
s element-wise.
Arguments#
Examples#
a = Tensor.from_array [1, 2, 3]
a != a
#%(other)
#
(other)
Return element-wise remainder of division for two Tensor
s elementwise
Arguments#
Examples#
a = Tensor.from_array [1.5, 2.2, 3.2]
a % a
#&(other)
#
(other)
Compute the bit-wise AND of two Tensor
s element-wise.
Arguments#
Examples#
a = Tensor.from_array [1, 2, 3]
a & a
#*(other)
#
View source
(other)
#**(other)
#
(other)
Exponentiates two Tensor
s elementwise
Arguments#
Examples#
a = Tensor.from_array [1.5, 2.2, 3.2]
a ** a
#+(other)
#
View source
(other)
#-(other)
#
View source
(other)
#-
#
View source
#/(other)
#
View source
(other)
#//(other)
#
(other)
Floor divides two Tensor
s elementwise
Arguments#
Examples#
a = Tensor.from_array [1.5, 2.2, 3.2]
a // a
#<(other)
#
(other)
Implements the < operator for two Tensor
s element-wise.
Arguments#
Examples#
a = Tensor.from_array [1, 2, 3]
a < a
#<<(other)
#
(other)
Shift the bits of an integer to the left. Bits are shifted to the left by appending x2 0s at the right of x1. Since the internal representation of numbers is in binary format, this operation is equivalent to multiplying x1 by 2**x2.
Arguments#
Examples#
a = Tensor.from_array [1, 2, 3]
a << a
#<=(other)
#
(other)
Implements the <= operator for two Tensor
s element-wise.
Arguments#
Examples#
a = Tensor.from_array [1, 2, 3]
a <= a
#==(other)
#
(other)
Implements the == operator for two Tensor
s element-wise.
Arguments#
Examples#
a = Tensor.from_array [1, 2, 3]
a == a
#>(other)
#
(other)
Implements the > operator for two Tensor
s element-wise.
Arguments#
Examples#
a = Tensor.from_array [1, 2, 3]
a > a
#>=(other)
#
(other)
Implements the >= operator for two Tensor
s element-wise.
Arguments#
Examples#
a = Tensor.from_array [1, 2, 3]
a >= a
#>>(other)
#
(other)
Shift the bits of an integer to the right.
Bits are shifted to the right x2. Because the internal representation of numbers is in binary format, this operation is equivalent to dividing x1 by 2**x2.
Arguments#
Examples#
a = Tensor.from_array [1, 2, 3]
a >> a
#[](args : Array) : Tensor(T, S)
#
(args : Array) : Tensor(T, S)
Returns a view of a Tensor
from any valid indexers. This view
must be able to be represented as valid strided/shaped view, slicing
as a copy is not supported.
When an Integer argument is passed, an axis will be removed from
the Tensor
, and a view at that index will be returned.
a = Tensor.new([2, 2]) { |i| i }
a[0] # => [0, 1]
When a Range argument is passed, an axis will be sliced based on the endpoints of the range.
a = Tensor.new([2, 2, 2]) { |i| i }
a[1...]
# [[[4, 5],
# [6, 7]]]
When a Tuple containing a Range and an Integer step is passed, an axis is sliced based on the endpoints of the range, and the strides of the axis are updated to reflect the step. Negative steps will reflect the array along an axis.
a = Tensor.new([2, 2]) { |i| i }
a[{..., -1}]
# [[2, 3],
# [0, 1]]
#[](indices : Tensor) : Tensor(T, S)
#
(indices : Tensor) : Tensor(T, S)
An indexing operation that accepts another Tensor for advanced indexing capabilities.
This method allows you to index a tensor by using another tensor as indices. It's particularly useful when you need to select or change complex patterns that can't be achieved with simple slicing.
The shape of the returned tensor is determined by the shape of the indices
tensor concatenated with the remaining dimensions of the original tensor.
Each value in indices
selects a value from the original tensor at the corresponding index.
If the corresponding index is a vector, then the returned tensor will include the vector as an additional dimension.
Parameters:#
indices
- A Tensor that contains the indices to index the original tensor.
Returns:#
A new tensor containing the values of the original tensor indexed by indices
.
Examples:#
t = Tensor.new([[1, 2], [3, 4], [5, 6]])
indices = Tensor.new([[0, 2]])
result = t[indices] # Returns: Tensor([[1, 2], [5, 6]])
#[](*args) : Tensor(T, S)
#
(*args) : Tensor(T, S)
Returns a view of a Tensor
from any valid indexers. This view
must be able to be represented as valid strided/shaped view, slicing
as a copy is not supported.
When an Integer argument is passed, an axis will be removed from
the Tensor
, and a view at that index will be returned.
a = Tensor.new([2, 2]) { |i| i }
a[0] # => [0, 1]
When a Range argument is passed, an axis will be sliced based on the endpoints of the range.
a = Tensor.new([2, 2, 2]) { |i| i }
a[1...]
# [[[4, 5],
# [6, 7]]]
When a Tuple containing a Range and an Integer step is passed, an axis is sliced based on the endpoints of the range, and the strides of the axis are updated to reflect the step. Negative steps will reflect the array along an axis.
a = Tensor.new([2, 2]) { |i| i }
a[{..., -1}]
# [[2, 3],
# [0, 1]]
#[]=(args : Array, value)
#
(args : Array, value)
The primary method of setting Tensor values. The slicing behavior
for this method is identical to the []
method.
If a Tensor
is passed as the value to set, it will be broadcast
to the shape of the slice if possible. If a scalar is passed, it will
be tiled across the slice.
Arguments#
- args :
Array
- Array of arguments. All but the last argument must be valid indexer, so aRange
,Int
, orTuple(Range, Int)
. - value :
Tensor
|Number
- Value to assign to the slice
Examples#
a = Tensor.new([2, 2]) { |i| i }
a[1.., 1..] = 99
a
# [[ 0, 1],
# [ 2, 99]]
#[]=(*args : *U) forall U
#
(*args : *U) forall U
The primary method of setting Tensor values. The slicing behavior
for this method is identical to the []
method.
If a Tensor
is passed as the value to set, it will be broadcast
to the shape of the slice if possible. If a scalar is passed, it will
be tiled across the slice.
Arguments#
- args :
*U
- Tuple of arguments. All but the last argument must be valid indexer, so aRange
,Int
, orTuple(Range, Int)
. The final argument passed is used to set the values of theTensor
. It can be either aTensor
, or a scalar value.
Examples#
a = Tensor.new([2, 2]) { |i| i }
a[1.., 1..] = 99
a
# [[ 0, 1],
# [ 2, 99]]
#^(other)
#
(other)
Compute the bit-wise XOR of two Tensor
s element-wise.
Arguments#
Examples#
a = Tensor.from_array [1, 2, 3]
a ^ a
#accumulate(&block : T, T -> T) : Tensor(T, S)
#
(&block : T, T -> T) : Tensor(T, S)
Returns a Tensor containing the successive values of applying a binary operation, specified by the given block, to this Tensor's elements.
For each element in the Tensor the block is passed an accumulator value and the element. The result becomes the new value for the accumulator and is also appended to the returned Tensor. The initial value for the accumulator is the first element in the Tensor.
Examples#
[2, 3, 4, 5].to_tensor.accumulate { |x, y| x * y } # => [2, 6, 24, 120]
#acos
#
View source
#acosh
#
View source
#add(other)
#
View source
(other)
#all(axis : Int, dims : Bool = false)
#
(axis : Int, dims : Bool = false)
Reduces a Tensor
along an axis, asserting the truthiness of all values
in each view into the Tensor
Arguments#
- axis :
Int
- Axis of reduction - dims :
Bool
- Indicate if the axis of reduction should remain in the result
Examples#
a = Tensor.new([2, 2]) { |i| i }
a.all(0) # => [false, true]
a.all(1, dims: true)
# [[false],
# [ true]]
#all : Bool
#
: Bool
Reduces a Tensor
to a boolean by asserting the truthiness of
all elements
Examples#
a = [0, 2, 3].to_tensor
a.all # => false
#all_close(other : Tensor, epsilon : Float = 1e-6) : Bool
#
(other : Tensor, epsilon : Float = 1e-6) : Bool
Asserts that two Tensor
s are equal, allowing for small
margins of errors with floating point values using
an EPSILON value.
Arguments#
- other :
Tensor
-Tensor
to compare toself
- epsilon :
Float
- Margin of error to accept between elements
Examples#
a = [0.0, 0.0, 0.0000000001].to_tensor
b = [0.0, 0.0, 0.0].to_tensor
a.all_close(b) # => true
a.all_close(b, 1e-12) # => false
#any(axis : Int, dims : Bool = false)
#
(axis : Int, dims : Bool = false)
Reduces a Tensor
along an axis, asserting the truthiness of any values
in each view into the Tensor
Arguments#
- axis :
Int
- Axis of reduction - dims :
Bool
- Indicate if the axis of reduction should remain in the result
Examples#
a = Tensor.new([2, 2]) { |i| i }
a.any(0) # => [true, true]
a.any(1, dims: true)
# [[true],
# [ true]]
#any : Bool
#
: Bool
Reduces a Tensor
to a boolean by asserting the truthiness of
any element
Examples#
a = [0, 2, 3].to_tensor
a.any # => true
#argmax(axis : Int, dims : Bool = false)
#
(axis : Int, dims : Bool = false)
Find the maximum index value of a Tensor along an axis
Arguments#
- axis :
Int
- Axis of reduction - dims :
Bool
- Indicate if the axis of reduction should remain in the result
Examples#
a = [[4, 2], [0, 1]].to_tensor
a.argmax(1) # => [0, 1]
#argmax : Int32
#
View source
: Int32
#argmin(axis : Int, dims : Bool = false)
#
(axis : Int, dims : Bool = false)
Find the minimum index value of a Tensor along an axis
Arguments#
- axis :
Int
- Axis of reduction - dims :
Bool
- Indicate if the axis of reduction should remain in the result
Examples#
a = [[4, 2], [0, 1]].to_tensor
a.argmin(1) # => [1, 0]
#argmin : Int32
#
View source
: Int32
#as_strided(shape : Array(Int), strides : Array(Int)) : Tensor(T, S)
#
(shape : Array(Int), strides : Array(Int)) : Tensor(T, S)
as_strided
creates a view into the Tensor
given the exact strides
and shape. This means it manipulates the internal data structure
of a Tensor
and, if done incorrectly, the array elements can point
to invalid memory and can corrupt results or crash your program.
It is advisable to always use the original strides
when
calculating new strides to avoid reliance on a contiguous
memory layout.
Furthermore, Tensor
s created with this function often contain
self overlapping memory, so that two elements are identical.
Vectorized write operations on such Tensor
s will typically be
unpredictable. They may even give different results for small,
large, or transposed Tensor
s.
Arguments#
- shape :
Array(Int)
- Shape of the newTensor
- strides :
Array(Int)
- Strides of the newTensor
Examples#
a = Tensor.from_array [1, 2, 3]
a.as_strided([3, 3], [0, 1])
# [[1, 2, 3],
# [1, 2, 3],
# [1, 2, 3]]
#as_type(dtype : U.class) forall U
#
(dtype : U.class) forall U
Converts a Tensor to a given dtype. No rounding is done on floating point values.
Arguments#
- dtype :
U.class
- desired data type of the returnedTensor
Examples#
a = Tensor.from_array [1.5, 2.2, 3.2]
a.as_type(Int32) # => [1, 2, 3]
#asinh
#
View source
#atanh
#
View source
#besselj0
#
View source
#besselj1
#
View source
#bessely0
#
View source
#bessely1
#
View source
#bitwise_and(other)
#
(other)
Compute the bit-wise AND of two Tensor
s element-wise.
Arguments#
Examples#
a = Tensor.from_array [1, 2, 3]
a & a
#bitwise_or(other)
#
(other)
Compute the bit-wise OR of two Tensor
s element-wise.
Arguments#
Examples#
a = Tensor.from_array [1, 2, 3]
a | a
#bitwise_xor(other)
#
(other)
Compute the bit-wise XOR of two Tensor
s element-wise.
Arguments#
Examples#
a = Tensor.from_array [1, 2, 3]
a ^ a
#broadcast(other1 : Tensor(U, V), other2 : Tensor(W, X)) forall U, V, W, X
#
(other1 : Tensor(U, V), other2 : Tensor(W, X)) forall U, V, W, X
Broadcasts three Tensors to a new shape. This allows
for elementwise operations between the three Tensors with the
new shape.
Broadcasting rules apply, and incompatible shapes will raise an error.
Arguments#
Examples#
a = Tensor.from_array [1, 2, 3]
b = Tensor.new([3, 3]) { |i| i }
c = Tensor.new([3, 3, 3]) { |i| i }
x, y, z = a.broadcast(b, c)
x.shape # => [3, 3, 3]
#broadcast(other : Tensor(U, V)) forall U, V
#
(other : Tensor(U, V)) forall U, V
Broadcasts two Tensors to a new shape. This allows
for elementwise operations between the two Tensors with the
new shape.
Broadcasting rules apply, and incompatible shapes will raise an error.
Arguments#
- other :
Tensor
- RHS of the broadcast
Examples#
a = Tensor.from_array [1, 2, 3]
b = Tensor.new([3, 3]) { |i| i }
x, y = a.broadcast(b)
x.shape # => [3, 3]
#broadcast_to(shape : Array(Int)) : Tensor(T, S)
#
(shape : Array(Int)) : Tensor(T, S)
Broadcasts a Tensor
to a new shape. Returns a read-only
view of the original Tensor
. Many elements in the Tensor
will refer to the same memory location, and the result is
rarely contiguous.
Shapes must be broadcastable, and an error will be raised if broadcasting fails.
Arguments#
- shape :
Array(Int)
- The shape of the desired outputTensor
Examples#
a = Tensor.from_array [1, 2, 3]
a.broadcast_to([3, 3])
# [[1, 2, 3],
# [1, 2, 3],
# [1, 2, 3]]
#cbrt
#
View source
#cholesky!(*, lower = true)
#
(*, lower = true)
Cholesky decomposition.
Return the Cholesky decomposition, L * L.H, of the square matrix a, where L is lower-triangular and .H is the conjugate transpose operator (which is the ordinary transpose if a is real-valued). a must be Hermitian (symmetric if real-valued) and positive-definite. Only L is actually returned.
Arguments#
- lower :
Bool
- Which triangular of decomposition to return
Examples#
t = [[2, -1, 0], [-1, 2, -1], [0, -1, 2]].to_tensor.as_type(Float32)
t.cholesky
# [[ 1.414, 0.0, 0.0],
# [-0.707, 1.225, 0.0],
# [ 0.0, -0.816, 1.155]]
#cosh
#
View source
#cpu : Tensor(T, CPU(T))
#
: Tensor(T, CPU(T))
Places a Tensor onto a CPU backend. No copy is done if the Tensor is already on a CPU
Arguments#
Examples#
a = Tensor(Float32, OCL(Float32)).ones([3])
a.cpu # => [1, 1, 1]
#det
#
Compute the determinant of an array.
Examples#
t = [[1, 2], [3, 4]].to_tensor.as_type(Float32)
puts t.det # => -2.0
#diagonal : Tensor(T, S)
#
: Tensor(T, S)
Returns a view of the diagonal of a Tensor
. This method only works
for two-dimensional arrays.
Todo
Implement views for offset diagonals
Examples#
a = Tensor.new(3, 3) { |i, _| i }
a.diagonal # => [0, 1, 2]
#divide(other)
#
View source
(other)
#dot(u : Tensor(T, S))
#
(u : Tensor(T, S))
DOT forms the dot product of two vectors. Uses unrolled loops for increments equal to one.
Arguments#
- u :
Tensor
- Right hand side of the dot product
Examples#
a = [1, 2, 3, 4, 5].to_tensor
a.dot(a) # => 55.0
#dup(order : Num::OrderType = Num::RowMajor)
#
(order : Num::OrderType = Num::RowMajor)
Deep-copies a Tensor
. If an order is provided, the returned
Tensor
's memory layout will respect that order.
Arguments#
- order :
Num::OrderType
- Memory layout to use for the returnedTensor
Examples#
a = Tensor.from_array [1, 2, 3]
a.dup # => [1, 2, 3]
#each
#
Yields the elements of a Tensor
, always in RowMajor order,
as if the Tensor
was flat.
Examples#
a = Tensor.new(2, 2) { |i| i }
a.each do |el|
puts el
end
# 0
# 1
# 2
# 3
#each
#
Yields the elements of a Tensor
lazily, always in RowMajor order,
as if the Tensor
was flat.
Examples#
a = Tensor.new(2, 2) { |i| i }
iter = a.each
a.size.times do
puts iter.next.value
end
# 0
# 1
# 2
# 3
#each_axis(axis : Int, dims : Bool = false)
#
(axis : Int, dims : Bool = false)
Yields a view of each lane of an axis
. Changes made in
the passed block will be reflected in the original Tensor
Arguments#
- axis :
Int
- Axis of reduction - dims :
Bool
- Indicates if the axis of reduction should be removed from the result
Examples#
a = Tensor.new([3, 3]) { |i| i }
a.each_axis(1) do |ax|
puts ax
end
# [0, 3, 6]
# [1, 4, 7]
# [2, 5, 8]
#each_axis(axis : Int, dims : Bool = false)
#
(axis : Int, dims : Bool = false)
Returns an iterator along each element of an axis
.
Each element returned by the iterator is a view, not a copy
Arguments#
- axis :
Int
- Axis of reduction - dims :
Bool
- Indicates if the axis of reduction should be removed from the result
Examples#
a = Tensor.new([3, 3]) { |i| i }
a.each_axis(1).next # => [0, 3, 6]
#each_pointer
#
Yields the memory locations of each element of a Tensor
, always in
RowMajor order, as if the Tensor
was flat.
This should primarily be used by internal methods. Methods such
as map!
provide more convenient access to editing the values
of a Tensor
Examples#
a = Tensor.new(2, 2) { |i| i }
a.each_pointer do |el|
puts el.value
end
# 0
# 1
# 2
# 3
#each_pointer_with_index
#
Yields the memory locations of each element of a Tensor
, always in
RowMajor order, as if the Tensor
was flat. Also yields the flat
index of a Tensor
This should primarily be used by internal methods. Methods such
as map!
provide more convenient access to editing the values
of a Tensor
Examples#
a = Tensor.new(2, 2) { |i| i }
a.each_pointer_with_index do |el, i|
puts "#{el.value}_#{i}"
end
# 0_0
# 1_1
# 2_2
# 3_3
#each_with_index
#
Yields the elements of a Tensor
, always in RowMajor order,
as if the Tensor
was flat. Also yields the flat index of each
element.
Examples#
a = Tensor.new(2, 2) { |i| i }
a.each_with_index do |el, i|
puts "#{el}_#{i}"
end
# 0_0
# 1_1
# 2_2
# 3_3
#eig
#
Compute the eigenvalues and right eigenvectors of a square array.
Examples#
t = [[0, 1], [1, 1]].to_tensor.as_type(Float32)
w, v = t.eig
puts w
puts v
# [-0.618034, 1.61803 ]
# [[-0.850651, 0.525731 ],
# [-0.525731, -0.850651]]
#eigh
#
Compute the eigenvalues and right eigenvectors of a square Tensor
.
Examples#
t = [[0, 1], [1, 1]].to_tensor.as_type(Float32)
w, v = t.eigh
puts w
puts v
# [-0.618034, 1.61803 ]
# [[-0.850651, 0.525731 ],
# [0.525731 , 0.850651 ]]
#eigvals
#
Compute the eigenvalues of a general matrix.
Main difference between eigvals and eig: the eigenvectors aren’t returned.
Examples#
t = [[0, 1], [1, 1]].to_tensor.as_type(Float32)
puts t.eigvals
# [-0.618034, 1.61803 ]
#eigvalsh
#
Compute the eigenvalues of a symmetric matrix.
Main difference between eigvalsh and eigh: the eigenvectors aren’t returned.
Examples#
t = [[0, 1], [1, 1]].to_tensor.as_type(Float32)
puts t.eigvalsh
# [-0.618034, 1.61803 ]
#equal(other)
#
(other)
Implements the == operator for two Tensor
s element-wise.
Arguments#
Examples#
a = Tensor.from_array [1, 2, 3]
a == a
#expand_dims(axis : Int) : Tensor(T, S)
#
(axis : Int) : Tensor(T, S)
Expands the dimensions of a Tensor
, along a single axis
Arguments#
- axis :
Int
- Axis on which to expand dimensions
Examples#
a = Tensor.new([2, 2]) { |i| i }
a.expand_dims(1)
# [[[0, 1]],
#
# [[2, 3]]]
#expm1
#
View source
#flags : Num::ArrayFlags
#
: Num::ArrayFlags
Returns the flags of a Tensor, describing its memory and read status
Examples#
a = Tensor(Float32, CPU(Float32)).new([2, 3, 4])
b = a[..., 1]
a.flags # => CONTIGUOUS | OWNDATA | WRITE
b.flags # => WRITE
#flat : Tensor(T, S)
#
: Tensor(T, S)
Flattens a Tensor
to a single dimension. If a view can be created,
the reshape operation will not copy data.
Examples#
a = Tensor.new([2, 2]) { |i| i }
a.flat # => [0, 1, 2, 3]
#flip(axis : Int) : Tensor(T, S)
#
(axis : Int) : Tensor(T, S)
Flips a Tensor
along an axis, returning a view
Arguments#
- axis :
Int
- Axis to flip
Examples#
a = [[1, 2, 3], [4, 5, 6]]
a.flip(1)
# [[3, 2, 1],
# [6, 5, 4]]
#flip : Tensor(T, S)
#
: Tensor(T, S)
Flips a Tensor
along all axes, returning a view
Examples#
a = [[1, 2, 3], [4, 5, 6]]
a.flip
# [[6, 5, 4],
# [3, 2, 1]]
#floordiv(other)
#
(other)
Floor divides two Tensor
s elementwise
Arguments#
Examples#
a = Tensor.from_array [1.5, 2.2, 3.2]
a // a
#gamma
#
View source
#greater(other)
#
(other)
Implements the > operator for two Tensor
s element-wise.
Arguments#
Examples#
a = Tensor.from_array [1, 2, 3]
a > a
#greater_equal(other)
#
(other)
Implements the >= operator for two Tensor
s element-wise.
Arguments#
Examples#
a = Tensor.from_array [1, 2, 3]
a >= a
#hessenberg
#
Compute Hessenberg form of a matrix.
The Hessenberg decomposition is:
A = Q H Q^H
where Q is unitary/orthogonal and H has only zero elements below the first sub-diagonal.
Examples#
a = [[2, 5, 8, 7],
[5, 2, 2, 8],
[7, 5, 6, 6],
[5, 4, 4, 8]].to_tensor.as_type(Float64)
puts a.hessenberg
# [[2 , -11.6584, 1.42005 , 0.253491],
# [-9.94987, 14.5354 , -5.31022, 2.43082 ],
# [0 , -1.83299, 0.3897 , -0.51527],
# [0 , 0 , -3.8319 , 1.07495 ]]
#ilogb
#
View source
#inv
#
Compute the (multiplicative) inverse of a matrix.
Given a square matrix a, return the matrix ainv satisfying dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])
Examples#
t = [[1, 2], [3, 4]].to_tensor.as_type(Float32)
puts t.inv
# [[-2 , 1 ],
# [1.5 , -0.5]]
#left_shift(other)
#
(other)
Shift the bits of an integer to the left. Bits are shifted to the left by appending x2 0s at the right of x1. Since the internal representation of numbers is in binary format, this operation is equivalent to multiplying x1 by 2**x2.
Arguments#
Examples#
a = Tensor.from_array [1, 2, 3]
a << a
#less(other)
#
(other)
Implements the < operator for two Tensor
s element-wise.
Arguments#
Examples#
a = Tensor.from_array [1, 2, 3]
a < a
#less_equal(other)
#
(other)
Implements the <= operator for two Tensor
s element-wise.
Arguments#
Examples#
a = Tensor.from_array [1, 2, 3]
a <= a
#lgamma
#
View source
#log10
#
View source
#log1p
#
View source
#map(d1 : Tensor, d2 : Tensor
#
(d1 : Tensor, d2 : Tensor
Maps a block across three Tensors
. This is more efficient than
zipping iterators since it iterates all Tensor
's in a single
call, avoiding overhead from tracking multiple iterators.
The generic type of the returned Tensor
is inferred from a block
Arguments#
- t :
Tensor
- The secondTensor
for iteration. Must be broadcastable against theshape
ofself
andv
- v :
Tensor
- The thirdTensor
for iteration. Must be broadcastable against theshape
ofself
andt
- block :
Proc(T, U, V, W)
- TheProc
to map across allTensor
s
Examples#
a = Tensor.new([3]) { |i| i }
b = Tensor.new([3]) { |i| i }
c = Tensor.new([3]) { |i| i }
a.map(b, c) { |i, j, k| i + j + k } # => [0, 3, 6]
#map(other : Tensor
#
(other : Tensor
Maps a block across two Tensors
. This is more efficient than
zipping iterators since it iterates both Tensor
's in a single
call, avoiding overhead from tracking multiple iterators.
The generic type of the returned Tensor
is inferred from a block
Arguments#
- t :
Tensor
- The secondTensor
for iteration. Must be broadcastable against theshape
ofself
- block :
Proc(T, U, V)
-Proc
to map across theTensor
Examples#
a = Tensor.new([3]) { |i| i }
b = Tensor.new([3]) { |i| i }
a.map(b) { |i, j| i + j } # => [0, 2, 4]
#map(&block : T -> U) forall U
#
(&block : T -> U) forall U
Maps a block across a Tensor
. The Tensor
is treated
as flat during iteration, and iteration is always done
in RowMajor order
The generic type of the returned Tensor
is inferred from
the block
Arguments#
- block :
Proc(T, U)
-Proc
to map across theTensor
Examples#
a = Tensor.new([3]) { |i| i }
a.map { |e| e + 5 } # => [5, 6, 7]
#map!(d1 : Tensor, d2 : Tensor
#
(d1 : Tensor, d2 : Tensor
Maps a block across three Tensors
. This is more efficient than
zipping iterators since it iterates all Tensor
's in a single
call, avoiding overhead from tracking multiple iterators.
The result of the block is stored in self
.
Broadcasting rules still apply, but since this is an in place
operation, the other Tensor
's must broadcast to the shape of self
Arguments#
- t :
Tensor
- The secondTensor
for iteration. Must be broadcastable against theshape
ofself
andv
- v :
Tensor
- The thirdTensor
for iteration. Must be broadcastable against theshape
ofself
andt
- block :
Proc(T, U, V, T)
- TheProc
to map across allTensor
s
Examples#
a = Tensor.new([3]) { |i| i }
b = Tensor.new([3]) { |i| i }
c = Tensor.new([3]) { |i| i }
a.map!(b, c) { |i, j, k| i + j + k }
a # => [0, 3, 6]
#map!(other : Tensor
#
(other : Tensor
Maps a block across two Tensors
. This is more efficient than
zipping iterators since it iterates both Tensor
's in a single
call, avoiding overhead from tracking multiple iterators.
The result of the block is stored in self
.
Broadcasting rules still apply, but since this is an in place
operation, the other Tensor
must broadcast to the shape of self
Arguments#
- t :
Tensor
- The secondTensor
for iteration. Must be broadcastable against theshape
ofself
- block :
Proc(T, U, T)
-Proc
to map across theTensor
Examples#
a = Tensor.new([3]) { |i| i }
b = Tensor.new([3]) { |i| i }
a.map!(b) { |i, j| i + j }
a # => [0, 2, 4]
#map!
#
Maps a block across a Tensor
in place. The Tensor
is treated
as flat during iteration, and iteration is always done
in RowMajor order
Arguments#
- block :
Proc(T, U)
-Proc
to map across theTensor
Examples#
a = Tensor.new([3]) { |i| i }
a.map! { |e| e + 5 }
a # => [5, 6, 7]
#matmul(other : Tensor(T, S), output : Tensor(T, S)? = nil)
#
(other : Tensor(T, S), output : Tensor(T, S)? = nil)
Computes a matrix multiplication between two Tensors
. The Tensor
s
must be two dimensional with compatible shapes. Currently
only Float and Complex Tensor
s are supported, as BLAS is used
for this operation
Arguments#
- other :
Tensor
- The right hand side of the operation
Examples#
Num::Rand.set_seed(0)
a = Tensor.random(0.0...10.0, [3, 3])
a.matmul(a)
# [[28.2001, 87.4285, 30.5423],
# [12.4381, 30.9552, 26.2495],
# [34.0873, 73.5366, 40.5504]]
#max(axis : Int, dims : Bool = false)
#
(axis : Int, dims : Bool = false)
Reduces a Tensor
along an axis, finding the max of each
view into the Tensor
Arguments#
- axis :
Int
- Axis of reduction - dims :
Bool
- Indicate if the axis of reduction should remain in the result
Examples#
a = Tensor.new([2, 2]) { |i| i }
a.max(0) # => [2, 3]
a.max(1, dims: true)
# [[1],
# [3]]
#max : T
#
View source
: T
#mean(axis : Int, dims : Bool = false)
#
(axis : Int, dims : Bool = false)
Reduces a Tensor
along an axis, finding the average of each
view into the Tensor
Arguments#
- axis :
Int
- Axis of reduction - dims :
Bool
- Indicate if the axis of reduction should remain in the result
Examples#
a = Tensor.new([2, 2]) { |i| i }
a.mean(0) # => [1, 2]
a.mean(1, dims: true)
# [[0],
# [2]]
#mean
#
View source
#min(axis : Int, dims : Bool = false)
#
(axis : Int, dims : Bool = false)
Reduces a Tensor
along an axis, finding the min of each
view into the Tensor
Arguments#
- axis :
Int
- Axis of reduction - dims :
Bool
- Indicate if the axis of reduction should remain in the result
Examples#
a = Tensor.new([2, 2]) { |i| i }
a.min(0) # => [0, 1]
a.min(1, dims: true)
# [[0],
# [2]]
#min : T
#
View source
: T
#modulo(other)
#
(other)
Return element-wise remainder of division for two Tensor
s elementwise
Arguments#
Examples#
a = Tensor.from_array [1.5, 2.2, 3.2]
a % a
#move_axis(source : Array(Int), destination : Array(Int)) : Tensor(T, S)
#
(source : Array(Int), destination : Array(Int)) : Tensor(T, S)
Move axes of a Tensor to new positions, other axes remain in their original order
Arguments#
- source :
Array(Int)
- Original positions of axes to move - destination :
Array(Int)
- Destination positions to permute axes
Examples#
a = Tensor(Int8, CPU(Int8)).new([3, 4, 5])
a.move_axis([0], [-1]).shape # => 4, 5, 3
#move_axis(source : Int, destination : Int) : Tensor(T, S)
#
(source : Int, destination : Int) : Tensor(T, S)
Move axes of a Tensor to new positions, other axes remain in their original order
Arguments#
- source :
Int
- Original position of axis to move - destination :
Int
- Destination position of axis
Examples#
a = Tensor(Int8, CPU(Int8)).new([3, 4, 5])
a.move_axis(0, -1).shape # => 4, 5, 3
#multiply(other)
#
View source
(other)
#negate
#
View source
#norm(order = 'F')
#
(order = 'F')
Matrix norm
This function is able to return one of eight different matrix norms
Arguments#
- order :
String
- Type of norm
Examples#
t = [[0, 1], [1, 1], [1, 1], [2, 1]].to_tensor.as_type(Float32)
t.norm # => 3.6055512
#not_equal(other)
#
(other)
Implements the != operator for two Tensor
s element-wise.
Arguments#
Examples#
a = Tensor.from_array [1, 2, 3]
a != a
#offset : Int32
#
: Int32
Returns the offset of a Tensor's data
Examples#
a = Tensor(Int8, CPU(Int8)).new([2, 3, 4])
a.offset # => 0
#opencl : Tensor(T, OCL(T))
#
: Tensor(T, OCL(T))
Places a Tensor onto an OpenCL backend. No copy is done if the Tensor is already on an OpenCL backend
Examples#
a = Tensor(Float32, CPU(Float32)).ones([3])
a.opencl # => <[3] on OpenCL Backend>
#power(other)
#
(other)
Exponentiates two Tensor
s elementwise
Arguments#
Examples#
a = Tensor.from_array [1.5, 2.2, 3.2]
a ** a
#prod(axis : Int, dims : Bool = false)
#
(axis : Int, dims : Bool = false)
Reduces a Tensor
along an axis, multiplying each view into
the Tensor
Arguments#
- axis :
Int
- Axis of reduction - dims :
Bool
- Indicate if the axis of reduction should remain in the result
Examples#
a = Tensor.new([2, 2]) { |i| i }
a.prod(0) # => [0, 3]
a.prod(1, dims: true)
# [[0],
# [6]]
#prod : T
#
: T
Reduces a Tensor
to a scalar by multiplying all of its
elements
Examples#
a = [1, 2, 3]
a.prod # => 6
#ptp(axis : Int, dims : Bool = false)
#
(axis : Int, dims : Bool = false)
Finds the difference between the maximum and minimum
elements of a Tensor
along an axis
Arguments#
- axis :
Int
- Axis of reduction - dims :
Bool
- Indicate if the axis of reduction should remain in the result
Examples#
a = [[3, 4], [1, 2], [6, 2]].to_tensor
a.ptp(1) # [1, 1, 4]
#ptp : T
#
: T
Finds the difference between the maximum and minimum
elements of a Tensor
Examples#
a = [1, 2, 3].to_tensor
a.ptp # => 2
#qr
#
Compute the qr factorization of a matrix.
Factor the matrix a as qr, where q is orthonormal and r is upper-triangular
Examples#
t = [[0, 1], [1, 1], [1, 1], [2, 1]].to_tensor.as_type(Float32)
q, r = t.qr
puts q
puts r
# [[ 0.0, 0.866],
# [-0.408, 0.289],
# [-0.408, 0.289],
# [-0.816, -0.289]]
# [[-2.449, -1.633],
# [ 0.0, 1.155],
# [ 0.0, 0.0],
# [ 0.0, 0.0]]
#rank : Int32
#
: Int32
Returns the number of dimensions in a Tensor
Examples#
a = Tensor(Int8, CPU(Int8)).new([3, 3, 3, 3])
a.rank # => 4
#reduce
#
Just like the other variant, but you can set the initial value of the accumulator.
Arguments#
- memo - Value to start off the accumulator
Examples#
[1, 2, 3, 4, 5].to_tensor.reduce(10) { |acc, i| acc + i } # => 25
#reduce
#
Combines all elements in the Tensor by applying a binary operation, specified by a block, so as to reduce them to a single value.
For each element in the Tensor the block is passed an accumulator value (memo) and the element. The result becomes the new value for memo. At the end of the iteration, the final value of memo is the return value for the method. The initial value for the accumulator is the first element in the Tensor.
Raises Enumerable::EmptyError
if the Tensor is empty.
Examples#
[1, 2, 3, 4, 5].to_tensor.reduce { |acc, i| acc + i } # => 15
#reduce_axis(axis : Int, dims : Bool = false
#
(axis : Int, dims : Bool = false
Equivalent of calling reduce
on each slice into an axis
.
Used primarily for reductions like Num.sum
, Num.prod
, in their
axis versions.
Arguments#
- axis :
Int
- Axis of reduction - dims :
Bool
- Indicates if the axis of reduction should be removed from the result
Examples#
a = Tensor.new([3, 3]) { |i| i }
a.reduce_axis(0) { |i, j| i + j } # => [ 9, 12, 15]
#repeat(n : Int, axis : Int) : Tensor(T, S)
#
(n : Int, axis : Int) : Tensor(T, S)
Repeat elements of a Tensor
along an axis
Arguments#
- n : Int - Number of times to repeat
- axis :
Int
- Axis along which to repeat
Examples#
a = [[1, 2, 3], [4, 5, 6]]
a.repeat(2, 1)
# [[1, 1, 2, 2, 3, 3],
# [4, 4, 5, 5, 6, 6]]
#repeat(n : Int) : Tensor(T, S)
#
(n : Int) : Tensor(T, S)
Repeat elements of a Tensor
, treating the Tensor
as flat
Arguments#
- n :
Int
- Number of times to repeat
Examples#
a = [1, 2, 3]
a.repeat(2) # => [1, 1, 2, 2, 3, 3]
#reshape(shape : Array(Int)) : Tensor(T, S)
#
(shape : Array(Int)) : Tensor(T, S)
Transforms a Tensor
's shape. If a view can be created,
the reshape will not copy data. The number of elements
in the Tensor
must remain the same.
Arguments#
- result_shape :
Array(Int)
- Result shape for theTensor
Examples#
a = Tensor.from_array [1, 2, 3, 4]
a.reshape([2, 2])
# [[1, 2],
# [3, 4]]
#reshape(*shape : Int) : Tensor(T, S)
#
(*shape : Int) : Tensor(T, S)
Transforms a Tensor
's shape. If a view can be created,
the reshape will not copy data. The number of elements
in the Tensor
must remain the same.
Arguments#
- result_shape :
Array(Int)
- Result shape for theTensor
Examples#
a = Tensor.from_array [1, 2, 3, 4]
a.reshape([2, 2])
# [[1, 2],
# [3, 4]]
#right_shift(other)
#
(other)
Shift the bits of an integer to the right.
Bits are shifted to the right x2. Because the internal representation of numbers is in binary format, this operation is equivalent to dividing x1 by 2**x2.
Arguments#
Examples#
a = Tensor.from_array [1, 2, 3]
a >> a
#set(*args, value)
#
(*args, value)
The primary method of setting Tensor values. The slicing behavior
for this method is identical to the []
method.
If a Tensor
is passed as the value to set, it will be broadcast
to the shape of the slice if possible. If a scalar is passed, it will
be tiled across the slice.
Arguments#
- args :
Tuple
- Tuple of arguments. All must be valid indexers, so aRange
,Int
, orTuple(Range, Int)
. - value :
Tensor | Number
- Value to assign to the slice
Examples#
a = Tensor.new([2, 2]) { |i| i }
a[1.., 1..] = 99
a
# [[ 0, 1],
# [ 2, 99]]
#shape : Array(Int32)
#
: Array(Int32)
Returns the size of a Tensor along each dimension
Examples#
a = Tensor(Int8, CPU(Int8)).new([2, 3, 4])
a.shape # => [2, 3, 4]
#sinh
#
View source
#size : Int32
#
: Int32
Returns the total number of elements in a Tensor
a = Tensor(Int8, CPU(Int8)).new([2, 3, 4])
a.size # => 24
#slice(*args) : Tensor(T, S)
#
(*args) : Tensor(T, S)
Returns a view of a Tensor
from any valid indexers. This view
must be able to be represented as valid strided/shaped view, slicing
as a copy is not supported.
When an Integer argument is passed, an axis will be removed from
the Tensor
, and a view at that index will be returned.
a = Tensor.new([2, 2]) { |i| i }
a[0] # => [0, 1]
When a Range argument is passed, an axis will be sliced based on the endpoints of the range.
a = Tensor.new([2, 2, 2]) { |i| i }
a[1...]
# [[[4, 5],
# [6, 7]]]
When a Tuple containing a Range and an Integer step is passed, an axis is sliced based on the endpoints of the range, and the strides of the axis are updated to reflect the step. Negative steps will reflect the array along an axis.
a = Tensor.new([2, 2]) { |i| i }
a[{..., -1}]
# [[2, 3],
# [0, 1]]
#solve(x : Tensor(T, S))
#
(x : Tensor(T, S))
Solve a linear matrix equation, or system of linear scalar equations.
Computes the “exact” solution, x, of the well-determined, i.e., full rank, linear matrix equation ax = b.
Arguments#
- x :
Tensor
- Argument with which to solve
Examples#
a = [[3, 1], [1, 2]].to_tensor.as_type(Float32)
b = [9, 8].to_tensor.as_type(Float32)
puts a.solve(b)
# [2, 3]
#sort(axis : Int) : Tensor(T, S)
#
(axis : Int) : Tensor(T, S)
Sorts a Tensor
along an axis.
Arguments#
- axis :
Int
- Axis of reduction
Examples#
t = Tensor.random(0...10, [3, 3, 2])
puts t.sort(axis: 1)
# [[[1, 1],
# [4, 5],
# [5, 7]],
#
# [[0, 0],
# [2, 3],
# [8, 4]],
#
# [[2, 5],
# [5, 7],
# [5, 7]]]
#sort : Tensor(T, S)
#
: Tensor(T, S)
Sorts a Tensor
, treating its elements like the Tensor
is flat.
Examples#
a = [3, 2, 1].to_tensor
a.sort # => [1, 2, 3]
#sort(axis : Int, &block : T, T -> _)
#
(axis : Int, &block : T, T -> _)
Sorts a Tensor
along an axis.
Arguments#
- axis :
Int
- Axis of reduction
Examples#
t = Tensor.random(0...10, [3, 3, 2])
puts t.sort(axis: 1) { |i, j| i <=> j }
# [[[5, 3],
# [6, 9],
# [7, 9]],
#
# [[0, 1],
# [3, 2],
# [8, 5]],
#
# [[3, 1],
# [4, 7],
# [7, 8]]]
#sort(&block : T, T -> _)
#
(&block : T, T -> _)
Sorts a Tensor
, treating its elements like the Tensor
is flat. Sorts using criteria specified by a passed block
Arguments#
- block :
Proc(T, T, _)
- Function used to sort
Examples#
a = [3, 2, 1].to_tensor
a.sort { |i, j| j - i } # => [3, 2, 1]
#sqrt
#
View source
#std(axis : Int, dims : Bool = false)
#
(axis : Int, dims : Bool = false)
Reduces a Tensor
along an axis, finding the std of each
view into the Tensor
Arguments#
- axis :
Int
- Axis of reduction - dims :
Bool
- Indicate if the axis of reduction should remain in the result
Examples#
a = Tensor.new([2, 2]) { |i| i }
a.std(0) # => [1, 1]
a.std(1, dims: true)
# [[0.707107],
# [0.707107]]
#std : Float64
#
: Float64
Reduces a Tensor
to a scalar by finding the standard deviation
Examples#
a = [1, 2, 3].to_tensor
a.std # => 0.816496580927726
#strides : Array(Int32)
#
: Array(Int32)
Returns the step of a Tensor along each dimension
Examples#
a = Tensor(Int8, CPU(Int8)).new([3, 3, 2])
a.strides # => [6, 2, 1]
#subtract(other)
#
View source
(other)
#sum(axis : Int, dims : Bool = false)
#
(axis : Int, dims : Bool = false)
Reduces a Tensor
along an axis, summing each view into
the Tensor
Arguments#
- axis :
Int
- Axis of summation - dims :
Bool
- Indicate if the axis of reduction should remain in the result
Examples#
a = Tensor.new([2, 2]) { |i| i }
a.sum(0) # => [2, 4]
a.sum(1, dims: true)
# [[1],
# [5]]
#sum : T
#
View source
: T
#svd
#
Singular Value Decomposition.
When a is a 2D array, it is factorized as u @ np.diag(s) @ vh = (u * s) @ vh, where u and vh are 2D unitary arrays and s is a 1D array of a’s singular values.
Examples#
t = [[0, 1], [1, 1], [1, 1], [2, 1]].to_tensor.as_type(Float32)
a, b, c = t.svd
puts a
puts b
puts c
# [[-0.203749, 0.841716 , -0.330613, 0.375094 ],
# [-0.464705, 0.184524 , -0.19985 , -0.842651],
# [-0.464705, 0.184524 , 0.861075 , 0.092463 ],
# [-0.725662, -0.472668, -0.330613, 0.375094 ]]
# [3.02045 , 0.936426]
# [[-0.788205, -0.615412],
# [-0.615412, 0.788205 ]]
#swap_axes(a : Int, b : Int) : Tensor(T, S)
#
(a : Int, b : Int) : Tensor(T, S)
Permutes two axes of a Tensor
. This will always create a view
of the permuted Tensor
Arguments#
- a :
Int
- First axis of permutation - b :
Int
- Second axis of permutation
Examples#
a = Tensor.new([4, 3, 2]) { |i| i }
a.swap_axes(2, 0)
# [[[ 0, 6, 12, 18]
# [ 2, 8, 14, 20]
# [ 4, 10, 16, 22]]
#
# [[ 1, 7, 13, 19]
# [ 3, 9, 15, 21]
# [ 5, 11, 17, 23]]]
#tanh
#
View source
#tile(n : Int) : Tensor(T, S)
#
(n : Int) : Tensor(T, S)
Tile elements of a Tensor
Arguments#
- n :
Int
- Number of times to tile
Examples#
a = [[1, 2, 3], [4, 5, 6]]
puts a.tile(2)
# [[1, 2, 3, 1, 2, 3],
# [4, 5, 6, 4, 5, 6]]
#tile(n : Array(Int)) : Tensor(T, S)
#
(n : Array(Int)) : Tensor(T, S)
Tile elements of a Tensor
Arguments#
- n :
Int
- Number of times to tile
Examples#
a = [[1, 2, 3], [4, 5, 6]]
puts Num.tile(a, 2)
# [[1, 2, 3, 1, 2, 3],
# [4, 5, 6, 4, 5, 6]]
#to_a : Array(T)
#
: Array(T)
Converts a Tensor to an Array. To avoid return type ambiguity this will always return a 1D Array
Arguments#
Examples#
a = Tensor.from_array [[1, 2], [3, 4]]
a.to_a # => [1, 2, 3, 4]
#to_npy(path : String)
#
(path : String)
Export a Tensor to the Numpy format
Arguments#
- path :
String
- filename of output.npy
file.
Only integer, unsigned integer and float ndarrays are supported at the moment.
#transpose(axes : Array(Int) = [] of Int32) : Tensor(T, S)
#
(axes : Array(Int) = [] of Int32) : Tensor(T, S)
Permutes a Tensor
's axes to a different order. This will
always create a view of the permuted Tensor
.
Arguments#
- axes :
Array(Int)
- New ordering of axes for the permutedTensor
. If empty, a full transpose will occur
Examples#
a = Tensor.new([4, 3, 2]) { |i| i }
a.transpose([2, 0, 1])
# [[[ 0, 2, 4],
# [ 6, 8, 10],
# [12, 14, 16],
# [18, 20, 22]],
#
# [[ 1, 3, 5],
# [ 7, 9, 11],
# [13, 15, 17],
# [19, 21, 23]]]
#transpose(*axes : Int) : Tensor(T, S)
#
(*axes : Int) : Tensor(T, S)
Permutes a Tensor
's axes to a different order. This will
always create a view of the permuted Tensor
.
Arguments#
- axes :
Array(Int)
- New ordering of axes for the permutedTensor
. If empty, a full transpose will occur
Examples#
a = Tensor.new([4, 3, 2]) { |i| i }
a.transpose([2, 0, 1])
# [[[ 0, 2, 4],
# [ 6, 8, 10],
# [12, 14, 16],
# [18, 20, 22]],
#
# [[ 1, 3, 5],
# [ 7, 9, 11],
# [13, 15, 17],
# [19, 21, 23]]]
#tril(k : Int = 0)
#
(k : Int = 0)
Computes the lower triangle of a Tensor
. Zeros
out values above the k
th diagonal
Arguments#
- k :
Int
- Diagonal
Examples#
a = Tensor(Int32).ones([3, 3])
a.tril!
a
# [[1, 0, 0],
# [1, 1, 0],
# [1, 1, 1]]
#tril!(k : Int = 0)
#
(k : Int = 0)
Computes the lower triangle of a Tensor
. Zeros
out values above the k
th diagonal
Arguments#
- k :
Int
- Diagonal
Examples#
a = Tensor(Int32).ones([3, 3])
a.tril!
a
# [[1, 0, 0],
# [1, 1, 0],
# [1, 1, 1]]
#triu(k : Int = 0)
#
(k : Int = 0)
Computes the upper triangle of a Tensor
. Zeros
out values below the k
th diagonal
Arguments#
- k :
Int
- Diagonal
Examples#
a = Tensor(Int32).ones([3, 3])
a.triu!
a
# [[1, 1, 1],
# [0, 1, 1],
# [0, 0, 1]]
#triu!(k : Int = 0)
#
(k : Int = 0)
Computes the upper triangle of a Tensor
. Zeros
out values below the k
th diagonal
Arguments#
- k :
Int
- Diagonal
Examples#
a = Tensor(Int32).ones([3, 3])
a.triu!
a
# [[1, 1, 1],
# [0, 1, 1],
# [0, 0, 1]]
#view(u : U.class) forall U
#
(u : U.class) forall U
Return a shallow copy of a Tensor
with a new dtype. The underlying
data buffer is shared, but the Tensor
owns its other attributes.
The size of the new dtype must be a multiple of the current dtype
Arguments#
- u :
U.class
- The data type used to reinterpret the underlying data buffer of a Tensor
Examples#
a = Tensor.new([3]) { |i| i }
a.view(Int8) # => [0, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0]
#view : Tensor(T, S)
#
: Tensor(T, S)
Return a shallow copy of a Tensor
. The underlying data buffer
is shared, but the Tensor
owns its other attributes. Changes
to a view of a Tensor
will be reflected in the original Tensor
Examples#
a = Tensor(Int32, CPU(Int32)).new([3, 3])
b = a.view
b[...] = 99
a
# [[99, 99, 99],
# [99, 99, 99],
# [99, 99, 99]]
#with_broadcast(n : Int) : Tensor(T, S)
#
(n : Int) : Tensor(T, S)
Expands a Tensor
s dimensions n times by broadcasting
the shape and strides. No data is copied, and the result
is a read-only view of the original Tensor
Arguments#
- n :
Int
- Number of dimensions to broadcast
Examples#
a = [1, 2, 3].to_tensor
a.with_broadcast(2)
# [[[1]],
#
# [[2]],
#
# [[3]]]
#yield_along_axis
#
Similar to each_axis
, but instead of yielding slices of
an axis, it yields slices along an axis, useful for methods
that require an entire view of an axis
slice for a reduction
operation, such as std
, rather than being able to incrementally
reduce.
Arguments#
- axis :
Int
- Axis of reduction
Examples#
a = Tensor.new([3, 3, 3]) { |i| i }
a.yield_along_axis(0) do |ax|
puts ax
end
# [ 0, 9, 18]
# [ 1, 10, 19]
# [ 2, 11, 20]
# [ 3, 12, 21]
# [ 4, 13, 22]
# [ 5, 14, 23]
# [ 6, 15, 24]
# [ 7, 16, 25]
# [ 8, 17, 26]
#zip(b : Tensor(U, CPU(U)), &block : T, U -> _) forall U, V
#
(b : Tensor(U, CPU(U)), &block : T, U -> _) forall U, V
Yields the elements of two Tensor
s, always in RowMajor order,
as if the Tensor
s were flat.
Arguments#
Examples#
a = Tensor.new(2, 2) { |i| i }
b = Tensor.new(2, 2) { |i| i + 2 }
a.zip(b) do |el|
puts el
end
# { 0, 2}
# { 1, 3}
# { 2, 4}
# { 3, 5}