Struct juice::layers::activation::relu::ReLU

pub struct ReLU;

ReLU Activation Layer.

Applies the rectified linear unit, f(x) = max(0, x), elementwise to its input.
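In a network definition, ReLU is typically placed between weighted layers. A minimal sketch of wiring it in, assuming the SequentialConfig, LinearConfig, LayerConfig, and LayerType names from juice::layers and juice::layer as used in the Juice examples (not part of this page):

    use juice::layer::{LayerConfig, LayerType};
    use juice::layers::{LinearConfig, SequentialConfig};

    fn build_net(batch_size: usize) -> SequentialConfig {
        let mut net_cfg = SequentialConfig::default();
        net_cfg.add_input("data", &[batch_size, 784]);
        net_cfg.add_layer(LayerConfig::new(
            "linear1",
            LinearConfig { output_size: 128 },
        ));
        // ReLU takes no configuration of its own; it is selected
        // purely by layer type.
        net_cfg.add_layer(LayerConfig::new("relu1", LayerType::ReLU));
        net_cfg.add_layer(LayerConfig::new(
            "linear2",
            LinearConfig { output_size: 10 },
        ));
        net_cfg
    }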

Trait Implementations

impl Clone for ReLU

fn clone(&self) -> ReLU

Returns a copy of the value.

fn clone_from(&mut self, source: &Self)

Performs copy-assignment from source.
impl<B: IBackend + Relu<f32> + ReluPointwise<f32>> ComputeInputGradient<f32, B> for ReLU

fn compute_input_gradient(
    &self,
    backend: &B,
    weights_data: &[&SharedTensor<f32>],
    output_data: &[&SharedTensor<f32>],
    output_gradients: &[&SharedTensor<f32>],
    input_data: &[&SharedTensor<f32>],
    input_gradients: &mut [&mut SharedTensor<f32>],
)

Compute gradients with respect to the inputs and write them into input_gradients.
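For ReLU the input gradient follows the subgradient of max(0, x): the output gradient passes through wherever the input was positive and is zeroed elsewhere. The actual computation is dispatched to the backend's Relu/ReluPointwise kernels; the following plain-Rust sketch on raw f32 slices is illustrative only:

    /// Illustrative ReLU backward rule: dE/dx = dE/dy where x > 0, else 0.
    fn relu_backward(input: &[f32], output_gradients: &[f32], input_gradients: &mut [f32]) {
        for ((dx, &x), &dy) in input_gradients
            .iter_mut()
            .zip(input)
            .zip(output_gradients)
        {
            // The derivative of max(0, x) is 1 for x > 0 and 0 otherwise.
            *dx = if x > 0.0 { dy } else { 0.0 };
        }
    }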
impl<B: IBackend + Relu<f32> + ReluPointwise<f32>> ComputeOutput<f32, B> for ReLU

fn compute_output(
    &self,
    backend: &B,
    _weights: &[&SharedTensor<f32>],
    input_data: &[&SharedTensor<f32>],
    output_data: &mut [&mut SharedTensor<f32>],
)

Compute the output for the given input and write it into output_data.
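The forward computation is likewise dispatched to the backend's Relu/ReluPointwise kernels and amounts to the elementwise rectifier y = max(0, x). A plain-Rust sketch, illustrative only:

    /// Illustrative ReLU forward rule: y = max(0, x), applied elementwise.
    fn relu_forward(input: &[f32], output: &mut [f32]) {
        for (y, &x) in output.iter_mut().zip(input) {
            *y = x.max(0.0);
        }
    }

    fn main() {
        let input = [-1.5_f32, 0.0, 2.0];
        let mut output = [0.0_f32; 3];
        relu_forward(&input, &mut output);
        assert_eq!(output, [0.0, 0.0, 2.0]);
    }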
impl<B: IBackend + Relu<f32> + ReluPointwise<f32>> ComputeParametersGradient<f32, B> for ReLU

fn compute_parameters_gradient(
    &self,
    backend: &B,
    output_data: &[&SharedTensor<f32>],
    output_gradients: &[&SharedTensor<f32>],
    input_data: &[&SharedTensor<f32>],
    parameters_gradients: &mut [&mut SharedTensor<f32>],
)

Compute gradients with respect to the parameters and write them into parameters_gradients.
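Note that ReLU has no learnable parameters, so there are no parameter gradients to produce; for this layer the method is effectively a no-op.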
impl Debug for ReLU

fn fmt(&self, f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter.
impl<B: IBackend + Relu<f32> + ReluPointwise<f32>> ILayer<B> for ReLU

fn exact_num_output_blobs(&self) -> Option<usize>

Returns the exact number of output blobs required by the layer, or None if no exact number is required.

fn exact_num_input_blobs(&self) -> Option<usize>

Returns the exact number of input blobs required by the layer, or None if no exact number is required.
fn compute_in_place(&self) -> bool

Return whether the computations of a layer should be done in-place (the output will be written where the input was read from).
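An elementwise activation such as ReLU is a natural candidate for in-place computation: each output element depends only on the corresponding input element, and the gradient can be recovered from the output alone, since y > 0 exactly where x > 0.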
fn reshape(
    &mut self,
    backend: Rc<B>,
    input_data: &mut Vec<ArcLock<SharedTensor<f32>>>,
    input_gradient: &mut Vec<ArcLock<SharedTensor<f32>>>,
    weights_data: &mut Vec<ArcLock<SharedTensor<f32>>>,
    weights_gradient: &mut Vec<ArcLock<SharedTensor<f32>>>,
    output_data: &mut Vec<ArcLock<SharedTensor<f32>>>,
    output_gradient: &mut Vec<ArcLock<SharedTensor<f32>>>,
)

Adjust the shapes of the output blobs to fit the shapes of the input blobs.
fn init(&mut self, backend: Rc<B>)

Initialize the layer for computation.
fn resize_shared_workspace(
    &mut self,
    backend: Rc<B>,
    workspace: Option<ArcLock<SharedTensor<u8>>>,
) -> Option<ArcLock<SharedTensor<u8>>>

Adjust the size of the shared workspace.
fn forward(
    &self,
    backend: &B,
    input_data: &[ArcLock<SharedTensor<f32>>],
    weights_data: &[ArcLock<SharedTensor<f32>>],
    output_data: &mut [ArcLock<SharedTensor<f32>>],
)

Compute the feedforward layer output using the provided Backend (see https://en.wikipedia.org/wiki/Feedforward_neural_network).
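For example, a forward pass over an input tensor holding [-2.0, 0.5, -0.1] writes [0.0, 0.5, 0.0] into the output tensor.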
fn backward_input(
    &self,
    backend: &B,
    weights_data: &[ArcLock<SharedTensor<f32>>],
    output_data: &[ArcLock<SharedTensor<f32>>],
    output_gradients: &[ArcLock<SharedTensor<f32>>],
    input_data: &[ArcLock<SharedTensor<f32>>],
    input_gradients: &mut [ArcLock<SharedTensor<f32>>],
)

Compute the backpropagation input gradient using the provided backend (see https://en.wikipedia.org/wiki/Backpropagation).
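For example, with input [-2.0, 0.5, -0.1] and output gradient [0.3, 0.7, 0.9], the resulting input gradient is [0.0, 0.7, 0.0]: the gradient passes through only where the input was positive.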
fn backward_parameters(
    &self,
    backend: &B,
    output_data: &[ArcLock<SharedTensor<f32>>],
    output_gradients: &[ArcLock<SharedTensor<f32>>],
    input_data: &[ArcLock<SharedTensor<f32>>],
    weights_gradients: &mut [ArcLock<SharedTensor<f32>>],
)

Compute the backpropagation parameters gradient using the provided backend (see https://en.wikipedia.org/wiki/Backpropagation).
fn auto_output_blobs(&self) -> bool

Return whether “anonymous” output blobs are created automatically for the layer.

fn min_output_blobs(&self) -> usize

Returns the minimum number of output blobs required by the layer, or 0 if no minimum number is required.

fn auto_weight_blobs(&self) -> bool

Return whether weight blobs are created automatically for the layer.

fn allow_force_backward(&self, input_id: usize) -> bool

Return whether to allow force_backward for a given input blob index.
fn sync_native(&self) -> bool

Return whether a simple native backend should be used to sync instead of the default backend.

fn is_container(&self) -> bool

Return whether the layer is a container.
fn loss_weight(&self, output_id: usize) -> Option<f32>

Return the associated loss weight for a given output blob index.

fn inputs_data(&self) -> Option<Vec<ArcLock<SharedTensor<f32>>>>

Return the input tensors of the layer.

fn inputs_gradients(&self) -> Option<Vec<ArcLock<SharedTensor<f32>>>>

Return the gradients of the input tensors of the layer.

fn outputs_data(&self) -> Option<Vec<ArcLock<SharedTensor<f32>>>>

Return the output tensors of the layer.

fn outputs_gradients(&self) -> Option<Vec<ArcLock<SharedTensor<f32>>>>

Return the gradients of the output tensors of the layer.

fn learnable_weights(&self) -> Option<Vec<ArcLock<SharedTensor<f32>>>>

Return the learnable weights inside the layer.

fn learnable_weights_gradients(&self) -> Option<Vec<ArcLock<SharedTensor<f32>>>>

Return the gradients for the learnable weights inside the layer.

fn learnable_weights_names(&self) -> Option<Vec<String>>

Return the names of the learnable weights inside the layer.

fn learnable_weights_lr(&self) -> Option<Vec<Option<f32>>>

Return the learning rates for the learnable weights inside the layer.

Auto Trait Implementations

impl RefUnwindSafe for ReLU

impl Send for ReLU

impl Sync for ReLU

impl Unpin for ReLU

impl UnwindSafe for ReLU

Blanket Implementations

impl<T> Any for T
where T: 'static + ?Sized,

fn type_id(&self) -> TypeId

Gets the TypeId of self.

impl<T> Borrow<T> for T
where T: ?Sized,

fn borrow(&self) -> &T

Immutably borrows from an owned value.

impl<T> BorrowMut<T> for T
where T: ?Sized,

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value.

impl<T> From<T> for T

fn from(t: T) -> T

Returns the argument unchanged.

impl<T, U> Into<U> for T
where U: From<T>,

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

impl<T> ToOwned for T
where T: Clone,

type Owned = T

The resulting type after obtaining ownership.

fn to_owned(&self) -> T

Creates owned data from borrowed data, usually by cloning.

fn clone_into(&self, target: &mut T)

Uses borrowed data to replace owned data, usually by cloning.

impl<T, U> TryFrom<U> for T
where U: Into<T>,

type Error = Infallible

The type returned in the event of a conversion error.

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.

impl<V, T> VZip<V> for T
where V: MultiLane<T>,

fn vzip(self) -> V