pub struct Sequential<B: IBackend + LayerOps<f32>> { /* private fields */ }
Expand description

Sequential Layer

Implementations§

source§

impl<B: IBackend + LayerOps<f32> + 'static> Sequential<B>

source

pub fn empty() -> Sequential<B>

Create an empty Sequential container layer.

source

pub fn from_config(backend: Rc<B>, config: &SequentialConfig) -> Sequential<B>

Create a Sequential layer from a SequentialConfig.

source

pub fn init_layers(&mut self, backend: Rc<B>, in_config: &SequentialConfig)

Initializes a sequential container.

Sets up the structure of the sequential container. It reads the supplied SequentialConfig, connects the input and output blobs of each layer and determines whether backpropagation must be executed for each tensor and layer.

Trait Implementations§

source§

impl<B: IBackend + LayerOps<f32> + 'static> ComputeInputGradient<f32, B> for Sequential<B>

source§

fn compute_input_gradient( &self, backend: &B, weights_data: &[&SharedTensor<f32>], output_data: &[&SharedTensor<f32>], output_gradients: &[&SharedTensor<f32>], input_data: &[&SharedTensor<f32>], input_gradients: &mut [&mut SharedTensor<f32>] )

Compute gradients with respect to the inputs and write them into input_gradients.
source§

impl<B: IBackend + LayerOps<f32> + 'static> ComputeOutput<f32, B> for Sequential<B>

source§

fn compute_output( &self, backend: &B, weights: &[&SharedTensor<f32>], input_data: &[&SharedTensor<f32>], output_data: &mut [&mut SharedTensor<f32>] )

Compute output for given input and write them into output_data.
source§

impl<B: IBackend + LayerOps<f32> + 'static> ComputeParametersGradient<f32, B> for Sequential<B>

source§

fn compute_parameters_gradient( &self, backend: &B, output_data: &[&SharedTensor<f32>], output_gradients: &[&SharedTensor<f32>], input_data: &[&SharedTensor<f32>], parameters_gradients: &mut [&mut SharedTensor<f32>] )

Compute gradients with respect to the parameters and write them into parameters_gradients.
source§

impl<B: Debug + IBackend + LayerOps<f32>> Debug for Sequential<B>

source§

fn fmt(&self, f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter. Read more
source§

impl<B: IBackend + LayerOps<f32> + 'static> ILayer<B> for Sequential<B>

source§

fn is_container(&self) -> bool

Return whether the layer is a container. Read more
source§

fn inputs_data(&self) -> Option<Vec<ArcLock<SharedTensor<f32>>>>

Return the input tensors of the layer. Read more
source§

fn inputs_gradients(&self) -> Option<Vec<ArcLock<SharedTensor<f32>>>>

Return the gradients of the input tensors of the layer. Read more
source§

fn outputs_data(&self) -> Option<Vec<ArcLock<SharedTensor<f32>>>>

Return the output tensors of the layer. Read more
source§

fn outputs_gradients(&self) -> Option<Vec<ArcLock<SharedTensor<f32>>>>

Return the gradients of the output tensors of the layer. Read more
source§

fn learnable_weights(&self) -> Option<Vec<ArcLock<SharedTensor<f32>>>>

Return the learnable weights inside the layer. Read more
source§

fn learnable_weights_gradients(&self) -> Option<Vec<ArcLock<SharedTensor<f32>>>>

Return the gradients for the learnable weights inside the layer. Read more
source§

fn learnable_weights_names(&self) -> Option<Vec<String>>

Return the names of the learnable weights inside the layer. Read more
source§

fn resize_shared_workspace( &mut self, backend: Rc<B>, workspace: Option<ArcLock<SharedTensor<u8>>> ) -> Option<ArcLock<SharedTensor<u8>>>

Adjust size of shared workspace. Read more
source§

fn forward( &self, backend: &B, input_data: &[ArcLock<SharedTensor<f32>>], weights_data: &[ArcLock<SharedTensor<f32>>], output_data: &mut [ArcLock<SharedTensor<f32>>] )

Compute the [feedforward][1] layer output using the provided Backend. [1]: https://en.wikipedia.org/wiki/Feedforward_neural_network Read more
source§

fn backward_input( &self, backend: &B, weights_data: &[ArcLock<SharedTensor<f32>>], output_data: &[ArcLock<SharedTensor<f32>>], output_gradients: &[ArcLock<SharedTensor<f32>>], input_data: &[ArcLock<SharedTensor<f32>>], input_gradients: &mut [ArcLock<SharedTensor<f32>>] )

Compute the [backpropagation][1] input gradient using the provided backend. [1]: https://en.wikipedia.org/wiki/Backpropagation Read more
source§

fn backward_parameters( &self, backend: &B, output_data: &[ArcLock<SharedTensor<f32>>], output_gradients: &[ArcLock<SharedTensor<f32>>], input_data: &[ArcLock<SharedTensor<f32>>], weights_gradients: &mut [ArcLock<SharedTensor<f32>>] )

Compute the [backpropagation][1] parameters gradient using the provided backend. [1]: https://en.wikipedia.org/wiki/Backpropagation Read more
source§

fn init(&mut self, backend: Rc<B>)

Initialize the layer for computation. Read more
source§

fn reshape( &mut self, backend: Rc<B>, input_data: &mut Vec<ArcLock<SharedTensor<f32>>>, input_gradient: &mut Vec<ArcLock<SharedTensor<f32>>>, weights_data: &mut Vec<ArcLock<SharedTensor<f32>>>, weights_gradient: &mut Vec<ArcLock<SharedTensor<f32>>>, output_data: &mut Vec<ArcLock<SharedTensor<f32>>>, output_gradient: &mut Vec<ArcLock<SharedTensor<f32>>> )

Adjust the shapes of the output blobs to fit the shapes of the input blobs. Read more
source§

fn auto_output_blobs(&self) -> bool

Return whether “anonymous” output blobs are created automatically for the layer. Read more
source§

fn min_output_blobs(&self) -> usize

Returns the minimum number of output blobs required by the layer, or 0 if no minimum number is required. Read more
source§

fn exact_num_output_blobs(&self) -> Option<usize>

Returns the exact number of output blobs required by the layer, or None if no exact number is required. Read more
source§

fn auto_weight_blobs(&self) -> bool

Return whether weight blobs are created automatically for the layer. Read more
source§

fn exact_num_input_blobs(&self) -> Option<usize>

Returns the exact number of input blobs required by the layer, or None if no exact number is required. Read more
source§

fn allow_force_backward(&self, input_id: usize) -> bool

Return whether to allow force_backward for a given input blob index. Read more
source§

fn sync_native(&self) -> bool

Return whether a simple native backend should be used to [sync][1] instead of the default backend. [1]: #method.sync Read more
source§

fn compute_in_place(&self) -> bool

Return whether the computations of a layer should be done in-place (the output will be written where the input was read from). Read more
source§

fn loss_weight(&self, output_id: usize) -> Option<f32>

Return the associated loss weight for a given output blob index. Read more
source§

fn learnable_weights_lr(&self) -> Option<Vec<Option<f32>>>

Return the learning rates for the learnable weights inside the layer. Read more

Auto Trait Implementations§

§

impl<B> !RefUnwindSafe for Sequential<B>

§

impl<B> !Send for Sequential<B>

§

impl<B> !Sync for Sequential<B>

§

impl<B> Unpin for Sequential<B>

§

impl<B> !UnwindSafe for Sequential<B>

Blanket Implementations§

source§

impl<T> Any for T
where T: 'static + ?Sized,

source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
source§

impl<T> Borrow<T> for T
where T: ?Sized,

source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
source§

impl<T> BorrowMut<T> for T
where T: ?Sized,

source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
source§

impl<T> From<T> for T

source§

fn from(t: T) -> T

Returns the argument unchanged.

source§

impl<T, U> Into<U> for T
where U: From<T>,

source§

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

source§

impl<T, U> TryFrom<U> for T
where U: Into<T>,

§

type Error = Infallible

The type returned in the event of a conversion error.
source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
source§

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
§

impl<V, T> VZip<V> for T
where V: MultiLane<T>,

§

fn vzip(self) -> V