Struct juice::layer::Layer

pub struct Layer<B: IBackend> {
    pub name: String,
    pub config: Box<LayerConfig>,
    pub worker: Box<dyn ILayer<B>>,
    pub weights_data: Vec<ArcLock<SharedTensor<f32>>>,
    pub weights_gradient: Vec<ArcLock<SharedTensor<f32>>>,
    pub input_blobs_data: Vec<ArcLock<SharedTensor<f32>>>,
    pub input_blobs_gradient: Vec<ArcLock<SharedTensor<f32>>>,
    pub input_blob_names: Vec<String>,
    pub output_blobs_data: Vec<ArcLock<SharedTensor<f32>>>,
    pub output_blobs_gradient: Vec<ArcLock<SharedTensor<f32>>>,
    pub blob_names: HashMap<String, (ArcLock<SharedTensor<f32>>, ArcLock<SharedTensor<f32>>)>,
    /* private fields */
}

The generic Layer

Fields

name: String

Identifies the Layer.

The name is mainly used for logging purposes.

config: Box<LayerConfig>

The configuration of the Layer

worker: Box<dyn ILayer<B>>

The implementation of the Layer.

This is the part that does most of the work (forward/backward).

weights_data: Vec<ArcLock<SharedTensor<f32>>>

The vector that stores shared references to the weights in the form of blobs.

weights_gradient: Vec<ArcLock<SharedTensor<f32>>>

The vector that stores shared references to the gradients of the weights in the form of blobs.

input_blobs_data: Vec<ArcLock<SharedTensor<f32>>>

References to all the input blobs of the layer.

input_blobs_gradient: Vec<ArcLock<SharedTensor<f32>>>

References to the gradients of all the input blobs of the layer.

input_blob_names: Vec<String>

Names for all the input blobs of the layer.

output_blobs_data: Vec<ArcLock<SharedTensor<f32>>>

References to all the output blobs of the layer.

output_blobs_gradient: Vec<ArcLock<SharedTensor<f32>>>

References to the gradients of all the output blobs of the layer.

blob_names: HashMap<String, (ArcLock<SharedTensor<f32>>, ArcLock<SharedTensor<f32>>)>

All the blobs of the layer that can be addressed by name.

Does not contain anonymous blobs.

Implementations

impl<B: IBackend> Layer<B>

pub fn connect(
    &mut self,
    registry: &mut HashMap<String, (ArcLock<SharedTensor<f32>>, ArcLock<SharedTensor<f32>>)>,
    weight_registry: &mut HashMap<String, (ArcLock<SharedTensor<f32>>, ArcLock<SharedTensor<f32>>, Option<f32>, Option<f32>)>,
)

Connect the layer to other layers and set up tensors for intermediate results and weights.

Connects to the outputs provided by other layers via the registry. Adds output blobs to the layer and then adds them to the registry, so the following layers can connect to them as their inputs. Finally, it initializes the underlying layer implementation.

Called during initialization of container layers.
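For illustration, a hedged sketch of wiring a layer by hand; `layer` is assumed to already exist, and in practice a container layer builds these registries and threads them through all of its children:

use std::collections::HashMap;

let mut registry = HashMap::new();
let mut weight_registry = HashMap::new();
// Look up inputs in `registry`, create output and weight blobs, and
// publish the outputs back into `registry` for the next layer.
layer.connect(&mut registry, &mut weight_registry);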


pub fn init_backprop(&mut self, blobs_under_loss: &mut HashSet<String>, blobs_skip_backp: &mut HashSet<String>)

Initializes layer for [backpropagation](https://en.wikipedia.org/wiki/Backpropagation).

Goes through all the blobs of a layer to determine which blobs contribute to the loss of the next layer. We can skip backward computation for blobs that don't contribute to the loss. If all of the blobs skip backpropagation, we set a flag to skip backpropagation of the whole layer.
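A hedged sketch of the call; in a real network the sets arrive pre-seeded with the names of the blobs that feed the loss, here they start empty for brevity:

use std::collections::HashSet;

let mut blobs_under_loss = HashSet::new();
let mut blobs_skip_backp = HashSet::new();
// Decide per blob whether backpropagation can be skipped, and set the
// layer-wide skip flag if no blob needs gradients.
layer.init_backprop(&mut blobs_under_loss, &mut blobs_skip_backp);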


pub fn init_force_backward(&mut self)

Set [backpropagation](https://en.wikipedia.org/wiki/Backpropagation) flags to force this layer to backpropagate.

Is executed during network initialization if NetworkConfig.force_backward is true. Forcing backpropagation is useful for debugging.


pub fn forward(&mut self, inputs: &[ArcLock<SharedTensor<f32>>]) -> Vec<ArcLock<SharedTensor<f32>>>

Uses the underlying layer implementation to compute a forward step.

See ILayer.forward
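A hedged usage sketch, assuming `layer` came from Layer::from_config and `input` is an ArcLock<SharedTensor<f32>> (i.e. an Arc<RwLock<SharedTensor<f32>>>) already filled with batch data:

let outputs = layer.forward(&[input.clone()]);
// Outputs are ArcLocks as well; take a read lock to inspect the result.
let output = outputs[0].read().unwrap();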


pub fn backward(&mut self, output_gradients: &[ArcLock<SharedTensor<f32>>]) -> Vec<ArcLock<SharedTensor<f32>>>

Uses the underlying layer implementation to compute a backward step.

See ILayer.backward
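The mirror image of forward; a sketch assuming `output_gradient` holds the gradient arriving from the following layer or loss:

// Returns the gradients w.r.t. the inputs; gradients w.r.t. the layer's
// own weights are accumulated internally for the solver to consume.
let input_gradients = layer.backward(&[output_gradient.clone()]);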


pub fn backward_input(&mut self, output_gradients: &[ArcLock<SharedTensor<f32>>]) -> Vec<ArcLock<SharedTensor<f32>>>

Calculate the gradient w.r.t. input.

This method is mostly used when doing backpropagation.


pub fn backward_parameters(&mut self)

Calculate the gradient w.r.t. parameters.

“Parameters” here refers to weights and also possibly bias, depending on the layer.

This method is mostly used when doing backpropagation.
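When the two halves are needed separately, the pair of calls below is a sketch of what a combined backward step amounts to:

// First propagate the gradient back to the inputs...
let input_gradients = layer.backward_input(&[output_gradient.clone()]);
// ...then accumulate the gradient w.r.t. the layer's weights (and bias,
// where the layer has one).
layer.backward_parameters();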


pub fn synchronize(&self)

Synchronize the layer's backend.


pub fn update_weights<SolverB: IBackend + SolverOps<f32>>(&mut self, backend: &SolverB)

Updates the [weights](https://en.wikipedia.org/wiki/Synaptic_weight) with the weight update computed by the Solver.

Updating the weights is the last step of computing a Solver minibatch. The update value is computed in previous steps according to the learning rate policy.
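In context, a hedged sketch of one minibatch; `solver_backend` is assumed to be an Rc to a backend implementing IBackend + SolverOps<f32>, and the update values are assumed to have been written by the solver in the preceding steps:

layer.forward(&[input.clone()]);
layer.backward(&[output_gradient.clone()]);
// Apply the computed update to the weights...
layer.update_weights(&*solver_backend);
// ...and zero the accumulated gradients before the next minibatch.
layer.clear_weights_gradients();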


pub fn clear_weights_gradients(&mut self)

Clears the gradients of the [weights](https://en.wikipedia.org/wiki/Synaptic_weight) and zero-inits them.

The gradients for the weights accumulate over the backpropagation steps of a Solver minibatch and are cleared between each minibatch to start over with a clean slate.


pub fn save<P: AsRef<Path>>(&mut self, path: P) -> Result<()>

Serialize the Layer and its weights to a Cap'n Proto file at the specified path.

You can find the capnp schema in the juice repository.

use juice::layer::*;
use juice::layers::*;
use juice::util;
use std::rc::Rc;

let mut net_cfg = SequentialConfig::default();
// ... set up network ...
let cfg = LayerConfig::new("network", net_cfg);

let native_backend = Rc::new(util::native_backend());
let mut layer = Layer::from_config(native_backend, &cfg);
// ... do stuff with the layer ...
// ... and save it
layer.save("mynetwork").unwrap();

pub fn load<LB: IBackend + LayerOps<f32> + 'static, P: AsRef<Path>>(backend: Rc<LB>, path: P) -> Result<Layer<LB>>

Read a Cap’n Proto file at the specified path and deserialize the Layer inside it.

You can find the capnp schema in the juice repository.

use coaster::prelude::*;
use juice::layer::Layer;
use juice::util;
use std::rc::Rc;

let native_backend = Rc::new(util::native_backend());
// Load layer from file "mynetwork"
let layer = Layer::<Backend<Native>>::load(native_backend, "mynetwork").unwrap();

pub fn set_weight_propagate_down(&mut self, weight_id: usize, value: bool)

Sets whether the layer should compute gradients w.r.t. a weight at a particular index given by weight_id.

See the weight_propagate_down field of Layer.
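For example, to freeze the first weight blob during fine-tuning (weight ids follow the order of weights_data):

// Skip gradient computation for weight blob 0; it stays fixed while training.
layer.set_weight_propagate_down(0, false);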


pub fn is_using_in_place(&self) -> bool

Returns true when the layer is using in-place computation.

For a layer to use in-place computation it needs to support it via compute_in_place and the names of the first input and output tensor have to match.
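A small sketch of what this means for callers:

if layer.is_using_in_place() {
    // The first input and output share a blob: after forward(), reading
    // the input blob yields the layer's output values.
    println!("layer '{}' computes in place", layer.name);
}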


pub fn input_blob_names(&self) -> &[String]

Returns the names of all the input blobs.


pub fn loss(&self, weight_id: usize) -> Option<&f32>

Returns the [loss weight](http://caffe.berkeleyvision.org/tutorial/loss.html) associated with the weight blob with id weight_id.


pub fn learnable_weights_data(&self) -> Vec<ArcLock<SharedTensor<f32>>>

Returns all the learnable weights in the layer.

If the layer is a container layer it will return all the weights of the layers inside it.


pub fn learnable_weights_gradients(&self) -> Vec<ArcLock<SharedTensor<f32>>>

Returns the gradients for all the learnable weights in the layer.

If the layer is a container layer it will return all the gradients of the layers inside it.


pub fn learnable_weights_names(&self) -> Vec<String>

Returns the names of all the learnable weights in the layer.

If the layer is a container layer it will return the names of all the weights of the layers inside it.


pub fn learnable_weights_lr(&self) -> Vec<Option<f32>>

Returns the learning rate for all the learnable weights in the layer.

If the layer is a container layer it will return all learning rates of the layers inside it.
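Together, these accessors make it easy to inspect a (possibly container) layer; a hedged sketch, assuming the usual ArcLock = Arc<RwLock<...>> aliasing:

for (name, weight) in layer
    .learnable_weights_names()
    .into_iter()
    .zip(layer.learnable_weights_data())
{
    // Each weight is an ArcLock; take a read lock to look at the tensor.
    let tensor = weight.read().unwrap();
    println!("{}: shape {:?}", name, tensor.desc());
}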


impl<B: IBackend + LayerOps<f32> + Copy<f32> + 'static> Layer<B>


pub fn from_config(backend: Rc<B>, config: &LayerConfig) -> Layer<B>

Creates a new Layer from a LayerConfig.

Trait Implementations

impl<B: Debug + IBackend> Debug for Layer<B>

fn fmt(&self, f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter.

impl<B: IBackend> Send for Layer<B>

Auto Trait Implementations

impl<B> !RefUnwindSafe for Layer<B>

impl<B> !Sync for Layer<B>

impl<B> Unpin for Layer<B>

impl<B> !UnwindSafe for Layer<B>

Blanket Implementations

impl<T> Any for T
where T: 'static + ?Sized,

fn type_id(&self) -> TypeId

Gets the TypeId of self.

impl<T> Borrow<T> for T
where T: ?Sized,

fn borrow(&self) -> &T

Immutably borrows from an owned value.

impl<T> BorrowMut<T> for T
where T: ?Sized,

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value.

impl<T> From<T> for T

fn from(t: T) -> T

Returns the argument unchanged.

impl<T, U> Into<U> for T
where U: From<T>,

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

impl<T, U> TryFrom<U> for T
where U: Into<T>,

type Error = Infallible

The type returned in the event of a conversion error.

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.

impl<V, T> VZip<V> for T
where V: MultiLane<T>,

fn vzip(self) -> V