Public Attributes:
    attention_type
    encoder_output_dim
    encoder_outputs
    decoder_input_dim
    decoder_state_dim
    weighted_encoder_outputs
    encoder_outputs_transposed
    lstm_memory_optimization
    attention_memory_optimization
    name
    recompute_blobs
|
Public Member Functions:
    __init__(self, encoder_output_dim, encoder_outputs, decoder_input_dim, decoder_state_dim, name, attention_type, weighted_encoder_outputs, forget_bias, lstm_memory_optimization, attention_memory_optimization)
    get_attention_weights(self)
    prepare_input(self, model, input_blob)
    get_state_names(self)
    get_outputs_with_grads(self)
    get_output_size(self)

Public Member Functions inherited from the base RNN cell class:
    __init__(self, name)
    scope(self, name)
    apply_over_sequence(self, model, inputs, seq_lengths, initial_states, outputs_with_grads=None)
    apply(self, model, input_t, seq_lengths, states, timestep)
    prepare_input(self, model, input_blob)
    get_state_names(self)
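
The sketch below is a minimal, hedged usage example: it constructs a cell with the constructor signature listed above and unrolls it with apply_over_sequence(). Only the __init__, prepare_input, and apply_over_sequence signatures come from this page; the class name LSTMWithAttentionCell, the AttentionType import, all blob names, and all dimensions are assumptions made for illustration and may need to be adapted to the actual class.

    # Hypothetical usage sketch; class name, blob names, and dimensions are assumed.
    from caffe2.python import model_helper, rnn_cell
    from caffe2.python.attention import AttentionType  # assumed source of attention_type values

    model = model_helper.ModelHelper(name="seq2seq_decoder")

    # Placeholder input blobs (names are illustrative).
    encoder_outputs, decoder_inputs, decoder_lengths = model.net.AddExternalInput(
        "encoder_outputs", "decoder_inputs", "decoder_lengths"
    )

    cell = rnn_cell.LSTMWithAttentionCell(  # class name inferred from the __init__ signature above
        encoder_output_dim=256,
        encoder_outputs=encoder_outputs,
        decoder_input_dim=128,
        decoder_state_dim=512,
        name="decoder",
        attention_type=AttentionType.Regular,
        weighted_encoder_outputs=None,  # let the cell compute the weighted encoder outputs itself
        forget_bias=0.0,
        lstm_memory_optimization=False,
        attention_memory_optimization=False,
    )

    # Preprocess the raw decoder inputs into the form the cell expects.
    step_inputs = cell.prepare_input(model, decoder_inputs)

    # One initial blob per recurrent state; the required count and order
    # follow cell.get_state_names() (the blobs below are placeholders).
    initial_states = model.net.AddExternalInput(
        "hidden_init", "cell_init", "attention_context_init"
    )

    # Unroll the cell over the whole sequence; the structure of the returned
    # blobs follows get_state_names() / get_output_size().
    outputs = cell.apply_over_sequence(
        model=model,
        inputs=step_inputs,
        seq_lengths=decoder_lengths,
        initial_states=initial_states,
    )

Calling prepare_input once on the full sequence, before unrolling, presumably lets the cell hoist any per-timestep input projection out of the recurrent step; apply() is the single-timestep counterpart used inside the unrolled network.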
|
Definition at line 837 of file rnn_cell.py.
The documentation for this class was generated from the following file: rnn_cell.py