// attention.h
#pragma once

#include "ctranslate2/layers/attention_layer.h"
#include "ctranslate2/padder.h"
#include "ctranslate2/layers/transformer.h"

namespace ctranslate2 {
  namespace layers {

    // Builds the matrix of relative positions between query and key time
    // steps, clipped to max_position in both directions.
    StorageView make_relative_positions(dim_t queries_length,
                                        dim_t keys_length,
                                        dim_t max_position);

    // Variant with independent clipping bounds for positions to the left
    // and to the right of each query.
    StorageView make_asymmetric_relative_positions(dim_t queries_length,
                                                   dim_t keys_length,
                                                   dim_t left_max_position,
                                                   dim_t right_max_position);
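
    // A hedged sketch of the expected output (the exact convention is defined
    // in attention.cc): relative distances key_pos - query_pos are clipped to
    // [-max_position, max_position], then shifted by max_position so they can
    // index an embedding table of size 2 * max_position + 1. For example, with
    // queries_length = 3, keys_length = 3, max_position = 1:
    //
    //   raw distances       clipped + shifted
    //    0  1  2             1  2  2
    //   -1  0  1      ->     0  1  2
    //   -2 -1  0             0  0  1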

    // Forward declarations for the optional position encodings.
    class RotaryEmbeddings;
    class Alibi;

    // Standard multi-head attention with optional relative positions, rotary
    // embeddings, ALiBi biases, and query/key layer normalization.
    class MultiHeadAttention : public AttentionLayer
    {
    public:
      MultiHeadAttention(const models::Model& model,
                         const std::string& scope,
                         dim_t num_heads,
                         bool self_attention,
                         bool pre_norm = true,
                         bool is_decoder = false,
                         Alibi* alibi = nullptr);
      DataType output_type() const override;
      dim_t output_size() const override;
      // Runs the attention layer. For self-attention, queries and values refer
      // to the same input. Cached keys/values support incremental decoding,
      // and the optional padders remove/restore padding positions.
      virtual void operator()(const StorageView& queries,
                              const StorageView& values,
                              const StorageView* values_lengths,
                              StorageView& output,
                              StorageView* cached_keys = nullptr,
                              StorageView* cached_values = nullptr,
                              StorageView* attention = nullptr,
                              const Padder* queries_padder = nullptr,
                              const Padder* values_padder = nullptr,
                              bool return_normalized_attention = true,
                              StorageView* position_bias = nullptr,
                              dim_t offset = 0) const override;

      virtual bool has_positional_embeddings() const override {
        return _relative_position_keys || _relative_attention_bias || _rotary_embeddings || _alibi;
      }
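
      // A minimal, hypothetical call sketch (not part of this header; names
      // like input, lengths, step_input, and step are placeholders): a full
      // self-attention pass over [batch, time, depth] input, then one
      // incremental decoding step that reuses the key/value cache.
      //
      //   StorageView output(input.dtype(), input.device());
      //   attn(input, input, &lengths, output);                  // full pass
      //
      //   StorageView cached_keys, cached_values;
      //   attn(step_input, step_input, nullptr, output,
      //        &cached_keys, &cached_values,
      //        /*attention=*/nullptr,
      //        /*queries_padder=*/nullptr, /*values_padder=*/nullptr,
      //        /*return_normalized_attention=*/true,
      //        /*position_bias=*/nullptr, /*offset=*/step);      // one step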

    protected:
      // Computes the query/key/value projections for cross attention, possibly
      // reusing cached key/value projections, and resolves the beam size.
      void process_cross_attention(const StorageView& queries,
                                   const StorageView& values,
                                   StorageView& fused_proj,
                                   StorageView& queries_proj,
                                   StorageView& keys_proj,
                                   StorageView& values_proj,
                                   StorageView* cached_keys,
                                   StorageView* cached_values,
                                   const Padder* queries_padder,
                                   const Padder* values_padder,
                                   dim_t& beam_size) const;

    private:
      // Reshapes [batch, time, depth] to [batch, num_heads, time, depth / num_heads].
      static void split_heads(StorageView& x,
                              dim_t num_heads,
                              const Padder* padder = nullptr,
                              dim_t beam_size = 1);

      // Inverse of split_heads: merges the heads back into the depth dimension.
      static void combine_heads(StorageView& x,
                                dim_t num_heads,
                                const Padder* padder = nullptr,
                                dim_t beam_size = 1);

      // Apply layer normalization to the key (and query) projections when the
      // model enables QK normalization.
      void apply_k_norm(StorageView& keys_proj) const;
      void apply_qk_norm(StorageView& queries_proj,
                         StorageView& keys_proj) const;

      const StorageView* _relative_attention_bias;
      const StorageView* _relative_position_keys;
      const StorageView* _relative_asymmetric_position_keys;
      const StorageView* _relative_position_values;
      const StorageView* _gru_relative_position_const;
      dim_t _maximum_relative_position;
      dim_t _relative_left_max_position;
      dim_t _relative_right_max_position;
      const bool _merge_time_and_head_dims;
      const dim_t _cache_time_dim;
      std::unique_ptr<const LayerNorm> _q_norm;  // Query normalization.
      std::unique_ptr<const LayerNorm> _k_norm;  // Key normalization.

    protected:
      const std::unique_ptr<const Dense> _gru_relative_position_linear;
    };

  }
}
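
// A hedged end-to-end sketch (assumed API use, not taken from this header):
// the layer is normally built from a loaded model scope and wired up by the
// transformer layers. Something like:
//
//   const auto model = ctranslate2::models::Model::load("model_dir");
//   ctranslate2::layers::MultiHeadAttention attn(*model,
//                                                "encoder/layer_0/self_attention",
//                                                /*num_heads=*/8,
//                                                /*self_attention=*/true);
//
// The scope string "encoder/layer_0/self_attention" is illustrative; actual
// scopes depend on the converted model.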