6 changes: 6 additions & 0 deletions gemma/activations.h
@@ -203,6 +203,12 @@ struct Activations {
     ffw_out.OverrideRows(batch_size);
 
     attention_storage.SetBatchSize(batch_size);
+    attention.q = attention_storage.q;
+    attention.q_T = attention_storage.q_T;
+    attention.pre_att_rms_out = attention_storage.pre_att_rms_out;
+    attention.att = attention_storage.att;
+    attention.att_out = attention_storage.att_out;
+    attention.att_sums = attention_storage.att_sums;
   }
 
   const LayerConfig& layer_config;
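
For context on the activations.h addition: after attention_storage.SetBatchSize(batch_size) resizes the per-batch buffers, the non-owning views in `attention` are re-pointed at the refreshed storage so they cannot go stale. A minimal sketch of that owner/view rebinding pattern, using hypothetical AttnStorage/AttnViews stand-ins rather than the real gemma.cpp storage and MatPtr types:

// Hypothetical, simplified illustration of the rebinding done above; the real
// gemma.cpp types carry shapes, strides and element types, not raw vectors.
#include <cstddef>
#include <vector>

struct AttnStorage {  // owns the buffers
  std::vector<float> q, att, att_out;
  void SetBatchSize(std::size_t batch_size) {
    // Placeholder per-row widths; the real code derives these from the config.
    q.resize(batch_size * 256);
    att.resize(batch_size * 128);
    att_out.resize(batch_size * 256);
  }
};

struct AttnViews {  // non-owning views handed to the attention kernels
  float* q = nullptr;
  float* att = nullptr;
  float* att_out = nullptr;
};

void SetBatchSize(AttnStorage& storage, AttnViews& views, std::size_t batch_size) {
  storage.SetBatchSize(batch_size);
  // Rebind after every resize, mirroring the six added assignments in the diff;
  // a resize may reallocate, which would otherwise leave the views dangling.
  views.q = storage.q.data();
  views.att = storage.att.data();
  views.att_out = storage.att_out.data();
}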
4 changes: 2 additions & 2 deletions gemma/attention.cc
@@ -130,7 +130,7 @@ static HWY_INLINE void WeightedSumV(
 void SingleDotSoftmaxWeightedSum(
     const size_t pos, const size_t start_pos, const size_t last_pos,
     float* HWY_RESTRICT q, const MatPtrT<KV_t>& k, const MatPtrT<KV_t>& v,
-    const MatPtrT<float>& query_norm_scale, const size_t layer_idx,
+    const MatPtr& query_norm_scale, const size_t layer_idx,
     const AttentionActivationsPtrs& activations, float* HWY_RESTRICT att,
     float* HWY_RESTRICT att_out, ThreadingContext& ctx, const size_t worker) {
   const float att_cap = activations.config.att_cap;
@@ -169,7 +169,7 @@ size_t StartPos(size_t pos, const ModelConfig& config, size_t layer_idx) {
 }
 
 void DotSoftmaxWeightedSum(const size_t num_tokens, const size_t layer_idx,
-                           const MatPtrT<float>& query_norm_scale,
+                           const MatPtr& query_norm_scale,
                            AttentionActivationsPtrs& activations,
                            QBatch& qbatch, ThreadingContext& ctx) {
   GCPP_ZONE(ctx, 0, Zones::kGenAttentionDotSoftmaxWeightedSumInclusive);
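
The signature change here, and the matching edits in attention.h, flash_attention.cc, and flash_attention.h below, widen query_norm_scale from the concrete float view MatPtrT<float> to the type-erased base MatPtr, presumably so the norm scale can be stored in other element types without a conversion at the call site. A hedged sketch of that pattern with stand-in types (ElemType/Mat/LoadScale are illustrative, not the real gemma.cpp definitions):

// Hypothetical sketch of why a callee might take a type-erased tensor view:
// one signature can then accept a scale stored as f32 or bf16 and dispatch
// on the element type internally.
#include <cstddef>
#include <cstdint>
#include <cstring>

enum class ElemType { kF32, kBF16 };

struct Mat {  // type-erased view: element type + length + data pointer
  ElemType type;
  std::size_t num;
  const void* data;
};

// Reads element i regardless of storage type; real code would dispatch once
// per row or tile instead of per element.
inline float LoadScale(const Mat& scale, std::size_t i) {
  if (scale.type == ElemType::kF32) {
    return static_cast<const float*>(scale.data)[i];
  }
  // bf16 holds the upper 16 bits of an IEEE-754 float32.
  const std::uint16_t hi = static_cast<const std::uint16_t*>(scale.data)[i];
  const std::uint32_t bits = static_cast<std::uint32_t>(hi) << 16;
  float f;
  std::memcpy(&f, &bits, sizeof(f));
  return f;
}

// Before: void RMSNormQ(float* q, const MatT<float>& scale);  // f32-only view
// After:  void RMSNormQ(float* q, const Mat& scale);          // any element type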
4 changes: 2 additions & 2 deletions gemma/attention.h
@@ -38,12 +38,12 @@ namespace gcpp {
 void SingleDotSoftmaxWeightedSum( \
     const size_t pos, const size_t start_pos, const size_t last_pos, \
     float* HWY_RESTRICT q, const MatPtrT<KV_t>& k, const MatPtrT<KV_t>& v, \
-    const MatPtrT<float>& query_norm_scale, size_t layer_idx, \
+    const MatPtr& query_norm_scale, size_t layer_idx, \
     const AttentionActivationsPtrs& activations, float* HWY_RESTRICT att, \
     float* HWY_RESTRICT att_out, ThreadingContext& ctx, size_t worker); \
 \
 void DotSoftmaxWeightedSum(const size_t num_tokens, size_t layer_idx, \
-                           const MatPtrT<float>& query_norm_scale, \
+                           const MatPtr& query_norm_scale, \
                            AttentionActivationsPtrs& activations, \
                            QBatch& qbatch, ThreadingContext& ctx); \
 \
5 changes: 2 additions & 3 deletions gemma/flash_attention.cc
@@ -91,7 +91,7 @@ static void TransposeQ(const MatPtrT<float>& q, MatPtrT<float>& q_t,
 // Updates q in place for RMSNorm and positional encoding.
 void RMSNormAndPositionalEncoding(const size_t num_tokens, const QBatch& qbatch,
                                   MatPtrT<float>& q,
-                                  const MatPtrT<float>& query_norm_scale,
+                                  const MatPtr& query_norm_scale,
                                   const size_t layer_idx,
                                   const AttentionActivationsPtrs& activations,
                                   ThreadingContext& ctx) {
@@ -592,8 +592,7 @@ size_t GetVTileSize(size_t kNF, size_t num_head_groups, size_t num_tokens,
 // grouped together so that mode 1 or 2 can be used, and choosing which of the
 // 3 modes to use for best efficiency.
 void FlashAttention(const size_t num_tokens, const size_t target_parallelism,
-                    const size_t layer_idx,
-                    const MatPtrT<float>& query_norm_scale,
+                    const size_t layer_idx, const MatPtr& query_norm_scale,
                     AttentionActivationsPtrs& activations, QBatch& qbatch,
                     ThreadingContext& ctx) {
   GCPP_ZONE(ctx, 0, Zones::kFlashAttentionInclusive);
5 changes: 2 additions & 3 deletions gemma/flash_attention.h
@@ -30,7 +30,7 @@ namespace gcpp {
 namespace NAMESPACE { \
 void RMSNormAndPositionalEncoding( \
     size_t num_tokens, const QBatch& qbatch, MatPtrT<float>& q, \
-    const MatPtrT<float>& query_norm_scale, size_t layer_idx, \
+    const MatPtr& query_norm_scale, size_t layer_idx, \
     const AttentionActivationsPtrs& activations, ThreadingContext& ctx); \
 \
 void SingleFlashAttention(size_t start_pos, size_t last_pos, \
@@ -45,8 +45,7 @@ namespace gcpp {
                           size_t total_tasks, size_t target_parallelism); \
 \
 void FlashAttention(size_t num_tokens, size_t target_parallelism, \
-                    size_t layer_idx, \
-                    const MatPtrT<float>& query_norm_scale, \
+                    size_t layer_idx, const MatPtr& query_norm_scale, \
                     AttentionActivationsPtrs& activations, QBatch& qbatch, \
                     ThreadingContext& ctx); \
 /* NOLINTNEXTLINE(google-readability-namespace-comments) */ \