CUDA: fix race conditions in FlashAttention kernels (#13438)
parent d2a4ef05c6
commit 0208355f42

2 changed files with 3 additions and 0 deletions
@@ -874,6 +874,8 @@ static __device__ __forceinline__ void flash_attn_ext_f16_process_tile(
             }
         }
 
+        __syncthreads();
+
         // Write back combined meta data:
 #pragma unroll
         for (int imeta = 0; imeta < nmeta; ++imeta) {
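Both additions close the same kind of gap: one phase writes shared memory, a later phase reads slots that other threads wrote, and nothing orders the two. In the hunk above, the barrier lands right before the combined meta data is written back, so every warp's shared-memory writes from the preceding combine step are visible first. Below is a minimal, self-contained sketch of the pattern being protected; the kernel name combine_partials, the block size, and the float type are illustrative assumptions, not the llama.cpp code:

// A minimal sketch of the write-then-read pattern the first hunk protects.
// combine_partials, the block size, and float are illustrative assumptions,
// not the llama.cpp kernel: each thread writes its partial result to shared
// memory, then one thread reads every slot, so a barrier must sit between.
#include <cstdio>

__global__ void combine_partials(const float * in, float * out) {
    extern __shared__ float partial[];  // one slot per thread

    const int tid = threadIdx.x;
    partial[tid] = in[tid];             // phase 1: every thread writes

    __syncthreads();                    // the fix: order phase 1 before phase 2

    if (tid == 0) {                     // phase 2: read other threads' slots
        float sum = 0.0f;
        for (int i = 0; i < blockDim.x; ++i) {
            sum += partial[i];
        }
        *out = sum;
    }
}

int main() {
    const int n = 256;
    float h_in[n];
    for (int i = 0; i < n; ++i) {
        h_in[i] = 1.0f;
    }

    float * d_in  = nullptr;
    float * d_out = nullptr;
    cudaMalloc(&d_in,  n*sizeof(float));
    cudaMalloc(&d_out, sizeof(float));
    cudaMemcpy(d_in, h_in, n*sizeof(float), cudaMemcpyHostToDevice);

    combine_partials<<<1, n, n*sizeof(float)>>>(d_in, d_out);

    float h_out = 0.0f;
    cudaMemcpy(&h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost);
    printf("sum = %.1f (expected %d.0)\n", h_out, n);

    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}

Without the barrier, thread 0 may read partial[] slots that warps other than its own have not written yet, and the sum would intermittently come up short, which is how this class of race typically shows up in practice.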
@@ -168,6 +168,7 @@ static __global__ void flash_attn_vec_ext_f16(
     for (int j = 0; j < ncols; ++j) {
         KQ[j*D + tid] = -HALF_MAX_HALF;
     }
+    __syncthreads();
 
     half2 VKQ[ncols] = {{0.0f, 0.0f}};
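In this second hunk, the shared KQ buffer is initialized cooperatively, each thread filling one strided slice per column, and the added barrier guarantees the whole buffer is initialized before later steps read entries written by other threads. A kernel-only sketch of that init-then-read shape follows; D, ncols, and the float element type are assumed stand-ins for the kernel's template parameters and half-precision types:

// Kernel-only sketch mirroring the fattn-vec hunk: KQ, D, and tid are named
// after the diff, but the sizes and the float element type are assumptions.
#define D     64     // assumed head size (launch with blockDim.x == D)
#define NCOLS 2      // assumed number of columns

__global__ void kq_init_then_read(float * out) {
    __shared__ float KQ[NCOLS*D];
    const int tid = threadIdx.x;

    for (int j = 0; j < NCOLS; ++j) {
        KQ[j*D + tid] = -1.0e30f;        // stand-in for -HALF_MAX_HALF
    }
    __syncthreads();                     // the line the commit adds

    // Later phases read entries that other threads initialized; without the
    // barrier these loads could observe uninitialized shared memory.
    float acc = 0.0f;
    for (int j = 0; j < NCOLS; ++j) {
        acc += KQ[j*D + (D - 1 - tid)];  // deliberately a mirror thread's slot
    }
    out[tid] = acc;
}

Launched as kq_init_then_read<<<1, D>>>(d_out), every thread sums slots its mirror thread filled, so the barrier is the only thing standing between the read and uninitialized shared memory.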