CUDA: fix typo in FlashAttention code (#13926)
parent b47ab7b8e9
commit e562eece7c
1 changed file with 1 addition and 1 deletion
@@ -1246,7 +1246,7 @@ static __global__ void flash_attn_ext_f16(
         NO_DEVICE_CODE;
         return;
     }
-#endif __CUDA_ARCH__ == GGML_CUDA_CC_TURING
+#endif // __CUDA_ARCH__ == GGML_CUDA_CC_TURING

     static_assert(!mla || DKQ >= DV, "MLA needs DKQ >= DV");

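For context: anything other than a comment after #endif is not valid preprocessor syntax, so the old line produced an "extra tokens at end of #endif directive" warning; adding // turns the trailing condition back into an ordinary comment. Below is a minimal standalone sketch of the guard pattern being fixed, not the llama.cpp source; the kernel name and the numeric value of GGML_CUDA_CC_TURING are assumptions for illustration.

// Minimal sketch (hypothetical kernel, not the llama.cpp source).
// Assumption: GGML_CUDA_CC_TURING is the Turing compute-capability constant (750).
#define GGML_CUDA_CC_TURING 750

__global__ void guarded_kernel(float * dst) {
#if __CUDA_ARCH__ == GGML_CUDA_CC_TURING
    // Turing-only early-out path, mirroring the structure around line 1246.
    dst[0] = 0.0f;
    return;
#endif // __CUDA_ARCH__ == GGML_CUDA_CC_TURING  (the // is required; bare tokens after #endif trigger a warning)
    dst[0] = 1.0f;
}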