llama.cpp/ggml/src/ggml-cuda/template-instances/mmq-instance-iq4_xs.cu

// This file has been autogenerated by generate_cu_files.py, do not edit manually.
#include "../mmq.cuh"
DECL_MMQ_CASE(GGML_TYPE_IQ4_XS);