CANN: Support more ops (#12841)

* [CANN] Support ops LOG && MEAN && PAD_REFLECT_1D

* [CANN] Support COUNT_EQUAL && STEP && SGN

* [CANN] codestyle adjustment

* [CANN] codestyle adjustment

---------

Signed-off-by: noemotiovon <noemotiovon@gmail.com>
Chenguang Li, 2025-04-10 08:51:52 +08:00, committed by GitHub
parent 11d07e1e69
commit fe5b78c896
4 changed files with 173 additions and 0 deletions

@@ -59,6 +59,11 @@
#include <aclnnop/aclnn_div.h>
#include <aclnnop/aclnn_convolution.h>
#include <aclnnop/aclnn_elu.h>
#include <aclnnop/aclnn_log.h>
#include <aclnnop/aclnn_mean.h>
#include <aclnnop/aclnn_reflection_pad1d.h>
#include <aclnnop/aclnn_eq_tensor.h>
#include <aclnnop/aclnn_gt_scalar.h>
#include <float.h>
#include <cmath>
@@ -2598,6 +2603,7 @@ void ggml_cann_rope(ggml_backend_cann_context& ctx, ggml_tensor* dst) {
    aclTensor* acl_dst = ggml_cann_create_tensor(dst, dst->ne, dst->nb, 3);
    GGML_CANN_CALL_ACLNN_OP(ArgMax, acl_src, 3, false, acl_dst);
    ACL_CHECK(aclDestroyTensor(acl_src));
    ACL_CHECK(aclDestroyTensor(acl_dst));
}
@@ -2629,6 +2635,9 @@ void ggml_cann_conv_transpose_1d(ggml_backend_cann_context& ctx, ggml_tensor* dst) {
    ACL_CHECK(aclDestroyTensor(acl_weight));
    ACL_CHECK(aclDestroyTensor(acl_dst));
    ACL_CHECK(aclDestroyIntArray(stride));
    ACL_CHECK(aclDestroyIntArray(padding));
    ACL_CHECK(aclDestroyIntArray(dilation));
}

void ggml_cann_elu(ggml_backend_cann_context& ctx, ggml_tensor* dst){
@@ -2646,4 +2655,79 @@ void ggml_cann_elu(ggml_backend_cann_context& ctx, ggml_tensor* dst){
    ACL_CHECK(aclDestroyTensor(acl_input));
    ACL_CHECK(aclDestroyTensor(acl_dst));
    ACL_CHECK(aclDestroyScalar(alpha));
}

void ggml_cann_mean(ggml_backend_cann_context& ctx, ggml_tensor* dst){
    ggml_tensor * src0 = dst->src[0];
    aclTensor* acl_src = ggml_cann_create_tensor(src0);
    aclTensor* acl_dst = ggml_cann_create_tensor(dst);
    int64_t reduceDimValue[] = {3};
    aclIntArray* reduceDim = aclCreateIntArray(reduceDimValue, 1);
    bool keepDim = true;
    GGML_CANN_CALL_ACLNN_OP(Mean, acl_src, reduceDim, keepDim, ACL_FLOAT, acl_dst);
    ACL_CHECK(aclDestroyTensor(acl_src));
    ACL_CHECK(aclDestroyTensor(acl_dst));
    ACL_CHECK(aclDestroyIntArray(reduceDim));
}
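
For reference: ggml_cann_create_tensor reverses dimension order relative to ggml, so reducing aclnn dimension 3 averages over ggml's ne[0] (the row dimension), which is what GGML_OP_MEAN computes. A minimal CPU sketch of the same semantics; mean_rows_ref is a hypothetical helper assuming contiguous FP32 data, not part of this commit:

#include <cstdint>

// Reference semantics for the MEAN op above: average each row of length ne0,
// keeping the reduced dimension as size 1 (keepDim == true).
// Hypothetical illustration only, not part of this commit.
static void mean_rows_ref(const float * src, float * dst,
                          int64_t ne0, int64_t nrows) {
    for (int64_t r = 0; r < nrows; ++r) {
        double sum = 0.0;
        for (int64_t i = 0; i < ne0; ++i) {
            sum += src[r * ne0 + i];
        }
        dst[r] = (float)(sum / ne0);
    }
}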

void ggml_cann_pad_reflect_1d(ggml_backend_cann_context& ctx, ggml_tensor* dst){
    ggml_tensor * src0 = dst->src[0];
    int32_t *opts = (int32_t *) dst->op_params;
    int64_t paddingsArray[2] = {opts[0], opts[1]};
    aclIntArray* paddings = aclCreateIntArray(paddingsArray, 2);
    for (int64_t i = 0; i < src0->ne[3]; i++) {
        // offset each batch slice in bytes via nb[3], not in elements
        aclTensor* acl_src = ggml_cann_create_tensor(
            (char*)src0->data + i * src0->nb[3],
            ggml_cann_type_mapping(src0->type), ggml_element_size(src0),
            src0->ne, src0->nb, 3);
        aclTensor* acl_dst = ggml_cann_create_tensor(
            (char*)dst->data + i * dst->nb[3],
            ggml_cann_type_mapping(dst->type), ggml_element_size(dst),
            dst->ne, dst->nb, 3);
        GGML_CANN_CALL_ACLNN_OP(ReflectionPad1d, acl_src, paddings, acl_dst);
        ACL_CHECK(aclDestroyTensor(acl_src));
        ACL_CHECK(aclDestroyTensor(acl_dst));
    }
    ACL_CHECK(aclDestroyIntArray(paddings));
}
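
For clarity on the semantics: reflection padding mirrors a row around its end elements without repeating them, so [a, b, c, d] with paddings {2, 1} becomes [c, b, a, b, c, d, c]. A minimal per-row CPU sketch; pad_reflect_1d_ref is a hypothetical helper, valid only while each pad is smaller than ne0, as reflection padding generally requires:

#include <cstdint>

// Reference semantics for 1D reflection padding of a single row.
// Hypothetical illustration only; requires pad0 < ne0 and pad1 < ne0.
static void pad_reflect_1d_ref(const float * src, float * dst,
                               int64_t ne0, int64_t pad0, int64_t pad1) {
    for (int64_t j = 0; j < pad0 + ne0 + pad1; ++j) {
        int64_t i = j - pad0;                 // position in the source row
        if (i < 0)    i = -i;                 // reflect across the left edge
        if (i >= ne0) i = 2 * (ne0 - 1) - i;  // reflect across the right edge
        dst[j] = src[i];
    }
}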

void ggml_cann_count_equal(ggml_backend_cann_context& ctx, ggml_tensor* dst){
    ggml_tensor * src0 = dst->src[0];
    ggml_tensor * src1 = dst->src[1];
    aclTensor* acl_self = ggml_cann_create_tensor(src0);
    aclTensor* acl_other = ggml_cann_create_tensor(src1);
    GGML_CANN_CALL_ACLNN_OP(InplaceEqTensor, acl_self, acl_other);
    ggml_cann_sum(ctx, dst);
    ACL_CHECK(aclDestroyTensor(acl_self));
    ACL_CHECK(aclDestroyTensor(acl_other));
}
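
The op counts matching positions in two steps: InplaceEqTensor overwrites src0's buffer with an element-wise 0/1 equality mask (saving a temporary allocation), then ggml_cann_sum reduces that mask into dst. The result matches this scalar sketch; count_equal_ref is a hypothetical helper for contiguous same-type inputs:

#include <cstdint>

// Reference semantics for COUNT_EQUAL: how many positions i have a[i] == b[i].
// Hypothetical illustration only, not part of this commit.
static int64_t count_equal_ref(const float * a, const float * b, int64_t n) {
    int64_t count = 0;
    for (int64_t i = 0; i < n; ++i) {
        count += (a[i] == b[i]) ? 1 : 0;
    }
    return count;
}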

void ggml_cann_step(ggml_backend_cann_context& ctx, ggml_tensor* dst){
    ggml_tensor * src0 = dst->src[0];
    aclTensor* acl_src = ggml_cann_create_tensor(src0);
    aclTensor* acl_dst = ggml_cann_create_tensor(dst);
    float alphaValue = 0.0f;
    aclScalar* alpha = aclCreateScalar(&alphaValue, aclDataType::ACL_FLOAT);
    GGML_CANN_CALL_ACLNN_OP(GtScalar, acl_src, alpha, acl_dst);
    ACL_CHECK(aclDestroyTensor(acl_src));
    ACL_CHECK(aclDestroyTensor(acl_dst));
    ACL_CHECK(aclDestroyScalar(alpha));
}
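
STEP reduces to a greater-than comparison against zero: GtScalar writes 1 where src > 0 and 0 elsewhere, so x == 0 maps to 0. Equivalent scalar sketch (step_ref is a hypothetical helper for illustration):

// Reference semantics for STEP as expressed above via GtScalar(x, 0):
// 1.0f when x > 0, otherwise 0.0f (including at x == 0).
static inline float step_ref(float x) {
    return x > 0.0f ? 1.0f : 0.0f;
}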