From 1ff66ec871b9eb8ef35ff3cf6dcd09fe9289c14f Mon Sep 17 00:00:00 2001
From: Ben Arthur
Date: Tue, 16 Nov 2021 16:34:12 -0500
Subject: [PATCH] permit NNlibCUDA to use Float16

---
 src/batched/batchedmul.jl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/batched/batchedmul.jl b/src/batched/batchedmul.jl
index 490584882..5f84ac66e 100644
--- a/src/batched/batchedmul.jl
+++ b/src/batched/batchedmul.jl
@@ -220,7 +220,7 @@ _batched_mul!(::Type, C, A, B, α::Number, β::Number) = batched_mul_generic!(C,
 _batched_mul!(::Type{DT}, C, A, B, α::Number, β::Number) where {DT<:DenseArray{T}} where {T<:BlasFloat} =
     _batched_try_gemm!(DT, C, A, B, α, β)
 
-function _batched_try_gemm!(::Type{DT}, C, A, B, α::Number, β::Number) where {DT<:DenseArray{T}} where {T<:BlasFloat}
+function _batched_try_gemm!(::Type{DT}, C, A, B, α::Number, β::Number) where {DT<:DenseArray{T}} where {T}
     alpha, beta = promote(α, β, zero(T))
     alpha isa T && beta isa T || return batched_mul_generic!(C, A, B, α, β)
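
Context, not part of the patch: a minimal sketch of why the old `T<:BlasFloat`
bound blocked Float16, and why relaxing it to `where {T}` keeps a safe
fallback. The uncommented lines run on stock Julia; the GPU usage at the end
is hypothetical and assumes NNlib plus CUDA.jl with a device available.

    # LinearAlgebra's BlasFloat union covers only the four element types
    # with CPU BLAS routines, so the old method never matched Float16.
    using LinearAlgebra: BlasFloat

    @show Float16 <: BlasFloat   # false -- Float16 fell through to the generic path
    @show Float32 <: BlasFloat   # true

    # The body's guard still rejects scaling factors that cannot be kept in T,
    # so the relaxed signature does not lose the generic fallback:
    T = Float16
    alpha, beta = promote(1.0, 0.0, zero(T))   # Float64 literals promote away from T
    @show alpha isa T && beta isa T            # false -> batched_mul_generic!

    # Hypothetical GPU usage (assumes NNlib + CUDA.jl and a CUDA device):
    # using NNlib, CUDA
    # A = CUDA.rand(Float16, 4, 4, 8); B = CUDA.rand(Float16, 4, 4, 8)
    # NNlib.batched_mul(A, B)   # can now reach a batched gemm method for Float16

The design point is that dispatch alone no longer decides whether gemm is
possible: the method body bails out to batched_mul_generic! whenever α and β
cannot be represented in T, and downstream packages such as NNlibCUDA remain
free to supply (or decline) a fast batched-gemm method for element types like
Float16 that CPU BLAS does not cover.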