source | original_c | no_omp_formatted | omp_formatted
---|---|---|---|
elemwise_binary_op.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file elemwise_binary_op.h
* \brief Function definition of elementwise binary operators
*/
#ifndef MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
#define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
#include <mxnet/operator_util.h>
#include <mxnet/op_attr_types.h>
#include <vector>
#include <string>
#include <utility>
#include <typeinfo>
#include <algorithm>
#include "../mxnet_op.h"
#include "../mshadow_op.h"
#include "../../engine/openmp.h"
#include "elemwise_unary_op.h"
#include "../../common/utils.h"
#include "./init_op.h"
namespace mxnet {
namespace op {
/*! Gather binary operator functions into ElemwiseBinaryOp class */
class ElemwiseBinaryOp : public OpBase {
public:
/*! \brief For sparse, assume missing rvalue is 0 */
template<typename OP, int Req>
struct MissingRValueOp {
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType *out, const DType *lhs) {
KERNEL_ASSIGN(out[i], Req, OP::Map(lhs[i], DType(0)));
}
};
/*! \brief For sparse, assume missing lvalue is 0 */
template<typename OP, int Req>
struct MissingLValueOp {
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType *out, const DType *rhs) {
KERNEL_ASSIGN(out[i], Req, OP::Map(DType(0), rhs[i]));
}
};
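// Hedged usage sketch (not from the original header): both structs follow the
// mxnet_op::Kernel Map() convention, so a hypothetical launch that applies
// OP(lhs[i], 0) to n elements could look like
//   mxnet_op::Kernel<MissingRValueOp<mshadow::op::plus, kWriteTo>, cpu>::Launch(
//       s, n, out_ptr, lhs_ptr);  // out[i] = lhs[i] + 0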
private:
/*!
* \brief CSR operation requires temp space
*/
enum ResourceRequestType {
kTempSpace
};
/*!
* \brief Fill contiguous dense output rows with the value computed from
*        zero-valued lhs and rhs inputs (CPU-only version)
*/
template<typename DType, typename OP, typename xpu>
static inline size_t FillDense(mshadow::Stream<xpu> *s,
const size_t idx_l,
const size_t idx_r,
const OpReqType req,
mshadow::Tensor<xpu, 2, DType> *out,
const size_t iter_out) {
const int index_out_min = static_cast<int>(std::min(idx_l, idx_r));
if (static_cast<size_t>(index_out_min) > iter_out) {
const DType zero_input_val = OP::Map(DType(0), DType(0));
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
for (int i = static_cast<int>(iter_out); i < index_out_min; ++i) {
Fill<false>(s, (*out)[i], req, zero_input_val);
}
}
return static_cast<size_t>(index_out_min); // MSVC wants OMP loops to always use 'int'
}
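// Note on FillDense above: it fills output rows [iter_out, min(idx_l, idx_r)),
// i.e. the rows for which neither lhs nor rhs stores data, with OP(0, 0) and
// returns the new output-row cursor. For example, with iter_out == 2, idx_l == 5
// and idx_r == 7, rows 2..4 are filled and 5 is returned.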
static inline bool IsSameArray(const NDArray& a1, const NDArray& a2) {
return a1.var() == a2.var();
}
/*! \brief Minimum of three */
static MSHADOW_XINLINE size_t minthree(const size_t a, const size_t b, const size_t c) {
return a < b ? (a < c ? a : c) : (b < c ? b : c);
}
template<typename xpu, typename LOP, typename ROP, typename DType>
static void BackwardUseNone_(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mxnet_op;
Stream<xpu> *s = ctx.get_stream<xpu>();
const int size = static_cast<int>((outputs[0].Size() + DataType<DType>::kLanes - 1)
/ DataType<DType>::kLanes);
const DType *ograd_dptr = inputs[0].dptr<DType>();
if (std::is_same<LOP, mshadow_op::identity>::value && req[0] == kWriteInplace) {
CHECK_EQ(ograd_dptr, outputs[0].dptr<DType>());
} else if (req[0] != kNullOp) {
DType *lgrad_dptr = outputs[0].dptr<DType>();
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
Kernel<mxnet_op::op_with_req<LOP, Req>, xpu>::Launch(s, size, lgrad_dptr, ograd_dptr);
});
}
if (std::is_same<ROP, mshadow_op::identity>::value && req[1] == kWriteInplace) {
CHECK_EQ(ograd_dptr, outputs[1].dptr<DType>());
} else if (req[1] != kNullOp) {
DType *rgrad_dptr = outputs[1].dptr<DType>();
MXNET_ASSIGN_REQ_SWITCH(req[1], Req, {
Kernel<mxnet_op::op_with_req<ROP, Req>, xpu>::Launch(s, size, rgrad_dptr, ograd_dptr);
});
}
}
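// Note on BackwardUseNone_ above: it maps the output gradient through LOP/ROP
// element-wise without touching the forward inputs. As a hedged example, an
// elementwise-add backward would instantiate LOP = ROP = mshadow_op::identity,
// so each input gradient is a copy of (or, under kWriteInplace, an alias to)
// the output gradient.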
template<typename xpu, typename LOP, typename ROP, typename DType>
static void BackwardUseIn_(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
DCHECK_EQ(outputs.size(), 2U);
DCHECK_EQ(inputs.size(), 3U);
mxnet_op::Stream<xpu> *s = ctx.get_stream<xpu>();
const DType *ograd_dptr = inputs[0].dptr<DType>();
const DType *lhs_dptr = inputs[1].dptr<DType>();
const DType *rhs_dptr = inputs[2].dptr<DType>();
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
const int size = static_cast<int>(
(outputs[0].Size() + mxnet_op::DataType<DType>::kLanes - 1)
/ mxnet_op::DataType<DType>::kLanes);
DType * lgrad_dptr = outputs[0].dptr<DType>();
mxnet_op::Kernel<mxnet_op::op_with_req<mxnet_op::backward_grad<LOP>, Req>, xpu>::Launch(
s, size, lgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);});
MXNET_ASSIGN_REQ_SWITCH(req[1], Req, {
const int size = static_cast<int>(
(outputs[1].Size() + mxnet_op::DataType<DType>::kLanes - 1)
/ mxnet_op::DataType<DType>::kLanes);
DType * rgrad_dptr = outputs[1].dptr<DType>();
mxnet_op::Kernel<mxnet_op::op_with_req<mxnet_op::backward_grad<ROP>, Req>, xpu>::Launch(
s, size, rgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);});
}
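// Note on BackwardUseIn_ above: mxnet_op::backward_grad<LOP> combines the output
// gradient with the forward inputs. As a hedged example (assuming projection ops
// such as mshadow_op::right / mshadow_op::left that return their second / first
// argument), a product-rule backward (d(l*r)/dl = r, d(l*r)/dr = l) would yield
// lgrad = ograd * rhs and rgrad = ograd * lhs.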
template<
typename xpu,
typename LOP,
typename ROP,
typename DType,
bool in0_ok_dense = false,
bool in1_ok_dense = false,
bool in2_ok_dense = false,
typename BackupCompute>
static inline void BackwardUseInEx_(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs,
BackupCompute backup_compute) {
mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
// lhs grad
if (req[0] != kNullOp) {
// RspRspOp can handle dense outputs so long as OP(0, 0) == 0
MSHADOW_IDX_TYPE_SWITCH(inputs[1].aux_type(rowsparse::kIdx), IType, {
RspRspOp<DType, IType, LOP>(
s, attrs, ctx, inputs[1], inputs[2], req[0], outputs[0],
false, false, false, false);
});
// lhs in-place
MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(rowsparse::kIdx), IType, {
RspRspOp<DType, IType, mshadow::op::mul>(
s, attrs, ctx, outputs[0], inputs[0], req[0], outputs[0],
false, false, true, false);
});
}
// rhs grad
if (req[1] != kNullOp) {
MSHADOW_IDX_TYPE_SWITCH(inputs[1].aux_type(rowsparse::kIdx), IType, {
RspRspOp<DType, IType, ROP>(
s, attrs, ctx, inputs[1], inputs[2], req[1], outputs[1],
false, false, false, false);
});
// rhs in-place
MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(rowsparse::kIdx), IType, {
RspRspOp<DType, IType, mshadow::op::mul>(
s, attrs, ctx, inputs[0], outputs[1], req[1], outputs[1],
false, false, true, false);
});
}
}
protected:
/*! \brief Binary op handling for lhs/rhs: RspDns, RspRsp, DnsRsp, or RspRsp->Dns result */
template<typename DType, typename IType, typename OP>
static void RspRspOp(mshadow::Stream<cpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output,
bool lhs_may_be_dense,
bool rhs_may_be_dense,
bool allow_inplace,
bool scatter);
/*! \brief CSR -op- CSR binary operator for non-canonical NDArray */
template<typename DType, typename IType, typename CType, typename OP>
static inline void CsrCsrOp(mshadow::Stream<cpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output);
public:
/*!
* \brief Rsp-op-Rsp operation which produces a dense result
* \param attrs Attributes
* \param dev_mask Device mask
* \param dispatch_mode Dispatch Mode
* \param in_attrs Input storage attributes
* \param out_attrs Output storage attributes
* \return true if handled
*/
static bool SparseSparseWithDenseResult(const nnvm::NodeAttrs& attrs,
int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs);
/*!
* \brief Allow one of the inputs to be dense and still produce a sparse output
* \param attrs Attributes
* \param dev_mask Device mask
* \param dispatch_mode Dispatch Mode
* \param in_attrs Input storage attributes
* \param out_attrs Output storage attributes
* \return true if handled
*/
template<bool lhs_dense_ok = true, bool rhs_dense_ok = true>
static bool AllowLRDenseInputWithSparseOutputStorageType(const nnvm::NodeAttrs& attrs,
int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs) {
CHECK_EQ(in_attrs->size(), 2U) << " in operator " << attrs.name;
CHECK_EQ(out_attrs->size(), 1U) << " in operator " << attrs.name;
const auto& lhs_stype = in_attrs->at(0);
const auto& rhs_stype = in_attrs->at(1);
auto& out_stype = out_attrs->at(0);
bool dispatched = false;
const bool invalid_ctx = dev_mask != mshadow::cpu::kDevMask;
const auto dispatch_ex = invalid_ctx ? DispatchMode::kFComputeFallback :
DispatchMode::kFComputeEx;
if (!dispatched && lhs_stype == kDefaultStorage && rhs_stype == kDefaultStorage) {
// dns, dns -> dns
dispatched = storage_type_assign(&out_stype, kDefaultStorage,
dispatch_mode, DispatchMode::kFCompute);
}
if (!dispatched) {
if ((lhs_stype == kRowSparseStorage && rhs_stype == kRowSparseStorage) ||
(rhs_dense_ok && lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) ||
(lhs_dense_ok && lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) {
// rsp, rsp -> rsp
// rsp, dns -> rsp
// dns, rsp -> rsp
dispatched = storage_type_assign(&out_stype, kRowSparseStorage,
dispatch_mode, dispatch_ex);
} else if (lhs_stype == kCSRStorage && rhs_stype == kCSRStorage) {
// csr, csr -> csr
dispatched = storage_type_assign(&out_stype, kCSRStorage,
dispatch_mode, dispatch_ex);
} else if ((lhs_stype == kCSRStorage && rhs_dense_ok) ||
(rhs_stype == kCSRStorage && lhs_dense_ok)) {
// csr, dns -> csr
// dns, csr -> csr
dispatched = storage_type_assign(&out_stype, kCSRStorage,
dispatch_mode, DispatchMode::kFComputeFallback);
}
}
if (!dispatched) {
dispatch_fallback(out_attrs, dispatch_mode);
}
if (*dispatch_mode == DispatchMode::kFComputeFallback) {
LogStorageFallback(attrs, dev_mask, in_attrs, out_attrs);
}
return true;
}
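// Hypothetical registration sketch (not performed by this header): an operator
// that tolerates one dense input could install the inference function above via
//   .set_attr<FInferStorageType>("FInferStorageType",
//       ElemwiseBinaryOp::AllowLRDenseInputWithSparseOutputStorageType<true, true>)
// so that rsp/dns mixes still produce a row_sparse output on CPU.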
/*!
* \brief Backward pass computing input gradient using forward inputs
* \param attrs Attributes
* \param dev_mask Device mask
* \param dispatch_mode Dispatch Mode
* \param in_attrs Input storage attributes
* \param out_attrs Output storage attributes
* \return true if handled
*/
static bool BackwardUseInStorageType(const nnvm::NodeAttrs& attrs,
int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs);
template<typename xpu, typename OP>
static void Compute(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mxnet_op;
if (req[0] != kNullOp) {
Stream<xpu> *s = ctx.get_stream<xpu>();
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 1U);
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
+ DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
outputs[0].dptr<DType>(),
inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
});
});
}
}
template<typename xpu, typename OP>
static void ComputeWithHalf2(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mxnet_op;
if (req[0] != kNullOp) {
Stream<xpu> *s = ctx.get_stream<xpu>();
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 1U);
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, {
const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
+ DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
outputs[0].dptr<DType>(),
inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
});
});
}
}
template<typename xpu, typename OP>
static void ComputeEx(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
CHECK_EQ(inputs.size(), 2);
CHECK_EQ(outputs.size(), 1);
if (req[0] == kNullOp) return;
const auto lhs_stype = inputs[0].storage_type();
const auto out_stype = outputs[0].storage_type();
mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
if ((common::ContainsOnlyStorage(inputs, kRowSparseStorage))
&& (out_stype == kRowSparseStorage || out_stype == kDefaultStorage)) {
// rsp, rsp -> rsp
// rsp, rsp -> dns
const int rsp_input_idx = lhs_stype == kRowSparseStorage ? 0 : 1;
MSHADOW_IDX_TYPE_SWITCH(inputs[rsp_input_idx].aux_type(rowsparse::kIdx), IType, {
MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, {
RspRspOp<DType, IType, OP>(
s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0], false, false, false, false);
});
});
} else if (common::ContainsOnlyStorage(inputs, kCSRStorage) && out_stype == kCSRStorage) {
// csr, csr -> csr
MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(csr::kIdx), IType, {
MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(csr::kIndPtr), CType, {
MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, {
CsrCsrOp<DType, IType, CType, OP>(
s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0]);
});
});
});
} else {
LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs);
}
}
/*! \brief ComputeEx allowing dense lvalue and/or rvalue */
template<typename xpu, typename OP, bool lhs_may_be_dense, bool rhs_may_be_dense>
static void ComputeDnsLRValueEx(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
using namespace mshadow;
using namespace mshadow::expr;
CHECK_EQ(inputs.size(), 2);
CHECK_EQ(outputs.size(), 1);
if (req[0] == kNullOp) return;
const auto lhs_stype = inputs[0].storage_type();
const auto rhs_stype = inputs[1].storage_type();
const auto out_stype = outputs[0].storage_type();
if ((out_stype == kRowSparseStorage || out_stype == kDefaultStorage) &&
((lhs_stype == kRowSparseStorage && rhs_stype == kRowSparseStorage) ||
(lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) ||
(lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) &&
lhs_may_be_dense && rhs_may_be_dense) {
// rsp, rsp -> rsp
// rsp, rsp -> dns
// rsp, dns -> rsp
// dns, rsp -> rsp
// More than one dense input is not allowed (this will be checked in RspRspOp):
// rsp, dns -> dns <-- NOT ALLOWED
// dns, rsp -> dns <-- NOT ALLOWED
mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, {
MSHADOW_IDX_TYPE_SWITCH(outputs[0].aux_type(rowsparse::kIdx), IType, {
RspRspOp<DType, IType, OP>(
s, attrs, ctx, inputs[0], inputs[1],
req[0], outputs[0], lhs_may_be_dense, rhs_may_be_dense, false, false);
});
});
} else if (lhs_stype == kCSRStorage && rhs_stype == kCSRStorage) {
ComputeEx<xpu, OP>(attrs, ctx, inputs, req, outputs);
} else {
LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs);
}
}
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseNone(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
BackwardUseNone_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
});
}
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseNoneWithHalf2(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, {
BackwardUseNone_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
});
}
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseNoneEx(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
CHECK_EQ(inputs.size(), 1U); // output grad
CHECK_EQ(outputs.size(), 2U); // lhs input grad, rhs input grad
const auto in_stype = inputs[0].storage_type();
const auto lhs_stype = outputs[0].storage_type();
const auto rhs_stype = outputs[1].storage_type();
// lhs grad
if (req[0] != kNullOp) {
if (in_stype == lhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
CHECK_EQ(outputs[0].storage_type(), in_stype);
// rsp -> rsp, _ : op must map a 0 input to a 0 output
DCHECK_LT(fabs(static_cast<float>(LOP::Map(0))), 1e-5f);
UnaryOp::ComputeEx<xpu, LOP>(attrs, ctx, inputs, req, {outputs[0]});
} else {
LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs);
}
}
// rhs grad
if (req[1] != kNullOp) {
if (in_stype == rhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
CHECK_EQ(outputs[0].storage_type(), in_stype);
// rsp -> _, rsp : op must map a 0 input to a 0 output
DCHECK_LT(fabs(static_cast<float>(ROP::Map(0))), 1e-5f);
UnaryOp::ComputeEx<xpu, ROP>(attrs, ctx, inputs, req, {outputs[1]});
} else {
LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs);
}
}
}
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseIn(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
BackwardUseIn_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
});
}
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseInWithHalf2(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, {
BackwardUseIn_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
});
}
template<
typename xpu, typename LOP, typename ROP,
bool in0_ok_dense = false, bool in1_ok_dense = false, bool in2_ok_dense = false>
static inline void BackwardUseInEx(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
using namespace common;
CHECK_EQ(inputs.size(), 3U);
CHECK_EQ(outputs.size(), 2U); // lhs input grad, rhs input grad
const auto lhs_grad_stype = outputs[0].storage_type();
const auto rhs_grad_stype = outputs[1].storage_type();
if (ContainsOnlyStorage(inputs, kRowSparseStorage) &&
(lhs_grad_stype == kDefaultStorage || lhs_grad_stype == kRowSparseStorage) &&
(rhs_grad_stype == kDefaultStorage || rhs_grad_stype == kRowSparseStorage)) {
// rsp, rsp, rsp -> [dns, rsp], [dns, rsp]
MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, {
BackwardUseInEx_<xpu, LOP, ROP, DType, in0_ok_dense, in1_ok_dense, in2_ok_dense>(
attrs, ctx, inputs, req, outputs, BackwardUseIn<xpu, LOP, ROP>);
});
}
}
}; // class ElemwiseBinaryOp
/*! \brief Binary launch */
#define MXNET_OPERATOR_REGISTER_BINARY(name) \
NNVM_REGISTER_OP(name) \
.set_num_inputs(2) \
.set_num_outputs(1) \
.set_attr<nnvm::FListInputNames>("FListInputNames", \
[](const NodeAttrs& attrs) { \
return std::vector<std::string>{"lhs", "rhs"}; \
}) \
.set_attr<nnvm::FInferShape>("FInferShape", ElemwiseShape<2, 1>) \
.set_attr<nnvm::FInferType>("FInferType", ElemwiseType<2, 1>) \
.set_attr<nnvm::FInplaceOption>("FInplaceOption", \
[](const NodeAttrs& attrs){ \
return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}}; \
}) \
.add_argument("lhs", "NDArray-or-Symbol", "first input") \
.add_argument("rhs", "NDArray-or-Symbol", "second input")
/*! \brief Binary launch, with FComputeEx for csr and rsp available */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU(__name$, __kernel$) \
MXNET_OPERATOR_REGISTER_BINARY(__name$) \
.set_attr<FInferStorageType>("FInferStorageType", \
ElemwiseStorageType<2, 1, true, true, true>) \
.set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \
.set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \
.set_attr<FResourceRequest>("FResourceRequest", /* For Sparse CSR */ \
[](const NodeAttrs& attrs) { \
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};})
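/* Illustrative use (hedged sketch): a zero-preserving kernel can be registered
 * through the macro above so the csr/rsp FComputeEx path is available, e.g.
 *   MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU(elemwise_add, mshadow::op::plus);
 */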
/*! \brief Binary launch, dense result
 * FInferStorageType is set to ElemwiseBinaryOp::SparseSparseWithDenseResult,
 * so sparse inputs produce a dense result.
*/
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(__name$, __kernel$) \
MXNET_OPERATOR_REGISTER_BINARY(__name$) \
.set_attr<FInferStorageType>("FInferStorageType", \
ElemwiseBinaryOp::SparseSparseWithDenseResult) \
.set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \
.set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>)
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
|
/*!
* \file elemwise_binary_op.h
* \brief Function definition of elementwise binary operators
*/
#ifndef MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
#define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
#include <mxnet/operator_util.h>
#include <mxnet/op_attr_types.h>
#include <vector>
#include <string>
#include <utility>
#include <typeinfo>
#include <algorithm>
#include "../mxnet_op.h"
#include "../mshadow_op.h"
#include "../../engine/openmp.h"
#include "elemwise_unary_op.h"
#include "../../common/utils.h"
#include "./init_op.h"
namespace mxnet {
namespace op {
/*! Gather binary operator functions into ElemwiseBinaryOp class */
class ElemwiseBinaryOp : public OpBase {
public:
/*! \brief For sparse, assume missing rvalue is 0 */
template<typename OP, int Req>
struct MissingRValueOp {
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType *out, const DType *lhs) {
KERNEL_ASSIGN(out[i], Req, OP::Map(lhs[i], DType(0)));
}
};
/*! \brief For sparse, assume missing lvalue is 0 */
template<typename OP, int Req>
struct MissingLValueOp {
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType *out, const DType *rhs) {
KERNEL_ASSIGN(out[i], Req, OP::Map(DType(0), rhs[i]));
}
};
private:
/*!
* \brief CSR operation requires temp space
*/
enum ResourceRequestType {
kTempSpace
};
/*!
* \brief Fill contiguous dense output rows with the value computed from
*        zero-valued lhs and rhs inputs (CPU-only version)
*/
template<typename DType, typename OP, typename xpu>
static inline size_t FillDense(mshadow::Stream<xpu> *s,
const size_t idx_l,
const size_t idx_r,
const OpReqType req,
mshadow::Tensor<xpu, 2, DType> *out,
const size_t iter_out) {
const int index_out_min = static_cast<int>(std::min(idx_l, idx_r));
if (static_cast<size_t>(index_out_min) > iter_out) {
const DType zero_input_val = OP::Map(DType(0), DType(0));
for (int i = static_cast<int>(iter_out); i < index_out_min; ++i) {
Fill<false>(s, (*out)[i], req, zero_input_val);
}
}
return static_cast<size_t>(index_out_min); // MSVC wants OMP loops to always use 'int'
}
static inline bool IsSameArray(const NDArray& a1, const NDArray& a2) {
return a1.var() == a2.var();
}
/*! \brief Minimum of three */
static MSHADOW_XINLINE size_t minthree(const size_t a, const size_t b, const size_t c) {
return a < b ? (a < c ? a : c) : (b < c ? b : c);
}
template<typename xpu, typename LOP, typename ROP, typename DType>
static void BackwardUseNone_(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mxnet_op;
Stream<xpu> *s = ctx.get_stream<xpu>();
const int size = static_cast<int>((outputs[0].Size() + DataType<DType>::kLanes - 1)
/ DataType<DType>::kLanes);
const DType *ograd_dptr = inputs[0].dptr<DType>();
if (std::is_same<LOP, mshadow_op::identity>::value && req[0] == kWriteInplace) {
CHECK_EQ(ograd_dptr, outputs[0].dptr<DType>());
} else if (req[0] != kNullOp) {
DType *lgrad_dptr = outputs[0].dptr<DType>();
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
Kernel<mxnet_op::op_with_req<LOP, Req>, xpu>::Launch(s, size, lgrad_dptr, ograd_dptr);
});
}
if (std::is_same<ROP, mshadow_op::identity>::value && req[1] == kWriteInplace) {
CHECK_EQ(ograd_dptr, outputs[1].dptr<DType>());
} else if (req[1] != kNullOp) {
DType *rgrad_dptr = outputs[1].dptr<DType>();
MXNET_ASSIGN_REQ_SWITCH(req[1], Req, {
Kernel<mxnet_op::op_with_req<ROP, Req>, xpu>::Launch(s, size, rgrad_dptr, ograd_dptr);
});
}
}
template<typename xpu, typename LOP, typename ROP, typename DType>
static void BackwardUseIn_(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
DCHECK_EQ(outputs.size(), 2U);
DCHECK_EQ(inputs.size(), 3U);
mxnet_op::Stream<xpu> *s = ctx.get_stream<xpu>();
const DType *ograd_dptr = inputs[0].dptr<DType>();
const DType *lhs_dptr = inputs[1].dptr<DType>();
const DType *rhs_dptr = inputs[2].dptr<DType>();
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
const int size = static_cast<int>(
(outputs[0].Size() + mxnet_op::DataType<DType>::kLanes - 1)
/ mxnet_op::DataType<DType>::kLanes);
DType * lgrad_dptr = outputs[0].dptr<DType>();
mxnet_op::Kernel<mxnet_op::op_with_req<mxnet_op::backward_grad<LOP>, Req>, xpu>::Launch(
s, size, lgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);});
MXNET_ASSIGN_REQ_SWITCH(req[1], Req, {
const int size = static_cast<int>(
(outputs[1].Size() + mxnet_op::DataType<DType>::kLanes - 1)
/ mxnet_op::DataType<DType>::kLanes);
DType * rgrad_dptr = outputs[1].dptr<DType>();
mxnet_op::Kernel<mxnet_op::op_with_req<mxnet_op::backward_grad<ROP>, Req>, xpu>::Launch(
s, size, rgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);});
}
template<
typename xpu,
typename LOP,
typename ROP,
typename DType,
bool in0_ok_dense = false,
bool in1_ok_dense = false,
bool in2_ok_dense = false,
typename BackupCompute>
static inline void BackwardUseInEx_(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs,
BackupCompute backup_compute) {
mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
// lhs grad
if (req[0] != kNullOp) {
// RspRspOp can handle dense outputs so long as OP(0, 0) == 0
MSHADOW_IDX_TYPE_SWITCH(inputs[1].aux_type(rowsparse::kIdx), IType, {
RspRspOp<DType, IType, LOP>(
s, attrs, ctx, inputs[1], inputs[2], req[0], outputs[0],
false, false, false, false);
});
// lhs in-place
MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(rowsparse::kIdx), IType, {
RspRspOp<DType, IType, mshadow::op::mul>(
s, attrs, ctx, outputs[0], inputs[0], req[0], outputs[0],
false, false, true, false);
});
}
// rhs grad
if (req[1] != kNullOp) {
MSHADOW_IDX_TYPE_SWITCH(inputs[1].aux_type(rowsparse::kIdx), IType, {
RspRspOp<DType, IType, ROP>(
s, attrs, ctx, inputs[1], inputs[2], req[1], outputs[1],
false, false, false, false);
});
// rhs in-place
MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(rowsparse::kIdx), IType, {
RspRspOp<DType, IType, mshadow::op::mul>(
s, attrs, ctx, inputs[0], outputs[1], req[1], outputs[1],
false, false, true, false);
});
}
}
protected:
/*! \brief Binary op handling for lhs/rhs: RspDns, RspRsp, DnsRsp, or RspRsp->Dns result */
template<typename DType, typename IType, typename OP>
static void RspRspOp(mshadow::Stream<cpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output,
bool lhs_may_be_dense,
bool rhs_may_be_dense,
bool allow_inplace,
bool scatter);
/*! \brief CSR -op- CSR binary operator for non-canonical NDArray */
template<typename DType, typename IType, typename CType, typename OP>
static inline void CsrCsrOp(mshadow::Stream<cpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output);
public:
/*!
* \brief Rsp-op-Rsp operation which produces a dense result
* \param attrs Attributes
* \param dev_mask Device mask
* \param dispatch_mode Dispatch Mode
* \param in_attrs Input storage attributes
* \param out_attrs Output storage attributes
* \return true if handled
*/
static bool SparseSparseWithDenseResult(const nnvm::NodeAttrs& attrs,
int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs);
/*!
* \brief Allow one of the inputs to be dense and still produce a sparse output
* \param attrs Attributes
* \param dev_mask Device mask
* \param dispatch_mode Dispatch Mode
* \param in_attrs Input storage attributes
* \param out_attrs Output storage attributes
* \return true if handled
*/
template<bool lhs_dense_ok = true, bool rhs_dense_ok = true>
static bool AllowLRDenseInputWithSparseOutputStorageType(const nnvm::NodeAttrs& attrs,
int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs) {
CHECK_EQ(in_attrs->size(), 2U) << " in operator " << attrs.name;
CHECK_EQ(out_attrs->size(), 1U) << " in operator " << attrs.name;
const auto& lhs_stype = in_attrs->at(0);
const auto& rhs_stype = in_attrs->at(1);
auto& out_stype = out_attrs->at(0);
bool dispatched = false;
const bool invalid_ctx = dev_mask != mshadow::cpu::kDevMask;
const auto dispatch_ex = invalid_ctx ? DispatchMode::kFComputeFallback :
DispatchMode::kFComputeEx;
if (!dispatched && lhs_stype == kDefaultStorage && rhs_stype == kDefaultStorage) {
// dns, dns -> dns
dispatched = storage_type_assign(&out_stype, kDefaultStorage,
dispatch_mode, DispatchMode::kFCompute);
}
if (!dispatched) {
if ((lhs_stype == kRowSparseStorage && rhs_stype == kRowSparseStorage) ||
(rhs_dense_ok && lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) ||
(lhs_dense_ok && lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) {
// rsp, rsp -> rsp
// rsp, dns -> rsp
// dns, rsp -> rsp
dispatched = storage_type_assign(&out_stype, kRowSparseStorage,
dispatch_mode, dispatch_ex);
} else if (lhs_stype == kCSRStorage && rhs_stype == kCSRStorage) {
// csr, csr -> csr
dispatched = storage_type_assign(&out_stype, kCSRStorage,
dispatch_mode, dispatch_ex);
} else if ((lhs_stype == kCSRStorage && rhs_dense_ok) ||
(rhs_stype == kCSRStorage && lhs_dense_ok)) {
// csr, dns -> csr
// dns, csr -> csr
dispatched = storage_type_assign(&out_stype, kCSRStorage,
dispatch_mode, DispatchMode::kFComputeFallback);
}
}
if (!dispatched) {
dispatch_fallback(out_attrs, dispatch_mode);
}
if (*dispatch_mode == DispatchMode::kFComputeFallback) {
LogStorageFallback(attrs, dev_mask, in_attrs, out_attrs);
}
return true;
}
/*!
* \brief Backward pass computing input gradient using forward inputs
* \param attrs Attributes
* \param dev_mask Device mask
* \param dispatch_mode Dispatch Mode
* \param in_attrs Input storage attributes
* \param out_attrs Output storage attributes
* \return true if handled
*/
static bool BackwardUseInStorageType(const nnvm::NodeAttrs& attrs,
int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs);
template<typename xpu, typename OP>
static void Compute(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mxnet_op;
if (req[0] != kNullOp) {
Stream<xpu> *s = ctx.get_stream<xpu>();
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 1U);
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
+ DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
outputs[0].dptr<DType>(),
inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
});
});
}
}
template<typename xpu, typename OP>
static void ComputeWithHalf2(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mxnet_op;
if (req[0] != kNullOp) {
Stream<xpu> *s = ctx.get_stream<xpu>();
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 1U);
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, {
const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
+ DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
outputs[0].dptr<DType>(),
inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
});
});
}
}
template<typename xpu, typename OP>
static void ComputeEx(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
CHECK_EQ(inputs.size(), 2);
CHECK_EQ(outputs.size(), 1);
if (req[0] == kNullOp) return;
const auto lhs_stype = inputs[0].storage_type();
const auto out_stype = outputs[0].storage_type();
mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
if ((common::ContainsOnlyStorage(inputs, kRowSparseStorage))
&& (out_stype == kRowSparseStorage || out_stype == kDefaultStorage)) {
// rsp, rsp -> rsp
// rsp, rsp -> dns
const int rsp_input_idx = lhs_stype == kRowSparseStorage ? 0 : 1;
MSHADOW_IDX_TYPE_SWITCH(inputs[rsp_input_idx].aux_type(rowsparse::kIdx), IType, {
MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, {
RspRspOp<DType, IType, OP>(
s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0], false, false, false, false);
});
});
} else if (common::ContainsOnlyStorage(inputs, kCSRStorage) && out_stype == kCSRStorage) {
// csr, csr -> csr
MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(csr::kIdx), IType, {
MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(csr::kIndPtr), CType, {
MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, {
CsrCsrOp<DType, IType, CType, OP>(
s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0]);
});
});
});
} else {
LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs);
}
}
/*! \brief ComputeEx allowing dense lvalue and/or rvalue */
template<typename xpu, typename OP, bool lhs_may_be_dense, bool rhs_may_be_dense>
static void ComputeDnsLRValueEx(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
using namespace mshadow;
using namespace mshadow::expr;
CHECK_EQ(inputs.size(), 2);
CHECK_EQ(outputs.size(), 1);
if (req[0] == kNullOp) return;
const auto lhs_stype = inputs[0].storage_type();
const auto rhs_stype = inputs[1].storage_type();
const auto out_stype = outputs[0].storage_type();
if ((out_stype == kRowSparseStorage || out_stype == kDefaultStorage) &&
((lhs_stype == kRowSparseStorage && rhs_stype == kRowSparseStorage) ||
(lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) ||
(lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) &&
lhs_may_be_dense && rhs_may_be_dense) {
// rsp, rsp -> rsp
// rsp, rsp -> dns
// rsp, dns -> rsp
// dns, rsp -> rsp
// More than one dense input is not allowed (this will be checked in RspRspOp):
// rsp, dns -> dns <-- NOT ALLOWED
// dns, rsp -> dns <-- NOT ALLOWED
mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, {
MSHADOW_IDX_TYPE_SWITCH(outputs[0].aux_type(rowsparse::kIdx), IType, {
RspRspOp<DType, IType, OP>(
s, attrs, ctx, inputs[0], inputs[1],
req[0], outputs[0], lhs_may_be_dense, rhs_may_be_dense, false, false);
});
});
} else if (lhs_stype == kCSRStorage && rhs_stype == kCSRStorage) {
ComputeEx<xpu, OP>(attrs, ctx, inputs, req, outputs);
} else {
LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs);
}
}
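// Hedged usage sketch for ComputeDnsLRValueEx above: an operator whose kernel
// maps (x, 0) and (0, x) to 0, e.g. elementwise multiply, could be wired in as
//   ElemwiseBinaryOp::ComputeDnsLRValueEx<cpu, mshadow::op::mul, true, true>
// for its FComputeEx<cpu>, allowing one dense operand with a row_sparse result.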
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseNone(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
BackwardUseNone_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
});
}
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseNoneWithHalf2(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, {
BackwardUseNone_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
});
}
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseNoneEx(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
CHECK_EQ(inputs.size(), 1U); // output grad
CHECK_EQ(outputs.size(), 2U); // lhs input grad, rhs input grad
const auto in_stype = inputs[0].storage_type();
const auto lhs_stype = outputs[0].storage_type();
const auto rhs_stype = outputs[1].storage_type();
// lhs grad
if (req[0] != kNullOp) {
if (in_stype == lhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
CHECK_EQ(outputs[0].storage_type(), in_stype);
// rsp -> rsp, _ : op must map a 0 input to a 0 output
DCHECK_LT(fabs(static_cast<float>(LOP::Map(0))), 1e-5f);
UnaryOp::ComputeEx<xpu, LOP>(attrs, ctx, inputs, req, {outputs[0]});
} else {
LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs);
}
}
// rhs grad
if (req[1] != kNullOp) {
if (in_stype == rhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
CHECK_EQ(outputs[0].storage_type(), in_stype);
// rsp -> _, rsp : op must map a 0 input to a 0 output
DCHECK_LT(fabs(static_cast<float>(ROP::Map(0))), 1e-5f);
UnaryOp::ComputeEx<xpu, ROP>(attrs, ctx, inputs, req, {outputs[1]});
} else {
LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs);
}
}
}
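// Note on BackwardUseNoneEx above: the DCHECKs require LOP/ROP to map a zero
// input to (approximately) zero, so applying them only to the stored values of
// a sparse output gradient is sufficient; identity or negation qualify, while an
// op with LOP::Map(0) != 0 should not be dispatched to this sparse path.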
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseIn(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
BackwardUseIn_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
});
}
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseInWithHalf2(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, {
BackwardUseIn_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
});
}
template<
typename xpu, typename LOP, typename ROP,
bool in0_ok_dense = false, bool in1_ok_dense = false, bool in2_ok_dense = false>
static inline void BackwardUseInEx(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
using namespace common;
CHECK_EQ(inputs.size(), 3U);
CHECK_EQ(outputs.size(), 2U); // lhs input grad, rhs input grad
const auto lhs_grad_stype = outputs[0].storage_type();
const auto rhs_grad_stype = outputs[1].storage_type();
if (ContainsOnlyStorage(inputs, kRowSparseStorage) &&
(lhs_grad_stype == kDefaultStorage || lhs_grad_stype == kRowSparseStorage) &&
(rhs_grad_stype == kDefaultStorage || rhs_grad_stype == kRowSparseStorage)) {
// rsp, rsp, rsp -> [dns, rsp], [dns, rsp]
MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, {
BackwardUseInEx_<xpu, LOP, ROP, DType, in0_ok_dense, in1_ok_dense, in2_ok_dense>(
attrs, ctx, inputs, req, outputs, BackwardUseIn<xpu, LOP, ROP>);
});
}
}
}; // class ElemwiseBinaryOp
/*! \brief Binary launch */
#define MXNET_OPERATOR_REGISTER_BINARY(name) \
NNVM_REGISTER_OP(name) \
.set_num_inputs(2) \
.set_num_outputs(1) \
.set_attr<nnvm::FListInputNames>("FListInputNames", \
[](const NodeAttrs& attrs) { \
return std::vector<std::string>{"lhs", "rhs"}; \
}) \
.set_attr<nnvm::FInferShape>("FInferShape", ElemwiseShape<2, 1>) \
.set_attr<nnvm::FInferType>("FInferType", ElemwiseType<2, 1>) \
.set_attr<nnvm::FInplaceOption>("FInplaceOption", \
[](const NodeAttrs& attrs){ \
return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}}; \
}) \
.add_argument("lhs", "NDArray-or-Symbol", "first input") \
.add_argument("rhs", "NDArray-or-Symbol", "second input")
/*! \brief Binary launch, with FComputeEx for csr and rsp available */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU(__name$, __kernel$) \
MXNET_OPERATOR_REGISTER_BINARY(__name$) \
.set_attr<FInferStorageType>("FInferStorageType", \
ElemwiseStorageType<2, 1, true, true, true>) \
.set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \
.set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \
.set_attr<FResourceRequest>("FResourceRequest", /* For Sparse CSR */ \
[](const NodeAttrs& attrs) { \
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};})
/*! \brief Binary launch, dense result
 * FInferStorageType is set to ElemwiseBinaryOp::SparseSparseWithDenseResult,
 * so sparse inputs produce a dense result.
*/
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(__name$, __kernel$) \
MXNET_OPERATOR_REGISTER_BINARY(__name$) \
.set_attr<FInferStorageType>("FInferStorageType", \
ElemwiseBinaryOp::SparseSparseWithDenseResult) \
.set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \
.set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>)
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
|
/*!
* \file elemwise_binary_op.h
* \brief Function definition of elementwise binary operators
*/
#ifndef MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
#define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
#include <mxnet/operator_util.h>
#include <mxnet/op_attr_types.h>
#include <vector>
#include <string>
#include <utility>
#include <typeinfo>
#include <algorithm>
#include "../mxnet_op.h"
#include "../mshadow_op.h"
#include "../../engine/openmp.h"
#include "elemwise_unary_op.h"
#include "../../common/utils.h"
#include "./init_op.h"
namespace mxnet {
namespace op {
/*! Gather binary operator functions into ElemwiseBinaryOp class */
class ElemwiseBinaryOp : public OpBase {
public:
/*! \brief For sparse, assume missing rvalue is 0 */
template<typename OP, int Req>
struct MissingRValueOp {
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType *out, const DType *lhs) {
KERNEL_ASSIGN(out[i], Req, OP::Map(lhs[i], DType(0)));
}
};
/*! \brief For sparse, assume missing lvalue is 0 */
template<typename OP, int Req>
struct MissingLValueOp {
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType *out, const DType *rhs) {
KERNEL_ASSIGN(out[i], Req, OP::Map(DType(0), rhs[i]));
}
};
private:
/*!
* \brief CSR operation requires temp space
*/
enum ResourceRequestType {
kTempSpace
};
/*!
* \brief Fill contiguous dense output rows with the value computed from
*        zero-valued lhs and rhs inputs (CPU-only version)
*/
template<typename DType, typename OP, typename xpu>
static inline size_t FillDense(mshadow::Stream<xpu> *s,
const size_t idx_l,
const size_t idx_r,
const OpReqType req,
mshadow::Tensor<xpu, 2, DType> *out,
const size_t iter_out) {
const int index_out_min = static_cast<int>(std::min(idx_l, idx_r));
if (static_cast<size_t>(index_out_min) > iter_out) {
const DType zero_input_val = OP::Map(DType(0), DType(0));
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
for (int i = static_cast<int>(iter_out); i < index_out_min; ++i) {
Fill<false>(s, (*out)[i], req, zero_input_val);
}
}
return static_cast<size_t>(index_out_min); // MSVC wants OMP loops to always use 'int'
}
static inline bool IsSameArray(const NDArray& a1, const NDArray& a2) {
return a1.var() == a2.var();
}
/*! \brief Minimum of three */
static MSHADOW_XINLINE size_t minthree(const size_t a, const size_t b, const size_t c) {
return a < b ? (a < c ? a : c) : (b < c ? b : c);
}
template<typename xpu, typename LOP, typename ROP, typename DType>
static void BackwardUseNone_(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mxnet_op;
Stream<xpu> *s = ctx.get_stream<xpu>();
const int size = static_cast<int>((outputs[0].Size() + DataType<DType>::kLanes - 1)
/ DataType<DType>::kLanes);
const DType *ograd_dptr = inputs[0].dptr<DType>();
if (std::is_same<LOP, mshadow_op::identity>::value && req[0] == kWriteInplace) {
CHECK_EQ(ograd_dptr, outputs[0].dptr<DType>());
} else if (req[0] != kNullOp) {
DType *lgrad_dptr = outputs[0].dptr<DType>();
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
Kernel<mxnet_op::op_with_req<LOP, Req>, xpu>::Launch(s, size, lgrad_dptr, ograd_dptr);
});
}
if (std::is_same<ROP, mshadow_op::identity>::value && req[1] == kWriteInplace) {
CHECK_EQ(ograd_dptr, outputs[1].dptr<DType>());
} else if (req[1] != kNullOp) {
DType *rgrad_dptr = outputs[1].dptr<DType>();
MXNET_ASSIGN_REQ_SWITCH(req[1], Req, {
Kernel<mxnet_op::op_with_req<ROP, Req>, xpu>::Launch(s, size, rgrad_dptr, ograd_dptr);
});
}
}
template<typename xpu, typename LOP, typename ROP, typename DType>
static void BackwardUseIn_(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
DCHECK_EQ(outputs.size(), 2U);
DCHECK_EQ(inputs.size(), 3U);
mxnet_op::Stream<xpu> *s = ctx.get_stream<xpu>();
const DType *ograd_dptr = inputs[0].dptr<DType>();
const DType *lhs_dptr = inputs[1].dptr<DType>();
const DType *rhs_dptr = inputs[2].dptr<DType>();
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
const int size = static_cast<int>(
(outputs[0].Size() + mxnet_op::DataType<DType>::kLanes - 1)
/ mxnet_op::DataType<DType>::kLanes);
DType * lgrad_dptr = outputs[0].dptr<DType>();
mxnet_op::Kernel<mxnet_op::op_with_req<mxnet_op::backward_grad<LOP>, Req>, xpu>::Launch(
s, size, lgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);});
MXNET_ASSIGN_REQ_SWITCH(req[1], Req, {
const int size = static_cast<int>(
(outputs[1].Size() + mxnet_op::DataType<DType>::kLanes - 1)
/ mxnet_op::DataType<DType>::kLanes);
DType * rgrad_dptr = outputs[1].dptr<DType>();
mxnet_op::Kernel<mxnet_op::op_with_req<mxnet_op::backward_grad<ROP>, Req>, xpu>::Launch(
s, size, rgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);});
}
template<
typename xpu,
typename LOP,
typename ROP,
typename DType,
bool in0_ok_dense = false,
bool in1_ok_dense = false,
bool in2_ok_dense = false,
typename BackupCompute>
static inline void BackwardUseInEx_(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs,
BackupCompute backup_compute) {
mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
// lhs grad
if (req[0] != kNullOp) {
// RspRspOp can handle dense outputs so long as OP(0, 0) == 0
MSHADOW_IDX_TYPE_SWITCH(inputs[1].aux_type(rowsparse::kIdx), IType, {
RspRspOp<DType, IType, LOP>(
s, attrs, ctx, inputs[1], inputs[2], req[0], outputs[0],
false, false, false, false);
});
// lhs in-place
MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(rowsparse::kIdx), IType, {
RspRspOp<DType, IType, mshadow::op::mul>(
s, attrs, ctx, outputs[0], inputs[0], req[0], outputs[0],
false, false, true, false);
});
}
// rhs grad
if (req[1] != kNullOp) {
MSHADOW_IDX_TYPE_SWITCH(inputs[1].aux_type(rowsparse::kIdx), IType, {
RspRspOp<DType, IType, ROP>(
s, attrs, ctx, inputs[1], inputs[2], req[1], outputs[1],
false, false, false, false);
});
// rhs in-place
MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(rowsparse::kIdx), IType, {
RspRspOp<DType, IType, mshadow::op::mul>(
s, attrs, ctx, inputs[0], outputs[1], req[1], outputs[1],
false, false, true, false);
});
}
}
protected:
/*! \brief Binary op handling for lhs/rhs: RspDns, RspRsp, DnsRsp, or RspRsp->Dns result */
template<typename DType, typename IType, typename OP>
static void RspRspOp(mshadow::Stream<cpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output,
bool lhs_may_be_dense,
bool rhs_may_be_dense,
bool allow_inplace,
bool scatter);
/*! \brief CSR -op- CSR binary operator for non-canonical NDArray */
template<typename DType, typename IType, typename CType, typename OP>
static inline void CsrCsrOp(mshadow::Stream<cpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output);
public:
/*!
* \brief Rsp-op-Rsp operation which produces a dense result
* \param attrs Attributes
* \param dev_mask Device mask
* \param dispatch_mode Dispatch Mode
* \param in_attrs Input storage attributes
* \param out_attrs Output storage attributes
* \return true if handled
*/
static bool SparseSparseWithDenseResult(const nnvm::NodeAttrs& attrs,
int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs);
/*!
* \brief Allow one of the inputs to be dense and still produce a sparse output
* \param attrs Attributes
* \param dev_mask Device mask
* \param dispatch_mode Dispatch Mode
* \param in_attrs Input storage attributes
* \param out_attrs Output storage attributes
* \return true if handled
*/
template<bool lhs_dense_ok = true, bool rhs_dense_ok = true>
static bool AllowLRDenseInputWithSparseOutputStorageType(const nnvm::NodeAttrs& attrs,
int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs) {
CHECK_EQ(in_attrs->size(), 2U) << " in operator " << attrs.name;
CHECK_EQ(out_attrs->size(), 1U) << " in operator " << attrs.name;
const auto& lhs_stype = in_attrs->at(0);
const auto& rhs_stype = in_attrs->at(1);
auto& out_stype = out_attrs->at(0);
bool dispatched = false;
const bool invalid_ctx = dev_mask != mshadow::cpu::kDevMask;
const auto dispatch_ex = invalid_ctx ? DispatchMode::kFComputeFallback :
DispatchMode::kFComputeEx;
if (!dispatched && lhs_stype == kDefaultStorage && rhs_stype == kDefaultStorage) {
// dns, dns -> dns
dispatched = storage_type_assign(&out_stype, kDefaultStorage,
dispatch_mode, DispatchMode::kFCompute);
}
if (!dispatched) {
if ((lhs_stype == kRowSparseStorage && rhs_stype == kRowSparseStorage) ||
(rhs_dense_ok && lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) ||
(lhs_dense_ok && lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) {
// rsp, rsp -> rsp
// rsp, dns -> rsp
// dns, rsp -> rsp
dispatched = storage_type_assign(&out_stype, kRowSparseStorage,
dispatch_mode, dispatch_ex);
} else if (lhs_stype == kCSRStorage && rhs_stype == kCSRStorage) {
// csr, csr -> csr
dispatched = storage_type_assign(&out_stype, kCSRStorage,
dispatch_mode, dispatch_ex);
} else if ((lhs_stype == kCSRStorage && rhs_dense_ok) ||
(rhs_stype == kCSRStorage && lhs_dense_ok)) {
// csr, dns -> csr
// dns, csr -> csr
dispatched = storage_type_assign(&out_stype, kCSRStorage,
dispatch_mode, DispatchMode::kFComputeFallback);
}
}
if (!dispatched) {
dispatch_fallback(out_attrs, dispatch_mode);
}
if (*dispatch_mode == DispatchMode::kFComputeFallback) {
LogStorageFallback(attrs, dev_mask, in_attrs, out_attrs);
}
return true;
}
/*!
* \brief Backward pass computing input gradient using forward inputs
* \param attrs Attributes
* \param dev_mask Device mask
* \param dispatch_mode Dispatch Mode
* \param in_attrs Input storage attributes
* \param out_attrs Output storage attributes
* \return true if handled
*/
static bool BackwardUseInStorageType(const nnvm::NodeAttrs& attrs,
int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs);
template<typename xpu, typename OP>
static void Compute(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mxnet_op;
if (req[0] != kNullOp) {
Stream<xpu> *s = ctx.get_stream<xpu>();
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 1U);
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
+ DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
outputs[0].dptr<DType>(),
inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
});
});
}
}
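// Note on the size computation in Compute above: DataType<DType>::kLanes is the
// number of scalar values packed per DType, so size = ceil(N / kLanes) work
// items cover N scalars; e.g. N == 7 with kLanes == 2 (the vectorized half2
// packing handled by ComputeWithHalf2 below) launches 4 work items.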
template<typename xpu, typename OP>
static void ComputeWithHalf2(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mxnet_op;
if (req[0] != kNullOp) {
Stream<xpu> *s = ctx.get_stream<xpu>();
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 1U);
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, {
const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
+ DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
outputs[0].dptr<DType>(),
inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
});
});
}
}
template<typename xpu, typename OP>
static void ComputeEx(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
CHECK_EQ(inputs.size(), 2);
CHECK_EQ(outputs.size(), 1);
if (req[0] == kNullOp) return;
const auto lhs_stype = inputs[0].storage_type();
const auto out_stype = outputs[0].storage_type();
mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
if ((common::ContainsOnlyStorage(inputs, kRowSparseStorage))
&& (out_stype == kRowSparseStorage || out_stype == kDefaultStorage)) {
// rsp, rsp -> rsp
// rsp, rsp -> dns
const int rsp_input_idx = lhs_stype == kRowSparseStorage ? 0 : 1;
MSHADOW_IDX_TYPE_SWITCH(inputs[rsp_input_idx].aux_type(rowsparse::kIdx), IType, {
MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, {
RspRspOp<DType, IType, OP>(
s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0], false, false, false, false);
});
});
} else if (common::ContainsOnlyStorage(inputs, kCSRStorage) && out_stype == kCSRStorage) {
// csr, csr -> csr
MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(csr::kIdx), IType, {
MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(csr::kIndPtr), CType, {
MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, {
CsrCsrOp<DType, IType, CType, OP>(
s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0]);
});
});
});
} else {
LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs);
}
}
/*! \brief ComputeEx allowing dense lvalue and/or rvalue */
template<typename xpu, typename OP, bool lhs_may_be_dense, bool rhs_may_be_dense>
static void ComputeDnsLRValueEx(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
using namespace mshadow;
using namespace mshadow::expr;
CHECK_EQ(inputs.size(), 2);
CHECK_EQ(outputs.size(), 1);
if (req[0] == kNullOp) return;
const auto lhs_stype = inputs[0].storage_type();
const auto rhs_stype = inputs[1].storage_type();
const auto out_stype = outputs[0].storage_type();
if ((out_stype == kRowSparseStorage || out_stype == kDefaultStorage) &&
((lhs_stype == kRowSparseStorage && rhs_stype == kRowSparseStorage) ||
(lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) ||
(lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) &&
lhs_may_be_dense && rhs_may_be_dense) {
// rsp, rsp -> rsp
// rsp, rsp -> dns
// rsp, dns -> rsp
// dns, rsp -> rsp
// More than one dense input is not allowed (this will be checked in RspRspOp):
// rsp, dns -> dns <-- NOT ALLOWED
// dns, rsp -> dns <-- NOT ALLOWED
mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, {
MSHADOW_IDX_TYPE_SWITCH(outputs[0].aux_type(rowsparse::kIdx), IType, {
RspRspOp<DType, IType, OP>(
s, attrs, ctx, inputs[0], inputs[1],
req[0], outputs[0], lhs_may_be_dense, rhs_may_be_dense, false, false);
});
});
} else if (lhs_stype == kCSRStorage && rhs_stype == kCSRStorage) {
ComputeEx<xpu, OP>(attrs, ctx, inputs, req, outputs);
} else {
LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs);
}
}
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseNone(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
BackwardUseNone_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
});
}
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseNoneWithHalf2(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, {
BackwardUseNone_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
});
}
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseNoneEx(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
CHECK_EQ(inputs.size(), 1U); // output grad
CHECK_EQ(outputs.size(), 2U); // lhs input grad, rhs input grad
const auto in_stype = inputs[0].storage_type();
const auto lhs_stype = outputs[0].storage_type();
const auto rhs_stype = outputs[1].storage_type();
// lhs grad
if (req[0] != kNullOp) {
if (in_stype == lhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
CHECK_EQ(outputs[0].storage_type(), in_stype);
// rsp -> rsp, _. The op must map a zero input to a zero output so that missing (zero) entries stay zero
DCHECK_LT(fabs(static_cast<float>(LOP::Map(0))), 1e-5f);
UnaryOp::ComputeEx<xpu, LOP>(attrs, ctx, inputs, req, {outputs[0]});
} else {
LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs);
}
}
// rhs grad
if (req[1] != kNullOp) {
if (in_stype == rhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
CHECK_EQ(outputs[1].storage_type(), in_stype);
// rsp -> _, rsp. The op must map a zero input to a zero output so that missing (zero) entries stay zero
DCHECK_LT(fabs(static_cast<float>(ROP::Map(0))), 1e-5f);
UnaryOp::ComputeEx<xpu, ROP>(attrs, ctx, inputs, req, {outputs[1]});
} else {
LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs);
}
}
}
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseIn(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
BackwardUseIn_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
});
}
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseInWithHalf2(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, {
BackwardUseIn_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
});
}
template<
typename xpu, typename LOP, typename ROP,
bool in0_ok_dense = false, bool in1_ok_dense = false, bool in2_ok_dense = false>
static inline void BackwardUseInEx(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
using namespace common;
CHECK_EQ(inputs.size(), 3U);
CHECK_EQ(outputs.size(), 2U); // lhs input grad, rhs input grad
const auto lhs_grad_stype = outputs[0].storage_type();
const auto rhs_grad_stype = outputs[1].storage_type();
if (ContainsOnlyStorage(inputs, kRowSparseStorage) &&
(lhs_grad_stype == kDefaultStorage || lhs_grad_stype == kRowSparseStorage) &&
(rhs_grad_stype == kDefaultStorage || rhs_grad_stype == kRowSparseStorage)) {
// rsp, rsp, rsp -> [dns, rsp], [dns, rsp]
MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, {
BackwardUseInEx_<xpu, LOP, ROP, DType, in0_ok_dense, in1_ok_dense, in2_ok_dense>(
attrs, ctx, inputs, req, outputs, BackwardUseIn<xpu, LOP, ROP>);
});
}
}
}; // class ElemwiseBinaryOp
/*! \brief Binary launch */
#define MXNET_OPERATOR_REGISTER_BINARY(name) \
NNVM_REGISTER_OP(name) \
.set_num_inputs(2) \
.set_num_outputs(1) \
.set_attr<nnvm::FListInputNames>("FListInputNames", \
[](const NodeAttrs& attrs) { \
return std::vector<std::string>{"lhs", "rhs"}; \
}) \
.set_attr<nnvm::FInferShape>("FInferShape", ElemwiseShape<2, 1>) \
.set_attr<nnvm::FInferType>("FInferType", ElemwiseType<2, 1>) \
.set_attr<nnvm::FInplaceOption>("FInplaceOption", \
[](const NodeAttrs& attrs){ \
return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}}; \
}) \
.add_argument("lhs", "NDArray-or-Symbol", "first input") \
.add_argument("rhs", "NDArray-or-Symbol", "second input")
/*! \brief Binary launch, with FComputeEx for csr and rsp available */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU(__name$, __kernel$) \
MXNET_OPERATOR_REGISTER_BINARY(__name$) \
.set_attr<FInferStorageType>("FInferStorageType", \
ElemwiseStorageType<2, 1, true, true, true>) \
.set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \
.set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \
.set_attr<FResourceRequest>("FResourceRequest", /* For Sparse CSR */ \
[](const NodeAttrs& attrs) { \
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};})
/*! \brief Binary launch, dense result
 * FInferStorageType is set to ElemwiseBinaryOp::SparseSparseWithDenseResult,
 * so sparse inputs still infer a dense output storage type.
 */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(__name$, __kernel$) \
MXNET_OPERATOR_REGISTER_BINARY(__name$) \
.set_attr<FInferStorageType>("FInferStorageType", \
ElemwiseBinaryOp::SparseSparseWithDenseResult) \
.set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \
.set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>)
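/* Illustrative use of the macros above (operator and kernel names here are
 * hypothetical, not taken from this file):
 *   MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU(my_elemwise_add, mshadow_op::plus)
 *   .describe("Element-wise addition with CSR/RSP support.");
 * This expands to an NNVM registration with two inputs, one output, and both
 * FCompute<cpu> and FComputeEx<cpu> bound to the ElemwiseBinaryOp kernels.
 */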
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
|
pf_fold.c | /*
 * partition function for single RNA secondary structures
*
* Simplified interfaces and backward compatibility
* wrappers
*
* Ivo L Hofacker + Ronny Lorenz
* Vienna RNA package
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
/*###########################################*/
/*# deprecated functions below #*/
/*###########################################*/
#ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <float.h> /* #defines FLT_MAX ... */
#include <limits.h>
#include "ViennaRNA/utils/basic.h"
#include "ViennaRNA/params/default.h"
#include "ViennaRNA/fold_vars.h"
#include "ViennaRNA/loops/all.h"
#include "ViennaRNA/gquad.h"
#include "ViennaRNA/constraints/hard.h"
#include "ViennaRNA/constraints/soft.h"
#include "ViennaRNA/mfe.h"
#include "ViennaRNA/part_func.h"
#ifdef _OPENMP
#include <omp.h>
#endif
/*
#################################
# GLOBAL VARIABLES #
#################################
*/
PUBLIC int st_back = 0;
/*
#################################
# PRIVATE VARIABLES #
#################################
*/
/* some backward compatibility stuff */
PRIVATE vrna_fold_compound_t *backward_compat_compound = NULL;
PRIVATE int backward_compat = 0;
#ifdef _OPENMP
#pragma omp threadprivate(backward_compat_compound, backward_compat)
#endif
/*
#################################
# PRIVATE FUNCTION DECLARATIONS #
#################################
*/
PRIVATE float
wrap_pf_fold(const char *sequence,
char *structure,
vrna_exp_param_t *parameters,
int calculate_bppm,
int is_constrained,
int is_circular);
PRIVATE double
wrap_mean_bp_distance(FLT_OR_DBL *p,
int length,
int *index,
int turn);
/*
#################################
# BEGIN OF FUNCTION DEFINITIONS #
#################################
*/
PRIVATE double
wrap_mean_bp_distance(FLT_OR_DBL *p,
int length,
int *index,
int turn)
{
int i, j;
double d = 0.;
/* compute the mean base pair distance in the thermodynamic ensemble */
/* <d> = \sum_{a,b} p_a p_b d(S_a,S_b)
* this can be computed from the pair probs p_ij as
* <d> = \sum_{ij} p_{ij}(1-p_{ij}) */
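  /* a pair (i,j) contributes to d(S_a,S_b) exactly when it is present in one
   * of the two structures but not the other, which happens with probability
   * 2 p_{ij}(1-p_{ij}); hence the factor 2 in the return value below */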
for (i = 1; i <= length; i++)
for (j = i + turn + 1; j <= length; j++)
d += p[index[i] - j] * (1 - p[index[i] - j]);
return 2 * d;
}
PRIVATE float
wrap_pf_fold(const char *sequence,
char *structure,
vrna_exp_param_t *parameters,
int calculate_bppm,
int is_constrained,
int is_circular)
{
vrna_fold_compound_t *vc;
vrna_md_t md;
vc = NULL;
/* we need vrna_exp_param_t datastructure to correctly init default hard constraints */
if (parameters)
md = parameters->model_details;
else
set_model_details(&md); /* get global default parameters */
md.circ = is_circular;
md.compute_bpp = calculate_bppm;
vc = vrna_fold_compound(sequence, &md, VRNA_OPTION_DEFAULT);
/* prepare exp_params and set global pf_scale */
vc->exp_params = vrna_exp_params(&(vc->params->model_details));
vc->exp_params->pf_scale = pf_scale;
if (is_constrained && structure) {
unsigned int constraint_options = 0;
constraint_options |= VRNA_CONSTRAINT_DB
| VRNA_CONSTRAINT_DB_PIPE
| VRNA_CONSTRAINT_DB_DOT
| VRNA_CONSTRAINT_DB_X
| VRNA_CONSTRAINT_DB_ANG_BRACK
| VRNA_CONSTRAINT_DB_RND_BRACK;
vrna_constraints_add(vc, (const char *)structure, constraint_options);
}
if (backward_compat_compound && backward_compat)
vrna_fold_compound_free(backward_compat_compound);
backward_compat_compound = vc;
backward_compat = 1;
iindx = backward_compat_compound->iindx;
return vrna_pf(vc, structure);
}
PUBLIC vrna_ep_t *
stackProb(double cutoff)
{
if (!(backward_compat_compound && backward_compat)) {
vrna_message_warning("stackProb: "
"run pf_fold() first!");
return NULL;
} else if (!backward_compat_compound->exp_matrices->probs) {
vrna_message_warning("stackProb: "
"probs == NULL!");
return NULL;
}
return vrna_stack_prob(backward_compat_compound, cutoff);
}
PUBLIC char *
centroid(int length,
double *dist)
{
if (pr == NULL) {
vrna_message_warning("centroid: "
"pr == NULL. You need to call pf_fold() before centroid()");
return NULL;
}
return vrna_centroid_from_probs(length, dist, pr);
}
PUBLIC double
mean_bp_dist(int length)
{
/* compute the mean base pair distance in the thermodynamic ensemble */
/* <d> = \sum_{a,b} p_a p_b d(S_a,S_b)
* this can be computed from the pair probs p_ij as
* <d> = \sum_{ij} p_{ij}(1-p_{ij}) */
int i, j, *my_iindx;
double d = 0;
if (pr == NULL) {
vrna_message_warning("mean_bp_dist: "
"pr == NULL. You need to call pf_fold() before mean_bp_dist()");
return d;
}
my_iindx = vrna_idx_row_wise(length);
for (i = 1; i <= length; i++)
for (j = i + TURN + 1; j <= length; j++)
d += pr[my_iindx[i] - j] * (1 - pr[my_iindx[i] - j]);
free(my_iindx);
return 2 * d;
}
/* get the free energy of a subsequence from the q[] array */
PUBLIC double
get_subseq_F(int i,
int j)
{
if (backward_compat_compound)
if (backward_compat_compound->exp_matrices)
if (backward_compat_compound->exp_matrices->q) {
int *my_iindx = backward_compat_compound->iindx;
vrna_exp_param_t *pf_params = backward_compat_compound->exp_params;
FLT_OR_DBL *q = backward_compat_compound->exp_matrices->q;
return (-log(q[my_iindx[i] - j]) - (j - i + 1) * log(pf_params->pf_scale)) * pf_params->kT /
1000.0;
}
vrna_message_warning("get_subseq_F: "
"call pf_fold() to fill q[] array before calling get_subseq_F()");
return 0.; /* we will never get to this point */
}
/*----------------------------------------------------------------------*/
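/* note: expHairpinEnergy() and expLoopEnergy() below read their parameters
 * from the global backward-compatibility compound, so pf_fold() (or one of
 * its wrappers) must have been called beforehand; otherwise a NULL pointer
 * would be dereferenced */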
PUBLIC double
expHairpinEnergy(int u,
int type,
short si1,
short sj1,
const char *string)
{
/* compute Boltzmann weight of a hairpin loop, multiply by scale[u+2] */
vrna_exp_param_t *pf_params = backward_compat_compound->exp_params;
double q, kT;
kT = pf_params->kT; /* kT in cal/mol */
if (u <= 30)
q = pf_params->exphairpin[u];
else
q = pf_params->exphairpin[30] * exp(-(pf_params->lxc * log(u / 30.)) * 10. / kT);
if ((tetra_loop) && (u == 4)) {
char tl[7] = {
0
}, *ts;
strncpy(tl, string, 6);
if ((ts = strstr(pf_params->Tetraloops, tl)))
return pf_params->exptetra[(ts - pf_params->Tetraloops) / 7];
}
if ((tetra_loop) && (u == 6)) {
char tl[9] = {
0
}, *ts;
strncpy(tl, string, 6);
if ((ts = strstr(pf_params->Hexaloops, tl)))
return pf_params->exphex[(ts - pf_params->Hexaloops) / 9];
}
if (u == 3) {
char tl[6] = {
0
}, *ts;
strncpy(tl, string, 5);
if ((ts = strstr(pf_params->Triloops, tl)))
return pf_params->exptri[(ts - pf_params->Triloops) / 6];
if (type > 2)
q *= pf_params->expTermAU;
} else {
/* no mismatches for tri-loops */
q *= pf_params->expmismatchH[type][si1][sj1];
}
return q;
}
PUBLIC double
expLoopEnergy(int u1,
int u2,
int type,
int type2,
short si1,
short sj1,
short sp1,
short sq1)
{
/* compute Boltzmann weight of interior loop,
* multiply by scale[u1+u2+2] for scaling */
double z = 0;
int no_close = 0;
vrna_exp_param_t *pf_params = backward_compat_compound->exp_params;
if ((no_closingGU) && ((type2 == 3) || (type2 == 4) || (type == 2) || (type == 4)))
no_close = 1;
if ((u1 == 0) && (u2 == 0)) {
/* stack */
z = pf_params->expstack[type][type2];
} else if (no_close == 0) {
if ((u1 == 0) || (u2 == 0)) {
/* bulge */
int u;
u = (u1 == 0) ? u2 : u1;
z = pf_params->expbulge[u];
if (u2 + u1 == 1) {
z *= pf_params->expstack[type][type2];
} else {
if (type > 2)
z *= pf_params->expTermAU;
if (type2 > 2)
z *= pf_params->expTermAU;
}
} else {
/* interior loop */
if (u1 + u2 == 2) {
/* size 2 is special */
z = pf_params->expint11[type][type2][si1][sj1];
} else if ((u1 == 1) && (u2 == 2)) {
z = pf_params->expint21[type][type2][si1][sq1][sj1];
} else if ((u1 == 2) && (u2 == 1)) {
z = pf_params->expint21[type2][type][sq1][si1][sp1];
} else if ((u1 == 2) && (u2 == 2)) {
z = pf_params->expint22[type][type2][si1][sp1][sq1][sj1];
} else if (((u1 == 2) && (u2 == 3)) || ((u1 == 3) && (u2 == 2))) {
/*2-3 is special*/
z = pf_params->expinternal[5] *
pf_params->expmismatch23I[type][si1][sj1] *
pf_params->expmismatch23I[type2][sq1][sp1];
z *= pf_params->expninio[2][1];
} else if ((u1 == 1) || (u2 == 1)) {
/*1-n is special*/
z = pf_params->expinternal[u1 + u2] *
pf_params->expmismatch1nI[type][si1][sj1] *
pf_params->expmismatch1nI[type2][sq1][sp1];
z *= pf_params->expninio[2][abs(u1 - u2)];
} else {
z = pf_params->expinternal[u1 + u2] *
pf_params->expmismatchI[type][si1][sj1] *
pf_params->expmismatchI[type2][sq1][sp1];
z *= pf_params->expninio[2][abs(u1 - u2)];
}
}
}
return z;
}
PUBLIC void
init_pf_circ_fold(int length)
{
/* DO NOTHING */
}
PUBLIC void
init_pf_fold(int length)
{
/* DO NOTHING */
}
/**
 *** Free the memory allocated for all matrices and other stuff
 **/
PUBLIC void
free_pf_arrays(void)
{
if (backward_compat_compound && backward_compat) {
vrna_fold_compound_free(backward_compat_compound);
backward_compat_compound = NULL;
backward_compat = 0;
iindx = NULL;
}
}
PUBLIC FLT_OR_DBL *
export_bppm(void)
{
if (backward_compat_compound)
if (backward_compat_compound->exp_matrices)
if (backward_compat_compound->exp_matrices->probs)
return backward_compat_compound->exp_matrices->probs;
return NULL;
}
/*-------------------------------------------------------------------------*/
/* make arrays used for pf_fold available to other routines */
PUBLIC int
get_pf_arrays(short **S_p,
short **S1_p,
char **ptype_p,
FLT_OR_DBL **qb_p,
FLT_OR_DBL **qm_p,
FLT_OR_DBL **q1k_p,
FLT_OR_DBL **qln_p)
{
if (backward_compat_compound) {
if (backward_compat_compound->exp_matrices)
if (backward_compat_compound->exp_matrices->qb) {
*S_p = backward_compat_compound->sequence_encoding2;
*S1_p = backward_compat_compound->sequence_encoding;
*ptype_p = backward_compat_compound->ptype_pf_compat;
*qb_p = backward_compat_compound->exp_matrices->qb;
*qm_p = backward_compat_compound->exp_matrices->qm;
*q1k_p = backward_compat_compound->exp_matrices->q1k;
*qln_p = backward_compat_compound->exp_matrices->qln;
return 1;
}
}
return 0;
}
/*-----------------------------------------------------------------*/
PUBLIC float
pf_fold(const char *sequence,
char *structure)
{
return wrap_pf_fold(sequence, structure, NULL, do_backtrack, fold_constrained, 0);
}
PUBLIC float
pf_circ_fold(const char *sequence,
char *structure)
{
return wrap_pf_fold(sequence, structure, NULL, do_backtrack, fold_constrained, 1);
}
PUBLIC float
pf_fold_par(const char *sequence,
char *structure,
vrna_exp_param_t *parameters,
int calculate_bppm,
int is_constrained,
int is_circular)
{
return wrap_pf_fold(sequence, structure, parameters, calculate_bppm, is_constrained, is_circular);
}
PUBLIC char *
pbacktrack(char *seq)
{
int n = (int)strlen(seq);
return vrna_pbacktrack5(backward_compat_compound, n);
}
PUBLIC char *
pbacktrack5(char *seq,
int length)
{
/* the seq parameter must not differ from the one stored globally anyway, so we just ignore it */
return vrna_pbacktrack5(backward_compat_compound, length);
}
PUBLIC char *
pbacktrack_circ(char *seq)
{
char *structure;
vrna_md_t *md;
structure = NULL;
if (backward_compat_compound) {
md = &(backward_compat_compound->exp_params->model_details);
if (md->circ && backward_compat_compound->exp_matrices->qm2)
structure = vrna_pbacktrack(backward_compat_compound);
}
return structure;
}
PUBLIC void
update_pf_params(int length)
{
if (backward_compat_compound && backward_compat) {
vrna_md_t md;
set_model_details(&md);
vrna_exp_params_reset(backward_compat_compound, &md);
/* compatibility with RNAup, may be removed sometime */
pf_scale = backward_compat_compound->exp_params->pf_scale;
}
}
PUBLIC void
update_pf_params_par(int length,
vrna_exp_param_t *parameters)
{
if (backward_compat_compound && backward_compat) {
vrna_md_t md;
if (parameters) {
vrna_exp_params_subst(backward_compat_compound, parameters);
} else {
set_model_details(&md);
vrna_exp_params_reset(backward_compat_compound, &md);
}
/* compatibility with RNAup, may be removed sometime */
pf_scale = backward_compat_compound->exp_params->pf_scale;
}
}
PUBLIC char *
get_centroid_struct_gquad_pr(int length,
double *dist)
{
return vrna_centroid(backward_compat_compound, dist);
}
PUBLIC void
assign_plist_gquad_from_pr(vrna_ep_t **pl,
int length, /* ignored */
double cut_off)
{
if (!backward_compat_compound)
*pl = NULL;
else if (!backward_compat_compound->exp_matrices->probs)
*pl = NULL;
else
*pl = vrna_plist_from_probs(backward_compat_compound, cut_off);
}
PUBLIC double
mean_bp_distance(int length)
{
if (backward_compat_compound)
if (backward_compat_compound->exp_matrices)
if (backward_compat_compound->exp_matrices->probs)
return vrna_mean_bp_distance(backward_compat_compound);
vrna_message_warning("mean_bp_distance: "
"you need to call vrna_pf_fold first");
return 0.; /* we will never get to this point */
}
PUBLIC double
mean_bp_distance_pr(int length,
FLT_OR_DBL *p)
{
double d = 0;
int *index = vrna_idx_row_wise((unsigned int)length);
if (p == NULL) {
vrna_message_warning("mean_bp_distance_pr: "
"p == NULL. You need to supply a valid probability matrix for mean_bp_distance_pr()");
return d;
}
d = wrap_mean_bp_distance(p, length, index, TURN);
free(index);
return d;
}
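/* illustrative sketch of the deprecated interface above (the sequence and
 * buffer size are placeholders, not part of the library):
 *   char        structure[1000];
 *   const char  *seq = "GGGAAAUCC";
 *   float       dG   = pf_fold(seq, structure);
 *   double      d    = mean_bp_distance((int)strlen(seq));
 *   free_pf_arrays();
 */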
#endif
| /*
 * partition function for single RNA secondary structures
*
* Simplified interfaces and backward compatibility wrappers
*
* Ivo L Hofacker + Ronny Lorenz Vienna RNA package
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
/* ########################################### */
/* # deprecated functions below # */
/* ########################################### */
#ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <float.h> /* #defines FLT_MAX ... */
#include <limits.h>
#include "ViennaRNA/utils/basic.h"
#include "ViennaRNA/params/default.h"
#include "ViennaRNA/fold_vars.h"
#include "ViennaRNA/loops/all.h"
#include "ViennaRNA/gquad.h"
#include "ViennaRNA/constraints/hard.h"
#include "ViennaRNA/constraints/soft.h"
#include "ViennaRNA/mfe.h"
#include "ViennaRNA/part_func.h"
/*
* ################################# # GLOBAL VARIABLES #
* #################################
*/
PUBLIC int st_back = 0;
/*
* ################################# # PRIVATE VARIABLES #
* #################################
*/
/* some backward compatibility stuff */
PRIVATE vrna_fold_compound_t *backward_compat_compound = NULL;
PRIVATE int backward_compat = 0;
/*
* ################################# # PRIVATE FUNCTION DECLARATIONS #
* #################################
*/
PRIVATE float
wrap_pf_fold(const char *sequence,
char *structure,
vrna_exp_param_t * parameters,
int calculate_bppm,
int is_constrained,
int is_circular);
PRIVATE double
wrap_mean_bp_distance(FLT_OR_DBL * p,
int length,
int *index,
int turn);
/*
* ################################# # BEGIN OF FUNCTION DEFINITIONS #
* #################################
*/
PRIVATE double
wrap_mean_bp_distance(FLT_OR_DBL * p,
int length,
int *index,
int turn)
{
int i, j;
double d = 0.;
/* compute the mean base pair distance in the thermodynamic ensemble */
/*
* <d> = \sum_{a,b} p_a p_b d(S_a,S_b) this can be computed from the pair
* probs p_ij as <d> = \sum_{ij} p_{ij}(1-p_{ij})
*/
for (i = 1; i <= length; i++)
for (j = i + turn + 1; j <= length; j++)
d += p[index[i] - j] * (1 - p[index[i] - j]);
return 2 * d;
}
PRIVATE float
wrap_pf_fold(const char *sequence,
char *structure,
vrna_exp_param_t * parameters,
int calculate_bppm,
int is_constrained,
int is_circular)
{
vrna_fold_compound_t *vc;
vrna_md_t md;
vc = NULL;
/*
* we need vrna_exp_param_t datastructure to correctly init default hard
* constraints
*/
if (parameters)
md = parameters->model_details;
else
set_model_details(&md); /* get global default parameters */
md.circ = is_circular;
md.compute_bpp = calculate_bppm;
vc = vrna_fold_compound(sequence, &md, VRNA_OPTION_DEFAULT);
/* prepare exp_params and set global pf_scale */
vc->exp_params = vrna_exp_params(&(vc->params->model_details));
vc->exp_params->pf_scale = pf_scale;
if (is_constrained && structure)
{
unsigned int constraint_options = 0;
constraint_options |= VRNA_CONSTRAINT_DB
| VRNA_CONSTRAINT_DB_PIPE
| VRNA_CONSTRAINT_DB_DOT
| VRNA_CONSTRAINT_DB_X
| VRNA_CONSTRAINT_DB_ANG_BRACK
| VRNA_CONSTRAINT_DB_RND_BRACK;
vrna_constraints_add(vc, (const char *)structure, constraint_options);
}
if (backward_compat_compound && backward_compat)
vrna_fold_compound_free(backward_compat_compound);
backward_compat_compound = vc;
backward_compat = 1;
iindx = backward_compat_compound->iindx;
return vrna_pf(vc, structure);
}
PUBLIC vrna_ep_t *
stackProb(double cutoff)
{
if (!(backward_compat_compound && backward_compat))
{
vrna_message_warning("stackProb: "
"run pf_fold() first!");
return NULL;
} else if (!backward_compat_compound->exp_matrices->probs)
{
vrna_message_warning("stackProb: "
"probs == NULL!");
return NULL;
}
return vrna_stack_prob(backward_compat_compound, cutoff);
}
PUBLIC char *
centroid(int length,
double *dist)
{
if (pr == NULL)
{
vrna_message_warning("centroid: "
"pr == NULL. You need to call pf_fold() before centroid()");
return NULL;
}
return vrna_centroid_from_probs(length, dist, pr);
}
PUBLIC double
mean_bp_dist(int length)
{
/* compute the mean base pair distance in the thermodynamic ensemble */
/*
* <d> = \sum_{a,b} p_a p_b d(S_a,S_b) this can be computed from the pair
* probs p_ij as <d> = \sum_{ij} p_{ij}(1-p_{ij})
*/
int i, j, *my_iindx;
double d = 0;
if (pr == NULL)
{
vrna_message_warning("mean_bp_dist: "
"pr == NULL. You need to call pf_fold() before mean_bp_dist()");
return d;
}
my_iindx = vrna_idx_row_wise(length);
for (i = 1; i <= length; i++)
for (j = i + TURN + 1; j <= length; j++)
d += pr[my_iindx[i] - j] * (1 - pr[my_iindx[i] - j]);
free(my_iindx);
return 2 * d;
}
/* get the free energy of a subsequence from the q[] array */
PUBLIC double
get_subseq_F(int i,
int j)
{
if (backward_compat_compound)
if (backward_compat_compound->exp_matrices)
if (backward_compat_compound->exp_matrices->q)
{
int *my_iindx = backward_compat_compound->iindx;
vrna_exp_param_t *pf_params = backward_compat_compound->exp_params;
FLT_OR_DBL *q = backward_compat_compound->exp_matrices->q;
return (-log(q[my_iindx[i] - j]) - (j - i + 1) * log(pf_params->pf_scale)) * pf_params->kT /
1000.0;
}
vrna_message_warning("get_subseq_F: "
"call pf_fold() to fill q[] array before calling get_subseq_F()");
return 0.; /* we will never get to this point */
}
/*----------------------------------------------------------------------*/
PUBLIC double
expHairpinEnergy(int u,
int type,
short si1,
short sj1,
const char *string)
{
/* compute Boltzmann weight of a hairpin loop, multiply by scale[u+2] */
vrna_exp_param_t *pf_params = backward_compat_compound->exp_params;
double q, kT;
kT = pf_params->kT; /* kT in cal/mol */
if (u <= 30)
q = pf_params->exphairpin[u];
else
q = pf_params->exphairpin[30] * exp(-(pf_params->lxc * log(u / 30.)) * 10. / kT);
if ((tetra_loop) && (u == 4))
{
char tl[7] = {
0
}, *ts;
strncpy(tl, string, 6);
if ((ts = strstr(pf_params->Tetraloops, tl)))
return pf_params->exptetra[(ts - pf_params->Tetraloops) / 7];
}
if ((tetra_loop) && (u == 6))
{
char tl[9] = {
0
}, *ts;
strncpy(tl, string, 6);
if ((ts = strstr(pf_params->Hexaloops, tl)))
return pf_params->exphex[(ts - pf_params->Hexaloops) / 9];
}
if (u == 3)
{
char tl[6] = {
0
}, *ts;
strncpy(tl, string, 5);
if ((ts = strstr(pf_params->Triloops, tl)))
return pf_params->exptri[(ts - pf_params->Triloops) / 6];
if (type > 2)
q *= pf_params->expTermAU;
} else
{
/* no mismatches for tri-loops */
q *= pf_params->expmismatchH[type][si1][sj1];
}
return q;
}
PUBLIC double
expLoopEnergy(int u1,
int u2,
int type,
int type2,
short si1,
short sj1,
short sp1,
short sq1)
{
/*
* compute Boltzmann weight of interior loop, multiply by scale[u1+u2+2]
* for scaling
*/
double z = 0;
int no_close = 0;
vrna_exp_param_t *pf_params = backward_compat_compound->exp_params;
if ((no_closingGU) && ((type2 == 3) || (type2 == 4) || (type == 2) || (type == 4)))
no_close = 1;
if ((u1 == 0) && (u2 == 0))
{
/* stack */
z = pf_params->expstack[type][type2];
} else if (no_close == 0)
{
if ((u1 == 0) || (u2 == 0))
{
/* bulge */
int u;
u = (u1 == 0) ? u2 : u1;
z = pf_params->expbulge[u];
if (u2 + u1 == 1)
{
z *= pf_params->expstack[type][type2];
} else
{
if (type > 2)
z *= pf_params->expTermAU;
if (type2 > 2)
z *= pf_params->expTermAU;
}
} else
{
/* interior loop */
if (u1 + u2 == 2)
{
/* size 2 is special */
z = pf_params->expint11[type][type2][si1][sj1];
} else if ((u1 == 1) && (u2 == 2))
{
z = pf_params->expint21[type][type2][si1][sq1][sj1];
} else if ((u1 == 2) && (u2 == 1))
{
z = pf_params->expint21[type2][type][sq1][si1][sp1];
} else if ((u1 == 2) && (u2 == 2))
{
z = pf_params->expint22[type][type2][si1][sp1][sq1][sj1];
} else if (((u1 == 2) && (u2 == 3)) || ((u1 == 3) && (u2 == 2)))
{
/* 2-3 is special */
z = pf_params->expinternal[5] *
pf_params->expmismatch23I[type][si1][sj1] *
pf_params->expmismatch23I[type2][sq1][sp1];
z *= pf_params->expninio[2][1];
} else if ((u1 == 1) || (u2 == 1))
{
/* 1-n is special */
z = pf_params->expinternal[u1 + u2] *
pf_params->expmismatch1nI[type][si1][sj1] *
pf_params->expmismatch1nI[type2][sq1][sp1];
z *= pf_params->expninio[2][abs(u1 - u2)];
} else
{
z = pf_params->expinternal[u1 + u2] *
pf_params->expmismatchI[type][si1][sj1] *
pf_params->expmismatchI[type2][sq1][sp1];
z *= pf_params->expninio[2][abs(u1 - u2)];
}
}
}
return z;
}
PUBLIC void
init_pf_circ_fold(int length)
{
/* DO NOTHING */
}
PUBLIC void
init_pf_fold(int length)
{
/* DO NOTHING */
}
/**
 *** Free the memory allocated for all matrices and other stuff
 **/
PUBLIC void
free_pf_arrays(void)
{
if (backward_compat_compound && backward_compat)
{
vrna_fold_compound_free(backward_compat_compound);
backward_compat_compound = NULL;
backward_compat = 0;
iindx = NULL;
}
}
PUBLIC FLT_OR_DBL *
export_bppm(void)
{
if (backward_compat_compound)
if (backward_compat_compound->exp_matrices)
if (backward_compat_compound->exp_matrices->probs)
return backward_compat_compound->exp_matrices->probs;
return NULL;
}
/*-------------------------------------------------------------------------*/
/* make arrays used for pf_fold available to other routines */
PUBLIC int
get_pf_arrays(short **S_p,
short **S1_p,
char **ptype_p,
FLT_OR_DBL ** qb_p,
FLT_OR_DBL ** qm_p,
FLT_OR_DBL ** q1k_p,
FLT_OR_DBL ** qln_p)
{
if (backward_compat_compound)
{
if (backward_compat_compound->exp_matrices)
if (backward_compat_compound->exp_matrices->qb)
{
*S_p = backward_compat_compound->sequence_encoding2;
*S1_p = backward_compat_compound->sequence_encoding;
*ptype_p = backward_compat_compound->ptype_pf_compat;
*qb_p = backward_compat_compound->exp_matrices->qb;
*qm_p = backward_compat_compound->exp_matrices->qm;
*q1k_p = backward_compat_compound->exp_matrices->q1k;
*qln_p = backward_compat_compound->exp_matrices->qln;
return 1;
}
}
return 0;
}
/*-----------------------------------------------------------------*/
PUBLIC float
pf_fold(const char *sequence,
char *structure)
{
return wrap_pf_fold(sequence, structure, NULL, do_backtrack, fold_constrained, 0);
}
PUBLIC float
pf_circ_fold(const char *sequence,
char *structure)
{
return wrap_pf_fold(sequence, structure, NULL, do_backtrack, fold_constrained, 1);
}
PUBLIC float
pf_fold_par(const char *sequence,
char *structure,
vrna_exp_param_t * parameters,
int calculate_bppm,
int is_constrained,
int is_circular)
{
return wrap_pf_fold(sequence, structure, parameters, calculate_bppm, is_constrained, is_circular);
}
PUBLIC char *
pbacktrack(char *seq)
{
int n = (int)strlen(seq);
return vrna_pbacktrack5(backward_compat_compound, n);
}
PUBLIC char *
pbacktrack5(char *seq,
int length)
{
/*
 * the seq parameter must not differ from the one stored globally anyway, so
* we just ignore it
*/
return vrna_pbacktrack5(backward_compat_compound, length);
}
PUBLIC char *
pbacktrack_circ(char *seq)
{
char *structure;
vrna_md_t *md;
structure = NULL;
if (backward_compat_compound)
{
md = &(backward_compat_compound->exp_params->model_details);
if (md->circ && backward_compat_compound->exp_matrices->qm2)
structure = vrna_pbacktrack(backward_compat_compound);
}
return structure;
}
PUBLIC void
update_pf_params(int length)
{
if (backward_compat_compound && backward_compat)
{
vrna_md_t md;
set_model_details(&md);
vrna_exp_params_reset(backward_compat_compound, &md);
/* compatibility with RNAup, may be removed sometime */
pf_scale = backward_compat_compound->exp_params->pf_scale;
}
}
PUBLIC void
update_pf_params_par(int length,
vrna_exp_param_t * parameters)
{
if (backward_compat_compound && backward_compat)
{
vrna_md_t md;
if (parameters)
{
vrna_exp_params_subst(backward_compat_compound, parameters);
} else
{
set_model_details(&md);
vrna_exp_params_reset(backward_compat_compound, &md);
}
/* compatibility with RNAup, may be removed sometime */
pf_scale = backward_compat_compound->exp_params->pf_scale;
}
}
PUBLIC char *
get_centroid_struct_gquad_pr(int length,
double *dist)
{
return vrna_centroid(backward_compat_compound, dist);
}
PUBLIC void
assign_plist_gquad_from_pr(vrna_ep_t ** pl,
int length, /* ignored */
double cut_off)
{
if (!backward_compat_compound)
*pl = NULL;
else if (!backward_compat_compound->exp_matrices->probs)
*pl = NULL;
else
*pl = vrna_plist_from_probs(backward_compat_compound, cut_off);
}
PUBLIC double
mean_bp_distance(int length)
{
if (backward_compat_compound)
if (backward_compat_compound->exp_matrices)
if (backward_compat_compound->exp_matrices->probs)
return vrna_mean_bp_distance(backward_compat_compound);
vrna_message_warning("mean_bp_distance: "
"you need to call vrna_pf_fold first");
return 0.; /* we will never get to this point */
}
PUBLIC double
mean_bp_distance_pr(int length,
FLT_OR_DBL * p)
{
double d = 0;
int *index = vrna_idx_row_wise((unsigned int)length);
if (p == NULL)
{
vrna_message_warning("mean_bp_distance_pr: "
"p == NULL. You need to supply a valid probability matrix for mean_bp_distance_pr()");
return d;
}
d = wrap_mean_bp_distance(p, length, index, TURN);
free(index);
return d;
}
#endif
| /*
 * partition function for single RNA secondary structures
*
* Simplified interfaces and backward compatibility wrappers
*
* Ivo L Hofacker + Ronny Lorenz Vienna RNA package
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
/* ########################################### */
/* # deprecated functions below # */
/* ########################################### */
#ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <float.h> /* #defines FLT_MAX ... */
#include <limits.h>
#include "ViennaRNA/utils/basic.h"
#include "ViennaRNA/params/default.h"
#include "ViennaRNA/fold_vars.h"
#include "ViennaRNA/loops/all.h"
#include "ViennaRNA/gquad.h"
#include "ViennaRNA/constraints/hard.h"
#include "ViennaRNA/constraints/soft.h"
#include "ViennaRNA/mfe.h"
#include "ViennaRNA/part_func.h"
#ifdef _OPENMP
#include <omp.h>
#endif
/*
* ################################# # GLOBAL VARIABLES #
* #################################
*/
PUBLIC int st_back = 0;
/*
* ################################# # PRIVATE VARIABLES #
* #################################
*/
/* some backward compatibility stuff */
PRIVATE vrna_fold_compound_t *backward_compat_compound = NULL;
PRIVATE int backward_compat = 0;
#ifdef _OPENMP
#pragma omp threadprivate(backward_compat_compound, backward_compat)
#endif
/*
* ################################# # PRIVATE FUNCTION DECLARATIONS #
* #################################
*/
PRIVATE float
wrap_pf_fold(const char *sequence,
char *structure,
vrna_exp_param_t * parameters,
int calculate_bppm,
int is_constrained,
int is_circular);
PRIVATE double
wrap_mean_bp_distance(FLT_OR_DBL * p,
int length,
int *index,
int turn);
/*
* ################################# # BEGIN OF FUNCTION DEFINITIONS #
* #################################
*/
PRIVATE double
wrap_mean_bp_distance(FLT_OR_DBL * p,
int length,
int *index,
int turn)
{
int i, j;
double d = 0.;
/* compute the mean base pair distance in the thermodynamic ensemble */
/*
* <d> = \sum_{a,b} p_a p_b d(S_a,S_b) this can be computed from the pair
* probs p_ij as <d> = \sum_{ij} p_{ij}(1-p_{ij})
*/
for (i = 1; i <= length; i++)
for (j = i + turn + 1; j <= length; j++)
d += p[index[i] - j] * (1 - p[index[i] - j]);
return 2 * d;
}
PRIVATE float
wrap_pf_fold(const char *sequence,
char *structure,
vrna_exp_param_t * parameters,
int calculate_bppm,
int is_constrained,
int is_circular)
{
vrna_fold_compound_t *vc;
vrna_md_t md;
vc = NULL;
/*
* we need vrna_exp_param_t datastructure to correctly init default hard
* constraints
*/
if (parameters)
md = parameters->model_details;
else
set_model_details(&md); /* get global default parameters */
md.circ = is_circular;
md.compute_bpp = calculate_bppm;
vc = vrna_fold_compound(sequence, &md, VRNA_OPTION_DEFAULT);
/* prepare exp_params and set global pf_scale */
vc->exp_params = vrna_exp_params(&(vc->params->model_details));
vc->exp_params->pf_scale = pf_scale;
if (is_constrained && structure)
{
unsigned int constraint_options = 0;
constraint_options |= VRNA_CONSTRAINT_DB
| VRNA_CONSTRAINT_DB_PIPE
| VRNA_CONSTRAINT_DB_DOT
| VRNA_CONSTRAINT_DB_X
| VRNA_CONSTRAINT_DB_ANG_BRACK
| VRNA_CONSTRAINT_DB_RND_BRACK;
vrna_constraints_add(vc, (const char *)structure, constraint_options);
}
if (backward_compat_compound && backward_compat)
vrna_fold_compound_free(backward_compat_compound);
backward_compat_compound = vc;
backward_compat = 1;
iindx = backward_compat_compound->iindx;
return vrna_pf(vc, structure);
}
PUBLIC vrna_ep_t *
stackProb(double cutoff)
{
if (!(backward_compat_compound && backward_compat))
{
vrna_message_warning("stackProb: "
"run pf_fold() first!");
return NULL;
} else if (!backward_compat_compound->exp_matrices->probs)
{
vrna_message_warning("stackProb: "
"probs == NULL!");
return NULL;
}
return vrna_stack_prob(backward_compat_compound, cutoff);
}
PUBLIC char *
centroid(int length,
double *dist)
{
if (pr == NULL)
{
vrna_message_warning("centroid: "
"pr == NULL. You need to call pf_fold() before centroid()");
return NULL;
}
return vrna_centroid_from_probs(length, dist, pr);
}
PUBLIC double
mean_bp_dist(int length)
{
/* compute the mean base pair distance in the thermodynamic ensemble */
/*
* <d> = \sum_{a,b} p_a p_b d(S_a,S_b) this can be computed from the pair
* probs p_ij as <d> = \sum_{ij} p_{ij}(1-p_{ij})
*/
int i, j, *my_iindx;
double d = 0;
if (pr == NULL)
{
vrna_message_warning("mean_bp_dist: "
"pr == NULL. You need to call pf_fold() before mean_bp_dist()");
return d;
}
my_iindx = vrna_idx_row_wise(length);
for (i = 1; i <= length; i++)
for (j = i + TURN + 1; j <= length; j++)
d += pr[my_iindx[i] - j] * (1 - pr[my_iindx[i] - j]);
free(my_iindx);
return 2 * d;
}
/* get the free energy of a subsequence from the q[] array */
PUBLIC double
get_subseq_F(int i,
int j)
{
if (backward_compat_compound)
if (backward_compat_compound->exp_matrices)
if (backward_compat_compound->exp_matrices->q)
{
int *my_iindx = backward_compat_compound->iindx;
vrna_exp_param_t *pf_params = backward_compat_compound->exp_params;
FLT_OR_DBL *q = backward_compat_compound->exp_matrices->q;
return (-log(q[my_iindx[i] - j]) - (j - i + 1) * log(pf_params->pf_scale)) * pf_params->kT /
1000.0;
}
vrna_message_warning("get_subseq_F: "
"call pf_fold() to fill q[] array before calling get_subseq_F()");
return 0.; /* we will never get to this point */
}
/*----------------------------------------------------------------------*/
PUBLIC double
expHairpinEnergy(int u,
int type,
short si1,
short sj1,
const char *string)
{
/* compute Boltzmann weight of a hairpin loop, multiply by scale[u+2] */
vrna_exp_param_t *pf_params = backward_compat_compound->exp_params;
double q, kT;
kT = pf_params->kT; /* kT in cal/mol */
if (u <= 30)
q = pf_params->exphairpin[u];
else
q = pf_params->exphairpin[30] * exp(-(pf_params->lxc * log(u / 30.)) * 10. / kT);
if ((tetra_loop) && (u == 4))
{
char tl[7] = {
0
}, *ts;
strncpy(tl, string, 6);
if ((ts = strstr(pf_params->Tetraloops, tl)))
return pf_params->exptetra[(ts - pf_params->Tetraloops) / 7];
}
if ((tetra_loop) && (u == 6))
{
char tl[9] = {
0
}, *ts;
strncpy(tl, string, 6);
if ((ts = strstr(pf_params->Hexaloops, tl)))
return pf_params->exphex[(ts - pf_params->Hexaloops) / 9];
}
if (u == 3)
{
char tl[6] = {
0
}, *ts;
strncpy(tl, string, 5);
if ((ts = strstr(pf_params->Triloops, tl)))
return pf_params->exptri[(ts - pf_params->Triloops) / 6];
if (type > 2)
q *= pf_params->expTermAU;
} else
{
/* no mismatches for tri-loops */
q *= pf_params->expmismatchH[type][si1][sj1];
}
return q;
}
PUBLIC double
expLoopEnergy(int u1,
int u2,
int type,
int type2,
short si1,
short sj1,
short sp1,
short sq1)
{
/*
* compute Boltzmann weight of interior loop, multiply by scale[u1+u2+2]
* for scaling
*/
double z = 0;
int no_close = 0;
vrna_exp_param_t *pf_params = backward_compat_compound->exp_params;
if ((no_closingGU) && ((type2 == 3) || (type2 == 4) || (type == 2) || (type == 4)))
no_close = 1;
if ((u1 == 0) && (u2 == 0))
{
/* stack */
z = pf_params->expstack[type][type2];
} else if (no_close == 0)
{
if ((u1 == 0) || (u2 == 0))
{
/* bulge */
int u;
u = (u1 == 0) ? u2 : u1;
z = pf_params->expbulge[u];
if (u2 + u1 == 1)
{
z *= pf_params->expstack[type][type2];
} else
{
if (type > 2)
z *= pf_params->expTermAU;
if (type2 > 2)
z *= pf_params->expTermAU;
}
} else
{
/* interior loop */
if (u1 + u2 == 2)
{
/* size 2 is special */
z = pf_params->expint11[type][type2][si1][sj1];
} else if ((u1 == 1) && (u2 == 2))
{
z = pf_params->expint21[type][type2][si1][sq1][sj1];
} else if ((u1 == 2) && (u2 == 1))
{
z = pf_params->expint21[type2][type][sq1][si1][sp1];
} else if ((u1 == 2) && (u2 == 2))
{
z = pf_params->expint22[type][type2][si1][sp1][sq1][sj1];
} else if (((u1 == 2) && (u2 == 3)) || ((u1 == 3) && (u2 == 2)))
{
/* 2-3 is special */
z = pf_params->expinternal[5] *
pf_params->expmismatch23I[type][si1][sj1] *
pf_params->expmismatch23I[type2][sq1][sp1];
z *= pf_params->expninio[2][1];
} else if ((u1 == 1) || (u2 == 1))
{
/* 1-n is special */
z = pf_params->expinternal[u1 + u2] *
pf_params->expmismatch1nI[type][si1][sj1] *
pf_params->expmismatch1nI[type2][sq1][sp1];
z *= pf_params->expninio[2][abs(u1 - u2)];
} else
{
z = pf_params->expinternal[u1 + u2] *
pf_params->expmismatchI[type][si1][sj1] *
pf_params->expmismatchI[type2][sq1][sp1];
z *= pf_params->expninio[2][abs(u1 - u2)];
}
}
}
return z;
}
PUBLIC void
init_pf_circ_fold(int length)
{
/* DO NOTHING */
}
PUBLIC void
init_pf_fold(int length)
{
/* DO NOTHING */
}
/**
 *** Free the memory allocated for all matrices and other stuff
 **/
PUBLIC void
free_pf_arrays(void)
{
if (backward_compat_compound && backward_compat)
{
vrna_fold_compound_free(backward_compat_compound);
backward_compat_compound = NULL;
backward_compat = 0;
iindx = NULL;
}
}
PUBLIC FLT_OR_DBL *
export_bppm(void)
{
if (backward_compat_compound)
if (backward_compat_compound->exp_matrices)
if (backward_compat_compound->exp_matrices->probs)
return backward_compat_compound->exp_matrices->probs;
return NULL;
}
/*-------------------------------------------------------------------------*/
/* make arrays used for pf_fold available to other routines */
PUBLIC int
get_pf_arrays(short **S_p,
short **S1_p,
char **ptype_p,
FLT_OR_DBL ** qb_p,
FLT_OR_DBL ** qm_p,
FLT_OR_DBL ** q1k_p,
FLT_OR_DBL ** qln_p)
{
if (backward_compat_compound)
{
if (backward_compat_compound->exp_matrices)
if (backward_compat_compound->exp_matrices->qb)
{
*S_p = backward_compat_compound->sequence_encoding2;
*S1_p = backward_compat_compound->sequence_encoding;
*ptype_p = backward_compat_compound->ptype_pf_compat;
*qb_p = backward_compat_compound->exp_matrices->qb;
*qm_p = backward_compat_compound->exp_matrices->qm;
*q1k_p = backward_compat_compound->exp_matrices->q1k;
*qln_p = backward_compat_compound->exp_matrices->qln;
return 1;
}
}
return 0;
}
/*-----------------------------------------------------------------*/
PUBLIC float
pf_fold(const char *sequence,
char *structure)
{
return wrap_pf_fold(sequence, structure, NULL, do_backtrack, fold_constrained, 0);
}
PUBLIC float
pf_circ_fold(const char *sequence,
char *structure)
{
return wrap_pf_fold(sequence, structure, NULL, do_backtrack, fold_constrained, 1);
}
PUBLIC float
pf_fold_par(const char *sequence,
char *structure,
vrna_exp_param_t * parameters,
int calculate_bppm,
int is_constrained,
int is_circular)
{
return wrap_pf_fold(sequence, structure, parameters, calculate_bppm, is_constrained, is_circular);
}
PUBLIC char *
pbacktrack(char *seq)
{
int n = (int)strlen(seq);
return vrna_pbacktrack5(backward_compat_compound, n);
}
PUBLIC char *
pbacktrack5(char *seq,
int length)
{
/*
 * the seq parameter must not differ from the one stored globally anyway, so
* we just ignore it
*/
return vrna_pbacktrack5(backward_compat_compound, length);
}
PUBLIC char *
pbacktrack_circ(char *seq)
{
char *structure;
vrna_md_t *md;
structure = NULL;
if (backward_compat_compound)
{
md = &(backward_compat_compound->exp_params->model_details);
if (md->circ && backward_compat_compound->exp_matrices->qm2)
structure = vrna_pbacktrack(backward_compat_compound);
}
return structure;
}
PUBLIC void
update_pf_params(int length)
{
if (backward_compat_compound && backward_compat)
{
vrna_md_t md;
set_model_details(&md);
vrna_exp_params_reset(backward_compat_compound, &md);
/* compatibility with RNAup, may be removed sometime */
pf_scale = backward_compat_compound->exp_params->pf_scale;
}
}
PUBLIC void
update_pf_params_par(int length,
vrna_exp_param_t * parameters)
{
if (backward_compat_compound && backward_compat)
{
vrna_md_t md;
if (parameters)
{
vrna_exp_params_subst(backward_compat_compound, parameters);
} else
{
set_model_details(&md);
vrna_exp_params_reset(backward_compat_compound, &md);
}
/* compatibility with RNAup, may be removed sometime */
pf_scale = backward_compat_compound->exp_params->pf_scale;
}
}
PUBLIC char *
get_centroid_struct_gquad_pr(int length,
double *dist)
{
return vrna_centroid(backward_compat_compound, dist);
}
PUBLIC void
assign_plist_gquad_from_pr(vrna_ep_t ** pl,
int length, /* ignored */
double cut_off)
{
if (!backward_compat_compound)
*pl = NULL;
else if (!backward_compat_compound->exp_matrices->probs)
*pl = NULL;
else
*pl = vrna_plist_from_probs(backward_compat_compound, cut_off);
}
PUBLIC double
mean_bp_distance(int length)
{
if (backward_compat_compound)
if (backward_compat_compound->exp_matrices)
if (backward_compat_compound->exp_matrices->probs)
return vrna_mean_bp_distance(backward_compat_compound);
vrna_message_warning("mean_bp_distance: "
"you need to call vrna_pf_fold first");
return 0.; /* we will never get to this point */
}
PUBLIC double
mean_bp_distance_pr(int length,
FLT_OR_DBL * p)
{
double d = 0;
int *index = vrna_idx_row_wise((unsigned int)length);
if (p == NULL)
{
vrna_message_warning("mean_bp_distance_pr: "
"p == NULL. You need to supply a valid probability matrix for mean_bp_distance_pr()");
return d;
}
d = wrap_mean_bp_distance(p, length, index, TURN);
free(index);
return d;
}
#endif
|
GB_binop__rdiv_int8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rdiv_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__rdiv_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__rdiv_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__rdiv_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_int8)
// A*D function (colscale): GB (_AxD__rdiv_int8)
// D*A function (rowscale): GB (_DxB__rdiv_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__rdiv_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__rdiv_int8)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_int8)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_int8)
// C=scalar+B GB (_bind1st__rdiv_int8)
// C=scalar+B' GB (_bind1st_tran__rdiv_int8)
// C=A+scalar GB (_bind2nd__rdiv_int8)
// C=A'+scalar GB (_bind2nd_tran__rdiv_int8)
// C type: int8_t
// A type: int8_t
// A pattern? 0
// B type: int8_t
// B pattern? 0
// BinaryOp: cij = GB_IDIV_SIGNED (bij, aij, 8)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IDIV_SIGNED (y, x, 8) ;
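// (illustrative note: RDIV is "reverse division", z = y / x, so with x = aij
// and y = bij the kernel computes cij = bij / aij; e.g. aij = 2, bij = 10
// gives cij = 5)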
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RDIV || GxB_NO_INT8 || GxB_NO_RDIV_INT8)
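// (illustrative note: the GxB_NO_* flags above are normally enabled at compile
// time, e.g. via GB_control.h, to omit this specialized kernel and fall back
// on the generic implementation)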
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__rdiv_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__rdiv_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__rdiv_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__rdiv_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__rdiv_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__rdiv_int8)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__rdiv_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int8_t alpha_scalar ;
int8_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
beta_scalar = (*((int8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__rdiv_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__rdiv_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__rdiv_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__rdiv_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__rdiv_int8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int8_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_IDIV_SIGNED (bij, x, 8) ;
}
return (GrB_SUCCESS) ;
#endif
}
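// (illustrative note: with RDIV bound to its first input x, the loop above
// computes Cx [p] = Bx [p] / x for each entry present in B; the bind2nd
// variant below computes Cx [p] = y / Ax [p])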
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__rdiv_int8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int8_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_IDIV_SIGNED (y, aij, 8) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_SIGNED (aij, x, 8) ; \
}
GrB_Info GB (_bind1st_tran__rdiv_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_SIGNED (y, aij, 8) ; \
}
GrB_Info GB (_bind2nd_tran__rdiv_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rdiv_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__rdiv_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__rdiv_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__rdiv_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_int8)
// A*D function (colscale): GB (_AxD__rdiv_int8)
// D*A function (rowscale): GB (_DxB__rdiv_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__rdiv_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__rdiv_int8)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_int8)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_int8)
// C=scalar+B GB (_bind1st__rdiv_int8)
// C=scalar+B' GB (_bind1st_tran__rdiv_int8)
// C=A+scalar GB (_bind2nd__rdiv_int8)
// C=A'+scalar GB (_bind2nd_tran__rdiv_int8)
// C type: int8_t
// A type: int8_t
// A pattern? 0
// B type: int8_t
// B pattern? 0
// BinaryOp: cij = GB_IDIV_SIGNED (bij, aij, 8)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IDIV_SIGNED (y, x, 8) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RDIV || GxB_NO_INT8 || GxB_NO_RDIV_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__rdiv_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__rdiv_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__rdiv_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__rdiv_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__rdiv_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__rdiv_int8)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__rdiv_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int8_t alpha_scalar ;
int8_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
beta_scalar = (*((int8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__rdiv_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__rdiv_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__rdiv_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__rdiv_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__rdiv_int8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int8_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_IDIV_SIGNED (bij, x, 8) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__rdiv_int8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int8_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_IDIV_SIGNED (y, aij, 8) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_SIGNED (aij, x, 8) ; \
}
GrB_Info GB (_bind1st_tran__rdiv_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_SIGNED (y, aij, 8) ; \
}
GrB_Info GB (_bind2nd_tran__rdiv_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rdiv_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__rdiv_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__rdiv_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__rdiv_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_int8)
// A*D function (colscale): GB (_AxD__rdiv_int8)
// D*A function (rowscale): GB (_DxB__rdiv_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__rdiv_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__rdiv_int8)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_int8)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_int8)
// C=scalar+B GB (_bind1st__rdiv_int8)
// C=scalar+B' GB (_bind1st_tran__rdiv_int8)
// C=A+scalar GB (_bind2nd__rdiv_int8)
// C=A'+scalar GB (_bind2nd_tran__rdiv_int8)
// C type: int8_t
// A type: int8_t
// A pattern? 0
// B type: int8_t
// B pattern? 0
// BinaryOp: cij = GB_IDIV_SIGNED (bij, aij, 8)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IDIV_SIGNED (y, x, 8) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RDIV || GxB_NO_INT8 || GxB_NO_RDIV_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__rdiv_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__rdiv_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__rdiv_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__rdiv_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__rdiv_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__rdiv_int8)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__rdiv_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int8_t alpha_scalar ;
int8_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
beta_scalar = (*((int8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__rdiv_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__rdiv_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__rdiv_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__rdiv_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__rdiv_int8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int8_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_IDIV_SIGNED (bij, x, 8) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__rdiv_int8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int8_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_IDIV_SIGNED (y, aij, 8) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_SIGNED (aij, x, 8) ; \
}
GrB_Info GB (_bind1st_tran__rdiv_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_SIGNED (y, aij, 8) ; \
}
GrB_Info GB (_bind2nd_tran__rdiv_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
tree-vectorizer.h | "/* Vectorizer\n Copyright (C) 2003-2019 Free Software Foundation, Inc.\n Contributed by Dorit N(...TRUNCATED) | "\n\n#ifndef GCC_TREE_VECTORIZER_H\n#define GCC_TREE_VECTORIZER_H\n\ntypedef struct _stmt_vec_info *(...TRUNCATED) | "\n\n#ifndef GCC_TREE_VECTORIZER_H\n#define GCC_TREE_VECTORIZER_H\n\ntypedef struct _stmt_vec_info *(...TRUNCATED) |
ztrsm.c | "#include \"blas.h\"\n#include \"error.h\"\n#include <stdio.h>\n#include \"handle.h\"\n#include \"co(...TRUNCATED) | "#include \"blas.h\"\n#include \"error.h\"\n#include <stdio.h>\n#include \"handle.h\"\n#include \"co(...TRUNCATED) | "#include \"blas.h\"\n#include \"error.h\"\n#include <stdio.h>\n#include \"handle.h\"\n#include \"co(...TRUNCATED) |
binStruct.h | "#ifndef binStruct_h\n#define binStruct_h\n#include \"../../baseFunctions/fpBaseNode.h\"\n#include \(...TRUNCATED) | "#ifndef binStruct_h\n#define binStruct_h\n#include \"../../baseFunctions/fpBaseNode.h\"\n#include \(...TRUNCATED) | "#ifndef binStruct_h\n#define binStruct_h\n#include \"../../baseFunctions/fpBaseNode.h\"\n#include \(...TRUNCATED) |
particle_levelset_utilities.h | "/*\n==============================================================================\nKratosTestAppli(...TRUNCATED) | "\n\n\n//\n// Project Name: Kratos\n// Last Modified by: $Author: rrossi $\n// Date:(...TRUNCATED) | "\n\n\n//\n// Project Name: Kratos\n// Last Modified by: $Author: rrossi $\n// Date:(...TRUNCATED) |
hypre_merge_sort.c | "/******************************************************************************\n * Copyright 1998-(...TRUNCATED) | "\n\n#include \"_hypre_utilities.h\"\n#include \"hypre_hopscotch_hash.h\"\n#include \"../seq_mv/HYPR(...TRUNCATED) | "\n\n#include \"_hypre_utilities.h\"\n#include \"hypre_hopscotch_hash.h\"\n#include \"../seq_mv/HYPR(...TRUNCATED) |
par_mgr.c | "/******************************************************************************\n * Copyright 1998-(...TRUNCATED) | "\n\n/******************************************************************************\n *\n * Two-gri(...TRUNCATED) | "\n\n/******************************************************************************\n *\n * Two-gri(...TRUNCATED) |
GB_binop__isge_int32.c | "//------------------------------------------------------------------------------\n// GB_binop: har(...TRUNCATED) | "//------------------------------------------------------------------------------\n// GB_binop: har(...TRUNCATED) | "//------------------------------------------------------------------------------\n// GB_binop: har(...TRUNCATED) |
End of preview.
- Downloads last month: 43
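
For a quick look at the rows outside the web viewer, the split can be streamed with the Hugging Face `datasets` library. This is a minimal sketch only: the repository ID below is a placeholder (the real dataset path is not shown in this preview), and the column names are read from each row at run time rather than assumed.

```python
# Minimal sketch: stream a few rows of this dataset for inspection.
# NOTE: "user/dataset-name" is a placeholder repo ID, not the real path.
from datasets import load_dataset

ds = load_dataset("user/dataset-name", split="train", streaming=True)

for i, row in enumerate(ds):
    # Print column names and cell sizes instead of dumping full C source
    # bodies, which can be very large (entire generated files per cell).
    for key, value in row.items():
        size = len(value) if isinstance(value, str) else value
        print(f"row {i}, column {key!r}: {size}")
    if i == 2:  # stop after a handful of rows
        break
```

Streaming avoids downloading every source-file variant just to check the schema; drop `streaming=True` to materialize the split locally once the real repository ID is known.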