Skip to content

Commit

Permalink
[CINN] Fix dynamic shape mismatch that prevents RxT fusion (PaddlePaddl…
Browse files Browse the repository at this point in the history
  • Loading branch information
SigureMo authored Apr 22, 2024
1 parent b2100ff commit 053d22f
Show file tree
Hide file tree
Showing 4 changed files with 58 additions and 19 deletions.
23 changes: 11 additions & 12 deletions paddle/cinn/operator_fusion/policy/relative_judge_policy.cc
Original file line number Diff line number Diff line change
Expand Up @@ -168,12 +168,12 @@ SplitDims RelativeJudgePolicy<T>::SplitDimsWithRelationship(

bool DimsEqual(const std::vector<ValueDim>& first,
const std::vector<ValueDim>& second) {
const auto GetDimInfo =
[](const std::vector<ValueDim>& dims) -> std::unordered_map<size_t, int> {
std::unordered_map<size_t, int> result;
const auto GetDimInfo = [](const std::vector<ValueDim>& dims)
-> std::unordered_map<symbol::DimExpr, int> {
std::unordered_map<symbol::DimExpr, int> result;
for (const auto& dim : dims) {
VLOG(4) << "dim: " << dim.DebugStr();
size_t value = dim.GetNumericValue();
symbol::DimExpr value = dim.GetSymbolicDim();
VLOG(4) << "value: " << value;
if (result.find(value) == result.end()) {
result[value] = 1;
Expand All @@ -184,9 +184,11 @@ bool DimsEqual(const std::vector<ValueDim>& first,
return result;
};
VLOG(4) << "GetDimInfo";
const std::unordered_map<size_t, int>& first_dims = GetDimInfo(first);
const std::unordered_map<symbol::DimExpr, int>& first_dims =
GetDimInfo(first);
VLOG(4) << "GetDimInfo";
const std::unordered_map<size_t, int>& second_dims = GetDimInfo(second);
const std::unordered_map<symbol::DimExpr, int>& second_dims =
GetDimInfo(second);
if (first_dims.size() != second_dims.size()) return false;
for (const auto& [dim_value, count] : first_dims) {
if (second_dims.find(dim_value) == second_dims.end() ||
Expand Down Expand Up @@ -259,18 +261,15 @@ symbol::DimExpr GetProductDimExprForValueDims(
for (const auto& dim : dims) {
dim_idx.emplace_back(dim.idx_);
}
auto& shape_analysis = pir::ShapeAnalysisManager::Instance().Get(
dims[0].v_.defining_op()->GetParentProgram());
return shape_analysis.GetProductDimExpr(dims[0].v_, dim_idx);
return dims[0].shape_analysis().GetProductDimExpr(dims[0].v_, dim_idx);
}

bool IsProductSmallerOrEqual(const std::vector<ValueDim>& first,
const std::vector<ValueDim>& second) {
if (first.empty()) return true;
const auto& first_product = GetProductDimExprForValueDims(first);
const auto& second_product = GetProductDimExprForValueDims(second);
const auto& shape_analysis = pir::ShapeAnalysisManager::Instance().Get(
first[0].v_.defining_op()->GetParentProgram());
const auto& shape_analysis = first[0].shape_analysis();
if (second_product.isa<int64_t>() && first_product.isa<int64_t>()) {
VLOG(4) << "Static Shape: left is "
<< std::get<int64_t>(first_product.variant()) << " ; right is "
Expand Down Expand Up @@ -357,7 +356,7 @@ std::vector<size_t> RelativeJudgePolicy<T>::GetFakeReduceIterIdx(
for (auto& reduce_dim : upstream_reduce_dims) {
for (auto& trivial_dim : trivial_reorder_dims) {
if (visited_dims.find(trivial_dim) == visited_dims.end() &&
trivial_dim.GetNumericValue() == reduce_dim.GetNumericValue()) {
trivial_dim.SymbolicEqualTo(reduce_dim)) {
visited_dims.emplace(trivial_dim);
result.emplace_back(trivial_dim.idx_);
break;
Expand Down
39 changes: 36 additions & 3 deletions paddle/cinn/operator_fusion/policy/relative_judge_policy.h
Original file line number Diff line number Diff line change
Expand Up @@ -17,21 +17,46 @@
#include "paddle/cinn/operator_fusion/policy/policy_manager.h"
#include "paddle/cinn/operator_fusion/policy/shardable_axes_base.h"
#include "paddle/cinn/operator_fusion/utils.h"
#include "paddle/common/enforce.h"

namespace cinn::fusion {

struct ValueDim {
pir::Value v_;
size_t idx_;
ValueDim(pir::Value v, size_t idx) : v_(v), idx_(idx) {}
std::weak_ptr<pir::ShapeConstraintIRAnalysis> shape_analysis_;
ValueDim(pir::Value v, size_t idx) : v_(v), idx_(idx) {
// Just get a related op to get the shape analysis. It can be the value's
// upstream op (defining op) or downstream op (user op).
const auto get_related_op_from_value =
[](const pir::Value& v) -> pir::Operation* {
if (v.defining_op() != nullptr) {
return v.defining_op();
}
// For inputs of the program, the defining_op is nullptr, so we use its user
// as the related op.
PADDLE_ENFORCE_EQ(v.use_empty(),
false,
phi::errors::PreconditionNotMet(
"Value is an input value, it should have a use."));
return v.first_use().owner();
};
shape_analysis_ = pir::ShapeAnalysisManager::Instance()
.Get(get_related_op_from_value(v)->GetParentProgram())
.shared_from_this();
}
ValueDim() = default;
ValueDim(const ValueDim& v) = default;
bool operator==(const ValueDim& v) const {
return (idx_ == v.idx_) && (v_ == v.v_);
}

size_t GetNumericValue() const {
return v_.type().dyn_cast<pir::DenseTensorType>().dims().at(idx_);
symbol::DimExpr GetSymbolicDim() const {
return shape_analysis().GetProductDimExpr(v_, {static_cast<int>(idx_)});
}

bool SymbolicEqualTo(const ValueDim& other) const {
return shape_analysis().IsEqual(GetSymbolicDim(), other.GetSymbolicDim());
}

std::string DebugStr() const {
Expand All @@ -42,6 +67,14 @@ struct ValueDim {
v_.defining_op()->Print(oss);
return oss.str();
}

pir::ShapeConstraintIRAnalysis& shape_analysis() const {
auto shape_analysis_ptr = shape_analysis_.lock();
PADDLE_ENFORCE_NOT_NULL(
shape_analysis_ptr,
phi::errors::PreconditionNotMet("shape_analysis_ptr is nullptr."));
return *shape_analysis_ptr;
}
};

struct ValueDimHash {
Expand Down
11 changes: 9 additions & 2 deletions paddle/pir/include/dialect/shape/utils/shape_analysis.h
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@

#pragma once

#include <memory>
#include "paddle/pir/include/core/builtin_attribute.h"
#include "paddle/pir/include/core/builtin_op.h"
#include "paddle/pir/include/core/builtin_type_interfaces.h"
Expand All @@ -27,8 +28,13 @@
namespace pir {

// The implementation is based on shape constraint ir.
class IR_API ShapeConstraintIRAnalysis {
class IR_API ShapeConstraintIRAnalysis final
: public std::enable_shared_from_this<ShapeConstraintIRAnalysis> {
public:
ShapeConstraintIRAnalysis() = default;
ShapeConstraintIRAnalysis(const ShapeConstraintIRAnalysis&) = delete;
ShapeConstraintIRAnalysis(ShapeConstraintIRAnalysis&&) = delete;

void Init();

const std::string GetNextSymName();
Expand Down Expand Up @@ -117,7 +123,8 @@ class IR_API ShapeAnalysisManager {

private:
ShapeAnalysisManager() {}
std::unordered_map<uint64_t, ShapeConstraintIRAnalysis> tables_;
std::unordered_map<uint64_t, std::shared_ptr<ShapeConstraintIRAnalysis>>
tables_;
};

#define OP_DECLARE_INFER_SYMBOLIC_SHAPE(name) \
Expand Down
4 changes: 2 additions & 2 deletions paddle/pir/src/dialect/shape/utils/shape_analysis.cc
Original file line number Diff line number Diff line change
Expand Up @@ -402,11 +402,11 @@ ShapeConstraintIRAnalysis& ShapeAnalysisManager::Get(pir::Program* program) {
if (it == tables_.end()) {
it = tables_
.emplace(program->module_op().operation()->id(),
ShapeConstraintIRAnalysis())
std::make_shared<ShapeConstraintIRAnalysis>())
.first;
}

return it->second;
return *it->second;
}

} // namespace pir

0 comments on commit 053d22f

Please sign in to comment.