[PIR] Replace OpResult with Value in python #60798

Merged
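The change is mechanical across all the files below: every isinstance whitelist and check_type guard that accepted both paddle.pir.OpResult and paddle.pir.Value now accepts paddle.pir.Value only. A minimal sketch of the resulting call-site check, assuming a PIR-mode program is already built (the helper name is illustrative, not part of this PR):

import paddle

def _ensure_pir_value(x):
    # Before this PR: isinstance(x, (paddle.pir.Value, paddle.pir.OpResult))
    # After this PR, OpResult is removed from the Python API, so a single
    # check against paddle.pir.Value is sufficient.
    if not isinstance(x, paddle.pir.Value):
        raise TypeError(f"Expected paddle.pir.Value, got {type(x)}")
    return x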
6 changes: 3 additions & 3 deletions python/paddle/autograd/backward_utils.py
@@ -172,13 +172,13 @@ class State:

def __init__(self, block):
self.block = block
# opresult -> list(list(opresult))
# value -> list(list(value))
self.value_to_valuegrad = ValueDict(default_factory=list)
self.value_to_sumvaluegrad = ValueDict(default_factory=list)
# operation -> list(operation)
self.op_to_opgrad = collections.defaultdict(list)

# opresult -> list(opresult)
# value -> list(value)
self.valuegrad_to_value = ValueDict(default_factory=list)
self.sumvaluegrad_to_value = ValueDict(default_factory=list)
# operation -> list(operation)
@@ -213,7 +213,7 @@ def copy(self, new_block):
# operation -> list(operation)
state.op_to_opgrad = self.op_to_opgrad.copy()

# opresult -> list(opresult)
# value -> list(value)
state.valuegrad_to_value = self.valuegrad_to_value.copy()
state.sumvaluegrad_to_value = self.sumvaluegrad_to_value.copy()
# operation -> list(operation)
12 changes: 6 additions & 6 deletions python/paddle/autograd/ir_backward.py
@@ -1202,27 +1202,27 @@ def grad(
check_type(
outputs,
'outputs',
((paddle.pir.Value, paddle.pir.OpResult), list, tuple),
(paddle.pir.Value, list, tuple),
'paddle.autograd.ir_backward.grad',
)
check_type(
inputs,
'inputs',
((paddle.pir.Value, paddle.pir.OpResult), list, tuple),
(paddle.pir.Value, list, tuple),
'paddle.autograd.ir_backward.grad',
)
check_type(
grad_outputs,
'grad_outputs',
((paddle.pir.Value, paddle.pir.OpResult), list, tuple, type(None)),
(paddle.pir.Value, list, tuple, type(None)),
'paddle.autograd.ir_backward.grad',
)

check_type(
no_grad_vars,
'no_grad_vars',
(
(paddle.pir.Value, paddle.pir.OpResult),
paddle.pir.Value,
list,
tuple,
set,
@@ -1265,7 +1265,7 @@ def append_backward(loss, parameter_list=None, no_grad_set=None):
check_type(
loss,
'loss',
(paddle.pir.Value, paddle.pir.OpResult),
paddle.pir.Value,
'paddle.autograd.ir_backward.append_backward',
)

@@ -1280,7 +1280,7 @@ def append_backward(loss, parameter_list=None, no_grad_set=None):
check_type(
param,
'parameter_list[%s]' % i,
(paddle.pir.Value, paddle.pir.OpResult),
paddle.pir.Value,
'base.backward.append_backward',
)

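For readers unfamiliar with check_type, a hedged stand-in for the guard these hunks simplify is shown below; the real helper is paddle's internal argument validator, and this sketch only mirrors its observable behavior:

def check_type(value, name, expected_type, caller):
    # Simplified stand-in: reject anything outside the whitelisted types,
    # the way grad() and append_backward() validate their arguments above.
    if not isinstance(value, expected_type):
        raise TypeError(
            f"{caller}: '{name}' must be an instance of {expected_type}, "
            f"but got {type(value)}"
        )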
8 changes: 4 additions & 4 deletions python/paddle/base/backward.py
@@ -2752,27 +2752,27 @@ def gradients(targets, inputs, target_gradients=None, no_grad_set=None):
check_type(
targets,
'targets',
((paddle.pir.Value, paddle.pir.OpResult), list, tuple),
(paddle.pir.Value, list, tuple),
'paddle.autograd.ir_backward.grad',
)
check_type(
inputs,
'inputs',
((paddle.pir.Value, paddle.pir.OpResult), list, tuple),
(paddle.pir.Value, list, tuple),
'paddle.autograd.ir_backward.grad',
)
check_type(
target_gradients,
'target_gradients',
((paddle.pir.Value, paddle.pir.OpResult), list, tuple, type(None)),
(paddle.pir.Value, list, tuple, type(None)),
'paddle.autograd.ir_backward.grad',
)

check_type(
no_grad_set,
'no_grad_set',
(
(paddle.pir.Value, paddle.pir.OpResult),
paddle.pir.Value,
list,
tuple,
set,
4 changes: 1 addition & 3 deletions python/paddle/base/layer_helper_base.py
@@ -97,9 +97,7 @@ def to_variable(self, value, name=None):
name if name else None,
True,
)
elif isinstance(
value, (Variable, core.eager.Tensor, paddle.pir.OpResult)
):
elif isinstance(value, (Variable, core.eager.Tensor, paddle.pir.Value)):
return value
else:
raise TypeError(
16 changes: 4 additions & 12 deletions python/paddle/base/variable_index.py
@@ -234,17 +234,11 @@ def slice_is_same_to_original(start, end, step):
return True

# If there is Variable, we cannot determine whether it is the same to original.
if isinstance(
start, (paddle.base.Variable, paddle.pir.Value, paddle.pir.OpResult)
):
if isinstance(start, (paddle.base.Variable, paddle.pir.Value)):
return False
if isinstance(
end, (paddle.base.Variable, paddle.pir.Value, paddle.pir.OpResult)
):
if isinstance(end, (paddle.base.Variable, paddle.pir.Value)):
return False
if isinstance(
step, (paddle.base.Variable, paddle.pir.Value, paddle.pir.OpResult)
):
if isinstance(step, (paddle.base.Variable, paddle.pir.Value)):
return False
return start == 0 and end == MAX_INTEGER and step == 1

@@ -594,9 +588,7 @@ def _setitem_static(x, indices, values):
# 3. assign values to the sliced result by index_put OP;
# 4. transpose back and assign the result to original tensor by set_value OP.

if not isinstance(
values, (Variable, paddle.pir.Value, paddle.pir.OpResult)
):
if not isinstance(values, (Variable, paddle.pir.Value)):
values = paddle.assign(values).astype(x.dtype)

sub_tensor = get_tensor_with_basic_indexing(
4 changes: 2 additions & 2 deletions python/paddle/decomposition/decomp.py
@@ -884,7 +884,7 @@ def _reset_prim_state(state):


def _translate_gradvartovar_to_pir(param_mapping, grad_var_to_var):
'''translate grad_var_to_var (mapping VarDesc->VarDesc) to pir_grad_var_to_var (mapping OpResult->OpResult)'''
'''translate grad_var_to_var (mapping VarDesc->VarDesc) to pir_grad_var_to_var (mapping Value->Value)'''
pir_grad_var_to_var = ValueDict()
for grad_var, var in grad_var_to_var.items():
if grad_var in param_mapping.keys() and var in param_mapping.keys():
@@ -992,7 +992,7 @@ def decompose_pir_program(pir_program, param_mapping, grad_var_to_var):
Decompose all PHI ops into prim ops in a pir program.
Args:
pir_program (Program): the program to be decomposed
param_mapping (dict): a map of program variables to pir program opresults
param_mapping (dict): a map of program variables to pir program values
grad_var_to_var (dict): a dict obtained from distributed processing,
which maps the backward grad variable to its corresponding forward variable.
'''
10 changes: 5 additions & 5 deletions python/paddle/jit/pir_dy2static/parameter_recorder.py
@@ -21,7 +21,7 @@
class ParametersRecorder:
def __init__(self):
self.params_dict = {}
self.tensor2opresult = {}
self.tensor2value = {}

@synchronized
def get(self, program, tensor):
@@ -31,10 +31,10 @@ def get(self, program, tensor):
key = _program_hash(program)
if key not in self.params_dict:
self.params_dict[key] = set()
self.tensor2opresult[key] = {}
self.tensor2value[key] = {}

params = self.params_dict[key]
mappings = self.tensor2opresult[key]
mappings = self.tensor2value[key]
if id(tensor) not in mappings:
non_used_initializer = paddle.nn.initializer.Constant(0.0)
op_result = create_parameter(
@@ -54,10 +54,10 @@ def pop(self, program):
if params is None:
return [], []
params_values = [
self.tensor2opresult[hash_id][id(x)] for x in list(params)
self.tensor2value[hash_id][id(x)] for x in list(params)
]
del self.params_dict[hash_id]
del self.tensor2opresult[hash_id]
del self.tensor2value[hash_id]
return list(params), list(params_values)


8 changes: 4 additions & 4 deletions python/paddle/optimizer/adamw.py
@@ -19,7 +19,7 @@
import paddle
from paddle import pir
from paddle.base.libpaddle import DataType
from paddle.pir import OpResult
from paddle.pir import Value

from .. import _C_ops
from ..base import core, framework
@@ -182,7 +182,7 @@ def __init__(
if not 0 <= epsilon:
raise ValueError("Invaild value of epsilon, expect epsilon >= 0.")
if not isinstance(weight_decay, float) and not isinstance(
weight_decay, (framework.Variable, OpResult)
weight_decay, (framework.Variable, Value)
):
raise TypeError("weight_decay should be float or Tensor.")
if lr_ratio is not None:
@@ -375,7 +375,7 @@ def _add_moments_pows(self, p):
param=p,
dtype=acc_dtype,
fill_value=0.9
if isinstance(self._beta1, (Variable, OpResult))
if isinstance(self._beta1, (Variable, Value))
else self._beta1,
shape=[1],
type=core.VarDesc.VarType.LOD_TENSOR,
@@ -386,7 +386,7 @@ def _add_moments_pows(self, p):
param=p,
dtype=acc_dtype,
fill_value=0.999
if isinstance(self._beta2, (Variable, OpResult))
if isinstance(self._beta2, (Variable, Value))
else self._beta2,
shape=[1],
type=core.VarDesc.VarType.LOD_TENSOR,
4 changes: 2 additions & 2 deletions python/paddle/optimizer/momentum.py
@@ -476,12 +476,12 @@ def _append_optimize_multi_tensor_op(
found_inf = self._get_auxiliary_var('found_inf')
if found_inf:
if isinstance(
found_inf, (core.eager.Tensor, paddle.pir.OpResult)
found_inf, (core.eager.Tensor, paddle.pir.Value)
):
self._set_auxiliary_var('found_inf', True)
else:
if isinstance(
found_inf, (core.eager.Tensor, paddle.pir.OpResult)
found_inf, (core.eager.Tensor, paddle.pir.Value)
):
self._set_auxiliary_var('found_inf', False)
_, _, _ = _C_ops.merged_momentum_(
8 changes: 4 additions & 4 deletions python/paddle/optimizer/optimizer.py
@@ -457,7 +457,7 @@ def do_create():
set_parameter(init_result, lr_name)
main_program.move_parameters_from(startup_program)

if not isinstance(lr_var, paddle.pir.OpResult):
if not isinstance(lr_var, paddle.pir.Value):
self._learning_rate._var_name = lr_name
with paddle.static.program_guard(main_program):
param = parameter(lr_name, _lr_dtype, [])
@@ -497,7 +497,7 @@ def do_create():
# only create global lr_var once
lr = self._global_learning_rate()
if in_pir_mode():
if isinstance(lr, paddle.pir.OpResult):
if isinstance(lr, paddle.pir.Value):
return
else:
place = _current_expected_place()
@@ -764,7 +764,7 @@ def _create_param_lr(self, param_and_grad):
param = param_and_grad[0]
if hasattr(param, 'optimize_attr'):
param_lr = param.optimize_attr['learning_rate']
if isinstance(param_lr, (Variable, paddle.pir.OpResult)):
if isinstance(param_lr, (Variable, paddle.pir.Value)):
return param_lr
else:
if param_lr == 1.0:
@@ -1735,7 +1735,7 @@ def minimize(

"""
assert isinstance(
loss, (Variable, paddle.pir.OpResult)
loss, (Variable, paddle.pir.Value)
), "The loss should be an Tensor."

parameter_list = parameters if parameters else self._parameter_list
1 change: 0 additions & 1 deletion python/paddle/pir/__init__.py
@@ -16,7 +16,6 @@
Block,
Operation,
OpOperand,
OpResult,
PassManager,
Program,
Type,
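Removing OpResult from this export list means paddle.pir.OpResult no longer exists; downstream imports have to switch to Value, as the adamw.py hunk above already does. A minimal migration sketch (the helper below is illustrative):

# Old import, no longer available after this PR:
#   from paddle.pir import OpResult
from paddle.pir import Value

def is_pir_value(x):
    # A single Value type now covers what OpResult used to represent.
    return isinstance(x, Value)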
2 changes: 1 addition & 1 deletion python/paddle/pir/core.py
@@ -332,7 +332,7 @@ def create_parameter(

def _convert_into_value(tensor):
"""
Convert Tensor into OpResult.
Convert Tensor into Value.
"""
import paddle
from paddle.jit.pir_dy2static.parameter_recorder import (
4 changes: 2 additions & 2 deletions python/paddle/regularizer.py
@@ -124,7 +124,7 @@ def __call__(self, param, grad, block):
new variable for weight decay
"""
assert isinstance(
param, (framework.Variable, pir.OpResult, pir.core.ParameterMeta)
param, (framework.Variable, pir.Value, pir.core.ParameterMeta)
)
assert isinstance(block, (framework.Block, pir.Block))

@@ -236,7 +236,7 @@ def __call__(self, param, grad, block):
new variable for weight decay
"""
assert isinstance(
param, (framework.Variable, pir.OpResult, pir.core.ParameterMeta)
param, (framework.Variable, pir.Value, pir.core.ParameterMeta)
)
assert isinstance(block, (framework.Block, pir.Block))

2 changes: 1 addition & 1 deletion python/paddle/tensor/attribute.py
@@ -238,7 +238,7 @@ def is_integer(x):
True
"""
if not isinstance(
x, (paddle.Tensor, paddle.static.Variable, paddle.pir.OpResult)
x, (paddle.Tensor, paddle.static.Variable, paddle.pir.Value)
):
raise TypeError(f"Expected Tensor, but received type of x: {type(x)}")
dtype = x.dtype