[clear ps] del some all list #51289

Merged (3 commits) on Mar 9, 2023
Changes from all commits
@@ -40,6 +40,9 @@
 from paddle.incubate.distributed.fleet.role_maker import MPISymetricRoleMaker

 from paddle.incubate.distributed.fleet.parameter_server import version
+from paddle.incubate.distributed.fleet.parameter_server.pslib.optimizer_factory import (
+    DistributedAdam,
+)
 from paddle.incubate.distributed.fleet.parameter_server.ir.public import (
     get_sparse_tablenames,
 )
@@ -14,15 +14,16 @@

 import os
 import sys
-from .optimizer_factory import *  # noqa: F403
+from .optimizer_factory import FLEET_GLOBAL_DICT  # noqa: F403
+from .optimizer_factory import DistributedAdam  # noqa: F403
 from google.protobuf import text_format
 from paddle.framework import core

 from paddle.incubate.distributed.fleet.base import Fleet
 from paddle.incubate.distributed.fleet.base import Mode
 from paddle.incubate.distributed.fleet.base import DistributedOptimizer
 from paddle.incubate.distributed.fleet.role_maker import MPISymetricRoleMaker
 from paddle.incubate.distributed.fleet.role_maker import HeterRoleMaker
+import paddle


 class PSLib(Fleet):
@@ -534,7 +535,7 @@ def shrink_dense_table(self, decay, emb_dim=11, scope=None, table_id=None):
             >>> fleet.shrink_dense_table(0.98, 11, myscope2, 3)
         """
         if scope is None:
-            scope = fluid.global_scope()
+            scope = paddle.static.global_scope()
         self._role_maker._barrier_worker()
         if self._role_maker.is_first_worker():
             for tp in self._opt_info["fleet_desc"].trainer_param:
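The global_scope call sites in this file (here and in minimize further down) are a straight rename: paddle.static.global_scope() resolves to the same default scope that fluid.global_scope() returned. A minimal sketch of the new call, assuming a Paddle 2.x install with static mode enabled:

import paddle

paddle.enable_static()

# Default global Scope that static-graph programs execute against; this is the
# object the fleet code falls back to when the caller does not pass a scope.
scope = paddle.static.global_scope()
print(type(scope))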
@@ -971,7 +972,7 @@ def _fleet_embedding(
     if padding_idx is None:
         padding_idx = 0
     global FLEET_GLOBAL_DICT
-    return fluid.layers.nn._pull_sparse(
+    return paddle.static.nn._pull_sparse(
         input=input,
         size=size,
         table_id=FLEET_GLOBAL_DICT["emb_to_table"][name],
@@ -1013,7 +1014,7 @@ def _fleet_embedding_v2(
     if padding_idx is None:
         padding_idx = 0

-    return fluid.layers.nn._pull_sparse_v2(
+    return paddle.static.nn._pull_sparse_v2(
         input=input,
         size=size,
         table_id=FLEET_GLOBAL_DICT["emb_to_table"][name],
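_pull_sparse and _pull_sparse_v2 are private helpers; they only become reachable through paddle.static.nn because of the two re-exports added to python/paddle/static/nn/__init__.py at the end of this diff. A quick sanity check, assuming a Paddle build that already contains this change:

import paddle

# Both internal pull-sparse entry points should now resolve via paddle.static.nn
# rather than paddle.fluid.layers.nn.
assert hasattr(paddle.static.nn, "_pull_sparse")
assert hasattr(paddle.static.nn, "_pull_sparse_v2")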
@@ -1042,8 +1043,8 @@ class fleet_embedding:

     def __init__(self, click_name, scale_sparse_grad=True):
         """Init."""
-        self.origin_emb = fluid.layers.embedding
-        self.origin_emb_v2 = fluid.embedding
+        # self.origin_emb = fluid.layers.embedding
+        self.origin_emb_v2 = paddle.static.nn.embedding
         # if user uses cvm layer after embedding, click_name can be None
         self.click_name = "" if click_name is None else click_name
         self.scale_sparse_grad = scale_sparse_grad
@@ -1052,16 +1053,16 @@ def __init__(self, click_name, scale_sparse_grad=True):

     def __enter__(self):
         """Enter."""
-        fluid.layers.embedding = _fleet_embedding
-        fluid.embedding = _fleet_embedding_v2
+        # fluid.layers.embedding = _fleet_embedding
+        paddle.static.nn.embedding = _fleet_embedding_v2
         FLEET_GLOBAL_DICT["cur_accessor"] = self.accessor
         FLEET_GLOBAL_DICT["click_name"] = self.click_name
         FLEET_GLOBAL_DICT["scale_sparse_grad"] = self.scale_sparse_grad

     def __exit__(self, exc_type, exc_val, exc_tb):
         """Exit."""
-        fluid.layers.embedding = self.origin_emb
-        fluid.embedding = self.origin_emb_v2
+        # fluid.layers.embedding = self.origin_emb
+        paddle.static.nn.embedding = self.origin_emb_v2
         FLEET_GLOBAL_DICT["cur_accessor"] = ""
         FLEET_GLOBAL_DICT["click_name"] = ""
         FLEET_GLOBAL_DICT["scale_sparse_grad"] = None
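The context manager keeps its save/patch/restore shape; only the patched attribute moves from the fluid entry points to paddle.static.nn.embedding. A standalone sketch of that pattern, where _patched_embedding and _swap_embedding are hypothetical stand-ins rather than the fleet implementation:

import paddle


def _patched_embedding(*args, **kwargs):
    # Hypothetical stand-in for _fleet_embedding_v2.
    raise NotImplementedError


class _swap_embedding:
    """Temporarily replace paddle.static.nn.embedding and restore it on exit."""

    def __enter__(self):
        self._origin = paddle.static.nn.embedding
        paddle.static.nn.embedding = _patched_embedding

    def __exit__(self, exc_type, exc_val, exc_tb):
        paddle.static.nn.embedding = self._origin


with _swap_embedding():
    assert paddle.static.nn.embedding is _patched_embedding
assert paddle.static.nn.embedding is not _patched_embedding

One caveat of this style of patching: only callers that look up paddle.static.nn.embedding at call time see the replacement; code that bound the function earlier with "from paddle.static.nn import embedding" keeps the original.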
@@ -1220,7 +1221,7 @@ def minimize(
         programs = [loss.block.program for loss in losses]

         if scopes is None:
-            scopes = [fluid.global_scope()] * len(programs)
+            scopes = [paddle.static.global_scope()] * len(programs)

         if len(scopes) != len(programs):
             raise ValueError(
@@ -13,7 +13,7 @@
 # limitations under the License.
 """Optimizer Factory."""

-__all__ = ["DistributedAdam", "FLEET_GLOBAL_DICT"]
+__all__ = []
 import copy
 import logging
 import os
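With __all__ emptied, "from .optimizer_factory import *" no longer exports anything, which is why the pslib __init__.py hunk above switches to explicit imports of FLEET_GLOBAL_DICT and DistributedAdam. A two-file sketch of the effect, using hypothetical stub names:

# --- optimizer_factory_stub.py (mimics optimizer_factory after this change) ---
__all__ = []
FLEET_GLOBAL_DICT = {"emb_to_table": {}}


class DistributedAdam:
    pass


# --- consumer.py (mimics pslib/__init__.py) ---
from optimizer_factory_stub import *  # binds nothing, because __all__ is empty
from optimizer_factory_stub import (  # explicit imports still work
    FLEET_GLOBAL_DICT,
    DistributedAdam,
)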
python/paddle/static/nn/__init__.py (2 additions, 0 deletions)
@@ -41,6 +41,8 @@
 from .common import embedding  # noqa: F401
 from ...fluid.contrib.layers import sparse_embedding  # noqa: F401
 from ...fluid.layers import StaticRNN  # noqa: F401
+from ...fluid.layers.nn import _pull_sparse  # noqa: F401
+from ...fluid.layers.nn import _pull_sparse_v2  # noqa: F401

 from .sequence_lod import sequence_conv  # noqa: F401
 from .sequence_lod import sequence_softmax  # noqa: F401