Commit

chore: reorganize submodules to follow PEP8 naming convention

XuehaiPan committed Jul 4, 2022
1 parent 8f01ad5 commit bf5d6c9

Showing 14 changed files with 423 additions and 263 deletions.
78 changes: 42 additions & 36 deletions torchopt/__init__.py
@@ -15,50 +15,56 @@
"""TorchOpt: a high-performance optimizer library built upon PyTorch."""

from torchopt._src import (
accelerated_op_available,
clip,
combine,
hook,
schedule,
visual,
accelerated_op_available,
clip,
combine,
hook,
schedule,
visual,
)
from torchopt._src.alias import adam, rmsprop, sgd
from torchopt._src.MetaOptimizer import (
MetaAdam,
MetaOptimizer,
MetaRMSProp,
MetaSGD,
from torchopt._src.optimizer import (
Optimizer,
SGD,
Adam,
RMSProp,
meta,
)
from torchopt._src.optimizer.meta import (
MetaOptimizer,
MetaSGD,
MetaAdam,
MetaRMSProp,
)
from torchopt._src.Optimizer import SGD, Adam, Optimizer, RMSProp
from torchopt._src.update import apply_updates
from torchopt._src.utils import (
extract_state_dict,
recover_state_dict,
stop_gradient,
extract_state_dict,
recover_state_dict,
stop_gradient,
)

__version__ = "0.4.1"

__all__ = (
"accelerated_op_available",
"clip",
"combine",
"hook",
"schedule",
"visual",
"adam",
"rmsprop",
"sgd",
"MetaAdam",
"MetaOptimizer",
"MetaRMSProp",
"MetaSGD",
"SGD",
"Adam",
"Optimizer",
"RMSProp",
"apply_updates",
"extract_state_dict",
"recover_state_dict",
"stop_gradient",
"accelerated_op_available",
"clip",
"combine",
"hook",
"schedule",
"visual",
"adam",
"rmsprop",
"sgd",
"Optimizer",
"SGD",
"Adam",
"RMSProp",
"MetaOptimizer",
"MetaSGD",
"MetaAdam",
"MetaRMSProp",
"apply_updates",
"extract_state_dict",
"recover_state_dict",
"stop_gradient",
)
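
The net effect of this hunk is that the public, top-level API is unchanged: everything in `__all__` is still importable from `torchopt` directly. A minimal usage sketch under that assumption, further assuming the high-level classes expose the usual `zero_grad()`/`step()` methods of `torch.optim.Optimizer` (those methods live in the collapsed part of `base.py` below, not in this hunk):

import torch
import torch.nn.functional as F

import torchopt

# Classes are re-exported at the package root, so user code keeps importing
# `torchopt.Adam` regardless of the internal module renames.
net = torch.nn.Linear(4, 1)
optimizer = torchopt.Adam(net.parameters(), lr=1e-3)

x, y = torch.randn(8, 4), torch.randn(8, 1)
loss = F.mse_loss(net(x), y)

optimizer.zero_grad()  # assumed to mirror the torch.optim.Optimizer interface
loss.backward()
optimizer.step()
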
Binary file not shown.
2 changes: 1 addition & 1 deletion torchopt/_src/accelerated_op/adam_op/__init__.py
@@ -13,4 +13,4 @@
# limitations under the License.
# ==============================================================================

from torchopt._src.accelerated_op.adam_op.AdamOp import AdamOp
from torchopt._src.accelerated_op.adam_op.adam_op import AdamOp
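
Only the module path of the fused Adam operator changes here. As a hedged sketch of how that operator is typically reached from user code: `accelerated_op_available` is re-exported at the top level (see `__all__` above), and the `Adam`/`MetaAdam` constructors take a `use_accelerated_op` flag; the exact signature of the availability check is not shown in this diff and is assumed to be callable with no arguments.

import torch

import torchopt

net = torch.nn.Linear(4, 1)

# Prefer the fused AdamOp kernel when it was built for this installation,
# otherwise fall back to the pure-PyTorch update path.
use_fused = torchopt.accelerated_op_available()  # assumed zero-argument call
optimizer = torchopt.Adam(net.parameters(), lr=1e-3, use_accelerated_op=use_fused)
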
21 changes: 21 additions & 0 deletions torchopt/_src/optimizer/__init__.py
@@ -0,0 +1,21 @@
# Copyright 2022 MetaOPT Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

from torchopt._src.optimizer.base import Optimizer
from torchopt._src.optimizer.adam import Adam
from torchopt._src.optimizer.rmsprop import RMSProp
from torchopt._src.optimizer.sgd import SGD

from torchopt._src.optimizer import meta
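
For code that imports from the private `_src` tree, the mapping from the old CamelCase modules to the new PEP8 subpackage is mechanical; a before/after sketch based on the deletions and additions in `torchopt/__init__.py` above:

# Before this commit (CamelCase module names):
#   from torchopt._src.Optimizer import SGD, Adam, Optimizer, RMSProp
#   from torchopt._src.MetaOptimizer import MetaAdam, MetaOptimizer
#
# After this commit (PEP8 module names):
from torchopt._src.optimizer import SGD, Adam, Optimizer, RMSProp
from torchopt._src.optimizer.meta import MetaAdam, MetaOptimizer
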
55 changes: 55 additions & 0 deletions torchopt/_src/optimizer/adam.py
@@ -0,0 +1,55 @@
# Copyright 2022 MetaOPT Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

from torchopt._src.alias import adam
from torchopt._src.optimizer.base import Optimizer
from torchopt._src.typing import ScalarOrSchedule


class Adam(Optimizer):
"""A canonical Stochastic Gradient Descent optimizer."""

def __init__(
self,
params,
lr: ScalarOrSchedule,
b1: float = 0.9,
b2: float = 0.999,
eps: float = 1e-8,
eps_root: float = 0.0,
use_accelerated_op: bool = False
):
"""The `init` function.
Args:
params (iterable):
An iterable of `torch.Tensor`s. Specifies what Tensors should be
optimized.
args:
For other arguments, see `alias.adam`.
"""

super().__init__(
params,
adam(
lr=lr,
b1=b1,
b2=b2,
eps=eps,
eps_root=eps_root,
moment_requires_grad=False,
use_accelerated_op=use_accelerated_op
)
)
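
The new `Adam` class is a thin wrapper: every constructor argument is forwarded to the functional `alias.adam` transformation with `moment_requires_grad=False`, and the result is handed to the `Optimizer` base class. A sketch of that mapping with non-default hyperparameters (the numeric values are illustrative only):

import torch

import torchopt

net = torch.nn.Linear(4, 1)

# Equivalent to Optimizer(net.parameters(), adam(lr=1e-3, b1=0.9, b2=0.99,
# eps=1e-8, eps_root=0.0, moment_requires_grad=False, use_accelerated_op=False)).
optimizer = torchopt.Adam(
    net.parameters(),
    lr=1e-3,   # may also be a schedule, per the ScalarOrSchedule annotation
    b1=0.9,
    b2=0.99,   # illustrative non-default value
    eps=1e-8,
)
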
117 changes: 5 additions & 112 deletions torchopt/_src/Optimizer.py → torchopt/_src/optimizer/base.py
@@ -12,28 +12,27 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import Iterable, Union

from typing import Iterable

import jax
import torch

from torchopt._src import base
from torchopt._src.alias import adam, rmsprop, sgd
from torchopt._src.typing import ScalarOrSchedule
from torchopt._src.base import GradientTransformation
from torchopt._src.update import apply_updates


class Optimizer(object):
"""A high-level base class that has the similar with `torch.optim.Optimizer`."""

def __init__(self, params: Iterable, impl: base.GradientTransformation):
def __init__(self, params: Iterable, impl: GradientTransformation):
"""The `init` function.
Args:
params (iterable):
An iterable of `torch.Tensor`s. Specifies what Tensors should be
optimized.
impl (base.GradientTransformation):
impl (GradientTransformation):
A low-level optimizer function; it could be an optimizer function
provided by `alias.py` or a customized `chain` provided by
`combine.py`.
@@ -126,109 +125,3 @@ def add_param_group(self, params):
self.param_groups.append(params)
self.param_tree_groups.append(tree)
self.state_groups.append(self.impl.init(params))


class SGD(Optimizer):
"""The classic Adam optimizer."""

def __init__(
self,
params,
lr: ScalarOrSchedule,
momentum: Union[float, None] = None,
nesterov: bool = False
):
"""The `init` function.
Args:
params (iterable):
An iterable of `torch.Tensor`s. Specifies what Tensors should be
optimized.
args:
For other arguments, see `alias.sgd`.
"""

super().__init__(
params,
sgd(
lr=lr,
momentum=momentum,
nesterov=nesterov,
moment_requires_grad=False
)
)


class Adam(Optimizer):
"""A canonical Stochastic Gradient Descent optimizer."""

def __init__(
self,
params,
lr: ScalarOrSchedule,
b1: float = 0.9,
b2: float = 0.999,
eps: float = 1e-8,
eps_root: float = 0.0,
use_accelerated_op: bool = False
):
"""The `init` function.
Args:
params (iterable):
An iterable of `torch.Tensor`s. Specifies what Tensors should be
optimized.
args:
For other arguments, see `alias.adam`.
"""

super().__init__(
params,
adam(
lr=lr,
b1=b1,
b2=b2,
eps=eps,
eps_root=eps_root,
moment_requires_grad=False,
use_accelerated_op=use_accelerated_op
)
)


class RMSProp(Optimizer):
"""An RMSProp optimiser."""

def __init__(
self,
params,
lr: ScalarOrSchedule,
decay: float = 0.9,
eps: float = 1e-8,
initial_scale: float = 0.,
centered: bool = False,
momentum: Union[float, None] = None,
nesterov: bool = False
):
"""The `init` function.
Args:
params (iterable):
An iterable of `torch.Tensor`s. Specifies what Tensors should be
optimized.
args:
For other arguments, see `alias.rmsprop`.
"""

super().__init__(
params,
rmsprop(
lr=lr,
decay=decay,
eps=eps,
initial_scale=initial_scale,
centered=centered,
momentum=momentum,
nesterov=nesterov
)
)
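
The `impl` argument documented above is any `GradientTransformation`, so the base class also composes with the lower-level building blocks. A hedged sketch that chains gradient clipping with SGD; `combine.chain` and `clip.clip_grad_norm` are assumed helper names inside the re-exported `combine` and `clip` modules and are not shown in this diff:

import torch

import torchopt

net = torch.nn.Linear(4, 1)

# Build a custom GradientTransformation and hand it to the generic base class,
# mirroring the docstring: an alias.py optimizer or a combine.py `chain`.
impl = torchopt.combine.chain(
    torchopt.clip.clip_grad_norm(max_norm=1.0),  # assumed helper name
    torchopt.sgd(lr=1e-2),
)
optimizer = torchopt.Optimizer(net.parameters(), impl)
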
19 changes: 19 additions & 0 deletions torchopt/_src/optimizer/meta/__init__.py
@@ -0,0 +1,19 @@
# Copyright 2022 MetaOPT Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

from torchopt._src.optimizer.meta.base import MetaOptimizer
from torchopt._src.optimizer.meta.adam import MetaAdam
from torchopt._src.optimizer.meta.rmsprop import MetaRMSProp
from torchopt._src.optimizer.meta.sgd import MetaSGD
56 changes: 56 additions & 0 deletions torchopt/_src/optimizer/meta/adam.py
@@ -0,0 +1,56 @@
# Copyright 2022 MetaOPT Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

from torchopt._src.alias import adam
from torchopt._src.optimizer.meta.base import MetaOptimizer
from torchopt._src.typing import ScalarOrSchedule


class MetaAdam(MetaOptimizer):
"""The classic Adam optimizer."""

def __init__(
self,
net,
lr: ScalarOrSchedule,
b1: float = 0.9,
b2: float = 0.999,
eps: float = 1e-8,
eps_root: float = 0.0,
moment_requires_grad: bool = True,
use_accelerated_op: bool = False
):
"""The `init` function.
Args:
net (nn.Module):
A network whose parameters should be optimized.
args:
For other arguments, see `alias.adam`; here we set `moment_requires_grad=True`
so that tensors such as the momentum buffers stay differentiable.
"""

super().__init__(
net,
adam(
lr=lr,
b1=b1,
b2=b2,
eps=eps,
eps_root=eps_root,
moment_requires_grad=moment_requires_grad,
use_accelerated_op=use_accelerated_op
)
)
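
`MetaAdam` differs from `Adam` in two ways visible here: it wraps a whole `nn.Module` rather than a parameter iterable, and it defaults to `moment_requires_grad=True` so the optimizer state stays on the autograd tape. A meta-learning sketch under the assumption that `MetaOptimizer` (defined in `meta/base.py`, which is not part of the hunks shown) exposes a `step(loss)` method that applies a differentiable in-place update:

import torch
import torch.nn.functional as F

import torchopt

net = torch.nn.Linear(4, 1)
meta_optimizer = torchopt.MetaAdam(net, lr=1e-1)  # takes the module, not .parameters()

x, y = torch.randn(8, 4), torch.randn(8, 1)

# Inner step: the update itself stays differentiable because the momentum
# buffers are created with moment_requires_grad=True.
inner_loss = F.mse_loss(net(x), y)
meta_optimizer.step(inner_loss)  # assumed MetaOptimizer API

# Outer step: gradients of the post-update loss can flow back through the
# inner update, e.g. into meta-parameters of an outer model.
outer_loss = F.mse_loss(net(x), y)
outer_loss.backward()
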