Skip to content

Commit

Permalink
update ptl import
Browse files — browse the repository at this point in the history
Signed-off-by: Maanu Grover <[email protected]>
maanug-nv committed Nov 18, 2024
1 parent 35e5bd7 commit 5e711ed
Show file tree
Hide file tree
Showing 12 changed files with 13 additions and 13 deletions.
2 changes: 1 addition & 1 deletion examples/nlp/gpt/serve_reward_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@
# limitations under the License.

import torch
from pytorch_lightning.trainer.trainer import Trainer
from lightning.pytorch.trainer.trainer import Trainer

from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy
from nemo.core.config import hydra_runner
Expand Down
2 changes: 1 addition & 1 deletion nemo_aligner/models/nlp/gpt/gpt_sft_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@
from megatron.core.num_microbatches_calculator import get_micro_batch_size, get_num_microbatches
from megatron.core.pipeline_parallel.schedules import get_forward_backward_func
from omegaconf.dictconfig import DictConfig
from pytorch_lightning.trainer.trainer import Trainer
from lightning.pytorch.trainer.trainer import Trainer

from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
from nemo.collections.nlp.modules.common.megatron.utils import get_iterator_k_split
Expand Down
2 changes: 1 addition & 1 deletion nemo_aligner/models/nlp/gpt/megatron_gpt_critic.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@
from megatron.core.pipeline_parallel.schedules import get_forward_backward_func
from megatron.core.transformer.module import Float16Module
from omegaconf.dictconfig import DictConfig
from pytorch_lightning.trainer.trainer import Trainer
from lightning.pytorch.trainer.trainer import Trainer

from nemo.collections.nlp.modules.common.megatron.utils import (
average_losses_across_data_parallel_group,
Expand Down
2 changes: 1 addition & 1 deletion nemo_aligner/models/nlp/gpt/megatron_gpt_dpo_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@
from megatron.core.pipeline_parallel.schedules import get_forward_backward_func
from megatron.core.utils import divide
from omegaconf.dictconfig import DictConfig
from pytorch_lightning.trainer.trainer import Trainer
from lightning.pytorch.trainer.trainer import Trainer

from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
from nemo.collections.nlp.modules.common.megatron.utils import (
Expand Down
2 changes: 1 addition & 1 deletion nemo_aligner/models/nlp/gpt/megatron_gpt_kto_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@
from megatron.core.pipeline_parallel.schedules import get_forward_backward_func
from megatron.core.utils import divide
from omegaconf.dictconfig import DictConfig
from pytorch_lightning.trainer.trainer import Trainer
from lightning.pytorch.trainer.trainer import Trainer

from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
from nemo.collections.nlp.modules.common.megatron.utils import (
Expand Down
2 changes: 1 addition & 1 deletion nemo_aligner/models/nlp/gpt/megatron_gpt_ppo_actor.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@
from megatron.core.utils import divide
from omegaconf import OmegaConf
from omegaconf.dictconfig import DictConfig
from pytorch_lightning.trainer.trainer import Trainer
from lightning.pytorch.trainer.trainer import Trainer

from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
from nemo.collections.nlp.modules.common.megatron.utils import (
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@
from megatron.core.num_microbatches_calculator import get_num_microbatches
from megatron.core.pipeline_parallel.schedules import get_forward_backward_func
from omegaconf.dictconfig import DictConfig
from pytorch_lightning.trainer.trainer import Trainer
from lightning.pytorch.trainer.trainer import Trainer

from nemo.collections.nlp.modules.common.megatron.utils import (
average_losses_across_data_parallel_group,
Expand Down
2 changes: 1 addition & 1 deletion nemo_aligner/models/nlp/gpt/megatron_gpt_reward_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@
from megatron.core.pipeline_parallel.schedules import get_forward_backward_func
from megatron.core.utils import divide
from omegaconf.dictconfig import DictConfig
from pytorch_lightning.trainer.trainer import Trainer
from lightning.pytorch.trainer.trainer import Trainer

from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel, get_specs
from nemo.collections.nlp.modules.common.megatron.utils import (
Expand Down
2 changes: 1 addition & 1 deletion nemo_aligner/models/nlp/gpt/megatron_gpt_rs_actor.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@
from megatron.core.utils import divide
from omegaconf import OmegaConf
from omegaconf.dictconfig import DictConfig
from pytorch_lightning.trainer.trainer import Trainer
from lightning.pytorch.trainer.trainer import Trainer

from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
from nemo.collections.nlp.modules.common.megatron.utils import (
Expand Down
2 changes: 1 addition & 1 deletion nemo_aligner/models/nlp/gpt/megatron_gpt_spin_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@
from megatron.core.pipeline_parallel.schedules import get_forward_backward_func
from omegaconf import OmegaConf
from omegaconf.dictconfig import DictConfig
from pytorch_lightning.trainer.trainer import Trainer
from lightning.pytorch.trainer.trainer import Trainer

from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
from nemo.collections.nlp.modules.common.megatron.utils import (
Expand Down
4 changes: 2 additions & 2 deletions nemo_aligner/utils/train_script_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,8 +19,8 @@

from omegaconf import open_dict
from omegaconf.omegaconf import OmegaConf
from pytorch_lightning.trainer import call
from pytorch_lightning.trainer.states import TrainerFn
from lightning.pytorch.trainer import call
from lightning.pytorch.trainer.states import TrainerFn

from nemo.collections.nlp.parts.megatron_trainer_builder import MegatronTrainerBuilder
from nemo.collections.nlp.parts.peft_config import PEFT_CONFIG_MAP
Expand Down
2 changes: 1 addition & 1 deletion tests/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@

import pytest
from omegaconf import DictConfig
from pytorch_lightning import Trainer
from lightning.pytorch import Trainer

from nemo.collections.common.tokenizers.huggingface.auto_tokenizer import AutoTokenizer
from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
Expand Down

0 comments on commit 5e711ed

Please sign in to comment.