Skip to content

lr_scheduler_from_optim_params setting #288

@BraveBoBo

Description

Configuring the `cosine` scheduler in `lr_scheduler_from_optim_params` crashes: `num_training_steps = num_train_batches * num_epochs` raises `TypeError: unsupported operand type(s) for *: 'Config' and 'Config'` (see traceback below), because both operands are still `Config` wrappers — presumably they need to be unwrapped to plain integers (e.g. `int(...)`) before multiplying.

@BraveBoBo
    elif lr_scheduler_type == "cosine":
        assert net_optim_params["learning_rate"]["step_every_batch"]
        # source: https://github.com/huggingface/diffusers/blob/ee7e141d805b0d87ad207872060ae1f15ce65943/src/diffusers/optimization.py#L154
        num_warmup_steps = net_optim_params["learning_rate"].get("warmup_steps", 0)
        num_training_steps = num_train_batches * num_epochs
        num_cycles = net_optim_params["learning_rate"].get("num_cycles", 0.5) # number of cosine cycles (0 to 2pi) in the LR schedule, default to half-cycle 
        # Piecewise schedule: linear warmup for the first `num_warmup_steps`
        # steps, then cosine decay for the remainder of training.
        def lr_lambda(current_step: int) -> float:
            """Return the multiplicative LR factor for `current_step`.

            Closes over `num_warmup_steps`, `num_training_steps`, and
            `num_cycles` from the enclosing scope; passed to LambdaLR, which
            multiplies the base learning rate by this factor each step.
            """
            # Warmup phase: factor ramps linearly from 0 toward 1.
            # max(1, ...) guards against division by zero when warmup is 0.
            if current_step < num_warmup_steps:
                return float(current_step) / float(max(1, num_warmup_steps))
            # Fraction of the post-warmup training completed, in [0, 1].
            progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
            # Cosine curve spanning `num_cycles` full cycles (default 0.5, a
            # single half-cycle decaying 1 -> 0); clamped at 0.0 so the factor
            # never goes negative if stepping continues past num_training_steps.
            return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))
        return optim.lr_scheduler.LambdaLR(optimizer, lr_lambda)

Traceback (most recent call last):
File "/home/libo/project/VectorizedCollisionLab/scripts/imitation_learning/robomimic/train.py", line 428, in main
train(config, device, log_dir, ckpt_dir, video_dir)
File "/home/libo/project/VectorizedCollisionLab/scripts/imitation_learning/robomimic/train.py", line 220, in train
model = algo_factory(
^^^^^^^^^^^^^
File "/home/libo/project/VectorizedCollisionLab/third_party/robomimic/robomimic/algo/algo.py", line 78, in algo_factory
return algo_cls(
^^^^^^^^^
File "/home/libo/project/VectorizedCollisionLab/third_party/robomimic/robomimic/algo/algo.py", line 133, in init
self._create_optimizers()
File "/home/libo/project/VectorizedCollisionLab/third_party/robomimic/robomimic/algo/algo.py", line 195, in _create_optimizers
self.lr_schedulers[k] = TorchUtils.lr_scheduler_from_optim_params(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/libo/project/VectorizedCollisionLab/third_party/robomimic/robomimic/utils/torch_utils.py", line 170, in lr_scheduler_from_optim_params
num_training_steps = num_train_batches * num_epochs
~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~
TypeError: unsupported operand type(s) for *: 'Config' and 'Config'

Metadata

Metadata

Assignees

No one assigned

    Labels

    No labels

    Type

    No type

    Projects

    No projects

    Milestone

    No milestone

    Relationships

    None yet

    Development

    No branches or pull requests

    Issue actions