
Custom learning rate scheduler for TF2 and Keras

I'm trying to write a custom learning rate scheduler: cosine annealing with warmup. But I can't get it to work with either Keras or TensorFlow. Here is the code:


import tensorflow as tf
import numpy as np


def make_linear_lr(min_lr, max_lr, number_of_steps):
    # Linear warmup: ramp from min_lr to max_lr over number_of_steps steps.
    def gen_lr(step):
        return (max_lr - min_lr) / number_of_steps * step + min_lr
    return gen_lr


def make_cosine_anneal_lr(learning_rate, alpha, decay_steps):
    # Cosine annealing: decay from learning_rate towards alpha * learning_rate.
    def gen_lr(global_step):
        global_step = min(global_step, decay_steps)
        cosine_decay = 0.5 * (1 + np.cos(np.pi * global_step / decay_steps))
        decayed = (1 - alpha) * cosine_decay + alpha
        decayed_learning_rate = learning_rate * decayed
        return decayed_learning_rate
    return gen_lr


def make_cosine_annealing_with_warmup(min_lr, max_lr, number_of_steps, alpha, decay_steps):
    # Linear warmup for the first number_of_steps steps, then cosine annealing.
    gen_lr_1 = make_linear_lr(min_lr, max_lr, number_of_steps)
    gen_lr_2 = make_cosine_anneal_lr(max_lr, alpha, decay_steps)

    def gen_lr(global_step):
        if global_step < number_of_steps:
            return gen_lr_1(global_step)
        else:
            return gen_lr_2(global_step - number_of_steps)

    return gen_lr


class CosineAnnealingWithWarmUP(tf.keras.optimizers.schedules.LearningRateSchedule):
    def __init__(self, min_lr, max_lr, number_of_steps, alpha, decay_steps):
        super(CosineAnnealingWithWarmUP, self).__init__()
        self.gen_lr_ca = make_cosine_annealing_with_warmup(
            min_lr, max_lr, number_of_steps, alpha, decay_steps)

    def __call__(self, step):
        return tf.cast(self.gen_lr_ca(step), tf.float32)


learning_rate_fn = CosineAnnealingWithWarmUP(.0000001, 0.01, 10_000, 0, 150_000)

optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate_fn)
When I try to use it with TensorFlow, passing the optimizer into get_model_train_step_function, it works if I remove the @tf.function decorator. But with @tf.function it fails with: OperatorNotAllowedInGraphError: using a `tf.Tensor` as a Python `bool` is not allowed: AutoGraph did convert this function. This might indicate you are trying to use an unsupported feature.
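I can reproduce the same error in isolation with just Python's builtin min() on a tensor (a minimal sketch; the function name is only for illustration):

import tensorflow as tf

@tf.function
def clip_step(step):
    # min() compares the operands and then calls bool() on the resulting
    # tensor -- exactly the operation that graph tracing disallows.
    return min(step, 10)

clip_step(tf.constant(5))  # raises OperatorNotAllowedInGraphError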


How should I write a custom learning rate scheduler? Also, I want to use this schedule with Keras, but there it doesn't work at all.


千万里不及你
1 Answer

叮当猫咪

You need to drop the numpy calls and replace the Python conditionals ("if", "min") with TensorFlow operators:

def make_cosine_anneal_lr(learning_rate, alpha, decay_steps):
    def gen_lr(global_step):
        # global_step = min(global_step, decay_steps)
        global_step = tf.minimum(global_step, decay_steps)
        cosine_decay = 0.5 * (1 + tf.cos(3.1415926 * global_step / decay_steps))  # changed np.pi to 3.14
        decayed = (1 - alpha) * cosine_decay + alpha
        decayed_learning_rate = learning_rate * decayed
        return decayed_learning_rate
    return gen_lr

def make_cosine_annealing_with_warmup(min_lr, max_lr, number_of_steps, alpha, decay_steps):
    gen_lr_1 = make_linear_lr(min_lr, max_lr, number_of_steps)
    gen_lr_2 = make_cosine_anneal_lr(max_lr, alpha, decay_steps)

    def gen_lr(global_step):
        # if global_step < number_of_steps:
        #     return gen_lr_1(global_step)
        # else:
        #     return gen_lr_2(global_step - number_of_steps)
        a = global_step < number_of_steps
        a = tf.cast(a, tf.float32)
        b = 1. - a
        return a * gen_lr_1(global_step) + b * gen_lr_2(global_step - number_of_steps)

    return gen_lr

Such a schedule works with Keras.
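A quick end-to-end check of the fixed schedule in Keras (a minimal sketch: the toy model is illustrative, and the float32 cast is an added assumption, since Keras calls the schedule with the optimizer's int64 iteration counter and the warmup arithmetic needs floats):

import tensorflow as tf

class CosineAnnealingWithWarmUP(tf.keras.optimizers.schedules.LearningRateSchedule):
    def __init__(self, min_lr, max_lr, number_of_steps, alpha, decay_steps):
        super().__init__()
        self.gen_lr_ca = make_cosine_annealing_with_warmup(
            min_lr, max_lr, number_of_steps, alpha, decay_steps)

    def __call__(self, step):
        # Keras passes optimizer.iterations (int64); cast before the float math.
        return self.gen_lr_ca(tf.cast(step, tf.float32))

learning_rate_fn = CosineAnnealingWithWarmUP(1e-7, 0.01, 10_000, 0, 150_000)
model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(8,))])
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=learning_rate_fn),
              loss="mse")
model.fit(tf.zeros((16, 8)), tf.zeros((16, 1)), epochs=1)  # traces without error

Blending the two branches arithmetically, as the answer does, is an alternative to tf.cond: both are graph-compatible, but the arithmetic form evaluates both branches every step and simply zeroes one of them out.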