class YellowFin extends GradientDescent
Linear Supertypes
- GradientDescent
- Optimizer
- AnyRef
- Any
Instance Constructors
- new YellowFin(learningRate: Float = 1.0f, decay: Schedule[Float] = FixedSchedule[Float](), momentum: Float = 0.0f, beta: Float = 0.999f, curvatureWindowWidth: Int = 20, zeroDebias: Boolean = true, sparsityDebias: Boolean = true, useNesterov: Boolean = false, useLocking: Boolean = false, learningRateSummaryTag: String = null, name: String = "YellowFin")
- Attributes
- protected
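
The constructor is protected, so instances are normally obtained through the companion object. A minimal sketch, assuming the usual tensorflow_scala import and a companion `apply` that mirrors the constructor's parameters (the import path is an assumption based on where tensorflow_scala keeps its optimizers):

```scala
import org.platanios.tensorflow.api._
import org.platanios.tensorflow.api.ops.training.optimizers.YellowFin

// YellowFin auto-tunes momentum and learning rate, so these values are only
// starting points; `curvatureWindowWidth` controls the window used by the
// curvature-range estimator described further down this page.
val optimizer = YellowFin(
  learningRate = 1.0f,
  beta = 0.999f,
  curvatureWindowWidth = 20,
  zeroDebias = true,
  useNesterov = false)
```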
Value Members
- def applyDense[T, I](gradient: Output[T], variable: variables.Variable[T], iteration: Option[variables.Variable[I]])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): UntypedOp
- Definition Classes
- YellowFin → GradientDescent → Optimizer
- def applyGradients[T, I](gradientsAndVariables: Seq[(OutputLike[T], variables.Variable[Any])], iteration: Option[variables.Variable[I]] = None, name: String = this.name)(implicit arg0: core.types.TF[T], arg1: LongDefault[I], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): UntypedOp
- def applySparse[T, I](gradient: OutputIndexedSlices[T], variable: variables.Variable[T], iteration: Option[variables.Variable[I]])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): UntypedOp
- Definition Classes
- YellowFin → GradientDescent → Optimizer
- def applySparseDuplicateIndices[T, I](gradient: OutputIndexedSlices[T], variable: variables.Variable[T], iteration: Option[variables.Variable[I]])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): UntypedOp
- Definition Classes
- GradientDescent → Optimizer
- val beta: Float
- var betaTensor: Output[Float]
- Attributes
- protected
- def computeGradients[T](loss: Output[T], lossGradients: Seq[OutputLike[T]] = null, variables: Set[variables.Variable[Any]] = null, gradientsGatingMethod: GatingMethod = Gradients.OpGating, gradientsAggregationMethod: AggregationMethod = Gradients.AddAggregationMethod, colocateGradientsWithOps: Boolean = false)(implicit arg0: core.types.TF[T], arg1: core.types.IsFloatOrDouble[T]): Seq[(OutputLike[T], variables.Variable[Any])]
- Definition Classes
- Optimizer
- Annotations
- @throws(classOf[java.lang.IllegalArgumentException])
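
`computeGradients` and `applyGradients` are the two halves of what `minimize` does in one call. A hedged sketch, where `loss` is a hypothetical scalar `Output[Float]` and `step` a hypothetical `Long`-typed iteration variable from a real model:

```scala
// Phase 1: differentiate the loss with respect to all trainable variables.
val gradientsAndVariables = optimizer.computeGradients(
  loss,
  colocateGradientsWithOps = true) // place gradient ops next to forward ops

// Phase 2: apply the (gradient, variable) pairs; the iteration counter lets
// the optimizer track its tuning step.
val trainOp = optimizer.applyGradients(
  gradientsAndVariables,
  iteration = Some(step))
```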
- def createSlots(variables: Seq[variables.Variable[Any]]): Unit
- Definition Classes
- YellowFin → GradientDescent → Optimizer
- def curvatureRange(gradNormSquaredSum: Output[Float], sparsityAvg: Option[Output[Float]]): (Output[Float], Output[Float])
- Attributes
- protected
- var curvatureWindow: variables.Variable[Float]
- Attributes
- protected
- val curvatureWindowWidth: Int
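
`curvatureRange`, together with `curvatureWindow` and `curvatureWindowWidth`, implements the curvature-range estimator of the YellowFin paper (Zhang and Mitliagkas, 2017). Restated as a sketch, with w = `curvatureWindowWidth` and the two extrema subsequently smoothed by (optionally zero-debiased) exponential moving averages:

```latex
h_t = \lVert \nabla f(x_t) \rVert^2, \qquad
h_{\max,t} = \max_{t - w \le i \le t} h_i, \qquad
h_{\min,t} = \min_{t - w \le i \le t} h_i
```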
- val decay: Schedule[Float]
- Definition Classes
- YellowFin → GradientDescent
- def distanceToOptimum(gradNormSquaredSum: Output[Float], gradNormSquaredAvg: Output[Float], sparsityAvg: Option[Output[Float]]): Output[Float]
- Attributes
- protected
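
Per the same paper, `distanceToOptimum` estimates the distance D to the optimum as a ratio of two moving averages: the averaged gradient norm over the averaged curvature. As a sketch:

```latex
D_t \approx \frac{\overline{\lVert g \rVert}_t}{\overline{h}_t},
\qquad \overline{h}_t = \text{EMA of } h_t = \lVert g_t \rVert^2
```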
- var doTune: Output[Boolean]
- Attributes
- protected
- def finish(updateOps: Set[UntypedOp], nameScope: String): UntypedOp
- Definition Classes
- Optimizer
- def getLearningRate[V, I](variable: variables.Variable[V], iteration: Option[variables.Variable[I]])(implicit arg0: core.types.TF[V], arg1: core.types.TF[I], arg2: core.types.IsIntOrLong[I]): Output[V]
- Attributes
- protected
- Definition Classes
- YellowFin → GradientDescent
- def getMomentum[V](variable: variables.Variable[V])(implicit arg0: core.types.TF[V]): Output[V]
- Attributes
- protected
- Definition Classes
- YellowFin → GradientDescent
- final def getNonSlotVariable[T](name: String, graph: core.Graph = null): variables.Variable[T]
- Attributes
- protected
- Definition Classes
- Optimizer
- final def getNonSlotVariables: Iterable[variables.Variable[Any]]
- Attributes
- protected
- Definition Classes
- Optimizer
- final def getOrCreateNonSlotVariable[T](name: String, initialValue: tensors.Tensor[T], colocationOps: Set[UntypedOp] = Set.empty, ignoreExisting: Boolean = false)(implicit arg0: core.types.TF[T]): variables.Variable[T]
- Attributes
- protected
- Definition Classes
- Optimizer
- final def getSlot[T, R](name: String, variable: variables.Variable[T])(implicit arg0: core.types.TF[R]): variables.Variable[R]
- Attributes
- protected
- Definition Classes
- Optimizer
- final def getSlot[T, R](name: String, variable: variables.Variable[T], dataType: core.types.DataType[R], initializer: Initializer, shape: core.Shape, variableScope: String)(implicit arg0: core.types.TF[R]): variables.Variable[R]
- Attributes
- protected
- Definition Classes
- Optimizer
- def gradientsSparsity(gradients: Seq[Output[Float]]): Option[Output[Float]]
- Attributes
- protected
- def gradientsVariance(gradients: Seq[OutputLike[Float]], gradNormSquaredAvg: Output[Float], sparsityAvg: Option[Output[Float]]): Output[Float]
- Attributes
- protected
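
`gradientsVariance` and `gradientsSparsity` supply the remaining statistics the tuner needs. Per the paper, the gradient variance is estimated from the moving averages of g and of its coordinate-wise square; `gradientsSparsity` estimates the average fraction of non-zero gradient entries, used to debias the statistics when `sparsityDebias` is set. A sketch of the variance estimate:

```latex
C_t = \overline{\lVert g \rVert^2}_t - \bigl\lVert \overline{g}_t \bigr\rVert^2
    = \sum_i \left( \overline{g_i^2}_t - \overline{g_i}_t^{\,2} \right)
```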
- val ignoreDuplicateSparseIndices: Boolean
- Definition Classes
- GradientDescent → Optimizer
- var incrementStepOp: UntypedOp
- Attributes
- protected
- val learningRate: Float
- Definition Classes
- YellowFin → GradientDescent
- var learningRateFactorVariable: variables.Variable[Float]
- Attributes
- protected
- val learningRateSummaryTag: String
- Definition Classes
- YellowFin → GradientDescent
- var learningRateTensor: Output[Float]
- Attributes
- protected
- Definition Classes
- GradientDescent
- var learningRateVariable: variables.Variable[Float]
- Attributes
- protected
- def minimize[T, I](loss: Output[T], lossGradients: Seq[OutputLike[T]] = null, variables: Set[variables.Variable[Any]] = null, gradientsGatingMethod: GatingMethod = Gradients.OpGating, gradientsAggregationMethod: AggregationMethod = Gradients.AddAggregationMethod, colocateGradientsWithOps: Boolean = false, iteration: Option[variables.Variable[I]] = None, name: String = "Minimize")(implicit arg0: core.types.TF[T], arg1: core.types.IsFloatOrDouble[T], arg2: LongDefault[I], arg3: core.types.TF[I], arg4: core.types.IsIntOrLong[I]): UntypedOp
- Definition Classes
- Optimizer
- Annotations
- @throws(classOf[java.lang.IllegalArgumentException])
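
`minimize` is the one-call form, composing `computeGradients` and `applyGradients`. A hedged sketch, again with `loss` standing in for a real model's scalar loss and session handling following the usual tensorflow_scala pattern:

```scala
// Build the training op once, then run it once per training step.
val trainOp = optimizer.minimize(loss, name = "Train")
val session = Session()
session.run(targets = trainOp)
```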
- val momentum: Float
- Definition Classes
- YellowFin → GradientDescent
- var momentumTensor: Output[Float]
- Attributes
- protected
- Definition Classes
- GradientDescent
- var momentumVariable: variables.Variable[Float]
- Attributes
- protected
- var movingAverage: ExponentialMovingAverage
- Attributes
- protected
- val name: String
- Definition Classes
- YellowFin → GradientDescent → Optimizer
- final val nonSlotVariables: Map[(String, Option[core.Graph]), variables.Variable[Any]]
- Attributes
- protected
- Definition Classes
- Optimizer
- def prepare[I](iteration: Option[variables.Variable[I]])(implicit arg0: core.types.TF[I], arg1: core.types.IsIntOrLong[I]): Unit
- Definition Classes
- YellowFin → GradientDescent → Optimizer
- final def slotNames: Set[String]
- Attributes
- protected
- Definition Classes
- Optimizer
- final val slots: Map[String, Map[variables.Variable[Any], variables.Variable[Any]]]
- Attributes
- protected
- Definition Classes
- Optimizer
- val sparsityDebias: Boolean
- final def state: Seq[variables.Variable[Any]]
- Definition Classes
- Optimizer
- var step: variables.Variable[Int]
- Attributes
- protected
- val useLocking: Boolean
- Definition Classes
- YellowFin → GradientDescent → Optimizer
- val useNesterov: Boolean
- Definition Classes
- YellowFin → GradientDescent
- def yellowFinUpdate[T](gradientsAndVariables: Seq[(OutputLike[T], variables.Variable[Any])])(implicit arg0: core.types.TF[T]): UntypedOp
- Attributes
- protected
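
`yellowFinUpdate` wires the statistics above into the paper's single-step tuner. Restating its closed form as a sketch (not the verbatim implementation), with h_min, h_max, D, and C as estimated by the members above and x = sqrt(mu):

```latex
\mu_p = \Bigl( \arg\min_{x} \; x^2 D^2 + \frac{(1 - x)^4 C}{h_{\min}^2} \Bigr)^{2},
\qquad
\mu_t = \max\!\left( \mu_p,\;
  \left( \frac{\sqrt{h_{\max}/h_{\min}} - 1}{\sqrt{h_{\max}/h_{\min}} + 1} \right)^{2} \right),
\qquad
\alpha_t = \frac{(1 - \sqrt{\mu_t})^{2}}{h_{\min}}
```

The resulting momentum and learning rate are then smoothed with the `beta`-weighted moving averages before being handed to the underlying momentum update.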
- val zeroDebias: Boolean
- final def zerosSlot[T](name: String, variable: variables.Variable[T], variableScope: String)(implicit arg0: core.types.TF[T]): variables.Variable[T]
- Attributes
- protected
- Definition Classes
- Optimizer