diff --git a/src/Microsoft.ML.StandardLearners/Standard/LinearClassificationTrainer.cs b/src/Microsoft.ML.StandardLearners/Standard/LinearClassificationTrainer.cs
index 56a3663054..6a2e18dbda 100644
--- a/src/Microsoft.ML.StandardLearners/Standard/LinearClassificationTrainer.cs
+++ b/src/Microsoft.ML.StandardLearners/Standard/LinearClassificationTrainer.cs
@@ -222,6 +222,22 @@ internal virtual void Check(IHostEnvironment env)
             }
         }
 
+        internal const string SDCADetailedSummary = @"This trainer is based on the Stochastic Dual Coordinate
+Ascent (SDCA) method, a state-of-the-art optimization technique for convex objective functions.
+The algorithm can be scaled for use on large out-of-memory data sets due to a semi-asynchronous implementation
+that supports multi-threading.
+Convergence is underwritten by periodically enforcing synchronization between primal and dual updates in a separate thread.
+Several choices of loss functions are also provided.
+The SDCA method combines several of the best properties and capabilities of logistic regression and SVM algorithms.
+For more information on SDCA, see:
+Scaling Up Stochastic Dual Coordinate Ascent.
+Stochastic Dual Coordinate Ascent Methods for Regularized Loss Minimization.
+Note that SDCA is a stochastic and streaming optimization algorithm.
+The results depend on the order of the training data. For reproducible results, it is recommended that one set `shuffle` to
+`False` and `NumThreads` to `1`.
+Elastic net regularization can be specified by the l2_weight and l1_weight parameters. Note that the l2_weight has an effect on the rate of convergence.
+In general, the larger the l2_weight, the faster SDCA converges.";
+
         // The order of these matter, since they are used as indices into arrays.
         protected enum MetricKind
         {
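As a usage illustration (not part of this change), the sketch below shows how a caller might configure SDCA for the reproducible runs the summary recommends. The `Shuffle` and `NumThreads` options come from the summary itself; the `StochasticDualCoordinateAscentClassifier` class is the one generated in CSharpApi.cs later in this diff, and the `L2Const`/`L1Threshold` property names are assumptions that may differ from the actual generated surface.

using Microsoft.ML;
using Microsoft.ML.Trainers;

public static class SdcaReproducibilityExample
{
    public static LearningPipeline BuildPipeline()
    {
        // Shuffle = false and NumThreads = 1 fix the order in which training
        // examples are processed, making the stochastic algorithm reproducible.
        var sdca = new StochasticDualCoordinateAscentClassifier
        {
            Shuffle = false,
            NumThreads = 1,
            L2Const = 1e-3f,   // larger L2 regularization => faster convergence (assumed name)
            L1Threshold = 0f   // raise above 0 for sparser, elastic-net solutions (assumed name)
        };

        var pipeline = new LearningPipeline();
        // ... add a data loader and feature transforms before the trainer ...
        pipeline.Add(sdca);
        return pipeline;
    }
}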
diff --git a/src/Microsoft.ML.StandardLearners/Standard/LogisticRegression/LbfgsPredictorBase.cs b/src/Microsoft.ML.StandardLearners/Standard/LogisticRegression/LbfgsPredictorBase.cs
index 89f4866228..5fe70de2f0 100644
--- a/src/Microsoft.ML.StandardLearners/Standard/LogisticRegression/LbfgsPredictorBase.cs
+++ b/src/Microsoft.ML.StandardLearners/Standard/LogisticRegression/LbfgsPredictorBase.cs
@@ -94,6 +94,28 @@ public abstract class ArgumentsBase : LearnerInputBaseWithWeight
             public bool EnforceNonNegativity = false;
         }
 
+        internal const string DetailedSummary = @"Logistic Regression is a classification method used to predict the value of a categorical dependent variable from its relationship to one or more independent variables assumed to have a logistic distribution.
+If the dependent variable has only two possible values (success/failure), then the logistic regression is binary.
+If the dependent variable has more than two possible values (blood type given diagnostic test results), then the logistic regression is multinomial.
+The optimization technique used for LogisticRegressionBinaryClassifier is the limited memory Broyden-Fletcher-Goldfarb-Shanno (L-BFGS).
+Both the L-BFGS and regular BFGS algorithms use quasi-Newton methods to estimate the computationally intensive Hessian matrix in the equation used by Newton's method to calculate steps.
+But the L-BFGS approximation uses only a limited amount of memory to compute the next step direction, so that it is especially suited for problems with a large number of variables.
+The memory_size parameter specifies the number of past positions and gradients to store for use in the computation of the next step.
+This learner can use elastic net regularization: a linear combination of L1 (lasso) and L2 (ridge) regularizations.
+Regularization is a method that can render an ill-posed problem more tractable by imposing constraints that provide information to supplement the data and that prevent overfitting by penalizing models with extreme coefficient values.
+This can improve the generalization of the model learned by selecting the optimal complexity in the bias-variance tradeoff. Regularization works by adding the penalty that is associated with coefficient values to the error of the hypothesis.
+An accurate model with extreme coefficient values would be penalized more, but a less accurate model with more conservative values would be penalized less. L1 and L2 regularization have different effects and uses that are complementary in certain respects.
+l1_weight: can be applied to sparse models, when working with high-dimensional data. It pulls small weights associated with features that are relatively unimportant towards 0.
+l2_weight: is preferable for data that is not sparse. It pulls large weights towards zero.
+Adding the ridge penalty to the regularization overcomes some of lasso's limitations. It can improve its predictive accuracy, for example, when the number of predictors is greater than the sample size. If x = l1_weight and y = l2_weight, ax + by = c defines the linear span of the regularization terms.
+The default values of x and y are both 1.
+Aggressive regularization can harm predictive capacity by excluding important variables from the model. So choosing the optimal values for the regularization parameters is important for the performance of the logistic regression model.
+Wikipedia: L-BFGS.
+Wikipedia: Logistic regression.
+Scalable Training of L1-Regularized Log-Linear Models.
+Test Run - L1 and L2 Regularization for Machine Learning.";
+
         protected int NumFeatures;
         protected VBuffer CurrentWeights;
         protected long NumGoodRows;
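For context, here is a minimal, self-contained sketch of the elastic net penalty the summary describes: the training loss is augmented with x * ||w||_1 + y * ||w||_2^2, where x = l1_weight and y = l2_weight (both defaulting to 1). The helper name is hypothetical; it is not code from this PR.

using System;
using System.Linq;

public static class ElasticNetExample
{
    // The combined L1 (lasso) + L2 (ridge) penalty added to the training error.
    public static double Penalty(double[] weights, double l1Weight = 1.0, double l2Weight = 1.0)
    {
        double l1 = weights.Sum(w => Math.Abs(w)); // lasso term: pulls small weights to 0
        double l2 = weights.Sum(w => w * w);       // ridge term: shrinks large weights
        return l1Weight * l1 + l2Weight * l2;
    }

    public static void Main()
    {
        var w = new[] { 0.01, -2.5, 0.0, 1.2 };
        // l1 = 3.71, l2 = 7.6901, so with default weights this prints ~11.4001.
        Console.WriteLine(Penalty(w));
    }
}

A model with extreme coefficients (like the -2.5 above) contributes disproportionately to the L2 term, which is exactly the penalization behavior the summary describes.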
+"; + protected int NumFeatures; protected VBuffer CurrentWeights; protected long NumGoodRows; diff --git a/src/Microsoft.ML.StandardLearners/Standard/LogisticRegression/LogisticRegression.cs b/src/Microsoft.ML.StandardLearners/Standard/LogisticRegression/LogisticRegression.cs index 5abc062bf7..6f4a1d9617 100644 --- a/src/Microsoft.ML.StandardLearners/Standard/LogisticRegression/LogisticRegression.cs +++ b/src/Microsoft.ML.StandardLearners/Standard/LogisticRegression/LogisticRegression.cs @@ -386,7 +386,7 @@ public override ParameterMixingCalibratedPredictor CreatePredictor() new PlattCalibrator(Host, -1, 0)); } - [TlcModule.EntryPoint(Name = "Trainers.LogisticRegressionBinaryClassifier", Desc = "Train a logistic regression binary model", UserName = UserNameValue, ShortName = ShortName)] + [TlcModule.EntryPoint(Name = "Trainers.LogisticRegressionBinaryClassifier", Desc = DetailedSummary, UserName = UserNameValue, ShortName = ShortName)] public static CommonOutputs.BinaryClassificationOutput TrainBinary(IHostEnvironment env, Arguments input) { Contracts.CheckValue(env, nameof(env)); diff --git a/src/Microsoft.ML.StandardLearners/Standard/LogisticRegression/MulticlassLogisticRegression.cs b/src/Microsoft.ML.StandardLearners/Standard/LogisticRegression/MulticlassLogisticRegression.cs index 51decafea5..8e9b03b831 100644 --- a/src/Microsoft.ML.StandardLearners/Standard/LogisticRegression/MulticlassLogisticRegression.cs +++ b/src/Microsoft.ML.StandardLearners/Standard/LogisticRegression/MulticlassLogisticRegression.cs @@ -961,7 +961,7 @@ public IRow GetStatsIRowOrNull(RoleMappedSchema schema) /// public partial class LogisticRegression { - [TlcModule.EntryPoint(Name = "Trainers.LogisticRegressionClassifier", Desc = "Train a logistic regression multi class model", UserName = MulticlassLogisticRegression.UserNameValue, ShortName = MulticlassLogisticRegression.ShortName)] + [TlcModule.EntryPoint(Name = "Trainers.LogisticRegressionClassifier", Desc = DetailedSummary, UserName = MulticlassLogisticRegression.UserNameValue, ShortName = MulticlassLogisticRegression.ShortName)] public static CommonOutputs.MulticlassClassificationOutput TrainMultiClass(IHostEnvironment env, MulticlassLogisticRegression.Arguments input) { Contracts.CheckValue(env, nameof(env)); diff --git a/src/Microsoft.ML.StandardLearners/Standard/Online/AveragedPerceptron.cs b/src/Microsoft.ML.StandardLearners/Standard/Online/AveragedPerceptron.cs index 1861821d1c..1164cbb5ae 100644 --- a/src/Microsoft.ML.StandardLearners/Standard/Online/AveragedPerceptron.cs +++ b/src/Microsoft.ML.StandardLearners/Standard/Online/AveragedPerceptron.cs @@ -37,6 +37,17 @@ public sealed class AveragedPerceptronTrainer : internal const string UserNameValue = "Averaged Perceptron"; internal const string ShortName = "ap"; internal const string Summary = "Perceptron is a binary classification algorithm that makes its predictions based on a linear function."; + internal const string DetailedSummary = @"Perceptron is a classification algorithm that makes its predictions based on a linear function. +I.e., for an instance with feature values f0, f1,..., f_D-1, , the prediction is given by the sign of sigma[0,D-1] ( w_i * f_i), where w_0, w_1,...,w_D-1 are the weights computed by the algorithm. +Perceptron is an online algorithm, i.e., it processes the instances in the training set one at a time. +The weights are initialized to be 0, or some random values. Then, for each example in the training set, the value of sigma[0, D-1] (w_i * f_i) is computed. 
diff --git a/src/Microsoft.ML.StandardLearners/Standard/SdcaMultiClass.cs b/src/Microsoft.ML.StandardLearners/Standard/SdcaMultiClass.cs
index f8d7db7998..b00bc1a4c5 100644
--- a/src/Microsoft.ML.StandardLearners/Standard/SdcaMultiClass.cs
+++ b/src/Microsoft.ML.StandardLearners/Standard/SdcaMultiClass.cs
@@ -386,7 +386,7 @@ protected override Float GetInstanceWeight(FloatLabelCursor cursor)
     /// 
     public static partial class Sdca
     {
-        [TlcModule.EntryPoint(Name = "Trainers.StochasticDualCoordinateAscentClassifier", Desc = "Train an SDCA multi class model", UserName = SdcaMultiClassTrainer.UserNameValue, ShortName = SdcaMultiClassTrainer.ShortName)]
+        [TlcModule.EntryPoint(Name = "Trainers.StochasticDualCoordinateAscentClassifier", Desc = SdcaMultiClassTrainer.SDCADetailedSummary, UserName = SdcaMultiClassTrainer.UserNameValue, ShortName = SdcaMultiClassTrainer.ShortName)]
         public static CommonOutputs.MulticlassClassificationOutput TrainMultiClass(IHostEnvironment env, SdcaMultiClassTrainer.Arguments input)
         {
             Contracts.CheckValue(env, nameof(env));
diff --git a/src/Microsoft.ML.StandardLearners/Standard/SdcaRegression.cs b/src/Microsoft.ML.StandardLearners/Standard/SdcaRegression.cs
index e8f5aeb04d..516c2c7fcb 100644
--- a/src/Microsoft.ML.StandardLearners/Standard/SdcaRegression.cs
+++ b/src/Microsoft.ML.StandardLearners/Standard/SdcaRegression.cs
@@ -131,7 +131,7 @@ protected override Float TuneDefaultL2(IChannel ch, int maxIterations, long rowC
     /// 
     public static partial class Sdca
    {
-        [TlcModule.EntryPoint(Name = "Trainers.StochasticDualCoordinateAscentRegressor", Desc = "Train an SDCA regression model", UserName = SdcaRegressionTrainer.UserNameValue, ShortName = SdcaRegressionTrainer.ShortName)]
+        [TlcModule.EntryPoint(Name = "Trainers.StochasticDualCoordinateAscentRegressor", Desc = SdcaRegressionTrainer.SDCADetailedSummary, UserName = SdcaRegressionTrainer.UserNameValue, ShortName = SdcaRegressionTrainer.ShortName)]
         public static CommonOutputs.RegressionOutput TrainRegression(IHostEnvironment env, SdcaRegressionTrainer.Arguments input)
         {
             Contracts.CheckValue(env, nameof(env));
diff --git a/src/Microsoft.ML/CSharpApi.cs b/src/Microsoft.ML/CSharpApi.cs
index 12402b3993..192e7f2b5a 100644
--- a/src/Microsoft.ML/CSharpApi.cs
+++ b/src/Microsoft.ML/CSharpApi.cs
@@ -4081,7 +4081,17 @@ namespace Trainers
     {
 
         /// <summary>
-        /// Train a Average perceptron.
+        /// Perceptron is a classification algorithm that makes its predictions based on a linear function.
+        /// I.e., for an instance with feature values f_0, f_1, ..., f_D-1, the prediction is given by the sign of sigma[0, D-1] (w_i * f_i), where w_0, w_1, ..., w_D-1 are the weights computed by the algorithm.
+        /// Perceptron is an online algorithm, i.e., it processes the instances in the training set one at a time.
+        /// The weights are initialized to be 0, or some random values. Then, for each example in the training set, the value of sigma[0, D-1] (w_i * f_i) is computed.
+        /// If this value has the same sign as the label of the current example, the weights remain the same. If they have opposite signs,
+        /// the weights vector is updated by either subtracting or adding (if the label is negative or positive, respectively) the feature vector of the current example,
+        /// multiplied by a factor 0 < a <= 1, called the learning rate. In a generalization of this algorithm, the weights are updated by adding the feature vector multiplied by the learning rate,
+        /// and by the gradient of some loss function (in the specific case described above, the loss is hinge loss, whose gradient is 1 when it is non-zero).
+        /// In Averaged Perceptron (AKA voted-perceptron), the weight vectors are stored,
+        /// together with a weight that counts the number of iterations each survived (this is equivalent to storing the weight vector after every iteration, regardless of whether it was updated or not).
+        /// The prediction is then calculated by taking the weighted average of all the sums sigma[0, D-1] (w_i * f_i) of the different weight vectors.
         /// </summary>
         public sealed partial class AveragedPerceptronBinaryClassifier : Microsoft.ML.Runtime.EntryPoints.CommonInputs.ITrainerInputWithLabel, Microsoft.ML.Runtime.EntryPoints.CommonInputs.ITrainerInput, Microsoft.ML.ILearningPipelineItem
         {
@@ -8265,7 +8275,26 @@ namespace Trainers
     {
 
         /// <summary>
-        /// Train a logistic regression binary model
+        /// Logistic Regression is a classification method used to predict the value of a categorical dependent variable from its relationship to one or more independent variables assumed to have a logistic distribution.
+        /// If the dependent variable has only two possible values (success/failure), then the logistic regression is binary.
+        /// If the dependent variable has more than two possible values (blood type given diagnostic test results), then the logistic regression is multinomial.
+        /// The optimization technique used for LogisticRegressionBinaryClassifier is the limited memory Broyden-Fletcher-Goldfarb-Shanno (L-BFGS).
+        /// Both the L-BFGS and regular BFGS algorithms use quasi-Newton methods to estimate the computationally intensive Hessian matrix in the equation used by Newton's method to calculate steps.
+        /// But the L-BFGS approximation uses only a limited amount of memory to compute the next step direction, so that it is especially suited for problems with a large number of variables.
+        /// The memory_size parameter specifies the number of past positions and gradients to store for use in the computation of the next step.
+        /// This learner can use elastic net regularization: a linear combination of L1 (lasso) and L2 (ridge) regularizations.
+        /// Regularization is a method that can render an ill-posed problem more tractable by imposing constraints that provide information to supplement the data and that prevent overfitting by penalizing models with extreme coefficient values.
+        /// This can improve the generalization of the model learned by selecting the optimal complexity in the bias-variance tradeoff. Regularization works by adding the penalty that is associated with coefficient values to the error of the hypothesis.
+        /// An accurate model with extreme coefficient values would be penalized more, but a less accurate model with more conservative values would be penalized less. L1 and L2 regularization have different effects and uses that are complementary in certain respects.
+        /// l1_weight: can be applied to sparse models, when working with high-dimensional data. It pulls small weights associated with features that are relatively unimportant towards 0.
+        /// l2_weight: is preferable for data that is not sparse. It pulls large weights towards zero.
+        /// Adding the ridge penalty to the regularization overcomes some of lasso's limitations. It can improve its predictive accuracy, for example, when the number of predictors is greater than the sample size. If x = l1_weight and y = l2_weight, ax + by = c defines the linear span of the regularization terms.
+        /// The default values of x and y are both 1.
+        /// Aggressive regularization can harm predictive capacity by excluding important variables from the model. So choosing the optimal values for the regularization parameters is important for the performance of the logistic regression model.
+        /// Wikipedia: L-BFGS.
+        /// Wikipedia: Logistic regression.
+        /// Scalable Training of L1-Regularized Log-Linear Models.
+        /// Test Run - L1 and L2 Regularization for Machine Learning.
         /// </summary>
         public sealed partial class LogisticRegressionBinaryClassifier : Microsoft.ML.Runtime.EntryPoints.CommonInputs.ITrainerInputWithWeight, Microsoft.ML.Runtime.EntryPoints.CommonInputs.ITrainerInputWithLabel, Microsoft.ML.Runtime.EntryPoints.CommonInputs.ITrainerInput, Microsoft.ML.ILearningPipelineItem
         {
@@ -8415,7 +8444,26 @@ namespace Trainers
     {
 
         /// <summary>
-        /// Train a logistic regression multi class model
+        /// Logistic Regression is a classification method used to predict the value of a categorical dependent variable from its relationship to one or more independent variables assumed to have a logistic distribution.
+        /// If the dependent variable has only two possible values (success/failure), then the logistic regression is binary.
+        /// If the dependent variable has more than two possible values (blood type given diagnostic test results), then the logistic regression is multinomial.
+        /// The optimization technique used for LogisticRegressionBinaryClassifier is the limited memory Broyden-Fletcher-Goldfarb-Shanno (L-BFGS).
+        /// Both the L-BFGS and regular BFGS algorithms use quasi-Newton methods to estimate the computationally intensive Hessian matrix in the equation used by Newton's method to calculate steps.
+        /// But the L-BFGS approximation uses only a limited amount of memory to compute the next step direction, so that it is especially suited for problems with a large number of variables.
+        /// The memory_size parameter specifies the number of past positions and gradients to store for use in the computation of the next step.
+        /// This learner can use elastic net regularization: a linear combination of L1 (lasso) and L2 (ridge) regularizations.
+        /// Regularization is a method that can render an ill-posed problem more tractable by imposing constraints that provide information to supplement the data and that prevent overfitting by penalizing models with extreme coefficient values.
+        /// This can improve the generalization of the model learned by selecting the optimal complexity in the bias-variance tradeoff. Regularization works by adding the penalty that is associated with coefficient values to the error of the hypothesis.
+        /// An accurate model with extreme coefficient values would be penalized more, but a less accurate model with more conservative values would be penalized less. L1 and L2 regularization have different effects and uses that are complementary in certain respects.
+        /// l1_weight: can be applied to sparse models, when working with high-dimensional data. It pulls small weights associated with features that are relatively unimportant towards 0.
+        /// l2_weight: is preferable for data that is not sparse. It pulls large weights towards zero.
+        /// Adding the ridge penalty to the regularization overcomes some of lasso's limitations. It can improve its predictive accuracy, for example, when the number of predictors is greater than the sample size. If x = l1_weight and y = l2_weight, ax + by = c defines the linear span of the regularization terms.
+        /// The default values of x and y are both 1.
+        /// Aggressive regularization can harm predictive capacity by excluding important variables from the model. So choosing the optimal values for the regularization parameters is important for the performance of the logistic regression model.
+        /// Wikipedia: L-BFGS.
+        /// Wikipedia: Logistic regression.
+        /// Scalable Training of L1-Regularized Log-Linear Models.
+        /// Test Run - L1 and L2 Regularization for Machine Learning.
         /// </summary>
         public sealed partial class LogisticRegressionClassifier : Microsoft.ML.Runtime.EntryPoints.CommonInputs.ITrainerInputWithWeight, Microsoft.ML.Runtime.EntryPoints.CommonInputs.ITrainerInputWithLabel, Microsoft.ML.Runtime.EntryPoints.CommonInputs.ITrainerInput, Microsoft.ML.ILearningPipelineItem
         {
@@ -9175,7 +9223,21 @@ namespace Trainers
     {
 
         /// <summary>
-        /// Train an SDCA multi class model
+        /// This trainer is based on the Stochastic Dual Coordinate
+        /// Ascent (SDCA) method, a state-of-the-art optimization technique for convex objective functions.
+        /// The algorithm can be scaled for use on large out-of-memory data sets due to a semi-asynchronous implementation
+        /// that supports multi-threading.
+        /// Convergence is underwritten by periodically enforcing synchronization between primal and dual updates in a separate thread.
+        /// Several choices of loss functions are also provided.
+        /// The SDCA method combines several of the best properties and capabilities of logistic regression and SVM algorithms.
+        /// For more information on SDCA, see:
+        /// Scaling Up Stochastic Dual Coordinate Ascent.
+        /// Stochastic Dual Coordinate Ascent Methods for Regularized Loss Minimization.
+        /// Note that SDCA is a stochastic and streaming optimization algorithm.
+        /// The results depend on the order of the training data. For reproducible results, it is recommended that one set `shuffle` to
+        /// `False` and `NumThreads` to `1`.
+        /// Elastic net regularization can be specified by the l2_weight and l1_weight parameters. Note that the l2_weight has an effect on the rate of convergence.
+        /// In general, the larger the l2_weight, the faster SDCA converges.
         /// </summary>
         public sealed partial class StochasticDualCoordinateAscentClassifier : Microsoft.ML.Runtime.EntryPoints.CommonInputs.ITrainerInputWithLabel, Microsoft.ML.Runtime.EntryPoints.CommonInputs.ITrainerInput, Microsoft.ML.ILearningPipelineItem
         {
@@ -9300,7 +9362,21 @@ namespace Trainers
     {
 
         /// <summary>
-        /// Train an SDCA regression model
+        /// This trainer is based on the Stochastic Dual Coordinate
+        /// Ascent (SDCA) method, a state-of-the-art optimization technique for convex objective functions.
+        /// The algorithm can be scaled for use on large out-of-memory data sets due to a semi-asynchronous implementation
+        /// that supports multi-threading.
+        /// Convergence is underwritten by periodically enforcing synchronization between primal and dual updates in a separate thread.
+        /// Several choices of loss functions are also provided.
+        /// The SDCA method combines several of the best properties and capabilities of logistic regression and SVM algorithms.
+        /// For more information on SDCA, see:
+        /// Scaling Up Stochastic Dual Coordinate Ascent.
+        /// Stochastic Dual Coordinate Ascent Methods for Regularized Loss Minimization.
+        /// Note that SDCA is a stochastic and streaming optimization algorithm.
+        /// The results depend on the order of the training data. For reproducible results, it is recommended that one set `shuffle` to
+        /// `False` and `NumThreads` to `1`.
+        /// Elastic net regularization can be specified by the l2_weight and l1_weight parameters. Note that the l2_weight has an effect on the rate of convergence.
+        /// In general, the larger the l2_weight, the faster SDCA converges.
         /// </summary>
         public sealed partial class StochasticDualCoordinateAscentRegressor : Microsoft.ML.Runtime.EntryPoints.CommonInputs.ITrainerInputWithLabel, Microsoft.ML.Runtime.EntryPoints.CommonInputs.ITrainerInput, Microsoft.ML.ILearningPipelineItem
         {
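The Desc strings above are also what the baseline files below capture. As a sketch of how they can be read back at runtime, the snippet below enumerates the entry-point catalog the same way the updated test at the end of this diff does (ModuleCatalog.CreateInstance, AllEntryPoints, Name, Description are all taken from that test; the namespaces are assumed).

using System;
using Microsoft.ML.Runtime;
using Microsoft.ML.Runtime.EntryPoints;

public static class EntryPointDescriptions
{
    // 'env' is an IHostEnvironment, the same type the entry points above accept.
    public static void Print(IHostEnvironment env)
    {
        var catalog = ModuleCatalog.CreateInstance(env);
        foreach (var entryPoint in catalog.AllEntryPoints())
            Console.WriteLine($"{entryPoint.Name}: {entryPoint.Description}");
    }
}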
diff --git a/test/BaselineOutput/Common/EntryPoints/core_ep-list.tsv b/test/BaselineOutput/Common/EntryPoints/core_ep-list.tsv
index ce4f2b09fa..ee0eb3de15 100644
--- a/test/BaselineOutput/Common/EntryPoints/core_ep-list.tsv
+++ b/test/BaselineOutput/Common/EntryPoints/core_ep-list.tsv
@@ -36,7 +36,7 @@ Models.Summarizer Summarize a linear regression predictor. Microsoft.ML.Runtime.
 Models.SweepResultExtractor Extracts the sweep result. Microsoft.ML.Runtime.EntryPoints.PipelineSweeperMacro ExtractSweepResult Microsoft.ML.Runtime.EntryPoints.PipelineSweeperMacro+ResultInput Microsoft.ML.Runtime.EntryPoints.PipelineSweeperMacro+Output
 Models.TrainTestBinaryEvaluator Train test for binary classification Microsoft.ML.Runtime.EntryPoints.TrainTestBinaryMacro TrainTestBinary Microsoft.ML.Runtime.EntryPoints.TrainTestBinaryMacro+Arguments Microsoft.ML.Runtime.EntryPoints.CommonOutputs+MacroOutput`1[Microsoft.ML.Runtime.EntryPoints.TrainTestBinaryMacro+Output]
 Models.TrainTestEvaluator General train test for any supported evaluator Microsoft.ML.Runtime.EntryPoints.TrainTestMacro TrainTest Microsoft.ML.Runtime.EntryPoints.TrainTestMacro+Arguments Microsoft.ML.Runtime.EntryPoints.CommonOutputs+MacroOutput`1[Microsoft.ML.Runtime.EntryPoints.TrainTestMacro+Output]
-Trainers.AveragedPerceptronBinaryClassifier Train a Average perceptron. Microsoft.ML.Runtime.Learners.AveragedPerceptronTrainer TrainBinary Microsoft.ML.Runtime.Learners.AveragedPerceptronTrainer+Arguments Microsoft.ML.Runtime.EntryPoints.CommonOutputs+BinaryClassificationOutput
+Trainers.AveragedPerceptronBinaryClassifier Perceptron is a classification algorithm that makes its predictions based on a linear function. I.e., for an instance with feature values f_0, f_1, ..., f_D-1, the prediction is given by the sign of sigma[0, D-1] (w_i * f_i), where w_0, w_1, ..., w_D-1 are the weights computed by the algorithm. Perceptron is an online algorithm, i.e., it processes the instances in the training set one at a time. The weights are initialized to be 0, or some random values. Then, for each example in the training set, the value of sigma[0, D-1] (w_i * f_i) is computed. If this value has the same sign as the label of the current example, the weights remain the same. If they have opposite signs, the weights vector is updated by either subtracting or adding (if the label is negative or positive, respectively) the feature vector of the current example, multiplied by a factor 0 < a <= 1, called the learning rate. In a generalization of this algorithm, the weights are updated by adding the feature vector multiplied by the learning rate, and by the gradient of some loss function (in the specific case described above, the loss is hinge loss, whose gradient is 1 when it is non-zero). In Averaged Perceptron (AKA voted-perceptron), the weight vectors are stored, together with a weight that counts the number of iterations each survived (this is equivalent to storing the weight vector after every iteration, regardless of whether it was updated or not). The prediction is then calculated by taking the weighted average of all the sums sigma[0, D-1] (w_i * f_i) of the different weight vectors. Microsoft.ML.Runtime.Learners.AveragedPerceptronTrainer TrainBinary Microsoft.ML.Runtime.Learners.AveragedPerceptronTrainer+Arguments Microsoft.ML.Runtime.EntryPoints.CommonOutputs+BinaryClassificationOutput
 Trainers.EnsembleBinaryClassifier Train binary ensemble. Microsoft.ML.Ensemble.EntryPoints.Ensemble CreateBinaryEnsemble Microsoft.ML.Runtime.Ensemble.EnsembleTrainer+Arguments Microsoft.ML.Runtime.EntryPoints.CommonOutputs+BinaryClassificationOutput
 Trainers.EnsembleClassification Train multiclass ensemble. Microsoft.ML.Ensemble.EntryPoints.Ensemble CreateMultiClassEnsemble Microsoft.ML.Runtime.Ensemble.MulticlassDataPartitionEnsembleTrainer+Arguments Microsoft.ML.Runtime.EntryPoints.CommonOutputs+MulticlassClassificationOutput
 Trainers.EnsembleRegression Train regression ensemble. Microsoft.ML.Ensemble.EntryPoints.Ensemble CreateRegressionEnsemble Microsoft.ML.Runtime.Ensemble.RegressionEnsembleTrainer+Arguments Microsoft.ML.Runtime.EntryPoints.CommonOutputs+RegressionOutput
@@ -51,15 +51,15 @@ Trainers.GeneralizedAdditiveModelBinaryClassifier Trains a gradient boosted stum
 Trainers.GeneralizedAdditiveModelRegressor Trains a gradient boosted stump per feature, on all features simultaneously, to fit target values using least-squares. It mantains no interactions between features. Microsoft.ML.Runtime.FastTree.Gam TrainRegression Microsoft.ML.Runtime.FastTree.RegressionGamTrainer+Arguments Microsoft.ML.Runtime.EntryPoints.CommonOutputs+RegressionOutput
 Trainers.KMeansPlusPlusClusterer K-means is a popular clustering algorithm. With K-means, the data is clustered into a specified number of clusters in order to minimize the within-cluster sum of squares. K-means++ improves upon K-means by using a better method for choosing the initial cluster centers. Microsoft.ML.Runtime.KMeans.KMeansPlusPlusTrainer TrainKMeans Microsoft.ML.Runtime.KMeans.KMeansPlusPlusTrainer+Arguments Microsoft.ML.Runtime.EntryPoints.CommonOutputs+ClusteringOutput
 Trainers.LinearSvmBinaryClassifier Train a linear SVM. Microsoft.ML.Runtime.Learners.LinearSvm TrainLinearSvm Microsoft.ML.Runtime.Learners.LinearSvm+Arguments Microsoft.ML.Runtime.EntryPoints.CommonOutputs+BinaryClassificationOutput
-Trainers.LogisticRegressionBinaryClassifier Train a logistic regression binary model Microsoft.ML.Runtime.Learners.LogisticRegression TrainBinary Microsoft.ML.Runtime.Learners.LogisticRegression+Arguments Microsoft.ML.Runtime.EntryPoints.CommonOutputs+BinaryClassificationOutput
-Trainers.LogisticRegressionClassifier Train a logistic regression multi class model Microsoft.ML.Runtime.Learners.LogisticRegression TrainMultiClass Microsoft.ML.Runtime.Learners.MulticlassLogisticRegression+Arguments Microsoft.ML.Runtime.EntryPoints.CommonOutputs+MulticlassClassificationOutput
+Trainers.LogisticRegressionBinaryClassifier Logistic Regression is a classification method used to predict the value of a categorical dependent variable from its relationship to one or more independent variables assumed to have a logistic distribution. If the dependent variable has only two possible values (success/failure), then the logistic regression is binary. If the dependent variable has more than two possible values (blood type given diagnostic test results), then the logistic regression is multinomial. The optimization technique used for LogisticRegressionBinaryClassifier is the limited memory Broyden-Fletcher-Goldfarb-Shanno (L-BFGS). Both the L-BFGS and regular BFGS algorithms use quasi-Newton methods to estimate the computationally intensive Hessian matrix in the equation used by Newton's method to calculate steps. But the L-BFGS approximation uses only a limited amount of memory to compute the next step direction, so that it is especially suited for problems with a large number of variables. The memory_size parameter specifies the number of past positions and gradients to store for use in the computation of the next step. This learner can use elastic net regularization: a linear combination of L1 (lasso) and L2 (ridge) regularizations. Regularization is a method that can render an ill-posed problem more tractable by imposing constraints that provide information to supplement the data and that prevent overfitting by penalizing models with extreme coefficient values. This can improve the generalization of the model learned by selecting the optimal complexity in the bias-variance tradeoff. Regularization works by adding the penalty that is associated with coefficient values to the error of the hypothesis. An accurate model with extreme coefficient values would be penalized more, but a less accurate model with more conservative values would be penalized less. L1 and L2 regularization have different effects and uses that are complementary in certain respects. l1_weight: can be applied to sparse models, when working with high-dimensional data. It pulls small weights associated with features that are relatively unimportant towards 0. l2_weight: is preferable for data that is not sparse. It pulls large weights towards zero. Adding the ridge penalty to the regularization overcomes some of lasso's limitations. It can improve its predictive accuracy, for example, when the number of predictors is greater than the sample size. If x = l1_weight and y = l2_weight, ax + by = c defines the linear span of the regularization terms. The default values of x and y are both 1. Aggressive regularization can harm predictive capacity by excluding important variables from the model. So choosing the optimal values for the regularization parameters is important for the performance of the logistic regression model. Wikipedia: L-BFGS. Wikipedia: Logistic regression. Scalable Training of L1-Regularized Log-Linear Models. Test Run - L1 and L2 Regularization for Machine Learning. Microsoft.ML.Runtime.Learners.LogisticRegression TrainBinary Microsoft.ML.Runtime.Learners.LogisticRegression+Arguments Microsoft.ML.Runtime.EntryPoints.CommonOutputs+BinaryClassificationOutput
+Trainers.LogisticRegressionClassifier Logistic Regression is a classification method used to predict the value of a categorical dependent variable from its relationship to one or more independent variables assumed to have a logistic distribution. If the dependent variable has only two possible values (success/failure), then the logistic regression is binary. If the dependent variable has more than two possible values (blood type given diagnostic test results), then the logistic regression is multinomial. The optimization technique used for LogisticRegressionBinaryClassifier is the limited memory Broyden-Fletcher-Goldfarb-Shanno (L-BFGS). Both the L-BFGS and regular BFGS algorithms use quasi-Newton methods to estimate the computationally intensive Hessian matrix in the equation used by Newton's method to calculate steps. But the L-BFGS approximation uses only a limited amount of memory to compute the next step direction, so that it is especially suited for problems with a large number of variables. The memory_size parameter specifies the number of past positions and gradients to store for use in the computation of the next step. This learner can use elastic net regularization: a linear combination of L1 (lasso) and L2 (ridge) regularizations. Regularization is a method that can render an ill-posed problem more tractable by imposing constraints that provide information to supplement the data and that prevent overfitting by penalizing models with extreme coefficient values. This can improve the generalization of the model learned by selecting the optimal complexity in the bias-variance tradeoff. Regularization works by adding the penalty that is associated with coefficient values to the error of the hypothesis. An accurate model with extreme coefficient values would be penalized more, but a less accurate model with more conservative values would be penalized less. L1 and L2 regularization have different effects and uses that are complementary in certain respects. l1_weight: can be applied to sparse models, when working with high-dimensional data. It pulls small weights associated with features that are relatively unimportant towards 0. l2_weight: is preferable for data that is not sparse. It pulls large weights towards zero. Adding the ridge penalty to the regularization overcomes some of lasso's limitations. It can improve its predictive accuracy, for example, when the number of predictors is greater than the sample size. If x = l1_weight and y = l2_weight, ax + by = c defines the linear span of the regularization terms. The default values of x and y are both 1. Aggressive regularization can harm predictive capacity by excluding important variables from the model. So choosing the optimal values for the regularization parameters is important for the performance of the logistic regression model. Wikipedia: L-BFGS. Wikipedia: Logistic regression. Scalable Training of L1-Regularized Log-Linear Models. Test Run - L1 and L2 Regularization for Machine Learning. Microsoft.ML.Runtime.Learners.LogisticRegression TrainMultiClass Microsoft.ML.Runtime.Learners.MulticlassLogisticRegression+Arguments Microsoft.ML.Runtime.EntryPoints.CommonOutputs+MulticlassClassificationOutput
 Trainers.NaiveBayesClassifier Train a MultiClassNaiveBayesTrainer. Microsoft.ML.Runtime.Learners.MultiClassNaiveBayesTrainer TrainMultiClassNaiveBayesTrainer Microsoft.ML.Runtime.Learners.MultiClassNaiveBayesTrainer+Arguments Microsoft.ML.Runtime.EntryPoints.CommonOutputs+MulticlassClassificationOutput
 Trainers.OnlineGradientDescentRegressor Train a Online gradient descent perceptron. Microsoft.ML.Runtime.Learners.OnlineGradientDescentTrainer TrainRegression Microsoft.ML.Runtime.Learners.OnlineGradientDescentTrainer+Arguments Microsoft.ML.Runtime.EntryPoints.CommonOutputs+RegressionOutput
 Trainers.PcaAnomalyDetector Train an PCA Anomaly model. Microsoft.ML.Runtime.PCA.RandomizedPcaTrainer TrainPcaAnomaly Microsoft.ML.Runtime.PCA.RandomizedPcaTrainer+Arguments Microsoft.ML.Runtime.EntryPoints.CommonOutputs+AnomalyDetectionOutput
 Trainers.PoissonRegressor Train an Poisson regression model. Microsoft.ML.Runtime.Learners.PoissonRegression TrainRegression Microsoft.ML.Runtime.Learners.PoissonRegression+Arguments Microsoft.ML.Runtime.EntryPoints.CommonOutputs+RegressionOutput
 Trainers.StochasticDualCoordinateAscentBinaryClassifier Train an SDCA binary model. Microsoft.ML.Runtime.Learners.Sdca TrainBinary Microsoft.ML.Runtime.Learners.LinearClassificationTrainer+Arguments Microsoft.ML.Runtime.EntryPoints.CommonOutputs+BinaryClassificationOutput
-Trainers.StochasticDualCoordinateAscentClassifier Train an SDCA multi class model Microsoft.ML.Runtime.Learners.Sdca TrainMultiClass Microsoft.ML.Runtime.Learners.SdcaMultiClassTrainer+Arguments Microsoft.ML.Runtime.EntryPoints.CommonOutputs+MulticlassClassificationOutput
-Trainers.StochasticDualCoordinateAscentRegressor Train an SDCA regression model Microsoft.ML.Runtime.Learners.Sdca TrainRegression Microsoft.ML.Runtime.Learners.SdcaRegressionTrainer+Arguments Microsoft.ML.Runtime.EntryPoints.CommonOutputs+RegressionOutput
+Trainers.StochasticDualCoordinateAscentClassifier This trainer is based on the Stochastic Dual Coordinate Ascent (SDCA) method, a state-of-the-art optimization technique for convex objective functions. The algorithm can be scaled for use on large out-of-memory data sets due to a semi-asynchronous implementation that supports multi-threading. Convergence is underwritten by periodically enforcing synchronization between primal and dual updates in a separate thread. Several choices of loss functions are also provided. The SDCA method combines several of the best properties and capabilities of logistic regression and SVM algorithms. For more information on SDCA, see: Scaling Up Stochastic Dual Coordinate Ascent. Stochastic Dual Coordinate Ascent Methods for Regularized Loss Minimization. Note that SDCA is a stochastic and streaming optimization algorithm. The results depend on the order of the training data. For reproducible results, it is recommended that one set `shuffle` to `False` and `NumThreads` to `1`. Elastic net regularization can be specified by the l2_weight and l1_weight parameters. Note that the l2_weight has an effect on the rate of convergence. In general, the larger the l2_weight, the faster SDCA converges. Microsoft.ML.Runtime.Learners.Sdca TrainMultiClass Microsoft.ML.Runtime.Learners.SdcaMultiClassTrainer+Arguments Microsoft.ML.Runtime.EntryPoints.CommonOutputs+MulticlassClassificationOutput
+Trainers.StochasticDualCoordinateAscentRegressor This trainer is based on the Stochastic Dual Coordinate Ascent (SDCA) method, a state-of-the-art optimization technique for convex objective functions. The algorithm can be scaled for use on large out-of-memory data sets due to a semi-asynchronous implementation that supports multi-threading. Convergence is underwritten by periodically enforcing synchronization between primal and dual updates in a separate thread. Several choices of loss functions are also provided. The SDCA method combines several of the best properties and capabilities of logistic regression and SVM algorithms. For more information on SDCA, see: Scaling Up Stochastic Dual Coordinate Ascent. Stochastic Dual Coordinate Ascent Methods for Regularized Loss Minimization. Note that SDCA is a stochastic and streaming optimization algorithm. The results depend on the order of the training data. For reproducible results, it is recommended that one set `shuffle` to `False` and `NumThreads` to `1`. Elastic net regularization can be specified by the l2_weight and l1_weight parameters. Note that the l2_weight has an effect on the rate of convergence. In general, the larger the l2_weight, the faster SDCA converges. Microsoft.ML.Runtime.Learners.Sdca TrainRegression Microsoft.ML.Runtime.Learners.SdcaRegressionTrainer+Arguments Microsoft.ML.Runtime.EntryPoints.CommonOutputs+RegressionOutput
 Trainers.StochasticGradientDescentBinaryClassifier Train an Hogwild SGD binary model. Microsoft.ML.Runtime.Learners.StochasticGradientDescentClassificationTrainer TrainBinary Microsoft.ML.Runtime.Learners.StochasticGradientDescentClassificationTrainer+Arguments Microsoft.ML.Runtime.EntryPoints.CommonOutputs+BinaryClassificationOutput
 Transforms.ApproximateBootstrapSampler Approximate bootstrap sampling. Microsoft.ML.Runtime.Data.BootstrapSample GetSample Microsoft.ML.Runtime.Data.BootstrapSampleTransform+Arguments Microsoft.ML.Runtime.EntryPoints.CommonOutputs+TransformOutput
 Transforms.BinaryPredictionScoreColumnsRenamer For binary prediction, it renames the PredictedLabel and Score columns to include the name of the positive class. Microsoft.ML.Runtime.EntryPoints.ScoreModel RenameBinaryPredictionScoreColumns Microsoft.ML.Runtime.EntryPoints.ScoreModel+RenameBinaryPredictionScoreColumnsInput Microsoft.ML.Runtime.EntryPoints.CommonOutputs+TransformOutput
diff --git a/test/BaselineOutput/Common/EntryPoints/core_manifest.json b/test/BaselineOutput/Common/EntryPoints/core_manifest.json
index 0acb5971b0..47ecdd2be6 100644
--- a/test/BaselineOutput/Common/EntryPoints/core_manifest.json
+++ b/test/BaselineOutput/Common/EntryPoints/core_manifest.json
@@ -3719,7 +3719,7 @@
     },
     {
       "Name": "Trainers.AveragedPerceptronBinaryClassifier",
-      "Desc": "Train a Average perceptron.",
+      "Desc": "Perceptron is a classification algorithm that makes its predictions based on a linear function. I.e., for an instance with feature values f_0, f_1, ..., f_D-1, the prediction is given by the sign of sigma[0, D-1] (w_i * f_i), where w_0, w_1, ..., w_D-1 are the weights computed by the algorithm. Perceptron is an online algorithm, i.e., it processes the instances in the training set one at a time. The weights are initialized to be 0, or some random values. Then, for each example in the training set, the value of sigma[0, D-1] (w_i * f_i) is computed. If this value has the same sign as the label of the current example, the weights remain the same. If they have opposite signs, the weights vector is updated by either subtracting or adding (if the label is negative or positive, respectively) the feature vector of the current example, multiplied by a factor 0 < a <= 1, called the learning rate. In a generalization of this algorithm, the weights are updated by adding the feature vector multiplied by the learning rate, and by the gradient of some loss function (in the specific case described above, the loss is hinge loss, whose gradient is 1 when it is non-zero). In Averaged Perceptron (AKA voted-perceptron), the weight vectors are stored, together with a weight that counts the number of iterations each survived (this is equivalent to storing the weight vector after every iteration, regardless of whether it was updated or not). The prediction is then calculated by taking the weighted average of all the sums sigma[0, D-1] (w_i * f_i) of the different weight vectors.",
       "FriendlyName": "Averaged Perceptron",
       "ShortName": "ap",
       "Inputs": [
@@ -10942,7 +10942,7 @@
     },
     {
       "Name": "Trainers.LogisticRegressionBinaryClassifier",
-      "Desc": "Train a logistic regression binary model",
+      "Desc": "Logistic Regression is a classification method used to predict the value of a categorical dependent variable from its relationship to one or more independent variables assumed to have a logistic distribution. If the dependent variable has only two possible values (success/failure), then the logistic regression is binary. If the dependent variable has more than two possible values (blood type given diagnostic test results), then the logistic regression is multinomial. The optimization technique used for LogisticRegressionBinaryClassifier is the limited memory Broyden-Fletcher-Goldfarb-Shanno (L-BFGS). Both the L-BFGS and regular BFGS algorithms use quasi-Newton methods to estimate the computationally intensive Hessian matrix in the equation used by Newton's method to calculate steps. But the L-BFGS approximation uses only a limited amount of memory to compute the next step direction, so that it is especially suited for problems with a large number of variables. The memory_size parameter specifies the number of past positions and gradients to store for use in the computation of the next step. This learner can use elastic net regularization: a linear combination of L1 (lasso) and L2 (ridge) regularizations. Regularization is a method that can render an ill-posed problem more tractable by imposing constraints that provide information to supplement the data and that prevent overfitting by penalizing models with extreme coefficient values. This can improve the generalization of the model learned by selecting the optimal complexity in the bias-variance tradeoff. Regularization works by adding the penalty that is associated with coefficient values to the error of the hypothesis. An accurate model with extreme coefficient values would be penalized more, but a less accurate model with more conservative values would be penalized less. L1 and L2 regularization have different effects and uses that are complementary in certain respects. l1_weight: can be applied to sparse models, when working with high-dimensional data. It pulls small weights associated with features that are relatively unimportant towards 0. l2_weight: is preferable for data that is not sparse. It pulls large weights towards zero. Adding the ridge penalty to the regularization overcomes some of lasso's limitations. It can improve its predictive accuracy, for example, when the number of predictors is greater than the sample size. If x = l1_weight and y = l2_weight, ax + by = c defines the linear span of the regularization terms. The default values of x and y are both 1. Aggressive regularization can harm predictive capacity by excluding important variables from the model. So choosing the optimal values for the regularization parameters is important for the performance of the logistic regression model. Wikipedia: L-BFGS. Wikipedia: Logistic regression. Scalable Training of L1-Regularized Log-Linear Models. Test Run - L1 and L2 Regularization for Machine Learning.",
       "FriendlyName": "Logistic Regression",
       "ShortName": "lr",
       "Inputs": [
@@ -11254,7 +11254,7 @@
     },
     {
       "Name": "Trainers.LogisticRegressionClassifier",
-      "Desc": "Train a logistic regression multi class model",
+      "Desc": "Logistic Regression is a classification method used to predict the value of a categorical dependent variable from its relationship to one or more independent variables assumed to have a logistic distribution. If the dependent variable has only two possible values (success/failure), then the logistic regression is binary. If the dependent variable has more than two possible values (blood type given diagnostic test results), then the logistic regression is multinomial. The optimization technique used for LogisticRegressionBinaryClassifier is the limited memory Broyden-Fletcher-Goldfarb-Shanno (L-BFGS). Both the L-BFGS and regular BFGS algorithms use quasi-Newton methods to estimate the computationally intensive Hessian matrix in the equation used by Newton's method to calculate steps. But the L-BFGS approximation uses only a limited amount of memory to compute the next step direction, so that it is especially suited for problems with a large number of variables. The memory_size parameter specifies the number of past positions and gradients to store for use in the computation of the next step. This learner can use elastic net regularization: a linear combination of L1 (lasso) and L2 (ridge) regularizations.
Regularization is a method that can render an ill-posed problem more tractable by imposing constraints that provide information to supplement the data and that prevent overfitting by penalizing models with extreme coefficient values. This can improve the generalization of the model learned by selecting the optimal complexity in the bias-variance tradeoff. Regularization works by adding the penalty that is associated with coefficient values to the error of the hypothesis. An accurate model with extreme coefficient values would be penalized more, but a less accurate model with more conservative values would be penalized less. L1 and L2 regularization have different effects and uses that are complementary in certain respects. l1_weight: can be applied to sparse models, when working with high-dimensional data. It pulls small weights associated with features that are relatively unimportant towards 0. l2_weight: is preferable for data that is not sparse. It pulls large weights towards zero. Adding the ridge penalty to the regularization overcomes some of lasso's limitations. It can improve its predictive accuracy, for example, when the number of predictors is greater than the sample size. If x = l1_weight and y = l2_weight, ax + by = c defines the linear span of the regularization terms. The default values of x and y are both 1. Aggressive regularization can harm predictive capacity by excluding important variables from the model. So choosing the optimal values for the regularization parameters is important for the performance of the logistic regression model. Wikipedia: L-BFGS. Wikipedia: Logistic regression. Scalable Training of L1-Regularized Log-Linear Models. Test Run - L1 and L2 Regularization for Machine Learning.",
       "FriendlyName": "Multi-class Logistic Regression",
       "ShortName": "mlr",
       "Inputs": [
@@ -12760,7 +12760,7 @@
     },
     {
       "Name": "Trainers.StochasticDualCoordinateAscentClassifier",
-      "Desc": "Train an SDCA multi class model",
+      "Desc": "This trainer is based on the Stochastic Dual Coordinate Ascent (SDCA) method, a state-of-the-art optimization technique for convex objective functions. The algorithm can be scaled for use on large out-of-memory data sets due to a semi-asynchronous implementation that supports multi-threading. Convergence is underwritten by periodically enforcing synchronization between primal and dual updates in a separate thread. Several choices of loss functions are also provided. The SDCA method combines several of the best properties and capabilities of logistic regression and SVM algorithms. For more information on SDCA, see: Scaling Up Stochastic Dual Coordinate Ascent. Stochastic Dual Coordinate Ascent Methods for Regularized Loss Minimization. Note that SDCA is a stochastic and streaming optimization algorithm. The results depend on the order of the training data. For reproducible results, it is recommended that one set `shuffle` to `False` and `NumThreads` to `1`. Elastic net regularization can be specified by the l2_weight and l1_weight parameters. Note that the l2_weight has an effect on the rate of convergence.
In general, the larger the l2_weight, the faster SDCA converges.",
       "FriendlyName": "Fast Linear Multi-class Classification (SA-SDCA)",
       "ShortName": "sasdcamc",
       "Inputs": [
@@ -13030,7 +13030,7 @@
     },
     {
       "Name": "Trainers.StochasticDualCoordinateAscentRegressor",
-      "Desc": "Train an SDCA regression model",
+      "Desc": "This trainer is based on the Stochastic Dual Coordinate Ascent (SDCA) method, a state-of-the-art optimization technique for convex objective functions. The algorithm can be scaled for use on large out-of-memory data sets due to a semi-asynchronous implementation that supports multi-threading. Convergence is underwritten by periodically enforcing synchronization between primal and dual updates in a separate thread. Several choices of loss functions are also provided. The SDCA method combines several of the best properties and capabilities of logistic regression and SVM algorithms. For more information on SDCA, see: Scaling Up Stochastic Dual Coordinate Ascent. Stochastic Dual Coordinate Ascent Methods for Regularized Loss Minimization. Note that SDCA is a stochastic and streaming optimization algorithm. The results depend on the order of the training data. For reproducible results, it is recommended that one set `shuffle` to `False` and `NumThreads` to `1`. Elastic net regularization can be specified by the l2_weight and l1_weight parameters. Note that the l2_weight has an effect on the rate of convergence. In general, the larger the l2_weight, the faster SDCA converges.",
       "FriendlyName": "Fast Linear Regression (SA-SDCA)",
       "ShortName": "sasdcar",
       "Inputs": [
diff --git a/test/Microsoft.ML.Core.Tests/UnitTests/TestEntryPoints.cs b/test/Microsoft.ML.Core.Tests/UnitTests/TestEntryPoints.cs
index ded78d11de..684dabfbbd 100644
--- a/test/Microsoft.ML.Core.Tests/UnitTests/TestEntryPoints.cs
+++ b/test/Microsoft.ML.Core.Tests/UnitTests/TestEntryPoints.cs
@@ -6,6 +6,7 @@
 using System.Collections.Generic;
 using System.IO;
 using System.Linq;
+using System.Text.RegularExpressions;
 using Microsoft.ML.Runtime.Api;
 using Microsoft.ML.Runtime.Core.Tests.UnitTests;
 using Microsoft.ML.Runtime.Data;
@@ -274,13 +275,31 @@ public void EntryPointCatalog()
             var entryPointsSubDir = Path.Combine("..", "Common", "EntryPoints");
             var catalog = ModuleCatalog.CreateInstance(Env);
             var path = DeleteOutputPath(entryPointsSubDir, epListFile);
+
+            // Descriptions may span several lines; collapse each to a single line so every entry point stays on one tsv row.
+            var regex = new Regex(@"\r\n?|\n", RegexOptions.Compiled);
             File.WriteAllLines(path, catalog.AllEntryPoints()
-                .Select(x => string.Join("\t", x.Name, x.Description, x.Method.DeclaringType, x.Method.Name, x.InputType, x.OutputType).Replace(Environment.NewLine, "\\n "))
+                .Select(x => string.Join("\t",
+                    x.Name,
+                    regex.Replace(x.Description, " "),
+                    x.Method.DeclaringType,
+                    x.Method.Name,
+                    x.InputType,
+                    x.OutputType))
                 .OrderBy(x => x));
             CheckEquality(entryPointsSubDir, epListFile);
 
             var jObj = JsonManifestUtils.BuildAllManifests(Env, catalog);
+
+            // Replace newline characters in the manifest descriptions with spaces as well, so sentences are not glued together.
+            if (jObj[FieldNames.TopEntryPoints] != null && jObj[FieldNames.TopEntryPoints] is JArray)
+            {
+                foreach (JToken entry in jObj[FieldNames.TopEntryPoints].Children())
+                {
+                    if (entry[FieldNames.Desc] != null)
+                        entry[FieldNames.Desc] = regex.Replace(entry[FieldNames.Desc].ToString(), " ");
+                }
+            }
+
             var jPath = DeleteOutputPath(entryPointsSubDir, manifestFile);
             using (var file = File.OpenWrite(jPath))
             using (var writer = new StreamWriter(file))
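To see why the test replaces newlines with a single space rather than the empty string, here is a small self-contained demonstration of the same Regex pattern (the class and sample text are illustrative, not part of this PR):

using System;
using System.Text.RegularExpressions;

public static class NewlineNormalization
{
    public static void Main()
    {
        // Same pattern as the test above: matches \r\n, bare \r, and bare \n.
        var regex = new Regex(@"\r\n?|\n", RegexOptions.Compiled);
        var desc = "Note that SDCA is a stochastic and streaming optimization algorithm.\r\n"
                 + "The results depend on the order of the training data.";
        // Replacing with " " keeps the description on one line without gluing
        // "algorithm." directly onto "The results".
        Console.WriteLine(regex.Replace(desc, " "));
    }
}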