diff --git a/Microsoft.ML.sln b/Microsoft.ML.sln
index 6ad93c3886..58e24041f1 100644
--- a/Microsoft.ML.sln
+++ b/Microsoft.ML.sln
@@ -5,6 +5,9 @@ MinimumVisualStudioVersion = 10.0.40219.1
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Microsoft.ML.Core", "src\Microsoft.ML.Core\Microsoft.ML.Core.csproj", "{A6CA6CC6-5D7C-4D7F-A0F5-35E14B383B0A}"
EndProject
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "src", "src", "{09EADF06-BE25-4228-AB53-95AE3E15B530}"
+ ProjectSection(SolutionItems) = preProject
+ src\Source.ruleset = src\Source.ruleset
+ EndProjectSection
EndProject
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "test", "test", "{AED9C836-31E3-4F3F-8ABC-929555D3F3C4}"
EndProject
@@ -88,6 +91,12 @@ Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Microsoft.ML.CpuMath", "Mic
pkg\Microsoft.ML.CpuMath\Microsoft.ML.CpuMath.symbols.nupkgproj = pkg\Microsoft.ML.CpuMath\Microsoft.ML.CpuMath.symbols.nupkgproj
EndProjectSection
EndProject
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "tools-local", "tools-local", "{7F13E156-3EBA-4021-84A5-CD56BA72F99E}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Microsoft.ML.CodeAnalyzer", "tools-local\Microsoft.ML.CodeAnalyzer\Microsoft.ML.CodeAnalyzer.csproj", "{B4E55B2D-2A92-46E7-B72F-E76D6FD83440}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Microsoft.ML.CodeAnalyzer.Tests", "test\Microsoft.ML.CodeAnalyzer.Tests\Microsoft.ML.CodeAnalyzer.Tests.csproj", "{3E4ABF07-7970-4BE6-B45B-A13D3C397545}"
+EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Any CPU = Debug|Any CPU
@@ -304,6 +313,22 @@ Global
{DCF46B79-1FDB-4DBA-A263-D3D64E3AAA27}.Release|Any CPU.Build.0 = Release|Any CPU
{DCF46B79-1FDB-4DBA-A263-D3D64E3AAA27}.Release-Intrinsics|Any CPU.ActiveCfg = Release|Any CPU
{DCF46B79-1FDB-4DBA-A263-D3D64E3AAA27}.Release-Intrinsics|Any CPU.Build.0 = Release|Any CPU
+ {B4E55B2D-2A92-46E7-B72F-E76D6FD83440}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {B4E55B2D-2A92-46E7-B72F-E76D6FD83440}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {B4E55B2D-2A92-46E7-B72F-E76D6FD83440}.Debug-Intrinsics|Any CPU.ActiveCfg = Debug|Any CPU
+ {B4E55B2D-2A92-46E7-B72F-E76D6FD83440}.Debug-Intrinsics|Any CPU.Build.0 = Debug|Any CPU
+ {B4E55B2D-2A92-46E7-B72F-E76D6FD83440}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {B4E55B2D-2A92-46E7-B72F-E76D6FD83440}.Release|Any CPU.Build.0 = Release|Any CPU
+ {B4E55B2D-2A92-46E7-B72F-E76D6FD83440}.Release-Intrinsics|Any CPU.ActiveCfg = Release|Any CPU
+ {B4E55B2D-2A92-46E7-B72F-E76D6FD83440}.Release-Intrinsics|Any CPU.Build.0 = Release|Any CPU
+ {3E4ABF07-7970-4BE6-B45B-A13D3C397545}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {3E4ABF07-7970-4BE6-B45B-A13D3C397545}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {3E4ABF07-7970-4BE6-B45B-A13D3C397545}.Debug-Intrinsics|Any CPU.ActiveCfg = Debug|Any CPU
+ {3E4ABF07-7970-4BE6-B45B-A13D3C397545}.Debug-Intrinsics|Any CPU.Build.0 = Debug|Any CPU
+ {3E4ABF07-7970-4BE6-B45B-A13D3C397545}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {3E4ABF07-7970-4BE6-B45B-A13D3C397545}.Release|Any CPU.Build.0 = Release|Any CPU
+ {3E4ABF07-7970-4BE6-B45B-A13D3C397545}.Release-Intrinsics|Any CPU.ActiveCfg = Release|Any CPU
+ {3E4ABF07-7970-4BE6-B45B-A13D3C397545}.Release-Intrinsics|Any CPU.Build.0 = Release|Any CPU
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
@@ -340,6 +365,8 @@ Global
{001F3B4E-FBE4-4001-AFD2-A6A989CD1C25} = {09EADF06-BE25-4228-AB53-95AE3E15B530}
{DCF46B79-1FDB-4DBA-A263-D3D64E3AAA27} = {09EADF06-BE25-4228-AB53-95AE3E15B530}
{BF66A305-DF10-47E4-8D81-42049B149D2B} = {D3D38B03-B557-484D-8348-8BADEE4DF592}
+ {B4E55B2D-2A92-46E7-B72F-E76D6FD83440} = {7F13E156-3EBA-4021-84A5-CD56BA72F99E}
+ {3E4ABF07-7970-4BE6-B45B-A13D3C397545} = {AED9C836-31E3-4F3F-8ABC-929555D3F3C4}
EndGlobalSection
GlobalSection(ExtensibilityGlobals) = postSolution
SolutionGuid = {41165AF1-35BB-4832-A189-73060F82B01D}
diff --git a/build/Dependencies.props b/build/Dependencies.props
index 21e65c9007..5325011f05 100644
--- a/build/Dependencies.props
+++ b/build/Dependencies.props
@@ -7,6 +7,6 @@
4.4.04.3.01.0.0-beta-62824-02
- 2.1.2.2
+ 2.1.2.2
diff --git a/src/Directory.Build.props b/src/Directory.Build.props
index cedfa39442..113da3575a 100644
--- a/src/Directory.Build.props
+++ b/src/Directory.Build.props
@@ -11,7 +11,18 @@
$(NoWarn);1591$(WarningsNotAsErrors);1591
-
+ $(MSBuildThisFileDirectory)\Source.ruleset
+
+
+ false
+ Analyzer
+
+
+
+
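The Directory.Build.props change points the projects under src at the shared Source.ruleset and pulls the new analyzer project into every build as an analyzer reference. A minimal sketch of that wiring, assuming a conventional layout; the ruleset path and the `false`/`Analyzer` values are the ones visible in the hunk above, while the element names and the project-reference path are assumptions:

```xml
<!-- Sketch only: element names and the Include path are assumptions;
     the ruleset path and the false/Analyzer values come from the hunk above. -->
<PropertyGroup>
  <CodeAnalysisRuleSet>$(MSBuildThisFileDirectory)\Source.ruleset</CodeAnalysisRuleSet>
</PropertyGroup>

<ItemGroup>
  <!-- Run the analyzer during compilation without taking a runtime dependency on it. -->
  <ProjectReference Include="..\tools-local\Microsoft.ML.CodeAnalyzer\Microsoft.ML.CodeAnalyzer.csproj">
    <ReferenceOutputAssembly>false</ReferenceOutputAssembly>
    <OutputItemType>Analyzer</OutputItemType>
  </ProjectReference>
</ItemGroup>
```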
diff --git a/src/Microsoft.ML.Api/ApiUtils.cs b/src/Microsoft.ML.Api/ApiUtils.cs
index 4b7daf0a74..8b8cb5871b 100644
--- a/src/Microsoft.ML.Api/ApiUtils.cs
+++ b/src/Microsoft.ML.Api/ApiUtils.cs
@@ -46,7 +46,7 @@ private static OpCode GetAssignmentOpCode(Type t)
///
/// Each of the specialized 'peek' methods copies the appropriate field value of an instance of T
- /// into the provided buffer. So, the call is 'peek(userObject, ref destination)' and the logic is
+ /// into the provided buffer. So, the call is 'peek(userObject, ref destination)' and the logic is
/// identical to 'destination = userObject.##FIELD##', where ##FIELD## is defined per peek method.
///
internal static Delegate GeneratePeek(InternalSchemaDefinition.Column column)
@@ -83,7 +83,7 @@ private static Delegate GeneratePeek(FieldInfo fieldInfo, Op
///
/// Each of the specialized 'poke' methods sets the appropriate field value of an instance of T
- /// to the provided value. So, the call is 'peek(userObject, providedValue)' and the logic is
+ /// to the provided value. So, the call is 'peek(userObject, providedValue)' and the logic is
/// identical to 'userObject.##FIELD## = providedValue', where ##FIELD## is defined per poke method.
///
internal static Delegate GeneratePoke(InternalSchemaDefinition.Column column)
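The 'peek'/'poke' comments above describe delegates generated with IL that read or write a single field of the user's type. A minimal self-contained sketch of the same idea using `DynamicMethod`, simplified to a plain `Func<T, TValue>` field reader rather than the ref-buffer delegate shape used by `GeneratePeek`:

```csharp
using System;
using System.Reflection;
using System.Reflection.Emit;

internal static class PeekSketch
{
    // Builds a delegate equivalent to 'value = userObject.FIELD' for the given field.
    // The caller is expected to pass a field whose FieldType is TValue.
    public static Func<T, TValue> CreateFieldReader<T, TValue>(FieldInfo field)
    {
        var method = new DynamicMethod(
            "peek_" + field.Name, typeof(TValue), new[] { typeof(T) },
            typeof(PeekSketch).Module, skipVisibility: true);
        var il = method.GetILGenerator();
        il.Emit(OpCodes.Ldarg_0);      // load the user object
        il.Emit(OpCodes.Ldfld, field); // read the field
        il.Emit(OpCodes.Ret);          // return it as TValue
        return (Func<T, TValue>)method.CreateDelegate(typeof(Func<T, TValue>));
    }
}
```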
diff --git a/src/Microsoft.ML.Api/CodeGenerationUtils.cs b/src/Microsoft.ML.Api/CodeGenerationUtils.cs
index 74f262c57c..7af0fb85ed 100644
--- a/src/Microsoft.ML.Api/CodeGenerationUtils.cs
+++ b/src/Microsoft.ML.Api/CodeGenerationUtils.cs
@@ -97,12 +97,12 @@ public static string GetCSharpString(CSharpCodeProvider codeProvider, string val
}
///
- /// Gets the C# strings representing the type name for a variable corresponding to
- /// the column type.
- ///
- /// If the type is a vector, then controls whether the array field is
+ /// Gets the C# strings representing the type name for a variable corresponding to
+ /// the column type.
+ ///
+ /// If the type is a vector, then controls whether the array field is
/// generated or .
- ///
+ ///
/// If additional attributes are required, they are appended to the list.
///
private static string GetBackingTypeName(ColumnType colType, bool useVBuffer, List attributes)
diff --git a/src/Microsoft.ML.Api/ComponentCreation.cs b/src/Microsoft.ML.Api/ComponentCreation.cs
index 0d164b6124..3080a8197c 100644
--- a/src/Microsoft.ML.Api/ComponentCreation.cs
+++ b/src/Microsoft.ML.Api/ComponentCreation.cs
@@ -11,14 +11,14 @@
namespace Microsoft.ML.Runtime.Api
{
///
- /// This class defines extension methods for an to facilitate creating
+ /// This class defines extension methods for an to facilitate creating
/// components (loaders, transforms, trainers, scorers, evaluators, savers).
///
public static class ComponentCreation
{
///
/// Create a new data view which is obtained by appending all columns of all the source data views.
- /// If the data views are of different length, the resulting data view will have the length equal to the
+ /// If the data views are of different length, the resulting data view will have the length equal to the
/// length of the shortest source.
///
/// The host environment to use.
@@ -59,11 +59,11 @@ public static RoleMappedData CreateExamples(this IHostEnvironment env, IDataView
/// Create a new over an in-memory collection of the items of user-defined type.
/// The user maintains ownership of the and the resulting data view will
/// never alter the contents of the .
- /// Since is assumed to be immutable, the user is expected to not
+ /// Since is assumed to be immutable, the user is expected to not
/// modify the contents of while the data view is being actively cursored.
- ///
+ ///
/// One typical usage for in-memory data view could be: create the data view, train a predictor.
- /// Once the predictor is fully trained, modify the contents of the underlying collection and
+ /// Once the predictor is fully trained, modify the contents of the underlying collection and
/// train another predictor.
///
/// The user-defined item type.
@@ -88,9 +88,9 @@ public static IDataView CreateDataView(this IHostEnvironment env, IList
is assumed to be immutable, the user is expected to support
/// multiple enumeration of the that would return the same results, unless
/// the user knows that the data will only be cursored once.
- ///
+ ///
/// One typical usage for streaming data view could be: create the data view that lazily loads data
- /// as needed, then apply pre-trained transformations to it and cursor through it for transformation
+ /// as needed, then apply pre-trained transformations to it and cursor through it for transformation
/// results. This is how is implemented.
///
/// The user-defined item type.
@@ -191,7 +191,7 @@ public static PredictionEngine CreatePredictionEngine(th
///
/// Create a prediction engine.
/// This encapsulates the 'classic' prediction problem, where the input is denoted by the float array of features,
- /// and the output is a float score. For binary classification predictors that can output probability, there are output
+ /// and the output is a float score. For binary classification predictors that can output probability, there are output
/// fields that report the predicted label and probability.
///
/// The host environment to use.
@@ -207,7 +207,7 @@ public static SimplePredictionEngine CreateSimplePredictionEngine(this IHostEnvi
///
/// Load the transforms (but not loader) from the model stream and apply them to the specified data.
- /// It is acceptable to have no transforms in the model stream: in this case the original
+ /// It is acceptable to have no transforms in the model stream: in this case the original
/// will be returned.
///
/// The host environment to use.
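The remarks above describe building a data view directly over an in-memory `IList<T>` of a user-defined type. A hedged usage sketch; the `CreateDataView` extension method name comes from this file, while the namespaces and the way `env` is obtained are assumptions:

```csharp
// Hedged sketch; namespaces and how 'env' is obtained are assumptions,
// only the CreateDataView extension method name comes from the file above.
using System.Collections.Generic;
using Microsoft.ML.Runtime;
using Microsoft.ML.Runtime.Api;

public sealed class IrisRow
{
    public float SepalLength;
    public float SepalWidth;
    public string Label;
}

public static class CreateDataViewSketch
{
    public static void Run(IHostEnvironment env)
    {
        var rows = new List<IrisRow>
        {
            new IrisRow { SepalLength = 5.1f, SepalWidth = 3.5f, Label = "setosa" },
        };

        // The list is treated as immutable while the returned view is being cursored (see remarks above).
        var dataView = env.CreateDataView(rows);
    }
}
```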
diff --git a/src/Microsoft.ML.Api/DataViewConstructionUtils.cs b/src/Microsoft.ML.Api/DataViewConstructionUtils.cs
index 6ecff5b204..341e3a72af 100644
--- a/src/Microsoft.ML.Api/DataViewConstructionUtils.cs
+++ b/src/Microsoft.ML.Api/DataViewConstructionUtils.cs
@@ -285,7 +285,7 @@ private Delegate CreateGetter(int index)
}
// REVIEW: The converting getter invokes a type conversion delegate on every call, so it's inherently slower
- // than the 'direct' getter. We don't have good indication of this to the user, and the selection
+ // than the 'direct' getter. We don't have good indication of this to the user, and the selection
// of affected types is pretty arbitrary (signed integers and bools, but not uints and floats).
private Delegate CreateConvertingArrayGetterDelegate(int index, Func convert)
{
@@ -396,7 +396,7 @@ private void CheckColumnInRange(int columnIndex)
}
///
- /// An in-memory data view based on the IList of data.
+ /// An in-memory data view based on the IList of data.
/// Supports shuffling.
///
private sealed class ListDataView : DataViewBase
@@ -492,11 +492,11 @@ protected override bool MoveManyCore(long count)
}
///
- /// An in-memory data view based on the IEnumerable of data.
+ /// An in-memory data view based on the IEnumerable of data.
/// Doesn't support shuffling.
- ///
+ ///
/// This class is public because prediction engine wants to call its
- /// for performance reasons.
+ /// for performance reasons.
///
public sealed class StreamingDataView : DataViewBase
where TRow : class
@@ -578,7 +578,7 @@ protected override bool MoveNextCore()
///
/// This represents the 'infinite data view' over one (mutable) user-defined object.
- /// The 'current row' object can be updated at any time, this will affect all the
+ /// The 'current row' object can be updated at any time, this will affect all the
/// newly created cursors, but not the ones already existing.
///
public sealed class SingleRowLoopDataView : DataViewBase
@@ -731,7 +731,7 @@ public abstract partial class MetadataInfo
///
public ColumnType MetadataType;
///
- /// The string identifier of the metadata. Some identifiers have special meaning,
+ /// The string identifier of the metadata. Some identifiers have special meaning,
/// like "SlotNames", but any other identifiers can be used.
///
public readonly string Kind;
@@ -757,7 +757,7 @@ public sealed class MetadataInfo : MetadataInfo
///
/// Constructor for metadata of value type T.
///
- /// The string identifier of the metadata. Some identifiers have special meaning,
+ /// The string identifier of the metadata. Some identifiers have special meaning,
/// like "SlotNames", but any other identifiers can be used.
/// Metadata value.
/// Type of the metadata.
diff --git a/src/Microsoft.ML.Api/GenerateCodeCommand.cs b/src/Microsoft.ML.Api/GenerateCodeCommand.cs
index 0bca5edfb3..26136971af 100644
--- a/src/Microsoft.ML.Api/GenerateCodeCommand.cs
+++ b/src/Microsoft.ML.Api/GenerateCodeCommand.cs
@@ -21,7 +21,7 @@ namespace Microsoft.ML.Runtime.Api
{
///
/// Generates the sample prediction code for a given model file, with correct input and output classes.
- ///
+ ///
/// REVIEW: Consider adding support for generating VBuffers instead of arrays, maybe for high dimensionality vectors.
///
public sealed class GenerateCodeCommand : ICommand
@@ -45,7 +45,7 @@ public sealed class Arguments
ShortName = "sparse", SortOrder = 102)]
public bool SparseVectorDeclaration;
- // REVIEW: currently, it's only used in unit testing to not generate the paths into the test output folder.
+ // REVIEW: currently, it's only used in unit testing to not generate the paths into the test output folder.
// However, it might be handy for automation scenarios, so I've added this as a hidden option.
[Argument(ArgumentType.AtMostOnce, HelpText = "A location of the model file to put into generated file", Hide = true)]
public string ModelNameOverride;
diff --git a/src/Microsoft.ML.Api/InternalSchemaDefinition.cs b/src/Microsoft.ML.Api/InternalSchemaDefinition.cs
index 2b0f056214..3edf7599a4 100644
--- a/src/Microsoft.ML.Api/InternalSchemaDefinition.cs
+++ b/src/Microsoft.ML.Api/InternalSchemaDefinition.cs
@@ -76,12 +76,12 @@ private Column(string columnName, ColumnType columnType, FieldInfo fieldInfo = n
}
///
- /// Function that checks whether the InternalSchemaDefinition.Column is a valid one.
+ /// Function that checks whether the InternalSchemaDefinition.Column is a valid one.
/// To be valid, the Column must:
/// 1. Have non-empty values for ColumnName and ColumnType
/// 2. Have a non-empty value for FieldInfo iff it is a field column, else
/// ReturnParameterInfo and Generator iff it is a computed column
- /// 3. Generator must have the method inputs (TRow rowObject,
+ /// 3. Generator must have the method inputs (TRow rowObject,
/// long position, ref TValue outputValue) in that order.
///
[Conditional("DEBUG")]
@@ -133,7 +133,7 @@ private InternalSchemaDefinition(Column[] columns)
///
/// Given a field info on a type, returns whether this appears to be a vector type,
/// and also the associated data kind for this type. If a data kind could not
- /// be determined, this will throw.
+ /// be determined, this will throw.
///
/// The field info to inspect.
/// Whether this appears to be a vector type.
@@ -149,7 +149,7 @@ public static void GetVectorAndKind(FieldInfo fieldInfo, out bool isVector, out
///
/// Given a parameter info on a type, returns whether this appears to be a vector type,
/// and also the associated data kind for this type. If a data kind could not
- /// be determined, this will throw.
+ /// be determined, this will throw.
///
/// The parameter info to inspect.
/// Whether this appears to be a vector type.
@@ -165,7 +165,7 @@ public static void GetVectorAndKind(ParameterInfo parameterInfo, out bool isVect
///
/// Given a type and name for a variable, returns whether this appears to be a vector type,
/// and also the associated data kind for this type. If a data kind could not
- /// be determined, this will throw.
+ /// be determined, this will throw.
///
/// The type of the variable to inspect.
/// The name of the variable to inspect.
@@ -222,7 +222,7 @@ public static InternalSchemaDefinition Create(Type userType, SchemaDefinition us
col.MemberName,
userType.FullName);
- //Clause to handle the field that may be used to expose the cursor channel.
+ //Clause to handle the field that may be used to expose the cursor channel.
//This field does not need a column.
if (fieldInfo.FieldType == typeof(IChannel))
continue;
@@ -251,7 +251,7 @@ public static InternalSchemaDefinition Create(Type userType, SchemaDefinition us
}
else
{
- // Make sure that the types are compatible with the declared type, including
+ // Make sure that the types are compatible with the declared type, including
// whether it is a vector type.
if (isVector != col.ColumnType.IsVector)
{
diff --git a/src/Microsoft.ML.Api/LambdaTransform.cs b/src/Microsoft.ML.Api/LambdaTransform.cs
index 93635c6d7d..506c675524 100644
--- a/src/Microsoft.ML.Api/LambdaTransform.cs
+++ b/src/Microsoft.ML.Api/LambdaTransform.cs
@@ -37,7 +37,7 @@ public static class LambdaTransform
/// different data by calling ), and the transformed data (which can be
/// enumerated upon by calling GetRowCursor or AsCursorable{TRow}). If or
/// implement the interface, they will be disposed after use.
- ///
+ ///
/// This is a 'stateless non-savable' version of the transform.
///
/// The host environment to use.
@@ -78,7 +78,7 @@ public static ITransformTemplate CreateMap(IHostEnvironment env, IDa
/// different data by calling ), and the transformed data (which can be
/// enumerated upon by calling GetRowCursor or AsCursorable{TRow}). If or
/// implement the interface, they will be disposed after use.
- ///
+ ///
/// This is a 'stateless savable' version of the transform: save and load routines must be provided.
///
/// The host environment to use.
@@ -123,7 +123,7 @@ public static ITransformTemplate CreateMap(IHostEnvironment env, IDa
///
/// This is a 'stateful non-savable' version of the map transform: the mapping function is guaranteed to be invoked once per
- /// every row of the data set, in sequence; one user-defined state object will be allocated per cursor and passed to the
+ /// every row of the data set, in sequence; one user-defined state object will be allocated per cursor and passed to the
/// map function every time. If , , or
/// implement the interface, they will be disposed after use.
///
@@ -164,7 +164,7 @@ public static ITransformTemplate CreateMap(IHostEnvironment
///
/// This is a 'stateful savable' version of the map transform: the mapping function is guaranteed to be invoked once per
- /// every row of the data set, in sequence (non-parallelizable); one user-defined state object will be allocated per cursor and passed to the
+ /// every row of the data set, in sequence (non-parallelizable); one user-defined state object will be allocated per cursor and passed to the
/// map function every time; save and load routines must be provided. If , ,
/// or implement the interface, they will be disposed after use.
///
@@ -217,8 +217,8 @@ public static ITransformTemplate CreateMap(IHostEnvironment
/// This creates a filter transform that can 'accept' or 'decline' any row of the data based on the contents of the row
/// or state of the cursor.
/// This is a 'stateful non-savable' version of the filter: the filter function is guaranteed to be invoked once per
- /// every row of the data set, in sequence (non-parallelizable); one user-defined state object will be allocated per cursor and passed to the
- /// filter function every time.
+ /// every row of the data set, in sequence (non-parallelizable); one user-defined state object will be allocated per cursor and passed to the
+ /// filter function every time.
/// If or implement the interface, they will be disposed after use.
///
/// The type that describes what 'source' columns are consumed from the
@@ -251,7 +251,7 @@ public static ITransformTemplate CreateFilter(IHostEnvironment env
/// This creates a filter transform that can 'accept' or 'decline' any row of the data based on the contents of the row
/// or state of the cursor.
/// This is a 'stateful savable' version of the filter: the filter function is guaranteed to be invoked once per
- /// every row of the data set, in sequence (non-parallelizable); one user-defined state object will be allocated per cursor and passed to the
+ /// every row of the data set, in sequence (non-parallelizable); one user-defined state object will be allocated per cursor and passed to the
/// filter function every time; save and load routines must be provided.
/// If or implement the interface, they will be disposed after use.
///
@@ -294,11 +294,11 @@ public static ITransformTemplate CreateFilter(IHostEnvironment env
}
///
- /// Defines common ancestor for various flavors of lambda-based user-defined transforms that may or may not be
+ /// Defines common ancestor for various flavors of lambda-based user-defined transforms that may or may not be
/// serializable.
- ///
+ ///
/// In order for the transform to be serializable, the user should specify a save and load delegate.
- /// Specifically, for this the user has to provide the following things:
+ /// Specifically, for this the user has to provide the following things:
/// * a custom save action that serializes the transform 'state' to the binary writer.
/// * a custom load action that de-serializes the transform from the binary reader. This must be a public static method of a public class.
///
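The CreateMap overloads above turn a user delegate into a transform that is both the 'transformation' and the transformed data. A hedged sketch of the 'stateless non-savable' flavor; the `Action<TSrc, TDst>` delegate shape and the exact parameter list are assumptions:

```csharp
// Hedged sketch of the 'stateless non-savable' CreateMap described above.
// The Action<TSrc, TDst> delegate shape and exact parameter list are assumptions.
using Microsoft.ML.Runtime;
using Microsoft.ML.Runtime.Api;
using Microsoft.ML.Runtime.Data;

public sealed class TextInput  { public string Text; }
public sealed class TextOutput { public int Length; }

public static class LambdaTransformSketch
{
    public static void Run(IHostEnvironment env, IDataView source)
    {
        var transform = LambdaTransform.CreateMap<TextInput, TextOutput>(
            env, source, (src, dst) => dst.Length = src.Text == null ? 0 : src.Text.Length);

        // 'transform' is both the transformation (re-applicable to other data) and the transformed data.
    }
}
```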
diff --git a/src/Microsoft.ML.Api/MapTransform.cs b/src/Microsoft.ML.Api/MapTransform.cs
index 914bb63c07..4426721620 100644
--- a/src/Microsoft.ML.Api/MapTransform.cs
+++ b/src/Microsoft.ML.Api/MapTransform.cs
@@ -14,7 +14,7 @@ namespace Microsoft.ML.Runtime.Api
/// It doesn't change the number of rows, and can be seen as a result of application of the user's function
/// to every row of the input data.
/// Similarly to the existing 's, this object can be treated as both the 'transformation' algorithm
- /// (which can be then applied to different data by calling ), and the transformed data (which can
+ /// (which can be then applied to different data by calling ), and the transformed data (which can
/// be enumerated upon by calling GetRowCursor or AsCursorable{TRow}).
///
/// The type that describes what 'source' columns are consumed from the input .
@@ -36,8 +36,8 @@ internal sealed class MapTransform : LambdaTransformBase, ITransform
private static string RegistrationName { get { return string.Format(RegistrationNameTemplate, typeof(TSrc).FullName, typeof(TDst).FullName); } }
///
- /// Create a a map transform that is savable iff and are
- /// not null.
+ /// Create a map transform that is savable iff and are
+ /// not null.
///
/// The host environment
/// The dataview upon which we construct the transform
@@ -47,7 +47,7 @@ internal sealed class MapTransform : LambdaTransformBase, ITransform
/// A function that given the serialization stream and a data view, returns
/// an . The intent is, this returned object should itself be a
/// , but this is not strictly necessary. This delegate should be
- /// a static non-lambda method that this assembly can legally call. May be null simultaneously with
+ /// a static non-lambda method that this assembly can legally call. May be null simultaneously with
/// .
/// The schema definition overrides for
/// The schema definition overrides for
diff --git a/src/Microsoft.ML.Api/PredictionEngine.cs b/src/Microsoft.ML.Api/PredictionEngine.cs
index 14e2498c93..9410d3b50e 100644
--- a/src/Microsoft.ML.Api/PredictionEngine.cs
+++ b/src/Microsoft.ML.Api/PredictionEngine.cs
@@ -72,12 +72,12 @@ internal BatchPredictionEngine(IHostEnvironment env, IDataView dataPipeline, boo
}
///
- /// Run the prediction pipe. This will enumerate the exactly once,
- /// cache all the examples (by reference) into its internal representation and then run
+ /// Run the prediction pipe. This will enumerate the exactly once,
+ /// cache all the examples (by reference) into its internal representation and then run
/// the transformation pipe.
///
/// The examples to run the prediction on.
- /// If true, the engine will not allocate memory per output, and
+ /// If true, the engine will not allocate memory per output, and
/// the returned objects will actually always be the same object. The user is
/// expected to clone the values himself if needed.
/// The that contains all the pipeline results.
@@ -141,7 +141,7 @@ public void Reset()
/// in-memory data, one example at a time.
/// This can also be used with trained pipelines that do not end with a predictor: in this case, the
/// 'prediction' will be just the outcome of all the transformations.
- /// This is essentially a wrapper for that throws if
+ /// This is essentially a wrapper for that throws if
/// more than one result is returned per call to .
///
/// The user-defined type that holds the example.
@@ -198,7 +198,7 @@ public TDst Predict(TSrc example)
///
/// This class encapsulates the 'classic' prediction problem, where the input is denoted by the float array of features,
- /// and the output is a float score. For binary classification predictors that can output probability, there are output
+ /// and the output is a float score. For binary classification predictors that can output probability, there are output
/// fields that report the predicted label and probability.
///
public sealed class SimplePredictionEngine
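The engine described earlier in this file consumes one strongly typed example per call and returns one strongly typed prediction. A hedged sketch; `Predict(TSrc)` appears above, but the generic type's namespace and the call that creates the engine are assumptions and not shown:

```csharp
// Hedged sketch: Predict(TSrc) appears in the file above; the namespace and the
// CreatePredictionEngine call that produces 'engine' are assumptions.
using Microsoft.ML.Runtime.Api;

public sealed class SentimentInput  { public string Text; }
public sealed class SentimentOutput { public bool PredictedLabel; public float Probability; }

public static class PredictionEngineSketch
{
    public static SentimentOutput Score(PredictionEngine<SentimentInput, SentimentOutput> engine, string text)
    {
        // One example in, one prediction out; throws if the pipeline yields more than one result.
        return engine.Predict(new SentimentInput { Text = text });
    }
}
```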
diff --git a/src/Microsoft.ML.Api/SchemaDefinition.cs b/src/Microsoft.ML.Api/SchemaDefinition.cs
index 559e3a81ee..e08845a87e 100644
--- a/src/Microsoft.ML.Api/SchemaDefinition.cs
+++ b/src/Microsoft.ML.Api/SchemaDefinition.cs
@@ -63,7 +63,7 @@ public VectorTypeAttribute(params int[] dims)
}
///
- /// Describes column information such as name and the source columns indicies that this
+ /// Describes column information such as name and the source columns indices that this
/// column encapsulates.
///
[AttributeUsage(AttributeTargets.Field, AllowMultiple = false, Inherited = true)]
@@ -81,12 +81,12 @@ public ColumnAttribute(string ordinal, string name = null)
public string Name { get; }
///
- /// Contains positions of indices of source columns in the form
- /// of ranges. Examples of range: if we want to include just column
- /// with index 1 we can write the range as 1, if we want to include
+ /// Contains positions of indices of source columns in the form
+ /// of ranges. Examples of range: if we want to include just column
+ /// with index 1 we can write the range as 1, if we want to include
/// columns 1 to 10 then we can write the range as 1-10 and we want to include all the
/// columns from column with index 1 until end then we can write 1-*.
- ///
+ ///
/// This takes a sequence of ranges that are comma separated, example:
/// 1,2-5,10-*
///
@@ -125,7 +125,7 @@ public sealed class NoColumnAttribute : Attribute
}
///
- /// Mark a member that implements exactly IChannel as being permitted to receive
+ /// Mark a member that implements exactly IChannel as being permitted to receive
/// channel information from an external channel.
///
[AttributeUsage(AttributeTargets.Field, AllowMultiple = false, Inherited = true)]
@@ -133,11 +133,11 @@ public sealed class CursorChannelAttribute : Attribute
{
///
/// When passed some object, and a channel, it attempts to pass the channel to the object. It
- /// passes the channel to the object iff the object has exactly one field marked with the
- /// CursorChannelAttribute, and that field implements only the IChannel interface.
- ///
- /// The function returns the modified object, as well as a boolean indicator of whether it was
- /// able to pass the channel to the object.
+ /// passes the channel to the object iff the object has exactly one field marked with the
+ /// CursorChannelAttribute, and that field implements only the IChannel interface.
+ ///
+ /// The function returns the modified object, as well as a boolean indicator of whether it was
+ /// able to pass the channel to the object.
///
/// The object that attempts to acquire the channel.
/// The channel to pass to the object.
@@ -206,13 +206,13 @@ public sealed class Column
public ColumnType ColumnType { get; set; }
///
- /// Whether the column is a computed type.
+ /// Whether the column is a computed type.
///
public bool IsComputed { get { return Generator != null; } }
///
- /// The generator function. if the column is computed.
- ///
+ /// The generator function. if the column is computed.
+ ///
public Delegate Generator { get; set; }
public Type ReturnType => Generator?.GetMethodInfo().GetParameters().LastOrDefault().ParameterType.GetElementType();
@@ -277,7 +277,7 @@ public IEnumerable> GetMetadataTypes
}
///
- /// Get or set the column definition by column name.
+ /// Get or set the column definition by column name.
/// If there's no such column:
/// - get returns null,
/// - set adds a new column.
@@ -287,9 +287,7 @@ public IEnumerable> GetMetadataTypes
///
public Column this[string columnName]
{
-#pragma warning disable TLC_NoThis // Do not use 'this' keyword for member access
get => this.FirstOrDefault(x => x.ColumnName == columnName);
-#pragma warning restore TLC_NoThis // Do not use 'this' keyword for member access
set
{
Contracts.CheckValue(value, nameof(value));
@@ -323,9 +321,9 @@ public static SchemaDefinition Create(Type userType)
HashSet colNames = new HashSet();
foreach (var fieldInfo in userType.GetFields())
{
- // Clause to handle the field that may be used to expose the cursor channel.
+ // Clause to handle the field that may be used to expose the cursor channel.
// This field does not need a column.
- // REVIEW: maybe validate the channel attribute now, instead
+ // REVIEW: maybe validate the channel attribute now, instead
// of later at cursor creation.
if (fieldInfo.FieldType == typeof(IChannel))
continue;
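The attributes discussed in this file shape how a user-defined row class maps to a schema. A sketch combining them; the attribute names and the `VectorType(params int[] dims)` / `Column(string ordinal, string name = null)` constructors appear above, while the namespaces are assumptions:

```csharp
// Sketch of the schema attributes above; namespaces are assumptions.
using Microsoft.ML.Runtime;
using Microsoft.ML.Runtime.Api;

public sealed class ExampleRow
{
    // A vector column backed by an array field, with a fixed dimension.
    [VectorType(4)]
    public float[] Features;

    // Source column ranges are comma separated, e.g. "1,2-5,10-*".
    [Column("1-10", name: "Merged")]
    public float[] Merged;

    // Excluded from the generated schema.
    [NoColumn]
    public string DebugInfo;

    // The single field that can receive the cursor channel; must be typed exactly IChannel.
    [CursorChannel]
    public IChannel Channel;
}
```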
diff --git a/src/Microsoft.ML.Api/SerializableLambdaTransform.cs b/src/Microsoft.ML.Api/SerializableLambdaTransform.cs
index 5f761a042b..7de6e522d8 100644
--- a/src/Microsoft.ML.Api/SerializableLambdaTransform.cs
+++ b/src/Microsoft.ML.Api/SerializableLambdaTransform.cs
@@ -79,7 +79,7 @@ public static ITransformTemplate Create(IHostEnvironment env, ModelLoadContext c
/// that method that should be enough to "recover" it, assuming it is a "recoverable" method (recoverable
/// here is a loose definition, meaning that is capable
/// of creating it, which includes among other things that it's static, non-lambda, accessible to
- /// this assembly, etc.).
+ /// this assembly, etc.).
///
/// The method that should be "recoverable"
/// A string array describing the input method
diff --git a/src/Microsoft.ML.Api/StatefulFilterTransform.cs b/src/Microsoft.ML.Api/StatefulFilterTransform.cs
index b7e0cf473b..f47b8620a8 100644
--- a/src/Microsoft.ML.Api/StatefulFilterTransform.cs
+++ b/src/Microsoft.ML.Api/StatefulFilterTransform.cs
@@ -9,10 +9,10 @@
namespace Microsoft.ML.Runtime.Api
{
- // REVIEW: the current interface to 'state' object may be inadequate: instead of insisting on
+ // REVIEW: the current interface to 'state' object may be inadequate: instead of insisting on
// parameterless constructor, we could take a delegate that would create the state per cursor.
///
- /// This transform is similar to , but it allows per-cursor state,
+ /// This transform is similar to , but it allows per-cursor state,
/// as well as the ability to 'accept' or 'filter out' some rows of the supplied .
/// The downside is that the provided lambda is eagerly called on every row (not lazily when needed), and
/// parallel cursors are not allowed.
@@ -38,8 +38,8 @@ internal sealed class StatefulFilterTransform : LambdaTransf
private static string RegistrationName { get { return string.Format(RegistrationNameTemplate, typeof(TSrc).FullName, typeof(TDst).FullName); } }
///
- /// Create a filter transform that is savable iff and are
- /// not null.
+ /// Create a filter transform that is savable iff and are
+ /// not null.
///
/// The host environment
/// The dataview upon which we construct the transform
@@ -51,7 +51,7 @@ internal sealed class StatefulFilterTransform : LambdaTransf
/// A function that given the serialization stream and a data view, returns
/// an . The intent is, this returned object should itself be a
/// , but this is not strictly necessary. This delegate should be
- /// a static non-lambda method that this assembly can legally call. May be null simultaneously with
+ /// a static non-lambda method that this assembly can legally call. May be null simultaneously with
/// .
/// The schema definition overrides for
/// The schema definition overrides for
diff --git a/src/Microsoft.ML.Api/TypedCursor.cs b/src/Microsoft.ML.Api/TypedCursor.cs
index 2ba9eeb23a..f6ebaf687f 100644
--- a/src/Microsoft.ML.Api/TypedCursor.cs
+++ b/src/Microsoft.ML.Api/TypedCursor.cs
@@ -57,7 +57,7 @@ public interface ICursorable
///
/// Implementation of the strongly typed Cursorable.
- /// Similarly to the 'DataView{T}, this class uses IL generation to create the 'poke' methods that
+ /// Similarly to the 'DataView{T}, this class uses IL generation to create the 'poke' methods that
/// write directly into the fields of the user-defined type.
///
internal sealed class TypedCursorable : ICursorable
@@ -437,7 +437,7 @@ private Action GenerateSetter(IRow input, int index, InternalSchemaDefinit
}
// REVIEW: The converting getter invokes a type conversion delegate on every call, so it's inherently slower
- // than the 'direct' getter. We don't have good indication of this to the user, and the selection
+ // than the 'direct' getter. We don't have good indication of this to the user, and the selection
// of affected types is pretty arbitrary (signed integers and bools, but not uints and floats).
private Action CreateConvertingVBufferSetter(IRow input, int col, Delegate poke, Delegate peek, Func convert)
{
diff --git a/src/Microsoft.ML.Core/CommandLine/CmdParser.cs b/src/Microsoft.ML.Core/CommandLine/CmdParser.cs
index b9b9506cf9..eb85fcce12 100644
--- a/src/Microsoft.ML.Core/CommandLine/CmdParser.cs
+++ b/src/Microsoft.ML.Core/CommandLine/CmdParser.cs
@@ -493,7 +493,7 @@ public static string ArgumentsUsage(IHostEnvironment env, Type type, object defa
#if CORECLR
///
- /// Fix the window width for the Core build to remove the kernel32.dll dependency.
+ /// Fix the window width for the Core build to remove the kernel32.dll dependency.
///
///
public static int GetConsoleWindowWidth()
@@ -620,7 +620,7 @@ private static ArgumentInfo GetArgumentInfo(Type type, object defaults)
string[] nicks;
// Semantics of ShortName:
// The string provided represents an array of names separated by commas and spaces, once empty entries are removed.
- // 'null' or a singleton array with containing only the long field name means "use the default short name",
+ // 'null' or a singleton array containing only the long field name means "use the default short name",
// and is represented by the null 'nicks' array.
// 'String.Empty' or a string containing only spaces and commas means "no short name", and is represented by an empty 'nicks' array.
if (attr.ShortName == null)
@@ -1666,7 +1666,7 @@ public bool Finish(CmdParser owner, ArgValue val, object destination)
}
else if (IsMultiSubComponent)
{
- // REVIEW: the kind should not be separated from settings: everything related
+ // REVIEW: the kind should not be separated from settings: everything related
// to one item should go into one value, not multiple values
if (IsTaggedCollection)
{
diff --git a/src/Microsoft.ML.Core/ComponentModel/ComponentCatalog.cs b/src/Microsoft.ML.Core/ComponentModel/ComponentCatalog.cs
index 28666e7f44..3b56e8bb36 100644
--- a/src/Microsoft.ML.Core/ComponentModel/ComponentCatalog.cs
+++ b/src/Microsoft.ML.Core/ComponentModel/ComponentCatalog.cs
@@ -385,7 +385,7 @@ private static void CacheLoadedAssemblies()
{
if (_assemblyQueue == null)
{
- // Create the loaded assembly queue and dictionary, set up the AssemblyLoad / AssemblyResolve
+ // Create the loaded assembly queue and dictionary, set up the AssemblyLoad / AssemblyResolve
// event handlers and populate the queue / dictionary with all assemblies that are currently loaded.
Contracts.Assert(_assemblyQueue == null);
Contracts.Assert(_loadedAssemblies == null);
@@ -413,7 +413,7 @@ private static void CacheLoadedAssemblies()
// Load all assemblies in our directory.
var moduleName = typeof(ComponentCatalog).Module.FullyQualifiedName;
- // If were are loaded in the context of SQL CLR then the FullyQualifiedName and Name properties are set to
+ // If we are loaded in the context of SQL CLR then the FullyQualifiedName and Name properties are set to
// string "" and we skip scanning current directory.
if (moduleName != "")
{
@@ -451,7 +451,7 @@ private static void CacheLoadedAssemblies()
#if TRACE_ASSEMBLY_LOADING
// The "" no-op argument is necessary because WriteLine has multiple overloads, and with two strings
- // it will be the one that is message/category, rather than format string with
+ // it will be the one that is message/category, rather than format string with
System.Diagnostics.Debug.WriteLine("*** Caching classes in {0}", assembly.FullName, "");
#endif
int added = 0;
diff --git a/src/Microsoft.ML.Core/Data/ColumnType.cs b/src/Microsoft.ML.Core/Data/ColumnType.cs
index 780ef7a7d7..0cff911e77 100644
--- a/src/Microsoft.ML.Core/Data/ColumnType.cs
+++ b/src/Microsoft.ML.Core/Data/ColumnType.cs
@@ -325,7 +325,7 @@ public static PrimitiveType FromKind(DataKind kind)
///
public sealed class TextType : PrimitiveType
{
- private volatile static TextType _instance;
+ private static volatile TextType _instance;
public static TextType Instance
{
get
@@ -370,7 +370,7 @@ private NumberType(DataKind kind, string name)
Contracts.Assert(IsNumber);
}
- private volatile static NumberType _instI1;
+ private static volatile NumberType _instI1;
public static NumberType I1
{
get
@@ -381,7 +381,7 @@ public static NumberType I1
}
}
- private volatile static NumberType _instU1;
+ private static volatile NumberType _instU1;
public static NumberType U1
{
get
@@ -392,7 +392,7 @@ public static NumberType U1
}
}
- private volatile static NumberType _instI2;
+ private static volatile NumberType _instI2;
public static NumberType I2
{
get
@@ -403,7 +403,7 @@ public static NumberType I2
}
}
- private volatile static NumberType _instU2;
+ private static volatile NumberType _instU2;
public static NumberType U2
{
get
@@ -414,7 +414,7 @@ public static NumberType U2
}
}
- private volatile static NumberType _instI4;
+ private static volatile NumberType _instI4;
public static NumberType I4
{
get
@@ -425,7 +425,7 @@ public static NumberType I4
}
}
- private volatile static NumberType _instU4;
+ private static volatile NumberType _instU4;
public static NumberType U4
{
get
@@ -436,7 +436,7 @@ public static NumberType U4
}
}
- private volatile static NumberType _instI8;
+ private static volatile NumberType _instI8;
public static NumberType I8
{
get
@@ -447,7 +447,7 @@ public static NumberType I8
}
}
- private volatile static NumberType _instU8;
+ private static volatile NumberType _instU8;
public static NumberType U8
{
get
@@ -458,7 +458,7 @@ public static NumberType U8
}
}
- private volatile static NumberType _instUG;
+ private static volatile NumberType _instUG;
public static NumberType UG
{
get
@@ -469,7 +469,7 @@ public static NumberType UG
}
}
- private volatile static NumberType _instR4;
+ private static volatile NumberType _instR4;
public static NumberType R4
{
get
@@ -480,7 +480,7 @@ public static NumberType R4
}
}
- private volatile static NumberType _instR8;
+ private static volatile NumberType _instR8;
public static NumberType R8
{
get
@@ -496,7 +496,7 @@ public static NumberType Float
get { return R4; }
}
- public new static NumberType FromKind(DataKind kind)
+ public static new NumberType FromKind(DataKind kind)
{
switch (kind)
{
@@ -557,7 +557,7 @@ public override string ToString()
///
public sealed class BoolType : PrimitiveType
{
- private volatile static BoolType _instance;
+ private static volatile BoolType _instance;
public static BoolType Instance
{
get
@@ -589,7 +589,7 @@ public override string ToString()
public sealed class DateTimeType : PrimitiveType
{
- private volatile static DateTimeType _instance;
+ private static volatile DateTimeType _instance;
public static DateTimeType Instance
{
get
@@ -621,7 +621,7 @@ public override string ToString()
public sealed class DateTimeZoneType : PrimitiveType
{
- private volatile static DateTimeZoneType _instance;
+ private static volatile DateTimeZoneType _instance;
public static DateTimeZoneType Instance
{
get
@@ -656,7 +656,7 @@ public override string ToString()
///
public sealed class TimeSpanType : PrimitiveType
{
- private volatile static TimeSpanType _instance;
+ private static volatile TimeSpanType _instance;
public static TimeSpanType Instance
{
get
@@ -692,11 +692,11 @@ public override string ToString()
/// meaningful. Examples are SSNs, phone numbers, auto-generated/incremented key values,
/// class numbers, etc. For example, in multi-class classification, the label is typically
/// a class number which is naturally a KeyType.
- ///
+ ///
/// KeyTypes can be contiguous (the class number example), in which case they can have
/// a cardinality/Count. For non-contiguous KeyTypes the Count property returns zero.
/// Any KeyType (contiguous or not) can have a Min value. The Min value is always >= 0.
- ///
+ ///
/// Note that the representation value does not necessarily match the logical value.
/// For example, if a KeyType has range 1000-5000, then it has a Min of 1000, Count
/// of 4001, but the representational values are 1-4001. The representation value zero
@@ -951,7 +951,7 @@ public bool IsSubtypeOf(VectorType other)
if (other == null)
return false;
- // REVIEW: Perhaps we should allow the case when _itemType is
+ // REVIEW: Perhaps we should allow the case when _itemType is
// a sub-type of other._itemType (in particular for key types)
if (!_itemType.Equals(other._itemType))
return false;
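The modifier reordering earlier in this file (`volatile static` to `static volatile`) does not change behavior; the fields implement lazily created singleton type instances published through a volatile field. A self-contained illustration of that pattern (not the actual TextType/NumberType code, which may also guard creation with Interlocked):

```csharp
// Self-contained illustration of the pattern behind the 'static volatile' fields above.
public sealed class ExampleType
{
    private static volatile ExampleType _instance;

    public static ExampleType Instance
    {
        get
        {
            // Benign race: two threads may both construct an instance, but the volatile
            // field guarantees callers never observe a partially initialized object.
            if (_instance == null)
                _instance = new ExampleType();
            return _instance;
        }
    }

    private ExampleType()
    {
    }
}
```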
diff --git a/src/Microsoft.ML.Core/Data/DataKind.cs b/src/Microsoft.ML.Core/Data/DataKind.cs
index 358227399b..32325f44a1 100644
--- a/src/Microsoft.ML.Core/Data/DataKind.cs
+++ b/src/Microsoft.ML.Core/Data/DataKind.cs
@@ -30,7 +30,7 @@ public enum DataKind : byte
Num = R4,
TX = 11,
-#pragma warning disable TLC_GeneralName // The data kind enum has its own logic, independnet of C# naming conventions.
+#pragma warning disable MSML_GeneralName // The data kind enum has its own logic, independent of C# naming conventions.
TXT = TX,
Text = TX,
@@ -46,7 +46,7 @@ public enum DataKind : byte
UG = 16, // Unsigned 16-byte integer.
U16 = UG,
-#pragma warning restore TLC_GeneralName
+#pragma warning restore MSML_GeneralName
}
///
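The only change in this file is the diagnostic prefix (TLC_ to MSML_) used by the new analyzer; suppressing the rule around intentional violations works the same way. A small sketch using the MSML_GeneralName id referenced above (the enum members are illustrative):

```csharp
// Sketch of suppressing an analyzer diagnostic around intentional naming violations.
public enum ExampleKind : byte
{
    R4 = 9,
    Num = R4,

#pragma warning disable MSML_GeneralName // Short aliases intentionally break the naming rule.
    TX = 11,
    TXT = TX,
#pragma warning restore MSML_GeneralName
}
```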
diff --git a/src/Microsoft.ML.Core/Data/DateTime.cs b/src/Microsoft.ML.Core/Data/DateTime.cs
index 52b30b5bb6..d11be2a494 100644
--- a/src/Microsoft.ML.Core/Data/DateTime.cs
+++ b/src/Microsoft.ML.Core/Data/DateTime.cs
@@ -230,7 +230,7 @@ public DvDateTimeZone(DvDateTime dt, DvTimeSpan offset)
/// are within the valid range, and returns a DvDateTime representing the UTC time (dateTime-offset).
///
/// The clock time
- /// The offset. This value is assumed to be validated as a legal offset:
+ /// The offset. This value is assumed to be validated as a legal offset:
/// a value in whole minutes, between -14 and 14 hours.
/// The UTC DvDateTime representing the input clock time minus the offset
private static DvDateTime ValidateDate(DvDateTime dateTime, ref DvInt2 offset)
diff --git a/src/Microsoft.ML.Core/Data/ICursor.cs b/src/Microsoft.ML.Core/Data/ICursor.cs
index 264eaa55bb..e1efc842f4 100644
--- a/src/Microsoft.ML.Core/Data/ICursor.cs
+++ b/src/Microsoft.ML.Core/Data/ICursor.cs
@@ -18,7 +18,7 @@ public interface ICounted
/// This is incremented for ICursor when the underlying contents changes, giving clients a way to detect change.
/// Generally it's -1 when the object is in an invalid state. In particular, for an , this is -1
/// when the is or .
- ///
+ ///
/// Note that this position is not position within the underlying data, but position of this cursor only.
/// If one, for example, opened a set of parallel streaming cursors, or a shuffled cursor, each such cursor's
/// first valid entry would always have position 0.
@@ -30,7 +30,7 @@ public interface ICounted
/// batch numbers should be non-decreasing. Furthermore, any given batch number should only appear in one
/// of the streams. Order is determined by batch number. The reconciler ensures that each stream (that is
/// still active) has at least one item available, then takes the item with the smallest batch number.
- ///
+ ///
/// Note that there is no suggestion that the batches for a particular entry will be consistent from
/// cursoring to cursoring, except for the consistency in resulting in the same overall ordering. The same
/// entry could have different batch numbers from one cursoring to another. There is also no requirement
@@ -45,7 +45,7 @@ public interface ICounted
/// will produce the same data as a serial cursor or any other shuffled cursor, only shuffled. The ID
/// exists for applications that need to reconcile which entry is actually which. Ideally this ID should
/// be unique, but for practical reasons, it suffices if collisions are simply extremely improbable.
- ///
+ ///
/// Note that this ID, while it must be consistent for multiple streams according to the semantics
/// above, is not considered part of the data per se. So, to take the example of a data view specifically,
/// a single data view must render consistent IDs across all cursorings, but there is no suggestion at
@@ -77,7 +77,7 @@ public interface ICursor : ICounted, IDisposable
/// Returns the state of the cursor. Before the first call to or
/// this should be . After
/// any call to those move functions that returns true, this should return
- /// ,
+ /// ,
///
CursorState State { get; }
diff --git a/src/Microsoft.ML.Core/Data/IDataView.cs b/src/Microsoft.ML.Core/Data/IDataView.cs
index db83c15fd9..052a07dc9e 100644
--- a/src/Microsoft.ML.Core/Data/IDataView.cs
+++ b/src/Microsoft.ML.Core/Data/IDataView.cs
@@ -89,7 +89,7 @@ public interface IDataView : ISchematized
/// call. This indicates, that the transform does not YET know the number of rows, but
/// may in the future. If lazy is false, then this is permitted to do some work (no more
/// that it would normally do for cursoring) to determine the number of rows.
- ///
+ ///
/// Most components will return the same answer whether lazy is true or false. Some, like
/// a cache, might return null until the cache is fully populated (when lazy is true). When
/// lazy is false, such a cache would block until the cache was populated.
@@ -110,7 +110,7 @@ public interface IDataView : ISchematized
/// has no recommendation, and the implementation should have some default behavior to cover
/// this case. Note that this is strictly a recommendation: it is entirely possible that
/// an implementation can return a different number of cursors.
- ///
+ ///
/// The cursors should return the same data as returned through
/// , except partitioned: no two cursors
/// should return the "same" row as would have been returned through the regular serial cursor,
diff --git a/src/Microsoft.ML.Core/Data/IHostEnvironment.cs b/src/Microsoft.ML.Core/Data/IHostEnvironment.cs
index 7589ef13ad..b463e52a8e 100644
--- a/src/Microsoft.ML.Core/Data/IHostEnvironment.cs
+++ b/src/Microsoft.ML.Core/Data/IHostEnvironment.cs
@@ -62,7 +62,7 @@ public interface IHostEnvironment : IChannelProvider, IProgressChannelProvider
/// Note that IFileHandle derives from IDisposable. Clients may dispose the IFileHandle when it is
/// no longer needed, but they are not required to. The host environment should track all temp file
/// handles and ensure that they are disposed properly when the environment is "shut down".
- ///
+ ///
/// The suffix and prefix are optional. A common use for suffix is to specify an extension, eg, ".txt".
/// The use of suffix and prefix, including whether they have any effect, is up to the host environment.
///
diff --git a/src/Microsoft.ML.Core/Data/IMlState.cs b/src/Microsoft.ML.Core/Data/IMlState.cs
index 98c0e8e5aa..52b0828256 100644
--- a/src/Microsoft.ML.Core/Data/IMlState.cs
+++ b/src/Microsoft.ML.Core/Data/IMlState.cs
@@ -5,7 +5,7 @@
namespace Microsoft.ML.Runtime.EntryPoints
{
///
- /// Dummy interface to allow reference to the AutoMlState object in the C# API (since AutoMlState
+ /// Dummy interface to allow reference to the AutoMlState object in the C# API (since AutoMlState
/// has things that reference C# API, leading to circular dependency). Makes state object an opaque
/// black box to the graph. The macro itself will then case to the concrete type.
///
diff --git a/src/Microsoft.ML.Core/Data/IProgressChannel.cs b/src/Microsoft.ML.Core/Data/IProgressChannel.cs
index b5bae12c0b..0f673d9b2a 100644
--- a/src/Microsoft.ML.Core/Data/IProgressChannel.cs
+++ b/src/Microsoft.ML.Core/Data/IProgressChannel.cs
@@ -10,7 +10,7 @@ namespace Microsoft.ML.Runtime
/// This is a factory interface for .
/// Both and implement this interface,
/// to allow for nested progress reporters.
- ///
+ ///
/// REVIEW: make implement this, instead of the environment?
///
public interface IProgressChannelProvider
@@ -24,10 +24,10 @@ public interface IProgressChannelProvider
///
/// A common interface for progress reporting.
/// It is expected that the progress channel interface is used from only one thread.
- ///
+ ///
/// Supported workflow:
/// 1) Create the channel via .
- /// 2) Call as many times as desired (including 0).
+ /// 2) Call as many times as desired (including 0).
/// Each call to supersedes the previous one.
/// 3) Report checkpoints (0 or more) by calling .
/// 4) Repeat steps 2-3 as often as necessary.
@@ -39,13 +39,13 @@ public interface IProgressChannel : IProgressChannelProvider, IDisposable
/// Set up the reporting structure:
/// - Set the 'header' of the progress reports, defining which progress units and metrics are going to be reported.
/// - Provide a thread-safe delegate to be invoked whenever anyone needs to know the progress.
- ///
+ ///
/// It is acceptable to call multiple times (or none), regardless of whether the calculation is running
- /// or not. Because of synchronization, the computation should not deny calls to the 'old'
+ /// or not. Because of synchronization, the computation should not deny calls to the 'old'
/// delegates even after a new one is provided.
///
/// The header object.
- /// The delegate to provide actual progress. The parameter of
+ /// The delegate to provide actual progress. The parameter of
/// the delegate will correspond to the provided .
void SetHeader(ProgressHeader header, Action fillAction);
@@ -53,10 +53,10 @@ public interface IProgressChannel : IProgressChannelProvider, IDisposable
/// Submit a 'checkpoint' entry. These entries are guaranteed to be delivered to the progress listener,
/// if it is interested. Typically, this would contain some intermediate metrics, that are only calculated
/// at certain moments ('checkpoints') of the computation.
- ///
+ ///
/// For example, SDCA may report a checkpoint every time it computes the loss, or LBFGS may report a checkpoint
/// every iteration.
- ///
+ ///
/// The only parameter, , is interpreted in the following fashion:
/// * First MetricNames.Length items, if present, are metrics.
/// * Subsequent ProgressNames.Length items, if present, are progress units.
@@ -92,11 +92,11 @@ public sealed class ProgressHeader
/// progress or metrics to report, it is always better to report them.
///
/// The metrics that the calculation reports. These are completely independent, and there
- /// is no contract on whether the metric values should increase or not. As naming convention,
+ /// is no contract on whether the metric values should increase or not. As naming convention,
/// can have multiple words with spaces, and should be title-cased.
/// The names of the progress units, listed from least granular to most granular.
/// The idea is that the progress should be lexicographically increasing (like [0,0], [0,10], [1,0], [1,15], [2,5] etc.).
- /// As naming convention, should be lower-cased and typically plural
+ /// As naming convention, should be lower-cased and typically plural
/// (e.g. iterations, clusters, examples).
public ProgressHeader(string[] metricNames, string[] unitNames)
{
@@ -108,7 +108,7 @@ public ProgressHeader(string[] metricNames, string[] unitNames)
}
///
- /// A constructor for no metrics, just progress units. As naming convention, should be lower-cased
+ /// A constructor for no metrics, just progress units. As naming convention, should be lower-cased
/// and typically plural (e.g. iterations, clusters, examples).
///
public ProgressHeader(params string[] unitNames)
@@ -118,7 +118,7 @@ public ProgressHeader(params string[] unitNames)
}
///
- /// A metric/progress holder item.
+ /// A metric/progress holder item.
///
public interface IProgressEntry
{
@@ -130,7 +130,7 @@ public interface IProgressEntry
///
/// Set the progress value for the index to ,
- /// and the limit value to . If is a NAN, it is set to null instead.
+ /// and the limit value to . If is a NAN, it is set to null instead.
///
void SetProgress(int index, Double value, Double lim);
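The numbered workflow above (SetHeader once or more, then zero or more Checkpoint calls) can be sketched as follows; `ProgressHeader(params string[] unitNames)` and `SetProgress(index, value, lim)` appear in this file, while the fill-delegate parameter type and the Checkpoint argument list are assumptions:

```csharp
// Hedged sketch of the SetHeader/Checkpoint workflow described above.
using Microsoft.ML.Runtime;

public static class ProgressSketch
{
    public static void Train(IProgressChannel pch, int maxIterations)
    {
        int iteration = 0;

        // Step 2: declare the progress units and how to fill in current progress on demand.
        pch.SetHeader(new ProgressHeader("iterations"),
            entry => entry.SetProgress(0, iteration, maxIterations));

        for (iteration = 1; iteration <= maxIterations; iteration++)
        {
            // ... one unit of work per iteration ...

            // Step 3: report a checkpoint (metrics first, then progress units).
            pch.Checkpoint(iteration);
        }
    }
}
```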
diff --git a/src/Microsoft.ML.Core/Data/ISchemaBindableMapper.cs b/src/Microsoft.ML.Core/Data/ISchemaBindableMapper.cs
index 466611c11a..6adac55f1b 100644
--- a/src/Microsoft.ML.Core/Data/ISchemaBindableMapper.cs
+++ b/src/Microsoft.ML.Core/Data/ISchemaBindableMapper.cs
@@ -9,15 +9,15 @@ namespace Microsoft.ML.Runtime.Data
{
///
/// A mapper that can be bound to a (which is an ISchema, with mappings from column kinds
- /// to columns). Binding an to a produces an
+ /// to columns). Binding an to a produces an
/// , which is an interface that has methods to return the names and indices of the input columns
/// needed by the mapper to compute its output. The is an extension to this interface, that
- /// can also produce an output IRow given an input IRow. The IRow produced generally contains only the output columns of the mapper, and not
+ /// can also produce an output IRow given an input IRow. The IRow produced generally contains only the output columns of the mapper, and not
/// the input columns (but there is nothing preventing an from mapping input columns directly to outputs).
- /// This interface is implemented by wrappers of IValueMapper based predictors, which are predictors that take a single
+ /// This interface is implemented by wrappers of IValueMapper based predictors, which are predictors that take a single
/// features column. New predictors can implement directly. Implementing
/// includes implementing a corresponding (or ) and a corresponding ISchema
- /// for the output schema of the . In case the interface is implemented,
+ /// for the output schema of the . In case the interface is implemented,
/// the SimpleRow class can be used in the method.
///
public interface ISchemaBindableMapper
@@ -54,7 +54,7 @@ public interface ISchemaBoundMapper
///
/// This interface extends with an additional method: . This method
- /// takes an input IRow and a predicate indicating which output columns are active, and returns a new IRow
+ /// takes an input IRow and a predicate indicating which output columns are active, and returns a new IRow
/// containing the output columns.
///
public interface ISchemaBoundRowMapper : ISchemaBoundMapper
@@ -67,11 +67,11 @@ public interface ISchemaBoundRowMapper : ISchemaBoundMapper
///
/// Get an IRow based on the input IRow with the indicated active columns. The active columns are those for which
- /// predicate(col) returns true. The schema of the returned IRow will be the same as the OutputSchema, but getting
+ /// predicate(col) returns true. The schema of the returned IRow will be the same as the OutputSchema, but getting
/// values on inactive columns will throw. Null predicates are disallowed.
/// The schema of input should match the InputSchema.
/// This method creates a live connection between the input IRow and the output IRow. In particular, when the
- /// getters of the output IRow are invoked, they invoke the getters of the input row and base the output values on
+ /// getters of the output IRow are invoked, they invoke the getters of the input row and base the output values on
/// the current values of the input IRow. The output IRow values are re-computed when requested through the getters.
/// The optional disposer is invoked by the cursor wrapping, when it no longer needs the IRow.
/// If no action is needed when the cursor is Disposed, the override should set disposer to null,
@@ -101,7 +101,7 @@ public interface IRowToRowMapper
/// predicate(col) returns true. Getting values on inactive columns will throw. Null predicates are disallowed.
/// The schema of input should match the InputSchema.
/// This method creates a live connection between the input IRow and the output IRow. In particular, when the
- /// getters of the output IRow are invoked, they invoke the getters of the input row and base the output values on
+ /// getters of the output IRow are invoked, they invoke the getters of the input row and base the output values on
/// the current values of the input IRow. The output IRow values are re-computed when requested through the getters.
/// The optional disposer is invoked by the cursor wrapping, when it no longer needs the IRow.
/// If no action is needed when the cursor is Disposed, the override should set disposer to null,
diff --git a/src/Microsoft.ML.Core/Data/ITrainerArguments.cs b/src/Microsoft.ML.Core/Data/ITrainerArguments.cs
index af74a9abfc..e4fdbbdc59 100644
--- a/src/Microsoft.ML.Core/Data/ITrainerArguments.cs
+++ b/src/Microsoft.ML.Core/Data/ITrainerArguments.cs
@@ -6,7 +6,7 @@ namespace Microsoft.ML.Runtime
{
// This is basically a no-op interface put in primarily
// for backward binary compat support for AFx.
- // REVIEW: This interface was removed in TLC 3.0 as part of the
+ // REVIEW: This interface was removed in TLC 3.0 as part of the
// deprecation of the *Factory interfaces, but added back as a temporary
// hack. Remove it asap.
public interface ITrainerArguments
diff --git a/src/Microsoft.ML.Core/Data/MetadataUtils.cs b/src/Microsoft.ML.Core/Data/MetadataUtils.cs
index b0a18f6d18..116d521756 100644
--- a/src/Microsoft.ML.Core/Data/MetadataUtils.cs
+++ b/src/Microsoft.ML.Core/Data/MetadataUtils.cs
@@ -74,9 +74,9 @@ public static class Kinds
///
/// Metadata kind that indicates the ranges within a column that are categorical features.
- /// The value is a vector type of ints with dimension of two. The first dimension
+ /// The value is a vector type of ints with dimension of two. The first dimension
/// represents the number of categorical features and second dimension represents the range
- /// and is of size two. The range has start and end index(both inclusive) of categorical
+ /// and is of size two. The range has start and end index (both inclusive) of categorical
/// slots within that column.
///
public const string CategoricalSlotRanges = "CategoricalSlotRanges";
@@ -156,7 +156,7 @@ public static VectorType GetNamesType(int size)
}
///
- /// Returns a vector type with item type int and the given size.
+ /// Returns a vector type with item type int and the given size.
/// The range count must be a positive integer.
/// This is a standard type for metadata consisting of multiple int values that represent
/// categorical slot ranges with in a column.
@@ -386,12 +386,12 @@ public static bool IsHidden(this ISchema schema, int col)
}
///
- /// The categoricalFeatures is a vector of the indices of categorical features slots.
+ /// The categoricalFeatures is a vector of the indices of categorical features slots.
/// This vector should always have an even number of elements, and the elements should be parsed in groups of two consecutive numbers.
/// So if its value is the range of numbers: 0,2,3,4,8,9
/// look at it as [0,2],[3,4],[8,9].
/// The way to interpret that is: features with indices 0, 1, and 2 are one categorical.
- /// Features with indices 3 and 4 are another categorical. Features 5 and 6 don't appear there, so they are not categoricals.
+ /// Features with indices 3 and 4 are another categorical. Features 5 and 6 don't appear there, so they are not categoricals.
///
public static bool TryGetCategoricalFeatureIndices(ISchema schema, int colIndex, out int[] categoricalFeatures)
{
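An illustrative sketch of how the flattened pair layout described above can be read back (CategoricalRangesDemo and ToRanges are hypothetical names, not part of this patch), e.g. { 0, 2, 3, 4, 8, 9 } becomes [0,2], [3,4], [8,9]:

using System;
using System.Collections.Generic;

internal static class CategoricalRangesDemo
{
    // Turn the flat even-length array of slot indices into inclusive (min, max) ranges.
    public static List<(int Min, int Max)> ToRanges(int[] flat)
    {
        if (flat == null || flat.Length % 2 != 0)
            throw new ArgumentException("Expected an even number of elements.", nameof(flat));
        var ranges = new List<(int Min, int Max)>();
        for (int i = 0; i < flat.Length; i += 2)
            ranges.Add((flat[i], flat[i + 1])); // both endpoints inclusive
        return ranges;
    }

    public static void Main()
    {
        foreach (var (min, max) in ToRanges(new[] { 0, 2, 3, 4, 8, 9 }))
            Console.WriteLine($"slots {min}..{max} form one categorical feature");
    }
}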
diff --git a/src/Microsoft.ML.Core/Data/ProgressReporter.cs b/src/Microsoft.ML.Core/Data/ProgressReporter.cs
index 384e1bfb61..5f9575cca5 100644
--- a/src/Microsoft.ML.Core/Data/ProgressReporter.cs
+++ b/src/Microsoft.ML.Core/Data/ProgressReporter.cs
@@ -202,8 +202,8 @@ private ProgressEntry BuildJointEntry(ProgressEntry rootEntry)
///
/// This is a 'derived' or 'subordinate' progress channel.
- ///
- /// The subordinates' Start/Stop events and checkpoints will not be propagated.
+ ///
+ /// The subordinates' Start/Stop events and checkpoints will not be propagated.
/// When the status is requested, all of the subordinate channels are also invoked,
/// and the resulting metrics are then returned in the order of their 'subordinate level'.
/// If there's more than one channel with the same level, the order is not defined.
@@ -278,7 +278,7 @@ private void Stop()
public void Checkpoint(params Double?[] values)
{
// We are ignoring all checkpoints from subordinates.
- // REVIEW: maybe this could be changed in the future. Right now it seems that
+ // REVIEW: maybe this could be changed in the future. Right now it seems that
// this limitation is reasonable.
}
}
@@ -287,7 +287,7 @@ public void Checkpoint(params Double?[] values)
///
/// This class listens to the progress reporting channels, caches all checkpoints and
/// start/stop events and, on demand, requests current progress on all active calculations.
- ///
+ ///
/// The public methods of this class should only be called from one thread.
///
public sealed class ProgressTracker
@@ -303,7 +303,7 @@ public sealed class ProgressTracker
///
/// For each calculation, its properties.
/// This list is protected by , and it's updated every time a new calculation starts.
- /// The entries are cleaned up when the start and stop events are reported (that is, after the first
+ /// The entries are cleaned up when the start and stop events are reported (that is, after the first
/// pull request after the calculation's 'Stop' event).
///
private readonly List _infos;
@@ -319,8 +319,8 @@ public sealed class ProgressTracker
private readonly HashSet _namesUsed;
///
- /// This class is an 'event log' for one calculation.
- ///
+ /// This class is an 'event log' for one calculation.
+ ///
/// Every time a calculation is 'started', it gets its own log, so if there are multiple 'start' calls,
/// there will be multiple logs.
///
@@ -425,12 +425,12 @@ public void Log(ProgressChannel source, ProgressEvent.EventKind kind, ProgressEn
}
///
- /// Get progress reports from all current calculations.
+ /// Get progress reports from all current calculations.
/// For every calculation the following events will be returned:
/// * A start event.
/// * Each checkpoint.
- /// * If the calculation is finished, the stop event.
- ///
+ /// * If the calculation is finished, the stop event.
+ ///
/// Each of the above events will be returned exactly once.
/// If, for one calculation, there's no events in the above categories, the tracker will
/// request ('pull') the current progress and return this as an event.
@@ -490,14 +490,14 @@ public sealed class ProgressEntry : IProgressEntry
///
/// The actual progress (amount of completed units), in the units that are contained in the header.
/// Parallel to the header's . Null value indicates 'not applicable now'.
- ///
+ ///
/// The computation should not modify these arrays directly, and instead rely on ,
/// and .
///
public readonly Double?[] Progress;
///
- /// The lim values of each progress unit.
+ /// The lim values of each progress unit.
/// Parallel to the header's . Null value indicates unbounded or unknown.
///
public readonly Double?[] ProgressLim;
diff --git a/src/Microsoft.ML.Core/Data/RoleMappedSchema.cs b/src/Microsoft.ML.Core/Data/RoleMappedSchema.cs
index 2dab48fc58..2e35be86b7 100644
--- a/src/Microsoft.ML.Core/Data/RoleMappedSchema.cs
+++ b/src/Microsoft.ML.Core/Data/RoleMappedSchema.cs
@@ -88,10 +88,10 @@ public static ColumnInfo CreateFromIndex(ISchema schema, int index)
///
///
/// Note that instances of this class are, like instances of , immutable.
- ///
+ ///
/// It is often the case that one wishes to bundle the actual data with the role mappings, not just the schema. For
/// that case, please use the class.
- ///
+ ///
/// Note that there is no need for components consuming a or
/// to make use of every defined mapping. Consuming components are also expected to ignore any
/// they do not handle. They may very well however complain if a mapping they wanted to see is not present, or the column(s)
diff --git a/src/Microsoft.ML.Core/Data/RootCursorBase.cs b/src/Microsoft.ML.Core/Data/RootCursorBase.cs
index d5cc611e1d..1ac3858636 100644
--- a/src/Microsoft.ML.Core/Data/RootCursorBase.cs
+++ b/src/Microsoft.ML.Core/Data/RootCursorBase.cs
@@ -6,7 +6,7 @@
namespace Microsoft.ML.Runtime.Data
{
- // REVIEW: Since each cursor will create a channel, it would be great that the RootCursorBase takes
+ // REVIEW: Since each cursor will create a channel, it would be great that the RootCursorBase takes
// ownership of the channel so the derived classes don't have to.
///
diff --git a/src/Microsoft.ML.Core/Data/ServerChannel.cs b/src/Microsoft.ML.Core/Data/ServerChannel.cs
index 9c75c19937..5cde023e69 100644
--- a/src/Microsoft.ML.Core/Data/ServerChannel.cs
+++ b/src/Microsoft.ML.Core/Data/ServerChannel.cs
@@ -26,7 +26,7 @@ public sealed class ServerChannel : ServerChannel.IPendingBundleNotification, ID
private readonly string _identifier;
// This holds the running collection of named delegates, if any. The dictionary itself
- // is lazily initialized only when a listener
+ // is lazily initialized only when a listener
private Dictionary _toPublish;
private Action _onPublish;
private Bundle _published;
diff --git a/src/Microsoft.ML.Core/EntryPoints/EntryPointUtils.cs b/src/Microsoft.ML.Core/EntryPoints/EntryPointUtils.cs
index b94e25c9e3..ad07ec86a5 100644
--- a/src/Microsoft.ML.Core/EntryPoints/EntryPointUtils.cs
+++ b/src/Microsoft.ML.Core/EntryPoints/EntryPointUtils.cs
@@ -35,7 +35,7 @@ public static bool IsValueWithinRange(this TlcModule.RangeAttribute range, objec
Contracts.AssertValue(val);
Func fn = IsValueWithinRange;
// Avoid trying to cast double as float. If range
- // was specified using floats, but value being checked
+ // was specified using floats, but value being checked
// is double, change range to be of type double
if (range.Type == typeof(float) && val is double)
range.CastToDouble();
diff --git a/src/Microsoft.ML.Core/EntryPoints/ModuleArgs.cs b/src/Microsoft.ML.Core/EntryPoints/ModuleArgs.cs
index f991df73f0..8a4ab8ca43 100644
--- a/src/Microsoft.ML.Core/EntryPoints/ModuleArgs.cs
+++ b/src/Microsoft.ML.Core/EntryPoints/ModuleArgs.cs
@@ -15,7 +15,7 @@
namespace Microsoft.ML.Runtime.EntryPoints
{
///
- /// This class defines attributes to annotate module inputs, outputs, entry points etc. when defining
+ /// This class defines attributes to annotate module inputs, outputs, entry points etc. when defining
/// the module interface.
///
public static class TlcModule
@@ -124,7 +124,7 @@ public sealed class OutputAttribute : Attribute
public string Desc { get; set; }
///
- /// The rank order of the output. Because .NET reflection returns members in an unspecfied order, this
+ /// The rank order of the output. Because .NET reflection returns members in an unspecified order, this
/// is the only way to ensure consistency.
///
public Double SortOrder { get; set; }
@@ -544,11 +544,11 @@ public enum DataKind
///
Unknown = 0,
///
- /// Integer, including long.
+ /// Integer, including long.
///
Int,
///
- /// Unsigned integer, including ulong.
+ /// Unsigned integer, including ulong.
///
UInt,
///
@@ -588,11 +588,11 @@ public enum DataKind
///
Enum,
///
- /// An array (0 or more values of the same type, accessible by index).
+ /// An array (0 or more values of the same type, accessible by index).
///
Array,
///
- /// A dictionary (0 or more values of the same type, identified by a unique string key).
+ /// A dictionary (0 or more values of the same type, identified by a unique string key).
/// The underlying C# representation is
///
Dictionary,
@@ -603,7 +603,7 @@ public enum DataKind
///
Component,
///
- /// An C# object that represents state, such as .
+ /// A C# object that represents state, such as .
///
State
}
@@ -682,8 +682,8 @@ protected Optional(bool isExplicit)
/// This is a 'maybe' class that is able to differentiate the cases when the value is set 'explicitly', or 'implicitly'.
/// The idea is that if the default value is specified by the user, in some cases it needs to be treated differently
/// than if it's auto-filled.
- ///
- /// An example is the weight column: the default behavior is to use 'Weight' column if it's present. But if the user explicitly sets
+ ///
+ /// An example is the weight column: the default behavior is to use 'Weight' column if it's present. But if the user explicitly sets
/// the weight column to be 'Weight', we need to actually enforce the presence of the column.
///
/// The type of the value
@@ -719,7 +719,7 @@ public static implicit operator T(Optional optional)
}
///
- /// The implicit conversion from .
+ /// The implicit conversion from .
/// This will assume that the parameter is set 'explicitly'.
///
public static implicit operator Optional(T value)
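A simplified stand-in for the 'maybe' idea described above (Maybe&lt;T&gt; is a hypothetical name chosen to avoid clashing with the real Optional&lt;T&gt;; the real class has more members): the implicit conversion from T marks the value as explicitly set, while the factory method marks it as auto-filled.

using System;

internal sealed class Maybe<T>
{
    public readonly T Value;
    public readonly bool IsExplicit;

    private Maybe(T value, bool isExplicit) { Value = value; IsExplicit = isExplicit; }

    // Auto-filled default, e.g. "use the 'Weight' column if it happens to be present".
    public static Maybe<T> Implicit(T value) => new Maybe<T>(value, isExplicit: false);

    // User-provided value: the conversion marks it as explicitly set.
    public static implicit operator Maybe<T>(T value) => new Maybe<T>(value, isExplicit: true);
}

internal static class MaybeDemo
{
    public static void Main()
    {
        Maybe<string> auto = Maybe<string>.Implicit("Weight"); // default behavior
        Maybe<string> user = "Weight";                         // user typed it: enforce the column's presence
        Console.WriteLine($"{auto.IsExplicit} {user.IsExplicit}"); // False True
    }
}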
diff --git a/src/Microsoft.ML.Core/EntryPoints/ModuleCatalog.cs b/src/Microsoft.ML.Core/EntryPoints/ModuleCatalog.cs
index 93db75c169..60511bfd39 100644
--- a/src/Microsoft.ML.Core/EntryPoints/ModuleCatalog.cs
+++ b/src/Microsoft.ML.Core/EntryPoints/ModuleCatalog.cs
@@ -261,7 +261,7 @@ private bool ScanForComponents(IExceptionContext ectx, Type nestedType)
}
///
- /// The valid names for the components and entry points must consist of letters, digits, underscores and dots,
+ /// The valid names for the components and entry points must consist of letters, digits, underscores and dots,
/// and begin with a letter or digit.
///
private static readonly Regex _nameRegex = new Regex(@"^\w[_\.\w]*$", RegexOptions.Compiled);
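A small check of the same pattern against a few made-up names (the sample names and the NameRegexDemo type are illustrative only):

using System;
using System.Text.RegularExpressions;

internal static class NameRegexDemo
{
    // Same pattern as the catalog's _nameRegex shown above.
    private static readonly Regex NameRegex = new Regex(@"^\w[_\.\w]*$", RegexOptions.Compiled);

    public static void Main()
    {
        foreach (var name in new[] { "Trainers.ExampleBinaryClassifier", "my_component1", "bad name", ".leadingDot" })
            Console.WriteLine($"{name}: {NameRegex.IsMatch(name)}"); // last two do not match
    }
}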
diff --git a/src/Microsoft.ML.Core/Environment/HostEnvironmentBase.cs b/src/Microsoft.ML.Core/Environment/HostEnvironmentBase.cs
index b4d9337695..d4ff5ccd96 100644
--- a/src/Microsoft.ML.Core/Environment/HostEnvironmentBase.cs
+++ b/src/Microsoft.ML.Core/Environment/HostEnvironmentBase.cs
@@ -109,7 +109,7 @@ public interface IMessageDispatcher : IHostEnvironment
///
/// A basic host environment suited for many environments.
- /// This also supports modifying the concurrency factor, provides the ability to subscribe to pipes via the
+ /// This also supports modifying the concurrency factor, provides the ability to subscribe to pipes via the
/// AddListener/RemoveListener methods, and exposes the to
/// query progress.
///
@@ -315,7 +315,7 @@ protected sealed class Dispatcher : Dispatcher
/// This field is actually used as a , which holds the listener actions
/// for all listeners that are currently subscribed. The action itself is an immutable object, so every time
/// any listener subscribes or unsubscribes, the field is replaced with a modified version of the delegate.
- ///
+ ///
/// The field can be null, if no listener is currently subscribed.
///
private volatile Action _listenerAction;
@@ -488,10 +488,8 @@ protected virtual IProgressChannel StartProgressChannelCore(HostBase host, strin
///
protected virtual IFileHandle OpenInputFileCore(IHostEnvironment env, string path)
{
-#pragma warning disable TLC_NoThis // Do not use 'this' keyword for member access
this.AssertValue(env);
this.CheckNonWhiteSpace(path, nameof(path));
-#pragma warning restore TLC_NoThis // Do not use 'this' keyword for member access
if (Master != null)
return Master.OpenInputFileCore(env, path);
return new SimpleFileHandle(env, path, needsWrite: false, autoDelete: false);
@@ -511,10 +509,8 @@ public IFileHandle CreateOutputFile(string path)
///
protected virtual IFileHandle CreateOutputFileCore(IHostEnvironment env, string path)
{
-#pragma warning disable TLC_NoThis // Do not use 'this' keyword for member access
this.AssertValue(env);
this.CheckNonWhiteSpace(path, nameof(path));
-#pragma warning restore TLC_NoThis // Do not use 'this' keyword for member access
if (Master != null)
return Master.CreateOutputFileCore(env, path);
return new SimpleFileHandle(env, path, needsWrite: true, autoDelete: false);
@@ -532,9 +528,7 @@ public IFileHandle CreateTempFile(string suffix = null, string prefix = null)
///
protected IFileHandle CreateAndRegisterTempFile(IHostEnvironment env, string suffix = null, string prefix = null)
{
-#pragma warning disable TLC_NoThis // Do not use 'this' keyword for member access
this.AssertValue(env);
-#pragma warning restore TLC_NoThis // Do not use 'this' keyword for member access
if (Master != null)
return Master.CreateAndRegisterTempFile(env, suffix, prefix);
@@ -556,10 +550,8 @@ protected IFileHandle CreateAndRegisterTempFile(IHostEnvironment env, string suf
protected virtual IFileHandle CreateTempFileCore(IHostEnvironment env, string suffix = null, string prefix = null)
{
-#pragma warning disable TLC_NoThis // Do not use 'this' keyword for member access
this.CheckParam(!HasBadFileCharacters(suffix), nameof(suffix));
this.CheckParam(!HasBadFileCharacters(prefix), nameof(prefix));
-#pragma warning restore TLC_NoThis // Do not use 'this' keyword for member access
Guid guid = Guid.NewGuid();
string path = Path.GetFullPath(Path.Combine(Path.GetTempPath(), prefix + guid.ToString() + suffix));
diff --git a/src/Microsoft.ML.Core/Environment/TlcEnvironment.cs b/src/Microsoft.ML.Core/Environment/TlcEnvironment.cs
index ccf60dc28a..13781c5c11 100644
--- a/src/Microsoft.ML.Core/Environment/TlcEnvironment.cs
+++ b/src/Microsoft.ML.Core/Environment/TlcEnvironment.cs
@@ -225,7 +225,7 @@ public void GetAndPrintAllProgress(ProgressReporting.ProgressTracker progressTra
if (PrintDot())
{
- // We need to print an extended status line. At this point, every event should be
+ // We need to print an extended status line. At this point, every event should be
// a non-checkpoint progress event.
bool needPrepend = entries.Count > 1;
foreach (var ev in entries)
@@ -306,7 +306,7 @@ private void EnsureNewLine(bool isError = false)
return;
// If _err and _out is the same writer, we need to print new line as well.
- // If _out and _err writes to Console.Out and Console.Error respectively,
+ // If _out and _err write to Console.Out and Console.Error respectively,
// in the general user scenario they end up writing to the same underlying stream,
// so write a new line to the stream anyway.
if (isError && _err != _out && (_out != Console.Out || _err != Console.Error))
diff --git a/src/Microsoft.ML.Core/Prediction/ISweeper.cs b/src/Microsoft.ML.Core/Prediction/ISweeper.cs
index b3dd0dc3da..fe887e0ae2 100644
--- a/src/Microsoft.ML.Core/Prediction/ISweeper.cs
+++ b/src/Microsoft.ML.Core/Prediction/ISweeper.cs
@@ -210,8 +210,8 @@ public sealed class RunResult : IRunResult
private readonly bool _isMetricMaximizing;
///
- /// This switch changes the behavior of the CompareTo function, switching the greater than / less than
- /// behavior, depending on if it is set to True.
+ /// This switch changes the behavior of the CompareTo function, switching the greater than / less than
+ /// behavior, depending on if it is set to True.
///
public bool IsMetricMaximizing { get { return _isMetricMaximizing; } }
@@ -267,8 +267,8 @@ IComparable IRunResult.MetricValue
///
/// The metric class, used by smart sweeping algorithms.
- /// Ideally we would like to move towards the new IDataView/ISchematized, this is
- /// just a simple view instead, and it is decoupled from RunResult so we can move
+ /// Ideally we would like to move towards the new IDataView/ISchematized, this is
+ /// just a simple view instead, and it is decoupled from RunResult so we can move
/// in that direction in the future.
///
public sealed class RunMetric
diff --git a/src/Microsoft.ML.Core/Prediction/TrainContext.cs b/src/Microsoft.ML.Core/Prediction/TrainContext.cs
index 3464aa4bc9..be93ce68aa 100644
--- a/src/Microsoft.ML.Core/Prediction/TrainContext.cs
+++ b/src/Microsoft.ML.Core/Prediction/TrainContext.cs
@@ -33,7 +33,6 @@ public sealed class TrainContext
///
public IPredictor InitialPredictor { get; }
-
///
/// Constructor, given a training set and optional other arguments.
///
diff --git a/src/Microsoft.ML.Core/Utilities/BigArray.cs b/src/Microsoft.ML.Core/Utilities/BigArray.cs
index d6c6ef7b9b..ba2e67b0d9 100644
--- a/src/Microsoft.ML.Core/Utilities/BigArray.cs
+++ b/src/Microsoft.ML.Core/Utilities/BigArray.cs
@@ -7,14 +7,14 @@
namespace Microsoft.ML.Runtime.Internal.Utilities
{
///
- /// An array-like data structure that supports storing more than
- /// many entries, up to 0x7FEFFFFF00000L.
- /// The entries are indexed by 64-bit integers, and a single entry can be accessed by
+ /// An array-like data structure that supports storing more than
+ /// many entries, up to 0x7FEFFFFF00000L.
+ /// The entries are indexed by 64-bit integers, and a single entry can be accessed by
/// the indexer if no modifications to the entries are desired, or the
/// method. Efficient looping can be accomplished by calling the method.
- /// This data structure employs the "length and capacity" pattern. The logical length
+ /// This data structure employs the "length and capacity" pattern. The logical length
/// can be retrieved from the property, which can possibly be strictly less
- /// than the total capacity.
+ /// than the total capacity.
///
/// The type of entries.
public sealed class BigArray
@@ -38,8 +38,8 @@ public sealed class BigArray
// The 2-D jagged array containing the entries.
// Its total size is larger than or equal to _length, but
// less than Length + BlockSize.
- // Each one-dimension subarray has length equal to BlockSize,
- // except for the last one, which has a positive length
+ // Each one-dimension subarray has length equal to BlockSize,
+ // except for the last one, which has a positive length
// less than or equal to BlockSize.
private T[][] _entries;
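An illustrative look at the index arithmetic behind a jagged "big array": a 64-bit index is split into a block index and an offset within the block. BigIndexDemo and the BlockSizeBits value of 20 are assumptions made for this sketch; the real constants live in BigArray&lt;T&gt;.

using System;

internal static class BigIndexDemo
{
    private const int BlockSizeBits = 20;
    private const int BlockSize = 1 << BlockSizeBits;  // 1,048,576 entries per block (assumed)
    private const long BlockSizeMinusOne = BlockSize - 1;

    // Split a 64-bit logical index into (block, offset within block).
    public static (int Block, int Offset) Locate(long index)
    {
        if (index < 0)
            throw new ArgumentOutOfRangeException(nameof(index));
        return ((int)(index >> BlockSizeBits), (int)(index & BlockSizeMinusOne));
    }

    public static void Main()
    {
        var (block, offset) = Locate(3_000_000_000L);
        Console.WriteLine($"block {block}, offset {offset}");
    }
}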
@@ -53,13 +53,13 @@ public sealed class BigArray
public long Length { get { return _length; } }
///
- /// Gets or sets the entry at .
+ /// Gets or sets the entry at .
///
///
- /// This indexer is not efficient for looping. If looping access to entries is desired,
+ /// This indexer is not efficient for looping. If looping access to entries is desired,
/// use the method instead.
- /// Note that unlike a normal array, the value returned from this indexer getter cannot be modified
- /// (e.g., by ++ operator or passing into a method as a ref parameter). To modify an entry, use
+ /// Note that unlike a normal array, the value returned from this indexer getter cannot be modified
+ /// (e.g., by ++ operator or passing into a method as a ref parameter). To modify an entry, use
/// the method instead.
///
public T this[long index]
@@ -113,7 +113,7 @@ public BigArray(long size = 0)
public delegate void Visitor(long index, ref T item);
///
- /// Applies a method at a given .
+ /// Applies a method at a given .
///
public void ApplyAt(long index, Visitor manip)
{
@@ -190,16 +190,16 @@ public void FillRange(long min, long lim, T value)
}
///
- /// Resizes the array so that its logical length equals . This method
- /// is more efficient than initialize another array and copy the entries because it preserves
+ /// Resizes the array so that its logical length equals . This method
+ /// is more efficient than initializing another array and copying the entries because it preserves
/// existing blocks. The actual capacity of the array may become larger than .
/// If equals , then no operation is done.
/// If is less than , the array shrinks in size
/// so that both its length and its capacity equal .
/// If is larger than , the array capacity grows
- /// to the smallest integral multiple of that is larger than ,
- /// unless is less than , in which case the capacity
- /// grows to double its current capacity or , which ever is larger,
+ /// to the smallest integral multiple of that is larger than ,
+ /// unless is less than , in which case the capacity
+ /// grows to double its current capacity or , whichever is larger,
/// but up to .
///
public void Resize(long newLength)
@@ -304,7 +304,7 @@ public void TrimCapacity()
}
///
- /// Appends the first elements of to the end.
+ /// Appends the first elements of to the end.
/// This method is thread safe related to calls to (assuming those copy operations
/// are happening over ranges already added), but concurrent calls to
/// should not be attempted. Intended usage is that
@@ -373,10 +373,10 @@ public void AddRange(T[] src, int length)
}
///
- /// Copies the subarray starting from index of length
- /// to the destination array .
- /// Concurrent calls to this method is valid even with one single concurrent call
- /// to .
+ /// Copies the subarray starting from index of length
+ /// to the destination array .
+ /// Concurrent calls to this method are valid even with a single concurrent call
+ /// to .
///
public void CopyTo(long idx, T[] dst, int length)
{
diff --git a/src/Microsoft.ML.Core/Utilities/CharUtils.cs b/src/Microsoft.ML.Core/Utilities/CharUtils.cs
index e459452041..bf7ae4677e 100644
--- a/src/Microsoft.ML.Core/Utilities/CharUtils.cs
+++ b/src/Microsoft.ML.Core/Utilities/CharUtils.cs
@@ -13,8 +13,8 @@ namespace Microsoft.ML.Runtime.Internal.Utilities
public static class CharUtils
{
private const int CharsCount = 0x10000;
- private volatile static char[] _lowerInvariantChars;
- private volatile static char[] _upperInvariantChars;
+ private static volatile char[] _lowerInvariantChars;
+ private static volatile char[] _upperInvariantChars;
private static char[] EnsureLowerInvariant()
{
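A sketch of the lazily built lookup-table idea behind these fields: one entry per UTF-16 code unit, filled once and reused for cheap per-char case conversion. LowerInvariantTable is a hypothetical name, not the class's actual implementation.

internal static class LowerInvariantTable
{
    private const int CharsCount = 0x10000;
    private static volatile char[] _lowerInvariantChars;

    public static char ToLowerInvariant(char c)
    {
        var map = _lowerInvariantChars;
        if (map == null)
        {
            map = new char[CharsCount];
            for (int i = 0; i < CharsCount; i++)
                map[i] = char.ToLowerInvariant((char)i);
            // Benign race: concurrent threads compute identical tables, last write wins.
            _lowerInvariantChars = map;
        }
        return map[c];
    }
}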
diff --git a/src/Microsoft.ML.Core/Utilities/HashArray.cs b/src/Microsoft.ML.Core/Utilities/HashArray.cs
index c76ceb9482..27f0ec9b5d 100644
--- a/src/Microsoft.ML.Core/Utilities/HashArray.cs
+++ b/src/Microsoft.ML.Core/Utilities/HashArray.cs
@@ -243,7 +243,7 @@ private static class HashHelpers
{
// Note: This HashHelpers class was adapted from the BCL code base.
- // This is the maximum prime smaller than Array.MaxArrayLength
+ // This is the maximum prime smaller than Array.MaxArrayLength
public const int MaxPrimeArrayLength = 0x7FEFFFFD;
// Table of prime numbers to use as hash table sizes.
@@ -271,7 +271,7 @@ public static int GetPrime(int min)
return min + 1;
}
- // Returns size of hashtable to grow to.
+ // Returns size of hashtable to grow to.
public static int ExpandPrime(int oldSize)
{
int newSize = 2 * oldSize;
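A sketch of the growth policy shown above: double the size, but clamp to the largest prime below the maximum array length so the table can always be allocated. The naive prime search here stands in for the BCL's precomputed prime table; HashGrowthDemo is an illustrative name.

using System;

internal static class HashGrowthDemo
{
    public const int MaxPrimeArrayLength = 0x7FEFFFFD;

    public static int ExpandPrime(int oldSize)
    {
        int newSize = 2 * oldSize;
        // The (uint) cast also catches overflow into negative values.
        if ((uint)newSize > MaxPrimeArrayLength && oldSize < MaxPrimeArrayLength)
            return MaxPrimeArrayLength;
        return NextPrimeAtLeast(newSize);
    }

    private static int NextPrimeAtLeast(int min)
    {
        for (int candidate = Math.Max(2, min); ; candidate++)
        {
            if (IsPrime(candidate))
                return candidate;
        }
    }

    private static bool IsPrime(int n)
    {
        if (n < 2)
            return false;
        for (int d = 2; (long)d * d <= n; d++)
        {
            if (n % d == 0)
                return false;
        }
        return true;
    }
}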
diff --git a/src/Microsoft.ML.Core/Utilities/MathUtils.cs b/src/Microsoft.ML.Core/Utilities/MathUtils.cs
index 7fd0829708..fb68ee82d6 100644
--- a/src/Microsoft.ML.Core/Utilities/MathUtils.cs
+++ b/src/Microsoft.ML.Core/Utilities/MathUtils.cs
@@ -133,7 +133,7 @@ public static Float Min(Float[] a)
///
/// Finds the first index of the max element of the array.
- /// NaNs are ignored. If all the elements to consider are NaNs, -1 is
+ /// NaNs are ignored. If all the elements to consider are NaNs, -1 is
/// returned. The caller should distinguish in this case between two
/// possibilities:
/// 1) The number of the element to consider is zero.
@@ -147,8 +147,8 @@ public static int ArgMax(Float[] a)
}
///
- /// Finds the first index of the max element of the array.
- /// NaNs are ignored. If all the elements to consider are NaNs, -1 is
+ /// Finds the first index of the max element of the array.
+ /// NaNs are ignored. If all the elements to consider are NaNs, -1 is
/// returned. The caller should distinguish in this case between two
/// possibilities:
/// 1) The number of the element to consider is zero.
@@ -179,7 +179,7 @@ public static int ArgMax(Float[] a, int count)
///
/// Finds the first index of the minimum element of the array.
- /// NaNs are ignored. If all the elements to consider are NaNs, -1 is
+ /// NaNs are ignored. If all the elements to consider are NaNs, -1 is
/// returned. The caller should distinguish in this case between two
/// possibilities:
/// 1) The number of the element to consider is zero.
@@ -194,7 +194,7 @@ public static int ArgMin(Float[] a)
///
/// Finds the first index of the minimum element of the array.
- /// NaNs are ignored. If all the elements to consider are NaNs, -1 is
+ /// NaNs are ignored. If all the elements to consider are NaNs, -1 is
/// returned. The caller should distinguish in this case between two
/// possibilities:
/// 1) The number of the element to consider is zero.
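A sketch matching the ArgMax/ArgMin contract described in the summaries above: NaNs are skipped, and -1 is returned when there is nothing to consider (empty input or all NaN). ArgMaxDemo is an illustrative stand-in, not the library method.

using System;

internal static class ArgMaxDemo
{
    // Returns the first index of the maximum, ignoring NaNs; -1 if nothing to consider.
    public static int ArgMax(float[] a, int count)
    {
        if (a == null || count <= 0)
            return -1;
        int best = -1;
        float bestVal = float.NegativeInfinity;
        for (int i = 0; i < count; i++)
        {
            if (float.IsNaN(a[i]))
                continue;
            if (best < 0 || a[i] > bestVal)
            {
                best = i;
                bestVal = a[i];
            }
        }
        return best;
    }

    public static void Main()
    {
        Console.WriteLine(ArgMax(new[] { float.NaN, 1f, 3f, 3f }, 4)); // 2: first max, NaN ignored
        Console.WriteLine(ArgMax(new[] { float.NaN, float.NaN }, 2));  // -1: all NaN
    }
}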
@@ -331,9 +331,9 @@ public static bool AlmostEqual(Float a, Float b, Float maxRelErr, Float maxAbsEr
return (absDiff / maxAbs) <= maxRelErr;
}
- private readonly static int[] _possiblePrimeMod30 = new int[] { 1, 7, 11, 13, 17, 19, 23, 29 };
- private readonly static double _constantForLogGamma = 0.5 * Math.Log(2 * Math.PI);
- private readonly static double[] _coeffsForLogGamma = { 12.0, -360.0, 1260.0, -1680.0, 1188.0 };
+ private static readonly int[] _possiblePrimeMod30 = new int[] { 1, 7, 11, 13, 17, 19, 23, 29 };
+ private static readonly double _constantForLogGamma = 0.5 * Math.Log(2 * Math.PI);
+ private static readonly double[] _coeffsForLogGamma = { 12.0, -360.0, 1260.0, -1680.0, 1188.0 };
///
/// Returns the log of the gamma function, using the Stirling approximation
@@ -849,7 +849,7 @@ public static Float LnSum(IEnumerable terms)
}
///
- /// Math.Sin returns the input value for inputs with large magnitude. We return NaN instead, for consistency
+ /// Math.Sin returns the input value for inputs with large magnitude. We return NaN instead, for consistency
/// with Math.Sin(infinity).
///
public static double Sin(double a)
@@ -859,7 +859,7 @@ public static double Sin(double a)
}
///
- /// Math.Cos returns the input value for inputs with large magnitude. We return NaN instead, for consistency
+ /// Math.Cos returns the input value for inputs with large magnitude. We return NaN instead, for consistency
/// with Math.Cos(infinity).
///
public static double Cos(double a)
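A sketch of the guarded Sin described above: for very large magnitudes the BCL's Math.Sin can hand back its argument, so the wrapper returns NaN instead, consistent with Math.Sin(double.PositiveInfinity). The cutoff magnitude (1e18) and the GuardedTrigDemo name are assumptions for this sketch, not taken from the source.

using System;

internal static class GuardedTrigDemo
{
    private const double MaxMagnitude = 1e18; // assumed cutoff for the sketch

    public static double Sin(double a)
    {
        return Math.Abs(a) > MaxMagnitude ? double.NaN : Math.Sin(a);
    }

    public static void Main()
    {
        Console.WriteLine(Sin(Math.PI / 2)); // ~1
        Console.WriteLine(Sin(1e300));       // NaN under this sketch's cutoff
    }
}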
diff --git a/src/Microsoft.ML.Core/Utilities/MemUtils.cs b/src/Microsoft.ML.Core/Utilities/MemUtils.cs
index 736ae90892..1dba9205e9 100644
--- a/src/Microsoft.ML.Core/Utilities/MemUtils.cs
+++ b/src/Microsoft.ML.Core/Utilities/MemUtils.cs
@@ -10,7 +10,7 @@ public static class MemUtils
// .Net 4.6's Buffer.MemoryCopy.
// REVIEW: Remove once we're on a version of .NET which includes
// Buffer.MemoryCopy.
- public unsafe static void MemoryCopy(void* source, void* destination, long destinationSizeInBytes, long sourceBytesToCopy)
+ public static unsafe void MemoryCopy(void* source, void* destination, long destinationSizeInBytes, long sourceBytesToCopy)
{
// MemCpy has undefined behavior when handed overlapping source and
// destination buffers.
diff --git a/src/Microsoft.ML.Core/Utilities/MinWaiter.cs b/src/Microsoft.ML.Core/Utilities/MinWaiter.cs
index d29bfe23c1..8c44315ba6 100644
--- a/src/Microsoft.ML.Core/Utilities/MinWaiter.cs
+++ b/src/Microsoft.ML.Core/Utilities/MinWaiter.cs
@@ -12,7 +12,7 @@ namespace Microsoft.ML.Runtime.Internal.Utilities
/// entities of known count, where you want to iteratively provide critical sections
/// for each depending on which comes first, but you do not necessarily know what
/// constitutes "first" until all such entities tell you where they stand in line.
- ///
+ ///
/// The anticipated usage is that whatever entity is using the
/// to synchronize itself, will register itself using
/// so as to unblock any "lower" waiters as soon as it knows what value it needs to
@@ -65,7 +65,7 @@ public MinWaiter(int waiters)
/// point when we actually want to wait. This method itself has the potential to
/// signal other events, if by registering ourselves the waiter becomes aware of
/// the maximum number of waiters, allowing that waiter to enter its critical state.
- ///
+ ///
/// If multiple events are associated with the minimum value, then only one will
/// be signaled, and the rest will remain unsignaled. Which is chosen is undefined.
///
@@ -75,7 +75,7 @@ public ManualResetEventSlim Register(long position)
lock (_waiters)
{
Contracts.Check(_maxWaiters > 0, "All waiters have been retired, Wait should not be called at this point");
- // We should never reach the state
+ // We should never reach the state
Contracts.Assert(_waiters.Count < _maxWaiters);
ev = new WaitStats(position);
// REVIEW: Optimize the case where this is the minimum?
diff --git a/src/Microsoft.ML.Core/Utilities/PathUtils.cs b/src/Microsoft.ML.Core/Utilities/PathUtils.cs
index 74ccec30c0..6698c11f7f 100644
--- a/src/Microsoft.ML.Core/Utilities/PathUtils.cs
+++ b/src/Microsoft.ML.Core/Utilities/PathUtils.cs
@@ -36,19 +36,19 @@ private static string DllDir
/// Attempts to find a file that is expected to be distributed with a TLC component. Searches
/// in the following order:
/// 1. In the customSearchDir directory, if it is provided.
- /// 2. In the custom search directory specified by the
+ /// 2. In the custom search directory specified by the
/// environment variable.
/// 3. In the root folder of the provided assembly.
/// 4. In the folder of this assembly.
/// In each case it searches the file in the directory provided and combined with folderPrefix.
- ///
+ ///
/// If any of these locations contain the file, a full local path will be returned, otherwise this
/// method will return null.
///
/// File name to find
/// folder prefix, relative to the current or customSearchDir
///
- /// Custom directory to search for resources.
+ /// Custom directory to search for resources.
/// If null, the path specified in the environment variable
/// will be used.
///
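A sketch of the four-step probe described above, under stated assumptions: "EXAMPLE_RESOURCE_DIR" is a placeholder, since the real environment-variable name was elided in the summary, and FindFileDemo is an illustrative name.

using System;
using System.IO;
using System.Reflection;

internal static class FindFileDemo
{
    public static string FindExistentFileOrNull(string fileName, string folderPrefix = "",
        string customSearchDir = null, Assembly assemblyForBasePath = null)
    {
        // Probe the candidate directories in the documented order.
        foreach (var dir in new[]
        {
            customSearchDir,
            Environment.GetEnvironmentVariable("EXAMPLE_RESOURCE_DIR"), // placeholder name
            assemblyForBasePath == null ? null : Path.GetDirectoryName(assemblyForBasePath.Location),
            Path.GetDirectoryName(typeof(FindFileDemo).Assembly.Location),
        })
        {
            if (string.IsNullOrEmpty(dir))
                continue;
            var candidate = Path.Combine(dir, folderPrefix, fileName);
            if (File.Exists(candidate))
                return Path.GetFullPath(candidate);
        }
        return null;
    }
}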
diff --git a/src/Microsoft.ML.Core/Utilities/ReservoirSampler.cs b/src/Microsoft.ML.Core/Utilities/ReservoirSampler.cs
index b8006bd943..69b57fea45 100644
--- a/src/Microsoft.ML.Core/Utilities/ReservoirSampler.cs
+++ b/src/Microsoft.ML.Core/Utilities/ReservoirSampler.cs
@@ -9,8 +9,8 @@
namespace Microsoft.ML.Runtime.Internal.Utilities
{
///
- /// This is an interface for creating samples of a requested size from a stream of data of type .
- /// The sample is created in one pass by calling for every data point in the stream. Implementations should have
+ /// This is an interface for creating samples of a requested size from a stream of data of type .
+ /// The sample is created in one pass by calling for every data point in the stream. Implementations should have
/// a delegate for getting the next data point, which is invoked if the current data point should go into the reservoir.
///
public interface IReservoirSampler
@@ -44,8 +44,8 @@ public interface IReservoirSampler
}
///
- /// This class produces a sample without replacement from a stream of data of type .
- /// It is instantiated with a delegate that gets the next data point, and builds a reservoir in one pass by calling
+ /// This class produces a sample without replacement from a stream of data of type .
+ /// It is instantiated with a delegate that gets the next data point, and builds a reservoir in one pass by calling
/// for every data point in the stream. In case the next data point does not get 'picked' into the reservoir, the delegate is not invoked.
/// Sampling is done according to the algorithm in this paper: http://epubs.siam.org/doi/pdf/10.1137/1.9781611972740.53.
///
@@ -117,8 +117,8 @@ public IEnumerable GetSample()
}
///
- /// This class produces a sample with replacement from a stream of data of type .
- /// It is instantiated with a delegate that gets the next data point, and builds a reservoir in one pass by calling
+ /// This class produces a sample with replacement from a stream of data of type .
+ /// It is instantiated with a delegate that gets the next data point, and builds a reservoir in one pass by calling
/// for every data point in the stream. In case the next data point does not get 'picked' into the reservoir, the delegate is not invoked.
/// Sampling is done according to the algorithm in this paper: http://epubs.siam.org/doi/pdf/10.1137/1.9781611972740.53.
///
@@ -237,7 +237,7 @@ public void Lock()
}
///
- /// Gets a reservoir sample with replacement of the elements sampled so far. Users should not change the
+ /// Gets a reservoir sample with replacement of the elements sampled so far. Users should not change the
/// elements returned since multiple elements in the reservoir might be pointing to the same memory.
///
public IEnumerable GetSample()
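For illustration only, a classic "Algorithm R" style reservoir sample without replacement, showing the one-pass Sample()/GetSample() shape the interface describes. The classes above follow the cited paper and take a getter delegate rather than items directly; SimpleReservoir is a hypothetical stand-in.

using System;
using System.Collections.Generic;

internal sealed class SimpleReservoir<T>
{
    private readonly Random _rand;
    private readonly T[] _reservoir;
    private long _seen;

    public SimpleReservoir(int size, int seed = 42)
    {
        _reservoir = new T[size];
        _rand = new Random(seed);
    }

    public void Sample(T item)
    {
        _seen++;
        if (_seen <= _reservoir.Length)
        {
            _reservoir[_seen - 1] = item; // fill the reservoir first
            return;
        }
        long j = (long)(_rand.NextDouble() * _seen);
        if (j < _reservoir.Length)
            _reservoir[j] = item; // item is 'picked' and replaces a previous entry
    }

    public IEnumerable<T> GetSample()
    {
        long count = Math.Min(_seen, _reservoir.Length);
        for (long i = 0; i < count; i++)
            yield return _reservoir[i];
    }
}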
diff --git a/src/Microsoft.ML.Core/Utilities/ResourceManagerUtils.cs b/src/Microsoft.ML.Core/Utilities/ResourceManagerUtils.cs
index ccb4b0c90c..2cfa8c185a 100644
--- a/src/Microsoft.ML.Core/Utilities/ResourceManagerUtils.cs
+++ b/src/Microsoft.ML.Core/Utilities/ResourceManagerUtils.cs
@@ -18,7 +18,7 @@ namespace Microsoft.ML.Runtime.Internal.Utilities
///
public sealed class ResourceManagerUtils
{
- private volatile static ResourceManagerUtils _instance;
+ private static volatile ResourceManagerUtils _instance;
public static ResourceManagerUtils Instance
{
get
@@ -91,7 +91,7 @@ public static string GetUrl(string suffix)
/// The relative url from which to download.
/// This is appended to the url defined in .
/// The name of the file to save.
- /// The directory where the file should be saved to. The file will be saved in a directory with the specified name inside
+ /// The directory where the file should be saved to. The file will be saved in a directory with the specified name inside
/// a folder called "tlc-resources" in the directory.
/// An integer indicating the number of milliseconds to wait before timing out while downloading a resource.
/// The download results, containing the file path where the resource was (or should have been) downloaded to, and an error message
diff --git a/src/Microsoft.ML.Core/Utilities/Stream.cs b/src/Microsoft.ML.Core/Utilities/Stream.cs
index 8b22e46380..41c794e17f 100644
--- a/src/Microsoft.ML.Core/Utilities/Stream.cs
+++ b/src/Microsoft.ML.Core/Utilities/Stream.cs
@@ -979,7 +979,7 @@ public static BitArray ReadBitArray(this BinaryReader reader)
return returnArray;
}
- public unsafe static void ReadBytes(this BinaryReader reader, void* destination, long destinationSizeInBytes, long bytesToRead, ref byte[] work)
+ public static unsafe void ReadBytes(this BinaryReader reader, void* destination, long destinationSizeInBytes, long bytesToRead, ref byte[] work)
{
Contracts.AssertValue(reader);
Contracts.Assert(bytesToRead >= 0);
@@ -1007,7 +1007,7 @@ public unsafe static void ReadBytes(this BinaryReader reader, void* destination,
}
}
- public unsafe static void ReadBytes(this BinaryReader reader, void* destination, long destinationSizeInBytes, long bytesToRead)
+ public static unsafe void ReadBytes(this BinaryReader reader, void* destination, long destinationSizeInBytes, long bytesToRead)
{
byte[] work = null;
ReadBytes(reader, destination, destinationSizeInBytes, bytesToRead, ref work);
@@ -1097,10 +1097,10 @@ public static bool TryGetBuffer(this MemoryStream mem, out ArraySegment bu
// REVIEW: need to plumb IExceptionContext into the method.
///
/// Checks that the directory of the file name passed in already exists.
- /// This is meant to be called before calling an API that creates the file,
+ /// This is meant to be called before calling an API that creates the file,
/// so the file need not exist.
///
- /// An absolute or relative file path, or null to skip the check
+ /// An absolute or relative file path, or null to skip the check
/// (useful for optional user parameters)
/// The user level parameter name, as exposed by the command line help
public static void CheckOptionalUserDirectory(string file, string userArgument)
@@ -1113,7 +1113,7 @@ public static void CheckOptionalUserDirectory(string file, string userArgument)
return;
string dir;
-#pragma warning disable TLC_ContractsNameUsesNameof
+#pragma warning disable MSML_ContractsNameUsesNameof
try
{
// Relative paths are interpreted as local.
@@ -1134,6 +1134,6 @@ public static void CheckOptionalUserDirectory(string file, string userArgument)
if (!Directory.Exists(dir))
throw Contracts.ExceptUserArg(userArgument, "Cannot find directory '{0}'.", dir);
}
-#pragma warning restore TLC_ContractsNameUsesNameof
+#pragma warning restore MSML_ContractsNameUsesNameof
}
}
\ No newline at end of file
diff --git a/src/Microsoft.ML.Core/Utilities/SupervisedBinFinder.cs b/src/Microsoft.ML.Core/Utilities/SupervisedBinFinder.cs
index 00dbf68d2c..a96e8df5c2 100644
--- a/src/Microsoft.ML.Core/Utilities/SupervisedBinFinder.cs
+++ b/src/Microsoft.ML.Core/Utilities/SupervisedBinFinder.cs
@@ -11,12 +11,12 @@
namespace Microsoft.ML.Runtime.Internal.Utilities
{
///
- /// This class performs discretization of (value, label) pairs into bins in a way that minimizes
+ /// This class performs discretization of (value, label) pairs into bins in a way that minimizes
/// the target function "minimum description length".
/// The algorithm is outlined in an article
/// "Multi-Interval Discretization of Continuous-Valued Attributes for Classification Learning"
/// [Fayyad, Usama M.; Irani, Keki B. (1993)] http://ijcai.org/Past%20Proceedings/IJCAI-93-VOL2/PDF/022.pdf
- ///
+ ///
/// The class can be used several times sequentially, it is stateful and not thread-safe.
/// Both Single and Double precision processing are implemented, and they are identical.
///
@@ -117,7 +117,7 @@ public Single[] FindBins(int maxBins, int minBinSize, int nLabels, IList
result[i] = BinFinderBase.GetSplitValue(distinctValues[split - 1], distinctValues[split]);
// Even though distinctValues may contain infinities, the boundaries may not be infinite:
- // GetSplitValue(a,b) only returns +-inf if a==b==+-inf,
+ // GetSplitValue(a,b) only returns +-inf if a==b==+-inf,
// and distinctValues won't contain more than one +inf or -inf.
Contracts.Assert(FloatUtils.IsFinite(result[i]));
}
@@ -195,7 +195,7 @@ public Double[] FindBins(int maxBins, int minBinSize, int nLabels, IList
result[i] = BinFinderBase.GetSplitValue(distinctValues[split - 1], distinctValues[split]);
// Even though distinctValues may contain infinities, the boundaries may not be infinite:
- // GetSplitValue(a,b) only returns +-inf if a==b==+-inf,
+ // GetSplitValue(a,b) only returns +-inf if a==b==+-inf,
// and distinctValues won't contain more than one +inf or -inf.
Contracts.Assert(FloatUtils.IsFinite(result[i]));
}
@@ -259,7 +259,7 @@ public SplitInterval(SupervisedBinFinder binFinder, int min, int lim, bool skipS
Contracts.Assert(leftCount + rightCount == totalCount);
// This term corresponds to the 'fixed cost associated with a split'
- // It's a simplification of a Delta(A,T;S) term calculated in the paper
+ // It's a simplification of a Delta(A,T;S) term calculated in the paper
var delta = logN - binFinder._labelCardinality * (totalEntropy - leftEntropy - rightEntropy);
var curGain = totalCount * totalEntropy // total cost of transmitting non-split content
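A small helper of the kind this split search relies on: Shannon entropy of a label histogram, used to price "transmitting" the labels in each interval. This is an illustrative sketch (EntropyDemo is a made-up name, and the natural log base is an assumption), not the class's actual implementation.

using System;

internal static class EntropyDemo
{
    public static double Entropy(int[] labelCounts)
    {
        long total = 0;
        foreach (int c in labelCounts)
            total += c;
        if (total == 0)
            return 0;
        double entropy = 0;
        foreach (int c in labelCounts)
        {
            if (c == 0)
                continue;
            double p = (double)c / total;
            entropy -= p * Math.Log(p);
        }
        return entropy;
    }

    public static void Main()
    {
        Console.WriteLine(Entropy(new[] { 50, 50 })); // ln 2 ~ 0.693: evenly mixed labels
        Console.WriteLine(Entropy(new[] { 100, 0 })); // 0: pure interval, nothing to gain by splitting
    }
}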
diff --git a/src/Microsoft.ML.Core/Utilities/ThreadUtils.cs b/src/Microsoft.ML.Core/Utilities/ThreadUtils.cs
index e7bc27235f..859ae7b28d 100644
--- a/src/Microsoft.ML.Core/Utilities/ThreadUtils.cs
+++ b/src/Microsoft.ML.Core/Utilities/ThreadUtils.cs
@@ -64,7 +64,7 @@ public sealed class ExceptionMarshaller : IDisposable
private readonly CancellationTokenSource _ctSource;
private readonly object _lock;
- // The stored exception
+ // The stored exception
private string _component;
private Exception _ex;
diff --git a/src/Microsoft.ML.Core/Utilities/Tree.cs b/src/Microsoft.ML.Core/Utilities/Tree.cs
index 880afc4083..7d030cf46c 100644
--- a/src/Microsoft.ML.Core/Utilities/Tree.cs
+++ b/src/Microsoft.ML.Core/Utilities/Tree.cs
@@ -53,7 +53,7 @@ public Tree this[TKey key]
///
/// This is the key for this child node in its parent, if any. If this is not
- /// a child of any parent, that is, it is the root of its own tree, then
+ /// a child of any parent, that is, it is the root of its own tree, then
///
public TKey Key { get { return _key; } }
@@ -129,7 +129,7 @@ public void Add(KeyValuePair> item)
}
///
- /// Adds a node as a child of this node. This will disconnect the
+ /// Adds a node as a child of this node. This will disconnect the
///
///
///
diff --git a/src/Microsoft.ML.Core/Utilities/Utils.cs b/src/Microsoft.ML.Core/Utilities/Utils.cs
index 48993de785..96c23a0fe3 100644
--- a/src/Microsoft.ML.Core/Utilities/Utils.cs
+++ b/src/Microsoft.ML.Core/Utilities/Utils.cs
@@ -898,7 +898,7 @@ private static MethodInfo MarshalInvokeCheckAndCreate(Type genArg, Delegat
/// but whose code depends on some sort of generic type parameter. This utility method exists to make
/// this common pattern more convenient, and also safer so that the arguments, if any, can be type
/// checked at compile time instead of at runtime.
- ///
+ ///
/// Because it is strongly typed, this can only be applied to methods whose return type
/// is known at compile time, that is, that do not depend on the type parameter of the method itself.
///
diff --git a/src/Microsoft.ML.Core/Utilities/VBufferUtils.cs b/src/Microsoft.ML.Core/Utilities/VBufferUtils.cs
index 12d5bfcccb..1be9c77ee4 100644
--- a/src/Microsoft.ML.Core/Utilities/VBufferUtils.cs
+++ b/src/Microsoft.ML.Core/Utilities/VBufferUtils.cs
@@ -349,7 +349,7 @@ public static void Apply(ref VBuffer dst, SlotValueManipulator manip)
/// The vector to modify
/// The slot of the vector to modify
/// The manipulation function
- /// A predicate that returns true if we should skip insertion of a value into
+ /// A predicate that returns true if we should skip insertion of a value into
/// sparse vector if it was default. If the predicate is null, we insert any non-default.
public static void ApplyAt(ref VBuffer dst, int slot, SlotValueManipulator manip, ValuePredicate pred = null)
{
@@ -489,7 +489,7 @@ public static void DensifyFirst(ref VBuffer dst, int denseCount)
}
///
- /// Creates a maybe sparse copy of a VBuffer.
+ /// Creates a maybe sparse copy of a VBuffer.
/// Whether the created copy is sparse or not is determined by the proportion of non-default entries compared to the sparsity parameter.
///
public static void CreateMaybeSparseCopy(ref VBuffer src, ref VBuffer dst, RefPredicate isDefaultPredicate, float sparsityThreshold = SparsityThreshold)
@@ -580,9 +580,9 @@ public static void ApplyWith(ref VBuffer src, ref VBuffer
/// Applies the to each pair of elements
- /// where is defined, in order of index. It stores the result
- /// in another vector. If there is some value at an index in
- /// that is not defined in , that slot value is copied to the
+ /// where is defined, in order of index. It stores the result
+ /// in another vector. If there is some value at an index in
+ /// that is not defined in , that slot value is copied to the
/// corresponding slot in the result vector without any further modification.
/// If either of the vectors are dense, the resulting
/// will be dense. Otherwise, if both are sparse, the output will be sparse iff
@@ -616,7 +616,7 @@ public static void ApplyWithEitherDefined(ref VBuffer src, ref
///
/// Applies the to each pair of elements
/// where either or , has an element
- /// defined at that index. It stores the result in another vector .
+ /// defined at that index. It stores the result in another vector .
/// If either of the vectors are dense, the resulting
/// will be dense. Otherwise, if both are sparse, the output will be sparse iff
/// there is any slot that is not explicitly represented in either vector.
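A sketch of the dense/sparse decision behind CreateMaybeSparseCopy above: count the non-default entries and keep a sparse representation only when their share falls below the sparsity threshold. The threshold value and the SparsityDemo name are assumptions for this sketch.

internal static class SparsityDemo
{
    public const float SparsityThreshold = 0.25f; // assumed value for illustration

    public static bool ShouldBeSparse(float[] dense)
    {
        int nonDefault = 0;
        foreach (var v in dense)
        {
            if (v != 0f)
                nonDefault++;
        }
        return dense.Length > 0 && (float)nonDefault / dense.Length < SparsityThreshold;
    }
}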
diff --git a/src/Microsoft.ML.CpuMath/AlignedArray.cs b/src/Microsoft.ML.CpuMath/AlignedArray.cs
index 1dc8e3ee46..87583a8ef6 100644
--- a/src/Microsoft.ML.CpuMath/AlignedArray.cs
+++ b/src/Microsoft.ML.CpuMath/AlignedArray.cs
@@ -13,7 +13,7 @@ namespace Microsoft.ML.Runtime.Internal.CpuMath
/// To pin and force alignment, call the GetPin method, typically wrapped in a using (since it
/// returns a Pin struct that is IDisposable). From the pin, you can get the IntPtr to pass to
/// native code.
- ///
+ ///
/// The ctor takes an alignment value, which must be a power of two at least sizeof(Float).
///
public sealed class AlignedArray
diff --git a/src/Microsoft.ML.CpuMath/AlignedMatrix.cs b/src/Microsoft.ML.CpuMath/AlignedMatrix.cs
index 5ec9b53cca..67f05ee7cf 100644
--- a/src/Microsoft.ML.CpuMath/AlignedMatrix.cs
+++ b/src/Microsoft.ML.CpuMath/AlignedMatrix.cs
@@ -80,7 +80,7 @@ private void AssertValid()
}
///
- /// The physical AligenedArray items.
+ /// The physical AlignedArray items.
///
public AlignedArray Items { get { return _items; } }
@@ -155,7 +155,7 @@ public void CopyTo(Float[] dst, ref int ivDst)
}
///
- /// Copy the values from this vector starting at slot ivSrc into dst, starting at slot ivDst.
+ /// Copy the values from this vector starting at slot ivSrc into dst, starting at slot ivDst.
/// The number of values that are copied is determined by count.
///
/// The starting index in this vector
@@ -525,7 +525,7 @@ public CpuAlignedMatrixRow(int crow, int ccol, int cbAlign)
public override int ColCountPhy { get { return RunLenPhy; } }
///
- /// Copy the values from this matrix, starting from the row into dst, starting at slot ivDst and advancing ivDst.
+ /// Copy the values from this matrix, starting from the row into dst, starting at slot ivDst and advancing ivDst.
///
/// The starting row in this matrix
/// The destination array
@@ -606,7 +606,7 @@ public void CopyTo(Float[] dst, ref int ivDst)
}
///
- /// Copy the values from this matrix, starting from the row into dst, starting at slot ivDst and advancing ivDst.
+ /// Copy the values from this matrix, starting from the row into dst, starting at slot ivDst and advancing ivDst.
///
/// The starting row in this matrix
/// The destination array
diff --git a/src/Microsoft.ML.CpuMath/Avx.cs b/src/Microsoft.ML.CpuMath/Avx.cs
index 68e751c86b..6dcf898b6f 100644
--- a/src/Microsoft.ML.CpuMath/Avx.cs
+++ b/src/Microsoft.ML.CpuMath/Avx.cs
@@ -7,7 +7,7 @@
namespace Microsoft.ML.Runtime.Internal.CpuMath
{
///
- /// Keep Avx.cs in sync with Sse.cs. When making changes to one, use BeyondCompare or a similar tool
+ /// Keep Avx.cs in sync with Sse.cs. When making changes to one, use BeyondCompare or a similar tool
/// to view diffs and propagate appropriate changes to the other.
///
public static class AvxUtils
@@ -21,7 +21,7 @@ private static bool Compat(AlignedArray a)
return a.CbAlign == CbAlign;
}
- private unsafe static float* Ptr(AlignedArray a, float* p)
+ private static unsafe float* Ptr(AlignedArray a, float* p)
{
Contracts.AssertValue(a);
float* q = p + a.GetBase((long)p);
diff --git a/src/Microsoft.ML.CpuMath/CpuAligenedMathUtils.cs b/src/Microsoft.ML.CpuMath/CpuAligenedMathUtils.cs
index 363c40007b..ad53810ff3 100644
--- a/src/Microsoft.ML.CpuMath/CpuAligenedMathUtils.cs
+++ b/src/Microsoft.ML.CpuMath/CpuAligenedMathUtils.cs
@@ -115,7 +115,7 @@ public static void MatTranTimesSrc(bool add, ICpuFullMatrix mat, ICpuVector src,
public static class GeneralUtils
{
///
- /// Count the number of zero bits in the lonest string of zero's from the lowest significant bit of the input integer.
+ /// Count the number of zero bits in the longest string of zeros from the lowest significant bit of the input integer.
///
/// The input integer
///
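A sketch of counting trailing zero bits, the quantity the summary above describes (e.g. 0b10100 has two trailing zeros). Shown as a simple loop for clarity; the library may well use a faster bit trick, and TrailingZeroDemo is an illustrative name.

using System;

internal static class TrailingZeroDemo
{
    public static int CountTrailingZeros(uint u)
    {
        if (u == 0)
            return 32; // every bit is zero
        int count = 0;
        while ((u & 1) == 0)
        {
            count++;
            u >>= 1;
        }
        return count;
    }

    public static void Main()
    {
        Console.WriteLine(CountTrailingZeros(0b10100)); // 2
        Console.WriteLine(CountTrailingZeros(8));       // 3
    }
}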
diff --git a/src/Microsoft.ML.CpuMath/ICpuBuffer.cs b/src/Microsoft.ML.CpuMath/ICpuBuffer.cs
index e58a453f9f..ad55f5c8c6 100644
--- a/src/Microsoft.ML.CpuMath/ICpuBuffer.cs
+++ b/src/Microsoft.ML.CpuMath/ICpuBuffer.cs
@@ -77,8 +77,8 @@ public interface ICpuFullMatrix : ICpuMatrix
///
/// Zero out the items with the given indices.
- /// The indices contain the logical indices to the vectorized representation of the matrix,
- /// which can be different depending on whether the matrix is row-major or column-major.
+ /// The indices contain the logical indices to the vectorized representation of the matrix,
+ /// which can be different depending on whether the matrix is row-major or column-major.
///
void ZeroItems(int[] indices);
}
diff --git a/src/Microsoft.ML.CpuMath/IntUtils.cs b/src/Microsoft.ML.CpuMath/IntUtils.cs
index b0aed315c3..2492dddaff 100644
--- a/src/Microsoft.ML.CpuMath/IntUtils.cs
+++ b/src/Microsoft.ML.CpuMath/IntUtils.cs
@@ -84,7 +84,7 @@ private static ulong Div64(ulong lo, ulong hi, ulong den, out ulong rem)
return Div64Core(lo, hi, den, out rem);
}
- // REVIEW: on Linux, the hardware divide-by-zero exception is not translated into
+ // REVIEW: on Linux, the hardware divide-by-zero exception is not translated into
// a managed exception properly by CoreCLR so the process will crash. This is a temporary fix
// until CoreCLR addresses this issue.
[DllImport(Thunk.NativePath, CharSet = CharSet.Unicode, EntryPoint = "Div64"), SuppressUnmanagedCodeSecurity]
diff --git a/src/Microsoft.ML.CpuMath/Sse.cs b/src/Microsoft.ML.CpuMath/Sse.cs
index 77be547b69..68e6ee906b 100644
--- a/src/Microsoft.ML.CpuMath/Sse.cs
+++ b/src/Microsoft.ML.CpuMath/Sse.cs
@@ -7,7 +7,7 @@
namespace Microsoft.ML.Runtime.Internal.CpuMath
{
///
- /// Keep Sse.cs in sync with Avx.cs. When making changes to one, use BeyondCompare or a similar tool
+ /// Keep Sse.cs in sync with Avx.cs. When making changes to one, use BeyondCompare or a similar tool
/// to view diffs and propagate appropriate changes to the other.
///
public static class SseUtils
@@ -21,7 +21,7 @@ private static bool Compat(AlignedArray a)
return a.CbAlign == CbAlign;
}
- private unsafe static float* Ptr(AlignedArray a, float* p)
+ private static unsafe float* Ptr(AlignedArray a, float* p)
{
Contracts.AssertValue(a);
float* q = p + a.GetBase((long)p);
diff --git a/src/Microsoft.ML.CpuMath/Thunk.cs b/src/Microsoft.ML.CpuMath/Thunk.cs
index bc23963bbe..d7082c8313 100644
--- a/src/Microsoft.ML.CpuMath/Thunk.cs
+++ b/src/Microsoft.ML.CpuMath/Thunk.cs
@@ -9,7 +9,7 @@
namespace Microsoft.ML.Runtime.Internal.CpuMath
{
- internal unsafe static class Thunk
+ internal static unsafe class Thunk
{
internal const string NativePath = "CpuMathNative";
diff --git a/src/Microsoft.ML.Data/Commands/EvaluateCommand.cs b/src/Microsoft.ML.Data/Commands/EvaluateCommand.cs
index cd2eb464af..77bdf0e32f 100644
--- a/src/Microsoft.ML.Data/Commands/EvaluateCommand.cs
+++ b/src/Microsoft.ML.Data/Commands/EvaluateCommand.cs
@@ -19,7 +19,7 @@
namespace Microsoft.ML.Runtime.Data
{
- // REVIEW: For simplicity (since this is currently the case),
+ // REVIEW: For simplicity (since this is currently the case),
// we assume that all metrics are either numeric, or numeric vectors.
///
/// This class contains information about an overall metric, namely its name and whether it is a vector
@@ -92,7 +92,7 @@ public string GetNameMatch(string input)
public interface IEvaluator
{
///
- /// Compute the aggregate metrics. Return a dictionary from the metric kind
+ /// Compute the aggregate metrics. Return a dictionary from the metric kind
/// (overal/per-fold/confusion matrix/PR-curves etc.), to a data view containing the metric.
///
Dictionary Evaluate(RoleMappedData data);
diff --git a/src/Microsoft.ML.Data/Commands/ScoreCommand.cs b/src/Microsoft.ML.Data/Commands/ScoreCommand.cs
index c353e4a4ec..607bf119d7 100644
--- a/src/Microsoft.ML.Data/Commands/ScoreCommand.cs
+++ b/src/Microsoft.ML.Data/Commands/ScoreCommand.cs
@@ -291,9 +291,9 @@ public static SubComponent GetScorerC
///
/// Given a predictor and an optional scorer SubComponent, produces a compatible ISchemaBindableMapper.
/// First, it tries to instantiate the bindable mapper using the
- /// (this will only succeed if there's a registered BindableMapper creation method with load name equal to the one
+ /// (this will only succeed if there's a registered BindableMapper creation method with load name equal to the one
/// of the scorer).
- /// If the above fails, it checks whether the predictor implements
+ /// If the above fails, it checks whether the predictor implements
/// directly.
/// If this also isn't true, it will create a 'matching' standard mapper.
///
diff --git a/src/Microsoft.ML.Data/Commands/TrainCommand.cs b/src/Microsoft.ML.Data/Commands/TrainCommand.cs
index 1c25275c3e..69370ad3ef 100644
--- a/src/Microsoft.ML.Data/Commands/TrainCommand.cs
+++ b/src/Microsoft.ML.Data/Commands/TrainCommand.cs
@@ -222,9 +222,9 @@ public static string MatchNameOrDefaultOrNull(IExceptionContext ectx, ISchema sc
return userName;
if (userName == defaultName)
return null;
-#pragma warning disable TLC_ContractsNameUsesNameof
+#pragma warning disable MSML_ContractsNameUsesNameof
throw ectx.ExceptUserArg(argName, $"Could not find column '{userName}'");
-#pragma warning restore TLC_ContractsNameUsesNameof
+#pragma warning restore MSML_ContractsNameUsesNameof
}
public static IPredictor Train(IHostEnvironment env, IChannel ch, RoleMappedData data, ITrainer trainer, string name,
@@ -291,7 +291,7 @@ public static bool TryLoadPredictor(IChannel ch, IHostEnvironment env, string in
///
/// Save the model to the output path.
- /// The method saves the loader and the transformations of dataPipe and saves optionally predictor
+ /// The method saves the loader and the transformations of dataPipe and saves optionally predictor
/// and command. It also uses featureColumn, if provided, to extract feature names.
///
/// The host environment to use.
@@ -316,7 +316,7 @@ public static void SaveModel(IHostEnvironment env, IChannel ch, IFileHandle outp
///
/// Save the model to the stream.
- /// The method saves the loader and the transformations of dataPipe and saves optionally predictor
+ /// The method saves the loader and the transformations of dataPipe and saves optionally predictor
/// and command. It also uses featureColumn, if provided, to extract feature names.
///
/// The host environment to use.
@@ -400,7 +400,7 @@ public static void SaveDataPipe(IHostEnvironment env, RepositoryWriter repositor
///
/// Traces back the .Source chain of the transformation pipe up to the moment it no longer can.
- /// Returns all the transforms of and the first data view (a non-transform).
+ /// Returns all the transforms of and the first data view (a non-transform).
///
/// The transformation pipe to traverse.
/// The beginning data view of the transform chain
@@ -413,7 +413,7 @@ private static List BacktrackPipe(IDataView dataPipe, out IDataV
while (dataPipe is IDataTransform xf)
{
// REVIEW: a malicious user could construct a loop in the Source chain, that would
- // cause this method to iterate forever (and throw something when the list overflows). There's
+ // cause this method to iterate forever (and throw something when the list overflows). There's
// no way to insulate from ALL malicious behavior.
transforms.Add(xf);
dataPipe = xf.Source;
diff --git a/src/Microsoft.ML.Data/Data/Combiner.cs b/src/Microsoft.ML.Data/Data/Combiner.cs
index 9a5de27ff6..ee45aee3e3 100644
--- a/src/Microsoft.ML.Data/Data/Combiner.cs
+++ b/src/Microsoft.ML.Data/Data/Combiner.cs
@@ -21,7 +21,7 @@ public abstract class Combiner
public sealed class TextCombiner : Combiner
{
- private volatile static TextCombiner _instance;
+ private static volatile TextCombiner _instance;
public static TextCombiner Instance
{
get
@@ -46,7 +46,7 @@ public override void Combine(ref DvText dst, DvText src)
public sealed class FloatAdder : Combiner
{
- private volatile static FloatAdder _instance;
+ private static volatile FloatAdder _instance;
public static FloatAdder Instance
{
get
@@ -67,7 +67,7 @@ private FloatAdder()
public sealed class R4Adder : Combiner
{
- private volatile static R4Adder _instance;
+ private static volatile R4Adder _instance;
public static R4Adder Instance
{
get
@@ -88,7 +88,7 @@ private R4Adder()
public sealed class R8Adder : Combiner
{
- private volatile static R8Adder _instance;
+ private static volatile R8Adder _instance;
public static R8Adder Instance
{
get
@@ -110,7 +110,7 @@ private R8Adder()
// REVIEW: Delete this!
public sealed class U4Adder : Combiner
{
- private volatile static U4Adder _instance;
+ private static volatile U4Adder _instance;
public static U4Adder Instance
{
get
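The Combiner hunks above only reorder `volatile static` to the analyzer-preferred `static volatile`, but the pattern they touch is the classic lazily-initialized singleton. The getter bodies are not visible in this diff, so the following is only a minimal sketch of how such a `static volatile` field is commonly paired with double-checked locking; the class name, the `_sync` field, and the `Combine` signature are illustrative, not taken from the source.

```csharp
// Illustrative stand-in for the singleton Combiner classes touched above.
public sealed class FloatAdderSketch
{
    private static volatile FloatAdderSketch _instance;
    private static readonly object _sync = new object();

    public static FloatAdderSketch Instance
    {
        get
        {
            if (_instance == null)
            {
                // Double-checked locking: only one thread constructs the instance.
                lock (_sync)
                {
                    if (_instance == null)
                        _instance = new FloatAdderSketch();
                }
            }
            return _instance;
        }
    }

    private FloatAdderSketch() { }

    public void Combine(ref float dst, float src) => dst += src;
}
```

The usual motivation for `volatile` here is that, on weaker memory models, another thread could otherwise observe a non-null `_instance` before the constructor's writes are visible.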
diff --git a/src/Microsoft.ML.Data/Data/Conversion.cs b/src/Microsoft.ML.Data/Data/Conversion.cs
index 974f40c39d..0a9833064a 100644
--- a/src/Microsoft.ML.Data/Data/Conversion.cs
+++ b/src/Microsoft.ML.Data/Data/Conversion.cs
@@ -53,7 +53,7 @@ public sealed class Conversions
// REVIEW: Reconcile implementations with TypeUtils, and clarify the distinction.
// Singleton pattern.
- private volatile static Conversions _instance;
+ private static volatile Conversions _instance;
public static Conversions Instance
{
get
diff --git a/src/Microsoft.ML.Data/Data/DataViewUtils.cs b/src/Microsoft.ML.Data/Data/DataViewUtils.cs
index 4772228fa0..1db4d5ad0a 100644
--- a/src/Microsoft.ML.Data/Data/DataViewUtils.cs
+++ b/src/Microsoft.ML.Data/Data/DataViewUtils.cs
@@ -286,9 +286,9 @@ private sealed class Splitter
private enum ExtraIndex
{
Id,
-#pragma warning disable TLC_GeneralName // Allow for this private enum.
+#pragma warning disable MSML_GeneralName // Allow for this private enum.
_Lim
-#pragma warning restore TLC_GeneralName
+#pragma warning restore MSML_GeneralName
}
private Splitter(ISchema schema)
diff --git a/src/Microsoft.ML.Data/Data/IColumn.cs b/src/Microsoft.ML.Data/Data/IColumn.cs
index 28d6ffd057..2f2f496f99 100644
--- a/src/Microsoft.ML.Data/Data/IColumn.cs
+++ b/src/Microsoft.ML.Data/Data/IColumn.cs
@@ -13,16 +13,16 @@ namespace Microsoft.ML.Runtime.Data
///
/// This interface is an analogy to that encapsulates the contents of a single
/// column.
- ///
+ ///
/// Note that in the same sense that is not thread safe, implementors of this interface
/// by similar token must not be considered thread safe by users of the interface, and by the same token
/// implementors should feel free to write their implementations with the expectation that only one thread
/// will be calling it at a time.
- ///
+ ///
/// Similarly, in the same sense that an can have its values "change under it" by having
/// the underlying cursor move, so too might this item have its values change under it, and they will if
/// they were directly instantiated from a row.
- ///
+ ///
/// Generally actual implementors of this interface should not implement this directly, but instead implement
/// .
///
@@ -495,7 +495,7 @@ public override ValueGetter GetGetter()
///
private sealed class RowColumnRow : IRow
{
- private readonly static DefaultCountedImpl _defCount = new DefaultCountedImpl();
+ private static readonly DefaultCountedImpl _defCount = new DefaultCountedImpl();
private readonly ICounted _counted;
private readonly IColumn[] _columns;
private readonly SchemaImpl _schema;
diff --git a/src/Microsoft.ML.Data/Data/IRowSeekable.cs b/src/Microsoft.ML.Data/Data/IRowSeekable.cs
index c2fb54bf70..3c0bf0db08 100644
--- a/src/Microsoft.ML.Data/Data/IRowSeekable.cs
+++ b/src/Microsoft.ML.Data/Data/IRowSeekable.cs
@@ -6,7 +6,7 @@
namespace Microsoft.ML.Runtime.Data
{
- // REVIEW: Would it be a better apporach to add something akin to CanSeek,
+ // REVIEW: Would it be a better approach to add something akin to CanSeek,
// as we have a CanShuffle? The idea is trying to make IRowSeekable propagate along certain transforms.
///
/// Represents a data view that supports random access to a specific row.
@@ -18,14 +18,14 @@ public interface IRowSeekable : ISchematized
///
/// Represents a row seeker with random access that can retrieve a specific row by the row index.
- /// For IRowSeeker, when the state is valid (that is when MoveTo() returns true), it returns the
- /// current row index. Otherwise it's -1.
+ /// For IRowSeeker, when the state is valid (that is when MoveTo() returns true), it returns the
+ /// current row index. Otherwise it's -1.
///
public interface IRowSeeker : IRow, IDisposable
{
///
/// Moves the seeker to a row at a specific row index.
- /// If the row index specified is out of range (less than zero or not less than the
+ /// If the row index specified is out of range (less than zero or not less than the
/// row count), it returns false and sets its Position property to -1.
///
/// The row index to move to.
diff --git a/src/Microsoft.ML.Data/Data/ITransposeDataView.cs b/src/Microsoft.ML.Data/Data/ITransposeDataView.cs
index 2188c766de..f247bc9859 100644
--- a/src/Microsoft.ML.Data/Data/ITransposeDataView.cs
+++ b/src/Microsoft.ML.Data/Data/ITransposeDataView.cs
@@ -18,7 +18,7 @@ namespace Microsoft.ML.Runtime.Data
/// ). This interface is intended to be implemented by classes that
/// want to provide an option for an alternate way of accessing the data stored in a
/// .
- ///
+ ///
/// The interface only advertises that columns may be accessible in slot-wise fashion. A column
/// is accessible in this fashion iff 's
/// returns a non-null value.
diff --git a/src/Microsoft.ML.Data/Data/RowCursorUtils.cs b/src/Microsoft.ML.Data/Data/RowCursorUtils.cs
index 3f57266b0f..091fe26cb2 100644
--- a/src/Microsoft.ML.Data/Data/RowCursorUtils.cs
+++ b/src/Microsoft.ML.Data/Data/RowCursorUtils.cs
@@ -39,7 +39,7 @@ private static Delegate GetGetterAsDelegateCore(IRow row, int col)
///
/// Given a destination type, IRow, and column index, return a ValueGetter for the column
- /// with a conversion to typeDst, if needed. This is a weakly typed version of
+ /// with a conversion to typeDst, if needed. This is a weakly typed version of
/// .
///
///
@@ -293,7 +293,7 @@ private static ValueGetter> GetVecGetterAsCore(VectorT
///
/// This method returns a small helper delegate that returns whether we are at the start
- /// of a new group, that is, we have just started, or the key-value at indicated column
+ /// of a new group, that is, we have just started, or the key-value at indicated column
/// is different than it was, in the last call. This is practically useful for determining
/// group boundaries. Note that the delegate will return true on the first row.
///
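The comment above describes a helper delegate that reports the start of a new group: true on the first row, and true whenever the key value differs from the previous call. A stand-alone sketch of that idea over a plain value getter (not the actual RowCursorUtils API, which works against an IRow and a column index) might look like this:

```csharp
using System;
using System.Collections.Generic;

// Hypothetical helper: wraps a key getter and returns a delegate that is true on the
// first call and whenever the key differs from the previous call.
public static class GroupBoundary
{
    public static Func<bool> CreateIsNewGroupDelegate<T>(Func<T> getKey, IEqualityComparer<T> comparer = null)
    {
        comparer = comparer ?? EqualityComparer<T>.Default;
        bool first = true;
        T last = default(T);
        return () =>
        {
            T cur = getKey();
            bool isNew = first || !comparer.Equals(cur, last);
            first = false;
            last = cur;
            return isNew;
        };
    }
}
```

The returned delegate is meant to be invoked exactly once per row, in cursor order, which is why it can get away with remembering only the previous key.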
diff --git a/src/Microsoft.ML.Data/DataLoadSave/Binary/BinaryLoader.cs b/src/Microsoft.ML.Data/DataLoadSave/Binary/BinaryLoader.cs
index 4a58e097fb..7bc0a8d2ad 100644
--- a/src/Microsoft.ML.Data/DataLoadSave/Binary/BinaryLoader.cs
+++ b/src/Microsoft.ML.Data/DataLoadSave/Binary/BinaryLoader.cs
@@ -79,7 +79,7 @@ private sealed class TableOfContentsEntry
public readonly ColumnType Type;
///
- /// The compression scheme used on this column's blocks.
+ /// The compression scheme used on this column's blocks.
///
public readonly CompressionKind Compression;
@@ -971,7 +971,7 @@ public void Save(ModelSaveContext ctx)
}
///
- /// Write the parameters of a loader to the save context. Can be called by , where there's no actual
+ /// Write the parameters of a loader to the save context. Can be called by , where there's no actual
/// loader, only default parameters.
///
private static void SaveParameters(ModelSaveContext ctx, int threads, string generatedRowIndexName, Double shuffleBlocks)
@@ -991,7 +991,7 @@ private static void SaveParameters(ModelSaveContext ctx, int threads, string gen
}
///
- /// Save a zero-row dataview that will be used to infer schema information, used in the case
+ /// Save a zero-row dataview that will be used to infer schema information, used in the case
/// where the binary loader is instantiated with no input streams.
///
private static void SaveSchema(IHostEnvironment env, ModelSaveContext ctx, ISchema schema, out int[] unsavableColIndices)
@@ -1017,10 +1017,10 @@ private static void SaveSchema(IHostEnvironment env, ModelSaveContext ctx, ISche
}
///
- /// Given the schema and a model context, save an imaginary instance of a binary loader with the
- /// specified schema. Deserialization from this context should produce a real binary loader that
+ /// Given the schema and a model context, save an imaginary instance of a binary loader with the
+ /// specified schema. Deserialization from this context should produce a real binary loader that
/// has the specified schema.
- ///
+ ///
/// This is used in an API scenario, when the data originates from something other than a loader.
/// Since our model file requires a loader at the beginning, we have to construct a bogus 'binary' loader
/// to begin the pipe with, with the assumption that the user will bypass the loader at deserialization
@@ -1042,9 +1042,9 @@ public static void SaveInstance(IHostEnvironment env, ModelSaveContext ctx, ISch
int[] unsavable;
SaveSchema(env, ctx, schema, out unsavable);
// REVIEW: we silently ignore unsavable columns.
- // This method is invoked only in an API scenario, where we need to save a loader but we only have a schema.
- // In this case, the API user is likely not subscribed to our environment's channels. Also, in this case, the presence of
- // unsavable columns is not necessarily a bad thing: the user typically provides his own data when loading the transforms,
+ // This method is invoked only in an API scenario, where we need to save a loader but we only have a schema.
+ // In this case, the API user is likely not subscribed to our environment's channels. Also, in this case, the presence of
+ // unsavable columns is not necessarily a bad thing: the user typically provides his own data when loading the transforms,
// thus bypassing the bogus loader.
}
diff --git a/src/Microsoft.ML.Data/DataLoadSave/Binary/BinarySaver.cs b/src/Microsoft.ML.Data/DataLoadSave/Binary/BinarySaver.cs
index 7fe9fbbf4a..e2f44df2a4 100644
--- a/src/Microsoft.ML.Data/DataLoadSave/Binary/BinarySaver.cs
+++ b/src/Microsoft.ML.Data/DataLoadSave/Binary/BinarySaver.cs
@@ -850,7 +850,7 @@ public ColumnType LoadTypeDescriptionOrNull(Stream stream)
/// The type of the codec to write and utilize
/// The value to encode and write
/// The number of bytes written
- /// Whether the write was successful or not
+ /// Whether the write was successful or not
public bool TryWriteTypeAndValue(Stream stream, ColumnType type, ref T value, out int bytesWritten)
{
_host.CheckValue(stream, nameof(stream));
diff --git a/src/Microsoft.ML.Data/DataLoadSave/Binary/IValueCodec.cs b/src/Microsoft.ML.Data/DataLoadSave/Binary/IValueCodec.cs
index fd68a34cc9..9c6e607022 100644
--- a/src/Microsoft.ML.Data/DataLoadSave/Binary/IValueCodec.cs
+++ b/src/Microsoft.ML.Data/DataLoadSave/Binary/IValueCodec.cs
@@ -13,7 +13,7 @@ namespace Microsoft.ML.Runtime.Data.IO
/// on the appropriate ColumnType, then opens multiple writers to write blocks of data
/// to some stream. The idea is that each writer or reader is called on some "managable chunk"
/// of data.
- ///
+ ///
/// Codecs should be thread safe, though the readers and writers they spawn do not need to
/// be thread safe.
///
@@ -60,7 +60,7 @@ internal interface IValueCodec : IValueCodec
/// Stream on which we open reader.
/// The number of items expected to be encoded in the block
/// starting from the current position of the stream. Implementors should, if
- /// possible, throw if it seems if the block contains a different number of
+ /// possible, throw if it seems the block contains a different number of
/// elements.
IValueReader OpenReader(Stream stream, int items);
}
@@ -89,7 +89,7 @@ internal interface IValueWriter : IDisposable
/// be spawned from an , its write methods called some
/// number of times to write to the stream, and then Commit will be called when
/// all values have been written, the stream now being at the end of the written block.
- ///
+ ///
/// The intended usage of the value writers is that blocks are composed of some small
/// number of values (perhaps a few thousand), the idea being that a block is something
/// that should easily fit in main memory, both for reading and writing. Some writers
diff --git a/src/Microsoft.ML.Data/DataLoadSave/Binary/Zlib/Zlib.cs b/src/Microsoft.ML.Data/DataLoadSave/Binary/Zlib/Zlib.cs
index 4dcc82ac9b..024eaef4a2 100644
--- a/src/Microsoft.ML.Data/DataLoadSave/Binary/Zlib/Zlib.cs
+++ b/src/Microsoft.ML.Data/DataLoadSave/Binary/Zlib/Zlib.cs
@@ -13,20 +13,20 @@ internal static class Zlib
public const string DllPath = "zlib.dll";
[DllImport(DllPath), SuppressUnmanagedCodeSecurity]
- private static unsafe extern Constants.RetCode deflateInit2_(ZStream* strm, int level, int method, int windowBits,
+ private static extern unsafe Constants.RetCode deflateInit2_(ZStream* strm, int level, int method, int windowBits,
int memLevel, Constants.Strategy strategy, byte* version, int streamSize);
[DllImport(DllPath), SuppressUnmanagedCodeSecurity]
- private static unsafe extern Constants.RetCode inflateInit2_(ZStream* strm, int windowBits, byte* version, int streamSize);
+ private static extern unsafe Constants.RetCode inflateInit2_(ZStream* strm, int windowBits, byte* version, int streamSize);
[DllImport(DllPath), SuppressUnmanagedCodeSecurity]
- private static unsafe extern byte* zlibVersion();
+ private static extern unsafe byte* zlibVersion();
[DllImport(DllPath), SuppressUnmanagedCodeSecurity]
- public static unsafe extern Constants.RetCode deflateEnd(ZStream* strm);
+ public static extern unsafe Constants.RetCode deflateEnd(ZStream* strm);
[DllImport(DllPath), SuppressUnmanagedCodeSecurity]
- public static unsafe extern Constants.RetCode deflate(ZStream* strm, Constants.Flush flush);
+ public static extern unsafe Constants.RetCode deflate(ZStream* strm, Constants.Flush flush);
public static unsafe Constants.RetCode DeflateInit2(ZStream* strm, int level, int method, int windowBits,
int memLevel, Constants.Strategy strategy)
@@ -40,10 +40,10 @@ public static unsafe Constants.RetCode InflateInit2(ZStream* strm, int windowBit
}
[DllImport(DllPath), SuppressUnmanagedCodeSecurity]
- public static unsafe extern Constants.RetCode inflate(ZStream* strm, Constants.Flush flush);
+ public static extern unsafe Constants.RetCode inflate(ZStream* strm, Constants.Flush flush);
[DllImport(DllPath), SuppressUnmanagedCodeSecurity]
- public static unsafe extern Constants.RetCode inflateEnd(ZStream* strm);
+ public static extern unsafe Constants.RetCode inflateEnd(ZStream* strm);
}
[StructLayout(LayoutKind.Sequential)]
diff --git a/src/Microsoft.ML.Data/DataLoadSave/PartitionedFileLoader.cs b/src/Microsoft.ML.Data/DataLoadSave/PartitionedFileLoader.cs
index 69eb3bbb3b..10bf816dc1 100644
--- a/src/Microsoft.ML.Data/DataLoadSave/PartitionedFileLoader.cs
+++ b/src/Microsoft.ML.Data/DataLoadSave/PartitionedFileLoader.cs
@@ -682,13 +682,12 @@ private bool TryTruncatePath(int dirCount, string path, out string truncPath)
Ch.Warning($"Path {path} did not have {dirCount} directories necessary for parsing.");
return false;
}
-
+
// Rejoin segments to create a valid path.
truncPath = String.Join(Path.DirectorySeparatorChar.ToString(), segments);
return true;
}
-
///
/// Parse all column values from the directory path.
///
diff --git a/src/Microsoft.ML.Data/DataLoadSave/Text/TextLoader.cs b/src/Microsoft.ML.Data/DataLoadSave/Text/TextLoader.cs
index 3678c749ba..babca545c8 100644
--- a/src/Microsoft.ML.Data/DataLoadSave/Text/TextLoader.cs
+++ b/src/Microsoft.ML.Data/DataLoadSave/Text/TextLoader.cs
@@ -34,7 +34,7 @@ public sealed partial class TextLoader : IDataLoader
///
/// Vector column of I4 that contains values from columns 1, 3 to 10
/// col=ColumnName:I4:1,3-10
- ///
+ ///
/// Key range column of KeyType with underlying storage type U4 that contains values from columns 1, 3 to 10, that can go from 1 to 100 (0 reserved for out of range)
/// col=ColumnName:U4[1-100]:1,3-10
///
@@ -554,7 +554,7 @@ public Bindings(TextLoader parent, Column[] cols, IMultiStreamSource headerFile)
{
var range = col.Source[i];
- // Check for remaining range, raise flag.
+ // Check for remaining range, raise flag.
if (range.AllOther)
{
ch.CheckUserArg(iinfoOther < 0, nameof(Range.AllOther), "At most one all other range can be specified");
@@ -605,7 +605,7 @@ public Bindings(TextLoader parent, Column[] cols, IMultiStreamSource headerFile)
NameToInfoIndex[name] = iinfo;
}
- // Note that segsOther[isegOther] is not a real segment to be included.
+ // Note that segsOther[isegOther] is not a real segment to be included.
// It only persists segment information such as Min, Max, autoEnd, variableEnd for later processing.
// Process all other range.
if (iinfoOther >= 0)
@@ -641,7 +641,7 @@ public Bindings(TextLoader parent, Column[] cols, IMultiStreamSource headerFile)
foreach (var seg in segsAll)
{
- // At this step, all indices less than min is contained in some segment, either in
+ // At this step, all indices less than min are contained in some segment, either in
// segsAll or segsNew.
ch.Assert(min < lim);
if (min < seg.Min)
@@ -1014,7 +1014,7 @@ public TextLoader(IHostEnvironment env, Arguments args, IMultiStreamSource files
_host.CheckNonEmpty(args.Separator, nameof(args.Separator), "Must specify a separator");
//Default arg.Separator is tab and default args.SeparatorChars is also a '\t'.
- //At a time only one default can be different and whichever is different that will
+ //At a time only one default can be different and whichever is different that will
//be used.
if (args.SeparatorChars.Length > 1 || args.SeparatorChars[0] != '\t')
{
@@ -1110,7 +1110,7 @@ private static bool TryParseSchema(IHost host, IMultiStreamSource files,
// Get settings just for core arguments, not everything.
string tmp = CmdParser.GetSettings(host, args, new ArgumentsCore());
- // Try to get the schema information from the file.
+ // Try to get the schema information from the file.
string str = Cursor.GetEmbeddedArgs(files);
if (string.IsNullOrWhiteSpace(str))
return false;
diff --git a/src/Microsoft.ML.Data/DataLoadSave/Text/TextLoaderParser.cs b/src/Microsoft.ML.Data/DataLoadSave/Text/TextLoaderParser.cs
index fde45c6f4f..582d81b546 100644
--- a/src/Microsoft.ML.Data/DataLoadSave/Text/TextLoaderParser.cs
+++ b/src/Microsoft.ML.Data/DataLoadSave/Text/TextLoaderParser.cs
@@ -27,7 +27,7 @@ public sealed partial class TextLoader : IDataLoader
///
private sealed class ValueCreatorCache
{
- private volatile static ValueCreatorCache _instance;
+ private static volatile ValueCreatorCache _instance;
public static ValueCreatorCache Instance
{
get
diff --git a/src/Microsoft.ML.Data/DataLoadSave/Transpose/TransposeLoader.cs b/src/Microsoft.ML.Data/DataLoadSave/Transpose/TransposeLoader.cs
index 173c588607..37cbe23b92 100644
--- a/src/Microsoft.ML.Data/DataLoadSave/Transpose/TransposeLoader.cs
+++ b/src/Microsoft.ML.Data/DataLoadSave/Transpose/TransposeLoader.cs
@@ -531,7 +531,7 @@ public void Save(ModelSaveContext ctx)
}
///
- /// Save a zero-row dataview that will be used to infer schema information, used in the case
+ /// Save a zero-row dataview that will be used to infer schema information, used in the case
+ /// where the transpose loader is instantiated with no input streams.
///
private static void SaveSchema(IHostEnvironment env, ModelSaveContext ctx, ISchema schema)
diff --git a/src/Microsoft.ML.Data/DataView/AppendRowsDataView.cs b/src/Microsoft.ML.Data/DataView/AppendRowsDataView.cs
index 6633e2535f..dc713a82b2 100644
--- a/src/Microsoft.ML.Data/DataView/AppendRowsDataView.cs
+++ b/src/Microsoft.ML.Data/DataView/AppendRowsDataView.cs
@@ -24,7 +24,7 @@ namespace Microsoft.ML.Runtime.Data
/// This class provides the functionality to combine multiple IDataView objects which share the same schema
/// All sources must contain the same number of columns and their column names, sizes, and item types must match.
/// The row count of the resulting IDataView will be the sum over that of each individual.
- ///
+ ///
/// An AppendRowsDataView instance is shuffleable iff all of its sources are shuffleable and their row counts are known.
///
public sealed class AppendRowsDataView : IDataView
@@ -46,8 +46,8 @@ public sealed class AppendRowsDataView : IDataView
///
/// Create a dataview by appending the rows of the sources.
- ///
- /// All sources must be consistent with the passed-in schema in the number of columns, column names,
+ ///
+ /// All sources must be consistent with the passed-in schema in the number of columns, column names,
/// and column types. If schema is null, the first source's schema will be used.
///
/// The host environment.
@@ -203,7 +203,7 @@ public bool IsColumnActive(int col)
}
///
- /// The deterministic cursor. It will scan through the sources sequentially.
+ /// The deterministic cursor. It will scan through the sources sequentially.
///
private sealed class Cursor : CursorBase
{
@@ -293,7 +293,7 @@ public override void Dispose()
///
/// A RandCursor will ask each subordinate cursor to shuffle itself.
- /// Then, at each step, it randomly calls a subordinate to move next with probability (roughly) proportional to
+ /// Then, at each step, it randomly calls a subordinate to move next with probability (roughly) proportional to
/// the number of the subordinate's remaining rows.
///
private sealed class RandCursor : CursorBase
@@ -383,16 +383,16 @@ public override void Dispose()
///
/// Given k classes with counts (N_0, N_2, N_3, ..., N_{k-1}), the goal of this sampler is to select the i-th
- /// class with probability N_i/M, where M = N_0 + N_1 + ... + N_{k-1}.
+ /// class with probability N_i/M, where M = N_0 + N_1 + ... + N_{k-1}.
/// Once the i-th class is selected, its count will be updated to N_i - 1.
- ///
+ ///
/// For efficiency consideration, the sampling distribution is only an approximation of the desired distribution.
///
private sealed class MultinomialWithoutReplacementSampler
{
// Implementation: generate a batch array of size BatchSize.
// Each class will claim a fraction of the batch proportional to its remaining row count.
- // Shuffle the array. The sampler reads from the array one at a time until the batch is consumed.
+ // Shuffle the array. The sampler reads from the array one at a time until the batch is consumed.
// The sampler then generates a new batch and repeat the process.
private const int BatchSize = 1000;
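The MultinomialWithoutReplacementSampler comments above outline a batched approximation: each class claims a share of a fixed-size batch proportional to its remaining count, the batch is shuffled, and samples are drawn from it until it is exhausted. A rough, self-contained sketch of that scheme follows; it is not the ML.NET implementation and it glosses over rounding corner cases (with very skewed counts a batch can come out short or even empty).

```csharp
using System;
using System.Collections.Generic;

// Sketch of the batched "multinomial without replacement" idea described above.
public sealed class BatchedSamplerSketch
{
    private const int BatchSize = 1000;

    private readonly int[] _remaining;            // remaining row count per class
    private readonly Random _rand;
    private readonly List<int> _batch = new List<int>();
    private int _pos;

    public BatchedSamplerSketch(int[] counts, Random rand)
    {
        _remaining = (int[])counts.Clone();
        _rand = rand;
    }

    // Returns the index of the next sampled class and decrements its remaining count.
    public int Next()
    {
        if (_pos >= _batch.Count)
            Refill();
        int cls = _batch[_pos++];
        _remaining[cls]--;
        return cls;
    }

    private void Refill()
    {
        _batch.Clear();
        _pos = 0;
        long total = 0;
        foreach (int n in _remaining)
            total += n;
        if (total == 0)
            throw new InvalidOperationException("All classes are exhausted.");
        int size = (int)Math.Min(BatchSize, total);
        // Each class claims a share of the batch roughly proportional to its remaining count.
        for (int i = 0; i < _remaining.Length; i++)
        {
            int share = (int)Math.Round((double)size * _remaining[i] / total);
            for (int j = 0; j < share; j++)
                _batch.Add(i);
        }
        // Fisher-Yates shuffle, so the consumer sees the classes in a random order.
        for (int i = _batch.Count - 1; i > 0; i--)
        {
            int j = _rand.Next(i + 1);
            int tmp = _batch[i];
            _batch[i] = _batch[j];
            _batch[j] = tmp;
        }
    }
}
```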
diff --git a/src/Microsoft.ML.Data/DataView/CacheDataView.cs b/src/Microsoft.ML.Data/DataView/CacheDataView.cs
index 3bca858d1d..72fb4b18a5 100644
--- a/src/Microsoft.ML.Data/DataView/CacheDataView.cs
+++ b/src/Microsoft.ML.Data/DataView/CacheDataView.cs
@@ -618,7 +618,7 @@ private interface IWaiter
/// is equivalent to also having waited on i-1, i-2, etc.
/// Note that this is position within the cache, that is, a row index,
/// as opposed to position within the cursor.
- ///
+ ///
/// This method should be thread safe because in the parallel cursor
/// case it will be used by multiple threads.
///
@@ -955,23 +955,23 @@ public Wrapper(RandomIndex index)
/// next job ids before they push the completed jobs to the consumer. So the workers are
/// then subject to being blocked until their current completed jobs are fully accepted
/// (i.e. added to the to-consume queue).
- ///
+ ///
/// How it works:
/// Suppose we have 7 workers (w0,..,w6) and 14 jobs (j0,..,j13).
/// Initially, jobs get assigned to workers using a shared counter.
/// Here is an example outcome of using a shared counter:
/// w1->j0, w6->j1, w0->j2, w3->j3, w4->j4, w5->j5, w2->j6.
- ///
+ ///
/// Suppose workers finished jobs in the following order:
/// w5->j5, w0->j2, w6->j1, w4->j4, w3->j3,w1->j0, w2->j6.
- ///
+ ///
/// w5 finishes processing j5 first, but will be blocked until the processing of jobs
/// j0,..,j4 completes since the consumer can consume jobs in order only.
/// Therefore, the next available job (j7) should not be assigned to w5. It should be
- /// assigned to the worker whose job *get consumed first* (w1 since it processes j0
- /// which is the first job) *not* to the worker who completes its job first (w5 in
+ /// assigned to the worker whose job *gets consumed first* (w1 since it processes j0
+ /// which is the first job) *not* to the worker who completes its job first (w5 in
/// this example).
- ///
+ ///
/// So, a shared counter can be used to assign jobs to workers initially but should
/// not be used onwards.
///
diff --git a/src/Microsoft.ML.Data/DataView/CompositeSchema.cs b/src/Microsoft.ML.Data/DataView/CompositeSchema.cs
index 4d387de1d5..81aef4b01e 100644
--- a/src/Microsoft.ML.Data/DataView/CompositeSchema.cs
+++ b/src/Microsoft.ML.Data/DataView/CompositeSchema.cs
@@ -37,7 +37,7 @@ public CompositeSchema(ISchema[] sources)
///
/// Returns an array of input predicated for sources, corresponding to the input predicate.
- /// The returned array size is equal to the number of sources, but if a given source is not needed at all,
+ /// The returned array size is equal to the number of sources, but if a given source is not needed at all,
/// the corresponding predicate will be null.
///
public Func[] GetInputPredicates(Func predicate)
diff --git a/src/Microsoft.ML.Data/DataView/RowToRowMapperTransform.cs b/src/Microsoft.ML.Data/DataView/RowToRowMapperTransform.cs
index b0fde835d8..d69379d5da 100644
--- a/src/Microsoft.ML.Data/DataView/RowToRowMapperTransform.cs
+++ b/src/Microsoft.ML.Data/DataView/RowToRowMapperTransform.cs
@@ -30,7 +30,7 @@ public RowMapperColumnInfo(string name, ColumnType type, ColumnMetadataInfo meta
}
///
- /// This interface is used to create a .
+ /// This interface is used to create a .
/// Implementations should be given an in their constructor, and should have a
/// ctor or Create method with , along with a corresponding
/// .
@@ -44,7 +44,7 @@ public interface IRowMapper : ICanSaveModel
///
/// Returns the getters for the output columns given an active set of output columns. The length of the getters
- /// array should be equal to the number of columns added by the IRowMapper. It should contain the getter for the
+ /// array should be equal to the number of columns added by the IRowMapper. It should contain the getter for the
/// i'th output column if activeOutput(i) is true, and null otherwise.
///
Delegate[] CreateGetters(IRow input, Func activeOutput, out Action disposer);
diff --git a/src/Microsoft.ML.Data/DataView/Transposer.cs b/src/Microsoft.ML.Data/DataView/Transposer.cs
index 74477de9b7..91bb9c8b6a 100644
--- a/src/Microsoft.ML.Data/DataView/Transposer.cs
+++ b/src/Microsoft.ML.Data/DataView/Transposer.cs
@@ -1041,7 +1041,6 @@ private static Splitter CreateCore(IDataView view, int col, int[] ends)
}
#region ISchema implementation
-
// Subclasses should implement ColumnCount and GetColumnType.
public override bool TryGetColumnIndex(string name, out int col)
{
@@ -1062,8 +1061,6 @@ public override string GetColumnName(int col)
Contracts.CheckParam(0 <= col && col < ColumnCount, nameof(col));
return _view.Schema.GetColumnName(SrcCol);
}
-
- public override abstract ColumnType GetColumnType(int col);
#endregion
private abstract class RowBase : IRow
@@ -1215,7 +1212,7 @@ private sealed class Row : RowBase>
private VBuffer _inputValue;
// The delegate to get the input value.
private readonly ValueGetter> _inputGetter;
- // The limit of _inputValue.Indices
+ // The limit of _inputValue.Indices
private readonly int[] _srcIndicesLims;
// Convenient accessor since we use this all over the place.
private int[] Lims { get { return Parent._lims; } }
diff --git a/src/Microsoft.ML.Data/DataView/ZipDataView.cs b/src/Microsoft.ML.Data/DataView/ZipDataView.cs
index 9a7e79bab8..a472b48b36 100644
--- a/src/Microsoft.ML.Data/DataView/ZipDataView.cs
+++ b/src/Microsoft.ML.Data/DataView/ZipDataView.cs
@@ -11,7 +11,7 @@ namespace Microsoft.ML.Runtime.Data
{
///
/// This is a data view that is a 'zip' of several data views.
- /// The length of the zipped data view is equal to the shortest of the lengths of the components.
+ /// The length of the zipped data view is equal to the shortest of the lengths of the components.
///
public sealed class ZipDataView : IDataView
{
@@ -77,7 +77,7 @@ public IRowCursor GetRowCursor(Func predicate, IRandom rand = null)
var srcPredicates = _schema.GetInputPredicates(predicate);
- // REVIEW: if we know the row counts, we could only open cursor if it has needed columns, and have the
+ // REVIEW: if we know the row counts, we could only open cursor if it has needed columns, and have the
// outer cursor handle the early stopping. If we don't know row counts, we need to open all the cursors because
// we don't know which one will be the shortest.
// One reason this is not done currently is because the API has 'somewhat mutable' data views, so potentially this
@@ -88,8 +88,8 @@ public IRowCursor GetRowCursor(Func predicate, IRandom rand = null)
}
///
- /// Create an with no requested columns on a data view.
- /// Potentially, this can be optimized by calling GetRowCount(lazy:true) first, and if the count is not known,
+ /// Create an with no requested columns on a data view.
+ /// Potentially, this can be optimized by calling GetRowCount(lazy:true) first, and if the count is not known,
/// wrapping around GetCursor().
///
private IRowCursor GetMinimumCursor(IDataView dv)
diff --git a/src/Microsoft.ML.Data/Depricated/TGUIAttribute.cs b/src/Microsoft.ML.Data/Depricated/TGUIAttribute.cs
index 7a51e1a5ee..5f09c604bb 100644
--- a/src/Microsoft.ML.Data/Depricated/TGUIAttribute.cs
+++ b/src/Microsoft.ML.Data/Depricated/TGUIAttribute.cs
@@ -7,12 +7,12 @@
namespace Microsoft.ML.Runtime.Internal.Internallearn
{
-#pragma warning disable TLC_GeneralName // This structure should be deprecated anyway.
+#pragma warning disable MSML_GeneralName // This structure should be deprecated anyway.
// REVIEW: Get rid of this. Everything should be in the ArgumentAttribute (or a class
// derived from ArgumentAttribute).
[AttributeUsage(AttributeTargets.Field)]
public class TGUIAttribute : Attribute
-#pragma warning restore TLC_GeneralName
+#pragma warning restore MSML_GeneralName
{
// Display parameters
public string Label { get; set; }
@@ -32,7 +32,7 @@ public class TGUIAttribute : Attribute
public bool NoSweep { get; set; }
//Settings are automatically populated for fields that are classes.
- //The below is an extension of the framework to add settings for
+ //The below is an extension of the framework to add settings for
//boolean type fields.
public bool ShowSettingsForCheckbox { get; set; }
public object Settings { get; set; }
diff --git a/src/Microsoft.ML.Data/Depricated/Vector/VectorUtils.cs b/src/Microsoft.ML.Data/Depricated/Vector/VectorUtils.cs
index fc335bb52e..5b46aad0b8 100644
--- a/src/Microsoft.ML.Data/Depricated/Vector/VectorUtils.cs
+++ b/src/Microsoft.ML.Data/Depricated/Vector/VectorUtils.cs
@@ -57,7 +57,7 @@ public static Float DotProduct(ref VBuffer a, ref VBuffer b)
}
///
- /// Sparsify vector A (keep at most + values)
+ /// Sparsify vector A (keep at most + values)
/// and optionally rescale values to the [-1, 1] range.
/// Vector to be sparsified and normalized.
/// How many top (positive) elements to preserve after sparsification.
diff --git a/src/Microsoft.ML.Data/Dirty/PredictorBase.cs b/src/Microsoft.ML.Data/Dirty/PredictorBase.cs
index 980a85ffd1..35c9a49133 100644
--- a/src/Microsoft.ML.Data/Dirty/PredictorBase.cs
+++ b/src/Microsoft.ML.Data/Dirty/PredictorBase.cs
@@ -41,9 +41,9 @@ protected PredictorBase(IHostEnvironment env, string name, ModelLoadContext ctx)
// Verify that the Float type matches.
int cbFloat = ctx.Reader.ReadInt32();
-#pragma warning disable TLC_NoMessagesForLoadContext // This one is actually useful.
+#pragma warning disable MSML_NoMessagesForLoadContext // This one is actually useful.
Host.CheckDecode(cbFloat == sizeof(Float), "This file was saved by an incompatible version");
-#pragma warning restore TLC_NoMessagesForLoadContext
+#pragma warning restore MSML_NoMessagesForLoadContext
}
public virtual void Save(ModelSaveContext ctx)
diff --git a/src/Microsoft.ML.Data/EntryPoints/CommonOutputs.cs b/src/Microsoft.ML.Data/EntryPoints/CommonOutputs.cs
index 9e99bf8993..37f37f6c64 100644
--- a/src/Microsoft.ML.Data/EntryPoints/CommonOutputs.cs
+++ b/src/Microsoft.ML.Data/EntryPoints/CommonOutputs.cs
@@ -191,7 +191,7 @@ public interface ITrainerOutput
}
///
- /// Macro output class base.
+ /// Macro output class base.
///
public abstract class MacroOutput
{
diff --git a/src/Microsoft.ML.Data/EntryPoints/EntryPointNode.cs b/src/Microsoft.ML.Data/EntryPoints/EntryPointNode.cs
index 4f94e103b8..d2d59eb94b 100644
--- a/src/Microsoft.ML.Data/EntryPoints/EntryPointNode.cs
+++ b/src/Microsoft.ML.Data/EntryPoints/EntryPointNode.cs
@@ -632,7 +632,7 @@ public static EntryPointNode Create(
///
/// Checks the given JSON object key-value pair is a valid EntryPoint input and
/// extracts out any variables that need to be populated. These variables will be
- /// added to the EntryPoint context. Input parameters that are not set to variables
+ /// added to the EntryPoint context. Input parameters that are not set to variables
/// will be immediately set using the input builder instance.
///
private void CheckAndSetInputValue(KeyValuePair pair)
@@ -692,7 +692,7 @@ private void CheckAndSetInputValue(KeyValuePair pair)
///
/// Checks the given JSON object key-value pair is a valid EntryPoint output.
- /// Extracts out any variables that need to be populated and adds them to the
+ /// Extracts out any variables that need to be populated and adds them to the
/// EntryPoint context.
///
private void CheckAndMarkOutputValue(KeyValuePair pair)
@@ -1073,8 +1073,8 @@ protected VariableBinding(string varName)
VariableName = varName;
}
- // A regex to validate an EntryPoint variable value accessor string. Valid EntryPoint variable names
- // can be any sequence of alphanumeric characters and underscores. They must start with a letter or underscore.
+ // A regex to validate an EntryPoint variable value accessor string. Valid EntryPoint variable names
+ // can be any sequence of alphanumeric characters and underscores. They must start with a letter or underscore.
// An EntryPoint variable can be followed with an array or dictionary specifier, which begins
// with '[', contains either an integer or alphanumeric string, optionally wrapped in single-quotes,
// followed with ']'.
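The comment above spells out the grammar for an EntryPoint variable accessor: a name of alphanumerics and underscores starting with a letter or underscore, optionally followed by an indexer in square brackets holding an integer or alphanumeric token that may be single-quoted. The actual regex (including any variable prefix it may require) is not shown in this hunk; a hypothetical pattern covering just the described grammar could be:

```csharp
using System.Text.RegularExpressions;

// Hypothetical matcher for the variable-accessor grammar described above.
internal static class VariableBindingSketch
{
    private static readonly Regex VariableRegex = new Regex(
        @"^(?<name>[a-zA-Z_][a-zA-Z0-9_]*)(?:\[(?:'(?<accessor>[a-zA-Z0-9]+)'|(?<accessor>[a-zA-Z0-9]+))\])?$",
        RegexOptions.Compiled);

    public static bool TryParse(string value, out string name, out string accessor)
    {
        var match = VariableRegex.Match(value);
        name = match.Success ? match.Groups["name"].Value : null;
        accessor = match.Success && match.Groups["accessor"].Success ? match.Groups["accessor"].Value : null;
        return match.Success;
    }
}
```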
diff --git a/src/Microsoft.ML.Data/EntryPoints/InputBuilder.cs b/src/Microsoft.ML.Data/EntryPoints/InputBuilder.cs
index e5afd8dbb5..4d3b765114 100644
--- a/src/Microsoft.ML.Data/EntryPoints/InputBuilder.cs
+++ b/src/Microsoft.ML.Data/EntryPoints/InputBuilder.cs
@@ -14,8 +14,8 @@
namespace Microsoft.ML.Runtime.EntryPoints.JsonUtils
{
///
- /// The class that creates and wraps around an instance of an input object and gradually populates all fields, keeping track of missing
- /// required values. The values can be set from their JSON representation (during the graph parsing stage), as well as directly
+ /// The class that creates and wraps around an instance of an input object and gradually populates all fields, keeping track of missing
+ /// required values. The values can be set from their JSON representation (during the graph parsing stage), as well as directly
/// (in the process of graph execution).
///
public sealed class InputBuilder
@@ -515,7 +515,7 @@ private static object ParseJsonValue(IExceptionContext ectx, Type type, Attribut
}
///
- /// Ensures that the given value can be assigned to an entry point field with
+ /// Ensures that the given value can be assigned to an entry point field with
/// type . This method will wrap the value in the option
/// type if needed and throw an exception if the value isn't assignable.
///
@@ -791,7 +791,7 @@ public static class Range
///
public static class Deprecated
{
- public new static string ToString() => "Deprecated";
+ public static new string ToString() => "Deprecated";
public const string Message = "Message";
}
@@ -800,7 +800,7 @@ public static class Deprecated
///
public static class SweepableLongParam
{
- public new static string ToString() => "SweepRange";
+ public static new string ToString() => "SweepRange";
public const string RangeType = "RangeType";
public const string Max = "Max";
public const string Min = "Min";
@@ -814,7 +814,7 @@ public static class SweepableLongParam
///
public static class SweepableFloatParam
{
- public new static string ToString() => "SweepRange";
+ public static new string ToString() => "SweepRange";
public const string RangeType = "RangeType";
public const string Max = "Max";
public const string Min = "Min";
@@ -828,14 +828,14 @@ public static class SweepableFloatParam
///
public static class SweepableDiscreteParam
{
- public new static string ToString() => "SweepRange";
+ public static new string ToString() => "SweepRange";
public const string RangeType = "RangeType";
public const string Options = "Values";
}
public static class PipelineSweeperSupportedMetrics
{
- public new static string ToString() => "SupportedMetric";
+ public static new string ToString() => "SupportedMetric";
public const string Auc = BinaryClassifierEvaluator.Auc;
public const string AccuracyMicro = Data.MultiClassClassifierEvaluator.AccuracyMicro;
public const string AccuracyMacro = MultiClassClassifierEvaluator.AccuracyMacro;
diff --git a/src/Microsoft.ML.Data/EntryPoints/PredictorModel.cs b/src/Microsoft.ML.Data/EntryPoints/PredictorModel.cs
index 4b474f847c..055b2fa299 100644
--- a/src/Microsoft.ML.Data/EntryPoints/PredictorModel.cs
+++ b/src/Microsoft.ML.Data/EntryPoints/PredictorModel.cs
@@ -74,7 +74,7 @@ public void Save(IHostEnvironment env, Stream stream)
{
// REVIEW: address the asymmetry in the way we're loading and saving the model.
// Effectively, we have methods to load the transform model from a model.zip, but don't have
- // methods to compose the model.zip out of transform model, predictor and role mappings
+ // methods to compose the model.zip out of transform model, predictor and role mappings
// (we use the TrainUtils.SaveModel that does all three).
// Create the chain of transforms for saving.
diff --git a/src/Microsoft.ML.Data/EntryPoints/ScoreModel.cs b/src/Microsoft.ML.Data/EntryPoints/ScoreModel.cs
index 96ce0acac9..312a92bccc 100644
--- a/src/Microsoft.ML.Data/EntryPoints/ScoreModel.cs
+++ b/src/Microsoft.ML.Data/EntryPoints/ScoreModel.cs
@@ -15,9 +15,9 @@ namespace Microsoft.ML.Runtime.EntryPoints
///
/// This module handles scoring a against a new dataset.
/// As a result, we return both the scored data and the scoring transform as a .
- ///
- /// REVIEW: This module does not support 'exotic' scoring scenarios, like recommendation and quantile regression
- /// (those where the user-defined scorer settings are necessary to identify the scorer). We could resolve this by
+ ///
+ /// REVIEW: This module does not support 'exotic' scoring scenarios, like recommendation and quantile regression
+ /// (those where the user-defined scorer settings are necessary to identify the scorer). We could resolve this by
/// adding a sub-component for extra scorer args, or by creating specialized EPs for these scenarios.
///
public static partial class ScoreModel
diff --git a/src/Microsoft.ML.Data/EntryPoints/TransformModel.cs b/src/Microsoft.ML.Data/EntryPoints/TransformModel.cs
index 9edc87df6d..ed8e7d56e2 100644
--- a/src/Microsoft.ML.Data/EntryPoints/TransformModel.cs
+++ b/src/Microsoft.ML.Data/EntryPoints/TransformModel.cs
@@ -43,7 +43,7 @@ public sealed class TransformModel : ITransformModel
///
/// The resulting schema once applied to this model. The might have
- /// columns that are not needed by this transform and these columns will be seen in the
+ /// columns that are not needed by this transform and these columns will be seen in the
/// produced by this transform.
///
public ISchema OutputSchema => _chain.Schema;
diff --git a/src/Microsoft.ML.Data/Evaluators/AnomalyDetectionEvaluator.cs b/src/Microsoft.ML.Data/Evaluators/AnomalyDetectionEvaluator.cs
index 74f7ca0068..8e4f3be56c 100644
--- a/src/Microsoft.ML.Data/Evaluators/AnomalyDetectionEvaluator.cs
+++ b/src/Microsoft.ML.Data/Evaluators/AnomalyDetectionEvaluator.cs
@@ -57,7 +57,7 @@ public static class OverallMetrics
}
///
- /// The anomaly detection evaluator outputs a data view by this name, which contains the the examples
+ /// The anomaly detection evaluator outputs a data view by this name, which contains the examples
/// with the top scores in the test set. It contains the three columns listed below, with each row corresponding
/// to one test example.
///
diff --git a/src/Microsoft.ML.Data/Evaluators/BinaryClassifierEvaluator.cs b/src/Microsoft.ML.Data/Evaluators/BinaryClassifierEvaluator.cs
index 7161f66439..71c08eecd0 100644
--- a/src/Microsoft.ML.Data/Evaluators/BinaryClassifierEvaluator.cs
+++ b/src/Microsoft.ML.Data/Evaluators/BinaryClassifierEvaluator.cs
@@ -88,7 +88,7 @@ public enum Metrics
///
/// Binary classification evaluator outputs a data view with this name, which contains the p/r data.
- /// It contains the columns listed below, and in case data also contains a weight column, it contains
+ /// It contains the columns listed below, and in case data also contains a weight column, it contains
/// also columns for the weighted values.
/// and false positive rate.
///
@@ -1211,7 +1211,7 @@ public override IEnumerable GetOverallMetricColumns()
}
// This method saves the p/r plots, and returns the p/r metrics data view.
- // In case there are results from multiple folds, they are averaged using
+ // In case there are results from multiple folds, they are averaged using
// vertical averaging for the p/r plot, and appended using AppendRowsDataView for
// the p/r data view.
private bool TryGetPrMetrics(Dictionary[] metrics, out IDataView pr)
diff --git a/src/Microsoft.ML.Data/Evaluators/EvaluatorBase.cs b/src/Microsoft.ML.Data/Evaluators/EvaluatorBase.cs
index ef7183c2fa..c628cff1e4 100644
--- a/src/Microsoft.ML.Data/Evaluators/EvaluatorBase.cs
+++ b/src/Microsoft.ML.Data/Evaluators/EvaluatorBase.cs
@@ -217,7 +217,7 @@ protected ValueGetter> GetKeyValueGetter(AggregatorDictionaryBas
///
/// This is a helper class for evaluators deriving from EvaluatorBase, used for computing aggregate metrics.
/// Aggregators should keep track of the number of passes done. The method should get
- /// the input getters of the given IRow that are needed for the current pass, assuming that all the needed column
+ /// the input getters of the given IRow that are needed for the current pass, assuming that all the needed column
/// information is stored in the given .
/// In the aggregator should call the getters once, and process the input as needed.
/// increments the pass count after each pass.
@@ -251,7 +251,7 @@ public bool Start()
return IsActive();
}
- ///
+ ///
/// This method should get the getters of the new IRow that are needed for the next pass.
///
public abstract void InitializeNextPass(IRow row, RoleMappedSchema schema);
@@ -370,7 +370,7 @@ private static AggregatorDictionaryBase CreateDictionary(RoleMappedSchem
}
///
- /// This method calls the getter of the stratification column, and returns the aggregator corresponding to
+ /// This method calls the getter of the stratification column, and returns the aggregator corresponding to
/// the stratification value.
///
///
diff --git a/src/Microsoft.ML.Data/Evaluators/EvaluatorUtils.cs b/src/Microsoft.ML.Data/Evaluators/EvaluatorUtils.cs
index 86edac082a..942d139425 100644
--- a/src/Microsoft.ML.Data/Evaluators/EvaluatorUtils.cs
+++ b/src/Microsoft.ML.Data/Evaluators/EvaluatorUtils.cs
@@ -115,10 +115,10 @@ public static ColumnInfo GetScoreColumnInfo(IExceptionContext ectx, ISchema sche
ColumnInfo info;
if (!string.IsNullOrWhiteSpace(name))
{
-#pragma warning disable TLC_ContractsNameUsesNameof
+#pragma warning disable MSML_ContractsNameUsesNameof
if (!ColumnInfo.TryCreateFromName(schema, name, out info))
throw ectx.ExceptUserArg(argName, "Score column is missing");
-#pragma warning restore TLC_ContractsNameUsesNameof
+#pragma warning restore MSML_ContractsNameUsesNameof
return info;
}
@@ -145,9 +145,9 @@ public static ColumnInfo GetScoreColumnInfo(IExceptionContext ectx, ISchema sche
if (!string.IsNullOrWhiteSpace(defName) && ColumnInfo.TryCreateFromName(schema, defName, out info))
return info;
-#pragma warning disable TLC_ContractsNameUsesNameof
+#pragma warning disable MSML_ContractsNameUsesNameof
throw ectx.ExceptUserArg(argName, "Score column is missing");
-#pragma warning restore TLC_ContractsNameUsesNameof
+#pragma warning restore MSML_ContractsNameUsesNameof
}
///
@@ -168,12 +168,12 @@ public static ColumnInfo GetOptAuxScoreColumnInfo(IExceptionContext ectx, ISchem
if (!string.IsNullOrWhiteSpace(name))
{
ColumnInfo info;
-#pragma warning disable TLC_ContractsNameUsesNameof
+#pragma warning disable MSML_ContractsNameUsesNameof
if (!ColumnInfo.TryCreateFromName(schema, name, out info))
throw ectx.ExceptUserArg(argName, "{0} column is missing", valueKind);
if (!testType(info.Type))
throw ectx.ExceptUserArg(argName, "{0} column has incompatible type", valueKind);
-#pragma warning restore TLC_ContractsNameUsesNameof
+#pragma warning restore MSML_ContractsNameUsesNameof
return info;
}
@@ -332,15 +332,15 @@ public static IEnumerable> GetMetrics(IDataView met
if (getters[i] != null)
{
getters[i](ref metricVal);
- // For R8 valued columns the metric name is the column name.
+ // For R8 valued columns the metric name is the column name.
yield return new KeyValuePair(schema.GetColumnName(i), metricVal);
}
else if (getVectorMetrics && vBufferGetters[i] != null)
{
vBufferGetters[i](ref metricVals);
- // For R8 vector valued columns the names of the metrics are the column name,
- // followed by the slot name if it exists, or "Label_i" if it doesn't.
+ // For R8 vector valued columns the names of the metrics are the column name,
+ // followed by the slot name if it exists, or "Label_i" if it doesn't.
VBuffer names = default(VBuffer);
var size = schema.GetColumnType(i).VectorSize;
var slotNamesType = schema.GetMetadataTypeOrNull(MetadataUtils.Kinds.SlotNames, i);
@@ -386,7 +386,7 @@ public static IDataView AddFoldIndex(IHostEnvironment env, IDataView input, int
env.CheckValue(input, nameof(input));
env.CheckParam(curFold >= 0, nameof(curFold));
- // We use the first column in the data view as an input column to the LambdaColumnMapper,
+ // We use the first column in the data view as an input column to the LambdaColumnMapper,
// because it must have an input.
int inputCol = 0;
while (inputCol < input.Schema.ColumnCount && input.Schema.IsHidden(inputCol))
@@ -428,7 +428,7 @@ public static IDataView AddFoldIndex(IHostEnvironment env, IDataView input, int
env.CheckParam(curFold >= 0, nameof(curFold));
env.CheckParam(numFolds > 0, nameof(numFolds));
- // We use the first column in the data view as an input column to the LambdaColumnMapper,
+ // We use the first column in the data view as an input column to the LambdaColumnMapper,
// because it must have an input.
int inputCol = 0;
while (inputCol < input.Schema.ColumnCount && input.Schema.IsHidden(inputCol))
@@ -444,7 +444,7 @@ public static IDataView AddFoldIndex(IHostEnvironment env, IDataView input, int
///
/// This method takes an array of data views and a specified input vector column, and adds a new output column to each of the data views.
- /// First, we find the union set of the slot names in the different data views. Next we define a new vector column for each
+ /// First, we find the union set of the slot names in the different data views. Next we define a new vector column for each
/// data view, indexed by the union of the slot names. For each data view, every slot value is the value in the slot corresponding
/// to its slot name in the original column. If a reconciled slot name does not exist in an input column, the value in the output
/// column is def.
@@ -593,7 +593,7 @@ private static int[][] MapKeys(ISchema[] schemas, string columnName, bool isV
///
/// This method takes an array of data views and a specified input key column, and adds a new output column to each of the data views.
- /// First, we find the union set of the key values in the different data views. Next we define a new key column for each
+ /// First, we find the union set of the key values in the different data views. Next we define a new key column for each
/// data view, with the union of the key values as the new key values. For each data view, the value in the output column is the value
/// corresponding to the key value in the original column.
///
@@ -634,7 +634,7 @@ public static void ReconcileKeyValues(IHostEnvironment env, IDataView[] views, s
///
/// This method takes an array of data views and a specified input key column, and adds a new output column to each of the data views.
- /// First, we find the union set of the key values in the different data views. Next we define a new key column for each
+ /// First, we find the union set of the key values in the different data views. Next we define a new key column for each
/// data view, with the union of the key values as the new key values. For each data view, the value in the output column is the value
/// corresponding to the key value in the original column.
///
@@ -993,7 +993,7 @@ private static List GetMetricNames(IChannel ch, ISchema schema, IRow row
ch.Assert(Utils.Size(vBufferGetters) == schema.ColumnCount);
// Get the names of the metrics. For R8 valued columns the metric name is the column name. For R8 vector valued columns
- // the names of the metrics are the column name, followed by the slot name if it exists, or "Label_i" if it doesn't.
+ // the names of the metrics are the column name, followed by the slot name if it exists, or "Label_i" if it doesn't.
VBuffer names = default(VBuffer);
int metricCount = 0;
var metricNames = new List();
@@ -1326,7 +1326,7 @@ private static void AddScalarColumn(this ArrayDataViewBuilder dvBldr, ISchema sc
}
///
- /// Takes a data view containing one or more rows of metrics, and returns a data view containing additional
+ /// Takes a data view containing one or more rows of metrics, and returns a data view containing additional
/// rows with the average and the standard deviation of the metrics in the input data view.
///
public static IDataView CombineFoldMetricsDataViews(IHostEnvironment env, IDataView data, int numFolds)
@@ -1509,8 +1509,8 @@ private static string GetOverallMetricsAsString(double[] sumMetrics, double[] su
}
// This method returns a string representation of a set of metrics. If there are stratification columns, it looks for columns named
- // StratCol and StratVal, and outputs the metrics in the rows with NA in the StratCol column. If weighted is true, it looks
- // for a DvBool column named "IsWeighted" and outputs the metrics in the rows with a value of true in that column.
+ // StratCol and StratVal, and outputs the metrics in the rows with NA in the StratCol column. If weighted is true, it looks
+ // for a DvBool column named "IsWeighted" and outputs the metrics in the rows with a value of true in that column.
// If nonAveragedCols is non-null, it computes the average and standard deviation over all the relevant rows and populates
// nonAveragedCols with columns that are either hidden, or are not of a type that we can display (i.e., either a numeric column,
// or a known length vector of doubles).
@@ -1749,7 +1749,7 @@ public static class MetricKinds
{
///
/// This data view contains the confusion matrix for N-class classification. It has N rows, and each row has
- /// the following columns:
+ /// the following columns:
/// * Count (vector indicating how many examples of this class were predicted as each one of the classes). This column
/// should have metadata containing the class names.
/// * (Optional) Weight (vector with the total weight of the examples of this class that were predicted as each one of the classes).
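Several of the EvaluatorUtils comments above describe slot-name reconciliation: take the union of slot names across data views, re-index every view's vector column against that union, and substitute a default value for slots a view does not have. A toy version over plain dictionaries is sketched below; the real code operates on VBuffer columns and slot-name metadata, so this is only an illustration of the indexing scheme.

```csharp
using System.Collections.Generic;
using System.Linq;

// Toy slot-name reconciliation: one dictionary per "view" mapping slot name -> value.
public static class SlotReconciliationSketch
{
    public static (string[] UnionNames, double[][] Reindexed) Reconcile(
        IReadOnlyList<IReadOnlyDictionary<string, double>> views, double def = 0)
    {
        // Union of slot names across all views, in first-seen order.
        string[] union = views.SelectMany(v => v.Keys).Distinct().ToArray();
        // Re-index each view against the union, filling missing slots with 'def'.
        double[][] reindexed = views
            .Select(v => union.Select(name => v.TryGetValue(name, out double x) ? x : def).ToArray())
            .ToArray();
        return (union, reindexed);
    }
}
```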
diff --git a/src/Microsoft.ML.Data/Evaluators/MamlEvaluator.cs b/src/Microsoft.ML.Data/Evaluators/MamlEvaluator.cs
index 1a5a2177f3..2af1b54d92 100644
--- a/src/Microsoft.ML.Data/Evaluators/MamlEvaluator.cs
+++ b/src/Microsoft.ML.Data/Evaluators/MamlEvaluator.cs
@@ -10,10 +10,10 @@
namespace Microsoft.ML.Runtime.Data
{
///
- /// This interface is used by Maml components (the , the
+ /// This interface is used by Maml components (the , the
/// and the to evaluate, print and save the results.
- /// The input to the and the methods
- /// should be assumed to contain only the following column roles: label, group, weight and name. Any other columns needed for
+ /// The input to the and the methods
+ /// should be assumed to contain only the following column roles: label, group, weight and name. Any other columns needed for
/// evaluation should be searched for by name in the .
///
public interface IMamlEvaluator : IEvaluator
diff --git a/src/Microsoft.ML.Data/Evaluators/RankerEvaluator.cs b/src/Microsoft.ML.Data/Evaluators/RankerEvaluator.cs
index cdf3f9c57e..616cff8394 100644
--- a/src/Microsoft.ML.Data/Evaluators/RankerEvaluator.cs
+++ b/src/Microsoft.ML.Data/Evaluators/RankerEvaluator.cs
@@ -48,7 +48,7 @@ public sealed class Arguments
public const string MaxDcg = "MaxDCG";
///
- /// The ranking evaluator outputs a data view by this name, which contains metrics aggregated per group.
+ /// The ranking evaluator outputs a data view by this name, which contains metrics aggregated per group.
/// It contains four columns: GroupId, NDCG, DCG and MaxDCG. Each row in the data view corresponds to one
/// group in the scored data.
///
diff --git a/src/Microsoft.ML.Data/Model/Onnx/OnnxContext.cs b/src/Microsoft.ML.Data/Model/Onnx/OnnxContext.cs
index bdef784b29..230f2600a3 100644
--- a/src/Microsoft.ML.Data/Model/Onnx/OnnxContext.cs
+++ b/src/Microsoft.ML.Data/Model/Onnx/OnnxContext.cs
@@ -73,7 +73,7 @@ public abstract class OnnxContext
public abstract string AddIntermediateVariable(ColumnType type, string colName, bool skip = false);
///
- /// Creates an ONNX node
+ /// Creates an ONNX node
///
/// The name of the ONNX operator to apply
/// The names of the variables as inputs
diff --git a/src/Microsoft.ML.Data/Model/Pfa/BoundPfaContext.cs b/src/Microsoft.ML.Data/Model/Pfa/BoundPfaContext.cs
index d0923a9962..dfd5ef55fb 100644
--- a/src/Microsoft.ML.Data/Model/Pfa/BoundPfaContext.cs
+++ b/src/Microsoft.ML.Data/Model/Pfa/BoundPfaContext.cs
@@ -33,7 +33,7 @@ public sealed class BoundPfaContext
///
private readonly Dictionary _nameToVarName;
///
- /// This contains a map of those names in
+ /// This contains a map of those names in
///
private readonly HashSet _unavailable;
diff --git a/src/Microsoft.ML.Data/Model/Pfa/PfaContext.cs b/src/Microsoft.ML.Data/Model/Pfa/PfaContext.cs
index 55122535d4..c0996beea1 100644
--- a/src/Microsoft.ML.Data/Model/Pfa/PfaContext.cs
+++ b/src/Microsoft.ML.Data/Model/Pfa/PfaContext.cs
@@ -215,7 +215,7 @@ public static JObject CreateFuncBlock(JArray prms, JToken returnType, JToken doB
/// declaration. So, if you use a record type three times, that means one of the three usages must be
/// accompanied by a full type declaration, whereas the other two can just then identify it by name.
/// This is extremely silly, but there you go.
- ///
+ ///
/// Anyway: this will attempt to add a type to the list of registered types. If it returns true
/// then the caller is responsible, then, for ensuring that their PFA code they are generating contains
/// not only a reference of the type, but a declaration of the type. If however this returns false
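The register-once contract described above can be modeled with a tiny stand-in; this is not the PfaContext API (whose member names are elided in this hunk), just the return-value convention it documents.

```csharp
using System.Collections.Generic;

// Stand-in for the "declare on first registration, reference by name afterwards" contract.
public sealed class TypeRegistrySketch
{
    private readonly HashSet<string> _registered = new HashSet<string>();

    // True only the first time a name is seen: the caller must then emit a full
    // type declaration. False afterwards: the caller may reference the type by name.
    public bool TryRegister(string typeName) => _registered.Add(typeName);
}
```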
diff --git a/src/Microsoft.ML.Data/Model/Repository.cs b/src/Microsoft.ML.Data/Model/Repository.cs
index b19fbc8eba..eb665f1bfc 100644
--- a/src/Microsoft.ML.Data/Model/Repository.cs
+++ b/src/Microsoft.ML.Data/Model/Repository.cs
@@ -231,7 +231,7 @@ protected void RemoveEntry(Entry ent)
///
/// When building paths to our local file system, we want to force both forward and backward slashes
/// to the system directory separator character. We do this for cases where we either used Windows-specific
- /// path building logic, or concatenated filesystem paths with zip archive entries on Linux.
+ /// path building logic, or concatenated filesystem paths with zip archive entries on Linux.
///
private static string NormalizeForFileSystem(string path) =>
path?.Replace('/', Path.DirectorySeparatorChar).Replace('\\', Path.DirectorySeparatorChar);
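As a usage note, the logic is exactly the one-liner above, repeated here in a self-contained form with an illustrative entry name.

```csharp
using System.IO;

static class PathNormalizationSketch
{
    // Same idea as Repository.NormalizeForFileSystem: force both separators to the platform's.
    public static string Normalize(string path) =>
        path?.Replace('/', Path.DirectorySeparatorChar).Replace('\\', Path.DirectorySeparatorChar);
}

// On Linux both of these yield "DataLoaderModel/Model.key"; on Windows, "DataLoaderModel\Model.key".
// (The entry name is made up for illustration.)
//   PathNormalizationSketch.Normalize("DataLoaderModel/Model.key");
//   PathNormalizationSketch.Normalize(@"DataLoaderModel\Model.key");
```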
diff --git a/src/Microsoft.ML.Data/Prediction/Calibrator.cs b/src/Microsoft.ML.Data/Prediction/Calibrator.cs
index 895bf92273..237afb400e 100644
--- a/src/Microsoft.ML.Data/Prediction/Calibrator.cs
+++ b/src/Microsoft.ML.Data/Prediction/Calibrator.cs
@@ -111,7 +111,7 @@ public interface ICalibratorTrainer
///
public interface ICalibrator
{
- /// Given a classifier output, produce the probability
+ /// Given a classifier output, produce the probability
Float PredictProbability(Float output);
/// Get the summary of current calibrator settings
@@ -745,7 +745,7 @@ private static bool NeedCalibration(IHostEnvironment env, IChannel ch, ICalibrat
/// The trainer used to train the predictor.
/// The predictor that needs calibration.
/// The examples to use for calibrator training.
- /// The original predictor, if no calibration is needed,
+ /// The original predictor, if no calibration is needed,
/// or a metapredictor that wraps the original predictor and the newly trained calibrator.
public static IPredictor TrainCalibratorIfNeeded(IHostEnvironment env, IChannel ch, ICalibratorTrainer calibrator,
int maxRows, ITrainer trainer, IPredictor predictor, RoleMappedData data)
@@ -771,7 +771,7 @@ public static IPredictor TrainCalibratorIfNeeded(IHostEnvironment env, IChannel
/// The maximum rows to use for calibrator training.
/// The predictor that needs calibration.
/// The examples to use for calibrator training.
- /// The original predictor, if no calibration is needed,
+ /// The original predictor, if no calibration is needed,
/// or a metapredictor that wraps the original predictor and the newly trained calibrator.
public static IPredictor TrainCalibrator(IHostEnvironment env, IChannel ch, ICalibratorTrainer caliTrainer,
int maxRows, IPredictor predictor, RoleMappedData data)
diff --git a/src/Microsoft.ML.Data/Scorers/BinaryClassifierScorer.cs b/src/Microsoft.ML.Data/Scorers/BinaryClassifierScorer.cs
index 598db3ab46..6da402431d 100644
--- a/src/Microsoft.ML.Data/Scorers/BinaryClassifierScorer.cs
+++ b/src/Microsoft.ML.Data/Scorers/BinaryClassifierScorer.cs
@@ -201,7 +201,7 @@ public override void SaveAsOnnx(OnnxContext ctx)
for (int iinfo = 0; iinfo < Bindings.InfoCount; ++iinfo)
outColumnNames[iinfo] = Bindings.GetColumnName(Bindings.MapIinfoToCol(iinfo));
- //Check if "Probability" column was generated by the base class, only then
+ //Check if "Probability" column was generated by the base class, only then
//label can be predicted.
if (Bindings.InfoCount >= 3 && ctx.ContainsColumn(outColumnNames[2]))
{
diff --git a/src/Microsoft.ML.Data/Scorers/GenericScorer.cs b/src/Microsoft.ML.Data/Scorers/GenericScorer.cs
index a407873bac..41c12e94ed 100644
--- a/src/Microsoft.ML.Data/Scorers/GenericScorer.cs
+++ b/src/Microsoft.ML.Data/Scorers/GenericScorer.cs
@@ -20,7 +20,7 @@ namespace Microsoft.ML.Runtime.Data
{
///
/// This class is a scorer that passes through all the ISchemaBound columns without adding any "derived columns".
- /// It also passes through all metadata (except for possibly changing the score column kind), and adds the
+ /// It also passes through all metadata (except for possibly changing the score column kind), and adds the
/// score set id metadata.
///
diff --git a/src/Microsoft.ML.Data/Scorers/ScoreMapperSchema.cs b/src/Microsoft.ML.Data/Scorers/ScoreMapperSchema.cs
index ddb05e3686..0f115bb2f0 100644
--- a/src/Microsoft.ML.Data/Scorers/ScoreMapperSchema.cs
+++ b/src/Microsoft.ML.Data/Scorers/ScoreMapperSchema.cs
@@ -251,7 +251,7 @@ public SequencePredictorSchema(ColumnType type, ref VBuffer keyNames, st
Contracts.CheckParam(keyNames.Length == type.ItemType.KeyCount,
nameof(keyNames), "keyNames length must match type's key count");
// REVIEW: Assuming the caller takes some care, it seems
- // like we can get away with
+ // like we can get away with
_keyNames = keyNames;
_keyNamesType = new VectorType(TextType.Instance, keyNames.Length);
_getKeyNames = GetKeyNames;
diff --git a/src/Microsoft.ML.Data/Training/EarlyStoppingCriteria.cs b/src/Microsoft.ML.Data/Training/EarlyStoppingCriteria.cs
index 285db8bfe1..1da5a5562a 100644
--- a/src/Microsoft.ML.Data/Training/EarlyStoppingCriteria.cs
+++ b/src/Microsoft.ML.Data/Training/EarlyStoppingCriteria.cs
@@ -123,7 +123,7 @@ public override bool CheckScore(Float validationScore, Float trainingScore, out
}
// For the detail of the following rules, see the following paper.
- // Lodwich, Aleksander, Yves Rangoni, and Thomas Breuel. "Evaluation of robustness and performance of early stopping rules with multi layer perceptrons."
+ // Lodwich, Aleksander, Yves Rangoni, and Thomas Breuel. "Evaluation of robustness and performance of early stopping rules with multi layer perceptrons."
// Neural Networks, 2009. IJCNN 2009. International Joint Conference on. IEEE, 2009.
public abstract class MovingWindowEarlyStoppingCriterion : EarlyStoppingCriterion
diff --git a/src/Microsoft.ML.Data/Training/TrainerUtils.cs b/src/Microsoft.ML.Data/Training/TrainerUtils.cs
index b2032bfc38..33d3d1490d 100644
--- a/src/Microsoft.ML.Data/Training/TrainerUtils.cs
+++ b/src/Microsoft.ML.Data/Training/TrainerUtils.cs
@@ -400,10 +400,10 @@ protected static IRowCursor CreateCursor(RoleMappedData data, CursOpt opt, IRand
/// delegate of the cursor, indicating what additional options should be specified on subsequent
/// passes over the data. The base implementation checks if any rows were skipped, and if none were
/// skipped, it signals the context that it needn't bother with any filtering checks.
- ///
+ ///
/// Because the result will be "or"-red, a perfectly acceptable implementation is that this
/// return the default , in which case the flags will not ever change.
- ///
+ ///
/// If the cursor was created with a signal delegate, the return value of this method will be sent
/// to that delegate.
///
diff --git a/src/Microsoft.ML.Data/Transforms/ColumnBindingsBase.cs b/src/Microsoft.ML.Data/Transforms/ColumnBindingsBase.cs
index 58eee5430b..2347d2c679 100644
--- a/src/Microsoft.ML.Data/Transforms/ColumnBindingsBase.cs
+++ b/src/Microsoft.ML.Data/Transforms/ColumnBindingsBase.cs
@@ -324,17 +324,17 @@ protected ColumnBindingsBase(ISchema input, bool user, params string[] names)
if (string.IsNullOrWhiteSpace(name))
{
throw user ?
-#pragma warning disable TLC_ContractsNameUsesNameof // Unfortunately, there is no base class for the columns bindings.
+#pragma warning disable MSML_ContractsNameUsesNameof // Unfortunately, there is no base class for the columns bindings.
Contracts.ExceptUserArg(standardColumnArgName, "New column needs a name") :
-#pragma warning restore TLC_ContractsNameUsesNameof
+#pragma warning restore MSML_ContractsNameUsesNameof
Contracts.ExceptDecode("New column needs a name");
}
if (_nameToInfoIndex.ContainsKey(name))
{
throw user ?
-#pragma warning disable TLC_ContractsNameUsesNameof // Unfortunately, there is no base class for the columns bindings.
+#pragma warning disable MSML_ContractsNameUsesNameof // Unfortunately, there is no base class for the columns bindings.
Contracts.ExceptUserArg(standardColumnArgName, "New column '{0}' specified multiple times", name) :
-#pragma warning restore TLC_ContractsNameUsesNameof
+#pragma warning restore MSML_ContractsNameUsesNameof
Contracts.ExceptDecode("New column '{0}' specified multiple times", name);
}
_nameToInfoIndex.Add(name, iinfo);
@@ -686,10 +686,10 @@ protected ManyToOneColumnBindingsBase(ManyToOneColumn[] column, ISchema input, F
for (int j = 0; j < src.Length; j++)
{
Contracts.CheckUserArg(!string.IsNullOrWhiteSpace(src[j]), nameof(ManyToOneColumn.Source));
-#pragma warning disable TLC_ContractsNameUsesNameof // Unfortunately, there is no base class for the columns bindings.
+#pragma warning disable MSML_ContractsNameUsesNameof // Unfortunately, there is no base class for the columns bindings.
if (!input.TryGetColumnIndex(src[j], out srcIndices[j]))
throw Contracts.ExceptUserArg(standardColumnArgName, "Source column '{0}' not found", src[j]);
-#pragma warning restore TLC_ContractsNameUsesNameof
+#pragma warning restore MSML_ContractsNameUsesNameof
srcTypes[j] = input.GetColumnType(srcIndices[j]);
var size = srcTypes[j].ValueCount;
srcSize = size == 0 ? null : checked(srcSize + size);
@@ -700,10 +700,10 @@ protected ManyToOneColumnBindingsBase(ManyToOneColumn[] column, ISchema input, F
string reason = testTypes(srcTypes);
if (reason != null)
{
-#pragma warning disable TLC_ContractsNameUsesNameof // Unfortunately, there is no base class for the columns bindings.
+#pragma warning disable MSML_ContractsNameUsesNameof // Unfortunately, there is no base class for the columns bindings.
throw Contracts.ExceptUserArg(standardColumnArgName, "Column '{0}' has invalid source types: {1}. Source types: '{2}'.",
item.Name, reason, string.Join(", ", srcTypes.Select(type => type.ToString())));
-#pragma warning restore TLC_ContractsNameUsesNameof
+#pragma warning restore MSML_ContractsNameUsesNameof
}
}
Infos[i] = new ColInfo(srcSize.GetValueOrDefault(), srcIndices, srcTypes);
@@ -861,7 +861,7 @@ public Func GetDependencies(Func predicate)
}
///
- /// Parsing utilities for converting between transform column argument objects and
+ /// Parsing utilities for converting between transform column argument objects and
/// command line representations.
///
public static class ColumnParsingUtils
diff --git a/src/Microsoft.ML.Data/Transforms/ConcatTransform.cs b/src/Microsoft.ML.Data/Transforms/ConcatTransform.cs
index c12dd8fad5..b2024cc18c 100644
--- a/src/Microsoft.ML.Data/Transforms/ConcatTransform.cs
+++ b/src/Microsoft.ML.Data/Transforms/ConcatTransform.cs
@@ -55,7 +55,7 @@ public sealed class TaggedColumn
public string Name;
// The tag here (the key of the KeyValuePair) is the string that will be the prefix of the slot name
- // in the output column. For non-vector columns, the slot name will be either the column name or the
+ // in the output column. For non-vector columns, the slot name will be either the column name or the
// tag if it is non empty. For vector columns, the slot names will be 'ColumnName.SlotName' if the
// tag is empty, 'Tag.SlotName' if tag is non empty, and simply the slot name if tag is non empty
// and equal to the column name.
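The slot-naming rules spelled out in the comment above can be captured in a small helper; this is a sketch of the stated rules, not ConcatTransform's actual code.

```csharp
public static class ConcatSlotNamingSketch
{
    // Vector source column: "ColumnName.SlotName" when the tag is empty, "Tag.SlotName"
    // when the tag is non-empty, and just "SlotName" when the tag equals the column name.
    public static string VectorSlotName(string columnName, string tag, string slotName)
    {
        if (string.IsNullOrEmpty(tag))
            return columnName + "." + slotName;
        if (tag == columnName)
            return slotName;
        return tag + "." + slotName;
    }

    // Non-vector source column: the column name, or the tag if it is non-empty.
    public static string ScalarSlotName(string columnName, string tag)
        => string.IsNullOrEmpty(tag) ? columnName : tag;
}
```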
diff --git a/src/Microsoft.ML.Data/Transforms/DropSlotsTransform.cs b/src/Microsoft.ML.Data/Transforms/DropSlotsTransform.cs
index 9a40f404ea..230cfbe680 100644
--- a/src/Microsoft.ML.Data/Transforms/DropSlotsTransform.cs
+++ b/src/Microsoft.ML.Data/Transforms/DropSlotsTransform.cs
@@ -313,7 +313,7 @@ private void GetSlotsMinMax(Column col, out int[] slotsMin, out int[] slotsMax)
slotsMin[j] = range.Min;
// There are two reasons for setting the max to int.MaxValue - 1:
// 1. max is an index, so it has to be strictly less than int.MaxValue.
- // 2. to prevent overflows when adding 1 to it.
+ // 2. to prevent overflows when adding 1 to it.
slotsMax[j] = range.Max ?? int.MaxValue - 1;
}
Array.Sort(slotsMin, slotsMax);
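A one-line sketch of why the open-ended maximum is clamped to int.MaxValue - 1: the index must stay strictly below int.MaxValue, and later arithmetic adds 1 to it.

```csharp
public static class SlotRangeSketch
{
    // An open-ended range (Max == null) clamps to int.MaxValue - 1 so that the
    // exclusive upper bound (max + 1) cannot overflow.
    public static (int maxInclusive, int limExclusive) Bounds(int? max)
    {
        int maxInclusive = max ?? int.MaxValue - 1;
        return (maxInclusive, maxInclusive + 1);   // at most int.MaxValue, no overflow
    }
}
```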
@@ -473,7 +473,7 @@ private void GetCategoricalSlotRangesCore(int iinfo, int[] slotsMin, int[] slots
// Six possible ways a drop slot range interacts with categorical slots range.
//
- // +--------------Drop-------------+
+ // +--------------Drop-------------+
// | |
//
// +---Drop---+ +---Drop---+ +---Drop---+
diff --git a/src/Microsoft.ML.Data/Transforms/GenerateNumberTransform.cs b/src/Microsoft.ML.Data/Transforms/GenerateNumberTransform.cs
index 713f85f9df..cacd681141 100644
--- a/src/Microsoft.ML.Data/Transforms/GenerateNumberTransform.cs
+++ b/src/Microsoft.ML.Data/Transforms/GenerateNumberTransform.cs
@@ -24,9 +24,9 @@
namespace Microsoft.ML.Runtime.Data
{
///
- /// This transform adds columns containing either random numbers distributed
+ /// This transform adds columns containing either random numbers distributed
/// uniformly between 0 and 1 or an auto-incremented integer starting at zero.
- /// It will be used in conjunction with a filter transform to create random
+ /// It will be used in conjunction with a filter transform to create random
/// partitions of the data, used in cross validation.
///
public sealed class GenerateNumberTransform : RowToRowTransformBase
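A rough sketch of the pattern the summary describes: tag every row with a uniform [0, 1) number, then keep the rows whose number falls in a chosen range to form a random partition. This is the concept only, not the transform or filter APIs themselves.

```csharp
using System;
using System.Linq;

public static class RandomPartitionSketch
{
    // Conceptual split: rows whose random tag falls below testFraction form the test
    // partition, the rest form the training partition.
    public static (int[] train, int[] test) Split(int rowCount, double testFraction, int seed)
    {
        var rand = new Random(seed);
        var tagged = Enumerable.Range(0, rowCount)
                               .Select(i => (row: i, r: rand.NextDouble()))
                               .ToArray();
        var train = tagged.Where(t => t.r >= testFraction).Select(t => t.row).ToArray();
        var test = tagged.Where(t => t.r < testFraction).Select(t => t.row).ToArray();
        return (train, test);
    }
}
```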
diff --git a/src/Microsoft.ML.Data/Transforms/HashTransform.cs b/src/Microsoft.ML.Data/Transforms/HashTransform.cs
index 23ba5592b7..0519428284 100644
--- a/src/Microsoft.ML.Data/Transforms/HashTransform.cs
+++ b/src/Microsoft.ML.Data/Transforms/HashTransform.cs
@@ -25,7 +25,7 @@ namespace Microsoft.ML.Runtime.Data
///
/// This transform can hash either single valued columns or vector columns. For vector columns,
- /// it hashes each slot separately.
+ /// it hashes each slot separately.
/// It can hash either text values or key values.
///
public sealed class HashTransform : OneToOneTransformBase, ITransformTemplate
diff --git a/src/Microsoft.ML.Data/Transforms/InvertHashUtils.cs b/src/Microsoft.ML.Data/Transforms/InvertHashUtils.cs
index 7a7e8fafda..d615b96894 100644
--- a/src/Microsoft.ML.Data/Transforms/InvertHashUtils.cs
+++ b/src/Microsoft.ML.Data/Transforms/InvertHashUtils.cs
@@ -265,7 +265,7 @@ public VBuffer GetMetadata()
public void Add(int dstSlot, ValueGetter getter, ref T key)
{
- // REVIEW: I only call the getter if I determine I have to, but
+ // REVIEW: I only call the getter if I determine I have to, but
// at the cost of passing along this getter and ref argument (as opposed
// to just the argument). Is this really appropriate or helpful?
Contracts.Assert(0 <= dstSlot && dstSlot < _slots);
diff --git a/src/Microsoft.ML.Data/Transforms/KeyToValueTransform.cs b/src/Microsoft.ML.Data/Transforms/KeyToValueTransform.cs
index 7c1fa19c10..997fa22d03 100644
--- a/src/Microsoft.ML.Data/Transforms/KeyToValueTransform.cs
+++ b/src/Microsoft.ML.Data/Transforms/KeyToValueTransform.cs
@@ -85,7 +85,6 @@ public KeyToValueTransform(IHostEnvironment env, IDataView input, string name, s
{
}
-
///
/// Public constructor corresponding to SignatureDataTransform.
///
diff --git a/src/Microsoft.ML.Data/Transforms/NormalizeColumnDbl.cs b/src/Microsoft.ML.Data/Transforms/NormalizeColumnDbl.cs
index e577b9370e..6cad82c127 100644
--- a/src/Microsoft.ML.Data/Transforms/NormalizeColumnDbl.cs
+++ b/src/Microsoft.ML.Data/Transforms/NormalizeColumnDbl.cs
@@ -542,7 +542,7 @@ public ImplOne(IHost host, TFloat scale, TFloat offset)
{
}
- public new static ImplOne Create(ModelLoadContext ctx, IHost host, ColumnType typeSrc)
+ public static new ImplOne Create(ModelLoadContext ctx, IHost host, ColumnType typeSrc)
{
host.Check(typeSrc.RawType == typeof(TFloat), "The column type must be R8.");
List nz = null;
@@ -605,7 +605,7 @@ public ImplVec(IHost host, TFloat[] scale, TFloat[] offset, int[] indicesNonZero
{
}
- public new static ImplVec Create(ModelLoadContext ctx, IHost host, ColumnType typeSrc)
+ public static new ImplVec Create(ModelLoadContext ctx, IHost host, ColumnType typeSrc)
{
host.Check(typeSrc.ItemType.RawType == typeof(TFloat), "The column type must be vector of R8.");
int cv = Math.Max(1, typeSrc.VectorSize);
@@ -867,7 +867,7 @@ public ImplOne(IHost host, TFloat mean, TFloat stddev, bool useLog)
{
}
- public new static ImplOne Create(ModelLoadContext ctx, IHost host, ColumnType typeSrc)
+ public static new ImplOne Create(ModelLoadContext ctx, IHost host, ColumnType typeSrc)
{
host.Check(typeSrc.RawType == typeof(TFloat), "The column type must be R8.");
host.CheckValue(ctx, nameof(ctx));
@@ -932,7 +932,7 @@ public ImplVec(IHost host, TFloat[] mean, TFloat[] stddev, bool useLog)
{
}
- public new static ImplVec Create(ModelLoadContext ctx, IHost host, ColumnType typeSrc)
+ public static new ImplVec Create(ModelLoadContext ctx, IHost host, ColumnType typeSrc)
{
host.Check(typeSrc.ItemType.RawType == typeof(TFloat), "The column type must be vector of R8.");
int cv = Math.Max(1, typeSrc.VectorSize);
@@ -1051,7 +1051,7 @@ public ImplOne(IHost host, TFloat[] binUpperBounds, bool fixZero)
Host.Assert(0 <= _offset & _offset <= 1);
}
- public new static ImplOne Create(ModelLoadContext ctx, IHost host, ColumnType typeSrc)
+ public static new ImplOne Create(ModelLoadContext ctx, IHost host, ColumnType typeSrc)
{
host.Check(typeSrc.RawType == typeof(TFloat), "The column type must be R8.");
host.CheckValue(ctx, nameof(ctx));
@@ -1133,7 +1133,7 @@ public ImplVec(IHost host, TFloat[][] binUpperBounds, bool fixZero)
}
}
- public new static ImplVec Create(ModelLoadContext ctx, IHost host, ColumnType typeSrc)
+ public static new ImplVec Create(ModelLoadContext ctx, IHost host, ColumnType typeSrc)
{
host.Check(typeSrc.ItemType.RawType == typeof(TFloat), "The column type must be vector of R8.");
int cv = Math.Max(1, typeSrc.VectorSize);
@@ -1280,7 +1280,7 @@ private static void ComputeScaleAndOffset(TFloat max, TFloat min, out TFloat sca
// but infinities and NaN to NaN.
// REVIEW: If min <= 0 and max >= 0, then why not fix zero for this slot and simply scale by 1 / max(abs(..))?
// We could even be more aggressive about it, and fix zero if 0 < min < max <= 2 * min.
- // Then the common case where features are in the range [1, N] (and integer valued) wouldn't subtract 1 every time....
+ // Then the common case where features are in the range [1, N] (and integer valued) wouldn't subtract 1 every time....
if (!(max > min))
scale = offset = 0;
else if ((scale = 1 / (max - min)) == 0)
@@ -1302,7 +1302,7 @@ private static void ComputeScaleAndOffsetFixZero(TFloat max, TFloat min, out TFl
// In the case where max <= min, the slot contains no useful information (since it is either constant, or
// is all NaNs, or has no rows), so we force it to zero.
// Note that setting scale to zero effectively maps finite values to zero,
- // but infinities and NaN to NaN.
+ // but infinities and NaN to NaN.
offset = 0;
if (!(max > min))
scale = 0;
@@ -1321,7 +1321,7 @@ public static void ComputeScaleAndOffset(Double mean, Double stddev, out TFloat
// In the case where stdev==0, the slot contains no useful information (since it is constant),
// so we force it to zero. Note that setting scale to zero effectively maps finite values to zero,
- // but infinities and NaN to NaN.
+ // but infinities and NaN to NaN.
if (stddev == 0)
scale = offset = 0;
else if ((scale = 1 / (TFloat)stddev) == 0)
@@ -1338,7 +1338,7 @@ public static void ComputeScaleAndOffsetFixZero(Double mean, Double meanSquaredE
// In the case where stdev==0, the slot contains no useful information (since it is constant),
// so we force it to zero. Note that setting scale to zero effectively maps finite values to zero,
- // but infinities and NaN to NaN.
+ // but infinities and NaN to NaN.
offset = 0;
if (meanSquaredError == 0)
scale = 0;
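Collecting the conventions spelled out in the comments above into one place: a slot is rescaled as (value - offset) * scale, and degenerate slots get scale = offset = 0 so finite inputs map to zero while infinities and NaN stay NaN. The underflow handling mirrors the visible code; anything not shown in this hunk is an assumption.

```csharp
public static class NormalizerMathSketch
{
    // Min-max: scale = 1 / (max - min), offset = min; degenerate or underflowing slots -> (0, 0).
    public static (double scale, double offset) MinMax(double min, double max)
    {
        if (!(max > min))
            return (0, 0);
        double scale = 1 / (max - min);
        if (scale == 0)
            return (0, 0);
        return (scale, min);
    }

    // Mean/variance: scale = 1 / stddev, offset = mean; stddev == 0 (constant slot) -> (0, 0).
    public static (double scale, double offset) MeanVar(double mean, double stddev)
    {
        if (stddev == 0)
            return (0, 0);
        double scale = 1 / stddev;
        if (scale == 0)
            return (0, 0);
        return (scale, mean);
    }
}
```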
diff --git a/src/Microsoft.ML.Data/Transforms/NormalizeColumnSng.cs b/src/Microsoft.ML.Data/Transforms/NormalizeColumnSng.cs
index 4c6e1fb011..af94f31454 100644
--- a/src/Microsoft.ML.Data/Transforms/NormalizeColumnSng.cs
+++ b/src/Microsoft.ML.Data/Transforms/NormalizeColumnSng.cs
@@ -542,7 +542,7 @@ public ImplOne(IHost host, TFloat scale, TFloat offset)
{
}
- public new static ImplOne Create(ModelLoadContext ctx, IHost host, ColumnType typeSrc)
+ public static new ImplOne Create(ModelLoadContext ctx, IHost host, ColumnType typeSrc)
{
host.Check(typeSrc.RawType == typeof(TFloat), "The column type must be R4.");
List nz = null;
@@ -605,7 +605,7 @@ public ImplVec(IHost host, TFloat[] scale, TFloat[] offset, int[] indicesNonZero
{
}
- public new static ImplVec Create(ModelLoadContext ctx, IHost host, ColumnType typeSrc)
+ public static new ImplVec Create(ModelLoadContext ctx, IHost host, ColumnType typeSrc)
{
host.Check(typeSrc.ItemType.RawType == typeof(TFloat), "The column type must be vector of R4.");
int cv = Math.Max(1, typeSrc.VectorSize);
@@ -869,7 +869,7 @@ public ImplOne(IHost host, TFloat mean, TFloat stddev, bool useLog)
{
}
- public new static ImplOne Create(ModelLoadContext ctx, IHost host, ColumnType typeSrc)
+ public static new ImplOne Create(ModelLoadContext ctx, IHost host, ColumnType typeSrc)
{
host.Check(typeSrc.RawType == typeof(TFloat), "The column type must be R4.");
host.CheckValue(ctx, nameof(ctx));
@@ -934,7 +934,7 @@ public ImplVec(IHost host, TFloat[] mean, TFloat[] stddev, bool useLog)
{
}
- public new static ImplVec Create(ModelLoadContext ctx, IHost host, ColumnType typeSrc)
+ public static new ImplVec Create(ModelLoadContext ctx, IHost host, ColumnType typeSrc)
{
host.Check(typeSrc.ItemType.RawType == typeof(TFloat), "The column type must be vector of R4.");
int cv = Math.Max(1, typeSrc.VectorSize);
@@ -1053,7 +1053,7 @@ public ImplOne(IHost host, TFloat[] binUpperBounds, bool fixZero)
Host.Assert(0 <= _offset & _offset <= 1);
}
- public new static ImplOne Create(ModelLoadContext ctx, IHost host, ColumnType typeSrc)
+ public static new ImplOne Create(ModelLoadContext ctx, IHost host, ColumnType typeSrc)
{
host.Check(typeSrc.RawType == typeof(TFloat), "The column type must be R4.");
host.CheckValue(ctx, nameof(ctx));
@@ -1135,7 +1135,7 @@ public ImplVec(IHost host, TFloat[][] binUpperBounds, bool fixZero)
}
}
- public new static ImplVec Create(ModelLoadContext ctx, IHost host, ColumnType typeSrc)
+ public static new ImplVec Create(ModelLoadContext ctx, IHost host, ColumnType typeSrc)
{
host.Check(typeSrc.ItemType.RawType == typeof(TFloat), "The column type must be vector of R4.");
int cv = Math.Max(1, typeSrc.VectorSize);
@@ -1282,7 +1282,7 @@ private static void ComputeScaleAndOffset(TFloat max, TFloat min, out TFloat sca
// but infinities and NaN to NaN.
// REVIEW: If min <= 0 and max >= 0, then why not fix zero for this slot and simply scale by 1 / max(abs(..))?
// We could even be more aggressive about it, and fix zero if 0 < min < max <= 2 * min.
- // Then the common case where features are in the range [1, N] (and integer valued) wouldn't subtract 1 every time....
+ // Then the common case where features are in the range [1, N] (and integer valued) wouldn't subtract 1 every time....
if (!(max > min))
scale = offset = 0;
else if ((scale = 1 / (max - min)) == 0)
@@ -1304,7 +1304,7 @@ private static void ComputeScaleAndOffsetFixZero(TFloat max, TFloat min, out TFl
// In the case where max <= min, the slot contains no useful information (since it is either constant, or
// is all NaNs, or has no rows), so we force it to zero.
// Note that setting scale to zero effectively maps finite values to zero,
- // but infinities and NaN to NaN.
+ // but infinities and NaN to NaN.
offset = 0;
if (!(max > min))
scale = 0;
@@ -1323,7 +1323,7 @@ public static void ComputeScaleAndOffset(Double mean, Double stddev, out TFloat
// In the case where stdev==0, the slot contains no useful information (since it is constant),
// so we force it to zero. Note that setting scale to zero effectively maps finite values to zero,
- // but infinities and NaN to NaN.
+ // but infinities and NaN to NaN.
if (stddev == 0)
scale = offset = 0;
else if ((scale = 1 / (TFloat)stddev) == 0)
@@ -1340,7 +1340,7 @@ public static void ComputeScaleAndOffsetFixZero(Double mean, Double meanSquaredE
// In the case where stdev==0, the slot contains no useful information (since it is constant),
// so we force it to zero. Note that setting scale to zero effectively maps finite values to zero,
- // but infinities and NaN to NaN.
+ // but infinities and NaN to NaN.
offset = 0;
if (meanSquaredError == 0)
scale = 0;
diff --git a/src/Microsoft.ML.Data/Transforms/NormalizeTransform.cs b/src/Microsoft.ML.Data/Transforms/NormalizeTransform.cs
index 7a4738d5e4..bf9d77ed49 100644
--- a/src/Microsoft.ML.Data/Transforms/NormalizeTransform.cs
+++ b/src/Microsoft.ML.Data/Transforms/NormalizeTransform.cs
@@ -170,7 +170,7 @@ private NormalizeTransform(IHost host, ArgumentsBase args, IDataView input,
while (cursor.MoveNext())
{
// If the row has bad values, the good values are still being used for training.
- // The comparisons in the code below are arranged so that NaNs in the input are not recorded.
+ // The comparisons in the code below are arranged so that NaNs in the input are not recorded.
// REVIEW: Should infinities and/or NaNs be filtered before the normalization? Should we not record infinities for min/max?
// Currently, infinities are recorded and will result in zero scale which in turn will result in NaN output for infinity input.
bool any = false;
@@ -241,7 +241,7 @@ private NormalizeTransform(IHost host, ModelLoadContext ctx, IDataView input)
for (int iinfo = 0; iinfo < Infos.Length; iinfo++)
{
var typeSrc = Infos[iinfo].TypeSrc;
- // REVIEW: this check (was even an assert) here is too late. Apparently, no-one tests compatibility
+ // REVIEW: this check (was even an assert) here is too late. Apparently, no-one tests compatibility
// of the types at deserialization (aka re-application), which is a bug.
if (typeSrc.ValueCount == 0)
throw Host.Except("Column '{0}' is a vector of variable size, which is not supported for normalizers", Infos[iinfo].Name);
diff --git a/src/Microsoft.ML.Data/Transforms/PerGroupTransformBase.cs b/src/Microsoft.ML.Data/Transforms/PerGroupTransformBase.cs
index 35f37d39a8..7b42008b15 100644
--- a/src/Microsoft.ML.Data/Transforms/PerGroupTransformBase.cs
+++ b/src/Microsoft.ML.Data/Transforms/PerGroupTransformBase.cs
@@ -308,7 +308,7 @@ protected override bool MoveNextCore()
if (!_newGroupInInputCursorDel())
return true;
- // If this is the first step, we need to move next on _groupCursor. Otherwise, the position of _groupCursor is
+ // If this is the first step, we need to move next on _groupCursor. Otherwise, the position of _groupCursor is
// at the start of the next group.
if (_groupCursor.State == CursorState.NotStarted)
{
diff --git a/src/Microsoft.ML.Data/Transforms/RangeFilter.cs b/src/Microsoft.ML.Data/Transforms/RangeFilter.cs
index 589a635aff..142779dee2 100644
--- a/src/Microsoft.ML.Data/Transforms/RangeFilter.cs
+++ b/src/Microsoft.ML.Data/Transforms/RangeFilter.cs
@@ -184,9 +184,9 @@ public override void Save(ModelSaveContext ctx)
// int: id of column name
// double: min
// double: max
- // byte: complement
- // byte: includeMin
- // byte: includeMax
+ // byte: complement
+ // byte: includeMin
+ // byte: includeMax
ctx.Writer.Write(sizeof(Float));
ctx.SaveNonEmptyString(Source.Schema.GetColumnName(_index));
Host.Assert(_min < _max);
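The serialized layout enumerated in the comment above, sketched with a BinaryWriter; the column-name id is handled by the model context in the real code, so it is only noted here, and the helper itself is illustrative.

```csharp
using System.IO;

public static class RangeFilterFormatSketch
{
    public static void Write(BinaryWriter writer, double min, double max,
        bool complement, bool includeMin, bool includeMax)
    {
        writer.Write(sizeof(float));              // "int: sizeof(Float)"
        // int: id of column name -- written via ctx.SaveNonEmptyString in the transform itself.
        writer.Write(min);                        // double: min
        writer.Write(max);                        // double: max
        writer.Write((byte)(complement ? 1 : 0)); // byte: complement
        writer.Write((byte)(includeMin ? 1 : 0)); // byte: includeMin
        writer.Write((byte)(includeMax ? 1 : 0)); // byte: includeMax
    }
}
```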
diff --git a/src/Microsoft.ML.Data/Transforms/ShuffleTransform.cs b/src/Microsoft.ML.Data/Transforms/ShuffleTransform.cs
index 5080208335..3940bbe979 100644
--- a/src/Microsoft.ML.Data/Transforms/ShuffleTransform.cs
+++ b/src/Microsoft.ML.Data/Transforms/ShuffleTransform.cs
@@ -260,7 +260,7 @@ protected override IRowCursor GetRowCursorCore(Func predicate, IRando
// The desired functionality is to support some permutations of whether we allow
// shuffling at the source level, or not.
- //
+ //
// Pool | Source | Options
// -----------+----------+--------
// Randomly | Never | poolOnly+
@@ -301,14 +301,14 @@ public override IRowCursor[] GetRowCursorSet(out IRowCursorConsolidator consolid
/// over a pool of size P. Logically, externally, the cursor acts as if you have this pool
/// P and whenever you randomly sample and yield a row from it, that row is then discarded
/// and replaced with the next row from the input source cursor.
- ///
+ ///
/// It would also be possible to implement in a way that cleaves closely to this logical
/// interpretation, but this would be inefficient. We instead have a buffer of larger size
/// P+B. A consumer (running presumably in the main thread) sampling and fetching items and a
/// producer (running in a task, which may be running in a different thread) filling the buffer
/// with items to sample, utilizing this extra space to enable an efficient possibly
/// multithreaded scheme.
- ///
+ ///
/// The consumer, for its part, at any given time "owns" a contiguous portion of this buffer.
/// (A contiguous portion of this buffer we consider to be able to wrap around, from the end
/// to the beginning. The buffer is accessed in a "circular" fashion.) Consider that this portion
@@ -319,18 +319,18 @@ public override IRowCursor[] GetRowCursorSet(out IRowCursorConsolidator consolid
/// rows ready to be sampled in future iterations, but that we are not sampling yet (in order
/// to behave equivalently to the simple logical model of at any given time sampling P items).
/// The producer owns the complement of the portion owned by the consumer.
- ///
+ ///
/// As the cursor progresses, the producer fills in successive items in its portion of the
/// buffer it owns, and passes them off to the consumer (not one item at a time, but rather in
/// batches, to keep down the amount of intertask communication). The consumer in addition to
/// taking ownership of these items, will also periodically pass dead items back to the producer
/// (again, not one dead item at a time, but in batches when the number of dead items reaches
/// a certain threshold).
- ///
+ ///
/// This communication is accomplished using a pair of BufferBlock instances, through which
/// the producer and consumer are notified how many additional items they can take ownership
/// of.
- ///
+ ///
/// As the consumer "selects" a row from the pool of selectable rows each time it moves to
/// the next row, this randomly selected row is considered to be the "first" index, since this
/// makes its subsequent transition to being a dead row much simpler. It would be inefficient to
@@ -338,7 +338,7 @@ public override IRowCursor[] GetRowCursorSet(out IRowCursorConsolidator consolid
/// first, of course, so one rather swaps an index, so that these nicely behaved contiguous
/// circular indices get mapped to an index within the buffers, through a permutation maintained
/// in the pipeIndices array.
- ///
+ ///
/// The result is something functionally equivalent to, but considerably faster than, the
/// simple implementation described in the first paragraph.
///
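The "simple logical model" from the first paragraph above is easy to state directly: hold a pool of P rows, repeatedly yield a randomly chosen pool element, and replace it with the next row from the source. The sketch below implements that naive model, not the buffered producer/consumer scheme the rest of the comment describes.

```csharp
using System;
using System.Collections.Generic;

public static class PoolShuffleSketch
{
    public static IEnumerable<T> Shuffle<T>(IEnumerable<T> source, int poolSize, Random rand)
    {
        var pool = new List<T>(poolSize);
        foreach (var item in source)
        {
            if (pool.Count < poolSize)
            {
                pool.Add(item);          // still filling the pool
                continue;
            }
            int i = rand.Next(pool.Count);
            yield return pool[i];        // sample and yield one pooled row...
            pool[i] = item;              // ...and replace it with the next source row
        }
        // Source exhausted: drain the remaining pool in random order.
        while (pool.Count > 0)
        {
            int i = rand.Next(pool.Count);
            yield return pool[i];
            pool[i] = pool[pool.Count - 1];
            pool.RemoveAt(pool.Count - 1);
        }
    }
}
```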
diff --git a/src/Microsoft.ML.Data/Transforms/SkipTakeFilter.cs b/src/Microsoft.ML.Data/Transforms/SkipTakeFilter.cs
index bfd3522f73..2adb17258e 100644
--- a/src/Microsoft.ML.Data/Transforms/SkipTakeFilter.cs
+++ b/src/Microsoft.ML.Data/Transforms/SkipTakeFilter.cs
@@ -164,7 +164,7 @@ public override void Save(ModelSaveContext ctx)
public override bool CanShuffle { get { return false; } }
///
- /// Returns the computed count of rows remaining after skip and take operation.
+ /// Returns the computed count of rows remaining after the skip and take operations.
/// Returns null if count is unknown.
///
public override long? GetRowCount(bool lazy = true)
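The "computed count" can be written in one line; a sketch under the assumption that an unknown (null) input count stays unknown.

```csharp
using System;

public static class SkipTakeCountSketch
{
    // Rows remaining after skipping `skip` rows and then taking at most `take` of the rest.
    public static long? Remaining(long? inputCount, long skip, long take)
        => inputCount == null
            ? (long?)null
            : Math.Min(take, Math.Max(0, inputCount.Value - skip));
}
```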
diff --git a/src/Microsoft.ML.Data/Transforms/TermTransform.cs b/src/Microsoft.ML.Data/Transforms/TermTransform.cs
index 1e48a5d1e9..6eaf48e995 100644
--- a/src/Microsoft.ML.Data/Transforms/TermTransform.cs
+++ b/src/Microsoft.ML.Data/Transforms/TermTransform.cs
@@ -612,10 +612,10 @@ private TermTransform(IHost host, ModelLoadContext ctx, IDataView input)
termMap[i] = TermMap.TextImpl.Create(c, host);
}
});
-#pragma warning disable TLC_NoMessagesForLoadContext // Vaguely useful.
+#pragma warning disable MSML_NoMessagesForLoadContext // Vaguely useful.
if (!b)
throw Host.ExceptDecode("Missing {0} model", dir);
-#pragma warning restore TLC_NoMessagesForLoadContext
+#pragma warning restore MSML_NoMessagesForLoadContext
_termMap = new BoundTermMap[cinfo];
for (int i = 0; i < cinfo; ++i)
_termMap[i] = termMap[i].Bind(this, i);
@@ -719,7 +719,7 @@ protected override bool SaveAsOnnxCore(OnnxContext ctx, int iinfo, ColInfo info,
var node = ctx.CreateNode(opType, srcVariableName, dstVariableName, ctx.GetNodeName(opType));
node.AddAttribute("classes_strings", terms.DenseValues());
node.AddAttribute("default_int64", -1);
- //default_string needs to be an empty string but there is a BUG in Lotus that
+ //default_string needs to be an empty string but there is a BUG in Lotus that
//throws a validation error when default_string is empty. As a workaround, set
//default_string to a space.
node.AddAttribute("default_string", " ");
diff --git a/src/Microsoft.ML.Data/Transforms/TermTransformImpl.cs b/src/Microsoft.ML.Data/Transforms/TermTransformImpl.cs
index a81575b9c9..9a43dc5517 100644
--- a/src/Microsoft.ML.Data/Transforms/TermTransformImpl.cs
+++ b/src/Microsoft.ML.Data/Transforms/TermTransformImpl.cs
@@ -447,10 +447,10 @@ private enum MapType : byte
/// type. The input type, whatever it is, must have as its input item
/// type, and will produce either , or a vector type with that output
/// type if the input was a vector.
- ///
+ ///
/// Note that instances of this class can be shared among multiple
/// instances. To associate this with a particular transform, use the method.
- ///
+ ///
/// These are the immutable and serializable analogs to the used in
/// training.
///
diff --git a/src/Microsoft.ML.Data/Transforms/TransformBase.cs b/src/Microsoft.ML.Data/Transforms/TransformBase.cs
index 263a3cf4ca..2d9cedb17b 100644
--- a/src/Microsoft.ML.Data/Transforms/TransformBase.cs
+++ b/src/Microsoft.ML.Data/Transforms/TransformBase.cs
@@ -596,7 +596,7 @@ public void SaveAsOnnx(OnnxContext ctx)
///
/// Called by . Should be implemented by subclasses that return
- /// true from . Will be called
+ /// true from . Will be called
///
/// The context. Can be used to declare cells, access other information,
/// and whatnot. This method should not actually, however, declare the variable corresponding
diff --git a/src/Microsoft.ML.Data/Utilities/ApplyTransformUtils.cs b/src/Microsoft.ML.Data/Utilities/ApplyTransformUtils.cs
index e0a138fc91..7d0695083b 100644
--- a/src/Microsoft.ML.Data/Utilities/ApplyTransformUtils.cs
+++ b/src/Microsoft.ML.Data/Utilities/ApplyTransformUtils.cs
@@ -73,7 +73,7 @@ public static IDataView ApplyAllTransformsToData(IHostEnvironment env, IDataView
// Backtrack the chain until we reach a chain start or a non-transform.
// REVIEW: we 'unwrap' the composite data loader here and step through its pipeline.
- // It's probably more robust to make CompositeDataLoader not even be an IDataView, this
+ // It's probably more robust to make CompositeDataLoader not even be an IDataView, this
// would force the user to do the right thing and unwrap on his end.
var cdl = chain as CompositeDataLoader;
if (cdl != null)
diff --git a/src/Microsoft.ML.Data/Utilities/ModelFileUtils.cs b/src/Microsoft.ML.Data/Utilities/ModelFileUtils.cs
index 900778cd31..5b99b173fa 100644
--- a/src/Microsoft.ML.Data/Utilities/ModelFileUtils.cs
+++ b/src/Microsoft.ML.Data/Utilities/ModelFileUtils.cs
@@ -79,7 +79,7 @@ public static IDataView LoadPipeline(IHostEnvironment env, RepositoryReader rep,
}
///
- /// Loads all transforms from the model stream, applies them sequentially to the provided data, and returns
+ /// Loads all transforms from the model stream, applies them sequentially to the provided data, and returns
/// the resulting data. If there are no transforms in the stream, or if there's no DataLoader stream at all
/// (this can happen if the model is produced by old TL), returns the source data.
/// If the DataLoader stream is invalid, throws.
@@ -101,7 +101,7 @@ public static IDataView LoadTransforms(IHostEnvironment env, IDataView data, Str
}
///
- /// Loads all transforms from the model stream, applies them sequentially to the provided data, and returns
+ /// Loads all transforms from the model stream, applies them sequentially to the provided data, and returns
/// the resulting data. If there are no transforms in the stream, or if there's no DataLoader stream at all
/// (this can happen if the model is produced by old TL), returns the source data.
/// If the DataLoader stream is invalid, throws.
@@ -157,8 +157,8 @@ public static ModelSaveContext GetDataModelSavingContext(RepositoryWriter rep)
}
///
- /// Loads data view (loader and transforms) from if is set to true,
- /// otherwise loads loader only.
+ /// Loads data view (loader and transforms) from if is set to true,
+ /// otherwise loads loader only.
///
public static IDataLoader LoadLoader(IHostEnvironment env, RepositoryReader rep, IMultiStreamSource files, bool loadTransforms)
{
@@ -188,7 +188,7 @@ public static IDataLoader LoadLoader(IHostEnvironment env, RepositoryReader rep,
}
///
- /// REVIEW: consider adding an overload that returns
+ /// REVIEW: consider adding an overload that returns
/// Optionally loads feature names from the repository directory.
/// Returns false iff no stream was found for feature names, in which case result is set to null.
///
@@ -342,7 +342,7 @@ public static RoleMappedSchema LoadRoleMappedSchemaOrNull(IHostEnvironment env,
}
///
- /// The RepositoryStreamWrapper is a IMultiStreamSource wrapper of a Stream object in a repository.
+ /// The RepositoryStreamWrapper is a IMultiStreamSource wrapper of a Stream object in a repository.
/// It is used to deserialize RoleMappings.txt from a model zip file.
///
private sealed class RepositoryStreamWrapper : IMultiStreamSource
@@ -382,7 +382,7 @@ public Stream Open(int index)
public TextReader OpenTextReader(int index) { return new StreamReader(Open(index)); }
///
- /// A custom entry stream wrapper that includes custom dispose logic for disposing the entry
+ /// A custom entry stream wrapper that includes custom dispose logic for disposing the entry
/// when the stream is disposed.
///
private sealed class EntryStream : Stream
diff --git a/src/Microsoft.ML.Data/Utilities/SlotDropper.cs b/src/Microsoft.ML.Data/Utilities/SlotDropper.cs
index cd74463291..64b510a655 100644
--- a/src/Microsoft.ML.Data/Utilities/SlotDropper.cs
+++ b/src/Microsoft.ML.Data/Utilities/SlotDropper.cs
@@ -91,7 +91,7 @@ public ValueGetter> SubsetGetter(ValueGetter> getter)
}
///
- /// Drops slots from src and populates the dst with the resulting vector. Slots are
+ /// Drops slots from src and populates the dst with the resulting vector. Slots are
/// dropped based on min and max slots that were passed at the constructor.
///
public void DropSlots(ref VBuffer src, ref VBuffer dst)
diff --git a/src/Microsoft.ML.Data/Utils/IntSequencePool.cs b/src/Microsoft.ML.Data/Utils/IntSequencePool.cs
index 3efb038e6e..e27b297025 100644
--- a/src/Microsoft.ML.Data/Utils/IntSequencePool.cs
+++ b/src/Microsoft.ML.Data/Utils/IntSequencePool.cs
@@ -173,7 +173,7 @@ private int GetCore(uint[] sequence, int min, int lim, out uint hash)
Contracts.Assert(ibCur <= ibLim);
if (i >= lim)
{
- // Need to make sure that we have reached the end of the sequence in the pool at the
+ // Need to make sure that we have reached the end of the sequence in the pool at the
// same time that we reached the end of sequence.
if (ibCur == ibLim)
return idCur;
diff --git a/src/Microsoft.ML.Data/Utils/LossFunctions.cs b/src/Microsoft.ML.Data/Utils/LossFunctions.cs
index 7ff47a4f9e..7df431c3d1 100644
--- a/src/Microsoft.ML.Data/Utils/LossFunctions.cs
+++ b/src/Microsoft.ML.Data/Utils/LossFunctions.cs
@@ -124,9 +124,9 @@ public Float ComputeDualUpdateInvariant(Float scaledFeaturesNormSquared)
return 1 / Math.Max(1, (Float)0.25 + scaledFeaturesNormSquared);
}
- // REVIEW: this dual update uses a different log loss formulation,
+ // REVIEW: this dual update uses a different log loss formulation,
// although the two are equivalent if the labels are restricted to 0 and 1
- //Need to update so that it can handle probability label and true to the
+ //Need to update so that it can handle probability labels and stay true to the
//definition, which is a smooth loss function
public Float DualUpdate(Float output, Float label, Float dual, Float invariant, int maxNumThreads)
{
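For reference, the two log-loss formulations the REVIEW comment contrasts agree when the labels are 0/1: the margin form log(1 + exp(-y * output)) with y in {-1, +1}, and the cross-entropy form over the sigmoid probability. This is background math only, not the SDCA dual update itself.

```csharp
using System;

public static class LogLossSketch
{
    // Margin form, y in {-1, +1}.
    public static double MarginForm(double output, double y)
        => Math.Log(1 + Math.Exp(-y * output));

    // Cross-entropy form, label01 in {0, 1}, probability = sigmoid(output).
    public static double CrossEntropyForm(double output, double label01)
    {
        double prob = 1 / (1 + Math.Exp(-output));
        return -(label01 * Math.Log(prob) + (1 - label01) * Math.Log(1 - prob));
    }

    // MarginForm(s, +1) == CrossEntropyForm(s, 1) and MarginForm(s, -1) == CrossEntropyForm(s, 0),
    // which is the equivalence the comment above refers to.
}
```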
diff --git a/src/Microsoft.ML.Ensemble/EnsembleUtils.cs b/src/Microsoft.ML.Ensemble/EnsembleUtils.cs
index 6366321c48..ae6c2adac6 100644
--- a/src/Microsoft.ML.Ensemble/EnsembleUtils.cs
+++ b/src/Microsoft.ML.Ensemble/EnsembleUtils.cs
@@ -38,7 +38,7 @@ public static RoleMappedData SelectFeatures(IHost host, RoleMappedData data, Bit
}
///
- /// Fill dst with values selected from src if the indices of the src values are set in includedIndices,
+ /// Fill dst with values selected from src if the indices of the src values are set in includedIndices,
/// otherwise assign default(T). The length of dst will be equal to src.Length.
///
public static void SelectFeatures(ref VBuffer src, BitArray includedIndices, int cardinality, ref VBuffer dst)
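The dense version of the contract in the summary above is short; this sketch works on arrays rather than VBuffer, but follows the same rule: copy values whose index bit is set, default the rest, keep the length.

```csharp
using System.Collections;

public static class FeatureSelectionSketch
{
    public static T[] Select<T>(T[] src, BitArray includedIndices)
    {
        var dst = new T[src.Length];
        for (int i = 0; i < src.Length; i++)
            dst[i] = includedIndices[i] ? src[i] : default(T);   // default(T) where the bit is unset
        return dst;
    }
}
```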
diff --git a/src/Microsoft.ML.Ensemble/EntryPoints/CreateEnsemble.cs b/src/Microsoft.ML.Ensemble/EntryPoints/CreateEnsemble.cs
index f512114bbb..a9d7983adf 100644
--- a/src/Microsoft.ML.Ensemble/EntryPoints/CreateEnsemble.cs
+++ b/src/Microsoft.ML.Ensemble/EntryPoints/CreateEnsemble.cs
@@ -316,10 +316,10 @@ private static TOut CreatePipelineEnsemble(IHostEnvironment env, IPredicto
///
/// This method takes a as input, saves it as an in-memory
- /// and returns two arrays indexed by the entries in the zip:
+ /// and returns two arrays indexed by the entries in the zip:
/// 1. An array of byte arrays, containing the byte sequences of each entry.
/// 2. An array of strings, containing the name of each entry.
- ///
+ ///
/// This method is used for comparing pipelines. Its outputs can be passed to
/// to check if this pipeline is identical to another pipeline.
///
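A sketch of the described output, two parallel arrays over a zip's entries, using System.IO.Compression directly; it mirrors the description rather than the private helper's exact code.

```csharp
using System.IO;
using System.IO.Compression;

public static class ZipSnapshotSketch
{
    // Returns (1) the byte sequence of each entry and (2) the name of each entry,
    // indexed the same way, as described above.
    public static (byte[][] bytes, string[] names) Snapshot(Stream zipStream)
    {
        using (var archive = new ZipArchive(zipStream, ZipArchiveMode.Read, leaveOpen: true))
        {
            var entries = archive.Entries;
            var bytes = new byte[entries.Count][];
            var names = new string[entries.Count];
            for (int i = 0; i < entries.Count; i++)
            {
                names[i] = entries[i].FullName;
                using (var entry = entries[i].Open())
                using (var ms = new MemoryStream())
                {
                    entry.CopyTo(ms);
                    bytes[i] = ms.ToArray();
                }
            }
            return (bytes, names);
        }
    }
}
```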
diff --git a/src/Microsoft.ML.Ensemble/PipelineEnsemble.cs b/src/Microsoft.ML.Ensemble/PipelineEnsemble.cs
index 3cf30a3211..3ac78ed91e 100644
--- a/src/Microsoft.ML.Ensemble/PipelineEnsemble.cs
+++ b/src/Microsoft.ML.Ensemble/PipelineEnsemble.cs
@@ -600,7 +600,7 @@ protected static int CheckLabelColumn(IHostEnvironment env, IPredictorModel[] mo
return Utils.MarshalInvoke(CheckKeyLabelColumnCore, mdType.ItemType.RawType, env, models, labelType.AsKey, schema, labelInfo.Index, mdType);
}
- // When the label column is not a key, we check that the number of classes is the same for all the predictors, by checking the
+ // When the label column is not a key, we check that the number of classes is the same for all the predictors, by checking the
// OutputType property of the IValueMapper.
// If any of the predictors do not implement IValueMapper we throw an exception. Returns the class count.
private static int CheckNonKeyLabelColumnCore(IHostEnvironment env, IPredictor pred, IPredictorModel[] models, bool isBinary, ColumnType labelType)
@@ -672,13 +672,13 @@ private static bool AreEqual(ref VBuffer v1, ref VBuffer v2)
}
///
- /// This method outputs a Key-Value Pair (kvp) per model in the ensemble.
+ /// This method outputs a Key-Value Pair (kvp) per model in the ensemble.
/// * The key is the model number such as "Partition model 0 summary". If the model implements
/// then this string is followed by the first line of the model summary (the first line contains a description specific to the
/// model kind, such as "Feature gains" for FastTree or "Feature weights" for linear).
/// * The value:
/// - If the model implements then the value is the list of Key-Value pairs
- /// containing the detailed summary for that model (for example, linear models have a list containing kvps where the keys
+ /// containing the detailed summary for that model (for example, linear models have a list containing kvps where the keys
/// are the feature names and the values are the weights. FastTree has a similar list with the feature gains as values).
/// - If the model does not implement but does implement ,
/// the value is a string containing the summary of that model.
diff --git a/src/Microsoft.ML.Ensemble/Trainer/EnsemblePredictorBase.cs b/src/Microsoft.ML.Ensemble/Trainer/EnsemblePredictorBase.cs
index 9f2ebfb804..3d5c871117 100644
--- a/src/Microsoft.ML.Ensemble/Trainer/EnsemblePredictorBase.cs
+++ b/src/Microsoft.ML.Ensemble/Trainer/EnsemblePredictorBase.cs
@@ -152,7 +152,7 @@ public void SaveSummary(TextWriter writer, RoleMappedSchema schema)
writer.WriteLine(";; Partition model {0}", i);
writer.WriteLine(";; Weight={0}", (Weights != null ? Weights[i] : 1));
- // REVIEW: The featureName Collection names may vary for different base learners.
+ // REVIEW: The featureName Collection names may vary for different base learners.
// How do we get the right collection for the base learners?
if (Models[i].Predictor is ICanSaveSummary summaryModel)
summaryModel.SaveSummary(writer, schema);
diff --git a/src/Microsoft.ML.FastTree/BinFile/IniFileParserInterface.cs b/src/Microsoft.ML.FastTree/BinFile/IniFileParserInterface.cs
index c4b1af9a7c..994b711510 100644
--- a/src/Microsoft.ML.FastTree/BinFile/IniFileParserInterface.cs
+++ b/src/Microsoft.ML.FastTree/BinFile/IniFileParserInterface.cs
@@ -42,15 +42,15 @@ private static class Native
[DllImport(DllName, CharSet = CharSet.Ansi, EntryPoint = "FeatureMapGetFeatureName")]
[return: MarshalAs(UnmanagedType.U1)]
- public unsafe static extern bool GetFeatureName(IntPtr pObject, UInt32 featureIndex, byte[] buffer, UInt32 sizeOfBuffer, IntPtr resultLength);
+ public static extern unsafe bool GetFeatureName(IntPtr pObject, UInt32 featureIndex, byte[] buffer, UInt32 sizeOfBuffer, IntPtr resultLength);
[DllImport(DllName, CharSet = CharSet.Ansi, EntryPoint = "InputExtractorGetInputName")]
[return: MarshalAs(UnmanagedType.U1)]
- public unsafe static extern bool GetInputName(IntPtr pObject, UInt32 featureIndex, byte[] buffer, UInt32 sizeOfBuffer, IntPtr resultLength);
+ public static extern unsafe bool GetInputName(IntPtr pObject, UInt32 featureIndex, byte[] buffer, UInt32 sizeOfBuffer, IntPtr resultLength);
[DllImport(DllName, CharSet = CharSet.Ansi)]
[return: MarshalAs(UnmanagedType.U1)]
- public unsafe static extern bool GetSectionContent(IntPtr pObject, string sectionName, byte[] buffer, UInt32 sizeOfBuffer, IntPtr resultLength);
+ public static extern unsafe bool GetSectionContent(IntPtr pObject, string sectionName, byte[] buffer, UInt32 sizeOfBuffer, IntPtr resultLength);
[DllImport(DllName, EntryPoint = "InputExtractorGetInputCount")]
public static extern UInt32 GetInputCount(IntPtr pObject);
@@ -59,17 +59,17 @@ private static class Native
public static extern IntPtr GetInput(IntPtr pObject, UInt32 index);
[DllImport(DllName, EntryPoint = "InputGetFeatures")]
- public static unsafe extern void GetInputFeatures(IntPtr pInput, UInt32[] features, UInt32 sizeOfFeatures, out UInt32 featureCount);
+ public static extern unsafe void GetInputFeatures(IntPtr pInput, UInt32[] features, UInt32 sizeOfFeatures, out UInt32 featureCount);
[DllImport(DllName, EntryPoint = "InputIsCopy")]
[return: MarshalAs(UnmanagedType.U1)]
- public unsafe static extern bool IsCopyInput(IntPtr pInput);
+ public static extern unsafe bool IsCopyInput(IntPtr pInput);
[DllImport(DllName, EntryPoint = "InputEvaluate")]
- public static unsafe extern double EvaluateInput(IntPtr pInput, UInt32* input);
+ public static extern unsafe double EvaluateInput(IntPtr pInput, UInt32* input);
[DllImport(DllName, EntryPoint = "InputEvaluateMany")]
- public static unsafe extern void EvaluateMany(IntPtr pInput, UInt32*[] inputs, double* outputs, UInt32 count);
+ public static extern unsafe void EvaluateMany(IntPtr pInput, UInt32*[] inputs, double* outputs, UInt32 count);
[DllImport(DllName, EntryPoint = "InputExtractorGetFeatureMap")]
public static extern IntPtr GetFeatureMap(IntPtr pExtractor);
diff --git a/src/Microsoft.ML.FastTree/Dataset/DenseIntArray.cs b/src/Microsoft.ML.FastTree/Dataset/DenseIntArray.cs
index 508a6a5229..ca98c91d2d 100644
--- a/src/Microsoft.ML.FastTree/Dataset/DenseIntArray.cs
+++ b/src/Microsoft.ML.FastTree/Dataset/DenseIntArray.cs
@@ -70,18 +70,18 @@ public override IntArray[] Split(int[][] assignment)
#if USE_FASTTREENATIVE
[DllImport("FastTreeNative", CallingConvention = CallingConvention.StdCall)]
- private unsafe static extern int C_Sumup_float(
+ private static extern unsafe int C_Sumup_float(
int numBits, byte* pData, int* pIndices, float* pSampleOutputs, double* pSampleOutputWeights,
FloatType* pSumTargetsByBin, double* pSumTargets2ByBin, int* pCountByBin,
int totalCount, double totalSampleOutputs, double totalSampleOutputWeights);
[DllImport("FastTreeNative", CallingConvention = CallingConvention.StdCall)]
- private unsafe static extern int C_Sumup_double(
+ private static extern unsafe int C_Sumup_double(
int numBits, byte* pData, int* pIndices, double* pSampleOutputs, double* pSampleOutputWeights,
FloatType* pSumTargetsByBin, double* pSumTargets2ByBin, int* pCountByBin,
int totalCount, double totalSampleOutputs, double totalSampleOutputWeights);
- protected unsafe static void SumupCPlusPlusDense(SumupInputData input, FeatureHistogram histogram,
+ protected static unsafe void SumupCPlusPlusDense(SumupInputData input, FeatureHistogram histogram,
byte* data, int numBits)
{
using (Timer.Time(TimerEvent.SumupCppDense))
diff --git a/src/Microsoft.ML.FastTree/Dataset/FeatureFlock.cs b/src/Microsoft.ML.FastTree/Dataset/FeatureFlock.cs
index 9b5bf9b9ae..642c2349e8 100644
--- a/src/Microsoft.ML.FastTree/Dataset/FeatureFlock.cs
+++ b/src/Microsoft.ML.FastTree/Dataset/FeatureFlock.cs
@@ -47,7 +47,7 @@ public PerBinStats(Double sumTargets, Double sumWeights, int count)
/// These objects are stateful, reusable objects that enable the collection of sufficient
/// stats per feature flock, per node or leaf of a tree, to enable it to find the "best"
/// splits.
- ///
+ ///
/// Each instance of this corresponds to a single flock, but multiple of these will be created
/// per flock. Note that feature indices, whenever present, refer to the feature within the
/// particular flock the same as they do with .
@@ -176,7 +176,7 @@ public void Subtract(SufficientStatsBase other)
protected abstract double GetBinGradient(int featureIndex, double bias);
///
- /// Get a fullcopy of histogram for one sub feature.
+ /// Gets a full copy of the histogram for one sub-feature.
///
public void CopyFeatureHistogram(int subfeatureIndex, ref PerBinStats[] hist)
{
@@ -919,7 +919,7 @@ public void FillSplitCandidatesCategoricalNeighborBundling(LeastSquaresRegressio
{
var binStats = virtualBins[i];
catFeatureCount += 1 + binStats.SubFeatures.Length;
-
+
sumGTTargets += binStats.SumTargets;
gtCount += binStats.Count;
docsInCurrentGroup += binStats.Count;
@@ -1039,7 +1039,7 @@ protected sealed override void SubtractCore(SufficientStatsBase other)
/// A feature flock is a collection of features, grouped together because storing the
/// features and performing the key operations on them in a collection can be done
/// more efficiently than if they were stored as separate features.
- ///
+ ///
/// Since this is a collection of features, feature specific quantities and methods
/// will have a feature index parameter. Note that this index is always, for every
/// flock, from 0 up to but not including . Now,
@@ -1132,7 +1132,7 @@ public virtual IIntArrayForwardIndexer GetIndexer(int featureIndex)
public abstract double[] BinUpperBounds(int featureIndex);
///
- /// If you need to implement you can use
+ /// If you need to implement you can use
/// . This will be slower than a
/// specialized implementation but is at least a useful shim.
///
@@ -1216,10 +1216,10 @@ internal abstract class SinglePartitionedIntArrayFlockBase : FeatureF
///
/// Imagine we have a six row dataset, with two features, which if stored separately in,
/// say, a , would have bin values as follows.
- ///
+ ///
/// f0 = { 0, 1, 0, 0, 2, 0}
/// f1 = { 0, 0, 1, 0, 0, 1}
- ///
+ ///
/// These two are a candidate for a , because they never both
/// have a non-zero bin value for any row. Then, in order to represent this in this feature,
/// we would pass in this value for the :
@@ -1231,18 +1231,18 @@ internal abstract class SinglePartitionedIntArrayFlockBase : FeatureF
/// what feature is which can be reconstructed from , which
/// for each feature specifies the range in corresponding to the
/// "logical" bin value for that feature starting from 1.
- ///
+ ///
/// Note that it would also have been legal for to be
/// larger than the actual observed range, e.g., it could have been:
/// = { 1, 5, 8}
/// or something. This could happen if binning happened over a different dataset from the data
/// being represented right now, for example, but this is a more complex case.
- ///
+ ///
/// The would contain the upper bounds for both of these features,
/// which would be arrays large enough so that the maximum value of the logical bin for each feature
/// in the flock could index it. (So in this example, the first bin upper bound would be at least
/// length 3, and the second at least length 2.)
- ///
+ ///
/// The indicates if the flock is a categorical feature.
///
protected SinglePartitionedIntArrayFlockBase(TIntArray bins, int[] hotFeatureStarts, double[][] binUpperBounds, bool categorical = false)
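To make the hot-feature-starts bookkeeping concrete, here is a small decoder consistent with the f0/f1 example above. The literal arrays in the original comment are elided from this hunk, so the concrete values used below are assumptions chosen to satisfy the stated invariants, not a quote of the source.

```csharp
public static class FlockBinSketch
{
    // Recovers the logical per-feature bin from a flock's combined bin value.
    // Value 0 means the "cold" bin 0 for every feature; values in
    // [hotFeatureStarts[f], hotFeatureStarts[f + 1]) belong to feature f.
    public static int LogicalBin(int combinedBin, int[] hotFeatureStarts, int feature)
    {
        if (combinedBin >= hotFeatureStarts[feature] && combinedBin < hotFeatureStarts[feature + 1])
            return combinedBin - hotFeatureStarts[feature] + 1;
        return 0;
    }
}

// Example (assumed values): combined bins { 0, 1, 3, 0, 2, 3 } with hotFeatureStarts { 1, 3, 4 }
// decode back to f0 = { 0, 1, 0, 0, 2, 0 } and f1 = { 0, 0, 1, 0, 0, 1 }, matching the
// per-feature arrays shown in the comment above.
```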
@@ -1264,19 +1264,19 @@ protected SinglePartitionedIntArrayFlockBase(TIntArray bins, int[] hotFeatureSta
Contracts.Assert(AllBinUpperBounds.Select((b, f) => HotFeatureStarts[f + 1] - HotFeatureStarts[f] + 1 == b.Length).All(i => i));
}
- public override sealed double[] BinUpperBounds(int featureIndex)
+ public sealed override double[] BinUpperBounds(int featureIndex)
{
Contracts.Assert(0 <= featureIndex && featureIndex < Count);
return AllBinUpperBounds[featureIndex];
}
- public override sealed double Trust(int featureIndex)
+ public sealed override double Trust(int featureIndex)
{
Contracts.Assert(0 <= featureIndex && featureIndex < Count);
return 1;
}
- public override sealed int BinCount(int featureIndex)
+ public sealed override int BinCount(int featureIndex)
{
Contracts.Assert(0 <= featureIndex && featureIndex < Count);
return AllBinUpperBounds[featureIndex].Length;
diff --git a/src/Microsoft.ML.FastTree/Dataset/SegmentIntArray.cs b/src/Microsoft.ML.FastTree/Dataset/SegmentIntArray.cs
index 02ae0f2c84..1c29e4582d 100644
--- a/src/Microsoft.ML.FastTree/Dataset/SegmentIntArray.cs
+++ b/src/Microsoft.ML.FastTree/Dataset/SegmentIntArray.cs
@@ -428,7 +428,7 @@ public static void SegmentFindOptimalCost(uint[] array, int len, int bitsNeeded,
}
}
- public unsafe static void SegmentFindOptimalPath7(uint[] array, int len, out long bits, out int transitions)
+ public static unsafe void SegmentFindOptimalPath7(uint[] array, int len, out long bits, out int transitions)
{
long b = 0;
int t = 0;
@@ -441,7 +441,7 @@ public unsafe static void SegmentFindOptimalPath7(uint[] array, int len, out lon
transitions = t;
}
- public unsafe static void SegmentFindOptimalPath15(uint[] array, int len, out long bits, out int transitions)
+ public static unsafe void SegmentFindOptimalPath15(uint[] array, int len, out long bits, out int transitions)
{
long b = 0;
int t = 0;
@@ -454,7 +454,7 @@ public unsafe static void SegmentFindOptimalPath15(uint[] array, int len, out lo
transitions = t;
}
- public unsafe static void SegmentFindOptimalPath21(uint[] array, int len, out long bits, out int transitions)
+ public static unsafe void SegmentFindOptimalPath21(uint[] array, int len, out long bits, out int transitions)
{
long b = 0;
int t = 0;
@@ -467,7 +467,7 @@ public unsafe static void SegmentFindOptimalPath21(uint[] array, int len, out lo
transitions = t;
}
- public unsafe static void SegmentFindOptimalCost15(uint[] array, int len, out long bits)
+ public static unsafe void SegmentFindOptimalCost15(uint[] array, int len, out long bits)
{
long b = 0;
fixed (uint* pArray = array)
@@ -478,7 +478,7 @@ public unsafe static void SegmentFindOptimalCost15(uint[] array, int len, out lo
bits = b;
}
- public unsafe static void SegmentFindOptimalCost31(uint[] array, int len, out long bits)
+ public static unsafe void SegmentFindOptimalCost31(uint[] array, int len, out long bits)
{
long b = 0;
fixed (uint* pArray = array)
@@ -491,29 +491,29 @@ public unsafe static void SegmentFindOptimalCost31(uint[] array, int len, out lo
#pragma warning disable TLC_GeneralName // Externs follow their own rules.
[DllImport("FastTreeNative", CallingConvention = CallingConvention.StdCall, CharSet = CharSet.Ansi)]
- private unsafe static extern void C_SegmentFindOptimalPath21(uint* valv, int valc, long* pBits, int* pTransitions);
+ private static extern unsafe void C_SegmentFindOptimalPath21(uint* valv, int valc, long* pBits, int* pTransitions);
[DllImport("FastTreeNative", CallingConvention = CallingConvention.StdCall, CharSet = CharSet.Ansi)]
- private unsafe static extern void C_SegmentFindOptimalPath15(uint* valv, int valc, long* pBits, int* pTransitions);
+ private static extern unsafe void C_SegmentFindOptimalPath15(uint* valv, int valc, long* pBits, int* pTransitions);
[DllImport("FastTreeNative", CallingConvention = CallingConvention.StdCall, CharSet = CharSet.Ansi)]
- private unsafe static extern void C_SegmentFindOptimalPath7(uint* valv, int valc, long* pBits, int* pTransitions);
+ private static extern unsafe void C_SegmentFindOptimalPath7(uint* valv, int valc, long* pBits, int* pTransitions);
[DllImport("FastTreeNative", CallingConvention = CallingConvention.StdCall, CharSet = CharSet.Ansi)]
- private unsafe static extern void C_SegmentFindOptimalCost15(uint* valv, int valc, long* pBits);
+ private static extern unsafe void C_SegmentFindOptimalCost15(uint* valv, int valc, long* pBits);
[DllImport("FastTreeNative", CallingConvention = CallingConvention.StdCall, CharSet = CharSet.Ansi)]
- private unsafe static extern void C_SegmentFindOptimalCost31(uint* valv, int valc, long* pBits);
+ private static extern unsafe void C_SegmentFindOptimalCost31(uint* valv, int valc, long* pBits);
[DllImport("FastTreeNative", CallingConvention = CallingConvention.StdCall)]
- private unsafe static extern int C_SumupSegment_float(
+ private static extern unsafe int C_SumupSegment_float(
uint* pData, byte* pSegType, int* pSegLength, int* pIndices,
float* pSampleOutputs, double* pSampleOutputWeights,
float* pSumTargetsByBin, double* pSumWeightsByBin,
int* pCountByBin, int totalCount, double totalSampleOutputs);
[DllImport("FastTreeNative", CallingConvention = CallingConvention.StdCall)]
- private unsafe static extern int C_SumupSegment_double(
+ private static extern unsafe int C_SumupSegment_double(
uint* pData, byte* pSegType, int* pSegLength, int* pIndices,
double* pSampleOutputs, double* pSampleOutputWeights,
double* pSumTargetsByBin, double* pSumWeightsByBin,
diff --git a/src/Microsoft.ML.FastTree/Dataset/SparseIntArray.cs b/src/Microsoft.ML.FastTree/Dataset/SparseIntArray.cs
index fc360ddea4..5ca048647e 100644
--- a/src/Microsoft.ML.FastTree/Dataset/SparseIntArray.cs
+++ b/src/Microsoft.ML.FastTree/Dataset/SparseIntArray.cs
@@ -18,7 +18,7 @@ namespace Microsoft.ML.Runtime.FastTree.Internal
/// This implementation represents a sequence of values using parallel
/// arrays of both values, as well as deltas indicating the number of values to the next
/// explicit value. Values "between" these deltas are implicitly zero.
- ///
+ ///
/// Note that it is possible to misuse the deltas by making some of them themselves 0, allowing
/// us to represent multiple values per row. In this case,
/// and will not have sensible values, but
@@ -490,12 +490,12 @@ public override void Sumup(SumupInputData input, FeatureHistogram histogram)
#if USE_FASTTREENATIVE
[DllImport("FastTreeNative", CallingConvention = CallingConvention.StdCall)]
- private unsafe static extern int C_SumupDeltaSparse_float(int numBits, byte* pValues, byte* pDeltas, int numDeltas, int* pIndices, float* pSampleOutputs, double* pSampleOutputWeights,
+ private static extern unsafe int C_SumupDeltaSparse_float(int numBits, byte* pValues, byte* pDeltas, int numDeltas, int* pIndices, float* pSampleOutputs, double* pSampleOutputWeights,
float* pSumTargetsByBin, double* pSumTargets2ByBin, int* pCountByBin,
int totalCount, double totalSampleOutputs, double totalSampleOutputWeights);
[DllImport("FastTreeNative", CallingConvention = CallingConvention.StdCall)]
- private unsafe static extern int C_SumupDeltaSparse_double(int numBits, byte* pValues, byte* pDeltas, int numDeltas, int* pIndices, double* pSampleOutputs, double* pSampleOutputWeights,
+ private static extern unsafe int C_SumupDeltaSparse_double(int numBits, byte* pValues, byte* pDeltas, int numDeltas, int* pIndices, double* pSampleOutputs, double* pSampleOutputWeights,
double* pSumTargetsByBin, double* pSumTargets2ByBin, int* pCountByBin,
int totalCount, double totalSampleOutputs, double totalSampleOutputWeights);
diff --git a/src/Microsoft.ML.FastTree/FastTree.cs b/src/Microsoft.ML.FastTree/FastTree.cs
index 29fe439e0a..fad40495f4 100644
--- a/src/Microsoft.ML.FastTree/FastTree.cs
+++ b/src/Microsoft.ML.FastTree/FastTree.cs
@@ -1143,7 +1143,7 @@ private FeatureFlockBase CreateOneHotFlockCategorical(IChannel ch,
#endif
Double[] bub = BinUpperBounds[fi];
ch.Assert(bub.Length == 2);
- //REVIEW: leaving out check for the value to reduced memory consuption and going with
+ //REVIEW: leaving out the check for the value to reduce memory consumption and going with
//leap of faith based on what the user told.
binnedValues[i] = hotFeatureStarts[subfeature] + 1;
hotCount++;
@@ -1380,7 +1380,7 @@ private Dataset Construct(RoleMappedData examples, ref int numExamples, int maxB
// There is no good mechanism to filter out rows with missing feature values on transposed data.
// So, we instead perform one featurization pass which, if successful, will remain one pass but,
// if we ever encounter missing values will become a "detect missing features" pass, which will
- // in turn inform a necessary featurization pass secondary
+ // in turn inform a necessary secondary featurization pass.
SlotDropper slotDropper = null;
bool[] localConstructBinFeatures = Utils.CreateArray(NumFeatures, true);
@@ -1661,7 +1661,7 @@ private static ValueGetter> SubsetGetter(ValueGetter> g
}
///
- /// Returns a slot dropper object that has ranges of slots to be dropped,
+ /// Returns a slot dropper object that has ranges of slots to be dropped,
/// based on an examination of the feature values.
///
private static SlotDropper ConstructDropSlotRanges(ISlotCursor cursor,
@@ -2198,7 +2198,7 @@ private IEnumerable CreateFlocksCore(IChannel ch, IProgressCha
int limMade = startFeatureIndex;
int countBins = 1; // Count of bins we'll need to represent. Starts at 1, accumulates "hot" features.
// Tracking for n-hot flocks.
- long countHotRows = 0; // The count of hot "rows"
+ long countHotRows = 0; // The count of hot "rows"
long hotNThreshold = (long)(0.1 * NumExamples);
bool canBeOneHot = true;
@@ -2617,7 +2617,7 @@ public sealed class ForwardIndexer
// Parallel to the subsequence of _values in min to lim, indicates the index where
// we should start to look for the next value, if the corresponding value list in
// _values is sparse. If the corresponding value list is dense the entry at this
- // position is not used.
+ // position is not used.
private readonly int[] _perFeaturePosition;
private readonly int[] _featureIndices;
#if DEBUG
@@ -2790,7 +2790,7 @@ public abstract class FastTreePredictionWrapper :
// Inner args is used only for documentation purposes when saving comments to INI files.
protected readonly string InnerArgs;
- // The total number of features used in training (takes the value of zero if the
+ // The total number of features used in training (takes the value of zero if the
// written version of the loaded model is less than VerNumFeaturesSerialized)
protected readonly int NumFeatures;
@@ -3000,13 +3000,13 @@ private enum NodeMode
[Description("BRANCH_LT")]
BranchLT,
[Description("BRANCH_GTE")]
- BranchGTE,
+ BranchGte,
[Description("BRANCH_GT")]
BranchGT,
[Description("BRANCH_EQ")]
- BranchEQ,
+ BranchEq,
[Description("BRANCH_LT")]
- BranchNEQ,
+ BranchNeq,
[Description("LEAF")]
Leaf
};
@@ -3070,7 +3070,7 @@ public virtual bool SaveAsOnnx(OnnxContext ctx, string[] outputNames, string fea
nodesValues.Add(tree.RawThresholds[nodeIndex]);
nodesTrueNodeIds.Add(tree.LteChild[nodeIndex] < 0 ? ~tree.LteChild[nodeIndex] + tree.NumNodes : tree.LteChild[nodeIndex]);
nodesFalseNodeIds.Add(tree.GtChild[nodeIndex] < 0 ? ~tree.GtChild[nodeIndex] + tree.NumNodes : tree.GtChild[nodeIndex]);
- if (tree._defaultValueForMissing?[nodeIndex] <= tree.RawThresholds[nodeIndex])
+ if (tree.DefaultValueForMissing?[nodeIndex] <= tree.RawThresholds[nodeIndex])
missingValueTracksTrue.Add(true);
else
missingValueTracksTrue.Add(false);
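
The null-conditional access in the hunk above is safe because a lifted comparison whose left operand is null evaluates to false, so a tree without stored default values falls through to missingValueTracksTrue.Add(false). A minimal standalone sketch of that behavior, using made-up values rather than the actual tree fields:

using System;

class NullableComparisonSketch
{
    static void Main()
    {
        float[] defaultValueForMissing = null; // assumed: no per-node defaults were stored
        float rawThreshold = 0.5f;             // assumed threshold value

        // defaultValueForMissing?[0] is a null float?, and (null <= 0.5f) is false,
        // so the flag ends up false without any NullReferenceException.
        bool tracksTrue = defaultValueForMissing?[0] <= rawThreshold;
        Console.WriteLine(tracksTrue); // False
    }
}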
@@ -3266,8 +3266,8 @@ public Float GetLeafValue(int treeId, int leafId)
}
///
- /// Returns the leaf node in the requested tree for the given feature vector, and populates 'path' with the list of
- /// internal nodes in the path from the root to that leaf. If 'path' is null a new list is initialized. All elements
+ /// Returns the leaf node in the requested tree for the given feature vector, and populates 'path' with the list of
+ /// internal nodes in the path from the root to that leaf. If 'path' is null a new list is initialized. All elements
/// in 'path' are cleared before filling in the current path nodes.
///
public int GetLeaf(int treeId, ref VBuffer features, ref List path)
diff --git a/src/Microsoft.ML.FastTree/FastTreeClassification.cs b/src/Microsoft.ML.FastTree/FastTreeClassification.cs
index ac92867c27..6796dd6c79 100644
--- a/src/Microsoft.ML.FastTree/FastTreeClassification.cs
+++ b/src/Microsoft.ML.FastTree/FastTreeClassification.cs
@@ -334,7 +334,7 @@ public void AdjustTreeOutputs(IChannel ch, RegressionTree tree,
}
///
- /// The Entry Point for the FastTree Binary Classifier.
+ /// The Entry Point for the FastTree Binary Classifier.
///
public static partial class FastTree
{
diff --git a/src/Microsoft.ML.FastTree/FastTreeRanking.cs b/src/Microsoft.ML.FastTree/FastTreeRanking.cs
index cc246f25cd..7173a9f6a3 100644
--- a/src/Microsoft.ML.FastTree/FastTreeRanking.cs
+++ b/src/Microsoft.ML.FastTree/FastTreeRanking.cs
@@ -1028,7 +1028,7 @@ private static void PermutationSort(int[] permutation, double[] scores, short[]
}
[DllImport("FastTreeNative", EntryPoint = "C_GetDerivatives", CallingConvention = CallingConvention.StdCall, CharSet = CharSet.Ansi)]
- private unsafe static extern void GetDerivatives(
+ private static extern unsafe void GetDerivatives(
int numDocuments, int begin, int* pPermutation, short* pLabels,
double* pScores, double* pLambdas, double* pWeights, double* pDiscount,
double inverseMaxDcg, double* pGainLabels,
diff --git a/src/Microsoft.ML.FastTree/GamTrainer.cs b/src/Microsoft.ML.FastTree/GamTrainer.cs
index 3b3ca9e92f..51d2d809bb 100644
--- a/src/Microsoft.ML.FastTree/GamTrainer.cs
+++ b/src/Microsoft.ML.FastTree/GamTrainer.cs
@@ -748,7 +748,7 @@ private void Map(ref VBuffer src, ref Float dst)
///
/// Returns a vector of feature contributions for a given example.
- /// is used as a buffer to accumulate the contributions across trees.
+ /// is used as a buffer to accumulate the contributions across trees.
/// If is null, it will be created, otherwise it will be reused.
///
internal void GetFeatureContributions(ref VBuffer features, ref VBuffer contribs, ref BufferBuilder builder)
@@ -791,7 +791,7 @@ internal double GetFeatureBinsAndScore(ref VBuffer features, int[] bins)
for (int i = 0; i < features.Count; ++i)
{
int j;
- // Where we have a sparse output,
+ // Where we have a sparse output,
if (_inputFeatureToDatasetFeatureMap.TryGetValue(features.Indices[i], out j))
{
int index = Algorithms.FindFirstGE(_binUpperBounds[j], features.Values[i]);
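
The bin lookup above depends on finding the first bin upper bound that is greater than or equal to the feature value. A minimal sketch of that search follows; FindBin here is a hypothetical stand-in for Algorithms.FindFirstGE, not the actual implementation:

using System;

static class BinLookupSketch
{
    // Returns the first index i with binUpperBounds[i] >= value (binUpperBounds sorted
    // ascending), or binUpperBounds.Length if the value exceeds every upper bound.
    public static int FindBin(double[] binUpperBounds, double value)
    {
        int lo = 0;
        int hi = binUpperBounds.Length - 1;
        int result = binUpperBounds.Length;
        while (lo <= hi)
        {
            int mid = lo + (hi - lo) / 2;
            if (binUpperBounds[mid] >= value)
            {
                result = mid;   // candidate answer, keep searching to the left
                hi = mid - 1;
            }
            else
            {
                lo = mid + 1;
            }
        }
        return result;
    }

    static void Main()
    {
        // With upper bounds {0.5, 1.5, 2.5}, a value of 1.0 falls into bin 1.
        Console.WriteLine(FindBin(new[] { 0.5, 1.5, 2.5 }, 1.0)); // 1
    }
}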
@@ -1116,7 +1116,7 @@ public sealed class FeatureInfo
public long Version { get; }
///
- /// For features belonging to the same categorical, this value will be the same,
+ /// For features belonging to the same categorical, this value will be the same,
/// Set to -1 for non-categoricals.
///
public int CategoricalFeatureIndex { get; }
diff --git a/src/Microsoft.ML.FastTree/RandomForestClassification.cs b/src/Microsoft.ML.FastTree/RandomForestClassification.cs
index 512e79faf9..ae79c991d3 100644
--- a/src/Microsoft.ML.FastTree/RandomForestClassification.cs
+++ b/src/Microsoft.ML.FastTree/RandomForestClassification.cs
@@ -156,7 +156,7 @@ public override IPredictorWithFeatureWeights Train(TrainContext context)
}
// LogitBoost is naturally calibrated to
// output probabilities when transformed using
- // the logistic function, so if we have trained no
+ // the logistic function, so if we have trained no
// calibrator, transform the scores using that.
// REVIEW: Need a way to signal the outside world that we prefer simple sigmoid?
diff --git a/src/Microsoft.ML.FastTree/SumupPerformanceCommand.cs b/src/Microsoft.ML.FastTree/SumupPerformanceCommand.cs
index efe52c1f26..f1db3fae2c 100644
--- a/src/Microsoft.ML.FastTree/SumupPerformanceCommand.cs
+++ b/src/Microsoft.ML.FastTree/SumupPerformanceCommand.cs
@@ -110,7 +110,7 @@ private IEnumerable CreateDense(IChannel ch, Random rgen)
private IEnumerable CreateSparse(IChannel ch, Random rgen)
{
ch.CheckUserArg(0 <= _param && _param < 1, nameof(Arguments.Parameter), "For sparse arrays");
- // The parameter is the level of sparsity. Use the geometric distribution to determine the number of
+ // The parameter is the level of sparsity. Use the geometric distribution to determine the number of
// Geometric distribution (with 0 support) would be Math.
double denom = Math.Log(1 - _param);
if (double.IsNegativeInfinity(denom))
diff --git a/src/Microsoft.ML.FastTree/Training/DcgPermutationComparer.cs b/src/Microsoft.ML.FastTree/Training/DcgPermutationComparer.cs
index be449598a3..1421abb589 100644
--- a/src/Microsoft.ML.FastTree/Training/DcgPermutationComparer.cs
+++ b/src/Microsoft.ML.FastTree/Training/DcgPermutationComparer.cs
@@ -44,10 +44,10 @@ public static DcgPermutationComparer GetDcgPermutationFactory(string name)
///
public class DescendingStablePessimisticPermutationComparer : DescendingStablePermutationComparer
{
-#pragma warning disable TLC_GeneralName // The naming is the least of this class's problems. A setter with no getter??
+#pragma warning disable MSML_GeneralName // The naming is the least of this class's problems. A setter with no getter??
protected short[] _labels;
protected int _labelsOffset;
-#pragma warning restore TLC_GeneralName
+#pragma warning restore MSML_GeneralName
public override short[] Labels {
set { _labels = value; }
@@ -76,10 +76,10 @@ public override int Compare(int i, int j)
///
public class DescendingStablePermutationComparer : DcgPermutationComparer
{
-#pragma warning disable TLC_GeneralName // The naming is the least of this class's problems. A setter with no getter??
+#pragma warning disable MSML_GeneralName // The naming is the least of this class's problems. A setter with no getter??
protected double[] _scores;
protected int _scoresOffset;
-#pragma warning restore TLC_GeneralName
+#pragma warning restore MSML_GeneralName
public override double[] Scores { set { _scores = value; } }
diff --git a/src/Microsoft.ML.FastTree/Training/DocumentPartitioning.cs b/src/Microsoft.ML.FastTree/Training/DocumentPartitioning.cs
index 2518fc839e..1aec08271e 100644
--- a/src/Microsoft.ML.FastTree/Training/DocumentPartitioning.cs
+++ b/src/Microsoft.ML.FastTree/Training/DocumentPartitioning.cs
@@ -195,7 +195,7 @@ public double[] GetDistribution(double[] targets, double[] weights, int quantile
/// the leaf being split
///
/// the threshold
- /// Index of child node that contains documents whose split
+ /// Index of child node that contains documents whose split
/// feature value is greater than the split threshold
public unsafe void Split(int leaf, IIntArrayForwardIndexer indexer, UInt32 threshold, int gtChildIndex)
{
@@ -239,7 +239,7 @@ public unsafe void Split(int leaf, IIntArrayForwardIndexer indexer, UInt32 thres
/// the leaf being split
/// Split feature flock's bin
/// Categorical feature indices
- /// Index of child node that contains documents whose split
+ /// Index of child node that contains documents whose split
/// feature value is greater than the split threshold
public unsafe void Split(int leaf, IntArray bins, HashSet categoricalIndices, int gtChildIndex)
{
diff --git a/src/Microsoft.ML.FastTree/Training/OptimizationAlgorithms/GradientDescent.cs b/src/Microsoft.ML.FastTree/Training/OptimizationAlgorithms/GradientDescent.cs
index 8b2b345508..a749158b2f 100644
--- a/src/Microsoft.ML.FastTree/Training/OptimizationAlgorithms/GradientDescent.cs
+++ b/src/Microsoft.ML.FastTree/Training/OptimizationAlgorithms/GradientDescent.cs
@@ -51,7 +51,7 @@ protected virtual double[] GetGradient(IChannel ch)
if ((_numberOfDroppedTrees == 0) && (numberOfTrees > 0))
{
droppedTrees = new int[] { DropoutRng.Next(numberOfTrees) };
- // force at least a single tree to be dropped
+ // force at least a single tree to be dropped
_numberOfDroppedTrees = droppedTrees.Length;
}
ch.Trace("dropout: Dropping {0} trees of {1} for rate {2}",
@@ -104,7 +104,7 @@ public override RegressionTree TrainingIteration(IChannel ch, bool[] activeFeatu
using (Timer.Time(TimerEvent.TreeLearnerAdjustTreeOutputs))
{
double[] backupScores = null;
- // when doing dropouts we need to replace the TrainingScores with the scores without the dropped trees
+ // when doing dropouts we need to replace the TrainingScores with the scores without the dropped trees
if (DropoutRate > 0)
{
backupScores = TrainingScores.Scores;
diff --git a/src/Microsoft.ML.FastTree/Training/OptimizationAlgorithms/NoOptimizationAlgorithm.cs b/src/Microsoft.ML.FastTree/Training/OptimizationAlgorithms/NoOptimizationAlgorithm.cs
index 563a7891fd..dbc1f04147 100644
--- a/src/Microsoft.ML.FastTree/Training/OptimizationAlgorithms/NoOptimizationAlgorithm.cs
+++ b/src/Microsoft.ML.FastTree/Training/OptimizationAlgorithms/NoOptimizationAlgorithm.cs
@@ -7,7 +7,7 @@ namespace Microsoft.ML.Runtime.FastTree.Internal
///
/// This is a dummy optimizer. As random forest does not have any boosting-based optimization, this is a placeholder to be consistent
/// with other fast tree based applications.
- ///
+ ///
public class RandomForestOptimizer : GradientDescent
{
private IGradientAdjuster _gradientWrapper;
diff --git a/src/Microsoft.ML.FastTree/Training/Parallel/IParallelTraining.cs b/src/Microsoft.ML.FastTree/Training/Parallel/IParallelTraining.cs
index 08ae6fb16f..c968804708 100644
--- a/src/Microsoft.ML.FastTree/Training/Parallel/IParallelTraining.cs
+++ b/src/Microsoft.ML.FastTree/Training/Parallel/IParallelTraining.cs
@@ -38,7 +38,7 @@ public delegate void FindBestThresholdFromRawArrayFun(LeafSplitCandidates leafSp
/// To speed up the find bin process, it let different workers to find bins for different features.
/// Then perform global sync up.
/// In Feature parallel, every machines holds all data, so this is unneeded.
- /// 2. interactive with TreeLearner: , , ,
+ /// 2. interacts with TreeLearner: , , ,
/// , , , .
/// A full process is:
/// Use to alter local active features.
@@ -75,7 +75,7 @@ public interface IParallelTraining
///
/// Initialize every time before training a tree.
- /// will alter activeFeatures in Feature parallel.
+ /// will alter activeFeatures in Feature parallel.
/// Because it only need to find threshold for part of features in feature parallel.
///
void InitIteration(ref bool[] activeFeatures);
@@ -98,10 +98,10 @@ public interface IParallelTraining
bool IsNeedFindLocalBestSplit();
///
- /// True if need to skip non-splittable histogram.
- /// Only will return False in Voting parallel.
+ /// True if non-splittable histograms need to be skipped.
+ /// Only Voting parallel will return false.
/// That is because local doesn't have global histograms in Voting parallel,
- /// So the information about NonSplittable is not correct, and we cannot skip it.
+ /// so the information about NonSplittable is not correct, and we cannot skip it.
///
bool IsSkipNonSplittableHistogram();
@@ -133,7 +133,7 @@ void FindGlobalBestSplit(LeafSplitCandidates smallerChildSplitCandidates,
///
/// Get indices of features that should be find bin in local.
- /// After construct local boundary, should call
+ /// After constructing the local boundaries, one should call
/// to get boundaries for all features.
///
bool[] GetLocalBinConstructionFeatures(int numFeatures);
@@ -141,8 +141,8 @@ void FindGlobalBestSplit(LeafSplitCandidates smallerChildSplitCandidates,
///
/// Sync Global feature bucket.
/// used in Data parallel and Voting parallel.
- /// Data are partitioned by row. To speed up the Global find bin process,
- /// we can let different workers construct Bin Boundary for different features,
+ /// Data are partitioned by row. To speed up the global find-bin process,
+ /// we can let different workers construct bin boundaries for different features,
/// then perform a global sync up.
///
void SyncGlobalBoundary(int numFeatures, int maxBin, Double[][] binUpperBounds);
diff --git a/src/Microsoft.ML.FastTree/Training/StepSearch.cs b/src/Microsoft.ML.FastTree/Training/StepSearch.cs
index db6f61ada2..e3d14aeb8e 100644
--- a/src/Microsoft.ML.FastTree/Training/StepSearch.cs
+++ b/src/Microsoft.ML.FastTree/Training/StepSearch.cs
@@ -30,7 +30,7 @@ public LineSearch(Test lossCalculator, int lossIndex)
_historicStepSize = Math.Max(1.0, _minStepSize);
}
- private readonly static double _phi = (1.0 + Math.Sqrt(5)) / 2;
+ private static readonly double _phi = (1.0 + Math.Sqrt(5)) / 2;
private static void Swap(ref T a, ref T b)
{
diff --git a/src/Microsoft.ML.FastTree/Training/TreeLearners/LeastSquaresRegressionTreeLearner.cs b/src/Microsoft.ML.FastTree/Training/TreeLearners/LeastSquaresRegressionTreeLearner.cs
index cb902fad2b..fd1fb548da 100644
--- a/src/Microsoft.ML.FastTree/Training/TreeLearners/LeastSquaresRegressionTreeLearner.cs
+++ b/src/Microsoft.ML.FastTree/Training/TreeLearners/LeastSquaresRegressionTreeLearner.cs
@@ -103,7 +103,7 @@ public class LeastSquaresRegressionTreeLearner : TreeLearner
/// Only consider a gain if its likelihood versus a random
/// choice gain is above a certain value (so 0.95 would mean restricting to gains that have less
/// than a 0.05 change of being generated randomly through choice of a random split).
- /// Maximum categorical split points to consider when splitting on a
+ /// Maximum categorical split points to consider when splitting on a
/// categorical feature.
///
/// -1 if best step ranking is to be disabled, otherwise it
diff --git a/src/Microsoft.ML.FastTree/TreeEnsemble/Ensemble.cs b/src/Microsoft.ML.FastTree/TreeEnsemble/Ensemble.cs
index 0c75b2dd18..0d48bb8123 100644
--- a/src/Microsoft.ML.FastTree/TreeEnsemble/Ensemble.cs
+++ b/src/Microsoft.ML.FastTree/TreeEnsemble/Ensemble.cs
@@ -336,7 +336,7 @@ public string ToGainSummary(FeaturesToContentMap fmap, Dictionary feat
///
/// Returns a vector of feature contributions for a given example.
- /// is used as a buffer to accumulate the contributions across trees.
+ /// is used as a buffer to accumulate the contributions across trees.
/// If is null, it will be created, otherwise it will be reused.
///
internal void GetFeatureContributions(ref VBuffer features, ref VBuffer contribs, ref BufferBuilder builder)
diff --git a/src/Microsoft.ML.FastTree/TreeEnsemble/QuantileRegressionTree.cs b/src/Microsoft.ML.FastTree/TreeEnsemble/QuantileRegressionTree.cs
index 642d82ede1..bcec5ac082 100644
--- a/src/Microsoft.ML.FastTree/TreeEnsemble/QuantileRegressionTree.cs
+++ b/src/Microsoft.ML.FastTree/TreeEnsemble/QuantileRegressionTree.cs
@@ -58,9 +58,9 @@ public override void Save(ModelSaveContext ctx)
}
///
- /// Loads the sampled labels of this tree to the distribution array for the sparse instance type.
+ /// Loads the sampled labels of this tree to the distribution array for the sparse instance type.
/// By calling for all the trees, the distribution array will have all the samples from all the trees
- ///
+ ///
public void LoadSampledLabels(ref VBuffer feat, Float[] distribution, Float[] weights, int sampleCount, int destinationIndex)
{
int leaf = GetLeaf(ref feat);
diff --git a/src/Microsoft.ML.FastTree/TreeEnsemble/RegressionTree.cs b/src/Microsoft.ML.FastTree/TreeEnsemble/RegressionTree.cs
index d2edeb796a..65701f0d03 100644
--- a/src/Microsoft.ML.FastTree/TreeEnsemble/RegressionTree.cs
+++ b/src/Microsoft.ML.FastTree/TreeEnsemble/RegressionTree.cs
@@ -26,7 +26,7 @@ public class RegressionTree
// Weight of this tree in the ensemble
// for each non-leaf, we keep the following data
- public Float[] _defaultValueForMissing;
+ public Float[] DefaultValueForMissing;
private double[] _splitGain;
private double[] _gainPValue;
// The value of this non-leaf node, prior to split when it was a leaf.
@@ -42,12 +42,12 @@ public class RegressionTree
///
public bool[] CategoricalSplit { get; }
///
- /// Array of categorical values for the categorical feature that might be chosen as
+ /// Array of categorical values for the categorical feature that might be chosen as
/// a split feature for a node.
///
public int[][] CategoricalSplitFeatures;
///
- /// For a given categorical feature that is chosen as a split feature for a node, this
+ /// For a given categorical feature that is chosen as a split feature for a node, this
/// array contains its start and end range in the input feature vector at prediction time.
///
public int[][] CategoricalSplitFeatureRanges;
@@ -89,7 +89,7 @@ public RegressionTree(int maxLeaves)
_gainPValue = new double[maxLeaves - 1];
_previousLeafValue = new double[maxLeaves - 1];
Thresholds = new UInt32[maxLeaves - 1];
- _defaultValueForMissing = null;
+ DefaultValueForMissing = null;
LteChild = new int[maxLeaves - 1];
GtChild = new int[maxLeaves - 1];
LeafValues = new double[maxLeaves];
@@ -202,7 +202,7 @@ internal RegressionTree(int[] splitFeatures, Double[] splitGain, Double[] gainPV
_splitGain = splitGain;
_gainPValue = gainPValue;
RawThresholds = rawThresholds;
- _defaultValueForMissing = defaultValueForMissing;
+ DefaultValueForMissing = defaultValueForMissing;
LteChild = lteChild;
GtChild = gtChild;
LeafValues = leafValues;
@@ -222,10 +222,10 @@ internal RegressionTree(int[] splitFeatures, Double[] splitGain, Double[] gainPV
CheckValid(Contracts.Check);
- if (_defaultValueForMissing != null)
+ if (DefaultValueForMissing != null)
{
bool allZero = true;
- foreach (var val in _defaultValueForMissing)
+ foreach (var val in DefaultValueForMissing)
{
if (val != 0.0f)
{
@@ -234,7 +234,7 @@ internal RegressionTree(int[] splitFeatures, Double[] splitGain, Double[] gainPV
}
}
if (allZero)
- _defaultValueForMissing = null;
+ DefaultValueForMissing = null;
}
}
@@ -300,9 +300,9 @@ internal RegressionTree(ModelLoadContext ctx, bool usingDefaultValue, bool categ
Thresholds = reader.ReadUIntArray();
RawThresholds = reader.ReadFloatArray();
- _defaultValueForMissing = null;
+ DefaultValueForMissing = null;
if (usingDefaultValue)
- _defaultValueForMissing = reader.ReadFloatArray();
+ DefaultValueForMissing = reader.ReadFloatArray();
LeafValues = reader.ReadDoubleArray();
// Informational...
@@ -313,10 +313,10 @@ internal RegressionTree(ModelLoadContext ctx, bool usingDefaultValue, bool categ
CheckValid(Contracts.CheckDecode);
// Check the need of _defaultValueForMissing
- if (_defaultValueForMissing != null)
+ if (DefaultValueForMissing != null)
{
bool allZero = true;
- foreach (var val in _defaultValueForMissing)
+ foreach (var val in DefaultValueForMissing)
{
if (val != 0.0f)
{
@@ -325,7 +325,7 @@ internal RegressionTree(ModelLoadContext ctx, bool usingDefaultValue, bool categ
}
}
if (allZero)
- _defaultValueForMissing = null;
+ DefaultValueForMissing = null;
}
}
@@ -402,7 +402,7 @@ protected void Save(ModelSaveContext ctx, TreeType code)
writer.WriteUIntArray(Thresholds);
writer.WriteFloatArray(RawThresholds);
- writer.WriteFloatArray(_defaultValueForMissing);
+ writer.WriteFloatArray(DefaultValueForMissing);
writer.WriteDoubleArray(LeafValues);
writer.WriteDoubleArray(_splitGain);
@@ -804,12 +804,12 @@ public int GetLeaf(ref VBuffer feat, ref List path)
private Float GetFeatureValue(Float x, int node)
{
// No need to convert missing values.
- if (_defaultValueForMissing == null)
+ if (DefaultValueForMissing == null)
return x;
if (Double.IsNaN(x))
{
- return _defaultValueForMissing[node];
+ return DefaultValueForMissing[node];
}
else
{
@@ -1198,7 +1198,7 @@ public void ToTreeEnsembleFormat(StringBuilder sbEvaluator, StringBuilder sbInpu
private void ToTreeEnsembleFormatForCategoricalSplit(StringBuilder sbEvaluator, StringBuilder sbInput, FeaturesToContentMap featureContents,
ref int evaluatorCounter, Dictionary featureToId, Dictionary categoricalSplitNodeToId)
{
- //REVIEW: Can all these conditions even be true?
+ //REVIEW: Can all these conditions even be true?
if (CategoricalSplitFeatures == null ||
CategoricalSplitFeatures.Length == 0 ||
CategoricalSplitFeatures.All(val => val == null))
@@ -1518,7 +1518,7 @@ public void AppendFeatureContributions(ref VBuffer src, BufferBuilder
/// A bindable mapper wrapper for tree ensembles, that creates a bound mapper with three outputs:
- /// 1. A vector containing the individual tree outputs of the tree ensemble.
+ /// 1. A vector containing the individual tree outputs of the tree ensemble.
/// 2. An indicator vector for the leaves that the feature vector falls on in the tree ensemble.
/// 3. An indicator vector for the internal nodes on the paths that the feature vector falls on in the tree ensemble.
///
@@ -192,15 +192,15 @@ public BoundMapper(IExceptionContext ectx, TreeEnsembleFeaturizerBindableMapper
// A vector containing the output of each tree on a given example.
var treeValueType = new VectorType(NumberType.Float, _owner._ensemble.NumTrees);
- // An indicator vector with length = the total number of leaves in the ensemble, indicating which leaf the example
+ // An indicator vector with length = the total number of leaves in the ensemble, indicating which leaf the example
// ends up in all the trees in the ensemble.
var leafIdType = new VectorType(NumberType.Float, _owner._totalLeafCount);
- // An indicator vector with length = the total number of nodes in the ensemble, indicating the nodes on
+ // An indicator vector with length = the total number of nodes in the ensemble, indicating the nodes on
// the paths of the example in all the trees in the ensemble.
// The total number of nodes in a binary tree is equal to the number of internal nodes + the number of leaf nodes,
// and it is also equal to the number of children of internal nodes (which is 2 * the number of internal nodes)
- // plus one (since the root node is not a child of any node). So we have #internal + #leaf = 2*(#internal) + 1,
- // which means that #internal = #leaf - 1.
+ // plus one (since the root node is not a child of any node). So we have #internal + #leaf = 2*(#internal) + 1,
+ // which means that #internal = #leaf - 1.
// Therefore, the number of internal nodes in the ensemble is #leaf - #trees.
var pathIdType = new VectorType(NumberType.Float, _owner._totalLeafCount - _owner._ensemble.NumTrees);
_outputSchema = new Schema(ectx, owner, treeValueType, leafIdType, pathIdType);
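
The node-count arithmetic in the comment above (for a full binary tree, internal nodes = leaves - 1, so across an ensemble internal nodes = total leaves - number of trees) can be checked with a tiny made-up ensemble:

using System;
using System.Linq;

class NodeCountSketch
{
    static void Main()
    {
        int[] leavesPerTree = { 4, 8, 3 };                 // assumed toy ensemble
        int numTrees = leavesPerTree.Length;               // 3 tree-value outputs
        int totalLeafCount = leavesPerTree.Sum();          // 15 leaf-id indicators

        // Each full binary tree contributes (#leaf - 1) internal nodes,
        // so the ensemble has totalLeafCount - numTrees of them.
        int internalNodeCount = totalLeafCount - numTrees; // 12 path-id indicators
        Console.WriteLine($"{numTrees} trees, {totalLeafCount} leaves, {internalNodeCount} internal nodes");
    }
}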
@@ -563,8 +563,8 @@ public sealed class Arguments : TrainAndScoreTransform.ArgumentsBase
- /// REVIEW: Ideally we should have only one arguments class by using IComponentFactory for the model.
- /// For now it probably warrants a REVIEW comment here in case we'd like to merge these two arguments in the future.
+ /// REVIEW: Ideally we should have only one arguments class by using IComponentFactory for the model.
+ /// For now it probably warrants a REVIEW comment here in case we'd like to merge these two arguments in the future.
/// Also, it might be worthwhile to extract the common arguments to a base class.
///
[TlcModule.EntryPointKind(typeof(CommonInputs.IFeaturizerInput))]
@@ -803,9 +803,9 @@ private static IDataView AppendLabelTransform(IHostEnvironment env, IChannel ch,
public static partial class TreeFeaturize
{
- [TlcModule.EntryPoint(Name = "Transforms.TreeLeafFeaturizer",
- Desc = TreeEnsembleFeaturizerTransform.TreeEnsembleSummary,
- UserName = TreeEnsembleFeaturizerTransform.UserName,
+ [TlcModule.EntryPoint(Name = "Transforms.TreeLeafFeaturizer",
+ Desc = TreeEnsembleFeaturizerTransform.TreeEnsembleSummary,
+ UserName = TreeEnsembleFeaturizerTransform.UserName,
ShortName = TreeEnsembleFeaturizerBindableMapper.LoadNameShort,
XmlInclude = new[] { @"" })]
public static CommonOutputs.TransformOutput Featurizer(IHostEnvironment env, TreeEnsembleFeaturizerTransform.ArgumentsForEntryPoint input)
diff --git a/src/Microsoft.ML.FastTree/Utils/ToByteArrayExtensions.cs b/src/Microsoft.ML.FastTree/Utils/ToByteArrayExtensions.cs
index 5763e3c9dd..d4bc6e1962 100644
--- a/src/Microsoft.ML.FastTree/Utils/ToByteArrayExtensions.cs
+++ b/src/Microsoft.ML.FastTree/Utils/ToByteArrayExtensions.cs
@@ -47,7 +47,7 @@ public static int SizeInBytes(this short a)
return sizeof(short);
}
- public unsafe static void ToByteArray(this short a, byte[] buffer, ref int position)
+ public static unsafe void ToByteArray(this short a, byte[] buffer, ref int position)
{
fixed (byte* pBuffer = buffer)
{
@@ -71,7 +71,7 @@ public static int SizeInBytes(this ushort a)
return sizeof(ushort);
}
- public unsafe static void ToByteArray(this ushort a, byte[] buffer, ref int position)
+ public static unsafe void ToByteArray(this ushort a, byte[] buffer, ref int position)
{
fixed (byte* pBuffer = buffer)
{
@@ -95,7 +95,7 @@ public static int SizeInBytes(this int a)
return sizeof(int);
}
- public unsafe static void ToByteArray(this int a, byte[] buffer, ref int position)
+ public static unsafe void ToByteArray(this int a, byte[] buffer, ref int position)
{
fixed (byte* pBuffer = buffer)
{
@@ -105,7 +105,7 @@ public unsafe static void ToByteArray(this int a, byte[] buffer, ref int positio
position += sizeof(int);
}
- public unsafe static int ToInt(this byte[] buffer, ref int position)
+ public static unsafe int ToInt(this byte[] buffer, ref int position)
{
int a;
fixed (byte* pBuffer = buffer)
@@ -124,7 +124,7 @@ public static int SizeInBytes(this uint a)
return sizeof(uint);
}
- public unsafe static void ToByteArray(this uint a, byte[] buffer, ref int position)
+ public static unsafe void ToByteArray(this uint a, byte[] buffer, ref int position)
{
fixed (byte* pBuffer = buffer)
{
@@ -134,7 +134,7 @@ public unsafe static void ToByteArray(this uint a, byte[] buffer, ref int positi
position += sizeof(uint);
}
- public unsafe static uint ToUInt(this byte[] buffer, ref int position)
+ public static unsafe uint ToUInt(this byte[] buffer, ref int position)
{
uint a;
fixed (byte* pBuffer = buffer)
@@ -153,7 +153,7 @@ public static int SizeInBytes(this long a)
return sizeof(long);
}
- public unsafe static void ToByteArray(this long a, byte[] buffer, ref int position)
+ public static unsafe void ToByteArray(this long a, byte[] buffer, ref int position)
{
fixed (byte* pBuffer = buffer)
{
@@ -177,7 +177,7 @@ public static int SizeInBytes(this ulong a)
return sizeof(ulong);
}
- public unsafe static void ToByteArray(this ulong a, byte[] buffer, ref int position)
+ public static unsafe void ToByteArray(this ulong a, byte[] buffer, ref int position)
{
fixed (byte* pBuffer = buffer)
{
@@ -213,7 +213,7 @@ public static int SizeInBytes(this float a)
return sizeof(float);
}
- public unsafe static void ToByteArray(this float a, byte[] buffer, ref int position)
+ public static unsafe void ToByteArray(this float a, byte[] buffer, ref int position)
{
fixed (byte* pBuffer = buffer)
{
@@ -237,7 +237,7 @@ public static int SizeInBytes(this double a)
return sizeof(double);
}
- public unsafe static void ToByteArray(this double a, byte[] buffer, ref int position)
+ public static unsafe void ToByteArray(this double a, byte[] buffer, ref int position)
{
fixed (byte* pBuffer = buffer)
{
@@ -318,7 +318,7 @@ public static int SizeInBytes(this short[] a)
return sizeof(int) + Utils.Size(a) * sizeof(short);
}
- public unsafe static void ToByteArray(this short[] a, byte[] buffer, ref int position)
+ public static unsafe void ToByteArray(this short[] a, byte[] buffer, ref int position)
{
int length = a.Length;
length.ToByteArray(buffer, ref position);
@@ -333,7 +333,7 @@ public unsafe static void ToByteArray(this short[] a, byte[] buffer, ref int pos
position += length * sizeof(short);
}
- public unsafe static short[] ToShortArray(this byte[] buffer, ref int position)
+ public static unsafe short[] ToShortArray(this byte[] buffer, ref int position)
{
int length = buffer.ToInt(ref position);
short[] a = new short[length];
@@ -357,7 +357,7 @@ public static int SizeInBytes(this ushort[] a)
return sizeof(int) + Utils.Size(a) * sizeof(ushort);
}
- public unsafe static void ToByteArray(this ushort[] a, byte[] buffer, ref int position)
+ public static unsafe void ToByteArray(this ushort[] a, byte[] buffer, ref int position)
{
int length = a.Length;
length.ToByteArray(buffer, ref position);
@@ -372,7 +372,7 @@ public unsafe static void ToByteArray(this ushort[] a, byte[] buffer, ref int po
position += length * sizeof(ushort);
}
- public unsafe static ushort[] ToUShortArray(this byte[] buffer, ref int position)
+ public static unsafe ushort[] ToUShortArray(this byte[] buffer, ref int position)
{
int length = buffer.ToInt(ref position);
ushort[] a = new ushort[length];
@@ -396,7 +396,7 @@ public static int SizeInBytes(this int[] array)
return sizeof(int) + Utils.Size(array) * sizeof(int);
}
- public unsafe static void ToByteArray(this int[] a, byte[] buffer, ref int position)
+ public static unsafe void ToByteArray(this int[] a, byte[] buffer, ref int position)
{
int length = Utils.Size(a);
length.ToByteArray(buffer, ref position);
@@ -411,10 +411,10 @@ public unsafe static void ToByteArray(this int[] a, byte[] buffer, ref int posit
position += length * sizeof(int);
}
- public unsafe static int[] ToIntArray(this byte[] buffer, ref int position)
+ public static unsafe int[] ToIntArray(this byte[] buffer, ref int position)
=> buffer.ToIntArray(ref position, buffer.ToInt(ref position));
- public unsafe static int[] ToIntArray(this byte[] buffer, ref int position, int length)
+ public static unsafe int[] ToIntArray(this byte[] buffer, ref int position, int length)
{
if (length == 0)
return null;
@@ -440,7 +440,7 @@ public static int SizeInBytes(this uint[] array)
return sizeof(int) + Utils.Size(array) * sizeof(uint);
}
- public unsafe static void ToByteArray(this uint[] a, byte[] buffer, ref int position)
+ public static unsafe void ToByteArray(this uint[] a, byte[] buffer, ref int position)
{
int length = a.Length;
length.ToByteArray(buffer, ref position);
@@ -455,7 +455,7 @@ public unsafe static void ToByteArray(this uint[] a, byte[] buffer, ref int posi
position += length * sizeof(uint);
}
- public unsafe static uint[] ToUIntArray(this byte[] buffer, ref int position)
+ public static unsafe uint[] ToUIntArray(this byte[] buffer, ref int position)
{
int length = buffer.ToInt(ref position);
uint[] a = new uint[length];
@@ -479,7 +479,7 @@ public static int SizeInBytes(this long[] array)
return sizeof(int) + Utils.Size(array) * sizeof(long);
}
- public unsafe static void ToByteArray(this long[] a, byte[] buffer, ref int position)
+ public static unsafe void ToByteArray(this long[] a, byte[] buffer, ref int position)
{
int length = a.Length;
length.ToByteArray(buffer, ref position);
@@ -494,7 +494,7 @@ public unsafe static void ToByteArray(this long[] a, byte[] buffer, ref int posi
position += length * sizeof(long);
}
- public unsafe static long[] ToLongArray(this byte[] buffer, ref int position)
+ public static unsafe long[] ToLongArray(this byte[] buffer, ref int position)
{
int length = buffer.ToInt(ref position);
long[] a = new long[length];
@@ -518,7 +518,7 @@ public static int SizeInBytes(this ulong[] array)
return sizeof(int) + Utils.Size(array) * sizeof(ulong);
}
- public unsafe static void ToByteArray(this ulong[] a, byte[] buffer, ref int position)
+ public static unsafe void ToByteArray(this ulong[] a, byte[] buffer, ref int position)
{
int length = a.Length;
length.ToByteArray(buffer, ref position);
@@ -533,7 +533,7 @@ public unsafe static void ToByteArray(this ulong[] a, byte[] buffer, ref int pos
position += length * sizeof(ulong);
}
- public unsafe static ulong[] ToULongArray(this byte[] buffer, ref int position)
+ public static unsafe ulong[] ToULongArray(this byte[] buffer, ref int position)
{
int length = buffer.ToInt(ref position);
ulong[] a = new ulong[length];
@@ -566,7 +566,7 @@ public static void ToByteArray(this MD5Hash[] a, byte[] buffer, ref int position
}
}
- public unsafe static MD5Hash[] ToUInt128Array(this byte[] buffer, ref int position)
+ public static unsafe MD5Hash[] ToUInt128Array(this byte[] buffer, ref int position)
{
int length = buffer.ToInt(ref position);
MD5Hash[] a = new MD5Hash[length];
@@ -584,7 +584,7 @@ public static int SizeInBytes(this float[] array)
return sizeof(int) + Utils.Size(array) * sizeof(float);
}
- public unsafe static void ToByteArray(this float[] a, byte[] buffer, ref int position)
+ public static unsafe void ToByteArray(this float[] a, byte[] buffer, ref int position)
{
int length = a.Length;
length.ToByteArray(buffer, ref position);
@@ -599,7 +599,7 @@ public unsafe static void ToByteArray(this float[] a, byte[] buffer, ref int pos
position += length * sizeof(float);
}
- public unsafe static float[] ToFloatArray(this byte[] buffer, ref int position)
+ public static unsafe float[] ToFloatArray(this byte[] buffer, ref int position)
{
int length = buffer.ToInt(ref position);
float[] a = new float[length];
@@ -623,7 +623,7 @@ public static int SizeInBytes(this double[] array)
return sizeof(int) + Utils.Size(array) * sizeof(double);
}
- public unsafe static void ToByteArray(this double[] a, byte[] buffer, ref int position)
+ public static unsafe void ToByteArray(this double[] a, byte[] buffer, ref int position)
{
int length = a.Length;
length.ToByteArray(buffer, ref position);
@@ -638,7 +638,7 @@ public unsafe static void ToByteArray(this double[] a, byte[] buffer, ref int po
position += length * sizeof(double);
}
- public unsafe static double[] ToDoubleArray(this byte[] buffer, ref int position)
+ public static unsafe double[] ToDoubleArray(this byte[] buffer, ref int position)
{
int length = buffer.ToInt(ref position);
double[] a = new double[length];
diff --git a/src/Microsoft.ML.FastTree/Utils/VectorUtils.cs b/src/Microsoft.ML.FastTree/Utils/VectorUtils.cs
index fdf695b174..b804523cc1 100644
--- a/src/Microsoft.ML.FastTree/Utils/VectorUtils.cs
+++ b/src/Microsoft.ML.FastTree/Utils/VectorUtils.cs
@@ -17,7 +17,7 @@ public static double GetVectorSize(double[] vector)
}
// Normalizes the vector to have size of 1
- public unsafe static void NormalizeVectorSize(double[] vector)
+ public static unsafe void NormalizeVectorSize(double[] vector)
{
double size = GetVectorSize(vector);
int length = vector.Length;
@@ -34,7 +34,7 @@ public unsafe static void NormalizeVectorSize(double[] vector)
}
// Center vector to have mean = 0
- public unsafe static void CenterVector(double[] vector)
+ public static unsafe void CenterVector(double[] vector)
{
double mean = GetMean(vector);
int length = vector.Length;
@@ -51,7 +51,7 @@ public unsafe static void CenterVector(double[] vector)
}
// Normalizes the vector to have mean = 0 and std = 1
- public unsafe static void NormalizeVector(double[] vector)
+ public static unsafe void NormalizeVector(double[] vector)
{
double mean = GetMean(vector);
double std = GetStandardDeviation(vector, mean);
@@ -59,7 +59,7 @@ public unsafe static void NormalizeVector(double[] vector)
}
// Normalizes the vector to have mean = 0 and std = 1
- public unsafe static void NormalizeVector(double[] vector, double mean, double std)
+ public static unsafe void NormalizeVector(double[] vector, double mean, double std)
{
int length = vector.Length;
unsafe
@@ -74,17 +74,17 @@ public unsafe static void NormalizeVector(double[] vector, double mean, double s
}
}
- public unsafe static double GetDotProduct(double[] vector1, double[] vector2)
+ public static unsafe double GetDotProduct(double[] vector1, double[] vector2)
{
return GetDotProduct(vector1, vector2, vector1.Length);
}
- public unsafe static double GetDotProduct(float[] vector1, float[] vector2)
+ public static unsafe double GetDotProduct(float[] vector1, float[] vector2)
{
return GetDotProduct(vector1, vector2, vector1.Length);
}
- public unsafe static double GetDotProduct(double[] vector1, double[] vector2, int length)
+ public static unsafe double GetDotProduct(double[] vector1, double[] vector2, int length)
{
double product = 0;
unsafe
@@ -101,7 +101,7 @@ public unsafe static double GetDotProduct(double[] vector1, double[] vector2, in
return product;
}
- public unsafe static double GetDotProduct(float[] vector1, float[] vector2, int length)
+ public static unsafe double GetDotProduct(float[] vector1, float[] vector2, int length)
{
double product = 0;
unsafe
@@ -118,7 +118,7 @@ public unsafe static double GetDotProduct(float[] vector1, float[] vector2, int
return product;
}
- public unsafe static double GetMean(double[] vector)
+ public static unsafe double GetMean(double[] vector)
{
double sum = 0;
int length = vector.Length;
@@ -135,7 +135,7 @@ public unsafe static double GetMean(double[] vector)
return sum / length;
}
- public unsafe static double GetMean(float[] vector)
+ public static unsafe double GetMean(float[] vector)
{
double sum = 0;
int length = vector.Length;
@@ -157,7 +157,7 @@ public static double GetStandardDeviation(double[] vector)
return GetStandardDeviation(vector, GetMean(vector));
}
- public unsafe static double GetStandardDeviation(double[] vector, double mean)
+ public static unsafe double GetStandardDeviation(double[] vector, double mean)
{
double sum = 0;
int length = vector.Length;
@@ -176,7 +176,7 @@ public unsafe static double GetStandardDeviation(double[] vector, double mean)
return Math.Sqrt(sum / length);
}
- public unsafe static int GetIndexOfMax(double[] vector)
+ public static unsafe int GetIndexOfMax(double[] vector)
{
int length = vector.Length;
double max = vector[0];
@@ -199,7 +199,7 @@ public unsafe static int GetIndexOfMax(double[] vector)
}
// Subtracts the second vector from the first one (vector1[i] -= vector2[i])
- public unsafe static void SubtractInPlace(double[] vector1, double[] vector2)
+ public static unsafe void SubtractInPlace(double[] vector1, double[] vector2)
{
int length = vector1.Length;
unsafe
@@ -215,7 +215,7 @@ public unsafe static void SubtractInPlace(double[] vector1, double[] vector2)
}
}
- public unsafe static double[] Subtract(double[] vector1, double[] vector2)
+ public static unsafe double[] Subtract(double[] vector1, double[] vector2)
{
int length = vector1.Length;
double[] result = new double[length];
@@ -235,7 +235,7 @@ public unsafe static double[] Subtract(double[] vector1, double[] vector2)
}
// Adds the second vector to the first one (vector1[i] += vector2[i])
- public unsafe static void AddInPlace(double[] vector1, double[] vector2)
+ public static unsafe void AddInPlace(double[] vector1, double[] vector2)
{
int length = vector1.Length;
unsafe
@@ -252,7 +252,7 @@ public unsafe static void AddInPlace(double[] vector1, double[] vector2)
}
// Multiplies each element of the vector by val (vector[i] *= val)
- public unsafe static void MutiplyInPlace(double[] vector, double val)
+ public static unsafe void MutiplyInPlace(double[] vector, double val)
{
int length = vector.Length;
unsafe
@@ -268,7 +268,7 @@ public unsafe static void MutiplyInPlace(double[] vector, double val)
}
// Divides each element of the vector by val (vector[i] /= val)
- public unsafe static void DivideInPlace(double[] vector, double val)
+ public static unsafe void DivideInPlace(double[] vector, double val)
{
int length = vector.Length;
unsafe
@@ -284,7 +284,7 @@ public unsafe static void DivideInPlace(double[] vector, double val)
}
// Divides each element of the vector by val (vector[i] /= val)
- public unsafe static void DivideInPlace(float[] vector, float val)
+ public static unsafe void DivideInPlace(float[] vector, float val)
{
int length = vector.Length;
unsafe
@@ -299,7 +299,7 @@ public unsafe static void DivideInPlace(float[] vector, float val)
}
}
- public unsafe static double GetEuclideanDistance(double[] vector1, double[] vector2)
+ public static unsafe double GetEuclideanDistance(double[] vector1, double[] vector2)
{
double sum = 0;
double diff;
diff --git a/src/Microsoft.ML.KMeansClustering/KMeansPlusPlusTrainer.cs b/src/Microsoft.ML.KMeansClustering/KMeansPlusPlusTrainer.cs
index 3bab791fe1..1ab65a8cd5 100644
--- a/src/Microsoft.ML.KMeansClustering/KMeansPlusPlusTrainer.cs
+++ b/src/Microsoft.ML.KMeansClustering/KMeansPlusPlusTrainer.cs
@@ -147,7 +147,7 @@ private KMeansPredictor TrainCore(IChannel ch, RoleMappedData data, int dimensio
var cursorFactory = new FeatureFloatVectorCursor.Factory(data, CursOpt.Features | CursOpt.Id | CursOpt.Weight);
// REVIEW: It would be nice to extract these out into subcomponents in the future. We should
// revisit and even consider breaking these all into individual KMeans-flavored trainers, they
- // all produce a valid set of output centroids with various trade-offs in runtime (with perhaps
+ // all produce a valid set of output centroids with various trade-offs in runtime (with perhaps
// random initialization creating a set that's not terribly useful.) They could also be extended to
// pay attention to their incoming set of centroids and incrementally train.
if (_initAlgorithm == InitAlgorithm.KMeansPlusPlus)
@@ -346,13 +346,13 @@ public static void Initialize(
///
/// An instance of this class is used by SharedStates in YinYangTrainer
- /// and KMeansBarBarInitialization. It effectively bounds MaxInstancesToAccelerate and
+ /// and KMeansBarBarInitialization. It effectively bounds MaxInstancesToAccelerate and
/// initializes RowIndexGetter.
///
internal sealed class KMeansAcceleratedRowMap
{
// Retrieves the row's index for per-instance data. If the
- // row is not assigned an index (it occurred after 'maxInstancesToAccelerate')
+ // row is not assigned an index (it occurred after 'maxInstancesToAccelerate')
// or we are not accelerating then this returns -1.
public readonly KMeansUtils.RowIndexGetter RowIndexGetter;
@@ -434,14 +434,14 @@ internal static class KMeansBarBarInitialization
///
/// Data for optimizing KMeans|| initialization. Very similar to SharedState class
/// For every instance, there is a space for the best weight and best cluster computed.
- ///
+ ///
/// In this class, new clusters mean the clusters that were added to the cluster set
- /// in the previous round of KMeans|| and old clusters are the rest of them (the ones
+ /// in the previous round of KMeans|| and old clusters are the rest of them (the ones
/// that were added in the rounds before the previous one).
- ///
+ ///
/// In every round of KMeans||, numSamplesPerRound new clusters are added to the set of clusters.
- /// There are 'numRounds' number of rounds. We compute and store the distance of each new
- /// cluster from every round to all of the previous clusters and use it
+ /// There are 'numRounds' number of rounds. We compute and store the distance of each new
+ /// cluster from every round to all of the previous clusters and use it
/// to avoid unnecessary computation by applying the triangle inequality.
///
private sealed class SharedState
@@ -453,12 +453,12 @@ private sealed class SharedState
// Note that this array is only allocated for MaxInstancesToAccelerate elements.
private readonly int[] _bestCluster;
- // _bestWeight holds the weight of instance x to _bestCluster[x] where weight(x) = dist(x, _bestCluster[x])^2 - norm(x)^2.
+ // _bestWeight holds the weight of instance x to _bestCluster[x] where weight(x) = dist(x, _bestCluster[x])^2 - norm(x)^2.
// Note that this array is only allocated for MaxInstancesToAccelerate elements.
private readonly Float[] _bestWeight;
// The distance of each newly added cluster from the previous round to every old cluster
- // the first dimension of this array is the size of numSamplesPerRound
+ // the first dimension of this array is the size of numSamplesPerRound
// and the second dimension is the size of numRounds * numSamplesPerRound.
// _clusterDistances[i][j] = dist(cluster[i+clusterPrevCount], cluster[j])
// where clusterPrevCount-1 is the last index of the old clusters
@@ -510,8 +510,8 @@ public Float GetBestWeight(int idx)
///
/// When assigning an accelerated row to a cluster, we store away the weight
/// to its closest cluster, as well as the identity of the new
- /// closest cluster. Note that bestWeight can be negative since it is
- /// corresponding to the weight of a distance which does not have
+ /// closest cluster. Note that bestWeight can be negative since it corresponds
+ /// to the weight of a distance which does not include
/// the L2 norm of the point itself.
///
public void SetInstanceCluster(int n, Float bestWeight, int bestCluster)
@@ -565,7 +565,7 @@ public bool CanWeightComputationBeAvoided(Float instanceDistanceToBestOldCluster
// Use triangle inequality to evaluate whether weight computation can be avoided
// dist(x,cNew) + dist(x,cOld) > dist(cOld,cNew) =>
// dist(x,cNew) > dist(cOld,cNew) - dist(x,cOld) =>
- // If dist(cOld,cNew) - dist(x,cOld) > dist(x,cOld), then dist(x,cNew) > dist(x,cOld). Therefore it is
+ // If dist(cOld,cNew) - dist(x,cOld) > dist(x,cOld), then dist(x,cNew) > dist(x,cOld). Therefore it is
// not necessary to compute dist(x,cNew).
if (distanceBetweenOldAndNewClusters - instanceDistanceToBestOldCluster > instanceDistanceToBestOldCluster)
return true;
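
A concrete numeric instance of the pruning rule above, with assumed distances, makes the inequality chain easier to follow:

using System;

class TrianglePruningSketch
{
    static void Main()
    {
        double instanceDistanceToBestOldCluster = 2.0;  // dist(x, cOld), assumed
        double distanceBetweenOldAndNewClusters = 7.0;  // dist(cOld, cNew), assumed

        // dist(x, cNew) >= dist(cOld, cNew) - dist(x, cOld) = 5, which is > dist(x, cOld) = 2,
        // so cNew cannot be closer than cOld and the weight computation is skipped.
        bool canSkip = distanceBetweenOldAndNewClusters - instanceDistanceToBestOldCluster
            > instanceDistanceToBestOldCluster;
        Console.WriteLine(canSkip); // True
    }
}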
@@ -577,7 +577,7 @@ public bool CanWeightComputationBeAvoided(Float instanceDistanceToBestOldCluster
///
/// This function finds the best cluster and the best weight for an instance using
/// smart triangle inequality to avoid unnecessary weight computations.
- ///
+ ///
/// Note that is used to avoid the storing the new cluster in
/// final round. After the final round, best cluster information will be ignored.
///
@@ -649,7 +649,7 @@ private static void FindBestCluster(ref VBuffer point, int pointRowIndex,
}
///
- /// This method computes the memory requirement for _clusterDistances in SharedState (clusterBytes) and
+ /// This method computes the memory requirement for _clusterDistances in SharedState (clusterBytes) and
/// the maximum number of instances whose weight to the closest cluster can be memorized in order to avoid
/// recomputation later.
///
@@ -678,7 +678,7 @@ private static void ComputeAccelerationMemoryRequirement(long accelMemBudgetMb,
///
/// Uses memory in initializationState to cache distances and avoids unnecessary distance computations
/// akin to YinYang-KMeans paper.
- ///
+ ///
/// Everywhere in this function, weight of an instance x from a cluster c means weight(x,c) = dist(x,c)^2-norm(x)^2.
/// We store weight in most cases to avoid unnecessary computation of norm(x).
///
@@ -1019,7 +1019,7 @@ public void UpdateClusterAssignment(bool firstIteration, ref VBuffer feat
{
// update the cachedSum as the instance moves from (previous) bestCluster[n] to cluster
VectorUtils.Add(ref features, ref CachedSum[cluster]);
- // There doesnt seem to be a Subtract function that does a -= b, so doing a += (-1 * b)
+ // There doesn't seem to be a Subtract function that does a -= b, so doing a += (-1 * b)
VectorUtils.AddMult(ref features, -1, ref CachedSum[previousCluster]);
NumChanged++;
}
@@ -1151,7 +1151,7 @@ private sealed class SharedState
// max value of delta[i] for 0 <= i < _k
public Float DeltaMax;
- // Per instance structures
+ // Per instance structures
public int GetBestCluster(int idx)
{
@@ -1180,7 +1180,7 @@ public SharedState(FeatureFloatVectorCursor.Factory factory, IChannel ch, long b
if (MaxInstancesToAccelerate > 0)
{
- // allocate data structures
+ // allocate data structures
Delta = new Float[k];
_bestCluster = new int[MaxInstancesToAccelerate];
@@ -1478,7 +1478,7 @@ public struct RowStats
/// data set with a probability of numSamples/N * weight/(sum(weight)). Buffer
/// is sized to the number of threads plus one and stores the minheaps needed to
/// perform the per-thread reservoir samples.
- ///
+ ///
/// This method assumes that the numSamples is much smaller than the full dataset as
/// it expects to be able to sample numSamples * numThreads.
///
@@ -1514,13 +1514,13 @@ public static RowStats ParallelWeightedReservoirSample(
// We use distance as a proxy for 'is the same point'. By excluding
// all points that lie within a very small distance of our current set of
// centroids we force the algorithm to explore more broadly and avoid creating a
- // set of centroids containing the same, or very close to the same, point
+ // set of centroids containing the same, or very close to the same, point
// more than once.
Float sameClusterEpsilon = (Float)1e-15;
Float weight = weightFn(ref point, pointRowIndex);
- // If numeric instability has forced it to zero, then we bound it to epsilon to
+ // If numeric instability has forced it to zero, then we bound it to epsilon to
// keep the key valid and avoid NaN, (although the math does tend to work out regardless:
// 1 / 0 => Inf, base ^ Inf => 0, when |base| < 1)
if (weight == 0)
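
The key/weight remarks above match the standard weighted reservoir scheme (Efraimidis-Spirakis A-Res): each point draws a key u^(1/w) for u ~ U(0,1), and the k largest keys are kept in a min-heap; a zero weight drives the key to 0 (1/0 => Inf, u^Inf => 0 for u < 1), which is why it is bounded to an epsilon. The sketch below is a generic, single-threaded version of that technique, not the trainer's per-thread implementation, and it uses PriorityQueue from .NET 6+ purely for illustration.

    using System;
    using System.Collections.Generic;

    static class WeightedReservoir
    {
        // Sample k items with probability proportional to weight: keep the k largest keys u^(1/w).
        public static List<T> Sample<T>(IEnumerable<(T Item, double Weight)> data, int k, Random rand)
        {
            var heap = new PriorityQueue<T, double>(); // min-heap on the key
            foreach (var (item, weight) in data)
            {
                double w = weight > 0 ? weight : 1e-15; // bound zero weights, analogous to the epsilon above
                double key = Math.Pow(rand.NextDouble(), 1.0 / w);
                if (heap.Count < k)
                    heap.Enqueue(item, key);
                else if (heap.TryPeek(out _, out double minKey) && key > minKey)
                {
                    heap.Dequeue();
                    heap.Enqueue(item, key);
                }
            }
            var result = new List<T>();
            while (heap.Count > 0)
                result.Add(heap.Dequeue());
            return result;
        }
    }
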
diff --git a/src/Microsoft.ML.LightGBM/LightGbmArguments.cs b/src/Microsoft.ML.LightGBM/LightGbmArguments.cs
index 0612135ce3..f31f67a08d 100644
--- a/src/Microsoft.ML.LightGBM/LightGbmArguments.cs
+++ b/src/Microsoft.ML.LightGBM/LightGbmArguments.cs
@@ -36,7 +36,7 @@ public interface IBoosterParameter
}
///
- /// Parameters names comes from LightGBM library.
+ /// Parameter names come from the LightGBM library.
/// See https://github.com/Microsoft/LightGBM/blob/master/docs/Parameters.rst.
///
public sealed class LightGbmArguments : LearnerInputBaseWithGroupId
diff --git a/src/Microsoft.ML.LightGBM/LightGbmBinaryTrainer.cs b/src/Microsoft.ML.LightGBM/LightGbmBinaryTrainer.cs
index 7ce9f42b10..f788b4feab 100644
--- a/src/Microsoft.ML.LightGBM/LightGbmBinaryTrainer.cs
+++ b/src/Microsoft.ML.LightGBM/LightGbmBinaryTrainer.cs
@@ -27,7 +27,7 @@ public sealed class LightGbmBinaryPredictor : FastTreePredictionWrapper
{
public const string LoaderSignature = "LightGBMBinaryExec";
public const string RegistrationName = "LightGBMBinaryPredictor";
-
+
private static VersionInfo GetVersionInfo()
{
// REVIEW: can we decouple the version from FastTree predictor version ?
@@ -129,9 +129,9 @@ protected override void CheckAndUpdateParametersBeforeTraining(IChannel ch, Role
public static partial class LightGbm
{
[TlcModule.EntryPoint(
- Name = "Trainers.LightGbmBinaryClassifier",
+ Name = "Trainers.LightGbmBinaryClassifier",
Desc = LightGbmBinaryTrainer.Summary,
- UserName = LightGbmBinaryTrainer.UserName,
+ UserName = LightGbmBinaryTrainer.UserName,
ShortName = LightGbmBinaryTrainer.ShortName,
XmlInclude = new[] { @"",
@""})]
diff --git a/src/Microsoft.ML.LightGBM/LightGbmMulticlassTrainer.cs b/src/Microsoft.ML.LightGBM/LightGbmMulticlassTrainer.cs
index 0534f0d660..ff44139877 100644
--- a/src/Microsoft.ML.LightGBM/LightGbmMulticlassTrainer.cs
+++ b/src/Microsoft.ML.LightGBM/LightGbmMulticlassTrainer.cs
@@ -182,9 +182,9 @@ protected override void CheckAndUpdateParametersBeforeTraining(IChannel ch, Role
public static partial class LightGbm
{
[TlcModule.EntryPoint(
- Name = "Trainers.LightGbmClassifier",
- Desc = "Train a LightGBM multi class model.",
- UserName = LightGbmMulticlassTrainer.Summary,
+ Name = "Trainers.LightGbmClassifier",
+ Desc = "Train a LightGBM multiclass model.",
+ UserName = LightGbmMulticlassTrainer.Summary,
ShortName = LightGbmMulticlassTrainer.ShortName,
XmlInclude = new[] { @"",
@""})]
diff --git a/src/Microsoft.ML.LightGBM/LightGbmRankingTrainer.cs b/src/Microsoft.ML.LightGBM/LightGbmRankingTrainer.cs
index 3bafb09ab3..3fe4628182 100644
--- a/src/Microsoft.ML.LightGBM/LightGbmRankingTrainer.cs
+++ b/src/Microsoft.ML.LightGBM/LightGbmRankingTrainer.cs
@@ -127,9 +127,9 @@ protected override void CheckAndUpdateParametersBeforeTraining(IChannel ch, Role
///
public static partial class LightGbm
{
- [TlcModule.EntryPoint(Name = "Trainers.LightGbmRanker",
- Desc = "Train a LightGBM ranking model.",
- UserName = LightGbmRankingTrainer.UserName,
+ [TlcModule.EntryPoint(Name = "Trainers.LightGbmRanker",
+ Desc = "Train a LightGBM ranking model.",
+ UserName = LightGbmRankingTrainer.UserName,
ShortName = LightGbmRankingTrainer.ShortName,
XmlInclude = new[] { @"",
@""})]
diff --git a/src/Microsoft.ML.LightGBM/LightGbmRegressionTrainer.cs b/src/Microsoft.ML.LightGBM/LightGbmRegressionTrainer.cs
index f1b8850a72..0011a8d8e6 100644
--- a/src/Microsoft.ML.LightGBM/LightGbmRegressionTrainer.cs
+++ b/src/Microsoft.ML.LightGBM/LightGbmRegressionTrainer.cs
@@ -119,9 +119,9 @@ protected override void CheckAndUpdateParametersBeforeTraining(IChannel ch, Role
///
public static partial class LightGbm
{
- [TlcModule.EntryPoint(Name = "Trainers.LightGbmRegressor",
- Desc = LightGbmRegressorTrainer.Summary,
- UserName = LightGbmRegressorTrainer.UserNameValue,
+ [TlcModule.EntryPoint(Name = "Trainers.LightGbmRegressor",
+ Desc = LightGbmRegressorTrainer.Summary,
+ UserName = LightGbmRegressorTrainer.UserNameValue,
ShortName = LightGbmRegressorTrainer.ShortName,
XmlInclude = new[] { @"",
@""})]
diff --git a/src/Microsoft.ML.LightGBM/LightGbmTrainerBase.cs b/src/Microsoft.ML.LightGBM/LightGbmTrainerBase.cs
index 83e0f7803b..eae632eb24 100644
--- a/src/Microsoft.ML.LightGBM/LightGbmTrainerBase.cs
+++ b/src/Microsoft.ML.LightGBM/LightGbmTrainerBase.cs
@@ -40,9 +40,9 @@ private sealed class CategoricalMetaData
private protected readonly LightGbmArguments Args;
///
- /// Stores argumments as objects to convert them to invariant string type in the end so that
- /// the code is culture agnostic. When retrieving key value from this dictionary as string
- /// please convert to string invariant by string.Format(CultureInfo.InvariantCulture, "{0}", Option[key]).
+ /// Stores arguments as objects so they can be converted to invariant strings at the end, keeping
+ /// the code culture agnostic. When retrieving a value from this dictionary as a string,
+ /// please convert it to an invariant string via string.Format(CultureInfo.InvariantCulture, "{0}", Options[key]).
///
private protected readonly Dictionary Options;
private protected readonly IParallel ParallelTraining;
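
The culture note above matters because numeric formatting is culture-sensitive (0.1 renders as "0,1" under many locales), while LightGBM parses its option strings with invariant conventions. A minimal sketch of the conversion the comment asks for; the dictionary and method names here are illustrative, not the trainer's:

    using System;
    using System.Collections.Generic;
    using System.Globalization;
    using System.Linq;

    static class OptionFormatting
    {
        public static string ToOptionString(IReadOnlyDictionary<string, object> options)
            => string.Join(" ", options.Select(kv =>
                kv.Key + "=" + string.Format(CultureInfo.InvariantCulture, "{0}", kv.Value)));

        static void Main()
        {
            var options = new Dictionary<string, object> { ["learning_rate"] = 0.1, ["num_leaves"] = 31 };
            // Always "learning_rate=0.1 num_leaves=31", regardless of the current thread culture.
            Console.WriteLine(ToOptionString(options));
        }
    }
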
@@ -467,7 +467,7 @@ private void GetFeatureValueDense(IChannel ch, FloatLabelCursor cursor, Categori
hotIdx = j;
}
}
- // All-Zero is category 0.
+ // All-Zero is category 0.
fv = hotIdx - catMetaData.CategoricalBoudaries[i] + 1;
}
featureValues[i] = fv;
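
To make the indexing above concrete: categorical feature i occupies the one-hot slot range [boundaries[i], boundaries[i+1]); an all-zero range maps to category 0, and a hot slot maps to its 1-based offset within the range. The standalone sketch below assumes a hypothetical dense slot array and boundaries array, and ignores the tie-breaking the trainer performs when several slots are hot.

    static class CategoricalEncoding
    {
        // Return 0 when no slot in [boundaries[i], boundaries[i+1]) is hot,
        // otherwise the 1-based offset of the hot slot within that range.
        public static int CategoryValue(float[] slots, int[] boundaries, int i)
        {
            int hotIdx = -1;
            for (int j = boundaries[i]; j < boundaries[i + 1]; ++j)
            {
                if (slots[j] > 0)
                {
                    hotIdx = j;
                    break;
                }
            }
            return hotIdx < 0 ? 0 : hotIdx - boundaries[i] + 1;
        }
    }
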
@@ -479,8 +479,8 @@ private void GetFeatureValueDense(IChannel ch, FloatLabelCursor cursor, Categori
}
}
- private void GetFeatureValueSparse(IChannel ch, FloatLabelCursor cursor,
- CategoricalMetaData catMetaData, IRandom rand, out int[] indices,
+ private void GetFeatureValueSparse(IChannel ch, FloatLabelCursor cursor,
+ CategoricalMetaData catMetaData, IRandom rand, out int[] indices,
out float[] featureValues, out int cnt)
{
if (catMetaData.CategoricalBoudaries != null)
@@ -680,7 +680,7 @@ private void LoadDataset(IChannel ch, FloatLabelCursor.Factory factory, Dataset
// Need to push rows to LightGBM.
if (numElem + cursor.Features.Count > features.Length)
{
- // Mini batch size is greater than size of one row.
+ // Mini batch size is greater than size of one row.
// So, at least we have the data of one row.
ch.Assert(curRowCount > 0);
Utils.EnsureSize(ref indptr, curRowCount + 1);
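
The loading logic above accumulates rows into CSR-style buffers (indptr/indices/values) and pushes the accumulated mini batch to LightGBM whenever the next row would overflow the value buffer. A simplified sketch of that flush-then-append pattern; the PushRows callback is a hypothetical stand-in for the native call, not the loader's actual API.

    using System;
    using System.Collections.Generic;

    sealed class CsrBatcher
    {
        private readonly int _capacity;
        private readonly List<int> _indptr = new List<int> { 0 };
        private readonly List<int> _indices = new List<int>();
        private readonly List<float> _values = new List<float>();
        private readonly Action<int[], int[], float[]> _pushRows; // stand-in for the native push

        public CsrBatcher(int capacity, Action<int[], int[], float[]> pushRows)
        {
            _capacity = capacity;
            _pushRows = pushRows;
        }

        public void Add(int[] rowIndices, float[] rowValues)
        {
            // Flush the accumulated batch if this row would not fit; the batch always holds at least one row.
            if (_values.Count + rowValues.Length > _capacity && _values.Count > 0)
                Flush();
            _indices.AddRange(rowIndices);
            _values.AddRange(rowValues);
            _indptr.Add(_values.Count);
        }

        public void Flush()
        {
            _pushRows(_indptr.ToArray(), _indices.ToArray(), _values.ToArray());
            _indptr.Clear();
            _indptr.Add(0);
            _indices.Clear();
            _values.Clear();
        }
    }
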
diff --git a/src/Microsoft.ML.LightGBM/WrappedLightGbmDataset.cs b/src/Microsoft.ML.LightGBM/WrappedLightGbmDataset.cs
index 991aadfb14..4cb8637e2d 100644
--- a/src/Microsoft.ML.LightGBM/WrappedLightGbmDataset.cs
+++ b/src/Microsoft.ML.LightGBM/WrappedLightGbmDataset.cs
@@ -137,7 +137,7 @@ public unsafe void SetWeights(float[] weights)
if (weights != null)
{
Contracts.Assert(weights.Length == GetNumRows());
- // Skip SetWeights if all weights are same.
+ // Skip SetWeights if all weights are the same.
bool allSame = true;
for (int i = 1; i < weights.Length; ++i)
{
diff --git a/src/Microsoft.ML.LightGBM/WrappedLightGbmInterface.cs b/src/Microsoft.ML.LightGBM/WrappedLightGbmInterface.cs
index eec00d9bd1..4abdb47354 100644
--- a/src/Microsoft.ML.LightGBM/WrappedLightGbmInterface.cs
+++ b/src/Microsoft.ML.LightGBM/WrappedLightGbmInterface.cs
@@ -46,7 +46,7 @@ public static extern int FreeArray(
IntPtr ret,
int type);
- #endregion
+ #endregion
#region API ERROR
@@ -149,7 +149,7 @@ public static extern int BoosterCreate(IntPtr trainset,
public static extern int BoosterAddValidData(IntPtr handle, IntPtr validset);
[DllImport(DllName, EntryPoint = "LGBM_BoosterSaveModelToString", CallingConvention = CallingConvention.StdCall)]
- public unsafe static extern int BoosterSaveModelToString(IntPtr handle,
+ public static extern unsafe int BoosterSaveModelToString(IntPtr handle,
int numIteration,
int bufferLen,
ref int outLen,
@@ -166,7 +166,7 @@ public unsafe static extern int BoosterSaveModelToString(IntPtr handle,
public static extern int BoosterGetEvalCounts(IntPtr handle, ref int outLen);
[DllImport(DllName, EntryPoint = "LGBM_BoosterGetEval", CallingConvention = CallingConvention.StdCall)]
- public unsafe static extern int BoosterGetEval(IntPtr handle, int dataIdx,
+ public static extern unsafe int BoosterGetEval(IntPtr handle, int dataIdx,
ref int outLen, double* outResult);
#endregion
diff --git a/src/Microsoft.ML.Maml/HelpCommand.cs b/src/Microsoft.ML.Maml/HelpCommand.cs
index e0941f5a93..a815ffc0e5 100644
--- a/src/Microsoft.ML.Maml/HelpCommand.cs
+++ b/src/Microsoft.ML.Maml/HelpCommand.cs
@@ -344,7 +344,7 @@ private void ShowFormattedSummary(IndentingTextWriter writer, string summary, in
// REVIEW: should we replace consecutive spaces with a single space as a preprocessing step?
int screenWidth = (columns ?? CmdParser.GetConsoleWindowWidth()) - 1;
- // GetConsoleWindowWidth returns 0 if command redirection operator is used
+ // GetConsoleWindowWidth returns 0 if command redirection operator is used
if (screenWidth <= 0)
screenWidth = 80;
diff --git a/src/Microsoft.ML.Onnx/OnnxContextImpl.cs b/src/Microsoft.ML.Onnx/OnnxContextImpl.cs
index f37a1ea557..5341b35d55 100644
--- a/src/Microsoft.ML.Onnx/OnnxContextImpl.cs
+++ b/src/Microsoft.ML.Onnx/OnnxContextImpl.cs
@@ -57,8 +57,8 @@ public OnnxContextImpl(IHostEnvironment env, string name, string producerName,
public override bool ContainsColumn(string colName) => _columnNameMap.ContainsKey(colName);
///
- /// Stops tracking a column. If removeVariable is true then it also removes the
- /// variable associated with it, this is useful in the event where an output variable is
+ /// Stops tracking a column. If removeVariable is true then it also removes the
+ /// variable associated with it; this is useful when an output variable is
/// created before realizing the transform cannot actually save as ONNX.
///
/// IDataView column name to stop tracking
@@ -82,7 +82,7 @@ public override void RemoveColumn(string colName, bool removeVariable)
}
///
- /// Removes an ONNX variable. If removeColumn is true then it also removes the
+ /// Removes an ONNX variable. If removeColumn is true then it also removes the
/// IDataView column associated with it.
///
/// ONNX variable to remove.
@@ -165,7 +165,7 @@ private string GetUniqueName(string prefix, Func pred)
}
///
- /// Retrieves the variable name that maps to the IDataView column name at a
+ /// Retrieves the variable name that maps to the IDataView column name at a
/// given point in the pipeline execution.
///
/// Column Name mapping.
@@ -178,7 +178,7 @@ public override string GetVariableName(string colName)
}
///
- /// Retrieves the variable name that maps to the IDataView column name at a
+ /// Retrieves the variable name that maps to the IDataView column name at a
/// given point in the pipeline execution.
///
/// Column Name mapping.
diff --git a/src/Microsoft.ML.PCA/PcaTrainer.cs b/src/Microsoft.ML.PCA/PcaTrainer.cs
index f90b405c4e..5814d840ca 100644
--- a/src/Microsoft.ML.PCA/PcaTrainer.cs
+++ b/src/Microsoft.ML.PCA/PcaTrainer.cs
@@ -290,7 +290,7 @@ public static CommonOutputs.AnomalyDetectionOutput TrainPcaAnomaly(IHostEnvironm
// - For each new instance, it computes the norm difference between the raw feature vector and the projected feature on that subspace.
// - - If the error is close to 0, the instance is considered normal (non-anomaly).
// REVIEW: move the predictor to a different file and fold EigenUtils.cs to this file.
- // REVIEW: Include the above detail in the XML documentation file.
+ // REVIEW: Include the above detail in the XML documentation file.
///
public sealed class PcaPredictor : PredictorBase,
IValueMapper,
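
The scoring idea in the REVIEW comments above (project each instance onto the learned subspace and measure how much of it is left unexplained) can be illustrated with a plain orthonormal basis. The sketch below is a generic reconstruction-error score, not the predictor's exact formula.

    using System;

    static class PcaAnomaly
    {
        // basis: rows are orthonormal principal directions; returns ||x - projection of x onto the subspace||.
        public static double ReconstructionError(double[] x, double[][] basis)
        {
            var reconstructed = new double[x.Length];
            foreach (var dir in basis)
            {
                double coeff = 0;
                for (int i = 0; i < x.Length; i++)
                    coeff += x[i] * dir[i];
                for (int i = 0; i < x.Length; i++)
                    reconstructed[i] += coeff * dir[i];
            }
            double err = 0;
            for (int i = 0; i < x.Length; i++)
                err += (x[i] - reconstructed[i]) * (x[i] - reconstructed[i]);
            return Math.Sqrt(err); // close to 0 => normal; large => anomalous
        }
    }
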
diff --git a/src/Microsoft.ML.PCA/PcaTransform.cs b/src/Microsoft.ML.PCA/PcaTransform.cs
index 6efbead226..6de130532e 100644
--- a/src/Microsoft.ML.PCA/PcaTransform.cs
+++ b/src/Microsoft.ML.PCA/PcaTransform.cs
@@ -337,7 +337,7 @@ private void Train(Arguments args, TransformInfo[] transformInfos, IDataView tra
for (int iinfo = 0; iinfo < transformInfos.Length; iinfo++)
{
- //Orthonormalize Y in-place using stabilized Gram Schmidt algorithm
+ //Orthonormalize Y in-place using stabilized Gram Schmidt algorithm
//Ref: http://en.wikipedia.org/wiki/Gram-Schmidt#Algorithm
for (var i = 0; i < oversampledRank[iinfo]; ++i)
{
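
The loop above orthonormalizes the sketch matrix Y with modified ("stabilized") Gram-Schmidt, which subtracts each projection immediately rather than all at once and is numerically more robust than the classical variant. A self-contained sketch on double[][] rows, assuming linearly independent rows; the trainer itself works on Float arrays via VectorUtils.

    using System;

    static class GramSchmidt
    {
        // In-place modified Gram-Schmidt on the rows of y.
        public static void Orthonormalize(double[][] y)
        {
            for (int i = 0; i < y.Length; i++)
            {
                for (int j = 0; j < i; j++)
                {
                    double dot = Dot(y[i], y[j]);
                    for (int k = 0; k < y[i].Length; k++)
                        y[i][k] -= dot * y[j][k]; // remove the component along the already-finished row j
                }
                double norm = Math.Sqrt(Dot(y[i], y[i]));
                for (int k = 0; k < y[i].Length; k++)
                    y[i][k] /= norm;
            }
        }

        private static double Dot(double[] a, double[] b)
        {
            double s = 0;
            for (int i = 0; i < a.Length; i++)
                s += a[i] * b[i];
            return s;
        }
    }
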
@@ -537,10 +537,10 @@ private static void TransformFeatures(IExceptionContext ectx, ref VBuffer
dst = new VBuffer(transformInfo.Rank, values, dst.Indices);
}
- [TlcModule.EntryPoint(Name = "Transforms.PcaCalculator",
+ [TlcModule.EntryPoint(Name = "Transforms.PcaCalculator",
Desc = Summary,
- UserName = UserName,
- ShortName = ShortName,
+ UserName = UserName,
+ ShortName = ShortName,
XmlInclude = new[] { @"",
@""})]
public static CommonOutputs.TransformOutput Calculate(IHostEnvironment env, Arguments input)
diff --git a/src/Microsoft.ML.Parquet/ParquetLoader.cs b/src/Microsoft.ML.Parquet/ParquetLoader.cs
index 2def7006d2..503debae65 100644
--- a/src/Microsoft.ML.Parquet/ParquetLoader.cs
+++ b/src/Microsoft.ML.Parquet/ParquetLoader.cs
@@ -33,7 +33,7 @@ namespace Microsoft.ML.Runtime.Data
public sealed class ParquetLoader : IDataLoader, IDisposable
{
///
- /// A Column is a singular representation that consolidates all the related column chunks in the
+ /// A Column is a singular representation that consolidates all the related column chunks in the
/// Parquet file. Information stored within the Column includes its name, raw type read from Parquet,
/// its corresponding ColumnType, and index.
/// Complex columns in Parquet like structs, maps, and lists are flattened into multiple columns.
diff --git a/src/Microsoft.ML.PipelineInference/AutoInference.cs b/src/Microsoft.ML.PipelineInference/AutoInference.cs
index 6ec2894895..54e95e595d 100644
--- a/src/Microsoft.ML.PipelineInference/AutoInference.cs
+++ b/src/Microsoft.ML.PipelineInference/AutoInference.cs
@@ -62,22 +62,24 @@ public class EntryPointGraphDef
///
/// Get the name of the variable assigned to the Data or Training Data input, based on the first node of the subgraph.
- /// A better way to do this would be with a ICanBeSubGraphFirstNode common interface between ITransformInput and ITrainerInputs
- /// and a custom deserializer.
+ /// A better way to do this would be an ICanBeSubGraphFirstNode interface, common to ITransformInput and ITrainerInput,
+ /// and a custom deserializer.
///
public string GetSubgraphFirstNodeDataVarName(IExceptionContext ectx)
{
var nodes = Graph.GetNodes();
- ectx.CheckValue(nodes, nameof(nodes), "Empty Subgraph");
- ectx.CheckValue(nodes[0], nameof(nodes), "Empty Subgraph");
- ectx.CheckValue(nodes[0][FieldNames.Inputs], "Inputs", "Empty subgraph node inputs.");
+ ectx.Check(nodes != null && nodes.Count > 0, "Empty Subgraph");
+ ectx.Check(nodes[0] != null, "Subgraph's first node is empty");
+ ectx.Check(nodes[0][FieldNames.Inputs] != null, "Empty subgraph node inputs.");
string variableName;
if (!GetDataVariableName(ectx, "Data", nodes[0][FieldNames.Inputs], out variableName))
GetDataVariableName(ectx, "TrainingData", nodes[0][FieldNames.Inputs], out variableName);
- ectx.CheckNonEmpty(variableName, nameof(variableName), "Subgraph needs to start with an ITransformInput, or an ITrainerInput. Check your subgraph, or account for variation of the name of the Data input here.");
+ ectx.CheckNonEmpty(variableName, nameof(variableName), "Subgraph needs to start with an " +
+ nameof(CommonInputs.ITransformInput) + ", or an " + nameof(CommonInputs.ITrainerInput) +
+ ". Check your subgraph, or account for variation of the name of the Data input here.");
return variableName;
}
@@ -157,7 +159,7 @@ public sealed class Arguments : ISupportAutoMlStateFactory
public AutoMlMlState(IHostEnvironment env, Arguments args)
: this(env,
- PipelineSweeperSupportedMetrics.GetSupportedMetric(args.Metric),
+ PipelineSweeperSupportedMetrics.GetSupportedMetric(args.Metric),
args.Engine.CreateComponent(env),
args.TerminatorArgs.CreateComponent(env), args.TrainerKind, requestedLearners: args.RequestedLearners)
{
@@ -462,7 +464,7 @@ public void ClearEvaluatedPipelines()
///
/// The InferPipelines methods are just public portals to the internal functions that handle different
/// types of data being passed in: training IDataView, path to training file, or train and test files.
- ///
+ ///
public static AutoMlMlState InferPipelines(IHostEnvironment env, PipelineOptimizerBase autoMlEngine,
IDataView trainData, IDataView testData, int numTransformLevels, int batchSize, SupportedMetric metric,
out PipelinePattern bestPipeline, ITerminator terminator, MacroUtils.TrainerKinds trainerKind)
@@ -483,7 +485,7 @@ public static AutoMlMlState InferPipelines(IHostEnvironment env, PipelineOptimiz
{
Contracts.CheckValue(env, nameof(env));
- // REVIEW: Should be able to infer schema by itself, without having to
+ // REVIEW: Should be able to infer schema by itself, without having to
// infer recipes. Look into this.
// Set loader settings through inference
RecipeInference.InferRecipesFromData(env, trainDataPath, schemaDefinitionFile,
diff --git a/src/Microsoft.ML.PipelineInference/AutoMlEngines/DefaultsEngine.cs b/src/Microsoft.ML.PipelineInference/AutoMlEngines/DefaultsEngine.cs
index 19583cef8c..929dec86cf 100644
--- a/src/Microsoft.ML.PipelineInference/AutoMlEngines/DefaultsEngine.cs
+++ b/src/Microsoft.ML.PipelineInference/AutoMlEngines/DefaultsEngine.cs
@@ -55,7 +55,7 @@ public override PipelinePattern[] GetNextCandidates(IEnumerable
do
{ // Make sure transforms set is valid. Repeat until passes verifier.
- pipeline = new PipelinePattern(SampleTransforms(out var transformsBitMask),
+ pipeline = new PipelinePattern(SampleTransforms(out var transformsBitMask),
learner, "", Env);
valid = PipelineVerifier(pipeline, transformsBitMask);
count++;
diff --git a/src/Microsoft.ML.PipelineInference/AutoMlEngines/RocketEngine.cs b/src/Microsoft.ML.PipelineInference/AutoMlEngines/RocketEngine.cs
index f06fa759e8..867c53053e 100644
--- a/src/Microsoft.ML.PipelineInference/AutoMlEngines/RocketEngine.cs
+++ b/src/Microsoft.ML.PipelineInference/AutoMlEngines/RocketEngine.cs
@@ -165,7 +165,7 @@ private TransformInference.SuggestedTransform[] SampleTransforms(RecipeInference
}
}
- // Take average mass as weight, and take convex combination of
+ // Take average mass as weight, and take convex combination of
// learner-specific weight and unconditioned weight.
allWeight /= allCounts > 0 ? allCounts : 1;
learnerWeight /= learnerCounts > 0 ? learnerCounts : 1;
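
The averaging above can be read as: normalize each accumulated mass by its count, then blend the learner-conditioned estimate with the unconditioned one. A minimal sketch of that convex combination; the blend factor alpha here is illustrative, the engine defines its own trade-off constant.

    using System;

    static class TransformWeighting
    {
        // Convex combination of the learner-specific and unconditioned transform weights.
        public static double BlendedWeight(double learnerMass, int learnerCounts,
            double allMass, int allCounts, double alpha)
        {
            double learnerWeight = learnerMass / Math.Max(learnerCounts, 1);
            double allWeight = allMass / Math.Max(allCounts, 1);
            return alpha * learnerWeight + (1 - alpha) * allWeight;
        }
    }
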
@@ -182,9 +182,9 @@ private TransformInference.SuggestedTransform[] SampleTransforms(RecipeInference
sampledTransforms.AddRange(remainingAvailableTransforms.Where(t =>
AutoMlUtils.AtomicGroupPresent(mask, t.AtomicGroupId)));
- // Add final features concat transform. NOTE: computed bitmask should always
- // exclude the final features concat. If we forget to exclude that one, will
- // cause an error in verification, since it isn't included in the original
+ // Add final features concat transform. NOTE: the computed bitmask should always
+ // exclude the final features concat. If we forget to exclude that one, it will
+ // cause an error in verification, since it isn't included in the original
// dependency mapping (i.e., its level isn't in the dictionary).
sampledTransforms.AddRange(AutoMlUtils.GetFinalFeatureConcat(Env, FullyTransformedData,
DependencyMapping, sampledTransforms.ToArray(), AvailableTransforms, DataRoles));
@@ -217,7 +217,7 @@ public override PipelinePattern[] GetNextCandidates(IEnumerable
var remainingNum = Math.Min(numStageOneTrials - prevCandidates.Length, numCandidates);
if (remainingNum < 1)
{
- // Select top k learners, update stage, then get requested
+ // Select top k learners, update stage, then get requested
// number of candidates, using second stage logic.
UpdateLearners(GetTopLearners(prevCandidates));
_currentStage++;
@@ -295,10 +295,10 @@ private PipelinePattern[] NextCandidates(PipelinePattern[] history, int numCandi
AutoMlUtils.PopulateSweepableParams(learner);
do
- { // Make sure transforms set is valid and have not seen pipeline before.
+ { // Make sure the transform set is valid and the pipeline has not been seen before.
// Repeat until passes or runs out of chances.
pipeline = new PipelinePattern(
- SampleTransforms(learner, history, out var transformsBitMask, uniformRandomTransforms),
+ SampleTransforms(learner, history, out var transformsBitMask, uniformRandomTransforms),
learner, "", Env);
hashKey = GetHashKey(transformsBitMask, learner);
valid = PipelineVerifier(pipeline, transformsBitMask) && !VisitedPipelines.Contains(hashKey);
diff --git a/src/Microsoft.ML.PipelineInference/AutoMlEngines/UniformRandomEngine.cs b/src/Microsoft.ML.PipelineInference/AutoMlEngines/UniformRandomEngine.cs
index 23afce66ff..2ad0137fe9 100644
--- a/src/Microsoft.ML.PipelineInference/AutoMlEngines/UniformRandomEngine.cs
+++ b/src/Microsoft.ML.PipelineInference/AutoMlEngines/UniformRandomEngine.cs
@@ -14,9 +14,9 @@
namespace Microsoft.ML.Runtime.PipelineInference
{
///
- /// Example class of an autoML engine (a pipeline optimizer) that simply tries random enumeration.
- /// If we use a third-party solution for autoML, we can just implement a new wrapper for it as a
- /// PipelineOptimizerBase, and use our existing autoML body code to take advantage of it. This design
+ /// Example class of an autoML engine (a pipeline optimizer) that simply tries random enumeration.
+ /// If we use a third-party solution for autoML, we can just implement a new wrapper for it as a
+ /// PipelineOptimizerBase, and use our existing autoML body code to take advantage of it. This design
/// should allow for easy development of new autoML methods.
///
public sealed class UniformRandomEngine : PipelineOptimizerBase
diff --git a/src/Microsoft.ML.PipelineInference/AutoMlUtils.cs b/src/Microsoft.ML.PipelineInference/AutoMlUtils.cs
index e0bf7dbcca..a8459c5b3c 100644
--- a/src/Microsoft.ML.PipelineInference/AutoMlUtils.cs
+++ b/src/Microsoft.ML.PipelineInference/AutoMlUtils.cs
@@ -67,8 +67,8 @@ private static T CloneEvaluatorInstance(T evaler)
///
/// Using the dependencyMapping and included transforms, determines whether every
- /// transform present only consumes columns produced by a lower- or same-level transform,
- /// or existed in the original dataset. Note, a column could be produced by a
+ /// transform present only consumes columns produced by a lower- or same-level transform,
+ /// or columns that existed in the original dataset. Note, a column could be produced by a
/// transform on the same level, such as in multipart (atomic group) transforms.
///
public static bool AreColumnsConsistent(TransformInference.SuggestedTransform[] includedTransforms,
@@ -173,8 +173,8 @@ private static int[] GetExcludedColumnIndices(TransformInference.SuggestedTransf
{
List includedColumnIndices = new List();
- // For every column, see if either present in initial dataset, or
- // produced by a transform used in current pipeline.
+ // For every column, see if it is either present in the initial dataset or
+ // produced by a transform used in the current pipeline.
for (int columnIndex = 0; columnIndex < dataSample.Schema.ColumnCount; columnIndex++)
{
// Create ColumnInfo object for indexing dictionary
@@ -185,7 +185,7 @@ private static int[] GetExcludedColumnIndices(TransformInference.SuggestedTransf
IsHidden = dataSample.Schema.IsHidden(columnIndex)
};
- // Exclude all hidden and non-numeric columns
+ // Exclude all hidden and non-numeric columns
if (colInfo.IsHidden || !colInfo.ItemType.IsNumber)
continue;
@@ -429,7 +429,7 @@ private static void SetValue(PropertyInfo pi, IComparable value, object entryPoi
///
/// Updates properties of entryPointObj instance based on the values in sweepParams
- ///
+ ///
public static bool UpdateProperties(object entryPointObj, TlcModule.SweepableParamAttribute[] sweepParams)
{
bool result = true;
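
As the summary above says, UpdateProperties copies swept hyperparameter values onto the entry-point arguments object by name. The reflection pattern is roughly the following sketch; the (Name, Value) tuple stands in for the real SweepableParamAttribute, and the real code also handles fields, not only properties.

    using System;
    using System.Reflection;

    static class SweepApply
    {
        // Copy each (Name, Value) pair onto a matching public property of entryPointObj.
        // Returns false if any name has no matching settable property.
        public static bool UpdateProperties(object entryPointObj, (string Name, object Value)[] sweepParams)
        {
            bool result = true;
            foreach (var (name, value) in sweepParams)
            {
                PropertyInfo pi = entryPointObj.GetType().GetProperty(name);
                if (pi == null || !pi.CanWrite)
                {
                    result = false;
                    continue;
                }
                pi.SetValue(entryPointObj, Convert.ChangeType(value, pi.PropertyType));
            }
            return result;
        }
    }
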
@@ -484,7 +484,7 @@ public static bool UpdateProperties(object entryPointObj, TlcModule.SweepablePar
///
/// Updates properties of entryPointObj instance based on the values in sweepParams
- ///
+ ///
public static void PopulateSweepableParams(RecipeInference.SuggestedRecipe.SuggestedLearner learner)
{
foreach (var param in learner.PipelineNode.SweepParams)
diff --git a/src/Microsoft.ML.PipelineInference/ColumnGroupingInference.cs b/src/Microsoft.ML.PipelineInference/ColumnGroupingInference.cs
index 7e1c4bfc05..36392c47ac 100644
--- a/src/Microsoft.ML.PipelineInference/ColumnGroupingInference.cs
+++ b/src/Microsoft.ML.PipelineInference/ColumnGroupingInference.cs
@@ -56,7 +56,7 @@ public InferenceResult(GroupingColumn[] columns)
/// Group together the single-valued columns with the same type and purpose and generate column names.
///
/// The host environment to use.
- /// Whether the original file had a header.
+ /// Whether the original file had a header.
/// If yes, the fields are used to generate the column
/// names, otherwise they are ignored.
/// The (detected) column types.
diff --git a/src/Microsoft.ML.PipelineInference/DatasetFeaturesInference.cs b/src/Microsoft.ML.PipelineInference/DatasetFeaturesInference.cs
index e8d40fc6c8..a3a8876c7e 100644
--- a/src/Microsoft.ML.PipelineInference/DatasetFeaturesInference.cs
+++ b/src/Microsoft.ML.PipelineInference/DatasetFeaturesInference.cs
@@ -14,7 +14,7 @@
namespace Microsoft.ML.Runtime.PipelineInference
{
///
- /// Featurization ideas inspired from:
+ /// Featurization ideas inspired by:
/// http://aad.informatik.uni-freiburg.de/papers/15-NIPS-auto-sklearn-supplementary.pdf
///
public static class DatasetFeatureInference
diff --git a/src/Microsoft.ML.PipelineInference/ExperimentsGenerator.cs b/src/Microsoft.ML.PipelineInference/ExperimentsGenerator.cs
index 6b184e7b61..c9621029d8 100644
--- a/src/Microsoft.ML.PipelineInference/ExperimentsGenerator.cs
+++ b/src/Microsoft.ML.PipelineInference/ExperimentsGenerator.cs
@@ -109,14 +109,14 @@ public static List GenerateCandidates(IHostEnvironment env, string dataFi
RecipeInference.SuggestedRecipe[] recipes = RecipeInference.InferRecipesFromData(env, dataFile, schemaDefinitionFile, out predictorType, out loaderSettings, out inferenceResult);
//get all the trainers for this task, and generate the initial set of candidates.
- // Exclude the hidden learners, and the metalinear learners.
+ // Exclude the hidden learners, and the metalinear learners.
var trainers = ComponentCatalog.GetAllDerivedClasses(typeof(ITrainer), predictorType).Where(cls => !cls.IsHidden);
var loaderSubComponent = new SubComponent("TextLoader", loaderSettings);
string loader = $" loader={loaderSubComponent}";
- // REVIEW: there are more learners than recipes atm.
- // Flip looping through recipes, than through learners if the cardinality changes.
+ // REVIEW: there are more learners than recipes at the moment.
+ // Flip to looping through recipes rather than through learners if the cardinality changes.
foreach (ComponentCatalog.LoadableClassInfo cl in trainers)
{
string learnerSettings;
diff --git a/src/Microsoft.ML.PipelineInference/Interfaces/IPipelineOptimizer.cs b/src/Microsoft.ML.PipelineInference/Interfaces/IPipelineOptimizer.cs
index 5fe46ec61e..00d7654c25 100644
--- a/src/Microsoft.ML.PipelineInference/Interfaces/IPipelineOptimizer.cs
+++ b/src/Microsoft.ML.PipelineInference/Interfaces/IPipelineOptimizer.cs
@@ -143,7 +143,7 @@ protected void SampleHyperparameters(RecipeInference.SuggestedRecipe.SuggestedLe
var proposedParamSet = sweeper.ProposeSweeps(1, AutoMlUtils.ConvertToRunResults(history, isMaximizingMetric)).First();
Env.Assert(proposedParamSet != null && proposedParamSet.All(ps => hyperParams.Any(hp => hp.Name == ps.Name)));
- // Associate proposed param set with learner, so that smart hyperparam
+ // Associate proposed param set with learner, so that smart hyperparam
// sweepers (like KDO) can map them back.
learner.PipelineNode.HyperSweeperParamSet = proposedParamSet;
diff --git a/src/Microsoft.ML.PipelineInference/Interfaces/ITerminator.cs b/src/Microsoft.ML.PipelineInference/Interfaces/ITerminator.cs
index ce51e91031..488c21fc74 100644
--- a/src/Microsoft.ML.PipelineInference/Interfaces/ITerminator.cs
+++ b/src/Microsoft.ML.PipelineInference/Interfaces/ITerminator.cs
@@ -7,9 +7,9 @@
namespace Microsoft.ML.Runtime.PipelineInference
{
///
- /// Interface defining various stopping criteria for pipeline sweeps.
+ /// Interface defining various stopping criteria for pipeline sweeps.
/// This could include number of total iterations, compute time,
- /// budget expended, etc.
+ /// budget expended, etc.
///
public interface ITerminator
{
diff --git a/src/Microsoft.ML.PipelineInference/Macros/PipelineSweeperMacro.cs b/src/Microsoft.ML.PipelineInference/Macros/PipelineSweeperMacro.cs
index c5c23ce675..2f70645d8b 100644
--- a/src/Microsoft.ML.PipelineInference/Macros/PipelineSweeperMacro.cs
+++ b/src/Microsoft.ML.PipelineInference/Macros/PipelineSweeperMacro.cs
@@ -211,7 +211,7 @@ public static CommonOutputs.MacroOutput