|
19 | 19 |
|
20 | 20 | from pyspark.ml.param import Param, Params |
21 | 21 |
|
22 | | -import random |
23 | | - |
24 | | -import sys |
25 | | - |
26 | 22 |
|
27 | 23 | class HasMaxIter(Params): |
28 | 24 | """ |
@@ -174,8 +170,7 @@ def __init__(self): |
174 | 170 | super(HasProbabilityCol, self).__init__() |
175 | 171 | #: param for Column name for predicted class conditional probabilities. Note: Not all models output well-calibrated probability estimates! These probabilities should be treated as confidences, not precise probabilities. |
176 | 172 | self.probabilityCol = Param(self, "probabilityCol", "Column name for predicted class conditional probabilities. Note: Not all models output well-calibrated probability estimates! These probabilities should be treated as confidences, not precise probabilities.") |
177 | | - if 'probability' is not None: |
178 | | - self._setDefault(probabilityCol='probability') |
| 173 | + self._setDefault(probabilityCol='probability') |
179 | 174 |
|
180 | 175 | def setProbabilityCol(self, value): |
181 | 176 | """ |
@@ -366,7 +361,7 @@ def __init__(self): |
366 | 361 | super(HasSeed, self).__init__() |
367 | 362 | #: param for random seed |
368 | 363 | self.seed = Param(self, "seed", "random seed") |
369 | | - self._setDefault(seed=random.randint(0, sys.maxsize)) |
| 364 | + self._setDefault(seed=hash(type(self).__name__))
370 | 365 |
|
371 | 366 | def setSeed(self, value): |
372 | 367 | """ |
@@ -452,11 +447,17 @@ class DecisionTreeParams(Params): |
452 | 447 |
|
453 | 448 | def __init__(self): |
454 | 449 | super(DecisionTreeParams, self).__init__() |
| 450 | + #: param for Maximum depth of the tree. (>= 0) E.g., depth 0 means 1 leaf node; depth 1 means 1 internal node + 2 leaf nodes. |
455 | 451 | self.maxDepth = Param(self, "maxDepth", "Maximum depth of the tree. (>= 0) E.g., depth 0 means 1 leaf node; depth 1 means 1 internal node + 2 leaf nodes.") |
| 452 | + #: param for Max number of bins for discretizing continuous features. Must be >=2 and >= number of categories for any categorical feature. |
456 | 453 | self.maxBins = Param(self, "maxBins", "Max number of bins for discretizing continuous features. Must be >=2 and >= number of categories for any categorical feature.") |
| 454 | + #: param for Minimum number of instances each child must have after split. If a split causes the left or right child to have fewer than minInstancesPerNode, the split will be discarded as invalid. Should be >= 1. |
457 | 455 | self.minInstancesPerNode = Param(self, "minInstancesPerNode", "Minimum number of instances each child must have after split. If a split causes the left or right child to have fewer than minInstancesPerNode, the split will be discarded as invalid. Should be >= 1.") |
| 456 | + #: param for Minimum information gain for a split to be considered at a tree node. |
458 | 457 | self.minInfoGain = Param(self, "minInfoGain", "Minimum information gain for a split to be considered at a tree node.") |
| 458 | + #: param for Maximum memory in MB allocated to histogram aggregation. |
459 | 459 | self.maxMemoryInMB = Param(self, "maxMemoryInMB", "Maximum memory in MB allocated to histogram aggregation.") |
| 460 | + #: param for If false, the algorithm will pass trees to executors to match instances with nodes. If true, the algorithm will cache node IDs for each instance. Caching can speed up training of deeper trees. |
460 | 461 | self.cacheNodeIds = Param(self, "cacheNodeIds", "If false, the algorithm will pass trees to executors to match instances with nodes. If true, the algorithm will cache node IDs for each instance. Caching can speed up training of deeper trees.") |
461 | 462 |
|
462 | 463 | def setMaxDepth(self, value): |
|
0 commit comments