diff --git a/docs/configuration.md b/docs/configuration.md
index 0de824546c751..fb509781f4a5a 100644
--- a/docs/configuration.md
+++ b/docs/configuration.md
@@ -334,7 +334,7 @@ Apart from these, the following properties are also available, and may be useful
Enable profiling in Python worker, the profile result will show up by `sc.show_profiles()`,
or it will be displayed before the driver exiting. It also can be dumped into disk by
- `sc.dump_profiles(path)`. If some of the profile results had been displayed maually,
+ `sc.dump_profiles(path)`. If some of the profile results have been displayed manually,
they will not be displayed automatically before driver exiting.
By default the `pyspark.profiler.BasicProfiler` will be used, but this can be overridden by
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
index d78b4c2f8909c..feab870af02dd 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
@@ -576,7 +576,7 @@ class DataFrame private[sql](
def as(alias: Symbol): DataFrame = as(alias.name)
/**
- * Selects a set of expressions.
+ * Selects a set of column-based expressions.
* {{{
* df.select($"colA", $"colB" + 1)
* }}}
|