diff --git a/docs/configuration.md b/docs/configuration.md
index 2915fb5fa9197..4337f4f517c35 100644
--- a/docs/configuration.md
+++ b/docs/configuration.md
@@ -190,6 +190,8 @@ of the most common options to set are:
     and it is up to the application to avoid exceeding the overhead memory space
     shared with other non-JVM processes. When PySpark is run in YARN or Kubernetes, this memory
     is added to executor resource requests.
+
+    NOTE: Python memory usage may not be limited on platforms that do not support resource limiting, such as Windows.
   </td>
 </tr>
 <tr>
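Whether this limit can be enforced at all is easy to probe, since enforcement relies on the Unix-only `resource` module. An illustrative sketch, not part of this patch:

```python
# Illustrative only: probe whether this platform can enforce the PySpark
# memory limit. 'resource' exists only on Unix-like systems, so on Windows
# the configured limit has no effect on Python memory usage.
try:
    import resource
    soft, hard = resource.getrlimit(resource.RLIMIT_AS)
    print("address-space limit (soft, hard):", soft, hard)
except ImportError:
    print("resource limiting unsupported; Python memory is not limited")
```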
diff --git a/python/pyspark/worker.py b/python/pyspark/worker.py
index 8c59f1f999f18..953b468e96519 100644
--- a/python/pyspark/worker.py
+++ b/python/pyspark/worker.py
@@ -22,7 +22,12 @@
 import os
 import sys
 import time
-import resource
+# 'resource' is a Unix-specific module.
+has_resource_module = True
+try:
+    import resource
+except ImportError:
+    has_resource_module = False
 import socket
 import traceback
 
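The hunk above is the standard optional-import guard: probe once at module import time, record the outcome in a flag, and let every later call site branch on `has_resource_module` instead of repeating the try/except. A minimal self-contained sketch of the pattern (the `address_space_limit` helper is hypothetical, not part of the patch):

```python
# Optional-import guard, as used in worker.py above.
has_resource_module = True
try:
    import resource  # Unix-only module
except ImportError:
    has_resource_module = False

def address_space_limit():
    # Hypothetical helper: return the (soft, hard) RLIMIT_AS bounds,
    # or None on platforms that cannot enforce limits.
    if not has_resource_module:
        return None
    return resource.getrlimit(resource.RLIMIT_AS)
```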
@@ -268,9 +273,9 @@ def main(infile, outfile):
 
         # set up memory limits
         memory_limit_mb = int(os.environ.get('PYSPARK_EXECUTOR_MEMORY_MB', "-1"))
-        total_memory = resource.RLIMIT_AS
-        try:
-            if memory_limit_mb > 0:
+        if memory_limit_mb > 0 and has_resource_module:
+            total_memory = resource.RLIMIT_AS
+            try:
                 (soft_limit, hard_limit) = resource.getrlimit(total_memory)
                 msg = "Current mem limits: {0} of max {1}\n".format(soft_limit, hard_limit)
                 print(msg, file=sys.stderr)
@@ -283,9 +288,9 @@ def main(infile, outfile):
                     print(msg, file=sys.stderr)
                     resource.setrlimit(total_memory, (new_limit, new_limit))
 
-        except (resource.error, OSError, ValueError) as e:
-            # not all systems support resource limits, so warn instead of failing
-            print("WARN: Failed to set memory limit: {0}\n".format(e), file=sys.stderr)
+            except (resource.error, OSError, ValueError) as e:
+                # not all systems support resource limits, so warn instead of failing
+                print("WARN: Failed to set memory limit: {0}\n".format(e), file=sys.stderr)
 
         # initialize global state
         taskContext = None
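Taken together, the two worker.py hunks amount to the following control flow. This is a self-contained sketch, not the worker code itself; the `set_memory_limit_mb` wrapper is ours for readability, but the environment variable, the `RLIMIT_AS` target, and the warn-instead-of-fail behavior match the patch:

```python
import os
import sys

# Probe for the Unix-only 'resource' module once, as the patch does.
has_resource_module = True
try:
    import resource
except ImportError:
    has_resource_module = False

def set_memory_limit_mb(memory_limit_mb):
    # Act only when a positive limit is configured AND the platform
    # can enforce it; otherwise Python memory stays unlimited.
    if memory_limit_mb <= 0 or not has_resource_module:
        return
    try:
        soft_limit, hard_limit = resource.getrlimit(resource.RLIMIT_AS)
        new_limit = memory_limit_mb * 1024 * 1024  # MiB -> bytes
        # Only tighten the limit; never loosen an existing soft limit.
        if soft_limit == resource.RLIM_INFINITY or new_limit < soft_limit:
            resource.setrlimit(resource.RLIMIT_AS, (new_limit, new_limit))
    except (resource.error, OSError, ValueError) as e:
        # Not all systems support RLIMIT_AS, so warn instead of failing.
        print("WARN: Failed to set memory limit: {0}".format(e), file=sys.stderr)

set_memory_limit_mb(int(os.environ.get('PYSPARK_EXECUTOR_MEMORY_MB', "-1")))
```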