diff --git a/.gitignore b/.gitignore
index 8b11f3df6..c6cefb63b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -9,9 +9,12 @@ dist/*
#########################
*.egg-info/
-
+# Build docs and packages
build/*
doc/build/
# virtual environment
-venv/
\ No newline at end of file
+venv/
+
+# sphinx autogen
+doc/source/documentation_style/api/*
\ No newline at end of file
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 60dc75351..cb5709c9c 100755
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -1,12 +1,14 @@
from datetime import datetime
+from pyansys_sphinx_theme import __version__, pyansys_logo_black
+
# Project information
project = 'PyAnsys Developers Guide'
-copyright = f'{datetime.now().year}, ANSYS'
-author = 'ANSYS, Inc.'
+copyright = f"(c) {datetime.now().year} ANSYS, Inc. All rights reserved"
+author = "Ansys Inc."
release = version = '0.1.dev0'
-html_logo = 'https://docs.pyansys.com/_static/pyansys-logo-black-cropped.png'
+html_logo = pyansys_logo_black
html_theme = 'pyansys_sphinx_theme'
html_theme_options = {
@@ -16,21 +18,31 @@
# Sphinx extensions
extensions = [
- 'sphinx.ext.todo',
+ "sphinx_copybutton",
+ 'sphinx_toolbox.collapse',
'sphinx.ext.autodoc',
- 'sphinx.ext.napoleon',
'sphinx.ext.autosummary',
+ 'sphinx.ext.intersphinx',
+ 'sphinx.ext.napoleon',
+ 'sphinx.ext.todo',
]
+# Intersphinx mapping
+intersphinx_mapping = {
+ "python": ("https://docs.python.org/dev", None),
+ # "scipy": ("https://docs.scipy.org/doc/scipy/reference", None),
+ # "numpy": ("https://numpy.org/devdocs", None),
+ # "matplotlib": ("https://matplotlib.org/stable", None),
+ # "pandas": ("https://pandas.pydata.org/pandas-docs/stable", None),
+ # "pyvista": ("https://docs.pyvista.org/", None),
+}
+
# The suffix(es) of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
-
-master_doc = 'index'
-
latex_elements = {}
# Grouping the document tree into LaTeX files. List of tuples
diff --git a/doc/source/documentation_style/api/pyansys_sphinx_theme.samples.Complex.abs.rst b/doc/source/documentation_style/api/pyansys_sphinx_theme.samples.Complex.abs.rst
deleted file mode 100644
index aafd4ad8f..000000000
--- a/doc/source/documentation_style/api/pyansys_sphinx_theme.samples.Complex.abs.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-pyansys\_sphinx\_theme.samples.Complex.abs
-==========================================
-
-.. currentmodule:: pyansys_sphinx_theme.samples
-
-.. autoproperty:: Complex.abs
\ No newline at end of file
diff --git a/doc/source/documentation_style/api/pyansys_sphinx_theme.samples.Complex.imag.rst b/doc/source/documentation_style/api/pyansys_sphinx_theme.samples.Complex.imag.rst
deleted file mode 100644
index bb88a8ac3..000000000
--- a/doc/source/documentation_style/api/pyansys_sphinx_theme.samples.Complex.imag.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-pyansys\_sphinx\_theme.samples.Complex.imag
-===========================================
-
-.. currentmodule:: pyansys_sphinx_theme.samples
-
-.. autoproperty:: Complex.imag
\ No newline at end of file
diff --git a/doc/source/documentation_style/api/pyansys_sphinx_theme.samples.Complex.real.rst b/doc/source/documentation_style/api/pyansys_sphinx_theme.samples.Complex.real.rst
deleted file mode 100644
index e81172e28..000000000
--- a/doc/source/documentation_style/api/pyansys_sphinx_theme.samples.Complex.real.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-pyansys\_sphinx\_theme.samples.Complex.real
-===========================================
-
-.. currentmodule:: pyansys_sphinx_theme.samples
-
-.. autoproperty:: Complex.real
\ No newline at end of file
diff --git a/doc/source/guidelines/images/Guidelines_chart.png b/doc/source/guidelines/images/Guidelines_chart.png
new file mode 100644
index 000000000..5b57653d9
Binary files /dev/null and b/doc/source/guidelines/images/Guidelines_chart.png differ
diff --git a/doc/source/guidelines/images/log_flow.png b/doc/source/guidelines/images/log_flow.png
new file mode 100644
index 000000000..0c468d414
Binary files /dev/null and b/doc/source/guidelines/images/log_flow.png differ
diff --git a/doc/source/guidelines/index.rst b/doc/source/guidelines/index.rst
index 89d0a8f80..51030f4b2 100644
--- a/doc/source/guidelines/index.rst
+++ b/doc/source/guidelines/index.rst
@@ -1,20 +1,21 @@
-Guidelines
+Guidelines and Best Practices
#############################
-The purpose of this section is to highligh some common practices that
-can be applied to the entire PyAnsys project in order to remain consistent.
+This section describes and outlines several best practices that can be
+applied to PyAnsys libraries for the purpose of creating effective and
+efficient Python libraries to interface with Ansys products and
+services. These guidelines demonstrate how applications and complex
+services expose functionalities such as logging, data transfer, and
+Application APIs.
-One of the main objectives of PyAnsys libraries is to wrap (encapsulate)
-data and methods within units of execution while hiding data or parameters
-in protected variables.
-
-Those guidelines demonstrate how applications and complex services expose
-functionalities such as logging, data transfer...
+Table of Contents
+-----------------
.. toctree::
- :hidden:
- :maxdepth: 3
+ :maxdepth: 2
dev_practices
app_interface_abstraction
- service_abstraction
data_transfer_and_representation
+ logging
+ service_abstraction
+
diff --git a/doc/source/guidelines/logging.rst b/doc/source/guidelines/logging.rst
new file mode 100644
index 000000000..8154082e3
--- /dev/null
+++ b/doc/source/guidelines/logging.rst
@@ -0,0 +1,338 @@
+Logging Guidelines
+##################
+
+This section describes several guidelines for logging in PyAnsys
+libraries. These guidelines are best practices discovered through
+implementing logging services and modules within PyAnsys
+libraries. Suggestions and improvements are welcome.
+External resources also describe `basic <https://docs.python.org/3/howto/logging.html>`__
+and `advanced <https://docs.python.org/3/howto/logging-cookbook.html>`__ techniques.
+
+
+Description and usage
+=====================
+Logging helps to track events occurring in the application. For each of them a log record
+is created. It contains a detailed set of information about the current application operation.
+Whenever information must be exposed, displayed and shared, logging is the
+way to do it.
+It is intended for both the users and the application developers.
+It can serve several purposes:
+
+ - extract some valuable data for the final users to know the status of their work.
+ - track the progress and the course of the application usage.
+ - provide the developer with as much information as possible if an issue happens.
+
+The message logged can contain generic information or embed data specific
+to the current session.
+Message content is associated to a level of severity (info, warning, error...).
+Generally, this degree of significance indicates the recipient of the message.
+An info message is directed to the user while a debug message is useful for
+the developer itself.
+
+
+Logging in PyAnsys Libraries
+============================
+
+The logging capabilities in PyAnsys libraries should be built upon the
+`standard logging <https://docs.python.org/3/library/logging.html>`__
+library. PyAnsys libraries should not replace this library, but rather provide
+a standardized way to interact between the built-in :mod:`logging`
+library and ``PyAnsys`` libraries.
+
+
+Logging Best Practices
+----------------------
+
+Avoid printing to the console
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+A common habit while prototyping a new feature is to print messages to the command line.
+Instead of using the common ``print()`` function, it is advised to use a ``StreamHandler`` and redirect its content.
+Indeed, this allows filtering messages based on their level and properly applying the formatter.
+To do so, a boolean argument can be added in the initializer of the ``Logger`` class.
+This argument specifies how to handle the stream.
+
+Enable/Disable handlers
+~~~~~~~~~~~~~~~~~~~~~~~
+Sometimes the user might want to disable specific handlers such as a
+file handler where log messages are written. If so, the existing
+handler must be properly closed and removed. Otherwise the file access
+might be denied later when you try to write new log content.
+
+Here's one approach to closing log handlers.
+
+.. code:: python
+
+ for handler in design_logger.handlers:
+ if isinstance(handler, logging.FileHandler):
+ handler.close()
+ design_logger.removeHandler(handler)
+
+
+App Filter
+~~~~~~~~~~
+A filter shows its full value when the content of a message depends on some conditions.
+It injects contextual information in the core of the message.
+This can be useful to harmonize the message rendering when the application output is not consistent
+and varies depending on the data processed.
+It requires the creation of a class based on ``logging.Filter`` and the implementation of
+the ``filter`` method. This method will contain all the modified content sent to the stream.
+
+.. code:: python
+
+ class AppFilter(logging.Filter):
+
+ def __init__(self, destination="Global", extra=""):
+ self._destination = destination
+ self._extra = extra
+
+ def filter(self, record):
+ """Modify the record sent to the stream.""""
+
+ record.destination = self._destination
+
+ # This will avoid the extra '::' for Global that does not have any extra info.
+ if not self._extra:
+ record.extra = self._extra
+ else:
+ record.extra = self._extra + ":"
+ return True
+
+
+.. code:: python
+
+ class CustomLogger(object):
+
+ def __init__(self, messenger, level=logging.DEBUG, to_stdout=False):
+
+ if to_stdout:
+ self._std_out_handler = logging.StreamHandler()
+ self._std_out_handler.setLevel(level)
+ self._std_out_handler.setFormatter(FORMATTER)
+ self.global_logger.addHandler(self._std_out_handler)
+
+
+String format
+~~~~~~~~~~~~~
+Even if the current practice recommends using the f-string to format
+most strings, when it comes to logging, the former %-formatting is
+preferable. This way the string format is not evaluated at
+runtime. It is deferred and evaluated only when the message is
+emitted. If there is any formatting or evaluation error, these will be
+reported as logging errors and will not halt code execution.
+
+.. code:: python
+
+ logger.info("Project %s has been opened.", project.GetName())
+
+
+Application or Service Logging
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The following guidelines describe "Application" or "Service" logging
+module for a PyAnsys library, where a PyAnsys library is used to
+extend or expose features from an Ansys application, product, or
+service that may be local or remote.
+
+This section describes two main loggers for a PyAnsys library that
+exposes or extends a service based application, the *Global logger*
+and the *Instance logger*. These loggers are customized classes that wrap
+:class:`logging.Logger` from :mod:`logging` module and add specific
+features to it. :ref:`logging_in_pymapdl_figure` outlines the logging
+approach used by PyMAPDL and the scopes of the global and local
+loggers.
+
+.. _logging_in_pymapdl_figure:
+
+.. figure:: images/Guidelines_chart.png
+ :align: center
+ :alt: Logging in PyMAPDL
+ :figclass: align-center
+
+ **Figure 1: Example Logging Structure in PyMAPDL**
+
+The source for this example logger can be found both within developers
+guide repository at `pyansys_logging.py
+<https://github.com/pyansys/dev-guide/blob/main/logging/pyansys_logging.py>`_
+as well as in the collapsible section below:
+
+.. collapse:: Example PyAnsys custom logger module
+
+ .. literalinclude:: ../../../logging/pyansys_logging.py
+
+Following are some unit tests demonstrating how to use the code implemented above:
+
+.. collapse:: How to use PyAnsys custom logger module
+
+ .. literalinclude:: ../../../logging/test_pyansys_logging.py
+
+Example Global logger
+~~~~~~~~~~~~~~~~~~~~~
+
+There is a global logger named ``py*_global`` which is created when
+importing ``ansys.product.service``
+(``ansys.product.service.__init__``). This logger is recommended for
+most scenarios, especially when complex modules or classes are not
+involved, since it does not track instances, rather can be used
+globally. If you intend to log the initialization of a library or
+module, you should use this logger. To use this global logger, you
+must import it at the top of your script or module:
+
+.. code:: python
+
+ from ansys.product.service import LOG
+
+You could also rename it to avoid conflicts with other loggers (if any):
+
+.. code:: python
+
+ from ansys.product.service import LOG as logger
+
+
+It should be noted that the default logging level of ``LOG`` is
+``ERROR`` (``logging.ERROR``). To change this and output different
+error level messages you can use the following approach:
+
+.. code:: python
+
+ LOG.logger.setLevel('DEBUG')
+ LOG.file_handler.setLevel('DEBUG') # if present
+ LOG.stdout_handler.setLevel('DEBUG') # if present
+
+
+Alternatively, you can use:
+
+.. code:: python
+
+ LOG.setLevel('DEBUG')
+
+
+This way ensures all the handlers are set to the desired log level.
+
+By default, this logger does not log to a file. If you wish to do so,
+you can add a file handler using:
+
+.. code:: python
+
+ import os
+ file_path = os.path.join(os.getcwd(), 'pylibrary.log')
+ LOG.log_to_file(file_path)
+
+This enables logging to that file in addition of the standard output.
+If you wish to change the characteristics of this global logger from
+the beginning of the execution, you must edit the file ``__init__`` in
+the directory of your library.
+
+To log using this logger, simply call the desired method as a normal
+logger.
+
+.. code:: python
+
+ >>> import logging
+ >>> from ansys.mapdl.core.logging import Logger
+ >>> LOG = Logger(level=logging.DEBUG, to_file=False, to_stdout=True)
+ >>> LOG.debug('This is LOG debug message.')
+ | Level | Instance | Module | Function | Message
+ |----------|-----------------|------------------|----------------------|--------------------------------------------------------
+ | DEBUG | | __init__ | | This is LOG debug message.
+
+
+Instance logger
+~~~~~~~~~~~~~~~
+Every time that the class ``_MapdlCore`` is instantiated, a logger is
+created. This logger is recommended when using the ``pool`` library
+or when using multiple instances of ``Mapdl``. The main feature of
+this logger is that it tracks each instance and it includes its name
+when logging. The name of the instances are unique. For example in
+case of using the ``gRPC`` ``Mapdl`` version, its name includes the IP
+and port of the corresponding instance, making its logger unique.
+
+
+The instance loggers can be accessed in two places:
+
+* ``_MapdlCore._log``. For backward compatibility.
+* ``LOG._instances``. This field is a ``dict`` where the key is the
+ name of the created logger.
+
+These instance loggers inherit from the ``pymapdl_global`` output
+handlers and logging level unless otherwise specified. The way this
+logger works is very similar to the global logger. You can add a file
+handler if you wish using the method ``log_to_file`` or change the log
+level using :meth:`logging.Logger.setLevel`.
+
+You can use this logger like this:
+
+.. code:: python
+
+ >>> from ansys.mapdl.core import launch_mapdl
+ >>> mapdl = launch_mapdl()
+ >>> mapdl._log.info('This is an useful message')
+
+ | Level | Instance | Module | Function | Message
+ |----------|-----------------|------------------|----------------------|--------------------------------------------------------
+ | INFO | 127.0.0.1:50052 | test | | This is an useful message
+
+
+
+Wrapping Other Loggers
+~~~~~~~~~~~~~~~~~~~~~~
+A product, due to its architecture can be made of several loggers.
+The ``logging`` library features allow working with a finite number of loggers.
+The factory function ``logging.getLogger()`` helps to access each logger by its name.
+In addition to this name mapping, a hierarchy can be established to structure the loggers
+parenting and their connection.
+
+
+For instance, if an ANSYS product is using a pre-existing custom logger encapsulated inside the product itself, the product will benefit from exposing it through the standard python tools.
+It is recommended to use the standard library as much as possible. It will facilitate every contribution -both external and internal- to the project by exposing common tools that are widely spread.
+Each developer will be able to operate quickly and autonomously.
+The project will take advantage of the entire set of features exposed in the standard logger and all the upcoming improvements.
+
+Create a custom log handler to catch each product message and redirect them to another logger:
+==============================================================================================
+
+Context:
+--------
+
+AEDT product has its own internal logger called the message manager made of 3 main destinations:
+
+ * *Global*: for the entire Project manager
+ * *Project*: related to the project
+ * *Design*: related to the design (most specific destination of each 3 loggers.)
+
+The message manager is not using the standard python logging module and this might be a problem later when exporting messages and data from each ANSYS product to a common tool.
+In most of the cases, it is easier to work with the standard python module to extract data.
+In order to overcome this limitation, the existing message manager is wrapped into a logger based on the standard python `logging <https://docs.python.org/3/library/logging.html>`__ module.
+
+
+.. figure:: images/log_flow.png
+ :align: center
+ :alt: Loggers message passing flow.
+ :figclass: align-center
+
+ **Figure 1: Loggers message passing flow.**
+
+
+This wrapper implementation boils down to a custom handler. It is based on a class inherited from logging.Handler.
+The initializer of this class will require the message manager to be passed as an argument in order to link the standard logging service with the ANSYS internal message manager.
+
+.. code:: python
+
+ class LogHandler(logging.Handler):
+
+ def __init__(self, internal_app_messenger, log_destination, level=logging.INFO):
+ logging.Handler.__init__(self, level)
+ # destination is used if when the internal message manager
+ # is made of several different logs. Otherwise it is not relevant.
+ self.destination = log_destination
+ self.messenger = internal_app_messenger
+
+ def emit(self, record):
+ pass
+
+
+The purpose of this class is to send log messages to the AEDT logging stream.
+One of the mandatory actions is to overwrite the ``emit`` function. This method operates as a proxy. It will dispatch all the log messages toward the message manager.
+Based on the record level, the message is sent to the appropriate log level (debug, info, error...) into the message manager to fit the level provided by the ANSYS product.
+As a reminder, the record is an object containing all kinds of information related to the event logged.
+
+This custom handler is used into the new logger instance (the one based on the standard library).
+A good practice before adding a handler to any logger is to verify if an appropriate handler is already available, in order to avoid any conflict or message duplication.
diff --git a/logging/README.md b/logging/README.md
new file mode 100644
index 000000000..4bb356371
--- /dev/null
+++ b/logging/README.md
@@ -0,0 +1,5 @@
+#### Example Logging Modules
+
+These modules demonstrate one way of logging at the global and
+instance level for a PyAnsys library that exposes and extends a
+service-based application.
diff --git a/logging/pyansys_logging.py b/logging/pyansys_logging.py
new file mode 100644
index 000000000..ff7288726
--- /dev/null
+++ b/logging/pyansys_logging.py
@@ -0,0 +1,484 @@
+from copy import copy
+from datetime import datetime
+import logging
+from logging import DEBUG, INFO, WARN, ERROR, CRITICAL
+import sys
+
+# Default configuration
+LOG_LEVEL = logging.DEBUG
+FILE_NAME = "PyProject.log"
+
+
+# Formatting
+STDOUT_MSG_FORMAT = (
+ "%(levelname)s - %(instance_name)s - %(module)s - %(funcName)s - %(message)s"
+)
+FILE_MSG_FORMAT = STDOUT_MSG_FORMAT
+
+DEFAULT_STDOUT_HEADER = """
+LEVEL - INSTANCE NAME - MODULE - FUNCTION - MESSAGE
+"""
+DEFAULT_FILE_HEADER = DEFAULT_STDOUT_HEADER
+
+NEW_SESSION_HEADER = f"""
+===============================================================================
+ NEW SESSION - {datetime.now().strftime("%m/%d/%Y, %H:%M:%S")}
+==============================================================================="""
+
+string_to_loglevel = {
+ "DEBUG": DEBUG,
+ "INFO": INFO,
+ "WARN": WARN,
+ "WARNING": WARN,
+ "ERROR": ERROR,
+ "CRITICAL": CRITICAL,
+}
+
+
+class InstanceCustomAdapter(logging.LoggerAdapter):
+ """This is key to keep the reference to a product instance name dynamic.
+
+ If we use the standard approach which is supplying ``extra`` input
+ to the logger, we would need to keep inputting product instances
+ every time a log is created.
+
+ Using adapters we just need to specify the product instance we refer
+ to once.
+ """
+
+ # level is kept for compatibility with ``suppress_logging``,
+ # but it does nothing.
+ level = None
+ file_handler = None
+ stdout_handler = None
+
+ def __init__(self, logger, extra=None):
+ self.logger = logger
+ self.extra = extra
+ self.file_handler = logger.file_handler
+ self.std_out_handler = logger.std_out_handler
+
+ def process(self, msg, kwargs):
+ kwargs["extra"] = {}
+ # These are the extra parameters sent to log
+ # here self.extra is the argument pass to the log records.
+ kwargs["extra"]["instance_name"] = self.extra.get_name()
+ return msg, kwargs
+
+ def log_to_file(self, filename=FILE_NAME, level=LOG_LEVEL):
+ """Add file handler to logger.
+
+ Parameters
+ ----------
+ filename : str, optional
+ Name of the file where the logs are recorded. By default
+ ``PyProject.log``
+ level : str, optional
+ Level of logging, for example ``'DEBUG'``. By default
+ ``logging.DEBUG``.
+ """
+
+ self.logger = add_file_handler(
+ self.logger, filename=filename, level=level, write_headers=True
+ )
+ self.file_handler = self.logger.file_handler
+
+ def log_to_stdout(self, level=LOG_LEVEL):
+ """Add standard output handler to the logger.
+
+ Parameters
+ ----------
+ level : str, optional
+ Level of logging record. By default ``logging.DEBUG``.
+ """
+ if self.std_out_handler:
+ raise Exception("Stdout logger already defined.")
+
+ self.logger = add_stdout_handler(self.logger, level=level)
+ self.std_out_handler = self.logger.std_out_handler
+
+ def setLevel(self, level="DEBUG"):
+ """Change the log level of the object and the attached handlers."""
+ self.logger.setLevel(level)
+ for each_handler in self.logger.handlers:
+ each_handler.setLevel(level)
+ self.level = level
+
+
+class PyAnsysPercentStyle(logging.PercentStyle):
+ def __init__(self, fmt, *, defaults=None):
+ self._fmt = fmt or self.default_format
+ self._defaults = defaults
+
+ def _format(self, record):
+ defaults = self._defaults
+ if defaults:
+ values = defaults | record.__dict__
+ else:
+ values = record.__dict__
+
+ # Here we can make any changes we want in the record, for
+ # example adding a key.
+
+ # We could create an if here if we want conditional formatting, and even
+ # change the record.__dict__.
+ # Since now we don't want to create conditional fields, it is fine to keep
+ # the same MSG_FORMAT for all of them.
+
+ # For the case of logging exceptions to the logger.
+ values.setdefault("instance_name", "")
+
+ return STDOUT_MSG_FORMAT % values
+
+
+class PyProjectFormatter(logging.Formatter):
+ """Customized ``Formatter`` class used to overwrite the defaults format styles."""
+
+ def __init__(
+ self,
+ fmt=STDOUT_MSG_FORMAT,
+ datefmt=None,
+ style="%",
+ validate=True,
+ defaults=None,
+ ):
+ if sys.version_info[1] < 8:
+ super().__init__(fmt, datefmt, style)
+ else:
+ # 3.8: The validate parameter was added
+ super().__init__(fmt, datefmt, style, validate)
+ self._style = PyAnsysPercentStyle(fmt, defaults=defaults) # overwriting
+
+
+class InstanceFilter(logging.Filter):
+ """Ensures that instance_name record always exists."""
+
+ def filter(self, record):
+ if not hasattr(record, "instance_name"):
+ record.instance_name = ""
+ return True
+
+
+class Logger:
+ """Logger used for each PyProject session.
+
+ This class allows you to add a handler to a file or standard output.
+
+ Parameters
+ ----------
+ level : int, optional
+ Logging level to filter the message severity allowed in the logger.
+ The default is ``logging.DEBUG``.
+ to_file : bool, optional
+ Write log messages to a file. The default is ``False``.
+ to_stdout : bool, optional
+ Write log messages into the standard output. The
+ default is ``True``.
+ filename : str, optional
+ Name of the file where log messages are written to.
+ The default is ``None``.
+ """
+
+ file_handler = None
+ std_out_handler = None
+ _level = logging.DEBUG
+ _instances = {}
+
+ def __init__(
+ self, level=logging.DEBUG, to_file=False, to_stdout=True, filename=FILE_NAME, cleanup=True
+ ):
+ """Initialize Logger class."""
+
+ self.logger = logging.getLogger(
+ "pyproject_global"
+ ) # Creating default main logger.
+ self.logger.addFilter(InstanceFilter())
+ self.logger.setLevel(level)
+ self.logger.propagate = True
+ self.level = self.logger.level # TODO: TO REMOVE
+
+ # Writing logging methods.
+ self.debug = self.logger.debug
+ self.info = self.logger.info
+ self.warning = self.logger.warning
+ self.error = self.logger.error
+ self.critical = self.logger.critical
+ self.log = self.logger.log
+
+ if to_file or filename != FILE_NAME:
+ # We record to file.
+ self.log_to_file(filename=filename, level=level)
+
+ if to_stdout:
+ self.log_to_stdout(level=level)
+
+ self.add_handling_uncaught_expections(
+ self.logger
+ ) # Using logger to record unhandled exceptions.
+
+ self.cleanup = cleanup
+
+ def log_to_file(self, filename=FILE_NAME, level=LOG_LEVEL):
+ """Add file handler to logger.
+
+ Parameters
+ ----------
+ filename : str, optional
+ Name of the file where the logs are recorded. By default FILE_NAME
+ level : str, optional
+ Level of logging. E.x. 'DEBUG'. By default LOG_LEVEL
+ """
+
+ self = add_file_handler(
+ self, filename=filename, level=level, write_headers=True
+ )
+
+ def log_to_stdout(self, level=LOG_LEVEL):
+ """Add standard output handler to the logger.
+
+ Parameters
+ ----------
+ level : str, optional
+ Level of logging record. By default LOG_LEVEL
+ """
+
+ self = add_stdout_handler(self, level=level)
+
+ def setLevel(self, level="DEBUG"):
+ """Change the log level of the object and the attached handlers."""
+ self.logger.setLevel(level)
+ for each_handler in self.logger.handlers:
+ each_handler.setLevel(level)
+ self._level = level
+
+ def _make_child_logger(self, sufix, level):
+ """Create a child logger.
+
+ Create a child logger either using ``getChild`` or copying
+ attributes between ``pyproject_global`` logger and the new
+ one.
+
+ """
+ logger = logging.getLogger(sufix)
+ logger.std_out_handler = None
+ logger.file_handler = None
+
+        if self.logger.hasHandlers():
+ for each_handler in self.logger.handlers:
+ new_handler = copy(each_handler)
+
+ if each_handler == self.file_handler:
+ logger.file_handler = new_handler
+ elif each_handler == self.std_out_handler:
+ logger.std_out_handler = new_handler
+
+ if level:
+ # The logger handlers are copied and changed the
+ # loglevel if the specified log level is lower
+ # than the one of the global.
+ if each_handler.level > string_to_loglevel[level.upper()]:
+ new_handler.setLevel(level)
+
+ logger.addHandler(new_handler)
+
+ if level:
+ if isinstance(level, str):
+ level = string_to_loglevel[level.upper()]
+ logger.setLevel(level)
+
+ else:
+ logger.setLevel(self.logger.level)
+
+ logger.propagate = True
+ return logger
+
+ def add_child_logger(self, sufix, level=None):
+ """Add a child logger to the main logger.
+
+ This logger is more general than an instance logger which is designed
+ to track the state of the application instances.
+
+ If the logging level is in the arguments, a new logger with a
+ reference to the ``_global`` logger handlers is created
+ instead of a child.
+
+ Parameters
+ ----------
+ sufix : str
+ Name of the logger.
+ level : str
+ Level of logging
+
+ Returns
+ -------
+ logging.logger
+ Logger class.
+ """
+ name = self.logger.name + "." + sufix
+        self._instances[name] = self._make_child_logger(name, level)
+ return self._instances[name]
+
+ def _add_product_instance_logger(self, name, product_instance, level):
+ if isinstance(name, str):
+ instance_logger = InstanceCustomAdapter(
+ self._make_child_logger(name, level), product_instance
+ )
+        elif name is None:
+ instance_logger = InstanceCustomAdapter(
+ self._make_child_logger("NO_NAMED_YET", level), product_instance
+ )
+ else:
+ raise TypeError(
+                f"``name`` parameter must be a string or None, not {type(name)}"
+ )
+
+ return instance_logger
+
+ def add_instance_logger(self, name, product_instance, level=None):
+ """Create a logger for an application instance.
+
+ This instance logger is a logger with an adapter which add the
+ contextual information such as instance
+ name. This logger is returned and you can use it to log events
+ as a normal logger. It is also stored in the ``_instances``
+ attribute.
+
+ Parameters
+ ----------
+ name : str
+ Name for the new logger
+ product_instance : ansys.product.service.module.ProductClass
+ Class instance. This must contain the attribute ``name``.
+
+ Returns
+ -------
+ InstanceCustomAdapter
+ Logger adapter customized to add additional information to
+ the logs. You can use this class to log events in the
+ same way you would with a logger class.
+
+ Raises
+ ------
+ TypeError
+ You can only input strings as ``name`` to this method.
+ """
+ count_ = 0
+ new_name = name
+        while new_name in logging.root.manager.loggerDict.keys():
+ count_ += 1
+ new_name = name + "_" + str(count_)
+
+ self._instances[new_name] = self._add_product_instance_logger(
+ new_name, product_instance, level
+ )
+ return self._instances[new_name]
+
+ def __getitem__(self, key):
+ if key in self._instances.keys():
+ return self._instances[key]
+ else:
+ raise KeyError(f"There are no instances with name {key}")
+
+ def add_handling_uncaught_expections(self, logger):
+ """This just redirects the output of an exception to the logger."""
+
+ def handle_exception(exc_type, exc_value, exc_traceback):
+ if issubclass(exc_type, KeyboardInterrupt):
+ sys.__excepthook__(exc_type, exc_value, exc_traceback)
+ return
+ logger.critical(
+ "Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback)
+ )
+
+ sys.excepthook = handle_exception
+
+ def __del__(self):
+ """Close the logger and all its handlers."""
+ self.logger.debug("Collecting logger")
+ if self.cleanup:
+ try:
+ for handler in self.logger.handlers:
+ handler.close()
+ self.logger.removeHandler(handler)
+ except Exception as e:
+ try:
+ if self.logger is not None:
+ self.logger.error("The logger was not deleted properly.")
+ except Exception:
+ pass
+ else:
+ self.logger.debug("Collecting but not exiting due to 'cleanup = False'")
+
+
+def add_file_handler(logger, filename=FILE_NAME, level=LOG_LEVEL, write_headers=False):
+ """Add a file handler to the input.
+
+ Parameters
+ ----------
+ logger : logging.Logger or logging.Logger
+ Logger where to add the file handler.
+ filename : str, optional
+ Name of the output file. By default FILE_NAME
+ level : str, optional
+ Level of log recording. By default LOG_LEVEL
+ write_headers : bool, optional
+ Record the headers to the file. By default ``False``.
+
+ Returns
+ -------
+ logger
+ Return the logger or Logger object.
+ """
+
+ file_handler = logging.FileHandler(filename)
+ file_handler.setLevel(level)
+ file_handler.setFormatter(logging.Formatter(FILE_MSG_FORMAT))
+
+ if isinstance(logger, Logger):
+ logger.file_handler = file_handler
+ logger.logger.addHandler(file_handler)
+
+ elif isinstance(logger, logging.Logger):
+ logger.file_handler = file_handler
+ logger.addHandler(file_handler)
+
+ if write_headers:
+ file_handler.stream.write(NEW_SESSION_HEADER)
+ file_handler.stream.write(DEFAULT_FILE_HEADER)
+
+ return logger
+
+
+def add_stdout_handler(logger, level=LOG_LEVEL, write_headers=False):
+ """Add a stream handler to the logger.
+
+ Parameters
+ ----------
+ logger : logging.Logger or logging.Logger
+ Logger where to add the stream handler.
+ level : str, optional
+ Level of log recording. By default ``logging.DEBUG``.
+ write_headers : bool, optional
+ Record the headers to the stream. By default ``False``.
+
+ Returns
+ -------
+ logger
+ The logger or Logger object.
+ """
+
+ std_out_handler = logging.StreamHandler(sys.stdout)
+ std_out_handler.setLevel(level)
+ std_out_handler.setFormatter(PyProjectFormatter(STDOUT_MSG_FORMAT))
+
+ if isinstance(logger, Logger):
+ logger.std_out_handler = std_out_handler
+ logger.logger.addHandler(std_out_handler)
+
+ elif isinstance(logger, logging.Logger):
+ logger.addHandler(std_out_handler)
+
+ if write_headers:
+ std_out_handler.stream.write(DEFAULT_STDOUT_HEADER)
+
+ return logger
diff --git a/logging/test_pyansys_logging.py b/logging/test_pyansys_logging.py
new file mode 100644
index 000000000..44eb377ad
--- /dev/null
+++ b/logging/test_pyansys_logging.py
@@ -0,0 +1,104 @@
+import io
+import logging
+import os
+import sys
+import weakref
+
+import pyansys_logging
+
+
+def test_default_logger():
+ """Create a logger with default options.
+ Only stdout logger must be used."""
+
+ capture = CaptureStdOut()
+ with capture:
+ test_logger = pyansys_logging.Logger()
+ test_logger.info("Test stdout")
+
+ assert "INFO - - test_pyansys_logging - test_default_logger - Test stdout" in capture.content
+ # File handlers are not activated.
+ assert os.path.exists(os.path.exists(os.path.join(os.getcwd(), "PyProject.log")))
+
+
+def test_level_stdout():
+ """Create a logger with default options.
+ Only stdout logger must be used."""
+
+ capture = CaptureStdOut()
+ with capture:
+ test_logger = pyansys_logging.Logger(level=logging.INFO)
+ test_logger.debug("Debug stdout with level=INFO")
+ test_logger.info("Info stdout with level=INFO")
+ test_logger.warning("Warning stdout with level=INFO")
+ test_logger.error("Error stdout with level=INFO")
+ test_logger.critical("Critical stdout with level=INFO")
+
+ # Modify the level
+ test_logger.setLevel(level=logging.WARNING)
+ test_logger.debug("Debug stdout with level=WARNING")
+ test_logger.info("Info stdout with level=WARNING")
+ test_logger.warning("Warning stdout with level=WARNING")
+ test_logger.error("Error stdout with level=WARNING")
+ test_logger.critical("Critical stdout with level=WARNING")
+
+ # level=INFO
+ assert "DEBUG - - test_pyansys_logging - test_level_stdout - Debug stdout with level=INFO" not in capture.content
+ assert "INFO - - test_pyansys_logging - test_level_stdout - Info stdout with level=INFO" in capture.content
+ assert "WARNING - - test_pyansys_logging - test_level_stdout - Warning stdout with level=INFO" in capture.content
+ assert "ERROR - - test_pyansys_logging - test_level_stdout - Error stdout with level=INFO" in capture.content
+ assert "CRITICAL - - test_pyansys_logging - test_level_stdout - Critical stdout with level=INFO" in capture.content
+ # level=WARNING
+ assert "INFO - - test_pyansys_logging - test_level_stdout - Info stdout with level=WARNING" not in capture.content
+ assert (
+ "WARNING - - test_pyansys_logging - test_level_stdout - Warning stdout with level=WARNING" in capture.content
+ )
+ assert "ERROR - - test_pyansys_logging - test_level_stdout - Error stdout with level=WARNING" in capture.content
+ assert (
+ "CRITICAL - - test_pyansys_logging - test_level_stdout - Critical stdout with level=WARNING" in capture.content
+ )
+
+ # File handlers are not activated.
+ assert os.path.exists(os.path.exists(os.path.join(os.getcwd(), "PyProject.log")))
+
+
+def test_file_handlers(tmpdir):
+ """Activate a file handler different from `PyProject.log`."""
+
+ file_logger = tmpdir.mkdir("sub").join("test_logger.txt")
+
+ test_logger = pyansys_logging.Logger(to_file=True, filename=file_logger)
+ test_logger.info("Test Misc File")
+
+ with open(file_logger, "r") as f:
+ content = f.readlines()
+
+ assert os.path.exists(file_logger) # The file handler is not the default PyProject.Log
+ assert len(content) == 6
+ assert "NEW SESSION" in content[2]
+ assert "===============================================================================" in content[3]
+ assert "LEVEL - INSTANCE NAME - MODULE - FUNCTION - MESSAGE" in content[4]
+ assert "INFO - - test_pyansys_logging - test_file_handlers - Test Misc File" in content[5]
+
+ # Delete the logger and its file handler.
+ test_logger_ref = weakref.ref(test_logger)
+ del test_logger
+ assert test_logger_ref() is None
+
+
+class CaptureStdOut:
+ """Capture standard output with a context manager."""
+
+ def __init__(self):
+ self._stream = io.StringIO()
+
+ def __enter__(self):
+ sys.stdout = self._stream
+
+ def __exit__(self, type, value, traceback):
+ sys.stdout = sys.__stdout__
+
+ @property
+ def content(self):
+ """Return the captured content."""
+ return self._stream.getvalue()
diff --git a/requirements_docs.txt b/requirements_docs.txt
index 8b485d918..352bef49b 100644
--- a/requirements_docs.txt
+++ b/requirements_docs.txt
@@ -1,2 +1,4 @@
-pyansys-sphinx-theme
-Sphinx==4.0.3 # using this version due to the link to the logo
+Sphinx==4.3.2
+pyansys-sphinx-theme==0.2.0
+sphinx-copybutton==0.4.0
+sphinx_toolbox
diff --git a/requirements_style.txt b/requirements_style.txt
index f249615f6..f85124708 100644
--- a/requirements_style.txt
+++ b/requirements_style.txt
@@ -1,3 +1,3 @@
-codespell==2.0.0
+codespell==2.1.0