diff --git a/.coveragerc b/.coveragerc
new file mode 100644
index 000000000000..f7e6eb212bc8
--- /dev/null
+++ b/.coveragerc
@@ -0,0 +1,4 @@
+[report]
+sort = Cover
+omit =
+ .env/*
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 000000000000..176a458f94e0
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1 @@
+* text=auto
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
new file mode 100644
index 000000000000..260b9704eda7
--- /dev/null
+++ b/.github/CODEOWNERS
@@ -0,0 +1,88 @@
+# This is a comment.
+# Each line is a file pattern followed by one or more owners.
+
+# More details are here: https://help.github.com/articles/about-codeowners/
+
+# The '*' pattern is global owners.
+
+# Order is important. The last matching pattern has the most precedence.
+
+/.* @cclauss @dhruvmanila
+
+# /arithmetic_analysis/
+
+# /backtracking/
+
+# /bit_manipulation/
+
+# /blockchain/
+
+# /boolean_algebra/
+
+# /cellular_automata/
+
+# /ciphers/ @cclauss # TODO: Uncomment this line after Hacktoberfest
+
+# /compression/
+
+# /computer_vision/
+
+# /conversions/ @cclauss # TODO: Uncomment this line after Hacktoberfest
+
+# /data_structures/ @cclauss # TODO: Uncomment this line after Hacktoberfest
+
+/digital_image_processing/ @mateuszz0000
+
+# /divide_and_conquer/
+
+/dynamic_programming/ @Kush1101
+
+# /file_transfer/
+
+# /fuzzy_logic/
+
+# /genetic_algorithm/
+
+# /geodesy/
+
+# /graphics/
+
+# /graphs/
+
+# /greedy_method/
+
+# /hashes/
+
+# /images/
+
+# /linear_algebra/
+
+# /machine_learning/
+
+/maths/ @Kush1101
+
+# /matrix/
+
+# /networking_flow/
+
+# /neural_network/
+
+# /other/ @cclauss # TODO: Uncomment this line after Hacktoberfest
+
+/project_euler/ @dhruvmanila @Kush1101
+
+# /quantum/
+
+# /scheduling/
+
+# /scripts/
+
+# /searches/
+
+/sorts/ @mateuszz0000
+
+# /strings/ @cclauss # TODO: Uncomment this line after Hacktoberfest
+
+# /traversals/
+
+/web_programming/ @cclauss
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
new file mode 100644
index 000000000000..103ecf7c288a
--- /dev/null
+++ b/.github/pull_request_template.md
@@ -0,0 +1,20 @@
+### **Describe your change:**
+
+
+
+* [ ] Add an algorithm?
+* [ ] Fix a bug or typo in an existing algorithm?
+* [ ] Documentation change?
+
+### **Checklist:**
+* [ ] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
+* [ ] This pull request is all my own work -- I have not plagiarized.
+* [ ] I know that pull requests will not be merged if they fail the automated tests.
+* [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
+* [ ] All new Python files are placed inside an existing directory.
+* [ ] All filenames are in all lowercase characters with no spaces or dashes.
+* [ ] All functions and variable names follow Python naming conventions.
+* [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
+* [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
+* [ ] All new algorithms have a URL in their comments that points to Wikipedia or another similar explanation.
+* [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
diff --git a/.github/stale.yml b/.github/stale.yml
new file mode 100644
index 000000000000..36ca56266b26
--- /dev/null
+++ b/.github/stale.yml
@@ -0,0 +1,63 @@
+# Configuration for probot-stale - https://github.com/probot/stale
+
+# Number of days of inactivity before an Issue or Pull Request becomes stale
+daysUntilStale: 30
+
+# Number of days of inactivity before an Issue or Pull Request with the stale label is closed.
+# Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale.
+daysUntilClose: 7
+
+# Only issues or pull requests with all of these labels are check if stale. Defaults to `[]` (disabled)
+onlyLabels: []
+
+# Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable
+exemptLabels:
+ - "Status: on hold"
+
+# Set to true to ignore issues in a project (defaults to false)
+exemptProjects: false
+
+# Set to true to ignore issues in a milestone (defaults to false)
+exemptMilestones: false
+
+# Set to true to ignore issues with an assignee (defaults to false)
+exemptAssignees: false
+
+# Label to use when marking as stale
+staleLabel: stale
+
+# Limit the number of actions per hour, from 1-30. Default is 30
+limitPerRun: 5
+
+# Comment to post when removing the stale label.
+# unmarkComment: >
+# Your comment here.
+
+# Optionally, specify configuration settings that are specific to just 'issues' or 'pulls':
+pulls:
+ # Comment to post when marking as stale. Set to `false` to disable
+ markComment: >
+ This pull request has been automatically marked as stale because it has not had
+ recent activity. It will be closed if no further activity occurs. Thank you
+ for your contributions.
+
+ # Comment to post when closing a stale Pull Request.
+ closeComment: >
+ Please reopen this pull request once you commit the changes requested
+ or make improvements on the code. If this is not the case and you need
+ some help, feel free to seek help from our [Gitter](https://gitter.im/TheAlgorithms)
+ or ping one of the reviewers. Thank you for your contributions!
+
+issues:
+ # Comment to post when marking as stale. Set to `false` to disable
+ markComment: >
+ This issue has been automatically marked as stale because it has not had
+ recent activity. It will be closed if no further activity occurs. Thank you
+ for your contributions.
+
+ # Comment to post when closing a stale Issue.
+ closeComment: >
+ Please reopen this issue once you add more information and updates here.
+ If this is not the case and you need some help, feel free to seek help
+ from our [Gitter](https://gitter.im/TheAlgorithms) or ping one of the
+ reviewers. Thank you for your contributions!
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
new file mode 100644
index 000000000000..9e15d18ade8e
--- /dev/null
+++ b/.github/workflows/build.yml
@@ -0,0 +1,27 @@
+name: "build"
+
+on:
+ pull_request:
+ schedule:
+ - cron: "0 0 * * *" # Run everyday
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+ - uses: actions/setup-python@v2
+ with:
+ python-version: "3.9"
+ - uses: actions/cache@v2
+ with:
+ path: ~/.cache/pip
+ key: ${{ runner.os }}-pip-${{ hashFiles('requirements.txt') }}
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip setuptools six wheel
+ python -m pip install pytest-cov -r requirements.txt
+ - name: Run tests
+ run: pytest --doctest-modules --ignore=project_euler/ --ignore=scripts/ --cov-report=term-missing:skip-covered --cov=. .
+ - if: ${{ success() }}
+ run: scripts/build_directory_md.py 2>&1 | tee DIRECTORY.md
diff --git a/.github/workflows/directory_writer.yml b/.github/workflows/directory_writer.yml
new file mode 100644
index 000000000000..be8154a32696
--- /dev/null
+++ b/.github/workflows/directory_writer.yml
@@ -0,0 +1,21 @@
+# The objective of this GitHub Action is to update the DIRECTORY.md file (if needed)
+# when doing a git push
+name: directory_writer
+on: [push]
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v1 # v1, NOT v2
+ - uses: actions/setup-python@v2
+ - name: Write DIRECTORY.md
+ run: |
+ scripts/build_directory_md.py 2>&1 | tee DIRECTORY.md
+ git config --global user.name github-actions
+ git config --global user.email '${GITHUB_ACTOR}@users.noreply.github.com'
+ git remote set-url origin https://x-access-token:${{ secrets.GITHUB_TOKEN }}@github.com/$GITHUB_REPOSITORY
+ - name: Update DIRECTORY.md
+ run: |
+ git add DIRECTORY.md
+ git commit -am "updating DIRECTORY.md" || true
+ git push --force origin HEAD:$GITHUB_REF || true
diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml
new file mode 100644
index 000000000000..96175cfecea5
--- /dev/null
+++ b/.github/workflows/pre-commit.yml
@@ -0,0 +1,21 @@
+name: pre-commit
+
+on: [push, pull_request]
+
+jobs:
+ pre-commit:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+ - uses: actions/cache@v2
+ with:
+ path: |
+ ~/.cache/pre-commit
+ ~/.cache/pip
+ key: ${{ runner.os }}-pre-commit-${{ hashFiles('.pre-commit-config.yaml') }}
+ - uses: actions/setup-python@v2
+ - name: Install pre-commit
+ run: |
+ python -m pip install --upgrade pip
+ python -m pip install --upgrade pre-commit
+ - run: pre-commit run --verbose --all-files --show-diff-on-failure
diff --git a/.github/workflows/project_euler.yml b/.github/workflows/project_euler.yml
new file mode 100644
index 000000000000..995295fcaa9a
--- /dev/null
+++ b/.github/workflows/project_euler.yml
@@ -0,0 +1,35 @@
+on:
+ pull_request:
+ # Run only if a file is changed within the project_euler directory and related files
+ paths:
+ - "project_euler/**"
+ - ".github/workflows/project_euler.yml"
+ - "scripts/validate_solutions.py"
+ schedule:
+ - cron: "0 0 * * *" # Run everyday
+
+name: "Project Euler"
+
+jobs:
+ project-euler:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+ - uses: actions/setup-python@v2
+ - name: Install pytest and pytest-cov
+ run: |
+ python -m pip install --upgrade pip
+ python -m pip install --upgrade pytest pytest-cov
+ - run: pytest --doctest-modules --cov-report=term-missing:skip-covered --cov=project_euler/ project_euler/
+ validate-solutions:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+ - uses: actions/setup-python@v2
+ - name: Install pytest and requests
+ run: |
+ python -m pip install --upgrade pip
+ python -m pip install --upgrade pytest requests
+ - run: pytest scripts/validate_solutions.py
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.gitignore b/.gitignore
index 5f9132236c26..574cdf312836 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,9 +7,7 @@ __pycache__/
*.so
# Distribution / packaging
-.vscode/
.Python
-env/
build/
develop-eggs/
dist/
@@ -21,12 +19,14 @@ lib64/
parts/
sdist/
var/
+wheels/
*.egg-info/
.installed.cfg
*.egg
+MANIFEST
# PyInstaller
-# Usually these files are written by a python script from a template
+# Usually these files are written by a Python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
@@ -43,8 +43,9 @@ htmlcov/
.cache
nosetests.xml
coverage.xml
-*,cover
+*.cover
.hypothesis/
+.pytest_cache/
# Translations
*.mo
@@ -53,6 +54,7 @@ coverage.xml
# Django stuff:
*.log
local_settings.py
+db.sqlite3
# Flask stuff:
instance/
@@ -67,7 +69,7 @@ docs/_build/
# PyBuilder
target/
-# IPython Notebook
+# Jupyter Notebook
.ipynb_checkpoints
# pyenv
@@ -76,17 +78,32 @@ target/
# celery beat schedule file
celerybeat-schedule
-# dotenv
-.env
+# SageMath parsed files
+*.sage.py
-# virtualenv
+# Environments
+.env
+.venv
+env/
venv/
ENV/
+env.bak/
+venv.bak/
# Spyder project settings
.spyderproject
+.spyproject
# Rope project settings
.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+
+.DS_Store
.idea
-.DS_Store
\ No newline at end of file
+.try
+.vscode/
diff --git a/.gitpod.yml b/.gitpod.yml
new file mode 100644
index 000000000000..5975b8b8e983
--- /dev/null
+++ b/.gitpod.yml
@@ -0,0 +1,2 @@
+tasks:
+ - init: pip3 install -r ./requirements.txt
diff --git a/.lgtm.yml b/.lgtm.yml
deleted file mode 100644
index ec550ab72705..000000000000
--- a/.lgtm.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-extraction:
- python:
- python_setup:
- version: 3
- after_prepare:
- - python3 -m pip install --upgrade --user flake8
- before_index:
- - python3 -m flake8 --version # flake8 3.6.0 on CPython 3.6.5 on Linux
- # stop the build if there are Python syntax errors or undefined names
- - python3 -m flake8 . --count --select=E901,E999,F821,F822,F823 --show-source --statistics
- # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
- - python3 -m flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 000000000000..a3288e1c5eef
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,60 @@
+repos:
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v3.2.0
+ hooks:
+ - id: check-executables-have-shebangs
+ - id: check-yaml
+ - id: end-of-file-fixer
+ types: [python]
+ - id: trailing-whitespace
+ exclude: |
+ (?x)^(
+ data_structures/heap/binomial_heap.py
+ )$
+ - id: requirements-txt-fixer
+ - repo: https://github.com/psf/black
+ rev: stable
+ hooks:
+ - id: black
+ - repo: https://github.com/PyCQA/isort
+ rev: 5.5.3
+ hooks:
+ - id: isort
+ args:
+ - --profile=black
+ - repo: https://gitlab.com/pycqa/flake8
+ rev: 3.8.3
+ hooks:
+ - id: flake8
+ args:
+ - --ignore=E203,W503
+ - --max-complexity=25
+ - --max-line-length=88
+# FIXME: fix mypy errors and then uncomment this
+# - repo: https://github.com/pre-commit/mirrors-mypy
+# rev: v0.782
+# hooks:
+# - id: mypy
+# args:
+# - --ignore-missing-imports
+ - repo: https://github.com/codespell-project/codespell
+ rev: v1.17.1
+ hooks:
+ - id: codespell
+ args:
+ - --ignore-words-list=ans,fo,followings,hist,iff,mater,secant,som,tim
+ - --skip="./.*,./other/dictionary.txt,./other/words,./project_euler/problem_022/p022_names.txt"
+ - --quiet-level=2
+ exclude: |
+ (?x)^(
+ other/dictionary.txt |
+ other/words |
+ project_euler/problem_022/p022_names.txt
+ )$
+ - repo: local
+ hooks:
+ - id: validate-filenames
+ name: Validate filenames
+ entry: ./scripts/validate_filenames.py
+ language: script
+ pass_filenames: false
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index 5fba6987bb66..000000000000
--- a/.travis.yml
+++ /dev/null
@@ -1,26 +0,0 @@
-language: python
-cache: pip
-python:
- - 2.7
- - 3.6
- #- nightly
- #- pypy
- #- pypy3
-matrix:
- allow_failures:
- - python: nightly
- - python: pypy
- - python: pypy3
-install:
- #- pip install -r requirements.txt
- - pip install flake8 # pytest # add another testing frameworks later
-before_script:
- # stop the build if there are Python syntax errors or undefined names
- - flake8 . --count --select=E901,E999,F821,F822,F823 --show-source --statistics
- # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
- - flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
-script:
- - true # pytest --capture=sys # add other tests here
-notifications:
- on_success: change
- on_failure: change # `always` will be the setting once code changes slow down
diff --git a/.vs/Python/v15/.suo b/.vs/Python/v15/.suo
deleted file mode 100644
index 0e3f4807567d..000000000000
Binary files a/.vs/Python/v15/.suo and /dev/null differ
diff --git a/.vs/slnx.sqlite b/.vs/slnx.sqlite
deleted file mode 100644
index 2fe4a449f121..000000000000
Binary files a/.vs/slnx.sqlite and /dev/null differ
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 000000000000..e4c81a5ecd98
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,180 @@
+# Contributing guidelines
+
+## Before contributing
+
+Welcome to [TheAlgorithms/Python](https://github.com/TheAlgorithms/Python)! Before sending your pull requests, make sure that you **read the whole guidelines**. If you have any doubt on the contributing guide, please feel free to [state it clearly in an issue](https://github.com/TheAlgorithms/Python/issues/new) or ask the community in [Gitter](https://gitter.im/TheAlgorithms).
+
+## Contributing
+
+### Contributor
+
+We are very happy that you consider implementing algorithms and data structure for others! This repository is referenced and used by learners from all over the globe. Being one of our contributors, you agree and confirm that:
+
+- You did your work - no plagiarism allowed
+ - Any plagiarized work will not be merged.
+- Your work will be distributed under [MIT License](LICENSE.md) once your pull request is merged
+- Your submitted work fulfils or mostly fulfils our styles and standards
+
+**New implementation** is welcome! For example, new solutions for a problem, different representations for a graph data structure or algorithm designs with different complexity but **identical implementation** of an existing implementation is not allowed. Please check whether the solution is already implemented or not before submitting your pull request.
+
+**Improving comments** and **writing proper tests** are also highly welcome.
+
+### Contribution
+
+We appreciate any contribution, from fixing a grammar mistake in a comment to implementing complex algorithms. Please read this section if you are contributing your work.
+
+Your contribution will be tested by our [automated testing on Travis CI](https://travis-ci.org/TheAlgorithms/Python/pull_requests) to save time and mental energy. After you have submitted your pull request, you should see the Travis tests start to run at the bottom of your submission page. If those tests fail, then click on the ___details___ button and try to read through the Travis output to understand the failure. If you do not understand, please leave a comment on your submission page and a community member will try to help.
+
+Please help us keep our issue list small by adding `Fixes: #{$ISSUE_NO}` to the commit message of pull requests that resolve open issues. GitHub will use this tag to auto-close the issue when the PR is merged.
+
+#### What is an Algorithm?
+
+An Algorithm is one or more functions (or classes) that:
+* take one or more inputs,
+* perform some internal calculations or data manipulations,
+* return one or more outputs,
+* have minimal side effects (Ex. print(), plot(), read(), write()).
+
+Algorithms should be packaged in a way that would make it easy for readers to put them into larger programs.
+
+Algorithms should:
+* have intuitive class and function names that make their purpose clear to readers
+* use Python naming conventions and intuitive variable names to ease comprehension
+* be flexible to take different input values
+* have Python type hints for their input parameters and return values
+* raise Python exceptions (ValueError, etc.) on erroneous input values
+* have docstrings with clear explanations and/or URLs to source materials
+* contain doctests that test both valid and erroneous input values
+* return all calculation results instead of printing or plotting them
+
+Algorithms in this repo should not be how-to examples for existing Python packages. Instead, they should perform internal calculations or manipulations to convert input values into different output values. Those calculations or manipulations can use data types, classes, or functions of existing Python packages but each algorithm in this repo should add unique value.
+
+#### Pre-commit plugin
+Use [pre-commit](https://pre-commit.com/#installation) to automatically format your code to match our coding style:
+
+```bash
+python3 -m pip install pre-commit # required only once
+pre-commit install
+```
+That's it! The plugin will run every time you commit any changes. If there are any errors found during the run, fix them and commit those changes. You can even run the plugin manually on all files:
+
+```bash
+pre-commit run --all-files --show-diff-on-failure
+```
+
+#### Coding Style
+
+We want your work to be readable by others; therefore, we encourage you to note the following:
+
+- Please write in Python 3.7+. For instance: __print()__ is a function in Python 3 so __print "Hello"__ will _not_ work but __print("Hello")__ will.
+- Please focus hard on naming of functions, classes, and variables. Help your reader by using __descriptive names__ that can help you to remove redundant comments.
+ - Single letter variable names are _old school_ so please avoid them unless their life only spans a few lines.
+ - Expand acronyms because __gcd()__ is hard to understand but __greatest_common_divisor()__ is not.
+ - Please follow the [Python Naming Conventions](https://pep8.org/#prescriptive-naming-conventions) so variable_names and function_names should be lower_case, CONSTANTS in UPPERCASE, ClassNames should be CamelCase, etc.
+
+- We encourage the use of Python [f-strings](https://realpython.com/python-f-strings/#f-strings-a-new-and-improved-way-to-format-strings-in-python) where they make the code easier to read.
+
+- Please consider running [__psf/black__](https://github.com/python/black) on your Python file(s) before submitting your pull request. This is not yet a requirement but it does make your code more readable and automatically aligns it with much of [PEP 8](https://www.python.org/dev/peps/pep-0008/). There are other code formatters (autopep8, yapf) but the __black__ formatter is now hosted by the Python Software Foundation. To use it,
+
+ ```bash
+ python3 -m pip install black # only required the first time
+ black .
+ ```
+
+- All submissions will need to pass the test __flake8 . --ignore=E203,W503 --max-line-length=88__ before they will be accepted so if possible, try this test locally on your Python file(s) before submitting your pull request.
+
+ ```bash
+ python3 -m pip install flake8 # only required the first time
+ flake8 . --ignore=E203,W503 --max-line-length=88 --show-source
+ ```
+
+- Original code submissions require docstrings or comments to describe your work.
+
+- More on docstrings and comments:
+
+ If you used a Wikipedia article or some other source material to create your algorithm, please add the URL in a docstring or comment to help your reader.
+
+ The following are considered to be bad and may be requested to be improved:
+
+ ```python
+ x = x + 2 # increased by 2
+ ```
+
+ This is too trivial. Comments are expected to be explanatory. For comments, you can write them above, on or below a line of code, as long as you are consistent within the same piece of code.
+
+ We encourage you to put docstrings inside your functions but please pay attention to indentation of docstrings. The following is a good example:
+
+ ```python
+ def sum_ab(a, b):
+ """
+ Return the sum of two integers a and b.
+ """
+ return a + b
+ ```
+
+- Write tests (especially [__doctests__](https://docs.python.org/3/library/doctest.html)) to illustrate and verify your work. We highly encourage the use of _doctests on all functions_.
+
+ ```python
+ def sum_ab(a, b):
+ """
+ Return the sum of two integers a and b
+ >>> sum_ab(2, 2)
+ 4
+ >>> sum_ab(-2, 3)
+ 1
+ >>> sum_ab(4.9, 5.1)
+ 10.0
+ """
+ return a + b
+ ```
+
+ These doctests will be run by pytest as part of our automated testing so please try to run your doctests locally and make sure that they are found and pass:
+
+ ```bash
+ python3 -m doctest -v my_submission.py
+ ```
+
+ The use of the Python builtin __input()__ function is **not** encouraged:
+
+ ```python
+ input('Enter your input:')
+ # Or even worse...
+ input = eval(input("Enter your input: "))
+ ```
+
+ However, if your code uses __input()__ then we encourage you to gracefully deal with leading and trailing whitespace in user input by adding __.strip()__ as in:
+
+ ```python
+ starting_value = int(input("Please enter a starting value: ").strip())
+ ```
+
+ The use of [Python type hints](https://docs.python.org/3/library/typing.html) is encouraged for function parameters and return values. Our automated testing will run [mypy](http://mypy-lang.org) so run that locally before making your submission.
+
+ ```python
+ def sum_ab(a: int, b: int) -> int:
+ return a + b
+ ```
+
+- [__List comprehensions and generators__](https://docs.python.org/3/tutorial/datastructures.html#list-comprehensions) are preferred over the use of `lambda`, `map`, `filter`, `reduce` but the important thing is to demonstrate the power of Python in code that is easy to read and maintain.
+
+- Avoid importing external libraries for basic algorithms. Only use those libraries for complicated algorithms.
+- If you need a third party module that is not in the file __requirements.txt__, please add it to that file as part of your submission.
+
+#### Other Requirements for Submissions
+- If you are submitting code in the `project_euler/` directory, please also read [the dedicated Guideline](https://github.com/TheAlgorithms/Python/blob/master/project_euler/README.md) before contributing to our Project Euler library.
+- The file extension for code files should be `.py`. Jupyter Notebooks should be submitted to [TheAlgorithms/Jupyter](https://github.com/TheAlgorithms/Jupyter).
+- Strictly use snake_case (underscore_separated) in your file_name, as it will be easy to parse in future using scripts.
+- Please avoid creating new directories if at all possible. Try to fit your work into the existing directory structure.
+- If possible, follow the standard *within* the folder you are submitting to.
+- If you have modified/added code work, make sure the code compiles before submitting.
+- If you have modified/added documentation work, ensure your language is concise and contains no grammar errors.
+- Do not update the README.md or DIRECTORY.md file which will be periodically autogenerated by our Travis CI processes.
+- Add a corresponding explanation to [Algorithms-Explanation](https://github.com/TheAlgorithms/Algorithms-Explanation) (Optional but recommended).
+- All submissions will be tested with [__mypy__](http://www.mypy-lang.org) so we encourage to add [__Python type hints__](https://docs.python.org/3/library/typing.html) where it makes sense to do so.
+
+- Most importantly,
+ - **Be consistent in the use of these guidelines when submitting.**
+ - **Join** [Gitter](https://gitter.im/TheAlgorithms) **now!**
+ - Happy coding!
+
+Writer [@poyea](https://github.com/poyea), Jun 2019.
diff --git a/DIRECTORY.md b/DIRECTORY.md
new file mode 100644
index 000000000000..4f17cf9c03ed
--- /dev/null
+++ b/DIRECTORY.md
@@ -0,0 +1,916 @@
+
+## Arithmetic Analysis
+ * [Bisection](https://github.com/TheAlgorithms/Python/blob/master/arithmetic_analysis/bisection.py)
+ * [Gaussian Elimination](https://github.com/TheAlgorithms/Python/blob/master/arithmetic_analysis/gaussian_elimination.py)
+ * [In Static Equilibrium](https://github.com/TheAlgorithms/Python/blob/master/arithmetic_analysis/in_static_equilibrium.py)
+ * [Intersection](https://github.com/TheAlgorithms/Python/blob/master/arithmetic_analysis/intersection.py)
+ * [Lu Decomposition](https://github.com/TheAlgorithms/Python/blob/master/arithmetic_analysis/lu_decomposition.py)
+ * [Newton Forward Interpolation](https://github.com/TheAlgorithms/Python/blob/master/arithmetic_analysis/newton_forward_interpolation.py)
+ * [Newton Method](https://github.com/TheAlgorithms/Python/blob/master/arithmetic_analysis/newton_method.py)
+ * [Newton Raphson](https://github.com/TheAlgorithms/Python/blob/master/arithmetic_analysis/newton_raphson.py)
+ * [Secant Method](https://github.com/TheAlgorithms/Python/blob/master/arithmetic_analysis/secant_method.py)
+
+## Backtracking
+ * [All Combinations](https://github.com/TheAlgorithms/Python/blob/master/backtracking/all_combinations.py)
+ * [All Permutations](https://github.com/TheAlgorithms/Python/blob/master/backtracking/all_permutations.py)
+ * [All Subsequences](https://github.com/TheAlgorithms/Python/blob/master/backtracking/all_subsequences.py)
+ * [Coloring](https://github.com/TheAlgorithms/Python/blob/master/backtracking/coloring.py)
+ * [Hamiltonian Cycle](https://github.com/TheAlgorithms/Python/blob/master/backtracking/hamiltonian_cycle.py)
+ * [Knight Tour](https://github.com/TheAlgorithms/Python/blob/master/backtracking/knight_tour.py)
+ * [Minimax](https://github.com/TheAlgorithms/Python/blob/master/backtracking/minimax.py)
+ * [N Queens](https://github.com/TheAlgorithms/Python/blob/master/backtracking/n_queens.py)
+ * [N Queens Math](https://github.com/TheAlgorithms/Python/blob/master/backtracking/n_queens_math.py)
+ * [Rat In Maze](https://github.com/TheAlgorithms/Python/blob/master/backtracking/rat_in_maze.py)
+ * [Sudoku](https://github.com/TheAlgorithms/Python/blob/master/backtracking/sudoku.py)
+ * [Sum Of Subsets](https://github.com/TheAlgorithms/Python/blob/master/backtracking/sum_of_subsets.py)
+
+## Bit Manipulation
+ * [Binary And Operator](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/binary_and_operator.py)
+ * [Binary Count Setbits](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/binary_count_setbits.py)
+ * [Binary Count Trailing Zeros](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/binary_count_trailing_zeros.py)
+ * [Binary Or Operator](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/binary_or_operator.py)
+ * [Binary Xor Operator](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/binary_xor_operator.py)
+ * [Single Bit Manipulation Operations](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/single_bit_manipulation_operations.py)
+
+## Blockchain
+ * [Chinese Remainder Theorem](https://github.com/TheAlgorithms/Python/blob/master/blockchain/chinese_remainder_theorem.py)
+ * [Diophantine Equation](https://github.com/TheAlgorithms/Python/blob/master/blockchain/diophantine_equation.py)
+ * [Modular Division](https://github.com/TheAlgorithms/Python/blob/master/blockchain/modular_division.py)
+
+## Boolean Algebra
+ * [Quine Mc Cluskey](https://github.com/TheAlgorithms/Python/blob/master/boolean_algebra/quine_mc_cluskey.py)
+
+## Cellular Automata
+ * [Conways Game Of Life](https://github.com/TheAlgorithms/Python/blob/master/cellular_automata/conways_game_of_life.py)
+ * [One Dimensional](https://github.com/TheAlgorithms/Python/blob/master/cellular_automata/one_dimensional.py)
+
+## Ciphers
+ * [A1Z26](https://github.com/TheAlgorithms/Python/blob/master/ciphers/a1z26.py)
+ * [Affine Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/affine_cipher.py)
+ * [Atbash](https://github.com/TheAlgorithms/Python/blob/master/ciphers/atbash.py)
+ * [Base16](https://github.com/TheAlgorithms/Python/blob/master/ciphers/base16.py)
+ * [Base32](https://github.com/TheAlgorithms/Python/blob/master/ciphers/base32.py)
+ * [Base64 Encoding](https://github.com/TheAlgorithms/Python/blob/master/ciphers/base64_encoding.py)
+ * [Base85](https://github.com/TheAlgorithms/Python/blob/master/ciphers/base85.py)
+ * [Beaufort Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/beaufort_cipher.py)
+ * [Brute Force Caesar Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/brute_force_caesar_cipher.py)
+ * [Caesar Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/caesar_cipher.py)
+ * [Cryptomath Module](https://github.com/TheAlgorithms/Python/blob/master/ciphers/cryptomath_module.py)
+ * [Decrypt Caesar With Chi Squared](https://github.com/TheAlgorithms/Python/blob/master/ciphers/decrypt_caesar_with_chi_squared.py)
+ * [Deterministic Miller Rabin](https://github.com/TheAlgorithms/Python/blob/master/ciphers/deterministic_miller_rabin.py)
+ * [Diffie](https://github.com/TheAlgorithms/Python/blob/master/ciphers/diffie.py)
+ * [Elgamal Key Generator](https://github.com/TheAlgorithms/Python/blob/master/ciphers/elgamal_key_generator.py)
+ * [Enigma Machine2](https://github.com/TheAlgorithms/Python/blob/master/ciphers/enigma_machine2.py)
+ * [Hill Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/hill_cipher.py)
+ * [Mixed Keyword Cypher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/mixed_keyword_cypher.py)
+ * [Mono Alphabetic Ciphers](https://github.com/TheAlgorithms/Python/blob/master/ciphers/mono_alphabetic_ciphers.py)
+ * [Morse Code Implementation](https://github.com/TheAlgorithms/Python/blob/master/ciphers/morse_code_implementation.py)
+ * [Onepad Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/onepad_cipher.py)
+ * [Playfair Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/playfair_cipher.py)
+ * [Porta Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/porta_cipher.py)
+ * [Rabin Miller](https://github.com/TheAlgorithms/Python/blob/master/ciphers/rabin_miller.py)
+ * [Rail Fence Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/rail_fence_cipher.py)
+ * [Rot13](https://github.com/TheAlgorithms/Python/blob/master/ciphers/rot13.py)
+ * [Rsa Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/rsa_cipher.py)
+ * [Rsa Factorization](https://github.com/TheAlgorithms/Python/blob/master/ciphers/rsa_factorization.py)
+ * [Rsa Key Generator](https://github.com/TheAlgorithms/Python/blob/master/ciphers/rsa_key_generator.py)
+ * [Shuffled Shift Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/shuffled_shift_cipher.py)
+ * [Simple Keyword Cypher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/simple_keyword_cypher.py)
+ * [Simple Substitution Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/simple_substitution_cipher.py)
+ * [Trafid Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/trafid_cipher.py)
+ * [Transposition Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/transposition_cipher.py)
+ * [Transposition Cipher Encrypt Decrypt File](https://github.com/TheAlgorithms/Python/blob/master/ciphers/transposition_cipher_encrypt_decrypt_file.py)
+ * [Vigenere Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/vigenere_cipher.py)
+ * [Xor Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/xor_cipher.py)
+
+## Compression
+ * [Burrows Wheeler](https://github.com/TheAlgorithms/Python/blob/master/compression/burrows_wheeler.py)
+ * [Huffman](https://github.com/TheAlgorithms/Python/blob/master/compression/huffman.py)
+ * [Lempel Ziv](https://github.com/TheAlgorithms/Python/blob/master/compression/lempel_ziv.py)
+ * [Lempel Ziv Decompress](https://github.com/TheAlgorithms/Python/blob/master/compression/lempel_ziv_decompress.py)
+ * [Peak Signal To Noise Ratio](https://github.com/TheAlgorithms/Python/blob/master/compression/peak_signal_to_noise_ratio.py)
+
+## Computer Vision
+ * [Harriscorner](https://github.com/TheAlgorithms/Python/blob/master/computer_vision/harriscorner.py)
+ * [Meanthreshold](https://github.com/TheAlgorithms/Python/blob/master/computer_vision/meanthreshold.py)
+
+## Conversions
+ * [Binary To Decimal](https://github.com/TheAlgorithms/Python/blob/master/conversions/binary_to_decimal.py)
+ * [Binary To Octal](https://github.com/TheAlgorithms/Python/blob/master/conversions/binary_to_octal.py)
+ * [Decimal To Any](https://github.com/TheAlgorithms/Python/blob/master/conversions/decimal_to_any.py)
+ * [Decimal To Binary](https://github.com/TheAlgorithms/Python/blob/master/conversions/decimal_to_binary.py)
+ * [Decimal To Binary Recursion](https://github.com/TheAlgorithms/Python/blob/master/conversions/decimal_to_binary_recursion.py)
+ * [Decimal To Hexadecimal](https://github.com/TheAlgorithms/Python/blob/master/conversions/decimal_to_hexadecimal.py)
+ * [Decimal To Octal](https://github.com/TheAlgorithms/Python/blob/master/conversions/decimal_to_octal.py)
+ * [Hexadecimal To Decimal](https://github.com/TheAlgorithms/Python/blob/master/conversions/hexadecimal_to_decimal.py)
+ * [Molecular Chemistry](https://github.com/TheAlgorithms/Python/blob/master/conversions/molecular_chemistry.py)
+ * [Octal To Decimal](https://github.com/TheAlgorithms/Python/blob/master/conversions/octal_to_decimal.py)
+ * [Prefix Conversions](https://github.com/TheAlgorithms/Python/blob/master/conversions/prefix_conversions.py)
+ * [Roman Numerals](https://github.com/TheAlgorithms/Python/blob/master/conversions/roman_numerals.py)
+ * [Temperature Conversions](https://github.com/TheAlgorithms/Python/blob/master/conversions/temperature_conversions.py)
+ * [Weight Conversion](https://github.com/TheAlgorithms/Python/blob/master/conversions/weight_conversion.py)
+
+## Data Structures
+ * Binary Tree
+ * [Avl Tree](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/avl_tree.py)
+ * [Basic Binary Tree](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/basic_binary_tree.py)
+ * [Binary Search Tree](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/binary_search_tree.py)
+ * [Binary Search Tree Recursive](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/binary_search_tree_recursive.py)
+ * [Binary Tree Mirror](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/binary_tree_mirror.py)
+ * [Binary Tree Traversals](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/binary_tree_traversals.py)
+ * [Fenwick Tree](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/fenwick_tree.py)
+ * [Lazy Segment Tree](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/lazy_segment_tree.py)
+ * [Lowest Common Ancestor](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/lowest_common_ancestor.py)
+ * [Non Recursive Segment Tree](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/non_recursive_segment_tree.py)
+ * [Number Of Possible Binary Trees](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/number_of_possible_binary_trees.py)
+ * [Red Black Tree](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/red_black_tree.py)
+ * [Segment Tree](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/segment_tree.py)
+ * [Segment Tree Other](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/segment_tree_other.py)
+ * [Treap](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/treap.py)
+ * Disjoint Set
+ * [Alternate Disjoint Set](https://github.com/TheAlgorithms/Python/blob/master/data_structures/disjoint_set/alternate_disjoint_set.py)
+ * [Disjoint Set](https://github.com/TheAlgorithms/Python/blob/master/data_structures/disjoint_set/disjoint_set.py)
+ * Hashing
+ * [Double Hash](https://github.com/TheAlgorithms/Python/blob/master/data_structures/hashing/double_hash.py)
+ * [Hash Table](https://github.com/TheAlgorithms/Python/blob/master/data_structures/hashing/hash_table.py)
+ * [Hash Table With Linked List](https://github.com/TheAlgorithms/Python/blob/master/data_structures/hashing/hash_table_with_linked_list.py)
+ * Number Theory
+ * [Prime Numbers](https://github.com/TheAlgorithms/Python/blob/master/data_structures/hashing/number_theory/prime_numbers.py)
+ * [Quadratic Probing](https://github.com/TheAlgorithms/Python/blob/master/data_structures/hashing/quadratic_probing.py)
+ * Heap
+ * [Binomial Heap](https://github.com/TheAlgorithms/Python/blob/master/data_structures/heap/binomial_heap.py)
+ * [Heap](https://github.com/TheAlgorithms/Python/blob/master/data_structures/heap/heap.py)
+ * [Heap Generic](https://github.com/TheAlgorithms/Python/blob/master/data_structures/heap/heap_generic.py)
+ * [Max Heap](https://github.com/TheAlgorithms/Python/blob/master/data_structures/heap/max_heap.py)
+ * [Min Heap](https://github.com/TheAlgorithms/Python/blob/master/data_structures/heap/min_heap.py)
+ * [Randomized Heap](https://github.com/TheAlgorithms/Python/blob/master/data_structures/heap/randomized_heap.py)
+ * [Skew Heap](https://github.com/TheAlgorithms/Python/blob/master/data_structures/heap/skew_heap.py)
+ * Linked List
+ * [Circular Linked List](https://github.com/TheAlgorithms/Python/blob/master/data_structures/linked_list/circular_linked_list.py)
+ * [Deque Doubly](https://github.com/TheAlgorithms/Python/blob/master/data_structures/linked_list/deque_doubly.py)
+ * [Doubly Linked List](https://github.com/TheAlgorithms/Python/blob/master/data_structures/linked_list/doubly_linked_list.py)
+ * [Doubly Linked List Two](https://github.com/TheAlgorithms/Python/blob/master/data_structures/linked_list/doubly_linked_list_two.py)
+ * [From Sequence](https://github.com/TheAlgorithms/Python/blob/master/data_structures/linked_list/from_sequence.py)
+ * [Has Loop](https://github.com/TheAlgorithms/Python/blob/master/data_structures/linked_list/has_loop.py)
+ * [Is Palindrome](https://github.com/TheAlgorithms/Python/blob/master/data_structures/linked_list/is_palindrome.py)
+ * [Merge Two Lists](https://github.com/TheAlgorithms/Python/blob/master/data_structures/linked_list/merge_two_lists.py)
+ * [Middle Element Of Linked List](https://github.com/TheAlgorithms/Python/blob/master/data_structures/linked_list/middle_element_of_linked_list.py)
+ * [Print Reverse](https://github.com/TheAlgorithms/Python/blob/master/data_structures/linked_list/print_reverse.py)
+ * [Singly Linked List](https://github.com/TheAlgorithms/Python/blob/master/data_structures/linked_list/singly_linked_list.py)
+ * [Skip List](https://github.com/TheAlgorithms/Python/blob/master/data_structures/linked_list/skip_list.py)
+ * [Swap Nodes](https://github.com/TheAlgorithms/Python/blob/master/data_structures/linked_list/swap_nodes.py)
+ * Queue
+ * [Circular Queue](https://github.com/TheAlgorithms/Python/blob/master/data_structures/queue/circular_queue.py)
+ * [Double Ended Queue](https://github.com/TheAlgorithms/Python/blob/master/data_structures/queue/double_ended_queue.py)
+ * [Linked Queue](https://github.com/TheAlgorithms/Python/blob/master/data_structures/queue/linked_queue.py)
+ * [Priority Queue Using List](https://github.com/TheAlgorithms/Python/blob/master/data_structures/queue/priority_queue_using_list.py)
+ * [Queue On List](https://github.com/TheAlgorithms/Python/blob/master/data_structures/queue/queue_on_list.py)
+ * [Queue On Pseudo Stack](https://github.com/TheAlgorithms/Python/blob/master/data_structures/queue/queue_on_pseudo_stack.py)
+ * Stacks
+ * [Balanced Parentheses](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/balanced_parentheses.py)
+ * [Dijkstras Two Stack Algorithm](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/dijkstras_two_stack_algorithm.py)
+ * [Evaluate Postfix Notations](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/evaluate_postfix_notations.py)
+ * [Infix To Postfix Conversion](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/infix_to_postfix_conversion.py)
+ * [Infix To Prefix Conversion](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/infix_to_prefix_conversion.py)
+ * [Linked Stack](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/linked_stack.py)
+ * [Next Greater Element](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/next_greater_element.py)
+ * [Postfix Evaluation](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/postfix_evaluation.py)
+ * [Prefix Evaluation](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/prefix_evaluation.py)
+ * [Stack](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/stack.py)
+ * [Stack Using Dll](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/stack_using_dll.py)
+ * [Stock Span Problem](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/stock_span_problem.py)
+ * Trie
+ * [Trie](https://github.com/TheAlgorithms/Python/blob/master/data_structures/trie/trie.py)
+
+## Digital Image Processing
+ * [Change Brightness](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/change_brightness.py)
+ * [Change Contrast](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/change_contrast.py)
+ * [Convert To Negative](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/convert_to_negative.py)
+ * Dithering
+ * [Burkes](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/dithering/burkes.py)
+ * Edge Detection
+ * [Canny](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/edge_detection/canny.py)
+ * Filters
+ * [Bilateral Filter](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/filters/bilateral_filter.py)
+ * [Convolve](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/filters/convolve.py)
+ * [Gaussian Filter](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/filters/gaussian_filter.py)
+ * [Median Filter](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/filters/median_filter.py)
+ * [Sobel Filter](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/filters/sobel_filter.py)
+ * Histogram Equalization
+ * [Histogram Stretch](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/histogram_equalization/histogram_stretch.py)
+ * [Index Calculation](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/index_calculation.py)
+ * Resize
+ * [Resize](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/resize/resize.py)
+ * Rotation
+ * [Rotation](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/rotation/rotation.py)
+ * [Sepia](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/sepia.py)
+ * [Test Digital Image Processing](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/test_digital_image_processing.py)
+
+## Divide And Conquer
+ * [Closest Pair Of Points](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/closest_pair_of_points.py)
+ * [Convex Hull](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/convex_hull.py)
+ * [Heaps Algorithm](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/heaps_algorithm.py)
+ * [Heaps Algorithm Iterative](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/heaps_algorithm_iterative.py)
+ * [Inversions](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/inversions.py)
+ * [Kth Order Statistic](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/kth_order_statistic.py)
+ * [Max Difference Pair](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/max_difference_pair.py)
+ * [Max Subarray Sum](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/max_subarray_sum.py)
+ * [Mergesort](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/mergesort.py)
+ * [Peak](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/peak.py)
+ * [Power](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/power.py)
+ * [Strassen Matrix Multiplication](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/strassen_matrix_multiplication.py)
+
+## Dynamic Programming
+ * [Abbreviation](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/abbreviation.py)
+ * [Bitmask](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/bitmask.py)
+ * [Climbing Stairs](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/climbing_stairs.py)
+ * [Coin Change](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/coin_change.py)
+ * [Edit Distance](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/edit_distance.py)
+ * [Factorial](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/factorial.py)
+ * [Fast Fibonacci](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/fast_fibonacci.py)
+ * [Fibonacci](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/fibonacci.py)
+ * [Floyd Warshall](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/floyd_warshall.py)
+ * [Fractional Knapsack](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/fractional_knapsack.py)
+ * [Fractional Knapsack 2](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/fractional_knapsack_2.py)
+ * [Integer Partition](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/integer_partition.py)
+ * [Iterating Through Submasks](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/iterating_through_submasks.py)
+ * [Knapsack](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/knapsack.py)
+ * [Longest Common Subsequence](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/longest_common_subsequence.py)
+ * [Longest Increasing Subsequence](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/longest_increasing_subsequence.py)
+ * [Longest Increasing Subsequence O(Nlogn)](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/longest_increasing_subsequence_o(nlogn).py)
+ * [Longest Sub Array](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/longest_sub_array.py)
+ * [Matrix Chain Order](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/matrix_chain_order.py)
+ * [Max Non Adjacent Sum](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/max_non_adjacent_sum.py)
+ * [Max Sub Array](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/max_sub_array.py)
+ * [Max Sum Contiguous Subsequence](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/max_sum_contiguous_subsequence.py)
+ * [Minimum Cost Path](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/minimum_cost_path.py)
+ * [Minimum Partition](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/minimum_partition.py)
+ * [Minimum Steps To One](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/minimum_steps_to_one.py)
+ * [Optimal Binary Search Tree](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/optimal_binary_search_tree.py)
+ * [Rod Cutting](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/rod_cutting.py)
+ * [Subset Generation](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/subset_generation.py)
+ * [Sum Of Subset](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/sum_of_subset.py)
+
+## Electronics
+ * [Electric Power](https://github.com/TheAlgorithms/Python/blob/master/electronics/electric_power.py)
+ * [Ohms Law](https://github.com/TheAlgorithms/Python/blob/master/electronics/ohms_law.py)
+
+## File Transfer
+ * [Receive File](https://github.com/TheAlgorithms/Python/blob/master/file_transfer/receive_file.py)
+ * [Send File](https://github.com/TheAlgorithms/Python/blob/master/file_transfer/send_file.py)
+ * Tests
+ * [Test Send File](https://github.com/TheAlgorithms/Python/blob/master/file_transfer/tests/test_send_file.py)
+
+## Fuzzy Logic
+ * [Fuzzy Operations](https://github.com/TheAlgorithms/Python/blob/master/fuzzy_logic/fuzzy_operations.py)
+
+## Genetic Algorithm
+ * [Basic String](https://github.com/TheAlgorithms/Python/blob/master/genetic_algorithm/basic_string.py)
+
+## Geodesy
+ * [Haversine Distance](https://github.com/TheAlgorithms/Python/blob/master/geodesy/haversine_distance.py)
+ * [Lamberts Ellipsoidal Distance](https://github.com/TheAlgorithms/Python/blob/master/geodesy/lamberts_ellipsoidal_distance.py)
+
+## Graphics
+ * [Bezier Curve](https://github.com/TheAlgorithms/Python/blob/master/graphics/bezier_curve.py)
+ * [Vector3 For 2D Rendering](https://github.com/TheAlgorithms/Python/blob/master/graphics/vector3_for_2d_rendering.py)
+
+## Graphs
+ * [A Star](https://github.com/TheAlgorithms/Python/blob/master/graphs/a_star.py)
+ * [Articulation Points](https://github.com/TheAlgorithms/Python/blob/master/graphs/articulation_points.py)
+ * [Basic Graphs](https://github.com/TheAlgorithms/Python/blob/master/graphs/basic_graphs.py)
+ * [Bellman Ford](https://github.com/TheAlgorithms/Python/blob/master/graphs/bellman_ford.py)
+ * [Bfs Shortest Path](https://github.com/TheAlgorithms/Python/blob/master/graphs/bfs_shortest_path.py)
+ * [Bfs Zero One Shortest Path](https://github.com/TheAlgorithms/Python/blob/master/graphs/bfs_zero_one_shortest_path.py)
+ * [Bidirectional A Star](https://github.com/TheAlgorithms/Python/blob/master/graphs/bidirectional_a_star.py)
+ * [Bidirectional Breadth First Search](https://github.com/TheAlgorithms/Python/blob/master/graphs/bidirectional_breadth_first_search.py)
+ * [Breadth First Search](https://github.com/TheAlgorithms/Python/blob/master/graphs/breadth_first_search.py)
+ * [Breadth First Search 2](https://github.com/TheAlgorithms/Python/blob/master/graphs/breadth_first_search_2.py)
+ * [Breadth First Search Shortest Path](https://github.com/TheAlgorithms/Python/blob/master/graphs/breadth_first_search_shortest_path.py)
+ * [Check Bipartite Graph Bfs](https://github.com/TheAlgorithms/Python/blob/master/graphs/check_bipartite_graph_bfs.py)
+ * [Check Bipartite Graph Dfs](https://github.com/TheAlgorithms/Python/blob/master/graphs/check_bipartite_graph_dfs.py)
+ * [Connected Components](https://github.com/TheAlgorithms/Python/blob/master/graphs/connected_components.py)
+ * [Depth First Search](https://github.com/TheAlgorithms/Python/blob/master/graphs/depth_first_search.py)
+ * [Depth First Search 2](https://github.com/TheAlgorithms/Python/blob/master/graphs/depth_first_search_2.py)
+ * [Dijkstra](https://github.com/TheAlgorithms/Python/blob/master/graphs/dijkstra.py)
+ * [Dijkstra 2](https://github.com/TheAlgorithms/Python/blob/master/graphs/dijkstra_2.py)
+ * [Dijkstra Algorithm](https://github.com/TheAlgorithms/Python/blob/master/graphs/dijkstra_algorithm.py)
+ * [Dinic](https://github.com/TheAlgorithms/Python/blob/master/graphs/dinic.py)
+ * [Directed And Undirected (Weighted) Graph](https://github.com/TheAlgorithms/Python/blob/master/graphs/directed_and_undirected_(weighted)_graph.py)
+ * [Edmonds Karp Multiple Source And Sink](https://github.com/TheAlgorithms/Python/blob/master/graphs/edmonds_karp_multiple_source_and_sink.py)
+ * [Eulerian Path And Circuit For Undirected Graph](https://github.com/TheAlgorithms/Python/blob/master/graphs/eulerian_path_and_circuit_for_undirected_graph.py)
+ * [Even Tree](https://github.com/TheAlgorithms/Python/blob/master/graphs/even_tree.py)
+ * [Finding Bridges](https://github.com/TheAlgorithms/Python/blob/master/graphs/finding_bridges.py)
+ * [Frequent Pattern Graph Miner](https://github.com/TheAlgorithms/Python/blob/master/graphs/frequent_pattern_graph_miner.py)
+ * [G Topological Sort](https://github.com/TheAlgorithms/Python/blob/master/graphs/g_topological_sort.py)
+ * [Gale Shapley Bigraph](https://github.com/TheAlgorithms/Python/blob/master/graphs/gale_shapley_bigraph.py)
+ * [Graph List](https://github.com/TheAlgorithms/Python/blob/master/graphs/graph_list.py)
+ * [Graph Matrix](https://github.com/TheAlgorithms/Python/blob/master/graphs/graph_matrix.py)
+ * [Graphs Floyd Warshall](https://github.com/TheAlgorithms/Python/blob/master/graphs/graphs_floyd_warshall.py)
+ * [Greedy Best First](https://github.com/TheAlgorithms/Python/blob/master/graphs/greedy_best_first.py)
+ * [Kahns Algorithm Long](https://github.com/TheAlgorithms/Python/blob/master/graphs/kahns_algorithm_long.py)
+ * [Kahns Algorithm Topo](https://github.com/TheAlgorithms/Python/blob/master/graphs/kahns_algorithm_topo.py)
+ * [Karger](https://github.com/TheAlgorithms/Python/blob/master/graphs/karger.py)
+ * [Minimum Spanning Tree Boruvka](https://github.com/TheAlgorithms/Python/blob/master/graphs/minimum_spanning_tree_boruvka.py)
+ * [Minimum Spanning Tree Kruskal](https://github.com/TheAlgorithms/Python/blob/master/graphs/minimum_spanning_tree_kruskal.py)
+ * [Minimum Spanning Tree Kruskal2](https://github.com/TheAlgorithms/Python/blob/master/graphs/minimum_spanning_tree_kruskal2.py)
+ * [Minimum Spanning Tree Prims](https://github.com/TheAlgorithms/Python/blob/master/graphs/minimum_spanning_tree_prims.py)
+ * [Minimum Spanning Tree Prims2](https://github.com/TheAlgorithms/Python/blob/master/graphs/minimum_spanning_tree_prims2.py)
+ * [Multi Heuristic Astar](https://github.com/TheAlgorithms/Python/blob/master/graphs/multi_heuristic_astar.py)
+ * [Page Rank](https://github.com/TheAlgorithms/Python/blob/master/graphs/page_rank.py)
+ * [Prim](https://github.com/TheAlgorithms/Python/blob/master/graphs/prim.py)
+ * [Scc Kosaraju](https://github.com/TheAlgorithms/Python/blob/master/graphs/scc_kosaraju.py)
+ * [Strongly Connected Components](https://github.com/TheAlgorithms/Python/blob/master/graphs/strongly_connected_components.py)
+ * [Tarjans Scc](https://github.com/TheAlgorithms/Python/blob/master/graphs/tarjans_scc.py)
+ * Tests
+ * [Test Min Spanning Tree Kruskal](https://github.com/TheAlgorithms/Python/blob/master/graphs/tests/test_min_spanning_tree_kruskal.py)
+ * [Test Min Spanning Tree Prim](https://github.com/TheAlgorithms/Python/blob/master/graphs/tests/test_min_spanning_tree_prim.py)
+
+## Hashes
+ * [Adler32](https://github.com/TheAlgorithms/Python/blob/master/hashes/adler32.py)
+ * [Chaos Machine](https://github.com/TheAlgorithms/Python/blob/master/hashes/chaos_machine.py)
+ * [Djb2](https://github.com/TheAlgorithms/Python/blob/master/hashes/djb2.py)
+ * [Enigma Machine](https://github.com/TheAlgorithms/Python/blob/master/hashes/enigma_machine.py)
+ * [Hamming Code](https://github.com/TheAlgorithms/Python/blob/master/hashes/hamming_code.py)
+ * [Md5](https://github.com/TheAlgorithms/Python/blob/master/hashes/md5.py)
+ * [Sdbm](https://github.com/TheAlgorithms/Python/blob/master/hashes/sdbm.py)
+ * [Sha1](https://github.com/TheAlgorithms/Python/blob/master/hashes/sha1.py)
+
+## Knapsack
+ * [Greedy Knapsack](https://github.com/TheAlgorithms/Python/blob/master/knapsack/greedy_knapsack.py)
+ * [Knapsack](https://github.com/TheAlgorithms/Python/blob/master/knapsack/knapsack.py)
+ * Tests
+ * [Test Greedy Knapsack](https://github.com/TheAlgorithms/Python/blob/master/knapsack/tests/test_greedy_knapsack.py)
+ * [Test Knapsack](https://github.com/TheAlgorithms/Python/blob/master/knapsack/tests/test_knapsack.py)
+
+## Linear Algebra
+ * Src
+ * [Conjugate Gradient](https://github.com/TheAlgorithms/Python/blob/master/linear_algebra/src/conjugate_gradient.py)
+ * [Lib](https://github.com/TheAlgorithms/Python/blob/master/linear_algebra/src/lib.py)
+ * [Polynom For Points](https://github.com/TheAlgorithms/Python/blob/master/linear_algebra/src/polynom_for_points.py)
+ * [Power Iteration](https://github.com/TheAlgorithms/Python/blob/master/linear_algebra/src/power_iteration.py)
+ * [Rayleigh Quotient](https://github.com/TheAlgorithms/Python/blob/master/linear_algebra/src/rayleigh_quotient.py)
+ * [Test Linear Algebra](https://github.com/TheAlgorithms/Python/blob/master/linear_algebra/src/test_linear_algebra.py)
+ * [Transformations 2D](https://github.com/TheAlgorithms/Python/blob/master/linear_algebra/src/transformations_2d.py)
+
+## Machine Learning
+ * [Astar](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/astar.py)
+ * [Data Transformations](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/data_transformations.py)
+ * [Decision Tree](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/decision_tree.py)
+ * Forecasting
+ * [Run](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/forecasting/run.py)
+ * [Gaussian Naive Bayes](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/gaussian_naive_bayes.py)
+ * [Gradient Boosting Regressor](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/gradient_boosting_regressor.py)
+ * [Gradient Descent](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/gradient_descent.py)
+ * [K Means Clust](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/k_means_clust.py)
+ * [K Nearest Neighbours](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/k_nearest_neighbours.py)
+ * [Knn Sklearn](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/knn_sklearn.py)
+ * [Linear Discriminant Analysis](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/linear_discriminant_analysis.py)
+ * [Linear Regression](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/linear_regression.py)
+ * [Logistic Regression](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/logistic_regression.py)
+ * [Multilayer Perceptron Classifier](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/multilayer_perceptron_classifier.py)
+ * [Polynomial Regression](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/polymonial_regression.py)
+ * [Random Forest Classifier](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/random_forest_classifier.py)
+ * [Random Forest Regressor](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/random_forest_regressor.py)
+ * [Scoring Functions](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/scoring_functions.py)
+ * [Sequential Minimum Optimization](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/sequential_minimum_optimization.py)
+ * [Similarity Search](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/similarity_search.py)
+ * [Support Vector Machines](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/support_vector_machines.py)
+ * [Word Frequency Functions](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/word_frequency_functions.py)
+
+## Maths
+ * [3N Plus 1](https://github.com/TheAlgorithms/Python/blob/master/maths/3n_plus_1.py)
+ * [Abs](https://github.com/TheAlgorithms/Python/blob/master/maths/abs.py)
+ * [Abs Max](https://github.com/TheAlgorithms/Python/blob/master/maths/abs_max.py)
+ * [Abs Min](https://github.com/TheAlgorithms/Python/blob/master/maths/abs_min.py)
+ * [Add](https://github.com/TheAlgorithms/Python/blob/master/maths/add.py)
+ * [Aliquot Sum](https://github.com/TheAlgorithms/Python/blob/master/maths/aliquot_sum.py)
+ * [Allocation Number](https://github.com/TheAlgorithms/Python/blob/master/maths/allocation_number.py)
+ * [Area](https://github.com/TheAlgorithms/Python/blob/master/maths/area.py)
+ * [Area Under Curve](https://github.com/TheAlgorithms/Python/blob/master/maths/area_under_curve.py)
+ * [Armstrong Numbers](https://github.com/TheAlgorithms/Python/blob/master/maths/armstrong_numbers.py)
+ * [Average Mean](https://github.com/TheAlgorithms/Python/blob/master/maths/average_mean.py)
+ * [Average Median](https://github.com/TheAlgorithms/Python/blob/master/maths/average_median.py)
+ * [Average Mode](https://github.com/TheAlgorithms/Python/blob/master/maths/average_mode.py)
+ * [Bailey Borwein Plouffe](https://github.com/TheAlgorithms/Python/blob/master/maths/bailey_borwein_plouffe.py)
+ * [Basic Maths](https://github.com/TheAlgorithms/Python/blob/master/maths/basic_maths.py)
+ * [Binary Exp Mod](https://github.com/TheAlgorithms/Python/blob/master/maths/binary_exp_mod.py)
+ * [Binary Exponentiation](https://github.com/TheAlgorithms/Python/blob/master/maths/binary_exponentiation.py)
+ * [Binomial Coefficient](https://github.com/TheAlgorithms/Python/blob/master/maths/binomial_coefficient.py)
+ * [Binomial Distribution](https://github.com/TheAlgorithms/Python/blob/master/maths/binomial_distribution.py)
+ * [Bisection](https://github.com/TheAlgorithms/Python/blob/master/maths/bisection.py)
+ * [Ceil](https://github.com/TheAlgorithms/Python/blob/master/maths/ceil.py)
+ * [Chudnovsky Algorithm](https://github.com/TheAlgorithms/Python/blob/master/maths/chudnovsky_algorithm.py)
+ * [Collatz Sequence](https://github.com/TheAlgorithms/Python/blob/master/maths/collatz_sequence.py)
+ * [Combinations](https://github.com/TheAlgorithms/Python/blob/master/maths/combinations.py)
+ * [Decimal Isolate](https://github.com/TheAlgorithms/Python/blob/master/maths/decimal_isolate.py)
+ * [Entropy](https://github.com/TheAlgorithms/Python/blob/master/maths/entropy.py)
+ * [Euclidean Distance](https://github.com/TheAlgorithms/Python/blob/master/maths/euclidean_distance.py)
+ * [Eulers Totient](https://github.com/TheAlgorithms/Python/blob/master/maths/eulers_totient.py)
+ * [Explicit Euler](https://github.com/TheAlgorithms/Python/blob/master/maths/explicit_euler.py)
+ * [Extended Euclidean Algorithm](https://github.com/TheAlgorithms/Python/blob/master/maths/extended_euclidean_algorithm.py)
+ * [Factorial Iterative](https://github.com/TheAlgorithms/Python/blob/master/maths/factorial_iterative.py)
+ * [Factorial Python](https://github.com/TheAlgorithms/Python/blob/master/maths/factorial_python.py)
+ * [Factorial Recursive](https://github.com/TheAlgorithms/Python/blob/master/maths/factorial_recursive.py)
+ * [Factors](https://github.com/TheAlgorithms/Python/blob/master/maths/factors.py)
+ * [Fermat Little Theorem](https://github.com/TheAlgorithms/Python/blob/master/maths/fermat_little_theorem.py)
+ * [Fibonacci](https://github.com/TheAlgorithms/Python/blob/master/maths/fibonacci.py)
+ * [Fibonacci Sequence Recursion](https://github.com/TheAlgorithms/Python/blob/master/maths/fibonacci_sequence_recursion.py)
+ * [Find Max](https://github.com/TheAlgorithms/Python/blob/master/maths/find_max.py)
+ * [Find Max Recursion](https://github.com/TheAlgorithms/Python/blob/master/maths/find_max_recursion.py)
+ * [Find Min](https://github.com/TheAlgorithms/Python/blob/master/maths/find_min.py)
+ * [Find Min Recursion](https://github.com/TheAlgorithms/Python/blob/master/maths/find_min_recursion.py)
+ * [Floor](https://github.com/TheAlgorithms/Python/blob/master/maths/floor.py)
+ * [Gamma](https://github.com/TheAlgorithms/Python/blob/master/maths/gamma.py)
+ * [Gaussian](https://github.com/TheAlgorithms/Python/blob/master/maths/gaussian.py)
+ * [Greatest Common Divisor](https://github.com/TheAlgorithms/Python/blob/master/maths/greatest_common_divisor.py)
+ * [Hardy Ramanujanalgo](https://github.com/TheAlgorithms/Python/blob/master/maths/hardy_ramanujanalgo.py)
+ * [Is Square Free](https://github.com/TheAlgorithms/Python/blob/master/maths/is_square_free.py)
+ * [Jaccard Similarity](https://github.com/TheAlgorithms/Python/blob/master/maths/jaccard_similarity.py)
+ * [Kadanes](https://github.com/TheAlgorithms/Python/blob/master/maths/kadanes.py)
+ * [Karatsuba](https://github.com/TheAlgorithms/Python/blob/master/maths/karatsuba.py)
+ * [Krishnamurthy Number](https://github.com/TheAlgorithms/Python/blob/master/maths/krishnamurthy_number.py)
+ * [Kth Lexicographic Permutation](https://github.com/TheAlgorithms/Python/blob/master/maths/kth_lexicographic_permutation.py)
+ * [Largest Of Very Large Numbers](https://github.com/TheAlgorithms/Python/blob/master/maths/largest_of_very_large_numbers.py)
+ * [Least Common Multiple](https://github.com/TheAlgorithms/Python/blob/master/maths/least_common_multiple.py)
+ * [Line Length](https://github.com/TheAlgorithms/Python/blob/master/maths/line_length.py)
+ * [Lucas Lehmer Primality Test](https://github.com/TheAlgorithms/Python/blob/master/maths/lucas_lehmer_primality_test.py)
+ * [Lucas Series](https://github.com/TheAlgorithms/Python/blob/master/maths/lucas_series.py)
+ * [Matrix Exponentiation](https://github.com/TheAlgorithms/Python/blob/master/maths/matrix_exponentiation.py)
+ * [Miller Rabin](https://github.com/TheAlgorithms/Python/blob/master/maths/miller_rabin.py)
+ * [Mobius Function](https://github.com/TheAlgorithms/Python/blob/master/maths/mobius_function.py)
+ * [Modular Exponential](https://github.com/TheAlgorithms/Python/blob/master/maths/modular_exponential.py)
+ * [Monte Carlo](https://github.com/TheAlgorithms/Python/blob/master/maths/monte_carlo.py)
+ * [Monte Carlo Dice](https://github.com/TheAlgorithms/Python/blob/master/maths/monte_carlo_dice.py)
+ * [Newton Raphson](https://github.com/TheAlgorithms/Python/blob/master/maths/newton_raphson.py)
+ * [Number Of Digits](https://github.com/TheAlgorithms/Python/blob/master/maths/number_of_digits.py)
+ * [Numerical Integration](https://github.com/TheAlgorithms/Python/blob/master/maths/numerical_integration.py)
+ * [Perfect Cube](https://github.com/TheAlgorithms/Python/blob/master/maths/perfect_cube.py)
+ * [Perfect Number](https://github.com/TheAlgorithms/Python/blob/master/maths/perfect_number.py)
+ * [Perfect Square](https://github.com/TheAlgorithms/Python/blob/master/maths/perfect_square.py)
+ * [Pi Monte Carlo Estimation](https://github.com/TheAlgorithms/Python/blob/master/maths/pi_monte_carlo_estimation.py)
+ * [Polynomial Evaluation](https://github.com/TheAlgorithms/Python/blob/master/maths/polynomial_evaluation.py)
+ * [Power Using Recursion](https://github.com/TheAlgorithms/Python/blob/master/maths/power_using_recursion.py)
+ * [Prime Check](https://github.com/TheAlgorithms/Python/blob/master/maths/prime_check.py)
+ * [Prime Factors](https://github.com/TheAlgorithms/Python/blob/master/maths/prime_factors.py)
+ * [Prime Numbers](https://github.com/TheAlgorithms/Python/blob/master/maths/prime_numbers.py)
+ * [Prime Sieve Eratosthenes](https://github.com/TheAlgorithms/Python/blob/master/maths/prime_sieve_eratosthenes.py)
+ * [Pythagoras](https://github.com/TheAlgorithms/Python/blob/master/maths/pythagoras.py)
+ * [Qr Decomposition](https://github.com/TheAlgorithms/Python/blob/master/maths/qr_decomposition.py)
+ * [Quadratic Equations Complex Numbers](https://github.com/TheAlgorithms/Python/blob/master/maths/quadratic_equations_complex_numbers.py)
+ * [Radians](https://github.com/TheAlgorithms/Python/blob/master/maths/radians.py)
+ * [Radix2 Fft](https://github.com/TheAlgorithms/Python/blob/master/maths/radix2_fft.py)
+ * [Relu](https://github.com/TheAlgorithms/Python/blob/master/maths/relu.py)
+ * [Runge Kutta](https://github.com/TheAlgorithms/Python/blob/master/maths/runge_kutta.py)
+ * [Segmented Sieve](https://github.com/TheAlgorithms/Python/blob/master/maths/segmented_sieve.py)
+ * Series
+ * [Geometric Series](https://github.com/TheAlgorithms/Python/blob/master/maths/series/geometric_series.py)
+ * [Harmonic Series](https://github.com/TheAlgorithms/Python/blob/master/maths/series/harmonic_series.py)
+ * [P Series](https://github.com/TheAlgorithms/Python/blob/master/maths/series/p_series.py)
+ * [Sieve Of Eratosthenes](https://github.com/TheAlgorithms/Python/blob/master/maths/sieve_of_eratosthenes.py)
+ * [Sigmoid](https://github.com/TheAlgorithms/Python/blob/master/maths/sigmoid.py)
+ * [Simpson Rule](https://github.com/TheAlgorithms/Python/blob/master/maths/simpson_rule.py)
+ * [Softmax](https://github.com/TheAlgorithms/Python/blob/master/maths/softmax.py)
+ * [Square Root](https://github.com/TheAlgorithms/Python/blob/master/maths/square_root.py)
+ * [Sum Of Arithmetic Series](https://github.com/TheAlgorithms/Python/blob/master/maths/sum_of_arithmetic_series.py)
+ * [Sum Of Digits](https://github.com/TheAlgorithms/Python/blob/master/maths/sum_of_digits.py)
+ * [Sum Of Geometric Progression](https://github.com/TheAlgorithms/Python/blob/master/maths/sum_of_geometric_progression.py)
+ * [Test Prime Check](https://github.com/TheAlgorithms/Python/blob/master/maths/test_prime_check.py)
+ * [Trapezoidal Rule](https://github.com/TheAlgorithms/Python/blob/master/maths/trapezoidal_rule.py)
+ * [Ugly Numbers](https://github.com/TheAlgorithms/Python/blob/master/maths/ugly_numbers.py)
+ * [Volume](https://github.com/TheAlgorithms/Python/blob/master/maths/volume.py)
+ * [Zellers Congruence](https://github.com/TheAlgorithms/Python/blob/master/maths/zellers_congruence.py)
+
+## Matrix
+ * [Count Islands In Matrix](https://github.com/TheAlgorithms/Python/blob/master/matrix/count_islands_in_matrix.py)
+ * [Inverse Of Matrix](https://github.com/TheAlgorithms/Python/blob/master/matrix/inverse_of_matrix.py)
+ * [Matrix Class](https://github.com/TheAlgorithms/Python/blob/master/matrix/matrix_class.py)
+ * [Matrix Operation](https://github.com/TheAlgorithms/Python/blob/master/matrix/matrix_operation.py)
+ * [Nth Fibonacci Using Matrix Exponentiation](https://github.com/TheAlgorithms/Python/blob/master/matrix/nth_fibonacci_using_matrix_exponentiation.py)
+ * [Rotate Matrix](https://github.com/TheAlgorithms/Python/blob/master/matrix/rotate_matrix.py)
+ * [Searching In Sorted Matrix](https://github.com/TheAlgorithms/Python/blob/master/matrix/searching_in_sorted_matrix.py)
+ * [Sherman Morrison](https://github.com/TheAlgorithms/Python/blob/master/matrix/sherman_morrison.py)
+ * [Spiral Print](https://github.com/TheAlgorithms/Python/blob/master/matrix/spiral_print.py)
+ * Tests
+ * [Test Matrix Operation](https://github.com/TheAlgorithms/Python/blob/master/matrix/tests/test_matrix_operation.py)
+
+## Networking Flow
+ * [Ford Fulkerson](https://github.com/TheAlgorithms/Python/blob/master/networking_flow/ford_fulkerson.py)
+ * [Minimum Cut](https://github.com/TheAlgorithms/Python/blob/master/networking_flow/minimum_cut.py)
+
+## Neural Network
+ * [2 Hidden Layers Neural Network](https://github.com/TheAlgorithms/Python/blob/master/neural_network/2_hidden_layers_neural_network.py)
+ * [Back Propagation Neural Network](https://github.com/TheAlgorithms/Python/blob/master/neural_network/back_propagation_neural_network.py)
+ * [Convolution Neural Network](https://github.com/TheAlgorithms/Python/blob/master/neural_network/convolution_neural_network.py)
+ * [Perceptron](https://github.com/TheAlgorithms/Python/blob/master/neural_network/perceptron.py)
+
+## Other
+ * [Activity Selection](https://github.com/TheAlgorithms/Python/blob/master/other/activity_selection.py)
+ * [Anagrams](https://github.com/TheAlgorithms/Python/blob/master/other/anagrams.py)
+ * [Autocomplete Using Trie](https://github.com/TheAlgorithms/Python/blob/master/other/autocomplete_using_trie.py)
+ * [Binary Exponentiation](https://github.com/TheAlgorithms/Python/blob/master/other/binary_exponentiation.py)
+ * [Binary Exponentiation 2](https://github.com/TheAlgorithms/Python/blob/master/other/binary_exponentiation_2.py)
+ * [Davis–Putnam–Logemann–Loveland](https://github.com/TheAlgorithms/Python/blob/master/other/davis–putnam–logemann–loveland.py)
+ * [Detecting English Programmatically](https://github.com/TheAlgorithms/Python/blob/master/other/detecting_english_programmatically.py)
+ * [Dijkstra Bankers Algorithm](https://github.com/TheAlgorithms/Python/blob/master/other/dijkstra_bankers_algorithm.py)
+ * [Doomsday](https://github.com/TheAlgorithms/Python/blob/master/other/doomsday.py)
+ * [Euclidean Gcd](https://github.com/TheAlgorithms/Python/blob/master/other/euclidean_gcd.py)
+ * [Fisher Yates Shuffle](https://github.com/TheAlgorithms/Python/blob/master/other/fischer_yates_shuffle.py)
+ * [Frequency Finder](https://github.com/TheAlgorithms/Python/blob/master/other/frequency_finder.py)
+ * [Game Of Life](https://github.com/TheAlgorithms/Python/blob/master/other/game_of_life.py)
+ * [Gauss Easter](https://github.com/TheAlgorithms/Python/blob/master/other/gauss_easter.py)
+ * [Greedy](https://github.com/TheAlgorithms/Python/blob/master/other/greedy.py)
+ * [Integration By Simpson Approx](https://github.com/TheAlgorithms/Python/blob/master/other/integeration_by_simpson_approx.py)
+ * [Largest Subarray Sum](https://github.com/TheAlgorithms/Python/blob/master/other/largest_subarray_sum.py)
+ * [Least Recently Used](https://github.com/TheAlgorithms/Python/blob/master/other/least_recently_used.py)
+ * [Lfu Cache](https://github.com/TheAlgorithms/Python/blob/master/other/lfu_cache.py)
+ * [Linear Congruential Generator](https://github.com/TheAlgorithms/Python/blob/master/other/linear_congruential_generator.py)
+ * [Lru Cache](https://github.com/TheAlgorithms/Python/blob/master/other/lru_cache.py)
+ * [Magicdiamondpattern](https://github.com/TheAlgorithms/Python/blob/master/other/magicdiamondpattern.py)
+ * [Markov Chain](https://github.com/TheAlgorithms/Python/blob/master/other/markov_chain.py)
+ * [Max Sum Sliding Window](https://github.com/TheAlgorithms/Python/blob/master/other/max_sum_sliding_window.py)
+ * [Median Of Two Arrays](https://github.com/TheAlgorithms/Python/blob/master/other/median_of_two_arrays.py)
+ * [Nested Brackets](https://github.com/TheAlgorithms/Python/blob/master/other/nested_brackets.py)
+ * [Palindrome](https://github.com/TheAlgorithms/Python/blob/master/other/palindrome.py)
+ * [Password Generator](https://github.com/TheAlgorithms/Python/blob/master/other/password_generator.py)
+ * [Primelib](https://github.com/TheAlgorithms/Python/blob/master/other/primelib.py)
+ * [Scoring Algorithm](https://github.com/TheAlgorithms/Python/blob/master/other/scoring_algorithm.py)
+ * [Sdes](https://github.com/TheAlgorithms/Python/blob/master/other/sdes.py)
+ * [Sierpinski Triangle](https://github.com/TheAlgorithms/Python/blob/master/other/sierpinski_triangle.py)
+ * [Tower Of Hanoi](https://github.com/TheAlgorithms/Python/blob/master/other/tower_of_hanoi.py)
+ * [Triplet Sum](https://github.com/TheAlgorithms/Python/blob/master/other/triplet_sum.py)
+ * [Two Pointer](https://github.com/TheAlgorithms/Python/blob/master/other/two_pointer.py)
+ * [Two Sum](https://github.com/TheAlgorithms/Python/blob/master/other/two_sum.py)
+ * [Word Patterns](https://github.com/TheAlgorithms/Python/blob/master/other/word_patterns.py)
+
+## Project Euler
+ * Problem 001
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_001/sol1.py)
+ * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_001/sol2.py)
+ * [Sol3](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_001/sol3.py)
+ * [Sol4](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_001/sol4.py)
+ * [Sol5](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_001/sol5.py)
+ * [Sol6](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_001/sol6.py)
+ * [Sol7](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_001/sol7.py)
+ * Problem 002
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_002/sol1.py)
+ * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_002/sol2.py)
+ * [Sol3](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_002/sol3.py)
+ * [Sol4](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_002/sol4.py)
+ * [Sol5](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_002/sol5.py)
+ * Problem 003
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_003/sol1.py)
+ * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_003/sol2.py)
+ * [Sol3](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_003/sol3.py)
+ * Problem 004
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_004/sol1.py)
+ * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_004/sol2.py)
+ * Problem 005
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_005/sol1.py)
+ * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_005/sol2.py)
+ * Problem 006
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_006/sol1.py)
+ * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_006/sol2.py)
+ * [Sol3](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_006/sol3.py)
+ * [Sol4](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_006/sol4.py)
+ * Problem 007
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_007/sol1.py)
+ * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_007/sol2.py)
+ * [Sol3](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_007/sol3.py)
+ * Problem 008
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_008/sol1.py)
+ * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_008/sol2.py)
+ * [Sol3](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_008/sol3.py)
+ * Problem 009
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_009/sol1.py)
+ * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_009/sol2.py)
+ * [Sol3](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_009/sol3.py)
+ * Problem 010
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_010/sol1.py)
+ * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_010/sol2.py)
+ * [Sol3](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_010/sol3.py)
+ * Problem 011
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_011/sol1.py)
+ * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_011/sol2.py)
+ * Problem 012
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_012/sol1.py)
+ * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_012/sol2.py)
+ * Problem 013
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_013/sol1.py)
+ * Problem 014
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_014/sol1.py)
+ * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_014/sol2.py)
+ * Problem 015
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_015/sol1.py)
+ * Problem 016
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_016/sol1.py)
+ * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_016/sol2.py)
+ * Problem 017
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_017/sol1.py)
+ * Problem 018
+ * [Solution](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_018/solution.py)
+ * Problem 019
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_019/sol1.py)
+ * Problem 020
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_020/sol1.py)
+ * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_020/sol2.py)
+ * [Sol3](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_020/sol3.py)
+ * [Sol4](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_020/sol4.py)
+ * Problem 021
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_021/sol1.py)
+ * Problem 022
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_022/sol1.py)
+ * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_022/sol2.py)
+ * Problem 023
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_023/sol1.py)
+ * Problem 024
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_024/sol1.py)
+ * Problem 025
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_025/sol1.py)
+ * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_025/sol2.py)
+ * [Sol3](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_025/sol3.py)
+ * Problem 026
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_026/sol1.py)
+ * Problem 027
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_027/sol1.py)
+ * Problem 028
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_028/sol1.py)
+ * Problem 029
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_029/sol1.py)
+ * Problem 030
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_030/sol1.py)
+ * Problem 031
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_031/sol1.py)
+ * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_031/sol2.py)
+ * Problem 032
+ * [Sol32](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_032/sol32.py)
+ * Problem 033
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_033/sol1.py)
+ * Problem 034
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_034/sol1.py)
+ * Problem 035
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_035/sol1.py)
+ * Problem 036
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_036/sol1.py)
+ * Problem 037
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_037/sol1.py)
+ * Problem 038
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_038/sol1.py)
+ * Problem 039
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_039/sol1.py)
+ * Problem 040
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_040/sol1.py)
+ * Problem 041
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_041/sol1.py)
+ * Problem 042
+ * [Solution42](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_042/solution42.py)
+ * Problem 043
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_043/sol1.py)
+ * Problem 044
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_044/sol1.py)
+ * Problem 045
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_045/sol1.py)
+ * Problem 046
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_046/sol1.py)
+ * Problem 047
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_047/sol1.py)
+ * Problem 048
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_048/sol1.py)
+ * Problem 049
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_049/sol1.py)
+ * Problem 050
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_050/sol1.py)
+ * Problem 051
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_051/sol1.py)
+ * Problem 052
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_052/sol1.py)
+ * Problem 053
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_053/sol1.py)
+ * Problem 054
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_054/sol1.py)
+ * [Test Poker Hand](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_054/test_poker_hand.py)
+ * Problem 055
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_055/sol1.py)
+ * Problem 056
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_056/sol1.py)
+ * Problem 057
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_057/sol1.py)
+ * Problem 058
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_058/sol1.py)
+ * Problem 059
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_059/sol1.py)
+ * Problem 062
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_062/sol1.py)
+ * Problem 063
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_063/sol1.py)
+ * Problem 064
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_064/sol1.py)
+ * Problem 065
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_065/sol1.py)
+ * Problem 067
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_067/sol1.py)
+ * Problem 069
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_069/sol1.py)
+ * Problem 070
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_070/sol1.py)
+ * Problem 071
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_071/sol1.py)
+ * Problem 072
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_072/sol1.py)
+ * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_072/sol2.py)
+ * Problem 074
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_074/sol1.py)
+ * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_074/sol2.py)
+ * Problem 075
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_075/sol1.py)
+ * Problem 076
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_076/sol1.py)
+ * Problem 077
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_077/sol1.py)
+ * Problem 080
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_080/sol1.py)
+ * Problem 081
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_081/sol1.py)
+ * Problem 085
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_085/sol1.py)
+ * Problem 086
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_086/sol1.py)
+ * Problem 087
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_087/sol1.py)
+ * Problem 089
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_089/sol1.py)
+ * Problem 091
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_091/sol1.py)
+ * Problem 097
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_097/sol1.py)
+ * Problem 099
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_099/sol1.py)
+ * Problem 101
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_101/sol1.py)
+ * Problem 102
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_102/sol1.py)
+ * Problem 107
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_107/sol1.py)
+ * Problem 112
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_112/sol1.py)
+ * Problem 113
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_113/sol1.py)
+ * Problem 119
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_119/sol1.py)
+ * Problem 120
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_120/sol1.py)
+ * Problem 123
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_123/sol1.py)
+ * Problem 125
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_125/sol1.py)
+ * Problem 129
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_129/sol1.py)
+ * Problem 135
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_135/sol1.py)
+ * Problem 173
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_173/sol1.py)
+ * Problem 174
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_174/sol1.py)
+ * Problem 180
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_180/sol1.py)
+ * Problem 188
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_188/sol1.py)
+ * Problem 191
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_191/sol1.py)
+ * Problem 203
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_203/sol1.py)
+ * Problem 206
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_206/sol1.py)
+ * Problem 207
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_207/sol1.py)
+ * Problem 234
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_234/sol1.py)
+ * Problem 301
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_301/sol1.py)
+ * Problem 551
+ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_551/sol1.py)
+
+## Quantum
+ * [Deutsch Jozsa](https://github.com/TheAlgorithms/Python/blob/master/quantum/deutsch_jozsa.py)
+ * [Half Adder](https://github.com/TheAlgorithms/Python/blob/master/quantum/half_adder.py)
+ * [Not Gate](https://github.com/TheAlgorithms/Python/blob/master/quantum/not_gate.py)
+ * [Quantum Entanglement](https://github.com/TheAlgorithms/Python/blob/master/quantum/quantum_entanglement.py)
+ * [Ripple Adder Classic](https://github.com/TheAlgorithms/Python/blob/master/quantum/ripple_adder_classic.py)
+ * [Single Qubit Measure](https://github.com/TheAlgorithms/Python/blob/master/quantum/single_qubit_measure.py)
+
+## Scheduling
+ * [First Come First Served](https://github.com/TheAlgorithms/Python/blob/master/scheduling/first_come_first_served.py)
+ * [Round Robin](https://github.com/TheAlgorithms/Python/blob/master/scheduling/round_robin.py)
+ * [Shortest Job First](https://github.com/TheAlgorithms/Python/blob/master/scheduling/shortest_job_first.py)
+
+## Searches
+ * [Binary Search](https://github.com/TheAlgorithms/Python/blob/master/searches/binary_search.py)
+ * [Double Linear Search](https://github.com/TheAlgorithms/Python/blob/master/searches/double_linear_search.py)
+ * [Double Linear Search Recursion](https://github.com/TheAlgorithms/Python/blob/master/searches/double_linear_search_recursion.py)
+ * [Fibonacci Search](https://github.com/TheAlgorithms/Python/blob/master/searches/fibonacci_search.py)
+ * [Hill Climbing](https://github.com/TheAlgorithms/Python/blob/master/searches/hill_climbing.py)
+ * [Interpolation Search](https://github.com/TheAlgorithms/Python/blob/master/searches/interpolation_search.py)
+ * [Jump Search](https://github.com/TheAlgorithms/Python/blob/master/searches/jump_search.py)
+ * [Linear Search](https://github.com/TheAlgorithms/Python/blob/master/searches/linear_search.py)
+ * [Quick Select](https://github.com/TheAlgorithms/Python/blob/master/searches/quick_select.py)
+ * [Sentinel Linear Search](https://github.com/TheAlgorithms/Python/blob/master/searches/sentinel_linear_search.py)
+ * [Simple Binary Search](https://github.com/TheAlgorithms/Python/blob/master/searches/simple_binary_search.py)
+ * [Simulated Annealing](https://github.com/TheAlgorithms/Python/blob/master/searches/simulated_annealing.py)
+ * [Tabu Search](https://github.com/TheAlgorithms/Python/blob/master/searches/tabu_search.py)
+ * [Ternary Search](https://github.com/TheAlgorithms/Python/blob/master/searches/ternary_search.py)
+
+## Sorts
+ * [Bead Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/bead_sort.py)
+ * [Bitonic Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/bitonic_sort.py)
+ * [Bogo Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/bogo_sort.py)
+ * [Bubble Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/bubble_sort.py)
+ * [Bucket Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/bucket_sort.py)
+ * [Cocktail Shaker Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/cocktail_shaker_sort.py)
+ * [Comb Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/comb_sort.py)
+ * [Counting Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/counting_sort.py)
+ * [Cycle Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/cycle_sort.py)
+ * [Double Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/double_sort.py)
+ * [External Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/external_sort.py)
+ * [Gnome Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/gnome_sort.py)
+ * [Heap Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/heap_sort.py)
+ * [Insertion Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/insertion_sort.py)
+ * [Intro Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/intro_sort.py)
+ * [Iterative Merge Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/iterative_merge_sort.py)
+ * [Merge Insertion Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/merge_insertion_sort.py)
+ * [Merge Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/merge_sort.py)
+ * [Natural Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/natural_sort.py)
+ * [Odd Even Transposition Parallel](https://github.com/TheAlgorithms/Python/blob/master/sorts/odd_even_transposition_parallel.py)
+ * [Odd Even Transposition Single Threaded](https://github.com/TheAlgorithms/Python/blob/master/sorts/odd_even_transposition_single_threaded.py)
+ * [Pancake Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/pancake_sort.py)
+ * [Patience Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/patience_sort.py)
+ * [Pigeon Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/pigeon_sort.py)
+ * [Pigeonhole Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/pigeonhole_sort.py)
+ * [Quick Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/quick_sort.py)
+ * [Quick Sort 3 Partition](https://github.com/TheAlgorithms/Python/blob/master/sorts/quick_sort_3_partition.py)
+ * [Radix Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/radix_sort.py)
+ * [Random Normal Distribution Quicksort](https://github.com/TheAlgorithms/Python/blob/master/sorts/random_normal_distribution_quicksort.py)
+ * [Random Pivot Quick Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/random_pivot_quick_sort.py)
+ * [Recursive Bubble Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/recursive_bubble_sort.py)
+ * [Recursive Insertion Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/recursive_insertion_sort.py)
+ * [Recursive Quick Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/recursive_quick_sort.py)
+ * [Selection Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/selection_sort.py)
+ * [Shell Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/shell_sort.py)
+ * [Stooge Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/stooge_sort.py)
+ * [Strand Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/strand_sort.py)
+ * [Tim Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/tim_sort.py)
+ * [Topological Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/topological_sort.py)
+ * [Tree Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/tree_sort.py)
+ * [Unknown Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/unknown_sort.py)
+ * [Wiggle Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/wiggle_sort.py)
+
+## Strings
+ * [Aho Corasick](https://github.com/TheAlgorithms/Python/blob/master/strings/aho_corasick.py)
+ * [Boyer Moore Search](https://github.com/TheAlgorithms/Python/blob/master/strings/boyer_moore_search.py)
+ * [Can String Be Rearranged As Palindrome](https://github.com/TheAlgorithms/Python/blob/master/strings/can_string_be_rearranged_as_palindrome.py)
+ * [Capitalize](https://github.com/TheAlgorithms/Python/blob/master/strings/capitalize.py)
+ * [Check Anagrams](https://github.com/TheAlgorithms/Python/blob/master/strings/check_anagrams.py)
+ * [Check Pangram](https://github.com/TheAlgorithms/Python/blob/master/strings/check_pangram.py)
+ * [Is Palindrome](https://github.com/TheAlgorithms/Python/blob/master/strings/is_palindrome.py)
+ * [Jaro Winkler](https://github.com/TheAlgorithms/Python/blob/master/strings/jaro_winkler.py)
+ * [Knuth Morris Pratt](https://github.com/TheAlgorithms/Python/blob/master/strings/knuth_morris_pratt.py)
+ * [Levenshtein Distance](https://github.com/TheAlgorithms/Python/blob/master/strings/levenshtein_distance.py)
+ * [Lower](https://github.com/TheAlgorithms/Python/blob/master/strings/lower.py)
+ * [Manacher](https://github.com/TheAlgorithms/Python/blob/master/strings/manacher.py)
+ * [Min Cost String Conversion](https://github.com/TheAlgorithms/Python/blob/master/strings/min_cost_string_conversion.py)
+ * [Naive String Search](https://github.com/TheAlgorithms/Python/blob/master/strings/naive_string_search.py)
+ * [Prefix Function](https://github.com/TheAlgorithms/Python/blob/master/strings/prefix_function.py)
+ * [Rabin Karp](https://github.com/TheAlgorithms/Python/blob/master/strings/rabin_karp.py)
+ * [Remove Duplicate](https://github.com/TheAlgorithms/Python/blob/master/strings/remove_duplicate.py)
+ * [Reverse Letters](https://github.com/TheAlgorithms/Python/blob/master/strings/reverse_letters.py)
+ * [Reverse Words](https://github.com/TheAlgorithms/Python/blob/master/strings/reverse_words.py)
+ * [Split](https://github.com/TheAlgorithms/Python/blob/master/strings/split.py)
+ * [Swap Case](https://github.com/TheAlgorithms/Python/blob/master/strings/swap_case.py)
+ * [Upper](https://github.com/TheAlgorithms/Python/blob/master/strings/upper.py)
+ * [Word Occurrence](https://github.com/TheAlgorithms/Python/blob/master/strings/word_occurrence.py)
+ * [Z Function](https://github.com/TheAlgorithms/Python/blob/master/strings/z_function.py)
+
+## Traversals
+ * [Binary Tree Traversals](https://github.com/TheAlgorithms/Python/blob/master/traversals/binary_tree_traversals.py)
+
+## Web Programming
+ * [Co2 Emission](https://github.com/TheAlgorithms/Python/blob/master/web_programming/co2_emission.py)
+ * [Covid Stats Via Xpath](https://github.com/TheAlgorithms/Python/blob/master/web_programming/covid_stats_via_xpath.py)
+ * [Crawl Google Results](https://github.com/TheAlgorithms/Python/blob/master/web_programming/crawl_google_results.py)
+ * [Crawl Google Scholar Citation](https://github.com/TheAlgorithms/Python/blob/master/web_programming/crawl_google_scholar_citation.py)
+ * [Currency Converter](https://github.com/TheAlgorithms/Python/blob/master/web_programming/currency_converter.py)
+ * [Current Stock Price](https://github.com/TheAlgorithms/Python/blob/master/web_programming/current_stock_price.py)
+ * [Current Weather](https://github.com/TheAlgorithms/Python/blob/master/web_programming/current_weather.py)
+ * [Daily Horoscope](https://github.com/TheAlgorithms/Python/blob/master/web_programming/daily_horoscope.py)
+ * [Emails From Url](https://github.com/TheAlgorithms/Python/blob/master/web_programming/emails_from_url.py)
+ * [Fetch Bbc News](https://github.com/TheAlgorithms/Python/blob/master/web_programming/fetch_bbc_news.py)
+ * [Fetch Github Info](https://github.com/TheAlgorithms/Python/blob/master/web_programming/fetch_github_info.py)
+ * [Fetch Jobs](https://github.com/TheAlgorithms/Python/blob/master/web_programming/fetch_jobs.py)
+ * [Get Imdb Top 250 Movies Csv](https://github.com/TheAlgorithms/Python/blob/master/web_programming/get_imdb_top_250_movies_csv.py)
+ * [Get Imdbtop](https://github.com/TheAlgorithms/Python/blob/master/web_programming/get_imdbtop.py)
+ * [Instagram Crawler](https://github.com/TheAlgorithms/Python/blob/master/web_programming/instagram_crawler.py)
+ * [Instagram Pic](https://github.com/TheAlgorithms/Python/blob/master/web_programming/instagram_pic.py)
+ * [Instagram Video](https://github.com/TheAlgorithms/Python/blob/master/web_programming/instagram_video.py)
+ * [Recaptcha Verification](https://github.com/TheAlgorithms/Python/blob/master/web_programming/recaptcha_verification.py)
+ * [Slack Message](https://github.com/TheAlgorithms/Python/blob/master/web_programming/slack_message.py)
+ * [Test Fetch Github Info](https://github.com/TheAlgorithms/Python/blob/master/web_programming/test_fetch_github_info.py)
+ * [World Covid19 Stats](https://github.com/TheAlgorithms/Python/blob/master/web_programming/world_covid19_stats.py)
diff --git a/License b/LICENSE.md
similarity index 96%
rename from License
rename to LICENSE.md
index c84ae570c084..3b7951527ab3 100644
--- a/License
+++ b/LICENSE.md
@@ -1,6 +1,6 @@
MIT License
-Copyright (c) 2016 The Algorithms
+Copyright (c) 2020 The Algorithms
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/README.md b/README.md
index 1e43deb6bdef..f81031b53ebb 100644
--- a/README.md
+++ b/README.md
@@ -1,355 +1,27 @@
-# The Algorithms - Python
-[](https://www.paypal.com/cgi-bin/webscr?cmd=_s-xclick&hosted_button_id=JP3BLXA6KMDGW)
-
+# The Algorithms - Python
+[](https://gitpod.io/#https://github.com/TheAlgorithms/Python)
+[](https://gitter.im/TheAlgorithms)
+[](https://github.com/TheAlgorithms/Python/actions)
+[](https://lgtm.com/projects/g/TheAlgorithms/Python/alerts)
+[](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md)
+[](https://www.paypal.me/TheAlgorithms/100)
+
+[](https://github.com/pre-commit/pre-commit)
+[](https://github.com/psf/black)
+
### All algorithms implemented in Python (for education)
-These implementations are for demonstration purposes. They are less efficient than the implementations in the Python standard library.
-
-## Sorting Algorithms
-
-
-### Bubble Sort
-![alt text][bubble-image]
-
-**Bubble sort**, sometimes referred to as *sinking sort*, is a simple sorting algorithm that repeatedly steps through the list to be sorted, compares each pair of adjacent items and swaps them if they are in the wrong order. The pass through the list is repeated until no swaps are needed, which indicates that the list is sorted.
-
-__Properties__
-* Worst case performance O(n2)
-* Best case performance O(n)
-* Average case performance O(n2)
-
-###### Source: [Wikipedia][bubble-wiki]
-###### View the algorithm in [action][bubble-toptal]
-
-### Bucket
-![alt text][bucket-image-1]
-![alt text][bucket-image-2]
-
-**Bucket sort**, or _bin sort_, is a sorting algorithm that works by distributing the elements of an array into a number of buckets. Each bucket is then sorted individually, either using a different sorting algorithm, or by recursively applying the bucket sorting algorithm.
-
-__Properties__
-* Worst case performance O(n2)
-* Best case performance O(n+k)
-* Average case performance O(n+k)
-
-###### Source: [Wikipedia][bucket-wiki]
-
-
-### Cocktail shaker
-![alt text][cocktail-shaker-image]
-
-**Cocktail shaker sort**, also known as _bidirectional bubble sort_, _cocktail sort_, _shaker sort_ (which can also refer to a variant of _selection sort_), _ripple sort_, _shuffle sort_, or _shuttle sort_, is a variation of bubble sort that is both a stable sorting algorithm and a comparison sort. The algorithm differs from a bubble sort in that it sorts in both directions on each pass through the list.
-
-__Properties__
-* Worst case performance O(n2)
-* Best case performance O(n)
-* Average case performance O(n2)
-
-###### Source: [Wikipedia][cocktail-shaker-wiki]
-
-
-### Insertion Sort
-![alt text][insertion-image]
-
-**Insertion sort** is a simple sorting algorithm that builds the final sorted array (or list) one item at a time. It is much less efficient on *large* lists than more advanced algorithms such as quicksort, heapsort, or merge sort.
-
-__Properties__
-* Worst case performance O(n2)
-* Best case performance O(n)
-* Average case performance O(n2)
-
-###### Source: [Wikipedia][insertion-wiki]
-###### View the algorithm in [action][insertion-toptal]
-
-
-### Merge Sort
-![alt text][merge-image]
-
-**Merge sort** (also commonly spelled *mergesort*) is an efficient, general-purpose, comparison-based sorting algorithm. Most implementations produce a stable sort, which means that the implementation preserves the input order of equal elements in the sorted output. Mergesort is a divide and conquer algorithm that was invented by John von Neumann in 1945.
-
-__Properties__
-* Worst case performance O(n log n)
-* Best case performance O(n log n)
-* Average case performance O(n log n)
-
-###### Source: [Wikipedia][merge-wiki]
-###### View the algorithm in [action][merge-toptal]
-
-### Quick
-![alt text][quick-image]
-
-**Quicksort** (sometimes called *partition-exchange sort*) is an efficient sorting algorithm, serving as a systematic method for placing the elements of an array in order.
-
-__Properties__
-* Worst case performance O(n2)
-* Best case performance O(*n* log *n*) or O(n) with three-way partition
-* Average case performance O(*n* log *n*)
-
-###### Source: [Wikipedia][quick-wiki]
-###### View the algorithm in [action][quick-toptal]
-
-
-### Heap
-![alt text][heapsort-image]
-
-**Heapsort** is a _comparison-based_ sorting algorithm. It can be thought of as an improved selection sort. It divides its input into a sorted and an unsorted region, and it iteratively shrinks the unsorted region by extracting the largest element and moving that to the sorted region.
-
-__Properties__
-* Worst case performance O(*n* log *n*)
-* Best case performance O(*n* log *n*)
-* Average case performance O(*n* log *n*)
-
-###### Source: [Wikipedia][heap-wiki]
-###### View the algorithm in [action](https://www.toptal.com/developers/sorting-algorithms/heap-sort)
-
-
-### Radix
-
-From [Wikipedia][radix-wiki]: Radix sort is a non-comparative integer sorting algorithm that sorts data with integer keys by grouping keys by the individual digits which share the same significant position and value.
-
-__Properties__
-* Worst case performance O(wn)
-* Best case performance O(wn)
-* Average case performance O(wn)
-
-###### Source: [Wikipedia][radix-wiki]
-
-
-### Selection
-![alt text][selection-image]
-
-**Selection sort** is an algorithm that divides the input list into two parts: the sublist of items already sorted, which is built up from left to right at the front (left) of the list, and the sublist of items remaining to be sorted that occupy the rest of the list. Initially, the sorted sublist is empty and the unsorted sublist is the entire input list. The algorithm proceeds by finding the smallest (or largest, depending on sorting order) element in the unsorted sublist, exchanging (swapping) it with the leftmost unsorted element (putting it in sorted order), and moving the sublist boundaries one element to the right.
-
-__Properties__
-* Worst case performance O(n2)
-* Best case performance O(n2)
-* Average case performance O(n2)
-
-###### Source: [Wikipedia][selection-wiki]
-###### View the algorithm in [action][selection-toptal]
-
-
-### Shell
-![alt text][shell-image]
-
-**Shellsort** is a generalization of *insertion sort* that allows the exchange of items that are far apart. The idea is to arrange the list of elements so that, starting anywhere, considering every nth element gives a sorted list. Such a list is said to be h-sorted. Equivalently, it can be thought of as h interleaved lists, each individually sorted.
-
-__Properties__
-* Worst case performance O(*n*log2*n*)
-* Best case performance O(*n* log *n*)
-* Average case performance depends on gap sequence
-
-###### Source: [Wikipedia][shell-wiki]
-###### View the algorithm in [action][shell-toptal]
-
-
-### Topological
-
-From [Wikipedia][topological-wiki]: **Topological sort**, or _topological ordering of a directed graph_ is a linear ordering of its vertices such that for every directed edge _uv_ from vertex _u_ to vertex _v_, _u_ comes before _v_ in the ordering. For instance, the vertices of the graph may represent tasks to be performed, and the edges may represent constraints that one task must be performed before another; in this application, a topological ordering is just a valid sequence for the tasks. A topological ordering is possible if and only if the graph has no directed cycles, that is, if it is a _directed acyclic graph_ (DAG). Any DAG has at least one topological ordering, and algorithms are known for constructing a topological ordering of any DAG in linear time.
-
-### Time-Complexity Graphs
-
-Comparing the complexity of sorting algorithms (*Bubble Sort*, *Insertion Sort*, *Selection Sort*)
-
-
-
-Comparing the sorting algorithms:
- -Quicksort is a very fast algorithm but can be pretty tricky to implement
 -Bubble sort is a slow algorithm but is very easy to implement. To sort small sets of data, bubble sort may be a better option since it can be implemented quickly, but for larger datasets, the speedup from quicksort might be worth the trouble of implementing the algorithm.
-
-----------------------------------------------------------------------------------
-
-## Search Algorithms
-
-### Linear
-![alt text][linear-image]
-
-**Linear search** or *sequential search* is a method for finding a target value within a list. It sequentially checks each element of the list for the target value until a match is found or until all the elements have been searched. Linear search runs in at worst linear time and makes at most n comparisons, where n is the length of the list.
-
-__Properties__
-* Worst case performance O(n)
-* Best case performance O(1)
-* Average case performance O(n)
-* Worst case space complexity O(1) iterative
-
-###### Source: [Wikipedia][linear-wiki]
-
-
-### Binary
-![alt text][binary-image]
-
-**Binary search**, also known as *half-interval search* or *logarithmic search*, is a search algorithm that finds the position of a target value within a sorted array. It compares the target value to the middle element of the array; if they are unequal, the half in which the target cannot lie is eliminated and the search continues on the remaining half until it is successful.
-
-__Properties__
-* Worst case performance O(log n)
-* Best case performance O(1)
-* Average case performance O(log n)
-* Worst case space complexity O(1)
-
-###### Source: [Wikipedia][binary-wiki]
-
-
-## Interpolation
-**Interpolation search** is an algorithm for searching for a key in an array that has been ordered by numerical values assigned to the keys (key values). It was first described by W. W. Peterson in 1957. Interpolation search resembles the method by which people search a telephone directory for a name (the key value by which the book's entries are ordered): in each step the algorithm calculates where in the remaining search space the sought item might be, based on the key values at the bounds of the search space and the value of the sought key, usually via a linear interpolation. The key value actually found at this estimated position is then compared to the key value being sought. If it is not equal, then depending on the comparison, the remaining search space is reduced to the part before or after the estimated position. This method will only work if calculations on the size of differences between key values are sensible.
-
-By comparison, binary search always chooses the middle of the remaining search space, discarding one half or the other, depending on the comparison between the key found at the estimated position and the key sought — it does not require numerical values for the keys, just a total order on them. The remaining search space is reduced to the part before or after the estimated position. The linear search uses equality only as it compares elements one-by-one from the start, ignoring any sorting.
-
-On average the interpolation search makes about log(log(n)) comparisons (if the elements are uniformly distributed), where n is the number of elements to be searched. In the worst case (for instance where the numerical values of the keys increase exponentially) it can make up to O(n) comparisons.
-
-In interpolation-sequential search, interpolation is used to find an item near the one being searched for, then linear search is used to find the exact item.
-
-###### Source: [Wikipedia][interpolation-wiki]
-
-
-## Jump Search
-**Jump search** or _block search_ refers to a search algorithm for ordered lists. It works by first checking all items L[km], where k ∈ ℕ and m is the block size, until an item is found that is larger than the search key. To find the exact position of the search key in the list a linear search is performed on the sublist L[(k-1)m, km].
-
-The optimal value of m is √n, where n is the length of the list L. Because both steps of the algorithm look at, at most, √n items the algorithm runs in O(√n) time. This is better than a linear search, but worse than a binary search. The advantage over the latter is that a jump search only needs to jump backwards once, while a binary can jump backwards up to log n times. This can be important if jumping backwards takes significantly more time than jumping forward.
-
-The algorithm can be modified by performing multiple levels of jump search on the sublists, before finally performing the linear search. For a k-level jump search the optimum block size m_l for the l-th level (counting from 1) is n^((k-l)/k). The modified algorithm will perform k backward jumps and runs in O(k·n^(1/(k+1))) time.
-
-###### Source: [Wikipedia][jump-wiki]
-
-
-## Quick Select
-![alt text][QuickSelect-image]
-
-**Quick Select** is a selection algorithm to find the kth smallest element in an unordered list. It is related to the quicksort sorting algorithm. Like quicksort, it was developed by Tony Hoare, and thus is also known as Hoare's selection algorithm.[1] Like quicksort, it is efficient in practice and has good average-case performance, but has poor worst-case performance. Quickselect and its variants are the selection algorithms most often used in efficient real-world implementations.
-
-Quickselect uses the same overall approach as quicksort, choosing one element as a pivot and partitioning the data in two based on the pivot, accordingly as less than or greater than the pivot. However, instead of recursing into both sides, as in quicksort, quickselect only recurses into one side – the side with the element it is searching for. This reduces the average complexity from O(n log n) to O(n), with a worst case of O(n2).
-
-As with quicksort, quickselect is generally implemented as an in-place algorithm, and beyond selecting the k'th element, it also partially sorts the data. See selection algorithm for further discussion of the connection with sorting.
-
-###### Source: [Wikipedia][quick-wiki]
-
-
-## Tabu
-**Tabu search** uses a local or neighborhood search procedure to iteratively move from one potential solution x to an improved solution x' in the neighborhood of x, until some stopping criterion has been satisfied (generally, an attempt limit or a score threshold). Local search procedures often become stuck in poor-scoring areas or areas where scores plateau. In order to avoid these pitfalls and explore regions of the search space that would be left unexplored by other local search procedures, tabu search carefully explores the neighborhood of each solution as the search progresses. The solutions admitted to the new neighborhood, N*(x), are determined through the use of memory structures. Using these memory structures, the search progresses by iteratively moving from the current solution x to an improved solution x' in N*(x).
-
-These memory structures form what is known as the tabu list, a set of rules and banned solutions used to filter which solutions will be admitted to the neighborhood N*(x) to be explored by the search. In its simplest form, a tabu list is a short-term set of the solutions that have been visited in the recent past (less than n iterations ago, where n is the number of previous solutions to be stored — this is also called the tabu tenure). More commonly, a tabu list consists of solutions that have changed by the process of moving from one solution to another. It is convenient, for ease of description, to understand a “solution” to be coded and represented by such attributes.
-
-###### Source: [Wikipedia][tabu-wiki]
-
-----------------------------------------------------------------------------------------------------------------------
-
-## Ciphers
-
-### Caesar
-![alt text][caesar]
-
-**Caesar cipher**, also known as _Caesar's cipher_, the _shift cipher_, _Caesar's code_ or _Caesar shift_, is one of the simplest and most widely known encryption techniques.
-It is **a type of substitution cipher** in which each letter in the plaintext is replaced by a letter some fixed number of positions down the alphabet. For example, with a left shift of 3, D would be replaced by A, E would become B, and so on.
-The method is named after **Julius Caesar**, who used it in his private correspondence.
-The encryption step performed by a Caesar cipher is often incorporated as part of more complex schemes, such as the Vigenère cipher, and still has modern application in the ROT13 system. As with all single-alphabet substitution ciphers, the Caesar cipher is easily broken and in modern practice offers essentially no communication security.
-
-###### Source: [Wikipedia](https://en.wikipedia.org/wiki/Caesar_cipher)
-
-
-### Vigenère
-
-**Vigenère cipher** is a method of encrypting alphabetic text by using a series of **interwoven Caesar ciphers** based on the letters of a keyword. It is **a form of polyalphabetic substitution**.
-The Vigenère cipher has been reinvented many times. The method was originally described by Giovan Battista Bellaso in his 1553 book La cifra del. Sig. Giovan Battista Bellaso; however, the scheme was later misattributed to Blaise de Vigenère in the 19th century, and is now widely known as the "Vigenère cipher".
-Though the cipher is easy to understand and implement, for three centuries it resisted all attempts to break it; this earned it the description **le chiffre indéchiffrable**(French for 'the indecipherable cipher').
-Many people have tried to implement encryption schemes that are essentially Vigenère ciphers. Friedrich Kasiski was the first to publish a general method of deciphering a Vigenère cipher in 1863.
-
-###### Source: [Wikipedia](https://en.wikipedia.org/wiki/Vigen%C3%A8re_cipher)
-
-
-### Transposition
-
-**Transposition cipher** is a method of encryption by which the positions held by units of *plaintext* (which are commonly characters or groups of characters) are shifted according to a regular system, so that the *ciphertext* constitutes a permutation of the plaintext. That is, the order of the units is changed (the plaintext is reordered).
-
-Mathematically a bijective function is used on the characters' positions to encrypt and an inverse function to decrypt.
-
-###### Source: [Wikipedia](https://en.wikipedia.org/wiki/Transposition_cipher)
-
-
-### RSA (Rivest–Shamir–Adleman)
-**RSA** _(Rivest–Shamir–Adleman)_ is one of the first public-key cryptosystems and is widely used for secure data transmission. In such a cryptosystem, the encryption key is public and it is different from the decryption key which is kept secret (private). In RSA, this asymmetry is based on the practical difficulty of the factorization of the product of two large prime numbers, the "factoring problem". The acronym RSA is made of the initial letters of the surnames of Ron Rivest, Adi Shamir, and Leonard Adleman, who first publicly described the algorithm in 1978. Clifford Cocks, an English mathematician working for the British intelligence agency Government Communications Headquarters (GCHQ), had developed an equivalent system in 1973, but this was not declassified until 1997.[1]
-
-A user of RSA creates and then publishes a public key based on two large prime numbers, along with an auxiliary value. The prime numbers must be kept secret. Anyone can use the public key to encrypt a message, but with currently published methods, and if the public key is large enough, only someone with knowledge of the prime numbers can decode the message feasibly.[2] Breaking RSA encryption is known as the RSA problem. Whether it is as difficult as the factoring problem remains an open question.
-
-###### Source: [Wikipedia](https://en.wikipedia.org/wiki/RSA_(cryptosystem))
-
-
-## ROT13
-![alt text][ROT13-image]
-
-**ROT13** ("rotate by 13 places", sometimes hyphenated _ROT-13_) is a simple letter substitution cipher that replaces a letter with the 13th letter after it, in the alphabet. ROT13 is a special case of the Caesar cipher which was developed in ancient Rome.
-
-Because there are 26 letters (2×13) in the basic Latin alphabet, ROT13 is its own inverse; that is, to undo ROT13, the same algorithm is applied, so the same action can be used for encoding and decoding. The algorithm provides virtually no cryptographic security, and is often cited as a canonical example of weak encryption.[1]
-
-###### Source: [Wikipedia](https://en.wikipedia.org/wiki/ROT13)
-
-
-## XOR
-**XOR cipher** is a simple type of additive cipher,[1] an encryption algorithm that operates according to the principles:
-
-A {\displaystyle \oplus } \oplus 0 = A,
-A {\displaystyle \oplus } \oplus A = 0,
-(A {\displaystyle \oplus } \oplus B) {\displaystyle \oplus } \oplus C = A {\displaystyle \oplus } \oplus (B {\displaystyle \oplus } \oplus C),
-(B {\displaystyle \oplus } \oplus A) {\displaystyle \oplus } \oplus A = B {\displaystyle \oplus } \oplus 0 = B,
-where {\displaystyle \oplus } \oplus denotes the exclusive disjunction (XOR) operation. This operation is sometimes called modulus 2 addition (or subtraction, which is identical).[2] With this logic, a string of text can be encrypted by applying the bitwise XOR operator to every character using a given key. To decrypt the output, merely reapplying the XOR function with the key will remove the cipher.
-
-###### Source: [Wikipedia](https://en.wikipedia.org/wiki/XOR_cipher)
-
-
-[bubble-toptal]: https://www.toptal.com/developers/sorting-algorithms/bubble-sort
-[bubble-wiki]: https://en.wikipedia.org/wiki/Bubble_sort
-[bubble-image]: https://upload.wikimedia.org/wikipedia/commons/thumb/8/83/Bubblesort-edited-color.svg/220px-Bubblesort-edited-color.svg.png "Bubble Sort"
-
-[bucket-wiki]: https://en.wikipedia.org/wiki/Bucket_sort
-[bucket-image-1]: https://upload.wikimedia.org/wikipedia/commons/thumb/6/61/Bucket_sort_1.svg/311px-Bucket_sort_1.svg.png "Bucket Sort"
-[bucket-image-2]: https://upload.wikimedia.org/wikipedia/commons/thumb/e/e3/Bucket_sort_2.svg/311px-Bucket_sort_2.svg.png "Bucket Sort"
-
-[cocktail-shaker-wiki]: https://en.wikipedia.org/wiki/Cocktail_shaker_sort
-[cocktail-shaker-image]: https://upload.wikimedia.org/wikipedia/commons/e/ef/Sorting_shaker_sort_anim.gif "Cocktail Shaker Sort"
-
-[insertion-toptal]: https://www.toptal.com/developers/sorting-algorithms/insertion-sort
-[insertion-wiki]: https://en.wikipedia.org/wiki/Insertion_sort
-[insertion-image]: https://upload.wikimedia.org/wikipedia/commons/7/7e/Insertionsort-edited.png "Insertion Sort"
-
-[quick-toptal]: https://www.toptal.com/developers/sorting-algorithms/quick-sort
-[quick-wiki]: https://en.wikipedia.org/wiki/Quicksort
-[quick-image]: https://upload.wikimedia.org/wikipedia/commons/6/6a/Sorting_quicksort_anim.gif "Quick Sort"
-
-[heapsort-image]: https://upload.wikimedia.org/wikipedia/commons/4/4d/Heapsort-example.gif "Heap Sort"
-[heap-wiki]: https://en.wikipedia.org/wiki/Heapsort
-
-[radix-wiki]: https://en.wikipedia.org/wiki/Radix_sort
-
-[merge-toptal]: https://www.toptal.com/developers/sorting-algorithms/merge-sort
-[merge-wiki]: https://en.wikipedia.org/wiki/Merge_sort
-[merge-image]: https://upload.wikimedia.org/wikipedia/commons/c/cc/Merge-sort-example-300px.gif "Merge Sort"
-
-[selection-toptal]: https://www.toptal.com/developers/sorting-algorithms/selection-sort
-[selection-wiki]: https://en.wikipedia.org/wiki/Selection_sort
-[selection-image]: https://upload.wikimedia.org/wikipedia/commons/thumb/b/b0/Selection_sort_animation.gif/250px-Selection_sort_animation.gif "Selection Sort Sort"
-
-[shell-toptal]: https://www.toptal.com/developers/sorting-algorithms/shell-sort
-[shell-wiki]: https://en.wikipedia.org/wiki/Shellsort
-[shell-image]: https://upload.wikimedia.org/wikipedia/commons/d/d8/Sorting_shellsort_anim.gif "Shell Sort"
-
-[topological-wiki]: https://en.wikipedia.org/wiki/Topological_sorting
-
-[linear-wiki]: https://en.wikipedia.org/wiki/Linear_search
-[linear-image]: http://www.tutorialspoint.com/data_structures_algorithms/images/linear_search.gif "Linear Search"
-
-[binary-wiki]: https://en.wikipedia.org/wiki/Binary_search_algorithm
-[binary-image]: https://upload.wikimedia.org/wikipedia/commons/f/f7/Binary_search_into_array.png "Binary Search"
-
-
-[interpolation-wiki]: https://en.wikipedia.org/wiki/Interpolation_search
+These implementations are for learning purposes only. Therefore they may be less efficient than the implementations in the Python standard library.
-[jump-wiki]: https://en.wikipedia.org/wiki/Jump_search
+## Contribution Guidelines
-[quick-wiki]: https://en.wikipedia.org/wiki/Quickselect
+Read our [Contribution Guidelines](CONTRIBUTING.md) before you contribute.
-[tabu-wiki]: https://en.wikipedia.org/wiki/Tabu_search
+## Community Channel
-[ROT13-image]: https://upload.wikimedia.org/wikipedia/commons/3/33/ROT13_table_with_example.svg "ROT13"
+We're on [Gitter](https://gitter.im/TheAlgorithms)! Please join us.
-[JumpSearch-image]: https://i1.wp.com/theoryofprogramming.com/wp-content/uploads/2016/11/jump-search-1.jpg "Jump Search"
+## List of Algorithms
-[QuickSelect-image]: https://upload.wikimedia.org/wikipedia/commons/0/04/Selecting_quickselect_frames.gif "Quick Select"
+See our [directory](DIRECTORY.md).
diff --git a/data_structures/union_find/__init__.py b/arithmetic_analysis/__init__.py
similarity index 100%
rename from data_structures/union_find/__init__.py
rename to arithmetic_analysis/__init__.py
diff --git a/arithmetic_analysis/bisection.py b/arithmetic_analysis/bisection.py
index c81fa84f81e1..0ef691678702 100644
--- a/arithmetic_analysis/bisection.py
+++ b/arithmetic_analysis/bisection.py
@@ -1,33 +1,55 @@
-import math
+from typing import Callable
-def bisection(function, a, b): # finds where the function becomes 0 in [a,b] using bolzano
-
- start = a
- end = b
+def bisection(function: Callable[[float], float], a: float, b: float) -> float:
+ """
+    finds where function becomes 0 in [a,b] using Bolzano's theorem
+ >>> bisection(lambda x: x ** 3 - 1, -5, 5)
+ 1.0000000149011612
+ >>> bisection(lambda x: x ** 3 - 1, 2, 1000)
+ Traceback (most recent call last):
+ ...
+ ValueError: could not find root in given interval.
+ >>> bisection(lambda x: x ** 2 - 4 * x + 3, 0, 2)
+ 1.0
+ >>> bisection(lambda x: x ** 2 - 4 * x + 3, 2, 4)
+ 3.0
+ >>> bisection(lambda x: x ** 2 - 4 * x + 3, 4, 1000)
+ Traceback (most recent call last):
+ ...
+ ValueError: could not find root in given interval.
+ """
+ start: float = a
+ end: float = b
if function(a) == 0: # one of the a or b is a root for the function
return a
elif function(b) == 0:
return b
- elif function(a) * function(b) > 0: # if none of these are root and they are both positive or negative,
- # then his algorithm can't find the root
- print("couldn't find root in [a,b]")
- return
+ elif (
+ function(a) * function(b) > 0
+ ): # if none of these are root and they are both positive or negative,
+ # then this algorithm can't find the root
+ raise ValueError("could not find root in given interval.")
else:
- mid = (start + end) / 2
- while abs(start - mid) > 10**-7: # until we achieve precise equals to 10^-7
+ mid: float = start + (end - start) / 2.0
+        while abs(start - mid) > 10 ** -7:  # iterate until precision of 10^-7 is reached
if function(mid) == 0:
return mid
elif function(mid) * function(start) < 0:
end = mid
else:
start = mid
- mid = (start + end) / 2
+ mid = start + (end - start) / 2.0
return mid
-def f(x):
- return math.pow(x, 3) - 2*x - 5
+def f(x: float) -> float:
+ return x ** 3 - 2 * x - 5
+
if __name__ == "__main__":
print(bisection(f, 1, 1000))
+
+ import doctest
+
+ doctest.testmod()
diff --git a/arithmetic_analysis/gaussian_elimination.py b/arithmetic_analysis/gaussian_elimination.py
new file mode 100644
index 000000000000..51207686c12a
--- /dev/null
+++ b/arithmetic_analysis/gaussian_elimination.py
@@ -0,0 +1,83 @@
+"""
+Gaussian elimination method for solving a system of linear equations.
+Gaussian elimination - https://en.wikipedia.org/wiki/Gaussian_elimination
+"""
+
+
+import numpy as np
+
+
+def retroactive_resolution(coefficients: np.matrix, vector: np.array) -> np.array:
+ """
+ This function performs a retroactive linear system resolution
+ for triangular matrix
+
+ Examples:
+ 2x1 + 2x2 - 1x3 = 5 2x1 + 2x2 = -1
+ 0x1 - 2x2 - 1x3 = -7 0x1 - 2x2 = -1
+ 0x1 + 0x2 + 5x3 = 15
+ >>> gaussian_elimination([[2, 2, -1], [0, -2, -1], [0, 0, 5]], [[5], [-7], [15]])
+ array([[2.],
+ [2.],
+ [3.]])
+ >>> gaussian_elimination([[2, 2], [0, -2]], [[-1], [-1]])
+ array([[-1. ],
+ [ 0.5]])
+ """
+
+ rows, columns = np.shape(coefficients)
+
+ x = np.zeros((rows, 1), dtype=float)
+ for row in reversed(range(rows)):
+ sum = 0
+ for col in range(row + 1, columns):
+ sum += coefficients[row, col] * x[col]
+
+ x[row, 0] = (vector[row] - sum) / coefficients[row, row]
+
+ return x
+
+
+def gaussian_elimination(coefficients: np.matrix, vector: np.array) -> np.array:
+ """
+ This function performs Gaussian elimination method
+
+ Examples:
+ 1x1 - 4x2 - 2x3 = -2 1x1 + 2x2 = 5
+ 5x1 + 2x2 - 2x3 = -3 5x1 + 2x2 = 5
+ 1x1 - 1x2 + 0x3 = 4
+ >>> gaussian_elimination([[1, -4, -2], [5, 2, -2], [1, -1, 0]], [[-2], [-3], [4]])
+ array([[ 2.3 ],
+ [-1.7 ],
+ [ 5.55]])
+ >>> gaussian_elimination([[1, 2], [5, 2]], [[5], [5]])
+ array([[0. ],
+ [2.5]])
+ """
+    # coefficients must be a square matrix so we need to check first
+ rows, columns = np.shape(coefficients)
+ if rows != columns:
+ return []
+
+ # augmented matrix
+ augmented_mat = np.concatenate((coefficients, vector), axis=1)
+ augmented_mat = augmented_mat.astype("float64")
+
+ # scale the matrix leaving it triangular
+ for row in range(rows - 1):
+ pivot = augmented_mat[row, row]
+ for col in range(row + 1, columns):
+ factor = augmented_mat[col, row] / pivot
+ augmented_mat[col, :] -= factor * augmented_mat[row, :]
+
+ x = retroactive_resolution(
+ augmented_mat[:, 0:columns], augmented_mat[:, columns : columns + 1]
+ )
+
+ return x
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/arithmetic_analysis/image_data/2D_problems.jpg b/arithmetic_analysis/image_data/2D_problems.jpg
new file mode 100644
index 000000000000..8887cf641685
Binary files /dev/null and b/arithmetic_analysis/image_data/2D_problems.jpg differ
diff --git a/arithmetic_analysis/image_data/2D_problems_1.jpg b/arithmetic_analysis/image_data/2D_problems_1.jpg
new file mode 100644
index 000000000000..aa9f45362014
Binary files /dev/null and b/arithmetic_analysis/image_data/2D_problems_1.jpg differ
diff --git a/arithmetic_analysis/image_data/__init__.py b/arithmetic_analysis/image_data/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/arithmetic_analysis/in_static_equilibrium.py b/arithmetic_analysis/in_static_equilibrium.py
new file mode 100644
index 000000000000..9b2892151850
--- /dev/null
+++ b/arithmetic_analysis/in_static_equilibrium.py
@@ -0,0 +1,82 @@
+"""
+Checks if a system of forces is in static equilibrium.
+"""
+from typing import List
+
+from numpy import array, cos, cross, radians, sin
+
+
+def polar_force(
+ magnitude: float, angle: float, radian_mode: bool = False
+) -> List[float]:
+ """
+ Resolves force along rectangular components.
+ (force, angle) => (force_x, force_y)
+ >>> polar_force(10, 45)
+ [7.0710678118654755, 7.071067811865475]
+ >>> polar_force(10, 3.14, radian_mode=True)
+ [-9.999987317275394, 0.01592652916486828]
+ """
+ if radian_mode:
+ return [magnitude * cos(angle), magnitude * sin(angle)]
+ return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]
+
+
+def in_static_equilibrium(
+ forces: array, location: array, eps: float = 10 ** -1
+) -> bool:
+ """
+ Check if a system is in equilibrium.
+ It takes two numpy.array objects.
+ forces ==> [
+ [force1_x, force1_y],
+ [force2_x, force2_y],
+ ....]
+ location ==> [
+ [x1, y1],
+ [x2, y2],
+ ....]
+ >>> force = array([[1, 1], [-1, 2]])
+ >>> location = array([[1, 0], [10, 0]])
+ >>> in_static_equilibrium(force, location)
+ False
+ """
+ # summation of moments is zero
+ moments: array = cross(location, forces)
+ sum_moments: float = sum(moments)
+ return abs(sum_moments) < eps
+
+
+if __name__ == "__main__":
+ # Test to check if it works
+ forces = array(
+ [polar_force(718.4, 180 - 30), polar_force(879.54, 45), polar_force(100, -90)]
+ )
+
+ location = array([[0, 0], [0, 0], [0, 0]])
+
+ assert in_static_equilibrium(forces, location)
+
+ # Problem 1 in image_data/2D_problems.jpg
+ forces = array(
+ [
+ polar_force(30 * 9.81, 15),
+ polar_force(215, 180 - 45),
+ polar_force(264, 90 - 30),
+ ]
+ )
+
+ location = array([[0, 0], [0, 0], [0, 0]])
+
+ assert in_static_equilibrium(forces, location)
+
+ # Problem in image_data/2D_problems_1.jpg
+ forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
+
+ location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
+
+ assert in_static_equilibrium(forces, location)
+
+ import doctest
+
+ doctest.testmod()
diff --git a/arithmetic_analysis/intersection.py b/arithmetic_analysis/intersection.py
index 2f25f76ebd96..204dd5d8a935 100644
--- a/arithmetic_analysis/intersection.py
+++ b/arithmetic_analysis/intersection.py
@@ -1,17 +1,49 @@
import math
+from typing import Callable
-def intersection(function,x0,x1): #function is the f we want to find its root and x0 and x1 are two random starting points
- x_n = x0
- x_n1 = x1
+
+def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
+ """
+ function is the f we want to find its root
+ x0 and x1 are two random starting points
+ >>> intersection(lambda x: x ** 3 - 1, -5, 5)
+ 0.9999999999954654
+ >>> intersection(lambda x: x ** 3 - 1, 5, 5)
+ Traceback (most recent call last):
+ ...
+ ZeroDivisionError: float division by zero, could not find root
+ >>> intersection(lambda x: x ** 3 - 1, 100, 200)
+ 1.0000000000003888
+ >>> intersection(lambda x: x ** 2 - 4 * x + 3, 0, 2)
+ 0.9999999998088019
+ >>> intersection(lambda x: x ** 2 - 4 * x + 3, 2, 4)
+ 2.9999999998088023
+ >>> intersection(lambda x: x ** 2 - 4 * x + 3, 4, 1000)
+ 3.0000000001786042
+ >>> intersection(math.sin, -math.pi, math.pi)
+ 0.0
+ >>> intersection(math.cos, -math.pi, math.pi)
+ Traceback (most recent call last):
+ ...
+ ZeroDivisionError: float division by zero, could not find root
+ """
+ x_n: float = x0
+ x_n1: float = x1
while True:
- x_n2 = x_n1-(function(x_n1)/((function(x_n1)-function(x_n))/(x_n1-x_n)))
- if abs(x_n2 - x_n1) < 10**-5:
+ if x_n == x_n1 or function(x_n1) == function(x_n):
+ raise ZeroDivisionError("float division by zero, could not find root")
+ x_n2: float = x_n1 - (
+ function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
+ )
+ if abs(x_n2 - x_n1) < 10 ** -5:
return x_n2
- x_n=x_n1
- x_n1=x_n2
+ x_n = x_n1
+ x_n1 = x_n2
+
+
+def f(x: float) -> float:
+ return math.pow(x, 3) - (2 * x) - 5
-def f(x):
- return math.pow(x , 3) - (2 * x) -5
if __name__ == "__main__":
- print(intersection(f,3,3.5))
+ print(intersection(f, 3, 3.5))
diff --git a/arithmetic_analysis/lu_decomposition.py b/arithmetic_analysis/lu_decomposition.py
index f291d2dfe003..ef37d1b7b4ef 100644
--- a/arithmetic_analysis/lu_decomposition.py
+++ b/arithmetic_analysis/lu_decomposition.py
@@ -1,32 +1,64 @@
-# lower–upper (LU) decomposition - https://en.wikipedia.org/wiki/LU_decomposition
-import numpy
+"""Lower-Upper (LU) Decomposition.
-def LUDecompose (table):
+Reference:
+- https://en.wikipedia.org/wiki/LU_decomposition
+"""
+from typing import Tuple
+
+import numpy as np
+from numpy import ndarray
+
+
+def lower_upper_decomposition(table: ndarray) -> Tuple[ndarray, ndarray]:
+ """Lower-Upper (LU) Decomposition
+
+ Example:
+
+ >>> matrix = np.array([[2, -2, 1], [0, 1, 2], [5, 3, 1]])
+ >>> outcome = lower_upper_decomposition(matrix)
+ >>> outcome[0]
+ array([[1. , 0. , 0. ],
+ [0. , 1. , 0. ],
+ [2.5, 8. , 1. ]])
+ >>> outcome[1]
+ array([[ 2. , -2. , 1. ],
+ [ 0. , 1. , 2. ],
+ [ 0. , 0. , -17.5]])
+
+ >>> matrix = np.array([[2, -2, 1], [0, 1, 2]])
+ >>> lower_upper_decomposition(matrix)
+ Traceback (most recent call last):
+ ...
+ ValueError: 'table' has to be of square shaped array but got a 2x3 array:
+ [[ 2 -2 1]
+ [ 0 1 2]]
+ """
# Table that contains our data
# Table has to be a square array so we need to check first
- rows,columns=numpy.shape(table)
- L=numpy.zeros((rows,columns))
- U=numpy.zeros((rows,columns))
- if rows!=columns:
- return []
- for i in range (columns):
- for j in range(i-1):
- sum=0
- for k in range (j-1):
- sum+=L[i][k]*U[k][j]
- L[i][j]=(table[i][j]-sum)/U[j][j]
- L[i][i]=1
- for j in range(i-1,columns):
- sum1=0
- for k in range(i-1):
- sum1+=L[i][k]*U[k][j]
- U[i][j]=table[i][j]-sum1
- return L,U
+ rows, columns = np.shape(table)
+ if rows != columns:
+ raise ValueError(
+ f"'table' has to be of square shaped array but got a {rows}x{columns} "
+ + f"array:\n{table}"
+ )
+ lower = np.zeros((rows, columns))
+ upper = np.zeros((rows, columns))
+ for i in range(columns):
+ for j in range(i):
+ total = 0
+ for k in range(j):
+ total += lower[i][k] * upper[k][j]
+ lower[i][j] = (table[i][j] - total) / upper[j][j]
+ lower[i][i] = 1
+ for j in range(i, columns):
+ total = 0
+ for k in range(i):
+ total += lower[i][k] * upper[k][j]
+ upper[i][j] = table[i][j] - total
+ return lower, upper
+
if __name__ == "__main__":
- matrix =numpy.array([[2,-2,1],
- [0,1,2],
- [5,3,1]])
- L,U = LUDecompose(matrix)
- print(L)
- print(U)
+ import doctest
+
+ doctest.testmod()
diff --git a/arithmetic_analysis/newton_forward_interpolation.py b/arithmetic_analysis/newton_forward_interpolation.py
new file mode 100644
index 000000000000..66cde4b73c4f
--- /dev/null
+++ b/arithmetic_analysis/newton_forward_interpolation.py
@@ -0,0 +1,57 @@
+# https://www.geeksforgeeks.org/newton-forward-backward-interpolation/
+
+import math
+from typing import List
+
+
+# for calculating u value
+def ucal(u: float, p: int) -> float:
+ """
+ >>> ucal(1, 2)
+ 0
+ >>> ucal(1.1, 2)
+ 0.11000000000000011
+ >>> ucal(1.2, 2)
+ 0.23999999999999994
+ """
+ temp = u
+ for i in range(1, p):
+ temp = temp * (u - i)
+ return temp
+
+
+def main() -> None:
+ n = int(input("enter the numbers of values: "))
+ y: List[List[float]] = []
+ for i in range(n):
+ y.append([])
+ for i in range(n):
+ for j in range(n):
+ y[i].append(j)
+ y[i][j] = 0
+
+ print("enter the values of parameters in a list: ")
+ x = list(map(int, input().split()))
+
+ print("enter the values of corresponding parameters: ")
+ for i in range(n):
+ y[i][0] = float(input())
+
+ value = int(input("enter the value to interpolate: "))
+ u = (value - x[0]) / (x[1] - x[0])
+
+ # for calculating forward difference table
+
+ for i in range(1, n):
+ for j in range(n - i):
+ y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
+
+ summ = y[0][0]
+ for i in range(1, n):
+ summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
+
+ print(f"the value at {value} is {summ}")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/arithmetic_analysis/newton_method.py b/arithmetic_analysis/newton_method.py
index 2ed29502522e..a9a94372671e 100644
--- a/arithmetic_analysis/newton_method.py
+++ b/arithmetic_analysis/newton_method.py
@@ -1,18 +1,54 @@
+"""Newton's Method."""
+
# Newton's Method - https://en.wikipedia.org/wiki/Newton%27s_method
+from typing import Callable
+
+RealFunc = Callable[[float], float] # type alias for a real -> real function
+
+
+# function is the f(x) and derivative is the f'(x)
+def newton(
+ function: RealFunc,
+ derivative: RealFunc,
+ starting_int: int,
+) -> float:
+ """
+ >>> newton(lambda x: x ** 3 - 2 * x - 5, lambda x: 3 * x ** 2 - 2, 3)
+ 2.0945514815423474
+ >>> newton(lambda x: x ** 3 - 1, lambda x: 3 * x ** 2, -2)
+ 1.0
+ >>> newton(lambda x: x ** 3 - 1, lambda x: 3 * x ** 2, -4)
+ 1.0000000000000102
+ >>> import math
+ >>> newton(math.sin, math.cos, 1)
+ 0.0
+ >>> newton(math.sin, math.cos, 2)
+ 3.141592653589793
+ >>> newton(math.cos, lambda x: -math.sin(x), 2)
+ 1.5707963267948966
+ >>> newton(math.cos, lambda x: -math.sin(x), 0)
+ Traceback (most recent call last):
+ ...
+ ZeroDivisionError: Could not find root
+ """
+ prev_guess = float(starting_int)
+ while True:
+ try:
+ next_guess = prev_guess - function(prev_guess) / derivative(prev_guess)
+ except ZeroDivisionError:
+ raise ZeroDivisionError("Could not find root") from None
+ if abs(prev_guess - next_guess) < 10 ** -5:
+ return next_guess
+ prev_guess = next_guess
+
+
+def f(x: float) -> float:
+ return (x ** 3) - (2 * x) - 5
+
-def newton(function,function1,startingInt): #function is the f(x) and function1 is the f'(x)
- x_n=startingInt
- while True:
- x_n1=x_n-function(x_n)/function1(x_n)
- if abs(x_n-x_n1) < 10**-5:
- return x_n1
- x_n=x_n1
-
-def f(x):
- return (x**3) - (2 * x) -5
+def f1(x: float) -> float:
+ return 3 * (x ** 2) - 2
-def f1(x):
- return 3 * (x**2) -2
if __name__ == "__main__":
- print(newton(f,f1,3))
+ print(newton(f, f1, 3))
diff --git a/arithmetic_analysis/newton_raphson.py b/arithmetic_analysis/newton_raphson.py
new file mode 100644
index 000000000000..146bb0aa5adf
--- /dev/null
+++ b/arithmetic_analysis/newton_raphson.py
@@ -0,0 +1,43 @@
+# Implementing Newton Raphson method in Python
+# Author: Syed Haseeb Shah (github.com/QuantumNovice)
+# The Newton-Raphson method (also known as Newton's method) is a way to
+# quickly find a good approximation for the root of a real-valued function
+from decimal import Decimal
+from math import * # noqa: F401, F403
+from typing import Union
+
+from sympy import diff
+
+
+def newton_raphson(
+ func: str, a: Union[float, Decimal], precision: float = 10 ** -10
+) -> float:
+ """Finds root from the point 'a' onwards by Newton-Raphson method
+ >>> newton_raphson("sin(x)", 2)
+ 3.1415926536808043
+ >>> newton_raphson("x**2 - 5*x +2", 0.4)
+ 0.4384471871911695
+ >>> newton_raphson("x**2 - 5", 0.1)
+ 2.23606797749979
+ >>> newton_raphson("log(x)- 1", 2)
+ 2.718281828458938
+ """
+ x = a
+ while True:
+ x = Decimal(x) - (Decimal(eval(func)) / Decimal(eval(str(diff(func)))))
+ # This number dictates the accuracy of the answer
+ if abs(eval(func)) < precision:
+ return float(x)
+
+
+# Let's Execute
+if __name__ == "__main__":
+ # Find root of trigonometric function
+ # Find value of pi
+ print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
+ # Find root of polynomial
+ print(f"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}")
+ # Find Square Root of 5
+ print(f"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}")
+ # Exponential Roots
+ print(f"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
diff --git a/arithmetic_analysis/newton_raphson_method.py b/arithmetic_analysis/newton_raphson_method.py
deleted file mode 100644
index 5e7e2f930abc..000000000000
--- a/arithmetic_analysis/newton_raphson_method.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Implementing Newton Raphson method in Python
-# Author: Haseeb
-
-from sympy import diff
-from decimal import Decimal
-
-def NewtonRaphson(func, a):
- ''' Finds root from the point 'a' onwards by Newton-Raphson method '''
- while True:
- c = Decimal(a) - ( Decimal(eval(func)) / Decimal(eval(str(diff(func)))) )
-
- a = c
-
- # This number dictates the accuracy of the answer
- if abs(eval(func)) < 10**-15:
- return c
-
-
-# Let's Execute
-if __name__ == '__main__':
- # Find root of trigonometric function
- # Find value of pi
- print ('sin(x) = 0', NewtonRaphson('sin(x)', 2))
-
- # Find root of polynomial
- print ('x**2 - 5*x +2 = 0', NewtonRaphson('x**2 - 5*x +2', 0.4))
-
- # Find Square Root of 5
- print ('x**2 - 5 = 0', NewtonRaphson('x**2 - 5', 0.1))
-
- # Exponential Roots
- print ('exp(x) - 1 = 0', NewtonRaphson('exp(x) - 1', 0))
-
-
-
-
diff --git a/arithmetic_analysis/secant_method.py b/arithmetic_analysis/secant_method.py
new file mode 100644
index 000000000000..7eb1dd8f5c6b
--- /dev/null
+++ b/arithmetic_analysis/secant_method.py
@@ -0,0 +1,29 @@
+"""
+Implementing Secant method in Python
+Author: dimgrichr
+"""
+from math import exp
+
+
+def f(x: float) -> float:
+ """
+ >>> f(5)
+ 39.98652410600183
+ """
+ return 8 * x - 2 * exp(-x)
+
+
+def secant_method(lower_bound: float, upper_bound: float, repeats: int) -> float:
+ """
+ >>> secant_method(1, 3, 2)
+ 0.2139409276214589
+ """
+ x0 = lower_bound
+ x1 = upper_bound
+ for i in range(0, repeats):
+ x0, x1 = x1, x1 - (f(x1) * (x1 - x0)) / (f(x1) - f(x0))
+ return x1
+
+
+if __name__ == "__main__":
+ print(f"Example: {secant_method(1, 3, 2) = }")
diff --git a/backtracking/__init__.py b/backtracking/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/backtracking/all_combinations.py b/backtracking/all_combinations.py
new file mode 100644
index 000000000000..76462837ce35
--- /dev/null
+++ b/backtracking/all_combinations.py
@@ -0,0 +1,46 @@
+"""
+ In this problem, we want to determine all possible combinations of k
+ numbers out of 1 ... n. We use backtracking to solve this problem.
+ Time complexity: O(C(n,k)) which is O(n choose k) = O((n!/(k! * (n - k)!)))
+"""
+from typing import List
+
+
+def generate_all_combinations(n: int, k: int) -> List[List[int]]:
+ """
+ >>> generate_all_combinations(n=4, k=2)
+ [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]
+ """
+
+ result: List[List[int]] = []
+ create_all_state(1, n, k, [], result)
+ return result
+
+
+def create_all_state(
+ increment: int,
+ total_number: int,
+ level: int,
+ current_list: List[int],
+ total_list: List[List[int]],
+) -> None:
+ if level == 0:
+ total_list.append(current_list[:])
+ return
+
+ for i in range(increment, total_number - level + 2):
+ current_list.append(i)
+ create_all_state(i + 1, total_number, level - 1, current_list, total_list)
+ current_list.pop()
+
+
+def print_all_state(total_list: List[List[int]]) -> None:
+ for i in total_list:
+ print(*i)
+
+
+if __name__ == "__main__":
+ n = 4
+ k = 2
+ total_list = generate_all_combinations(n, k)
+ print_all_state(total_list)
diff --git a/backtracking/all_permutations.py b/backtracking/all_permutations.py
new file mode 100644
index 000000000000..a0032c5ca814
--- /dev/null
+++ b/backtracking/all_permutations.py
@@ -0,0 +1,51 @@
+"""
+ In this problem, we want to determine all possible permutations
+ of the given sequence. We use backtracking to solve this problem.
+
+ Time complexity: O(n! * n),
+ where n denotes the length of the given sequence.
+"""
+from typing import List, Union
+
+
+def generate_all_permutations(sequence: List[Union[int, str]]) -> None:
+ create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])
+
+
+def create_state_space_tree(
+ sequence: List[Union[int, str]],
+ current_sequence: List[Union[int, str]],
+ index: int,
+ index_used: List[int],
+) -> None:
+ """
+ Creates a state space tree to iterate through each branch using DFS.
+ We know that each state has exactly len(sequence) - index children.
+ It terminates when it reaches the end of the given sequence.
+ """
+
+ if index == len(sequence):
+ print(current_sequence)
+ return
+
+ for i in range(len(sequence)):
+ if not index_used[i]:
+ current_sequence.append(sequence[i])
+ index_used[i] = True
+ create_state_space_tree(sequence, current_sequence, index + 1, index_used)
+ current_sequence.pop()
+ index_used[i] = False
+
+
+"""
+remove the comment to take an input from the user
+
+print("Enter the elements")
+sequence = list(map(int, input().split()))
+"""
+
+sequence: List[Union[int, str]] = [3, 1, 2, 4]
+generate_all_permutations(sequence)
+
+sequence_2: List[Union[int, str]] = ["A", "B", "C"]
+generate_all_permutations(sequence_2)
diff --git a/backtracking/all_subsequences.py b/backtracking/all_subsequences.py
new file mode 100644
index 000000000000..99db4ea46589
--- /dev/null
+++ b/backtracking/all_subsequences.py
@@ -0,0 +1,40 @@
+"""
+In this problem, we want to determine all possible subsequences
+of the given sequence. We use backtracking to solve this problem.
+
+Time complexity: O(2^n),
+where n denotes the length of the given sequence.
+"""
+from typing import Any, List
+
+
+def generate_all_subsequences(sequence: List[Any]) -> None:
+ create_state_space_tree(sequence, [], 0)
+
+
+def create_state_space_tree(
+ sequence: List[Any], current_subsequence: List[Any], index: int
+) -> None:
+ """
+ Creates a state space tree to iterate through each branch using DFS.
+ We know that each state has exactly two children.
+ It terminates when it reaches the end of the given sequence.
+ """
+
+ if index == len(sequence):
+ print(current_subsequence)
+ return
+
+ create_state_space_tree(sequence, current_subsequence, index + 1)
+ current_subsequence.append(sequence[index])
+ create_state_space_tree(sequence, current_subsequence, index + 1)
+ current_subsequence.pop()
+
+
+if __name__ == "__main__":
+ seq: List[Any] = [3, 1, 2, 4]
+ generate_all_subsequences(seq)
+
+ seq.clear()
+ seq.extend(["A", "B", "C"])
+ generate_all_subsequences(seq)
diff --git a/backtracking/coloring.py b/backtracking/coloring.py
new file mode 100644
index 000000000000..3956b21a9182
--- /dev/null
+++ b/backtracking/coloring.py
@@ -0,0 +1,114 @@
+"""
+ Graph Coloring also called "m coloring problem"
+ consists of coloring given graph with at most m colors
+ such that no adjacent vertices are assigned same color
+
+ Wikipedia: https://en.wikipedia.org/wiki/Graph_coloring
+"""
+from typing import List
+
+
def valid_coloring(
    neighbours: List[int], colored_vertices: List[int], color: int
) -> bool:
    """
    Check the coloring constraint against every neighbour.
    Returns True when no adjacent vertex already carries ``color``,
    otherwise False.

    >>> neighbours = [0,1,0,1,0]
    >>> colored_vertices = [0, 2, 1, 2, 0]

    >>> color = 1
    >>> valid_coloring(neighbours, colored_vertices, color)
    True

    >>> color = 2
    >>> valid_coloring(neighbours, colored_vertices, color)
    False
    """
    for vertex, is_adjacent in enumerate(neighbours):
        # A conflict exists when an adjacent vertex already has this color.
        if is_adjacent == 1 and colored_vertices[vertex] == color:
            return False
    return True
+
+
def util_color(
    graph: List[List[int]], max_colors: int, colored_vertices: List[int], index: int
) -> bool:
    """
    Pseudo-Code

    Base Case:
    1. Check if coloring is complete
        1.1 If complete return True (meaning that we successfully colored graph)

    Recursive Step:
    2. Iterates over each color:
        Check if current coloring is valid:
            2.1. Color given vertex
            2.2. Do recursive call check if this coloring leads to solving problem
            2.4. if current coloring leads to solution return
            2.5. Uncolor given vertex

    >>> graph = [[0, 1, 0, 0, 0],
    ...          [1, 0, 1, 0, 1],
    ...          [0, 1, 0, 1, 0],
    ...          [0, 1, 1, 0, 0],
    ...          [0, 1, 0, 0, 0]]
    >>> max_colors = 3
    >>> colored_vertices = [0, 1, 0, 0, 0]
    >>> index = 3

    >>> util_color(graph, max_colors, colored_vertices, index)
    True

    >>> max_colors = 2
    >>> util_color(graph, max_colors, colored_vertices, index)
    False
    """

    # Base Case: every vertex from 0..index-1 has been assigned a color.
    if index == len(graph):
        return True

    # Recursive Step: try each of the max_colors colors on this vertex.
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Check whether this partial coloring extends to a complete one.
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack: undo the assignment before trying the next color.
            colored_vertices[index] = -1
    return False
+
+
def color(graph: List[List[int]], max_colors: int) -> List[int]:
    """
    Attempt to color ``graph`` with at most ``max_colors`` colors.
    Returns the list of vertex colors on success, or an empty list when
    no valid coloring exists.

    >>> graph = [[0, 1, 0, 0, 0],
    ...          [1, 0, 1, 0, 1],
    ...          [0, 1, 0, 1, 0],
    ...          [0, 1, 1, 0, 0],
    ...          [0, 1, 0, 0, 0]]

    >>> max_colors = 3
    >>> color(graph, max_colors)
    [0, 1, 0, 2, 0]

    >>> max_colors = 2
    >>> color(graph, max_colors)
    []
    """
    # -1 marks a vertex that has not been colored yet.
    colored_vertices = [-1] * len(graph)
    solvable = util_color(graph, max_colors, colored_vertices, 0)
    return colored_vertices if solvable else []
diff --git a/backtracking/hamiltonian_cycle.py b/backtracking/hamiltonian_cycle.py
new file mode 100644
index 000000000000..7be1ea350d7c
--- /dev/null
+++ b/backtracking/hamiltonian_cycle.py
@@ -0,0 +1,177 @@
+"""
+ A Hamiltonian cycle (Hamiltonian circuit) is a graph cycle
+ through a graph that visits each node exactly once.
+ Determining whether such paths and cycles exist in graphs
+ is the 'Hamiltonian path problem', which is NP-complete.
+
+ Wikipedia: https://en.wikipedia.org/wiki/Hamiltonian_path
+"""
+from typing import List
+
+
def valid_connection(
    graph: List[List[int]], next_ver: int, curr_ind: int, path: List[int]
) -> bool:
    """
    Decide whether ``next_ver`` may be appended to ``path`` at position
    ``curr_ind``: there must be an edge from the previously placed vertex
    to ``next_ver``, and ``next_ver`` must not already appear in the path.

    Case 1: Use exact graph as in main function, with initialized values
    >>> graph = [[0, 1, 0, 1, 0],
    ...          [1, 0, 1, 1, 1],
    ...          [0, 1, 0, 0, 1],
    ...          [1, 1, 0, 0, 1],
    ...          [0, 1, 1, 1, 0]]
    >>> path = [0, -1, -1, -1, -1, 0]
    >>> curr_ind = 1
    >>> next_ver = 1
    >>> valid_connection(graph, next_ver, curr_ind, path)
    True

    Case 2: Same graph, but trying to connect to node that is already in path
    >>> path = [0, 1, 2, 4, -1, 0]
    >>> curr_ind = 4
    >>> next_ver = 1
    >>> valid_connection(graph, next_ver, curr_ind, path)
    False
    """
    # 1. An edge must exist between the previously placed vertex and next_ver.
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False

    # 2. next_ver must not have been visited already.
    return next_ver not in path
+
+
def util_hamilton_cycle(graph: List[List[int]], path: List[int], curr_ind: int) -> bool:
    """
    Recursively extend ``path`` one vertex at a time (DFS with backtracking).

    Base Case: all slots filled — succeed iff an edge closes the cycle back
    to the starting vertex.
    Recursive Step: try every vertex as the next transition; remember it,
    recurse, and un-remember it (backtrack) when the branch fails.

    Case 1: Use exact graph as in main function, with initialized values
    >>> graph = [[0, 1, 0, 1, 0],
    ...          [1, 0, 1, 1, 1],
    ...          [0, 1, 0, 0, 1],
    ...          [1, 1, 0, 0, 1],
    ...          [0, 1, 1, 1, 0]]
    >>> path = [0, -1, -1, -1, -1, 0]
    >>> curr_ind = 1
    >>> util_hamilton_cycle(graph, path, curr_ind)
    True
    >>> print(path)
    [0, 1, 2, 4, 3, 0]

    Case 2: Resume from the middle of a calculation
    >>> path = [0, 1, 2, -1, -1, 0]
    >>> curr_ind = 3
    >>> util_hamilton_cycle(graph, path, curr_ind)
    True
    >>> print(path)
    [0, 1, 2, 4, 3, 0]
    """

    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step (``next_vertex`` replaces the original loop variable
    # ``next``, which shadowed the builtin of the same name)
    for next_vertex in range(len(graph)):
        if valid_connection(graph, next_vertex, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_vertex
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False
+
+
def hamilton_cycle(graph: List[List[int]], start_index: int = 0) -> List[int]:
    """
    Wrapper around ``util_hamilton_cycle``: returns a list of vertices
    forming a Hamiltonian cycle that starts and ends at ``start_index``,
    or an empty list when no such cycle exists.

    >>> graph = [[0, 1, 0, 1, 0],
    ...          [1, 0, 1, 1, 1],
    ...          [0, 1, 0, 0, 1],
    ...          [1, 1, 0, 0, 1],
    ...          [0, 1, 1, 1, 0]]
    >>> hamilton_cycle(graph)
    [0, 1, 2, 4, 3, 0]
    >>> hamilton_cycle(graph, 3)
    [3, 0, 1, 2, 4, 3]

    With edge 3-4 removed there is no Hamiltonian cycle:
    >>> graph = [[0, 1, 0, 1, 0],
    ...          [1, 0, 1, 1, 1],
    ...          [0, 1, 0, 0, 1],
    ...          [1, 1, 0, 0, 0],
    ...          [0, 1, 1, 0, 0]]
    >>> hamilton_cycle(graph, 4)
    []
    """
    # -1 marks an unvisited slot; the extra slot closes the cycle back to
    # the starting vertex.
    path = [-1] * (len(graph) + 1)
    path[0] = path[-1] = start_index
    if util_hamilton_cycle(graph, path, 1):
        return path
    return []
diff --git a/backtracking/knight_tour.py b/backtracking/knight_tour.py
new file mode 100644
index 000000000000..8e6613e07d8b
--- /dev/null
+++ b/backtracking/knight_tour.py
@@ -0,0 +1,100 @@
+# Knight Tour Intro: https://www.youtube.com/watch?v=ab_dY3dZFHM
+
+from typing import List, Tuple
+
+
def get_valid_pos(position: Tuple[int, int], n: int) -> List[Tuple[int, int]]:
    """
    Find all the valid positions a knight can move to from the current position.

    >>> get_valid_pos((1, 3), 4)
    [(2, 1), (0, 1), (3, 2)]
    """

    y, x = position
    # All eight knight moves, kept in a fixed order so results are
    # deterministic.  (The original looped with a variable that shadowed
    # the ``position`` parameter; the comprehension below avoids that.)
    candidates = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    # Keep only the moves that stay on the n x n board.
    return [(y2, x2) for y2, x2 in candidates if 0 <= y2 < n and 0 <= x2 < n]
+
+
def is_complete(board: List[List[int]]) -> bool:
    """
    Check if the board (matrix) has been completely filled with non-zero values.

    >>> is_complete([[1]])
    True

    >>> is_complete([[1, 2], [3, 0]])
    False
    """
    # Every cell of every row must already hold a (non-zero) move number.
    return all(elem != 0 for row in board for elem in row)
+
+
def open_knight_tour_helper(
    board: List[List[int]], pos: Tuple[int, int], curr: int
) -> bool:
    """
    Helper function to solve knight tour problem.

    Tries every knight move reachable from ``pos``; ``curr`` is the move
    number of the square most recently visited.  Returns True once the
    whole board has been filled.
    """

    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position

        if board[y][x] == 0:
            # Tentatively visit the square, recurse, and undo on failure.
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0

    return False
+
+
def open_knight_tour(n: int) -> List[List[int]]:
    """
    Find the solution for the knight tour problem for a board of size n. Raises
    ValueError if the tour cannot be performed for the given size.

    >>> open_knight_tour(1)
    [[1]]

    >>> open_knight_tour(2)
    Traceback (most recent call last):
        ...
    ValueError: Open Knight Tour cannot be performed on a board of size 2
    """

    # 0 marks an unvisited square.
    board = [[0 for i in range(n)] for j in range(n)]

    # Try every square as the starting point of the tour.
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    # "Kight" in the original message was a typo for "Knight".
    raise ValueError(f"Open Knight Tour cannot be performed on a board of size {n}")
+
+
if __name__ == "__main__":
    import doctest

    doctest.testmod()  # run the doctests above when executed as a script
diff --git a/backtracking/minimax.py b/backtracking/minimax.py
new file mode 100644
index 000000000000..dda29b47d6cc
--- /dev/null
+++ b/backtracking/minimax.py
@@ -0,0 +1,69 @@
+"""
+Minimax helps to achieve maximum score in a game by checking all possible moves
+depth is current depth in game tree.
+
+nodeIndex is index of current node in scores[].
+if move is of maximizer return true else false
+leaves of game tree is stored in scores[]
+height is maximum height of Game tree
+"""
+import math
+from typing import List
+
+
def minimax(
    depth: int, node_index: int, is_max: bool, scores: List[int], height: float
) -> int:
    """
    Return the optimal score reachable from ``node_index`` at ``depth``,
    with the maximizer and minimizer alternating turns.

    >>> import math
    >>> scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    >>> height = math.log(len(scores), 2)
    >>> minimax(0, 0, True, scores, height)
    65
    >>> minimax(-1, 0, True, scores, height)
    Traceback (most recent call last):
        ...
    ValueError: Depth cannot be less than 0
    >>> minimax(0, 0, True, [], 2)
    Traceback (most recent call last):
        ...
    ValueError: Scores cannot be empty
    >>> scores = [3, 5, 2, 9, 12, 5, 23, 23]
    >>> height = math.log(len(scores), 2)
    >>> minimax(0, 0, True, scores, height)
    12
    """
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")

    # A leaf of the game tree holds an actual score.
    if depth == height:
        return scores[node_index]

    # Evaluate both children with the roles swapped, then let the current
    # player pick the better one for them.
    left = minimax(depth + 1, node_index * 2, not is_max, scores, height)
    right = minimax(depth + 1, node_index * 2 + 1, not is_max, scores, height)
    return max(left, right) if is_max else min(left, right)
+
+
def main() -> None:
    """Run minimax on a sample score list and print the optimal value."""
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
+
+
if __name__ == "__main__":
    import doctest

    doctest.testmod()  # verify the doctests first
    main()  # then run the demo
diff --git a/backtracking/n_queens.py b/backtracking/n_queens.py
new file mode 100644
index 000000000000..29b8d819acf3
--- /dev/null
+++ b/backtracking/n_queens.py
@@ -0,0 +1,90 @@
+"""
+
+ The nqueens problem is of placing N queens on a N * N
+ chess board such that no queen can attack any other queens placed
+ on that chess board.
+ This means that one queen cannot have any other queen on its horizontal, vertical and
+ diagonal lines.
+
+"""
+from typing import List
+
+solution = []
+
+
def isSafe(board: List[List[int]], row: int, column: int) -> bool:
    """
    Return True when no already placed queen attacks square (row, column)
    on the partially filled ``board`` (1 marks a queen).

    Checks the full row, the full column, and the two upward diagonals —
    downward diagonals never hold queens because rows are filled top-down.
    """
    n = len(board)
    # Any queen on the same row or the same column?
    if any(board[row][i] == 1 or board[i][column] == 1 for i in range(n)):
        return False
    # Upper-left diagonal.
    if any(
        board[i][j] == 1
        for i, j in zip(range(row, -1, -1), range(column, -1, -1))
    ):
        return False
    # Upper-right diagonal.
    if any(board[i][j] == 1 for i, j in zip(range(row, -1, -1), range(column, n))):
        return False
    return True
+
+
def solve(board: List[List[int]], row: int) -> bool:
    """
    Build the state space tree row by row, backtracking whenever
    ``isSafe`` rejects a square.  Complete boards are appended to the
    module-level ``solution`` list and printed via ``printboard``.

    (The original used bare string literals inside the body as comments;
    those are no-op expression statements and are now real comments.)
    """
    if row >= len(board):
        # Row number exceeded N: the board holds a successful combination.
        # Record it and print it, followed by a blank line.
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        # Try a queen on each column of this row; the placement is undone
        # after the recursive call so the next column starts clean.
        if isSafe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False
+
+
def printboard(board: List[List[int]]) -> None:
    """Print a successful board: 'Q' marks a queen, '.' an empty square."""
    for row in board:
        # Every cell is followed by a single space, matching the original
        # cell-by-cell printing (including the trailing space per line).
        print("".join("Q " if cell == 1 else ". " for cell in row))
+
+
# n=int(input("The no. of queens"))
n = 8  # board size and number of queens
# Start from an empty n x n board (0 = empty, 1 = queen) and solve at import.
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
diff --git a/backtracking/n_queens_math.py b/backtracking/n_queens_math.py
new file mode 100644
index 000000000000..a8651c5c362e
--- /dev/null
+++ b/backtracking/n_queens_math.py
@@ -0,0 +1,158 @@
+r"""
+Problem:
+
+The n queens problem is of placing N queens on a N * N chess board such that no queen
+can attack any other queens placed on that chess board. This means that one queen
+cannot have any other queen on its horizontal, vertical and diagonal lines.
+
+Solution:
+
+To solve this problem we will use simple math. First we know the queen can move in all
+the possible ways, we can simplify it in this: vertical, horizontal, diagonal left and
+ diagonal right.
+
+We can visualize it like this:
+
+left diagonal = \
+right diagonal = /
+
+On a chessboard vertical movement could be the rows and horizontal movement could be
+the columns.
+
+In programming we can use an array, and in this array each index could be the rows and
+each value in the array could be the column. For example:
+
+ . Q . . We have this chessboard with one queen in each column and each queen
+ . . . Q can't attack to each other.
+ Q . . . The array for this example would look like this: [1, 3, 0, 2]
+ . . Q .
+
+So if we use an array and we verify that each value in the array is different to each
+other we know that at least the queens can't attack each other in horizontal and
+vertical.
+
+At this point we have that halfway completed and we will treat the chessboard as a
+Cartesian plane. Hereinafter we are going to remember basic math, so in the school we
+learned this formula:
+
+ Slope of a line:
+
+ y2 - y1
+ m = ----------
+ x2 - x1
+
This formula allows us to get the slope. For the angles 45º (right diagonal) and 135º
(left diagonal) this formula gives us m = 1, and m = -1 respectively.
+
+See::
+https://www.enotes.com/homework-help/write-equation-line-that-hits-origin-45-degree-1474860
+
+Then we have this another formula:
+
+Slope intercept:
+
+y = mx + b
+
+b is where the line crosses the Y axis (to get more information see:
+https://www.mathsisfun.com/y_intercept.html), if we change the formula to solve for b
+we would have:
+
+y - mx = b
+
+And like we already have the m values for the angles 45º and 135º, this formula would
+look like this:
+
+45º: y - (1)x = b
+45º: y - x = b
+
+135º: y - (-1)x = b
+135º: y + x = b
+
+y = row
+x = column
+
Applying these two formulas we can check if a queen in some position is being attacked
by another one, or vice versa.
+
+"""
+from typing import List
+
+
def depth_first_search(
    possible_board: List[int],
    diagonal_right_collisions: List[int],
    diagonal_left_collisions: List[int],
    boards: List[List[str]],
    n: int,
) -> None:
    """
    Depth-first search over queen placements, one row per recursion level.
    ``possible_board[row]`` holds the column of the queen in that row;
    the two collision lists hold the occupied ``row - col`` / ``row + col``
    diagonals.  Completed boards are appended to ``boards`` as strings.

    >>> boards = []
    >>> depth_first_search([], [], [], boards, 4)
    >>> for board in boards:
    ...     print(board)
    ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
    ['. . Q . ', 'Q . . . ', '. . . Q ', '. Q . . ']
    """

    # The next row to fill is the number of queens placed so far.
    row = len(possible_board)

    # A queen in every row: render the column indices as board strings.
    if row == n:
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    for column in range(n):
        # Vertical collision: the column is already used by another queen.
        same_column = column in possible_board
        # Diagonal collisions via the slope-intercept identities
        # row - col = b (45 degrees) and row + col = b (135 degrees).
        on_right_diagonal = row - column in diagonal_right_collisions
        on_left_diagonal = row + column in diagonal_left_collisions
        if same_column or on_right_diagonal or on_left_diagonal:
            continue

        # Safe square: recurse with the updated placement and diagonals.
        depth_first_search(
            possible_board + [column],
            diagonal_right_collisions + [row - column],
            diagonal_left_collisions + [row + column],
            boards,
            n,
        )
+
+
def n_queens_solution(n: int) -> None:
    """Print every n-queens board followed by the total solution count."""
    boards: List[List[str]] = []
    depth_first_search([], [], [], boards, n)

    for board in boards:
        # One line per row, then a blank separator line.
        print("\n".join(board))
        print("")

    print(len(boards), "solutions were found.")
+
+
if __name__ == "__main__":
    import doctest

    doctest.testmod()  # check the doctest in depth_first_search
    n_queens_solution(4)  # demo: print all 4-queens solutions
diff --git a/backtracking/rat_in_maze.py b/backtracking/rat_in_maze.py
new file mode 100644
index 000000000000..cd2a8f41daa8
--- /dev/null
+++ b/backtracking/rat_in_maze.py
@@ -0,0 +1,118 @@
+from typing import List
+
+
def solve_maze(maze: List[List[int]]) -> bool:
    """
    Solve the "rat in maze" problem: starting at (0, 0), reach
    (n - 1, n - 1) moving only through cells containing 0 (1 = wall).
    Prints the path matrix when a path exists, otherwise a failure
    message, and returns True/False accordingly.

    >>> solve_maze([[0, 0, 0],
    ...             [0, 1, 0],
    ...             [1, 0, 0]])
    [1, 1, 1]
    [0, 0, 1]
    [0, 0, 1]
    True

    >>> solve_maze([[0, 1],
    ...             [1, 0]])
    No solution exists!
    False
    """
    size = len(maze)
    # The discovered path is recorded in a separate matrix of the same shape.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    if run_maze(maze, 0, 0, solutions):
        print("\n".join(str(row) for row in solutions))
        return True
    print("No solution exists!")
    return False
+
+
def run_maze(maze: List[List[int]], i: int, j: int, solutions: List[List[int]]) -> bool:
    """
    Recursive helper starting at (i, j) and exploring the four directions
    down, right, up, left (in that order).  Marks visited cells in
    ``solutions`` and unmarks them when a branch dead-ends.

    Returns True when a path to the bottom-right corner is found.
    """
    size = len(maze)
    # Destination reached: mark it and stop.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    # Reject out-of-bounds coordinates.
    if not (0 <= i < size and 0 <= j < size):
        return False

    # Reject cells that are walls or already on the current path.
    if solutions[i][j] or maze[i][j]:
        return False

    # Tentatively take this cell, then try each direction in turn.
    solutions[i][j] = 1
    for di, dj in ((1, 0), (0, 1), (-1, 0), (0, -1)):
        if run_maze(maze, i + di, j + dj, solutions):
            return True

    # Dead end: remove the cell from the path before backtracking.
    solutions[i][j] = 0
    return False
+
+
if __name__ == "__main__":
    import doctest

    doctest.testmod()  # exercise the doctests in solve_maze
diff --git a/backtracking/sudoku.py b/backtracking/sudoku.py
new file mode 100644
index 000000000000..3bfaddd6e56f
--- /dev/null
+++ b/backtracking/sudoku.py
@@ -0,0 +1,158 @@
+"""
+Given a partially filled 9×9 2D array, the objective is to fill a 9×9
square grid with digits numbered 1 to 9, so that every row, column,
and each of the nine 3×3 sub-grids contains all of the digits.
+
+This can be solved using Backtracking and is similar to n-queens.
+We check to see if a cell is safe or not and recursively call the
+function on the next column to see if it returns True. if yes, we
+have solved the puzzle. else, we backtrack and place another number
+in that cell and repeat this process.
+"""
+from typing import List, Optional, Tuple
+
# A 9x9 puzzle is a list of nine rows of nine ints; 0 marks an empty cell.
Matrix = List[List[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution (e.g. the first row already contains two 5s)
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]
+
+
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """
    Return True when digit ``n`` may be placed at (row, column): it must
    not already appear in that row, that column, or the enclosing 3x3
    sub-grid.
    """
    # Row and column scan in a single pass.
    if any(grid[row][i] == n or grid[i][column] == n for i in range(9)):
        return False

    # Top-left corner of the 3x3 box containing (row, column).
    box_row, box_column = row - row % 3, column - column % 3
    for i in range(3):
        for j in range(3):
            if grid[box_row + i][box_column + j] == n:
                return False

    return True
+
+
def is_completed(grid: Matrix) -> bool:
    """
    Return True when the puzzle is completed, i.e. every cell holds a
    non-zero number.

    >>> is_completed([[0]])
    False
    >>> is_completed([[1]])
    True
    >>> is_completed([[1, 2], [0, 4]])
    False
    >>> is_completed([[1, 2], [3, 4]])
    True
    >>> is_completed(initial_grid)
    False
    >>> is_completed(no_solution)
    False
    """
    # Flattened scan: a single zero anywhere means the grid is unfinished.
    return all(cell != 0 for row in grid for cell in row)
+
+
def find_empty_location(grid: Matrix) -> Optional[Tuple[int, int]]:
    """
    Return the (row, column) of the first empty cell in row-major order,
    or None when every cell is filled.
    """
    return next(
        ((i, j) for i in range(9) for j in range(9) if grid[i][j] == 0),
        None,
    )
+
+
def sudoku(grid: Matrix) -> Optional[Matrix]:
    """
    Takes a partially filled-in grid and attempts to assign values to
    all unassigned locations in such a way to meet the requirements
    for Sudoku solution (non-duplication across rows, columns, and boxes)

    >>> sudoku(initial_grid)  # doctest: +NORMALIZE_WHITESPACE
    [[3, 1, 6, 5, 7, 8, 4, 9, 2],
     [5, 2, 9, 1, 3, 4, 7, 6, 8],
     [4, 8, 7, 6, 2, 9, 5, 3, 1],
     [2, 6, 3, 4, 1, 5, 9, 8, 7],
     [9, 7, 4, 8, 6, 3, 1, 2, 5],
     [8, 5, 1, 7, 9, 2, 6, 4, 3],
     [1, 3, 8, 9, 4, 7, 2, 5, 6],
     [6, 9, 2, 3, 5, 1, 8, 7, 4],
     [7, 4, 5, 2, 8, 6, 3, 1, 9]]
    >>> sudoku(no_solution) is None
    True
    """
    # A single scan suffices: find_empty_location returns None exactly
    # when the grid is complete, so the separate is_completed() pre-check
    # the original performed was a redundant second full-grid scan.
    location = find_empty_location(grid)
    if location is None:
        # No empty cell remains, so the grid is solved.
        return grid
    row, column = location

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit

            if sudoku(grid) is not None:
                return grid

            # Backtrack: undo the assignment and try the next digit.
            grid[row][column] = 0

    return None
+
+
def print_solution(grid: Matrix) -> None:
    """Print the grid as 9 lines of 9 digits."""
    for row in grid:
        # Each cell is followed by a space, matching the original
        # cell-by-cell printing (including the trailing space per line).
        print("".join(f"{cell} " for cell in row))
+
+
if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    # NOTE(review): despite the comment above, no copy is made — sudoku()
    # mutates the module-level grids in place; confirm whether a deepcopy
    # was intended here.
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
diff --git a/backtracking/sum_of_subsets.py b/backtracking/sum_of_subsets.py
new file mode 100644
index 000000000000..f695b8f7a80e
--- /dev/null
+++ b/backtracking/sum_of_subsets.py
@@ -0,0 +1,65 @@
+"""
 The sum-of-subsets problem states that, given a set of non-negative integers
 and a value M, we must determine all possible subsets of the given set whose
 elements sum to the given M.
+
+ Summation of the chosen numbers must be equal to given number M and one number
+ can be used only once.
+"""
+from typing import List
+
+
def generate_sum_of_subsets_soln(nums: List[int], max_sum: int) -> List[List[int]]:
    """Return every subset of ``nums`` whose elements sum to ``max_sum``."""
    result: List[List[int]] = []
    # Start the DFS with an empty path at index 0; the full sum of nums is
    # used for pruning branches that can no longer reach max_sum.
    create_state_space_tree(nums, max_sum, 0, [], result, sum(nums))
    return result
+
+
def create_state_space_tree(
    nums: List[int],
    max_sum: int,
    num_index: int,
    path: List[int],
    result: List[List[int]],
    remaining_nums_sum: int,
) -> None:
    """
    Depth-first search over the state space tree of subsets.

    A branch is abandoned when the current path already exceeds
    ``max_sum``, or when even adding all remaining numbers could not
    reach it; a path summing exactly to ``max_sum`` is recorded.

    (The original reused the parameter name ``num_index`` as the loop
    variable, shadowing it; the loop below uses a distinct name.)
    """
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            path + [nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )
+
+
"""
remove the comment to take an input from the user

print("Enter the elements")
nums = list(map(int, input().split()))
print("Enter max_sum sum")
max_sum = int(input())

"""
# Demo run with a fixed input set; prints the matching subsets.
nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
diff --git a/binary_tree/basic_binary_tree.py b/binary_tree/basic_binary_tree.py
deleted file mode 100644
index 7c6240fb4dd4..000000000000
--- a/binary_tree/basic_binary_tree.py
+++ /dev/null
@@ -1,63 +0,0 @@
-class Node: # This is the Class Node with constructor that contains data variable to type data and left,right pointers.
- def __init__(self, data):
- self.data = data
- self.left = None
- self.right = None
-
-def display(tree): #In Order traversal of the tree
-
- if tree is None:
- return
-
- if tree.left is not None:
- display(tree.left)
-
- print(tree.data)
-
- if tree.right is not None:
- display(tree.right)
-
- return
-
-def depth_of_tree(tree): #This is the recursive function to find the depth of binary tree.
- if tree is None:
- return 0
- else:
- depth_l_tree = depth_of_tree(tree.left)
- depth_r_tree = depth_of_tree(tree.right)
- if depth_l_tree > depth_r_tree:
- return 1 + depth_l_tree
- else:
- return 1 + depth_r_tree
-
-
-def is_full_binary_tree(tree): # This functions returns that is it full binary tree or not?
- if tree is None:
- return True
- if (tree.left is None) and (tree.right is None):
- return True
- if (tree.left is not None) and (tree.right is not None):
- return (is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right))
- else:
- return False
-
-
-def main(): # Main func for testing.
- tree = Node(1)
- tree.left = Node(2)
- tree.right = Node(3)
- tree.left.left = Node(4)
- tree.left.right = Node(5)
- tree.left.right.left = Node(6)
- tree.right.left = Node(7)
- tree.right.left.left = Node(8)
- tree.right.left.left.right = Node(9)
-
- print(is_full_binary_tree(tree))
- print(depth_of_tree(tree))
- print("Tree is: ")
- display(tree)
-
-
-if __name__ == '__main__':
- main()
diff --git a/bit_manipulation/README.md b/bit_manipulation/README.md
new file mode 100644
index 000000000000..2ef1661524f2
--- /dev/null
+++ b/bit_manipulation/README.md
@@ -0,0 +1,7 @@
+https://docs.python.org/3/reference/expressions.html#binary-bitwise-operations
+https://docs.python.org/3/reference/expressions.html#unary-arithmetic-and-bitwise-operations
+https://docs.python.org/3/library/stdtypes.html#bitwise-operations-on-integer-types
+
+https://wiki.python.org/moin/BitManipulation
+https://wiki.python.org/moin/BitwiseOperators
+https://www.tutorialspoint.com/python3/bitwise_operators_example.htm
diff --git a/bit_manipulation/__init__.py b/bit_manipulation/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/bit_manipulation/binary_and_operator.py b/bit_manipulation/binary_and_operator.py
new file mode 100644
index 000000000000..191ff8eb44a4
--- /dev/null
+++ b/bit_manipulation/binary_and_operator.py
@@ -0,0 +1,52 @@
+# https://www.tutorialspoint.com/python3/bitwise_operators_example.htm
+
+
+def binary_and(a: int, b: int) -> str:
+ """
+ Take in 2 integers, convert them to binary,
+ return a binary number that is the
+ result of a binary and operation on the integers provided.
+
+ >>> binary_and(25, 32)
+ '0b000000'
+ >>> binary_and(37, 50)
+ '0b100000'
+ >>> binary_and(21, 30)
+ '0b10100'
+ >>> binary_and(58, 73)
+ '0b0001000'
+ >>> binary_and(0, 255)
+ '0b00000000'
+ >>> binary_and(256, 256)
+ '0b100000000'
+ >>> binary_and(0, -1)
+ Traceback (most recent call last):
+ ...
+ ValueError: the value of both input must be positive
+ >>> binary_and(0, 1.1)
+ Traceback (most recent call last):
+ ...
+ TypeError: 'float' object cannot be interpreted as an integer
+ >>> binary_and("0", "1")
+ Traceback (most recent call last):
+ ...
+ TypeError: '<' not supported between instances of 'str' and 'int'
+ """
+ if a < 0 or b < 0:
+ raise ValueError("the value of both input must be positive")
+
+ a_binary = str(bin(a))[2:] # remove the leading "0b"
+ b_binary = str(bin(b))[2:] # remove the leading "0b"
+
+ max_len = max(len(a_binary), len(b_binary))
+
+ return "0b" + "".join(
+ str(int(char_a == "1" and char_b == "1"))
+ for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
+ )
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/bit_manipulation/binary_count_setbits.py b/bit_manipulation/binary_count_setbits.py
new file mode 100644
index 000000000000..3c92694533aa
--- /dev/null
+++ b/bit_manipulation/binary_count_setbits.py
@@ -0,0 +1,41 @@
+def binary_count_setbits(a: int) -> int:
+ """
+ Take in 1 integer, return a number that is
+ the number of 1's in binary representation of that number.
+
+ >>> binary_count_setbits(25)
+ 3
+ >>> binary_count_setbits(36)
+ 2
+ >>> binary_count_setbits(16)
+ 1
+ >>> binary_count_setbits(58)
+ 4
+ >>> binary_count_setbits(4294967295)
+ 32
+ >>> binary_count_setbits(0)
+ 0
+ >>> binary_count_setbits(-10)
+ Traceback (most recent call last):
+ ...
+ ValueError: Input value must be a positive integer
+ >>> binary_count_setbits(0.8)
+ Traceback (most recent call last):
+ ...
+ TypeError: Input value must be a 'int' type
+ >>> binary_count_setbits("0")
+ Traceback (most recent call last):
+ ...
+ TypeError: '<' not supported between instances of 'str' and 'int'
+ """
+ if a < 0:
+ raise ValueError("Input value must be a positive integer")
+ elif isinstance(a, float):
+ raise TypeError("Input value must be a 'int' type")
+ return bin(a).count("1")
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/bit_manipulation/binary_count_trailing_zeros.py b/bit_manipulation/binary_count_trailing_zeros.py
new file mode 100644
index 000000000000..f401c4ab9266
--- /dev/null
+++ b/bit_manipulation/binary_count_trailing_zeros.py
@@ -0,0 +1,44 @@
+from math import log2
+
+
+def binary_count_trailing_zeros(a: int) -> int:
+ """
+ Take in 1 integer, return a number that is
+ the number of trailing zeros in binary representation of that number.
+
+ >>> binary_count_trailing_zeros(25)
+ 0
+ >>> binary_count_trailing_zeros(36)
+ 2
+ >>> binary_count_trailing_zeros(16)
+ 4
+ >>> binary_count_trailing_zeros(58)
+ 1
+ >>> binary_count_trailing_zeros(4294967296)
+ 32
+ >>> binary_count_trailing_zeros(0)
+ 0
+ >>> binary_count_trailing_zeros(-10)
+ Traceback (most recent call last):
+ ...
+ ValueError: Input value must be a positive integer
+ >>> binary_count_trailing_zeros(0.8)
+ Traceback (most recent call last):
+ ...
+ TypeError: Input value must be a 'int' type
+ >>> binary_count_trailing_zeros("0")
+ Traceback (most recent call last):
+ ...
+ TypeError: '<' not supported between instances of 'str' and 'int'
+ """
+ if a < 0:
+ raise ValueError("Input value must be a positive integer")
+ elif isinstance(a, float):
+ raise TypeError("Input value must be a 'int' type")
+ return 0 if (a == 0) else int(log2(a & -a))
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/bit_manipulation/binary_or_operator.py b/bit_manipulation/binary_or_operator.py
new file mode 100644
index 000000000000..dabf5bcb09fd
--- /dev/null
+++ b/bit_manipulation/binary_or_operator.py
@@ -0,0 +1,48 @@
+# https://www.tutorialspoint.com/python3/bitwise_operators_example.htm
+
+
+def binary_or(a: int, b: int) -> str:
+ """
+ Take in 2 integers, convert them to binary, and return a binary number that is the
+ result of a binary or operation on the integers provided.
+
+ >>> binary_or(25, 32)
+ '0b111001'
+ >>> binary_or(37, 50)
+ '0b110111'
+ >>> binary_or(21, 30)
+ '0b11111'
+ >>> binary_or(58, 73)
+ '0b1111011'
+ >>> binary_or(0, 255)
+ '0b11111111'
+ >>> binary_or(0, 256)
+ '0b100000000'
+ >>> binary_or(0, -1)
+ Traceback (most recent call last):
+ ...
+ ValueError: the value of both input must be positive
+ >>> binary_or(0, 1.1)
+ Traceback (most recent call last):
+ ...
+ TypeError: 'float' object cannot be interpreted as an integer
+ >>> binary_or("0", "1")
+ Traceback (most recent call last):
+ ...
+ TypeError: '<' not supported between instances of 'str' and 'int'
+ """
+ if a < 0 or b < 0:
+ raise ValueError("the value of both input must be positive")
+ a_binary = str(bin(a))[2:] # remove the leading "0b"
+ b_binary = str(bin(b))[2:]
+ max_len = max(len(a_binary), len(b_binary))
+ return "0b" + "".join(
+ str(int("1" in (char_a, char_b)))
+ for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
+ )
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/bit_manipulation/binary_xor_operator.py b/bit_manipulation/binary_xor_operator.py
new file mode 100644
index 000000000000..6f8962192ad8
--- /dev/null
+++ b/bit_manipulation/binary_xor_operator.py
@@ -0,0 +1,52 @@
+# https://www.tutorialspoint.com/python3/bitwise_operators_example.htm
+
+
+def binary_xor(a: int, b: int) -> str:
+ """
+ Take in 2 integers, convert them to binary,
+ return a binary number that is the
+ result of a binary xor operation on the integers provided.
+
+ >>> binary_xor(25, 32)
+ '0b111001'
+ >>> binary_xor(37, 50)
+ '0b010111'
+ >>> binary_xor(21, 30)
+ '0b01011'
+ >>> binary_xor(58, 73)
+ '0b1110011'
+ >>> binary_xor(0, 255)
+ '0b11111111'
+ >>> binary_xor(256, 256)
+ '0b000000000'
+ >>> binary_xor(0, -1)
+ Traceback (most recent call last):
+ ...
+ ValueError: the value of both input must be positive
+ >>> binary_xor(0, 1.1)
+ Traceback (most recent call last):
+ ...
+ TypeError: 'float' object cannot be interpreted as an integer
+ >>> binary_xor("0", "1")
+ Traceback (most recent call last):
+ ...
+ TypeError: '<' not supported between instances of 'str' and 'int'
+ """
+ if a < 0 or b < 0:
+ raise ValueError("the value of both input must be positive")
+
+ a_binary = str(bin(a))[2:] # remove the leading "0b"
+ b_binary = str(bin(b))[2:] # remove the leading "0b"
+
+ max_len = max(len(a_binary), len(b_binary))
+
+ return "0b" + "".join(
+ str(int(char_a != char_b))
+ for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
+ )
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/bit_manipulation/single_bit_manipulation_operations.py b/bit_manipulation/single_bit_manipulation_operations.py
new file mode 100644
index 000000000000..e4a54028d9ee
--- /dev/null
+++ b/bit_manipulation/single_bit_manipulation_operations.py
@@ -0,0 +1,80 @@
+#!/usr/bin/env python3
+
+"""Provide the functionality to manipulate a single bit."""
+
+
+def set_bit(number: int, position: int) -> int:
+ """
+ Set the bit at position to 1.
+
+ Details: perform bitwise or for given number and X.
+ Where X is a number with all bits set to zero, except the bit at the given
+ position, which is set to one.
+
+ >>> set_bit(0b1101, 1) # 0b1111
+ 15
+ >>> set_bit(0b0, 5) # 0b100000
+ 32
+ >>> set_bit(0b1111, 1) # 0b1111
+ 15
+ """
+ return number | (1 << position)
+
+
+def clear_bit(number: int, position: int) -> int:
+ """
+ Set the bit at position to 0.
+
+ Details: perform bitwise and for given number and X.
+ Where X is a number with all bits set to one, except the bit at the given
+ position, which is set to zero.
+
+ >>> clear_bit(0b10010, 1) # 0b10000
+ 16
+ >>> clear_bit(0b0, 5) # 0b0
+ 0
+ """
+ return number & ~(1 << position)
+
+
+def flip_bit(number: int, position: int) -> int:
+ """
+ Flip the bit at position.
+
+ Details: perform bitwise xor for given number and X.
+ Where X is a number with all bits set to zero, except the bit at the given
+ position, which is set to one.
+
+ >>> flip_bit(0b101, 1) # 0b111
+ 7
+ >>> flip_bit(0b101, 0) # 0b100
+ 4
+ """
+ return number ^ (1 << position)
+
+
+def is_bit_set(number: int, position: int) -> bool:
+ """
+ Is the bit at position set?
+
+ Details: Shift the bit at position to be the first (smallest) bit.
+ Then check if the first bit is set by anding the shifted number with 1.
+
+ >>> is_bit_set(0b1010, 0)
+ False
+ >>> is_bit_set(0b1010, 1)
+ True
+ >>> is_bit_set(0b1010, 2)
+ False
+ >>> is_bit_set(0b1010, 3)
+ True
+ >>> is_bit_set(0b0, 17)
+ False
+ """
+ return ((number >> position) & 1) == 1
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/blockchain/__init__.py b/blockchain/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/blockchain/chinese_remainder_theorem.py b/blockchain/chinese_remainder_theorem.py
new file mode 100644
index 000000000000..b50147ac1215
--- /dev/null
+++ b/blockchain/chinese_remainder_theorem.py
@@ -0,0 +1,94 @@
+"""
+Chinese Remainder Theorem:
+GCD ( Greatest Common Divisor ) or HCF ( Highest Common Factor )
+
+If GCD(a,b) = 1, then for any remainder ra modulo a and any remainder rb modulo b
+there exists integer n, such that n = ra (mod a) and n = rb (mod b). If n1 and n2 are
+two such integers, then n1 = n2 (mod ab)
+
+Algorithm :
+
+1. Use the extended Euclidean algorithm to find x,y such that a*x + b*y = 1
+2. Take n = ra*by + rb*ax
+"""
+from typing import Tuple
+
+
+# Extended Euclid
+def extended_euclid(a: int, b: int) -> Tuple[int, int]:
+ """
+ >>> extended_euclid(10, 6)
+ (-1, 2)
+
+ >>> extended_euclid(7, 5)
+ (-2, 3)
+
+ """
+ if b == 0:
+ return (1, 0)
+ (x, y) = extended_euclid(b, a % b)
+ k = a // b
+ return (y, x - k * y)
+
+
+# Uses extended_euclid to find the modular inverses of n1 and n2
+def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
+ """
+ >>> chinese_remainder_theorem(5,1,7,3)
+ 31
+
+ Explanation : 31 is the smallest number such that
+ (i) When we divide it by 5, we get remainder 1
+ (ii) When we divide it by 7, we get remainder 3
+
+ >>> chinese_remainder_theorem(6,1,4,3)
+ 14
+
+ """
+ (x, y) = extended_euclid(n1, n2)
+ m = n1 * n2
+ n = r2 * x * n1 + r1 * y * n2
+ return (n % m + m) % m
+
+
+# ----------SAME SOLUTION USING InvertModulo instead ExtendedEuclid----------------
+
+# This function finds the inverse of a, i.e., a^(-1)
+def invert_modulo(a: int, n: int) -> int:
+ """
+ >>> invert_modulo(2, 5)
+ 3
+
+ >>> invert_modulo(8,7)
+ 1
+
+ """
+ (b, x) = extended_euclid(a, n)
+ if b < 0:
+ b = (b % n + n) % n
+ return b
+
+
+# Same as above, using invert_modulo
+def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
+ """
+ >>> chinese_remainder_theorem2(5,1,7,3)
+ 31
+
+ >>> chinese_remainder_theorem2(6,1,4,3)
+ 14
+
+ """
+ x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
+ m = n1 * n2
+ n = r2 * x * n1 + r1 * y * n2
+ return (n % m + m) % m
+
+
+if __name__ == "__main__":
+ from doctest import testmod
+
+ testmod(name="chinese_remainder_theorem", verbose=True)
+ testmod(name="chinese_remainder_theorem2", verbose=True)
+ testmod(name="invert_modulo", verbose=True)
+ testmod(name="extended_euclid", verbose=True)
diff --git a/blockchain/diophantine_equation.py b/blockchain/diophantine_equation.py
new file mode 100644
index 000000000000..7df674cb1438
--- /dev/null
+++ b/blockchain/diophantine_equation.py
@@ -0,0 +1,131 @@
+from typing import Tuple
+
+
+def diophantine(a: int, b: int, c: int) -> Tuple[float, float]:
+ """
+ Diophantine Equation : Given integers a,b,c ( at least one of a and b != 0), the
+ diophantine equation a*x + b*y = c has a solution (where x and y are integers)
+ iff gcd(a,b) divides c.
+
+ GCD ( Greatest Common Divisor ) or HCF ( Highest Common Factor )
+
+ >>> diophantine(10,6,14)
+ (-7.0, 14.0)
+
+ >>> diophantine(391,299,-69)
+ (9.0, -12.0)
+
+ But the above equation has more solutions, e.g., x = -4, y = 5.
+ That's why we need the diophantine_all_soln function.
+
+ """
+
+ assert (
+ c % greatest_common_divisor(a, b) == 0
+ ) # greatest_common_divisor(a,b) function implemented below
+ (d, x, y) = extended_gcd(a, b) # extended_gcd(a,b) function implemented below
+ r = c / d
+ return (r * x, r * y)
+
+
+def diophantine_all_soln(a: int, b: int, c: int, n: int = 2) -> None:
+ """
+ Lemma : if n|ab and gcd(a,n) = 1, then n|b.
+
+ Finding All solutions of Diophantine Equations:
+
+ Theorem : Let gcd(a,b) = d, a = d*p, b = d*q. If (x0,y0) is a solution of
+ Diophantine Equation a*x + b*y = c. a*x0 + b*y0 = c, then all the
+ solutions have the form a(x0 + t*q) + b(y0 - t*p) = c,
+ where t is an arbitrary integer.
+
+ n is the number of solutions you want, n = 2 by default
+
+ >>> diophantine_all_soln(10, 6, 14)
+ -7.0 14.0
+ -4.0 9.0
+
+ >>> diophantine_all_soln(10, 6, 14, 4)
+ -7.0 14.0
+ -4.0 9.0
+ -1.0 4.0
+ 2.0 -1.0
+
+ >>> diophantine_all_soln(391, 299, -69, n = 4)
+ 9.0 -12.0
+ 22.0 -29.0
+ 35.0 -46.0
+ 48.0 -63.0
+
+ """
+ (x0, y0) = diophantine(a, b, c) # Initial value
+ d = greatest_common_divisor(a, b)
+ p = a // d
+ q = b // d
+
+ for i in range(n):
+ x = x0 + i * q
+ y = y0 - i * p
+ print(x, y)
+
+
+def greatest_common_divisor(a: int, b: int) -> int:
+ """
+ Euclid's Lemma : d divides a and b, if and only if d divides a-b and b
+
+ Euclid's Algorithm
+
+ >>> greatest_common_divisor(7,5)
+ 1
+
+ Note : In number theory, two integers a and b are said to be relatively prime,
+ mutually prime, or co-prime if the only positive integer (factor) that
+ divides both of them is 1 i.e., gcd(a,b) = 1.
+
+ >>> greatest_common_divisor(121, 11)
+ 11
+
+ """
+ if a < b:
+ a, b = b, a
+
+ while a % b != 0:
+ a, b = b, a % b
+
+ return b
+
+
+def extended_gcd(a: int, b: int) -> Tuple[int, int, int]:
+ """
+ Extended Euclid's Algorithm : If d divides a and b and d = a*x + b*y for integers
+ x and y, then d = gcd(a,b)
+
+ >>> extended_gcd(10, 6)
+ (2, -1, 2)
+
+ >>> extended_gcd(7, 5)
+ (1, -2, 3)
+
+ """
+ assert a >= 0 and b >= 0
+
+ if b == 0:
+ d, x, y = a, 1, 0
+ else:
+ (d, p, q) = extended_gcd(b, a % b)
+ x = q
+ y = p - q * (a // b)
+
+ assert a % d == 0 and b % d == 0
+ assert d == a * x + b * y
+
+ return (d, x, y)
+
+
+if __name__ == "__main__":
+ from doctest import testmod
+
+ testmod(name="diophantine", verbose=True)
+ testmod(name="diophantine_all_soln", verbose=True)
+ testmod(name="extended_gcd", verbose=True)
+ testmod(name="greatest_common_divisor", verbose=True)
diff --git a/blockchain/modular_division.py b/blockchain/modular_division.py
new file mode 100644
index 000000000000..4f7f50a92ad0
--- /dev/null
+++ b/blockchain/modular_division.py
@@ -0,0 +1,154 @@
+from typing import Tuple
+
+
+def modular_division(a: int, b: int, n: int) -> int:
+ """
+ Modular Division :
+ An efficient algorithm for dividing b by a modulo n.
+
+ GCD ( Greatest Common Divisor ) or HCF ( Highest Common Factor )
+
+ Given three integers a, b, and n, such that gcd(a,n)=1 and n>1, the algorithm should
+ return an integer x such that 0 <= x <= n-1, and b/a = x (mod n) (that is, b = ax (mod n)).
+
+ Theorem:
+ a has a multiplicative inverse modulo n iff gcd(a,n) = 1
+
+
+ This finds x = b*a^(-1) mod n
+ Uses ExtendedEuclid to find the inverse of a
+
+ >>> modular_division(4,8,5)
+ 2
+
+ >>> modular_division(3,8,5)
+ 1
+
+ >>> modular_division(4, 11, 5)
+ 4
+
+ """
+ assert n > 1 and a > 0 and greatest_common_divisor(a, n) == 1
+ (d, t, s) = extended_gcd(n, a) # Implemented below
+ x = (b * s) % n
+ return x
+
+
+def invert_modulo(a: int, n: int) -> int:
+ """
+ This function finds the inverse of a, i.e., a^(-1)
+
+ >>> invert_modulo(2, 5)
+ 3
+
+ >>> invert_modulo(8,7)
+ 1
+
+ """
+ (b, x) = extended_euclid(a, n) # Implemented below
+ if b < 0:
+ b = (b % n + n) % n
+ return b
+
+
+# ------------------ Finding Modular division using invert_modulo -------------------
+
+
+def modular_division2(a: int, b: int, n: int) -> int:
+ """
+ This function uses the above inverse of a to find x = (b*a^(-1)) mod n
+
+ >>> modular_division2(4,8,5)
+ 2
+
+ >>> modular_division2(3,8,5)
+ 1
+
+ >>> modular_division2(4, 11, 5)
+ 4
+
+ """
+ s = invert_modulo(a, n)
+ x = (b * s) % n
+ return x
+
+
+def extended_gcd(a: int, b: int) -> Tuple[int, int, int]:
+ """
+ Extended Euclid's Algorithm : If d divides a and b and d = a*x + b*y for integers x
+ and y, then d = gcd(a,b)
+ >>> extended_gcd(10, 6)
+ (2, -1, 2)
+
+ >>> extended_gcd(7, 5)
+ (1, -2, 3)
+
+ ** extended_gcd function is used when d = gcd(a,b) is required in output
+
+ """
+ assert a >= 0 and b >= 0
+
+ if b == 0:
+ d, x, y = a, 1, 0
+ else:
+ (d, p, q) = extended_gcd(b, a % b)
+ x = q
+ y = p - q * (a // b)
+
+ assert a % d == 0 and b % d == 0
+ assert d == a * x + b * y
+
+ return (d, x, y)
+
+
+def extended_euclid(a: int, b: int) -> Tuple[int, int]:
+ """
+ Extended Euclid
+ >>> extended_euclid(10, 6)
+ (-1, 2)
+
+ >>> extended_euclid(7, 5)
+ (-2, 3)
+
+ """
+ if b == 0:
+ return (1, 0)
+ (x, y) = extended_euclid(b, a % b)
+ k = a // b
+ return (y, x - k * y)
+
+
+def greatest_common_divisor(a: int, b: int) -> int:
+ """
+ Euclid's Lemma : d divides a and b, if and only if d divides a-b and b
+ Euclid's Algorithm
+
+ >>> greatest_common_divisor(7,5)
+ 1
+
+ Note : In number theory, two integers a and b are said to be relatively prime,
+ mutually prime, or co-prime if the only positive integer (factor) that divides
+ both of them is 1 i.e., gcd(a,b) = 1.
+
+ >>> greatest_common_divisor(121, 11)
+ 11
+
+ """
+ if a < b:
+ a, b = b, a
+
+ while a % b != 0:
+ a, b = b, a % b
+
+ return b
+
+
+if __name__ == "__main__":
+ from doctest import testmod
+
+ testmod(name="modular_division", verbose=True)
+ testmod(name="modular_division2", verbose=True)
+ testmod(name="invert_modulo", verbose=True)
+ testmod(name="extended_gcd", verbose=True)
+ testmod(name="extended_euclid", verbose=True)
+ testmod(name="greatest_common_divisor", verbose=True)
diff --git a/boolean_algebra/__init__.py b/boolean_algebra/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/boolean_algebra/quine_mc_cluskey.py b/boolean_algebra/quine_mc_cluskey.py
index db4d153cbfd7..19bac336f6c5 100644
--- a/boolean_algebra/quine_mc_cluskey.py
+++ b/boolean_algebra/quine_mc_cluskey.py
@@ -1,116 +1,168 @@
-def compare_string(string1, string2):
- l1 = list(string1); l2 = list(string2)
- count = 0
- for i in range(len(l1)):
- if l1[i] != l2[i]:
- count += 1
- l1[i] = '_'
- if count > 1:
- return -1
- else:
- return("".join(l1))
-
-def check(binary):
- pi = []
- while 1:
- check1 = ['$']*len(binary)
- temp = []
- for i in range(len(binary)):
- for j in range(i+1, len(binary)):
- k=compare_string(binary[i], binary[j])
- if k != -1:
- check1[i] = '*'
- check1[j] = '*'
- temp.append(k)
- for i in range(len(binary)):
- if check1[i] == '$':
- pi.append(binary[i])
- if len(temp) == 0:
- return pi
- binary = list(set(temp))
-
-def decimal_to_binary(no_of_variable, minterms):
- temp = []
- s = ''
- for m in minterms:
- for i in range(no_of_variable):
- s = str(m%2) + s
- m //= 2
- temp.append(s)
- s = ''
- return temp
-
-def is_for_table(string1, string2, count):
- l1 = list(string1);l2=list(string2)
- count_n = 0
- for i in range(len(l1)):
- if l1[i] != l2[i]:
- count_n += 1
- if count_n == count:
- return True
- else:
- return False
-
-def selection(chart, prime_implicants):
- temp = []
- select = [0]*len(chart)
- for i in range(len(chart[0])):
- count = 0
- rem = -1
- for j in range(len(chart)):
- if chart[j][i] == 1:
- count += 1
- rem = j
- if count == 1:
- select[rem] = 1
- for i in range(len(select)):
- if select[i] == 1:
- for j in range(len(chart[0])):
- if chart[i][j] == 1:
- for k in range(len(chart)):
- chart[k][j] = 0
- temp.append(prime_implicants[i])
- while 1:
- max_n = 0; rem = -1; count_n = 0
- for i in range(len(chart)):
- count_n = chart[i].count(1)
- if count_n > max_n:
- max_n = count_n
- rem = i
-
- if max_n == 0:
- return temp
-
- temp.append(prime_implicants[rem])
-
- for i in range(len(chart[0])):
- if chart[rem][i] == 1:
- for j in range(len(chart)):
- chart[j][i] = 0
-
-def prime_implicant_chart(prime_implicants, binary):
- chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
- for i in range(len(prime_implicants)):
- count = prime_implicants[i].count('_')
- for j in range(len(binary)):
- if(is_for_table(prime_implicants[i], binary[j], count)):
- chart[i][j] = 1
-
- return chart
+def compare_string(string1: str, string2: str) -> str:
+ """
+ >>> compare_string('0010','0110')
+ '0_10'
+
+ >>> compare_string('0110','1101')
+ -1
+ """
+ l1 = list(string1)
+ l2 = list(string2)
+ count = 0
+ for i in range(len(l1)):
+ if l1[i] != l2[i]:
+ count += 1
+ l1[i] = "_"
+ if count > 1:
+ return -1
+ else:
+ return "".join(l1)
+
+
+def check(binary: [str]) -> [str]:
+ """
+ >>> check(['0.00.01.5'])
+ ['0.00.01.5']
+ """
+ pi = []
+ while 1:
+ check1 = ["$"] * len(binary)
+ temp = []
+ for i in range(len(binary)):
+ for j in range(i + 1, len(binary)):
+ k = compare_string(binary[i], binary[j])
+ if k != -1:
+ check1[i] = "*"
+ check1[j] = "*"
+ temp.append(k)
+ for i in range(len(binary)):
+ if check1[i] == "$":
+ pi.append(binary[i])
+ if len(temp) == 0:
+ return pi
+ binary = list(set(temp))
+
+
+def decimal_to_binary(no_of_variable: int, minterms: [float]) -> [str]:
+ """
+ >>> decimal_to_binary(3,[1.5])
+ ['0.00.01.5']
+ """
+ temp = []
+ s = ""
+ for m in minterms:
+ for i in range(no_of_variable):
+ s = str(m % 2) + s
+ m //= 2
+ temp.append(s)
+ s = ""
+ return temp
+
+
+def is_for_table(string1: str, string2: str, count: int) -> bool:
+ """
+ >>> is_for_table('__1','011',2)
+ True
+
+ >>> is_for_table('01_','001',1)
+ False
+ """
+ l1 = list(string1)
+ l2 = list(string2)
+ count_n = 0
+ for i in range(len(l1)):
+ if l1[i] != l2[i]:
+ count_n += 1
+ if count_n == count:
+ return True
+ else:
+ return False
+
+
+def selection(chart: [[int]], prime_implicants: [str]) -> [str]:
+ """
+ >>> selection([[1]],['0.00.01.5'])
+ ['0.00.01.5']
+
+ >>> selection([[1]],['0.00.01.5'])
+ ['0.00.01.5']
+ """
+ temp = []
+ select = [0] * len(chart)
+ for i in range(len(chart[0])):
+ count = 0
+ rem = -1
+ for j in range(len(chart)):
+ if chart[j][i] == 1:
+ count += 1
+ rem = j
+ if count == 1:
+ select[rem] = 1
+ for i in range(len(select)):
+ if select[i] == 1:
+ for j in range(len(chart[0])):
+ if chart[i][j] == 1:
+ for k in range(len(chart)):
+ chart[k][j] = 0
+ temp.append(prime_implicants[i])
+ while 1:
+ max_n = 0
+ rem = -1
+ count_n = 0
+ for i in range(len(chart)):
+ count_n = chart[i].count(1)
+ if count_n > max_n:
+ max_n = count_n
+ rem = i
+
+ if max_n == 0:
+ return temp
+
+ temp.append(prime_implicants[rem])
+
+ for i in range(len(chart[0])):
+ if chart[rem][i] == 1:
+ for j in range(len(chart)):
+ chart[j][i] = 0
+
+
+def prime_implicant_chart(prime_implicants: [str], binary: [str]) -> [[int]]:
+ """
+ >>> prime_implicant_chart(['0.00.01.5'],['0.00.01.5'])
+ [[1]]
+ """
+ chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
+ for i in range(len(prime_implicants)):
+ count = prime_implicants[i].count("_")
+ for j in range(len(binary)):
+ if is_for_table(prime_implicants[i], binary[j], count):
+ chart[i][j] = 1
+
+ return chart
+
def main():
- no_of_variable = int(input("Enter the no. of variables\n"))
- minterms = [int(x) for x in input("Enter the decimal representation of Minterms 'Spaces Seprated'\n").split()]
- binary = decimal_to_binary(no_of_variable, minterms)
-
- prime_implicants = check(binary)
- print("Prime Implicants are:")
- print(prime_implicants)
- chart = prime_implicant_chart(prime_implicants, binary)
-
- essential_prime_implicants = selection(chart,prime_implicants)
- print("Essential Prime Implicants are:")
- print(essential_prime_implicants)
-
-if __name__ == '__main__':
- main()
+ no_of_variable = int(input("Enter the no. of variables\n"))
+ minterms = [
+ int(x)
+ for x in input(
+ "Enter the decimal representation of Minterms 'Spaces Separated'\n"
+ ).split()
+ ]
+ binary = decimal_to_binary(no_of_variable, minterms)
+
+ prime_implicants = check(binary)
+ print("Prime Implicants are:")
+ print(prime_implicants)
+ chart = prime_implicant_chart(prime_implicants, binary)
+
+ essential_prime_implicants = selection(chart, prime_implicants)
+ print("Essential Prime Implicants are:")
+ print(essential_prime_implicants)
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
+ main()
diff --git a/cellular_automata/README.md b/cellular_automata/README.md
new file mode 100644
index 000000000000..c3fa0516f5dd
--- /dev/null
+++ b/cellular_automata/README.md
@@ -0,0 +1,4 @@
+# Cellular Automata
+
+* https://en.wikipedia.org/wiki/Cellular_automaton
+* https://mathworld.wolfram.com/ElementaryCellularAutomaton.html
diff --git a/cellular_automata/__init__.py b/cellular_automata/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/cellular_automata/conways_game_of_life.py b/cellular_automata/conways_game_of_life.py
new file mode 100644
index 000000000000..321baa3a3794
--- /dev/null
+++ b/cellular_automata/conways_game_of_life.py
@@ -0,0 +1,100 @@
+"""
+Conway's Game of Life implemented in Python.
+https://en.wikipedia.org/wiki/Conway%27s_Game_of_Life
+"""
+
+from __future__ import annotations
+
+from typing import List
+
+from PIL import Image
+
+# Define glider example
+GLIDER = [
+ [0, 1, 0, 0, 0, 0, 0, 0],
+ [0, 0, 1, 0, 0, 0, 0, 0],
+ [1, 1, 1, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+]
+
+# Define blinker example
+BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
+
+
+def new_generation(cells: List[List[int]]) -> List[List[int]]:
+ """
+ Generates the next generation for a given state of Conway's Game of Life.
+ >>> new_generation(BLINKER)
+ [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
+ """
+ next_generation = []
+ for i in range(len(cells)):
+ next_generation_row = []
+ for j in range(len(cells[i])):
+ # Get the number of live neighbours
+ neighbour_count = 0
+ if i > 0 and j > 0:
+ neighbour_count += cells[i - 1][j - 1]
+ if i > 0:
+ neighbour_count += cells[i - 1][j]
+ if i > 0 and j < len(cells[i]) - 1:
+ neighbour_count += cells[i - 1][j + 1]
+ if j > 0:
+ neighbour_count += cells[i][j - 1]
+ if j < len(cells[i]) - 1:
+ neighbour_count += cells[i][j + 1]
+ if i < len(cells) - 1 and j > 0:
+ neighbour_count += cells[i + 1][j - 1]
+ if i < len(cells) - 1:
+ neighbour_count += cells[i + 1][j]
+ if i < len(cells) - 1 and j < len(cells[i]) - 1:
+ neighbour_count += cells[i + 1][j + 1]
+
+ # Rules of the game of life (excerpt from Wikipedia):
+ # 1. Any live cell with two or three live neighbours survives.
+ # 2. Any dead cell with three live neighbours becomes a live cell.
+ # 3. All other live cells die in the next generation.
+ # Similarly, all other dead cells stay dead.
+ alive = cells[i][j] == 1
+ if (
+ (alive and 2 <= neighbour_count <= 3)
+ or not alive
+ and neighbour_count == 3
+ ):
+ next_generation_row.append(1)
+ else:
+ next_generation_row.append(0)
+
+ next_generation.append(next_generation_row)
+ return next_generation
+
+
+def generate_images(cells: list[list[int]], frames) -> list[Image.Image]:
+ """
+ Generates a list of images of subsequent Game of Life states.
+ """
+ images = []
+ for _ in range(frames):
+ # Create output image
+ img = Image.new("RGB", (len(cells[0]), len(cells)))
+ pixels = img.load()
+
+ # Save cells to image
+ for x in range(len(cells)):
+ for y in range(len(cells[0])):
+ colour = 255 - cells[y][x] * 255
+ pixels[x, y] = (colour, colour, colour)
+
+ # Save image
+ images.append(img)
+ cells = new_generation(cells)
+ return images
+
+
+if __name__ == "__main__":
+ images = generate_images(GLIDER, 16)
+ images[0].save("out.gif", save_all=True, append_images=images[1:])
diff --git a/cellular_automata/one_dimensional.py b/cellular_automata/one_dimensional.py
new file mode 100644
index 000000000000..da77e444502f
--- /dev/null
+++ b/cellular_automata/one_dimensional.py
@@ -0,0 +1,74 @@
+"""
+Return an image of 16 generations of one-dimensional cellular automata based on a given
+ruleset number
+https://mathworld.wolfram.com/ElementaryCellularAutomaton.html
+"""
+
+from __future__ import annotations
+
+from PIL import Image
+
+# Define the first generation of cells
+# fmt: off
+CELLS = [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
+# fmt: on
+
+
+def format_ruleset(ruleset: int) -> list[int]:
+ """
+ >>> format_ruleset(11100)
+ [0, 0, 0, 1, 1, 1, 0, 0]
+ >>> format_ruleset(0)
+ [0, 0, 0, 0, 0, 0, 0, 0]
+ >>> format_ruleset(11111111)
+ [1, 1, 1, 1, 1, 1, 1, 1]
+ """
+ return [int(c) for c in f"{ruleset:08}"[:8]]
+
+
+def new_generation(cells: list[list[int]], rule: list[int], time: int) -> list[int]:
+ population = len(cells[0]) # 31
+ next_generation = []
+ for i in range(population):
+ # Get the neighbors of each cell
+ # Handle neighbours outside bounds by using 0 as their value
+ left_neighbor = 0 if i == 0 else cells[time][i - 1]
+ right_neighbor = 0 if i == population - 1 else cells[time][i + 1]
+ # Define a new cell and add it to the new generation
+ situation = 7 - int(f"{left_neighbor}{cells[time][i]}{right_neighbor}", 2)
+ next_generation.append(rule[situation])
+ return next_generation
+
+
+def generate_image(cells: list[list[int]]) -> Image.Image:
+ """
+ Convert the cells into a greyscale PIL.Image.Image and return it to the caller.
+ >>> from random import random
+ >>> cells = [[random() for w in range(31)] for h in range(16)]
+ >>> img = generate_image(cells)
+ >>> isinstance(img, Image.Image)
+ True
+ >>> img.width, img.height
+ (31, 16)
+ """
+ # Create the output image
+ img = Image.new("RGB", (len(cells[0]), len(cells)))
+ pixels = img.load()
+ # Generates image
+ for w in range(img.width):
+ for h in range(img.height):
+ color = 255 - int(255 * cells[h][w])
+ pixels[w, h] = (color, color, color)
+ return img
+
+
+if __name__ == "__main__":
+ rule_num = bin(int(input("Rule:\n").strip()))[2:]
+ rule = format_ruleset(int(rule_num))
+ for time in range(16):
+ CELLS.append(new_generation(CELLS, rule, time))
+ img = generate_image(CELLS)
+ # Uncomment to save the image
+ # img.save(f"rule_{rule_num}.png")
+ img.show()
diff --git a/ciphers/__init__.py b/ciphers/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/ciphers/a1z26.py b/ciphers/a1z26.py
new file mode 100644
index 000000000000..92710ec44b0e
--- /dev/null
+++ b/ciphers/a1z26.py
@@ -0,0 +1,33 @@
+"""
+Convert a string of characters to a sequence of numbers
+corresponding to the character's position in the alphabet.
+
+https://www.dcode.fr/letter-number-cipher
+http://bestcodes.weebly.com/a1z26.html
+"""
+
+
+def encode(plain: str) -> list:
+ """
+ >>> encode("myname")
+ [13, 25, 14, 1, 13, 5]
+ """
+ return [ord(elem) - 96 for elem in plain]
+
+
+def decode(encoded: list) -> str:
+ """
+ >>> decode([13, 25, 14, 1, 13, 5])
+ 'myname'
+ """
+ return "".join(chr(elem + 96) for elem in encoded)
+
+
+def main():
+ encoded = encode(input("->").strip().lower())
+ print("Encoded: ", encoded)
+ print("Decoded:", decode(encoded))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ciphers/affine_cipher.py b/ciphers/affine_cipher.py
index af5f4e0ff4c6..cf8c0d5f4c1d 100644
--- a/ciphers/affine_cipher.py
+++ b/ciphers/affine_cipher.py
@@ -1,44 +1,67 @@
-from __future__ import print_function
-import sys, random, cryptomath_module as cryptoMath
+import random
+import sys
+
+from . import cryptomath_module as cryptomath
+
+SYMBOLS = (
+ r""" !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`"""
+ r"""abcdefghijklmnopqrstuvwxyz{|}~"""
+)
-SYMBOLS = r""" !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~"""
def main():
- message = input('Enter message: ')
- key = int(input('Enter key [2000 - 9000]: '))
- mode = input('Encrypt/Decrypt [E/D]: ')
-
- if mode.lower().startswith('e'):
- mode = 'encrypt'
- translated = encryptMessage(key, message)
- elif mode.lower().startswith('d'):
- mode = 'decrypt'
- translated = decryptMessage(key, message)
- print('\n%sed text: \n%s' % (mode.title(), translated))
-
-def getKeyParts(key):
- keyA = key // len(SYMBOLS)
- keyB = key % len(SYMBOLS)
- return (keyA, keyB)
-
-def checkKeys(keyA, keyB, mode):
- if keyA == 1 and mode == 'encrypt':
- sys.exit('The affine cipher becomes weak when key A is set to 1. Choose different key')
- if keyB == 0 and mode == 'encrypt':
- sys.exit('The affine cipher becomes weak when key A is set to 1. Choose different key')
+ """
+ >>> key = get_random_key()
+ >>> msg = "This is a test!"
+ >>> decrypt_message(key, encrypt_message(key, msg)) == msg
+ True
+ """
+ message = input("Enter message: ").strip()
+ key = int(input("Enter key [2000 - 9000]: ").strip())
+ mode = input("Encrypt/Decrypt [E/D]: ").strip().lower()
+
+ if mode.startswith("e"):
+ mode = "encrypt"
+ translated = encrypt_message(key, message)
+ elif mode.startswith("d"):
+ mode = "decrypt"
+ translated = decrypt_message(key, message)
+ print(f"\n{mode.title()}ed text: \n{translated}")
+
+
+def check_keys(keyA: int, keyB: int, mode: str) -> None:
+ if mode == "encrypt":
+ if keyA == 1:
+ sys.exit(
+ "The affine cipher becomes weak when key "
+ "A is set to 1. Choose different key"
+ )
+ if keyB == 0:
+ sys.exit(
+ "The affine cipher becomes weak when key "
+ "B is set to 0. Choose different key"
+ )
if keyA < 0 or keyB < 0 or keyB > len(SYMBOLS) - 1:
- sys.exit('Key A must be greater than 0 and key B must be between 0 and %s.' % (len(SYMBOLS) - 1))
- if cryptoMath.gcd(keyA, len(SYMBOLS)) != 1:
- sys.exit('Key A %s and the symbol set size %s are not relatively prime. Choose a different key.' % (keyA, len(SYMBOLS)))
+ sys.exit(
+ "Key A must be greater than 0 and key B must "
+ f"be between 0 and {len(SYMBOLS) - 1}."
+ )
+ if cryptomath.gcd(keyA, len(SYMBOLS)) != 1:
+ sys.exit(
+ f"Key A {keyA} and the symbol set size {len(SYMBOLS)} "
+ "are not relatively prime. Choose a different key."
+ )
-def encryptMessage(key, message):
- '''
- >>> encryptMessage(4545, 'The affine cipher is a type of monoalphabetic substitution cipher.')
+
+def encrypt_message(key: int, message: str) -> str:
+ """
+ >>> encrypt_message(4545, 'The affine cipher is a type of monoalphabetic '
+ ... 'substitution cipher.')
'VL}p MM{I}p~{HL}Gp{vp pFsH}pxMpyxIx JHL O}F{~pvuOvF{FuF{xIp~{HL}Gi'
- '''
- keyA, keyB = getKeyParts(key)
- checkKeys(keyA, keyB, 'encrypt')
- cipherText = ''
+ """
+ keyA, keyB = divmod(key, len(SYMBOLS))
+ check_keys(keyA, keyB, "encrypt")
+ cipherText = ""
for symbol in message:
if symbol in SYMBOLS:
symIndex = SYMBOLS.find(symbol)
@@ -47,15 +70,17 @@ def encryptMessage(key, message):
cipherText += symbol
return cipherText
-def decryptMessage(key, message):
- '''
- >>> decryptMessage(4545, 'VL}p MM{I}p~{HL}Gp{vp pFsH}pxMpyxIx JHL O}F{~pvuOvF{FuF{xIp~{HL}Gi')
+
+def decrypt_message(key: int, message: str) -> str:
+ """
+ >>> decrypt_message(4545, 'VL}p MM{I}p~{HL}Gp{vp pFsH}pxMpyxIx JHL O}F{~pvuOvF{FuF'
+ ... '{xIp~{HL}Gi')
'The affine cipher is a type of monoalphabetic substitution cipher.'
- '''
- keyA, keyB = getKeyParts(key)
- checkKeys(keyA, keyB, 'decrypt')
- plainText = ''
- modInverseOfkeyA = cryptoMath.findModInverse(keyA, len(SYMBOLS))
+ """
+ keyA, keyB = divmod(key, len(SYMBOLS))
+ check_keys(keyA, keyB, "decrypt")
+ plainText = ""
+ modInverseOfkeyA = cryptomath.findModInverse(keyA, len(SYMBOLS))
for symbol in message:
if symbol in SYMBOLS:
symIndex = SYMBOLS.find(symbol)
@@ -64,14 +89,17 @@ def decryptMessage(key, message):
plainText += symbol
return plainText
-def getRandomKey():
+
+def get_random_key() -> int:
while True:
keyA = random.randint(2, len(SYMBOLS))
keyB = random.randint(2, len(SYMBOLS))
- if cryptoMath.gcd(keyA, len(SYMBOLS)) == 1:
+ if cryptomath.gcd(keyA, len(SYMBOLS)) == 1 and keyB % len(SYMBOLS) != 0:
return keyA * len(SYMBOLS) + keyB
-if __name__ == '__main__':
+
+if __name__ == "__main__":
import doctest
+
doctest.testmod()
- main()
+ # main()
diff --git a/ciphers/atbash.py b/ciphers/atbash.py
new file mode 100644
index 000000000000..c17d1e34f37a
--- /dev/null
+++ b/ciphers/atbash.py
@@ -0,0 +1,66 @@
+""" https://en.wikipedia.org/wiki/Atbash """
+import string
+
+
+def atbash_slow(sequence: str) -> str:
+ """
+ >>> atbash_slow("ABCDEFG")
+ 'ZYXWVUT'
+
+ >>> atbash_slow("aW;;123BX")
+ 'zD;;123YC'
+ """
+ output = ""
+ for i in sequence:
+ extract = ord(i)
+ if 65 <= extract <= 90:
+ output += chr(155 - extract)
+ elif 97 <= extract <= 122:
+ output += chr(219 - extract)
+ else:
+ output += i
+ return output
+
+
+def atbash(sequence: str) -> str:
+ """
+ >>> atbash("ABCDEFG")
+ 'ZYXWVUT'
+
+ >>> atbash("aW;;123BX")
+ 'zD;;123YC'
+ """
+ letters = string.ascii_letters
+ letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
+ return "".join(
+ letters_reversed[letters.index(c)] if c in letters else c for c in sequence
+ )
+
+
+def benchmark() -> None:
+ """Let's benchmark them side-by-side..."""
+ from timeit import timeit
+
+ print("Running performance benchmarks...")
+ print(
+ "> atbash_slow()",
+ timeit(
+ "atbash_slow(printable)",
+ setup="from string import printable ; from __main__ import atbash_slow",
+ ),
+ "seconds",
+ )
+ print(
+ "> atbash()",
+ timeit(
+ "atbash(printable)",
+ setup="from string import printable ; from __main__ import atbash",
+ ),
+ "seconds",
+ )
+
+
+if __name__ == "__main__":
+ for sequence in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
+ print(f"{sequence} encrypted in atbash: {atbash(sequence)}")
+ benchmark()
diff --git a/ciphers/base16.py b/ciphers/base16.py
index 9bc0e5d8337a..f27ea4628e54 100644
--- a/ciphers/base16.py
+++ b/ciphers/base16.py
@@ -1,11 +1,22 @@
import base64
-def main():
- inp = input('->')
- encoded = inp.encode('utf-8') #encoded the input (we need a bytes like object)
- b16encoded = base64.b16encode(encoded) #b16encoded the encoded string
- print(b16encoded)
- print(base64.b16decode(b16encoded).decode('utf-8'))#decoded it
-
-if __name__ == '__main__':
- main()
+
+def encode_to_b16(inp: str) -> bytes:
+ """
+ Encodes a given utf-8 string into base-16.
+ >>> encode_to_b16('Hello World!')
+ b'48656C6C6F20576F726C6421'
+ >>> encode_to_b16('HELLO WORLD!')
+ b'48454C4C4F20574F524C4421'
+ >>> encode_to_b16('')
+ b''
+ """
+ encoded = inp.encode("utf-8") # encoded the input (we need a bytes like object)
+ b16encoded = base64.b16encode(encoded) # b16encoded the encoded string
+ return b16encoded
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/ciphers/base32.py b/ciphers/base32.py
index 2ac29f441e94..5bba8c4dd685 100644
--- a/ciphers/base32.py
+++ b/ciphers/base32.py
@@ -1,11 +1,13 @@
import base64
+
def main():
- inp = input('->')
- encoded = inp.encode('utf-8') #encoded the input (we need a bytes like object)
- b32encoded = base64.b32encode(encoded) #b32encoded the encoded string
+ inp = input("->")
+ encoded = inp.encode("utf-8") # encoded the input (we need a bytes like object)
+ b32encoded = base64.b32encode(encoded) # b32encoded the encoded string
print(b32encoded)
- print(base64.b32decode(b32encoded).decode('utf-8'))#decoded it
+ print(base64.b32decode(b32encoded).decode("utf-8")) # decoded it
+
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/ciphers/base64_cipher.py b/ciphers/base64_cipher.py
deleted file mode 100644
index fa3451c0cbae..000000000000
--- a/ciphers/base64_cipher.py
+++ /dev/null
@@ -1,64 +0,0 @@
-def encodeBase64(text):
- base64chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
-
- r = "" #the result
- c = 3 - len(text) % 3 #the length of padding
- p = "=" * c #the padding
- s = text + "\0" * c #the text to encode
-
- i = 0
- while i < len(s):
- if i > 0 and ((i / 3 * 4) % 76) == 0:
- r = r + "\r\n"
-
- n = (ord(s[i]) << 16) + (ord(s[i+1]) << 8 ) + ord(s[i+2])
-
- n1 = (n >> 18) & 63
- n2 = (n >> 12) & 63
- n3 = (n >> 6) & 63
- n4 = n & 63
-
- r += base64chars[n1] + base64chars[n2] + base64chars[n3] + base64chars[n4]
- i += 3
-
- return r[0: len(r)-len(p)] + p
-
-def decodeBase64(text):
- base64chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
- s = ""
-
- for i in text:
- if i in base64chars:
- s += i
- c = ""
- else:
- if i == '=':
- c += '='
-
- p = ""
- if c == "=":
- p = 'A'
- else:
- if c == "==":
- p = "AA"
-
- r = ""
- s = s + p
-
- i = 0
- while i < len(s):
- n = (base64chars.index(s[i]) << 18) + (base64chars.index(s[i+1]) << 12) + (base64chars.index(s[i+2]) << 6) +base64chars.index(s[i+3])
-
- r += chr((n >> 16) & 255) + chr((n >> 8) & 255) + chr(n & 255)
-
- i += 4
-
- return r[0: len(r) - len(p)]
-
-def main():
- print(encodeBase64("WELCOME to base64 encoding"))
- print(decodeBase64(encodeBase64("WELCOME to base64 encoding")))
-
-
-if __name__ == '__main__':
- main()
diff --git a/ciphers/base64_encoding.py b/ciphers/base64_encoding.py
new file mode 100644
index 000000000000..634afcb89873
--- /dev/null
+++ b/ciphers/base64_encoding.py
@@ -0,0 +1,142 @@
+B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
+
+
+def base64_encode(data: bytes) -> bytes:
+ """Encodes data according to RFC4648.
+
+ The data is first transformed to binary and appended with binary digits so that its
+ length becomes a multiple of 6, then each 6 binary digits will match a character in
+ the B64_CHARSET string. The number of appended binary digits would later determine
+ how many "=" signs should be added, the padding.
+ For every 2 binary digits added, a "=" sign is added in the output.
+ We can add any binary digits to make it a multiple of 6, for instance, consider the
+ following example:
+ "AA" -> 0010100100101001 -> 001010 010010 1001
+ As can be seen above, 2 more binary digits should be added, so there's 4
+ possibilities here: 00, 01, 10 or 11.
+ That being said, Base64 encoding can be used in Steganography to hide data in these
+ appended digits.
+
+ >>> from base64 import b64encode
+ >>> a = b"This pull request is part of Hacktoberfest20!"
+ >>> b = b"https://tools.ietf.org/html/rfc4648"
+ >>> c = b"A"
+ >>> base64_encode(a) == b64encode(a)
+ True
+ >>> base64_encode(b) == b64encode(b)
+ True
+ >>> base64_encode(c) == b64encode(c)
+ True
+ >>> base64_encode("abc")
+ Traceback (most recent call last):
+ ...
+ TypeError: a bytes-like object is required, not 'str'
+ """
+ # Make sure the supplied data is a bytes-like object
+ if not isinstance(data, bytes):
+ raise TypeError(
+ f"a bytes-like object is required, not '{data.__class__.__name__}'"
+ )
+
+ binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)
+
+ padding_needed = len(binary_stream) % 6 != 0
+
+ if padding_needed:
+ # The padding that will be added later
+ padding = b"=" * ((6 - len(binary_stream) % 6) // 2)
+
+ # Append binary_stream with arbitrary binary digits (0's by default) to make its
+ # length a multiple of 6.
+ binary_stream += "0" * (6 - len(binary_stream) % 6)
+ else:
+ padding = b""
+
+ # Encode every 6 binary digits to their corresponding Base64 character
+ return (
+ "".join(
+ B64_CHARSET[int(binary_stream[index : index + 6], 2)]
+ for index in range(0, len(binary_stream), 6)
+ ).encode()
+ + padding
+ )
+
+
+def base64_decode(encoded_data: str) -> bytes:
+ """Decodes data according to RFC4648.
+
+ This does the reverse operation of base64_encode.
+ We first transform the encoded data back to a binary stream, take off the
+ previously appended binary digits according to the padding, at this point we
+ would have a binary stream whose length is multiple of 8, the last step is
+ to convert every 8 bits to a byte.
+
+ >>> from base64 import b64decode
+ >>> a = "VGhpcyBwdWxsIHJlcXVlc3QgaXMgcGFydCBvZiBIYWNrdG9iZXJmZXN0MjAh"
+ >>> b = "aHR0cHM6Ly90b29scy5pZXRmLm9yZy9odG1sL3JmYzQ2NDg="
+ >>> c = "QQ=="
+ >>> base64_decode(a) == b64decode(a)
+ True
+ >>> base64_decode(b) == b64decode(b)
+ True
+ >>> base64_decode(c) == b64decode(c)
+ True
+ >>> base64_decode("abc")
+ Traceback (most recent call last):
+ ...
+ AssertionError: Incorrect padding
+ """
+ # Make sure encoded_data is either a string or a bytes-like object
+ if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
+ raise TypeError(
+ "argument should be a bytes-like object or ASCII string, not "
+ f"'{encoded_data.__class__.__name__}'"
+ )
+
+ # In case encoded_data is a bytes-like object, make sure it contains only
+ # ASCII characters so we convert it to a string object
+ if isinstance(encoded_data, bytes):
+ try:
+ encoded_data = encoded_data.decode("utf-8")
+ except UnicodeDecodeError:
+ raise ValueError("base64 encoded data should only contain ASCII characters")
+
+ padding = encoded_data.count("=")
+
+ # Check if the encoded string contains non base64 characters
+ if padding:
+ assert all(
+ char in B64_CHARSET for char in encoded_data[:-padding]
+ ), "Invalid base64 character(s) found."
+ else:
+ assert all(
+ char in B64_CHARSET for char in encoded_data
+ ), "Invalid base64 character(s) found."
+
+ # Check the padding
+ assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"
+
+ if padding:
+ # Remove padding if there is one
+ encoded_data = encoded_data[:-padding]
+
+ binary_stream = "".join(
+ bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
+ )[: -padding * 2]
+ else:
+ binary_stream = "".join(
+ bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
+ )
+
+ data = [
+ int(binary_stream[index : index + 8], 2)
+ for index in range(0, len(binary_stream), 8)
+ ]
+
+ return bytes(data)
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/ciphers/base85.py b/ciphers/base85.py
index 5fd13837f662..ebfd0480f794 100644
--- a/ciphers/base85.py
+++ b/ciphers/base85.py
@@ -1,11 +1,13 @@
import base64
+
def main():
- inp = input('->')
- encoded = inp.encode('utf-8') #encoded the input (we need a bytes like object)
- a85encoded = base64.a85encode(encoded) #a85encoded the encoded string
+ inp = input("->")
+ encoded = inp.encode("utf-8") # encoded the input (we need a bytes like object)
+ a85encoded = base64.a85encode(encoded) # a85encoded the encoded string
print(a85encoded)
- print(base64.a85decode(a85encoded).decode('utf-8'))#decoded it
+ print(base64.a85decode(a85encoded).decode("utf-8")) # decoded it
+
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/ciphers/beaufort_cipher.py b/ciphers/beaufort_cipher.py
new file mode 100644
index 000000000000..c885dec74001
--- /dev/null
+++ b/ciphers/beaufort_cipher.py
@@ -0,0 +1,82 @@
+"""
+Author: Mohit Radadiya
+"""
+
+from string import ascii_uppercase
+
+dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
+dict2 = {i: char for i, char in enumerate(ascii_uppercase)}
+
+
+# This function generates the key in
+# a cyclic manner until its length is
+# equal to the length of the original text
+def generate_key(message: str, key: str) -> str:
+ """
+ >>> generate_key("THE GERMAN ATTACK","SECRET")
+ 'SECRETSECRETSECRE'
+ """
+ x = len(message)
+ i = 0
+ while True:
+ if x == i:
+ i = 0
+ if len(key) == len(message):
+ break
+ key += key[i]
+ i += 1
+ return key
+
+
+# This function returns the encrypted text
+# generated with the help of the key
+def cipher_text(message: str, key_new: str) -> str:
+ """
+ >>> cipher_text("THE GERMAN ATTACK","SECRETSECRETSECRE")
+ 'BDC PAYUWL JPAIYI'
+ """
+ cipher_text = ""
+ i = 0
+ for letter in message:
+ if letter == " ":
+ cipher_text += " "
+ else:
+ x = (dict1[letter] - dict1[key_new[i]]) % 26
+ i += 1
+ cipher_text += dict2[x]
+ return cipher_text
+
+
+# This function decrypts the encrypted text
+# and returns the original text
+def original_text(cipher_text: str, key_new: str) -> str:
+ """
+ >>> original_text("BDC PAYUWL JPAIYI","SECRETSECRETSECRE")
+ 'THE GERMAN ATTACK'
+ """
+ or_txt = ""
+ i = 0
+ for letter in cipher_text:
+ if letter == " ":
+ or_txt += " "
+ else:
+ x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
+ i += 1
+ or_txt += dict2[x]
+ return or_txt
+
+
+def main():
+ message = "THE GERMAN ATTACK"
+ key = "SECRET"
+ key_new = generate_key(message, key)
+ s = cipher_text(message, key_new)
+ print(f"Encrypted Text = {s}")
+ print(f"Original Text = {original_text(s, key_new)}")
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
+ main()
diff --git a/ciphers/brute_force_caesar_cipher.py b/ciphers/brute_force_caesar_cipher.py
index 3b0716442fc5..13a165245403 100644
--- a/ciphers/brute_force_caesar_cipher.py
+++ b/ciphers/brute_force_caesar_cipher.py
@@ -1,5 +1,4 @@
-from __future__ import print_function
-def decrypt(message):
+def decrypt(message: str) -> None:
"""
>>> decrypt('TMDETUX PMDVU')
Decryption using Key #0: TMDETUX PMDVU
@@ -41,14 +40,17 @@ def decrypt(message):
translated = translated + LETTERS[num]
else:
translated = translated + symbol
- print("Decryption using Key #%s: %s" % (key, translated))
+ print(f"Decryption using Key #{key}: {translated}")
+
def main():
message = input("Encrypted message: ")
message = message.upper()
decrypt(message)
-if __name__ == '__main__':
+
+if __name__ == "__main__":
import doctest
+
doctest.testmod()
main()
diff --git a/ciphers/caesar_cipher.py b/ciphers/caesar_cipher.py
index 39c069c95a7c..4b2f76c7d873 100644
--- a/ciphers/caesar_cipher.py
+++ b/ciphers/caesar_cipher.py
@@ -1,63 +1,238 @@
-import sys
-def encrypt(strng, key):
- encrypted = ''
- for x in strng:
- indx = (ord(x) + key) % 256
- if indx > 126:
- indx = indx - 95
- encrypted = encrypted + chr(indx)
- return encrypted
-
-
-def decrypt(strng, key):
- decrypted = ''
- for x in strng:
- indx = (ord(x) - key) % 256
- if indx < 32:
- indx = indx + 95
- decrypted = decrypted + chr(indx)
- return decrypted
-
-def brute_force(strng):
- key = 1
- decrypted = ''
- while key <= 94:
- for x in strng:
- indx = (ord(x) - key) % 256
- if indx < 32:
- indx = indx + 95
- decrypted = decrypted + chr(indx)
- print("Key: {}\t| Message: {}".format(key, decrypted))
- decrypted = ''
- key += 1
- return None
-
-
-def main():
+from string import ascii_letters
+from typing import Dict, Optional
+
+
+def encrypt(input_string: str, key: int, alphabet: Optional[str] = None) -> str:
+ """
+ encrypt
+ =======
+ Encodes a given string with the caesar cipher and returns the encoded
+ message
+
+ Parameters:
+ -----------
+ * input_string: the plain-text that needs to be encoded
+ * key: the number of letters to shift the message by
+
+ Optional:
+ * alphabet (None): the alphabet used to encode the cipher, if not
+ specified, the standard english alphabet with upper and lowercase
+ letters is used
+
+ Returns:
+ * A string containing the encoded cipher-text
+
+ More on the caesar cipher
+ =========================
+ The caesar cipher is named after Julius Caesar who used it when sending
+ secret military messages to his troops. This is a simple substitution cipher
+ where every character in the plain-text is shifted by a certain number known
+ as the "key" or "shift".
+
+ Example:
+ Say we have the following message:
+ "Hello, captain"
+
+ And our alphabet is made up of lower and uppercase letters:
+ "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
+
+ And our shift is "2"
+
+ We can then encode the message, one letter at a time. "H" would become "J",
+ since "J" is two letters away, and so on. If the shift is ever too large, or
+ our letter is at the end of the alphabet, we just start at the beginning
+ ("Z" would shift to "a" then "b" and so on).
+
+ Our final message would be "Jgnnq, ecrvckp"
+
+ Further reading
+ ===============
+ * https://en.m.wikipedia.org/wiki/Caesar_cipher
+
+ Doctests
+ ========
+ >>> encrypt('The quick brown fox jumps over the lazy dog', 8)
+ 'bpm yCqks jzwEv nwF rCuxA wDmz Bpm tiHG lwo'
+
+ >>> encrypt('A very large key', 8000)
+ 's nWjq dSjYW cWq'
+
+ >>> encrypt('a lowercase alphabet', 5, 'abcdefghijklmnopqrstuvwxyz')
+ 'f qtbjwhfxj fqumfgjy'
+ """
+ # Set default alphabet to lower and upper case english chars
+ alpha = alphabet or ascii_letters
+
+ # The final result string
+ result = ""
+
+ for character in input_string:
+ if character not in alpha:
+ # Append without encryption if character is not in the alphabet
+ result += character
+ else:
+ # Get the index of the new key and make sure it isn't too large
+ new_key = (alpha.index(character) + key) % len(alpha)
+
+ # Append the encoded character to the alphabet
+ result += alpha[new_key]
+
+ return result
+
+
+def decrypt(input_string: str, key: int, alphabet: Optional[str] = None) -> str:
+ """
+ decrypt
+ =======
+ Decodes a given string of cipher-text and returns the decoded plain-text
+
+ Parameters:
+ -----------
+ * input_string: the cipher-text that needs to be decoded
+ * key: the number of letters to shift the message backwards by to decode
+
+ Optional:
+ * alphabet (None): the alphabet used to decode the cipher, if not
+ specified, the standard english alphabet with upper and lowercase
+ letters is used
+
+ Returns:
+ * A string containing the decoded plain-text
+
+ More on the caesar cipher
+ =========================
+ The caesar cipher is named after Julius Caesar who used it when sending
+ secret military messages to his troops. This is a simple substitution cipher
+ where every character in the plain-text is shifted by a certain number known
+ as the "key" or "shift". Please keep in mind, here we will be focused on
+ decryption.
+
+ Example:
+ Say we have the following cipher-text:
+ "Jgnnq, ecrvckp"
+
+ And our alphabet is made up of lower and uppercase letters:
+ "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
+
+ And our shift is "2"
+
+ To decode the message, we would do the same thing as encoding, but in
+ reverse. The first letter, "J" would become "H" (remember: we are decoding)
+ because "H" is two letters in reverse (to the left) of "J". We would
+ continue doing this. A letter like "a" would shift back to the end of
+ the alphabet, and would become "Z" or "Y" and so on.
+
+ Our final message would be "Hello, captain"
+
+ Further reading
+ ===============
+ * https://en.m.wikipedia.org/wiki/Caesar_cipher
+
+ Doctests
+ ========
+ >>> decrypt('bpm yCqks jzwEv nwF rCuxA wDmz Bpm tiHG lwo', 8)
+ 'The quick brown fox jumps over the lazy dog'
+
+ >>> decrypt('s nWjq dSjYW cWq', 8000)
+ 'A very large key'
+
+ >>> decrypt('f qtbjwhfxj fqumfgjy', 5, 'abcdefghijklmnopqrstuvwxyz')
+ 'a lowercase alphabet'
+ """
+ # Turn on decode mode by making the key negative
+ key *= -1
+
+ return encrypt(input_string, key, alphabet)
+
+
+def brute_force(input_string: str, alphabet: Optional[str] = None) -> Dict[int, str]:
+ """
+ brute_force
+ ===========
+ Returns all the possible combinations of keys and the decoded strings in the
+ form of a dictionary
+
+ Parameters:
+ -----------
+ * input_string: the cipher-text that needs to be used during brute-force
+
+ Optional:
+ * alphabet: (None): the alphabet used to decode the cipher, if not
+ specified, the standard english alphabet with upper and lowercase
+ letters is used
+
+ More about brute force
+ ======================
+ Brute force is when a person intercepts a message or password, not knowing
+ the key and tries every single combination. This is easy with the caesar
+ cipher since there are only all the letters in the alphabet. The more
+ complex the cipher, the larger amount of time it will take to do brute force
+
+ Ex:
+ Say we have a 5 letter alphabet (abcde), for simplicity and we intercepted the
+ following message:
+
+ "dbc"
+
+ we could then just write out every combination:
+ ecd... and so on, until we reach a combination that makes sense:
+ "cab"
+
+ Further reading
+ ===============
+ * https://en.wikipedia.org/wiki/Brute_force
+
+ Doctests
+ ========
+ >>> brute_force("jFyuMy xIH'N vLONy zILwy Gy!")[20]
+ "Please don't brute force me!"
+
+ >>> brute_force(1)
+ Traceback (most recent call last):
+ TypeError: 'int' object is not iterable
+ """
+ # Set default alphabet to lower and upper case english chars
+ alpha = alphabet or ascii_letters
+
+ # To store data on all the combinations
+ brute_force_data = {}
+
+ # Cycle through each combination
+ for key in range(1, len(alpha) + 1):
+ # Decrypt the message and store the result in the data
+ brute_force_data[key] = decrypt(input_string, key, alpha)
+
+ return brute_force_data
+
+
+if __name__ == "__main__":
while True:
- print('-' * 10 + "\n**Menu**\n" + '-' * 10)
- print("1.Encrpyt")
- print("2.Decrypt")
- print("3.BruteForce")
- print("4.Quit")
- choice = input("What would you like to do?: ")
- if choice not in ['1', '2', '3', '4']:
- print ("Invalid choice, please enter a valid choice")
- elif choice == '1':
- strng = input("Please enter the string to be encrypted: ")
- key = int(input("Please enter off-set between 1-94: "))
- if key in range(1, 95):
- print (encrypt(strng.lower(), key))
- elif choice == '2':
- strng = input("Please enter the string to be decrypted: ")
- key = int(input("Please enter off-set between 1-94: "))
- if key in range(1,95):
- print(decrypt(strng, key))
- elif choice == '3':
- strng = input("Please enter the string to be decrypted: ")
- brute_force(strng)
- main()
- elif choice == '4':
- print ("Goodbye.")
+ print(f'\n{"-" * 10}\n Menu\n{"-" * 10}')
+ print(*["1.Encrypt", "2.Decrypt", "3.BruteForce", "4.Quit"], sep="\n")
+
+ # get user input
+ choice = input("\nWhat would you like to do?: ").strip() or "4"
+
+ # run functions based on what the user chose
+ if choice not in ("1", "2", "3", "4"):
+ print("Invalid choice, please enter a valid choice")
+ elif choice == "1":
+ input_string = input("Please enter the string to be encrypted: ")
+ key = int(input("Please enter off-set: ").strip())
+
+ print(encrypt(input_string, key))
+ elif choice == "2":
+ input_string = input("Please enter the string to be decrypted: ")
+ key = int(input("Please enter off-set: ").strip())
+
+ print(decrypt(input_string, key))
+ elif choice == "3":
+ input_string = input("Please enter the string to be decrypted: ")
+ brute_force_data = brute_force(input_string)
+
+ for key, value in brute_force_data.items():
+ print(f"Key: {key} | Message: {value}")
+
+ elif choice == "4":
+ print("Goodbye.")
break
-main()
diff --git a/ciphers/cryptomath_module.py b/ciphers/cryptomath_module.py
index 3e8e71b117ed..ffeac1617f64 100644
--- a/ciphers/cryptomath_module.py
+++ b/ciphers/cryptomath_module.py
@@ -1,14 +1,15 @@
-def gcd(a, b):
+def gcd(a: int, b: int) -> int:
while a != 0:
a, b = b % a, a
return b
-def findModInverse(a, m):
+
+def findModInverse(a: int, m: int) -> int:
if gcd(a, m) != 1:
return None
u1, u2, u3 = 1, 0, a
v1, v2, v3 = 0, 1, m
while v3 != 0:
q = u3 // v3
- v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q *v3), v1, v2, v3
- return u1 % m
+ v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
+ return u1 % m
diff --git a/ciphers/decrypt_caesar_with_chi_squared.py b/ciphers/decrypt_caesar_with_chi_squared.py
new file mode 100644
index 000000000000..41b4a12ba453
--- /dev/null
+++ b/ciphers/decrypt_caesar_with_chi_squared.py
@@ -0,0 +1,240 @@
+#!/usr/bin/env python3
+
+from typing import Tuple
+
+
+def decrypt_caesar_with_chi_squared(
+ ciphertext: str,
+ cipher_alphabet: str = None,
+ frequencies_dict: str = None,
+ case_sensetive: bool = False,
+) -> Tuple[int, float, str]:
+ """
+ Basic Usage
+ ===========
+ Arguments:
+ * ciphertext (str): the text to decode (encoded with the caesar cipher)
+
+ Optional Arguments:
+ * cipher_alphabet (list): the alphabet used for the cipher (each letter is
+ a string separated by commas)
+ * frequencies_dict (dict): a dictionary of word frequencies where keys are
+ the letters and values are a percentage representation of the frequency as
+ a decimal/float
+ * case_sensetive (bool): a boolean value: True if the case matters during
+ decryption, False if it doesn't
+
+ Returns:
+ * A tuple in the form of:
+ (
+ most_likely_cipher,
+ most_likely_cipher_chi_squared_value,
+ decoded_most_likely_cipher
+ )
+
+ where...
+ - most_likely_cipher is an integer representing the shift of the smallest
+ chi-squared statistic (most likely key)
+ - most_likely_cipher_chi_squared_value is a float representing the
+ chi-squared statistic of the most likely shift
+ - decoded_most_likely_cipher is a string with the decoded cipher
+ (decoded by the most_likely_cipher key)
+
+
+ The Chi-squared test
+ ====================
+
+ The caesar cipher
+ -----------------
+ The caesar cipher is a very insecure encryption algorithm, however it has
+ been used since Julius Caesar. The cipher is a simple substitution cipher
+ where each character in the plain text is replaced by a character in the
+ alphabet a certain number of characters after the original character. The
+ number of characters away is called the shift or key. For example:
+
+ Plain text: hello
+ Key: 1
+ Cipher text: ifmmp
+ (each letter in hello has been shifted one to the right in the eng. alphabet)
+
+    As you can imagine, this doesn't provide lots of security. In fact
+    decrypting ciphertext by brute-force is extremely easy even by hand. One
+    way to automate this brute-force decryption is the chi-squared test.
+
+ The chi-squared test
+ -------------------
+ Each letter in the english alphabet has a frequency, or the amount of times
+ it shows up compared to other letters (usually expressed as a decimal
+ representing the percentage likelihood). The most common letter in the
+ english language is "e" with a frequency of 0.11162 or 11.162%. The test is
+ completed in the following fashion.
+
+ 1. The ciphertext is decoded in a brute force way (every combination of the
+ 26 possible combinations)
+ 2. For every combination, for each letter in the combination, the average
+    amount of times the letter should appear in the message is calculated by
+ multiplying the total number of characters by the frequency of the letter
+
+ For example:
+ In a message of 100 characters, e should appear around 11.162 times.
+
+ 3. Then, to calculate the margin of error (the amount of times the letter
+ SHOULD appear with the amount of times the letter DOES appear), we use
+ the chi-squared test. The following formula is used:
+
+ Let:
+ - n be the number of times the letter actually appears
+ - p be the predicted value of the number of times the letter should
+ appear (see #2)
+ - let v be the chi-squared test result (referred to here as chi-squared
+ value/statistic)
+
+ (n - p)^2
+ --------- = v
+ p
+
+ 4. Each chi squared value for each letter is then added up to the total.
+ The total is the chi-squared statistic for that encryption key.
+ 5. The encryption key with the lowest chi-squared value is the most likely
+ to be the decoded answer.
+
+ Further Reading
+ ================
+
+ * http://practicalcryptography.com/cryptanalysis/text-characterisation/chi-squared-
+ statistic/
+ * https://en.wikipedia.org/wiki/Letter_frequency
+ * https://en.wikipedia.org/wiki/Chi-squared_test
+ * https://en.m.wikipedia.org/wiki/Caesar_cipher
+
+ Doctests
+ ========
+ >>> decrypt_caesar_with_chi_squared(
+ ... 'dof pz aol jhlzhy jpwoly zv wvwbshy? pa pz avv lhzf av jyhjr!'
+ ... ) # doctest: +NORMALIZE_WHITESPACE
+ (7, 3129.228005747531,
+ 'why is the caesar cipher so popular? it is too easy to crack!')
+
+ >>> decrypt_caesar_with_chi_squared('crybd cdbsxq')
+ (10, 233.35343938980898, 'short string')
+
+ >>> decrypt_caesar_with_chi_squared(12)
+ Traceback (most recent call last):
+ AttributeError: 'int' object has no attribute 'lower'
+ """
+ alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]
+ frequencies_dict = frequencies_dict or {}
+
+ if frequencies_dict == {}:
+ # Frequencies of letters in the english language (how much they show up)
+ frequencies = {
+ "a": 0.08497,
+ "b": 0.01492,
+ "c": 0.02202,
+ "d": 0.04253,
+ "e": 0.11162,
+ "f": 0.02228,
+ "g": 0.02015,
+ "h": 0.06094,
+ "i": 0.07546,
+ "j": 0.00153,
+ "k": 0.01292,
+ "l": 0.04025,
+ "m": 0.02406,
+ "n": 0.06749,
+ "o": 0.07507,
+ "p": 0.01929,
+ "q": 0.00095,
+ "r": 0.07587,
+ "s": 0.06327,
+ "t": 0.09356,
+ "u": 0.02758,
+ "v": 0.00978,
+ "w": 0.02560,
+ "x": 0.00150,
+ "y": 0.01994,
+ "z": 0.00077,
+ }
+ else:
+ # Custom frequencies dictionary
+ frequencies = frequencies_dict
+
+ if not case_sensetive:
+ ciphertext = ciphertext.lower()
+
+ # Chi squared statistic values
+ chi_squared_statistic_values = {}
+
+ # cycle through all of the shifts
+ for shift in range(len(alphabet_letters)):
+ decrypted_with_shift = ""
+
+ # decrypt the message with the shift
+ for letter in ciphertext:
+ try:
+ # Try to index the letter in the alphabet
+ new_key = (alphabet_letters.index(letter) - shift) % len(
+ alphabet_letters
+ )
+ decrypted_with_shift += alphabet_letters[new_key]
+ except ValueError:
+ # Append the character if it isn't in the alphabet
+ decrypted_with_shift += letter
+
+ chi_squared_statistic = 0.0
+
+ # Loop through each letter in the decoded message with the shift
+ for letter in decrypted_with_shift:
+ if case_sensetive:
+ if letter in frequencies:
+ # Get the amount of times the letter occurs in the message
+ occurrences = decrypted_with_shift.count(letter)
+
+                    # Get the expected amount of times the letter should appear based
+ # on letter frequencies
+ expected = frequencies[letter] * occurrences
+
+ # Complete the chi squared statistic formula
+ chi_letter_value = ((occurrences - expected) ** 2) / expected
+
+ # Add the margin of error to the total chi squared statistic
+ chi_squared_statistic += chi_letter_value
+ else:
+ if letter.lower() in frequencies:
+ # Get the amount of times the letter occurs in the message
+ occurrences = decrypted_with_shift.count(letter)
+
+                    # Get the expected amount of times the letter should appear based
+ # on letter frequencies
+ expected = frequencies[letter] * occurrences
+
+ # Complete the chi squared statistic formula
+ chi_letter_value = ((occurrences - expected) ** 2) / expected
+
+ # Add the margin of error to the total chi squared statistic
+ chi_squared_statistic += chi_letter_value
+
+ # Add the data to the chi_squared_statistic_values dictionary
+ chi_squared_statistic_values[shift] = [
+ chi_squared_statistic,
+ decrypted_with_shift,
+ ]
+
+ # Get the most likely cipher by finding the cipher with the smallest chi squared
+ # statistic
+ most_likely_cipher = min(
+ chi_squared_statistic_values, key=chi_squared_statistic_values.get
+ )
+
+ # Get all the data from the most likely cipher (key, decoded message)
+ most_likely_cipher_chi_squared_value = chi_squared_statistic_values[
+ most_likely_cipher
+ ][0]
+ decoded_most_likely_cipher = chi_squared_statistic_values[most_likely_cipher][1]
+
+ # Return the data on the most likely shift
+ return (
+ most_likely_cipher,
+ most_likely_cipher_chi_squared_value,
+ decoded_most_likely_cipher,
+ )
diff --git a/ciphers/deterministic_miller_rabin.py b/ciphers/deterministic_miller_rabin.py
new file mode 100644
index 000000000000..d7fcb67e936c
--- /dev/null
+++ b/ciphers/deterministic_miller_rabin.py
@@ -0,0 +1,137 @@
+"""Created by Nathan Damon, @bizzfitch on github
+>>> test_miller_rabin()
+"""
+
+
+def miller_rabin(n: int, allow_probable: bool = False) -> bool:
+ """Deterministic Miller-Rabin algorithm for primes ~< 3.32e24.
+
+ Uses numerical analysis results to return whether or not the passed number
+ is prime. If the passed number is above the upper limit, and
+ allow_probable is True, then a return value of True indicates that n is
+ probably prime. This test does not allow False negatives- a return value
+ of False is ALWAYS composite.
+
+ Parameters
+ ----------
+ n : int
+ The integer to be tested. Since we usually care if a number is prime,
+ n < 2 returns False instead of raising a ValueError.
+ allow_probable: bool, default False
+ Whether or not to test n above the upper bound of the deterministic test.
+
+ Raises
+ ------
+ ValueError
+
+ Reference
+ ---------
+ https://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test
+ """
+ if n == 2:
+ return True
+ if not n % 2 or n < 2:
+ return False
+ if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
+ return False
+ if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
+ raise ValueError(
+ "Warning: upper bound of deterministic test is exceeded. "
+ "Pass allow_probable=True to allow probabilistic test. "
+ "A return value of True indicates a probable prime."
+ )
+ # array bounds provided by analysis
+ bounds = [
+ 2_047,
+ 1_373_653,
+ 25_326_001,
+ 3_215_031_751,
+ 2_152_302_898_747,
+ 3_474_749_660_383,
+ 341_550_071_728_321,
+ 1,
+ 3_825_123_056_546_413_051,
+ 1,
+ 1,
+ 318_665_857_834_031_151_167_461,
+ 3_317_044_064_679_887_385_961_981,
+ ]
+
+ primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
+ for idx, _p in enumerate(bounds, 1):
+ if n < _p:
+ # then we have our last prime to check
+ plist = primes[:idx]
+ break
+ d, s = n - 1, 0
+ # break up n -1 into a power of 2 (s) and
+ # remaining odd component
+ # essentially, solve for d * 2 ** s == n - 1
+ while d % 2 == 0:
+ d //= 2
+ s += 1
+ for prime in plist:
+ pr = False
+ for r in range(s):
+ m = pow(prime, d * 2 ** r, n)
+ # see article for analysis explanation for m
+ if (r == 0 and m == 1) or ((m + 1) % n == 0):
+ pr = True
+ # this loop will not determine compositeness
+ break
+ if pr:
+ continue
+ # if pr is False, then the above loop never evaluated to true,
+        # and so n MUST be composite
+ return False
+ return True
+
+
+def test_miller_rabin() -> None:
+ """Testing a nontrivial (ends in 1, 3, 7, 9) composite
+ and a prime in each range.
+ """
+ assert not miller_rabin(561)
+ assert miller_rabin(563)
+ # 2047
+
+ assert not miller_rabin(838_201)
+ assert miller_rabin(838_207)
+ # 1_373_653
+
+ assert not miller_rabin(17_316_001)
+ assert miller_rabin(17_316_017)
+ # 25_326_001
+
+ assert not miller_rabin(3_078_386_641)
+ assert miller_rabin(3_078_386_653)
+ # 3_215_031_751
+
+ assert not miller_rabin(1_713_045_574_801)
+ assert miller_rabin(1_713_045_574_819)
+ # 2_152_302_898_747
+
+ assert not miller_rabin(2_779_799_728_307)
+ assert miller_rabin(2_779_799_728_327)
+ # 3_474_749_660_383
+
+ assert not miller_rabin(113_850_023_909_441)
+ assert miller_rabin(113_850_023_909_527)
+ # 341_550_071_728_321
+
+ assert not miller_rabin(1_275_041_018_848_804_351)
+ assert miller_rabin(1_275_041_018_848_804_391)
+ # 3_825_123_056_546_413_051
+
+ assert not miller_rabin(79_666_464_458_507_787_791_867)
+ assert miller_rabin(79_666_464_458_507_787_791_951)
+ # 318_665_857_834_031_151_167_461
+
+ assert not miller_rabin(552_840_677_446_647_897_660_333)
+ assert miller_rabin(552_840_677_446_647_897_660_359)
+ # 3_317_044_064_679_887_385_961_981
+ # upper limit for probabilistic test
+
+
+if __name__ == "__main__":
+ test_miller_rabin()
diff --git a/ciphers/diffie.py b/ciphers/diffie.py
new file mode 100644
index 000000000000..44b12bf9d103
--- /dev/null
+++ b/ciphers/diffie.py
@@ -0,0 +1,25 @@
+def find_primitive(n: int) -> int:
+ for r in range(1, n):
+ li = []
+ for x in range(n - 1):
+ val = pow(r, x, n)
+ if val in li:
+ break
+ li.append(val)
+ else:
+ return r
+
+
+if __name__ == "__main__":
+ q = int(input("Enter a prime number q: "))
+ a = find_primitive(q)
+ a_private = int(input("Enter private key of A: "))
+ a_public = pow(a, a_private, q)
+ b_private = int(input("Enter private key of B: "))
+ b_public = pow(a, b_private, q)
+
+ a_secret = pow(b_public, a_private, q)
+ b_secret = pow(a_public, b_private, q)
+
+ print("The key value generated by A is: ", a_secret)
+ print("The key value generated by B is: ", b_secret)
diff --git a/ciphers/elgamal_key_generator.py b/ciphers/elgamal_key_generator.py
index 6a8751f69524..52cf69074187 100644
--- a/ciphers/elgamal_key_generator.py
+++ b/ciphers/elgamal_key_generator.py
@@ -1,25 +1,28 @@
import os
import random
import sys
-import rabin_miller as rabinMiller, cryptomath_module as cryptoMath
+
+from . import cryptomath_module as cryptoMath
+from . import rabin_miller as rabinMiller
min_primitive_root = 3
def main():
- print('Making key files...')
- makeKeyFiles('elgamal', 2048)
- print('Key files generation successful')
+ print("Making key files...")
+ makeKeyFiles("elgamal", 2048)
+ print("Key files generation successful")
# I have written my code naively same as definition of primitive root
# however every time I run this program, memory exceeded...
-# so I used 4.80 Algorithm in Handbook of Applied Cryptography(CRC Press, ISBN : 0-8493-8523-7, October 1996)
+# so I used 4.80 Algorithm in
+# Handbook of Applied Cryptography(CRC Press, ISBN : 0-8493-8523-7, October 1996)
# and it seems to run nicely!
-def primitiveRoot(p_val):
+def primitiveRoot(p_val: int) -> int:
print("Generating primitive root of p")
while True:
- g = random.randrange(3,p_val)
+ g = random.randrange(3, p_val)
if pow(g, 2, p_val) == 1:
continue
if pow(g, p_val, p_val) == 1:
@@ -27,8 +30,8 @@ def primitiveRoot(p_val):
return g
-def generateKey(keySize):
- print('Generating prime p...')
+def generateKey(keySize: int) -> ((int, int, int, int), (int, int)):
+ print("Generating prime p...")
p = rabinMiller.generateLargePrime(keySize) # select large prime number.
e_1 = primitiveRoot(p) # one primitive root on modulo p.
d = random.randrange(3, p) # private_key -> have to be greater than 2 for safety.
@@ -40,24 +43,29 @@ def generateKey(keySize):
return publicKey, privateKey
-def makeKeyFiles(name, keySize):
- if os.path.exists('%s_pubkey.txt' % name) or os.path.exists('%s_privkey.txt' % name):
- print('\nWARNING:')
- print('"%s_pubkey.txt" or "%s_privkey.txt" already exists. \n'
- 'Use a different name or delete these files and re-run this program.' %
- (name, name))
+def makeKeyFiles(name: str, keySize: int):
+ if os.path.exists("%s_pubkey.txt" % name) or os.path.exists(
+ "%s_privkey.txt" % name
+ ):
+ print("\nWARNING:")
+ print(
+ '"%s_pubkey.txt" or "%s_privkey.txt" already exists. \n'
+ "Use a different name or delete these files and re-run this program."
+ % (name, name)
+ )
sys.exit()
publicKey, privateKey = generateKey(keySize)
- print('\nWriting public key to file %s_pubkey.txt...' % name)
- with open('%s_pubkey.txt' % name, 'w') as fo:
- fo.write('%d,%d,%d,%d' % (publicKey[0], publicKey[1], publicKey[2], publicKey[3]))
+ print("\nWriting public key to file %s_pubkey.txt..." % name)
+ with open("%s_pubkey.txt" % name, "w") as fo:
+ fo.write(
+ "%d,%d,%d,%d" % (publicKey[0], publicKey[1], publicKey[2], publicKey[3])
+ )
- print('Writing private key to file %s_privkey.txt...' % name)
- with open('%s_privkey.txt' % name, 'w') as fo:
- fo.write('%d,%d' % (privateKey[0], privateKey[1]))
+ print("Writing private key to file %s_privkey.txt..." % name)
+ with open("%s_privkey.txt" % name, "w") as fo:
+ fo.write("%d,%d" % (privateKey[0], privateKey[1]))
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
-
\ No newline at end of file
diff --git a/ciphers/enigma_machine2.py b/ciphers/enigma_machine2.py
new file mode 100644
index 000000000000..4344db0056fd
--- /dev/null
+++ b/ciphers/enigma_machine2.py
@@ -0,0 +1,287 @@
+"""
+Wikipedia: https://en.wikipedia.org/wiki/Enigma_machine
+Video explanation: https://youtu.be/QwQVMqfoB2E
+Also check out Numberphile's and Computerphile's videos on this topic
+
+This module contains function 'enigma' which emulates
+the famous Enigma machine from WWII.
+Module includes:
+- enigma function
+- showcase of function usage
+- 9 randomly generated rotors
+- reflector (aka static rotor)
+- original alphabet
+
+Created by TrapinchO
+"""
+
+# used alphabet --------------------------
+# from string.ascii_uppercase
+abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+
+# -------------------------- default selection --------------------------
+# rotors --------------------------
+rotor1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
+rotor2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
+rotor3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
+# reflector --------------------------
+reflector = {
+ "A": "N",
+ "N": "A",
+ "B": "O",
+ "O": "B",
+ "C": "P",
+ "P": "C",
+ "D": "Q",
+ "Q": "D",
+ "E": "R",
+ "R": "E",
+ "F": "S",
+ "S": "F",
+ "G": "T",
+ "T": "G",
+ "H": "U",
+ "U": "H",
+ "I": "V",
+ "V": "I",
+ "J": "W",
+ "W": "J",
+ "K": "X",
+ "X": "K",
+ "L": "Y",
+ "Y": "L",
+ "M": "Z",
+ "Z": "M",
+}
+
+# -------------------------- extra rotors --------------------------
+rotor4 = "RMDJXFUWGISLHVTCQNKYPBEZOA"
+rotor5 = "SGLCPQWZHKXAREONTFBVIYJUDM"
+rotor6 = "HVSICLTYKQUBXDWAJZOMFGPREN"
+rotor7 = "RZWQHFMVDBKICJLNTUXAGYPSOE"
+rotor8 = "LFKIJODBEGAMQPXVUHYSTCZRWN"
+rotor9 = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
+
+
+def _validator(rotpos: tuple, rotsel: tuple, pb: str) -> tuple:
+ """
+ Checks if the values can be used for the 'enigma' function
+
+ >>> _validator((1,1,1), (rotor1, rotor2, rotor3), 'POLAND')
+ ((1, 1, 1), ('EGZWVONAHDCLFQMSIPJBYUKXTR', 'FOBHMDKEXQNRAULPGSJVTYICZW', \
+'ZJXESIUQLHAVRMDOYGTNFWPBKC'), \
+{'P': 'O', 'O': 'P', 'L': 'A', 'A': 'L', 'N': 'D', 'D': 'N'})
+
+    :param rotpos: rotor_position
+ :param rotsel: rotor_selection
+ :param pb: plugb -> validated and transformed
+ :return: (rotpos, rotsel, pb)
+ """
+ # Checks if there are 3 unique rotors
+
+ unique_rotsel = len(set(rotsel))
+ if unique_rotsel < 3:
+ raise Exception(f"Please use 3 unique rotors (not {unique_rotsel})")
+
+ # Checks if rotor positions are valid
+ rotorpos1, rotorpos2, rotorpos3 = rotpos
+ if not 0 < rotorpos1 <= len(abc):
+ raise ValueError(
+ f"First rotor position is not within range of 1..26 (" f"{rotorpos1}"
+ )
+ if not 0 < rotorpos2 <= len(abc):
+ raise ValueError(
+ f"Second rotor position is not within range of 1..26 (" f"{rotorpos2})"
+ )
+ if not 0 < rotorpos3 <= len(abc):
+ raise ValueError(
+ f"Third rotor position is not within range of 1..26 (" f"{rotorpos3})"
+ )
+
+ # Validates string and returns dict
+ pb = _plugboard(pb)
+
+ return rotpos, rotsel, pb
+
+
+def _plugboard(pbstring: str) -> dict:
+ """
+ https://en.wikipedia.org/wiki/Enigma_machine#Plugboard
+
+ >>> _plugboard('PICTURES')
+ {'P': 'I', 'I': 'P', 'C': 'T', 'T': 'C', 'U': 'R', 'R': 'U', 'E': 'S', 'S': 'E'}
+ >>> _plugboard('POLAND')
+ {'P': 'O', 'O': 'P', 'L': 'A', 'A': 'L', 'N': 'D', 'D': 'N'}
+
+ In the code, 'pb' stands for 'plugboard'
+
+ Pairs can be separated by spaces
+ :param pbstring: string containing plugboard setting for the Enigma machine
+ :return: dictionary containing converted pairs
+ """
+
+ # tests the input string if it
+ # a) is type string
+ # b) has even length (so pairs can be made)
+ if not isinstance(pbstring, str):
+ raise TypeError(f"Plugboard setting isn't type string ({type(pbstring)})")
+ elif len(pbstring) % 2 != 0:
+ raise Exception(f"Odd number of symbols ({len(pbstring)})")
+ elif pbstring == "":
+ return {}
+
+ pbstring.replace(" ", "")
+
+ # Checks if all characters are unique
+ tmppbl = set()
+ for i in pbstring:
+ if i not in abc:
+ raise Exception(f"'{i}' not in list of symbols")
+ elif i in tmppbl:
+ raise Exception(f"Duplicate symbol ({i})")
+ else:
+ tmppbl.add(i)
+ del tmppbl
+
+ # Created the dictionary
+ pb = {}
+ for i in range(0, len(pbstring) - 1, 2):
+ pb[pbstring[i]] = pbstring[i + 1]
+ pb[pbstring[i + 1]] = pbstring[i]
+
+ return pb
+
+
+def enigma(
+ text: str,
+ rotor_position: tuple,
+ rotor_selection: tuple = (rotor1, rotor2, rotor3),
+ plugb: str = "",
+) -> str:
+ """
+ The only difference with real-world enigma is that I allowed string input.
+ All characters are converted to uppercase. (non-letter symbol are ignored)
+ How it works:
+ (for every letter in the message)
+
+ - Input letter goes into the plugboard.
+ If it is connected to another one, switch it.
+
+ - Letter goes through 3 rotors.
+    Each rotor can be represented as 2 sets of symbols, where one is shuffled.
+    Each symbol from the first set has a corresponding symbol in
+    the second set and vice versa.
+
+ example:
+ | ABCDEFGHIJKLMNOPQRSTUVWXYZ | e.g. F=D and D=F
+ | VKLEPDBGRNWTFCJOHQAMUZYIXS |
+
+ - Symbol then goes through reflector (static rotor).
+ There it is switched with paired symbol
+    The reflector can be represented as 2 sets, each with half of the alphabet.
+ There are usually 10 pairs of letters.
+
+ Example:
+ | ABCDEFGHIJKLM | e.g. E is paired to X
+ | ZYXWVUTSRQPON | so when E goes in X goes out and vice versa
+
+ - Letter then goes through the rotors again
+
+ - If the letter is connected to plugboard, it is switched.
+
+ - Return the letter
+
+ >>> enigma('Hello World!', (1, 2, 1), plugb='pictures')
+ 'KORYH JUHHI!'
+ >>> enigma('KORYH, juhhi!', (1, 2, 1), plugb='pictures')
+ 'HELLO, WORLD!'
+ >>> enigma('hello world!', (1, 1, 1), plugb='pictures')
+ 'FPNCZ QWOBU!'
+ >>> enigma('FPNCZ QWOBU', (1, 1, 1), plugb='pictures')
+ 'HELLO WORLD'
+
+
+ :param text: input message
+ :param rotor_position: tuple with 3 values in range 1..26
+ :param rotor_selection: tuple with 3 rotors ()
+ :param plugb: string containing plugboard configuration (default '')
+ :return: en/decrypted string
+ """
+
+ text = text.upper()
+ rotor_position, rotor_selection, plugboard = _validator(
+ rotor_position, rotor_selection, plugb.upper()
+ )
+
+ rotorpos1, rotorpos2, rotorpos3 = rotor_position
+ rotor1, rotor2, rotor3 = rotor_selection
+ rotorpos1 -= 1
+ rotorpos2 -= 1
+ rotorpos3 -= 1
+
+ result = []
+
+ # encryption/decryption process --------------------------
+ for symbol in text:
+ if symbol in abc:
+
+ # 1st plugboard --------------------------
+ if symbol in plugboard:
+ symbol = plugboard[symbol]
+
+ # rotor ra --------------------------
+ index = abc.index(symbol) + rotorpos1
+ symbol = rotor1[index % len(abc)]
+
+ # rotor rb --------------------------
+ index = abc.index(symbol) + rotorpos2
+ symbol = rotor2[index % len(abc)]
+
+ # rotor rc --------------------------
+ index = abc.index(symbol) + rotorpos3
+ symbol = rotor3[index % len(abc)]
+
+ # reflector --------------------------
+ # this is the reason you don't need another machine to decipher
+
+ symbol = reflector[symbol]
+
+ # 2nd rotors
+ symbol = abc[rotor3.index(symbol) - rotorpos3]
+ symbol = abc[rotor2.index(symbol) - rotorpos2]
+ symbol = abc[rotor1.index(symbol) - rotorpos1]
+
+ # 2nd plugboard
+ if symbol in plugboard:
+ symbol = plugboard[symbol]
+
+ # moves/resets rotor positions
+ rotorpos1 += 1
+ if rotorpos1 >= len(abc):
+ rotorpos1 = 0
+ rotorpos2 += 1
+ if rotorpos2 >= len(abc):
+ rotorpos2 = 0
+ rotorpos3 += 1
+ if rotorpos3 >= len(abc):
+ rotorpos3 = 0
+
+ # else:
+ # pass
+ # Error could be also raised
+ # raise ValueError(
+ # 'Invalid symbol('+repr(symbol)+')')
+ result.append(symbol)
+
+ return "".join(result)
+
+
+if __name__ == "__main__":
+ message = "This is my Python script that emulates the Enigma machine from WWII."
+ rotor_pos = (1, 1, 1)
+ pb = "pictures"
+ rotor_sel = (rotor2, rotor4, rotor8)
+ en = enigma(message, rotor_pos, rotor_sel, pb)
+
+ print("Encrypted message:", en)
+ print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
diff --git a/ciphers/hill_cipher.py b/ciphers/hill_cipher.py
index 89b88beed17e..8237abf6aa5d 100644
--- a/ciphers/hill_cipher.py
+++ b/ciphers/hill_cipher.py
@@ -1,17 +1,16 @@
"""
Hill Cipher:
-The below defined class 'HillCipher' implements the Hill Cipher algorithm.
-The Hill Cipher is an algorithm that implements modern linear algebra techniques
-In this algortihm, you have an encryption key matrix. This is what will be used
-in encoding and decoding your text.
+The 'HillCipher' class below implements the Hill Cipher algorithm which uses
+modern linear algebra techniques to encode and decode text using an encryption
+key matrix.
-Algortihm:
+Algorithm:
Let the order of the encryption key be N (as it is a square matrix).
Your text is divided into batches of length N and converted to numerical vectors
by a simple mapping starting with A=0 and so on.
-The key is then mulitplied with the newly created batch vector to obtain the
+The key is then multiplied with the newly created batch vector to obtain the
encoded vector. After each multiplication modular 36 calculations are performed
on the vectors so as to bring the numbers between 0 and 36 and then mapped with
their corresponding alphanumerics.
@@ -24,12 +23,11 @@
The determinant of the encryption key matrix must be relatively prime w.r.t 36.
Note:
-The algorithm implemented in this code considers only alphanumerics in the text.
-If the length of the text to be encrypted is not a multiple of the
-break key(the length of one batch of letters),the last character of the text
-is added to the text until the length of the text reaches a multiple of
-the break_key. So the text after decrypting might be a little different than
-the original text.
+This implementation only considers alphanumerics in the text. If the length of
+the text to be encrypted is not a multiple of the break key(the length of one
+batch of letters), the last character of the text is added to the text until the
+length of the text reaches a multiple of the break_key. So the text after
+decrypting might be a little different than the original text.
References:
https://apprendre-en-ligne.net/crypto/hill/Hillciph.pdf
@@ -37,75 +35,131 @@
https://www.youtube.com/watch?v=4RhLNDqcjpA
"""
+import string
import numpy
-def gcd(a, b):
- if a == 0:
- return b
- return gcd(b%a, a)
+def greatest_common_divisor(a: int, b: int) -> int:
+ """
+ >>> greatest_common_divisor(4, 8)
+ 4
+ >>> greatest_common_divisor(8, 4)
+ 4
+ >>> greatest_common_divisor(4, 7)
+ 1
+ >>> greatest_common_divisor(0, 10)
+ 10
+ """
+ return b if a == 0 else greatest_common_divisor(b % a, a)
class HillCipher:
- key_string = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
+ key_string = string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
- replaceLetters = lambda self, letter: self.key_string.index(letter)
- replaceNumbers = lambda self, num: self.key_string[round(num)]
-
# take x and return x % len(key_string)
modulus = numpy.vectorize(lambda x: x % 36)
- toInt = numpy.vectorize(lambda x: round(x))
-
- def __init__(self, encrypt_key):
+ to_int = numpy.vectorize(lambda x: round(x))
+
+ def __init__(self, encrypt_key: int):
"""
- encrypt_key is an NxN numpy matrix
+ encrypt_key is an NxN numpy array
"""
- self.encrypt_key = self.modulus(encrypt_key) # mod36 calc's on the encrypt key
- self.checkDeterminant() # validate the determinant of the encryption key
+ self.encrypt_key = self.modulus(encrypt_key) # mod36 calc's on the encrypt key
+ self.check_determinant() # validate the determinant of the encryption key
self.decrypt_key = None
self.break_key = encrypt_key.shape[0]
- def checkDeterminant(self):
+ def replace_letters(self, letter: str) -> int:
+ """
+ >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
+ >>> hill_cipher.replace_letters('T')
+ 19
+ >>> hill_cipher.replace_letters('0')
+ 26
+ """
+ return self.key_string.index(letter)
+
+ def replace_digits(self, num: int) -> str:
+ """
+ >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
+ >>> hill_cipher.replace_digits(19)
+ 'T'
+ >>> hill_cipher.replace_digits(26)
+ '0'
+ """
+ return self.key_string[round(num)]
+
+ def check_determinant(self) -> None:
+ """
+ >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
+ >>> hill_cipher.check_determinant()
+ """
det = round(numpy.linalg.det(self.encrypt_key))
-
+
if det < 0:
det = det % len(self.key_string)
req_l = len(self.key_string)
- if gcd(det, len(self.key_string)) != 1:
- raise ValueError("discriminant modular {0} of encryption key({1}) is not co prime w.r.t {2}.\nTry another key.".format(req_l, det, req_l))
+ if greatest_common_divisor(det, len(self.key_string)) != 1:
+ raise ValueError(
+ f"determinant modular {req_l} of encryption key({det}) is not co prime "
+ f"w.r.t {req_l}.\nTry another key."
+ )
- def processText(self, text):
- text = list(text.upper())
- text = [char for char in text if char in self.key_string]
+ def process_text(self, text: str) -> str:
+ """
+ >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
+ >>> hill_cipher.process_text('Testing Hill Cipher')
+ 'TESTINGHILLCIPHERR'
+ >>> hill_cipher.process_text('hello')
+ 'HELLOO'
+ """
+ chars = [char for char in text.upper() if char in self.key_string]
- last = text[-1]
- while len(text) % self.break_key != 0:
- text.append(last)
+ last = chars[-1]
+ while len(chars) % self.break_key != 0:
+ chars.append(last)
- return ''.join(text)
-
- def encrypt(self, text):
- text = self.processText(text.upper())
- encrypted = ''
+ return "".join(chars)
+
+ def encrypt(self, text: str) -> str:
+ """
+ >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
+ >>> hill_cipher.encrypt('testing hill cipher')
+ 'WHXYJOLM9C6XT085LL'
+ >>> hill_cipher.encrypt('hello')
+ '85FF00'
+ """
+ text = self.process_text(text.upper())
+ encrypted = ""
for i in range(0, len(text) - self.break_key + 1, self.break_key):
- batch = text[i:i+self.break_key]
- batch_vec = list(map(self.replaceLetters, batch))
- batch_vec = numpy.matrix([batch_vec]).T
- batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
- encrypted_batch = ''.join(list(map(self.replaceNumbers, batch_encrypted)))
+ batch = text[i : i + self.break_key]
+ batch_vec = [self.replace_letters(char) for char in batch]
+ batch_vec = numpy.array([batch_vec]).T
+ batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
+ 0
+ ]
+ encrypted_batch = "".join(
+ self.replace_digits(num) for num in batch_encrypted
+ )
encrypted += encrypted_batch
return encrypted
- def makeDecryptKey(self):
+ def make_decrypt_key(self):
+ """
+ >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
+ >>> hill_cipher.make_decrypt_key()
+ array([[ 6, 25],
+ [ 5, 26]])
+ """
det = round(numpy.linalg.det(self.encrypt_key))
-
+
if det < 0:
det = det % len(self.key_string)
det_inv = None
@@ -114,22 +168,36 @@ def makeDecryptKey(self):
det_inv = i
break
- inv_key = det_inv * numpy.linalg.det(self.encrypt_key) *\
- numpy.linalg.inv(self.encrypt_key)
+ inv_key = (
+ det_inv
+ * numpy.linalg.det(self.encrypt_key)
+ * numpy.linalg.inv(self.encrypt_key)
+ )
- return self.toInt(self.modulus(inv_key))
-
- def decrypt(self, text):
- self.decrypt_key = self.makeDecryptKey()
- text = self.processText(text.upper())
- decrypted = ''
+ return self.to_int(self.modulus(inv_key))
+
+ def decrypt(self, text: str) -> str:
+ """
+ >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
+ >>> hill_cipher.decrypt('WHXYJOLM9C6XT085LL')
+ 'TESTINGHILLCIPHERR'
+ >>> hill_cipher.decrypt('85FF00')
+ 'HELLOO'
+ """
+ self.decrypt_key = self.make_decrypt_key()
+ text = self.process_text(text.upper())
+ decrypted = ""
for i in range(0, len(text) - self.break_key + 1, self.break_key):
- batch = text[i:i+self.break_key]
- batch_vec = list(map(self.replaceLetters, batch))
- batch_vec = numpy.matrix([batch_vec]).T
- batch_decrypted = self.modulus(self.decrypt_key.dot(batch_vec)).T.tolist()[0]
- decrypted_batch = ''.join(list(map(self.replaceNumbers, batch_decrypted)))
+ batch = text[i : i + self.break_key]
+ batch_vec = [self.replace_letters(char) for char in batch]
+ batch_vec = numpy.array([batch_vec]).T
+ batch_decrypted = self.modulus(self.decrypt_key.dot(batch_vec)).T.tolist()[
+ 0
+ ]
+ decrypted_batch = "".join(
+ self.replace_digits(num) for num in batch_decrypted
+ )
decrypted += decrypted_batch
return decrypted
@@ -141,27 +209,26 @@ def main():
print("Enter each row of the encryption key with space separated integers")
for i in range(N):
- row = list(map(int, input().split()))
+ row = [int(x) for x in input().split()]
hill_matrix.append(row)
- hc = HillCipher(numpy.matrix(hill_matrix))
+ hc = HillCipher(numpy.array(hill_matrix))
print("Would you like to encrypt or decrypt some text? (1 or 2)")
- option = input("""
-1. Encrypt
-2. Decrypt
-"""
- )
-
- if option == '1':
+ option = input("\n1. Encrypt\n2. Decrypt\n")
+ if option == "1":
text_e = input("What text would you like to encrypt?: ")
print("Your encrypted text is:")
print(hc.encrypt(text_e))
- elif option == '2':
+ elif option == "2":
text_d = input("What text would you like to decrypt?: ")
print("Your decrypted text is:")
print(hc.decrypt(text_d))
-
+
if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
+
main()
diff --git a/ciphers/mixed_keyword_cypher.py b/ciphers/mixed_keyword_cypher.py
new file mode 100644
index 000000000000..59298d310ce0
--- /dev/null
+++ b/ciphers/mixed_keyword_cypher.py
@@ -0,0 +1,68 @@
+def mixed_keyword(key: str = "college", pt: str = "UNIVERSITY") -> str:
+ """
+
+ For key:hello
+
+ H E L O
+ A B C D
+ F G I J
+ K M N P
+ Q R S T
+ U V W X
+ Y Z
+ and map vertically
+
+ >>> mixed_keyword("college", "UNIVERSITY") # doctest: +NORMALIZE_WHITESPACE
+ {'A': 'C', 'B': 'A', 'C': 'I', 'D': 'P', 'E': 'U', 'F': 'Z', 'G': 'O', 'H': 'B',
+ 'I': 'J', 'J': 'Q', 'K': 'V', 'L': 'L', 'M': 'D', 'N': 'K', 'O': 'R', 'P': 'W',
+ 'Q': 'E', 'R': 'F', 'S': 'M', 'T': 'S', 'U': 'X', 'V': 'G', 'W': 'H', 'X': 'N',
+ 'Y': 'T', 'Z': 'Y'}
+ 'XKJGUFMJST'
+ """
+ key = key.upper()
+ pt = pt.upper()
+ temp = []
+ for i in key:
+ if i not in temp:
+ temp.append(i)
+ len_temp = len(temp)
+ # print(temp)
+ alpha = []
+ modalpha = []
+ for i in range(65, 91):
+ t = chr(i)
+ alpha.append(t)
+ if t not in temp:
+ temp.append(t)
+ # print(temp)
+ r = int(26 / 4)
+ # print(r)
+ k = 0
+ for i in range(r):
+ t = []
+ for j in range(len_temp):
+ t.append(temp[k])
+ if not (k < 25):
+ break
+ k += 1
+ modalpha.append(t)
+ # print(modalpha)
+ d = {}
+ j = 0
+ k = 0
+ for j in range(len_temp):
+ for i in modalpha:
+ if not (len(i) - 1 >= j):
+ break
+ d[alpha[k]] = i[j]
+ if not k < 25:
+ break
+ k += 1
+ print(d)
+ cypher = ""
+ for i in pt:
+ cypher += d[i]
+ return cypher
+
+
+print(mixed_keyword("college", "UNIVERSITY"))
diff --git a/ciphers/mono_alphabetic_ciphers.py b/ciphers/mono_alphabetic_ciphers.py
new file mode 100644
index 000000000000..0a29d6442896
--- /dev/null
+++ b/ciphers/mono_alphabetic_ciphers.py
@@ -0,0 +1,59 @@
+LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+
+
+def translate_message(key, message, mode):
+ """
+ >>> translate_message("QWERTYUIOPASDFGHJKLZXCVBNM","Hello World","encrypt")
+ 'Pcssi Bidsm'
+ """
+ chars_a = LETTERS if mode == "decrypt" else key
+ chars_b = key if mode == "decrypt" else LETTERS
+ translated = ""
+ # loop through each symbol in the message
+ for symbol in message:
+ if symbol.upper() in chars_a:
+ # encrypt/decrypt the symbol
+ sym_index = chars_a.find(symbol.upper())
+ if symbol.isupper():
+ translated += chars_b[sym_index].upper()
+ else:
+ translated += chars_b[sym_index].lower()
+ else:
+ # symbol is not in LETTERS, just add it
+ translated += symbol
+ return translated
+
+
+def encrypt_message(key: str, message: str) -> str:
+ """
+ >>> encrypt_message("QWERTYUIOPASDFGHJKLZXCVBNM", "Hello World")
+ 'Pcssi Bidsm'
+ """
+ return translate_message(key, message, "encrypt")
+
+
+def decrypt_message(key: str, message: str) -> str:
+ """
+ >>> decrypt_message("QWERTYUIOPASDFGHJKLZXCVBNM", "Hello World")
+ 'Itssg Vgksr'
+ """
+ return translate_message(key, message, "decrypt")
+
+
+def main():
+ message = "Hello World"
+ key = "QWERTYUIOPASDFGHJKLZXCVBNM"
+ mode = "decrypt" # set to 'encrypt' or 'decrypt'
+
+ if mode == "encrypt":
+ translated = encrypt_message(key, message)
+ elif mode == "decrypt":
+ translated = decrypt_message(key, message)
+ print(f"Using the key {key}, the {mode}ed message is: {translated}")
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
+ main()
diff --git a/ciphers/morse_code_implementation.py b/ciphers/morse_code_implementation.py
new file mode 100644
index 000000000000..1cce2ef8b386
--- /dev/null
+++ b/ciphers/morse_code_implementation.py
@@ -0,0 +1,97 @@
+# Python program to implement Morse Code Translator
+
+# Dictionary representing the morse code chart
+MORSE_CODE_DICT = {
+ "A": ".-",
+ "B": "-...",
+ "C": "-.-.",
+ "D": "-..",
+ "E": ".",
+ "F": "..-.",
+ "G": "--.",
+ "H": "....",
+ "I": "..",
+ "J": ".---",
+ "K": "-.-",
+ "L": ".-..",
+ "M": "--",
+ "N": "-.",
+ "O": "---",
+ "P": ".--.",
+ "Q": "--.-",
+ "R": ".-.",
+ "S": "...",
+ "T": "-",
+ "U": "..-",
+ "V": "...-",
+ "W": ".--",
+ "X": "-..-",
+ "Y": "-.--",
+ "Z": "--..",
+ "1": ".----",
+ "2": "..---",
+ "3": "...--",
+ "4": "....-",
+ "5": ".....",
+ "6": "-....",
+ "7": "--...",
+ "8": "---..",
+ "9": "----.",
+ "0": "-----",
+ "&": ".-...",
+ "@": ".--.-.",
+ ":": "---...",
+ ",": "--..--",
+ ".": ".-.-.-",
+ "'": ".----.",
+ '"': ".-..-.",
+ "?": "..--..",
+ "/": "-..-.",
+ "=": "-...-",
+ "+": ".-.-.",
+ "-": "-....-",
+ "(": "-.--.",
+ ")": "-.--.-",
+ # Exclamation mark is not in ITU-R recommendation
+ "!": "-.-.--",
+}
+
+
+def encrypt(message: str) -> str:
+ cipher = ""
+ for letter in message:
+ if letter != " ":
+ cipher += MORSE_CODE_DICT[letter] + " "
+ else:
+ cipher += "/ "
+
+ # Remove the trailing space added by the loop above
+ return cipher[:-1]
+
+
+def decrypt(message: str) -> str:
+ decipher = ""
+ letters = message.split(" ")
+ for letter in letters:
+ if letter != "/":
+ decipher += list(MORSE_CODE_DICT.keys())[
+ list(MORSE_CODE_DICT.values()).index(letter)
+ ]
+ else:
+ decipher += " "
+
+ return decipher
+
+
+def main():
+ message = "Morse code here"
+ result = encrypt(message.upper())
+ print(result)
+
+ message = result
+ result = decrypt(message)
+ print(result)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ciphers/onepad_cipher.py b/ciphers/onepad_cipher.py
index 6afbd45249ec..a91f2b4d31c5 100644
--- a/ciphers/onepad_cipher.py
+++ b/ciphers/onepad_cipher.py
@@ -1,32 +1,30 @@
-from __future__ import print_function
-
import random
class Onepad:
- def encrypt(self, text):
- '''Function to encrypt text using psedo-random numbers'''
+ def encrypt(self, text: str) -> ([str], [int]):
+ """Function to encrypt text using pseudo-random numbers"""
plain = [ord(i) for i in text]
key = []
cipher = []
for i in plain:
k = random.randint(1, 300)
- c = (i+k)*k
+ c = (i + k) * k
cipher.append(c)
key.append(k)
return cipher, key
-
- def decrypt(self, cipher, key):
- '''Function to decrypt text using psedo-random numbers.'''
+
+ def decrypt(self, cipher: [str], key: [int]) -> str:
+ """Function to decrypt text using pseudo-random numbers."""
plain = []
for i in range(len(key)):
- p = int((cipher[i]-(key[i])**2)/key[i])
+ p = int((cipher[i] - (key[i]) ** 2) / key[i])
plain.append(chr(p))
- plain = ''.join([i for i in plain])
+ plain = "".join([i for i in plain])
return plain
-if __name__ == '__main__':
- c, k = Onepad().encrypt('Hello')
+if __name__ == "__main__":
+ c, k = Onepad().encrypt("Hello")
print(c, k)
print(Onepad().decrypt(c, k))
diff --git a/ciphers/playfair_cipher.py b/ciphers/playfair_cipher.py
index 20449b161963..219437448e53 100644
--- a/ciphers/playfair_cipher.py
+++ b/ciphers/playfair_cipher.py
@@ -1,47 +1,48 @@
-import string
import itertools
+import string
+
def chunker(seq, size):
it = iter(seq)
while True:
- chunk = tuple(itertools.islice(it, size))
- if not chunk:
- return
- yield chunk
+ chunk = tuple(itertools.islice(it, size))
+ if not chunk:
+ return
+ yield chunk
-
-def prepare_input(dirty):
+def prepare_input(dirty: str) -> str:
"""
Prepare the plaintext by up-casing it
and separating repeated letters with X's
"""
-
- dirty = ''.join([c.upper() for c in dirty if c in string.ascii_letters])
+
+ dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
clean = ""
-
+
if len(dirty) < 2:
return dirty
- for i in range(len(dirty)-1):
+ for i in range(len(dirty) - 1):
clean += dirty[i]
-
- if dirty[i] == dirty[i+1]:
- clean += 'X'
-
+
+ if dirty[i] == dirty[i + 1]:
+ clean += "X"
+
clean += dirty[-1]
if len(clean) & 1:
- clean += 'X'
+ clean += "X"
return clean
-def generate_table(key):
+
+def generate_table(key: str) -> [str]:
# I and J are used interchangeably to allow
# us to use a 5x5 table (25 letters)
alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
- # we're using a list instead of a '2d' array because it makes the math
+ # we're using a list instead of a '2d' array because it makes the math
# for setting up the table and doing the actual encoding/decoding simpler
table = []
@@ -57,7 +58,8 @@ def generate_table(key):
return table
-def encode(plaintext, key):
+
+def encode(plaintext: str, key: str) -> str:
table = generate_table(key)
plaintext = prepare_input(plaintext)
ciphertext = ""
@@ -68,19 +70,19 @@ def encode(plaintext, key):
row2, col2 = divmod(table.index(char2), 5)
if row1 == row2:
- ciphertext += table[row1*5+(col1+1)%5]
- ciphertext += table[row2*5+(col2+1)%5]
+ ciphertext += table[row1 * 5 + (col1 + 1) % 5]
+ ciphertext += table[row2 * 5 + (col2 + 1) % 5]
elif col1 == col2:
- ciphertext += table[((row1+1)%5)*5+col1]
- ciphertext += table[((row2+1)%5)*5+col2]
- else: # rectangle
- ciphertext += table[row1*5+col2]
- ciphertext += table[row2*5+col1]
+ ciphertext += table[((row1 + 1) % 5) * 5 + col1]
+ ciphertext += table[((row2 + 1) % 5) * 5 + col2]
+ else: # rectangle
+ ciphertext += table[row1 * 5 + col2]
+ ciphertext += table[row2 * 5 + col1]
return ciphertext
-def decode(ciphertext, key):
+def decode(ciphertext: str, key: str) -> str:
table = generate_table(key)
plaintext = ""
@@ -90,13 +92,13 @@ def decode(ciphertext, key):
row2, col2 = divmod(table.index(char2), 5)
if row1 == row2:
- plaintext += table[row1*5+(col1-1)%5]
- plaintext += table[row2*5+(col2-1)%5]
+ plaintext += table[row1 * 5 + (col1 - 1) % 5]
+ plaintext += table[row2 * 5 + (col2 - 1) % 5]
elif col1 == col2:
- plaintext += table[((row1-1)%5)*5+col1]
- plaintext += table[((row2-1)%5)*5+col2]
- else: # rectangle
- plaintext += table[row1*5+col2]
- plaintext += table[row2*5+col1]
+ plaintext += table[((row1 - 1) % 5) * 5 + col1]
+ plaintext += table[((row2 - 1) % 5) * 5 + col2]
+ else: # rectangle
+ plaintext += table[row1 * 5 + col2]
+ plaintext += table[row2 * 5 + col1]
return plaintext
diff --git a/ciphers/porta_cipher.py b/ciphers/porta_cipher.py
new file mode 100644
index 000000000000..29043c4c9fac
--- /dev/null
+++ b/ciphers/porta_cipher.py
@@ -0,0 +1,110 @@
+alphabet = {
+ "A": ("ABCDEFGHIJKLM", "NOPQRSTUVWXYZ"),
+ "B": ("ABCDEFGHIJKLM", "NOPQRSTUVWXYZ"),
+ "C": ("ABCDEFGHIJKLM", "ZNOPQRSTUVWXY"),
+ "D": ("ABCDEFGHIJKLM", "ZNOPQRSTUVWXY"),
+ "E": ("ABCDEFGHIJKLM", "YZNOPQRSTUVWX"),
+ "F": ("ABCDEFGHIJKLM", "YZNOPQRSTUVWX"),
+ "G": ("ABCDEFGHIJKLM", "XYZNOPQRSTUVW"),
+ "H": ("ABCDEFGHIJKLM", "XYZNOPQRSTUVW"),
+ "I": ("ABCDEFGHIJKLM", "WXYZNOPQRSTUV"),
+ "J": ("ABCDEFGHIJKLM", "WXYZNOPQRSTUV"),
+ "K": ("ABCDEFGHIJKLM", "VWXYZNOPQRSTU"),
+ "L": ("ABCDEFGHIJKLM", "VWXYZNOPQRSTU"),
+ "M": ("ABCDEFGHIJKLM", "UVWXYZNOPQRST"),
+ "N": ("ABCDEFGHIJKLM", "UVWXYZNOPQRST"),
+ "O": ("ABCDEFGHIJKLM", "TUVWXYZNOPQRS"),
+ "P": ("ABCDEFGHIJKLM", "TUVWXYZNOPQRS"),
+ "Q": ("ABCDEFGHIJKLM", "STUVWXYZNOPQR"),
+ "R": ("ABCDEFGHIJKLM", "STUVWXYZNOPQR"),
+ "S": ("ABCDEFGHIJKLM", "RSTUVWXYZNOPQ"),
+ "T": ("ABCDEFGHIJKLM", "RSTUVWXYZNOPQ"),
+ "U": ("ABCDEFGHIJKLM", "QRSTUVWXYZNOP"),
+ "V": ("ABCDEFGHIJKLM", "QRSTUVWXYZNOP"),
+ "W": ("ABCDEFGHIJKLM", "PQRSTUVWXYZNO"),
+ "X": ("ABCDEFGHIJKLM", "PQRSTUVWXYZNO"),
+ "Y": ("ABCDEFGHIJKLM", "OPQRSTUVWXYZN"),
+ "Z": ("ABCDEFGHIJKLM", "OPQRSTUVWXYZN"),
+}
+
+
+def generate_table(key: str) -> [(str, str)]:
+ """
+ >>> generate_table('marvin') # doctest: +NORMALIZE_WHITESPACE
+ [('ABCDEFGHIJKLM', 'UVWXYZNOPQRST'), ('ABCDEFGHIJKLM', 'NOPQRSTUVWXYZ'),
+ ('ABCDEFGHIJKLM', 'STUVWXYZNOPQR'), ('ABCDEFGHIJKLM', 'QRSTUVWXYZNOP'),
+ ('ABCDEFGHIJKLM', 'WXYZNOPQRSTUV'), ('ABCDEFGHIJKLM', 'UVWXYZNOPQRST')]
+ """
+ return [alphabet[char] for char in key.upper()]
+
+
+def encrypt(key: str, words: str) -> str:
+ """
+ >>> encrypt('marvin', 'jessica')
+ 'QRACRWU'
+ """
+ cipher = ""
+ count = 0
+ table = generate_table(key)
+ for char in words.upper():
+ cipher += get_opponent(table[count], char)
+ count = (count + 1) % len(table)
+ return cipher
+
+
+def decrypt(key: str, words: str) -> str:
+ """
+ >>> decrypt('marvin', 'QRACRWU')
+ 'JESSICA'
+ """
+ return encrypt(key, words)
+
+
+def get_position(table: [(str, str)], char: str) -> (int, int) or (None, None):
+ """
+ >>> table = [
+ ... ('ABCDEFGHIJKLM', 'UVWXYZNOPQRST'), ('ABCDEFGHIJKLM', 'NOPQRSTUVWXYZ'),
+ ... ('ABCDEFGHIJKLM', 'STUVWXYZNOPQR'), ('ABCDEFGHIJKLM', 'QRSTUVWXYZNOP'),
+ ... ('ABCDEFGHIJKLM', 'WXYZNOPQRSTUV'), ('ABCDEFGHIJKLM', 'UVWXYZNOPQRST')]
+ >>> get_position(table, 'A')
+ (None, None)
+ """
+ if char in table[0]:
+ row = 0
+ else:
+ row = 1 if char in table[1] else -1
+ return (None, None) if row == -1 else (row, table[row].index(char))
+
+
+def get_opponent(table: [(str, str)], char: str) -> str:
+ """
+ >>> table = [
+ ... ('ABCDEFGHIJKLM', 'UVWXYZNOPQRST'), ('ABCDEFGHIJKLM', 'NOPQRSTUVWXYZ'),
+ ... ('ABCDEFGHIJKLM', 'STUVWXYZNOPQR'), ('ABCDEFGHIJKLM', 'QRSTUVWXYZNOP'),
+ ... ('ABCDEFGHIJKLM', 'WXYZNOPQRSTUV'), ('ABCDEFGHIJKLM', 'UVWXYZNOPQRST')]
+ >>> get_opponent(table, 'A')
+ 'A'
+ """
+ row, col = get_position(table, char.upper())
+ if row == 1:
+ return table[0][col]
+ else:
+ return table[1][col] if row == 0 else char
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod() # First ensure that all our tests are passing...
+ """
+ ENTER KEY: marvin
+ ENTER TEXT TO ENCRYPT: jessica
+ ENCRYPTED: QRACRWU
+ DECRYPTED WITH KEY: JESSICA
+ """
+ key = input("ENTER KEY: ").strip()
+ text = input("ENTER TEXT TO ENCRYPT: ").strip()
+ cipher_text = encrypt(key, text)
+
+ print(f"ENCRYPTED: {cipher_text}")
+ print(f"DECRYPTED WITH KEY: {decrypt(key, cipher_text)}")
diff --git a/ciphers/prehistoric_men.txt b/ciphers/prehistoric_men.txt
index 86c4de821bfc..a58e533a8405 100644
--- a/ciphers/prehistoric_men.txt
+++ b/ciphers/prehistoric_men.txt
@@ -3,9 +3,9 @@ Braidwood, Illustrated by Susan T. Richert
This eBook is for the use of anyone anywhere in the United States and most
-other parts of the world at no cost and with almost no restrictions
+other parts of the world at no cost and with almost no restrictions
whatsoever. You may copy it, give it away or re-use it under the terms of
-the Project Gutenberg License included with this eBook or online at
+the Project Gutenberg License included with this eBook or online at
www.gutenberg.org. If you are not located in the United States, you'll have
to check the laws of the country where you are located before using this ebook.
@@ -7109,9 +7109,9 @@ and permanent future for Project Gutenberg-tm and future
generations. To learn more about the Project Gutenberg Literary
Archive Foundation and how your efforts and donations can help, see
Sections 3 and 4 and the Foundation information page at
-www.gutenberg.org
+www.gutenberg.org
-Section 3. Information about the Project Gutenberg Literary
+Section 3. Information about the Project Gutenberg Literary
Archive Foundation
The Project Gutenberg Literary Archive Foundation is a non profit
diff --git a/ciphers/rabin_miller.py b/ciphers/rabin_miller.py
index f71fb03c0051..65c162984ece 100644
--- a/ciphers/rabin_miller.py
+++ b/ciphers/rabin_miller.py
@@ -1,9 +1,9 @@
-from __future__ import print_function
# Primality Testing with the Rabin-Miller Algorithm
import random
-def rabinMiller(num):
+
+def rabinMiller(num: int) -> bool:
s = num - 1
t = 0
@@ -24,24 +24,181 @@ def rabinMiller(num):
v = (v ** 2) % num
return True
-def isPrime(num):
- if (num < 2):
+
+def isPrime(num: int) -> bool:
+ if num < 2:
return False
- lowPrimes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59,
- 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127,
- 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191,
- 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257,
- 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331,
- 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401,
- 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467,
- 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563,
- 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631,
- 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709,
- 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797,
- 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877,
- 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967,
- 971, 977, 983, 991, 997]
+ lowPrimes = [
+ 2,
+ 3,
+ 5,
+ 7,
+ 11,
+ 13,
+ 17,
+ 19,
+ 23,
+ 29,
+ 31,
+ 37,
+ 41,
+ 43,
+ 47,
+ 53,
+ 59,
+ 61,
+ 67,
+ 71,
+ 73,
+ 79,
+ 83,
+ 89,
+ 97,
+ 101,
+ 103,
+ 107,
+ 109,
+ 113,
+ 127,
+ 131,
+ 137,
+ 139,
+ 149,
+ 151,
+ 157,
+ 163,
+ 167,
+ 173,
+ 179,
+ 181,
+ 191,
+ 193,
+ 197,
+ 199,
+ 211,
+ 223,
+ 227,
+ 229,
+ 233,
+ 239,
+ 241,
+ 251,
+ 257,
+ 263,
+ 269,
+ 271,
+ 277,
+ 281,
+ 283,
+ 293,
+ 307,
+ 311,
+ 313,
+ 317,
+ 331,
+ 337,
+ 347,
+ 349,
+ 353,
+ 359,
+ 367,
+ 373,
+ 379,
+ 383,
+ 389,
+ 397,
+ 401,
+ 409,
+ 419,
+ 421,
+ 431,
+ 433,
+ 439,
+ 443,
+ 449,
+ 457,
+ 461,
+ 463,
+ 467,
+ 479,
+ 487,
+ 491,
+ 499,
+ 503,
+ 509,
+ 521,
+ 523,
+ 541,
+ 547,
+ 557,
+ 563,
+ 569,
+ 571,
+ 577,
+ 587,
+ 593,
+ 599,
+ 601,
+ 607,
+ 613,
+ 617,
+ 619,
+ 631,
+ 641,
+ 643,
+ 647,
+ 653,
+ 659,
+ 661,
+ 673,
+ 677,
+ 683,
+ 691,
+ 701,
+ 709,
+ 719,
+ 727,
+ 733,
+ 739,
+ 743,
+ 751,
+ 757,
+ 761,
+ 769,
+ 773,
+ 787,
+ 797,
+ 809,
+ 811,
+ 821,
+ 823,
+ 827,
+ 829,
+ 839,
+ 853,
+ 857,
+ 859,
+ 863,
+ 877,
+ 881,
+ 883,
+ 887,
+ 907,
+ 911,
+ 919,
+ 929,
+ 937,
+ 941,
+ 947,
+ 953,
+ 967,
+ 971,
+ 977,
+ 983,
+ 991,
+ 997,
+ ]
if num in lowPrimes:
return True
@@ -52,13 +209,15 @@ def isPrime(num):
return rabinMiller(num)
-def generateLargePrime(keysize = 1024):
+
+def generateLargePrime(keysize: int = 1024) -> int:
while True:
num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
if isPrime(num):
return num
-if __name__ == '__main__':
+
+if __name__ == "__main__":
num = generateLargePrime()
- print(('Prime number:', num))
- print(('isPrime:', isPrime(num)))
+ print(("Prime number:", num))
+ print(("isPrime:", isPrime(num)))
diff --git a/ciphers/rail_fence_cipher.py b/ciphers/rail_fence_cipher.py
new file mode 100644
index 000000000000..2596415207ae
--- /dev/null
+++ b/ciphers/rail_fence_cipher.py
@@ -0,0 +1,102 @@
+""" https://en.wikipedia.org/wiki/Rail_fence_cipher """
+
+
+def encrypt(input_string: str, key: int) -> str:
+ """
+ Shuffles the character of a string by placing each of them
+ in a grid (the height is dependent on the key) in a zigzag
+ formation and reading it left to right.
+
+ >>> encrypt("Hello World", 4)
+ 'HWe olordll'
+
+ >>> encrypt("This is a message", 0)
+ Traceback (most recent call last):
+ ...
+ ValueError: Height of grid can't be 0 or negative
+
+ >>> encrypt(b"This is a byte string", 5)
+ Traceback (most recent call last):
+ ...
+ TypeError: sequence item 0: expected str instance, int found
+ """
+ grid = [[] for _ in range(key)]
+ lowest = key - 1
+
+ if key <= 0:
+ raise ValueError("Height of grid can't be 0 or negative")
+ if key == 1 or len(input_string) <= key:
+ return input_string
+
+ for position, character in enumerate(input_string):
+ num = position % (lowest * 2) # puts it in bounds
+ num = min(num, lowest * 2 - num) # creates zigzag pattern
+ grid[num].append(character)
+ grid = ["".join(row) for row in grid]
+ output_string = "".join(grid)
+
+ return output_string
+
+
+def decrypt(input_string: str, key: int) -> str:
+ """
+ Generates a template based on the key and fills it in with
+ the characters of the input string and then reading it in
+ a zigzag formation.
+
+ >>> decrypt("HWe olordll", 4)
+ 'Hello World'
+
+ >>> decrypt("This is a message", -10)
+ Traceback (most recent call last):
+ ...
+ ValueError: Height of grid can't be 0 or negative
+
+ >>> decrypt("My key is very big", 100)
+ 'My key is very big'
+ """
+ grid = []
+ lowest = key - 1
+
+ if key <= 0:
+ raise ValueError("Height of grid can't be 0 or negative")
+ if key == 1:
+ return input_string
+
+ temp_grid = [[] for _ in range(key)] # generates template
+ for position in range(len(input_string)):
+ num = position % (lowest * 2) # puts it in bounds
+ num = min(num, lowest * 2 - num) # creates zigzag pattern
+ temp_grid[num].append("*")
+
+ counter = 0
+ for row in temp_grid: # fills in the characters
+ splice = input_string[counter : counter + len(row)]
+ grid.append([character for character in splice])
+ counter += len(row)
+
+ output_string = "" # reads as zigzag
+ for position in range(len(input_string)):
+ num = position % (lowest * 2) # puts it in bounds
+ num = min(num, lowest * 2 - num) # creates zigzag pattern
+ output_string += grid[num][0]
+ grid[num].pop(0)
+ return output_string
+
+
+def bruteforce(input_string: str) -> dict:
+ """Uses decrypt function by guessing every key
+
+ >>> bruteforce("HWe olordll")[4]
+ 'Hello World'
+ """
+ results = {}
+ for key_guess in range(1, len(input_string)): # tries every key
+ results[key_guess] = decrypt(input_string, key_guess)
+ return results
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/ciphers/rot13.py b/ciphers/rot13.py
index 2abf981e9d7d..21dbda98eecc 100644
--- a/ciphers/rot13.py
+++ b/ciphers/rot13.py
@@ -1,25 +1,37 @@
-from __future__ import print_function
-def dencrypt(s, n):
- out = ''
+def dencrypt(s: str, n: int = 13) -> str:
+ """
+ https://en.wikipedia.org/wiki/ROT13
+
+ >>> msg = "My secret bank account number is 173-52946 so don't tell anyone!!"
+ >>> s = dencrypt(msg)
+ >>> s
+ "Zl frperg onax nppbhag ahzore vf 173-52946 fb qba'g gryy nalbar!!"
+ >>> dencrypt(s) == msg
+ True
+ """
+ out = ""
for c in s:
- if c >= 'A' and c <= 'Z':
- out += chr(ord('A') + (ord(c) - ord('A') + n) % 26)
- elif c >= 'a' and c <= 'z':
- out += chr(ord('a') + (ord(c) - ord('a') + n) % 26)
+ if "A" <= c <= "Z":
+ out += chr(ord("A") + (ord(c) - ord("A") + n) % 26)
+ elif "a" <= c <= "z":
+ out += chr(ord("a") + (ord(c) - ord("a") + n) % 26)
else:
out += c
return out
def main():
- s0 = 'HELLO'
+ s0 = input("Enter message: ")
s1 = dencrypt(s0, 13)
- print(s1) # URYYB
+ print("Encryption:", s1)
s2 = dencrypt(s1, 13)
- print(s2) # HELLO
+ print("Decryption: ", s2)
+
+if __name__ == "__main__":
+ import doctest
-if __name__ == '__main__':
+ doctest.testmod()
main()
diff --git a/ciphers/rsa_cipher.py b/ciphers/rsa_cipher.py
index d81f1ffc1a1e..57c916a44d4b 100644
--- a/ciphers/rsa_cipher.py
+++ b/ciphers/rsa_cipher.py
@@ -1,44 +1,47 @@
-from __future__ import print_function
-import sys, rsa_key_generator as rkg, os
+import os
+import sys
+
+from . import rsa_key_generator as rkg
DEFAULT_BLOCK_SIZE = 128
BYTE_SIZE = 256
+
def main():
- filename = 'encrypted_file.txt'
- response = input(r'Encrypte\Decrypt [e\d]: ')
-
- if response.lower().startswith('e'):
- mode = 'encrypt'
- elif response.lower().startswith('d'):
- mode = 'decrypt'
-
- if mode == 'encrypt':
- if not os.path.exists('rsa_pubkey.txt'):
- rkg.makeKeyFiles('rsa', 1024)
-
- message = input('\nEnter message: ')
- pubKeyFilename = 'rsa_pubkey.txt'
- print('Encrypting and writing to %s...' % (filename))
+ filename = "encrypted_file.txt"
+ response = input(r"Encrypt\Decrypt [e\d]: ")
+
+ if response.lower().startswith("e"):
+ mode = "encrypt"
+ elif response.lower().startswith("d"):
+ mode = "decrypt"
+
+ if mode == "encrypt":
+ if not os.path.exists("rsa_pubkey.txt"):
+ rkg.makeKeyFiles("rsa", 1024)
+
+ message = input("\nEnter message: ")
+ pubKeyFilename = "rsa_pubkey.txt"
+ print("Encrypting and writing to %s..." % (filename))
encryptedText = encryptAndWriteToFile(filename, pubKeyFilename, message)
- print('\nEncrypted text:')
+ print("\nEncrypted text:")
print(encryptedText)
- elif mode == 'decrypt':
- privKeyFilename = 'rsa_privkey.txt'
- print('Reading from %s and decrypting...' % (filename))
+ elif mode == "decrypt":
+ privKeyFilename = "rsa_privkey.txt"
+ print("Reading from %s and decrypting..." % (filename))
decryptedText = readFromFileAndDecrypt(filename, privKeyFilename)
- print('writing decryption to rsa_decryption.txt...')
- with open('rsa_decryption.txt', 'w') as dec:
+ print("writing decryption to rsa_decryption.txt...")
+ with open("rsa_decryption.txt", "w") as dec:
dec.write(decryptedText)
- print('\nDecryption:')
+ print("\nDecryption:")
print(decryptedText)
-def getBlocksFromText(message, blockSize=DEFAULT_BLOCK_SIZE):
- messageBytes = message.encode('ascii')
+def getBlocksFromText(message: int, blockSize: int = DEFAULT_BLOCK_SIZE) -> [int]:
+ messageBytes = message.encode("ascii")
blockInts = []
for blockStart in range(0, len(messageBytes), blockSize):
@@ -49,7 +52,9 @@ def getBlocksFromText(message, blockSize=DEFAULT_BLOCK_SIZE):
return blockInts
-def getTextFromBlocks(blockInts, messageLength, blockSize=DEFAULT_BLOCK_SIZE):
+def getTextFromBlocks(
+ blockInts: [int], messageLength: int, blockSize: int = DEFAULT_BLOCK_SIZE
+) -> str:
message = []
for blockInt in blockInts:
blockMessage = []
@@ -59,10 +64,12 @@ def getTextFromBlocks(blockInts, messageLength, blockSize=DEFAULT_BLOCK_SIZE):
blockInt = blockInt % (BYTE_SIZE ** i)
blockMessage.insert(0, chr(asciiNumber))
message.extend(blockMessage)
- return ''.join(message)
+ return "".join(message)
-def encryptMessage(message, key, blockSize=DEFAULT_BLOCK_SIZE):
+def encryptMessage(
+ message: str, key: (int, int), blockSize: int = DEFAULT_BLOCK_SIZE
+) -> [int]:
encryptedBlocks = []
n, e = key
@@ -71,7 +78,12 @@ def encryptMessage(message, key, blockSize=DEFAULT_BLOCK_SIZE):
return encryptedBlocks
-def decryptMessage(encryptedBlocks, messageLength, key, blockSize=DEFAULT_BLOCK_SIZE):
+def decryptMessage(
+ encryptedBlocks: [int],
+ messageLength: int,
+ key: (int, int),
+ blockSize: int = DEFAULT_BLOCK_SIZE,
+) -> str:
decryptedBlocks = []
n, d = key
for block in encryptedBlocks:
@@ -79,45 +91,61 @@ def decryptMessage(encryptedBlocks, messageLength, key, blockSize=DEFAULT_BLOCK_
return getTextFromBlocks(decryptedBlocks, messageLength, blockSize)
-def readKeyFile(keyFilename):
+def readKeyFile(keyFilename: str) -> (int, int, int):
with open(keyFilename) as fo:
content = fo.read()
- keySize, n, EorD = content.split(',')
+ keySize, n, EorD = content.split(",")
return (int(keySize), int(n), int(EorD))
-def encryptAndWriteToFile(messageFilename, keyFilename, message, blockSize=DEFAULT_BLOCK_SIZE):
+def encryptAndWriteToFile(
+ messageFilename: str,
+ keyFilename: str,
+ message: str,
+ blockSize: int = DEFAULT_BLOCK_SIZE,
+) -> str:
keySize, n, e = readKeyFile(keyFilename)
if keySize < blockSize * 8:
- sys.exit('ERROR: Block size is %s bits and key size is %s bits. The RSA cipher requires the block size to be equal to or greater than the key size. Either decrease the block size or use different keys.' % (blockSize * 8, keySize))
+ sys.exit(
+ "ERROR: Block size is %s bits and key size is %s bits. The RSA cipher "
+ "requires the block size to be equal to or greater than the key size. "
+ "Either decrease the block size or use different keys."
+ % (blockSize * 8, keySize)
+ )
encryptedBlocks = encryptMessage(message, (n, e), blockSize)
for i in range(len(encryptedBlocks)):
encryptedBlocks[i] = str(encryptedBlocks[i])
- encryptedContent = ','.join(encryptedBlocks)
- encryptedContent = '%s_%s_%s' % (len(message), blockSize, encryptedContent)
- with open(messageFilename, 'w') as fo:
+ encryptedContent = ",".join(encryptedBlocks)
+ encryptedContent = "{}_{}_{}".format(len(message), blockSize, encryptedContent)
+ with open(messageFilename, "w") as fo:
fo.write(encryptedContent)
return encryptedContent
-def readFromFileAndDecrypt(messageFilename, keyFilename):
+def readFromFileAndDecrypt(messageFilename: str, keyFilename: str) -> str:
keySize, n, d = readKeyFile(keyFilename)
with open(messageFilename) as fo:
content = fo.read()
- messageLength, blockSize, encryptedMessage = content.split('_')
+ messageLength, blockSize, encryptedMessage = content.split("_")
messageLength = int(messageLength)
blockSize = int(blockSize)
if keySize < blockSize * 8:
- sys.exit('ERROR: Block size is %s bits and key size is %s bits. The RSA cipher requires the block size to be equal to or greater than the key size. Did you specify the correct key file and encrypted file?' % (blockSize * 8, keySize))
+ sys.exit(
+ "ERROR: Block size is %s bits and key size is %s bits. The RSA cipher "
+ "requires the block size to be equal to or greater than the key size. "
+ "Did you specify the correct key file and encrypted file?"
+ % (blockSize * 8, keySize)
+ )
encryptedBlocks = []
- for block in encryptedMessage.split(','):
+ for block in encryptedMessage.split(","):
encryptedBlocks.append(int(block))
return decryptMessage(encryptedBlocks, messageLength, (n, d), blockSize)
-if __name__ == '__main__':
+
+if __name__ == "__main__":
main()
diff --git a/ciphers/rsa_factorization.py b/ciphers/rsa_factorization.py
new file mode 100644
index 000000000000..b18aab609e2d
--- /dev/null
+++ b/ciphers/rsa_factorization.py
@@ -0,0 +1,57 @@
+"""
+An RSA prime factor algorithm.
+
+The program can efficiently factor RSA prime number given the private key d and
+public key e.
+Source: on page 3 of https://crypto.stanford.edu/~dabo/papers/RSA-survey.pdf
+More readable source: https://www.di-mgt.com.au/rsa_factorize_n.html
+Large numbers can take minutes to factor and are therefore not included in the doctests.
+"""
+from __future__ import annotations
+
+import math
+import random
+
+
+def rsafactor(d: int, e: int, N: int) -> [int]:
+ """
+ This function returns the factors of N, where p*q=N
+ Return: [p, q]
+
+ We call N the RSA modulus, e the encryption exponent, and d the decryption exponent.
+ The pair (N, e) is the public key. As its name suggests, it is public and is used to
+ encrypt messages.
+ The pair (N, d) is the secret key or private key and is known only to the recipient
+ of encrypted messages.
+
+ >>> rsafactor(3, 16971, 25777)
+ [149, 173]
+ >>> rsafactor(7331, 11, 27233)
+ [113, 241]
+ >>> rsafactor(4021, 13, 17711)
+ [89, 199]
+ """
+ k = d * e - 1
+ p = 0
+ q = 0
+ while p == 0:
+ g = random.randint(2, N - 1)
+ t = k
+ while True:
+ if t % 2 == 0:
+ t = t // 2
+ x = (g ** t) % N
+ y = math.gcd(x - 1, N)
+ if x > 1 and y > 1:
+ p = y
+ q = N // y
+ break # find the correct factors
+ else:
+ break # t is not divisible by 2, break and choose another g
+ return sorted([p, q])
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/ciphers/rsa_key_generator.py b/ciphers/rsa_key_generator.py
index 541e90d6e884..5693aa637ee9 100644
--- a/ciphers/rsa_key_generator.py
+++ b/ciphers/rsa_key_generator.py
@@ -1,46 +1,60 @@
-from __future__ import print_function
-import random, sys, os
-import rabin_miller as rabinMiller, cryptomath_module as cryptoMath
+import os
+import random
+import sys
+from typing import Tuple
+
+from . import cryptomath_module as cryptoMath
+from . import rabin_miller as rabinMiller
+
def main():
- print('Making key files...')
- makeKeyFiles('rsa', 1024)
- print('Key files generation successful.')
+ print("Making key files...")
+ makeKeyFiles("rsa", 1024)
+ print("Key files generation successful.")
+
-def generateKey(keySize):
- print('Generating prime p...')
+def generateKey(keySize: int) -> Tuple[Tuple[int, int], Tuple[int, int]]:
+ print("Generating prime p...")
p = rabinMiller.generateLargePrime(keySize)
- print('Generating prime q...')
+ print("Generating prime q...")
q = rabinMiller.generateLargePrime(keySize)
n = p * q
- print('Generating e that is relatively prime to (p - 1) * (q - 1)...')
+ print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
while True:
e = random.randrange(2 ** (keySize - 1), 2 ** (keySize))
if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
break
- print('Calculating d that is mod inverse of e...')
+ print("Calculating d that is mod inverse of e...")
d = cryptoMath.findModInverse(e, (p - 1) * (q - 1))
publicKey = (n, e)
privateKey = (n, d)
return (publicKey, privateKey)
-def makeKeyFiles(name, keySize):
- if os.path.exists('%s_pubkey.txt' % (name)) or os.path.exists('%s_privkey.txt' % (name)):
- print('\nWARNING:')
- print('"%s_pubkey.txt" or "%s_privkey.txt" already exists. \nUse a different name or delete these files and re-run this program.' % (name, name))
+
+def makeKeyFiles(name: int, keySize: int) -> None:
+ if os.path.exists("%s_pubkey.txt" % (name)) or os.path.exists(
+ "%s_privkey.txt" % (name)
+ ):
+ print("\nWARNING:")
+ print(
+ '"%s_pubkey.txt" or "%s_privkey.txt" already exists. \n'
+ "Use a different name or delete these files and re-run this program."
+ % (name, name)
+ )
sys.exit()
publicKey, privateKey = generateKey(keySize)
- print('\nWriting public key to file %s_pubkey.txt...' % name)
- with open('%s_pubkey.txt' % name, 'w') as fo:
- fo.write('%s,%s,%s' % (keySize, publicKey[0], publicKey[1]))
+ print("\nWriting public key to file %s_pubkey.txt..." % name)
+ with open("%s_pubkey.txt" % name, "w") as out_file:
+ out_file.write("{},{},{}".format(keySize, publicKey[0], publicKey[1]))
+
+ print("Writing private key to file %s_privkey.txt..." % name)
+ with open("%s_privkey.txt" % name, "w") as out_file:
+ out_file.write("{},{},{}".format(keySize, privateKey[0], privateKey[1]))
- print('Writing private key to file %s_privkey.txt...' % name)
- with open('%s_privkey.txt' % name, 'w') as fo:
- fo.write('%s,%s,%s' % (keySize, privateKey[0], privateKey[1]))
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/ciphers/shuffled_shift_cipher.py b/ciphers/shuffled_shift_cipher.py
new file mode 100644
index 000000000000..22628f3c9d9e
--- /dev/null
+++ b/ciphers/shuffled_shift_cipher.py
@@ -0,0 +1,182 @@
+import random
+import string
+
+
+class ShuffledShiftCipher:
+ """
+ This algorithm uses the Caesar Cipher algorithm but removes the option to
+ use brute force to decrypt the message.
+
+    The passcode is a random password from the selection buffer of
+ 1. uppercase letters of the English alphabet
+ 2. lowercase letters of the English alphabet
+ 3. digits from 0 to 9
+
+ Using unique characters from the passcode, the normal list of characters,
+ that can be allowed in the plaintext, is pivoted and shuffled. Refer to docstring
+ of __make_key_list() to learn more about the shuffling.
+
+ Then, using the passcode, a number is calculated which is used to encrypt the
+ plaintext message with the normal shift cipher method, only in this case, the
+ reference, to look back at while decrypting, is shuffled.
+
+ Each cipher object can possess an optional argument as passcode, without which a
+ new passcode is generated for that object automatically.
+ cip1 = ShuffledShiftCipher('d4usr9TWxw9wMD')
+ cip2 = ShuffledShiftCipher()
+ """
+
+ def __init__(self, passcode: str = None):
+ """
+        Initializes a cipher object with a passcode as its entity
+ Note: No new passcode is generated if user provides a passcode
+ while creating the object
+ """
+ self.__passcode = passcode or self.__passcode_creator()
+ self.__key_list = self.__make_key_list()
+ self.__shift_key = self.__make_shift_key()
+
+ def __str__(self):
+ """
+ :return: passcode of the cipher object
+ """
+ return "Passcode is: " + "".join(self.__passcode)
+
+ def __neg_pos(self, iterlist: list) -> list:
+ """
+ Mutates the list by changing the sign of each alternate element
+
+ :param iterlist: takes a list iterable
+ :return: the mutated list
+
+ """
+ for i in range(1, len(iterlist), 2):
+ iterlist[i] *= -1
+ return iterlist
+
+ def __passcode_creator(self) -> list:
+ """
+ Creates a random password from the selection buffer of
+ 1. uppercase letters of the English alphabet
+ 2. lowercase letters of the English alphabet
+ 3. digits from 0 to 9
+
+ :rtype: list
+ :return: a password of a random length between 10 to 20
+ """
+ choices = string.ascii_letters + string.digits
+ password = [random.choice(choices) for i in range(random.randint(10, 20))]
+ return password
+
+ def __make_key_list(self) -> list:
+ """
+ Shuffles the ordered character choices by pivoting at breakpoints
+ Breakpoints are the set of characters in the passcode
+
+ eg:
+ if, ABCDEFGHIJKLMNOPQRSTUVWXYZ are the possible characters
+ and CAMERA is the passcode
+ then, breakpoints = [A,C,E,M,R] # sorted set of characters from passcode
+ shuffled parts: [A,CB,ED,MLKJIHGF,RQPON,ZYXWVUTS]
+ shuffled __key_list : ACBEDMLKJIHGFRQPONZYXWVUTS
+
+        Shuffling only 26 letters of the English alphabet can generate 26!
+ combinations for the shuffled list. In the program we consider, a set of
+ 97 characters (including letters, digits, punctuation and whitespaces),
+ thereby creating a possibility of 97! combinations (which is a 152 digit number
+ in itself), thus diminishing the possibility of a brute force approach.
+ Moreover, shift keys even introduce a multiple of 26 for a brute force approach
+ for each of the already 97! combinations.
+ """
+ # key_list_options contain nearly all printable except few elements from
+ # string.whitespace
+ key_list_options = (
+ string.ascii_letters + string.digits + string.punctuation + " \t\n"
+ )
+
+ keys_l = []
+
+ # creates points known as breakpoints to break the key_list_options at those
+ # points and pivot each substring
+ breakpoints = sorted(set(self.__passcode))
+ temp_list = []
+
+ # algorithm for creating a new shuffled list, keys_l, out of key_list_options
+ for i in key_list_options:
+ temp_list.extend(i)
+
+ # checking breakpoints at which to pivot temporary sublist and add it into
+ # keys_l
+ if i in breakpoints or i == key_list_options[-1]:
+ keys_l.extend(temp_list[::-1])
+ temp_list = []
+
+ # returning a shuffled keys_l to prevent brute force guessing of shift key
+ return keys_l
+
+ def __make_shift_key(self) -> int:
+ """
+ sum() of the mutated list of ascii values of all characters where the
+ mutated list is the one returned by __neg_pos()
+ """
+ num = sum(self.__neg_pos([ord(x) for x in self.__passcode]))
+ return num if num > 0 else len(self.__passcode)
+
+ def decrypt(self, encoded_message: str) -> str:
+ """
+ Performs shifting of the encoded_message w.r.t. the shuffled __key_list
+ to create the decoded_message
+
+ >>> ssc = ShuffledShiftCipher('4PYIXyqeQZr44')
+ >>> ssc.decrypt("d>**-1z6&'5z'5z:z+-='$'>=zp:>5:#z<'.&>#")
+ 'Hello, this is a modified Caesar cipher'
+
+ """
+ decoded_message = ""
+
+ # decoding shift like Caesar cipher algorithm implementing negative shift or
+ # reverse shift or left shift
+ for i in encoded_message:
+ position = self.__key_list.index(i)
+ decoded_message += self.__key_list[
+ (position - self.__shift_key) % -len(self.__key_list)
+ ]
+
+ return decoded_message
+
+ def encrypt(self, plaintext: str) -> str:
+ """
+ Performs shifting of the plaintext w.r.t. the shuffled __key_list
+ to create the encoded_message
+
+ >>> ssc = ShuffledShiftCipher('4PYIXyqeQZr44')
+ >>> ssc.encrypt('Hello, this is a modified Caesar cipher')
+ "d>**-1z6&'5z'5z:z+-='$'>=zp:>5:#z<'.&>#"
+
+ """
+ encoded_message = ""
+
+ # encoding shift like Caesar cipher algorithm implementing positive shift or
+ # forward shift or right shift
+ for i in plaintext:
+ position = self.__key_list.index(i)
+ encoded_message += self.__key_list[
+ (position + self.__shift_key) % len(self.__key_list)
+ ]
+
+ return encoded_message
+
+
+def test_end_to_end(msg: str = "Hello, this is a modified Caesar cipher"):
+ """
+ >>> test_end_to_end()
+ 'Hello, this is a modified Caesar cipher'
+ """
+ cip1 = ShuffledShiftCipher()
+ return cip1.decrypt(cip1.encrypt(msg))
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/ciphers/simple_keyword_cypher.py b/ciphers/simple_keyword_cypher.py
new file mode 100644
index 000000000000..71c3083e9dfc
--- /dev/null
+++ b/ciphers/simple_keyword_cypher.py
@@ -0,0 +1,90 @@
+def remove_duplicates(key: str) -> str:
+ """
+ Removes duplicate alphabetic characters in a keyword (letter is ignored after its
+ first appearance).
+ :param key: Keyword to use
+ :return: String with duplicates removed
+ >>> remove_duplicates('Hello World!!')
+ 'Helo Wrd'
+ """
+
+ key_no_dups = ""
+ for ch in key:
+ if ch == " " or ch not in key_no_dups and ch.isalpha():
+ key_no_dups += ch
+ return key_no_dups
+
+
+def create_cipher_map(key: str) -> dict:
+ """
+ Returns a cipher map given a keyword.
+ :param key: keyword to use
+ :return: dictionary cipher map
+ """
+ # Create alphabet list
+ alphabet = [chr(i + 65) for i in range(26)]
+ # Remove duplicate characters from key
+ key = remove_duplicates(key.upper())
+ offset = len(key)
+ # First fill cipher with key characters
+ cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
+ # Then map remaining characters in alphabet to
+ # the alphabet from the beginning
+ for i in range(len(cipher_alphabet), 26):
+ char = alphabet[i - offset]
+ # Ensure we are not mapping letters to letters previously mapped
+ while char in key:
+ offset -= 1
+ char = alphabet[i - offset]
+ cipher_alphabet[alphabet[i]] = char
+ return cipher_alphabet
+
+
+def encipher(message: str, cipher_map: dict) -> str:
+ """
+ Enciphers a message given a cipher map.
+ :param message: Message to encipher
+ :param cipher_map: Cipher map
+ :return: enciphered string
+ >>> encipher('Hello World!!', create_cipher_map('Goodbye!!'))
+ 'CYJJM VMQJB!!'
+ """
+ return "".join(cipher_map.get(ch, ch) for ch in message.upper())
+
+
+def decipher(message: str, cipher_map: dict) -> str:
+ """
+ Deciphers a message given a cipher map
+ :param message: Message to decipher
+ :param cipher_map: Dictionary mapping to use
+ :return: Deciphered string
+ >>> cipher_map = create_cipher_map('Goodbye!!')
+ >>> decipher(encipher('Hello World!!', cipher_map), cipher_map)
+ 'HELLO WORLD!!'
+ """
+ # Reverse our cipher mappings
+ rev_cipher_map = {v: k for k, v in cipher_map.items()}
+ return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())
+
+
+def main():
+ """
+ Handles I/O
+ :return: void
+ """
+ message = input("Enter message to encode or decode: ").strip()
+ key = input("Enter keyword: ").strip()
+ option = input("Encipher or decipher? E/D:").strip()[0].lower()
+ try:
+ func = {"e": encipher, "d": decipher}[option]
+ except KeyError:
+ raise KeyError("invalid input option")
+ cipher_map = create_cipher_map(key)
+ print(func(message, cipher_map))
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
+ main()
diff --git a/ciphers/simple_substitution_cipher.py b/ciphers/simple_substitution_cipher.py
index 1bdd7dc04a57..646ea449fc06 100644
--- a/ciphers/simple_substitution_cipher.py
+++ b/ciphers/simple_substitution_cipher.py
@@ -1,55 +1,60 @@
-from __future__ import print_function
-import sys, random
+import random
+import sys
+
+LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
-LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def main():
- message = input('Enter message: ')
- key = 'LFWOAYUISVKMNXPBDCRJTQEGHZ'
- resp = input('Encrypt/Decrypt [e/d]: ')
+ message = input("Enter message: ")
+ key = "LFWOAYUISVKMNXPBDCRJTQEGHZ"
+ resp = input("Encrypt/Decrypt [e/d]: ")
checkValidKey(key)
- if resp.lower().startswith('e'):
- mode = 'encrypt'
+ if resp.lower().startswith("e"):
+ mode = "encrypt"
translated = encryptMessage(key, message)
- elif resp.lower().startswith('d'):
- mode = 'decrypt'
+ elif resp.lower().startswith("d"):
+ mode = "decrypt"
translated = decryptMessage(key, message)
- print('\n%sion: \n%s' % (mode.title(), translated))
-
-def checkValidKey(key):
+ print(f"\n{mode.title()}ion: \n{translated}")
+
+
+def checkValidKey(key: str) -> None:
keyList = list(key)
lettersList = list(LETTERS)
keyList.sort()
lettersList.sort()
if keyList != lettersList:
- sys.exit('Error in the key or symbol set.')
+ sys.exit("Error in the key or symbol set.")
-def encryptMessage(key, message):
+
+def encryptMessage(key: str, message: str) -> str:
"""
>>> encryptMessage('LFWOAYUISVKMNXPBDCRJTQEGHZ', 'Harshil Darji')
'Ilcrism Olcvs'
"""
- return translateMessage(key, message, 'encrypt')
+ return translateMessage(key, message, "encrypt")
+
-def decryptMessage(key, message):
+def decryptMessage(key: str, message: str) -> str:
"""
>>> decryptMessage('LFWOAYUISVKMNXPBDCRJTQEGHZ', 'Ilcrism Olcvs')
'Harshil Darji'
"""
- return translateMessage(key, message, 'decrypt')
+ return translateMessage(key, message, "decrypt")
-def translateMessage(key, message, mode):
- translated = ''
+
+def translateMessage(key: str, message: str, mode: str) -> str:
+ translated = ""
charsA = LETTERS
charsB = key
- if mode == 'decrypt':
+ if mode == "decrypt":
charsA, charsB = charsB, charsA
-
+
for symbol in message:
if symbol.upper() in charsA:
symIndex = charsA.find(symbol.upper())
@@ -62,10 +67,12 @@ def translateMessage(key, message, mode):
return translated
+
def getRandomKey():
key = list(LETTERS)
random.shuffle(key)
- return ''.join(key)
+ return "".join(key)
+
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/ciphers/trafid_cipher.py b/ciphers/trafid_cipher.py
index 0453272f26a0..328814f97744 100644
--- a/ciphers/trafid_cipher.py
+++ b/ciphers/trafid_cipher.py
@@ -1,9 +1,10 @@
-#https://en.wikipedia.org/wiki/Trifid_cipher
+# https://en.wikipedia.org/wiki/Trifid_cipher
-def __encryptPart(messagePart, character2Number):
+
+def __encryptPart(messagePart: str, character2Number: dict) -> str:
one, two, three = "", "", ""
tmp = []
-
+
for character in messagePart:
tmp.append(character2Number[character])
@@ -11,10 +12,11 @@ def __encryptPart(messagePart, character2Number):
one += each[0]
two += each[1]
three += each[2]
-
- return one+two+three
-def __decryptPart(messagePart, character2Number):
+ return one + two + three
+
+
+def __decryptPart(messagePart: str, character2Number: dict) -> (str, str, str):
tmp, thisPart = "", ""
result = []
@@ -25,62 +27,98 @@ def __decryptPart(messagePart, character2Number):
tmp += digit
if len(tmp) == len(messagePart):
result.append(tmp)
- tmp = ""
+ tmp = ""
return result[0], result[1], result[2]
-def __prepare(message, alphabet):
- #Validate message and alphabet, set to upper and remove spaces
+
+def __prepare(message: str, alphabet: str) -> (str, str, dict, dict):
+ # Validate message and alphabet, set to upper and remove spaces
alphabet = alphabet.replace(" ", "").upper()
message = message.replace(" ", "").upper()
- #Check length and characters
+ # Check length and characters
if len(alphabet) != 27:
raise KeyError("Length of alphabet has to be 27.")
for each in message:
if each not in alphabet:
raise ValueError("Each message character has to be included in alphabet!")
- #Generate dictionares
- numbers = ("111","112","113","121","122","123","131","132","133","211","212","213","221","222","223","231","232","233","311","312","313","321","322","323","331","332","333")
+    # Generate dictionaries
+ numbers = (
+ "111",
+ "112",
+ "113",
+ "121",
+ "122",
+ "123",
+ "131",
+ "132",
+ "133",
+ "211",
+ "212",
+ "213",
+ "221",
+ "222",
+ "223",
+ "231",
+ "232",
+ "233",
+ "311",
+ "312",
+ "313",
+ "321",
+ "322",
+ "323",
+ "331",
+ "332",
+ "333",
+ )
character2Number = {}
number2Character = {}
for letter, number in zip(alphabet, numbers):
character2Number[letter] = number
number2Character[number] = letter
-
+
return message, alphabet, character2Number, number2Character
-def encryptMessage(message, alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ.", period=5):
+
+def encryptMessage(
+ message: str, alphabet: str = "ABCDEFGHIJKLMNOPQRSTUVWXYZ.", period: int = 5
+) -> str:
message, alphabet, character2Number, number2Character = __prepare(message, alphabet)
encrypted, encrypted_numeric = "", ""
- for i in range(0, len(message)+1, period):
- encrypted_numeric += __encryptPart(message[i:i+period], character2Number)
-
+ for i in range(0, len(message) + 1, period):
+ encrypted_numeric += __encryptPart(message[i : i + period], character2Number)
+
for i in range(0, len(encrypted_numeric), 3):
- encrypted += number2Character[encrypted_numeric[i:i+3]]
+ encrypted += number2Character[encrypted_numeric[i : i + 3]]
return encrypted
-def decryptMessage(message, alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ.", period=5):
+
+def decryptMessage(
+ message: str, alphabet: str = "ABCDEFGHIJKLMNOPQRSTUVWXYZ.", period: int = 5
+) -> str:
message, alphabet, character2Number, number2Character = __prepare(message, alphabet)
decrypted_numeric = []
decrypted = ""
- for i in range(0, len(message)+1, period):
- a,b,c = __decryptPart(message[i:i+period], character2Number)
-
+ for i in range(0, len(message) + 1, period):
+ a, b, c = __decryptPart(message[i : i + period], character2Number)
+
for j in range(0, len(a)):
- decrypted_numeric.append(a[j]+b[j]+c[j])
+ decrypted_numeric.append(a[j] + b[j] + c[j])
for each in decrypted_numeric:
decrypted += number2Character[each]
return decrypted
-if __name__ == '__main__':
+
+if __name__ == "__main__":
msg = "DEFEND THE EAST WALL OF THE CASTLE."
- encrypted = encryptMessage(msg,"EPSDUCVWYM.ZLKXNBTFGORIJHAQ")
+ encrypted = encryptMessage(msg, "EPSDUCVWYM.ZLKXNBTFGORIJHAQ")
decrypted = decryptMessage(encrypted, "EPSDUCVWYM.ZLKXNBTFGORIJHAQ")
- print ("Encrypted: {}\nDecrypted: {}".format(encrypted, decrypted))
\ No newline at end of file
+ print(f"Encrypted: {encrypted}\nDecrypted: {decrypted}")
diff --git a/ciphers/transposition_cipher.py b/ciphers/transposition_cipher.py
index dbb358315d22..6a0a22d3e31d 100644
--- a/ciphers/transposition_cipher.py
+++ b/ciphers/transposition_cipher.py
@@ -1,33 +1,42 @@
-from __future__ import print_function
import math
+"""
+In cryptography, the TRANSPOSITION cipher is a method of encryption where the
+positions of plaintext are shifted a certain number (determined by the key) that
+follows a regular system that results in the permuted text, known as the encrypted
+text. The type of transposition cipher demonstrated below is the ROUTE cipher.
+"""
+
+
def main():
- message = input('Enter message: ')
- key = int(input('Enter key [2-%s]: ' % (len(message) - 1)))
- mode = input('Encryption/Decryption [e/d]: ')
+ message = input("Enter message: ")
+ key = int(input("Enter key [2-%s]: " % (len(message) - 1)))
+ mode = input("Encryption/Decryption [e/d]: ")
- if mode.lower().startswith('e'):
+ if mode.lower().startswith("e"):
text = encryptMessage(key, message)
- elif mode.lower().startswith('d'):
+ elif mode.lower().startswith("d"):
text = decryptMessage(key, message)
# Append pipe symbol (vertical bar) to identify spaces at the end.
- print('Output:\n%s' %(text + '|'))
+ print("Output:\n%s" % (text + "|"))
+
-def encryptMessage(key, message):
+def encryptMessage(key: int, message: str) -> str:
"""
>>> encryptMessage(6, 'Harshil Darji')
'Hlia rDsahrij'
"""
- cipherText = [''] * key
+ cipherText = [""] * key
for col in range(key):
pointer = col
while pointer < len(message):
cipherText[col] += message[pointer]
pointer += key
- return ''.join(cipherText)
+ return "".join(cipherText)
-def decryptMessage(key, message):
+
+def decryptMessage(key: int, message: str) -> str:
"""
>>> decryptMessage(6, 'Hlia rDsahrij')
'Harshil Darji'
@@ -36,19 +45,26 @@ def decryptMessage(key, message):
numRows = key
numShadedBoxes = (numCols * numRows) - len(message)
plainText = [""] * numCols
- col = 0; row = 0;
+ col = 0
+ row = 0
for symbol in message:
plainText[col] += symbol
col += 1
- if (col == numCols) or (col == numCols - 1) and (row >= numRows - numShadedBoxes):
+ if (
+ (col == numCols)
+ or (col == numCols - 1)
+ and (row >= numRows - numShadedBoxes)
+ ):
col = 0
row += 1
return "".join(plainText)
-if __name__ == '__main__':
+
+if __name__ == "__main__":
import doctest
+
doctest.testmod()
main()
diff --git a/ciphers/transposition_cipher_encrypt_decrypt_file.py b/ciphers/transposition_cipher_encrypt_decrypt_file.py
index a186cf81cde7..45aab056109a 100644
--- a/ciphers/transposition_cipher_encrypt_decrypt_file.py
+++ b/ciphers/transposition_cipher_encrypt_decrypt_file.py
@@ -1,37 +1,41 @@
-from __future__ import print_function
-import time, os, sys
-import transposition_cipher as transCipher
+import os
+import sys
+import time
+
+from . import transposition_cipher as transCipher
+
def main():
- inputFile = 'Prehistoric Men.txt'
- outputFile = 'Output.txt'
- key = int(input('Enter key: '))
- mode = input('Encrypt/Decrypt [e/d]: ')
+ inputFile = "Prehistoric Men.txt"
+ outputFile = "Output.txt"
+ key = int(input("Enter key: "))
+ mode = input("Encrypt/Decrypt [e/d]: ")
if not os.path.exists(inputFile):
- print('File %s does not exist. Quitting...' % inputFile)
+ print("File %s does not exist. Quitting..." % inputFile)
sys.exit()
if os.path.exists(outputFile):
- print('Overwrite %s? [y/n]' % outputFile)
- response = input('> ')
- if not response.lower().startswith('y'):
+ print("Overwrite %s? [y/n]" % outputFile)
+ response = input("> ")
+ if not response.lower().startswith("y"):
sys.exit()
-
+
startTime = time.time()
- if mode.lower().startswith('e'):
+ if mode.lower().startswith("e"):
with open(inputFile) as f:
content = f.read()
translated = transCipher.encryptMessage(key, content)
- elif mode.lower().startswith('d'):
+ elif mode.lower().startswith("d"):
with open(outputFile) as f:
content = f.read()
- translated =transCipher .decryptMessage(key, content)
+ translated = transCipher.decryptMessage(key, content)
- with open(outputFile, 'w') as outputObj:
+ with open(outputFile, "w") as outputObj:
outputObj.write(translated)
-
+
totalTime = round(time.time() - startTime, 2)
- print(('Done (', totalTime, 'seconds )'))
-
-if __name__ == '__main__':
+ print(("Done (", totalTime, "seconds )"))
+
+
+if __name__ == "__main__":
main()
diff --git a/ciphers/vigenere_cipher.py b/ciphers/vigenere_cipher.py
index 5d5be0792835..eb523d078005 100644
--- a/ciphers/vigenere_cipher.py
+++ b/ciphers/vigenere_cipher.py
@@ -1,36 +1,39 @@
-from __future__ import print_function
-LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+
def main():
- message = input('Enter message: ')
- key = input('Enter key [alphanumeric]: ')
- mode = input('Encrypt/Decrypt [e/d]: ')
+ message = input("Enter message: ")
+ key = input("Enter key [alphanumeric]: ")
+ mode = input("Encrypt/Decrypt [e/d]: ")
- if mode.lower().startswith('e'):
- mode = 'encrypt'
+ if mode.lower().startswith("e"):
+ mode = "encrypt"
translated = encryptMessage(key, message)
- elif mode.lower().startswith('d'):
- mode = 'decrypt'
+ elif mode.lower().startswith("d"):
+ mode = "decrypt"
translated = decryptMessage(key, message)
- print('\n%sed message:' % mode.title())
+ print("\n%sed message:" % mode.title())
print(translated)
-def encryptMessage(key, message):
- '''
+
+def encryptMessage(key: str, message: str) -> str:
+ """
>>> encryptMessage('HDarji', 'This is Harshil Darji from Dharmaj.')
'Akij ra Odrjqqs Gaisq muod Mphumrs.'
- '''
- return translateMessage(key, message, 'encrypt')
+ """
+ return translateMessage(key, message, "encrypt")
+
-def decryptMessage(key, message):
- '''
+def decryptMessage(key: str, message: str) -> str:
+ """
>>> decryptMessage('HDarji', 'Akij ra Odrjqqs Gaisq muod Mphumrs.')
'This is Harshil Darji from Dharmaj.'
- '''
- return translateMessage(key, message, 'decrypt')
+ """
+ return translateMessage(key, message, "decrypt")
-def translateMessage(key, message, mode):
+
+def translateMessage(key: str, message: str, mode: str) -> str:
translated = []
keyIndex = 0
key = key.upper()
@@ -38,9 +41,9 @@ def translateMessage(key, message, mode):
for symbol in message:
num = LETTERS.find(symbol.upper())
if num != -1:
- if mode == 'encrypt':
+ if mode == "encrypt":
num += LETTERS.find(key[keyIndex])
- elif mode == 'decrypt':
+ elif mode == "decrypt":
num -= LETTERS.find(key[keyIndex])
num %= len(LETTERS)
@@ -55,7 +58,8 @@ def translateMessage(key, message, mode):
keyIndex = 0
else:
translated.append(symbol)
- return ''.join(translated)
+ return "".join(translated)
+
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/ciphers/xor_cipher.py b/ciphers/xor_cipher.py
index 727fac3b0703..32a350d4e61c 100644
--- a/ciphers/xor_cipher.py
+++ b/ciphers/xor_cipher.py
@@ -1,209 +1,205 @@
"""
- author: Christian Bender
- date: 21.12.2017
- class: XORCipher
-
- This class implements the XOR-cipher algorithm and provides
- some useful methods for encrypting and decrypting strings and
- files.
-
- Overview about methods
-
- - encrypt : list of char
- - decrypt : list of char
- - encrypt_string : str
- - decrypt_string : str
- - encrypt_file : boolean
- - decrypt_file : boolean
+ author: Christian Bender
+ date: 21.12.2017
+ class: XORCipher
+
+ This class implements the XOR-cipher algorithm and provides
+ some useful methods for encrypting and decrypting strings and
+ files.
+
+ Overview about methods
+
+ - encrypt : list of char
+ - decrypt : list of char
+ - encrypt_string : str
+ - decrypt_string : str
+ - encrypt_file : boolean
+ - decrypt_file : boolean
"""
-class XORCipher(object):
- def __init__(self, key = 0):
- """
- simple constructor that receives a key or uses
- default key = 0
- """
- #private field
- self.__key = key
+class XORCipher:
+ def __init__(self, key: int = 0):
+ """
+ simple constructor that receives a key or uses
+ default key = 0
+ """
- def encrypt(self, content, key):
- """
- input: 'content' of type string and 'key' of type int
- output: encrypted string 'content' as a list of chars
- if key not passed the method uses the key by the constructor.
- otherwise key = 1
- """
+ # private field
+ self.__key = key
- # precondition
- assert (isinstance(key,int) and isinstance(content,str))
+ def encrypt(self, content: str, key: int) -> [str]:
+ """
+ input: 'content' of type string and 'key' of type int
+ output: encrypted string 'content' as a list of chars
+ if key not passed the method uses the key by the constructor.
+ otherwise key = 1
+ """
- key = key or self.__key or 1
+ # precondition
+ assert isinstance(key, int) and isinstance(content, str)
- # make sure key can be any size
- while (key > 255):
- key -= 255
+ key = key or self.__key or 1
- # This will be returned
- ans = []
+ # make sure key can be any size
+ while key > 255:
+ key -= 255
- for ch in content:
- ans.append(chr(ord(ch) ^ key))
+ # This will be returned
+ ans = []
- return ans
+ for ch in content:
+ ans.append(chr(ord(ch) ^ key))
- def decrypt(self,content,key):
- """
- input: 'content' of type list and 'key' of type int
- output: decrypted string 'content' as a list of chars
- if key not passed the method uses the key by the constructor.
- otherwise key = 1
- """
+ return ans
- # precondition
- assert (isinstance(key,int) and isinstance(content,list))
+ def decrypt(self, content: str, key: int) -> [str]:
+ """
+ input: 'content' of type list and 'key' of type int
+ output: decrypted string 'content' as a list of chars
+ if key not passed the method uses the key by the constructor.
+ otherwise key = 1
+ """
- key = key or self.__key or 1
+ # precondition
+ assert isinstance(key, int) and isinstance(content, list)
- # make sure key can be any size
- while (key > 255):
- key -= 255
+ key = key or self.__key or 1
- # This will be returned
- ans = []
+ # make sure key can be any size
+ while key > 255:
+ key -= 255
- for ch in content:
- ans.append(chr(ord(ch) ^ key))
+ # This will be returned
+ ans = []
- return ans
+ for ch in content:
+ ans.append(chr(ord(ch) ^ key))
+ return ans
- def encrypt_string(self,content, key = 0):
- """
- input: 'content' of type string and 'key' of type int
- output: encrypted string 'content'
- if key not passed the method uses the key by the constructor.
- otherwise key = 1
- """
+ def encrypt_string(self, content: str, key: int = 0) -> str:
+ """
+ input: 'content' of type string and 'key' of type int
+ output: encrypted string 'content'
+ if key not passed the method uses the key by the constructor.
+ otherwise key = 1
+ """
- # precondition
- assert (isinstance(key,int) and isinstance(content,str))
+ # precondition
+ assert isinstance(key, int) and isinstance(content, str)
- key = key or self.__key or 1
+ key = key or self.__key or 1
- # make sure key can be any size
- while (key > 255):
- key -= 255
+ # make sure key can be any size
+ while key > 255:
+ key -= 255
- # This will be returned
- ans = ""
+ # This will be returned
+ ans = ""
- for ch in content:
- ans += chr(ord(ch) ^ key)
+ for ch in content:
+ ans += chr(ord(ch) ^ key)
- return ans
+ return ans
- def decrypt_string(self,content,key = 0):
- """
- input: 'content' of type string and 'key' of type int
- output: decrypted string 'content'
- if key not passed the method uses the key by the constructor.
- otherwise key = 1
- """
+ def decrypt_string(self, content: str, key: int = 0) -> str:
+ """
+ input: 'content' of type string and 'key' of type int
+ output: decrypted string 'content'
+ if key not passed the method uses the key by the constructor.
+ otherwise key = 1
+ """
- # precondition
- assert (isinstance(key,int) and isinstance(content,str))
+ # precondition
+ assert isinstance(key, int) and isinstance(content, str)
- key = key or self.__key or 1
+ key = key or self.__key or 1
- # make sure key can be any size
- while (key > 255):
- key -= 255
+ # make sure key can be any size
+ while key > 255:
+ key -= 255
- # This will be returned
- ans = ""
-
- for ch in content:
- ans += chr(ord(ch) ^ key)
+ # This will be returned
+ ans = ""
- return ans
+ for ch in content:
+ ans += chr(ord(ch) ^ key)
+ return ans
- def encrypt_file(self, file, key = 0):
- """
- input: filename (str) and a key (int)
- output: returns true if encrypt process was
- successful otherwise false
- if key not passed the method uses the key by the constructor.
- otherwise key = 1
- """
+ def encrypt_file(self, file: str, key: int = 0) -> bool:
+ """
+ input: filename (str) and a key (int)
+ output: returns true if encrypt process was
+ successful otherwise false
+ if key not passed the method uses the key by the constructor.
+ otherwise key = 1
+ """
- #precondition
- assert (isinstance(file,str) and isinstance(key,int))
+ # precondition
+ assert isinstance(file, str) and isinstance(key, int)
- try:
- with open(file,"r") as fin:
- with open("encrypt.out","w+") as fout:
+ try:
+ with open(file) as fin:
+ with open("encrypt.out", "w+") as fout:
- # actual encrypt-process
- for line in fin:
- fout.write(self.encrypt_string(line,key))
+ # actual encrypt-process
+ for line in fin:
+ fout.write(self.encrypt_string(line, key))
- except:
- return False
+ except OSError:
+ return False
- return True
+ return True
+ def decrypt_file(self, file: str, key: int) -> bool:
+ """
+ input: filename (str) and a key (int)
+ output: returns true if decrypt process was
+ successful otherwise false
+ if key not passed the method uses the key by the constructor.
+ otherwise key = 1
+ """
- def decrypt_file(self,file, key):
- """
- input: filename (str) and a key (int)
- output: returns true if decrypt process was
- successful otherwise false
- if key not passed the method uses the key by the constructor.
- otherwise key = 1
- """
+ # precondition
+ assert isinstance(file, str) and isinstance(key, int)
- #precondition
- assert (isinstance(file,str) and isinstance(key,int))
+ try:
+ with open(file) as fin:
+ with open("decrypt.out", "w+") as fout:
- try:
- with open(file,"r") as fin:
- with open("decrypt.out","w+") as fout:
-
- # actual encrypt-process
- for line in fin:
- fout.write(self.decrypt_string(line,key))
-
- except:
- return False
-
- return True
+ # actual encrypt-process
+ for line in fin:
+ fout.write(self.decrypt_string(line, key))
+ except OSError:
+ return False
+ return True
# Tests
# crypt = XORCipher()
# key = 67
-# # test enrcypt
-# print crypt.encrypt("hallo welt",key)
+# # test encrypt
+# print(crypt.encrypt("hallo welt",key))
# # test decrypt
-# print crypt.decrypt(crypt.encrypt("hallo welt",key), key)
+# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
-# print crypt.encrypt_string("hallo welt",key)
+# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
-# print crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key)
+# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
-# print "encrypt successful"
+# print("encrypt successful")
# else:
-# print "encrypt unsuccessful"
+# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
-# print "decrypt successful"
+# print("decrypt successful")
# else:
-# print "decrypt unsuccessful"
\ No newline at end of file
+# print("decrypt unsuccessful")
diff --git a/compression/__init__.py b/compression/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/compression/burrows_wheeler.py b/compression/burrows_wheeler.py
new file mode 100644
index 000000000000..1a6610915e65
--- /dev/null
+++ b/compression/burrows_wheeler.py
@@ -0,0 +1,167 @@
+"""
+https://en.wikipedia.org/wiki/Burrows%E2%80%93Wheeler_transform
+
+The Burrows–Wheeler transform (BWT, also called block-sorting compression)
+rearranges a character string into runs of similar characters. This is useful
+for compression, since it tends to be easy to compress a string that has runs
+of repeated characters by techniques such as move-to-front transform and
+run-length encoding. More importantly, the transformation is reversible,
+without needing to store any additional data except the position of the first
+original character. The BWT is thus a "free" method of improving the efficiency
+of text compression algorithms, costing only some extra computation.
+"""
+from __future__ import annotations
+
+
+def all_rotations(s: str) -> list[str]:
+ """
+ :param s: The string that will be rotated len(s) times.
+ :return: A list with the rotations.
+ :raises TypeError: If s is not an instance of str.
+ Examples:
+
+ >>> all_rotations("^BANANA|") # doctest: +NORMALIZE_WHITESPACE
+ ['^BANANA|', 'BANANA|^', 'ANANA|^B', 'NANA|^BA', 'ANA|^BAN', 'NA|^BANA',
+ 'A|^BANAN', '|^BANANA']
+ >>> all_rotations("a_asa_da_casa") # doctest: +NORMALIZE_WHITESPACE
+ ['a_asa_da_casa', '_asa_da_casaa', 'asa_da_casaa_', 'sa_da_casaa_a',
+ 'a_da_casaa_as', '_da_casaa_asa', 'da_casaa_asa_', 'a_casaa_asa_d',
+ '_casaa_asa_da', 'casaa_asa_da_', 'asaa_asa_da_c', 'saa_asa_da_ca',
+ 'aa_asa_da_cas']
+ >>> all_rotations("panamabanana") # doctest: +NORMALIZE_WHITESPACE
+ ['panamabanana', 'anamabananap', 'namabananapa', 'amabananapan',
+ 'mabananapana', 'abananapanam', 'bananapanama', 'ananapanamab',
+ 'nanapanamaba', 'anapanamaban', 'napanamabana', 'apanamabanan']
+ >>> all_rotations(5)
+ Traceback (most recent call last):
+ ...
+ TypeError: The parameter s type must be str.
+ """
+ if not isinstance(s, str):
+ raise TypeError("The parameter s type must be str.")
+
+ return [s[i:] + s[:i] for i in range(len(s))]
+
+
+def bwt_transform(s: str) -> dict:
+ """
+ :param s: The string that will be used at bwt algorithm
+ :return: the string composed of the last char of each row of the ordered
+ rotations and the index of the original string at ordered rotations list
+ :raises TypeError: If the s parameter type is not str
+ :raises ValueError: If the s parameter is empty
+ Examples:
+
+ >>> bwt_transform("^BANANA")
+ {'bwt_string': 'BNN^AAA', 'idx_original_string': 6}
+ >>> bwt_transform("a_asa_da_casa")
+ {'bwt_string': 'aaaadss_c__aa', 'idx_original_string': 3}
+ >>> bwt_transform("panamabanana")
+ {'bwt_string': 'mnpbnnaaaaaa', 'idx_original_string': 11}
+ >>> bwt_transform(4)
+ Traceback (most recent call last):
+ ...
+ TypeError: The parameter s type must be str.
+ >>> bwt_transform('')
+ Traceback (most recent call last):
+ ...
+ ValueError: The parameter s must not be empty.
+ """
+ if not isinstance(s, str):
+ raise TypeError("The parameter s type must be str.")
+ if not s:
+ raise ValueError("The parameter s must not be empty.")
+
+ rotations = all_rotations(s)
+    rotations.sort()  # sort the list of rotations in alphabetical order
+ # make a string composed of the last char of each rotation
+ return {
+ "bwt_string": "".join([word[-1] for word in rotations]),
+ "idx_original_string": rotations.index(s),
+ }
+
+
+def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
+ """
+ :param bwt_string: The string returned from bwt algorithm execution
+ :param idx_original_string: A 0-based index of the string that was used to
+ generate bwt_string at ordered rotations list
+ :return: The string used to generate bwt_string when bwt was executed
+ :raises TypeError: If the bwt_string parameter type is not str
+ :raises ValueError: If the bwt_string parameter is empty
+ :raises TypeError: If the idx_original_string type is not int or if not
+ possible to cast it to int
+ :raises ValueError: If the idx_original_string value is lower than 0 or
+ greater than len(bwt_string) - 1
+
+ >>> reverse_bwt("BNN^AAA", 6)
+ '^BANANA'
+ >>> reverse_bwt("aaaadss_c__aa", 3)
+ 'a_asa_da_casa'
+ >>> reverse_bwt("mnpbnnaaaaaa", 11)
+ 'panamabanana'
+ >>> reverse_bwt(4, 11)
+ Traceback (most recent call last):
+ ...
+ TypeError: The parameter bwt_string type must be str.
+ >>> reverse_bwt("", 11)
+ Traceback (most recent call last):
+ ...
+ ValueError: The parameter bwt_string must not be empty.
+ >>> reverse_bwt("mnpbnnaaaaaa", "asd") # doctest: +NORMALIZE_WHITESPACE
+ Traceback (most recent call last):
+ ...
+ TypeError: The parameter idx_original_string type must be int or passive
+ of cast to int.
+ >>> reverse_bwt("mnpbnnaaaaaa", -1)
+ Traceback (most recent call last):
+ ...
+ ValueError: The parameter idx_original_string must not be lower than 0.
+ >>> reverse_bwt("mnpbnnaaaaaa", 12) # doctest: +NORMALIZE_WHITESPACE
+ Traceback (most recent call last):
+ ...
+ ValueError: The parameter idx_original_string must be lower than
+ len(bwt_string).
+ >>> reverse_bwt("mnpbnnaaaaaa", 11.0)
+ 'panamabanana'
+ >>> reverse_bwt("mnpbnnaaaaaa", 11.4)
+ 'panamabanana'
+ """
+ if not isinstance(bwt_string, str):
+ raise TypeError("The parameter bwt_string type must be str.")
+ if not bwt_string:
+ raise ValueError("The parameter bwt_string must not be empty.")
+ try:
+ idx_original_string = int(idx_original_string)
+ except ValueError:
+ raise TypeError(
+ "The parameter idx_original_string type must be int or passive"
+ " of cast to int."
+ )
+ if idx_original_string < 0:
+ raise ValueError("The parameter idx_original_string must not be lower than 0.")
+ if idx_original_string >= len(bwt_string):
+ raise ValueError(
+ "The parameter idx_original_string must be lower than" " len(bwt_string)."
+ )
+
+ ordered_rotations = [""] * len(bwt_string)
+ for x in range(len(bwt_string)):
+ for i in range(len(bwt_string)):
+ ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
+ ordered_rotations.sort()
+ return ordered_rotations[idx_original_string]
+
+
+if __name__ == "__main__":
+ entry_msg = "Provide a string that I will generate its BWT transform: "
+ s = input(entry_msg).strip()
+ result = bwt_transform(s)
+ bwt_output_msg = "Burrows Wheeler transform for string '{}' results in '{}'"
+ print(bwt_output_msg.format(s, result["bwt_string"]))
+ original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
+ fmt = (
+ "Reversing Burrows Wheeler transform for entry '{}' we get original"
+ " string '{}'"
+ )
+ print(fmt.format(result["bwt_string"], original_string))
diff --git a/compression/huffman.py b/compression/huffman.py
new file mode 100644
index 000000000000..3a3cbfa4b0c6
--- /dev/null
+++ b/compression/huffman.py
@@ -0,0 +1,87 @@
+import sys
+
+
+class Letter:
+ def __init__(self, letter, freq):
+ self.letter = letter
+ self.freq = freq
+ self.bitstring = ""
+
+ def __repr__(self):
+ return f"{self.letter}:{self.freq}"
+
+
+class TreeNode:
+ def __init__(self, freq, left, right):
+ self.freq = freq
+ self.left = left
+ self.right = right
+
+
+def parse_file(file_path):
+ """
+ Read the file and build a dict of all letters and their
+ frequencies, then convert the dict into a list of Letters.
+ """
+ chars = {}
+ with open(file_path) as f:
+ while True:
+ c = f.read(1)
+ if not c:
+ break
+ chars[c] = chars[c] + 1 if c in chars.keys() else 1
+ return sorted([Letter(c, f) for c, f in chars.items()], key=lambda l: l.freq)
+
+
+def build_tree(letters):
+ """
+ Run through the list of Letters and build the min heap
+ for the Huffman Tree.
+ """
+ while len(letters) > 1:
+ left = letters.pop(0)
+ right = letters.pop(0)
+ total_freq = left.freq + right.freq
+ node = TreeNode(total_freq, left, right)
+ letters.append(node)
+ letters.sort(key=lambda l: l.freq)
+ return letters[0]
+
+
+def traverse_tree(root, bitstring):
+ """
+ Recursively traverse the Huffman Tree to set each
+ Letter's bitstring, and return the list of Letters
+ """
+ if type(root) is Letter:
+ root.bitstring = bitstring
+ return [root]
+ letters = []
+ letters += traverse_tree(root.left, bitstring + "0")
+ letters += traverse_tree(root.right, bitstring + "1")
+ return letters
+
+
+def huffman(file_path):
+ """
+ Parse the file, build the tree, then run through the file
+ again, using the list of Letters to find and print out the
+ bitstring for each letter.
+ """
+ letters_list = parse_file(file_path)
+ root = build_tree(letters_list)
+ letters = traverse_tree(root, "")
+ print(f"Huffman Coding of {file_path}: ")
+ with open(file_path) as f:
+ while True:
+ c = f.read(1)
+ if not c:
+ break
+ le = list(filter(lambda l: l.letter == c, letters))[0]
+ print(le.bitstring, end=" ")
+ print()
+
+
+if __name__ == "__main__":
+ # pass the file path to the huffman function
+ huffman(sys.argv[1])
diff --git a/analysis/compression_analysis/PSNR-example-base.png b/compression/image_data/PSNR-example-base.png
similarity index 100%
rename from analysis/compression_analysis/PSNR-example-base.png
rename to compression/image_data/PSNR-example-base.png
diff --git a/analysis/compression_analysis/PSNR-example-comp-10.jpg b/compression/image_data/PSNR-example-comp-10.jpg
similarity index 100%
rename from analysis/compression_analysis/PSNR-example-comp-10.jpg
rename to compression/image_data/PSNR-example-comp-10.jpg
diff --git a/compression/image_data/__init__.py b/compression/image_data/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/analysis/compression_analysis/compressed_image.png b/compression/image_data/compressed_image.png
similarity index 100%
rename from analysis/compression_analysis/compressed_image.png
rename to compression/image_data/compressed_image.png
diff --git a/analysis/compression_analysis/example_image.jpg b/compression/image_data/example_image.jpg
similarity index 100%
rename from analysis/compression_analysis/example_image.jpg
rename to compression/image_data/example_image.jpg
diff --git a/analysis/compression_analysis/example_wikipedia_image.jpg b/compression/image_data/example_wikipedia_image.jpg
similarity index 100%
rename from analysis/compression_analysis/example_wikipedia_image.jpg
rename to compression/image_data/example_wikipedia_image.jpg
diff --git a/analysis/compression_analysis/original_image.png b/compression/image_data/original_image.png
similarity index 100%
rename from analysis/compression_analysis/original_image.png
rename to compression/image_data/original_image.png
diff --git a/compression/lempel_ziv.py b/compression/lempel_ziv.py
new file mode 100644
index 000000000000..2d0601b27b34
--- /dev/null
+++ b/compression/lempel_ziv.py
@@ -0,0 +1,125 @@
+"""
+ One of the several implementations of Lempel–Ziv–Welch compression algorithm
+ https://en.wikipedia.org/wiki/Lempel%E2%80%93Ziv%E2%80%93Welch
+"""
+
+import math
+import os
+import sys
+
+
+def read_file_binary(file_path: str) -> str:
+ """
+ Reads given file as bytes and returns them as a long string
+ """
+ result = ""
+ try:
+ with open(file_path, "rb") as binary_file:
+ data = binary_file.read()
+ for dat in data:
+ curr_byte = f"{dat:08b}"
+ result += curr_byte
+ return result
+ except OSError:
+ print("File not accessible")
+ sys.exit()
+
+
+def add_key_to_lexicon(
+ lexicon: dict, curr_string: str, index: int, last_match_id: int
+) -> None:
+ """
+ Adds new strings (curr_string + "0", curr_string + "1") to the lexicon
+ """
+ lexicon.pop(curr_string)
+ lexicon[curr_string + "0"] = last_match_id
+
+ if math.log2(index).is_integer():
+ for curr_key in lexicon:
+ lexicon[curr_key] = "0" + lexicon[curr_key]
+
+ lexicon[curr_string + "1"] = bin(index)[2:]
+
+
+def compress_data(data_bits: str) -> str:
+ """
+ Compresses given data_bits using Lempel–Ziv–Welch compression algorithm
+ and returns the result as a string
+ """
+ lexicon = {"0": "0", "1": "1"}
+ result, curr_string = "", ""
+ index = len(lexicon)
+
+ for i in range(len(data_bits)):
+ curr_string += data_bits[i]
+ if curr_string not in lexicon:
+ continue
+
+ last_match_id = lexicon[curr_string]
+ result += last_match_id
+ add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
+ index += 1
+ curr_string = ""
+
+ while curr_string != "" and curr_string not in lexicon:
+ curr_string += "0"
+
+ if curr_string != "":
+ last_match_id = lexicon[curr_string]
+ result += last_match_id
+
+ return result
+
+
+def add_file_length(source_path: str, compressed: str) -> str:
+ """
+    Prepends the given file's length (using Elias gamma coding) to the
+    compressed string
+ """
+ file_length = os.path.getsize(source_path)
+ file_length_binary = bin(file_length)[2:]
+ length_length = len(file_length_binary)
+
+ return "0" * (length_length - 1) + file_length_binary + compressed
+
+
+def write_file_binary(file_path: str, to_write: str) -> None:
+ """
+ Writes given to_write string (should only consist of 0's and 1's) as bytes in the
+ file
+ """
+ byte_length = 8
+ try:
+ with open(file_path, "wb") as opened_file:
+ result_byte_array = [
+ to_write[i : i + byte_length]
+ for i in range(0, len(to_write), byte_length)
+ ]
+
+ if len(result_byte_array[-1]) % byte_length == 0:
+ result_byte_array.append("10000000")
+ else:
+ result_byte_array[-1] += "1" + "0" * (
+ byte_length - len(result_byte_array[-1]) - 1
+ )
+
+ for elem in result_byte_array:
+ opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
+ except OSError:
+ print("File not accessible")
+ sys.exit()
+
+
+def compress(source_path, destination_path: str) -> None:
+ """
+ Reads source file, compresses it and writes the compressed result in destination
+ file
+ """
+ data_bits = read_file_binary(source_path)
+ compressed = compress_data(data_bits)
+ compressed = add_file_length(source_path, compressed)
+ write_file_binary(destination_path, compressed)
+
+
+if __name__ == "__main__":
+ compress(sys.argv[1], sys.argv[2])
diff --git a/compression/lempel_ziv_decompress.py b/compression/lempel_ziv_decompress.py
new file mode 100644
index 000000000000..4d3c2c0d2cf3
--- /dev/null
+++ b/compression/lempel_ziv_decompress.py
@@ -0,0 +1,111 @@
+"""
+ One of the several implementations of Lempel–Ziv–Welch decompression algorithm
+ https://en.wikipedia.org/wiki/Lempel%E2%80%93Ziv%E2%80%93Welch
+"""
+
+import math
+import sys
+
+
+def read_file_binary(file_path: str) -> str:
+ """
+ Reads given file as bytes and returns them as a long string
+ """
+ result = ""
+ try:
+ with open(file_path, "rb") as binary_file:
+ data = binary_file.read()
+ for dat in data:
+ curr_byte = f"{dat:08b}"
+ result += curr_byte
+ return result
+ except OSError:
+ print("File not accessible")
+ sys.exit()
+
+
+def decompress_data(data_bits: str) -> str:
+ """
+    Decompresses given data_bits using Lempel–Ziv–Welch decompression algorithm
+ and returns the result as a string
+ """
+ lexicon = {"0": "0", "1": "1"}
+ result, curr_string = "", ""
+ index = len(lexicon)
+
+ for i in range(len(data_bits)):
+ curr_string += data_bits[i]
+ if curr_string not in lexicon:
+ continue
+
+ last_match_id = lexicon[curr_string]
+ result += last_match_id
+ lexicon[curr_string] = last_match_id + "0"
+
+ if math.log2(index).is_integer():
+ newLex = {}
+ for curr_key in list(lexicon):
+ newLex["0" + curr_key] = lexicon.pop(curr_key)
+ lexicon = newLex
+
+ lexicon[bin(index)[2:]] = last_match_id + "1"
+ index += 1
+ curr_string = ""
+ return result
+
+
+def write_file_binary(file_path: str, to_write: str) -> None:
+ """
+ Writes given to_write string (should only consist of 0's and 1's) as bytes in the
+ file
+ """
+ byte_length = 8
+ try:
+ with open(file_path, "wb") as opened_file:
+ result_byte_array = [
+ to_write[i : i + byte_length]
+ for i in range(0, len(to_write), byte_length)
+ ]
+
+ if len(result_byte_array[-1]) % byte_length == 0:
+ result_byte_array.append("10000000")
+ else:
+ result_byte_array[-1] += "1" + "0" * (
+ byte_length - len(result_byte_array[-1]) - 1
+ )
+
+ for elem in result_byte_array[:-1]:
+ opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
+ except OSError:
+ print("File not accessible")
+ sys.exit()
+
+
+def remove_prefix(data_bits: str) -> str:
+ """
+    Removes the size prefix that the compressed file should have
+ Returns the result
+ """
+ counter = 0
+ for letter in data_bits:
+ if letter == "1":
+ break
+ counter += 1
+
+ data_bits = data_bits[counter:]
+ data_bits = data_bits[counter + 1 :]
+ return data_bits
+
+
+def compress(source_path: str, destination_path: str) -> None:
+ """
+ Reads source file, decompresses it and writes the result in destination file
+ """
+ data_bits = read_file_binary(source_path)
+ data_bits = remove_prefix(data_bits)
+ decompressed = decompress_data(data_bits)
+ write_file_binary(destination_path, decompressed)
+
+
+if __name__ == "__main__":
+ compress(sys.argv[1], sys.argv[2])
diff --git a/analysis/compression_analysis/psnr.py b/compression/peak_signal_to_noise_ratio.py
similarity index 53%
rename from analysis/compression_analysis/psnr.py
rename to compression/peak_signal_to_noise_ratio.py
index 0f21aac07d34..6c6c4c38a12a 100644
--- a/analysis/compression_analysis/psnr.py
+++ b/compression/peak_signal_to_noise_ratio.py
@@ -1,6 +1,8 @@
"""
- Peak signal-to-noise ratio - PSNR - https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio
- Soruce: https://tutorials.techonical.com/how-to-calculate-psnr-value-of-two-images-using-python/
+Peak signal-to-noise ratio - PSNR
+ https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio
+Source:
+https://tutorials.techonical.com/how-to-calculate-psnr-value-of-two-images-using-python
"""
import math
@@ -9,6 +11,7 @@
import cv2
import numpy as np
+
def psnr(original, contrast):
mse = np.mean((original - contrast) ** 2)
if mse == 0:
@@ -21,11 +24,13 @@ def psnr(original, contrast):
def main():
dir_path = os.path.dirname(os.path.realpath(__file__))
# Loading images (original image and compressed image)
- original = cv2.imread(os.path.join(dir_path, 'original_image.png'))
- contrast = cv2.imread(os.path.join(dir_path, 'compressed_image.png'), 1)
+ original = cv2.imread(os.path.join(dir_path, "image_data/original_image.png"))
+ contrast = cv2.imread(os.path.join(dir_path, "image_data/compressed_image.png"), 1)
- original2 = cv2.imread(os.path.join(dir_path, 'PSNR-example-base.png'))
- contrast2 = cv2.imread(os.path.join(dir_path, 'PSNR-example-comp-10.jpg'), 1)
+ original2 = cv2.imread(os.path.join(dir_path, "image_data/PSNR-example-base.png"))
+ contrast2 = cv2.imread(
+ os.path.join(dir_path, "image_data/PSNR-example-comp-10.jpg"), 1
+ )
# Value expected: 29.73dB
print("-- First Test --")
@@ -36,5 +41,5 @@ def main():
print(f"PSNR value is {psnr(original2, contrast2)} dB")
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/computer_vision/README.md b/computer_vision/README.md
new file mode 100644
index 000000000000..94ee493086cc
--- /dev/null
+++ b/computer_vision/README.md
@@ -0,0 +1,7 @@
+### Computer Vision
+
+Computer vision is a field of computer science that works on enabling computers to see,
+identify and process images in the same way that human vision does, and then provide appropriate output.
+It is like imparting human intelligence and instincts to a computer.
+Image processing and computer vision are a little different from each other. Image processing means applying some algorithms for transforming an image from one form to another, such as smoothing, contrasting, stretching, etc.
+While computer vision comes from modelling image processing using the techniques of machine learning, it applies machine learning to recognize patterns for the interpretation of images (much like the process of visual reasoning in human vision).
diff --git a/computer_vision/__init__.py b/computer_vision/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/computer_vision/harriscorner.py b/computer_vision/harriscorner.py
new file mode 100644
index 000000000000..fb7f560f7873
--- /dev/null
+++ b/computer_vision/harriscorner.py
@@ -0,0 +1,75 @@
+import cv2
+import numpy as np
+
+"""
+Harris Corner Detector
+https://en.wikipedia.org/wiki/Harris_Corner_Detector
+"""
+
+
+class Harris_Corner:
+ def __init__(self, k: float, window_size: int):
+
+ """
+ k : is an empirically determined constant in [0.04,0.06]
+ window_size : neighbourhoods considered
+ """
+
+ if k in (0.04, 0.06):
+ self.k = k
+ self.window_size = window_size
+ else:
+ raise ValueError("invalid k value")
+
+ def __str__(self):
+
+ return f"Harris Corner detection with k : {self.k}"
+
+ def detect(self, img_path: str):
+
+ """
+ Returns the image with corners identified
+ img_path : path of the image
+ output : list of the corner positions, image
+ """
+
+ img = cv2.imread(img_path, 0)
+ h, w = img.shape
+ corner_list = []
+ color_img = img.copy()
+ color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
+ dy, dx = np.gradient(img)
+ ixx = dx ** 2
+ iyy = dy ** 2
+ ixy = dx * dy
+ k = 0.04
+ offset = self.window_size // 2
+ for y in range(offset, h - offset):
+ for x in range(offset, w - offset):
+ wxx = ixx[
+ y - offset : y + offset + 1, x - offset : x + offset + 1
+ ].sum()
+ wyy = iyy[
+ y - offset : y + offset + 1, x - offset : x + offset + 1
+ ].sum()
+ wxy = ixy[
+ y - offset : y + offset + 1, x - offset : x + offset + 1
+ ].sum()
+
+ det = (wxx * wyy) - (wxy ** 2)
+ trace = wxx + wyy
+ r = det - k * (trace ** 2)
+ # Can change the value
+ if r > 0.5:
+ corner_list.append([x, y, r])
+ color_img.itemset((y, x, 0), 0)
+ color_img.itemset((y, x, 1), 0)
+ color_img.itemset((y, x, 2), 255)
+ return color_img, corner_list
+
+
+if __name__ == "__main__":
+
+ edge_detect = Harris_Corner(0.04, 3)
+ color_img, _ = edge_detect.detect("path_to_image")
+ cv2.imwrite("detect.png", color_img)
diff --git a/computer_vision/meanthreshold.py b/computer_vision/meanthreshold.py
new file mode 100644
index 000000000000..76657933d6a9
--- /dev/null
+++ b/computer_vision/meanthreshold.py
@@ -0,0 +1,30 @@
+from PIL import Image
+
+"""
+Mean thresholding algorithm for image processing
+https://en.wikipedia.org/wiki/Thresholding_(image_processing)
+"""
+
+
+def mean_threshold(image: Image) -> Image:
+ """
+ image: is a grayscale PIL image object
+ """
+ height, width = image.size
+ mean = 0
+ pixels = image.load()
+ for i in range(width):
+ for j in range(height):
+ pixel = pixels[j, i]
+ mean += pixel
+ mean //= width * height
+
+ for j in range(width):
+ for i in range(height):
+ pixels[i, j] = 255 if pixels[i, j] > mean else 0
+ return image
+
+
+if __name__ == "__main__":
+ image = mean_threshold(Image.open("path_to_image").convert("L"))
+ image.save("output_image_path")
diff --git a/conversions/__init__.py b/conversions/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/conversions/binary_to_decimal.py b/conversions/binary_to_decimal.py
new file mode 100644
index 000000000000..a7625e475bdc
--- /dev/null
+++ b/conversions/binary_to_decimal.py
@@ -0,0 +1,43 @@
+def bin_to_decimal(bin_string: str) -> int:
+ """
+ Convert a binary value to its decimal equivalent
+
+ >>> bin_to_decimal("101")
+ 5
+ >>> bin_to_decimal(" 1010 ")
+ 10
+ >>> bin_to_decimal("-11101")
+ -29
+ >>> bin_to_decimal("0")
+ 0
+ >>> bin_to_decimal("a")
+ Traceback (most recent call last):
+ ...
+ ValueError: Non-binary value was passed to the function
+ >>> bin_to_decimal("")
+ Traceback (most recent call last):
+ ...
+ ValueError: Empty string was passed to the function
+ >>> bin_to_decimal("39")
+ Traceback (most recent call last):
+ ...
+ ValueError: Non-binary value was passed to the function
+ """
+ bin_string = str(bin_string).strip()
+ if not bin_string:
+ raise ValueError("Empty string was passed to the function")
+ is_negative = bin_string[0] == "-"
+ if is_negative:
+ bin_string = bin_string[1:]
+ if not all(char in "01" for char in bin_string):
+ raise ValueError("Non-binary value was passed to the function")
+ decimal_number = 0
+ for char in bin_string:
+ decimal_number = 2 * decimal_number + int(char)
+ return -decimal_number if is_negative else decimal_number
+
+
+if __name__ == "__main__":
+ from doctest import testmod
+
+ testmod()
diff --git a/conversions/binary_to_octal.py b/conversions/binary_to_octal.py
new file mode 100644
index 000000000000..8b594887867e
--- /dev/null
+++ b/conversions/binary_to_octal.py
@@ -0,0 +1,45 @@
+"""
+The function below will convert any binary string to the octal equivalent.
+
+>>> bin_to_octal("1111")
+'17'
+
+>>> bin_to_octal("101010101010011")
+'52523'
+
+>>> bin_to_octal("")
+Traceback (most recent call last):
+...
+ValueError: Empty string was passed to the function
+>>> bin_to_octal("a-1")
+Traceback (most recent call last):
+...
+ValueError: Non-binary value was passed to the function
+"""
+
+
+def bin_to_octal(bin_string: str) -> str:
+ if not all(char in "01" for char in bin_string):
+ raise ValueError("Non-binary value was passed to the function")
+ if not bin_string:
+ raise ValueError("Empty string was passed to the function")
+ oct_string = ""
+ while len(bin_string) % 3 != 0:
+ bin_string = "0" + bin_string
+ bin_string_in_3_list = [
+ bin_string[index : index + 3]
+ for index, value in enumerate(bin_string)
+ if index % 3 == 0
+ ]
+ for bin_group in bin_string_in_3_list:
+ oct_val = 0
+ for index, val in enumerate(bin_group):
+ oct_val += int(2 ** (2 - index) * int(val))
+ oct_string += str(oct_val)
+ return oct_string
+
+
+if __name__ == "__main__":
+ from doctest import testmod
+
+ testmod()
diff --git a/conversions/decimal_to_any.py b/conversions/decimal_to_any.py
new file mode 100644
index 000000000000..3c72a7732ac6
--- /dev/null
+++ b/conversions/decimal_to_any.py
@@ -0,0 +1,107 @@
+"""Convert a positive Decimal Number to Any Other Representation"""
+
+
+def decimal_to_any(num: int, base: int) -> str:
+ """
+ Convert a positive integer to another base as str.
+ >>> decimal_to_any(0, 2)
+ '0'
+ >>> decimal_to_any(5, 4)
+ '11'
+ >>> decimal_to_any(20, 3)
+ '202'
+ >>> decimal_to_any(58, 16)
+ '3A'
+ >>> decimal_to_any(243, 17)
+ 'E5'
+ >>> decimal_to_any(34923, 36)
+ 'QY3'
+ >>> decimal_to_any(10, 11)
+ 'A'
+ >>> decimal_to_any(16, 16)
+ '10'
+ >>> decimal_to_any(36, 36)
+ '10'
+ >>> # negatives will error
+ >>> decimal_to_any(-45, 8) # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ ...
+ ValueError: parameter must be positive int
+ >>> # floats will error
+ >>> decimal_to_any(34.4, 6) # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ ...
+ TypeError: int() can't convert non-string with explicit base
+ >>> # a float base will error
+ >>> decimal_to_any(5, 2.5) # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ ...
+ TypeError: 'float' object cannot be interpreted as an integer
+ >>> # a str base will error
+ >>> decimal_to_any(10, '16') # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ ...
+ TypeError: 'str' object cannot be interpreted as an integer
+ >>> # a base less than 2 will error
+ >>> decimal_to_any(7, 0) # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ ...
+ ValueError: base must be >= 2
+ >>> # a base greater than 36 will error
+ >>> decimal_to_any(34, 37) # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ ...
+ ValueError: base must be <= 36
+ """
+ if isinstance(num, float):
+ raise TypeError("int() can't convert non-string with explicit base")
+ if num < 0:
+ raise ValueError("parameter must be positive int")
+ if isinstance(base, str):
+ raise TypeError("'str' object cannot be interpreted as an integer")
+ if isinstance(base, float):
+ raise TypeError("'float' object cannot be interpreted as an integer")
+ if base in (0, 1):
+ raise ValueError("base must be >= 2")
+ if base > 36:
+ raise ValueError("base must be <= 36")
+ # fmt: off
+ ALPHABET_VALUES = {'10': 'A', '11': 'B', '12': 'C', '13': 'D', '14': 'E', '15': 'F',
+ '16': 'G', '17': 'H', '18': 'I', '19': 'J', '20': 'K', '21': 'L',
+ '22': 'M', '23': 'N', '24': 'O', '25': 'P', '26': 'Q', '27': 'R',
+ '28': 'S', '29': 'T', '30': 'U', '31': 'V', '32': 'W', '33': 'X',
+ '34': 'Y', '35': 'Z'}
+ # fmt: on
+ new_value = ""
+ mod = 0
+ div = 0
+ while div != 1:
+ div, mod = divmod(num, base)
+ if base >= 11 and 9 < mod < 36:
+ actual_value = ALPHABET_VALUES[str(mod)]
+ mod = actual_value
+ new_value += str(mod)
+ div = num // base
+ num = div
+ if div == 0:
+ return str(new_value[::-1])
+ elif div == 1:
+ new_value += str(div)
+ return str(new_value[::-1])
+
+ return new_value[::-1]
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
+
+ for base in range(2, 37):
+ for num in range(1000):
+ assert int(decimal_to_any(num, base), base) == num, (
+ num,
+ base,
+ decimal_to_any(num, base),
+ int(decimal_to_any(num, base), base),
+ )
diff --git a/conversions/decimal_to_binary.py b/conversions/decimal_to_binary.py
new file mode 100644
index 000000000000..7e83aee4f7a5
--- /dev/null
+++ b/conversions/decimal_to_binary.py
@@ -0,0 +1,59 @@
+"""Convert a Decimal Number to a Binary Number."""
+
+
+def decimal_to_binary(num: int) -> str:
+
+ """
+ Convert an Integer Decimal Number to a Binary Number as str.
+ >>> decimal_to_binary(0)
+ '0b0'
+ >>> decimal_to_binary(2)
+ '0b10'
+ >>> decimal_to_binary(7)
+ '0b111'
+ >>> decimal_to_binary(35)
+ '0b100011'
+ >>> # negatives work too
+ >>> decimal_to_binary(-2)
+ '-0b10'
+ >>> # other floats will error
+ >>> decimal_to_binary(16.16) # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ ...
+ TypeError: 'float' object cannot be interpreted as an integer
+ >>> # strings will error as well
+ >>> decimal_to_binary('0xfffff') # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ ...
+ TypeError: 'str' object cannot be interpreted as an integer
+ """
+
+ if type(num) == float:
+ raise TypeError("'float' object cannot be interpreted as an integer")
+ if type(num) == str:
+ raise TypeError("'str' object cannot be interpreted as an integer")
+
+ if num == 0:
+ return "0b0"
+
+ negative = False
+
+ if num < 0:
+ negative = True
+ num = -num
+
+ binary = []
+ while num > 0:
+ binary.insert(0, num % 2)
+ num >>= 1
+
+ if negative:
+ return "-0b" + "".join(str(e) for e in binary)
+
+ return "0b" + "".join(str(e) for e in binary)
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/conversions/decimal_to_binary_recursion.py b/conversions/decimal_to_binary_recursion.py
new file mode 100644
index 000000000000..c149ea86592f
--- /dev/null
+++ b/conversions/decimal_to_binary_recursion.py
@@ -0,0 +1,53 @@
+def binary_recursive(decimal: int) -> str:
+ """
+ Take a positive integer value and return its binary equivalent.
+ >>> binary_recursive(1000)
+ '1111101000'
+ >>> binary_recursive("72")
+ '1001000'
+ >>> binary_recursive("number")
+ Traceback (most recent call last):
+ ...
+ ValueError: invalid literal for int() with base 10: 'number'
+ """
+ decimal = int(decimal)
+ if decimal in (0, 1): # Exit cases for the recursion
+ return str(decimal)
+ div, mod = divmod(decimal, 2)
+ return binary_recursive(div) + str(mod)
+
+
+def main(number: str) -> str:
+ """
+ Take an integer value and raise ValueError for wrong inputs,
+ call the function above and return the output with prefix "0b" & "-0b"
+ for positive and negative integers respectively.
+ >>> main(0)
+ '0b0'
+ >>> main(40)
+ '0b101000'
+ >>> main(-40)
+ '-0b101000'
+ >>> main(40.8)
+ Traceback (most recent call last):
+ ...
+ ValueError: Input value is not an integer
+ >>> main("forty")
+ Traceback (most recent call last):
+ ...
+ ValueError: Input value is not an integer
+ """
+ number = str(number).strip()
+ if not number:
+ raise ValueError("No input value was provided")
+ negative = "-" if number.startswith("-") else ""
+ number = number.lstrip("-")
+ if not number.isnumeric():
+ raise ValueError("Input value is not an integer")
+ return f"{negative}0b{binary_recursive(int(number))}"
+
+
+if __name__ == "__main__":
+ from doctest import testmod
+
+ testmod()
diff --git a/conversions/decimal_to_hexadecimal.py b/conversions/decimal_to_hexadecimal.py
new file mode 100644
index 000000000000..433f78dfecb7
--- /dev/null
+++ b/conversions/decimal_to_hexadecimal.py
@@ -0,0 +1,78 @@
+""" Convert Base 10 (Decimal) Values to Hexadecimal Representations """
+
+# set decimal value for each hexadecimal digit
# Decimal value of each hexadecimal digit, e.g. values[12] == "c".
values = {digit: "0123456789abcdef"[digit] for digit in range(16)}


def decimal_to_hexadecimal(decimal: float) -> str:
    """
    take integer decimal value, return hexadecimal representation as str beginning
    with 0x
    >>> decimal_to_hexadecimal(5)
    '0x5'
    >>> decimal_to_hexadecimal(15)
    '0xf'
    >>> decimal_to_hexadecimal(37)
    '0x25'
    >>> decimal_to_hexadecimal(255)
    '0xff'
    >>> decimal_to_hexadecimal(4096)
    '0x1000'
    >>> decimal_to_hexadecimal(999098)
    '0xf3eba'
    >>> # zero is handled explicitly
    >>> decimal_to_hexadecimal(0)
    '0x0'
    >>> # negatives work too
    >>> decimal_to_hexadecimal(-256)
    '-0x100'
    >>> # floats are acceptable if equivalent to an int
    >>> decimal_to_hexadecimal(17.0)
    '0x11'
    >>> # other floats will error
    >>> decimal_to_hexadecimal(16.16) # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    AssertionError
    >>> # strings will error as well
    >>> decimal_to_hexadecimal('0xfffff') # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    AssertionError
    >>> # results are the same when compared to Python's default hex function
    >>> all(decimal_to_hexadecimal(i) == hex(i) for i in (-256, 0, 17, 4096))
    True
    """
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)  # normalize float input like 17.0 to int
    if decimal == 0:
        # The digit loop below produces no digits for zero, which previously
        # returned the malformed string "0x" instead of "0x0".
        return "0x0"
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/conversions/decimal_to_octal.py b/conversions/decimal_to_octal.py
new file mode 100644
index 000000000000..8dc04830ad87
--- /dev/null
+++ b/conversions/decimal_to_octal.py
@@ -0,0 +1,43 @@
+"""Convert a Decimal Number to an Octal Number."""
+
+import math
+
+# Modified from:
+# https://github.com/TheAlgorithms/Javascript/blob/master/Conversions/DecimalToOctal.js
+
+
def decimal_to_octal(num: int) -> str:
    """Convert a Decimal Number to an Octal Number.

    >>> all(decimal_to_octal(i) == oct(i) for i
    ... in (0, 2, 8, 64, 65, 216, 255, 256, 512))
    True
    >>> # exact integer arithmetic keeps large values correct
    >>> decimal_to_octal(2**64) == oct(2**64)
    True
    >>> decimal_to_octal(-1)
    Traceback (most recent call last):
    ...
    ValueError: decimal_to_octal() requires a non-negative integer
    """
    if num < 0:
        # The old implementation silently returned "0o0" for negatives.
        raise ValueError("decimal_to_octal() requires a non-negative integer")
    if num == 0:
        return "0o0"
    digits = []
    # Build digits with integer divmod; the previous float-based
    # math.pow(10, counter) approach loses precision above 2**53.
    while num > 0:
        num, remainder = divmod(num, 8)
        digits.append(str(remainder))
    return "0o" + "".join(reversed(digits))
+
+
def main():
    """Print octal equivalents of a few decimal numbers."""
    for value in (2, 8, 65, 216, 512):
        print(f"\n{value} in octal is:")
        print(decimal_to_octal(value))
    print("\n")
diff --git a/conversions/hexadecimal_to_decimal.py b/conversions/hexadecimal_to_decimal.py
new file mode 100644
index 000000000000..beb1c2c3ded6
--- /dev/null
+++ b/conversions/hexadecimal_to_decimal.py
@@ -0,0 +1,49 @@
# Map each hexadecimal digit character ('0'-'9', 'a'-'f') to its decimal value.
hex_table = {hex(i)[2:]: i for i in range(16)}  # hex(i) is '0x…'; [2:] drops the '0x'


def hex_to_decimal(hex_string: str) -> int:
    """
    Convert a hexadecimal value to its decimal equivalent
    #https://www.programiz.com/python-programming/methods/built-in/hex

    >>> hex_to_decimal("a")
    10
    >>> hex_to_decimal("12f")
    303
    >>> hex_to_decimal(" 12f ")
    303
    >>> hex_to_decimal("FfFf")
    65535
    >>> hex_to_decimal("-Ff")
    -255
    >>> hex_to_decimal("F-f")
    Traceback (most recent call last):
    ...
    ValueError: Non-hexadecimal value was passed to the function
    >>> hex_to_decimal("")
    Traceback (most recent call last):
    ...
    ValueError: Empty string was passed to the function
    >>> hex_to_decimal("12m")
    Traceback (most recent call last):
    ...
    ValueError: Non-hexadecimal value was passed to the function
    """
    digits = hex_string.strip().lower()
    if not digits:
        raise ValueError("Empty string was passed to the function")
    is_negative = digits.startswith("-")
    if is_negative:
        digits = digits[1:]
    if any(char not in hex_table for char in digits):
        raise ValueError("Non-hexadecimal value was passed to the function")
    total = 0
    for char in digits:
        total = total * 16 + hex_table[char]
    return -total if is_negative else total
+
+
+if __name__ == "__main__":
+ from doctest import testmod
+
+ testmod()
diff --git a/conversions/molecular_chemistry.py b/conversions/molecular_chemistry.py
new file mode 100644
index 000000000000..8c68459965b0
--- /dev/null
+++ b/conversions/molecular_chemistry.py
@@ -0,0 +1,92 @@
+"""
+Functions useful for doing molecular chemistry:
+* molarity_to_normality
+* moles_to_pressure
+* moles_to_volume
+* pressure_and_volume_to_temperature
+"""
+
+
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """
    Convert molarity to normality (volume in litres).

    Wikipedia reference: https://en.wikipedia.org/wiki/Equivalent_concentration
    Wikipedia reference: https://en.wikipedia.org/wiki/Molar_concentration

    >>> molarity_to_normality(2, 3.1, 0.31)
    20
    >>> molarity_to_normality(4, 11.4, 5.7)
    8
    """
    # Normality = molarity * n-factor, rounded to the nearest integer.
    molarity = moles / volume
    return round(molarity * nfactor)
+
+
def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """
    Convert moles to pressure (atm) using the ideal gas law.
    Temperature in kelvin, volume in litres.

    Wikipedia reference: https://en.wikipedia.org/wiki/Gas_laws
    Wikipedia reference: https://en.wikipedia.org/wiki/Pressure
    Wikipedia reference: https://en.wikipedia.org/wiki/Temperature

    >>> moles_to_pressure(0.82, 3, 300)
    90
    >>> moles_to_pressure(8.2, 5, 200)
    10
    """
    # P = nRT / V with R = 0.0821 L·atm/(mol·K), rounded to an integer.
    pressure = (moles * 0.0821 * temperature) / volume
    return round(pressure)
+
+
def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """
    Convert moles to volume (litres) using the ideal gas law.
    Temperature in kelvin, pressure in atm.

    Wikipedia reference: https://en.wikipedia.org/wiki/Gas_laws
    Wikipedia reference: https://en.wikipedia.org/wiki/Pressure
    Wikipedia reference: https://en.wikipedia.org/wiki/Temperature

    >>> moles_to_volume(0.82, 3, 300)
    90
    >>> moles_to_volume(8.2, 5, 200)
    10
    """
    # V = nRT / P with R = 0.0821 L·atm/(mol·K), rounded to an integer.
    volume = (moles * 0.0821 * temperature) / pressure
    return round(volume)
+
+
def pressure_and_volume_to_temperature(
    pressure: float, moles: float, volume: float
) -> float:
    """
    Convert pressure and volume to temperature (kelvin) using the ideal gas law.
    Volume in litres, pressure in atm.

    Wikipedia reference: https://en.wikipedia.org/wiki/Gas_laws
    Wikipedia reference: https://en.wikipedia.org/wiki/Pressure
    Wikipedia reference: https://en.wikipedia.org/wiki/Temperature

    >>> pressure_and_volume_to_temperature(0.82, 1, 2)
    20
    >>> pressure_and_volume_to_temperature(8.2, 5, 3)
    60
    """
    # T = PV / (nR) with R = 0.0821 L·atm/(mol·K), rounded to an integer.
    temperature = (pressure * volume) / (0.0821 * moles)
    return round(temperature)
+
+
+if __name__ == "__main__":
+
+ import doctest
+
+ doctest.testmod()
diff --git a/conversions/octal_to_decimal.py b/conversions/octal_to_decimal.py
new file mode 100644
index 000000000000..5a7373fef7e3
--- /dev/null
+++ b/conversions/octal_to_decimal.py
@@ -0,0 +1,43 @@
def oct_to_decimal(oct_string: str) -> int:
    """
    Convert a octal value to its decimal equivalent

    >>> oct_to_decimal("12")
    10
    >>> oct_to_decimal(" 12 ")
    10
    >>> oct_to_decimal("-45")
    -37
    >>> oct_to_decimal("2-0Fm")
    Traceback (most recent call last):
    ...
    ValueError: Non-octal value was passed to the function
    >>> oct_to_decimal("")
    Traceback (most recent call last):
    ...
    ValueError: Empty string was passed to the function
    >>> oct_to_decimal("19")
    Traceback (most recent call last):
    ...
    ValueError: Non-octal value was passed to the function
    """
    digits = str(oct_string).strip()
    if not digits:
        raise ValueError("Empty string was passed to the function")
    negative = digits.startswith("-")
    if negative:
        digits = digits[1:]
    # Every character must be a digit and each digit must be in range 0-7.
    if not digits.isdigit() or any(int(char) > 7 for char in digits):
        raise ValueError("Non-octal value was passed to the function")
    total = 0
    for char in digits:
        total = total * 8 + int(char)
    return -total if negative else total
+
+
+if __name__ == "__main__":
+ from doctest import testmod
+
+ testmod()
diff --git a/conversions/prefix_conversions.py b/conversions/prefix_conversions.py
new file mode 100644
index 000000000000..c2440d1cf886
--- /dev/null
+++ b/conversions/prefix_conversions.py
@@ -0,0 +1,100 @@
+"""
+Convert International System of Units (SI) and Binary prefixes
+"""
+from enum import Enum
+from typing import Union
+
+
class SI_Unit(Enum):
    # Each member maps an SI prefix name to its base-10 exponent,
    # e.g. kilo -> 3 means kilo == 10**3.
    yotta = 24
    zetta = 21
    exa = 18
    peta = 15
    tera = 12
    giga = 9
    mega = 6
    kilo = 3
    hecto = 2
    deca = 1
    deci = -1
    centi = -2
    milli = -3
    micro = -6
    nano = -9
    pico = -12
    femto = -15
    atto = -18
    zepto = -21
    yocto = -24
+
+
class Binary_Unit(Enum):
    # Each member maps a binary prefix name to its base-1024 exponent,
    # e.g. kilo -> 1 means kibi-style kilo == 1024**1 == 2**10.
    yotta = 8
    zetta = 7
    exa = 6
    peta = 5
    tera = 4
    giga = 3
    mega = 2
    kilo = 1
+
+
def convert_si_prefix(
    known_amount: float,
    known_prefix: Union[str, SI_Unit],
    unknown_prefix: Union[str, SI_Unit],
) -> float:
    """
    Convert an amount between SI prefixes by shifting the power of ten.

    Wikipedia reference: https://en.wikipedia.org/wiki/International_System_of_Units
    >>> convert_si_prefix(1, SI_Unit.giga, SI_Unit.mega)
    1000
    >>> convert_si_prefix(1, SI_Unit.mega, SI_Unit.giga)
    0.001
    >>> convert_si_prefix(1, SI_Unit.kilo, SI_Unit.kilo)
    1
    >>> convert_si_prefix(1, 'giga', 'mega')
    1000
    >>> convert_si_prefix(1, 'gIGa', 'mEGa')
    1000
    """
    # Prefix names given as strings are looked up case-insensitively.
    if isinstance(known_prefix, str):
        known_prefix = SI_Unit[known_prefix.lower()]
    if isinstance(unknown_prefix, str):
        unknown_prefix = SI_Unit[unknown_prefix.lower()]
    return known_amount * (10 ** (known_prefix.value - unknown_prefix.value))
+
+
def convert_binary_prefix(
    known_amount: float,
    known_prefix: Union[str, Binary_Unit],
    unknown_prefix: Union[str, Binary_Unit],
) -> float:
    """
    Convert an amount between binary prefixes by shifting the power of two.

    Wikipedia reference: https://en.wikipedia.org/wiki/Binary_prefix
    >>> convert_binary_prefix(1, Binary_Unit.giga, Binary_Unit.mega)
    1024
    >>> convert_binary_prefix(1, Binary_Unit.mega, Binary_Unit.giga)
    0.0009765625
    >>> convert_binary_prefix(1, Binary_Unit.kilo, Binary_Unit.kilo)
    1
    >>> convert_binary_prefix(1, 'giga', 'mega')
    1024
    >>> convert_binary_prefix(1, 'gIGa', 'mEGa')
    1024
    """
    # Prefix names given as strings are looked up case-insensitively.
    if isinstance(known_prefix, str):
        known_prefix = Binary_Unit[known_prefix.lower()]
    if isinstance(unknown_prefix, str):
        unknown_prefix = Binary_Unit[unknown_prefix.lower()]
    # Each prefix step is 2**10 == 1024.
    return known_amount * (2 ** ((known_prefix.value - unknown_prefix.value) * 10))
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/conversions/roman_numerals.py b/conversions/roman_numerals.py
new file mode 100644
index 000000000000..9933e6a78a4d
--- /dev/null
+++ b/conversions/roman_numerals.py
@@ -0,0 +1,59 @@
def roman_to_int(roman: str) -> int:
    """
    LeetCode No. 13 Roman to Integer
    Given a roman numeral, convert it to an integer.
    Input is guaranteed to be within the range from 1 to 3999.
    https://en.wikipedia.org/wiki/Roman_numerals
    >>> tests = {"III": 3, "CLIV": 154, "MIX": 1009, "MMD": 2500, "MMMCMXCIX": 3999}
    >>> all(roman_to_int(key) == value for key, value in tests.items())
    True
    """
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    index = 0
    while index < len(roman):
        current = vals[roman[index]]
        # A smaller symbol before a larger one is subtractive (e.g. IV == 4).
        if index + 1 < len(roman) and current < vals[roman[index + 1]]:
            total += vals[roman[index + 1]] - current
            index += 2
        else:
            total += current
            index += 1
    return total
+
+
def int_to_roman(number: int) -> str:
    """
    Given a integer, convert it to an roman numeral.
    https://en.wikipedia.org/wiki/Roman_numerals
    >>> tests = {"III": 3, "CLIV": 154, "MIX": 1009, "MMD": 2500, "MMMCMXCIX": 3999}
    >>> all(int_to_roman(value) == key for key, value in tests.items())
    True
    """
    # Value/symbol pairs in descending order, including subtractive forms.
    roman_symbols = (
        (1000, "M"),
        (900, "CM"),
        (500, "D"),
        (400, "CD"),
        (100, "C"),
        (90, "XC"),
        (50, "L"),
        (40, "XL"),
        (10, "X"),
        (9, "IX"),
        (5, "V"),
        (4, "IV"),
        (1, "I"),
    )
    parts = []
    for value, symbol in roman_symbols:
        if number == 0:
            break
        count, number = divmod(number, value)
        parts.append(symbol * count)
    return "".join(parts)
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/conversions/temperature_conversions.py b/conversions/temperature_conversions.py
new file mode 100644
index 000000000000..167c9dc64727
--- /dev/null
+++ b/conversions/temperature_conversions.py
@@ -0,0 +1,386 @@
+""" Convert between different units of temperature """
+
+
def celsius_to_fahrenheit(celsius: float, ndigits: int = 2) -> float:
    """
    Convert Celsius to Fahrenheit, rounded to ``ndigits`` decimal places.
    Wikipedia reference: https://en.wikipedia.org/wiki/Celsius
    Wikipedia reference: https://en.wikipedia.org/wiki/Fahrenheit

    >>> celsius_to_fahrenheit(273.354, 3)
    524.037
    >>> celsius_to_fahrenheit(-40.0)
    -40.0
    >>> celsius_to_fahrenheit(0)
    32.0
    >>> celsius_to_fahrenheit("40")
    104.0
    >>> celsius_to_fahrenheit("celsius")
    Traceback (most recent call last):
    ...
    ValueError: could not convert string to float: 'celsius'
    """
    fahrenheit = float(celsius) * 9 / 5 + 32
    return round(fahrenheit, ndigits)


def celsius_to_kelvin(celsius: float, ndigits: int = 2) -> float:
    """
    Convert Celsius to Kelvin, rounded to ``ndigits`` decimal places.
    Wikipedia reference: https://en.wikipedia.org/wiki/Celsius
    Wikipedia reference: https://en.wikipedia.org/wiki/Kelvin

    >>> celsius_to_kelvin(273.354, 3)
    546.504
    >>> celsius_to_kelvin(0)
    273.15
    >>> celsius_to_kelvin("40")
    313.15
    >>> celsius_to_kelvin("celsius")
    Traceback (most recent call last):
    ...
    ValueError: could not convert string to float: 'celsius'
    """
    kelvin = float(celsius) + 273.15
    return round(kelvin, ndigits)


def celsius_to_rankine(celsius: float, ndigits: int = 2) -> float:
    """
    Convert Celsius to Rankine, rounded to ``ndigits`` decimal places.
    Wikipedia reference: https://en.wikipedia.org/wiki/Celsius
    Wikipedia reference: https://en.wikipedia.org/wiki/Rankine_scale

    >>> celsius_to_rankine(273.354, 3)
    983.707
    >>> celsius_to_rankine(0)
    491.67
    >>> celsius_to_rankine("40")
    563.67
    >>> celsius_to_rankine("celsius")
    Traceback (most recent call last):
    ...
    ValueError: could not convert string to float: 'celsius'
    """
    rankine = float(celsius) * 9 / 5 + 491.67
    return round(rankine, ndigits)
+
+
def fahrenheit_to_celsius(fahrenheit: float, ndigits: int = 2) -> float:
    """
    Convert Fahrenheit to Celsius, rounded to ``ndigits`` decimal places.
    Wikipedia reference: https://en.wikipedia.org/wiki/Fahrenheit
    Wikipedia reference: https://en.wikipedia.org/wiki/Celsius

    >>> fahrenheit_to_celsius(273.354, 3)
    134.086
    >>> fahrenheit_to_celsius(0)
    -17.78
    >>> fahrenheit_to_celsius("100")
    37.78
    >>> fahrenheit_to_celsius("fahrenheit")
    Traceback (most recent call last):
    ...
    ValueError: could not convert string to float: 'fahrenheit'
    """
    celsius = (float(fahrenheit) - 32) * 5 / 9
    return round(celsius, ndigits)


def fahrenheit_to_kelvin(fahrenheit: float, ndigits: int = 2) -> float:
    """
    Convert Fahrenheit to Kelvin, rounded to ``ndigits`` decimal places.
    Wikipedia reference: https://en.wikipedia.org/wiki/Fahrenheit
    Wikipedia reference: https://en.wikipedia.org/wiki/Kelvin

    >>> fahrenheit_to_kelvin(273.354, 3)
    407.236
    >>> fahrenheit_to_kelvin(0)
    255.37
    >>> fahrenheit_to_kelvin("100")
    310.93
    >>> fahrenheit_to_kelvin("fahrenheit")
    Traceback (most recent call last):
    ...
    ValueError: could not convert string to float: 'fahrenheit'
    """
    kelvin = (float(fahrenheit) - 32) * 5 / 9 + 273.15
    return round(kelvin, ndigits)


def fahrenheit_to_rankine(fahrenheit: float, ndigits: int = 2) -> float:
    """
    Convert Fahrenheit to Rankine, rounded to ``ndigits`` decimal places.
    Wikipedia reference: https://en.wikipedia.org/wiki/Fahrenheit
    Wikipedia reference: https://en.wikipedia.org/wiki/Rankine_scale

    >>> fahrenheit_to_rankine(273.354, 3)
    733.024
    >>> fahrenheit_to_rankine(0)
    459.67
    >>> fahrenheit_to_rankine("100")
    559.67
    >>> fahrenheit_to_rankine("fahrenheit")
    Traceback (most recent call last):
    ...
    ValueError: could not convert string to float: 'fahrenheit'
    """
    rankine = float(fahrenheit) + 459.67
    return round(rankine, ndigits)
+
+
def kelvin_to_celsius(kelvin: float, ndigits: int = 2) -> float:
    """
    Convert Kelvin to Celsius, rounded to ``ndigits`` decimal places.
    Wikipedia reference: https://en.wikipedia.org/wiki/Kelvin
    Wikipedia reference: https://en.wikipedia.org/wiki/Celsius

    >>> kelvin_to_celsius(273.354, 3)
    0.204
    >>> kelvin_to_celsius(273.15)
    0.0
    >>> kelvin_to_celsius("315.5")
    42.35
    >>> kelvin_to_celsius("kelvin")
    Traceback (most recent call last):
    ...
    ValueError: could not convert string to float: 'kelvin'
    """
    celsius = float(kelvin) - 273.15
    return round(celsius, ndigits)


def kelvin_to_fahrenheit(kelvin: float, ndigits: int = 2) -> float:
    """
    Convert Kelvin to Fahrenheit, rounded to ``ndigits`` decimal places.
    Wikipedia reference: https://en.wikipedia.org/wiki/Kelvin
    Wikipedia reference: https://en.wikipedia.org/wiki/Fahrenheit

    >>> kelvin_to_fahrenheit(273.354, 3)
    32.367
    >>> kelvin_to_fahrenheit(273.15)
    32.0
    >>> kelvin_to_fahrenheit("315.5")
    108.23
    >>> kelvin_to_fahrenheit("kelvin")
    Traceback (most recent call last):
    ...
    ValueError: could not convert string to float: 'kelvin'
    """
    fahrenheit = (float(kelvin) - 273.15) * 9 / 5 + 32
    return round(fahrenheit, ndigits)


def kelvin_to_rankine(kelvin: float, ndigits: int = 2) -> float:
    """
    Convert Kelvin to Rankine, rounded to ``ndigits`` decimal places.
    Wikipedia reference: https://en.wikipedia.org/wiki/Kelvin
    Wikipedia reference: https://en.wikipedia.org/wiki/Rankine_scale

    >>> kelvin_to_rankine(273.354, 3)
    492.037
    >>> kelvin_to_rankine(0)
    0.0
    >>> kelvin_to_rankine("40")
    72.0
    >>> kelvin_to_rankine("kelvin")
    Traceback (most recent call last):
    ...
    ValueError: could not convert string to float: 'kelvin'
    """
    rankine = float(kelvin) * 9 / 5
    return round(rankine, ndigits)
+
+
def rankine_to_celsius(rankine: float, ndigits: int = 2) -> float:
    """
    Convert Rankine to Celsius, rounded to ``ndigits`` decimal places.
    Wikipedia reference: https://en.wikipedia.org/wiki/Rankine_scale
    Wikipedia reference: https://en.wikipedia.org/wiki/Celsius

    >>> rankine_to_celsius(273.354, 3)
    -121.287
    >>> rankine_to_celsius(273.15)
    -121.4
    >>> rankine_to_celsius("315.5")
    -97.87
    >>> rankine_to_celsius("rankine")
    Traceback (most recent call last):
    ...
    ValueError: could not convert string to float: 'rankine'
    """
    celsius = (float(rankine) - 491.67) * 5 / 9
    return round(celsius, ndigits)


def rankine_to_fahrenheit(rankine: float, ndigits: int = 2) -> float:
    """
    Convert Rankine to Fahrenheit, rounded to ``ndigits`` decimal places.
    Wikipedia reference: https://en.wikipedia.org/wiki/Rankine_scale
    Wikipedia reference: https://en.wikipedia.org/wiki/Fahrenheit

    >>> rankine_to_fahrenheit(273.15)
    -186.52
    >>> rankine_to_fahrenheit("315.5")
    -144.17
    >>> rankine_to_fahrenheit("rankine")
    Traceback (most recent call last):
    ...
    ValueError: could not convert string to float: 'rankine'
    """
    fahrenheit = float(rankine) - 459.67
    return round(fahrenheit, ndigits)


def rankine_to_kelvin(rankine: float, ndigits: int = 2) -> float:
    """
    Convert Rankine to Kelvin, rounded to ``ndigits`` decimal places.
    Wikipedia reference: https://en.wikipedia.org/wiki/Rankine_scale
    Wikipedia reference: https://en.wikipedia.org/wiki/Kelvin

    >>> rankine_to_kelvin(0)
    0.0
    >>> rankine_to_kelvin("40")
    22.22
    >>> rankine_to_kelvin("rankine")
    Traceback (most recent call last):
    ...
    ValueError: could not convert string to float: 'rankine'
    """
    kelvin = float(rankine) * 5 / 9
    return round(kelvin, ndigits)
+
+
def reaumur_to_kelvin(reaumur: float, ndigits: int = 2) -> float:
    """
    Convert Réaumur to Kelvin, rounded to ``ndigits`` decimal places.
    Reference:- http://www.csgnetwork.com/temp2conv.html

    >>> reaumur_to_kelvin(0)
    273.15
    >>> reaumur_to_kelvin(40)
    323.15
    >>> reaumur_to_kelvin("reaumur")
    Traceback (most recent call last):
    ...
    ValueError: could not convert string to float: 'reaumur'
    """
    kelvin = float(reaumur) * 1.25 + 273.15
    return round(kelvin, ndigits)


def reaumur_to_fahrenheit(reaumur: float, ndigits: int = 2) -> float:
    """
    Convert Réaumur to Fahrenheit, rounded to ``ndigits`` decimal places.
    Reference:- http://www.csgnetwork.com/temp2conv.html

    >>> reaumur_to_fahrenheit(0)
    32.0
    >>> reaumur_to_fahrenheit(40)
    122.0
    >>> reaumur_to_fahrenheit("reaumur")
    Traceback (most recent call last):
    ...
    ValueError: could not convert string to float: 'reaumur'
    """
    fahrenheit = float(reaumur) * 2.25 + 32
    return round(fahrenheit, ndigits)


def reaumur_to_celsius(reaumur: float, ndigits: int = 2) -> float:
    """
    Convert Réaumur to Celsius, rounded to ``ndigits`` decimal places.
    Reference:- http://www.csgnetwork.com/temp2conv.html

    >>> reaumur_to_celsius(0)
    0.0
    >>> reaumur_to_celsius(40)
    50.0
    >>> reaumur_to_celsius("reaumur")
    Traceback (most recent call last):
    ...
    ValueError: could not convert string to float: 'reaumur'
    """
    celsius = float(reaumur) * 1.25
    return round(celsius, ndigits)


def reaumur_to_rankine(reaumur: float, ndigits: int = 2) -> float:
    """
    Convert Réaumur to Rankine, rounded to ``ndigits`` decimal places.
    Reference:- http://www.csgnetwork.com/temp2conv.html

    >>> reaumur_to_rankine(0)
    491.67
    >>> reaumur_to_rankine(40)
    581.67
    >>> reaumur_to_rankine("reaumur")
    Traceback (most recent call last):
    ...
    ValueError: could not convert string to float: 'reaumur'
    """
    rankine = float(reaumur) * 2.25 + 32 + 459.67
    return round(rankine, ndigits)
+
+
+if __name__ == "__main__":
+
+ import doctest
+
+ doctest.testmod()
diff --git a/conversions/weight_conversion.py b/conversions/weight_conversion.py
new file mode 100644
index 000000000000..85515f2f6f88
--- /dev/null
+++ b/conversions/weight_conversion.py
@@ -0,0 +1,287 @@
+"""
+Conversion of weight units.
+
+__author__ = "Anubhav Solanki"
+__license__ = "MIT"
+__version__ = "1.0.0"
+__maintainer__ = "Anubhav Solanki"
+__email__ = "anubhavsolanki0@gmail.com"
+
+USAGE :
+-> Import this file into their respective project.
+-> Use the function weight_conversion() for conversion of weight units.
+-> Parameters :
+ -> from_type : From which type you want to convert
+ -> to_type : To which type you want to convert
+ -> value : the value which you want to convert
+
+REFERENCES :
+
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Kilogram
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Gram
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Millimetre
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Tonne
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Long_ton
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Short_ton
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Pound
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Ounce
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Fineness#Karat
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Dalton_(unit)
+"""
+
# How many of each unit make up one kilogram.
KILOGRAM_CHART = {
    "kilogram": 1,
    "gram": 10**3,
    "milligram": 10**6,
    "metric-ton": 10**-3,
    "long-ton": 0.0009842073,
    "short-ton": 0.0011023122,
    "pound": 2.2046244202,
    "ounce": 35.273990723,
    "carrat": 5000,
    "atomic-mass-unit": 6.022136652e26,
}

# How many kilograms one of each unit weighs.
WEIGHT_TYPE_CHART = {
    "kilogram": 1,
    "gram": 10**-3,
    "milligram": 10**-6,
    "metric-ton": 10**3,
    "long-ton": 1016.04608,
    "short-ton": 907.184,
    "pound": 0.453592,
    "ounce": 0.0283495,
    "carrat": 0.0002,
    "atomic-mass-unit": 1.660540199e-27,
}


def weight_conversion(from_type: str, to_type: str, value: float) -> float:
    """
    Convert ``value`` from the ``from_type`` weight unit to ``to_type``,
    going through kilograms as the intermediate unit.

    Supported units: kilogram, gram, milligram, metric-ton, long-ton,
    short-ton, pound, ounce, carrat, atomic-mass-unit.

    >>> weight_conversion("kilogram","kilogram",4)
    4
    >>> weight_conversion("kilogram","gram",1)
    1000
    >>> weight_conversion("kilogram","pound",4)
    8.8184976808
    >>> weight_conversion("gram","kilogram",1)
    0.001
    >>> weight_conversion("milligram","atomic-mass-unit",1)
    6.022136652e+20
    >>> weight_conversion("metric-ton","short-ton",2)
    2.2046244
    >>> weight_conversion("pound","ounce",1)
    16.000000000027015
    >>> weight_conversion("carrat","gram",4)
    0.8
    >>> weight_conversion("atomic-mass-unit","kilogram",4)
    6.642160796e-27
    """
    if to_type not in KILOGRAM_CHART or from_type not in WEIGHT_TYPE_CHART:
        raise ValueError(
            f"Invalid 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Supported values are: {', '.join(WEIGHT_TYPE_CHART)}"
        )
    # value -> kilograms -> target unit.
    return value * KILOGRAM_CHART[to_type] * WEIGHT_TYPE_CHART[from_type]
+
+
+if __name__ == "__main__":
+
+ import doctest
+
+ doctest.testmod()
diff --git a/data_structures/arrays.py b/data_structures/arrays.py
deleted file mode 100644
index feb061013556..000000000000
--- a/data_structures/arrays.py
+++ /dev/null
@@ -1,3 +0,0 @@
-arr = [10, 20, 30, 40]
-arr[1] = 30 # set element 1 (20) of array to 30
-print(arr)
diff --git a/data_structures/avl.py b/data_structures/avl.py
deleted file mode 100644
index d01e8f825368..000000000000
--- a/data_structures/avl.py
+++ /dev/null
@@ -1,181 +0,0 @@
-"""
-An AVL tree
-"""
-from __future__ import print_function
-
-
-class Node:
-
- def __init__(self, label):
- self.label = label
- self._parent = None
- self._left = None
- self._right = None
- self.height = 0
-
- @property
- def right(self):
- return self._right
-
- @right.setter
- def right(self, node):
- if node is not None:
- node._parent = self
- self._right = node
-
- @property
- def left(self):
- return self._left
-
- @left.setter
- def left(self, node):
- if node is not None:
- node._parent = self
- self._left = node
-
- @property
- def parent(self):
- return self._parent
-
- @parent.setter
- def parent(self, node):
- if node is not None:
- self._parent = node
- self.height = self.parent.height + 1
- else:
- self.height = 0
-
-
-class AVL:
-
- def __init__(self):
- self.root = None
- self.size = 0
-
- def insert(self, value):
- node = Node(value)
-
- if self.root is None:
- self.root = node
- self.root.height = 0
- self.size = 1
- else:
- # Same as Binary Tree
- dad_node = None
- curr_node = self.root
-
- while True:
- if curr_node is not None:
-
- dad_node = curr_node
-
- if node.label < curr_node.label:
- curr_node = curr_node.left
- else:
- curr_node = curr_node.right
- else:
- node.height = dad_node.height
- dad_node.height += 1
- if node.label < dad_node.label:
- dad_node.left = node
- else:
- dad_node.right = node
- self.rebalance(node)
- self.size += 1
- break
-
- def rebalance(self, node):
- n = node
-
- while n is not None:
- height_right = n.height
- height_left = n.height
-
- if n.right is not None:
- height_right = n.right.height
-
- if n.left is not None:
- height_left = n.left.height
-
- if abs(height_left - height_right) > 1:
- if height_left > height_right:
- left_child = n.left
- if left_child is not None:
- h_right = (left_child.right.height
- if (left_child.right is not None) else 0)
- h_left = (left_child.left.height
- if (left_child.left is not None) else 0)
- if (h_left > h_right):
- self.rotate_left(n)
- break
- else:
- self.double_rotate_right(n)
- break
- else:
- right_child = n.right
- if right_child is not None:
- h_right = (right_child.right.height
- if (right_child.right is not None) else 0)
- h_left = (right_child.left.height
- if (right_child.left is not None) else 0)
- if (h_left > h_right):
- self.double_rotate_left(n)
- break
- else:
- self.rotate_right(n)
- break
- n = n.parent
-
- def rotate_left(self, node):
- aux = node.parent.label
- node.parent.label = node.label
- node.parent.right = Node(aux)
- node.parent.right.height = node.parent.height + 1
- node.parent.left = node.right
-
-
- def rotate_right(self, node):
- aux = node.parent.label
- node.parent.label = node.label
- node.parent.left = Node(aux)
- node.parent.left.height = node.parent.height + 1
- node.parent.right = node.right
-
- def double_rotate_left(self, node):
- self.rotate_right(node.getRight().getRight())
- self.rotate_left(node)
-
- def double_rotate_right(self, node):
- self.rotate_left(node.getLeft().getLeft())
- self.rotate_right(node)
-
- def empty(self):
- if self.root is None:
- return True
- return False
-
- def preShow(self, curr_node):
- if curr_node is not None:
- self.preShow(curr_node.left)
- print(curr_node.label, end=" ")
- self.preShow(curr_node.right)
-
- def preorder(self, curr_node):
- if curr_node is not None:
- self.preShow(curr_node.left)
- self.preShow(curr_node.right)
- print(curr_node.label, end=" ")
-
- def getRoot(self):
- return self.root
-
-t = AVL()
-t.insert(1)
-t.insert(2)
-t.insert(3)
-# t.preShow(t.root)
-# print("\n")
-# t.insert(4)
-# t.insert(5)
-# t.preShow(t.root)
-# t.preorden(t.root)
diff --git a/data_structures/binary tree/AVLtree.py b/data_structures/binary tree/AVLtree.py
deleted file mode 100644
index ff44963d1690..000000000000
--- a/data_structures/binary tree/AVLtree.py
+++ /dev/null
@@ -1,255 +0,0 @@
-# -*- coding: utf-8 -*-
-'''
-An auto-balanced binary tree!
-'''
-import math
-import random
-class my_queue:
- def __init__(self):
- self.data = []
- self.head = 0
- self.tail = 0
- def isEmpty(self):
- return self.head == self.tail
- def push(self,data):
- self.data.append(data)
- self.tail = self.tail + 1
- def pop(self):
- ret = self.data[self.head]
- self.head = self.head + 1
- return ret
- def count(self):
- return self.tail - self.head
- def print(self):
- print(self.data)
- print("**************")
- print(self.data[self.head:self.tail])
-
-class my_node:
- def __init__(self,data):
- self.data = data
- self.left = None
- self.right = None
- self.height = 1
- def getdata(self):
- return self.data
- def getleft(self):
- return self.left
- def getright(self):
- return self.right
- def getheight(self):
- return self.height
- def setdata(self,data):
- self.data = data
- return
- def setleft(self,node):
- self.left = node
- return
- def setright(self,node):
- self.right = node
- return
- def setheight(self,height):
- self.height = height
- return
-
-def getheight(node):
- if node is None:
- return 0
- return node.getheight()
-
-def my_max(a,b):
- if a > b:
- return a
- return b
-
-
-
-def leftrotation(node):
- r'''
- A B
- / \ / \
- B C Bl A
- / \ --> / / \
- Bl Br UB Br C
- /
- UB
-
- UB = unbalanced node
- '''
- print("left rotation node:",node.getdata())
- ret = node.getleft()
- node.setleft(ret.getright())
- ret.setright(node)
- h1 = my_max(getheight(node.getright()),getheight(node.getleft())) + 1
- node.setheight(h1)
- h2 = my_max(getheight(ret.getright()),getheight(ret.getleft())) + 1
- ret.setheight(h2)
- return ret
-
-def rightrotation(node):
- '''
- a mirror symmetry rotation of the leftrotation
- '''
- print("right rotation node:",node.getdata())
- ret = node.getright()
- node.setright(ret.getleft())
- ret.setleft(node)
- h1 = my_max(getheight(node.getright()),getheight(node.getleft())) + 1
- node.setheight(h1)
- h2 = my_max(getheight(ret.getright()),getheight(ret.getleft())) + 1
- ret.setheight(h2)
- return ret
-
-def rlrotation(node):
- r'''
- A A Br
- / \ / \ / \
- B C RR Br C LR B A
- / \ --> / \ --> / / \
- Bl Br B UB Bl UB C
- \ /
- UB Bl
- RR = rightrotation LR = leftrotation
- '''
- node.setleft(rightrotation(node.getleft()))
- return leftrotation(node)
-
-def lrrotation(node):
- node.setright(leftrotation(node.getright()))
- return rightrotation(node)
-
-
-def insert_node(node,data):
- if node is None:
- return my_node(data)
- if data < node.getdata():
- node.setleft(insert_node(node.getleft(),data))
- if getheight(node.getleft()) - getheight(node.getright()) == 2: #an unbalance detected
- if data < node.getleft().getdata(): #new node is the left child of the left child
- node = leftrotation(node)
- else:
- node = rlrotation(node) #new node is the right child of the left child
- else:
- node.setright(insert_node(node.getright(),data))
- if getheight(node.getright()) - getheight(node.getleft()) == 2:
- if data < node.getright().getdata():
- node = lrrotation(node)
- else:
- node = rightrotation(node)
- h1 = my_max(getheight(node.getright()),getheight(node.getleft())) + 1
- node.setheight(h1)
- return node
-
-def getRightMost(root):
- while root.getright() is not None:
- root = root.getright()
- return root.getdata()
-def getLeftMost(root):
- while root.getleft() is not None:
- root = root.getleft()
- return root.getdata()
-
-def del_node(root,data):
- if root.getdata() == data:
- if root.getleft() is not None and root.getright() is not None:
- temp_data = getLeftMost(root.getright())
- root.setdata(temp_data)
- root.setright(del_node(root.getright(),temp_data))
- elif root.getleft() is not None:
- root = root.getleft()
- else:
- root = root.getright()
- elif root.getdata() > data:
- if root.getleft() is None:
- print("No such data")
- return root
- else:
- root.setleft(del_node(root.getleft(),data))
- elif root.getdata() < data:
- if root.getright() is None:
- return root
- else:
- root.setright(del_node(root.getright(),data))
- if root is None:
- return root
- if getheight(root.getright()) - getheight(root.getleft()) == 2:
- if getheight(root.getright().getright()) > getheight(root.getright().getleft()):
- root = rightrotation(root)
- else:
- root = lrrotation(root)
- elif getheight(root.getright()) - getheight(root.getleft()) == -2:
- if getheight(root.getleft().getleft()) > getheight(root.getleft().getright()):
- root = leftrotation(root)
- else:
- root = rlrotation(root)
- height = my_max(getheight(root.getright()),getheight(root.getleft())) + 1
- root.setheight(height)
- return root
-
-class AVLtree:
- def __init__(self):
- self.root = None
- def getheight(self):
-# print("yyy")
- return getheight(self.root)
- def insert(self,data):
- print("insert:"+str(data))
- self.root = insert_node(self.root,data)
-
- def del_node(self,data):
- print("delete:"+str(data))
- if self.root is None:
- print("Tree is empty!")
- return
- self.root = del_node(self.root,data)
- def traversale(self): #a level traversale, gives a more intuitive look on the tree
- q = my_queue()
- q.push(self.root)
- layer = self.getheight()
- if layer == 0:
- return
- cnt = 0
- while not q.isEmpty():
- node = q.pop()
- space = " "*int(math.pow(2,layer-1))
- print(space,end = "")
- if node is None:
- print("*",end = "")
- q.push(None)
- q.push(None)
- else:
- print(node.getdata(),end = "")
- q.push(node.getleft())
- q.push(node.getright())
- print(space,end = "")
- cnt = cnt + 1
- for i in range(100):
- if cnt == math.pow(2,i) - 1:
- layer = layer -1
- if layer == 0:
- print()
- print("*************************************")
- return
- print()
- break
- print()
- print("*************************************")
- return
-
- def test(self):
- getheight(None)
- print("****")
- self.getheight()
-if __name__ == "__main__":
- t = AVLtree()
- t.traversale()
- l = list(range(10))
- random.shuffle(l)
- for i in l:
- t.insert(i)
- t.traversale()
-
- random.shuffle(l)
- for i in l:
- t.del_node(i)
- t.traversale()
diff --git a/data_structures/binary tree/binary_search_tree.py b/data_structures/binary tree/binary_search_tree.py
deleted file mode 100644
index cef5b55f245d..000000000000
--- a/data_structures/binary tree/binary_search_tree.py
+++ /dev/null
@@ -1,258 +0,0 @@
-'''
-A binary search Tree
-'''
-from __future__ import print_function
-class Node:
-
- def __init__(self, label, parent):
- self.label = label
- self.left = None
- self.right = None
- #Added in order to delete a node easier
- self.parent = parent
-
- def getLabel(self):
- return self.label
-
- def setLabel(self, label):
- self.label = label
-
- def getLeft(self):
- return self.left
-
- def setLeft(self, left):
- self.left = left
-
- def getRight(self):
- return self.right
-
- def setRight(self, right):
- self.right = right
-
- def getParent(self):
- return self.parent
-
- def setParent(self, parent):
- self.parent = parent
-
-class BinarySearchTree:
-
- def __init__(self):
- self.root = None
-
- def insert(self, label):
- # Create a new Node
- new_node = Node(label, None)
- # If Tree is empty
- if self.empty():
- self.root = new_node
- else:
- #If Tree is not empty
- curr_node = self.root
- #While we don't get to a leaf
- while curr_node is not None:
- #We keep reference of the parent node
- parent_node = curr_node
- #If node label is less than current node
- if new_node.getLabel() < curr_node.getLabel():
- #We go left
- curr_node = curr_node.getLeft()
- else:
- #Else we go right
- curr_node = curr_node.getRight()
- #We insert the new node in a leaf
- if new_node.getLabel() < parent_node.getLabel():
- parent_node.setLeft(new_node)
- else:
- parent_node.setRight(new_node)
- #Set parent to the new node
- new_node.setParent(parent_node)
-
- def delete(self, label):
- if (not self.empty()):
- #Look for the node with that label
- node = self.getNode(label)
- #If the node exists
- if(node is not None):
- #If it has no children
- if(node.getLeft() is None and node.getRight() is None):
- self.__reassignNodes(node, None)
- node = None
- #Has only right children
- elif(node.getLeft() is None and node.getRight() is not None):
- self.__reassignNodes(node, node.getRight())
- #Has only left children
- elif(node.getLeft() is not None and node.getRight() is None):
- self.__reassignNodes(node, node.getLeft())
- #Has two children
- else:
- #Gets the max value of the left branch
- tmpNode = self.getMax(node.getLeft())
- #Deletes the tmpNode
- self.delete(tmpNode.getLabel())
- #Assigns the value to the node to delete and keesp tree structure
- node.setLabel(tmpNode.getLabel())
-
- def getNode(self, label):
- curr_node = None
- #If the tree is not empty
- if(not self.empty()):
- #Get tree root
- curr_node = self.getRoot()
- #While we don't find the node we look for
- #I am using lazy evaluation here to avoid NoneType Attribute error
- while curr_node is not None and curr_node.getLabel() is not label:
- #If node label is less than current node
- if label < curr_node.getLabel():
- #We go left
- curr_node = curr_node.getLeft()
- else:
- #Else we go right
- curr_node = curr_node.getRight()
- return curr_node
-
- def getMax(self, root = None):
- if(root is not None):
- curr_node = root
- else:
- #We go deep on the right branch
- curr_node = self.getRoot()
- if(not self.empty()):
- while(curr_node.getRight() is not None):
- curr_node = curr_node.getRight()
- return curr_node
-
- def getMin(self, root = None):
- if(root is not None):
- curr_node = root
- else:
- #We go deep on the left branch
- curr_node = self.getRoot()
- if(not self.empty()):
- curr_node = self.getRoot()
- while(curr_node.getLeft() is not None):
- curr_node = curr_node.getLeft()
- return curr_node
-
- def empty(self):
- if self.root is None:
- return True
- return False
-
- def __InOrderTraversal(self, curr_node):
- nodeList = []
- if curr_node is not None:
- nodeList.insert(0, curr_node)
- nodeList = nodeList + self.__InOrderTraversal(curr_node.getLeft())
- nodeList = nodeList + self.__InOrderTraversal(curr_node.getRight())
- return nodeList
-
- def getRoot(self):
- return self.root
-
- def __isRightChildren(self, node):
- if(node == node.getParent().getRight()):
- return True
- return False
-
- def __reassignNodes(self, node, newChildren):
- if(newChildren is not None):
- newChildren.setParent(node.getParent())
- if(node.getParent() is not None):
- #If it is the Right Children
- if(self.__isRightChildren(node)):
- node.getParent().setRight(newChildren)
- else:
- #Else it is the left children
- node.getParent().setLeft(newChildren)
-
- #This function traversal the tree. By default it returns an
- #In order traversal list. You can pass a function to traversal
- #The tree as needed by client code
- def traversalTree(self, traversalFunction = None, root = None):
- if(traversalFunction is None):
- #Returns a list of nodes in preOrder by default
- return self.__InOrderTraversal(self.root)
- else:
- #Returns a list of nodes in the order that the users wants to
- return traversalFunction(self.root)
-
- #Returns an string of all the nodes labels in the list
- #In Order Traversal
- def __str__(self):
- list = self.__InOrderTraversal(self.root)
- str = ""
- for x in list:
- str = str + " " + x.getLabel().__str__()
- return str
-
-def InPreOrder(curr_node):
- nodeList = []
- if curr_node is not None:
- nodeList = nodeList + InPreOrder(curr_node.getLeft())
- nodeList.insert(0, curr_node.getLabel())
- nodeList = nodeList + InPreOrder(curr_node.getRight())
- return nodeList
-
-def testBinarySearchTree():
- r'''
- Example
- 8
- / \
- 3 10
- / \ \
- 1 6 14
- / \ /
- 4 7 13
- '''
-
- r'''
- Example After Deletion
- 7
- / \
- 1 4
-
- '''
- t = BinarySearchTree()
- t.insert(8)
- t.insert(3)
- t.insert(6)
- t.insert(1)
- t.insert(10)
- t.insert(14)
- t.insert(13)
- t.insert(4)
- t.insert(7)
-
- #Prints all the elements of the list in order traversal
- print(t.__str__())
-
- if(t.getNode(6) is not None):
- print("The label 6 exists")
- else:
- print("The label 6 doesn't exist")
-
- if(t.getNode(-1) is not None):
- print("The label -1 exists")
- else:
- print("The label -1 doesn't exist")
-
- if(not t.empty()):
- print(("Max Value: ", t.getMax().getLabel()))
- print(("Min Value: ", t.getMin().getLabel()))
-
- t.delete(13)
- t.delete(10)
- t.delete(8)
- t.delete(3)
- t.delete(6)
- t.delete(14)
-
- #Gets all the elements of the tree In pre order
- #And it prints them
- list = t.traversalTree(InPreOrder, t.root)
- for x in list:
- print(x)
-
-if __name__ == "__main__":
- testBinarySearchTree()
diff --git a/data_structures/binary tree/fenwick_tree.py b/data_structures/binary tree/fenwick_tree.py
deleted file mode 100644
index f429161c8c36..000000000000
--- a/data_structures/binary tree/fenwick_tree.py
+++ /dev/null
@@ -1,29 +0,0 @@
-from __future__ import print_function
-class FenwickTree:
-
- def __init__(self, SIZE): # create fenwick tree with size SIZE
- self.Size = SIZE
- self.ft = [0 for i in range (0,SIZE)]
-
- def update(self, i, val): # update data (adding) in index i in O(lg N)
- while (i < self.Size):
- self.ft[i] += val
- i += i & (-i)
-
- def query(self, i): # query cumulative data from index 0 to i in O(lg N)
- ret = 0
- while (i > 0):
- ret += self.ft[i]
- i -= i & (-i)
- return ret
-
-if __name__ == '__main__':
- f = FenwickTree(100)
- f.update(1,20)
- f.update(4,4)
- print (f.query(1))
- print (f.query(3))
- print (f.query(4))
- f.update(2,-5)
- print (f.query(1))
- print (f.query(3))
diff --git a/data_structures/binary tree/lazy_segment_tree.py b/data_structures/binary tree/lazy_segment_tree.py
deleted file mode 100644
index 9b14b24e81fa..000000000000
--- a/data_structures/binary tree/lazy_segment_tree.py
+++ /dev/null
@@ -1,91 +0,0 @@
-from __future__ import print_function
-import math
-
-class SegmentTree:
-
- def __init__(self, N):
- self.N = N
- self.st = [0 for i in range(0,4*N)] # approximate the overall size of segment tree with array N
- self.lazy = [0 for i in range(0,4*N)] # create array to store lazy update
- self.flag = [0 for i in range(0,4*N)] # flag for lazy update
-
- def left(self, idx):
- return idx*2
-
- def right(self, idx):
- return idx*2 + 1
-
- def build(self, idx, l, r, A):
- if l==r:
- self.st[idx] = A[l-1]
- else :
- mid = (l+r)//2
- self.build(self.left(idx),l,mid, A)
- self.build(self.right(idx),mid+1,r, A)
- self.st[idx] = max(self.st[self.left(idx)] , self.st[self.right(idx)])
-
- # update with O(lg N) (Normal segment tree without lazy update will take O(Nlg N) for each update)
- def update(self, idx, l, r, a, b, val): # update(1, 1, N, a, b, v) for update val v to [a,b]
- if self.flag[idx] == True:
- self.st[idx] = self.lazy[idx]
- self.flag[idx] = False
- if l!=r:
- self.lazy[self.left(idx)] = self.lazy[idx]
- self.lazy[self.right(idx)] = self.lazy[idx]
- self.flag[self.left(idx)] = True
- self.flag[self.right(idx)] = True
-
- if r < a or l > b:
- return True
- if l >= a and r <= b :
- self.st[idx] = val
- if l!=r:
- self.lazy[self.left(idx)] = val
- self.lazy[self.right(idx)] = val
- self.flag[self.left(idx)] = True
- self.flag[self.right(idx)] = True
- return True
- mid = (l+r)//2
- self.update(self.left(idx),l,mid,a,b,val)
- self.update(self.right(idx),mid+1,r,a,b,val)
- self.st[idx] = max(self.st[self.left(idx)] , self.st[self.right(idx)])
- return True
-
- # query with O(lg N)
- def query(self, idx, l, r, a, b): #query(1, 1, N, a, b) for query max of [a,b]
- if self.flag[idx] == True:
- self.st[idx] = self.lazy[idx]
- self.flag[idx] = False
- if l != r:
- self.lazy[self.left(idx)] = self.lazy[idx]
- self.lazy[self.right(idx)] = self.lazy[idx]
- self.flag[self.left(idx)] = True
- self.flag[self.right(idx)] = True
- if r < a or l > b:
- return -math.inf
- if l >= a and r <= b:
- return self.st[idx]
- mid = (l+r)//2
- q1 = self.query(self.left(idx),l,mid,a,b)
- q2 = self.query(self.right(idx),mid+1,r,a,b)
- return max(q1,q2)
-
- def showData(self):
- showList = []
- for i in range(1,N+1):
- showList += [self.query(1, 1, self.N, i, i)]
- print (showList)
-
-
-if __name__ == '__main__':
- A = [1,2,-4,7,3,-5,6,11,-20,9,14,15,5,2,-8]
- N = 15
- segt = SegmentTree(N)
- segt.build(1,1,N,A)
- print (segt.query(1,1,N,4,6))
- print (segt.query(1,1,N,7,11))
- print (segt.query(1,1,N,7,12))
- segt.update(1,1,N,1,3,111)
- print (segt.query(1,1,N,1,15))
- segt.update(1,1,N,7,8,235)
- segt.showData()
diff --git a/data_structures/binary_tree/__init__.py b/data_structures/binary_tree/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/data_structures/binary_tree/avl_tree.py b/data_structures/binary_tree/avl_tree.py
new file mode 100644
index 000000000000..3362610b9303
--- /dev/null
+++ b/data_structures/binary_tree/avl_tree.py
@@ -0,0 +1,328 @@
+"""
+Implementation of an auto-balanced binary tree!
+For doctests run following command:
+python3 -m doctest -v avl_tree.py
+For testing run:
+python avl_tree.py
+"""
+
+import math
+import random
+
+
+class my_queue:
+ def __init__(self):
+ self.data = []
+ self.head = 0
+ self.tail = 0
+
+ def is_empty(self):
+ return self.head == self.tail
+
+ def push(self, data):
+ self.data.append(data)
+ self.tail = self.tail + 1
+
+ def pop(self):
+ ret = self.data[self.head]
+ self.head = self.head + 1
+ return ret
+
+ def count(self):
+ return self.tail - self.head
+
+ def print(self):
+ print(self.data)
+ print("**************")
+ print(self.data[self.head : self.tail])
+
+
+class my_node:
+ def __init__(self, data):
+ self.data = data
+ self.left = None
+ self.right = None
+ self.height = 1
+
+ def get_data(self):
+ return self.data
+
+ def get_left(self):
+ return self.left
+
+ def get_right(self):
+ return self.right
+
+ def get_height(self):
+ return self.height
+
+ def set_data(self, data):
+ self.data = data
+ return
+
+ def set_left(self, node):
+ self.left = node
+ return
+
+ def set_right(self, node):
+ self.right = node
+ return
+
+ def set_height(self, height):
+ self.height = height
+ return
+
+
+def get_height(node):
+ if node is None:
+ return 0
+ return node.get_height()
+
+
+def my_max(a, b):
+ if a > b:
+ return a
+ return b
+
+
+def right_rotation(node):
+ r"""
+ A B
+ / \ / \
+ B C Bl A
+ / \ --> / / \
+ Bl Br UB Br C
+ /
+ UB
+ UB = unbalanced node
+ """
+ print("left rotation node:", node.get_data())
+ ret = node.get_left()
+ node.set_left(ret.get_right())
+ ret.set_right(node)
+ h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
+ node.set_height(h1)
+ h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
+ ret.set_height(h2)
+ return ret
+
+
+def left_rotation(node):
+ """
+    A mirror-image (symmetric) rotation of left_rotation.
+ """
+ print("right rotation node:", node.get_data())
+ ret = node.get_right()
+ node.set_right(ret.get_left())
+ ret.set_left(node)
+ h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
+ node.set_height(h1)
+ h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
+ ret.set_height(h2)
+ return ret
+
+
+def lr_rotation(node):
+ r"""
+ A A Br
+ / \ / \ / \
+ B C LR Br C RR B A
+ / \ --> / \ --> / / \
+ Bl Br B UB Bl UB C
+ \ /
+ UB Bl
+ RR = right_rotation LR = left_rotation
+ """
+ node.set_left(left_rotation(node.get_left()))
+ return right_rotation(node)
+
+
+def rl_rotation(node):
+ node.set_right(right_rotation(node.get_right()))
+ return left_rotation(node)
+
+
+def insert_node(node, data):
+ if node is None:
+ return my_node(data)
+ if data < node.get_data():
+ node.set_left(insert_node(node.get_left(), data))
+ if (
+ get_height(node.get_left()) - get_height(node.get_right()) == 2
+        ): # an imbalance detected
+ if (
+ data < node.get_left().get_data()
+ ): # new node is the left child of the left child
+ node = right_rotation(node)
+ else:
+ node = lr_rotation(node)
+ else:
+ node.set_right(insert_node(node.get_right(), data))
+ if get_height(node.get_right()) - get_height(node.get_left()) == 2:
+ if data < node.get_right().get_data():
+ node = rl_rotation(node)
+ else:
+ node = left_rotation(node)
+ h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
+ node.set_height(h1)
+ return node
+
+
+def get_rightMost(root):
+ while root.get_right() is not None:
+ root = root.get_right()
+ return root.get_data()
+
+
+def get_leftMost(root):
+ while root.get_left() is not None:
+ root = root.get_left()
+ return root.get_data()
+
+
+def del_node(root, data):
+ if root.get_data() == data:
+ if root.get_left() is not None and root.get_right() is not None:
+ temp_data = get_leftMost(root.get_right())
+ root.set_data(temp_data)
+ root.set_right(del_node(root.get_right(), temp_data))
+ elif root.get_left() is not None:
+ root = root.get_left()
+ else:
+ root = root.get_right()
+ elif root.get_data() > data:
+ if root.get_left() is None:
+ print("No such data")
+ return root
+ else:
+ root.set_left(del_node(root.get_left(), data))
+ elif root.get_data() < data:
+ if root.get_right() is None:
+ return root
+ else:
+ root.set_right(del_node(root.get_right(), data))
+ if root is None:
+ return root
+ if get_height(root.get_right()) - get_height(root.get_left()) == 2:
+ if get_height(root.get_right().get_right()) > get_height(
+ root.get_right().get_left()
+ ):
+ root = left_rotation(root)
+ else:
+ root = rl_rotation(root)
+ elif get_height(root.get_right()) - get_height(root.get_left()) == -2:
+ if get_height(root.get_left().get_left()) > get_height(
+ root.get_left().get_right()
+ ):
+ root = right_rotation(root)
+ else:
+ root = lr_rotation(root)
+ height = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
+ root.set_height(height)
+ return root
+
+
+class AVLtree:
+ """
+ An AVL tree doctest
+ Examples:
+ >>> t = AVLtree()
+ >>> t.insert(4)
+ insert:4
+ >>> print(str(t).replace(" \\n","\\n"))
+ 4
+ *************************************
+ >>> t.insert(2)
+ insert:2
+ >>> print(str(t).replace(" \\n","\\n").replace(" \\n","\\n"))
+ 4
+ 2 *
+ *************************************
+ >>> t.insert(3)
+ insert:3
+ right rotation node: 2
+ left rotation node: 4
+ >>> print(str(t).replace(" \\n","\\n").replace(" \\n","\\n"))
+ 3
+ 2 4
+ *************************************
+ >>> t.get_height()
+ 2
+ >>> t.del_node(3)
+ delete:3
+ >>> print(str(t).replace(" \\n","\\n").replace(" \\n","\\n"))
+ 4
+ 2 *
+ *************************************
+ """
+
+ def __init__(self):
+ self.root = None
+
+ def get_height(self):
+ # print("yyy")
+ return get_height(self.root)
+
+ def insert(self, data):
+ print("insert:" + str(data))
+ self.root = insert_node(self.root, data)
+
+ def del_node(self, data):
+ print("delete:" + str(data))
+ if self.root is None:
+ print("Tree is empty!")
+ return
+ self.root = del_node(self.root, data)
+
+    def __str__(self): # level-order traversal; gives a more intuitive look at the tree
+ output = ""
+ q = my_queue()
+ q.push(self.root)
+ layer = self.get_height()
+ if layer == 0:
+ return output
+ cnt = 0
+ while not q.is_empty():
+ node = q.pop()
+ space = " " * int(math.pow(2, layer - 1))
+ output += space
+ if node is None:
+ output += "*"
+ q.push(None)
+ q.push(None)
+ else:
+ output += str(node.get_data())
+ q.push(node.get_left())
+ q.push(node.get_right())
+ output += space
+ cnt = cnt + 1
+ for i in range(100):
+ if cnt == math.pow(2, i) - 1:
+ layer = layer - 1
+ if layer == 0:
+ output += "\n*************************************"
+ return output
+ output += "\n"
+ break
+ output += "\n*************************************"
+ return output
+
+
+def _test():
+ import doctest
+
+ doctest.testmod()
+
+
+if __name__ == "__main__":
+ _test()
+ t = AVLtree()
+ lst = list(range(10))
+ random.shuffle(lst)
+ for i in lst:
+ t.insert(i)
+ print(str(t))
+ random.shuffle(lst)
+ for i in lst:
+ t.del_node(i)
+ print(str(t))
diff --git a/data_structures/binary_tree/basic_binary_tree.py b/data_structures/binary_tree/basic_binary_tree.py
new file mode 100644
index 000000000000..575b157ee78a
--- /dev/null
+++ b/data_structures/binary_tree/basic_binary_tree.py
@@ -0,0 +1,101 @@
+from typing import Optional
+
+
+class Node:
+ """
+ A Node has data variable and pointers to Nodes to its left and right.
+ """
+
+ def __init__(self, data: int) -> None:
+ self.data = data
+ self.left: Optional[Node] = None
+ self.right: Optional[Node] = None
+
+
+def display(tree: Optional[Node]) -> None: # In Order traversal of the tree
+ """
+ >>> root = Node(1)
+ >>> root.left = Node(0)
+ >>> root.right = Node(2)
+ >>> display(root)
+ 0
+ 1
+ 2
+ >>> display(root.right)
+ 2
+ """
+ if tree:
+ display(tree.left)
+ print(tree.data)
+ display(tree.right)
+
+
+def depth_of_tree(tree: Optional[Node]) -> int:
+ """
+ Recursive function that returns the depth of a binary tree.
+
+ >>> root = Node(0)
+ >>> depth_of_tree(root)
+ 1
+ >>> root.left = Node(0)
+ >>> depth_of_tree(root)
+ 2
+ >>> root.right = Node(0)
+ >>> depth_of_tree(root)
+ 2
+ >>> root.left.right = Node(0)
+ >>> depth_of_tree(root)
+ 3
+ >>> depth_of_tree(root.left)
+ 2
+ """
+ return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0
+
+
+def is_full_binary_tree(tree: Node) -> bool:
+ """
+ Returns True if this is a full binary tree
+
+ >>> root = Node(0)
+ >>> is_full_binary_tree(root)
+ True
+ >>> root.left = Node(0)
+ >>> is_full_binary_tree(root)
+ False
+ >>> root.right = Node(0)
+ >>> is_full_binary_tree(root)
+ True
+ >>> root.left.left = Node(0)
+ >>> is_full_binary_tree(root)
+ False
+ >>> root.right.right = Node(0)
+ >>> is_full_binary_tree(root)
+ False
+ """
+ if not tree:
+ return True
+ if tree.left and tree.right:
+ return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
+ else:
+ return not tree.left and not tree.right
+
+
+def main() -> None: # Main function for testing.
+ tree = Node(1)
+ tree.left = Node(2)
+ tree.right = Node(3)
+ tree.left.left = Node(4)
+ tree.left.right = Node(5)
+ tree.left.right.left = Node(6)
+ tree.right.left = Node(7)
+ tree.right.left.left = Node(8)
+ tree.right.left.left.right = Node(9)
+
+ print(is_full_binary_tree(tree))
+ print(depth_of_tree(tree))
+ print("Tree is: ")
+ display(tree)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/data_structures/binary_tree/binary_search_tree.py b/data_structures/binary_tree/binary_search_tree.py
new file mode 100644
index 000000000000..45c3933fe899
--- /dev/null
+++ b/data_structures/binary_tree/binary_search_tree.py
@@ -0,0 +1,221 @@
+"""
+A binary search Tree
+"""
+
+
+class Node:
+    """A node of a binary search tree, holding a value and links to its
+    parent and children."""
+
+    def __init__(self, value, parent):
+        self.value = value
+        self.parent = parent  # Added in order to delete a node easier
+        self.left = None  # left child (smaller values)
+        self.right = None  # right child (larger or equal values)
+
+    def __repr__(self):
+        from pprint import pformat
+
+        # Leaves render as a bare value; inner nodes as a nested mapping.
+        if self.left is None and self.right is None:
+            return str(self.value)
+        return pformat({"%s" % (self.value): (self.left, self.right)}, indent=1)
+
+
+class BinarySearchTree:
+ def __init__(self, root=None):
+ self.root = root
+
+ def __str__(self):
+ """
+ Return a string of all the Nodes using in order traversal
+ """
+ return str(self.root)
+
+ def __reassign_nodes(self, node, new_children):
+ if new_children is not None: # reset its kids
+ new_children.parent = node.parent
+ if node.parent is not None: # reset its parent
+ if self.is_right(node): # If it is the right children
+ node.parent.right = new_children
+ else:
+ node.parent.left = new_children
+ else:
+ self.root = new_children
+
+ def is_right(self, node):
+ return node == node.parent.right
+
+ def empty(self):
+ return self.root is None
+
+ def __insert(self, value):
+ """
+ Insert a new node in Binary Search Tree with value label
+ """
+ new_node = Node(value, None) # create a new Node
+ if self.empty(): # if Tree is empty
+ self.root = new_node # set its root
+ else: # Tree is not empty
+ parent_node = self.root # from root
+ while True: # While we don't get to a leaf
+ if value < parent_node.value: # We go left
+ if parent_node.left is None:
+ parent_node.left = new_node # We insert the new node in a leaf
+ break
+ else:
+ parent_node = parent_node.left
+ else:
+ if parent_node.right is None:
+ parent_node.right = new_node
+ break
+ else:
+ parent_node = parent_node.right
+ new_node.parent = parent_node
+
+ def insert(self, *values):
+ for value in values:
+ self.__insert(value)
+ return self
+
+ def search(self, value):
+ if self.empty():
+ raise IndexError("Warning: Tree is empty! please use another.")
+ else:
+ node = self.root
+ # use lazy evaluation here to avoid NoneType Attribute error
+ while node is not None and node.value is not value:
+ node = node.left if value < node.value else node.right
+ return node
+
+ def get_max(self, node=None):
+ """
+ We go deep on the right branch
+ """
+ if node is None:
+ node = self.root
+ if not self.empty():
+ while node.right is not None:
+ node = node.right
+ return node
+
+ def get_min(self, node=None):
+ """
+ We go deep on the left branch
+ """
+ if node is None:
+ node = self.root
+ if not self.empty():
+ node = self.root
+ while node.left is not None:
+ node = node.left
+ return node
+
+ def remove(self, value):
+ node = self.search(value) # Look for the node with that label
+ if node is not None:
+ if node.left is None and node.right is None: # If it has no children
+ self.__reassign_nodes(node, None)
+ elif node.left is None: # Has only right children
+ self.__reassign_nodes(node, node.right)
+ elif node.right is None: # Has only left children
+ self.__reassign_nodes(node, node.left)
+ else:
+ tmp_node = self.get_max(
+ node.left
+ ) # Gets the max value of the left branch
+ self.remove(tmp_node.value)
+ node.value = (
+ tmp_node.value
+ ) # Assigns the value to the node to delete and keep tree structure
+
+ def preorder_traverse(self, node):
+ if node is not None:
+ yield node # Preorder Traversal
+ yield from self.preorder_traverse(node.left)
+ yield from self.preorder_traverse(node.right)
+
+ def traversal_tree(self, traversal_function=None):
+ """
+ This function traversal the tree.
+ You can pass a function to traversal the tree as needed by client code
+ """
+ if traversal_function is None:
+ return self.preorder_traverse(self.root)
+ else:
+ return traversal_function(self.root)
+
+ def inorder(self, arr: list, node: Node):
+ """Perform an inorder traversal and append values of the nodes to
+ a list named arr"""
+ if node:
+ self.inorder(arr, node.left)
+ arr.append(node.value)
+ self.inorder(arr, node.right)
+
+ def find_kth_smallest(self, k: int, node: Node) -> int:
+ """Return the kth smallest element in a binary search tree """
+ arr = []
+ self.inorder(arr, node) # append all values to list using inorder traversal
+ return arr[k - 1]
+
+
+def postorder(curr_node):
+    """
+    postOrder (left, right, self)
+    """
+    # Collect nodes bottom-up: left subtree, right subtree, then this node.
+    node_list = list()
+    if curr_node is not None:
+        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
+    return node_list
+
+
+def binary_search_tree():
+    r"""
+    Example
+                  8
+                 / \
+                3   10
+               / \    \
+              1   6    14
+                 / \   /
+                4   7 13
+
+    >>> t = BinarySearchTree().insert(8, 3, 6, 1, 10, 14, 13, 4, 7)
+    >>> print(" ".join(repr(i.value) for i in t.traversal_tree()))
+    8 3 1 6 4 7 10 14 13
+    >>> print(" ".join(repr(i.value) for i in t.traversal_tree(postorder)))
+    1 4 7 6 3 13 14 10 8
+    >>> BinarySearchTree().search(6)
+    Traceback (most recent call last):
+        ...
+    IndexError: Warning: Tree is empty! please use another.
+    """
+    # Demo driver: build the example tree, search, report min/max,
+    # then remove every element, printing the tree after each removal.
+    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
+    t = BinarySearchTree()
+    for i in testlist:
+        t.insert(i)
+
+    # Prints all the elements of the list in order traversal
+    print(t)
+
+    if t.search(6) is not None:
+        print("The value 6 exists")
+    else:
+        print("The value 6 doesn't exist")
+
+    if t.search(-1) is not None:
+        print("The value -1 exists")
+    else:
+        print("The value -1 doesn't exist")
+
+    if not t.empty():
+        print("Max Value: ", t.get_max().value)
+        print("Min Value: ", t.get_min().value)
+
+    for i in testlist:
+        t.remove(i)
+        print(t)
+
+
+if __name__ == "__main__":
+    import doctest
+
+    doctest.testmod()
+    # binary_search_tree()
diff --git a/data_structures/binary_tree/binary_search_tree_recursive.py b/data_structures/binary_tree/binary_search_tree_recursive.py
new file mode 100644
index 000000000000..f1e46e33cd24
--- /dev/null
+++ b/data_structures/binary_tree/binary_search_tree_recursive.py
@@ -0,0 +1,613 @@
+"""
+This is a python3 implementation of binary search tree using recursion
+
+To run tests:
+python -m unittest binary_search_tree_recursive.py
+
+To run an example:
+python binary_search_tree_recursive.py
+"""
+import unittest
+
+
+class Node:
+    """A node of a binary search tree, identified by an integer label."""
+
+    def __init__(self, label: int, parent):
+        self.label = label
+        self.parent = parent  # parent node, or None for the root
+        self.left = None  # left child (smaller labels)
+        self.right = None  # right child (larger labels)
+
+
+class BinarySearchTree:
+    """A recursive binary search tree keyed on unique integer labels.
+
+    Duplicate labels are rejected; deletion splices the lowest node of the
+    right subtree into the removed node's place.
+    """
+
+    def __init__(self):
+        self.root = None
+
+    def empty(self):
+        """
+        Empties the tree
+
+        >>> t = BinarySearchTree()
+        >>> assert t.root is None
+        >>> t.put(8)
+        >>> assert t.root is not None
+        """
+        self.root = None
+
+    def is_empty(self) -> bool:
+        """
+        Checks if the tree is empty
+
+        >>> t = BinarySearchTree()
+        >>> t.is_empty()
+        True
+        >>> t.put(8)
+        >>> t.is_empty()
+        False
+        """
+        return self.root is None
+
+    def put(self, label: int):
+        """
+        Put a new node in the tree
+
+        >>> t = BinarySearchTree()
+        >>> t.put(8)
+        >>> assert t.root.parent is None
+        >>> assert t.root.label == 8
+
+        >>> t.put(10)
+        >>> assert t.root.right.parent == t.root
+        >>> assert t.root.right.label == 10
+
+        >>> t.put(3)
+        >>> assert t.root.left.parent == t.root
+        >>> assert t.root.left.label == 3
+        """
+        self.root = self._put(self.root, label)
+
+    def _put(self, node: Node, label: int, parent: Node = None) -> Node:
+        # Recursively descend to an empty slot and create the node there;
+        # equal labels are not allowed.
+        if node is None:
+            node = Node(label, parent)
+        else:
+            if label < node.label:
+                node.left = self._put(node.left, label, node)
+            elif label > node.label:
+                node.right = self._put(node.right, label, node)
+            else:
+                raise Exception(f"Node with label {label} already exists")
+
+        return node
+
+    def search(self, label: int) -> Node:
+        """
+        Searches a node in the tree
+
+        >>> t = BinarySearchTree()
+        >>> t.put(8)
+        >>> t.put(10)
+        >>> node = t.search(8)
+        >>> assert node.label == 8
+
+        >>> node = t.search(3)
+        Traceback (most recent call last):
+            ...
+        Exception: Node with label 3 does not exist
+        """
+        return self._search(self.root, label)
+
+    def _search(self, node: Node, label: int) -> Node:
+        # Standard BST lookup; raising (not returning None) on a miss.
+        if node is None:
+            raise Exception(f"Node with label {label} does not exist")
+        else:
+            if label < node.label:
+                node = self._search(node.left, label)
+            elif label > node.label:
+                node = self._search(node.right, label)
+
+        return node
+
+    def remove(self, label: int):
+        """
+        Removes a node in the tree
+
+        >>> t = BinarySearchTree()
+        >>> t.put(8)
+        >>> t.put(10)
+        >>> t.remove(8)
+        >>> assert t.root.label == 10
+
+        >>> t.remove(3)
+        Traceback (most recent call last):
+            ...
+        Exception: Node with label 3 does not exist
+        """
+        node = self.search(label)
+        if not node.right and not node.left:
+            self._reassign_nodes(node, None)
+        elif not node.right and node.left:
+            self._reassign_nodes(node, node.left)
+        elif node.right and not node.left:
+            self._reassign_nodes(node, node.right)
+        else:
+            # Two children: the in-order successor (lowest node of the right
+            # subtree) is detached first, then takes over node's children
+            # and position.  Statement order matters here.
+            lowest_node = self._get_lowest_node(node.right)
+            lowest_node.left = node.left
+            lowest_node.right = node.right
+            node.left.parent = lowest_node
+            if node.right:
+                node.right.parent = lowest_node
+            self._reassign_nodes(node, lowest_node)
+
+    def _reassign_nodes(self, node: Node, new_children: Node):
+        # Splice new_children into node's place, fixing links on both sides.
+        if new_children:
+            new_children.parent = node.parent
+
+        if node.parent:
+            if node.parent.right == node:
+                node.parent.right = new_children
+            else:
+                node.parent.left = new_children
+        else:
+            self.root = new_children
+
+    def _get_lowest_node(self, node: Node) -> Node:
+        # Find the leftmost node of the subtree; once found, detach it by
+        # promoting its right child (it has no left child by construction).
+        if node.left:
+            lowest_node = self._get_lowest_node(node.left)
+        else:
+            lowest_node = node
+            self._reassign_nodes(node, node.right)
+
+        return lowest_node
+
+    def exists(self, label: int) -> bool:
+        """
+        Checks if a node exists in the tree
+
+        >>> t = BinarySearchTree()
+        >>> t.put(8)
+        >>> t.put(10)
+        >>> t.exists(8)
+        True
+
+        >>> t.exists(3)
+        False
+        """
+        try:
+            self.search(label)
+            return True
+        except Exception:
+            return False
+
+    def get_max_label(self) -> int:
+        """
+        Gets the max label inserted in the tree
+
+        >>> t = BinarySearchTree()
+        >>> t.get_max_label()
+        Traceback (most recent call last):
+            ...
+        Exception: Binary search tree is empty
+
+        >>> t.put(8)
+        >>> t.put(10)
+        >>> t.get_max_label()
+        10
+        """
+        if self.is_empty():
+            raise Exception("Binary search tree is empty")
+
+        # The maximum is the rightmost node.
+        node = self.root
+        while node.right is not None:
+            node = node.right
+
+        return node.label
+
+    def get_min_label(self) -> int:
+        """
+        Gets the min label inserted in the tree
+
+        >>> t = BinarySearchTree()
+        >>> t.get_min_label()
+        Traceback (most recent call last):
+            ...
+        Exception: Binary search tree is empty
+
+        >>> t.put(8)
+        >>> t.put(10)
+        >>> t.get_min_label()
+        8
+        """
+        if self.is_empty():
+            raise Exception("Binary search tree is empty")
+
+        # The minimum is the leftmost node.
+        node = self.root
+        while node.left is not None:
+            node = node.left
+
+        return node.label
+
+    def inorder_traversal(self) -> list:
+        """
+        Return the inorder traversal of the tree
+
+        >>> t = BinarySearchTree()
+        >>> [i.label for i in t.inorder_traversal()]
+        []
+
+        >>> t.put(8)
+        >>> t.put(10)
+        >>> t.put(9)
+        >>> [i.label for i in t.inorder_traversal()]
+        [8, 9, 10]
+        """
+        # NOTE(review): actually returns a generator of Nodes, not a list --
+        # the `-> list` annotation is inaccurate; callers iterate it anyway.
+        return self._inorder_traversal(self.root)
+
+    def _inorder_traversal(self, node: Node) -> list:
+        if node is not None:
+            yield from self._inorder_traversal(node.left)
+            yield node
+            yield from self._inorder_traversal(node.right)
+
+    def preorder_traversal(self) -> list:
+        """
+        Return the preorder traversal of the tree
+
+        >>> t = BinarySearchTree()
+        >>> [i.label for i in t.preorder_traversal()]
+        []
+
+        >>> t.put(8)
+        >>> t.put(10)
+        >>> t.put(9)
+        >>> [i.label for i in t.preorder_traversal()]
+        [8, 10, 9]
+        """
+        # NOTE(review): like inorder_traversal, this returns a generator.
+        return self._preorder_traversal(self.root)
+
+    def _preorder_traversal(self, node: Node) -> list:
+        if node is not None:
+            yield node
+            yield from self._preorder_traversal(node.left)
+            yield from self._preorder_traversal(node.right)
+
+
+class BinarySearchTreeTest(unittest.TestCase):
+    """Unit tests for BinarySearchTree; each case builds the fixture tree
+    drawn in _get_binary_search_tree and checks structure after mutations."""
+
+    @staticmethod
+    def _get_binary_search_tree():
+        r"""
+                  8
+                 / \
+                3   10
+               / \    \
+              1   6    14
+                 / \   /
+                4   7 13
+                 \
+                  5
+        """
+        t = BinarySearchTree()
+        t.put(8)
+        t.put(3)
+        t.put(6)
+        t.put(1)
+        t.put(10)
+        t.put(14)
+        t.put(13)
+        t.put(4)
+        t.put(7)
+        t.put(5)
+
+        return t
+
+    def test_put(self):
+        t = BinarySearchTree()
+        assert t.is_empty()
+
+        t.put(8)
+        r"""
+              8
+        """
+        assert t.root.parent is None
+        assert t.root.label == 8
+
+        t.put(10)
+        r"""
+              8
+               \
+                10
+        """
+        assert t.root.right.parent == t.root
+        assert t.root.right.label == 10
+
+        t.put(3)
+        r"""
+              8
+             / \
+            3   10
+        """
+        assert t.root.left.parent == t.root
+        assert t.root.left.label == 3
+
+        t.put(6)
+        r"""
+              8
+             / \
+            3   10
+             \
+              6
+        """
+        assert t.root.left.right.parent == t.root.left
+        assert t.root.left.right.label == 6
+
+        t.put(1)
+        r"""
+              8
+             / \
+            3   10
+           / \
+          1   6
+        """
+        assert t.root.left.left.parent == t.root.left
+        assert t.root.left.left.label == 1
+
+        # Duplicate labels must be rejected.
+        with self.assertRaises(Exception):
+            t.put(1)
+
+    def test_search(self):
+        t = self._get_binary_search_tree()
+
+        node = t.search(6)
+        assert node.label == 6
+
+        node = t.search(13)
+        assert node.label == 13
+
+        with self.assertRaises(Exception):
+            t.search(2)
+
+    def test_remove(self):
+        t = self._get_binary_search_tree()
+
+        t.remove(13)
+        r"""
+                  8
+                 / \
+                3   10
+               / \    \
+              1   6    14
+                 / \
+                4   7
+                 \
+                  5
+        """
+        assert t.root.right.right.right is None
+        assert t.root.right.right.left is None
+
+        t.remove(7)
+        r"""
+                  8
+                 / \
+                3   10
+               / \    \
+              1   6    14
+                 /
+                4
+                 \
+                  5
+        """
+        assert t.root.left.right.right is None
+        assert t.root.left.right.left.label == 4
+
+        t.remove(6)
+        r"""
+                  8
+                 / \
+                3   10
+               / \    \
+              1   4    14
+                   \
+                    5
+        """
+        assert t.root.left.left.label == 1
+        assert t.root.left.right.label == 4
+        assert t.root.left.right.right.label == 5
+        assert t.root.left.right.left is None
+        assert t.root.left.left.parent == t.root.left
+        assert t.root.left.right.parent == t.root.left
+
+        t.remove(3)
+        r"""
+                  8
+                 / \
+                4   10
+               / \    \
+              1   5    14
+        """
+        assert t.root.left.label == 4
+        assert t.root.left.right.label == 5
+        assert t.root.left.left.label == 1
+        assert t.root.left.parent == t.root
+        assert t.root.left.left.parent == t.root.left
+        assert t.root.left.right.parent == t.root.left
+
+        t.remove(4)
+        r"""
+                  8
+                 / \
+                5   10
+               /      \
+              1        14
+        """
+        assert t.root.left.label == 5
+        assert t.root.left.right is None
+        assert t.root.left.left.label == 1
+        assert t.root.left.parent == t.root
+        assert t.root.left.left.parent == t.root.left
+
+    def test_remove_2(self):
+        t = self._get_binary_search_tree()
+
+        t.remove(3)
+        r"""
+                  8
+                 / \
+                4   10
+               / \    \
+              1   6    14
+                 / \   /
+                5   7 13
+        """
+        assert t.root.left.label == 4
+        assert t.root.left.right.label == 6
+        assert t.root.left.left.label == 1
+        assert t.root.left.right.right.label == 7
+        assert t.root.left.right.left.label == 5
+        assert t.root.left.parent == t.root
+        assert t.root.left.right.parent == t.root.left
+        assert t.root.left.left.parent == t.root.left
+        assert t.root.left.right.left.parent == t.root.left.right
+
+    def test_empty(self):
+        t = self._get_binary_search_tree()
+        t.empty()
+        assert t.root is None
+
+    def test_is_empty(self):
+        t = self._get_binary_search_tree()
+        assert not t.is_empty()
+
+        t.empty()
+        assert t.is_empty()
+
+    def test_exists(self):
+        t = self._get_binary_search_tree()
+
+        assert t.exists(6)
+        assert not t.exists(-1)
+
+    def test_get_max_label(self):
+        t = self._get_binary_search_tree()
+
+        assert t.get_max_label() == 14
+
+        t.empty()
+        with self.assertRaises(Exception):
+            t.get_max_label()
+
+    def test_get_min_label(self):
+        t = self._get_binary_search_tree()
+
+        assert t.get_min_label() == 1
+
+        t.empty()
+        with self.assertRaises(Exception):
+            t.get_min_label()
+
+    def test_inorder_traversal(self):
+        t = self._get_binary_search_tree()
+
+        inorder_traversal_nodes = [i.label for i in t.inorder_traversal()]
+        assert inorder_traversal_nodes == [1, 3, 4, 5, 6, 7, 8, 10, 13, 14]
+
+    def test_preorder_traversal(self):
+        t = self._get_binary_search_tree()
+
+        preorder_traversal_nodes = [i.label for i in t.preorder_traversal()]
+        assert preorder_traversal_nodes == [8, 3, 1, 6, 4, 5, 7, 10, 14, 13]
+
+
+def binary_search_tree_example():
+    r"""
+    Example
+                  8
+                 / \
+                3   10
+               / \    \
+              1   6    14
+                 / \   /
+                4   7 13
+                 \
+                  5
+
+    Example After Deletion
+                  4
+                 / \
+                1   7
+                     \
+                      5
+
+    """
+
+    # Build the example tree, demonstrate lookups and traversals, then
+    # delete most nodes and show the resulting tree.
+    t = BinarySearchTree()
+    t.put(8)
+    t.put(3)
+    t.put(6)
+    t.put(1)
+    t.put(10)
+    t.put(14)
+    t.put(13)
+    t.put(4)
+    t.put(7)
+    t.put(5)
+
+    print(
+        """
+            8
+           / \\
+          3   10
+         / \\    \\
+        1   6    14
+           / \\   /
+          4   7 13
+           \\
+            5
+        """
+    )
+
+    print("Label 6 exists:", t.exists(6))
+    print("Label 13 exists:", t.exists(13))
+    print("Label -1 exists:", t.exists(-1))
+    print("Label 12 exists:", t.exists(12))
+
+    # Prints all the elements of the list in inorder traversal
+    inorder_traversal_nodes = [i.label for i in t.inorder_traversal()]
+    print("Inorder traversal:", inorder_traversal_nodes)
+
+    # Prints all the elements of the list in preorder traversal
+    preorder_traversal_nodes = [i.label for i in t.preorder_traversal()]
+    print("Preorder traversal:", preorder_traversal_nodes)
+
+    print("Max. label:", t.get_max_label())
+    print("Min. label:", t.get_min_label())
+
+    # Delete elements
+    print("\nDeleting elements 13, 10, 8, 3, 6, 14")
+    print(
+        """
+          4
+         / \\
+        1   7
+             \\
+              5
+        """
+    )
+    t.remove(13)
+    t.remove(10)
+    t.remove(8)
+    t.remove(3)
+    t.remove(6)
+    t.remove(14)
+
+    # Prints all the elements of the list in inorder traversal after delete
+    inorder_traversal_nodes = [i.label for i in t.inorder_traversal()]
+    print("Inorder traversal after delete:", inorder_traversal_nodes)
+
+    # Prints all the elements of the list in preorder traversal after delete
+    preorder_traversal_nodes = [i.label for i in t.preorder_traversal()]
+    print("Preorder traversal after delete:", preorder_traversal_nodes)
+
+    print("Max. label:", t.get_max_label())
+    print("Min. label:", t.get_min_label())
+
+
+if __name__ == "__main__":
+    binary_search_tree_example()
diff --git a/data_structures/binary_tree/binary_tree_mirror.py b/data_structures/binary_tree/binary_tree_mirror.py
new file mode 100644
index 000000000000..dc7f657b37c7
--- /dev/null
+++ b/data_structures/binary_tree/binary_tree_mirror.py
@@ -0,0 +1,44 @@
+"""
+Problem Description:
+Given a binary tree, return its mirror.
+"""
+
+
+def binary_tree_mirror_dict(binary_tree_mirror_dictionary: dict, root: int):
+    """Recursively swap the [left, right] child lists in place, starting
+    at ``root``.  Keys absent from the dictionary are treated as leaves."""
+    if not root or root not in binary_tree_mirror_dictionary:
+        return
+    left_child, right_child = binary_tree_mirror_dictionary[root][:2]
+    binary_tree_mirror_dictionary[root] = [right_child, left_child]
+    binary_tree_mirror_dict(binary_tree_mirror_dictionary, left_child)
+    binary_tree_mirror_dict(binary_tree_mirror_dictionary, right_child)
+
+
+def binary_tree_mirror(binary_tree: dict, root: int = 1) -> dict:
+    """
+    >>> binary_tree_mirror({ 1: [2,3], 2: [4,5], 3: [6,7], 7: [8,9]}, 1)
+    {1: [3, 2], 2: [5, 4], 3: [7, 6], 7: [9, 8]}
+    >>> binary_tree_mirror({ 1: [2,3], 2: [4,5], 3: [6,7], 4: [10,11]}, 1)
+    {1: [3, 2], 2: [5, 4], 3: [7, 6], 4: [11, 10]}
+    >>> binary_tree_mirror({ 1: [2,3], 2: [4,5], 3: [6,7], 4: [10,11]}, 5)
+    Traceback (most recent call last):
+        ...
+    ValueError: root 5 is not present in the binary_tree
+    >>> binary_tree_mirror({}, 5)
+    Traceback (most recent call last):
+        ...
+    ValueError: binary tree cannot be empty
+    """
+    if not binary_tree:
+        raise ValueError("binary tree cannot be empty")
+    if root not in binary_tree:
+        raise ValueError(f"root {root} is not present in the binary_tree")
+    # Shallow-copy the adjacency dict so the caller's tree is left untouched.
+    binary_tree_mirror_dictionary = dict(binary_tree)
+    binary_tree_mirror_dict(binary_tree_mirror_dictionary, root)
+    return binary_tree_mirror_dictionary
+
+
+if __name__ == "__main__":
+ binary_tree = {1: [2, 3], 2: [4, 5], 3: [6, 7], 7: [8, 9]}
+ print(f"Binary tree: {binary_tree}")
+ binary_tree_mirror_dictionary = binary_tree_mirror(binary_tree, 5)
+ print(f"Binary tree mirror: {binary_tree_mirror_dictionary}")
diff --git a/data_structures/binary_tree/binary_tree_traversals.py b/data_structures/binary_tree/binary_tree_traversals.py
new file mode 100644
index 000000000000..7c0ee1dbbc2a
--- /dev/null
+++ b/data_structures/binary_tree/binary_tree_traversals.py
@@ -0,0 +1,161 @@
+# https://en.wikipedia.org/wiki/Tree_traversal
+
+
+class Node:
+    """
+    A Node has data variable and pointers to its left and right nodes.
+    """
+
+    def __init__(self, data):
+        self.left = None  # left child
+        self.right = None  # right child
+        self.data = data  # payload stored at this node
+
+
+def make_tree() -> Node:
+    """Build the fixed five-node sample tree used by the doctests:
+    1 at the root, 2/3 as children, 4/5 under 2."""
+    root = Node(1)
+    root.left = Node(2)
+    root.right = Node(3)
+    root.left.left = Node(4)
+    root.left.right = Node(5)
+    return root
+
+
+def preorder(root: Node):
+    """
+    Pre-order traversal visits root node, left subtree, right subtree.
+    >>> preorder(make_tree())
+    [1, 2, 4, 5, 3]
+    """
+    # Empty subtree contributes an empty list.
+    return [root.data] + preorder(root.left) + preorder(root.right) if root else []
+
+
+def postorder(root: Node):
+    """
+    Post-order traversal visits left subtree, right subtree, root node.
+    >>> postorder(make_tree())
+    [4, 5, 2, 3, 1]
+    """
+    return postorder(root.left) + postorder(root.right) + [root.data] if root else []
+
+
+def inorder(root: Node):
+    """
+    In-order traversal visits left subtree, root node, right subtree.
+    >>> inorder(make_tree())
+    [4, 2, 5, 1, 3]
+    """
+    return inorder(root.left) + [root.data] + inorder(root.right) if root else []
+
+
+def height(root: Node):
+    """
+    Recursive function for calculating the height of the binary tree.
+    >>> height(None)
+    0
+    >>> height(make_tree())
+    3
+    """
+    # Height counts nodes on the longest root-to-leaf path (empty tree = 0).
+    return (max(height(root.left), height(root.right)) + 1) if root else 0
+
+
+def level_order_1(root: Node):
+    """
+    Print whole binary tree in Level Order Traverse.
+    Level Order traverse: Visit nodes of the tree level-by-level.
+    """
+    if not root:
+        return
+    temp = root
+    # NOTE(review): list.pop(0) is O(n); collections.deque.popleft() would
+    # make this O(1) per dequeue.
+    que = [temp]
+    while len(que) > 0:
+        print(que[0].data, end=" ")
+        temp = que.pop(0)
+        if temp.left:
+            que.append(temp.left)
+        if temp.right:
+            que.append(temp.right)
+    # que is empty here; returned for interface symmetry with callers.
+    return que
+
+
+def level_order_2(root: Node, level: int):
+    """
+    Level-wise traversal: Print all nodes present at the given level of the binary tree
+    """
+    if not root:
+        return root
+    if level == 1:
+        print(root.data, end=" ")
+    elif level > 1:
+        # Descend one level on both sides until level reaches 1.
+        level_order_2(root.left, level - 1)
+        level_order_2(root.right, level - 1)
+
+
+def print_left_to_right(root: Node, level: int):
+    """
+    Print elements on particular level from left to right direction of the binary tree.
+    """
+    if not root:
+        return
+    if level == 1:
+        print(root.data, end=" ")
+    elif level > 1:
+        print_left_to_right(root.left, level - 1)
+        print_left_to_right(root.right, level - 1)
+
+
+def print_right_to_left(root: Node, level: int):
+    """
+    Print elements on particular level from right to left direction of the binary tree.
+    """
+    if not root:
+        return
+    if level == 1:
+        print(root.data, end=" ")
+    elif level > 1:
+        # Visit the right subtree first to reverse the printing order.
+        print_right_to_left(root.right, level - 1)
+        print_right_to_left(root.left, level - 1)
+
+
+def zigzag(root: Node):
+    """
+    ZigZag traverse: Print node left to right and right to left, alternatively.
+    """
+    # flag toggles direction between successive levels.
+    flag = 0
+    height_tree = height(root)
+    for h in range(1, height_tree + 1):
+        if flag == 0:
+            print_left_to_right(root, h)
+            flag = 1
+        else:
+            print_right_to_left(root, h)
+            flag = 0
+
+
+def main():  # Main function for testing.
+    """
+    Create binary tree.
+    """
+    root = make_tree()
+    """
+    All Traversals of the binary are as follows:
+    """
+    print(f"  In-order Traversal is {inorder(root)}")
+    print(f" Pre-order Traversal is {preorder(root)}")
+    print(f"Post-order Traversal is {postorder(root)}")
+    print(f"Height of Tree is {height(root)}")
+    print("Complete Level Order Traversal is : ")
+    level_order_1(root)
+    print("\nLevel-wise order Traversal is : ")
+    for h in range(1, height(root) + 1):
+        level_order_2(root, h)
+    print("\nZigZag order Traversal is : ")
+    zigzag(root)
+    print()
+
+
+if __name__ == "__main__":
+    import doctest
+
+    doctest.testmod()
+    main()
diff --git a/data_structures/binary_tree/fenwick_tree.py b/data_structures/binary_tree/fenwick_tree.py
new file mode 100644
index 000000000000..54f0f07ac68d
--- /dev/null
+++ b/data_structures/binary_tree/fenwick_tree.py
@@ -0,0 +1,28 @@
+class FenwickTree:
+    """Fenwick (binary indexed) tree over indices 1..SIZE-1.
+
+    Index 0 is unused by convention; both update and query run in O(log N).
+    """
+
+    def __init__(self, SIZE):  # create fenwick tree with size SIZE
+        self.Size = SIZE
+        self.ft = [0 for i in range(0, SIZE)]
+
+    def update(self, i, val):  # update data (adding) in index i in O(lg N)
+        # i & (-i) isolates the lowest set bit, stepping to the next range.
+        while i < self.Size:
+            self.ft[i] += val
+            i += i & (-i)
+
+    def query(self, i):  # query cumulative data from index 0 to i in O(lg N)
+        ret = 0
+        while i > 0:
+            ret += self.ft[i]
+            i -= i & (-i)
+        return ret
+
+
+if __name__ == "__main__":
+    # Small smoke test: two point updates, prefix-sum queries, then a
+    # negative update to show removal of a previously added value.
+    f = FenwickTree(100)
+    f.update(1, 20)
+    f.update(4, 4)
+    print(f.query(1))
+    print(f.query(3))
+    print(f.query(4))
+    f.update(2, -5)
+    print(f.query(1))
+    print(f.query(3))
diff --git a/data_structures/binary_tree/lazy_segment_tree.py b/data_structures/binary_tree/lazy_segment_tree.py
new file mode 100644
index 000000000000..5bc79e74efcd
--- /dev/null
+++ b/data_structures/binary_tree/lazy_segment_tree.py
@@ -0,0 +1,136 @@
+from __future__ import annotations
+
+import math
+
+
+class SegmentTree:
+ def __init__(self, size: int) -> None:
+ self.size = size
+ # approximate the overall size of segment tree with given value
+ self.segment_tree = [0 for i in range(0, 4 * size)]
+ # create array to store lazy update
+ self.lazy = [0 for i in range(0, 4 * size)]
+ self.flag = [0 for i in range(0, 4 * size)] # flag for lazy update
+
+ def left(self, idx: int) -> int:
+ """
+ >>> segment_tree = SegmentTree(15)
+ >>> segment_tree.left(1)
+ 2
+ >>> segment_tree.left(2)
+ 4
+ >>> segment_tree.left(12)
+ 24
+ """
+ return idx * 2
+
+ def right(self, idx: int) -> int:
+ """
+ >>> segment_tree = SegmentTree(15)
+ >>> segment_tree.right(1)
+ 3
+ >>> segment_tree.right(2)
+ 5
+ >>> segment_tree.right(12)
+ 25
+ """
+ return idx * 2 + 1
+
+ def build(
+ self, idx: int, left_element: int, right_element: int, A: list[int]
+ ) -> None:
+ if left_element == right_element:
+ self.segment_tree[idx] = A[left_element - 1]
+ else:
+ mid = (left_element + right_element) // 2
+ self.build(self.left(idx), left_element, mid, A)
+ self.build(self.right(idx), mid + 1, right_element, A)
+ self.segment_tree[idx] = max(
+ self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
+ )
+
+ def update(
+ self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int
+ ) -> bool:
+ """
+ update with O(lg n) (Normal segment tree without lazy update will take O(nlg n)
+ for each update)
+
+ update(1, 1, size, a, b, v) for update val v to [a,b]
+ """
+ if self.flag[idx] is True:
+ self.segment_tree[idx] = self.lazy[idx]
+ self.flag[idx] = False
+ if left_element != right_element:
+ self.lazy[self.left(idx)] = self.lazy[idx]
+ self.lazy[self.right(idx)] = self.lazy[idx]
+ self.flag[self.left(idx)] = True
+ self.flag[self.right(idx)] = True
+
+ if right_element < a or left_element > b:
+ return True
+ if left_element >= a and right_element <= b:
+ self.segment_tree[idx] = val
+ if left_element != right_element:
+ self.lazy[self.left(idx)] = val
+ self.lazy[self.right(idx)] = val
+ self.flag[self.left(idx)] = True
+ self.flag[self.right(idx)] = True
+ return True
+ mid = (left_element + right_element) // 2
+ self.update(self.left(idx), left_element, mid, a, b, val)
+ self.update(self.right(idx), mid + 1, right_element, a, b, val)
+ self.segment_tree[idx] = max(
+ self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
+ )
+ return True
+
+ # query with O(lg n)
+ def query(
+ self, idx: int, left_element: int, right_element: int, a: int, b: int
+ ) -> int:
+ """
+ query(1, 1, size, a, b) for query max of [a,b]
+ >>> A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
+ >>> segment_tree = SegmentTree(15)
+ >>> segment_tree.build(1, 1, 15, A)
+ >>> segment_tree.query(1, 1, 15, 4, 6)
+ 7
+ >>> segment_tree.query(1, 1, 15, 7, 11)
+ 14
+ >>> segment_tree.query(1, 1, 15, 7, 12)
+ 15
+ """
+ if self.flag[idx] is True:
+ self.segment_tree[idx] = self.lazy[idx]
+ self.flag[idx] = False
+ if left_element != right_element:
+ self.lazy[self.left(idx)] = self.lazy[idx]
+ self.lazy[self.right(idx)] = self.lazy[idx]
+ self.flag[self.left(idx)] = True
+ self.flag[self.right(idx)] = True
+ if right_element < a or left_element > b:
+ return -math.inf
+ if left_element >= a and right_element <= b:
+ return self.segment_tree[idx]
+ mid = (left_element + right_element) // 2
+ q1 = self.query(self.left(idx), left_element, mid, a, b)
+ q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
+ return max(q1, q2)
+
+ def __str__(self) -> None:
+ return [self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)]
+
+
+if __name__ == "__main__":
+    # Demo: build from A, run three range-max queries, then two lazy
+    # range assignments, and finally print the per-element values.
+    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
+    size = 15
+    segt = SegmentTree(size)
+    segt.build(1, 1, size, A)
+    print(segt.query(1, 1, size, 4, 6))
+    print(segt.query(1, 1, size, 7, 11))
+    print(segt.query(1, 1, size, 7, 12))
+    segt.update(1, 1, size, 1, 3, 111)
+    print(segt.query(1, 1, size, 1, 15))
+    segt.update(1, 1, size, 7, 8, 235)
+    print(segt)
diff --git a/data_structures/LCA.py b/data_structures/binary_tree/lowest_common_ancestor.py
similarity index 51%
rename from data_structures/LCA.py
rename to data_structures/binary_tree/lowest_common_ancestor.py
index 9c9d8ca629c7..2f1e893fcf99 100644
--- a/data_structures/LCA.py
+++ b/data_structures/binary_tree/lowest_common_ancestor.py
@@ -1,15 +1,31 @@
+# https://en.wikipedia.org/wiki/Lowest_common_ancestor
+# https://en.wikipedia.org/wiki/Breadth-first_search
+
+from __future__ import annotations
+
import queue
-def swap(a, b):
+def swap(a: int, b: int) -> tuple[int, int]:
+ """
+ Return a tuple (b, a) when given two integers a and b
+ >>> swap(2,3)
+ (3, 2)
+ >>> swap(3,4)
+ (4, 3)
+ >>> swap(67, 12)
+ (12, 67)
+ """
a ^= b
b ^= a
a ^= b
return a, b
-# creating sparse table which saves each nodes 2^ith parent
-def creatSparse(max_node, parent):
+def create_sparse(max_node: int, parent: list[list[int]]) -> list[list[int]]:
+ """
+ creating sparse table which saves each nodes 2^i-th parent
+ """
j = 1
while (1 << j) < max_node:
for i in range(1, max_node + 1):
@@ -19,7 +35,9 @@ def creatSparse(max_node, parent):
# returns lca of node u,v
-def LCA(u, v, level, parent):
+def lowest_common_ancestor(
+ u: int, v: int, level: list[int], parent: list[list[int]]
+) -> list[list[int]]:
# u must be deeper in the tree than v
if level[u] < level[v]:
u, v = swap(u, v)
@@ -39,10 +57,18 @@ def LCA(u, v, level, parent):
# runs a breadth first search from root node of the tree
-# sets every nodes direct parent
-# parent of root node is set to 0
-# calculates depth of each node from root node
-def bfs(level, parent, max_node, graph, root=1):
+def breadth_first_search(
+ level: list[int],
+ parent: list[list[int]],
+ max_node: int,
+ graph: dict[int, int],
+ root=1,
+) -> tuple[list[int], list[list[int]]]:
+ """
+ sets every nodes direct parent
+ parent of root node is set to 0
+ calculates depth of each node from root node
+ """
level[root] = 0
q = queue.Queue(maxsize=max_node)
q.put(root)
@@ -56,7 +82,7 @@ def bfs(level, parent, max_node, graph, root=1):
return level, parent
-def main():
+def main() -> None:
max_node = 13
# initializing with 0
parent = [[0 for _ in range(max_node + 10)] for _ in range(20)]
@@ -75,16 +101,16 @@ def main():
10: [],
11: [],
12: [],
- 13: []
+ 13: [],
}
- level, parent = bfs(level, parent, max_node, graph, 1)
- parent = creatSparse(max_node, parent)
- print("LCA of node 1 and 3 is: ", LCA(1, 3, level, parent))
- print("LCA of node 5 and 6 is: ", LCA(5, 6, level, parent))
- print("LCA of node 7 and 11 is: ", LCA(7, 11, level, parent))
- print("LCA of node 6 and 7 is: ", LCA(6, 7, level, parent))
- print("LCA of node 4 and 12 is: ", LCA(4, 12, level, parent))
- print("LCA of node 8 and 8 is: ", LCA(8, 8, level, parent))
+ level, parent = breadth_first_search(level, parent, max_node, graph, 1)
+ parent = create_sparse(max_node, parent)
+ print("LCA of node 1 and 3 is: ", lowest_common_ancestor(1, 3, level, parent))
+ print("LCA of node 5 and 6 is: ", lowest_common_ancestor(5, 6, level, parent))
+ print("LCA of node 7 and 11 is: ", lowest_common_ancestor(7, 11, level, parent))
+ print("LCA of node 6 and 7 is: ", lowest_common_ancestor(6, 7, level, parent))
+ print("LCA of node 4 and 12 is: ", lowest_common_ancestor(4, 12, level, parent))
+ print("LCA of node 8 and 8 is: ", lowest_common_ancestor(8, 8, level, parent))
if __name__ == "__main__":
diff --git a/data_structures/binary_tree/non_recursive_segment_tree.py b/data_structures/binary_tree/non_recursive_segment_tree.py
new file mode 100644
index 000000000000..064e5aded7b4
--- /dev/null
+++ b/data_structures/binary_tree/non_recursive_segment_tree.py
@@ -0,0 +1,158 @@
+"""
+A non-recursive Segment Tree implementation with range query and single element update,
+works virtually with any list of the same type of elements with a "commutative"
+combiner.
+
+Explanation:
+https://www.geeksforgeeks.org/iterative-segment-tree-range-minimum-query/
+https://www.geeksforgeeks.org/segment-tree-efficient-implementation/
+
+>>> SegmentTree([1, 2, 3], lambda a, b: a + b).query(0, 2)
+6
+>>> SegmentTree([3, 1, 2], min).query(0, 2)
+1
+>>> SegmentTree([2, 3, 1], max).query(0, 2)
+3
+>>> st = SegmentTree([1, 5, 7, -1, 6], lambda a, b: a + b)
+>>> st.update(1, -1)
+>>> st.update(2, 3)
+>>> st.query(1, 2)
+2
+>>> st.query(1, 1)
+-1
+>>> st.update(4, 1)
+>>> st.query(3, 4)
+0
+>>> st = SegmentTree([[1, 2, 3], [3, 2, 1], [1, 1, 1]], lambda a, b: [a[i] + b[i] for i
+... in range(len(a))])
+>>> st.query(0, 1)
+[4, 4, 4]
+>>> st.query(1, 2)
+[4, 3, 2]
+>>> st.update(1, [-1, -1, -1])
+>>> st.query(1, 2)
+[0, 0, 0]
+>>> st.query(0, 2)
+[1, 2, 3]
+"""
+from __future__ import annotations
+
+from typing import Callable, TypeVar
+
+T = TypeVar("T")
+
+
class SegmentTree:
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        """
        Build a flat (iterative) segment tree; works only with a commutative
        combiner.
        :param arr: list of elements for the segment tree
        :param fnc: commutative function for combine two elements

        >>> SegmentTree(['a', 'b', 'c'], lambda a, b: '{}{}'.format(a, b)).query(0, 2)
        'abc'
        >>> SegmentTree([(1, 2), (2, 3), (3, 4)],
        ... lambda a, b: (a[0] + b[0], a[1] + b[1])).query(0, 2)
        (6, 9)
        """
        self.N = len(arr)
        # Leaves occupy st[N:2N]; internal node p combines st[2p] and st[2p+1].
        self.st = [None] * self.N + list(arr)
        self.fn = fnc
        self.build()

    def build(self) -> None:
        """Fill every internal node from its two children, bottom-up."""
        for node in range(self.N - 1, 0, -1):
            self.st[node] = self.fn(self.st[node * 2], self.st[node * 2 + 1])

    def update(self, p: int, v: T) -> None:
        """
        Update an element in log(N) time
        :param p: position to be update
        :param v: new value

        >>> st = SegmentTree([3, 1, 2, 4], min)
        >>> st.query(0, 3)
        1
        >>> st.update(2, -1)
        >>> st.query(0, 3)
        -1
        """
        pos = p + self.N
        self.st[pos] = v
        # Walk up to the root, recombining each ancestor from its children.
        while pos > 1:
            pos //= 2
            self.st[pos] = self.fn(self.st[pos * 2], self.st[pos * 2 + 1])

    def query(self, l: int, r: int) -> T:  # noqa: E741
        """
        Get range query value in log(N) time
        :param l: left element index
        :param r: right element index
        :return: element combined in the range [l, r]

        >>> st = SegmentTree([1, 2, 3, 4], lambda a, b: a + b)
        >>> st.query(0, 2)
        6
        >>> st.query(1, 2)
        5
        >>> st.query(0, 3)
        10
        >>> st.query(2, 3)
        7
        """
        lo, hi = l + self.N, r + self.N
        res = None
        # Standard iterative range walk: absorb a boundary leaf whenever it
        # is the "odd one out" of its parent, then move both cursors up.
        while lo <= hi:
            if lo % 2 == 1:
                res = self.st[lo] if res is None else self.fn(res, self.st[lo])
                lo += 1
            if hi % 2 == 0:
                res = self.st[hi] if res is None else self.fn(res, self.st[hi])
                hi -= 1
            lo //= 2
            hi //= 2
        return res
+
+
if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    # New value for each index, applied one at a time below.
    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments():
        """
        Test all possible segments
        """
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                segment = test_array[i : j + 1]
                # Compare each tree's answer against a brute-force reduce.
                assert min_segment_tree.query(i, j) == reduce(min, segment)
                assert max_segment_tree.query(i, j) == reduce(max, segment)
                assert sum_segment_tree.query(i, j) == reduce(
                    lambda a, b: a + b, segment
                )

    test_all_segments()

    # Re-verify every segment after each single-element update.
    for index, value in test_updates.items():
        test_array[index] = value
        for tree in (min_segment_tree, max_segment_tree, sum_segment_tree):
            tree.update(index, value)
        test_all_segments()
diff --git a/data_structures/binary_tree/number_of_possible_binary_trees.py b/data_structures/binary_tree/number_of_possible_binary_trees.py
new file mode 100644
index 000000000000..1ad8f2ed4287
--- /dev/null
+++ b/data_structures/binary_tree/number_of_possible_binary_trees.py
@@ -0,0 +1,102 @@
+"""
+Hey, we are going to find an exciting number called the Catalan number, which is used to find
+the number of possible binary search trees from tree of a given number of nodes.
+
+We will use the formula: t(n) = SUMMATION(i = 1 to n)t(i-1)t(n-i)
+
+Further details at Wikipedia: https://en.wikipedia.org/wiki/Catalan_number
+"""
+"""
+Our Contribution:
+Basically we Create the 2 function:
+ 1. catalan_number(node_count: int) -> int
+ Returns the number of possible binary search trees for n nodes.
+ 2. binary_tree_count(node_count: int) -> int
+ Returns the number of possible binary trees for n nodes.
+"""
+
+
def binomial_coefficient(n: int, k: int) -> int:
    """
    Compute the binomial coefficient C(n, k) = n! / (k! * (n - k)!)
    in O(k) time without building large intermediate factorials.
    https://en.wikipedia.org/wiki/Binomial_coefficient

    :param n: size of the set to choose from
    :param k: number of elements chosen
    :return: the number of k-element subsets of an n-element set

    >>> binomial_coefficient(4, 2)
    6
    >>> binomial_coefficient(5, 0)
    1
    >>> binomial_coefficient(5, 5)
    1
    >>> binomial_coefficient(3, 5)
    0
    >>> binomial_coefficient(4, -1)
    0
    """
    # By convention C(n, k) is 0 when k is out of range; the original code
    # silently returned 1 for these inputs.
    if k < 0 or k > n:
        return 0
    # Since C(n, k) == C(n, n - k), loop over the smaller of the two.
    k = min(k, n - k)
    result = 1
    for i in range(k):
        # Multiply before dividing; the division is exact at every step
        # because result is always a binomial coefficient times (n - i).
        result = result * (n - i) // (i + 1)
    return result
+
+
def catalan_number(node_count: int) -> int:
    """
    Return the Catalan number for node_count using the closed form
    C(2n, n) / (n + 1), which the binomial coefficient evaluates in O(n).

    :param node_count: number of nodes (n)
    :return: Catalan number of node_count nodes

    >>> catalan_number(5)
    42
    >>> catalan_number(6)
    132
    """
    pairs = 2 * node_count
    return binomial_coefficient(pairs, node_count) // (node_count + 1)
+
+
def factorial(n: int) -> int:
    """
    Compute n! iteratively.

    :param n: non-negative integer to find the factorial of
    :return: factorial of n
    :raises ValueError: if n is negative

    >>> import math
    >>> all(factorial(i) == math.factorial(i) for i in range(10))
    True
    >>> factorial(-5)  # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    ValueError: factorial() not defined for negative values
    """
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    product = 1
    # Starting the multiplication at 2 skips the no-op factor of 1.
    term = 2
    while term <= n:
        product *= term
        term += 1
    return product
+
+
def binary_tree_count(node_count: int) -> int:
    """
    Return the number of possible binary trees on node_count nodes:
    the Catalan number (shapes of BSTs) times n! (labelings per shape).

    :param node_count: number of nodes
    :return: number of possible binary trees

    >>> binary_tree_count(5)
    5040
    >>> binary_tree_count(6)
    95040
    """
    shapes = catalan_number(node_count)
    labelings = factorial(node_count)
    return shapes * labelings
+
+
if __name__ == "__main__":
    # Empty input falls back to 0, which the validation below rejects.
    raw = input("Enter the number of nodes: ").strip()
    node_count = int(raw or 0)
    if node_count <= 0:
        raise ValueError("We need some nodes to work with.")
    binary_trees = binary_tree_count(node_count)
    search_trees = catalan_number(node_count)
    print(
        f"Given {node_count} nodes, there are {binary_trees} "
        f"binary trees and {search_trees} binary search trees."
    )
diff --git a/data_structures/binary_tree/red_black_tree.py b/data_structures/binary_tree/red_black_tree.py
new file mode 100644
index 000000000000..5d721edfa45b
--- /dev/null
+++ b/data_structures/binary_tree/red_black_tree.py
@@ -0,0 +1,718 @@
+"""
+A Red-Black tree: a self-balancing binary search tree.
+"""
+from typing import Iterator, Optional
+
+
class RedBlackTree:
    """
    A Red-Black tree, which is a self-balancing BST (binary search
    tree).
    This tree has similar performance to AVL trees, but the balancing is
    less strict, so it will perform faster for writing/deleting nodes
    and slower for reading in the average case, though, because they're
    both balanced binary search trees, both will get the same asymptotic
    performance.
    To read more about them, https://en.wikipedia.org/wiki/Red–black_tree
    Unless otherwise specified, all asymptotic runtimes are specified in
    terms of the size of the tree.
    """

    def __init__(
        self,
        label: Optional[int] = None,
        color: int = 0,
        parent: Optional["RedBlackTree"] = None,
        left: Optional["RedBlackTree"] = None,
        right: Optional["RedBlackTree"] = None,
    ) -> None:
        """Initialize a new Red-Black Tree node with the given values:
        label: The value associated with this node (None for an empty tree)
        color: 0 if black, 1 if red
        parent: The parent to this node
        left: This node's left child
        right: This node's right child
        """
        self.label = label
        self.parent = parent
        self.left = left
        self.right = right
        self.color = color

    # Here are functions which are specific to red-black trees

    def rotate_left(self) -> "RedBlackTree":
        """Rotate the subtree rooted at this node to the left and
        returns the new root to this subtree.
        Performing one rotation can be done in O(1).
        """
        parent = self.parent
        right = self.right
        # This node adopts its right child's left subtree.
        self.right = right.left
        if self.right:
            self.right.parent = self
        self.parent = right
        right.left = self
        # Re-link the rotated subtree into the original parent, if any.
        if parent is not None:
            if parent.left == self:
                parent.left = right
            else:
                parent.right = right
        right.parent = parent
        return right

    def rotate_right(self) -> "RedBlackTree":
        """Rotate the subtree rooted at this node to the right and
        returns the new root to this subtree.
        Performing one rotation can be done in O(1).
        """
        parent = self.parent
        left = self.left
        # This node adopts its left child's right subtree.
        self.left = left.right
        if self.left:
            self.left.parent = self
        self.parent = left
        left.right = self
        # Re-link the rotated subtree into the original parent, if any.
        if parent is not None:
            if parent.right is self:
                parent.right = left
            else:
                parent.left = left
        left.parent = parent
        return left

    def insert(self, label: int) -> "RedBlackTree":
        """Inserts label into the subtree rooted at self, performs any
        rotations necessary to maintain balance, and then returns the
        new root to this subtree (likely self).
        This is guaranteed to run in O(log(n)) time.
        """
        if self.label is None:
            # Only possible with an empty tree
            self.label = label
            return self
        if self.label == label:
            # Duplicate labels are ignored.
            return self
        elif self.label > label:
            if self.left:
                self.left.insert(label)
            else:
                # New nodes are inserted red, then the coloring is repaired.
                self.left = RedBlackTree(label, 1, self)
                self.left._insert_repair()
        else:
            if self.right:
                self.right.insert(label)
            else:
                self.right = RedBlackTree(label, 1, self)
                self.right._insert_repair()
        # Repairs may have rotated a new root above us.
        return self.parent or self

    def _insert_repair(self) -> None:
        """Repair the coloring from inserting into a tree."""
        if self.parent is None:
            # This node is the root, so it just needs to be black
            self.color = 0
        elif color(self.parent) == 0:
            # If the parent is black, then it just needs to be red
            self.color = 1
        else:
            uncle = self.parent.sibling
            if color(uncle) == 0:
                # Red parent, black uncle: rotate so the red-red violation
                # can be fixed with a recolor.
                if self.is_left() and self.parent.is_right():
                    self.parent.rotate_right()
                    self.right._insert_repair()
                elif self.is_right() and self.parent.is_left():
                    self.parent.rotate_left()
                    self.left._insert_repair()
                elif self.is_left():
                    self.grandparent.rotate_right()
                    self.parent.color = 0
                    self.parent.right.color = 1
                else:
                    self.grandparent.rotate_left()
                    self.parent.color = 0
                    self.parent.left.color = 1
            else:
                # Red parent and red uncle: push the blackness down from
                # the grandparent and continue repairing from there.
                self.parent.color = 0
                uncle.color = 0
                self.grandparent.color = 1
                self.grandparent._insert_repair()

    def remove(self, label: int) -> "RedBlackTree":
        """Remove label from this tree."""
        if self.label == label:
            if self.left and self.right:
                # It's easier to balance a node with at most one child,
                # so we replace this node with the greatest one less than
                # it and remove that.
                value = self.left.get_max()
                self.label = value
                self.left.remove(value)
            else:
                # This node has at most one non-None child, so we don't
                # need to replace
                child = self.left or self.right
                if self.color == 1:
                    # This node is red, and its child is black
                    # The only way this happens to a node with one child
                    # is if both children are None leaves.
                    # We can just remove this node and call it a day.
                    if self.is_left():
                        self.parent.left = None
                    else:
                        self.parent.right = None
                else:
                    # The node is black
                    if child is None:
                        # This node and its child are black
                        if self.parent is None:
                            # The tree is now empty
                            return RedBlackTree(None)
                        else:
                            self._remove_repair()
                            if self.is_left():
                                self.parent.left = None
                            else:
                                self.parent.right = None
                            self.parent = None
                    else:
                        # This node is black and its child is red
                        # Move the child node here and make it black
                        self.label = child.label
                        self.left = child.left
                        self.right = child.right
                        if self.left:
                            self.left.parent = self
                        if self.right:
                            self.right.parent = self
        elif self.label > label:
            # Target (if present) is in the left subtree.
            if self.left:
                self.left.remove(label)
        else:
            # Target (if present) is in the right subtree.
            if self.right:
                self.right.remove(label)
        # Repairs may have rotated a new root above us.
        return self.parent or self

    def _remove_repair(self) -> None:
        """Repair the coloring of the tree that may have been messed up."""
        # Case: red sibling — rotate so the sibling becomes black.
        if color(self.sibling) == 1:
            self.sibling.color = 0
            self.parent.color = 1
            if self.is_left():
                self.parent.rotate_left()
            else:
                self.parent.rotate_right()
        # Case: black parent, black sibling with black children — recolor
        # and push the double-black problem up to the parent.
        if (
            color(self.parent) == 0
            and color(self.sibling) == 0
            and color(self.sibling.left) == 0
            and color(self.sibling.right) == 0
        ):
            self.sibling.color = 1
            self.parent._remove_repair()
            return
        # Case: red parent, black sibling with black children — a local
        # recolor restores the black heights.
        if (
            color(self.parent) == 1
            and color(self.sibling) == 0
            and color(self.sibling.left) == 0
            and color(self.sibling.right) == 0
        ):
            self.sibling.color = 1
            self.parent.color = 0
            return
        # Remaining cases: rotate the sibling's red child into position,
        # then rotate the parent to rebalance.
        if (
            self.is_left()
            and color(self.sibling) == 0
            and color(self.sibling.right) == 0
            and color(self.sibling.left) == 1
        ):
            self.sibling.rotate_right()
            self.sibling.color = 0
            self.sibling.right.color = 1
        if (
            self.is_right()
            and color(self.sibling) == 0
            and color(self.sibling.right) == 1
            and color(self.sibling.left) == 0
        ):
            self.sibling.rotate_left()
            self.sibling.color = 0
            self.sibling.left.color = 1
        if (
            self.is_left()
            and color(self.sibling) == 0
            and color(self.sibling.right) == 1
        ):
            self.parent.rotate_left()
            self.grandparent.color = self.parent.color
            self.parent.color = 0
            self.parent.sibling.color = 0
        if (
            self.is_right()
            and color(self.sibling) == 0
            and color(self.sibling.left) == 1
        ):
            self.parent.rotate_right()
            self.grandparent.color = self.parent.color
            self.parent.color = 0
            self.parent.sibling.color = 0

    def check_color_properties(self) -> bool:
        """Check the coloring of the tree, and return True iff the tree
        is colored in a way which matches these five properties:
        (wording stolen from wikipedia article)
        1. Each node is either red or black.
        2. The root node is black.
        3. All leaves are black.
        4. If a node is red, then both its children are black.
        5. Every path from any node to all of its descendent NIL nodes
        has the same number of black nodes.
        This function runs in O(n) time, because properties 4 and 5 take
        that long to check.
        """
        # I assume property 1 to hold because there is nothing that can
        # make the color be anything other than 0 or 1.

        # Property 2
        if self.color:
            # The root was red
            print("Property 2")
            return False

        # Property 3 does not need to be checked, because None is assumed
        # to be black and is all the leaves.

        # Property 4
        if not self.check_coloring():
            print("Property 4")
            return False

        # Property 5
        if self.black_height() is None:
            print("Property 5")
            return False
        # All properties were met
        return True

    def check_coloring(self) -> bool:
        """A helper function to recursively check Property 4 of a
        Red-Black Tree. See check_color_properties for more info.
        Returns True iff no red node has a red child.
        """
        if self.color == 1:
            if color(self.left) == 1 or color(self.right) == 1:
                return False
        if self.left and not self.left.check_coloring():
            return False
        if self.right and not self.right.check_coloring():
            return False
        return True

    def black_height(self) -> Optional[int]:
        """Returns the number of black nodes from this node to the
        leaves of the tree, or None if there isn't one such value (the
        tree is color incorrectly).
        """
        # Called unbound (RedBlackTree.black_height(child)) so self may
        # legitimately be None here, representing a NIL leaf.
        if self is None:
            # If we're already at a leaf, there is no path
            return 1
        left = RedBlackTree.black_height(self.left)
        right = RedBlackTree.black_height(self.right)
        if left is None or right is None:
            # There are issues with coloring below children nodes
            return None
        if left != right:
            # The two children have unequal depths
            return None
        # Return the black depth of children, plus one if this node is
        # black
        return left + (1 - self.color)

    # Here are functions which are general to all binary search trees

    def __contains__(self, label: int) -> bool:
        """Search through the tree for label, returning True iff it is
        found somewhere in the tree.
        Guaranteed to run in O(log(n)) time.
        """
        return self.search(label) is not None

    def search(self, label: int) -> Optional["RedBlackTree"]:
        """Search through the tree for label, returning its node if
        it's found, and None otherwise.
        This method is guaranteed to run in O(log(n)) time.
        """
        if self.label == label:
            return self
        elif label > self.label:
            if self.right is None:
                return None
            else:
                return self.right.search(label)
        else:
            if self.left is None:
                return None
            else:
                return self.left.search(label)

    def floor(self, label: int) -> Optional[int]:
        """Returns the largest element in this tree which is at most label,
        or None if no element is small enough.
        This method is guaranteed to run in O(log(n)) time."""
        if self.label == label:
            return self.label
        elif self.label > label:
            if self.left:
                return self.left.floor(label)
            else:
                return None
        else:
            if self.right:
                # A larger candidate may exist to the right; fall back to
                # this node's label if not.
                attempt = self.right.floor(label)
                if attempt is not None:
                    return attempt
            return self.label

    def ceil(self, label: int) -> Optional[int]:
        """Returns the smallest element in this tree which is at least label,
        or None if no element is large enough.
        This method is guaranteed to run in O(log(n)) time.
        """
        if self.label == label:
            return self.label
        elif self.label < label:
            if self.right:
                return self.right.ceil(label)
            else:
                return None
        else:
            if self.left:
                # A smaller candidate may exist to the left; fall back to
                # this node's label if not.
                attempt = self.left.ceil(label)
                if attempt is not None:
                    return attempt
            return self.label

    def get_max(self) -> int:
        """Returns the largest element in this tree.
        This method is guaranteed to run in O(log(n)) time.
        """
        if self.right:
            # Go as far right as possible
            return self.right.get_max()
        else:
            return self.label

    def get_min(self) -> int:
        """Returns the smallest element in this tree.
        This method is guaranteed to run in O(log(n)) time.
        """
        if self.left:
            # Go as far left as possible
            return self.left.get_min()
        else:
            return self.label

    @property
    def grandparent(self) -> Optional["RedBlackTree"]:
        """Get the current node's grandparent, or None if it doesn't exist."""
        if self.parent is None:
            return None
        else:
            return self.parent.parent

    @property
    def sibling(self) -> Optional["RedBlackTree"]:
        """Get the current node's sibling, or None if it doesn't exist."""
        if self.parent is None:
            return None
        elif self.parent.left is self:
            return self.parent.right
        else:
            return self.parent.left

    def is_left(self) -> bool:
        """Returns true iff this node is the left child of its parent."""
        # NOTE(review): returns None (falsy) rather than False when there
        # is no parent — callers only use this in boolean context.
        return self.parent and self.parent.left is self

    def is_right(self) -> bool:
        """Returns true iff this node is the right child of its parent."""
        # NOTE(review): returns None (falsy) rather than False when there
        # is no parent — callers only use this in boolean context.
        return self.parent and self.parent.right is self

    def __bool__(self) -> bool:
        # Every node (even an empty-tree sentinel) is truthy.
        return True

    def __len__(self) -> int:
        """
        Return the number of nodes in this tree.
        """
        ln = 1
        if self.left:
            ln += len(self.left)
        if self.right:
            ln += len(self.right)
        return ln

    def preorder_traverse(self) -> Iterator[int]:
        """Yield labels in root-left-right order."""
        yield self.label
        if self.left:
            yield from self.left.preorder_traverse()
        if self.right:
            yield from self.right.preorder_traverse()

    def inorder_traverse(self) -> Iterator[int]:
        """Yield labels in sorted (left-root-right) order."""
        if self.left:
            yield from self.left.inorder_traverse()
        yield self.label
        if self.right:
            yield from self.right.inorder_traverse()

    def postorder_traverse(self) -> Iterator[int]:
        """Yield labels in left-right-root order."""
        if self.left:
            yield from self.left.postorder_traverse()
        if self.right:
            yield from self.right.postorder_traverse()
        yield self.label

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return "'{} {}'".format(self.label, (self.color and "red") or "blk")
        return pformat(
            {
                "%s %s"
                % (self.label, (self.color and "red") or "blk"): (self.left, self.right)
            },
            indent=1,
        )

    def __eq__(self, other) -> bool:
        """Test if two trees are equal."""
        # Structural equality: labels and both subtrees must match.
        if self.label == other.label:
            return self.left == other.left and self.right == other.right
        else:
            return False
+
+
def color(node) -> int:
    """Return a node's color, treating a None (NIL leaf) as black (0)."""
    return 0 if node is None else node.color
+
+
+"""
+Code for testing the various
+functions of the red-black tree.
+"""
+
+
def test_rotations() -> bool:
    """Test that the rotate_left and rotate_right functions work.

    Builds a balanced 7-node tree, rotates it left and compares with the
    expected wiring, then rotates right twice and compares again.
    Returns True iff both rotated trees match the hand-built expectations.
    """
    # Make a tree to test on
    tree = RedBlackTree(0)
    tree.left = RedBlackTree(-10, parent=tree)
    tree.right = RedBlackTree(10, parent=tree)
    tree.left.left = RedBlackTree(-20, parent=tree.left)
    tree.left.right = RedBlackTree(-5, parent=tree.left)
    tree.right.left = RedBlackTree(5, parent=tree.right)
    tree.right.right = RedBlackTree(20, parent=tree.right)
    # Make the right rotation
    # (expected result of rotating the tree above to the left: 10 is root)
    left_rot = RedBlackTree(10)
    left_rot.left = RedBlackTree(0, parent=left_rot)
    left_rot.left.left = RedBlackTree(-10, parent=left_rot.left)
    left_rot.left.right = RedBlackTree(5, parent=left_rot.left)
    left_rot.left.left.left = RedBlackTree(-20, parent=left_rot.left.left)
    left_rot.left.left.right = RedBlackTree(-5, parent=left_rot.left.left)
    left_rot.right = RedBlackTree(20, parent=left_rot)
    tree = tree.rotate_left()
    if tree != left_rot:
        return False
    # Two right rotations: the first undoes the left rotation above.
    tree = tree.rotate_right()
    tree = tree.rotate_right()
    # Make the left rotation
    # (expected result of rotating the original tree to the right: -10 is root)
    right_rot = RedBlackTree(-10)
    right_rot.left = RedBlackTree(-20, parent=right_rot)
    right_rot.right = RedBlackTree(0, parent=right_rot)
    right_rot.right.left = RedBlackTree(-5, parent=right_rot.right)
    right_rot.right.right = RedBlackTree(10, parent=right_rot.right)
    right_rot.right.right.left = RedBlackTree(5, parent=right_rot.right.right)
    right_rot.right.right.right = RedBlackTree(20, parent=right_rot.right.right)
    if tree != right_rot:
        return False
    return True
+
+
def test_insertion_speed() -> bool:
    """Test that the tree balances inserts to O(log(n)) by doing a lot
    of them.
    """
    tree = RedBlackTree(-1)
    value = 0
    # Monotonically increasing inserts are the worst case for an
    # unbalanced BST; a balanced tree handles them quickly.
    while value < 300000:
        tree = tree.insert(value)
        value += 1
    return True
+
+
def test_insert() -> bool:
    """Test the insert() method of the tree correctly balances, colors,
    and inserts.

    Inserts seven values and compares the resulting tree (labels AND
    colors, via __eq__ on labels and structure) against the expected
    balanced red-black tree built by hand.
    """
    tree = RedBlackTree(0)
    tree.insert(8)
    tree.insert(-8)
    tree.insert(4)
    tree.insert(12)
    tree.insert(10)
    tree.insert(11)
    # Expected tree: second argument to RedBlackTree is the color
    # (0 = black, 1 = red).
    ans = RedBlackTree(0, 0)
    ans.left = RedBlackTree(-8, 0, ans)
    ans.right = RedBlackTree(8, 1, ans)
    ans.right.left = RedBlackTree(4, 0, ans.right)
    ans.right.right = RedBlackTree(11, 0, ans.right)
    ans.right.right.left = RedBlackTree(10, 1, ans.right.right)
    ans.right.right.right = RedBlackTree(12, 1, ans.right.right)
    return tree == ans
+
+
def test_insert_and_search() -> bool:
    """Tests searching through the tree for values."""
    tree = RedBlackTree(0)
    for value in (8, -8, 4, 12, 10, 11):
        tree.insert(value)
    if any(absent in tree for absent in (5, -6, -10, 13)):
        # Found something not in there
        return False
    if not all(present in tree for present in (11, 12, -8, 0)):
        # Didn't find something in there
        return False
    return True
+
+
def test_insert_delete() -> bool:
    """Test the insert() and delete() method of the tree, verifying the
    insertion and removal of elements, and the balancing of the tree.
    """
    tree = RedBlackTree(0)
    # Rebind on every call: insert/remove may rotate a new root into place.
    for value in (-12, 8, -8, 15, 4, 12, 10, 9, 11):
        tree = tree.insert(value)
    for value in (15, -12, 9):
        tree = tree.remove(value)
    if not tree.check_color_properties():
        return False
    if list(tree.inorder_traverse()) != [-8, 0, 4, 8, 10, 11, 12]:
        return False
    return True
+
+
def test_floor_ceil() -> bool:
    """Tests the floor and ceiling functions in the tree."""
    tree = RedBlackTree(0)
    for value in (-16, 16, 8, 24, 20, 22):
        tree.insert(value)
    # Each case is (query value, expected floor, expected ceil).
    cases = [(-20, None, -16), (-10, -16, 0), (8, 8, 8), (50, 24, None)]
    return all(
        tree.floor(val) == floor and tree.ceil(val) == ceil
        for val, floor, ceil in cases
    )
+
+
def test_min_max() -> bool:
    """Tests the min and max functions in the tree."""
    tree = RedBlackTree(0)
    for value in (-16, 16, 8, 24, 20, 22):
        tree.insert(value)
    return tree.get_max() == 22 and tree.get_min() == -16
+
+
def test_tree_traversal() -> bool:
    """Tests the three different tree traversal functions."""
    tree = RedBlackTree(0)
    tree = tree.insert(-16)
    for value in (16, 8, 24, 20, 22):
        tree.insert(value)
    expected = {
        "inorder": [-16, 0, 8, 16, 20, 22, 24],
        "preorder": [0, -16, 16, 8, 22, 20, 24],
        "postorder": [-16, 8, 20, 24, 22, 16, 0],
    }
    return (
        list(tree.inorder_traverse()) == expected["inorder"]
        and list(tree.preorder_traverse()) == expected["preorder"]
        and list(tree.postorder_traverse()) == expected["postorder"]
    )
+
+
def test_tree_chaining() -> bool:
    """Tests the three different tree chaining functions."""
    tree = RedBlackTree(0)
    tree = tree.insert(-16).insert(16).insert(8).insert(24).insert(20).insert(22)
    expected = {
        "inorder": [-16, 0, 8, 16, 20, 22, 24],
        "preorder": [0, -16, 16, 8, 22, 20, 24],
        "postorder": [-16, 8, 20, 24, 22, 16, 0],
    }
    return (
        list(tree.inorder_traverse()) == expected["inorder"]
        and list(tree.preorder_traverse()) == expected["preorder"]
        and list(tree.postorder_traverse()) == expected["postorder"]
    )
+
+
def print_results(msg: str, passes: bool) -> None:
    """Print a pass/fail summary line for a named test."""
    suffix = "works!" if passes else "doesn't work :("
    print(str(msg), suffix)
+
+
def pytests() -> None:
    """Run every deterministic red-black tree check as an assertion."""
    checks = (
        test_rotations,
        test_insert,
        test_insert_and_search,
        test_insert_delete,
        test_floor_ceil,
        test_tree_traversal,
        test_tree_chaining,
    )
    for check in checks:
        assert check()
+
+
def main() -> None:
    """
    >>> pytests()
    """
    # (label, test) pairs, printed in the original order — including the
    # repeated "Tree traversal" label for the chaining test.
    results = (
        ("Rotating right and left", test_rotations),
        ("Inserting", test_insert),
        ("Searching", test_insert_and_search),
        ("Deleting", test_insert_delete),
        ("Floor and ceil", test_floor_ceil),
        ("Tree traversal", test_tree_traversal),
        ("Tree traversal", test_tree_chaining),
    )
    for label, test in results:
        print_results(label, test())

    print("Testing tree balancing...")
    print("This should only be a few seconds.")
    test_insertion_speed()
    print("Done!")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/data_structures/binary tree/segment_tree.py b/data_structures/binary_tree/segment_tree.py
similarity index 50%
rename from data_structures/binary tree/segment_tree.py
rename to data_structures/binary_tree/segment_tree.py
index 001bf999f391..10451ae68bb2 100644
--- a/data_structures/binary tree/segment_tree.py
+++ b/data_structures/binary_tree/segment_tree.py
@@ -1,71 +1,78 @@
-from __future__ import print_function
import math
+
class SegmentTree:
-
def __init__(self, A):
self.N = len(A)
- self.st = [0] * (4 * self.N) # approximate the overall size of segment tree with array N
+ self.st = [0] * (
+ 4 * self.N
+ ) # approximate the overall size of segment tree with array N
self.build(1, 0, self.N - 1)
-
+
def left(self, idx):
return idx * 2
def right(self, idx):
return idx * 2 + 1
- def build(self, idx, l, r):
- if l == r:
+ def build(self, idx, l, r): # noqa: E741
+ if l == r: # noqa: E741
self.st[idx] = A[l]
else:
mid = (l + r) // 2
self.build(self.left(idx), l, mid)
self.build(self.right(idx), mid + 1, r)
- self.st[idx] = max(self.st[self.left(idx)] , self.st[self.right(idx)])
-
+ self.st[idx] = max(self.st[self.left(idx)], self.st[self.right(idx)])
+
def update(self, a, b, val):
return self.update_recursive(1, 0, self.N - 1, a - 1, b - 1, val)
-
- def update_recursive(self, idx, l, r, a, b, val): # update(1, 1, N, a, b, v) for update val v to [a,b]
+
+ def update_recursive(self, idx, l, r, a, b, val): # noqa: E741
+ """
+ update(1, 1, N, a, b, v) for update val v to [a,b]
+ """
if r < a or l > b:
return True
- if l == r :
+ if l == r: # noqa: E741
self.st[idx] = val
return True
- mid = (l+r)//2
+ mid = (l + r) // 2
self.update_recursive(self.left(idx), l, mid, a, b, val)
- self.update_recursive(self.right(idx), mid+1, r, a, b, val)
- self.st[idx] = max(self.st[self.left(idx)] , self.st[self.right(idx)])
+ self.update_recursive(self.right(idx), mid + 1, r, a, b, val)
+ self.st[idx] = max(self.st[self.left(idx)], self.st[self.right(idx)])
return True
def query(self, a, b):
return self.query_recursive(1, 0, self.N - 1, a - 1, b - 1)
- def query_recursive(self, idx, l, r, a, b): #query(1, 1, N, a, b) for query max of [a,b]
+ def query_recursive(self, idx, l, r, a, b): # noqa: E741
+ """
+ query(1, 1, N, a, b) for query max of [a,b]
+ """
if r < a or l > b:
return -math.inf
- if l >= a and r <= b:
+ if l >= a and r <= b: # noqa: E741
return self.st[idx]
- mid = (l+r)//2
+ mid = (l + r) // 2
q1 = self.query_recursive(self.left(idx), l, mid, a, b)
q2 = self.query_recursive(self.right(idx), mid + 1, r, a, b)
return max(q1, q2)
def showData(self):
showList = []
- for i in range(1,N+1):
+ for i in range(1, N + 1):
showList += [self.query(i, i)]
- print (showList)
-
+ print(showList)
+
-if __name__ == '__main__':
- A = [1,2,-4,7,3,-5,6,11,-20,9,14,15,5,2,-8]
+if __name__ == "__main__":
+ A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
N = 15
segt = SegmentTree(A)
- print (segt.query(4, 6))
- print (segt.query(7, 11))
- print (segt.query(7, 12))
- segt.update(1,3,111)
- print (segt.query(1, 15))
- segt.update(7,8,235)
+ print(segt.query(4, 6))
+ print(segt.query(7, 11))
+ print(segt.query(7, 12))
+ segt.update(1, 3, 111)
+ print(segt.query(1, 15))
+ segt.update(7, 8, 235)
segt.showData()
diff --git a/data_structures/binary_tree/segment_tree_other.py b/data_structures/binary_tree/segment_tree_other.py
new file mode 100644
index 000000000000..90afd7ca8b71
--- /dev/null
+++ b/data_structures/binary_tree/segment_tree_other.py
@@ -0,0 +1,241 @@
+"""
+Segment_tree creates a segment tree with a given array and function,
+allowing queries to be done later in log(N) time
+function takes 2 values and returns a same type value
+"""
+from collections.abc import Sequence
+from queue import Queue
+
+
+class SegmentTreeNode:
+ def __init__(self, start, end, val, left=None, right=None):
+ self.start = start
+ self.end = end
+ self.val = val
+ self.mid = (start + end) // 2
+ self.left = left
+ self.right = right
+
+ def __str__(self):
+ return f"val: {self.val}, start: {self.start}, end: {self.end}"
+
+
+class SegmentTree:
+ """
+ >>> import operator
+ >>> num_arr = SegmentTree([2, 1, 5, 3, 4], operator.add)
+ >>> for node in num_arr.traverse():
+ ... print(node)
+ ...
+ val: 15, start: 0, end: 4
+ val: 8, start: 0, end: 2
+ val: 7, start: 3, end: 4
+ val: 3, start: 0, end: 1
+ val: 5, start: 2, end: 2
+ val: 3, start: 3, end: 3
+ val: 4, start: 4, end: 4
+ val: 2, start: 0, end: 0
+ val: 1, start: 1, end: 1
+ >>>
+ >>> num_arr.update(1, 5)
+ >>> for node in num_arr.traverse():
+ ... print(node)
+ ...
+ val: 19, start: 0, end: 4
+ val: 12, start: 0, end: 2
+ val: 7, start: 3, end: 4
+ val: 7, start: 0, end: 1
+ val: 5, start: 2, end: 2
+ val: 3, start: 3, end: 3
+ val: 4, start: 4, end: 4
+ val: 2, start: 0, end: 0
+ val: 5, start: 1, end: 1
+ >>>
+ >>> num_arr.query_range(3, 4)
+ 7
+ >>> num_arr.query_range(2, 2)
+ 5
+ >>> num_arr.query_range(1, 3)
+ 13
+ >>>
+ >>> max_arr = SegmentTree([2, 1, 5, 3, 4], max)
+ >>> for node in max_arr.traverse():
+ ... print(node)
+ ...
+ val: 5, start: 0, end: 4
+ val: 5, start: 0, end: 2
+ val: 4, start: 3, end: 4
+ val: 2, start: 0, end: 1
+ val: 5, start: 2, end: 2
+ val: 3, start: 3, end: 3
+ val: 4, start: 4, end: 4
+ val: 2, start: 0, end: 0
+ val: 1, start: 1, end: 1
+ >>>
+ >>> max_arr.update(1, 5)
+ >>> for node in max_arr.traverse():
+ ... print(node)
+ ...
+ val: 5, start: 0, end: 4
+ val: 5, start: 0, end: 2
+ val: 4, start: 3, end: 4
+ val: 5, start: 0, end: 1
+ val: 5, start: 2, end: 2
+ val: 3, start: 3, end: 3
+ val: 4, start: 4, end: 4
+ val: 2, start: 0, end: 0
+ val: 5, start: 1, end: 1
+ >>>
+ >>> max_arr.query_range(3, 4)
+ 4
+ >>> max_arr.query_range(2, 2)
+ 5
+ >>> max_arr.query_range(1, 3)
+ 5
+ >>>
+ >>> min_arr = SegmentTree([2, 1, 5, 3, 4], min)
+ >>> for node in min_arr.traverse():
+ ... print(node)
+ ...
+ val: 1, start: 0, end: 4
+ val: 1, start: 0, end: 2
+ val: 3, start: 3, end: 4
+ val: 1, start: 0, end: 1
+ val: 5, start: 2, end: 2
+ val: 3, start: 3, end: 3
+ val: 4, start: 4, end: 4
+ val: 2, start: 0, end: 0
+ val: 1, start: 1, end: 1
+ >>>
+ >>> min_arr.update(1, 5)
+ >>> for node in min_arr.traverse():
+ ... print(node)
+ ...
+ val: 2, start: 0, end: 4
+ val: 2, start: 0, end: 2
+ val: 3, start: 3, end: 4
+ val: 2, start: 0, end: 1
+ val: 5, start: 2, end: 2
+ val: 3, start: 3, end: 3
+ val: 4, start: 4, end: 4
+ val: 2, start: 0, end: 0
+ val: 5, start: 1, end: 1
+ >>>
+ >>> min_arr.query_range(3, 4)
+ 3
+ >>> min_arr.query_range(2, 2)
+ 5
+ >>> min_arr.query_range(1, 3)
+ 3
+ >>>
+
+ """
+
+ def __init__(self, collection: Sequence, function):
+ self.collection = collection
+ self.fn = function
+ if self.collection:
+ self.root = self._build_tree(0, len(collection) - 1)
+
+ def update(self, i, val):
+ """
+ Update an element in log(N) time
+ :param i: position to be updated
+ :param val: new value
+ >>> import operator
+ >>> num_arr = SegmentTree([2, 1, 5, 3, 4], operator.add)
+ >>> num_arr.update(1, 5)
+ >>> num_arr.query_range(1, 3)
+ 13
+ """
+ self._update_tree(self.root, i, val)
+
+ def query_range(self, i, j):
+ """
+ Get range query value in log(N) time
+ :param i: left element index
+ :param j: right element index
+ :return: element combined in the range [i, j]
+ >>> import operator
+ >>> num_arr = SegmentTree([2, 1, 5, 3, 4], operator.add)
+ >>> num_arr.update(1, 5)
+ >>> num_arr.query_range(3, 4)
+ 7
+ >>> num_arr.query_range(2, 2)
+ 5
+ >>> num_arr.query_range(1, 3)
+ 13
+ >>>
+ """
+ return self._query_range(self.root, i, j)
+
+ def _build_tree(self, start, end):
+ if start == end:
+ return SegmentTreeNode(start, end, self.collection[start])
+ mid = (start + end) // 2
+ left = self._build_tree(start, mid)
+ right = self._build_tree(mid + 1, end)
+ return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)
+
+ def _update_tree(self, node, i, val):
+ if node.start == i and node.end == i:
+ node.val = val
+ return
+ if i <= node.mid:
+ self._update_tree(node.left, i, val)
+ else:
+ self._update_tree(node.right, i, val)
+ node.val = self.fn(node.left.val, node.right.val)
+
+ def _query_range(self, node, i, j):
+ if node.start == i and node.end == j:
+ return node.val
+
+ if i <= node.mid:
+ if j <= node.mid:
+ # range in left child tree
+ return self._query_range(node.left, i, j)
+ else:
+ # range in left child tree and right child tree
+ return self.fn(
+ self._query_range(node.left, i, node.mid),
+ self._query_range(node.right, node.mid + 1, j),
+ )
+ else:
+ # range in right child tree
+ return self._query_range(node.right, i, j)
+
+ def traverse(self):
+ if self.root is not None:
+ queue = Queue()
+ queue.put(self.root)
+ while not queue.empty():
+ node = queue.get()
+ yield node
+
+ if node.left is not None:
+ queue.put(node.left)
+
+ if node.right is not None:
+ queue.put(node.right)
+
+
+if __name__ == "__main__":
+ import operator
+
+ for fn in [operator.add, max, min]:
+ print("*" * 50)
+ arr = SegmentTree([2, 1, 5, 3, 4], fn)
+ for node in arr.traverse():
+ print(node)
+ print()
+
+ arr.update(1, 5)
+ for node in arr.traverse():
+ print(node)
+ print()
+
+ print(arr.query_range(3, 4)) # 7
+ print(arr.query_range(2, 2)) # 5
+ print(arr.query_range(1, 3)) # 13
+ print()
diff --git a/data_structures/binary_tree/treap.py b/data_structures/binary_tree/treap.py
new file mode 100644
index 000000000000..26648f7aba61
--- /dev/null
+++ b/data_structures/binary_tree/treap.py
@@ -0,0 +1,184 @@
+# flake8: noqa
+
+from __future__ import annotations
+
+from random import random
+
+
+class Node:
+ """
+ Treap's node
+ Treap is a binary tree by value and heap by priority
+ """
+
+ def __init__(self, value: int = None):
+ self.value = value
+ self.prior = random()
+ self.left = None
+ self.right = None
+
+ def __repr__(self):
+ from pprint import pformat
+
+ if self.left is None and self.right is None:
+ return f"'{self.value}: {self.prior:.5}'"
+ else:
+ return pformat(
+ {f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1
+ )
+
+ def __str__(self):
+ value = str(self.value) + " "
+ left = str(self.left or "")
+ right = str(self.right or "")
+ return value + left + right
+
+
+def split(root: Node, value: int) -> tuple[Node, Node]:
+ """
+ We split current tree into 2 trees with value:
+
+ Left tree contains all values less than split value.
+ Right tree contains all values greater than or equal to the split value
+ """
+ if root is None: # None tree is split into 2 Nones
+ return (None, None)
+ elif root.value is None:
+ return (None, None)
+ else:
+ if value < root.value:
+ """
+ Right tree's root will be current node.
+ Now we split(with the same value) current node's left son
+ Left tree: left part of that split
+ Right tree's left son: right part of that split
+ """
+ left, root.left = split(root.left, value)
+ return (left, root)
+ else:
+ """
+ Just symmetric to previous case
+ """
+ root.right, right = split(root.right, value)
+ return (root, right)
+
+
+def merge(left: Node, right: Node) -> Node:
+ """
+ We merge 2 trees into one.
+ Note: all left tree's values must be less than all right tree's
+ """
+ if (not left) or (not right): # If one node is None, return the other
+ return left or right
+ elif left.prior < right.prior:
+ """
+ Left will be root because it has more priority
+ Now we need to merge left's right son and right tree
+ """
+ left.right = merge(left.right, right)
+ return left
+ else:
+ """
+ Symmetric as well
+ """
+ right.left = merge(left, right.left)
+ return right
+
+
+def insert(root: Node, value: int) -> Node:
+ """
+ Insert element
+
+ Split current tree with a value into left, right,
+ Insert new node into the middle
+ Merge left, node, right into root
+ """
+ node = Node(value)
+ left, right = split(root, value)
+ return merge(merge(left, node), right)
+
+
+def erase(root: Node, value: int) -> Node:
+ """
+ Erase element
+
+ Split all nodes with values less into left,
+ Split all nodes with values greater into right.
+ Merge left, right
+ """
+ left, right = split(root, value - 1)
+ _, right = split(right, value)
+ return merge(left, right)
+
+
+def inorder(root: Node):
+ """
+ Just recursive print of a tree
+ """
+ if not root: # None
+ return
+ else:
+ inorder(root.left)
+ print(root.value, end=",")
+ inorder(root.right)
+
+
+def interactTreap(root, args):
+ """
+ Commands:
+ + value to add value into treap
+ - value to erase all nodes with value
+
+ >>> root = interactTreap(None, "+1")
+ >>> inorder(root)
+ 1,
+ >>> root = interactTreap(root, "+3 +5 +17 +19 +2 +16 +4 +0")
+ >>> inorder(root)
+ 0,1,2,3,4,5,16,17,19,
+ >>> root = interactTreap(root, "+4 +4 +4")
+ >>> inorder(root)
+ 0,1,2,3,4,4,4,4,5,16,17,19,
+ >>> root = interactTreap(root, "-0")
+ >>> inorder(root)
+ 1,2,3,4,4,4,4,5,16,17,19,
+ >>> root = interactTreap(root, "-4")
+ >>> inorder(root)
+ 1,2,3,5,16,17,19,
+ >>> root = interactTreap(root, "=0")
+ Unknown command
+ """
+ for arg in args.split():
+ if arg[0] == "+":
+ root = insert(root, int(arg[1:]))
+
+ elif arg[0] == "-":
+ root = erase(root, int(arg[1:]))
+
+ else:
+ print("Unknown command")
+
+ return root
+
+
+def main():
+ """After each command, program prints treap"""
+ root = None
+ print(
+ "enter numbers to create a tree, + value to add value into treap, "
+ "- value to erase all nodes with value. 'q' to quit. "
+ )
+
+ args = input()
+ while args != "q":
+ root = interactTreap(root, args)
+ print(root)
+ args = input()
+
+ print("good by!")
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
+ main()
diff --git a/data_structures/disjoint_set/__init__.py b/data_structures/disjoint_set/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/data_structures/disjoint_set/alternate_disjoint_set.py b/data_structures/disjoint_set/alternate_disjoint_set.py
new file mode 100644
index 000000000000..5103335bc80a
--- /dev/null
+++ b/data_structures/disjoint_set/alternate_disjoint_set.py
@@ -0,0 +1,68 @@
+"""
+Implements a disjoint set using Lists and some added heuristics for efficiency
+Union by Rank Heuristic and Path Compression
+"""
+
+
+class DisjointSet:
+ def __init__(self, set_counts: list) -> None:
+ """
+ Initialize with a list of the number of items in each set
+ and with rank = 1 for each set
+ """
+ self.set_counts = set_counts
+ self.max_set = max(set_counts)
+ num_sets = len(set_counts)
+ self.ranks = [1] * num_sets
+ self.parents = list(range(num_sets))
+
+ def merge(self, src: int, dst: int) -> bool:
+ """
+ Merge two sets together using the Union by Rank heuristic
+ Return True if successful
+ (False if src and dst are already in the same set)
+ >>> A = DisjointSet([1, 1, 1])
+ >>> A.merge(1, 2)
+ True
+ >>> A.merge(0, 2)
+ True
+ >>> A.merge(0, 1)
+ False
+ """
+ src_parent = self.get_parent(src)
+ dst_parent = self.get_parent(dst)
+
+ if src_parent == dst_parent:
+ return False
+
+ if self.ranks[dst_parent] >= self.ranks[src_parent]:
+ self.set_counts[dst_parent] += self.set_counts[src_parent]
+ self.set_counts[src_parent] = 0
+ self.parents[src_parent] = dst_parent
+ if self.ranks[dst_parent] == self.ranks[src_parent]:
+ self.ranks[dst_parent] += 1
+ joined_set_size = self.set_counts[dst_parent]
+ else:
+ self.set_counts[src_parent] += self.set_counts[dst_parent]
+ self.set_counts[dst_parent] = 0
+ self.parents[dst_parent] = src_parent
+ joined_set_size = self.set_counts[src_parent]
+
+ self.max_set = max(self.max_set, joined_set_size)
+ return True
+
+ def get_parent(self, disj_set: int) -> int:
+ """
+ Find the Parent of a given set
+ >>> A = DisjointSet([1, 1, 1])
+ >>> A.merge(1, 2)
+ True
+ >>> A.get_parent(0)
+ 0
+ >>> A.get_parent(1)
+ 2
+ """
+ if self.parents[disj_set] == disj_set:
+ return disj_set
+ self.parents[disj_set] = self.get_parent(self.parents[disj_set])
+ return self.parents[disj_set]
diff --git a/data_structures/disjoint_set/disjoint_set.py b/data_structures/disjoint_set/disjoint_set.py
new file mode 100644
index 000000000000..a93b89621c4a
--- /dev/null
+++ b/data_structures/disjoint_set/disjoint_set.py
@@ -0,0 +1,79 @@
+"""
+ disjoint set
+ Reference: https://en.wikipedia.org/wiki/Disjoint-set_data_structure
+"""
+
+
+class Node:
+ def __init__(self, data):
+ self.data = data
+
+
+def make_set(x):
+ """
+ make x into a singleton set.
+ """
+ # rank is the distance from x to its parent
+ # root's rank is 0
+ x.rank = 0
+ x.parent = x
+
+
+def union_set(x, y):
+ """
+ union two sets.
+ the set with the bigger rank becomes the parent, so that the
+ disjoint-set tree stays flat.
+ """
+ x, y = find_set(x), find_set(y)
+ if x.rank > y.rank:
+ y.parent = x
+ else:
+ x.parent = y
+ if x.rank == y.rank:
+ y.rank += 1
+
+
+def find_set(x):
+ """
+ return the parent of x
+ """
+ if x != x.parent:
+ x.parent = find_set(x.parent)
+ return x.parent
+
+
+def find_python_set(node: Node) -> set:
+ """
+ Return a Python Standard Library set that contains node.data.
+ """
+ sets = ({0, 1, 2}, {3, 4, 5})
+ for s in sets:
+ if node.data in s:
+ return s
+ raise ValueError(f"{node.data} is not in {sets}")
+
+
+def test_disjoint_set():
+ """
+ >>> test_disjoint_set()
+ """
+ vertex = [Node(i) for i in range(6)]
+ for v in vertex:
+ make_set(v)
+
+ union_set(vertex[0], vertex[1])
+ union_set(vertex[1], vertex[2])
+ union_set(vertex[3], vertex[4])
+ union_set(vertex[3], vertex[5])
+
+ for node0 in vertex:
+ for node1 in vertex:
+ if find_python_set(node0).isdisjoint(find_python_set(node1)):
+ assert find_set(node0) != find_set(node1)
+ else:
+ assert find_set(node0) == find_set(node1)
+
+
+if __name__ == "__main__":
+ test_disjoint_set()
diff --git a/data_structures/hashing/__init__.py b/data_structures/hashing/__init__.py
index b96ddd478458..e69de29bb2d1 100644
--- a/data_structures/hashing/__init__.py
+++ b/data_structures/hashing/__init__.py
@@ -1,6 +0,0 @@
-from .hash_table import HashTable
-
-class QuadraticProbing(HashTable):
-
- def __init__(self):
- super(self.__class__, self).__init__()
diff --git a/data_structures/hashing/double_hash.py b/data_structures/hashing/double_hash.py
index 60098cda0ce1..57b1ffff4770 100644
--- a/data_structures/hashing/double_hash.py
+++ b/data_structures/hashing/double_hash.py
@@ -1,33 +1,41 @@
#!/usr/bin/env python3
-
from .hash_table import HashTable
-from number_theory.prime_numbers import next_prime, check_prime
+from .number_theory.prime_numbers import check_prime, next_prime
class DoubleHash(HashTable):
"""
- Hash Table example with open addressing and Double Hash
+ Hash Table example with open addressing and Double Hash
"""
+
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __hash_function_2(self, value, data):
- next_prime_gt = next_prime(value % self.size_table) \
- if not check_prime(value % self.size_table) else value % self.size_table #gt = bigger than
+ next_prime_gt = (
+ next_prime(value % self.size_table)
+ if not check_prime(value % self.size_table)
+ else value % self.size_table
+ ) # gt = bigger than
return next_prime_gt - (data % next_prime_gt)
def __hash_double_function(self, key, data, increment):
return (increment * self.__hash_function_2(key, data)) % self.size_table
- def _colision_resolution(self, key, data=None):
+ def _collision_resolution(self, key, data=None):
i = 1
new_key = self.hash_function(data)
while self.values[new_key] is not None and self.values[new_key] != key:
- new_key = self.__hash_double_function(key, data, i) if \
- self.balanced_factor() >= self.lim_charge else None
- if new_key is None: break
- else: i += 1
+ new_key = (
+ self.__hash_double_function(key, data, i)
+ if self.balanced_factor() >= self.lim_charge
+ else None
+ )
+ if new_key is None:
+ break
+ else:
+ i += 1
return new_key
diff --git a/data_structures/hashing/hash_table.py b/data_structures/hashing/hash_table.py
index f0de128d1ad1..fd9e6eec134c 100644
--- a/data_structures/hashing/hash_table.py
+++ b/data_structures/hashing/hash_table.py
@@ -1,10 +1,10 @@
#!/usr/bin/env python3
-from number_theory.prime_numbers import next_prime
+from .number_theory.prime_numbers import next_prime
class HashTable:
"""
- Basic Hash Table example with open addressing and linear probing
+ Basic Hash Table example with open addressing and linear probing
"""
def __init__(self, size_table, charge_factor=None, lim_charge=None):
@@ -19,15 +19,16 @@ def keys(self):
return self._keys
def balanced_factor(self):
- return sum([1 for slot in self.values
- if slot is not None]) / (self.size_table * self.charge_factor)
+ return sum([1 for slot in self.values if slot is not None]) / (
+ self.size_table * self.charge_factor
+ )
def hash_function(self, key):
return key % self.size_table
def _step_by_step(self, step_ord):
- print("step {0}".format(step_ord))
+ print(f"step {step_ord}")
print([i for i in range(len(self.values))])
print(self.values)
@@ -43,11 +44,10 @@ def _set_value(self, key, data):
self.values[key] = data
self._keys[key] = data
- def _colision_resolution(self, key, data=None):
+ def _collision_resolution(self, key, data=None):
new_key = self.hash_function(key + 1)
- while self.values[new_key] is not None \
- and self.values[new_key] != key:
+ while self.values[new_key] is not None and self.values[new_key] != key:
if self.values.count(None) > 0:
new_key = self.hash_function(new_key + 1)
@@ -61,8 +61,9 @@ def rehashing(self):
survivor_values = [value for value in self.values if value is not None]
self.size_table = next_prime(self.size_table, factor=2)
self._keys.clear()
- self.values = [None] * self.size_table #hell's pointers D: don't DRY ;/
- map(self.insert_data, survivor_values)
+ self.values = [None] * self.size_table # hell's pointers D: don't DRY ;/
+ for value in survivor_values:
+ self.insert_data(value)
def insert_data(self, data):
key = self.hash_function(data)
@@ -74,11 +75,9 @@ def insert_data(self, data):
pass
else:
- colision_resolution = self._colision_resolution(key, data)
- if colision_resolution is not None:
- self._set_value(colision_resolution, data)
+ collision_resolution = self._collision_resolution(key, data)
+ if collision_resolution is not None:
+ self._set_value(collision_resolution, data)
else:
self.rehashing()
self.insert_data(data)
-
-
diff --git a/data_structures/hashing/hash_table_with_linked_list.py b/data_structures/hashing/hash_table_with_linked_list.py
index 9689e4fc9fcf..fe838268fce8 100644
--- a/data_structures/hashing/hash_table_with_linked_list.py
+++ b/data_structures/hashing/hash_table_with_linked_list.py
@@ -1,24 +1,27 @@
-from .hash_table import HashTable
from collections import deque
+from .hash_table import HashTable
+
class HashTableWithLinkedList(HashTable):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _set_value(self, key, data):
- self.values[key] = deque([]) if self.values[key] is None else self.values[key]
+ self.values[key] = deque([]) if self.values[key] is None else self.values[key]
self.values[key].appendleft(data)
self._keys[key] = self.values[key]
def balanced_factor(self):
- return sum([self.charge_factor - len(slot) for slot in self.values])\
- / self.size_table * self.charge_factor
-
- def _colision_resolution(self, key, data=None):
- if not (len(self.values[key]) == self.charge_factor
- and self.values.count(None) == 0):
- return key
- return super()._colision_resolution(key, data)
-
+ return (
+ sum([self.charge_factor - len(slot) for slot in self.values])
+ / self.size_table
+ * self.charge_factor
+ )
+ def _collision_resolution(self, key, data=None):
+ if not (
+ len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
+ ):
+ return key
+ return super()._collision_resolution(key, data)
diff --git a/data_structures/hashing/number_theory/prime_numbers.py b/data_structures/hashing/number_theory/prime_numbers.py
index 8a521bc45758..db4d40f475b2 100644
--- a/data_structures/hashing/number_theory/prime_numbers.py
+++ b/data_structures/hashing/number_theory/prime_numbers.py
@@ -5,25 +5,25 @@
def check_prime(number):
- """
- it's not the best solution
- """
- special_non_primes = [0,1,2]
- if number in special_non_primes[:2]:
- return 2
- elif number == special_non_primes[-1]:
- return 3
-
- return all([number % i for i in range(2, number)])
+ """
+ it's not the best solution
+ """
+ special_non_primes = [0, 1, 2]
+ if number in special_non_primes[:2]:
+ return 2
+ elif number == special_non_primes[-1]:
+ return 3
+
+ return all([number % i for i in range(2, number)])
def next_prime(value, factor=1, **kwargs):
value = factor * value
first_value_val = value
-
+
while not check_prime(value):
value += 1 if not ("desc" in kwargs.keys() and kwargs["desc"] is True) else -1
-
+
if value == first_value_val:
return next_prime(value + 1, **kwargs)
return value
diff --git a/data_structures/hashing/quadratic_probing.py b/data_structures/hashing/quadratic_probing.py
index f7a9ac1ae347..0930340a347f 100644
--- a/data_structures/hashing/quadratic_probing.py
+++ b/data_structures/hashing/quadratic_probing.py
@@ -5,20 +5,23 @@
class QuadraticProbing(HashTable):
"""
- Basic Hash Table example with open addressing using Quadratic Probing
+ Basic Hash Table example with open addressing using Quadratic Probing
"""
+
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
- def _colision_resolution(self, key, data=None):
+ def _collision_resolution(self, key, data=None):
i = 1
- new_key = self.hash_function(key + i*i)
+ new_key = self.hash_function(key + i * i)
- while self.values[new_key] is not None \
- and self.values[new_key] != key:
+ while self.values[new_key] is not None and self.values[new_key] != key:
i += 1
- new_key = self.hash_function(key + i*i) if not \
- self.balanced_factor() >= self.lim_charge else None
+ new_key = (
+ self.hash_function(key + i * i)
+ if not self.balanced_factor() >= self.lim_charge
+ else None
+ )
if new_key is None:
break
diff --git a/data_structures/heap/__init__.py b/data_structures/heap/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/data_structures/heap/binomial_heap.py b/data_structures/heap/binomial_heap.py
new file mode 100644
index 000000000000..334b444eaaff
--- /dev/null
+++ b/data_structures/heap/binomial_heap.py
@@ -0,0 +1,403 @@
+# flake8: noqa
+
+"""
+Binomial Heap
+Reference: Advanced Data Structures, Peter Brass
+"""
+
+
+class Node:
+ """
+ Node in a doubly-linked binomial tree, containing:
+ - value
+ - size of left subtree
+ - link to left, right and parent nodes
+ """
+
+ def __init__(self, val):
+ self.val = val
+ # Number of nodes in left subtree
+ self.left_tree_size = 0
+ self.left = None
+ self.right = None
+ self.parent = None
+
+ def mergeTrees(self, other):
+ """
+ In-place merge of two binomial trees of equal size.
+ Returns the root of the resulting tree
+ """
+ assert self.left_tree_size == other.left_tree_size, "Unequal Sizes of Blocks"
+
+ if self.val < other.val:
+ other.left = self.right
+ other.parent = None
+ if self.right:
+ self.right.parent = other
+ self.right = other
+ self.left_tree_size = self.left_tree_size * 2 + 1
+ return self
+ else:
+ self.left = other.right
+ self.parent = None
+ if other.right:
+ other.right.parent = self
+ other.right = self
+ other.left_tree_size = other.left_tree_size * 2 + 1
+ return other
+
+
+class BinomialHeap:
+ r"""
+ Min-oriented priority queue implemented with the Binomial Heap data
+ structure implemented with the BinomialHeap class. It supports:
+ - Insert element in a heap with n elements: Guaranteed logn, amortized 1
+ - Merge (meld) heaps of size m and n: O(logn + logm)
+ - Delete Min: O(logn)
+ - Peek (return min without deleting it): O(1)
+
+ Example:
+
+ Create a random permutation of 30 integers to be inserted and 25 of them deleted
+ >>> import numpy as np
+ >>> permutation = np.random.permutation(list(range(30)))
+
+ Create a Heap and insert the 30 integers
+ __init__() test
+ >>> first_heap = BinomialHeap()
+
+ 30 inserts - insert() test
+ >>> for number in permutation:
+ ... first_heap.insert(number)
+
+ Size test
+ >>> print(first_heap.size)
+ 30
+
+ Deleting - delete() test
+ >>> for i in range(25):
+ ... print(first_heap.deleteMin(), end=" ")
+ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24
+
+ Create a new Heap
+ >>> second_heap = BinomialHeap()
+ >>> vals = [17, 20, 31, 34]
+ >>> for value in vals:
+ ... second_heap.insert(value)
+
+
+ The heap should have the following structure:
+
+ 17
+ / \
+ # 31
+ / \
+ 20 34
+ / \ / \
+ # # # #
+
+ preOrder() test
+ >>> print(second_heap.preOrder())
+ [(17, 0), ('#', 1), (31, 1), (20, 2), ('#', 3), ('#', 3), (34, 2), ('#', 3), ('#', 3)]
+
+ printing Heap - __str__() test
+ >>> print(second_heap)
+ 17
+ -#
+ -31
+ --20
+ ---#
+ ---#
+ --34
+ ---#
+ ---#
+
+ mergeHeaps() test
+ >>> merged = second_heap.mergeHeaps(first_heap)
+ >>> merged.peek()
+ 17
+
+ values in merged heap; (merge is inplace)
+ >>> while not first_heap.isEmpty():
+ ... print(first_heap.deleteMin(), end=" ")
+ 17 20 25 26 27 28 29 31 34
+ """
+
+ def __init__(self, bottom_root=None, min_node=None, heap_size=0):
+ self.size = heap_size
+ self.bottom_root = bottom_root
+ self.min_node = min_node
+
+ def mergeHeaps(self, other):
+ """
+ In-place merge of two binomial heaps.
+ Both of them become the resulting merged heap
+ """
+
+ # Empty heaps corner cases
+ if other.size == 0:
+ return
+ if self.size == 0:
+ self.size = other.size
+ self.bottom_root = other.bottom_root
+ self.min_node = other.min_node
+ return
+ # Update size
+ self.size = self.size + other.size
+
+ # Update min.node
+ if self.min_node.val > other.min_node.val:
+ self.min_node = other.min_node
+ # Merge
+
+ # Order roots by left_subtree_size
+ combined_roots_list = []
+ i, j = self.bottom_root, other.bottom_root
+ while i or j:
+ if i and ((not j) or i.left_tree_size < j.left_tree_size):
+ combined_roots_list.append((i, True))
+ i = i.parent
+ else:
+ combined_roots_list.append((j, False))
+ j = j.parent
+ # Insert links between them
+ for i in range(len(combined_roots_list) - 1):
+ if combined_roots_list[i][1] != combined_roots_list[i + 1][1]:
+ combined_roots_list[i][0].parent = combined_roots_list[i + 1][0]
+ combined_roots_list[i + 1][0].left = combined_roots_list[i][0]
+ # Consecutively merge roots with same left_tree_size
+ i = combined_roots_list[0][0]
+ while i.parent:
+ if (
+ (i.left_tree_size == i.parent.left_tree_size) and (not i.parent.parent)
+ ) or (
+ i.left_tree_size == i.parent.left_tree_size
+ and i.left_tree_size != i.parent.parent.left_tree_size
+ ):
+
+ # Neighbouring Nodes
+ previous_node = i.left
+ next_node = i.parent.parent
+
+ # Merging trees
+ i = i.mergeTrees(i.parent)
+
+ # Updating links
+ i.left = previous_node
+ i.parent = next_node
+ if previous_node:
+ previous_node.parent = i
+ if next_node:
+ next_node.left = i
+ else:
+ i = i.parent
+ # Updating self.bottom_root
+ while i.left:
+ i = i.left
+ self.bottom_root = i
+
+ # Update other
+ other.size = self.size
+ other.bottom_root = self.bottom_root
+ other.min_node = self.min_node
+
+ # Return the merged heap
+ return self
+
+ def insert(self, val):
+ """
+ insert a value in the heap
+ """
+ if self.size == 0:
+ self.bottom_root = Node(val)
+ self.size = 1
+ self.min_node = self.bottom_root
+ else:
+ # Create new node
+ new_node = Node(val)
+
+ # Update size
+ self.size += 1
+
+ # update min_node
+ if val < self.min_node.val:
+ self.min_node = new_node
+ # Put new_node as a bottom_root in heap
+ self.bottom_root.left = new_node
+ new_node.parent = self.bottom_root
+ self.bottom_root = new_node
+
+ # Consecutively merge roots with same left_tree_size
+ while (
+ self.bottom_root.parent
+ and self.bottom_root.left_tree_size
+ == self.bottom_root.parent.left_tree_size
+ ):
+
+ # Next node
+ next_node = self.bottom_root.parent.parent
+
+ # Merge
+ self.bottom_root = self.bottom_root.mergeTrees(self.bottom_root.parent)
+
+ # Update Links
+ self.bottom_root.parent = next_node
+ self.bottom_root.left = None
+ if next_node:
+ next_node.left = self.bottom_root
+
+ def peek(self):
+ """
+ return min element without deleting it
+ """
+ return self.min_node.val
+
+ def isEmpty(self):
+ return self.size == 0
+
+ def deleteMin(self):
+ """
+ delete min element and return it
+ """
+ # assert not self.isEmpty(), "Empty Heap"
+
+ # Save minimal value
+ min_value = self.min_node.val
+
+ # Last element in heap corner case
+ if self.size == 1:
+ # Update size
+ self.size = 0
+
+ # Update bottom root
+ self.bottom_root = None
+
+ # Update min_node
+ self.min_node = None
+
+ return min_value
+ # No right subtree corner case
+ # The structure of the tree implies that this should be the bottom root
+ # and there is at least one other root
+ if self.min_node.right is None:
+ # Update size
+ self.size -= 1
+
+ # Update bottom root
+ self.bottom_root = self.bottom_root.parent
+ self.bottom_root.left = None
+
+ # Update min_node
+ self.min_node = self.bottom_root
+ i = self.bottom_root.parent
+ while i:
+ if i.val < self.min_node.val:
+ self.min_node = i
+ i = i.parent
+ return min_value
+ # General case
+ # Find the BinomialHeap of the right subtree of min_node
+ bottom_of_new = self.min_node.right
+ bottom_of_new.parent = None
+ min_of_new = bottom_of_new
+ size_of_new = 1
+
+ # Size, min_node and bottom_root
+ while bottom_of_new.left:
+ size_of_new = size_of_new * 2 + 1
+ bottom_of_new = bottom_of_new.left
+ if bottom_of_new.val < min_of_new.val:
+ min_of_new = bottom_of_new
+ # Corner case of single root on top left path
+ if (not self.min_node.left) and (not self.min_node.parent):
+ self.size = size_of_new
+ self.bottom_root = bottom_of_new
+ self.min_node = min_of_new
+ # print("Single root, multiple nodes case")
+ return min_value
+ # Remaining cases
+ # Construct heap of right subtree
+ newHeap = BinomialHeap(
+ bottom_root=bottom_of_new, min_node=min_of_new, heap_size=size_of_new
+ )
+
+ # Update size
+ self.size = self.size - 1 - size_of_new
+
+ # Neighbour nodes
+ previous_node = self.min_node.left
+ next_node = self.min_node.parent
+
+ # Initialize new bottom_root and min_node
+ self.min_node = previous_node or next_node
+ self.bottom_root = next_node
+
+ # Update links of previous_node and search below for new min_node and
+ # bottom_root
+ if previous_node:
+ previous_node.parent = next_node
+
+ # Update bottom_root and search for min_node below
+ self.bottom_root = previous_node
+ self.min_node = previous_node
+ while self.bottom_root.left:
+ self.bottom_root = self.bottom_root.left
+ if self.bottom_root.val < self.min_node.val:
+ self.min_node = self.bottom_root
+ if next_node:
+ next_node.left = previous_node
+
+ # Search for new min_node above min_node
+ i = next_node
+ while i:
+ if i.val < self.min_node.val:
+ self.min_node = i
+ i = i.parent
+ # Merge heaps
+ self.mergeHeaps(newHeap)
+
+ return min_value
+
+ def preOrder(self):
+ """
+ Returns the Pre-order representation of the heap including
+ values of nodes plus their level distance from the root;
+ Empty nodes appear as #
+ """
+ # Find top root
+ top_root = self.bottom_root
+ while top_root.parent:
+ top_root = top_root.parent
+ # preorder
+ heap_preOrder = []
+ self.__traversal(top_root, heap_preOrder)
+ return heap_preOrder
+
+ def __traversal(self, curr_node, preorder, level=0):
+ """
+ Pre-order traversal of nodes
+ """
+ if curr_node:
+ preorder.append((curr_node.val, level))
+ self.__traversal(curr_node.left, preorder, level + 1)
+ self.__traversal(curr_node.right, preorder, level + 1)
+ else:
+ preorder.append(("#", level))
+
+ def __str__(self):
+ """
+ Overwriting str for a pre-order print of nodes in heap;
+ Performance is poor, so use only for small examples
+ """
+ if self.isEmpty():
+ return ""
+ preorder_heap = self.preOrder()
+
+ return "\n".join(("-" * level + str(value)) for value, level in preorder_heap)
+
+
+# Unit Tests
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/data_structures/heap/heap.py b/data_structures/heap/heap.py
index 39778f725c3a..8592362c23b9 100644
--- a/data_structures/heap/heap.py
+++ b/data_structures/heap/heap.py
@@ -1,91 +1,163 @@
-#!/usr/bin/python
+from typing import Iterable, List, Optional
-from __future__ import print_function, division
-try:
- raw_input # Python 2
-except NameError:
- raw_input = input # Python 3
-
-#This heap class start from here.
class Heap:
    """A Max Heap Implementation

    >>> unsorted = [103, 9, 1, 7, 11, 15, 25, 201, 209, 107, 5]
    >>> h = Heap()
    >>> h.build_max_heap(unsorted)
    >>> print(h)
    [209, 201, 25, 103, 107, 15, 1, 9, 7, 11, 5]
    >>>
    >>> h.extract_max()
    209
    >>> print(h)
    [201, 107, 25, 103, 11, 15, 1, 9, 7, 5]
    >>>
    >>> h.insert(100)
    >>> print(h)
    [201, 107, 25, 103, 100, 15, 1, 9, 7, 5, 11]
    >>>
    >>> h.heap_sort()
    >>> print(h)
    [1, 5, 7, 9, 11, 15, 25, 100, 103, 107, 201]
    """

    def __init__(self) -> None:
        self.h: List[float] = []  # backing array
        self.heap_size: int = 0  # number of valid heap entries in self.h

    def __repr__(self) -> str:
        return str(self.h)

    def parent_index(self, child_idx: int) -> Optional[int]:
        """Return the parent index of the given child, or None for the root."""
        if child_idx > 0:
            return (child_idx - 1) // 2
        return None

    def left_child_idx(self, parent_idx: int) -> Optional[int]:
        """
        Return the left child index if the left child exists.
        If not, return None.
        """
        left_child_index = 2 * parent_idx + 1
        if left_child_index < self.heap_size:
            return left_child_index
        return None

    def right_child_idx(self, parent_idx: int) -> Optional[int]:
        """
        Return the right child index if the right child exists.
        If not, return None.
        """
        right_child_index = 2 * parent_idx + 2
        if right_child_index < self.heap_size:
            return right_child_index
        return None

    def max_heapify(self, index: int) -> None:
        """
        Correct a single violation of the heap property at a subtree's root
        by sifting the value down until both children are smaller.
        """
        if index < self.heap_size:
            violation: int = index
            left_child = self.left_child_idx(index)
            right_child = self.right_child_idx(index)
            # check which child is larger than its parent
            if left_child is not None and self.h[left_child] > self.h[violation]:
                violation = left_child
            if right_child is not None and self.h[right_child] > self.h[violation]:
                violation = right_child
            # if violation indeed exists
            if violation != index:
                # swap to fix the violation
                self.h[violation], self.h[index] = self.h[index], self.h[violation]
                # fix the subsequent violation recursively if any
                self.max_heapify(violation)

    def build_max_heap(self, collection: Iterable[float]) -> None:
        """Build a max heap from an unsorted iterable in O(n)."""
        self.h = list(collection)
        self.heap_size = len(self.h)
        if self.heap_size > 1:
            # max_heapify from right to left but exclude leaves (last level)
            for i in range(self.heap_size // 2 - 1, -1, -1):
                self.max_heapify(i)

    def max(self) -> float:
        """Return the max in the heap without removing it; raise on empty."""
        if self.heap_size >= 1:
            return self.h[0]
        else:
            raise Exception("Empty heap")

    def extract_max(self) -> float:
        """Remove and return the max from the heap; raise on empty."""
        if self.heap_size >= 2:
            me = self.h[0]
            # Move the last leaf to the root and sift it down.
            self.h[0] = self.h.pop(-1)
            self.heap_size -= 1
            self.max_heapify(0)
            return me
        elif self.heap_size == 1:
            self.heap_size -= 1
            return self.h.pop(-1)
        else:
            raise Exception("Empty heap")

    def insert(self, value: float) -> None:
        """
        Insert a new value into the max heap.

        Uses a standard O(log n) sift-up from the new leaf; the previous
        implementation called max_heapify at every ancestor, costing
        O(log^2 n) while producing the exact same array.
        """
        self.h.append(value)
        idx = self.heap_size
        self.heap_size += 1
        # Swap upward while the new value beats its parent.
        while idx > 0:
            parent = (idx - 1) // 2
            if self.h[idx] <= self.h[parent]:
                break
            self.h[idx], self.h[parent] = self.h[parent], self.h[idx]
            idx = parent

    def heap_sort(self) -> None:
        """Sort self.h in place (ascending) via repeated root extraction."""
        size = self.heap_size
        for j in range(size - 1, 0, -1):
            self.h[0], self.h[j] = self.h[j], self.h[0]
            self.heap_size -= 1
            self.max_heapify(0)
        # Restore the logical size; the array is now fully sorted.
        self.heap_size = size
+
+
if __name__ == "__main__":
    import doctest

    # run doc test
    doctest.testmod()

    # demo over edge cases and typical inputs
    test_arrays = (
        [0],
        [2],
        [3, 5],
        [5, 3],
        [5, 5],
        [0, 0, 0, 0],
        [1, 1, 1, 1],
        [2, 2, 3, 5],
        [0, 2, 2, 3, 5],
        [2, 5, 3, 0, 2, 3, 0, 3],
        [6, 1, 2, 7, 9, 3, 4, 5, 10, 8],
        [103, 9, 1, 7, 11, 15, 25, 201, 209, 107, 5],
        [-45, -2, -5],
    )
    for unsorted in test_arrays:
        print(f"unsorted array: {unsorted}")

        heap = Heap()
        heap.build_max_heap(unsorted)
        print(f"after build heap: {heap}")

        print(f"max value: {heap.extract_max()}")
        print(f"after max value removed: {heap}")
        heap.insert(100)
        print(f"after new value 100 inserted: {heap}")
        heap.heap_sort()
        print(f"heap-sorted array: {heap}\n")
diff --git a/data_structures/heap/heap_generic.py b/data_structures/heap/heap_generic.py
new file mode 100644
index 000000000000..553cb94518c4
--- /dev/null
+++ b/data_structures/heap/heap_generic.py
@@ -0,0 +1,172 @@
class Heap:
    """
    A generic Heap class, can be used as min or max by passing the key function
    accordingly (e.g. ``key=lambda x: -x`` turns it into a min heap).
    """

    def __init__(self, key=None):
        # Heap entries, each stored as a two-item list: [item, score].
        self.arr = []
        # item -> index in ``arr``; supports O(log n) updates and deletion.
        self.pos_map = {}
        # Current number of live entries in the heap.
        self.size = 0
        # Scoring function applied to every inserted value; identity by default.
        self.key = key or (lambda x: x)

    def _parent(self, i):
        """Returns parent index of given index if exists else None"""
        return (i - 1) // 2 if i > 0 else None

    def _left(self, i):
        """Returns left-child-index of given index if exists else None"""
        # 2*i + 1 is always >= 1, so only the upper bound needs checking.
        left = 2 * i + 1
        return left if left < self.size else None

    def _right(self, i):
        """Returns right-child-index of given index if exists else None"""
        right = 2 * i + 2
        return right if right < self.size else None

    def _swap(self, i, j):
        """Performs changes required for swapping two elements in the heap"""
        # First update the indexes of the items in index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i, j):
        """Compares the scores at indexes i and j (True when i scores lower)"""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i):
        """
        Returns index of valid parent as per desired ordering among given index
        and both it's children
        """
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index):
        """Fixes the heap in upward direction of given index"""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index):
        """Fixes the heap in downward direction of given index"""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item, item_value):
        """Updates given item value in heap if present"""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item):
        """Deletes given item from heap if present"""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        # Overwrite the victim slot with the last entry, then shrink.
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item, item_value):
        """Inserts given item with given value in heap"""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            # Reuse a slot left over from a previous deletion.
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self):
        """Returns the top entry as an [item, score] pair, or None if empty"""
        return self.arr[0] if self.size else None

    def extract_top(self):
        """
        Returns the top entry as an [item, score] pair and removes it from the
        heap; returns None if the heap is empty
        """
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple
+
+
def test_heap() -> None:
    """
    >>> h = Heap()  # Max-heap
    >>> h.insert_item(5, 34)
    >>> h.insert_item(6, 31)
    >>> h.insert_item(7, 37)
    >>> h.get_top()
    [7, 37]
    >>> h.extract_top()
    [7, 37]
    >>> h.extract_top()
    [5, 34]
    >>> h.extract_top()
    [6, 31]
    >>> h = Heap(key=lambda x: -x)  # Min heap
    >>> h.insert_item(5, 34)
    >>> h.insert_item(6, 31)
    >>> h.insert_item(7, 37)
    >>> h.get_top()
    [6, -31]
    >>> h.extract_top()
    [6, -31]
    >>> h.extract_top()
    [5, -34]
    >>> h.extract_top()
    [7, -37]
    >>> h.insert_item(8, 45)
    >>> h.insert_item(9, 40)
    >>> h.insert_item(10, 50)
    >>> h.get_top()
    [9, -40]
    >>> h.update_item(10, 30)
    >>> h.get_top()
    [10, -30]
    >>> h.delete_item(10)
    >>> h.get_top()
    [9, -40]
    """
    # All behavior is exercised by the doctest above; nothing to do here.
+
+
if __name__ == "__main__":
    from doctest import testmod

    testmod()
diff --git a/data_structures/heap/max_heap.py b/data_structures/heap/max_heap.py
new file mode 100644
index 000000000000..2a08f8fa2cd1
--- /dev/null
+++ b/data_structures/heap/max_heap.py
@@ -0,0 +1,87 @@
class BinaryHeap:
    """
    A max-heap implementation in Python
    >>> binary_heap = BinaryHeap()
    >>> binary_heap.insert(6)
    >>> binary_heap.insert(10)
    >>> binary_heap.insert(15)
    >>> binary_heap.insert(12)
    >>> binary_heap.pop()
    15
    >>> binary_heap.pop()
    12
    >>> binary_heap.get_list
    [10, 6]
    >>> len(binary_heap)
    2
    """

    def __init__(self):
        # Slot 0 is a sentinel so children of index i live at 2*i and 2*i + 1.
        self.__heap = [0]
        self.__size = 0

    def __swap_up(self, i: int) -> None:
        """Bubble the element at index i up toward the root."""
        while i // 2 > 0:
            parent = i // 2
            if self.__heap[i] > self.__heap[parent]:
                self.__heap[i], self.__heap[parent] = (
                    self.__heap[parent],
                    self.__heap[i],
                )
            i = parent

    def insert(self, value: int) -> None:
        """Append a new element and restore the heap property."""
        self.__heap.append(value)
        self.__size += 1
        self.__swap_up(self.__size)

    def __swap_down(self, i: int) -> None:
        """Push the element at index i down toward the leaves."""
        while 2 * i <= self.__size:
            left, right = 2 * i, 2 * i + 1
            # Pick the larger child (left wins when right is absent or smaller).
            if right > self.__size or self.__heap[left] > self.__heap[right]:
                bigger_child = left
            else:
                bigger_child = right
            if self.__heap[i] < self.__heap[bigger_child]:
                self.__heap[i], self.__heap[bigger_child] = (
                    self.__heap[bigger_child],
                    self.__heap[i],
                )
            i = bigger_child

    def pop(self) -> int:
        """Remove and return the root (the maximum)."""
        root = self.__heap[1]
        # Move the last leaf to the root, shrink, then sift it down.
        self.__heap[1] = self.__heap[self.__size]
        self.__size -= 1
        self.__heap.pop()
        self.__swap_down(1)
        return root

    @property
    def get_list(self):
        """Heap contents without the sentinel slot."""
        return self.__heap[1:]

    def __len__(self):
        """Number of stored elements."""
        return self.__size
+
+
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # create an instance of BinaryHeap
    binary_heap = BinaryHeap()
    for number in (6, 10, 15, 12):
        binary_heap.insert(number)
    # pop root(max-values because it is max heap)
    print(binary_heap.pop())  # 15
    print(binary_heap.pop())  # 12
    # get the list and size after operations
    print(binary_heap.get_list)
    print(len(binary_heap))
diff --git a/data_structures/heap/min_heap.py b/data_structures/heap/min_heap.py
new file mode 100644
index 000000000000..9265c4839536
--- /dev/null
+++ b/data_structures/heap/min_heap.py
@@ -0,0 +1,170 @@
+# Min heap data structure
+# with decrease key functionality - in O(log(n)) time
+
+
class Node:
    """A named heap entry; ordering is defined by ``val`` alone."""

    def __init__(self, name, val):
        self.name = name  # identifier used as the key in MinHeap.heap_dict
        self.val = val  # comparable priority

    def __str__(self):
        return f"{type(self).__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val
+
+
class MinHeap:
    """
    Array-backed min heap of Node objects with a node->index map, giving
    decrease_key in O(log n).

    >>> r = Node("R", -1)
    >>> b = Node("B", 6)
    >>> a = Node("A", 3)
    >>> x = Node("X", 1)
    >>> e = Node("E", 4)
    >>> print(b)
    Node(B, 6)
    >>> myMinHeap = MinHeap([r, b, a, x, e])
    >>> myMinHeap.decrease_key(b, -17)
    >>> print(b)
    Node(B, -17)
    >>> print(myMinHeap["B"])
    -17
    """

    def __init__(self, array):
        # node -> index inside self.heap, kept in sync on every swap
        self.idx_of_element = {}
        # node name -> current value, for __getitem__ lookups
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        """Heapify ``array`` in place and register every node in both maps."""
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)

        for idx, node in enumerate(array):
            self.idx_of_element[node] = idx
            self.heap_dict[node.name] = node.val

        # Classic heapify: sift down every internal node, right to left.
        for idx in range(start_from, -1, -1):
            self.sift_down(idx, array)
        return array

    # this is min-heapify method
    def sift_down(self, idx, array):
        """Move array[idx] down until the min-heap property holds below it."""
        while True:
            left = self.get_left_child_idx(idx)
            right = self.get_right_child_idx(idx)

            smallest = idx
            if left < len(array) and array[left] < array[idx]:
                smallest = left
            if right < len(array) and array[right] < array[smallest]:
                smallest = right

            if smallest == idx:
                break
            array[idx], array[smallest] = array[smallest], array[idx]
            # Keep the index map consistent with the swap above.
            (
                self.idx_of_element[array[idx]],
                self.idx_of_element[array[smallest]],
            ) = (
                self.idx_of_element[array[smallest]],
                self.idx_of_element[array[idx]],
            )
            idx = smallest

    def sift_up(self, idx):
        """Move self.heap[idx] up while it is smaller than its parent."""
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        """Return (without removing) the minimum node."""
        return self.heap[0]

    def remove(self):
        """Remove and return the minimum node."""
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        """Append a node and restore the heap property."""
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, newValue):
        # NOTE: assert is stripped under ``python -O``; kept as-is so existing
        # callers that expect AssertionError keep working.
        assert (
            self.heap[self.idx_of_element[node]].val > newValue
        ), "newValue must be less than current value"
        node.val = newValue
        self.heap_dict[node.name] = newValue
        self.sift_up(self.idx_of_element[node])
+
+
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # USAGE demo. Previously this ran at import time (printing and mutating
    # nodes as a side effect of ``import min_heap``); it is now script-only.

    r = Node("R", -1)
    b = Node("B", 6)
    a = Node("A", 3)
    x = Node("X", 1)
    e = Node("E", 4)

    # Use one of these two ways to generate Min-Heap

    # Generating Min-Heap from array
    myMinHeap = MinHeap([r, b, a, x, e])

    # Generating Min-Heap by Insert method
    # myMinHeap.insert(a)
    # myMinHeap.insert(b)
    # myMinHeap.insert(x)
    # myMinHeap.insert(r)
    # myMinHeap.insert(e)

    # Before
    print("Min Heap - before decrease key")
    for i in myMinHeap.heap:
        print(i)

    print("Min Heap - After decrease key of node [B -> -17]")
    myMinHeap.decrease_key(b, -17)

    # After
    for i in myMinHeap.heap:
        print(i)
diff --git a/data_structures/heap/randomized_heap.py b/data_structures/heap/randomized_heap.py
new file mode 100644
index 000000000000..0ddc2272efe8
--- /dev/null
+++ b/data_structures/heap/randomized_heap.py
@@ -0,0 +1,188 @@
+#!/usr/bin/env python3
+
+from __future__ import annotations
+
+import random
+from typing import Generic, Iterable, List, Optional, TypeVar
+
+T = TypeVar("T")
+
+
class RandomizedHeapNode(Generic[T]):
    """
    A single node of the randomized heap: an immutable value plus
    references to two children.
    """

    def __init__(self, value: T) -> None:
        self._value: T = value
        self.left: Optional[RandomizedHeapNode[T]] = None
        self.right: Optional[RandomizedHeapNode[T]] = None

    @property
    def value(self) -> T:
        """The value stored in this node."""
        return self._value

    @staticmethod
    def merge(
        root1: Optional[RandomizedHeapNode[T]], root2: Optional[RandomizedHeapNode[T]]
    ) -> Optional[RandomizedHeapNode[T]]:
        """Merge two sub-heaps and return the root of the combined heap."""
        # An empty side means the other side is already the answer.
        if root1 is None:
            return root2
        if root2 is None:
            return root1

        # Keep the smaller value on root1 so the minimum stays on top.
        if root1.value > root2.value:
            root1, root2 = root2, root1

        # Randomly rotate the children, then merge into the left branch;
        # the coin flip keeps the tree balanced in expectation.
        if random.choice([True, False]):
            root1.left, root1.right = root1.right, root1.left
        root1.left = RandomizedHeapNode.merge(root1.left, root2)

        return root1
+
+
class RandomizedHeap(Generic[T]):
    """
    A data structure that allows inserting a new value and to pop the smallest
    values. Both operations take O(logN) time where N is the size of the
    structure.
    Wiki: https://en.wikipedia.org/wiki/Randomized_meldable_heap

    >>> RandomizedHeap([2, 3, 1, 5, 1, 7]).to_sorted_list()
    [1, 1, 2, 3, 5, 7]

    >>> rh = RandomizedHeap()
    >>> rh.pop()
    Traceback (most recent call last):
    ...
    IndexError: Can't get top element for the empty heap.

    >>> rh.insert(1)
    >>> rh.insert(-1)
    >>> rh.insert(0)
    >>> rh.to_sorted_list()
    [-1, 0, 1]
    """

    def __init__(self, data: Optional[Iterable[T]] = ()) -> None:
        """
        >>> rh = RandomizedHeap([3, 1, 3, 7])
        >>> rh.to_sorted_list()
        [1, 3, 3, 7]
        """
        self._root: Optional[RandomizedHeapNode[T]] = None
        for item in data:
            self.insert(item)

    def insert(self, value: T) -> None:
        """
        Insert the value into the heap.

        >>> rh = RandomizedHeap()
        >>> rh.insert(3)
        >>> rh.insert(1)
        >>> rh.insert(3)
        >>> rh.insert(7)
        >>> rh.to_sorted_list()
        [1, 3, 3, 7]
        """
        # Merging with a one-node heap is exactly an insertion.
        self._root = RandomizedHeapNode.merge(self._root, RandomizedHeapNode(value))

    def pop(self) -> T:
        """
        Pop the smallest value from the heap and return it.

        >>> rh = RandomizedHeap([3, 1, 3, 7])
        >>> rh.pop()
        1
        >>> rh.pop()
        3
        >>> rh.pop()
        3
        >>> rh.pop()
        7
        >>> rh.pop()
        Traceback (most recent call last):
        ...
        IndexError: Can't get top element for the empty heap.
        """
        smallest = self.top()  # raises IndexError on an empty heap
        # Discard the root by merging its two subtrees into a new root.
        self._root = RandomizedHeapNode.merge(self._root.left, self._root.right)
        return smallest

    def top(self) -> T:
        """
        Return the smallest value from the heap.

        >>> rh = RandomizedHeap()
        >>> rh.insert(3)
        >>> rh.top()
        3
        >>> rh.insert(1)
        >>> rh.top()
        1
        >>> rh.insert(3)
        >>> rh.top()
        1
        >>> rh.insert(7)
        >>> rh.top()
        1
        """
        if self._root is None:
            raise IndexError("Can't get top element for the empty heap.")
        return self._root.value

    def clear(self):
        """
        Clear the heap.

        >>> rh = RandomizedHeap([3, 1, 3, 7])
        >>> rh.clear()
        >>> rh.pop()
        Traceback (most recent call last):
        ...
        IndexError: Can't get top element for the empty heap.
        """
        self._root = None

    def to_sorted_list(self) -> List[T]:
        """
        Returns sorted list containing all the values in the heap.

        >>> rh = RandomizedHeap([3, 1, 3, 7])
        >>> rh.to_sorted_list()
        [1, 3, 3, 7]
        """
        # Draining the heap via pop() yields values in ascending order.
        ordered = []
        while self:
            ordered.append(self.pop())
        return ordered

    def __bool__(self) -> bool:
        """
        Check if the heap is not empty.

        >>> rh = RandomizedHeap()
        >>> bool(rh)
        False
        >>> rh.insert(1)
        >>> bool(rh)
        True
        >>> rh.clear()
        >>> bool(rh)
        False
        """
        return self._root is not None
+
+
if __name__ == "__main__":
    from doctest import testmod

    testmod()
diff --git a/data_structures/heap/skew_heap.py b/data_structures/heap/skew_heap.py
new file mode 100644
index 000000000000..417a383f733e
--- /dev/null
+++ b/data_structures/heap/skew_heap.py
@@ -0,0 +1,192 @@
+#!/usr/bin/env python3
+
+from __future__ import annotations
+
+from typing import Generic, Iterable, Iterator, Optional, TypeVar
+
+T = TypeVar("T")
+
+
class SkewNode(Generic[T]):
    """
    A single node of the skew heap: an immutable value plus references to
    two children.
    """

    def __init__(self, value: T) -> None:
        self._value: T = value
        self.left: Optional[SkewNode[T]] = None
        self.right: Optional[SkewNode[T]] = None

    @property
    def value(self) -> T:
        """The value stored in this node."""
        return self._value

    @staticmethod
    def merge(
        root1: Optional[SkewNode[T]], root2: Optional[SkewNode[T]]
    ) -> Optional[SkewNode[T]]:
        """Merge two sub-heaps and return the root of the combined heap."""
        # An empty side means the other side is already the answer.
        if root1 is None:
            return root2
        if root2 is None:
            return root1

        # Keep the smaller value on root1 so the minimum stays on top.
        if root1.value > root2.value:
            root1, root2 = root2, root1

        # Skew-heap step: the old left becomes the new right, while the old
        # right is merged with root2 to form the new left.
        root1.left, root1.right = SkewNode.merge(root1.right, root2), root1.left

        return root1
+
+
class SkewHeap(Generic[T]):
    """
    A data structure that allows inserting a new value and to pop the smallest
    values. Both operations take O(logN) time where N is the size of the
    structure.
    Wiki: https://en.wikipedia.org/wiki/Skew_heap
    Visualisation: https://www.cs.usfca.edu/~galles/visualization/SkewHeap.html

    >>> list(SkewHeap([2, 3, 1, 5, 1, 7]))
    [1, 1, 2, 3, 5, 7]

    >>> sh = SkewHeap()
    >>> sh.pop()
    Traceback (most recent call last):
    ...
    IndexError: Can't get top element for the empty heap.

    >>> sh.insert(1)
    >>> sh.insert(-1)
    >>> sh.insert(0)
    >>> list(sh)
    [-1, 0, 1]
    """

    def __init__(self, data: Optional[Iterable[T]] = ()) -> None:
        """
        >>> sh = SkewHeap([3, 1, 3, 7])
        >>> list(sh)
        [1, 3, 3, 7]
        """
        self._root: Optional[SkewNode[T]] = None
        for item in data:
            self.insert(item)

    def __bool__(self) -> bool:
        """
        Check if the heap is not empty.

        >>> sh = SkewHeap()
        >>> bool(sh)
        False
        >>> sh.insert(1)
        >>> bool(sh)
        True
        >>> sh.clear()
        >>> bool(sh)
        False
        """
        return self._root is not None

    def __iter__(self) -> Iterator[T]:
        """
        Returns sorted list containing all the values in the heap.

        >>> sh = SkewHeap([3, 1, 3, 7])
        >>> list(sh)
        [1, 3, 3, 7]
        """
        # Drain the heap (pop yields ascending order) ...
        drained = []
        while self:
            drained.append(self.pop())

        # ... then re-insert everything so iteration is non-destructive.
        for value in drained:
            self.insert(value)

        return iter(drained)

    def insert(self, value: T) -> None:
        """
        Insert the value into the heap.

        >>> sh = SkewHeap()
        >>> sh.insert(3)
        >>> sh.insert(1)
        >>> sh.insert(3)
        >>> sh.insert(7)
        >>> list(sh)
        [1, 3, 3, 7]
        """
        # Merging with a one-node heap is exactly an insertion.
        self._root = SkewNode.merge(self._root, SkewNode(value))

    def pop(self) -> T:
        """
        Pop the smallest value from the heap and return it.

        >>> sh = SkewHeap([3, 1, 3, 7])
        >>> sh.pop()
        1
        >>> sh.pop()
        3
        >>> sh.pop()
        3
        >>> sh.pop()
        7
        >>> sh.pop()
        Traceback (most recent call last):
        ...
        IndexError: Can't get top element for the empty heap.
        """
        smallest = self.top()  # raises IndexError on an empty heap
        # Discard the root by merging its two subtrees into a new root.
        self._root = SkewNode.merge(self._root.left, self._root.right)
        return smallest

    def top(self) -> T:
        """
        Return the smallest value from the heap.

        >>> sh = SkewHeap()
        >>> sh.insert(3)
        >>> sh.top()
        3
        >>> sh.insert(1)
        >>> sh.top()
        1
        >>> sh.insert(3)
        >>> sh.top()
        1
        >>> sh.insert(7)
        >>> sh.top()
        1
        """
        if self._root is None:
            raise IndexError("Can't get top element for the empty heap.")
        return self._root.value

    def clear(self):
        """
        Clear the heap.

        >>> sh = SkewHeap([3, 1, 3, 7])
        >>> sh.clear()
        >>> sh.pop()
        Traceback (most recent call last):
        ...
        IndexError: Can't get top element for the empty heap.
        """
        self._root = None
+
+
if __name__ == "__main__":
    from doctest import testmod

    testmod()
diff --git a/data_structures/linked_list/__init__.py b/data_structures/linked_list/__init__.py
index 6d50f23c1f1a..a5f5537b1d96 100644
--- a/data_structures/linked_list/__init__.py
+++ b/data_structures/linked_list/__init__.py
@@ -1,22 +1,80 @@
+"""
+Linked Lists consists of Nodes.
+Nodes contain data and also may link to other nodes:
+ - Head Node: First node, the address of the
+ head node gives us access of the complete list
+ - Last node: points to null
+"""
+
+from typing import Any
+
+
class Node:
    """One linked-list cell: a payload plus a link to the following Node."""

    def __init__(self, item: Any, next: Any) -> None:
        self.item = item  # stored payload
        self.next = next  # following Node, or None at the end of the list
+
class LinkedList:
    """Singly linked list with O(1) prepend/remove at the head."""

    def __init__(self) -> None:
        self.head = None  # first Node, or None when empty
        self.size = 0  # number of stored items

    def add(self, item: Any) -> None:
        """Prepend ``item``; the new node becomes the head."""
        self.head = Node(item, self.head)
        self.size += 1

    def remove(self) -> Any:
        """Remove and return the head item, or None if the list is empty."""
        if self.is_empty():
            return None
        item = self.head.item
        self.head = self.head.next
        self.size -= 1
        return item

    def is_empty(self) -> bool:
        return self.head is None

    def __str__(self) -> str:
        """
        >>> linked_list = LinkedList()
        >>> linked_list.add(23)
        >>> linked_list.add(14)
        >>> linked_list.add(9)
        >>> print(linked_list)
        9 --> 14 --> 23
        """
        # BUG FIX: the original tested ``not self.is_empty`` -- the bound
        # method object, which is always truthy -- instead of calling it,
        # so the empty case only worked by falling through to a loop that
        # never ran. Call the method and return early for an empty list.
        if self.is_empty():
            return ""
        iterate = self.head
        item_list = []
        while iterate:
            item_list.append(str(iterate.item))
            iterate = iterate.next
        return " --> ".join(item_list)

    def __len__(self) -> int:
        """
        >>> linked_list = LinkedList()
        >>> len(linked_list)
        0
        >>> linked_list.add("a")
        >>> len(linked_list)
        1
        >>> linked_list.add("b")
        >>> len(linked_list)
        2
        >>> _ = linked_list.remove()
        >>> len(linked_list)
        1
        >>> _ = linked_list.remove()
        >>> len(linked_list)
        0
        """
        return self.size
diff --git a/data_structures/linked_list/circular_linked_list.py b/data_structures/linked_list/circular_linked_list.py
new file mode 100644
index 000000000000..f67c1e8f2cf7
--- /dev/null
+++ b/data_structures/linked_list/circular_linked_list.py
@@ -0,0 +1,141 @@
+from typing import Any
+
+
class Node:
    """A circular-list cell: payload plus a forward link (set when linked in)."""

    def __init__(self, data: Any):
        self.data = data
        self.next = None  # assigned by CircularLinkedList when inserted
+
+
class CircularLinkedList:
    """Singly linked circular list: when non-empty, ``tail.next is head``."""

    def __init__(self):
        # Both pointers are None for an empty list.
        self.head = None
        self.tail = None

    def __iter__(self):
        # Yield each node's data exactly once, stopping after wrapping
        # back around to the head.
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        # O(n): materializes one full traversal to count the nodes.
        return len(tuple(iter(self)))

    def __repr__(self):
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        """Append ``data`` at the end of the list."""
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        """Prepend ``data`` at the front of the list."""
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        """Insert ``data`` at position ``index``; raises IndexError when out of range."""
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            # tail.next must keep pointing at the (new) head.
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self):
        """Remove and return the first item."""
        return self.delete_nth(0)

    def delete_tail(self) -> None:
        """Remove and return the last item."""
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0):
        """Remove and return the item at ``index``; raises IndexError when out of range."""
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            # Re-point tail past the old head before advancing head.
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self):
        return len(self) == 0
+
+
def test_circular_linked_list() -> None:
    """
    >>> test_circular_linked_list()
    """
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    # Every kind of deletion on an empty list must raise IndexError.
    for invalid_delete in (
        circular_linked_list.delete_front,
        circular_linked_list.delete_tail,
        lambda: circular_linked_list.delete_nth(-1),
        lambda: circular_linked_list.delete_nth(0),
    ):
        try:
            invalid_delete()
            assert False  # This should not happen
        except IndexError:
            assert True  # This should happen

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False
+
+
if __name__ == "__main__":
    from doctest import testmod

    testmod()
diff --git a/data_structures/linked_list/deque_doubly.py b/data_structures/linked_list/deque_doubly.py
new file mode 100644
index 000000000000..894f91d561cc
--- /dev/null
+++ b/data_structures/linked_list/deque_doubly.py
@@ -0,0 +1,143 @@
+"""
+Implementing Deque using DoublyLinkedList ...
+Operations:
+ 1. insertion in the front -> O(1)
+ 2. insertion in the end -> O(1)
+ 3. remove from the front -> O(1)
+ 4. remove from the end -> O(1)
+"""
+
+
+class _DoublyLinkedBase:
+ """ A Private class (to be inherited) """
+
+ class _Node:
+ __slots__ = "_prev", "_data", "_next"
+
+ def __init__(self, link_p, element, link_n):
+ self._prev = link_p
+ self._data = element
+ self._next = link_n
+
+ def has_next_and_prev(self):
+ return " Prev -> {}, Next -> {}".format(
+ self._prev is not None, self._next is not None
+ )
+
+ def __init__(self):
+ self._header = self._Node(None, None, None)
+ self._trailer = self._Node(None, None, None)
+ self._header._next = self._trailer
+ self._trailer._prev = self._header
+ self._size = 0
+
+ def __len__(self):
+ return self._size
+
+ def is_empty(self):
+ return self.__len__() == 0
+
+ def _insert(self, predecessor, e, successor):
+ # Create new_node by setting it's prev.link -> header
+ # setting it's next.link -> trailer
+ new_node = self._Node(predecessor, e, successor)
+ predecessor._next = new_node
+ successor._prev = new_node
+ self._size += 1
+ return self
+
+ def _delete(self, node):
+ predecessor = node._prev
+ successor = node._next
+
+ predecessor._next = successor
+ successor._prev = predecessor
+ self._size -= 1
+ temp = node._data
+ node._prev = node._next = node._data = None
+ del node
+ return temp
+
+
+class LinkedDeque(_DoublyLinkedBase):
+ def first(self):
+ """return first element
+ >>> d = LinkedDeque()
+ >>> d.add_first('A').first()
+ 'A'
+ >>> d.add_first('B').first()
+ 'B'
+ """
+ if self.is_empty():
+ raise Exception("List is empty")
+ return self._header._next._data
+
+ def last(self):
+ """return last element
+ >>> d = LinkedDeque()
+ >>> d.add_last('A').last()
+ 'A'
+ >>> d.add_last('B').last()
+ 'B'
+ """
+ if self.is_empty():
+ raise Exception("List is empty")
+ return self._trailer._prev._data
+
+ # Deque Insert Operations (At the front, At the end)
+
+ def add_first(self, element):
+ """insertion in the front
+ >>> LinkedDeque().add_first('AV').first()
+ 'AV'
+ """
+ return self._insert(self._header, element, self._header._next)
+
+ def add_last(self, element):
+ """insertion in the end
+ >>> LinkedDeque().add_last('B').last()
+ 'B'
+ """
+ return self._insert(self._trailer._prev, element, self._trailer)
+
+ # Deque Remove Operations (At the front, At the end)
+
+ def remove_first(self):
+ """removal from the front
+ >>> d = LinkedDeque()
+ >>> d.is_empty()
+ True
+ >>> d.remove_first()
+ Traceback (most recent call last):
+ ...
+ IndexError: remove_first from empty list
+ >>> d.add_first('A') # doctest: +ELLIPSIS
+ <linked_list.deque_doubly.LinkedDeque object at ...
+ >>> d.remove_first()
+ 'A'
+ >>> d.is_empty()
+ True
+ """
+ if self.is_empty():
+ raise IndexError("remove_first from empty list")
+ return self._delete(self._header._next)
+
+ def remove_last(self):
+ """removal in the end
+ >>> d = LinkedDeque()
+ >>> d.is_empty()
+ True
+ >>> d.remove_last()
+ Traceback (most recent call last):
+ ...
+ IndexError: remove_last from empty list
+ >>> d.add_first('A') # doctest: +ELLIPSIS
+ <linked_list.deque_doubly.LinkedDeque object at ...
+ >>> d.remove_last()
+ 'A'
+ >>> d.is_empty()
+ True
+ """
+ if self.is_empty():
+ raise IndexError("remove_last from empty list")
+ return self._delete(self._trailer._prev)
diff --git a/data_structures/linked_list/doubly_linked_list.py b/data_structures/linked_list/doubly_linked_list.py
index 75b1f889dfc2..0eb3cf101a3e 100644
--- a/data_structures/linked_list/doubly_linked_list.py
+++ b/data_structures/linked_list/doubly_linked_list.py
@@ -1,77 +1,226 @@
-'''
-- A linked list is similar to an array, it holds values. However, links in a linked list do not have indexes.
-- This is an example of a double ended, doubly linked list.
-- Each link references the next link and the previous one.
-- A Doubly Linked List (DLL) contains an extra pointer, typically called previous pointer, together with next pointer and data which are there in singly linked list.
- - Advantages over SLL - IT can be traversed in both forward and backward direction.,Delete operation is more efficent'''
-from __future__ import print_function
+"""
+https://en.wikipedia.org/wiki/Doubly_linked_list
+"""
-class LinkedList: #making main class named linked list
+class Node:
+ def __init__(self, data):
+ self.data = data
+ self.previous = None
+ self.next = None
+
+ def __str__(self):
+ return f"{self.data}"
+
+
+class DoublyLinkedList:
def __init__(self):
self.head = None
self.tail = None
-
- def insertHead(self, x):
- newLink = Link(x) #Create a new link with a value attached to it
- if(self.isEmpty() == True): #Set the first element added to be the tail
- self.tail = newLink
+
+ def __iter__(self):
+ """
+ >>> linked_list = DoublyLinkedList()
+ >>> linked_list.insert_at_head('b')
+ >>> linked_list.insert_at_head('a')
+ >>> linked_list.insert_at_tail('c')
+ >>> tuple(linked_list)
+ ('a', 'b', 'c')
+ """
+ node = self.head
+ while node:
+ yield node.data
+ node = node.next
+
+ def __str__(self):
+ """
+ >>> linked_list = DoublyLinkedList()
+ >>> linked_list.insert_at_tail('a')
+ >>> linked_list.insert_at_tail('b')
+ >>> linked_list.insert_at_tail('c')
+ >>> str(linked_list)
+ 'a->b->c'
+ """
+ return "->".join([str(item) for item in self])
+
+ def __len__(self):
+ """
+ >>> linked_list = DoublyLinkedList()
+ >>> for i in range(0, 5):
+ ... linked_list.insert_at_nth(i, i + 1)
+ >>> len(linked_list) == 5
+ True
+ """
+ return len(tuple(iter(self)))
+
+ def insert_at_head(self, data):
+ self.insert_at_nth(0, data)
+
+ def insert_at_tail(self, data):
+ self.insert_at_nth(len(self), data)
+
+ def insert_at_nth(self, index: int, data):
+ """
+ >>> linked_list = DoublyLinkedList()
+ >>> linked_list.insert_at_nth(-1, 666)
+ Traceback (most recent call last):
+ ....
+ IndexError: list index out of range
+ >>> linked_list.insert_at_nth(1, 666)
+ Traceback (most recent call last):
+ ....
+ IndexError: list index out of range
+ >>> linked_list.insert_at_nth(0, 2)
+ >>> linked_list.insert_at_nth(0, 1)
+ >>> linked_list.insert_at_nth(2, 4)
+ >>> linked_list.insert_at_nth(2, 3)
+ >>> str(linked_list)
+ '1->2->3->4'
+ >>> linked_list.insert_at_nth(5, 5)
+ Traceback (most recent call last):
+ ....
+ IndexError: list index out of range
+ """
+ if not 0 <= index <= len(self):
+ raise IndexError("list index out of range")
+ new_node = Node(data)
+ if self.head is None:
+ self.head = self.tail = new_node
+ elif index == 0:
+ self.head.previous = new_node
+ new_node.next = self.head
+ self.head = new_node
+ elif index == len(self):
+ self.tail.next = new_node
+ new_node.previous = self.tail
+ self.tail = new_node
else:
- self.head.previous = newLink # newLink <-- currenthead(head)
- newLink.next = self.head # newLink <--> currenthead(head)
- self.head = newLink # newLink(head) <--> oldhead
-
- def deleteHead(self):
- temp = self.head
- self.head = self.head.next # oldHead <--> 2ndElement(head)
- self.head.previous = None # oldHead --> 2ndElement(head) nothing pointing at it so the old head will be removed
- if(self.head is None):
- self.tail = None #if empty linked list
- return temp
-
- def insertTail(self, x):
- newLink = Link(x)
- newLink.next = None # currentTail(tail) newLink -->
- self.tail.next = newLink # currentTail(tail) --> newLink -->
- newLink.previous = self.tail #currentTail(tail) <--> newLink -->
- self.tail = newLink # oldTail <--> newLink(tail) -->
-
- def deleteTail(self):
- temp = self.tail
- self.tail = self.tail.previous # 2ndLast(tail) <--> oldTail --> None
- self.tail.next = None # 2ndlast(tail) --> None
- return temp
-
- def delete(self, x):
- current = self.head
-
- while(current.value != x): # Find the position to delete
- current = current.next
-
- if(current == self.head):
- self.deleteHead()
-
- elif(current == self.tail):
- self.deleteTail()
-
- else: #Before: 1 <--> 2(current) <--> 3
- current.previous.next = current.next # 1 --> 3
- current.next.previous = current.previous # 1 <--> 3
-
- def isEmpty(self): #Will return True if the list is empty
- return(self.head is None)
-
- def display(self): #Prints contents of the list
+ temp = self.head
+ for i in range(0, index):
+ temp = temp.next
+ temp.previous.next = new_node
+ new_node.previous = temp.previous
+ new_node.next = temp
+ temp.previous = new_node
+
+ def delete_head(self):
+ return self.delete_at_nth(0)
+
+ def delete_tail(self):
+ return self.delete_at_nth(len(self) - 1)
+
+ def delete_at_nth(self, index: int):
+ """
+ >>> linked_list = DoublyLinkedList()
+ >>> linked_list.delete_at_nth(0)
+ Traceback (most recent call last):
+ ....
+ IndexError: list index out of range
+ >>> for i in range(0, 5):
+ ... linked_list.insert_at_nth(i, i + 1)
+ >>> linked_list.delete_at_nth(0) == 1
+ True
+ >>> linked_list.delete_at_nth(3) == 5
+ True
+ >>> linked_list.delete_at_nth(1) == 3
+ True
+ >>> str(linked_list)
+ '2->4'
+ >>> linked_list.delete_at_nth(2)
+ Traceback (most recent call last):
+ ....
+ IndexError: list index out of range
+ """
+ if not 0 <= index <= len(self) - 1:
+ raise IndexError("list index out of range")
+ delete_node = self.head # default first node
+ if len(self) == 1:
+ self.head = self.tail = None
+ elif index == 0:
+ self.head = self.head.next
+ self.head.previous = None
+ elif index == len(self) - 1:
+ delete_node = self.tail
+ self.tail = self.tail.previous
+ self.tail.next = None
+ else:
+ temp = self.head
+ for i in range(0, index):
+ temp = temp.next
+ delete_node = temp
+ temp.next.previous = temp.previous
+ temp.previous.next = temp.next
+ return delete_node.data
+
+ def delete(self, data) -> str:
current = self.head
- while(current != None):
- current.displayLink()
- current = current.next
- print()
-
-class Link:
- next = None #This points to the link in front of the new link
- previous = None #This points to the link behind the new link
- def __init__(self, x):
- self.value = x
- def displayLink(self):
- print("{}".format(self.value), end=" ")
+
+ while current.data != data: # Find the position to delete
+ if current.next:
+ current = current.next
+ else: # We have reached the end and no value matches
+ return "No data matching given value"
+
+ if current == self.head:
+ self.delete_head()
+
+ elif current == self.tail:
+ self.delete_tail()
+
+ else: # Before: 1 <--> 2(current) <--> 3
+ current.previous.next = current.next # 1 --> 3
+ current.next.previous = current.previous # 1 <--> 3
+ return data
+
+ def is_empty(self):
+ """
+ >>> linked_list = DoublyLinkedList()
+ >>> linked_list.is_empty()
+ True
+ >>> linked_list.insert_at_tail(1)
+ >>> linked_list.is_empty()
+ False
+ """
+ return len(self) == 0
+
+
+def test_doubly_linked_list() -> None:
+ """
+ >>> test_doubly_linked_list()
+ """
+ linked_list = DoublyLinkedList()
+ assert linked_list.is_empty() is True
+ assert str(linked_list) == ""
+
+ try:
+ linked_list.delete_head()
+ assert False # This should not happen.
+ except IndexError:
+ assert True # This should happen.
+
+ try:
+ linked_list.delete_tail()
+ assert False # This should not happen.
+ except IndexError:
+ assert True # This should happen.
+
+ for i in range(10):
+ assert len(linked_list) == i
+ linked_list.insert_at_nth(i, i + 1)
+ assert str(linked_list) == "->".join(str(i) for i in range(1, 11))
+
+ linked_list.insert_at_head(0)
+ linked_list.insert_at_tail(11)
+ assert str(linked_list) == "->".join(str(i) for i in range(0, 12))
+
+ assert linked_list.delete_head() == 0
+ assert linked_list.delete_at_nth(9) == 10
+ assert linked_list.delete_tail() == 11
+ assert len(linked_list) == 9
+ assert str(linked_list) == "->".join(str(i) for i in range(1, 10))
+
+
+if __name__ == "__main__":
+ from doctest import testmod
+
+ testmod()
diff --git a/data_structures/linked_list/doubly_linked_list_two.py b/data_structures/linked_list/doubly_linked_list_two.py
new file mode 100644
index 000000000000..184b6966b5a9
--- /dev/null
+++ b/data_structures/linked_list/doubly_linked_list_two.py
@@ -0,0 +1,253 @@
+"""
+- A linked list is similar to an array, it holds values. However, links in a linked
+ list do not have indexes.
+- This is an example of a double ended, doubly linked list.
+- Each link references the next link and the previous one.
+- A Doubly Linked List (DLL) contains an extra pointer, typically called previous
+ pointer, together with next pointer and data which are there in singly linked list.
+ - Advantages over SLL - It can be traversed in both forward and backward direction.
+ Delete operation is more efficient
+"""
+
+
+class Node:
+ def __init__(self, data: int, previous=None, next_node=None):
+ self.data = data
+ self.previous = previous
+ self.next = next_node
+
+ def __str__(self) -> str:
+ return f"{self.data}"
+
+ def get_data(self) -> int:
+ return self.data
+
+ def get_next(self):
+ return self.next
+
+ def get_previous(self):
+ return self.previous
+
+
+class LinkedListIterator:
+ def __init__(self, head):
+ self.current = head
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ if not self.current:
+ raise StopIteration
+ else:
+ value = self.current.get_data()
+ self.current = self.current.get_next()
+ return value
+
+
+class LinkedList:
+ def __init__(self):
+ self.head = None # First node in list
+ self.tail = None # Last node in list
+
+ def __str__(self):
+ current = self.head
+ nodes = []
+ while current is not None:
+ nodes.append(current.get_data())
+ current = current.get_next()
+ return " ".join(str(node) for node in nodes)
+
+ def __contains__(self, value: int):
+ current = self.head
+ while current:
+ if current.get_data() == value:
+ return True
+ current = current.get_next()
+ return False
+
+ def __iter__(self):
+ return LinkedListIterator(self.head)
+
+ def get_head_data(self):
+ if self.head:
+ return self.head.get_data()
+ return None
+
+ def get_tail_data(self):
+ if self.tail:
+ return self.tail.get_data()
+ return None
+
+ def set_head(self, node: Node) -> None:
+
+ if self.head is None:
+ self.head = node
+ self.tail = node
+ else:
+ self.insert_before_node(self.head, node)
+
+ def set_tail(self, node: Node) -> None:
+ if self.head is None:
+ self.set_head(node)
+ else:
+ self.insert_after_node(self.tail, node)
+
+ def insert(self, value: int) -> None:
+ node = Node(value)
+ if self.head is None:
+ self.set_head(node)
+ else:
+ self.set_tail(node)
+
+ def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
+ node_to_insert.next = node
+ node_to_insert.previous = node.previous
+
+ if node.get_previous() is None:
+ self.head = node_to_insert
+ else:
+ node.previous.next = node_to_insert
+
+ node.previous = node_to_insert
+
+ def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
+ node_to_insert.previous = node
+ node_to_insert.next = node.next
+
+ if node.get_next() is None:
+ self.tail = node_to_insert
+ else:
+ node.next.previous = node_to_insert
+
+ node.next = node_to_insert
+
+ def insert_at_position(self, position: int, value: int) -> None:
+ current_position = 1
+ new_node = Node(value)
+ node = self.head
+ while node:
+ if current_position == position:
+ self.insert_before_node(node, new_node)
+ return None
+ current_position += 1
+ node = node.next
+ self.insert_after_node(self.tail, new_node)
+
+ def get_node(self, item: int) -> Node:
+ node = self.head
+ while node:
+ if node.get_data() == item:
+ return node
+ node = node.get_next()
+ raise Exception("Node not found")
+
+ def delete_value(self, value):
+ node = self.get_node(value)
+
+ if node is not None:
+ if node == self.head:
+ self.head = self.head.get_next()
+
+ if node == self.tail:
+ self.tail = self.tail.get_previous()
+
+ self.remove_node_pointers(node)
+
+ @staticmethod
+ def remove_node_pointers(node: Node) -> None:
+ if node.get_next():
+ node.next.previous = node.previous
+
+ if node.get_previous():
+ node.previous.next = node.next
+
+ node.next = None
+ node.previous = None
+
+ def is_empty(self):
+ return self.head is None
+
+
+def create_linked_list() -> None:
+ """
+ >>> new_linked_list = LinkedList()
+ >>> new_linked_list.get_head_data() is None
+ True
+ >>> new_linked_list.get_tail_data() is None
+ True
+ >>> new_linked_list.is_empty()
+ True
+ >>> new_linked_list.insert(10)
+ >>> new_linked_list.get_head_data()
+ 10
+ >>> new_linked_list.get_tail_data()
+ 10
+ >>> new_linked_list.insert_at_position(position=3, value=20)
+ >>> new_linked_list.get_head_data()
+ 10
+ >>> new_linked_list.get_tail_data()
+ 20
+ >>> new_linked_list.set_head(Node(1000))
+ >>> new_linked_list.get_head_data()
+ 1000
+ >>> new_linked_list.get_tail_data()
+ 20
+ >>> new_linked_list.set_tail(Node(2000))
+ >>> new_linked_list.get_head_data()
+ 1000
+ >>> new_linked_list.get_tail_data()
+ 2000
+ >>> for value in new_linked_list:
+ ... print(value)
+ 1000
+ 10
+ 20
+ 2000
+ >>> new_linked_list.is_empty()
+ False
+ >>> for value in new_linked_list:
+ ... print(value)
+ 1000
+ 10
+ 20
+ 2000
+ >>> 10 in new_linked_list
+ True
+ >>> new_linked_list.delete_value(value=10)
+ >>> 10 in new_linked_list
+ False
+ >>> new_linked_list.delete_value(value=2000)
+ >>> new_linked_list.get_tail_data()
+ 20
+ >>> new_linked_list.delete_value(value=1000)
+ >>> new_linked_list.get_tail_data()
+ 20
+ >>> new_linked_list.get_head_data()
+ 20
+ >>> for value in new_linked_list:
+ ... print(value)
+ 20
+ >>> new_linked_list.delete_value(value=20)
+ >>> for value in new_linked_list:
+ ... print(value)
+ >>> for value in range(1,10):
+ ... new_linked_list.insert(value=value)
+ >>> for value in new_linked_list:
+ ... print(value)
+ 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+ """
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/data_structures/linked_list/from_sequence.py b/data_structures/linked_list/from_sequence.py
new file mode 100644
index 000000000000..94b44f15037f
--- /dev/null
+++ b/data_structures/linked_list/from_sequence.py
@@ -0,0 +1,44 @@
+# Recursive Program to create a Linked List from a sequence and
+# print a string representation of it.
+
+
+class Node:
+ def __init__(self, data=None):
+ self.data = data
+ self.next = None
+
+ def __repr__(self):
+ """Returns a visual representation of the node and all its following nodes."""
+ string_rep = ""
+ temp = self
+ while temp:
+ string_rep += f"<{temp.data}> ---> "
+ temp = temp.next
+ string_rep += ""
+ return string_rep
+
+
+def make_linked_list(elements_list):
+ """Creates a Linked List from the elements of the given sequence
+ (list/tuple) and returns the head of the Linked List."""
+
+ # if elements_list is empty
+ if not elements_list:
+ raise Exception("The Elements List is empty")
+
+ # Set first element as Head
+ head = Node(elements_list[0])
+ current = head
+ # Loop through elements from position 1
+ for data in elements_list[1:]:
+ current.next = Node(data)
+ current = current.next
+ return head
+
+
+list_data = [1, 3, 5, 32, 44, 12, 43]
+print(f"List: {list_data}")
+print("Creating Linked List from List.")
+linked_list = make_linked_list(list_data)
+print("Linked List:")
+print(linked_list)
diff --git a/data_structures/linked_list/has_loop.py b/data_structures/linked_list/has_loop.py
new file mode 100644
index 000000000000..405ece7e27c8
--- /dev/null
+++ b/data_structures/linked_list/has_loop.py
@@ -0,0 +1,60 @@
+from typing import Any
+
+
+class ContainsLoopError(Exception):
+ pass
+
+
+class Node:
+ def __init__(self, data: Any) -> None:
+ self.data = data
+ self.next_node = None
+
+ def __iter__(self):
+ node = self
+ visited = []
+ while node:
+ if node in visited:
+ raise ContainsLoopError
+ visited.append(node)
+ yield node.data
+ node = node.next_node
+
+ @property
+ def has_loop(self) -> bool:
+ """
+ A loop is when the exact same Node appears more than once in a linked list.
+ >>> root_node = Node(1)
+ >>> root_node.next_node = Node(2)
+ >>> root_node.next_node.next_node = Node(3)
+ >>> root_node.next_node.next_node.next_node = Node(4)
+ >>> root_node.has_loop
+ False
+ >>> root_node.next_node.next_node.next_node = root_node.next_node
+ >>> root_node.has_loop
+ True
+ """
+ try:
+ list(self)
+ return False
+ except ContainsLoopError:
+ return True
+
+
+if __name__ == "__main__":
+ root_node = Node(1)
+ root_node.next_node = Node(2)
+ root_node.next_node.next_node = Node(3)
+ root_node.next_node.next_node.next_node = Node(4)
+ print(root_node.has_loop) # False
+ root_node.next_node.next_node.next_node = root_node.next_node
+ print(root_node.has_loop) # True
+
+ root_node = Node(5)
+ root_node.next_node = Node(6)
+ root_node.next_node.next_node = Node(5)
+ root_node.next_node.next_node.next_node = Node(6)
+ print(root_node.has_loop) # False
+
+ root_node = Node(1)
+ print(root_node.has_loop) # False
diff --git a/data_structures/linked_list/is_Palindrome.py b/data_structures/linked_list/is_palindrome.py
similarity index 100%
rename from data_structures/linked_list/is_Palindrome.py
rename to data_structures/linked_list/is_palindrome.py
diff --git a/data_structures/linked_list/merge_two_lists.py b/data_structures/linked_list/merge_two_lists.py
new file mode 100644
index 000000000000..96ec6b8abc85
--- /dev/null
+++ b/data_structures/linked_list/merge_two_lists.py
@@ -0,0 +1,83 @@
+"""
+Algorithm that merges two sorted linked lists into one sorted linked list.
+"""
+from __future__ import annotations
+
+from collections.abc import Iterable, Iterator
+from dataclasses import dataclass
+from typing import Optional
+
+test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
+test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
+
+
+@dataclass
+class Node:
+ data: int
+ next: Optional[Node]
+
+
+class SortedLinkedList:
+ def __init__(self, ints: Iterable[int]) -> None:
+ self.head: Optional[Node] = None
+ for i in reversed(sorted(ints)):
+ self.head = Node(i, self.head)
+
+ def __iter__(self) -> Iterator[int]:
+ """
+ >>> tuple(SortedLinkedList(test_data_odd)) == tuple(sorted(test_data_odd))
+ True
+ >>> tuple(SortedLinkedList(test_data_even)) == tuple(sorted(test_data_even))
+ True
+ """
+ node = self.head
+ while node:
+ yield node.data
+ node = node.next
+
+ def __len__(self) -> int:
+ """
+ >>> for i in range(3):
+ ... len(SortedLinkedList(range(i))) == i
+ True
+ True
+ True
+ >>> len(SortedLinkedList(test_data_odd))
+ 8
+ """
+ return len(tuple(iter(self)))
+
+ def __str__(self) -> str:
+ """
+ >>> str(SortedLinkedList([]))
+ ''
+ >>> str(SortedLinkedList(test_data_odd))
+ '-11 -> -1 -> 0 -> 1 -> 3 -> 5 -> 7 -> 9'
+ >>> str(SortedLinkedList(test_data_even))
+ '-2 -> 0 -> 2 -> 3 -> 4 -> 6 -> 8 -> 10'
+ """
+ return " -> ".join([str(node) for node in self])
+
+
+def merge_lists(
+ sll_one: SortedLinkedList, sll_two: SortedLinkedList
+) -> SortedLinkedList:
+ """
+ >>> SSL = SortedLinkedList
+ >>> merged = merge_lists(SSL(test_data_odd), SSL(test_data_even))
+ >>> len(merged)
+ 16
+ >>> str(merged)
+ '-11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10'
+ >>> list(merged) == list(sorted(test_data_odd + test_data_even))
+ True
+ """
+ return SortedLinkedList(list(sll_one) + list(sll_two))
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
+ SSL = SortedLinkedList
+ print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
diff --git a/data_structures/linked_list/middle_element_of_linked_list.py b/data_structures/linked_list/middle_element_of_linked_list.py
new file mode 100644
index 000000000000..185c4ccbbb0a
--- /dev/null
+++ b/data_structures/linked_list/middle_element_of_linked_list.py
@@ -0,0 +1,64 @@
+class Node:
+ def __init__(self, data: int) -> None:
+ self.data = data
+ self.next = None
+
+
+class LinkedList:
+ def __init__(self):
+ self.head = None
+
+ def push(self, new_data: int) -> int:
+ new_node = Node(new_data)
+ new_node.next = self.head
+ self.head = new_node
+ return self.head.data
+
+ def middle_element(self) -> int:
+ """
+ >>> link = LinkedList()
+ >>> link.middle_element()
+ No element found.
+ >>> link.push(5)
+ 5
+ >>> link.push(6)
+ 6
+ >>> link.push(8)
+ 8
+ >>> link.push(8)
+ 8
+ >>> link.push(10)
+ 10
+ >>> link.push(12)
+ 12
+ >>> link.push(17)
+ 17
+ >>> link.push(7)
+ 7
+ >>> link.push(3)
+ 3
+ >>> link.push(20)
+ 20
+ >>> link.push(-20)
+ -20
+ >>> link.middle_element()
+ 12
+ >>>
+ """
+ slow_pointer = self.head
+ fast_pointer = self.head
+ if self.head:
+ while fast_pointer and fast_pointer.next:
+ fast_pointer = fast_pointer.next.next
+ slow_pointer = slow_pointer.next
+ return slow_pointer.data
+ else:
+ print("No element found.")
+
+
+if __name__ == "__main__":
+ link = LinkedList()
+ for i in range(int(input().strip())):
+ data = int(input().strip())
+ link.push(data)
+ print(link.middle_element())
diff --git a/data_structures/linked_list/print_reverse.py b/data_structures/linked_list/print_reverse.py
new file mode 100644
index 000000000000..c46f228e7260
--- /dev/null
+++ b/data_structures/linked_list/print_reverse.py
@@ -0,0 +1,70 @@
+from typing import List
+
+
+class Node:
+ def __init__(self, data=None):
+ self.data = data
+ self.next = None
+
+ def __repr__(self):
+ """Returns a visual representation of the node and all its following nodes."""
+ string_rep = []
+ temp = self
+ while temp:
+ string_rep.append(f"{temp.data}")
+ temp = temp.next
+ return "->".join(string_rep)
+
+
+def make_linked_list(elements_list: List):
+ """Creates a Linked List from the elements of the given sequence
+ (list/tuple) and returns the head of the Linked List.
+ >>> make_linked_list([])
+ Traceback (most recent call last):
+ ...
+ Exception: The Elements List is empty
+ >>> make_linked_list([7])
+ 7
+ >>> make_linked_list(['abc'])
+ abc
+ >>> make_linked_list([7, 25])
+ 7->25
+ """
+ if not elements_list:
+ raise Exception("The Elements List is empty")
+
+ current = head = Node(elements_list[0])
+ for i in range(1, len(elements_list)):
+ current.next = Node(elements_list[i])
+ current = current.next
+ return head
+
+
+def print_reverse(head_node: Node) -> None:
+ """Prints the elements of the given Linked List in reverse order
+ >>> print_reverse([])
+ >>> linked_list = make_linked_list([69, 88, 73])
+ >>> print_reverse(linked_list)
+ 73
+ 88
+ 69
+ """
+ if head_node is not None and isinstance(head_node, Node):
+ print_reverse(head_node.next)
+ print(head_node.data)
+
+
+def main():
+ from doctest import testmod
+
+ testmod()
+
+ linked_list = make_linked_list([14, 52, 14, 12, 43])
+ print("Linked List:")
+ print(linked_list)
+ print("Elements in Reverse:")
+ print_reverse(linked_list)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/data_structures/linked_list/singly_linked_list.py b/data_structures/linked_list/singly_linked_list.py
index 5ae97523b9a1..e45a210a1785 100644
--- a/data_structures/linked_list/singly_linked_list.py
+++ b/data_structures/linked_list/singly_linked_list.py
@@ -1,60 +1,151 @@
-from __future__ import print_function
-
-
-class Node: # create a Node
+class Node:
def __init__(self, data):
- self.data = data # given data
- self.next = None # given next to None
+ self.data = data
+ self.next = None
+
+ def __repr__(self):
+ return f"Node({self.data})"
-class Linked_List:
+
+class LinkedList:
def __init__(self):
- self.Head = None # Initialize Head to None
-
- def insert_tail(self, data):
- if(self.Head is None): self.insert_head(data) #If this is first node, call insert_head
+ self.head = None
+
+ def __iter__(self):
+ node = self.head
+ while node:
+ yield node.data
+ node = node.next
+
+ def __len__(self) -> int:
+ """
+ Return length of linked list i.e. number of nodes
+ >>> linked_list = LinkedList()
+ >>> len(linked_list)
+ 0
+ >>> linked_list.insert_tail("head")
+ >>> len(linked_list)
+ 1
+ >>> linked_list.insert_head("head")
+ >>> len(linked_list)
+ 2
+ >>> _ = linked_list.delete_tail()
+ >>> len(linked_list)
+ 1
+ >>> _ = linked_list.delete_head()
+ >>> len(linked_list)
+ 0
+ """
+ return len(tuple(iter(self)))
+
+ def __repr__(self):
+ """
+ String representation/visualization of a Linked Lists
+ """
+ return "->".join([str(item) for item in self])
+
+ def __getitem__(self, index):
+ """
+ Indexing Support. Used to get a node at particular position
+ >>> linked_list = LinkedList()
+ >>> for i in range(0, 10):
+ ... linked_list.insert_nth(i, i)
+ >>> all(str(linked_list[i]) == str(i) for i in range(0, 10))
+ True
+ >>> linked_list[-10]
+ Traceback (most recent call last):
+ ...
+ ValueError: list index out of range.
+ >>> linked_list[len(linked_list)]
+ Traceback (most recent call last):
+ ...
+ ValueError: list index out of range.
+ """
+ if not 0 <= index < len(self):
+ raise ValueError("list index out of range.")
+ for i, node in enumerate(self):
+ if i == index:
+ return node
+
+ # Used to change the data of a particular node
+ def __setitem__(self, index, data):
+ """
+ >>> linked_list = LinkedList()
+ >>> for i in range(0, 10):
+ ... linked_list.insert_nth(i, i)
+ >>> linked_list[0] = 666
+ >>> linked_list[0]
+ 666
+ >>> linked_list[5] = -666
+ >>> linked_list[5]
+ -666
+ >>> linked_list[-10] = 666
+ Traceback (most recent call last):
+ ...
+ ValueError: list index out of range.
+ >>> linked_list[len(linked_list)] = 666
+ Traceback (most recent call last):
+ ...
+ ValueError: list index out of range.
+ """
+ if not 0 <= index < len(self):
+ raise ValueError("list index out of range.")
+ current = self.head
+ for i in range(index):
+ current = current.next
+ current.data = data
+
+ def insert_tail(self, data) -> None:
+ self.insert_nth(len(self), data)
+
+ def insert_head(self, data) -> None:
+ self.insert_nth(0, data)
+
+ def insert_nth(self, index: int, data) -> None:
+ if not 0 <= index <= len(self):
+ raise IndexError("list index out of range")
+ new_node = Node(data)
+ if self.head is None:
+ self.head = new_node
+ elif index == 0:
+ new_node.next = self.head # link new_node to head
+ self.head = new_node
else:
- temp = self.Head
- while(temp.next != None): #traverse to last node
+ temp = self.head
+ for _ in range(index - 1):
temp = temp.next
- temp.next = Node(data) #create node & link to tail
-
- def insert_head(self, data):
- newNod = Node(data) # create a new node
- if self.Head != None:
- newNod.next = self.Head # link newNode to head
- self.Head = newNod # make NewNode as Head
-
- def printList(self): # print every node data
- tamp = self.Head
- while tamp is not None:
- print(tamp.data)
- tamp = tamp.next
-
- def delete_head(self): # delete from head
- temp = self.Head
- if self.Head != None:
- self.Head = self.Head.next
- temp.next = None
- return temp
-
+ new_node.next = temp.next
+ temp.next = new_node
+
+ def print_list(self) -> None: # print every node data
+ print(self)
+
+ def delete_head(self):
+ return self.delete_nth(0)
+
def delete_tail(self): # delete from tail
- tamp = self.Head
- if self.Head != None:
- if(self.Head.next is None): # if Head is the only Node in the Linked List
- self.Head = None
- else:
- while tamp.next.next is not None: # find the 2nd last element
- tamp = tamp.next
- tamp.next, tamp = None, tamp.next #(2nd last element).next = None and tamp = last element
- return tamp
-
- def isEmpty(self):
- return self.Head is None # Return if Head is none
+ return self.delete_nth(len(self) - 1)
+
+ def delete_nth(self, index: int = 0):
+ if not 0 <= index <= len(self) - 1: # test if index is valid
+ raise IndexError("list index out of range")
+ delete_node = self.head # default first node
+ if index == 0:
+ self.head = self.head.next
+ else:
+ temp = self.head
+ for _ in range(index - 1):
+ temp = temp.next
+ delete_node = temp.next
+ temp.next = temp.next.next
+ return delete_node.data
+
+ def is_empty(self) -> bool:
+ return self.head is None
def reverse(self):
prev = None
- current = self.Head
+ current = self.head
while current:
# Store the current node's next node.
@@ -66,36 +157,84 @@ def reverse(self):
# Make the current node the next node (to progress iteration)
current = next_node
# Return prev in order to put the head at the end
- self.Head = prev
+ self.head = prev
+
+
+def test_singly_linked_list() -> None:
+ """
+ >>> test_singly_linked_list()
+ """
+ linked_list = LinkedList()
+ assert linked_list.is_empty() is True
+ assert str(linked_list) == ""
+
+ try:
+ linked_list.delete_head()
+ assert False # This should not happen.
+ except IndexError:
+ assert True # This should happen.
+
+ try:
+ linked_list.delete_tail()
+ assert False # This should not happen.
+ except IndexError:
+ assert True # This should happen.
+
+ for i in range(10):
+ assert len(linked_list) == i
+ linked_list.insert_nth(i, i + 1)
+ assert str(linked_list) == "->".join(str(i) for i in range(1, 11))
+
+ linked_list.insert_head(0)
+ linked_list.insert_tail(11)
+ assert str(linked_list) == "->".join(str(i) for i in range(0, 12))
+
+ assert linked_list.delete_head() == 0
+ assert linked_list.delete_nth(9) == 10
+ assert linked_list.delete_tail() == 11
+ assert len(linked_list) == 9
+ assert str(linked_list) == "->".join(str(i) for i in range(1, 10))
+
+ assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True
+
+ for i in range(0, 9):
+ linked_list[i] = -i
+ assert all(linked_list[i] == -i for i in range(0, 9)) is True
+
def main():
- A = Linked_List()
- print("Inserting 1st at Head")
- a1=input()
- A.insert_head(a1)
- print("Inserting 2nd at Head")
- a2=input()
- A.insert_head(a2)
- print("\nPrint List : ")
- A.printList()
- print("\nInserting 1st at Tail")
- a3=input()
- A.insert_tail(a3)
- print("Inserting 2nd at Tail")
- a4=input()
- A.insert_tail(a4)
- print("\nPrint List : ")
- A.printList()
- print("\nDelete Head")
- A.delete_head()
- print("Delete Tail")
- A.delete_tail()
- print("\nPrint List : ")
- A.printList()
- print("\nReverse Linked List")
- A.reverse()
- print("\nPrint List : ")
- A.printList()
-
-if __name__ == '__main__':
- main()
+ from doctest import testmod
+
+ testmod()
+
+ linked_list = LinkedList()
+ linked_list.insert_head(input("Inserting 1st at head ").strip())
+ linked_list.insert_head(input("Inserting 2nd at head ").strip())
+ print("\nPrint list:")
+ linked_list.print_list()
+ linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
+ linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
+ print("\nPrint list:")
+ linked_list.print_list()
+ print("\nDelete head")
+ linked_list.delete_head()
+ print("Delete tail")
+ linked_list.delete_tail()
+ print("\nPrint list:")
+ linked_list.print_list()
+ print("\nReverse linked list")
+ linked_list.reverse()
+ print("\nPrint list:")
+ linked_list.print_list()
+ print("\nString representation of linked list:")
+ print(linked_list)
+ print("\nReading/changing Node data using indexing:")
+ print(f"Element at Position 1: {linked_list[1]}")
+ linked_list[1] = input("Enter New Value: ").strip()
+ print("New list:")
+ print(linked_list)
+ print(f"length of linked_list is : {len(linked_list)}")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/data_structures/linked_list/skip_list.py b/data_structures/linked_list/skip_list.py
new file mode 100644
index 000000000000..8f06e6193d52
--- /dev/null
+++ b/data_structures/linked_list/skip_list.py
@@ -0,0 +1,447 @@
+"""
+Based on "Skip Lists: A Probabilistic Alternative to Balanced Trees" by William Pugh
+https://epaperpress.com/sortsearch/download/skiplist.pdf
+"""
+
+from __future__ import annotations
+
+from random import random
+from typing import Generic, Optional, TypeVar
+
+KT = TypeVar("KT")
+VT = TypeVar("VT")
+
+
+class Node(Generic[KT, VT]):
+ def __init__(self, key: KT, value: VT):
+ self.key = key
+ self.value = value
+ self.forward: list[Node[KT, VT]] = []
+
+ def __repr__(self) -> str:
+ """
+ :return: Visual representation of Node
+
+ >>> node = Node("Key", 2)
+ >>> repr(node)
+ 'Node(Key: 2)'
+ """
+
+ return f"Node({self.key}: {self.value})"
+
+ @property
+ def level(self) -> int:
+ """
+ :return: Number of forward references
+
+ >>> node = Node("Key", 2)
+ >>> node.level
+ 0
+ >>> node.forward.append(Node("Key2", 4))
+ >>> node.level
+ 1
+ >>> node.forward.append(Node("Key3", 6))
+ >>> node.level
+ 2
+ """
+
+ return len(self.forward)
+
+
+class SkipList(Generic[KT, VT]):
+ def __init__(self, p: float = 0.5, max_level: int = 16):
+ self.head = Node("root", None)
+ self.level = 0
+ self.p = p
+ self.max_level = max_level
+
+ def __str__(self) -> str:
+ """
+ :return: Visual representation of SkipList
+
+ >>> skip_list = SkipList()
+ >>> print(skip_list)
+ SkipList(level=0)
+ >>> skip_list.insert("Key1", "Value")
+ >>> print(skip_list) # doctest: +ELLIPSIS
+ SkipList(level=...
+ [root]--...
+ [Key1]--Key1...
+ None *...
+ >>> skip_list.insert("Key2", "OtherValue")
+ >>> print(skip_list) # doctest: +ELLIPSIS
+ SkipList(level=...
+ [root]--...
+ [Key1]--Key1...
+ [Key2]--Key2...
+ None *...
+ """
+
+ items = list(self)
+
+ if len(items) == 0:
+ return f"SkipList(level={self.level})"
+
+ label_size = max((len(str(item)) for item in items), default=4)
+ label_size = max(label_size, 4) + 4
+
+ node = self.head
+ lines = []
+
+ forwards = node.forward.copy()
+ lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
+ lines.append(" " * label_size + "| " * len(forwards))
+
+ while len(node.forward) != 0:
+ node = node.forward[0]
+
+ lines.append(
+ f"[{node.key}]".ljust(label_size, "-")
+ + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
+ )
+ lines.append(" " * label_size + "| " * len(forwards))
+ forwards[: node.level] = node.forward
+
+ lines.append("None".ljust(label_size) + "* " * len(forwards))
+ return f"SkipList(level={self.level})\n" + "\n".join(lines)
+
+ def __iter__(self):
+ node = self.head
+
+ while len(node.forward) != 0:
+ yield node.forward[0].key
+ node = node.forward[0]
+
+ def random_level(self) -> int:
+ """
+ :return: Random level from [1, self.max_level] interval.
+ Higher values are less likely.
+ """
+
+ level = 1
+ while random() < self.p and level < self.max_level:
+ level += 1
+
+ return level
+
+ def _locate_node(self, key) -> tuple[Optional[Node[KT, VT]], list[Node[KT, VT]]]:
+ """
+ :param key: Searched key,
+ :return: Tuple with searched node (or None if given key is not present)
+ and list of nodes that refer (if key is present) or should refer to
+ given node.
+ """
+
+ # Nodes that refer or should refer to the output node
+ update_vector = []
+
+ node = self.head
+
+ for i in reversed(range(self.level)):
+ # i < node.level - When the node level is less than `i`, decrement `i`.
+ # node.forward[i].key < key - Jumping to node with key value higher
+ # or equal to searched key would result
+ # in skipping searched key.
+ while i < node.level and node.forward[i].key < key:
+ node = node.forward[i]
+ # Each leftmost node (relative to searched node) will potentially have to
+ # be updated.
+ update_vector.append(node)
+
+ update_vector.reverse() # Note that we were inserting values in reverse order.
+
+ # len(node.forward) != 0 - If current node doesn't contain any further
+ # references then searched key is not present.
+ # node.forward[0].key == key - Next node key should be equal to search key
+ # if key is present.
+ if len(node.forward) != 0 and node.forward[0].key == key:
+ return node.forward[0], update_vector
+ else:
+ return None, update_vector
+
+ def delete(self, key: KT):
+ """
+ :param key: Key to remove from list.
+
+ >>> skip_list = SkipList()
+ >>> skip_list.insert(2, "Two")
+ >>> skip_list.insert(1, "One")
+ >>> skip_list.insert(3, "Three")
+ >>> list(skip_list)
+ [1, 2, 3]
+ >>> skip_list.delete(2)
+ >>> list(skip_list)
+ [1, 3]
+ """
+
+ node, update_vector = self._locate_node(key)
+
+ if node is not None:
+ for i, update_node in enumerate(update_vector):
+ # Remove or replace all references to removed node.
+ if update_node.level > i and update_node.forward[i].key == key:
+ if node.level > i:
+ update_node.forward[i] = node.forward[i]
+ else:
+ update_node.forward = update_node.forward[:i]
+
+ def insert(self, key: KT, value: VT):
+ """
+ :param key: Key to insert.
+ :param value: Value associated with given key.
+
+ >>> skip_list = SkipList()
+ >>> skip_list.insert(2, "Two")
+ >>> skip_list.find(2)
+ 'Two'
+ >>> list(skip_list)
+ [2]
+ """
+
+ node, update_vector = self._locate_node(key)
+ if node is not None:
+ node.value = value
+ else:
+ level = self.random_level()
+
+ if level > self.level:
+ # After level increase we have to add additional nodes to head.
+ for i in range(self.level - 1, level):
+ update_vector.append(self.head)
+ self.level = level
+
+ new_node = Node(key, value)
+
+ for i, update_node in enumerate(update_vector[:level]):
+ # Change references to pass through new node.
+ if update_node.level > i:
+ new_node.forward.append(update_node.forward[i])
+
+ if update_node.level < i + 1:
+ update_node.forward.append(new_node)
+ else:
+ update_node.forward[i] = new_node
+
+ def find(self, key: VT) -> Optional[VT]:
+ """
+ :param key: Search key.
+ :return: Value associated with given key or None if given key is not present.
+
+ >>> skip_list = SkipList()
+ >>> skip_list.find(2)
+ >>> skip_list.insert(2, "Two")
+ >>> skip_list.find(2)
+ 'Two'
+ >>> skip_list.insert(2, "Three")
+ >>> skip_list.find(2)
+ 'Three'
+ """
+
+ node, _ = self._locate_node(key)
+
+ if node is not None:
+ return node.value
+
+ return None
+
+
+def test_insert():
+ skip_list = SkipList()
+ skip_list.insert("Key1", 3)
+ skip_list.insert("Key2", 12)
+ skip_list.insert("Key3", 41)
+ skip_list.insert("Key4", -19)
+
+ node = skip_list.head
+ all_values = {}
+ while node.level != 0:
+ node = node.forward[0]
+ all_values[node.key] = node.value
+
+ assert len(all_values) == 4
+ assert all_values["Key1"] == 3
+ assert all_values["Key2"] == 12
+ assert all_values["Key3"] == 41
+ assert all_values["Key4"] == -19
+
+
+def test_insert_overrides_existing_value():
+ skip_list = SkipList()
+ skip_list.insert("Key1", 10)
+ skip_list.insert("Key1", 12)
+
+ skip_list.insert("Key5", 7)
+ skip_list.insert("Key7", 10)
+ skip_list.insert("Key10", 5)
+
+ skip_list.insert("Key7", 7)
+ skip_list.insert("Key5", 5)
+ skip_list.insert("Key10", 10)
+
+ node = skip_list.head
+ all_values = {}
+ while node.level != 0:
+ node = node.forward[0]
+ all_values[node.key] = node.value
+
+ if len(all_values) != 4:
+ print()
+ assert len(all_values) == 4
+ assert all_values["Key1"] == 12
+ assert all_values["Key7"] == 7
+ assert all_values["Key5"] == 5
+ assert all_values["Key10"] == 10
+
+
+def test_searching_empty_list_returns_none():
+ skip_list = SkipList()
+ assert skip_list.find("Some key") is None
+
+
+def test_search():
+ skip_list = SkipList()
+
+ skip_list.insert("Key2", 20)
+ assert skip_list.find("Key2") == 20
+
+ skip_list.insert("Some Key", 10)
+ skip_list.insert("Key2", 8)
+ skip_list.insert("V", 13)
+
+ assert skip_list.find("Y") is None
+ assert skip_list.find("Key2") == 8
+ assert skip_list.find("Some Key") == 10
+ assert skip_list.find("V") == 13
+
+
+def test_deleting_item_from_empty_list_do_nothing():
+ skip_list = SkipList()
+ skip_list.delete("Some key")
+
+ assert len(skip_list.head.forward) == 0
+
+
+def test_deleted_items_are_not_founded_by_find_method():
+ skip_list = SkipList()
+
+ skip_list.insert("Key1", 12)
+ skip_list.insert("V", 13)
+ skip_list.insert("X", 14)
+ skip_list.insert("Key2", 15)
+
+ skip_list.delete("V")
+ skip_list.delete("Key2")
+
+ assert skip_list.find("V") is None
+ assert skip_list.find("Key2") is None
+
+
+def test_delete_removes_only_given_key():
+ skip_list = SkipList()
+
+ skip_list.insert("Key1", 12)
+ skip_list.insert("V", 13)
+ skip_list.insert("X", 14)
+ skip_list.insert("Key2", 15)
+
+ skip_list.delete("V")
+ assert skip_list.find("V") is None
+ assert skip_list.find("X") == 14
+ assert skip_list.find("Key1") == 12
+ assert skip_list.find("Key2") == 15
+
+ skip_list.delete("X")
+ assert skip_list.find("V") is None
+ assert skip_list.find("X") is None
+ assert skip_list.find("Key1") == 12
+ assert skip_list.find("Key2") == 15
+
+ skip_list.delete("Key1")
+ assert skip_list.find("V") is None
+ assert skip_list.find("X") is None
+ assert skip_list.find("Key1") is None
+ assert skip_list.find("Key2") == 15
+
+ skip_list.delete("Key2")
+ assert skip_list.find("V") is None
+ assert skip_list.find("X") is None
+ assert skip_list.find("Key1") is None
+ assert skip_list.find("Key2") is None
+
+
+def test_delete_doesnt_leave_dead_nodes():
+ skip_list = SkipList()
+
+ skip_list.insert("Key1", 12)
+ skip_list.insert("V", 13)
+ skip_list.insert("X", 142)
+ skip_list.insert("Key2", 15)
+
+ skip_list.delete("X")
+
+ def traverse_keys(node):
+ yield node.key
+ for forward_node in node.forward:
+ yield from traverse_keys(forward_node)
+
+ assert len(set(traverse_keys(skip_list.head))) == 4
+
+
+def test_iter_always_yields_sorted_values():
+ def is_sorted(lst):
+ for item, next_item in zip(lst, lst[1:]):
+ if next_item < item:
+ return False
+ return True
+
+ skip_list = SkipList()
+ for i in range(10):
+ skip_list.insert(i, i)
+ assert is_sorted(list(skip_list))
+ skip_list.delete(5)
+ skip_list.delete(8)
+ skip_list.delete(2)
+ assert is_sorted(list(skip_list))
+ skip_list.insert(-12, -12)
+ skip_list.insert(77, 77)
+ assert is_sorted(list(skip_list))
+
+
+def pytests():
+ for i in range(100):
+ # Repeat test 100 times due to the probabilistic nature of skip list
+ # random values == random bugs
+ test_insert()
+ test_insert_overrides_existing_value()
+
+ test_searching_empty_list_returns_none()
+ test_search()
+
+ test_deleting_item_from_empty_list_do_nothing()
+ test_deleted_items_are_not_founded_by_find_method()
+ test_delete_removes_only_given_key()
+ test_delete_doesnt_leave_dead_nodes()
+
+ test_iter_always_yields_sorted_values()
+
+
+def main():
+ """
+ >>> pytests()
+ """
+
+ skip_list = SkipList()
+ skip_list.insert(2, "2")
+ skip_list.insert(4, "4")
+ skip_list.insert(6, "4")
+ skip_list.insert(4, "5")
+ skip_list.insert(8, "4")
+ skip_list.insert(9, "4")
+
+ skip_list.delete(4)
+
+ print(skip_list)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/data_structures/linked_list/swapNodes.py b/data_structures/linked_list/swapNodes.py
deleted file mode 100644
index ce2543bc46d8..000000000000
--- a/data_structures/linked_list/swapNodes.py
+++ /dev/null
@@ -1,75 +0,0 @@
-class Node:
- def __init__(self, data):
- self.data = data;
- self.next = None
-
-
-class Linkedlist:
- def __init__(self):
- self.head = None
-
- def print_list(self):
- temp = self.head
- while temp is not None:
- print(temp.data)
- temp = temp.next
-
-# adding nodes
- def push(self, new_data):
- new_node = Node(new_data)
- new_node.next = self.head
- self.head = new_node
-
-# swapping nodes
- def swapNodes(self, d1, d2):
- prevD1 = None
- prevD2 = None
- if d1 == d2:
- return
- else:
- # find d1
- D1 = self.head
- while D1 is not None and D1.data != d1:
- prevD1 = D1
- D1 = D1.next
- # find d2
- D2 = self.head
- while D2 is not None and D2.data != d2:
- prevD2 = D2
- D2 = D2.next
- if D1 is None and D2 is None:
- return
- # if D1 is head
- if prevD1 is not None:
- prevD1.next = D2
- else:
- self.head = D2
- # if D2 is head
- if prevD2 is not None:
- prevD2.next = D1
- else:
- self.head = D1
- temp = D1.next
- D1.next = D2.next
- D2.next = temp
-
-# swapping code ends here
-
-
-
-if __name__ == '__main__':
- list = Linkedlist()
- list.push(5)
- list.push(4)
- list.push(3)
- list.push(2)
- list.push(1)
-
- list.print_list()
-
- list.swapNodes(1, 4)
- print("After swapping")
- list.print_list()
-
-
-
diff --git a/data_structures/linked_list/swap_nodes.py b/data_structures/linked_list/swap_nodes.py
new file mode 100644
index 000000000000..3f825756b3d2
--- /dev/null
+++ b/data_structures/linked_list/swap_nodes.py
@@ -0,0 +1,55 @@
+from typing import Any
+
+
+class Node:
+ def __init__(self, data: Any):
+ self.data = data
+ self.next = None
+
+
+class LinkedList:
+ def __init__(self):
+ self.head = None
+
+ def print_list(self):
+ temp = self.head
+ while temp is not None:
+ print(temp.data, end=" ")
+ temp = temp.next
+ print()
+
+ # adding nodes
+ def push(self, new_data: Any):
+ new_node = Node(new_data)
+ new_node.next = self.head
+ self.head = new_node
+
+ # swapping nodes
+ def swap_nodes(self, node_data_1, node_data_2):
+ if node_data_1 == node_data_2:
+ return
+ else:
+ node_1 = self.head
+ while node_1 is not None and node_1.data != node_data_1:
+ node_1 = node_1.next
+
+ node_2 = self.head
+ while node_2 is not None and node_2.data != node_data_2:
+ node_2 = node_2.next
+
+ if node_1 is None or node_2 is None:
+ return
+
+ node_1.data, node_2.data = node_2.data, node_1.data
+
+
+if __name__ == "__main__":
+ ll = LinkedList()
+ for i in range(5, 0, -1):
+ ll.push(i)
+
+ ll.print_list()
+
+ ll.swap_nodes(1, 4)
+ print("After swapping")
+ ll.print_list()
diff --git a/data_structures/queue/circular_queue.py b/data_structures/queue/circular_queue.py
new file mode 100644
index 000000000000..93a6ef805c7c
--- /dev/null
+++ b/data_structures/queue/circular_queue.py
@@ -0,0 +1,94 @@
+# Implementation of Circular Queue (using Python lists)
+
+
+class CircularQueue:
+ """Circular FIFO queue with a fixed capacity"""
+
+ def __init__(self, n: int):
+ self.n = n
+ self.array = [None] * self.n
+ self.front = 0 # index of the first element
+ self.rear = 0
+ self.size = 0
+
+ def __len__(self) -> int:
+ """
+ >>> cq = CircularQueue(5)
+ >>> len(cq)
+ 0
+ >>> cq.enqueue("A") # doctest: +ELLIPSIS
+ <data_structures.queue.circular_queue.CircularQueue object at ...
+ >>> len(cq)
+ 1
+ """
+ return self.size
+
+ def is_empty(self) -> bool:
+ """
+ >>> cq = CircularQueue(5)
+ >>> cq.is_empty()
+ True
+ >>> cq.enqueue("A").is_empty()
+ False
+ """
+ return self.size == 0
+
+ def first(self):
+ """
+ >>> cq = CircularQueue(5)
+ >>> cq.first()
+ False
+ >>> cq.enqueue("A").first()
+ 'A'
+ """
+ return False if self.is_empty() else self.array[self.front]
+
+ def enqueue(self, data):
+ """
+ This function inserts an element in the queue using self.rear value as an index
+ >>> cq = CircularQueue(5)
+ >>> cq.enqueue("A") # doctest: +ELLIPSIS
+ <data_structures.queue.circular_queue.CircularQueue object at ...
+ >>> (cq.size, cq.first())
+ (1, 'A')
+ >>> cq.enqueue("B") # doctest: +ELLIPSIS
+ <data_structures.queue.circular_queue.CircularQueue object at ...
+ >>> (cq.size, cq.first())
+ (2, 'A')
+ """
+ if self.size >= self.n:
+ raise Exception("QUEUE IS FULL")
+
+ self.array[self.rear] = data
+ self.rear = (self.rear + 1) % self.n
+ self.size += 1
+ return self
+
+ def dequeue(self):
+ """
+ This function removes an element from the queue using self.front value as an
+ index
+ >>> cq = CircularQueue(5)
+ >>> cq.dequeue()
+ Traceback (most recent call last):
+ ...
+ Exception: UNDERFLOW
+ >>> cq.enqueue("A").enqueue("B").dequeue()
+ 'A'
+ >>> (cq.size, cq.first())
+ (1, 'B')
+ >>> cq.dequeue()
+ 'B'
+ >>> cq.dequeue()
+ Traceback (most recent call last):
+ ...
+ Exception: UNDERFLOW
+ """
+ if self.size == 0:
+ raise Exception("UNDERFLOW")
+
+ temp = self.array[self.front]
+ self.array[self.front] = None
+ self.front = (self.front + 1) % self.n
+ self.size -= 1
+ return temp
diff --git a/data_structures/queue/deqeue.py b/data_structures/queue/deqeue.py
deleted file mode 100644
index fdee64eb6ae0..000000000000
--- a/data_structures/queue/deqeue.py
+++ /dev/null
@@ -1,40 +0,0 @@
-from __future__ import print_function
-# Python code to demonstrate working of
-# extend(), extendleft(), rotate(), reverse()
-
-# importing "collections" for deque operations
-import collections
-
-# initializing deque
-de = collections.deque([1, 2, 3,])
-
-# using extend() to add numbers to right end
-# adds 4,5,6 to right end
-de.extend([4,5,6])
-
-# printing modified deque
-print ("The deque after extending deque at end is : ")
-print (de)
-
-# using extendleft() to add numbers to left end
-# adds 7,8,9 to right end
-de.extendleft([7,8,9])
-
-# printing modified deque
-print ("The deque after extending deque at beginning is : ")
-print (de)
-
-# using rotate() to rotate the deque
-# rotates by 3 to left
-de.rotate(-3)
-
-# printing modified deque
-print ("The deque after rotating deque is : ")
-print (de)
-
-# using reverse() to reverse the deque
-de.reverse()
-
-# printing modified deque
-print ("The deque after reversing deque is : ")
-print (de)
diff --git a/data_structures/queue/double_ended_queue.py b/data_structures/queue/double_ended_queue.py
new file mode 100644
index 000000000000..dd003b7c98ac
--- /dev/null
+++ b/data_structures/queue/double_ended_queue.py
@@ -0,0 +1,57 @@
+# Python code to demonstrate working of
+# extend(), extendleft(), rotate(), reverse()
+
+# importing "collections" for deque operations
+import collections
+
+# initializing deque
+de = collections.deque([1, 2, 3])
+
+# using extend() to add numbers to right end
+# adds 4,5,6 to right end
+de.extend([4, 5, 6])
+
+# printing modified deque
+print("The deque after extending deque at end is : ")
+print(de)
+
+# using extendleft() to add numbers to left end
+# adds 7,8,9 to right end
+de.extendleft([7, 8, 9])
+
+# printing modified deque
+print("The deque after extending deque at beginning is : ")
+print(de)
+
+# using rotate() to rotate the deque
+# rotates by 3 to left
+de.rotate(-3)
+
+# printing modified deque
+print("The deque after rotating deque is : ")
+print(de)
+
+# using reverse() to reverse the deque
+de.reverse()
+
+# printing modified deque
+print("The deque after reversing deque is : ")
+print(de)
+
+# get right-end value and eliminate
+startValue = de.pop()
+
+print("The deque after popping value at end is : ")
+print(de)
+
+# get left-end value and eliminate
+endValue = de.popleft()
+
+print("The deque after popping value at start is : ")
+print(de)
+
+# eliminate element searched by value
+de.remove(5)
+
+print("The deque after eliminating element searched by value : ")
+print(de)
diff --git a/data_structures/queue/linked_queue.py b/data_structures/queue/linked_queue.py
new file mode 100644
index 000000000000..8526ad311ed0
--- /dev/null
+++ b/data_structures/queue/linked_queue.py
@@ -0,0 +1,151 @@
+""" A Queue using a linked list like structure """
+from typing import Any
+
+
+class Node:
+ def __init__(self, data: Any) -> None:
+ self.data = data
+ self.next = None
+
+ def __str__(self) -> str:
+ return f"{self.data}"
+
+
+class LinkedQueue:
+ """
+ >>> queue = LinkedQueue()
+ >>> queue.is_empty()
+ True
+ >>> queue.put(5)
+ >>> queue.put(9)
+ >>> queue.put('python')
+ >>> queue.is_empty()
+ False
+ >>> queue.get()
+ 5
+ >>> queue.put('algorithms')
+ >>> queue.get()
+ 9
+ >>> queue.get()
+ 'python'
+ >>> queue.get()
+ 'algorithms'
+ >>> queue.is_empty()
+ True
+ >>> queue.get()
+ Traceback (most recent call last):
+ ...
+ IndexError: dequeue from empty queue
+ """
+
+ def __init__(self) -> None:
+ self.front = self.rear = None
+
+ def __iter__(self):
+ node = self.front
+ while node:
+ yield node.data
+ node = node.next
+
+ def __len__(self) -> int:
+ """
+ >>> queue = LinkedQueue()
+ >>> for i in range(1, 6):
+ ... queue.put(i)
+ >>> len(queue)
+ 5
+ >>> for i in range(1, 6):
+ ... assert len(queue) == 6 - i
+ ... _ = queue.get()
+ >>> len(queue)
+ 0
+ """
+ return len(tuple(iter(self)))
+
+ def __str__(self) -> str:
+ """
+ >>> queue = LinkedQueue()
+ >>> for i in range(1, 4):
+ ... queue.put(i)
+ >>> queue.put("Python")
+ >>> queue.put(3.14)
+ >>> queue.put(True)
+ >>> str(queue)
+ '1 <- 2 <- 3 <- Python <- 3.14 <- True'
+ """
+ return " <- ".join(str(item) for item in self)
+
+ def is_empty(self) -> bool:
+ """
+ >>> queue = LinkedQueue()
+ >>> queue.is_empty()
+ True
+ >>> for i in range(1, 6):
+ ... queue.put(i)
+ >>> queue.is_empty()
+ False
+ """
+ return len(self) == 0
+
+ def put(self, item) -> None:
+ """
+ >>> queue = LinkedQueue()
+ >>> queue.get()
+ Traceback (most recent call last):
+ ...
+ IndexError: dequeue from empty queue
+ >>> for i in range(1, 6):
+ ... queue.put(i)
+ >>> str(queue)
+ '1 <- 2 <- 3 <- 4 <- 5'
+ """
+ node = Node(item)
+ if self.is_empty():
+ self.front = self.rear = node
+ else:
+ assert isinstance(self.rear, Node)
+ self.rear.next = node
+ self.rear = node
+
+ def get(self) -> Any:
+ """
+ >>> queue = LinkedQueue()
+ >>> queue.get()
+ Traceback (most recent call last):
+ ...
+ IndexError: dequeue from empty queue
+ >>> queue = LinkedQueue()
+ >>> for i in range(1, 6):
+ ... queue.put(i)
+ >>> for i in range(1, 6):
+ ... assert queue.get() == i
+ >>> len(queue)
+ 0
+ """
+ if self.is_empty():
+ raise IndexError("dequeue from empty queue")
+ assert isinstance(self.front, Node)
+ node = self.front
+ self.front = self.front.next
+ if self.front is None:
+ self.rear = None
+ return node.data
+
+ def clear(self) -> None:
+ """
+ >>> queue = LinkedQueue()
+ >>> for i in range(1, 6):
+ ... queue.put(i)
+ >>> queue.clear()
+ >>> len(queue)
+ 0
+ >>> str(queue)
+ ''
+ """
+ self.front = self.rear = None
+
+
+if __name__ == "__main__":
+ from doctest import testmod
+
+ testmod()
diff --git a/data_structures/queue/priority_queue_using_list.py b/data_structures/queue/priority_queue_using_list.py
new file mode 100644
index 000000000000..c5cf26433fff
--- /dev/null
+++ b/data_structures/queue/priority_queue_using_list.py
@@ -0,0 +1,232 @@
+"""
+Pure Python implementations of a Fixed Priority Queue and an Element Priority Queue
+using Python lists.
+"""
+
+
+class OverFlowError(Exception):
+ pass
+
+
+class UnderFlowError(Exception):
+ pass
+
+
+class FixedPriorityQueue:
+ """
+ Tasks can be added to a Priority Queue at any time and in any order but when Tasks
+ are removed then the Task with the highest priority is removed in FIFO order. In
+ code we will use three levels of priority with priority zero Tasks being the most
+ urgent (high priority) and priority 2 tasks being the least urgent.
+
+ Examples
+ >>> fpq = FixedPriorityQueue()
+ >>> fpq.enqueue(0, 10)
+ >>> fpq.enqueue(1, 70)
+ >>> fpq.enqueue(0, 100)
+ >>> fpq.enqueue(2, 1)
+ >>> fpq.enqueue(2, 5)
+ >>> fpq.enqueue(1, 7)
+ >>> fpq.enqueue(2, 4)
+ >>> fpq.enqueue(1, 64)
+ >>> fpq.enqueue(0, 128)
+ >>> print(fpq)
+ Priority 0: [10, 100, 128]
+ Priority 1: [70, 7, 64]
+ Priority 2: [1, 5, 4]
+ >>> fpq.dequeue()
+ 10
+ >>> fpq.dequeue()
+ 100
+ >>> fpq.dequeue()
+ 128
+ >>> fpq.dequeue()
+ 70
+ >>> fpq.dequeue()
+ 7
+ >>> print(fpq)
+ Priority 0: []
+ Priority 1: [64]
+ Priority 2: [1, 5, 4]
+ >>> fpq.dequeue()
+ 64
+ >>> fpq.dequeue()
+ 1
+ >>> fpq.dequeue()
+ 5
+ >>> fpq.dequeue()
+ 4
+ >>> fpq.dequeue()
+ Traceback (most recent call last):
+ ...
+ data_structures.queue.priority_queue_using_list.UnderFlowError: All queues are empty
+ >>> print(fpq)
+ Priority 0: []
+ Priority 1: []
+ Priority 2: []
+ """
+
+ def __init__(self):
+ self.queues = [
+ [],
+ [],
+ [],
+ ]
+
+ def enqueue(self, priority: int, data: int) -> None:
+ """
+ Add an element to a queue based on its priority.
+ If the priority is invalid, a ValueError is raised.
+ If the queue is full, an OverflowError is raised.
+ """
+ try:
+ if len(self.queues[priority]) >= 100:
+ raise OverflowError("Maximum queue size is 100")
+ self.queues[priority].append(data)
+ except IndexError:
+ raise ValueError("Valid priorities are 0, 1, and 2")
+
+ def dequeue(self) -> int:
+ """
+ Return the highest priority element in FIFO order.
+ If the queue is empty then an underflow exception is raised.
+ """
+ for queue in self.queues:
+ if queue:
+ return queue.pop(0)
+ raise UnderFlowError("All queues are empty")
+
+ def __str__(self) -> str:
+ return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))
+
+
+class ElementPriorityQueue:
+ """
+ Element Priority Queue is the same as Fixed Priority Queue except that the value of
+ the element itself is the priority. The rules for priorities are the same as the
+ Fixed Priority Queue.
+
+ >>> epq = ElementPriorityQueue()
+ >>> epq.enqueue(10)
+ >>> epq.enqueue(70)
+ >>> epq.enqueue(4)
+ >>> epq.enqueue(1)
+ >>> epq.enqueue(5)
+ >>> epq.enqueue(7)
+ >>> epq.enqueue(4)
+ >>> epq.enqueue(64)
+ >>> epq.enqueue(128)
+ >>> print(epq)
+ [10, 70, 4, 1, 5, 7, 4, 64, 128]
+ >>> epq.dequeue()
+ 1
+ >>> epq.dequeue()
+ 4
+ >>> epq.dequeue()
+ 4
+ >>> epq.dequeue()
+ 5
+ >>> epq.dequeue()
+ 7
+ >>> epq.dequeue()
+ 10
+ >>> print(epq)
+ [70, 64, 128]
+ >>> epq.dequeue()
+ 64
+ >>> epq.dequeue()
+ 70
+ >>> epq.dequeue()
+ 128
+ >>> epq.dequeue()
+ Traceback (most recent call last):
+ ...
+ data_structures.queue.priority_queue_using_list.UnderFlowError: The queue is empty
+ >>> print(epq)
+ []
+ """
+
+ def __init__(self):
+ self.queue = []
+
+ def enqueue(self, data: int) -> None:
+ """
+ This function adds the element to the queue.
+ If the queue is full, an OverFlowError is raised.
+ """
+ if len(self.queue) == 100:
+ raise OverFlowError("Maximum queue size is 100")
+ self.queue.append(data)
+
+ def dequeue(self) -> int:
+ """
+ Return the highest priority element in FIFO order.
+ If the queue is empty then an underflow exception is raised.
+ """
+ if not self.queue:
+ raise UnderFlowError("The queue is empty")
+ else:
+ data = min(self.queue)
+ self.queue.remove(data)
+ return data
+
+ def __str__(self) -> str:
+ """
+ Return a string representation of the Element Priority Queue
+ """
+ return str(self.queue)
+
+
+def fixed_priority_queue():
+ fpq = FixedPriorityQueue()
+ fpq.enqueue(0, 10)
+ fpq.enqueue(1, 70)
+ fpq.enqueue(0, 100)
+ fpq.enqueue(2, 1)
+ fpq.enqueue(2, 5)
+ fpq.enqueue(1, 7)
+ fpq.enqueue(2, 4)
+ fpq.enqueue(1, 64)
+ fpq.enqueue(0, 128)
+ print(fpq)
+ print(fpq.dequeue())
+ print(fpq.dequeue())
+ print(fpq.dequeue())
+ print(fpq.dequeue())
+ print(fpq.dequeue())
+ print(fpq)
+ print(fpq.dequeue())
+ print(fpq.dequeue())
+ print(fpq.dequeue())
+ print(fpq.dequeue())
+ print(fpq.dequeue())
+
+
+def element_priority_queue():
+ epq = ElementPriorityQueue()
+ epq.enqueue(10)
+ epq.enqueue(70)
+ epq.enqueue(100)
+ epq.enqueue(1)
+ epq.enqueue(5)
+ epq.enqueue(7)
+ epq.enqueue(4)
+ epq.enqueue(64)
+ epq.enqueue(128)
+ print(epq)
+ print(epq.dequeue())
+ print(epq.dequeue())
+ print(epq.dequeue())
+ print(epq.dequeue())
+ print(epq.dequeue())
+ print(epq)
+ print(epq.dequeue())
+ print(epq.dequeue())
+ print(epq.dequeue())
+ print(epq.dequeue())
+ print(epq.dequeue())
+
+
+if __name__ == "__main__":
+ fixed_priority_queue()
+ element_priority_queue()
diff --git a/data_structures/queue/queue_on_list.py b/data_structures/queue/queue_on_list.py
index c8d0b41de5d5..485cf0b6f7a3 100644
--- a/data_structures/queue/queue_on_list.py
+++ b/data_structures/queue/queue_on_list.py
@@ -1,45 +1,52 @@
-"""Queue represented by a python list"""
-class Queue():
+"""Queue represented by a Python list"""
+
+
+class Queue:
def __init__(self):
self.entries = []
self.length = 0
- self.front=0
+ self.front = 0
def __str__(self):
- printed = '<' + str(self.entries)[1:-1] + '>'
+ printed = "<" + str(self.entries)[1:-1] + ">"
return printed
"""Enqueues {@code item}
@param item
item to enqueue"""
+
def put(self, item):
self.entries.append(item)
self.length = self.length + 1
-
"""Dequeues {@code item}
@requirement: |self.length| > 0
@return dequeued
item that was dequeued"""
+
def get(self):
self.length = self.length - 1
dequeued = self.entries[self.front]
- self.front-=1
- self.entries = self.entries[self.front:]
+ # self.front-=1
+ # self.entries = self.entries[self.front:]
+ self.entries = self.entries[1:]
return dequeued
"""Rotates the queue {@code rotation} times
@param rotation
number of times to rotate queue"""
+
def rotate(self, rotation):
for i in range(rotation):
self.put(self.get())
"""Enqueues {@code item}
@return item at front of self.entries"""
- def front(self):
+
+ def get_front(self):
return self.entries[0]
"""Returns the length of this.entries"""
+
def size(self):
return self.length
diff --git a/data_structures/queue/queue_on_pseudo_stack.py b/data_structures/queue/queue_on_pseudo_stack.py
index b69fbcc988f7..7fa2fb2566af 100644
--- a/data_structures/queue/queue_on_pseudo_stack.py
+++ b/data_structures/queue/queue_on_pseudo_stack.py
@@ -1,16 +1,19 @@
"""Queue represented by a pseudo stack (represented by a list with pop and append)"""
-class Queue():
+
+
+class Queue:
def __init__(self):
self.stack = []
self.length = 0
def __str__(self):
- printed = '<' + str(self.stack)[1:-1] + '>'
+ printed = "<" + str(self.stack)[1:-1] + ">"
return printed
"""Enqueues {@code item}
@param item
item to enqueue"""
+
def put(self, item):
self.stack.append(item)
self.length = self.length + 1
@@ -19,17 +22,19 @@ def put(self, item):
@requirement: |self.length| > 0
@return dequeued
item that was dequeued"""
+
def get(self):
self.rotate(1)
- dequeued = self.stack[self.length-1]
+ dequeued = self.stack[self.length - 1]
self.stack = self.stack[:-1]
- self.rotate(self.length-1)
- self.length = self.length -1
+ self.rotate(self.length - 1)
+ self.length = self.length - 1
return dequeued
"""Rotates the queue {@code rotation} times
@param rotation
number of times to rotate queue"""
+
def rotate(self, rotation):
for i in range(rotation):
temp = self.stack[0]
@@ -39,12 +44,14 @@ def rotate(self, rotation):
"""Reports item at the front of self
@return item at front of self.stack"""
+
def front(self):
front = self.get()
self.put(front)
- self.rotate(self.length-1)
+ self.rotate(self.length - 1)
return front
"""Returns the length of this.stack"""
+
def size(self):
return self.length
diff --git a/data_structures/stacks/__init__.py b/data_structures/stacks/__init__.py
index f7e92ae2d269..e69de29bb2d1 100644
--- a/data_structures/stacks/__init__.py
+++ b/data_structures/stacks/__init__.py
@@ -1,23 +0,0 @@
-class Stack:
-
- def __init__(self):
- self.stack = []
- self.top = 0
-
- def is_empty(self):
- return (self.top == 0)
-
- def push(self, item):
- if self.top < len(self.stack):
- self.stack[self.top] = item
- else:
- self.stack.append(item)
-
- self.top += 1
-
- def pop(self):
- if self.is_empty():
- return None
- else:
- self.top -= 1
- return self.stack[self.top]
diff --git a/data_structures/stacks/balanced_parentheses.py b/data_structures/stacks/balanced_parentheses.py
index 3229d19c8621..674f7ea436ed 100644
--- a/data_structures/stacks/balanced_parentheses.py
+++ b/data_structures/stacks/balanced_parentheses.py
@@ -1,25 +1,37 @@
-from __future__ import print_function
-from __future__ import absolute_import
-from stack import Stack
+from .stack import Stack
-__author__ = 'Omkar Pathak'
-
-def balanced_parentheses(parentheses):
- """ Use a stack to check if a string of parentheses is balanced."""
- stack = Stack(len(parentheses))
- for parenthesis in parentheses:
- if parenthesis == '(':
- stack.push(parenthesis)
- elif parenthesis == ')':
- if stack.is_empty():
+def balanced_parentheses(parentheses: str) -> bool:
+ """Use a stack to check if a string of parentheses is balanced.
+ >>> balanced_parentheses("([]{})")
+ True
+ >>> balanced_parentheses("[()]{}{[()()]()}")
+ True
+ >>> balanced_parentheses("[(])")
+ False
+ >>> balanced_parentheses("1+2*3-4")
+ True
+ >>> balanced_parentheses("")
+ True
+ """
+ stack = Stack()
+ bracket_pairs = {"(": ")", "[": "]", "{": "}"}
+ for bracket in parentheses:
+ if bracket in bracket_pairs:
+ stack.push(bracket)
+ elif bracket in (")", "]", "}"):
+ if stack.is_empty() or bracket_pairs[stack.pop()] != bracket:
return False
- stack.pop()
return stack.is_empty()
-if __name__ == '__main__':
- examples = ['((()))', '((())', '(()))']
- print('Balanced parentheses demonstration:\n')
+if __name__ == "__main__":
+ from doctest import testmod
+
+ testmod()
+
+ examples = ["((()))", "((())", "(()))"]
+ print("Balanced parentheses demonstration:\n")
for example in examples:
- print(example + ': ' + str(balanced_parentheses(example)))
+ not_str = "" if balanced_parentheses(example) else "not "
+ print(f"{example} is {not_str}balanced")
diff --git a/data_structures/stacks/dijkstras_two_stack_algorithm.py b/data_structures/stacks/dijkstras_two_stack_algorithm.py
new file mode 100644
index 000000000000..8b4668f9f839
--- /dev/null
+++ b/data_structures/stacks/dijkstras_two_stack_algorithm.py
@@ -0,0 +1,83 @@
+"""
+Author: Alexander Joslin
+GitHub: github.com/echoaj
+
+Explanation: https://medium.com/@haleesammar/implemented-in-js-dijkstras-2-stack-
+ algorithm-for-evaluating-mathematical-expressions-fc0837dae1ea
+
+We can use Dijkstra's two stack algorithm to solve an equation
+such as: (5 + ((4 * 2) * (2 + 3)))
+
+THESE ARE THE ALGORITHM'S RULES:
+RULE 1: Scan the expression from left to right. When an operand is encountered,
+ push it onto the operand stack.
+
+RULE 2: When an operator is encountered in the expression,
+ push it onto the operator stack.
+
+RULE 3: When a left parenthesis is encountered in the expression, ignore it.
+
+RULE 4: When a right parenthesis is encountered in the expression,
+ pop an operator off the operator stack. The two operands it must
+ operate on must be the last two operands pushed onto the operand stack.
+ We therefore pop the operand stack twice, perform the operation,
+ and push the result back onto the operand stack so it will be available
+ for use as an operand of the next operator popped off the operator stack.
+
+RULE 5: When the entire infix expression has been scanned, the value left on
+ the operand stack represents the value of the expression.
+
+NOTE: It only works with whole numbers.
+"""
+__author__ = "Alexander Joslin"
+
+import operator as op
+
+from .stack import Stack
+
+
+def dijkstras_two_stack_algorithm(equation: str) -> int:
+ """
+ DocTests
+ >>> dijkstras_two_stack_algorithm("(5 + 3)")
+ 8
+ >>> dijkstras_two_stack_algorithm("((9 - (2 + 9)) + (8 - 1))")
+ 5
+ >>> dijkstras_two_stack_algorithm("((((3 - 2) - (2 + 3)) + (2 - 4)) + 3)")
+ -3
+
+ :param equation: a string
+ :return: result: an integer
+ """
+ operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
+
+ operand_stack = Stack()
+ operator_stack = Stack()
+
+ for i in equation:
+ if i.isdigit():
+ # RULE 1
+ operand_stack.push(int(i))
+ elif i in operators:
+ # RULE 2
+ operator_stack.push(i)
+ elif i == ")":
+ # RULE 4
+ opr = operator_stack.peek()
+ operator_stack.pop()
+ num1 = operand_stack.peek()
+ operand_stack.pop()
+ num2 = operand_stack.peek()
+ operand_stack.pop()
+
+ total = operators[opr](num2, num1)
+ operand_stack.push(total)
+
+ # RULE 5
+ return operand_stack.peek()
+
+
+if __name__ == "__main__":
+ equation = "(5 + ((4 * 2) * (2 + 3)))"
+ # answer = 45
+ print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
diff --git a/data_structures/stacks/evaluate_postfix_notations.py b/data_structures/stacks/evaluate_postfix_notations.py
new file mode 100644
index 000000000000..a03cb43bb020
--- /dev/null
+++ b/data_structures/stacks/evaluate_postfix_notations.py
@@ -0,0 +1,49 @@
+"""
+The Reverse Polish Notation, also known as Polish postfix notation
+or simply postfix notation.
+https://en.wikipedia.org/wiki/Reverse_Polish_notation
+Classic examples of simple stack implementations
+Valid operators are +, -, *, /.
+Each operand may be an integer or another expression.
+"""
+
+
+def evaluate_postfix(postfix_notation: list) -> int:
+ """
+ >>> evaluate_postfix(["2", "1", "+", "3", "*"])
+ 9
+ >>> evaluate_postfix(["4", "13", "5", "/", "+"])
+ 6
+ >>> evaluate_postfix([])
+ 0
+ """
+ if not postfix_notation:
+ return 0
+
+ operations = {"+", "-", "*", "/"}
+ stack = []
+
+ for token in postfix_notation:
+ if token in operations:
+ b, a = stack.pop(), stack.pop()
+ if token == "+":
+ stack.append(a + b)
+ elif token == "-":
+ stack.append(a - b)
+ elif token == "*":
+ stack.append(a * b)
+ else:
+ if a * b < 0 and a % b != 0:
+ stack.append(a // b + 1)
+ else:
+ stack.append(a // b)
+ else:
+ stack.append(int(token))
+
+ return stack.pop()
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/data_structures/stacks/infix_to_postfix_conversion.py b/data_structures/stacks/infix_to_postfix_conversion.py
index 75211fed258d..dedba8479ac8 100644
--- a/data_structures/stacks/infix_to_postfix_conversion.py
+++ b/data_structures/stacks/infix_to_postfix_conversion.py
@@ -1,64 +1,69 @@
-from __future__ import print_function
-from __future__ import absolute_import
-import string
+"""
+https://en.wikipedia.org/wiki/Infix_notation
+https://en.wikipedia.org/wiki/Reverse_Polish_notation
+https://en.wikipedia.org/wiki/Shunting-yard_algorithm
+"""
-from .Stack import Stack
+from .balanced_parentheses import balanced_parentheses
+from .stack import Stack
-__author__ = 'Omkar Pathak'
-
-def is_operand(char):
- return char in string.ascii_letters or char in string.digits
-
-
-def precedence(char):
- """ Return integer value representing an operator's precedence, or
+def precedence(char: str) -> int:
+ """
+ Return integer value representing an operator's precedence, or
order of operation.
-
https://en.wikipedia.org/wiki/Order_of_operations
"""
- dictionary = {'+': 1, '-': 1,
- '*': 2, '/': 2,
- '^': 3}
- return dictionary.get(char, -1)
-
+ return {"+": 1, "-": 1, "*": 2, "/": 2, "^": 3}.get(char, -1)
-def infix_to_postfix(expression):
- """ Convert infix notation to postfix notation using the Shunting-yard
- algorithm.
- https://en.wikipedia.org/wiki/Shunting-yard_algorithm
- https://en.wikipedia.org/wiki/Infix_notation
- https://en.wikipedia.org/wiki/Reverse_Polish_notation
+def infix_to_postfix(expression_str: str) -> str:
"""
- stack = Stack(len(expression))
+ >>> infix_to_postfix("(1*(2+3)+4))")
+ Traceback (most recent call last):
+ ...
+ ValueError: Mismatched parentheses
+ >>> infix_to_postfix("")
+ ''
+ >>> infix_to_postfix("3+2")
+ '3 2 +'
+ >>> infix_to_postfix("(3+4)*5-6")
+ '3 4 + 5 * 6 -'
+ >>> infix_to_postfix("(1+2)*3/4-5")
+ '1 2 + 3 * 4 / 5 -'
+ >>> infix_to_postfix("a+b*c+(d*e+f)*g")
+ 'a b c * + d e * f + g * +'
+ >>> infix_to_postfix("x^y/(5*z)+2")
+ 'x y ^ 5 z * / 2 +'
+ """
+ if not balanced_parentheses(expression_str):
+ raise ValueError("Mismatched parentheses")
+ stack = Stack()
postfix = []
- for char in expression:
- if is_operand(char):
+ for char in expression_str:
+ if char.isalpha() or char.isdigit():
postfix.append(char)
- elif char not in {'(', ')'}:
- while (not stack.is_empty()
- and precedence(char) <= precedence(stack.peek())):
- postfix.append(stack.pop())
- stack.push(char)
- elif char == '(':
+ elif char == "(":
stack.push(char)
- elif char == ')':
- while not stack.is_empty() and stack.peek() != '(':
+ elif char == ")":
+ while not stack.is_empty() and stack.peek() != "(":
postfix.append(stack.pop())
- # Pop '(' from stack. If there is no '(', there is a mismatched
- # parentheses.
- if stack.peek() != '(':
- raise ValueError('Mismatched parentheses')
stack.pop()
+ else:
+ while not stack.is_empty() and precedence(char) <= precedence(stack.peek()):
+ postfix.append(stack.pop())
+ stack.push(char)
while not stack.is_empty():
postfix.append(stack.pop())
- return ' '.join(postfix)
+ return " ".join(postfix)
+
+if __name__ == "__main__":
+ from doctest import testmod
-if __name__ == '__main__':
- expression = 'a+b*(c^d-e)^(f+g*h)-i'
+ testmod()
+ expression = "a+b*(c^d-e)^(f+g*h)-i"
- print('Infix to Postfix Notation demonstration:\n')
- print('Infix notation: ' + expression)
- print('Postfix notation: ' + infix_to_postfix(expression))
+ print("Infix to Postfix Notation demonstration:\n")
+ print("Infix notation: " + expression)
+ print("Postfix notation: " + infix_to_postfix(expression))
diff --git a/data_structures/stacks/infix_to_prefix_conversion.py b/data_structures/stacks/infix_to_prefix_conversion.py
index da5fc261fb9f..d3dc9e3e9c73 100644
--- a/data_structures/stacks/infix_to_prefix_conversion.py
+++ b/data_structures/stacks/infix_to_prefix_conversion.py
@@ -11,51 +11,83 @@
a | + | cb^a
| | cb^a+
- a+b^c (Infix) -> +a^bc (Prefix)
+ a+b^c (Infix) -> +a^bc (Prefix)
"""
+
def infix_2_postfix(Infix):
Stack = []
Postfix = []
- priority = {'^':3, '*':2, '/':2, '%':2, '+':1, '-':1} # Priority of each operator
- print_width = len(Infix) if(len(Infix)>7) else 7
+ priority = {
+ "^": 3,
+ "*": 2,
+ "/": 2,
+ "%": 2,
+ "+": 1,
+ "-": 1,
+ } # Priority of each operator
+ print_width = len(Infix) if (len(Infix) > 7) else 7
# Print table header for output
- print('Symbol'.center(8), 'Stack'.center(print_width), 'Postfix'.center(print_width), sep = " | ")
- print('-'*(print_width*3+7))
+ print(
+ "Symbol".center(8),
+ "Stack".center(print_width),
+ "Postfix".center(print_width),
+ sep=" | ",
+ )
+ print("-" * (print_width * 3 + 7))
for x in Infix:
- if(x.isalpha() or x.isdigit()): Postfix.append(x) # if x is Alphabet / Digit, add it to Postfix
- elif(x == '('): Stack.append(x) # if x is "(" push to Stack
- elif(x == ')'): # if x is ")" pop stack until "(" is encountered
- while(Stack[-1] != '('):
- Postfix.append( Stack.pop() ) #Pop stack & add the content to Postfix
+ if x.isalpha() or x.isdigit():
+ Postfix.append(x) # if x is Alphabet / Digit, add it to Postfix
+ elif x == "(":
+ Stack.append(x) # if x is "(" push to Stack
+ elif x == ")": # if x is ")" pop stack until "(" is encountered
+ while Stack[-1] != "(":
+ Postfix.append(Stack.pop()) # Pop stack & add the content to Postfix
Stack.pop()
else:
- if(len(Stack)==0): Stack.append(x) #If stack is empty, push x to stack
- else:
- while( len(Stack) > 0 and priority[x] <= priority[Stack[-1]]): # while priority of x is not greater than priority of element in the stack
- Postfix.append( Stack.pop() ) # pop stack & add to Postfix
- Stack.append(x) # push x to stack
+ if len(Stack) == 0:
+ Stack.append(x) # If stack is empty, push x to stack
+ else: # while priority of x is not > priority of element in the stack
+ while len(Stack) > 0 and priority[x] <= priority[Stack[-1]]:
+ Postfix.append(Stack.pop()) # pop stack & add to Postfix
+ Stack.append(x) # push x to stack
+
+ print(
+ x.center(8),
+ ("".join(Stack)).ljust(print_width),
+ ("".join(Postfix)).ljust(print_width),
+ sep=" | ",
+ ) # Output in tabular format
- print(x.center(8), (''.join(Stack)).ljust(print_width), (''.join(Postfix)).ljust(print_width), sep = " | ") # Output in tabular format
+ while len(Stack) > 0: # while stack is not empty
+ Postfix.append(Stack.pop()) # pop stack & add to Postfix
+ print(
+ " ".center(8),
+ ("".join(Stack)).ljust(print_width),
+ ("".join(Postfix)).ljust(print_width),
+ sep=" | ",
+ ) # Output in tabular format
- while(len(Stack) > 0): # while stack is not empty
- Postfix.append( Stack.pop() ) # pop stack & add to Postfix
- print(' '.center(8), (''.join(Stack)).ljust(print_width), (''.join(Postfix)).ljust(print_width), sep = " | ") # Output in tabular format
+ return "".join(Postfix) # return Postfix as str
- return "".join(Postfix) # return Postfix as str
def infix_2_prefix(Infix):
- Infix = list(Infix[::-1]) # reverse the infix equation
-
+ Infix = list(Infix[::-1]) # reverse the infix equation
+
for i in range(len(Infix)):
- if(Infix[i] == '('): Infix[i] = ')' # change "(" to ")"
- elif(Infix[i] == ')'): Infix[i] = '(' # change ")" to "("
-
- return (infix_2_postfix("".join(Infix)))[::-1] # call infix_2_postfix on Infix, return reverse of Postfix
+ if Infix[i] == "(":
+ Infix[i] = ")" # change "(" to ")"
+ elif Infix[i] == ")":
+ Infix[i] = "(" # change ")" to "("
+
+ return (infix_2_postfix("".join(Infix)))[
+ ::-1
+ ] # call infix_2_postfix on Infix, return reverse of Postfix
+
if __name__ == "__main__":
- Infix = input("\nEnter an Infix Equation = ") #Input an Infix equation
- Infix = "".join(Infix.split()) #Remove spaces from the input
+ Infix = input("\nEnter an Infix Equation = ") # Input an Infix equation
+ Infix = "".join(Infix.split()) # Remove spaces from the input
print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
diff --git a/data_structures/stacks/linked_stack.py b/data_structures/stacks/linked_stack.py
new file mode 100644
index 000000000000..1a2d07f20e7c
--- /dev/null
+++ b/data_structures/stacks/linked_stack.py
@@ -0,0 +1,157 @@
+""" A Stack using a linked list like structure """
+from typing import Any
+
+
+class Node:
+ def __init__(self, data):
+ self.data = data
+ self.next = None
+
+ def __str__(self):
+ return f"{self.data}"
+
+
+class LinkedStack:
+ """
+ Linked List Stack implementing push (to top),
+ pop (from top) and is_empty
+
+ >>> stack = LinkedStack()
+ >>> stack.is_empty()
+ True
+ >>> stack.push(5)
+ >>> stack.push(9)
+ >>> stack.push('python')
+ >>> stack.is_empty()
+ False
+ >>> stack.pop()
+ 'python'
+ >>> stack.push('algorithms')
+ >>> stack.pop()
+ 'algorithms'
+ >>> stack.pop()
+ 9
+ >>> stack.pop()
+ 5
+ >>> stack.is_empty()
+ True
+ >>> stack.pop()
+ Traceback (most recent call last):
+ ...
+ IndexError: pop from empty stack
+ """
+
+ def __init__(self) -> None:
+ self.top = None
+
+ def __iter__(self):
+ node = self.top
+ while node:
+ yield node.data
+ node = node.next
+
+ def __str__(self):
+ """
+ >>> stack = LinkedStack()
+ >>> stack.push("c")
+ >>> stack.push("b")
+ >>> stack.push("a")
+ >>> str(stack)
+ 'a->b->c'
+ """
+ return "->".join([str(item) for item in self])
+
+ def __len__(self):
+ """
+ >>> stack = LinkedStack()
+ >>> len(stack) == 0
+ True
+ >>> stack.push("c")
+ >>> stack.push("b")
+ >>> stack.push("a")
+ >>> len(stack) == 3
+ True
+ """
+ return len(tuple(iter(self)))
+
+ def is_empty(self) -> bool:
+ """
+ >>> stack = LinkedStack()
+ >>> stack.is_empty()
+ True
+ >>> stack.push(1)
+ >>> stack.is_empty()
+ False
+ """
+ return self.top is None
+
+ def push(self, item: Any) -> None:
+ """
+ >>> stack = LinkedStack()
+ >>> stack.push("Python")
+ >>> stack.push("Java")
+ >>> stack.push("C")
+ >>> str(stack)
+ 'C->Java->Python'
+ """
+ node = Node(item)
+ if not self.is_empty():
+ node.next = self.top
+ self.top = node
+
+ def pop(self) -> Any:
+ """
+ >>> stack = LinkedStack()
+ >>> stack.pop()
+ Traceback (most recent call last):
+ ...
+ IndexError: pop from empty stack
+ >>> stack.push("c")
+ >>> stack.push("b")
+ >>> stack.push("a")
+ >>> stack.pop() == 'a'
+ True
+ >>> stack.pop() == 'b'
+ True
+ >>> stack.pop() == 'c'
+ True
+ """
+ if self.is_empty():
+ raise IndexError("pop from empty stack")
+ assert isinstance(self.top, Node)
+ pop_node = self.top
+ self.top = self.top.next
+ return pop_node.data
+
+ def peek(self) -> Any:
+ """
+ >>> stack = LinkedStack()
+ >>> stack.push("Java")
+ >>> stack.push("C")
+ >>> stack.push("Python")
+ >>> stack.peek()
+ 'Python'
+ """
+ if self.is_empty():
+ raise IndexError("peek from empty stack")
+ return self.top.data
+
+ def clear(self) -> None:
+ """
+ >>> stack = LinkedStack()
+ >>> stack.push("Java")
+ >>> stack.push("C")
+ >>> stack.push("Python")
+ >>> str(stack)
+ 'Python->C->Java'
+ >>> stack.clear()
+ >>> len(stack) == 0
+ True
+ """
+ self.top = None
+
+
+if __name__ == "__main__":
+ from doctest import testmod
+
+ testmod()
diff --git a/data_structures/stacks/next.py b/data_structures/stacks/next.py
deleted file mode 100644
index bca83339592c..000000000000
--- a/data_structures/stacks/next.py
+++ /dev/null
@@ -1,17 +0,0 @@
-from __future__ import print_function
-# Function to print element and NGE pair for all elements of list
-def printNGE(arr):
-
- for i in range(0, len(arr), 1):
-
- next = -1
- for j in range(i+1, len(arr), 1):
- if arr[i] < arr[j]:
- next = arr[j]
- break
-
- print(str(arr[i]) + " -- " + str(next))
-
-# Driver program to test above function
-arr = [11,13,21,3]
-printNGE(arr)
diff --git a/data_structures/stacks/next_greater_element.py b/data_structures/stacks/next_greater_element.py
new file mode 100644
index 000000000000..d8c7ed17317b
--- /dev/null
+++ b/data_structures/stacks/next_greater_element.py
@@ -0,0 +1,98 @@
+arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
+expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
+
+
+def next_greatest_element_slow(arr: list) -> list:
+ """
+ Get the Next Greatest Element (NGE) for all elements in a list.
+ Maximum element present after the current one which is also greater than the
+ current one.
+ >>> next_greatest_element_slow(arr) == expect
+ True
+ """
+ result = []
+ for i in range(0, len(arr), 1):
+ next = -1
+ for j in range(i + 1, len(arr), 1):
+ if arr[i] < arr[j]:
+ next = arr[j]
+ break
+ result.append(next)
+ return result
+
+
+def next_greatest_element_fast(arr: list) -> list:
+ """
+ Like next_greatest_element_slow() but changes the loops to use
+ enumerate() instead of range(len()) for the outer loop and
+ for in a slice of arr for the inner loop.
+ >>> next_greatest_element_fast(arr) == expect
+ True
+ """
+ result = []
+ for i, outer in enumerate(arr):
+ next = -1
+ for inner in arr[i + 1 :]:
+ if outer < inner:
+ next = inner
+ break
+ result.append(next)
+ return result
+
+
+def next_greatest_element(arr: list) -> list:
+ """
+ Get the Next Greatest Element (NGE) for all elements in a list.
+ Maximum element present after the current one which is also greater than the
+ current one.
+
+ A naive way to solve this is to take two loops and check for the next bigger
+ number but that will make the time complexity as O(n^2). The better way to solve
+ this would be to use a stack to keep track of maximum number giving a linear time
+ solution.
+ >>> next_greatest_element(arr) == expect
+ True
+ """
+ stack = []
+ result = [-1] * len(arr)
+
+ for index in reversed(range(len(arr))):
+ if len(stack):
+ while stack[-1] <= arr[index]:
+ stack.pop()
+ if len(stack) == 0:
+ break
+
+ if len(stack) != 0:
+ result[index] = stack[-1]
+
+ stack.append(arr[index])
+
+ return result
+
+
+if __name__ == "__main__":
+ from doctest import testmod
+ from timeit import timeit
+
+ testmod()
+ print(next_greatest_element_slow(arr))
+ print(next_greatest_element_fast(arr))
+ print(next_greatest_element(arr))
+
+ setup = (
+ "from __main__ import arr, next_greatest_element_slow, "
+ "next_greatest_element_fast, next_greatest_element"
+ )
+ print(
+ "next_greatest_element_slow():",
+ timeit("next_greatest_element_slow(arr)", setup=setup),
+ )
+ print(
+ "next_greatest_element_fast():",
+ timeit("next_greatest_element_fast(arr)", setup=setup),
+ )
+ print(
+ " next_greatest_element():",
+ timeit("next_greatest_element(arr)", setup=setup),
+ )
diff --git a/data_structures/stacks/postfix_evaluation.py b/data_structures/stacks/postfix_evaluation.py
index 1786e71dd383..574acac71c43 100644
--- a/data_structures/stacks/postfix_evaluation.py
+++ b/data_structures/stacks/postfix_evaluation.py
@@ -14,37 +14,55 @@
| pop(5) |
+ | push(5+54) | 59
- Result = 59
+ Result = 59
"""
import operator as op
+
def Solve(Postfix):
Stack = []
- Div = lambda x, y: int(x/y) # integer division operation
- Opr = {'^':op.pow, '*':op.mul, '/':Div, '+':op.add, '-':op.sub} # operators & their respective operation
+ Div = lambda x, y: int(x / y) # noqa: E731 integer division operation
+ Opr = {
+ "^": op.pow,
+ "*": op.mul,
+ "/": Div,
+ "+": op.add,
+ "-": op.sub,
+ } # operators & their respective operation
# print table header
- print('Symbol'.center(8), 'Action'.center(12), 'Stack', sep = " | ")
- print('-'*(30+len(Postfix)))
+ print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
+ print("-" * (30 + len(Postfix)))
for x in Postfix:
- if( x.isdigit() ): # if x in digit
- Stack.append(x) # append x to stack
- print(x.rjust(8), ('push('+x+')').ljust(12), ','.join(Stack), sep = " | ") # output in tabular format
+ if x.isdigit(): # if x in digit
+ Stack.append(x) # append x to stack
+ # output in tabular format
+ print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(Stack), sep=" | ")
else:
- B = Stack.pop() # pop stack
- print("".rjust(8), ('pop('+B+')').ljust(12), ','.join(Stack), sep = " | ") # output in tabular format
+ B = Stack.pop() # pop stack
+ # output in tabular format
+ print("".rjust(8), ("pop(" + B + ")").ljust(12), ",".join(Stack), sep=" | ")
- A = Stack.pop() # pop stack
- print("".rjust(8), ('pop('+A+')').ljust(12), ','.join(Stack), sep = " | ") # output in tabular format
+ A = Stack.pop() # pop stack
+ # output in tabular format
+ print("".rjust(8), ("pop(" + A + ")").ljust(12), ",".join(Stack), sep=" | ")
- Stack.append( str(Opr[x](int(A), int(B))) ) # evaluate the 2 values poped from stack & push result to stack
- print(x.rjust(8), ('push('+A+x+B+')').ljust(12), ','.join(Stack), sep = " | ") # output in tabular format
+ Stack.append(
+ str(Opr[x](int(A), int(B)))
+ ) # evaluate the 2 values popped from stack & push result to stack
+ # output in tabular format
+ print(
+ x.rjust(8),
+ ("push(" + A + x + B + ")").ljust(12),
+ ",".join(Stack),
+ sep=" | ",
+ )
return int(Stack[0])
if __name__ == "__main__":
- Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(' ')
+ Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
print("\n\tResult = ", Solve(Postfix))
diff --git a/data_structures/stacks/prefix_evaluation.py b/data_structures/stacks/prefix_evaluation.py
new file mode 100644
index 000000000000..00df2c1e63b0
--- /dev/null
+++ b/data_structures/stacks/prefix_evaluation.py
@@ -0,0 +1,60 @@
+"""
+Python3 program to evaluate a prefix expression.
+"""
+
+calc = {
+ "+": lambda x, y: x + y,
+ "-": lambda x, y: x - y,
+ "*": lambda x, y: x * y,
+ "/": lambda x, y: x / y,
+}
+
+
+def is_operand(c):
+ """
+ Return True if the given char c is an operand, e.g. it is a number
+
+ >>> is_operand("1")
+ True
+ >>> is_operand("+")
+ False
+ """
+ return c.isdigit()
+
+
+def evaluate(expression):
+ """
+ Evaluate a given expression in prefix notation.
+ Asserts that the given expression is valid.
+
+ >>> evaluate("+ 9 * 2 6")
+ 21
+ >>> evaluate("/ * 10 2 + 4 1 ")
+ 4.0
+ """
+ stack = []
+
+ # iterate over the string in reverse order
+ for c in expression.split()[::-1]:
+
+ # push operand to stack
+ if is_operand(c):
+ stack.append(int(c))
+
+ else:
+ # pop values from stack and calculate the result
+ # push the result onto the stack again
+ o1 = stack.pop()
+ o2 = stack.pop()
+ stack.append(calc[c](o1, o2))
+
+ return stack.pop()
+
+
+# Driver code
+if __name__ == "__main__":
+ test_expression = "+ 9 * 2 6"
+ print(evaluate(test_expression))
+
+ test_expression = "/ * 10 2 + 4 1 "
+ print(evaluate(test_expression))
diff --git a/data_structures/stacks/stack.py b/data_structures/stacks/stack.py
index 7f979d927d08..840cde099d38 100644
--- a/data_structures/stacks/stack.py
+++ b/data_structures/stacks/stack.py
@@ -1,25 +1,24 @@
-from __future__ import print_function
-__author__ = 'Omkar Pathak'
+class StackOverflowError(BaseException):
+ pass
-class Stack(object):
- """ A stack is an abstract data type that serves as a collection of
+class Stack:
+ """A stack is an abstract data type that serves as a collection of
elements with two principal operations: push() and pop(). push() adds an
element to the top of the stack, and pop() removes an element from the top
of a stack. The order in which elements come off of a stack are
Last In, First Out (LIFO).
-
https://en.wikipedia.org/wiki/Stack_(abstract_data_type)
"""
- def __init__(self, limit=10):
+ def __init__(self, limit: int = 10):
self.stack = []
self.limit = limit
- def __bool__(self):
+ def __bool__(self) -> bool:
return bool(self.stack)
- def __str__(self):
+ def __str__(self) -> str:
return str(self.stack)
def push(self, data):
@@ -30,40 +29,76 @@ def push(self, data):
def pop(self):
""" Pop an element off of the top of the stack."""
- if self.stack:
- return self.stack.pop()
- else:
- raise IndexError('pop from an empty stack')
+ return self.stack.pop()
def peek(self):
""" Peek at the top-most element of the stack."""
- if self.stack:
- return self.stack[-1]
+ return self.stack[-1]
- def is_empty(self):
+ def is_empty(self) -> bool:
""" Check if a stack is empty."""
return not bool(self.stack)
- def size(self):
+ def is_full(self) -> bool:
+ return self.size() == self.limit
+
+ def size(self) -> int:
""" Return the size of the stack."""
return len(self.stack)
+ def __contains__(self, item) -> bool:
+ """Check if item is in stack"""
+ return item in self.stack
-class StackOverflowError(BaseException):
- pass
+def test_stack() -> None:
+ """
+ >>> test_stack()
+ """
+ stack = Stack(10)
+ assert bool(stack) is False
+ assert stack.is_empty() is True
+ assert stack.is_full() is False
+ assert str(stack) == "[]"
+
+ try:
+ _ = stack.pop()
+ assert False # This should not happen
+ except IndexError:
+ assert True # This should happen
+
+ try:
+ _ = stack.peek()
+ assert False # This should not happen
+ except IndexError:
+ assert True # This should happen
-if __name__ == '__main__':
- stack = Stack()
for i in range(10):
+ assert stack.size() == i
stack.push(i)
- print('Stack demonstration:\n')
- print('Initial stack: ' + str(stack))
- print('pop(): ' + str(stack.pop()))
- print('After pop(), the stack is now: ' + str(stack))
- print('peek(): ' + str(stack.peek()))
+ assert bool(stack) is True
+ assert stack.is_empty() is False
+ assert stack.is_full() is True
+ assert str(stack) == str(list(range(10)))
+ assert stack.pop() == 9
+ assert stack.peek() == 8
+
stack.push(100)
- print('After push(100), the stack is now: ' + str(stack))
- print('is_empty(): ' + str(stack.is_empty()))
- print('size(): ' + str(stack.size()))
+ assert str(stack) == str([0, 1, 2, 3, 4, 5, 6, 7, 8, 100])
+
+ try:
+ stack.push(200)
+ assert False # This should not happen
+ except StackOverflowError:
+ assert True # This should happen
+
+ assert stack.is_empty() is False
+ assert stack.size() == 10
+
+ assert 5 in stack
+ assert 55 not in stack
+
+
+if __name__ == "__main__":
+ test_stack()
diff --git a/data_structures/stacks/stack_using_dll.py b/data_structures/stacks/stack_using_dll.py
new file mode 100644
index 000000000000..75e0cd20640d
--- /dev/null
+++ b/data_structures/stacks/stack_using_dll.py
@@ -0,0 +1,123 @@
+# A complete working Python program to demonstrate all
+# stack operations using a doubly linked list
+
+
+class Node:
+ def __init__(self, data):
+ self.data = data # Assign data
+ self.next = None # Initialize next as null
+ self.prev = None # Initialize prev as null
+
+
+class Stack:
+ """
+ >>> stack = Stack()
+ >>> stack.is_empty()
+ True
+ >>> stack.print_stack()
+ stack elements are:
+ >>> for i in range(4):
+ ... stack.push(i)
+ ...
+ >>> stack.is_empty()
+ False
+ >>> stack.print_stack()
+ stack elements are:
+ 3->2->1->0->
+ >>> stack.top()
+ 3
+ >>> len(stack)
+ 4
+ >>> stack.pop()
+ 3
+ >>> stack.print_stack()
+ stack elements are:
+ 2->1->0->
+ """
+
+ def __init__(self):
+ self.head = None
+
+ def push(self, data):
+ """add a Node to the stack"""
+ if self.head is None:
+ self.head = Node(data)
+ else:
+ new_node = Node(data)
+ self.head.prev = new_node
+ new_node.next = self.head
+ new_node.prev = None
+ self.head = new_node
+
+ def pop(self):
+ """pop the top element off the stack"""
+ if self.head is None:
+ return None
+ else:
+ temp = self.head.data
+ self.head = self.head.next
+ self.head.prev = None
+ return temp
+
+ def top(self):
+ """return the top element of the stack"""
+ return self.head.data
+
+ def __len__(self):
+ temp = self.head
+ count = 0
+ while temp is not None:
+ count += 1
+ temp = temp.next
+ return count
+
+ def is_empty(self):
+ return self.head is None
+
+ def print_stack(self):
+ print("stack elements are:")
+ temp = self.head
+ while temp is not None:
+ print(temp.data, end="->")
+ temp = temp.next
+
+
+# Code execution starts here
+if __name__ == "__main__":
+
+ # Start with the empty stack
+ stack = Stack()
+
+ # Insert 4 at the beginning. So stack becomes 4->None
+ print("Stack operations using Doubly LinkedList")
+ stack.push(4)
+
+ # Insert 5 at the beginning. So stack becomes 4->5->None
+ stack.push(5)
+
+ # Insert 6 at the beginning. So stack becomes 4->5->6->None
+ stack.push(6)
+
+ # Insert 7 at the beginning. So stack becomes 4->5->6->7->None
+ stack.push(7)
+
+ # Print the stack
+ stack.print_stack()
+
+ # Print the top element
+ print("\nTop element is ", stack.top())
+
+ # Print the stack size
+ print("Size of the stack is ", len(stack))
+
+ # pop the top element
+ stack.pop()
+
+ # pop the top element
+ stack.pop()
+
+ # two elements have now been popped off
+ stack.print_stack()
+
+ # Print True if the stack is empty else False
+ print("\nstack is empty:", stack.is_empty())
diff --git a/data_structures/stacks/stock_span_problem.py b/data_structures/stacks/stock_span_problem.py
index 9628864edd10..cc2adfdd6c21 100644
--- a/data_structures/stacks/stock_span_problem.py
+++ b/data_structures/stacks/stock_span_problem.py
@@ -1,52 +1,53 @@
-'''
-The stock span problem is a financial problem where we have a series of n daily
+"""
+The stock span problem is a financial problem where we have a series of n daily
price quotes for a stock and we need to calculate span of stock's price for all n days.
-The span Si of the stock's price on a given day i is defined as the maximum
-number of consecutive days just before the given day, for which the price of the stock
+The span Si of the stock's price on a given day i is defined as the maximum
+number of consecutive days just before the given day, for which the price of the stock
on the current day is less than or equal to its price on the given day.
-'''
-from __future__ import print_function
-def calculateSpan(price, S):
-
- n = len(price)
- # Create a stack and push index of fist element to it
- st = []
- st.append(0)
-
- # Span value of first element is always 1
- S[0] = 1
-
- # Calculate span values for rest of the elements
- for i in range(1, n):
-
- # Pop elements from stack whlie stack is not
- # empty and top of stack is smaller than price[i]
- while( len(st) > 0 and price[st[0]] <= price[i]):
- st.pop()
-
- # If stack becomes empty, then price[i] is greater
- # than all elements on left of it, i.e. price[0],
- # price[1], ..price[i-1]. Else the price[i] is
- # greater than elements after top of stack
- S[i] = i+1 if len(st) <= 0 else (i - st[0])
-
- # Push this element to stack
- st.append(i)
-
-
-# A utility function to print elements of array
-def printArray(arr, n):
- for i in range(0,n):
- print (arr[i],end =" ")
-
-
-# Driver program to test above function
-price = [10, 4, 5, 90, 120, 80]
-S = [0 for i in range(len(price)+1)]
-
-# Fill the span values in array S[]
-calculateSpan(price, S)
-
-# Print the calculated span values
-printArray(S, len(price))
+"""
+
+
def calculateSpan(price, S):
    """Fill ``S[i]`` with the stock span of ``price[i]`` for every day i.

    The span is the number of consecutive days ending at day i whose price
    is less than or equal to ``price[i]``. ``S`` is mutated in place.

    Bug fix: the original compared and subtracted ``st[0]`` (the *bottom*
    of the stack) while ``st.pop()`` removes the *top*; the nearest
    previous greater element lives at ``st[-1]``. For example
    price=[10, 3, 5, 4] produced span 3 for the last day instead of 1.
    Also guards against an empty price list.
    """
    if not price:
        return
    n = len(price)
    # Stack of indices that are still candidates for "nearest previous
    # greater element"; push the index of the first element.
    st = [0]

    # Span value of the first element is always 1.
    S[0] = 1

    # Calculate span values for the rest of the elements.
    for i in range(1, n):
        # Pop indices whose price is <= price[i]; they can never again be
        # the nearest previous strictly-greater element.
        while st and price[st[-1]] <= price[i]:
            st.pop()

        # Empty stack: price[i] exceeds every earlier price, so the span
        # covers all i+1 days; otherwise it reaches back to index st[-1].
        S[i] = i + 1 if not st else i - st[-1]

        # The current index becomes a candidate for later days.
        st.append(i)
+
+
# A utility function to print elements of an array
def printArray(arr, n):
    """Print the first ``n`` entries of ``arr``, each followed by a space."""
    for i in range(n):
        print(arr[i], end=" ")
+
+
# Driver program: compute and display span values for a sample series
price = [10, 4, 5, 90, 120, 80]
S = [0] * (len(price) + 1)

# Populate S in place with the span of each day's price.
calculateSpan(price, S)

# Display the computed span values.
printArray(S, len(price))
diff --git a/data_structures/trie/__init__.py b/data_structures/trie/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/data_structures/trie/trie.py b/data_structures/trie/trie.py
index b6234c6704c6..6582be24fd0c 100644
--- a/data_structures/trie/trie.py
+++ b/data_structures/trie/trie.py
@@ -1,9 +1,8 @@
"""
A Trie/Prefix Tree is a kind of search tree used to provide quick lookup
of words/patterns in a set of words. A basic Trie however has O(n^2) space complexity
-making it impractical in practice. It however provides O(max(search_string, length of longest word)) lookup
-time making it an optimal approach when space is not an issue.
-
+making it impractical in practice. It however provides O(max(search_string, length of
+longest word)) lookup time making it an optimal approach when space is not an issue.
"""
@@ -12,7 +11,7 @@ def __init__(self):
self.nodes = dict() # Mapping from char to TrieNode
self.is_leaf = False
- def insert_many(self, words: [str]): # noqa: E999 This syntax is Python 3 only
+ def insert_many(self, words: [str]):
"""
Inserts a list of words into the Trie
:param words: list of string words
@@ -21,7 +20,7 @@ def insert_many(self, words: [str]): # noqa: E999 This syntax is Python 3 only
for word in words:
self.insert(word)
- def insert(self, word: str): # noqa: E999 This syntax is Python 3 only
+ def insert(self, word: str):
"""
Inserts a word into the Trie
:param word: word to be inserted
@@ -34,7 +33,7 @@ def insert(self, word: str): # noqa: E999 This syntax is Python 3 only
curr = curr.nodes[char]
curr.is_leaf = True
- def find(self, word: str) -> bool: # noqa: E999 This syntax is Python 3 only
+ def find(self, word: str) -> bool:
"""
Tries to find word in a Trie
:param word: word to look for
@@ -47,8 +46,36 @@ def find(self, word: str) -> bool: # noqa: E999 This syntax is Python 3 only
curr = curr.nodes[char]
return curr.is_leaf
+ def delete(self, word: str):
+ """
+ Deletes a word in a Trie
+ :param word: word to delete
+ :return: None
+ """
-def print_words(node: TrieNode, word: str): # noqa: E999 This syntax is Python 3 only
+ def _delete(curr: TrieNode, word: str, index: int):
+ if index == len(word):
+ # If word does not exist
+ if not curr.is_leaf:
+ return False
+ curr.is_leaf = False
+ return len(curr.nodes) == 0
+ char = word[index]
+ char_node = curr.nodes.get(char)
+ # If char not in current trie node
+ if not char_node:
+ return False
+ # Flag to check if node can be deleted
+ delete_curr = _delete(char_node, word, index + 1)
+ if delete_curr:
+ del curr.nodes[char]
+ return len(curr.nodes) == 0
+ return delete_curr
+
+ _delete(self, word, 0)
+
+
+def print_words(node: TrieNode, word: str):
"""
Prints all the words in a Trie
:param node: root node of Trie
@@ -56,20 +83,45 @@ def print_words(node: TrieNode, word: str): # noqa: E999 This syntax is Python
:return: None
"""
if node.is_leaf:
- print(word, end=' ')
+ print(word, end=" ")
for key, value in node.nodes.items():
print_words(value, word + key)
-def test():
- words = ['banana', 'bananas', 'bandana', 'band', 'apple', 'all', 'beast']
+def test_trie():
+ words = "banana bananas bandana band apple all beast".split()
root = TrieNode()
root.insert_many(words)
- # print_words(root, '')
- assert root.find('banana')
- assert not root.find('bandanas')
- assert not root.find('apps')
- assert root.find('apple')
+ # print_words(root, "")
+ assert all(root.find(word) for word in words)
+ assert root.find("banana")
+ assert not root.find("bandanas")
+ assert not root.find("apps")
+ assert root.find("apple")
+ assert root.find("all")
+ root.delete("all")
+ assert not root.find("all")
+ root.delete("banana")
+ assert not root.find("banana")
+ assert root.find("bananas")
+ return True
+
+
+def print_results(msg: str, passes: bool) -> None:
+ print(str(msg), "works!" if passes else "doesn't work :(")
+
+
+def pytests():
+ assert test_trie()
+
+
+def main():
+ """
+ >>> pytests()
+ """
+ print_results("Testing trie functionality", test_trie())
+
-test()
+if __name__ == "__main__":
+ main()
diff --git a/data_structures/union_find/tests_union_find.py b/data_structures/union_find/tests_union_find.py
deleted file mode 100644
index b0708778ddbd..000000000000
--- a/data_structures/union_find/tests_union_find.py
+++ /dev/null
@@ -1,78 +0,0 @@
-from __future__ import absolute_import
-from .union_find import UnionFind
-import unittest
-
-
-class TestUnionFind(unittest.TestCase):
- def test_init_with_valid_size(self):
- uf = UnionFind(5)
- self.assertEqual(uf.size, 5)
-
- def test_init_with_invalid_size(self):
- with self.assertRaises(ValueError):
- uf = UnionFind(0)
-
- with self.assertRaises(ValueError):
- uf = UnionFind(-5)
-
- def test_union_with_valid_values(self):
- uf = UnionFind(10)
-
- for i in range(11):
- for j in range(11):
- uf.union(i, j)
-
- def test_union_with_invalid_values(self):
- uf = UnionFind(10)
-
- with self.assertRaises(ValueError):
- uf.union(-1, 1)
-
- with self.assertRaises(ValueError):
- uf.union(11, 1)
-
- def test_same_set_with_valid_values(self):
- uf = UnionFind(10)
-
- for i in range(11):
- for j in range(11):
- if i == j:
- self.assertTrue(uf.same_set(i, j))
- else:
- self.assertFalse(uf.same_set(i, j))
-
- uf.union(1, 2)
- self.assertTrue(uf.same_set(1, 2))
-
- uf.union(3, 4)
- self.assertTrue(uf.same_set(3, 4))
-
- self.assertFalse(uf.same_set(1, 3))
- self.assertFalse(uf.same_set(1, 4))
- self.assertFalse(uf.same_set(2, 3))
- self.assertFalse(uf.same_set(2, 4))
-
- uf.union(1, 3)
- self.assertTrue(uf.same_set(1, 3))
- self.assertTrue(uf.same_set(1, 4))
- self.assertTrue(uf.same_set(2, 3))
- self.assertTrue(uf.same_set(2, 4))
-
- uf.union(4, 10)
- self.assertTrue(uf.same_set(1, 10))
- self.assertTrue(uf.same_set(2, 10))
- self.assertTrue(uf.same_set(3, 10))
- self.assertTrue(uf.same_set(4, 10))
-
- def test_same_set_with_invalid_values(self):
- uf = UnionFind(10)
-
- with self.assertRaises(ValueError):
- uf.same_set(-1, 1)
-
- with self.assertRaises(ValueError):
- uf.same_set(11, 0)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/data_structures/union_find/union_find.py b/data_structures/union_find/union_find.py
deleted file mode 100644
index 40eea67ac944..000000000000
--- a/data_structures/union_find/union_find.py
+++ /dev/null
@@ -1,87 +0,0 @@
-class UnionFind():
- """
- https://en.wikipedia.org/wiki/Disjoint-set_data_structure
-
- The union-find is a disjoint-set data structure
-
- You can merge two sets and tell if one set belongs to
- another one.
-
- It's used on the Kruskal Algorithm
- (https://en.wikipedia.org/wiki/Kruskal%27s_algorithm)
-
- The elements are in range [0, size]
- """
- def __init__(self, size):
- if size <= 0:
- raise ValueError("size should be greater than 0")
-
- self.size = size
-
- # The below plus 1 is because we are using elements
- # in range [0, size]. It makes more sense.
-
- # Every set begins with only itself
- self.root = [i for i in range(size+1)]
-
- # This is used for heuristic union by rank
- self.weight = [0 for i in range(size+1)]
-
- def union(self, u, v):
- """
- Union of the sets u and v.
- Complexity: log(n).
- Amortized complexity: < 5 (it's very fast).
- """
-
- self._validate_element_range(u, "u")
- self._validate_element_range(v, "v")
-
- if u == v:
- return
-
- # Using union by rank will guarantee the
- # log(n) complexity
- rootu = self._root(u)
- rootv = self._root(v)
- weight_u = self.weight[rootu]
- weight_v = self.weight[rootv]
- if weight_u >= weight_v:
- self.root[rootv] = rootu
- if weight_u == weight_v:
- self.weight[rootu] += 1
- else:
- self.root[rootu] = rootv
-
- def same_set(self, u, v):
- """
- Return true if the elements u and v belongs to
- the same set
- """
-
- self._validate_element_range(u, "u")
- self._validate_element_range(v, "v")
-
- return self._root(u) == self._root(v)
-
- def _root(self, u):
- """
- Get the element set root.
- This uses the heuristic path compression
- See wikipedia article for more details.
- """
-
- if u != self.root[u]:
- self.root[u] = self._root(self.root[u])
-
- return self.root[u]
-
- def _validate_element_range(self, u, element_name):
- """
- Raises ValueError if element is not in range
- """
- if u < 0 or u > self.size:
- msg = ("element {0} with value {1} "
- "should be in range [0~{2}]")\
- .format(element_name, u, self.size)
- raise ValueError(msg)
diff --git a/digital_image_processing/change_brightness.py b/digital_image_processing/change_brightness.py
new file mode 100644
index 000000000000..97493f1a399e
--- /dev/null
+++ b/digital_image_processing/change_brightness.py
@@ -0,0 +1,26 @@
+from PIL import Image
+
+
def change_brightness(img: Image, level: float) -> Image:
    """Return a copy of *img* with every band value shifted by *level*.

    ``level`` must lie in [-255.0, 255.0]; negative values darken the
    image, positive values brighten it.

    :raises ValueError: when ``level`` is outside [-255.0, 255.0].
    """
    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")

    def brightness(c: int) -> float:
        """Shift one channel value, written around the midpoint 128."""
        return 128 + level + (c - 128)

    return img.point(brightness)
+
+
# Demo: brighten the sample image and save the result
if __name__ == "__main__":
    with Image.open("image_data/lena.jpg") as img:
        # Raise the brightness by 100 and write the result out.
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
diff --git a/digital_image_processing/change_contrast.py b/digital_image_processing/change_contrast.py
new file mode 100644
index 000000000000..6a150400249f
--- /dev/null
+++ b/digital_image_processing/change_contrast.py
@@ -0,0 +1,35 @@
+"""
+Changing contrast with PIL
+
+This algorithm is used in
+https://noivce.pythonanywhere.com/ Python web app.
+
+python/black: True
+flake8 : True
+"""
+
+from PIL import Image
+
+
def change_contrast(img: Image, level: int) -> Image:
    """Return a copy of *img* with its contrast adjusted by *level*.

    Uses the standard contrast-correction scale
    259 * (level + 255) / (255 * (259 - level)); level 0 is the identity.
    """
    scale = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        """Remap one channel value around the midpoint 128."""
        return int(128 + scale * (c - 128))

    return img.point(contrast)
+
+
# Demo: raise the contrast of the sample image and save the result
if __name__ == "__main__":
    with Image.open("image_data/lena.jpg") as img:
        # Apply a strong contrast boost and write the result out.
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
diff --git a/digital_image_processing/convert_to_negative.py b/digital_image_processing/convert_to_negative.py
new file mode 100644
index 000000000000..7df44138973c
--- /dev/null
+++ b/digital_image_processing/convert_to_negative.py
@@ -0,0 +1,29 @@
+"""
+ Implemented an algorithm using opencv to convert a colored image into its negative
+"""
+from cv2 import destroyAllWindows, imread, imshow, waitKey
+
+
def convert_to_negative(img):
    """Invert *img* (a numpy uint8 image) in place and return it.

    Every channel value v becomes 255 - v. The original looped over each
    pixel in Python and only supported 3-channel images (it subtracted a
    pixel from ``[255, 255, 255]``); this vectorized form handles
    greyscale and colour alike with a single C-level pass. The in-place
    update is kept because callers rely on *img* itself being mutated.
    """
    img[...] = 255 - img
    return img
+
+
# Demo: show the negative of the sample image
if __name__ == "__main__":
    # Load the original image.
    img = imread("image_data/lena.jpg", 1)

    # Invert it in place.
    neg = convert_to_negative(img)

    # Display until a key is pressed, then tear down the window.
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
diff --git a/digital_image_processing/dithering/__init__.py b/digital_image_processing/dithering/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/digital_image_processing/dithering/burkes.py b/digital_image_processing/dithering/burkes.py
new file mode 100644
index 000000000000..2bf0bbe03225
--- /dev/null
+++ b/digital_image_processing/dithering/burkes.py
@@ -0,0 +1,87 @@
+"""
+Implementation Burke's algorithm (dithering)
+"""
+import numpy as np
+from cv2 import destroyAllWindows, imread, imshow, waitKey
+
+
class Burkes:
    """
    Burke's algorithm is used for converting a greyscale image to a black
    and white version via error-diffusion dithering.
    Source: https://en.wikipedia.org/wiki/Dither

    Note:
    * Best results are given with threshold= ~1/2 * max greyscale value.
    * This implementation gets an RGB image and converts it to greyscale at runtime.

    Fixes over the original implementation:
    * ``error_table`` was built as (width+1) rows x (height+4) columns but
      indexed everywhere as [row][column]; the swapped dimensions crashed
      on any non-square image.
    * ``current_error`` read the transposed ``error_table[x][y]`` while all
      other accesses used ``[y][x]``.
    * ``output_img`` was allocated as (width, height, 3) instead of
      (height, width, 3).
    """

    def __init__(self, input_img, threshold: int):
        self.min_threshold = 0
        # Max greyscale value, i.e. get_greyscale(255, 255, 255) for #FFFFFF.
        self.max_threshold = int(self.get_greyscale(255, 255, 255))

        if not self.min_threshold < threshold < self.max_threshold:
            raise ValueError(f"Factor value should be from 0 to {self.max_threshold}")

        self.input_img = input_img
        self.threshold = threshold
        self.width, self.height = self.input_img.shape[1], self.input_img.shape[0]

        # The error table is one row taller and four columns wider than the
        # image so the propagation below/right of the border needs no bounds
        # checks; writes at x-1/x-2 for x < 2 wrap (negative index) into the
        # unused right-hand padding and are never read back.
        self.error_table = [
            [0 for _ in range(self.width + 4)] for __ in range(self.height + 1)
        ]
        self.output_img = np.ones((self.height, self.width, 3), np.uint8) * 255

    @classmethod
    def get_greyscale(cls, blue: int, green: int, red: int) -> float:
        """
        >>> Burkes.get_greyscale(3, 4, 5)
        3.753
        """
        return 0.114 * blue + 0.587 * green + 0.2126 * red

    def process(self) -> None:
        """Dither the input image into ``self.output_img`` (pure black/white)."""
        for y in range(self.height):
            for x in range(self.width):
                greyscale = int(self.get_greyscale(*self.input_img[y][x]))
                if self.threshold > greyscale + self.error_table[y][x]:
                    self.output_img[y][x] = (0, 0, 0)
                    current_error = greyscale + self.error_table[y][x]
                else:
                    self.output_img[y][x] = (255, 255, 255)
                    current_error = greyscale + self.error_table[y][x] - 255
                """
                Burkes error propagation (`*` is current pixel):

                        *     8/32  4/32
                2/32  4/32  8/32  4/32  2/32
                """
                self.error_table[y][x + 1] += int(8 / 32 * current_error)
                self.error_table[y][x + 2] += int(4 / 32 * current_error)
                self.error_table[y + 1][x] += int(8 / 32 * current_error)
                self.error_table[y + 1][x + 1] += int(4 / 32 * current_error)
                self.error_table[y + 1][x + 2] += int(2 / 32 * current_error)
                self.error_table[y + 1][x - 1] += int(4 / 32 * current_error)
                self.error_table[y + 1][x - 2] += int(2 / 32 * current_error)
+
+
# Demo: dither the sample image at several thresholds
if __name__ == "__main__":
    # Build one Burkes instance per threshold over the original image.
    dithered = [
        Burkes(imread("image_data/lena.jpg", 1), threshold)
        for threshold in (1, 126, 130, 140)
    ]

    # Run the dithering pass on each instance.
    for instance in dithered:
        instance.process()

    # Display every result, then wait for a key press before closing.
    for instance in dithered:
        imshow(
            f"Original image with dithering threshold: {instance.threshold}",
            instance.output_img,
        )

    waitKey(0)
    destroyAllWindows()
diff --git a/digital_image_processing/edge_detection/__init__.py b/digital_image_processing/edge_detection/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/digital_image_processing/edge_detection/canny.py b/digital_image_processing/edge_detection/canny.py
new file mode 100644
index 000000000000..295b4d825c12
--- /dev/null
+++ b/digital_image_processing/edge_detection/canny.py
@@ -0,0 +1,122 @@
+import cv2
+import numpy as np
+
+from digital_image_processing.filters.convolve import img_convolve
+from digital_image_processing.filters.sobel_filter import sobel_filter
+
+PI = 180
+
+
def gen_gaussian_kernel(k_size, sigma):
    """Return a k_size x k_size Gaussian-shaped kernel centred on the middle cell."""
    center = k_size // 2
    row_off, col_off = np.mgrid[-center : k_size - center, -center : k_size - center]
    norm = 1 / (2 * np.pi * sigma)
    return norm * np.exp(
        -(np.square(row_off) + np.square(col_off)) / (2 * np.square(sigma))
    )
+
+
def canny(image, threshold_low=15, threshold_high=30, weak=128, strong=255):
    """Canny edge detection on a 2-D greyscale image.

    :param image: 2-D numpy array (greyscale)
    :param threshold_low: gradients at or below this are suppressed
    :param threshold_high: gradients at or above this become strong edges
    :param weak: marker value for weak (candidate) edge pixels
    :param strong: marker value for strong (confirmed) edge pixels
    :return: 2-D array where surviving edge pixels hold ``strong``

    Bug fix: the edge-tracking loops originally ran to ``image_row`` /
    ``image_col``, so reading the ``row + 1`` / ``col + 1`` neighbours of a
    weak pixel on the last row or column raised IndexError; they now stop
    one short of the border, like the suppression loops above.
    """
    image_row, image_col = image.shape[0], image.shape[1]
    # Gaussian smoothing to suppress noise before differentiation.
    gaussian_out = img_convolve(image, gen_gaussian_kernel(9, sigma=1.4))
    # Gradient magnitude and direction from the Sobel operator.
    sobel_grad, sobel_theta = sobel_filter(gaussian_out)
    gradient_direction = np.rad2deg(sobel_theta)
    gradient_direction += PI

    dst = np.zeros((image_row, image_col))

    """
    Non-maximum suppression. If the edge strength of the current pixel is the largest
    compared to the other pixels in the mask with the same direction, the value will be
    preserved. Otherwise, the value will be suppressed.
    """
    for row in range(1, image_row - 1):
        for col in range(1, image_col - 1):
            direction = gradient_direction[row, col]

            if (
                0 <= direction < 22.5
                or 15 * PI / 8 <= direction <= 2 * PI
                or 7 * PI / 8 <= direction <= 9 * PI / 8
            ):
                W = sobel_grad[row, col - 1]
                E = sobel_grad[row, col + 1]
                if sobel_grad[row, col] >= W and sobel_grad[row, col] >= E:
                    dst[row, col] = sobel_grad[row, col]

            elif (PI / 8 <= direction < 3 * PI / 8) or (
                9 * PI / 8 <= direction < 11 * PI / 8
            ):
                SW = sobel_grad[row + 1, col - 1]
                NE = sobel_grad[row - 1, col + 1]
                if sobel_grad[row, col] >= SW and sobel_grad[row, col] >= NE:
                    dst[row, col] = sobel_grad[row, col]

            elif (3 * PI / 8 <= direction < 5 * PI / 8) or (
                11 * PI / 8 <= direction < 13 * PI / 8
            ):
                N = sobel_grad[row - 1, col]
                S = sobel_grad[row + 1, col]
                if sobel_grad[row, col] >= N and sobel_grad[row, col] >= S:
                    dst[row, col] = sobel_grad[row, col]

            elif (5 * PI / 8 <= direction < 7 * PI / 8) or (
                13 * PI / 8 <= direction < 15 * PI / 8
            ):
                NW = sobel_grad[row - 1, col - 1]
                SE = sobel_grad[row + 1, col + 1]
                if sobel_grad[row, col] >= NW and sobel_grad[row, col] >= SE:
                    dst[row, col] = sobel_grad[row, col]

            """
            High-Low threshold detection. If an edge pixel’s gradient value is higher
            than the high threshold value, it is marked as a strong edge pixel. If an
            edge pixel’s gradient value is smaller than the high threshold value and
            larger than the low threshold value, it is marked as a weak edge pixel. If
            an edge pixel's value is smaller than the low threshold value, it will be
            suppressed.
            """
            if dst[row, col] >= threshold_high:
                dst[row, col] = strong
            elif dst[row, col] <= threshold_low:
                dst[row, col] = 0
            else:
                dst[row, col] = weak

    """
    Edge tracking. Usually a weak edge pixel caused from true edges will be connected
    to a strong edge pixel while noise responses are unconnected. As long as there is
    one strong edge pixel that is involved in its 8-connected neighborhood, that weak
    edge point can be identified as one that should be preserved.
    """
    for row in range(1, image_row - 1):
        for col in range(1, image_col - 1):
            if dst[row, col] == weak:
                if 255 in (
                    dst[row, col + 1],
                    dst[row, col - 1],
                    dst[row - 1, col],
                    dst[row + 1, col],
                    dst[row - 1, col - 1],
                    dst[row + 1, col - 1],
                    dst[row - 1, col + 1],
                    dst[row + 1, col + 1],
                ):
                    dst[row, col] = strong
                else:
                    dst[row, col] = 0

    return dst
+
+
# Demo: show the Canny edges of the sample image
if __name__ == "__main__":
    # Read the sample image in greyscale mode.
    lena = cv2.imread(r"../image_data/lena.jpg", 0)
    # Run the detector and display the edge map.
    edges = canny(lena)
    cv2.imshow("canny", edges)
    cv2.waitKey(0)
diff --git a/digital_image_processing/filters/bilateral_filter.py b/digital_image_processing/filters/bilateral_filter.py
new file mode 100644
index 000000000000..76ae4dd20345
--- /dev/null
+++ b/digital_image_processing/filters/bilateral_filter.py
@@ -0,0 +1,88 @@
+"""
+Implementation of Bilateral filter
+
+Inputs:
+ img: A 2d image with values in between 0 and 1
+ varS: variance in space dimension.
+ varI: variance in Intensity.
+ N: Kernel size(Must be an odd number)
+Output:
+ img:A 2d zero padded image with values in between 0 and 1
+"""
+import math
+import sys
+
+import cv2
+import numpy as np
+
+
def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    """Apply the 1-D Gaussian pdf elementwise to every value in ``img``."""
    std = math.sqrt(variance)
    coeff = 1 / (std * math.sqrt(2 * math.pi))
    scaled_sq = (img / std) ** 2
    return coeff * np.exp(-(scaled_sq) * 0.5)
+
+
def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    """Return the kernel_size x kernel_size window of ``img`` centred at (x, y)."""
    radius = kernel_size // 2
    rows = slice(x - radius, x + radius + 1)
    cols = slice(y - radius, y + radius + 1)
    return img[rows, cols]
+
+
def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    """Build a kernel_size x kernel_size spatial Gaussian kernel.

    Cell (i, j) holds the Gaussian (via ``vec_gaussian``) of its Euclidean
    distance from the kernel centre.
    """
    center = kernel_size // 2
    distances = np.zeros((kernel_size, kernel_size))
    for i in range(kernel_size):
        for j in range(kernel_size):
            distances[i, j] = math.sqrt(
                abs(i - center) ** 2 + abs(j - center) ** 2
            )
    return vec_gaussian(distances, spatial_variance)
+
+
def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    """Edge-preserving smoothing: weight each window both by spatial
    distance and by intensity difference from the centre pixel.

    Returns a new image (values in [0, 1]) whose border of width
    kernel_size // 2 is left zero-padded.
    """
    out = np.zeros(img.shape)
    spatial_kernel = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    half = kernel_size // 2
    for i in range(half, size_x - half):
        for j in range(half, size_y - half):

            window = get_slice(img, i, j, kernel_size)
            # Intensity offsets relative to the centre pixel of the window.
            diff = window - window[half, half]
            intensity_kernel = vec_gaussian(diff, intensity_variance)
            weights = np.multiply(spatial_kernel, intensity_kernel)
            weighted_vals = np.multiply(window, weights)
            out[i, j] = np.sum(weighted_vals) / np.sum(weights)
    return out
+
+
def parse_args(args: list) -> tuple:
    """Parse CLI arguments into (filename, spatial variance, intensity
    variance, kernel size).

    Missing arguments fall back to defaults; an even kernel size is
    bumped up to the next odd number.
    """
    filename = args[1] if len(args) > 1 else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if len(args) > 2 else 1.0
    intensity_variance = float(args[3]) if len(args) > 3 else 1.0
    kernel_size = 5
    if len(args) > 4:
        kernel_size = int(args[4])
        # abs(k % 2 - 1) is 1 for even k and 0 for odd k.
        kernel_size += abs(kernel_size % 2 - 1)
    return filename, spatial_variance, intensity_variance, kernel_size
+
+
# Demo: bilateral-filter an image given on the command line
if __name__ == "__main__":
    # Parse CLI options and load the image in greyscale mode.
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow("input image", img)

    # Normalise to [0, 1], filter, then scale back to 8-bit for display.
    filtered = img / 255
    filtered = filtered.astype("float32")
    filtered = bilateral_filter(
        filtered, spatial_variance, intensity_variance, kernel_size
    )
    filtered = filtered * 255
    filtered = np.uint8(filtered)
    cv2.imshow("output image", filtered)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
diff --git a/digital_image_processing/filters/convolve.py b/digital_image_processing/filters/convolve.py
new file mode 100644
index 000000000000..299682010da6
--- /dev/null
+++ b/digital_image_processing/filters/convolve.py
@@ -0,0 +1,49 @@
+# @Author : lightXu
+# @File : convolve.py
+# @Time : 2019/7/8 0008 下午 16:13
+from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
+from numpy import array, dot, pad, ravel, uint8, zeros
+
+
def im2col(image, block_size):
    """Rearrange every sliding ``block_size`` window of ``image`` into a row.

    Returns an array with one row per window position (row-major order),
    each row being the flattened window.

    Bug fix: the original derived the number of window positions from the
    transposed dimensions (row count from ``cols - block_size[1] + 1`` and
    vice versa), which broke on any non-square image.
    """
    rows, cols = image.shape
    dst_height = rows - block_size[0] + 1
    dst_width = cols - block_size[1] + 1
    image_array = zeros((dst_height * dst_width, block_size[1] * block_size[0]))
    row = 0
    for i in range(dst_height):
        for j in range(dst_width):
            window = ravel(image[i : i + block_size[0], j : j + block_size[1]])
            image_array[row, :] = window
            row += 1

    return image_array
+
+
def img_convolve(image, filter_kernel):
    """Convolve ``image`` with a square ``filter_kernel``, edge-padded so
    the output keeps the input's height and width."""
    height, width = image.shape[0], image.shape[1]
    k_size = filter_kernel.shape[0]
    pad_size = k_size // 2
    # Replicate border values so every output pixel has a full window.
    padded = pad(image, pad_size, mode="edge")

    # One row per k_size*k_size window (im2col layout) ...
    patches = im2col(padded, (k_size, k_size))

    # ... so the whole convolution is a single matrix-vector product
    # against the flattened kernel.
    kernel_flat = ravel(filter_kernel)
    return dot(patches, kernel_flat).reshape(height, width)
+
+
# Demo: apply the Laplace operator to the sample image
if __name__ == "__main__":
    # Read the sample image and convert it to greyscale.
    img = imread(r"../image_data/lena.jpg")
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # Convolve with the Laplace operator and display the result.
    Laplace_kernel = array([[0, 1, 0], [1, -4, 1], [0, 1, 0]])
    out = img_convolve(gray, Laplace_kernel).astype(uint8)
    imshow("Laplacian", out)
    waitKey(0)
diff --git a/digital_image_processing/filters/gaussian_filter.py b/digital_image_processing/filters/gaussian_filter.py
new file mode 100644
index 000000000000..87fa67fb65ea
--- /dev/null
+++ b/digital_image_processing/filters/gaussian_filter.py
@@ -0,0 +1,54 @@
+"""
+Implementation of gaussian filter algorithm
+"""
+from itertools import product
+
+from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
+from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros
+
+
def gen_gaussian_kernel(k_size, sigma):
    """Return a k_size x k_size Gaussian kernel centred on the middle cell."""
    center = k_size // 2
    grid_x, grid_y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    norm = 1 / (2 * pi * sigma)
    return norm * exp(-(square(grid_x) + square(grid_y)) / (2 * square(sigma)))
+
+
def gaussian_filter(image, k_size, sigma):
    """Smooth ``image`` with a k_size Gaussian kernel (valid convolution,
    so the output shrinks by k_size - 1 in each dimension)."""
    height, width = image.shape[0], image.shape[1]
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col layout: one flattened k_size*k_size window per output pixel.
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # Flatten the kernel so the convolution becomes one matrix-vector
    # product, then reshape back to the destination image.
    filter_array = ravel(gen_gaussian_kernel(k_size, sigma))
    return dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)
+
+
# Demo: Gaussian-smooth the sample image with two mask sizes
if __name__ == "__main__":
    # Read the sample image and convert it to greyscale.
    img = imread(r"../image_data/lena.jpg")
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Filter with two different mask sizes.
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # Display both results until a key is pressed.
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
diff --git a/digital_image_processing/filters/median_filter.py b/digital_image_processing/filters/median_filter.py
index eea4295632a1..174018569d62 100644
--- a/digital_image_processing/filters/median_filter.py
+++ b/digital_image_processing/filters/median_filter.py
@@ -1,9 +1,8 @@
"""
Implementation of median filter algorithm
"""
-
-from cv2 import imread, cvtColor, COLOR_BGR2GRAY, imshow, waitKey
-from numpy import zeros_like, ravel, sort, multiply, divide, int8
+from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
+from numpy import divide, int8, multiply, ravel, sort, zeros_like
def median_filter(gray_img, mask=3):
@@ -15,20 +14,20 @@ def median_filter(gray_img, mask=3):
# set image borders
bd = int(mask / 2)
# copy image size
- median_img = zeros_like(gray)
+ median_img = zeros_like(gray_img)
for i in range(bd, gray_img.shape[0] - bd):
for j in range(bd, gray_img.shape[1] - bd):
# get mask according with mask
- kernel = ravel(gray_img[i - bd:i + bd + 1, j - bd:j + bd + 1])
+ kernel = ravel(gray_img[i - bd : i + bd + 1, j - bd : j + bd + 1])
# calculate mask median
median = sort(kernel)[int8(divide((multiply(mask, mask)), 2) + 1)]
median_img[i, j] = median
return median_img
-if __name__ == '__main__':
+if __name__ == "__main__":
# read original image
- img = imread('lena.jpg')
+ img = imread("../image_data/lena.jpg")
# turn image in gray scale value
gray = cvtColor(img, COLOR_BGR2GRAY)
@@ -37,6 +36,6 @@ def median_filter(gray_img, mask=3):
median5x5 = median_filter(gray, 5)
# show result images
- imshow('median filter with 3x3 mask', median3x3)
- imshow('median filter with 5x5 mask', median5x5)
+ imshow("median filter with 3x3 mask", median3x3)
+ imshow("median filter with 5x5 mask", median5x5)
waitKey(0)
diff --git a/digital_image_processing/filters/sobel_filter.py b/digital_image_processing/filters/sobel_filter.py
new file mode 100644
index 000000000000..33284a32f424
--- /dev/null
+++ b/digital_image_processing/filters/sobel_filter.py
@@ -0,0 +1,39 @@
+# @Author : lightXu
+# @File : sobel_filter.py
+# @Time : 2019/7/8 0008 下午 16:26
+import numpy as np
+from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
+
+from digital_image_processing.filters.convolve import img_convolve
+
+
def sobel_filter(image):
    """Return (gradient magnitude scaled to uint8, gradient angle in radians)."""
    kernel_x = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])
    kernel_y = np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])

    grad_x = np.abs(img_convolve(image, kernel_x))
    grad_y = np.abs(img_convolve(image, kernel_y))
    # Rescale each directional gradient into [0, 255].
    grad_x = grad_x * 255 / np.max(grad_x)
    grad_y = grad_y * 255 / np.max(grad_y)

    # Combined magnitude, rescaled again into [0, 255].
    magnitude = np.sqrt((np.square(grad_x)) + (np.square(grad_y)))
    magnitude = magnitude * 255 / np.max(magnitude)
    dst = magnitude.astype(np.uint8)

    # Direction from the rescaled gradients (matches original behaviour).
    theta = np.arctan2(grad_y, grad_x)
    return dst, theta
+
+
# Demo: show Sobel magnitude and direction for the sample image
if __name__ == "__main__":
    # Read the sample image and convert it to greyscale.
    img = imread("../image_data/lena.jpg")
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Run the filter and display both outputs.
    sobel_grad, sobel_theta = sobel_filter(gray)

    imshow("sobel filter", sobel_grad)
    imshow("sobel theta", sobel_theta)
    waitKey(0)
diff --git a/digital_image_processing/histogram_equalization/__init__.py b/digital_image_processing/histogram_equalization/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/digital_image_processing/histogram_equalization/histogram_stretch.py b/digital_image_processing/histogram_equalization/histogram_stretch.py
new file mode 100644
index 000000000000..0288a2c1fcf5
--- /dev/null
+++ b/digital_image_processing/histogram_equalization/histogram_stretch.py
@@ -0,0 +1,63 @@
+"""
+Created on Fri Sep 28 15:22:29 2018
+
+@author: Binish125
+"""
+import copy
+import os
+
+import cv2
+import numpy as np
+from matplotlib import pyplot as plt
+
+
class contrastStretch:
    """Histogram-equalisation based contrast stretching of a greyscale image."""

    def __init__(self):
        self.img = ""  # greyscale image loaded by stretch()
        self.original_image = ""  # untouched copy kept for before/after display
        self.last_list = []  # per-intensity remapping table built by stretch()
        self.rem = 0  # rounding remainder; NOTE(review): never becomes non-zero, see stretch()
        self.L = 256  # number of intensity levels
        self.sk = 0  # running cumulative probability
        self.k = 0  # total pixel count of the histogram
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        """Equalise the histogram of the image at path ``input_image`` and write
        the result to output_data/output.jpg (relative to the CWD)."""
        self.img = cv2.imread(input_image, 0)  # flag 0 -> load as greyscale
        self.original_image = copy.deepcopy(self.img)
        # histogram: x[i] = number of pixels with intensity i
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k  # probability of intensity i
            self.sk += prk  # cumulative distribution up to intensity i
            last = (self.L - 1) * self.sk
            # NOTE(review): self.rem starts at 0 and `last % last` is always 0,
            # so this branch never changes self.rem and the rounding below is
            # effectively a floor — looks like dead/broken rounding logic; confirm.
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        # remap every pixel through the equalisation table
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plotHistogram(self):
        """Add the processed image's histogram to the current pyplot figure."""
        plt.hist(self.img.ravel(), 256, [0, 256])

    def showImage(self):
        """Display the input and output images for 5 s, then close all windows."""
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()
+
+
if __name__ == "__main__":
    # Resolve the sample image relative to this script's directory.
    # BUG FIX: os.path.basename(__file__) returned the file name itself, so the
    # join produced "histogram_stretch.py/image_data/input.jpg" — a path that
    # can never exist; os.path.dirname(__file__) gives the containing directory.
    file_path = os.path.join(os.path.dirname(__file__), "image_data/input.jpg")
    stretcher = contrastStretch()
    stretcher.stretch(file_path)
    stretcher.plotHistogram()
    stretcher.showImage()
diff --git a/digital_image_processing/histogram_equalization/image_data/__init__.py b/digital_image_processing/histogram_equalization/image_data/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/digital_image_processing/histogram_equalization/image_data/input.jpg b/digital_image_processing/histogram_equalization/image_data/input.jpg
new file mode 100644
index 000000000000..483da6fd97f4
Binary files /dev/null and b/digital_image_processing/histogram_equalization/image_data/input.jpg differ
diff --git a/digital_image_processing/histogram_equalization/output_data/__init__.py b/digital_image_processing/histogram_equalization/output_data/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/digital_image_processing/histogram_equalization/output_data/output.jpg b/digital_image_processing/histogram_equalization/output_data/output.jpg
new file mode 100644
index 000000000000..48c70d8ae17d
Binary files /dev/null and b/digital_image_processing/histogram_equalization/output_data/output.jpg differ
diff --git a/digital_image_processing/image_data/__init__.py b/digital_image_processing/image_data/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/digital_image_processing/image_data/lena.jpg b/digital_image_processing/image_data/lena.jpg
new file mode 100644
index 000000000000..15c4d9764eff
Binary files /dev/null and b/digital_image_processing/image_data/lena.jpg differ
diff --git a/digital_image_processing/image_data/lena_small.jpg b/digital_image_processing/image_data/lena_small.jpg
new file mode 100644
index 000000000000..b85144e9f65c
Binary files /dev/null and b/digital_image_processing/image_data/lena_small.jpg differ
diff --git a/digital_image_processing/index_calculation.py b/digital_image_processing/index_calculation.py
new file mode 100644
index 000000000000..4350b8603390
--- /dev/null
+++ b/digital_image_processing/index_calculation.py
@@ -0,0 +1,576 @@
+# Author: João Gustavo A. Amorim
+# Author email: joaogustavoamorim@gmail.com
+# Coding date: jan 2019
+# python/black: True
+
+# Imports
+import numpy as np
+
+
+# Class implemented to calculate the vegetation indices
class IndexCalculation:
    """
    # Class Summary
    This algorithm consists in calculating vegetation indices, these
    indices can be used for precision agriculture for example (or remote
    sensing). There are functions to define the data and to calculate the
    implemented indices.

    # Vegetation index
    https://en.wikipedia.org/wiki/Vegetation_Index
    A Vegetation Index (VI) is a spectral transformation of two or more bands
    designed to enhance the contribution of vegetation properties and allow
    reliable spatial and temporal inter-comparisons of terrestrial
    photosynthetic activity and canopy structural variations

    # Information about channels (Wavelength range for each)
    * nir - near-infrared
        https://www.malvernpanalytical.com/br/products/technology/near-infrared-spectroscopy
        Wavelength Range 700 nm to 2500 nm
    * Red Edge
        https://en.wikipedia.org/wiki/Red_edge
        Wavelength Range 680 nm to 730 nm
    * red
        https://en.wikipedia.org/wiki/Color
        Wavelength Range 635 nm to 700 nm
    * blue
        https://en.wikipedia.org/wiki/Color
        Wavelength Range 450 nm to 490 nm
    * green
        https://en.wikipedia.org/wiki/Color
        Wavelength Range 520 nm to 560 nm


    # Implemented index list
            #"abbreviationOfIndexName" -- list of channels used

            #"ARVI2"            --  red, nir
            #"CCCI"             --  red, redEdge, nir
            #"CVI"              --  red, green, nir
            #"GLI"              --  red, green, blue
            #"NDVI"             --  red, nir
            #"BNDVI"            --  blue, nir
            #"redEdgeNDVI"      --  red, redEdge
            #"GNDVI"            --  green, nir
            #"GBNDVI"           --  green, blue, nir
            #"GRNDVI"           --  red, green, nir
            #"RBNDVI"           --  red, blue, nir
            #"PNDVI"            --  red, green, blue, nir
            #"ATSAVI"           --  red, nir
            #"BWDRVI"           --  blue, nir
            #"CIgreen"          --  green, nir
            #"CIrededge"        --  redEdge, nir
            #"CI"               --  red, blue
            #"CTVI"             --  red, nir
            #"GDVI"             --  green, nir
            #"EVI"              --  red, blue, nir
            #"GEMI"             --  red, nir
            #"GOSAVI"           --  green, nir
            #"GSAVI"            --  green, nir
            #"Hue"              --  red, green, blue
            #"IVI"              --  red, nir
            #"IPVI"             --  red, nir
            #"I"                --  red, green, blue
            #"RVI"              --  red, nir
            #"MRVI"             --  red, nir
            #"MSAVI"            --  red, nir
            #"NormG"            --  red, green, nir
            #"NormNIR"          --  red, green, nir
            #"NormR"            --  red, green, nir
            #"NGRDI"            --  red, green
            #"RI"               --  red, green
            #"S"                --  red, green, blue
            #"IF"               --  red, green, blue
            #"DVI"              --  red, nir
            #"TVI"              --  red, nir
            #"NDRE"             --  redEdge, nir

    #list of all index implemented
        #allIndex = ["ARVI2", "CCCI", "CVI", "GLI", "NDVI", "BNDVI", "redEdgeNDVI",
                     "GNDVI", "GBNDVI", "GRNDVI", "RBNDVI", "PNDVI", "ATSAVI",
                     "BWDRVI", "CIgreen", "CIrededge", "CI", "CTVI", "GDVI", "EVI",
                     "GEMI", "GOSAVI", "GSAVI", "Hue", "IVI", "IPVI", "I", "RVI",
                     "MRVI", "MSAVI", "NormG", "NormNIR", "NormR", "NGRDI", "RI",
                     "S", "IF", "DVI", "TVI", "NDRE"]

    #list of index with not blue channel
        #notBlueIndex = ["ARVI2", "CCCI", "CVI", "NDVI", "redEdgeNDVI", "GNDVI",
                         "GRNDVI", "ATSAVI", "CIgreen", "CIrededge", "CTVI", "GDVI",
                         "GEMI", "GOSAVI", "GSAVI", "IVI", "IPVI", "RVI", "MRVI",
                         "MSAVI", "NormG", "NormNIR", "NormR", "NGRDI", "RI", "DVI",
                         "TVI", "NDRE"]

    #list of index just with RGB channels
        #RGBIndex = ["GLI", "CI", "Hue", "I", "NGRDI", "RI", "S", "IF"]
    """

    def __init__(self, red=None, green=None, blue=None, redEdge=None, nir=None):
        # print("Numpy version: " + np.__version__)
        # all channel assignment is delegated to setMatrices
        self.setMatrices(red=red, green=green, blue=blue, redEdge=redEdge, nir=nir)

    def setMatrices(self, red=None, green=None, blue=None, redEdge=None, nir=None):
        # Only channels passed as non-None are (re)assigned; a channel never set
        # leaves the corresponding attribute undefined. Always returns True.
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if redEdge is not None:
            self.redEdge = redEdge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(
        self, index="", red=None, green=None, blue=None, redEdge=None, nir=None
    ):
        """
        performs the calculation of the index with the values instantiated in the class
        :str index: abbreviation of index name to perform

        Channel arguments passed here update the stored matrices first (see
        setMatrices). On an unknown index name this prints a message and
        returns False instead of raising.
        """
        self.setMatrices(red=red, green=green, blue=blue, redEdge=redEdge, nir=nir)
        # dispatch table: index abbreviation -> bound method
        funcs = {
            "ARVI2": self.ARVI2,
            "CCCI": self.CCCI,
            "CVI": self.CVI,
            "GLI": self.GLI,
            "NDVI": self.NDVI,
            "BNDVI": self.BNDVI,
            "redEdgeNDVI": self.redEdgeNDVI,
            "GNDVI": self.GNDVI,
            "GBNDVI": self.GBNDVI,
            "GRNDVI": self.GRNDVI,
            "RBNDVI": self.RBNDVI,
            "PNDVI": self.PNDVI,
            "ATSAVI": self.ATSAVI,
            "BWDRVI": self.BWDRVI,
            "CIgreen": self.CIgreen,
            "CIrededge": self.CIrededge,
            "CI": self.CI,
            "CTVI": self.CTVI,
            "GDVI": self.GDVI,
            "EVI": self.EVI,
            "GEMI": self.GEMI,
            "GOSAVI": self.GOSAVI,
            "GSAVI": self.GSAVI,
            "Hue": self.Hue,
            "IVI": self.IVI,
            "IPVI": self.IPVI,
            "I": self.I,
            "RVI": self.RVI,
            "MRVI": self.MRVI,
            "MSAVI": self.MSAVI,
            "NormG": self.NormG,
            "NormNIR": self.NormNIR,
            "NormR": self.NormR,
            "NGRDI": self.NGRDI,
            "RI": self.RI,
            "S": self.S,
            "IF": self.IF,
            "DVI": self.DVI,
            "TVI": self.TVI,
            "NDRE": self.NDRE,
        }

        try:
            return funcs[index]()
        except KeyError:
            print("Index not in the list!")
            return False

    def ARVI2(self):
        """
        Atmospherically Resistant Vegetation Index 2
        https://www.indexdatabase.de/db/i-single.php?id=396
        :return: index
            −0.18+1.17*(self.nir−self.red)/(self.nir+self.red)
        """
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def CCCI(self):
        """
        Canopy Chlorophyll Content Index
        https://www.indexdatabase.de/db/i-single.php?id=224
        :return: index
        """
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def CVI(self):
        """
        Chlorophyll vegetation index
        https://www.indexdatabase.de/db/i-single.php?id=391
        :return: index
        """
        return self.nir * (self.red / (self.green ** 2))

    def GLI(self):
        """
        self.green leaf index
        https://www.indexdatabase.de/db/i-single.php?id=375
        :return: index
        """
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def NDVI(self):
        """
        Normalized Difference self.nir/self.red Normalized Difference Vegetation
        Index, Calibrated NDVI - CDVI
        https://www.indexdatabase.de/db/i-single.php?id=58
        :return: index
        """
        return (self.nir - self.red) / (self.nir + self.red)

    def BNDVI(self):
        """
        Normalized Difference self.nir/self.blue self.blue-normalized difference
        vegetation index
        https://www.indexdatabase.de/db/i-single.php?id=135
        :return: index
        """
        return (self.nir - self.blue) / (self.nir + self.blue)

    def redEdgeNDVI(self):
        """
        Normalized Difference self.rededge/self.red
        https://www.indexdatabase.de/db/i-single.php?id=235
        :return: index
        """
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def GNDVI(self):
        """
        Normalized Difference self.nir/self.green self.green NDVI
        https://www.indexdatabase.de/db/i-single.php?id=401
        :return: index
        """
        return (self.nir - self.green) / (self.nir + self.green)

    def GBNDVI(self):
        """
        self.green-self.blue NDVI
        https://www.indexdatabase.de/db/i-single.php?id=186
        :return: index
        """
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def GRNDVI(self):
        """
        self.green-self.red NDVI
        https://www.indexdatabase.de/db/i-single.php?id=185
        :return: index
        """
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def RBNDVI(self):
        """
        self.red-self.blue NDVI
        https://www.indexdatabase.de/db/i-single.php?id=187
        :return: index
        """
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def PNDVI(self):
        """
        Pan NDVI
        https://www.indexdatabase.de/db/i-single.php?id=188
        :return: index
        """
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def ATSAVI(self, X=0.08, a=1.22, b=0.03):
        """
        Adjusted transformed soil-adjusted VI
        https://www.indexdatabase.de/db/i-single.php?id=209
        :return: index
        """
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + X * (1 + a ** 2))
        )

    def BWDRVI(self):
        """
        self.blue-wide dynamic range vegetation index
        https://www.indexdatabase.de/db/i-single.php?id=136
        :return: index
        """
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def CIgreen(self):
        """
        Chlorophyll Index self.green
        https://www.indexdatabase.de/db/i-single.php?id=128
        :return: index
        """
        return (self.nir / self.green) - 1

    def CIrededge(self):
        """
        Chlorophyll Index self.redEdge
        https://www.indexdatabase.de/db/i-single.php?id=131
        :return: index
        """
        return (self.nir / self.redEdge) - 1

    def CI(self):
        """
        Coloration Index
        https://www.indexdatabase.de/db/i-single.php?id=11
        :return: index
        """
        return (self.red - self.blue) / self.red

    def CTVI(self):
        """
        Corrected Transformed Vegetation Index
        https://www.indexdatabase.de/db/i-single.php?id=244
        :return: index
        """
        ndvi = self.NDVI()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def GDVI(self):
        """
        Difference self.nir/self.green self.green Difference Vegetation Index
        https://www.indexdatabase.de/db/i-single.php?id=27
        :return: index
        """
        return self.nir - self.green

    def EVI(self):
        """
        Enhanced Vegetation Index
        https://www.indexdatabase.de/db/i-single.php?id=16
        :return: index
        """
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def GEMI(self):
        """
        Global Environment Monitoring Index
        https://www.indexdatabase.de/db/i-single.php?id=25
        :return: index
        """
        n = (2 * (self.nir ** 2 - self.red ** 2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def GOSAVI(self, Y=0.16):
        """
        self.green Optimized Soil Adjusted Vegetation Index
        https://www.indexdatabase.de/db/i-single.php?id=29
        mit Y = 0,16
        :return: index
        """
        return (self.nir - self.green) / (self.nir + self.green + Y)

    def GSAVI(self, L=0.5):
        """
        self.green Soil Adjusted Vegetation Index
        https://www.indexdatabase.de/db/i-single.php?id=31
        mit L = 0,5
        :return: index
        """
        return ((self.nir - self.green) / (self.nir + self.green + L)) * (1 + L)

    def Hue(self):
        """
        Hue
        https://www.indexdatabase.de/db/i-single.php?id=34
        :return: index
        """
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
        )

    def IVI(self, a=None, b=None):
        """
        Ideal vegetation index
        https://www.indexdatabase.de/db/i-single.php?id=276
        b=intercept of vegetation line
        a=soil line slope
        :return: index

        NOTE(review): a and b default to None, so calling IVI() without both
        arguments raises a TypeError in the arithmetic below — confirm callers
        always supply them.
        """
        return (self.nir - b) / (a * self.red)

    def IPVI(self):
        """
        Infraself.red percentage vegetation index
        https://www.indexdatabase.de/db/i-single.php?id=35
        :return: index
        """
        return (self.nir / ((self.nir + self.red) / 2)) * (self.NDVI() + 1)

    def I(self):  # noqa: E741,E743
        """
        Intensity
        https://www.indexdatabase.de/db/i-single.php?id=36
        :return: index
        """
        return (self.red + self.green + self.blue) / 30.5

    def RVI(self):
        """
        Ratio-Vegetation-Index
        http://www.seos-project.eu/modules/remotesensing/remotesensing-c03-s01-p01.html
        :return: index
        """
        return self.nir / self.red

    def MRVI(self):
        """
        Modified Normalized Difference Vegetation Index RVI
        https://www.indexdatabase.de/db/i-single.php?id=275
        :return: index
        """
        return (self.RVI() - 1) / (self.RVI() + 1)

    def MSAVI(self):
        """
        Modified Soil Adjusted Vegetation Index
        https://www.indexdatabase.de/db/i-single.php?id=44
        :return: index
        """
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def NormG(self):
        """
        Norm G
        https://www.indexdatabase.de/db/i-single.php?id=50
        :return: index
        """
        return self.green / (self.nir + self.red + self.green)

    def NormNIR(self):
        """
        Norm self.nir
        https://www.indexdatabase.de/db/i-single.php?id=51
        :return: index
        """
        return self.nir / (self.nir + self.red + self.green)

    def NormR(self):
        """
        Norm R
        https://www.indexdatabase.de/db/i-single.php?id=52
        :return: index
        """
        return self.red / (self.nir + self.red + self.green)

    def NGRDI(self):
        """
        Normalized Difference self.green/self.red Normalized self.green self.red
        difference index, Visible Atmospherically Resistant Indices self.green
        (VIself.green)
        https://www.indexdatabase.de/db/i-single.php?id=390
        :return: index
        """
        return (self.green - self.red) / (self.green + self.red)

    def RI(self):
        """
        Normalized Difference self.red/self.green self.redness Index
        https://www.indexdatabase.de/db/i-single.php?id=74
        :return: index
        """
        return (self.red - self.green) / (self.red + self.green)

    def S(self):
        """
        Saturation
        https://www.indexdatabase.de/db/i-single.php?id=77
        :return: index
        """
        # NOTE: the locals below shadow the builtins max() and min() for the
        # remainder of this method body.
        max = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max - min) / max

    def IF(self):
        """
        Shape Index
        https://www.indexdatabase.de/db/i-single.php?id=79
        :return: index
        """
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def DVI(self):
        """
        Simple Ratio self.nir/self.red Difference Vegetation Index, Vegetation Index
        Number (VIN)
        https://www.indexdatabase.de/db/i-single.php?id=12
        :return: index
        """
        return self.nir / self.red

    def TVI(self):
        """
        Transformed Vegetation Index
        https://www.indexdatabase.de/db/i-single.php?id=98
        :return: index
        """
        return (self.NDVI() + 0.5) ** (1 / 2)

    def NDRE(self):
        """
        Normalized Difference self.redEdge/self.nir
        :return: index
        """
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
+
+
+"""
+# genering a random matrices to test this class
+red = np.ones((1000,1000, 1),dtype="float64") * 46787
+green = np.ones((1000,1000, 1),dtype="float64") * 23487
+blue = np.ones((1000,1000, 1),dtype="float64") * 14578
+redEdge = np.ones((1000,1000, 1),dtype="float64") * 51045
+nir = np.ones((1000,1000, 1),dtype="float64") * 52200
+
+# Examples of how to use the class
+
+# instantiating the class
+cl = IndexCalculation()
+
+# instantiating the class with the values
+#cl = indexCalculation(red=red, green=green, blue=blue, redEdge=redEdge, nir=nir)
+
+# how set the values after instantiate the class cl, (for update the data or when don't
+# instantiating the class with the values)
+cl.setMatrices(red=red, green=green, blue=blue, redEdge=redEdge, nir=nir)
+
+# calculating the indices for the instantiated values in the class
+ # Note: the CCCI index can be changed to any index implemented in the class.
+indexValue_form1 = cl.calculation("CCCI", red=red, green=green, blue=blue,
+ redEdge=redEdge, nir=nir).astype(np.float64)
+indexValue_form2 = cl.CCCI()
+
+# calculating the index with the values directly -- you can set just the values
+# preferred note: the *calculation* function performs the function *setMatrices*
+indexValue_form3 = cl.calculation("CCCI", red=red, green=green, blue=blue,
+ redEdge=redEdge, nir=nir).astype(np.float64)
+
+print("Form 1: "+np.array2string(indexValue_form1, precision=20, separator=', ',
+ floatmode='maxprec_equal'))
+print("Form 2: "+np.array2string(indexValue_form2, precision=20, separator=', ',
+ floatmode='maxprec_equal'))
+print("Form 3: "+np.array2string(indexValue_form3, precision=20, separator=', ',
+ floatmode='maxprec_equal'))
+
+# A list of examples results for different type of data at NDVI
+# float16 -> 0.31567383 #NDVI (red = 50, nir = 100)
+# float32 -> 0.31578946 #NDVI (red = 50, nir = 100)
+# float64 -> 0.3157894736842105 #NDVI (red = 50, nir = 100)
+# longdouble -> 0.3157894736842105 #NDVI (red = 50, nir = 100)
+"""
diff --git a/digital_image_processing/resize/__init__.py b/digital_image_processing/resize/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/digital_image_processing/resize/resize.py b/digital_image_processing/resize/resize.py
new file mode 100644
index 000000000000..4836521f9f58
--- /dev/null
+++ b/digital_image_processing/resize/resize.py
@@ -0,0 +1,71 @@
+""" Multiple image resizing techniques """
+import numpy as np
+from cv2 import destroyAllWindows, imread, imshow, waitKey
+
+
class NearestNeighbour:
    """
    Simplest and fastest version of image resizing.
    Source: https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation
    """

    def __init__(self, img, dst_width: int, dst_height: int):
        """
        :param img: source image (indexable pixel array, e.g. a numpy ndarray)
        :param dst_width: destination width, must be > 0
        :param dst_height: destination height, must be > 0
        :raises ValueError: if either destination dimension is not positive
        """
        # BUG FIX: the original check used `< 0`, which let 0 through despite
        # the error message and then crashed with ZeroDivisionError when the
        # ratios below were computed.
        if dst_width <= 0 or dst_height <= 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        # step in the source image per destination pixel, for each axis
        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        # white canvas of the destination size (kept under both names for
        # backward compatibility with existing callers)
        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        """Fill ``self.output`` by sampling the nearest source pixel."""
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        """
        Get parent X coordinate for destination X
        :param x: Destination X coordinate
        :return: Parent X coordinate based on `x ratio`
        >>> nn = NearestNeighbour(np.zeros((8, 8, 3), np.uint8), 4, 4)
        >>> nn.ratio_x = 0.5
        >>> nn.get_x(4)
        2
        """
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        """
        Get parent Y coordinate for destination Y
        :param y: Destination Y coordinate
        :return: Parent Y coordinate based on `y ratio`
        >>> nn = NearestNeighbour(np.zeros((8, 8, 3), np.uint8), 4, 4)
        >>> nn.ratio_y = 0.5
        >>> nn.get_y(4)
        2
        """
        return int(self.ratio_y * y)
+
+
if __name__ == "__main__":
    # target size for the demo resize
    dst_w, dst_h = 800, 600
    # read lena.jpg in colour mode (flag 1); path is relative to the CWD
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()

    # show the resized result until a key is pressed
    imshow(
        f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
    )
    waitKey(0)
    destroyAllWindows()
diff --git a/digital_image_processing/rotation/__init__.py b/digital_image_processing/rotation/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/digital_image_processing/rotation/rotation.py b/digital_image_processing/rotation/rotation.py
new file mode 100644
index 000000000000..2951f18fc0ec
--- /dev/null
+++ b/digital_image_processing/rotation/rotation.py
@@ -0,0 +1,52 @@
+import cv2
+import numpy as np
+from matplotlib import pyplot as plt
+
+
def get_rotation(
    img: np.array, pt1: np.float32, pt2: np.float32, rows: int, cols: int
) -> np.array:
    """
    Return ``img`` warped by the affine transform that maps ``pt1`` onto ``pt2``.

    :param img: source image as np.array
    :param pt1: 3x2 np.float32 array of source points
    :param pt2: 3x2 np.float32 array of destination points
    :param rows: first element of the output size tuple passed to warpAffine
    :param cols: second element of the output size tuple passed to warpAffine
    :return: the warped image as np.array

    NOTE(review): cv2.warpAffine expects dsize as (width, height); callers pass
    (img_rows, img_cols), which only matches for square images — confirm.
    """
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))
+
+
if __name__ == "__main__":
    # read original image (path relative to the CWD)
    image = cv2.imread("lena.jpg")
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.float32([[50, 50], [200, 50], [50, 200]])
    pts2 = np.float32([[10, 100], [200, 50], [100, 250]])
    pts3 = np.float32([[50, 50], [150, 50], [120, 200]])
    pts4 = np.float32([[10, 100], [80, 50], [180, 250]])

    # add all rotated images in a list
    # NOTE(review): the last two rotations both start from pts2 (not pts1/pts3
    # as the naming suggests) — confirm this pairing is intentional.
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    # (the loop variable deliberately reuses the name `image`, shadowing the
    # original image read above, which is no longer needed at this point)
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
        plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
diff --git a/digital_image_processing/sepia.py b/digital_image_processing/sepia.py
new file mode 100644
index 000000000000..dfb5951676aa
--- /dev/null
+++ b/digital_image_processing/sepia.py
@@ -0,0 +1,50 @@
+"""
+ Implemented an algorithm using opencv to tone an image with sepia technique
+"""
+from cv2 import destroyAllWindows, imread, imshow, waitKey
+
+
+def make_sepia(img, factor: int):
+ """
+ Function create sepia tone.
+ Source: https://en.wikipedia.org/wiki/Sepia_(color)
+ """
+ pixel_h, pixel_v = img.shape[0], img.shape[1]
+
+ def to_grayscale(blue, green, red):
+ """
+ Helper function to create pixel's greyscale representation
+ Src: https://pl.wikipedia.org/wiki/YUV
+ """
+ return 0.2126 * red + 0.587 * green + 0.114 * blue
+
+ def normalize(value):
+ """ Helper function to normalize R/G/B value -> return 255 if value > 255"""
+ return min(value, 255)
+
+ for i in range(pixel_h):
+ for j in range(pixel_v):
+ greyscale = int(to_grayscale(*img[i][j]))
+ img[i][j] = [
+ normalize(greyscale),
+ normalize(greyscale + factor),
+ normalize(greyscale + 2 * factor),
+ ]
+
+ return img
+
+
if __name__ == "__main__":
    # read one fresh copy of the original image per sepia factor
    images = {
        percentage: imread("image_data/lena.jpg", 1) for percentage in (10, 20, 30, 40)
    }

    # make_sepia mutates each image in place
    for percentage, img in images.items():
        make_sepia(img, percentage)

    # one preview window per factor; all close on the first key press
    for percentage, img in images.items():
        imshow(f"Original image with sepia (factor: {percentage})", img)

    waitKey(0)
    destroyAllWindows()
diff --git a/digital_image_processing/test_digital_image_processing.py b/digital_image_processing/test_digital_image_processing.py
new file mode 100644
index 000000000000..40f2f7b83b6d
--- /dev/null
+++ b/digital_image_processing/test_digital_image_processing.py
@@ -0,0 +1,93 @@
+"""
+PyTest's for Digital Image Processing
+"""
+from cv2 import COLOR_BGR2GRAY, cvtColor, imread
+from numpy import array, uint8
+from PIL import Image
+
+from digital_image_processing import change_contrast as cc
+from digital_image_processing import convert_to_negative as cn
+from digital_image_processing import sepia as sp
+from digital_image_processing.dithering import burkes as bs
+from digital_image_processing.edge_detection import canny as canny
+from digital_image_processing.filters import convolve as conv
+from digital_image_processing.filters import gaussian_filter as gg
+from digital_image_processing.filters import median_filter as med
+from digital_image_processing.filters import sobel_filter as sob
+from digital_image_processing.resize import resize as rs
+
+img = imread(r"digital_image_processing/image_data/lena_small.jpg")
+gray = cvtColor(img, COLOR_BGR2GRAY)
+
+
+# Test: convert_to_negative()
def test_convert_to_negative():
    """convert_to_negative() on lena_small must yield at least one truthy pixel."""
    negative = cn.convert_to_negative(img)
    # ndarray.any() is True when at least one entry is non-zero
    assert negative.any()
+
+
+# Test: change_contrast()
+def test_change_contrast():
+ with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
+ # Work around assertion for response
+ assert str(cc.change_contrast(img, 110)).startswith(
+ " Divide and conquer
+The points are sorted based on Xco-ords and
+then based on Yco-ords separately.
+And by applying divide and conquer approach,
+minimum distance is obtained recursively.
+
+>> Closest points can lie on different sides of partition.
+This case handled by forming a strip of points
+whose Xco-ords distance is less than closest_pair_dis
+from mid-point's Xco-ords. Points sorted based on Yco-ords
+are used in this step to reduce sorting time.
+Closest pair distance is found in the strip of points. (closest_in_strip)
+
+min(closest_pair_dis, closest_in_strip) would be the final answer.
+
+Time complexity: O(n * log n)
+"""
+
+
def euclidean_distance_sqr(point1, point2):
    """
    Return the squared Euclidean distance between two 2-d points.

    >>> euclidean_distance_sqr([1,2],[2,4])
    5
    """
    delta_x = point1[0] - point2[0]
    delta_y = point1[1] - point2[1]
    return delta_x * delta_x + delta_y * delta_y
+
+
def column_based_sort(array, column=0):
    """
    Return the points of *array* stably sorted on coordinate *column*.

    >>> column_based_sort([(5, 1), (4, 2), (3, 0)], 1)
    [(3, 0), (5, 1), (4, 2)]
    """

    def coordinate(point):
        return point[column]

    return sorted(array, key=coordinate)
+
+
def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    """
    brute force approach to find distance between closest pair points

    Parameters :
    points, points_count, min_dis (list(tuple(int, int)), int, int)

    Returns :
    min_dis (float): distance between closest pair of points

    >>> dis_between_closest_pair([[1,2],[2,4],[5,7],[8,9],[11,0]],5)
    5

    """
    # compare every unordered pair once; the squared distance is inlined
    for left in range(points_counts - 1):
        for right in range(left + 1, points_counts):
            first, second = points[left], points[right]
            candidate = (first[0] - second[0]) ** 2 + (first[1] - second[1]) ** 2
            min_dis = min(min_dis, candidate)
    return min_dis
+
+
def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    """
    closest pair of points in strip

    Parameters :
    points, points_count, min_dis (list(tuple(int, int)), int, int)

    Returns :
    min_dis (float): distance btw closest pair of points in the strip (< min_dis)

    >>> dis_between_closest_in_strip([[1,2],[2,4],[5,7],[8,9],[11,0]],5)
    85
    """
    # within a y-sorted strip only the 6 preceding points can be closer,
    # so each point is compared against at most its 6 predecessors
    for current in range(min(6, points_counts - 1), points_counts):
        for previous in range(max(0, current - 6), current):
            first, second = points[current], points[previous]
            candidate = (first[0] - second[0]) ** 2 + (first[1] - second[1]) ** 2
            if candidate < min_dis:
                min_dis = candidate
    return min_dis
+
+
def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    """divide and conquer approach

    Parameters :
    points, points_count (list(tuple(int, int)), int)

    Returns :
    (float): distance btw closest pair of points

    >>> closest_pair_of_points_sqr([(1, 2), (3, 4)], [(5, 6), (7, 8)], 2)
    8
    """

    # base case: brute force is cheapest for 3 or fewer points
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    # NOTE(review): the left half recurses on points_sorted_on_x while the
    # right half recurses on points_sorted_on_y — the halves are drawn from
    # two different orderings of the same point set. This differs from the
    # classic algorithm (both halves split the x-sorted list); confirm intent.
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid
    )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y, points_sorted_on_y[mid:], points_counts - mid
    )
    closest_pair_dis = min(closest_in_left, closest_in_right)

    """
    cross_strip contains the points, whose Xcoords are at a
    distance(< closest_pair_dis) from mid's Xcoord
    """

    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)

    # the answer is the better of the recursive result and the strip result
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis
    )
    return min(closest_pair_dis, closest_in_strip)
+
+
def closest_pair_of_points(points, points_counts):
    """
    Distance (not squared) between the closest two of *points*.

    >>> closest_pair_of_points([(2, 3), (12, 30)], len([(2, 3), (12, 30)]))
    28.792360097775937
    """
    by_x = column_based_sort(points, column=0)
    by_y = column_based_sort(points, column=1)
    squared_distance = closest_pair_of_points_sqr(by_x, by_y, points_counts)
    return squared_distance ** 0.5
+
+
if __name__ == "__main__":
    # demo: closest distance among six sample points
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
diff --git a/divide_and_conquer/convex_hull.py b/divide_and_conquer/convex_hull.py
new file mode 100644
index 000000000000..9c096f671385
--- /dev/null
+++ b/divide_and_conquer/convex_hull.py
@@ -0,0 +1,506 @@
+"""
+The convex hull problem is the problem of finding all the vertices of a convex polygon P of
+a set of points in a plane such that all the points are either on the vertices of P or
+inside P. The convex hull problem has several applications in geometrical problems,
+computer graphics and game development.
+
+Two algorithms have been implemented for the convex hull problem here.
+1. A brute-force algorithm which runs in O(n^3)
+2. A divide-and-conquer algorithm which runs in O(n log(n))
+
+There are several other algorithms for the convex hull problem
+which have not been implemented here, yet.
+
+"""
+
+from typing import Iterable, List, Set, Union
+
+
class Point:
    """
    Defines a 2-d point for use by all convex-hull algorithms.

    Coordinates are stored as floats (construction raises ValueError when a
    coordinate cannot be converted). Points compare lexicographically:
    first by x, with ties broken by y.

    Parameters
    ----------
    x: an int or a float, the x-coordinate of the 2-d point
    y: an int or a float, the y-coordinate of the 2-d point

    Examples
    --------
    >>> Point(1, 2)
    (1.0, 2.0)
    >>> Point("1", "2")
    (1.0, 2.0)
    >>> Point(1, 2) > Point(0, 1)
    True
    >>> Point(1, 1) == Point(1, 1)
    True
    >>> Point(-0.5, 1) == Point(0.5, 1)
    False
    >>> Point("pi", "e")
    Traceback (most recent call last):
    ...
    ValueError: could not convert string to float: 'pi'
    """

    def __init__(self, x, y):
        self.x, self.y = float(x), float(y)

    def __eq__(self, other):
        return self.x == other.x and self.y == other.y

    def __ne__(self, other):
        return not self == other

    def __gt__(self, other):
        # Lexicographic: compare x first, break ties on y.
        if self.x > other.x:
            return True
        if self.x == other.x:
            return self.y > other.y
        return False

    def __lt__(self, other):
        # Bug fix: the original `not self > other` returned True for EQUAL
        # points, violating strict-ordering irreflexivity (a < a was True)
        # and disagreeing with __eq__/__le__. Implement proper strict
        # less-than; sorted() output is unaffected for distinct points.
        if self.x < other.x:
            return True
        if self.x == other.x:
            return self.y < other.y
        return False

    def __ge__(self, other):
        return self > other or self == other

    def __le__(self, other):
        return self < other or self == other

    def __repr__(self):
        return f"({self.x}, {self.y})"

    def __hash__(self):
        # Hashing on x alone is valid (equal points hash equal) but weak:
        # points sharing an x-coordinate collide in sets/dicts.
        return hash(self.x)
+
+
def _construct_points(
    list_of_tuples: Union[List[Point], List[List[float]], Iterable[List[float]]]
) -> List[Point]:
    """
    Convert an iterable of point-like items into a list of Point objects.

    Items that are already Point instances are kept as-is; every other item
    is treated as an indexable pair (item[0], item[1]). Items that cannot be
    indexed that way are reported and skipped. A falsy input (None or empty)
    yields an empty list.

    Examples
    -------
    >>> _construct_points([[1, 1], [2, -1], [0.3, 4]])
    [(1.0, 1.0), (2.0, -1.0), (0.3, 4.0)]
    >>> _construct_points([1, 2])
    Ignoring deformed point 1. All points must have at least 2 coordinates.
    Ignoring deformed point 2. All points must have at least 2 coordinates.
    []
    >>> _construct_points([])
    []
    >>> _construct_points(None)
    []
    """
    points: List[Point] = []
    if not list_of_tuples:
        return points

    for item in list_of_tuples:
        if isinstance(item, Point):
            points.append(item)
            continue
        try:
            points.append(Point(item[0], item[1]))
        except (IndexError, TypeError):
            print(
                f"Ignoring deformed point {item}. All points"
                " must have at least 2 coordinates."
            )
    return points
+
+
def _validate_input(points: Union[List[Point], List[List[float]]]) -> List[Point]:
    """
    Check that `points` is a usable, non-empty iterable and convert it into
    a list of Point objects before a convex-hull algorithm consumes it.

    Parameters
    ---------
    points: array-like, the 2d points to validate. Elements must be lists,
        tuples or Points.

    Returns
    -------
    A list of well-formed Points built from the input.

    Exception
    ---------
    ValueError: if points is empty or None, or if a non-iterable scalar is
        passed.

    Examples
    -------
    >>> _validate_input([[1, 2]])
    [(1.0, 2.0)]
    >>> _validate_input([(1, 2)])
    [(1.0, 2.0)]
    >>> _validate_input([Point(2, 1), Point(-1, 2)])
    [(2.0, 1.0), (-1.0, 2.0)]
    >>> _validate_input([])
    Traceback (most recent call last):
    ...
    ValueError: Expecting a list of points but got []
    >>> _validate_input(1)
    Traceback (most recent call last):
    ...
    ValueError: Expecting an iterable object but got an non-iterable type 1
    """
    # Guard clauses: first reject non-iterables, then empty iterables.
    if not hasattr(points, "__iter__"):
        raise ValueError(
            f"Expecting an iterable object but got an non-iterable type {points}"
        )
    if not points:
        raise ValueError(f"Expecting a list of points but got {points}")
    return _construct_points(points)
+
+
def _det(a: Point, b: Point, c: Point) -> float:
    """
    Signed cross-product test for point c against line segment ab.

    The returned value is twice the signed area of triangle abc: positive
    when c lies to the left of ab, negative when to the right, and zero
    when a, b and c are collinear. abs(det) is proportional to the
    perpendicular distance of c from the line through a and b.

    Parameters
    ----------
    a: point, the point on the left end of line segment ab
    b: point, the point on the right end of line segment ab
    c: point, the point whose side/distance is desired.

    Examples
    ----------
    >>> _det(Point(1, 1), Point(1, 2), Point(1, 5))
    0.0
    >>> _det(Point(0, 0), Point(10, 0), Point(0, 10))
    100.0
    >>> _det(Point(0, 0), Point(10, 0), Point(0, -10))
    -100.0
    """
    return (a.x * b.y + b.x * c.y + c.x * a.y) - (a.y * b.x + b.y * c.x + c.y * a.x)
+
+
def convex_hull_bf(points: List[Point]) -> List[Point]:
    """
    Brute-force convex hull.

    A segment (i, j) is a hull edge iff every other point lies on a single
    side of the line through i and j; collinear points are tolerated only
    when they fall between i and j. Both endpoints of every surviving edge
    are hull vertices.

    Runtime: O(n^3) - definitely horrible

    Parameters
    ---------
    points: array-like of object of Points, lists or tuples.
        The set of 2d points for which the convex-hull is needed

    Returns
    ------
    convex_set: list, the convex-hull of points sorted in non-decreasing order.

    See Also
    --------
    convex_hull_recursive,

    Examples
    ---------
    >>> convex_hull_bf([[0, 0], [1, 0], [10, 1]])
    [(0.0, 0.0), (1.0, 0.0), (10.0, 1.0)]
    >>> convex_hull_bf([[0, 0], [1, 0], [10, 0]])
    [(0.0, 0.0), (10.0, 0.0)]
    >>> convex_hull_bf([[-1, 1],[-1, -1], [0, 0], [0.5, 0.5], [1, -1], [1, 1],
    ...                 [-0.75, 1]])
    [(-1.0, -1.0), (-1.0, 1.0), (1.0, -1.0), (1.0, 1.0)]
    >>> convex_hull_bf([(0, 3), (2, 2), (1, 1), (2, 1), (3, 0), (0, 0), (3, 3),
    ...                 (2, -1), (2, -4), (1, -3)])
    [(0.0, 0.0), (0.0, 3.0), (1.0, -3.0), (2.0, -4.0), (3.0, 0.0), (3.0, 3.0)]
    """
    points = sorted(_validate_input(points))
    n = len(points)
    hull_vertices = set()

    for i in range(n - 1):
        for j in range(i + 1, n):
            seen_left = seen_right = False
            edge_on_hull = True
            for k in range(n):
                if k != i and k != j:
                    orientation = _det(points[i], points[j], points[k])
                    if orientation > 0:
                        seen_left = True
                    elif orientation < 0:
                        seen_right = True
                    elif points[k] < points[i] or points[k] > points[j]:
                        # A collinear point beyond either endpoint means the
                        # segment (i, j) is interior to a longer hull edge.
                        edge_on_hull = False
                        break
                if seen_left and seen_right:
                    # Points on both sides -> (i, j) cuts through the set.
                    edge_on_hull = False
                    break
            if edge_on_hull:
                hull_vertices.add(points[i])
                hull_vertices.add(points[j])

    return sorted(hull_vertices)
+
+
def convex_hull_recursive(points: List[Point]) -> List[Point]:
    """
    Divide-and-conquer (quickhull-style) convex hull.

    The leftmost and rightmost points are hull vertices by definition; the
    line through them splits the remaining points into an upper and a lower
    set, each of which is refined recursively by _construct_hull.

    Runtime: O(n log n)

    Parameter
    ---------
    points: array-like of object of Points, lists or tuples.
        The set of 2d points for which the convex-hull is needed

    Returns
    -------
    convex_set: list, the convex-hull of points sorted in non-decreasing order.

    Examples
    ---------
    >>> convex_hull_recursive([[0, 0], [1, 0], [10, 1]])
    [(0.0, 0.0), (1.0, 0.0), (10.0, 1.0)]
    >>> convex_hull_recursive([[0, 0], [1, 0], [10, 0]])
    [(0.0, 0.0), (10.0, 0.0)]
    >>> convex_hull_recursive([[-1, 1],[-1, -1], [0, 0], [0.5, 0.5], [1, -1], [1, 1],
    ...                        [-0.75, 1]])
    [(-1.0, -1.0), (-1.0, 1.0), (1.0, -1.0), (1.0, 1.0)]
    >>> convex_hull_recursive([(0, 3), (2, 2), (1, 1), (2, 1), (3, 0), (0, 0), (3, 3),
    ...                        (2, -1), (2, -4), (1, -3)])
    [(0.0, 0.0), (0.0, 3.0), (1.0, -3.0), (2.0, -4.0), (3.0, 0.0), (3.0, 3.0)]

    """
    points = sorted(_validate_input(points))
    n = len(points)

    # The extreme points (after sorting) are guaranteed hull vertices.
    leftmost = points[0]
    rightmost = points[n - 1]
    hull = {leftmost, rightmost}

    # Points strictly above the leftmost-rightmost line seed the upper hull,
    # points strictly below seed the lower hull; collinear points are
    # ignored since they cannot be hull vertices.
    above = []
    below = []
    for candidate in points[1 : n - 1]:
        side = _det(leftmost, rightmost, candidate)
        if side > 0:
            above.append(candidate)
        elif side < 0:
            below.append(candidate)

    _construct_hull(above, leftmost, rightmost, hull)
    _construct_hull(below, rightmost, leftmost, hull)

    return sorted(hull)
+
+
def _construct_hull(
    points: List[Point], left: Point, right: Point, convex_set: Set[Point]
) -> None:
    """
    Quickhull refinement step: find the point farthest to the left of the
    directed segment left->right, add it to the hull, and recurse on the
    two sub-segments it induces.

    Parameters
    ---------
    points: list or None, candidate points for the next hull vertex
    left: Point, the start of the directed segment
    right: Point, the end of the directed segment
    convex_set: set, the convex hull built so far; updated in place

    Note
    ----
    For the line segment 'ab', 'a' is on the left and 'b' on the right,
    but the reverse is true for the line segment 'ba'.

    Returns
    -------
    Nothing, only updates the state of convex_set
    """
    if not points:
        return

    extreme = None
    best_distance = float("-inf")
    outside = []

    for candidate in points:
        position = _det(left, right, candidate)
        # Only points strictly left of the segment remain candidates for
        # the two recursive sub-problems.
        if position > 0:
            outside.append(candidate)
        # The farthest point (by signed distance) becomes a hull vertex.
        # Mirrors the original: updated for every point, not only position > 0.
        if position > best_distance:
            best_distance = position
            extreme = candidate

    if extreme:
        _construct_hull(outside, left, extreme, convex_set)
        convex_set.add(extreme)
        _construct_hull(outside, extreme, right, convex_set)
+
+
def convex_hull_melkman(points: List[Point]) -> List[Point]:
    """
    Constructs the convex hull of a set of 2D points using the melkman algorithm.
    The algorithm works by iteratively inserting points of a simple polygonal chain
    (meaning that no line segments between two consecutive points cross each other).
    Sorting the points yields such a polygonal chain.

    For a detailed description, see http://cgm.cs.mcgill.ca/~athens/cs601/Melkman.html

    Runtime: O(n log n) - O(n) if points are already sorted in the input

    Parameters
    ---------
    points: array-like of object of Points, lists or tuples.
        The set of 2d points for which the convex-hull is needed

    Returns
    ------
    convex_set: list, the convex-hull of points sorted in non-decreasing order.

    Examples
    ---------
    >>> convex_hull_melkman([[0, 0], [1, 0], [10, 1]])
    [(0.0, 0.0), (1.0, 0.0), (10.0, 1.0)]
    >>> convex_hull_melkman([[0, 0], [1, 0], [10, 0]])
    [(0.0, 0.0), (10.0, 0.0)]
    >>> convex_hull_melkman([[-1, 1],[-1, -1], [0, 0], [0.5, 0.5], [1, -1], [1, 1],
    ...                      [-0.75, 1]])
    [(-1.0, -1.0), (-1.0, 1.0), (1.0, -1.0), (1.0, 1.0)]
    >>> convex_hull_melkman([(0, 3), (2, 2), (1, 1), (2, 1), (3, 0), (0, 0), (3, 3),
    ...                      (2, -1), (2, -4), (1, -3)])
    [(0.0, 0.0), (0.0, 3.0), (1.0, -3.0), (2.0, -4.0), (3.0, 0.0), (3.0, 3.0)]
    """
    points = sorted(_validate_input(points))
    n = len(points)

    # Seed the hull with the first two points; `convex_hull` holds vertices
    # in circular order (front and back are neighbours on the polygon).
    convex_hull = points[:2]
    for i in range(2, n):
        det = _det(convex_hull[1], convex_hull[0], points[i])
        if det > 0:
            convex_hull.insert(0, points[i])
            break
        elif det < 0:
            convex_hull.append(points[i])
            break
        else:
            # Collinear so far: keep only the farthest collinear point.
            convex_hull[1] = points[i]
    i += 1

    for i in range(i, n):
        # NOTE(review): `points[1]` in the second determinant below looks
        # like a typo for `points[i]`; the doctests still pass because the
        # repair loops further down remove any wrongly inserted vertices,
        # but this inside-hull shortcut should be confirmed.
        if (
            _det(convex_hull[0], convex_hull[-1], points[i]) > 0
            and _det(convex_hull[-1], convex_hull[0], points[1]) < 0
        ):
            # The point lies within the convex hull
            continue

        convex_hull.insert(0, points[i])
        convex_hull.append(points[i])
        # Restore convexity at the front of the circular list...
        while _det(convex_hull[0], convex_hull[1], convex_hull[2]) >= 0:
            del convex_hull[1]
        # ...and at the back.
        while _det(convex_hull[-1], convex_hull[-2], convex_hull[-3]) <= 0:
            del convex_hull[-2]

    # `convex_hull` contains the hull in circular order; drop the duplicated
    # wrap-around vertex before returning.
    return sorted(convex_hull[1:] if len(convex_hull) > 3 else convex_hull)
+
+
def main():
    """Run all three hull implementations on a sample and check agreement."""
    # The convex hull of this sample is
    # [(0, 0), (0, 3), (1, -3), (2, -4), (3, 0), (3, 3)]
    sample = [
        (0, 3),
        (2, 2),
        (1, 1),
        (2, 1),
        (3, 0),
        (0, 0),
        (3, 3),
        (2, -1),
        (2, -4),
        (1, -3),
    ]

    expected = convex_hull_bf(sample)

    # Every implementation must produce the same (sorted) hull.
    assert convex_hull_recursive(sample) == expected
    assert convex_hull_melkman(sample) == expected

    print(expected)


if __name__ == "__main__":
    main()
diff --git a/divide_and_conquer/heaps_algorithm.py b/divide_and_conquer/heaps_algorithm.py
new file mode 100644
index 000000000000..af30ad664101
--- /dev/null
+++ b/divide_and_conquer/heaps_algorithm.py
@@ -0,0 +1,56 @@
+"""
+Heap's algorithm returns the list of all permutations possible from a list.
+It minimizes movement by generating each permutation from the previous one
+by swapping only two elements.
+More information:
+https://en.wikipedia.org/wiki/Heap%27s_algorithm.
+"""
+
+
def heaps(arr: list) -> list:
    """
    Pure python implementation of the Heap's algorithm (recursive version),
    returning all permutations of a list.
    >>> heaps([])
    [()]
    >>> heaps([0])
    [(0,)]
    >>> heaps([-1, 1])
    [(-1, 1), (1, -1)]
    >>> heaps([1, 2, 3])
    [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]
    >>> from itertools import permutations
    >>> sorted(heaps([1,2,3])) == sorted(permutations([1,2,3]))
    True
    >>> all(sorted(heaps(x)) == sorted(permutations(x))
    ...     for x in ([], [0], [-1, 1], [1, 2, 3]))
    True
    """
    if len(arr) <= 1:
        return [tuple(arr)]

    output = []

    def _permute(size: int, items: list):
        # Emit every permutation of items[:size], mutating items in place.
        if size == 1:
            output.append(tuple(items))
            return

        _permute(size - 1, items)
        for index in range(size - 1):
            # Heap's rule: swap position `index` when size is even,
            # position 0 when size is odd.
            swap_from = index if size % 2 == 0 else 0
            items[swap_from], items[size - 1] = items[size - 1], items[swap_from]
            _permute(size - 1, items)

    _permute(len(arr), arr)
    return output
+
+
if __name__ == "__main__":
    # Read comma-separated integers from the user and print all their
    # permutations as produced by Heap's algorithm.
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
diff --git a/divide_and_conquer/heaps_algorithm_iterative.py b/divide_and_conquer/heaps_algorithm_iterative.py
new file mode 100644
index 000000000000..4dab41f539c0
--- /dev/null
+++ b/divide_and_conquer/heaps_algorithm_iterative.py
@@ -0,0 +1,60 @@
+"""
+Heap's (iterative) algorithm returns the list of all permutations possible from a list.
+It minimizes movement by generating each permutation from the previous one
+by swapping only two elements.
+More information:
+https://en.wikipedia.org/wiki/Heap%27s_algorithm.
+"""
+
+
def heaps(arr: list) -> list:
    """
    Pure python implementation of the iterative Heap's algorithm,
    returning all permutations of a list.
    >>> heaps([])
    [()]
    >>> heaps([0])
    [(0,)]
    >>> heaps([-1, 1])
    [(-1, 1), (1, -1)]
    >>> heaps([1, 2, 3])
    [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]
    >>> from itertools import permutations
    >>> sorted(heaps([1,2,3])) == sorted(permutations([1,2,3]))
    True
    >>> all(sorted(heaps(x)) == sorted(permutations(x))
    ...     for x in ([], [0], [-1, 1], [1, 2, 3]))
    True
    """
    if len(arr) <= 1:
        return [tuple(arr)]

    size = len(arr)
    permutations_found = [tuple(arr)]
    # counters[idx] plays the role of the loop variable of a recursive call
    # at depth idx (the "c" array of the classic iterative formulation).
    counters = [0] * size

    idx = 0
    while idx < size:
        if counters[idx] < idx:
            # Heap's rule: swap position 0 when idx is even,
            # position counters[idx] when idx is odd.
            swap_from = 0 if idx % 2 == 0 else counters[idx]
            arr[swap_from], arr[idx] = arr[idx], arr[swap_from]
            permutations_found.append(tuple(arr))
            counters[idx] += 1
            idx = 0
        else:
            counters[idx] = 0
            idx += 1

    return permutations_found
+
+
if __name__ == "__main__":
    # Read comma-separated integers from the user and print all their
    # permutations as produced by the iterative Heap's algorithm.
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
diff --git a/divide_and_conquer/inversions.py b/divide_and_conquer/inversions.py
new file mode 100644
index 000000000000..9bb656229321
--- /dev/null
+++ b/divide_and_conquer/inversions.py
@@ -0,0 +1,169 @@
+"""
+Given an array-like data structure A[1..n], how many pairs
+(i, j) for all 1 <= i < j <= n such that A[i] > A[j]? These pairs are
+called inversions. Counting the number of such inversions in an array-like
+object is important. Among other things, counting inversions can help
+us determine how close a given array is to being sorted
+
+In this implementation, I provide two algorithms, a divide-and-conquer
+algorithm which runs in nlogn and the brute-force n^2 algorithm.
+
+"""
+
+
def count_inversions_bf(arr):
    """
    Count inversions with the naive O(n^2) brute-force algorithm: check
    every pair (i, j) with i < j and count those where arr[i] > arr[j].

    Parameters
    ----------
    arr: array-like, the list containing the items for which the number
    of inversions is desired. The elements of `arr` must be comparable.

    Returns
    -------
    num_inversions: The total number of inversions in `arr`

    Examples
    ---------

    >>> count_inversions_bf([1, 4, 2, 4, 1])
    4
    >>> count_inversions_bf([1, 1, 2, 4, 4])
    0
    >>> count_inversions_bf([])
    0
    """
    n = len(arr)
    return sum(
        1
        for i in range(n - 1)
        for j in range(i + 1, n)
        if arr[i] > arr[j]
    )
+
+
def count_inversions_recursive(arr):
    """
    Count inversions with a mergesort-style divide-and-conquer algorithm:
    inversions within each half plus inversions across the split, counted
    while merging the two sorted halves.

    Parameters
    -----------
    arr: array-like, the list containing the items for which the number
    of inversions is desired. The elements of `arr` must be comparable.

    Returns
    -------
    C: a sorted copy of `arr`.
    num_inversions: int, the total number of inversions in 'arr'

    Examples
    --------

    >>> count_inversions_recursive([1, 4, 2, 4, 1])
    ([1, 1, 2, 4, 4], 4)
    >>> count_inversions_recursive([1, 1, 2, 4, 4])
    ([1, 1, 2, 4, 4], 0)
    >>> count_inversions_recursive([])
    ([], 0)
    """
    # Base case: zero or one element has no inversions and is already sorted.
    if len(arr) <= 1:
        return arr, 0

    mid = len(arr) // 2
    sorted_left, left_count = count_inversions_recursive(arr[:mid])
    sorted_right, right_count = count_inversions_recursive(arr[mid:])
    merged, cross_count = _count_cross_inversions(sorted_left, sorted_right)

    return merged, left_count + right_count + cross_count
+
+
+def _count_cross_inversions(P, Q):
+ """
+ Counts the inversions across two sorted arrays.
+ And combine the two arrays into one sorted array
+
+ For all 1<= i<=len(P) and for all 1 <= j <= len(Q),
+ if P[i] > Q[j], then (i, j) is a cross inversion
+
+ Parameters
+ ----------
+ P: array-like, sorted in non-decreasing order
+ Q: array-like, sorted in non-decreasing order
+
+ Returns
+ ------
+ R: array-like, a sorted array of the elements of `P` and `Q`
+ num_inversion: int, the number of inversions across `P` and `Q`
+
+ Examples
+ --------
+
+ >>> _count_cross_inversions([1, 2, 3], [0, 2, 5])
+ ([0, 1, 2, 2, 3, 5], 4)
+ >>> _count_cross_inversions([1, 2, 3], [3, 4, 5])
+ ([1, 2, 3, 3, 4, 5], 0)
+ """
+
+ R = []
+ i = j = num_inversion = 0
+ while i < len(P) and j < len(Q):
+ if P[i] > Q[j]:
+ # if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P)
+ # These are all inversions. The claim emerges from the
+ # property that P is sorted.
+ num_inversion += len(P) - i
+ R.append(Q[j])
+ j += 1
+ else:
+ R.append(P[i])
+ i += 1
+
+ if i < len(P):
+ R.extend(P[i:])
+ else:
+ R.extend(Q[j:])
+
+ return R, num_inversion
+
+
def main():
    """Check both inversion counters against known answers."""

    def check(arr, expected):
        # Both implementations must agree with the expected count.
        count_bf = count_inversions_bf(arr)
        _, count_recursive = count_inversions_recursive(arr)
        assert count_bf == count_recursive == expected
        print("number of inversions = ", count_bf)

    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    check(arr_1, 8)

    # a sorted copy of arr_1 has zero inversions
    check(sorted(arr_1), 0)

    # an empty list should also have zero inversions
    check([], 0)


if __name__ == "__main__":
    main()
diff --git a/divide_and_conquer/kth_order_statistic.py b/divide_and_conquer/kth_order_statistic.py
new file mode 100644
index 000000000000..f6e81a306bff
--- /dev/null
+++ b/divide_and_conquer/kth_order_statistic.py
@@ -0,0 +1,64 @@
+"""
+Find the kth smallest element in linear time using divide and conquer.
+Recall we can do this trivially in O(nlogn) time. Sort the list and
+access kth element in constant time.
+
+This is a divide and conquer algorithm that can find a solution in O(n) time.
+
+For more information of this algorithm:
+https://web.stanford.edu/class/archive/cs/cs161/cs161.1138/lectures/08/Small08.pdf
+"""
+from random import choice
+from typing import List
+
+
def random_pivot(lst):
    """
    Return a uniformly random element of `lst` to use as the pivot.

    A more sophisticated strategy (e.g. median-of-medians) could be
    substituted here to guarantee worst-case linear time.
    """
    return choice(lst)
+
+
def kth_number(lst: List[int], k: int) -> int:
    """
    Return the kth smallest number in lst (1-indexed) using quickselect.

    The partition keeps three groups — strictly smaller than, equal to,
    and strictly bigger than the pivot — so lists containing duplicates
    are handled correctly. (The original two-way split dropped duplicate
    pivot values and could recurse into an empty list, raising IndexError
    on inputs such as [3, 3, 3, 3].)

    >>> kth_number([2, 1, 3, 4, 5], 3)
    3
    >>> kth_number([2, 1, 3, 4, 5], 1)
    1
    >>> kth_number([2, 1, 3, 4, 5], 5)
    5
    >>> kth_number([3, 2, 5, 6, 7, 8], 2)
    3
    >>> kth_number([25, 21, 98, 100, 76, 22, 43, 60, 89, 87], 4)
    43
    >>> kth_number([3, 3, 3, 3], 2)
    3
    """
    # Pick a pivot uniformly at random and partition in linear time.
    pivot = choice(lst)
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    # Elements equal to the pivot (there is at least one: the pivot itself).
    n_equal = len(lst) - len(small) - len(big)

    if k <= len(small):
        # The answer is among the strictly smaller elements.
        return kth_number(small, k)
    if k <= len(small) + n_equal:
        # The answer equals the pivot.
        return pivot
    # The answer is among the strictly bigger elements.
    return kth_number(big, k - len(small) - n_equal)
+
+
if __name__ == "__main__":
    # Run the doctests embedded above when executed as a script.
    import doctest

    doctest.testmod()
diff --git a/divide_and_conquer/max_difference_pair.py b/divide_and_conquer/max_difference_pair.py
new file mode 100644
index 000000000000..b976aca43137
--- /dev/null
+++ b/divide_and_conquer/max_difference_pair.py
@@ -0,0 +1,47 @@
+from typing import List
+
+
def max_difference(a: List[int]) -> (int, int):
    """
    We are given an array A[1..n] of integers, n >= 1. We want to
    find a pair of indices (i, j) such that
    1 <= i <= j <= n and A[j] - A[i] is as large as possible.

    Explanation:
    https://www.geeksforgeeks.org/maximum-difference-between-two-elements/

    >>> max_difference([5, 11, 2, 1, 7, 9, 0, 7])
    (1, 9)
    """
    # Base case: a single element is both the smaller and the larger value.
    if len(a) == 1:
        return a[0], a[0]

    # Split into halves: two subproblems of half the size.
    mid = len(a) // 2
    left, right = a[:mid], a[mid:]
    small1, big1 = max_difference(left)
    small2, big2 = max_difference(right)

    # Best pair straddling the split: min of the left half paired with the
    # max of the right half (linear time).
    min_left = min(left)
    max_right = max(right)

    # The answer is the best of the three candidates; constant comparisons.
    if big2 - small2 > max_right - min_left and big2 - small2 > big1 - small1:
        return small2, big2
    if big1 - small1 > max_right - min_left:
        return small1, big1
    return min_left, max_right
+
+
if __name__ == "__main__":
    # Run the doctests embedded above when executed as a script.
    import doctest

    doctest.testmod()
diff --git a/divide_and_conquer/max_subarray_sum.py b/divide_and_conquer/max_subarray_sum.py
new file mode 100644
index 000000000000..43f58086e078
--- /dev/null
+++ b/divide_and_conquer/max_subarray_sum.py
@@ -0,0 +1,76 @@
+"""
+Given an array of length n, max_subarray_sum() finds
+the maximum of sum of contiguous sub-array using divide and conquer method.
+
+Time complexity : O(n log n)
+
+Ref : INTRODUCTION TO ALGORITHMS THIRD EDITION
+(section : 4, sub-section : 4.1, page : 70)
+
+"""
+
+
def max_sum_from_start(array):
    """Return the maximum sum of a prefix of `array` starting at index 0.

    Parameters :
    array (list[int]) : given array

    Returns :
    max_sum : the largest running prefix sum; float("-inf") for an
        empty array (matching the accumulator's initial value).
    """
    best = float("-inf")
    running = 0
    for value in array:
        running += value
        best = max(best, running)
    return best
+
+
def max_cross_array_sum(array, left, mid, right):
    """Best sum of a contiguous subarray that crosses the midpoint.

    It is the best suffix of array[left..mid] (found by scanning the
    reversed slice from its start) plus the best prefix of
    array[mid+1..right].

    Parameters :
    array, left, mid, right (list[int], int, int, int)

    Returns :
    (int) : maximum sum of a contiguous subarray spanning the split
    """
    best_left_suffix = max_sum_from_start(array[left : mid + 1][::-1])
    best_right_prefix = max_sum_from_start(array[mid + 1 : right + 1])
    return best_left_suffix + best_right_prefix
+
+
def max_subarray_sum(array, left, right):
    """Maximum contiguous sub-array sum via divide and conquer.

    The best subarray lies entirely in the left half, entirely in the
    right half, or crosses the midpoint; take the maximum of the three.

    Parameters :
    array, left, right (list[int], int, int) :
        given array, current left index and current right index

    Returns :
    int : maximum of sum of contiguous sub-array
    """
    # Base case: a single element.
    if left == right:
        return array[right]

    mid = (left + right) // 2
    return max(
        max_subarray_sum(array, left, mid),
        max_subarray_sum(array, mid + 1, right),
        max_cross_array_sum(array, left, mid, right),
    )
+
+
if __name__ == "__main__":
    # Guarded so importing this module no longer runs the demo and prints
    # (the original executed at module top level as an import side effect).
    array = [-2, -5, 6, -2, -3, 1, 5, -6]
    array_length = len(array)
    print(
        "Maximum sum of contiguous subarray:",
        max_subarray_sum(array, 0, array_length - 1),
    )
diff --git a/divide_and_conquer/mergesort.py b/divide_and_conquer/mergesort.py
new file mode 100644
index 000000000000..46a46941cab3
--- /dev/null
+++ b/divide_and_conquer/mergesort.py
@@ -0,0 +1,112 @@
+from typing import List
+
+
def merge(left_half: List, right_half: List) -> List:
    """Merge two sorted lists into one sorted list (helper for mergesort).

    On ties the element from the right half is taken first, matching the
    strict `<` comparison below.

    >>> merge([-2], [-1])
    [-2, -1]

    >>> merge([1, 2, 3], [4, 5, 6])
    [1, 2, 3, 4, 5, 6]

    >>> merge([12, 15], [13, 14])
    [12, 13, 14, 15]

    >>> merge([], [])
    []
    """
    merged = []
    i = j = 0

    # Take the smaller head element until one side is exhausted.
    while i < len(left_half) and j < len(right_half):
        if left_half[i] < right_half[j]:
            merged.append(left_half[i])
            i += 1
        else:
            merged.append(right_half[j])
            j += 1

    # Append whatever remains (at most one of these is non-empty).
    merged.extend(left_half[i:])
    merged.extend(right_half[j:])

    return merged
+
+
def merge_sort(array: List) -> List:
    """Return a sorted list of the elements of `array` using merge sort.

    >>> from random import shuffle
    >>> array = [-2, 3, -10, 11, 99, 100000, 100, -200]
    >>> shuffle(array)
    >>> merge_sort(array)
    [-200, -10, -2, 3, 11, 99, 100, 100000]

    >>> merge_sort([-200])
    [-200]

    >>> merge_sort([-2])
    [-2]

    >>> merge_sort([])
    []

    >>> array = [10000000, 1, -1111111111, 101111111112, 9000002]
    >>> sorted(array) == merge_sort(array)
    True
    """
    # Base case: zero or one element is already sorted.
    if len(array) <= 1:
        return array

    # Split at the midpoint (Python ints cannot overflow, so the plain
    # len // 2 form is safe), sort each half recursively, then merge.
    middle = len(array) // 2
    return merge(merge_sort(array[:middle]), merge_sort(array[middle:]))
+
+
if __name__ == "__main__":
    # Run the doctests embedded above when executed as a script.
    import doctest

    doctest.testmod()
diff --git a/divide_and_conquer/peak.py b/divide_and_conquer/peak.py
new file mode 100644
index 000000000000..f94f83ed3fcb
--- /dev/null
+++ b/divide_and_conquer/peak.py
@@ -0,0 +1,53 @@
+"""
+Finding the peak of a unimodal list using divide and conquer.
+A unimodal array is defined as follows: array is increasing up to index p,
+then decreasing afterwards. (for p >= 1)
+An obvious solution can be performed in O(n),
+to find the maximum of the array.
+(From Kleinberg and Tardos. Algorithm Design.
+Addison Wesley 2006: Chapter 5 Solved Exercise 1)
+"""
+from typing import List
+
+
def peak(lst: List[int]) -> int:
    """
    Return the peak value of `lst`.

    Binary-search style: look at the middle three elements and recurse
    into the half that must contain the peak.

    NOTE(review): assumes `lst` is unimodal as described in the module
    docstring; lists shorter than 3 make `three` too short and would
    raise IndexError — confirm callers never pass such input.

    >>> peak([1, 2, 3, 4, 5, 4, 3, 2, 1])
    5
    >>> peak([1, 10, 9, 8, 7, 6, 5, 4])
    10
    >>> peak([1, 9, 8, 7])
    9
    >>> peak([1, 2, 3, 4, 5, 6, 7, 0])
    7
    >>> peak([1, 2, 3, 4, 3, 2, 1, 0, -1, -2])
    4
    """
    # middle index
    m = len(lst) // 2

    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]

    # the middle element is larger than both neighbours: it is the peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]

    # still increasing at the middle: the peak lies to the right
    elif three[0] < three[2]:
        # adjust the split so the recursive slice keeps shrinking
        # (avoids an infinite recursion on very short remainders)
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])

    # decreasing at the middle: the peak lies to the left
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])
+
+
if __name__ == "__main__":
    # Run the doctests embedded above when executed as a script.
    import doctest

    doctest.testmod()
diff --git a/divide_and_conquer/power.py b/divide_and_conquer/power.py
new file mode 100644
index 000000000000..f2e023afd536
--- /dev/null
+++ b/divide_and_conquer/power.py
@@ -0,0 +1,33 @@
+def actual_power(a: int, b: int):
+ """
+ Function using divide and conquer to calculate a^b.
+ It only works for integer a,b.
+ """
+ if b == 0:
+ return 1
+ if (b % 2) == 0:
+ return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
+ else:
+ return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
+
+
+def power(a: int, b: int) -> float:
+ """
+ >>> power(4,6)
+ 4096
+ >>> power(2,3)
+ 8
+ >>> power(-2,3)
+ -8
+ >>> power(2,-3)
+ 0.125
+ >>> power(-2,-3)
+ -0.125
+ """
+ if b < 0:
+ return 1 / actual_power(a, b)
+ return actual_power(a, b)
+
+
+if __name__ == "__main__":
+ print(power(-2, -3))
diff --git a/divide_and_conquer/strassen_matrix_multiplication.py b/divide_and_conquer/strassen_matrix_multiplication.py
new file mode 100644
index 000000000000..29a174daebf9
--- /dev/null
+++ b/divide_and_conquer/strassen_matrix_multiplication.py
@@ -0,0 +1,171 @@
+from __future__ import annotations
+
+import math
+
+
+def default_matrix_multiplication(a: list, b: list) -> list:
+ """
+ Multiplication only for 2x2 matrices
+ """
+ if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
+ raise Exception("Matrices are not 2x2")
+ new_matrix = [
+ [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
+ [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
+ ]
+ return new_matrix
+
+
+def matrix_addition(matrix_a: list, matrix_b: list):
+ return [
+ [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
+ for row in range(len(matrix_a))
+ ]
+
+
+def matrix_subtraction(matrix_a: list, matrix_b: list):
+ return [
+ [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
+ for row in range(len(matrix_a))
+ ]
+
+
+def split_matrix(a: list) -> tuple[list, list, list, list]:
+ """
+ Given an even length matrix, returns the top_left, top_right, bot_left and
+ bot_right quadrants.
+
+ >>> split_matrix([[4,3,2,4],[2,3,1,1],[6,5,4,3],[8,4,1,6]])
+ ([[4, 3], [2, 3]], [[2, 4], [1, 1]], [[6, 5], [8, 4]], [[4, 3], [1, 6]])
+ >>> split_matrix([
+ ... [4,3,2,4,4,3,2,4],[2,3,1,1,2,3,1,1],[6,5,4,3,6,5,4,3],[8,4,1,6,8,4,1,6],
+ ... [4,3,2,4,4,3,2,4],[2,3,1,1,2,3,1,1],[6,5,4,3,6,5,4,3],[8,4,1,6,8,4,1,6]
+ ... ]) # doctest: +NORMALIZE_WHITESPACE
+ ([[4, 3, 2, 4], [2, 3, 1, 1], [6, 5, 4, 3], [8, 4, 1, 6]], [[4, 3, 2, 4],
+ [2, 3, 1, 1], [6, 5, 4, 3], [8, 4, 1, 6]], [[4, 3, 2, 4], [2, 3, 1, 1],
+ [6, 5, 4, 3], [8, 4, 1, 6]], [[4, 3, 2, 4], [2, 3, 1, 1], [6, 5, 4, 3],
+ [8, 4, 1, 6]])
+ """
+ if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
+ raise Exception("Odd matrices are not supported!")
+
+ matrix_length = len(a)
+ mid = matrix_length // 2
+
+ top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
+ bot_right = [
+ [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
+ ]
+
+ top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
+ bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]
+
+ return top_left, top_right, bot_left, bot_right
+
+
+def matrix_dimensions(matrix: list) -> tuple[int, int]:
+ return len(matrix), len(matrix[0])
+
+
+def print_matrix(matrix: list) -> None:
+ for i in range(len(matrix)):
+ print(matrix[i])
+
+
+def actual_strassen(matrix_a: list, matrix_b: list) -> list:
+ """
+ Recursive function to calculate the product of two matrices, using the Strassen
+ Algorithm. It only supports even length matrices.
+ """
+ if matrix_dimensions(matrix_a) == (2, 2):
+ return default_matrix_multiplication(matrix_a, matrix_b)
+
+ a, b, c, d = split_matrix(matrix_a)
+ e, f, g, h = split_matrix(matrix_b)
+
+ t1 = actual_strassen(a, matrix_subtraction(f, h))
+ t2 = actual_strassen(matrix_addition(a, b), h)
+ t3 = actual_strassen(matrix_addition(c, d), e)
+ t4 = actual_strassen(d, matrix_subtraction(g, e))
+ t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
+ t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
+ t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))
+
+ top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
+ top_right = matrix_addition(t1, t2)
+ bot_left = matrix_addition(t3, t4)
+ bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)
+
+ # construct the new matrix from our 4 quadrants
+ new_matrix = []
+ for i in range(len(top_right)):
+ new_matrix.append(top_left[i] + top_right[i])
+ for i in range(len(bot_right)):
+ new_matrix.append(bot_left[i] + bot_right[i])
+ return new_matrix
+
+
+def strassen(matrix1: list, matrix2: list) -> list:
+ """
+ >>> strassen([[2,1,3],[3,4,6],[1,4,2],[7,6,7]], [[4,2,3,4],[2,1,1,1],[8,6,4,2]])
+ [[34, 23, 19, 15], [68, 46, 37, 28], [28, 18, 15, 12], [96, 62, 55, 48]]
+ >>> strassen([[3,7,5,6,9],[1,5,3,7,8],[1,4,4,5,7]], [[2,4],[5,2],[1,7],[5,5],[7,8]])
+ [[139, 163], [121, 134], [100, 121]]
+ """
+ if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
+ raise Exception(
+ f"Unable to multiply these matrices, please check the dimensions. \n"
+ f"Matrix A:{matrix1} \nMatrix B:{matrix2}"
+ )
+ dimension1 = matrix_dimensions(matrix1)
+ dimension2 = matrix_dimensions(matrix2)
+
+ if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
+ return matrix1, matrix2
+
+ maximum = max(max(dimension1), max(dimension2))
+ maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
+ new_matrix1 = matrix1
+ new_matrix2 = matrix2
+
+ # Adding zeros to the matrices so that the arrays dimensions are the same and also
+ # power of 2
+ for i in range(0, maxim):
+ if i < dimension1[0]:
+ for j in range(dimension1[1], maxim):
+ new_matrix1[i].append(0)
+ else:
+ new_matrix1.append([0] * maxim)
+ if i < dimension2[0]:
+ for j in range(dimension2[1], maxim):
+ new_matrix2[i].append(0)
+ else:
+ new_matrix2.append([0] * maxim)
+
+ final_matrix = actual_strassen(new_matrix1, new_matrix2)
+
+ # Removing the additional zeros
+ for i in range(0, maxim):
+ if i < dimension1[0]:
+ for j in range(dimension2[1], maxim):
+ final_matrix[i].pop()
+ else:
+ final_matrix.pop()
+ return final_matrix
+
+
+if __name__ == "__main__":
+ matrix1 = [
+ [2, 3, 4, 5],
+ [6, 4, 3, 1],
+ [2, 3, 6, 7],
+ [3, 1, 2, 4],
+ [2, 3, 4, 5],
+ [6, 4, 3, 1],
+ [2, 3, 6, 7],
+ [3, 1, 2, 4],
+ [2, 3, 4, 5],
+ [6, 2, 3, 1],
+ ]
+ matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
+ print(strassen(matrix1, matrix2))
diff --git a/dynamic_programming/FractionalKnapsack.py b/dynamic_programming/FractionalKnapsack.py
deleted file mode 100644
index 74e85b4b4708..000000000000
--- a/dynamic_programming/FractionalKnapsack.py
+++ /dev/null
@@ -1,12 +0,0 @@
-from itertools import accumulate
-from bisect import bisect
-
-def fracKnapsack(vl, wt, W, n):
-
- r = list(sorted(zip(vl,wt), key=lambda x:x[0]/x[1],reverse=True))
- vl , wt = [i[0] for i in r],[i[1] for i in r]
- acc=list(accumulate(wt))
- k = bisect(acc,W)
- return 0 if k == 0 else sum(vl[:k])+(W-acc[k-1])*(vl[k])/(wt[k]) if k!=n else sum(vl[:k])
-
-print("%.0f"%fracKnapsack([60, 100, 120],[10, 20, 30],50,3))
diff --git a/dynamic_programming/__init__.py b/dynamic_programming/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/dynamic_programming/abbreviation.py b/dynamic_programming/abbreviation.py
index f4d07e402925..5175aa9ed92f 100644
--- a/dynamic_programming/abbreviation.py
+++ b/dynamic_programming/abbreviation.py
@@ -12,7 +12,13 @@
"""
-def abbr(a, b):
+def abbr(a: str, b: str) -> bool:
+ """
+ >>> abbr("daBcd", "ABC")
+ True
+ >>> abbr("dBcd", "ABC")
+ False
+ """
n = len(a)
m = len(b)
dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
@@ -28,4 +34,6 @@ def abbr(a, b):
if __name__ == "__main__":
- print(abbr("daBcd", "ABC")) # expect True
+ import doctest
+
+ doctest.testmod()
diff --git a/dynamic_programming/bitmask.py b/dynamic_programming/bitmask.py
index 213b22fe9051..2994db5b5e1e 100644
--- a/dynamic_programming/bitmask.py
+++ b/dynamic_programming/bitmask.py
@@ -1,90 +1,93 @@
"""
-This is a python implementation for questions involving task assignments between people.
+This is a Python implementation for questions involving task assignments between people.
Here Bitmasking and DP are used for solving this.
Question :-
-We have N tasks and M people. Each person in M can do only certain of these tasks. Also a person can do only one task and a task is performed only by one person.
+We have N tasks and M people. Each person in M can do only certain of these tasks. Also
+a person can do only one task and a task is performed only by one person.
Find the total no of ways in which the tasks can be distributed.
-
-
"""
-from __future__ import print_function
from collections import defaultdict
class AssignmentUsingBitmask:
- def __init__(self,task_performed,total):
-
- self.total_tasks = total #total no of tasks (N)
-
+ def __init__(self, task_performed, total):
+
+ self.total_tasks = total # total no of tasks (N)
+
# DP table will have a dimension of (2^M)*N
# initially all values are set to -1
- self.dp = [[-1 for i in range(total+1)] for j in range(2**len(task_performed))]
-
- self.task = defaultdict(list) #stores the list of persons for each task
-
- #finalmask is used to check if all persons are included by setting all bits to 1
- self.finalmask = (1< self.total_tasks:
+ # if not everyone gets the task and no more tasks are available, return 0
+ if task_no > self.total_tasks:
return 0
- #if case already considered
- if self.dp[mask][taskno]!=-1:
- return self.dp[mask][taskno]
+ # if case already considered
+ if self.dp[mask][task_no] != -1:
+ return self.dp[mask][task_no]
+
+ # Number of ways when we don't include this task in the arrangement
+ total_ways_util = self.CountWaysUtil(mask, task_no + 1)
- # Number of ways when we dont this task in the arrangement
- total_ways_util = self.CountWaysUtil(mask,taskno+1)
+ # now assign the tasks one by one to all possible persons and recursively
+ # assign for the remaining tasks.
+ if task_no in self.task:
+ for p in self.task[task_no]:
- # now assign the tasks one by one to all possible persons and recursively assign for the remaining tasks.
- if taskno in self.task:
- for p in self.task[taskno]:
-
# if p is already given a task
- if mask & (1<
int:
+ """
+ LeetCode No. 70: Climbing Stairs
+ Distinct ways to climb an n step staircase where
+ each time you can either climb 1 or 2 steps.
+
+ Args:
+ n: number of steps of staircase
+
+ Returns:
+ Distinct ways to climb an n step staircase
+
+ Raises:
+ AssertionError: n not positive integer
+
+ >>> climb_stairs(3)
+ 3
+ >>> climb_stairs(1)
+ 1
+ >>> climb_stairs(-7) # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ ...
+ AssertionError: n needs to be positive integer, your input -7
+ """
+ fmt = "n needs to be positive integer, your input {}"
+ assert isinstance(n, int) and n > 0, fmt.format(n)
+ if n == 1:
+ return 1
+ dp = [0] * (n + 1)
+ dp[0], dp[1] = (1, 1)
+ for i in range(2, n + 1):
+ dp[i] = dp[i - 1] + dp[i - 2]
+ return dp[n]
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/dynamic_programming/coin_change.py b/dynamic_programming/coin_change.py
index 74d86661f52d..2869b5857be1 100644
--- a/dynamic_programming/coin_change.py
+++ b/dynamic_programming/coin_change.py
@@ -5,11 +5,25 @@
the given types of coins?
https://www.hackerrank.com/challenges/coin-change/problem
"""
-from __future__ import print_function
-def dp_count(S, m, n):
-
+def dp_count(S, n):
+ """
+ >>> dp_count([1, 2, 3], 4)
+ 4
+ >>> dp_count([1, 2, 3], 7)
+ 8
+ >>> dp_count([2, 5, 3, 6], 10)
+ 5
+ >>> dp_count([10], 99)
+ 0
+ >>> dp_count([4, 5, 6], 0)
+ 1
+ >>> dp_count([1, 2, 3], -5)
+ 0
+ """
+ if n < 0:
+ return 0
# table[i] represents the number of ways to get to amount i
table = [0] * (n + 1)
@@ -26,6 +40,7 @@ def dp_count(S, m, n):
return table[n]
-if __name__ == '__main__':
- print(dp_count([1, 2, 3], 3, 4)) # answer 4
- print(dp_count([2, 5, 3, 6], 4, 10)) # answer 5
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/dynamic_programming/edit_distance.py b/dynamic_programming/edit_distance.py
index 335e5196ed53..56877e0c50a2 100644
--- a/dynamic_programming/edit_distance.py
+++ b/dynamic_programming/edit_distance.py
@@ -2,12 +2,13 @@
Author : Turfa Auliarachman
Date : October 12, 2016
-This is a pure Python implementation of Dynamic Programming solution to the edit distance problem.
+This is a pure Python implementation of Dynamic Programming solution to the edit
+distance problem.
The problem is :
-Given two strings A and B. Find the minimum number of operations to string B such that A = B. The permitted operations are removal, insertion, and substitution.
+Given two strings A and B. Find the minimum number of operations to string B such that
+A = B. The permitted operations are removal, insertion, and substitution.
"""
-from __future__ import print_function
class EditDistance:
@@ -20,56 +21,85 @@ class EditDistance:
def __init__(self):
self.__prepare__()
- def __prepare__(self, N = 0, M = 0):
- self.dp = [[-1 for y in range(0,M)] for x in range(0,N)]
+ def __prepare__(self, N=0, M=0):
+ self.dp = [[-1 for y in range(0, M)] for x in range(0, N)]
def __solveDP(self, x, y):
- if (x==-1):
- return y+1
- elif (y==-1):
- return x+1
- elif (self.dp[x][y]>-1):
+ if x == -1:
+ return y + 1
+ elif y == -1:
+ return x + 1
+ elif self.dp[x][y] > -1:
return self.dp[x][y]
else:
- if (self.A[x]==self.B[y]):
- self.dp[x][y] = self.__solveDP(x-1,y-1)
+ if self.A[x] == self.B[y]:
+ self.dp[x][y] = self.__solveDP(x - 1, y - 1)
else:
- self.dp[x][y] = 1+min(self.__solveDP(x,y-1), self.__solveDP(x-1,y), self.__solveDP(x-1,y-1))
+ self.dp[x][y] = 1 + min(
+ self.__solveDP(x, y - 1),
+ self.__solveDP(x - 1, y),
+ self.__solveDP(x - 1, y - 1),
+ )
return self.dp[x][y]
def solve(self, A, B):
- if isinstance(A,bytes):
- A = A.decode('ascii')
+ if isinstance(A, bytes):
+ A = A.decode("ascii")
- if isinstance(B,bytes):
- B = B.decode('ascii')
+ if isinstance(B, bytes):
+ B = B.decode("ascii")
self.A = str(A)
self.B = str(B)
self.__prepare__(len(A), len(B))
- return self.__solveDP(len(A)-1, len(B)-1)
+ return self.__solveDP(len(A) - 1, len(B) - 1)
-if __name__ == '__main__':
- try:
- raw_input # Python 2
- except NameError:
- raw_input = input # Python 3
- solver = EditDistance()
+def min_distance_bottom_up(word1: str, word2: str) -> int:
+ """
+ >>> min_distance_bottom_up("intention", "execution")
+ 5
+ >>> min_distance_bottom_up("intention", "")
+ 9
+ >>> min_distance_bottom_up("", "")
+ 0
+ """
+ m = len(word1)
+ n = len(word2)
+ dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
+ for i in range(m + 1):
+ for j in range(n + 1):
+
+ if i == 0: # first string is empty
+ dp[i][j] = j
+ elif j == 0: # second string is empty
+ dp[i][j] = i
+ elif (
+ word1[i - 1] == word2[j - 1]
+ ): # last characters of both substrings are equal
+ dp[i][j] = dp[i - 1][j - 1]
+ else:
+ insert = dp[i][j - 1]
+ delete = dp[i - 1][j]
+ replace = dp[i - 1][j - 1]
+ dp[i][j] = 1 + min(insert, delete, replace)
+ return dp[m][n]
+
- print("****************** Testing Edit Distance DP Algorithm ******************")
- print()
+if __name__ == "__main__":
+ solver = EditDistance()
- print("Enter the first string: ", end="")
- S1 = raw_input().strip()
+ print("****************** Testing Edit Distance DP Algorithm ******************")
+ print()
- print("Enter the second string: ", end="")
- S2 = raw_input().strip()
+ S1 = input("Enter the first string: ").strip()
+ S2 = input("Enter the second string: ").strip()
- print()
- print("The minimum Edit Distance is: %d" % (solver.solve(S1, S2)))
- print()
- print("*************** End of Testing Edit Distance DP Algorithm ***************")
+ print()
+ print("The minimum Edit Distance is: %d" % (solver.solve(S1, S2)))
+ print("The minimum Edit Distance is: %d" % (min_distance_bottom_up(S1, S2)))
+ print()
+ print("*************** End of Testing Edit Distance DP Algorithm ***************")
diff --git a/dynamic_programming/factorial.py b/dynamic_programming/factorial.py
new file mode 100644
index 000000000000..1c9c927f5af3
--- /dev/null
+++ b/dynamic_programming/factorial.py
@@ -0,0 +1,27 @@
+# Factorial of a number using memoization
+
+from functools import lru_cache
+
+
+@lru_cache
+def factorial(num: int) -> int:
+ """
+ >>> factorial(7)
+ 5040
+ >>> factorial(-1)
+ Traceback (most recent call last):
+ ...
+ ValueError: Number should not be negative.
+ >>> [factorial(i) for i in range(10)]
+ [1, 1, 2, 6, 24, 120, 720, 5040, 40320, 362880]
+ """
+ if num < 0:
+ raise ValueError("Number should not be negative.")
+
+ return 1 if num in (0, 1) else num * factorial(num - 1)
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/dynamic_programming/fast_fibonacci.py b/dynamic_programming/fast_fibonacci.py
new file mode 100644
index 000000000000..f48186a34c25
--- /dev/null
+++ b/dynamic_programming/fast_fibonacci.py
@@ -0,0 +1,38 @@
+#!/usr/bin/env python3
+
+"""
+This program calculates the nth Fibonacci number in O(log(n)).
+It's possible to calculate F(1_000_000) in less than a second.
+"""
+from __future__ import annotations
+
+import sys
+
+
+def fibonacci(n: int) -> int:
+ """
+ return F(n)
+ >>> [fibonacci(i) for i in range(13)]
+ [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144]
+ """
+ if n < 0:
+ raise ValueError("Negative arguments are not supported")
+ return _fib(n)[0]
+
+
+# returns (F(n), F(n-1))
+def _fib(n: int) -> tuple[int, int]:
+ if n == 0: # (F(0), F(1))
+ return (0, 1)
+
+ # F(2n) = F(n)[2F(n+1) − F(n)]
+ # F(2n+1) = F(n+1)^2+F(n)^2
+ a, b = _fib(n // 2)
+ c = a * (b * 2 - a)
+ d = a * a + b * b
+ return (d, c + d) if n % 2 else (c, d)
+
+
+if __name__ == "__main__":
+ n = int(sys.argv[1])
+ print(f"fibonacci({n}) is {fibonacci(n)}")
diff --git a/dynamic_programming/fastfibonacci.py b/dynamic_programming/fastfibonacci.py
deleted file mode 100644
index cbc118467b3c..000000000000
--- a/dynamic_programming/fastfibonacci.py
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/usr/bin/python
-# encoding=utf8
-
-"""
-This program calculates the nth Fibonacci number in O(log(n)).
-It's possible to calculate F(1000000) in less than a second.
-"""
-from __future__ import print_function
-import sys
-
-
-# returns F(n)
-def fibonacci(n: int): # noqa: E999 This syntax is Python 3 only
- if n < 0:
- raise ValueError("Negative arguments are not supported")
- return _fib(n)[0]
-
-
-# returns (F(n), F(n-1))
-def _fib(n: int): # noqa: E999 This syntax is Python 3 only
- if n == 0:
- # (F(0), F(1))
- return (0, 1)
- else:
- # F(2n) = F(n)[2F(n+1) − F(n)]
- # F(2n+1) = F(n+1)^2+F(n)^2
- a, b = _fib(n // 2)
- c = a * (b * 2 - a)
- d = a * a + b * b
- if n % 2 == 0:
- return (c, d)
- else:
- return (d, c + d)
-
-
-if __name__ == "__main__":
- args = sys.argv[1:]
- if len(args) != 1:
- print("Too few or too much parameters given.")
- exit(1)
- try:
- n = int(args[0])
- except ValueError:
- print("Could not convert data to an integer.")
- exit(1)
- print("F(%d) = %d" % (n, fibonacci(n)))
diff --git a/dynamic_programming/fibonacci.py b/dynamic_programming/fibonacci.py
index b453ce255853..cab1358ddea1 100644
--- a/dynamic_programming/fibonacci.py
+++ b/dynamic_programming/fibonacci.py
@@ -1,11 +1,10 @@
"""
-This is a pure Python implementation of Dynamic Programming solution to the fibonacci sequence problem.
+This is a pure Python implementation of Dynamic Programming solution to the fibonacci
+sequence problem.
"""
-from __future__ import print_function
class Fibonacci:
-
def __init__(self, N=None):
self.fib_array = []
if N:
@@ -16,34 +15,42 @@ def __init__(self, N=None):
self.fib_array.append(self.fib_array[i - 1] + self.fib_array[i - 2])
elif N == 0:
self.fib_array.append(0)
+ print(self.fib_array)
def get(self, sequence_no=None):
- if sequence_no != None:
+ """
+ >>> Fibonacci(5).get(3)
+ [0, 1, 1, 2, 3, 5]
+ [0, 1, 1, 2]
+ >>> Fibonacci(5).get(6)
+ [0, 1, 1, 2, 3, 5]
+ Out of bound.
+ >>> Fibonacci(5).get(-1)
+ [0, 1, 1, 2, 3, 5]
+ []
+ """
+ if sequence_no is not None:
if sequence_no < len(self.fib_array):
- return print(self.fib_array[:sequence_no + 1])
+ return print(self.fib_array[: sequence_no + 1])
else:
print("Out of bound.")
else:
print("Please specify a value")
-if __name__ == '__main__':
+if __name__ == "__main__":
print("\n********* Fibonacci Series Using Dynamic Programming ************\n")
- try:
- raw_input # Python 2
- except NameError:
- raw_input = input # Python 3
-
print("\n Enter the upper limit for the fibonacci sequence: ", end="")
try:
- N = eval(raw_input().strip())
+ N = int(input().strip())
fib = Fibonacci(N)
print(
- "\n********* Enter different values to get the corresponding fibonacci sequence, enter any negative number to exit. ************\n")
+ "\n********* Enter different values to get the corresponding fibonacci "
+ "sequence, enter any negative number to exit. ************\n"
+ )
while True:
- print("Enter value: ", end=" ")
try:
- i = eval(raw_input().strip())
+ i = int(input("Enter value: ").strip())
if i < 0:
print("\n********* Good Bye!! ************\n")
break
@@ -52,3 +59,7 @@ def get(self, sequence_no=None):
print("\nInvalid input, please try again.")
except NameError:
print("\n********* Invalid input, good bye!! ************\n")
+
+ import doctest
+
+ doctest.testmod()
diff --git a/dynamic_programming/floyd_warshall.py b/dynamic_programming/floyd_warshall.py
index 038499ca03b6..a4b6c6a82568 100644
--- a/dynamic_programming/floyd_warshall.py
+++ b/dynamic_programming/floyd_warshall.py
@@ -1,37 +1,42 @@
import math
+
class Graph:
-
- def __init__(self, N = 0): # a graph with Node 0,1,...,N-1
+ def __init__(self, N=0): # a graph with Node 0,1,...,N-1
self.N = N
- self.W = [[math.inf for j in range(0,N)] for i in range(0,N)] # adjacency matrix for weight
- self.dp = [[math.inf for j in range(0,N)] for i in range(0,N)] # dp[i][j] stores minimum distance from i to j
+ self.W = [
+ [math.inf for j in range(0, N)] for i in range(0, N)
+ ] # adjacency matrix for weight
+ self.dp = [
+ [math.inf for j in range(0, N)] for i in range(0, N)
+ ] # dp[i][j] stores minimum distance from i to j
def addEdge(self, u, v, w):
self.dp[u][v] = w
def floyd_warshall(self):
- for k in range(0,self.N):
- for i in range(0,self.N):
- for j in range(0,self.N):
+ for k in range(0, self.N):
+ for i in range(0, self.N):
+ for j in range(0, self.N):
self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])
def showMin(self, u, v):
return self.dp[u][v]
-
-if __name__ == '__main__':
+
+
+if __name__ == "__main__":
graph = Graph(5)
- graph.addEdge(0,2,9)
- graph.addEdge(0,4,10)
- graph.addEdge(1,3,5)
- graph.addEdge(2,3,7)
- graph.addEdge(3,0,10)
- graph.addEdge(3,1,2)
- graph.addEdge(3,2,1)
- graph.addEdge(3,4,6)
- graph.addEdge(4,1,3)
- graph.addEdge(4,2,4)
- graph.addEdge(4,3,9)
+ graph.addEdge(0, 2, 9)
+ graph.addEdge(0, 4, 10)
+ graph.addEdge(1, 3, 5)
+ graph.addEdge(2, 3, 7)
+ graph.addEdge(3, 0, 10)
+ graph.addEdge(3, 1, 2)
+ graph.addEdge(3, 2, 1)
+ graph.addEdge(3, 4, 6)
+ graph.addEdge(4, 1, 3)
+ graph.addEdge(4, 2, 4)
+ graph.addEdge(4, 3, 9)
graph.floyd_warshall()
- graph.showMin(1,4)
- graph.showMin(0,3)
+ graph.showMin(1, 4)
+ graph.showMin(0, 3)
diff --git a/dynamic_programming/fractional_knapsack.py b/dynamic_programming/fractional_knapsack.py
new file mode 100644
index 000000000000..c74af7ef8fc5
--- /dev/null
+++ b/dynamic_programming/fractional_knapsack.py
@@ -0,0 +1,27 @@
+from bisect import bisect
+from itertools import accumulate
+
+
+def fracKnapsack(vl, wt, W, n):
+ """
+ >>> fracKnapsack([60, 100, 120], [10, 20, 30], 50, 3)
+ 240.0
+ """
+
+ r = list(sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True))
+ vl, wt = [i[0] for i in r], [i[1] for i in r]
+ acc = list(accumulate(wt))
+ k = bisect(acc, W)
+ return (
+ 0
+ if k == 0
+ else sum(vl[:k]) + (W - acc[k - 1]) * (vl[k]) / (wt[k])
+ if k != n
+ else sum(vl[:k])
+ )
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/dynamic_programming/fractional_knapsack_2.py b/dynamic_programming/fractional_knapsack_2.py
new file mode 100644
index 000000000000..cae57738311b
--- /dev/null
+++ b/dynamic_programming/fractional_knapsack_2.py
@@ -0,0 +1,60 @@
+# https://en.wikipedia.org/wiki/Continuous_knapsack_problem
+# https://www.guru99.com/fractional-knapsack-problem-greedy.html
+# https://medium.com/walkinthecode/greedy-algorithm-fractional-knapsack-problem-9aba1daecc93
+
+from __future__ import annotations
+
+
+def fractional_knapsack(
+ value: list[int], weight: list[int], capacity: int
+) -> tuple[int, list[int]]:
+ """
+ >>> value = [1, 3, 5, 7, 9]
+ >>> weight = [0.9, 0.7, 0.5, 0.3, 0.1]
+ >>> fractional_knapsack(value, weight, 5)
+ (25, [1, 1, 1, 1, 1])
+ >>> fractional_knapsack(value, weight, 15)
+ (25, [1, 1, 1, 1, 1])
+ >>> fractional_knapsack(value, weight, 25)
+ (25, [1, 1, 1, 1, 1])
+ >>> fractional_knapsack(value, weight, 26)
+ (25, [1, 1, 1, 1, 1])
+ >>> fractional_knapsack(value, weight, -1)
+ (-90.0, [0, 0, 0, 0, -10.0])
+ >>> fractional_knapsack([1, 3, 5, 7], weight, 30)
+ (16, [1, 1, 1, 1])
+ >>> fractional_knapsack(value, [0.9, 0.7, 0.5, 0.3, 0.1], 30)
+ (25, [1, 1, 1, 1, 1])
+ >>> fractional_knapsack([], [], 30)
+ (0, [])
+ """
+ index = list(range(len(value)))
+ ratio = [v / w for v, w in zip(value, weight)]
+ index.sort(key=lambda i: ratio[i], reverse=True)
+
+ max_value = 0
+ fractions = [0] * len(value)
+ for i in index:
+ if weight[i] <= capacity:
+ fractions[i] = 1
+ max_value += value[i]
+ capacity -= weight[i]
+ else:
+ fractions[i] = capacity / weight[i]
+ max_value += value[i] * capacity / weight[i]
+ break
+
+ return max_value, fractions
+
+
+if __name__ == "__main__":
+ n = int(input("Enter number of items: "))
+ value = input(f"Enter the values of the {n} item(s) in order: ").split()
+ value = [int(v) for v in value]
+ weight = input(f"Enter the positive weights of the {n} item(s) in order: ".split())
+ weight = [int(w) for w in weight]
+ capacity = int(input("Enter maximum weight: "))
+
+ max_value, fractions = fractional_knapsack(value, weight, capacity)
+ print("The maximum value of items that can be carried:", max_value)
+ print("The fractions in which the items should be taken:", fractions)
diff --git a/dynamic_programming/integer_partition.py b/dynamic_programming/integer_partition.py
index 7b27afebaa6c..4eb06348ce84 100644
--- a/dynamic_programming/integer_partition.py
+++ b/dynamic_programming/integer_partition.py
@@ -1,45 +1,37 @@
-from __future__ import print_function
+"""
+The number of partitions of a number n into at least k parts equals the number of
+partitions into exactly k parts plus the number of partitions into at least k-1 parts.
+Subtracting 1 from each part of a partition of n into k parts gives a partition of n-k
+into k parts. These two facts together are used for this algorithm.
+"""
-try:
- xrange #Python 2
-except NameError:
- xrange = range #Python 3
-try:
- raw_input #Python 2
-except NameError:
- raw_input = input #Python 3
-
-'''
-The number of partitions of a number n into at least k parts equals the number of partitions into exactly k parts
-plus the number of partitions into at least k-1 parts. Subtracting 1 from each part of a partition of n into k parts
-gives a partition of n-k into k parts. These two facts together are used for this algorithm.
-'''
def partition(m):
- memo = [[0 for _ in xrange(m)] for _ in xrange(m+1)]
- for i in xrange(m+1):
- memo[i][0] = 1
+ memo = [[0 for _ in range(m)] for _ in range(m + 1)]
+ for i in range(m + 1):
+ memo[i][0] = 1
+
+ for n in range(m + 1):
+ for k in range(1, m):
+ memo[n][k] += memo[n][k - 1]
+ if n - k > 0:
+ memo[n][k] += memo[n - k - 1][k]
- for n in xrange(m+1):
- for k in xrange(1, m):
- memo[n][k] += memo[n][k-1]
- if n-k > 0:
- memo[n][k] += memo[n-k-1][k]
+ return memo[m][m - 1]
- return memo[m][m-1]
-if __name__ == '__main__':
- import sys
+if __name__ == "__main__":
+ import sys
- if len(sys.argv) == 1:
- try:
- n = int(raw_input('Enter a number: '))
- print(partition(n))
- except ValueError:
- print('Please enter a number.')
- else:
- try:
- n = int(sys.argv[1])
- print(partition(n))
- except ValueError:
- print('Please pass a number.')
\ No newline at end of file
+ if len(sys.argv) == 1:
+ try:
+ n = int(input("Enter a number: ").strip())
+ print(partition(n))
+ except ValueError:
+ print("Please enter a number.")
+ else:
+ try:
+ n = int(sys.argv[1])
+ print(partition(n))
+ except ValueError:
+ print("Please pass a number.")
diff --git a/dynamic_programming/iterating_through_submasks.py b/dynamic_programming/iterating_through_submasks.py
new file mode 100644
index 000000000000..855af61d6707
--- /dev/null
+++ b/dynamic_programming/iterating_through_submasks.py
@@ -0,0 +1,61 @@
+"""
+Author : Syed Faizan (3rd Year Student IIIT Pune)
+github : faizan2700
+You are given a bitmask m and you want to efficiently iterate through all of
+its submasks. The mask s is a submask of m if s sets only bits that are
+also set in m.
+"""
+from __future__ import annotations
+
+
+def list_of_submasks(mask: int) -> list[int]:
+
+ """
+ Args:
+ mask : number which shows mask ( always integer > 0, zero does not have any
+ submasks )
+
+ Returns:
+ all_submasks : the list of submasks of mask (a mask s is called a submask
+ of m if s sets only bits that are also set in m)
+
+ Raises:
+ AssertionError: mask not positive integer
+
+ >>> list_of_submasks(15)
+ [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1]
+ >>> list_of_submasks(13)
+ [13, 12, 9, 8, 5, 4, 1]
+ >>> list_of_submasks(-7) # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ ...
+ AssertionError: mask needs to be positive integer, your input -7
+ >>> list_of_submasks(0) # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ ...
+ AssertionError: mask needs to be positive integer, your input 0
+
+ """
+
+ fmt = "mask needs to be positive integer, your input {}"
+ assert isinstance(mask, int) and mask > 0, fmt.format(mask)
+
+ """
+ first submask iterated will be mask itself then operation will be performed
+ to get other submasks till we reach empty submask that is zero ( zero is not
+ included in final submasks list )
+ """
+ all_submasks = []
+ submask = mask
+
+ while submask:
+ all_submasks.append(submask)
+ submask = (submask - 1) & mask
+
+ return all_submasks
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/dynamic_programming/k_means_clustering_tensorflow.py b/dynamic_programming/k_means_clustering_tensorflow.py_tf
similarity index 51%
rename from dynamic_programming/k_means_clustering_tensorflow.py
rename to dynamic_programming/k_means_clustering_tensorflow.py_tf
index b6813c6a22b3..4fbcedeaa0dc 100644
--- a/dynamic_programming/k_means_clustering_tensorflow.py
+++ b/dynamic_programming/k_means_clustering_tensorflow.py_tf
@@ -14,24 +14,24 @@ def TFKMeansCluster(vectors, noofclusters):
noofclusters = int(noofclusters)
assert noofclusters < len(vectors)
- #Find out the dimensionality
+ # Find out the dimensionality
dim = len(vectors[0])
- #Will help select random centroids from among the available vectors
+ # Will help select random centroids from among the available vectors
vector_indices = list(range(len(vectors)))
shuffle(vector_indices)
- #GRAPH OF COMPUTATION
- #We initialize a new graph and set it as the default during each run
- #of this algorithm. This ensures that as this function is called
- #multiple times, the default graph doesn't keep getting crowded with
- #unused ops and Variables from previous function calls.
+ # GRAPH OF COMPUTATION
+ # We initialize a new graph and set it as the default during each run
+ # of this algorithm. This ensures that as this function is called
+ # multiple times, the default graph doesn't keep getting crowded with
+ # unused ops and Variables from previous function calls.
graph = tf.Graph()
with graph.as_default():
- #SESSION OF COMPUTATION
+ # SESSION OF COMPUTATION
sess = tf.Session()
@@ -39,8 +39,9 @@ def TFKMeansCluster(vectors, noofclusters):
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
- centroids = [tf.Variable((vectors[vector_indices[i]]))
- for i in range(noofclusters)]
+ centroids = [
+ tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
+ ]
##These nodes will assign the centroid Variables the appropriate
##values
centroid_value = tf.placeholder("float64", [dim])
@@ -56,26 +57,24 @@ def TFKMeansCluster(vectors, noofclusters):
assignment_value = tf.placeholder("int32")
cluster_assigns = []
for assignment in assignments:
- cluster_assigns.append(tf.assign(assignment,
- assignment_value))
+ cluster_assigns.append(tf.assign(assignment, assignment_value))
##Now lets construct the node that will compute the mean
- #The placeholder for the input
+ # The placeholder for the input
mean_input = tf.placeholder("float", [None, dim])
- #The Node/op takes the input and computes a mean along the 0th
- #dimension, i.e. the list of input vectors
+ # The Node/op takes the input and computes a mean along the 0th
+ # dimension, i.e. the list of input vectors
mean_op = tf.reduce_mean(mean_input, 0)
##Node for computing Euclidean distances
- #Placeholders for input
+ # Placeholders for input
v1 = tf.placeholder("float", [dim])
v2 = tf.placeholder("float", [dim])
- euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(
- v1, v2), 2)))
+ euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(v1, v2), 2)))
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
- #Placeholder for input
+ # Placeholder for input
centroid_distances = tf.placeholder("float", [noofclusters])
cluster_assignment = tf.argmin(centroid_distances, 0)
@@ -87,55 +86,62 @@ def TFKMeansCluster(vectors, noofclusters):
##will be included in the initialization.
init_op = tf.initialize_all_variables()
- #Initialize all variables
+ # Initialize all variables
sess.run(init_op)
##CLUSTERING ITERATIONS
- #Now perform the Expectation-Maximization steps of K-Means clustering
- #iterations. To keep things simple, we will only do a set number of
- #iterations, instead of using a Stopping Criterion.
+ # Now perform the Expectation-Maximization steps of K-Means clustering
+ # iterations. To keep things simple, we will only do a set number of
+ # iterations, instead of using a Stopping Criterion.
noofiterations = 100
for iteration_n in range(noofiterations):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
- #Iterate over each vector
+ # Iterate over each vector
for vector_n in range(len(vectors)):
vect = vectors[vector_n]
- #Compute Euclidean distance between this vector and each
- #centroid. Remember that this list cannot be named
+ # Compute Euclidean distance between this vector and each
+ # centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
- #cluster assignment node.
- distances = [sess.run(euclid_dist, feed_dict={
- v1: vect, v2: sess.run(centroid)})
- for centroid in centroids]
- #Now use the cluster assignment node, with the distances
- #as the input
- assignment = sess.run(cluster_assignment, feed_dict = {
- centroid_distances: distances})
- #Now assign the value to the appropriate state variable
- sess.run(cluster_assigns[vector_n], feed_dict={
- assignment_value: assignment})
+ # cluster assignment node.
+ distances = [
+ sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
+ for centroid in centroids
+ ]
+ # Now use the cluster assignment node, with the distances
+ # as the input
+ assignment = sess.run(
+ cluster_assignment, feed_dict={centroid_distances: distances}
+ )
+ # Now assign the value to the appropriate state variable
+ sess.run(
+ cluster_assigns[vector_n], feed_dict={assignment_value: assignment}
+ )
##MAXIMIZATION STEP
- #Based on the expected state computed from the Expectation Step,
- #compute the locations of the centroids so as to maximize the
- #overall objective of minimizing within-cluster Sum-of-Squares
+ # Based on the expected state computed from the Expectation Step,
+ # compute the locations of the centroids so as to maximize the
+ # overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(noofclusters):
- #Collect all the vectors assigned to this cluster
- assigned_vects = [vectors[i] for i in range(len(vectors))
- if sess.run(assignments[i]) == cluster_n]
- #Compute new centroid location
- new_location = sess.run(mean_op, feed_dict={
- mean_input: array(assigned_vects)})
- #Assign value to appropriate variable
- sess.run(cent_assigns[cluster_n], feed_dict={
- centroid_value: new_location})
-
- #Return centroids and assignments
+ # Collect all the vectors assigned to this cluster
+ assigned_vects = [
+ vectors[i]
+ for i in range(len(vectors))
+ if sess.run(assignments[i]) == cluster_n
+ ]
+ # Compute new centroid location
+ new_location = sess.run(
+ mean_op, feed_dict={mean_input: array(assigned_vects)}
+ )
+ # Assign value to appropriate variable
+ sess.run(
+ cent_assigns[cluster_n], feed_dict={centroid_value: new_location}
+ )
+
+ # Return centroids and assignments
centroids = sess.run(centroids)
assignments = sess.run(assignments)
return centroids, assignments
-
diff --git a/dynamic_programming/knapsack.py b/dynamic_programming/knapsack.py
index 27d1cfed799b..69e54c00aa4e 100644
--- a/dynamic_programming/knapsack.py
+++ b/dynamic_programming/knapsack.py
@@ -1,42 +1,150 @@
"""
-Given weights and values of n items, put these items in a knapsack of capacity W to get the maximum total value in the knapsack.
+Given weights and values of n items, put these items in a knapsack of
+ capacity W to get the maximum total value in the knapsack.
+
+Note that only the integer weights 0-1 knapsack problem is solvable
+ using dynamic programming.
"""
-def MF_knapsack(i,wt,val,j):
- '''
- This code involves the concept of memory functions. Here we solve the subproblems which are needed
- unlike the below example
+
+
def MF_knapsack(i, wt, val, j):
    """
    Memory-function (top-down, memoized) 0-1 knapsack: best value achievable
    using the first i items within remaining capacity j.  Unlike the bottom-up
    solver, only the subproblems actually reached are ever computed.

    Relies on a module-level table F pre-filled with -1 (unsolved) and 0s in
    the base row/column — set up by the caller before the first call.
    """
    global F  # a global dp table for knapsack
    if F[i][j] >= 0:
        # Already solved: reuse the memoized answer.
        return F[i][j]
    if j < wt[i - 1]:
        # Item i cannot fit, so it must be skipped.
        best = MF_knapsack(i - 1, wt, val, j)
    else:
        skip_item = MF_knapsack(i - 1, wt, val, j)
        take_item = MF_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1]
        best = max(skip_item, take_item)
    F[i][j] = best
    return F[i][j]
+
def knapsack(W, wt, val, n):
- dp = [[0 for i in range(W+1)]for j in range(n+1)]
+ dp = [[0 for i in range(W + 1)] for j in range(n + 1)]
- for i in range(1,n+1):
- for w in range(1,W+1):
- if(wt[i-1]<=w):
- dp[i][w] = max(val[i-1]+dp[i-1][w-wt[i-1]],dp[i-1][w])
+ for i in range(1, n + 1):
+ for w in range(1, W + 1):
+ if wt[i - 1] <= w:
+ dp[i][w] = max(val[i - 1] + dp[i - 1][w - wt[i - 1]], dp[i - 1][w])
else:
- dp[i][w] = dp[i-1][w]
+ dp[i][w] = dp[i - 1][w]
+
+ return dp[n][W], dp
+
+
def knapsack_with_example_solution(W: int, wt: list, val: list):
    """
    Solves the integer weights knapsack problem and also returns one of the
    several possible optimal subsets.

    Parameters
    ---------

    W: int, the total maximum weight for the given knapsack problem.
    wt: list, the vector of weights for all items where wt[i] is the weight
    of the i-th item.
    val: list, the vector of values for all items where val[i] is the value
    of the i-th item

    Returns
    -------
    optimal_val: float, the optimal value for the given knapsack problem
    example_optional_set: set, the indices of one of the optimal subsets
    which gave rise to the optimal value.

    Examples
    -------
    >>> knapsack_with_example_solution(10, [1, 3, 5, 2], [10, 20, 100, 22])
    (142, {2, 3, 4})
    >>> knapsack_with_example_solution(6, [4, 3, 2, 3], [3, 2, 4, 4])
    (8, {3, 4})
    >>> knapsack_with_example_solution(6, [4, 3, 2, 3], [3, 2, 4])
    Traceback (most recent call last):
        ...
    ValueError: The number of weights must be the same as the number of values.
    But got 4 weights and 3 values
    """
    # Validate the vector types before doing any work.
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )

    num_items = len(wt)
    if num_items != len(val):
        raise ValueError(
            "The number of weights must be the "
            "same as the number of values.\nBut "
            f"got {num_items} weights and {len(val)} values"
        )
    # Only integer weights are solvable by this DP formulation.
    for i, weight in enumerate(wt):
        if not isinstance(weight, int):
            raise TypeError(
                "All weights must be integers but "
                f"got weight of type {type(weight)} at index {i}"
            )

    optimal_val, dp_table = knapsack(W, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, W, example_optional_set)
    return optimal_val, example_optional_set
+
+
+def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
+ """
+ Recursively reconstructs one of the optimal subsets given
+ a filled DP table and the vector of weights
+
+ Parameters
+ ---------
+
+ dp: list of list, the table of a solved integer weight dynamic programming problem
+
+ wt: list or tuple, the vector of weights of the items
+ i: int, the index of the item under consideration
+ j: int, the current possible maximum weight
+ optimal_set: set, the optimal subset so far. This gets modified by the function.
+
+ Returns
+ -------
+ None
+
+ """
+ # for the current item i at a maximum weight j to be part of an optimal subset,
+ # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
+ # where i - 1 means considering only the previous items at the given maximum weight
+ if i > 0 and j > 0:
+ if dp[i - 1][j] == dp[i][j]:
+ _construct_solution(dp, wt, i - 1, j, optimal_set)
+ else:
+ optimal_set.add(i)
+ _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)
+
+
+if __name__ == "__main__":
+ """
Adding test case for knapsack
- '''
- val = [3,2,4,4]
- wt = [4,3,2,3]
+ """
+ val = [3, 2, 4, 4]
+ wt = [4, 3, 2, 3]
n = 4
w = 6
- F = [[0]*(w + 1)] + [[0] + [-1 for i in range(w + 1)] for j in range(n + 1)]
- print(knapsack(w,wt,val,n))
- print(MF_knapsack(n,wt,val,w)) # switched the n and w
-
+ F = [[0] * (w + 1)] + [[0] + [-1 for i in range(w + 1)] for j in range(n + 1)]
+ optimal_solution, _ = knapsack(w, wt, val, n)
+ print(optimal_solution)
+ print(MF_knapsack(n, wt, val, w)) # switched the n and w
+
+ # testing the dynamic programming problem with example
+ # the optimal subset for the above example are items 3 and 4
+ optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
+ assert optimal_solution == 8
+ assert optimal_subset == {3, 4}
+ print("optimal_value = ", optimal_solution)
+ print("An optimal subset corresponding to the optimal value", optimal_subset)
diff --git a/dynamic_programming/longest_common_subsequence.py b/dynamic_programming/longest_common_subsequence.py
index 0a4771cb2efd..fdcf3311a017 100644
--- a/dynamic_programming/longest_common_subsequence.py
+++ b/dynamic_programming/longest_common_subsequence.py
@@ -1,37 +1,83 @@
"""
-LCS Problem Statement: Given two sequences, find the length of longest subsequence present in both of them.
-A subsequence is a sequence that appears in the same relative order, but not necessarily continious.
+LCS Problem Statement: Given two sequences, find the length of longest subsequence
+present in both of them. A subsequence is a sequence that appears in the same relative
+order, but not necessarily continuous.
Example:"abc", "abg" are subsequences of "abcdefgh".
"""
-from __future__ import print_function
-try:
- xrange # Python 2
-except NameError:
- xrange = range # Python 3
-def lcs_dp(x, y):
def longest_common_subsequence(x: str, y: str):
    """
    Finds the longest common subsequence between two strings and also returns
    the subsequence that was found.

    Parameters
    ----------

    x: str, one of the strings
    y: str, the other string

    Returns
    -------
    A tuple (length, subsequence) where length == len(subsequence).

    >>> longest_common_subsequence("programming", "gaming")
    (6, 'gaming')
    >>> longest_common_subsequence("physics", "smartphone")
    (2, 'ph')
    >>> longest_common_subsequence("computer", "food")
    (1, 'o')
    """
    assert x is not None
    assert y is not None

    rows = len(x)
    cols = len(y)

    # table[i][j] = length of the LCS of x[:i] and y[:j]
    table = [[0] * (cols + 1) for _ in range(rows + 1)]

    for i in range(1, rows + 1):
        for j in range(1, cols + 1):
            bonus = 1 if x[i - 1] == y[j - 1] else 0
            table[i][j] = max(
                table[i - 1][j], table[i][j - 1], table[i - 1][j - 1] + bonus
            )

    # Walk the table backwards to recover one longest subsequence,
    # preferring the diagonal move on ties.
    sequence = ""
    i, j = rows, cols
    while i > 0 and j > 0:
        bonus = 1 if x[i - 1] == y[j - 1] else 0
        if table[i][j] == table[i - 1][j - 1] + bonus:
            if bonus == 1:
                sequence = x[i - 1] + sequence
            i -= 1
            j -= 1
        elif table[i][j] == table[i - 1][j]:
            i -= 1
        else:
            j -= 1

    return table[rows][cols], sequence
-if __name__=='__main__':
- x = 'AGGTAB'
- y = 'GXTXAYB'
- print(lcs_dp(x, y))
+
+if __name__ == "__main__":
+ a = "AGGTAB"
+ b = "GXTXAYB"
+ expected_ln = 4
+ expected_subseq = "GTAB"
+
+ ln, subseq = longest_common_subsequence(a, b)
+ print("len =", ln, ", sub-sequence =", subseq)
+ import doctest
+
+ doctest.testmod()
diff --git a/dynamic_programming/longest_increasing_subsequence.py b/dynamic_programming/longest_increasing_subsequence.py
index b6d165909e70..f5ca8a2b5cdc 100644
--- a/dynamic_programming/longest_increasing_subsequence.py
+++ b/dynamic_programming/longest_increasing_subsequence.py
@@ -1,42 +1,61 @@
-'''
+"""
Author : Mehdi ALAOUI
-This is a pure Python implementation of Dynamic Programming solution to the longest increasing subsequence of a given sequence.
+This is a pure Python implementation of Dynamic Programming solution to the longest
+increasing subsequence of a given sequence.
The problem is :
-Given an ARRAY, to find the longest and increasing sub ARRAY in that given ARRAY and return it.
-Example: [10, 22, 9, 33, 21, 50, 41, 60, 80] as input will return [10, 22, 33, 41, 60, 80] as output
-'''
-from __future__ import print_function
-
-def longestSub(ARRAY): #This function is recursive
-
- ARRAY_LENGTH = len(ARRAY)
- if(ARRAY_LENGTH <= 1): #If the array contains only one element, we return it (it's the stop condition of recursion)
- return ARRAY
- #Else
- PIVOT=ARRAY[0]
- isFound=False
- i=1
- LONGEST_SUB=[]
- while(not isFound and i= ARRAY[i] ]
- TEMPORARY_ARRAY = longestSub(TEMPORARY_ARRAY)
- if ( len(TEMPORARY_ARRAY) > len(LONGEST_SUB) ):
- LONGEST_SUB = TEMPORARY_ARRAY
- else:
- i+=1
-
- TEMPORARY_ARRAY = [ element for element in ARRAY[1:] if element >= PIVOT ]
- TEMPORARY_ARRAY = [PIVOT] + longestSub(TEMPORARY_ARRAY)
- if ( len(TEMPORARY_ARRAY) > len(LONGEST_SUB) ):
- return TEMPORARY_ARRAY
- else:
- return LONGEST_SUB
-
-#Some examples
-
-print(longestSub([4,8,7,5,1,12,2,3,9]))
-print(longestSub([9,8,7,6,5,7]))
\ No newline at end of file
+Given an array, to find the longest and increasing sub-array in that given array and
+return it.
+Example: [10, 22, 9, 33, 21, 50, 41, 60, 80] as input will return
+ [10, 22, 33, 41, 60, 80] as output
+"""
+from __future__ import annotations
+
+
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    """
    Return the longest non-decreasing subsequence of the given array.

    >>> longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80])
    [10, 22, 33, 41, 60, 80]
    >>> longest_subsequence([4, 8, 7, 5, 1, 12, 2, 3, 9])
    [1, 2, 3, 9]
    >>> longest_subsequence([9, 8, 7, 6, 5, 7])
    [8]
    >>> longest_subsequence([1, 1, 1])
    [1, 1, 1]
    >>> longest_subsequence([])
    []
    """
    n = len(array)
    # Base case: arrays of length 0 or 1 are trivially the answer
    # (this is the stop condition of the recursion).
    if n <= 1:
        return array

    pivot = array[0]
    best_without_pivot: list[int] = []
    found_smaller = False
    index = 1
    # Stop at the first element smaller than the pivot and recurse on the
    # suffix filtered against it.
    while index < n and not found_smaller:
        if array[index] < pivot:
            found_smaller = True
            candidate = longest_subsequence(
                [value for value in array[index:] if value >= array[index]]
            )
            if len(candidate) > len(best_without_pivot):
                best_without_pivot = candidate
        else:
            index += 1

    # Alternative: keep the pivot and recurse on the elements that may follow it.
    with_pivot = [pivot] + longest_subsequence(
        [value for value in array[1:] if value >= pivot]
    )
    if len(with_pivot) > len(best_without_pivot):
        return with_pivot
    return best_without_pivot
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/dynamic_programming/longest_increasing_subsequence_O(nlogn).py b/dynamic_programming/longest_increasing_subsequence_O(nlogn).py
deleted file mode 100644
index 21122a04d69f..000000000000
--- a/dynamic_programming/longest_increasing_subsequence_O(nlogn).py
+++ /dev/null
@@ -1,41 +0,0 @@
-from __future__ import print_function
-#############################
-# Author: Aravind Kashyap
-# File: lis.py
-# comments: This programme outputs the Longest Strictly Increasing Subsequence in O(NLogN)
-# Where N is the Number of elements in the list
-#############################
-def CeilIndex(v,l,r,key):
- while r-l > 1:
- m = (l + r)/2
- if v[m] >= key:
- r = m
- else:
- l = m
-
- return r
-
-
-def LongestIncreasingSubsequenceLength(v):
- if(len(v) == 0):
- return 0
-
- tail = [0]*len(v)
- length = 1
-
- tail[0] = v[0]
-
- for i in range(1,len(v)):
- if v[i] < tail[0]:
- tail[0] = v[i]
- elif v[i] > tail[length-1]:
- tail[length] = v[i]
- length += 1
- else:
- tail[CeilIndex(tail,-1,length-1,v[i])] = v[i]
-
- return length
-
-
-v = [2, 5, 3, 7, 11, 8, 10, 13, 6]
-print(LongestIncreasingSubsequenceLength(v))
diff --git a/dynamic_programming/longest_increasing_subsequence_o(nlogn).py b/dynamic_programming/longest_increasing_subsequence_o(nlogn).py
new file mode 100644
index 000000000000..af536f8bbd01
--- /dev/null
+++ b/dynamic_programming/longest_increasing_subsequence_o(nlogn).py
@@ -0,0 +1,55 @@
+#############################
+# Author: Aravind Kashyap
+# File: lis.py
+# comments: This programme outputs the Longest Strictly Increasing Subsequence in
+# O(NLogN) Where N is the Number of elements in the list
+#############################
+from __future__ import annotations
+
+
def CeilIndex(v, l, r, key):  # noqa: E741
    """Binary search: smallest index in the range (l, r] whose value is >= key."""
    while r - l > 1:
        middle = (l + r) // 2
        if v[middle] < key:
            l = middle  # noqa: E741
        else:
            r = middle
    return r
+
+
def LongestIncreasingSubsequenceLength(v: list[int]) -> int:
    """
    Length of the longest strictly increasing subsequence, computed in
    O(n log n) with the patience-sorting tails technique.

    >>> LongestIncreasingSubsequenceLength([2, 5, 3, 7, 11, 8, 10, 13, 6])
    6
    >>> LongestIncreasingSubsequenceLength([])
    0
    >>> LongestIncreasingSubsequenceLength([0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3,
    ... 11, 7, 15])
    6
    >>> LongestIncreasingSubsequenceLength([5, 4, 3, 2, 1])
    1
    """
    if not v:
        return 0

    # tail[k] holds the smallest possible tail value of an increasing
    # subsequence of length k + 1 seen so far.
    tail = [0] * len(v)
    tail[0] = v[0]
    length = 1

    for value in v[1:]:
        if value < tail[0]:
            tail[0] = value
        elif value > tail[length - 1]:
            tail[length] = value
            length += 1
        else:
            # Binary search for the first tail >= value and replace it.
            lo, hi = -1, length - 1
            while hi - lo > 1:
                mid = (lo + hi) // 2
                if tail[mid] >= value:
                    hi = mid
                else:
                    lo = mid
            tail[hi] = value

    return length
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/dynamic_programming/longest_sub_array.py b/dynamic_programming/longest_sub_array.py
index de2c88a8b525..30159a1386c3 100644
--- a/dynamic_programming/longest_sub_array.py
+++ b/dynamic_programming/longest_sub_array.py
@@ -1,33 +1,34 @@
-'''
-Auther : Yvonne
+"""
+Author : Yvonne
-This is a pure Python implementation of Dynamic Programming solution to the longest_sub_array problem.
+This is a pure Python implementation of Dynamic Programming solution to the
+ longest_sub_array problem.
The problem is :
-Given an array, to find the longest and continuous sub array and get the max sum of the sub array in the given array.
-'''
-from __future__ import print_function
+Given an array, to find the longest and continuous sub array and get the max sum of the
+ sub array in the given array.
+"""
class SubArray:
    """Maximum contiguous sub-array sum over a comma-separated number string."""

    def __init__(self, arr):
        # we need a list not a string, so do something to change the type
        self.array = arr.split(",")
        print(("the input array is:", self.array))

    def solve_sub_array(self):
        """Kadane-style DP: return the maximum sum over all contiguous sub arrays."""
        size = len(self.array)
        # ending_here[i]: best sum of a sub array ending at i;
        # best_so_far[i]: best sum seen anywhere in array[:i + 1].
        ending_here = [int(self.array[0])] * size
        best_so_far = [int(self.array[0])] * size
        for idx in range(1, size):
            current = int(self.array[idx])
            ending_here[idx] = max(current + ending_here[idx - 1], current)
            best_so_far[idx] = max(ending_here[idx], best_so_far[idx - 1])
        return best_so_far[size - 1]
-if __name__ == '__main__':
+if __name__ == "__main__":
whole_array = input("please input some numbers:")
array = SubArray(whole_array)
re = array.solve_sub_array()
print(("the results is:", re))
-
diff --git a/dynamic_programming/matrix_chain_order.py b/dynamic_programming/matrix_chain_order.py
index b8234a65acbe..9411bc704f1c 100644
--- a/dynamic_programming/matrix_chain_order.py
+++ b/dynamic_programming/matrix_chain_order.py
@@ -1,46 +1,54 @@
-from __future__ import print_function
-
import sys
-'''
+
+"""
Dynamic Programming
Implementation of Matrix Chain Multiplication
Time Complexity: O(n^3)
Space Complexity: O(n^2)
-'''
+"""
+
+
def MatrixChainOrder(array):
    """
    Matrix chain multiplication by dynamic programming.

    array holds the dimensions: matrix Ai is array[i - 1] x array[i].
    Returns (cost table, split table); the split table records the index at
    which each optimal product is parenthesized.
    """
    size = len(array)
    cost_table = [[0] * size for _ in range(size)]
    split_table = [[0] * size for _ in range(size)]

    for chain_length in range(2, size):
        for start in range(1, size - chain_length + 1):
            end = start + chain_length - 1
            cost_table[start][end] = sys.maxsize
            # Try every split point and keep the cheapest one.
            for split in range(start, end):
                candidate = (
                    cost_table[start][split]
                    + cost_table[split + 1][end]
                    + array[start - 1] * array[split] * array[end]
                )
                if candidate < cost_table[start][end]:
                    cost_table[start][end] = candidate
                    split_table[start][end] = split
    return cost_table, split_table
+
+
+# Print order of matrix with Ai as Matrix
def PrintOptimalSolution(OptimalSolution, i, j):
    """Recursively print the optimal parenthesization of the chain Ai..Aj."""
    if i == j:
        print("A" + str(i), end=" ")
        return
    print("(", end=" ")
    split = OptimalSolution[i][j]
    PrintOptimalSolution(OptimalSolution, i, split)
    PrintOptimalSolution(OptimalSolution, split + 1, j)
    print(")", end=" ")
+
def main():
- array=[30,35,15,5,10,20,25]
- n=len(array)
- #Size of matrix created from above array will be
+ array = [30, 35, 15, 5, 10, 20, 25]
+ n = len(array)
+ # Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
- Matrix , OptimalSolution = MatrixChainOrder(array)
+ Matrix, OptimalSolution = MatrixChainOrder(array)
+
+ print("No. of Operation required: " + str(Matrix[1][n - 1]))
+ PrintOptimalSolution(OptimalSolution, 1, n - 1)
+
- print("No. of Operation required: "+str((Matrix[1][n-1])))
- PrintOptimalSolution(OptimalSolution,1,n-1)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/dynamic_programming/max_non_adjacent_sum.py b/dynamic_programming/max_non_adjacent_sum.py
new file mode 100644
index 000000000000..5362b22ca9dc
--- /dev/null
+++ b/dynamic_programming/max_non_adjacent_sum.py
@@ -0,0 +1,34 @@
+# Video Explanation: https://www.youtube.com/watch?v=6w60Zi1NtL8&feature=emb_logo
+
+from __future__ import annotations
+
+
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """
    Find the maximum non-adjacent sum of the integers in the nums input list

    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    >>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
    18
    >>> maximum_non_adjacent_sum([-1, -5, -3, -7, -2, -2, -6])
    0
    >>> maximum_non_adjacent_sum([499, 500, -3, -7, -2, -2, -6])
    500
    """
    if not nums:
        return 0
    # with_current: best sum of a selection that includes the current element;
    # without_current: best sum of a selection that excludes it.
    with_current = nums[0]
    without_current = 0
    for value in nums[1:]:
        new_with = without_current + value
        new_without = max(with_current, without_current)
        with_current, without_current = new_with, new_without
    return max(with_current, without_current)
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/dynamic_programming/max_sub_array.py b/dynamic_programming/max_sub_array.py
index 5d48882427c0..3060010ef7c6 100644
--- a/dynamic_programming/max_sub_array.py
+++ b/dynamic_programming/max_sub_array.py
@@ -1,60 +1,94 @@
"""
author : Mayank Kumar Jha (mk9440)
"""
-from __future__ import print_function
-
-import time
-import matplotlib.pyplot as plt
-from random import randint
-def find_max_sub_array(A,low,high):
- if low==high:
- return low,high,A[low]
- else :
- mid=(low+high)//2
- left_low,left_high,left_sum=find_max_sub_array(A,low,mid)
- right_low,right_high,right_sum=find_max_sub_array(A,mid+1,high)
- cross_left,cross_right,cross_sum=find_max_cross_sum(A,low,mid,high)
- if left_sum>=right_sum and left_sum>=cross_sum:
- return left_low,left_high,left_sum
- elif right_sum>=left_sum and right_sum>=cross_sum :
- return right_low,right_high,right_sum
+from __future__ import annotations
+
+
def find_max_sub_array(A, low, high):
    """
    Divide-and-conquer maximum subarray over A[low..high].
    Returns (start, end, sum) of a maximum-sum contiguous subarray.
    """
    if low == high:
        return low, high, A[low]

    mid = (low + high) // 2
    left_low, left_high, left_sum = find_max_sub_array(A, low, mid)
    right_low, right_high, right_sum = find_max_sub_array(A, mid + 1, high)

    # Inlined crossing-sum computation (same logic as find_max_cross_sum):
    # best suffix of the left half plus best prefix of the right half.
    best_left_sum, cross_low = -999999999, -1
    running = 0
    for idx in range(mid, low - 1, -1):
        running += A[idx]
        if running > best_left_sum:
            best_left_sum = running
            cross_low = idx
    best_right_sum, cross_high = -999999999, -1
    running = 0
    for idx in range(mid + 1, high + 1):
        running += A[idx]
        if running > best_right_sum:
            best_right_sum = running
            cross_high = idx
    cross_sum = best_left_sum + best_right_sum

    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    if right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_low, cross_high, cross_sum
+
+
def find_max_cross_sum(A, low, mid, high):
    """
    Best subarray sum crossing the midpoint: returns (start, end, sum) of the
    maximum subarray A[start..end] with start <= mid < end.
    """
    # Best suffix of A[low..mid].
    best_left, left_index = -999999999, -1
    total = 0
    for pos in range(mid, low - 1, -1):
        total += A[pos]
        if total > best_left:
            best_left = total
            left_index = pos
    # Best prefix of A[mid+1..high].
    best_right, right_index = -999999999, -1
    total = 0
    for pos in range(mid + 1, high + 1):
        total += A[pos]
        if total > best_right:
            best_right = total
            right_index = pos
    return left_index, right_index, best_left + best_right
+
def max_sub_array(nums: list[int]) -> int:
    """
    Finds the contiguous subarray which has the largest sum and return its sum.
    >>> max_sub_array([-2, 1, -3, 4, -1, 2, 1, -5, 4])
    6

    An empty (sub)array has sum 0.
    >>> max_sub_array([])
    0

    If all elements are negative, the largest subarray would be the empty array,
    having the sum 0.
    >>> max_sub_array([-1, -2, -3])
    0
    >>> max_sub_array([5, -2, -3])
    5
    >>> max_sub_array([31, -41, 59, 26, -53, 58, 97, -93, -23, 84])
    187
    """
    # Kadane's algorithm: a running sum is clamped at 0 (the empty subarray
    # is always an option), and the best value seen is the answer.
    best_sum = running_sum = 0
    for value in nums:
        running_sum = max(0, running_sum + value)
        best_sum = max(best_sum, running_sum)
    return best_sum
+
+
+if __name__ == "__main__":
+ """
+ A random simulation of this algorithm.
+ """
+ import time
+ from random import randint
+
+ from matplotlib import pyplot as plt
+
+ inputs = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
+ tim = []
+ for i in inputs:
+ li = [randint(1, i) for j in range(i)]
+ strt = time.time()
+ (find_max_sub_array(li, 0, len(li) - 1))
+ end = time.time()
+ tim.append(end - strt)
+ print("No of Inputs Time Taken")
+ for i in range(len(inputs)):
+ print(inputs[i], "\t\t", tim[i])
+ plt.plot(inputs, tim)
+ plt.xlabel("Number of Inputs")
+ plt.ylabel("Time taken in seconds ")
+ plt.show()
diff --git a/dynamic_programming/max_sum_contiguous_subsequence.py b/dynamic_programming/max_sum_contiguous_subsequence.py
new file mode 100644
index 000000000000..bac592370c5d
--- /dev/null
+++ b/dynamic_programming/max_sum_contiguous_subsequence.py
@@ -0,0 +1,20 @@
def max_subarray_sum(nums: list) -> int:
    """
    Largest sum of a non-empty contiguous subsequence (Kadane's algorithm).
    Returns 0 for an empty input list.

    >>> max_subarray_sum([6 , 9, -1, 3, -7, -5, 10])
    17
    """
    if not nums:
        return 0
    # running: best sum ending at the current element; best: best overall.
    best = running = nums[0]
    for value in nums[1:]:
        running = max(value, running + value)
        best = max(best, running)
    return best
+
+
+if __name__ == "__main__":
+ nums = [6, 9, -1, 3, -7, -5, 10]
+ print(max_subarray_sum(nums))
diff --git a/dynamic_programming/minimum_cost_path.py b/dynamic_programming/minimum_cost_path.py
new file mode 100644
index 000000000000..3ad24b5528d1
--- /dev/null
+++ b/dynamic_programming/minimum_cost_path.py
@@ -0,0 +1,37 @@
+# Youtube Explanation: https://www.youtube.com/watch?v=lBRtnuxg-gU
+
+from __future__ import annotations
+
+
def minimum_cost_path(matrix: list[list[int]]) -> int:
    """
    Find the minimum cost traced by all possible paths from top left to bottom right in
    a given matrix (moving only right or down).

    The caller's matrix is left unmodified: the DP runs on an internal copy.
    (The previous version accumulated costs directly into the argument.)

    >>> minimum_cost_path([[2, 1], [3, 1], [4, 2]])
    6

    >>> minimum_cost_path([[2, 1, 4], [2, 1, 3], [3, 2, 1]])
    7
    """

    # Work on a row-wise copy so the DP updates do not clobber the input.
    costs = [row[:] for row in matrix]

    # preprocessing the first row
    for i in range(1, len(costs[0])):
        costs[0][i] += costs[0][i - 1]

    # preprocessing the first column
    for i in range(1, len(costs)):
        costs[i][0] += costs[i - 1][0]

    # updating the path cost for current position
    for i in range(1, len(costs)):
        for j in range(1, len(costs[0])):
            costs[i][j] += min(costs[i - 1][j], costs[i][j - 1])

    return costs[-1][-1]
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/dynamic_programming/minimum_partition.py b/dynamic_programming/minimum_partition.py
index 18aa1faa2fa6..8fad4ef3072f 100644
--- a/dynamic_programming/minimum_partition.py
+++ b/dynamic_programming/minimum_partition.py
@@ -1,28 +1,30 @@
"""
Partition a set into two subsets such that the difference of subset sums is minimum
"""
+
+
def findMin(arr):
    """
    Partition arr into two subsets so the difference of the subset sums is
    minimum, and return that difference.

    Fixes a bug in the DP recurrence: the skip-item case must read
    dp[i - 1][j] (previous row), not dp[i][j - 1] (previous column).  The old
    recurrence propagated True along each row, so every sum looked reachable
    and e.g. findMin([1, 2, 7]) wrongly returned 0 instead of 4.

    >>> findMin([1, 2, 7])
    4
    >>> findMin([1, 6, 11, 5])
    1
    >>> findMin([])
    0
    """
    n = len(arr)
    s = sum(arr)

    # dp[i][j] is True when some subset of the first i elements sums to j.
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]

    # The empty subset always sums to 0 (row 0 included, so empty input works).
    for i in range(n + 1):
        dp[i][0] = True

    for i in range(1, n + 1):
        for j in range(1, s + 1):
            # Either skip element i (previous row) ...
            dp[i][j] = dp[i - 1][j]
            # ... or include it when it fits.
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]

    # The best split puts the largest reachable sum <= s/2 on one side.
    diff = s
    for j in range(s // 2, -1, -1):
        if dp[n][j]:
            diff = s - 2 * j
            break

    return diff
diff --git a/dynamic_programming/minimum_steps_to_one.py b/dynamic_programming/minimum_steps_to_one.py
new file mode 100644
index 000000000000..f4eb7033dd20
--- /dev/null
+++ b/dynamic_programming/minimum_steps_to_one.py
@@ -0,0 +1,65 @@
+"""
+YouTube Explanation: https://www.youtube.com/watch?v=f2xi3c1S95M
+
+Given an integer n, return the minimum steps to 1
+
+AVAILABLE STEPS:
+ * Decrement by 1
+ * if n is divisible by 2, divide by 2
+ * if n is divisible by 3, divide by 3
+
+
+Example 1: n = 10
+10 -> 9 -> 3 -> 1
+Result: 3 steps
+
+Example 2: n = 15
+15 -> 5 -> 4 -> 2 -> 1
+Result: 4 steps
+
+Example 3: n = 6
+6 -> 2 -> 1
+Result: 2 step
+"""
+
+from __future__ import annotations
+
+__author__ = "Alexander Joslin"
+
+
def min_steps_to_one(number: int) -> int:
    """
    Tabulated minimum number of steps to reduce ``number`` to 1, where a
    step decrements by 1, halves (if divisible by 2) or thirds (if
    divisible by 3).

    >>> min_steps_to_one(10)
    3
    >>> min_steps_to_one(15)
    4
    >>> min_steps_to_one(6)
    2

    :param number: positive integer to reduce
    :return int: minimum number of steps
    """

    if number <= 0:
        raise ValueError(f"n must be greater than 0. Got n = {number}")

    # steps[k] = fewest steps to reach k from 1; number + 1 acts as "infinity".
    steps = [number + 1] * (number + 1)
    steps[1] = 0  # starting position costs nothing

    for current in range(1, number):
        # Every value reachable from `current` in one (reversed) step.
        reachable = [current + 1]
        if current * 2 <= number:
            reachable.append(current * 2)
        if current * 3 <= number:
            reachable.append(current * 3)
        for nxt in reachable:
            steps[nxt] = min(steps[nxt], steps[current] + 1)
    return steps[number]
+
+
if __name__ == "__main__":
    import doctest

    # Run the doctests embedded in this module when executed directly.
    doctest.testmod()
diff --git a/dynamic_programming/optimal_binary_search_tree.py b/dynamic_programming/optimal_binary_search_tree.py
new file mode 100644
index 000000000000..0d94c1b61d39
--- /dev/null
+++ b/dynamic_programming/optimal_binary_search_tree.py
@@ -0,0 +1,144 @@
+#!/usr/bin/env python3
+
+# This Python program implements an optimal binary search tree (abbreviated BST)
+# building dynamic programming algorithm that delivers O(n^2) performance.
+#
+# The goal of the optimal BST problem is to build a low-cost BST for a
+# given set of nodes, each with its own key and frequency. The frequency
+# of the node is defined as how many time the node is being searched.
+# The search cost of binary search tree is given by this formula:
+#
+# cost(1, n) = sum{i = 1 to n}((depth(node_i) + 1) * node_i_freq)
+#
+# where n is number of nodes in the BST. The characteristic of low-cost
+# BSTs is having a faster overall search time than other implementations.
+# The reason for their fast search time is that the nodes with high
+# frequencies will be placed near the root of the tree while the nodes
+# with low frequencies will be placed near the leaves of the tree thus
+# reducing search time in the most frequent instances.
+import sys
+from random import randint
+
+
class Node:
    """A binary search tree node holding a key and its access frequency."""

    def __init__(self, key, freq):
        # key: the node's search key; freq: how often the key is searched.
        self.key = key
        self.freq = freq

    def __str__(self):
        """
        >>> str(Node(1, 2))
        'Node(key=1, freq=2)'
        """
        return "Node(key={}, freq={})".format(self.key, self.freq)
+
+
def print_binary_search_tree(root, key, i, j, parent, is_left):
    """
    Print the BST encoded in the ``root`` table, one line per node.

    >>> key = [3, 8, 9, 10, 17, 21]
    >>> root = [[0, 1, 1, 1, 1, 1], [0, 1, 1, 1, 1, 3], [0, 0, 2, 3, 3, 3], \
    [0, 0, 0, 3, 3, 3], [0, 0, 0, 0, 4, 5], [0, 0, 0, 0, 0, 5]]
    >>> print_binary_search_tree(root, key, 0, 5, -1, False)
    8 is the root of the binary search tree.
    3 is the left child of key 8.
    10 is the right child of key 8.
    9 is the left child of key 10.
    21 is the right child of key 10.
    17 is the left child of key 21.
    """
    # Empty interval or out-of-range indices: nothing to print.
    if i > j or i < 0 or j > len(root) - 1:
        return

    node = root[i][j]
    if parent == -1:
        # Only the tree's root has no parent.
        print(f"{key[node]} is the root of the binary search tree.")
    else:
        side = "left" if is_left else "right"
        print(f"{key[node]} is the {side} child of key {parent}.")

    # Recurse into the left interval, then the right one.
    print_binary_search_tree(root, key, i, node - 1, key[node], True)
    print_binary_search_tree(root, key, node + 1, j, key[node], False)
+
+
def find_optimal_binary_search_tree(nodes):
    """
    This function calculates and prints the optimal binary search tree.
    The dynamic programming algorithm below runs in O(n^2) time.
    Implemented from CLRS (Introduction to Algorithms) book.
    https://en.wikipedia.org/wiki/Introduction_to_Algorithms

    >>> find_optimal_binary_search_tree([Node(12, 8), Node(10, 34), Node(20, 50), \
    Node(42, 3), Node(25, 40), Node(37, 30)])
    Binary search tree nodes:
    Node(key=10, freq=34)
    Node(key=12, freq=8)
    Node(key=20, freq=50)
    Node(key=25, freq=40)
    Node(key=37, freq=30)
    Node(key=42, freq=3)
    <BLANKLINE>
    The cost of optimal BST for given tree nodes is 324.
    20 is the root of the binary search tree.
    10 is the left child of key 20.
    12 is the right child of key 10.
    25 is the right child of key 20.
    37 is the right child of key 25.
    42 is the right child of key 37.
    """
    # Tree nodes must be sorted first, the code below sorts the keys in
    # increasing order and rearrange its frequencies accordingly.
    nodes.sort(key=lambda node: node.key)

    n = len(nodes)

    keys = [nodes[i].key for i in range(n)]
    freqs = [nodes[i].freq for i in range(n)]

    # This 2D array stores the overall tree cost (which's as minimized as possible);
    # for a single key, cost is equal to frequency of the key.
    dp = [[freqs[i] if i == j else 0 for j in range(n)] for i in range(n)]
    # total[i][j] stores the sum of key frequencies between i and j inclusive
    # in nodes array (renamed from `sum` to stop shadowing the builtin).
    total = [[freqs[i] if i == j else 0 for j in range(n)] for i in range(n)]
    # stores tree roots that will be used later for constructing binary search tree
    root = [[i if i == j else 0 for j in range(n)] for i in range(n)]

    for interval_length in range(2, n + 1):
        for i in range(n - interval_length + 1):
            j = i + interval_length - 1

            dp[i][j] = sys.maxsize  # set the value to "infinity"
            total[i][j] = total[i][j - 1] + freqs[j]

            # Apply Knuth's optimization
            # Loop without optimization: for r in range(i, j + 1):
            for r in range(root[i][j - 1], root[i + 1][j] + 1):  # r is a temporal root
                left = dp[i][r - 1] if r != i else 0  # optimal cost for left subtree
                right = dp[r + 1][j] if r != j else 0  # optimal cost for right subtree
                cost = left + total[i][j] + right

                if dp[i][j] > cost:
                    dp[i][j] = cost
                    root[i][j] = r

    print("Binary search tree nodes:")
    for node in nodes:
        print(node)

    print(f"\nThe cost of optimal BST for given tree nodes is {dp[0][n - 1]}.")
    print_binary_search_tree(root, keys, 0, n - 1, -1, False)
+
+
def main():
    """Build a sample set of nodes with random frequencies and optimize it."""
    sample_nodes = [Node(key, randint(1, 50)) for key in range(10, 0, -1)]
    find_optimal_binary_search_tree(sample_nodes)
+
+
if __name__ == "__main__":
    # Build a random sample tree and print its optimal BST.
    main()
diff --git a/dynamic_programming/rod_cutting.py b/dynamic_programming/rod_cutting.py
index 34350cb8202b..442a39cb1616 100644
--- a/dynamic_programming/rod_cutting.py
+++ b/dynamic_programming/rod_cutting.py
@@ -1,58 +1,207 @@
-### PROBLEM ###
-"""
-We are given a rod of length n and we are given the array of prices, also of
-length n. This array contains the price for selling a rod at a certain length.
-For example, prices[5] shows the price we can sell a rod of length 5.
-Generalising, prices[x] shows the price a rod of length x can be sold.
-We are tasked to find the optimal solution to sell the given rod.
"""
+This module provides two implementations for the rod-cutting problem:
+1. A naive recursive implementation which has an exponential runtime
+2. Two dynamic programming implementations which have quadratic runtime
-### SOLUTION ###
-"""
def naive_cut_rod_recursive(n: int, prices: list):
    """
    Solves the rod-cutting problem via naive recursion without memoization.
    (Reconstructed: the original header was garbled in this chunk.)

    Runtime: O(2^n)

    Arguments
    ---------
    n: int, the length of the rod
    prices: list, the prices for each piece of rod. ``p[i-1]`` is the
    price for a rod of length ``i``

    Returns
    -------
    The maximum revenue obtainable for a rod of length n given the list of
    prices for each piece.

    Examples
    --------
    >>> naive_cut_rod_recursive(4, [1, 5, 8, 9])
    10
    >>> naive_cut_rod_recursive(10, [1, 5, 8, 9, 10, 17, 17, 20, 24, 30])
    30
    """

    _enforce_args(n, prices)
    if n == 0:
        return 0
    # Fixed typo: `max_revue` -> `max_revenue`.
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        # Sell a first piece of length i, then cut the rest optimally.
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )

    return max_revenue
+
+
def top_down_cut_rod(n: int, prices: list):
    """
    Memoized (top-down) dynamic programming solution for the rod-cutting
    problem; a thin wrapper around ``_top_down_cut_rod_recursive``.

    Runtime: O(n^2)

    Arguments
    --------
    n: int, the length of the rod
    prices: list, the prices for each piece of rod. ``p[i-1]`` is the
    price for a rod of length ``i``

    Note
    ----
    Because Python lists are 0-indexed, ``max_rev`` has n + 1 entries so
    that ``max_rev[0]`` covers the empty rod.

    Returns
    -------
    The maximum revenue obtainable for a rod of length n given the list of
    prices for each piece.

    Examples
    -------
    >>> top_down_cut_rod(4, [1, 5, 8, 9])
    10
    >>> top_down_cut_rod(10, [1, 5, 8, 9, 10, 17, 17, 20, 24, 30])
    30
    """
    _enforce_args(n, prices)
    # -inf marks "not computed yet" for every length 0..n.
    memo = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, memo)
+
+
+def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
+ """
+ Constructs a top-down dynamic programming solution for the rod-cutting problem
+ via memoization.
+
+ Runtime: O(n^2)
+
+ Arguments
+ --------
+ n: int, the length of the rod
+ prices: list, the prices for each piece of rod. ``p[i-i]`` is the
+ price for a rod of length ``i``
+ max_rev: list, the computed maximum revenue for a piece of rod.
+ ``max_rev[i]`` is the maximum revenue obtainable for a rod of length ``i``
+
+ Returns
+ -------
+ The maximum revenue obtainable for a rod of length n given the list of prices
+ for each piece.
+ """
+ if max_rev[n] >= 0:
+ return max_rev[n]
+ elif n == 0:
+ return 0
+ else:
+ max_revenue = float("-inf")
+ for i in range(1, n + 1):
+ max_revenue = max(
+ max_revenue,
+ prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
+ )
+
+ max_rev[n] = max_revenue
+
+ return max_rev[n]
+
+
def bottom_up_cut_rod(n: int, prices: list):
    """
    Iterative (bottom-up) dynamic programming solution for the rod-cutting
    problem.

    Runtime: O(n^2)

    Arguments
    ----------
    n: int, the maximum length of the rod.
    prices: list, the prices for each piece of rod. ``p[i-1]`` is the
    price for a rod of length ``i``

    Returns
    -------
    The maximum revenue obtainable from cutting a rod of length n given
    the prices for each piece of rod p.

    Examples
    -------
    >>> bottom_up_cut_rod(4, [1, 5, 8, 9])
    10
    >>> bottom_up_cut_rod(10, [1, 5, 8, 9, 10, 17, 17, 20, 24, 30])
    30
    """
    _enforce_args(n, prices)

    # max_rev[i] is the best revenue for a rod of length i; the extra slot
    # at index 0 represents the empty rod.
    max_rev = [float("-inf")] * (n + 1)
    max_rev[0] = 0

    for length in range(1, n + 1):
        best = max_rev[length]
        for first_cut in range(1, length + 1):
            # Sell a first piece of `first_cut`, keep the best of the rest.
            best = max(best, prices[first_cut - 1] + max_rev[length - first_cut])
        max_rev[length] = best

    return max_rev[n]
+
+
+def _enforce_args(n: int, prices: list):
+ """
+ Basic checks on the arguments to the rod-cutting algorithms
+
+ n: int, the length of the rod
+ prices: list, the price list for each piece of rod.
+
+ Throws ValueError:
+
+ if n is negative or there are fewer items in the price list than the length of
+ the rod
+ """
+ if n < 0:
+ raise ValueError(f"n must be greater than or equal to 0. Got n = {n}")
+
+ if n > len(prices):
+ raise ValueError(
+ f"Each integral piece of rod must have a corresponding "
+ f"price. Got n = {n} but length of prices = {len(prices)}"
+ )
- for i in range(1,n):
- if(solutions[i] == -1):
- #We haven't calulated solution for length i yet.
- #We know we sell the part of length i so we get prices[i].
- #We just need to know how to sell rod of length n-i
- yesCut[i] = prices[i] + CutRod(n-i)
- else:
- #We have calculated solution for length i.
- #We add the two prices.
- yesCut[i] = prices[i] + solutions[n-i]
- #We need to find the highest price in order to sell more efficiently.
- #We have to choose between noCut and the prices in yesCut.
- m = noCut #Initialize max to noCut
- for i in range(n):
- if(yesCut[i] > m):
- m = yesCut[i]
def main():
    """Sanity-check that all three implementations agree on a sample input."""
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    results = (
        top_down_cut_rod(n, prices),
        bottom_up_cut_rod(n, prices),
        naive_cut_rod_recursive(n, prices),
    )
    assert all(revenue == expected_max_revenue for revenue in results)
-### EXAMPLE ###
-length = 5
-#The first price, 0, is for when we have no rod.
-prices = [0, 1, 3, 7, 9, 11, 13, 17, 21, 21, 30]
-solutions = [-1 for x in range(length+1)]
-print(CutRod(length))
if __name__ == "__main__":
    # Run the cross-implementation consistency checks.
    main()
diff --git a/dynamic_programming/subset_generation.py b/dynamic_programming/subset_generation.py
new file mode 100644
index 000000000000..4781b23b32eb
--- /dev/null
+++ b/dynamic_programming/subset_generation.py
@@ -0,0 +1,43 @@
+# Print all subset combinations of n element in given set of r element.
+
+
def combination_util(arr, n, r, index, data, i):
    """
    Recursively build and print every size-r combination of arr.

    arr   ---> input array of n elements
    data  ---> scratch buffer holding the combination built so far
    index ---> next free slot in data
    i     ---> next candidate position in arr
    r     ---> size of each printed combination
    """
    if index == r:
        # A full combination is ready: print its r entries.
        for slot in range(r):
            print(data[slot], end=" ")
        print(" ")
        return
    if i >= n:
        # No elements remain to fill the rest of data.
        return
    data[index] = arr[i]
    # Include arr[i] and move both cursors forward.
    combination_util(arr, n, r, index + 1, data, i + 1)
    # Exclude arr[i]: advance i but keep index unchanged.
    combination_util(arr, n, r, index, data, i + 1)
+
+
def print_combination(arr, n, r):
    """Print every r-element combination of arr (length n), one per line."""
    # Scratch buffer reused by combination_util to hold one combination.
    scratch = [0] * r
    combination_util(arr, n, r, 0, scratch, 0)
+
+
+# Driver function to check for above function
# Driver code: print every 3-element combination of the sample array.
arr = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
diff --git a/dynamic_programming/sum_of_subset.py b/dynamic_programming/sum_of_subset.py
new file mode 100644
index 000000000000..a12177b57c74
--- /dev/null
+++ b/dynamic_programming/sum_of_subset.py
@@ -0,0 +1,37 @@
def isSumSubset(arr, arrLen, requiredSum):
    """
    Return True when some subset of the first ``arrLen`` items of ``arr``
    sums exactly to ``requiredSum``.

    Fixed to *return* the boolean instead of printing it (the printed
    doctest output is unchanged because the repr of a bool is identical).

    >>> isSumSubset([2, 4, 6, 8], 4, 5)
    False
    >>> isSumSubset([2, 4, 6, 8], 4, 14)
    True
    """
    # subset[i][j] is True when some subset of the first i elements sums to j;
    # everything starts False (no subsets formed yet).
    subset = [[False for i in range(requiredSum + 1)] for i in range(arrLen + 1)]

    # A sum of zero is always achievable by taking no elements.
    for i in range(arrLen + 1):
        subset[i][0] = True

    for i in range(1, arrLen + 1):
        for j in range(1, requiredSum + 1):
            if arr[i - 1] > j:
                # Element too large for this target sum: skip it.
                subset[i][j] = subset[i - 1][j]
            else:
                # Either skip the element or use it and cover the remainder.
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arrLen][requiredSum]
+
+
if __name__ == "__main__":
    import doctest

    # Run the doctests embedded in this module when executed directly.
    doctest.testmod()
diff --git a/electronics/electric_power.py b/electronics/electric_power.py
new file mode 100644
index 000000000000..768c3d5c7232
--- /dev/null
+++ b/electronics/electric_power.py
@@ -0,0 +1,49 @@
+# https://en.m.wikipedia.org/wiki/Electric_power
+from collections import namedtuple
+
+
def electric_power(voltage: float, current: float, power: float) -> tuple:
    """
    Compute whichever of (voltage, current, power) is passed as zero from
    the other two, using P = V * I.  Returns a ``result`` namedtuple naming
    the computed quantity.  (Return annotation fixed: the function returns a
    namedtuple, not a float; the garbled doctest tracebacks are repaired.)

    >>> electric_power(voltage=0, current=2, power=5)
    result(name='voltage', value=2.5)
    >>> electric_power(voltage=2, current=2, power=0)
    result(name='power', value=4.0)
    >>> electric_power(voltage=-2, current=3, power=0)
    result(name='power', value=6.0)
    >>> electric_power(voltage=2, current=4, power=2)
    Traceback (most recent call last):
        ...
    ValueError: Only one argument must be 0
    >>> electric_power(voltage=0, current=0, power=2)
    Traceback (most recent call last):
        ...
    ValueError: Only one argument must be 0
    >>> electric_power(voltage=0, current=2, power=-4)
    Traceback (most recent call last):
        ...
    ValueError: Power cannot be negative in any electrical/electronics system
    >>> electric_power(voltage=2.2, current=2.2, power=0)
    result(name='power', value=4.84)
    """
    result = namedtuple("result", "name value")
    # Exactly one unknown is allowed; the caller flags it by passing 0.
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system"
        )
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    else:
        # power == 0 is the only remaining possibility (explicit else
        # removes the implicit-None fallthrough of the original).
        # Magnitude of V * I, rounded to 2 decimal places.
        return result("power", float(round(abs(voltage * current), 2)))
+
+
if __name__ == "__main__":
    import doctest

    # Run the doctests embedded in this module when executed directly.
    doctest.testmod()
diff --git a/electronics/ohms_law.py b/electronics/ohms_law.py
new file mode 100644
index 000000000000..a7b37b635397
--- /dev/null
+++ b/electronics/ohms_law.py
@@ -0,0 +1,39 @@
+# https://en.wikipedia.org/wiki/Ohm%27s_law
+
+
def ohms_law(voltage: float, current: float, resistance: float) -> dict:
    """
    Apply Ohm's Law, on any two given electrical values, which can be voltage,
    current, and resistance, and then in a Python dict return name/value pair
    of the zero value.  (Return annotation fixed: the function returns a dict,
    not a float.)

    >>> ohms_law(voltage=10, resistance=5, current=0)
    {'current': 2.0}
    >>> ohms_law(voltage=0, current=0, resistance=10)
    Traceback (most recent call last):
    ...
    ValueError: One and only one argument must be 0
    >>> ohms_law(voltage=0, current=1, resistance=-2)
    Traceback (most recent call last):
    ...
    ValueError: Resistance cannot be negative
    >>> ohms_law(resistance=0, voltage=-10, current=1)
    {'resistance': -10.0}
    >>> ohms_law(voltage=0, current=-1.5, resistance=2)
    {'voltage': -3.0}
    """
    # Exactly one unknown is allowed; the caller flags it by passing 0.
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    else:
        # resistance == 0 is the only remaining possibility (explicit else
        # removes the implicit-None fallthrough of the original).
        return {"resistance": voltage / current}
+
+
if __name__ == "__main__":
    import doctest

    # Run the doctests embedded in this module when executed directly.
    doctest.testmod()
diff --git a/file_transfer/__init__.py b/file_transfer/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/file_transfer/mytext.txt b/file_transfer/mytext.txt
new file mode 100644
index 000000000000..54cfa7f766c7
--- /dev/null
+++ b/file_transfer/mytext.txt
@@ -0,0 +1,6 @@
+Hello
+This is sample data
+«küßî»
+“ЌύБЇ”
+😀😉
+😋
diff --git a/file_transfer/receive_file.py b/file_transfer/receive_file.py
new file mode 100644
index 000000000000..cfba6ed88484
--- /dev/null
+++ b/file_transfer/receive_file.py
@@ -0,0 +1,23 @@
# Client script: connects to the companion send_file.py server on this machine
# and writes the received bytes into a local file named "Received_file".
if __name__ == "__main__":
    import socket  # Import socket module

    sock = socket.socket()  # Create a socket object
    host = socket.gethostname()  # Get local machine name
    port = 12312  # must match the port send_file.py listens on

    sock.connect((host, port))
    sock.send(b"Hello server!")  # greeting consumed by the server's recv()

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            print(f"data={data}")
            if not data:  # empty bytes => the server closed the connection
                break
            out_file.write(data)  # Write data to a file

    print("Successfully got the file")
    sock.close()
    print("Connection closed")
diff --git a/file_transfer/send_file.py b/file_transfer/send_file.py
new file mode 100644
index 000000000000..5b53471dfb50
--- /dev/null
+++ b/file_transfer/send_file.py
@@ -0,0 +1,35 @@
def send_file(filename: str = "mytext.txt", testing: bool = False) -> None:
    """Serve the contents of ``filename`` to TCP clients on port 12312.

    Accepts connections in a loop; for each client, reads one greeting
    message, then streams the file in 1024-byte chunks.  When ``testing``
    is True the accept loop exits after the first client so automated
    tests can complete.
    """
    import socket

    port = 12312  # Reserve a port for your service.
    sock = socket.socket()  # Create a socket object
    host = socket.gethostname()  # Get local machine name
    sock.bind((host, port))  # Bind to the port
    sock.listen(5)  # Now wait for client connection.

    print("Server listening....")

    while True:
        conn, addr = sock.accept()  # Establish connection with client.
        print(f"Got connection from {addr}")
        data = conn.recv(1024)  # consume the client's greeting
        print(f"Server received {data}")

        with open(filename, "rb") as in_file:
            data = in_file.read(1024)
            while data:
                conn.send(data)
                print(f"Sent {data!r}")
                data = in_file.read(1024)

        print("Done sending")
        conn.close()
        if testing:  # Allow the test to complete
            break

    sock.shutdown(1)
    sock.close()


if __name__ == "__main__":
    send_file()
diff --git a/file_transfer/tests/__init__.py b/file_transfer/tests/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/file_transfer/tests/test_send_file.py b/file_transfer/tests/test_send_file.py
new file mode 100644
index 000000000000..2a6008448362
--- /dev/null
+++ b/file_transfer/tests/test_send_file.py
@@ -0,0 +1,31 @@
+from unittest.mock import Mock, patch
+
+from file_transfer.send_file import send_file
+
+
+@patch("socket.socket")
+@patch("builtins.open")
+def test_send_file_running_as_expected(file, sock):
+ # ===== initialization =====
+ conn = Mock()
+ sock.return_value.accept.return_value = conn, Mock()
+ f = iter([1, None])
+ file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)
+
+ # ===== invoke =====
+ send_file(filename="mytext.txt", testing=True)
+
+ # ===== ensurance =====
+ sock.assert_called_once()
+ sock.return_value.bind.assert_called_once()
+ sock.return_value.listen.assert_called_once()
+ sock.return_value.accept.assert_called_once()
+ conn.recv.assert_called_once()
+
+ file.return_value.__enter__.assert_called_once()
+ file.return_value.__enter__.return_value.read.assert_called()
+
+ conn.send.assert_called_once()
+ conn.close.assert_called_once()
+ sock.return_value.shutdown.assert_called_once()
+ sock.return_value.close.assert_called_once()
diff --git a/file_transfer_protocol/ftp_client_server.py b/file_transfer_protocol/ftp_client_server.py
deleted file mode 100644
index 414c336dee9f..000000000000
--- a/file_transfer_protocol/ftp_client_server.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# server
-
-import socket # Import socket module
-
-port = 60000 # Reserve a port for your service.
-s = socket.socket() # Create a socket object
-host = socket.gethostname() # Get local machine name
-s.bind((host, port)) # Bind to the port
-s.listen(5) # Now wait for client connection.
-
-print('Server listening....')
-
-while True:
- conn, addr = s.accept() # Establish connection with client.
- print('Got connection from', addr)
- data = conn.recv(1024)
- print('Server received', repr(data))
-
- filename = 'mytext.txt'
- with open(filename, 'rb') as f:
- in_data = f.read(1024)
- while in_data:
- conn.send(in_data)
- print('Sent ', repr(in_data))
- in_data = f.read(1024)
-
- print('Done sending')
- conn.send('Thank you for connecting')
- conn.close()
-
-
-# client side server
-
-import socket # Import socket module
-
-s = socket.socket() # Create a socket object
-host = socket.gethostname() # Get local machine name
-port = 60000 # Reserve a port for your service.
-
-s.connect((host, port))
-s.send("Hello server!")
-
-with open('received_file', 'wb') as f:
- print('file opened')
- while True:
- print('receiving data...')
- data = s.recv(1024)
- print('data=%s', (data))
- if not data:
- break
- # write data to a file
- f.write(data)
-
-f.close()
-print('Successfully get the file')
-s.close()
-print('connection closed')
diff --git a/file_transfer_protocol/ftp_send_receive.py b/file_transfer_protocol/ftp_send_receive.py
deleted file mode 100644
index 6a9819ef3f21..000000000000
--- a/file_transfer_protocol/ftp_send_receive.py
+++ /dev/null
@@ -1,36 +0,0 @@
-"""
-File transfer protocol used to send and receive files using FTP server.
-Use credentials to provide access to the FTP client
-
-Note: Do not use root username & password for security reasons
-Create a seperate user and provide access to a home directory of the user
-Use login id and password of the user created
-cwd here stands for current working directory
-"""
-
-from ftplib import FTP
-ftp = FTP('xxx.xxx.x.x') # Enter the ip address or the domain name here
-ftp.login(user='username', passwd='password')
-ftp.cwd('/Enter the directory here/')
-
-"""
-The file which will be received via the FTP server
-Enter the location of the file where the file is received
-"""
-
-def ReceiveFile():
- FileName = 'example.txt' """ Enter the location of the file """
- with open(FileName, 'wb') as LocalFile:
- ftp.retrbinary('RETR ' + FileName, LocalFile.write, 1024)
- ftp.quit()
-
-"""
-The file which will be sent via the FTP server
-The file send will be send to the current working directory
-"""
-
-def SendFile():
- FileName = 'example.txt' """ Enter the name of the file """
- with open(FileName, 'rb') as LocalFile:
- ftp.storbinary('STOR ' + FileName, LocalFile)
- ftp.quit()
diff --git a/fuzzy_logic/__init__.py b/fuzzy_logic/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/fuzzy_logic/fuzzy_operations.py b/fuzzy_logic/fuzzy_operations.py
new file mode 100644
index 000000000000..0f573f158663
--- /dev/null
+++ b/fuzzy_logic/fuzzy_operations.py
@@ -0,0 +1,102 @@
+"""README, Author - Jigyasa Gandhi(mailto:jigsgandhi97@gmail.com)
+Requirements:
+ - scikit-fuzzy
+ - numpy
+ - matplotlib
+Python:
+ - 3.5
+"""
+import numpy as np
+import skfuzzy as fuzz
+
if __name__ == "__main__":
    # Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # max-min composition
    # max-product composition

    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()

    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title("Young")
    plt.grid(True)

    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title("Middle aged")
    plt.grid(True)

    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title("union")
    plt.grid(True)

    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title("intersection")
    plt.grid(True)

    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title("complement_a")
    plt.grid(True)

    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title("difference a/b")
    plt.grid(True)

    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title("alg_sum")
    plt.grid(True)

    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title("alg_product")
    plt.grid(True)

    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title("bdd_sum")
    plt.grid(True)

    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title("bdd_difference")
    plt.grid(True)

    plt.subplots_adjust(hspace=0.5)
    plt.show()
diff --git a/genetic_algorithm/__init__.py b/genetic_algorithm/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/genetic_algorithm/basic_string.py b/genetic_algorithm/basic_string.py
new file mode 100644
index 000000000000..97dbe182bc82
--- /dev/null
+++ b/genetic_algorithm/basic_string.py
@@ -0,0 +1,175 @@
+"""
+Simple multithreaded algorithm to show how the 4 phases of a genetic algorithm works
+(Evaluation, Selection, Crossover and Mutation)
+https://en.wikipedia.org/wiki/Genetic_algorithm
+Author: D4rkia
+"""
+
+from __future__ import annotations
+
+import random
+
# Maximum size of the population.  Bigger could be faster but is more
# memory-expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution.  The selection
# takes place from best to worst of that generation and must be smaller than
# N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its
# genes.  This guarantees that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# just a seed to improve randomness required by the algorithm
random.seed(random.randint(0, 1000))
+
+
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """
    Evolve random strings toward ``target`` and return
    (generation, total_population, best_string) once a perfect match is found.

    Verify that the target contains no genes besides the ones inside genes variable.

    >>> from string import ascii_lowercase
    >>> basic("doctest", ascii_lowercase, debug=False)[2]
    'doctest'
    >>> genes = list(ascii_lowercase)
    >>> genes.remove("e")
    >>> basic("test", genes)
    Traceback (most recent call last):
    ...
    ValueError: ['e'] is not in genes list, evolution cannot converge
    >>> genes.remove("s")
    >>> basic("test", genes)
    Traceback (most recent call last):
    ...
    ValueError: ['e', 's'] is not in genes list, evolution cannot converge
    >>> genes.remove("t")
    >>> basic("test", genes)
    Traceback (most recent call last):
    ...
    ValueError: ['e', 's', 't'] is not in genes list, evolution cannot converge
    """

    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        raise ValueError(f"{N_POPULATION} must be bigger than {N_SELECTED}")
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        raise ValueError(
            f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        )

    # Generate random starting population
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing
    generation, total_population = 0, 0

    # This loop will end when we will find a perfect match for our target
    while True:
        generation += 1
        total_population += len(population)

        # Random population created now it's time to evaluate
        def evaluate(item: str, main_target: str = target) -> tuple[str, float]:
            """
            Evaluate how similar the item is with the target by just
            counting each char in the right position
            >>> evaluate("Helxo Worlx", "Hello World")
            ('Helxo Worlx', 9.0)
            """
            score = len(
                [g for position, g in enumerate(item) if g == main_target[position]]
            )
            return (item, float(score))

        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this will probably be slower
        # we just need to call evaluate for every item inside population
        population_score = [evaluate(item) for item in population]

        # Check if there is a matching evolution
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the Best result every 10 generation
        # just to know that the algorithm is working
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population keeping some of the best evolutions
        # Keeping this avoid regression of evolution
        # NOTE(review): `population` itself is not sorted by score here, so this
        # slice keeps the first third of the previous generation rather than the
        # top scorers — confirm whether that is intended.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score from 0 to 1
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # Select, Crossover and Mutate a new population
        def select(parent_1: tuple[str, float]) -> list[str]:
            """Select the second parent and generate new population"""
            pop = []
            # Generate more child proportionally to the fitness score
            child_n = int(parent_1[1] * 100) + 1
            child_n = 10 if child_n >= 10 else child_n
            for _ in range(child_n):
                # NOTE(review): randint's upper bound is inclusive, so index
                # N_SELECTED itself can be drawn here.
                parent_2 = population_score[random.randint(0, N_SELECTED)][0]
                child_1, child_2 = crossover(parent_1[0], parent_2)
                # Append new string to the population list
                pop.append(mutate(child_1))
                pop.append(mutate(child_2))
            return pop

        def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
            """Slice and combine two string in a random point"""
            random_slice = random.randint(0, len(parent_1) - 1)
            child_1 = parent_1[:random_slice] + parent_2[random_slice:]
            child_2 = parent_2[:random_slice] + parent_1[random_slice:]
            return (child_1, child_2)

        def mutate(child: str) -> str:
            """Mutate a random gene of a child with another one from the list"""
            child_list = list(child)
            if random.uniform(0, 1) < MUTATION_PROBABILITY:
                # randint(0, len(child)) - 1 yields -1..len-1; -1 wraps to the
                # last character, which is valid Python indexing.
                child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
            return "".join(child_list)

        # This is Selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)]))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. if this check is disabled the algorithm will take
            # forever to compute large strings but will also calculate small string in
            # a lot fewer generations
            if len(population) > N_POPULATION:
                break
+
+
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    # basic() returns a 3-tuple that fills the three %s placeholders.
    print(
        "\nGeneration: %s\nTotal Population: %s\nTarget: %s"
        % basic(target_str, genes_list)
    )
diff --git a/geodesy/__init__.py b/geodesy/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/geodesy/haversine_distance.py b/geodesy/haversine_distance.py
new file mode 100644
index 000000000000..de8ac7f88302
--- /dev/null
+++ b/geodesy/haversine_distance.py
@@ -0,0 +1,56 @@
+from math import asin, atan, cos, radians, sin, sqrt, tan
+
+
def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Return the great-circle distance between two points on a sphere,
    given their longitudes and latitudes.
    https://en.wikipedia.org/wiki/Haversine_formula

    The globe is only "sort of" spherical, so a surface path between two
    points is an arc rather than a straight line, and the curvature must be
    accounted for. The effect is negligible over small distances but adds up
    with range. The haversine method projects both points onto a sphere and
    measures the spherical arc between them. Since the Earth is not a
    perfect sphere, ellipsoidal models are more accurate, but haversine is a
    quick, easily modified approximation for shorter-range distances.

    Args:
        lat1, lon1: latitude and longitude of coordinate 1
        lat2, lon2: latitude and longitude of coordinate 2
    Returns:
        geographical distance between two points in metres
    >>> from collections import namedtuple
    >>> point_2d = namedtuple("point_2d", "lat lon")
    >>> SAN_FRANCISCO = point_2d(37.774856, -122.424227)
    >>> YOSEMITE = point_2d(37.864742, -119.537521)
    >>> f"{haversine_distance(*SAN_FRANCISCO, *YOSEMITE):0,.0f} meters"
    '254,352 meters'
    """
    # WGS84 constants, in metres
    # https://en.wikipedia.org/wiki/World_Geodetic_System
    equatorial_axis = 6378137.0
    polar_axis = 6356752.314245
    sphere_radius = 6378137
    # https://en.wikipedia.org/wiki/Haversine_formula#Formulation
    flattening = (equatorial_axis - polar_axis) / equatorial_axis
    # Parametric (reduced) latitudes compensate for the Earth's flattening.
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Sines of the half-differences, squared in place below.
    half_phi = sin((phi_2 - phi_1) / 2)
    half_lambda = sin((lambda_2 - lambda_1) / 2)
    h_value = sqrt(
        half_phi * half_phi + (cos(phi_1) * cos(phi_2) * (half_lambda * half_lambda))
    )
    return 2 * sphere_radius * asin(h_value)
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/geodesy/lamberts_ellipsoidal_distance.py b/geodesy/lamberts_ellipsoidal_distance.py
new file mode 100644
index 000000000000..bf8f1b9a5080
--- /dev/null
+++ b/geodesy/lamberts_ellipsoidal_distance.py
@@ -0,0 +1,86 @@
+from math import atan, cos, radians, sin, tan
+
+from .haversine_distance import haversine_distance
+
+
def lamberts_ellipsoidal_distance(
    lat1: float, lon1: float, lat2: float, lon2: float
) -> float:

    """
    Calculate the shortest distance along the surface of an ellipsoid between
    two points on the surface of earth given longitudes and latitudes
    https://en.wikipedia.org/wiki/Geographical_distance#Lambert's_formula_for_long_lines

    NOTE: This algorithm uses geodesy/haversine_distance.py to compute the
    central angle, sigma

    Treating the Earth as an oblate ellipsoid accounts for the flattening at
    the North and South poles, so distances between surface points come out
    much better than with a spherical model. Lambert's formulae are accurate
    to roughly 10 metres over thousands of kilometres. Other methods reach
    millimetre-level accuracy, but this one covers long ranges without extra
    computational cost.

    Args:
        lat1, lon1: latitude and longitude of coordinate 1
        lat2, lon2: latitude and longitude of coordinate 2
    Returns:
        geographical distance between two points in metres

    >>> from collections import namedtuple
    >>> point_2d = namedtuple("point_2d", "lat lon")
    >>> SAN_FRANCISCO = point_2d(37.774856, -122.424227)
    >>> YOSEMITE = point_2d(37.864742, -119.537521)
    >>> NEW_YORK = point_2d(40.713019, -74.012647)
    >>> VENICE = point_2d(45.443012, 12.313071)
    >>> f"{lamberts_ellipsoidal_distance(*SAN_FRANCISCO, *YOSEMITE):0,.0f} meters"
    '254,351 meters'
    >>> f"{lamberts_ellipsoidal_distance(*SAN_FRANCISCO, *NEW_YORK):0,.0f} meters"
    '4,138,992 meters'
    >>> f"{lamberts_ellipsoidal_distance(*SAN_FRANCISCO, *VENICE):0,.0f} meters"
    '9,737,326 meters'
    """

    # WGS84 ellipsoid constants, in metres
    # https://en.wikipedia.org/wiki/World_Geodetic_System
    equatorial_axis = 6378137.0
    polar_axis = 6356752.314245
    equatorial_radius = 6378137

    # https://en.wikipedia.org/wiki/Geographical_distance#Lambert's_formula_for_long_lines
    flattening = (equatorial_axis - polar_axis) / equatorial_axis

    # Parametric (reduced) latitudes of both points
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Central angle between the two points, derived from the spherical
    # (haversine) distance: sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / equatorial_radius

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # X = (sigma - sin(sigma)) * sin^2(P)cos^2(Q) / cos^2(sigma/2)
    x_value = (sigma - sin(sigma)) * (
        ((sin(p_value) ** 2) * (cos(q_value) ** 2)) / (cos(sigma / 2) ** 2)
    )

    # Y = (sigma + sin(sigma)) * cos^2(P)sin^2(Q) / sin^2(sigma/2)
    y_value = (sigma + sin(sigma)) * (
        ((cos(p_value) ** 2) * (sin(q_value) ** 2)) / (sin(sigma / 2) ** 2)
    )

    return equatorial_radius * (sigma - ((flattening / 2) * (x_value + y_value)))
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/graphics/__init__.py b/graphics/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/graphics/bezier_curve.py b/graphics/bezier_curve.py
new file mode 100644
index 000000000000..295ff47e8cdc
--- /dev/null
+++ b/graphics/bezier_curve.py
@@ -0,0 +1,114 @@
+# https://en.wikipedia.org/wiki/B%C3%A9zier_curve
+# https://www.tutorialspoint.com/computer_graphics/computer_graphics_curves.htm
+from __future__ import annotations
+
+from scipy.special import comb
+
+
class BezierCurve:
    """A 2d Bezier curve: a weighted sum of a set of control points.

    The weights are the Bernstein basis polynomials evaluated at time t.
    This implementation works only for 2d coordinates in the xy plane.
    """

    def __init__(self, list_of_points: list[tuple[float, float]]):
        """
        list_of_points: Control points in the xy plane on which to interpolate.
            These points control the behavior (shape) of the Bezier curve.
        """
        self.list_of_points = list_of_points
        # n + 1 control points give a curve of degree n; degree 1 is a line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        """
        The basis function determines the weight of each control point at time t.
        t: time value between 0 and 1 inclusive at which to evaluate the basis of
        the curve.
        returns the x, y values of basis function at time t

        >>> curve = BezierCurve([(1,1), (1,2)])
        >>> curve.basis_function(0)
        [1.0, 0.0]
        >>> curve.basis_function(1)
        [0.0, 1.0]
        """
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        # Bernstein polynomial of degree n, term i, evaluated at t.
        weights = [
            comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t ** i)
            for i in range(len(self.list_of_points))
        ]
        # A valid Bernstein basis always sums to 1.
        assert round(sum(weights), 5) == 1
        return weights

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        """
        The function to produce the values of the Bezier curve at time t.
        t: the value of time t at which to evaluate the Bezier function
        Returns the x, y coordinates of the Bezier curve at time t.
        The first point in the curve is when t = 0.
        The last point in the curve is when t = 1.

        >>> curve = BezierCurve([(1,1), (1,2)])
        >>> curve.bezier_curve_function(0)
        (1.0, 1.0)
        >>> curve.bezier_curve_function(1)
        (1.0, 2.0)
        """

        assert 0 <= t <= 1, "Time t must be between 0 and 1."

        weights = self.basis_function(t)
        x = 0.0
        y = 0.0
        # Weighted sum of the control points, one weight per point.
        for weight, (point_x, point_y) in zip(weights, self.list_of_points):
            x += weight * point_x
            y += weight * point_y
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        """
        Plots the Bezier curve using matplotlib plotting capabilities.
        step_size: defines the step(s) at which to evaluate the Bezier curve.
        The smaller the step size, the finer the curve produced.
        """
        from matplotlib import pyplot as plt

        curve_xs: list[float] = []  # x coordinates of points to plot
        curve_ys: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            point_x, point_y = self.bezier_curve_function(t)
            curve_xs.append(point_x)
            curve_ys.append(point_y)
            t += step_size

        # The control points themselves, drawn on top of the curve.
        control_xs = [point[0] for point in self.list_of_points]
        control_ys = [point[1] for point in self.list_of_points]

        plt.plot(
            curve_xs,
            curve_ys,
            color="blue",
            label="Curve of Degree " + str(self.degree),
        )
        plt.scatter(control_xs, control_ys, color="red", label="Control Points")
        plt.legend()
        plt.show()
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
+
+ BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
+ BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
+ BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
diff --git a/graphics/vector3_for_2d_rendering.py b/graphics/vector3_for_2d_rendering.py
new file mode 100644
index 000000000000..dfa22262a8d8
--- /dev/null
+++ b/graphics/vector3_for_2d_rendering.py
@@ -0,0 +1,102 @@
+"""
+render 3d points for 2d surfaces.
+"""
+
+from __future__ import annotations
+
+import math
+
+__version__ = "2020.9.26"
+__author__ = "xcodz-dot, cclaus, dhruvmanila"
+
+
def convert_to_2d(
    x: float, y: float, z: float, scale: float, distance: float
) -> tuple[float, float]:
    """
    Converts 3d point to a 2d drawable point via a perspective divide:
    each of x and y is scaled by distance / (z + distance), then by scale.

    >>> convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0)
    (7.6923076923076925, 15.384615384615385)

    >>> convert_to_2d(1, 2, 3, 10, 10)
    (7.6923076923076925, 15.384615384615385)

    >>> convert_to_2d("1", 2, 3, 10, 10)  # '1' is str
    Traceback (most recent call last):
        ...
    TypeError: Input values must either be float or int: ['1', 2, 3, 10, 10]
    """
    inputs = [x, y, z, scale, distance]
    # Reject any non-numeric argument up front with a descriptive error.
    if not all(isinstance(value, (float, int)) for value in inputs):
        raise TypeError(f"Input values must either be float or int: {inputs}")
    perspective = (distance / (z + distance)) * scale
    return (x * distance) / (z + distance) * scale, (y * distance) / (
        z + distance
    ) * scale
+
+
def rotate(
    x: float, y: float, z: float, axis: str, angle: float
) -> tuple[float, float, float]:
    """
    rotate a point around a certain axis with a certain angle
    angle can be any integer between 1, 360 and axis can be any one of
    'x', 'y', 'z'

    >>> rotate(1.0, 2.0, 3.0, 'y', 90.0)
    (3.130524675073759, 2.0, 0.4470070007889556)

    >>> rotate(1, 2, 3, "z", 180)
    (0.999736015495891, -2.0001319704760485, 3)

    >>> rotate('1', 2, 3, "z", 90.0)  # '1' is str
    Traceback (most recent call last):
        ...
    TypeError: Input values except axis must either be float or int: ['1', 2, 3, 90.0]

    >>> rotate(1, 2, 3, "n", 90)  # 'n' is not a valid axis
    Traceback (most recent call last):
        ...
    ValueError: not a valid axis, choose one of 'x', 'y', 'z'

    >>> rotate(1, 2, 3, "x", -90)
    (1, -2.5049096187183877, -2.5933429780983657)

    >>> rotate(1, 2, 3, "x", 450)  # 450 wrap around to 90
    (1, 3.5776792428178217, -0.44744970165427644)
    """
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    coordinates = [x, y, z, angle]
    if not all(isinstance(value, (float, int)) for value in coordinates):
        raise TypeError(
            "Input values except axis must either be float or int: "
            f"{coordinates}"
        )
    # NOTE(review): dividing by 450 is an unusual degree-to-radian
    # conversion, but the doctests above pin this exact behaviour, so it
    # is reproduced unchanged here.
    angle = (angle % 360) / 450 * 180 / math.pi
    cos_a = math.cos(angle)
    sin_a = math.sin(angle)
    # The coordinate on the rotation axis is returned untouched.
    if axis == "z":
        return (x * cos_a - y * sin_a, y * cos_a + x * sin_a, z)
    if axis == "x":
        return (x, y * cos_a - z * sin_a, z * cos_a + y * sin_a)
    if axis == "y":
        return (x * cos_a - z * sin_a, y, z * cos_a + x * sin_a)
    raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
+ print(f"{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }")
+ print(f"{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }")
diff --git a/graphs/BFS.py b/graphs/BFS.py
deleted file mode 100644
index bf9b572cec50..000000000000
--- a/graphs/BFS.py
+++ /dev/null
@@ -1,39 +0,0 @@
-"""pseudo-code"""
-
-"""
-BFS(graph G, start vertex s):
-// all nodes initially unexplored
-mark s as explored
-let Q = queue data structure, initialized with s
-while Q is non-empty:
- remove the first node of Q, call it v
- for each edge(v, w): // for w in graph[v]
- if w unexplored:
- mark w as explored
- add w to Q (at the end)
-
-"""
-
-import collections
-
-
-def bfs(graph, start):
- explored, queue = set(), [start] # collections.deque([start])
- explored.add(start)
- while queue:
- v = queue.pop(0) # queue.popleft()
- for w in graph[v]:
- if w not in explored:
- explored.add(w)
- queue.append(w)
- return explored
-
-
-G = {'A': ['B', 'C'],
- 'B': ['A', 'D', 'E'],
- 'C': ['A', 'F'],
- 'D': ['B'],
- 'E': ['B', 'F'],
- 'F': ['C', 'E']}
-
-print(bfs(G, 'A'))
diff --git a/graphs/DFS.py b/graphs/DFS.py
deleted file mode 100644
index d3c34fabb7b3..000000000000
--- a/graphs/DFS.py
+++ /dev/null
@@ -1,36 +0,0 @@
-"""pseudo-code"""
-
-"""
-DFS(graph G, start vertex s):
-// all nodes initially unexplored
-mark s as explored
-for every edge (s, v):
- if v unexplored:
- DFS(G, v)
-"""
-
-
-def dfs(graph, start):
- """The DFS function simply calls itself recursively for every unvisited child of its argument. We can emulate that
- behaviour precisely using a stack of iterators. Instead of recursively calling with a node, we'll push an iterator
- to the node's children onto the iterator stack. When the iterator at the top of the stack terminates, we'll pop
- it off the stack."""
- explored, stack = set(), [start]
- explored.add(start)
- while stack:
- v = stack.pop() # the only difference from BFS is to pop last element here instead of first one
- for w in graph[v]:
- if w not in explored:
- explored.add(w)
- stack.append(w)
- return explored
-
-
-G = {'A': ['B', 'C'],
- 'B': ['A', 'D', 'E'],
- 'C': ['A', 'F'],
- 'D': ['B'],
- 'E': ['B', 'F'],
- 'F': ['C', 'E']}
-
-print(dfs(G, 'A'))
diff --git a/graphs/Directed and Undirected (Weighted) Graph.py b/graphs/Directed and Undirected (Weighted) Graph.py
deleted file mode 100644
index a31a4a96d6d0..000000000000
--- a/graphs/Directed and Undirected (Weighted) Graph.py
+++ /dev/null
@@ -1,472 +0,0 @@
-from collections import deque
-import random as rand
-import math as math
-import time
-
-# the dfault weight is 1 if not assigend but all the implementation is weighted
-
-class DirectedGraph:
- def __init__(self):
- self.graph = {}
-
- # adding vertices and edges
- # adding the weight is optional
- # handels repetition
- def add_pair(self, u, v, w = 1):
- if self.graph.get(u):
- if self.graph[u].count([w,v]) == 0:
- self.graph[u].append([w, v])
- else:
- self.graph[u] = [[w, v]]
- if not self.graph.get(v):
- self.graph[v] = []
-
- def all_nodes(self):
- return list(self.graph)
-
- # handels if the input does not exist
- def remove_pair(self, u, v):
- if self.graph.get(u):
- for _ in self.graph[u]:
- if _[1] == v:
- self.graph[u].remove(_)
-
- # if no destination is meant the defaut value is -1
- def dfs(self, s = -2, d = -1):
- if s == d:
- return []
- stack = []
- visited = []
- if s == -2:
- s = list(self.graph.keys())[0]
- stack.append(s)
- visited.append(s)
- ss = s
-
- while True:
- # check if there is any non isolated nodes
- if len(self.graph[s]) != 0:
- ss = s
- for __ in self.graph[s]:
- if visited.count(__[1]) < 1:
- if __[1] == d:
- visited.append(d)
- return visited
- else:
- stack.append(__[1])
- visited.append(__[1])
- ss =__[1]
- break
-
- # check if all the children are visited
- if s == ss :
- stack.pop()
- if len(stack) != 0:
- s = stack[len(stack) - 1]
- else:
- s = ss
-
- # check if se have reached the starting point
- if len(stack) == 0:
- return visited
-
- # c is the count of nodes you want and if you leave it or pass -1 to the funtion the count
- # will be random from 10 to 10000
- def fill_graph_randomly(self, c = -1):
- if c == -1:
- c = (math.floor(rand.random() * 10000)) + 10
- for _ in range(c):
- # every vertex has max 100 edges
- e = math.floor(rand.random() * 102) + 1
- for __ in range(e):
- n = math.floor(rand.random() * (c)) + 1
- if n == _:
- continue
- self.add_pair(_, n, 1)
-
- def bfs(self, s = -2):
- d = deque()
- visited = []
- if s == -2:
- s = list(self.graph.keys())[0]
- d.append(s)
- visited.append(s)
- while d:
- s = d.popleft()
- if len(self.graph[s]) != 0:
- for __ in self.graph[s]:
- if visited.count(__[1]) < 1:
- d.append(__[1])
- visited.append(__[1])
- return visited
- def in_degree(self, u):
- count = 0
- for _ in self.graph:
- for __ in self.graph[_]:
- if __[1] == u:
- count += 1
- return count
-
- def out_degree(self, u):
- return len(self.graph[u])
-
- def topological_sort(self, s = -2):
- stack = []
- visited = []
- if s == -2:
- s = list(self.graph.keys())[0]
- stack.append(s)
- visited.append(s)
- ss = s
- sorted_nodes = []
-
- while True:
- # check if there is any non isolated nodes
- if len(self.graph[s]) != 0:
- ss = s
- for __ in self.graph[s]:
- if visited.count(__[1]) < 1:
- stack.append(__[1])
- visited.append(__[1])
- ss =__[1]
- break
-
- # check if all the children are visited
- if s == ss :
- sorted_nodes.append(stack.pop())
- if len(stack) != 0:
- s = stack[len(stack) - 1]
- else:
- s = ss
-
- # check if se have reached the starting point
- if len(stack) == 0:
- return sorted_nodes
-
- def cycle_nodes(self):
- stack = []
- visited = []
- s = list(self.graph.keys())[0]
- stack.append(s)
- visited.append(s)
- parent = -2
- indirect_parents = []
- ss = s
- on_the_way_back = False
- anticipating_nodes = set()
-
- while True:
- # check if there is any non isolated nodes
- if len(self.graph[s]) != 0:
- ss = s
- for __ in self.graph[s]:
- if visited.count(__[1]) > 0 and __[1] != parent and indirect_parents.count(__[1]) > 0 and not on_the_way_back:
- l = len(stack) - 1
- while True and l >= 0:
- if stack[l] == __[1]:
- anticipating_nodes.add(__[1])
- break
- else:
- anticipating_nodes.add(stack[l])
- l -= 1
- if visited.count(__[1]) < 1:
- stack.append(__[1])
- visited.append(__[1])
- ss =__[1]
- break
-
- # check if all the children are visited
- if s == ss :
- stack.pop()
- on_the_way_back = True
- if len(stack) != 0:
- s = stack[len(stack) - 1]
- else:
- on_the_way_back = False
- indirect_parents.append(parent)
- parent = s
- s = ss
-
- # check if se have reached the starting point
- if len(stack) == 0:
- return list(anticipating_nodes)
-
- def has_cycle(self):
- stack = []
- visited = []
- s = list(self.graph.keys())[0]
- stack.append(s)
- visited.append(s)
- parent = -2
- indirect_parents = []
- ss = s
- on_the_way_back = False
- anticipating_nodes = set()
-
- while True:
- # check if there is any non isolated nodes
- if len(self.graph[s]) != 0:
- ss = s
- for __ in self.graph[s]:
- if visited.count(__[1]) > 0 and __[1] != parent and indirect_parents.count(__[1]) > 0 and not on_the_way_back:
- l = len(stack) - 1
- while True and l >= 0:
- if stack[l] == __[1]:
- anticipating_nodes.add(__[1])
- break
- else:
- return True
- anticipating_nodes.add(stack[l])
- l -= 1
- if visited.count(__[1]) < 1:
- stack.append(__[1])
- visited.append(__[1])
- ss =__[1]
- break
-
- # check if all the children are visited
- if s == ss :
- stack.pop()
- on_the_way_back = True
- if len(stack) != 0:
- s = stack[len(stack) - 1]
- else:
- on_the_way_back = False
- indirect_parents.append(parent)
- parent = s
- s = ss
-
- # check if se have reached the starting point
- if len(stack) == 0:
- return False
-
- def dfs_time(self, s = -2, e = -1):
- begin = time.time()
- self.dfs(s,e)
- end = time.time()
- return end - begin
-
- def bfs_time(self, s = -2):
- begin = time.time()
- self.bfs(s)
- end = time.time()
- return end - begin
-
-class Graph:
- def __init__(self):
- self.graph = {}
-
- # adding vertices and edges
- # adding the weight is optional
- # handels repetition
- def add_pair(self, u, v, w = 1):
- # check if the u exists
- if self.graph.get(u):
- # if there already is a edge
- if self.graph[u].count([w,v]) == 0:
- self.graph[u].append([w, v])
- else:
- # if u does not exist
- self.graph[u] = [[w, v]]
- # add the other way
- if self.graph.get(v):
- # if there already is a edge
- if self.graph[v].count([w,u]) == 0:
- self.graph[v].append([w, u])
- else:
- # if u does not exist
- self.graph[v] = [[w, u]]
-
- # handels if the input does not exist
- def remove_pair(self, u, v):
- if self.graph.get(u):
- for _ in self.graph[u]:
- if _[1] == v:
- self.graph[u].remove(_)
- # the other way round
- if self.graph.get(v):
- for _ in self.graph[v]:
- if _[1] == u:
- self.graph[v].remove(_)
-
- # if no destination is meant the defaut value is -1
- def dfs(self, s = -2, d = -1):
- if s == d:
- return []
- stack = []
- visited = []
- if s == -2:
- s = list(self.graph.keys())[0]
- stack.append(s)
- visited.append(s)
- ss = s
-
- while True:
- # check if there is any non isolated nodes
- if len(self.graph[s]) != 0:
- ss = s
- for __ in self.graph[s]:
- if visited.count(__[1]) < 1:
- if __[1] == d:
- visited.append(d)
- return visited
- else:
- stack.append(__[1])
- visited.append(__[1])
- ss =__[1]
- break
-
- # check if all the children are visited
- if s == ss :
- stack.pop()
- if len(stack) != 0:
- s = stack[len(stack) - 1]
- else:
- s = ss
-
- # check if se have reached the starting point
- if len(stack) == 0:
- return visited
-
- # c is the count of nodes you want and if you leave it or pass -1 to the funtion the count
- # will be random from 10 to 10000
- def fill_graph_randomly(self, c = -1):
- if c == -1:
- c = (math.floor(rand.random() * 10000)) + 10
- for _ in range(c):
- # every vertex has max 100 edges
- e = math.floor(rand.random() * 102) + 1
- for __ in range(e):
- n = math.floor(rand.random() * (c)) + 1
- if n == _:
- continue
- self.add_pair(_, n, 1)
-
- def bfs(self, s = -2):
- d = deque()
- visited = []
- if s == -2:
- s = list(self.graph.keys())[0]
- d.append(s)
- visited.append(s)
- while d:
- s = d.popleft()
- if len(self.graph[s]) != 0:
- for __ in self.graph[s]:
- if visited.count(__[1]) < 1:
- d.append(__[1])
- visited.append(__[1])
- return visited
- def degree(self, u):
- return len(self.graph[u])
-
- def cycle_nodes(self):
- stack = []
- visited = []
- s = list(self.graph.keys())[0]
- stack.append(s)
- visited.append(s)
- parent = -2
- indirect_parents = []
- ss = s
- on_the_way_back = False
- anticipating_nodes = set()
-
- while True:
- # check if there is any non isolated nodes
- if len(self.graph[s]) != 0:
- ss = s
- for __ in self.graph[s]:
- if visited.count(__[1]) > 0 and __[1] != parent and indirect_parents.count(__[1]) > 0 and not on_the_way_back:
- l = len(stack) - 1
- while True and l >= 0:
- if stack[l] == __[1]:
- anticipating_nodes.add(__[1])
- break
- else:
- anticipating_nodes.add(stack[l])
- l -= 1
- if visited.count(__[1]) < 1:
- stack.append(__[1])
- visited.append(__[1])
- ss =__[1]
- break
-
- # check if all the children are visited
- if s == ss :
- stack.pop()
- on_the_way_back = True
- if len(stack) != 0:
- s = stack[len(stack) - 1]
- else:
- on_the_way_back = False
- indirect_parents.append(parent)
- parent = s
- s = ss
-
- # check if se have reached the starting point
- if len(stack) == 0:
- return list(anticipating_nodes)
-
- def has_cycle(self):
- stack = []
- visited = []
- s = list(self.graph.keys())[0]
- stack.append(s)
- visited.append(s)
- parent = -2
- indirect_parents = []
- ss = s
- on_the_way_back = False
- anticipating_nodes = set()
-
- while True:
- # check if there is any non isolated nodes
- if len(self.graph[s]) != 0:
- ss = s
- for __ in self.graph[s]:
- if visited.count(__[1]) > 0 and __[1] != parent and indirect_parents.count(__[1]) > 0 and not on_the_way_back:
- l = len(stack) - 1
- while True and l >= 0:
- if stack[l] == __[1]:
- anticipating_nodes.add(__[1])
- break
- else:
- return True
- anticipating_nodes.add(stack[l])
- l -= 1
- if visited.count(__[1]) < 1:
- stack.append(__[1])
- visited.append(__[1])
- ss =__[1]
- break
-
- # check if all the children are visited
- if s == ss :
- stack.pop()
- on_the_way_back = True
- if len(stack) != 0:
- s = stack[len(stack) - 1]
- else:
- on_the_way_back = False
- indirect_parents.append(parent)
- parent = s
- s = ss
-
- # check if se have reached the starting point
- if len(stack) == 0:
- return False
- def all_nodes(self):
- return list(self.graph)
-
- def dfs_time(self, s = -2, e = -1):
- begin = time.time()
- self.dfs(s,e)
- end = time.time()
- return end - begin
-
- def bfs_time(self, s = -2):
- begin = time.time()
- self.bfs(s)
- end = time.time()
- return end - begin
diff --git a/graphs/__init__.py b/graphs/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/graphs/a_star.py b/graphs/a_star.py
index 584222e6f62b..cb5b2fcd16e8 100644
--- a/graphs/a_star.py
+++ b/graphs/a_star.py
@@ -1,44 +1,45 @@
-from __future__ import print_function
+grid = [
+ [0, 1, 0, 0, 0, 0],
+ [0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
+ [0, 1, 0, 0, 0, 0],
+ [0, 1, 0, 0, 1, 0],
+ [0, 0, 0, 0, 1, 0],
+]
-grid = [[0, 1, 0, 0, 0, 0],
- [0, 1, 0, 0, 0, 0],#0 are free path whereas 1's are obstacles
- [0, 1, 0, 0, 0, 0],
- [0, 1, 0, 0, 1, 0],
- [0, 0, 0, 0, 1, 0]]
-
-'''
+"""
heuristic = [[9, 8, 7, 6, 5, 4],
[8, 7, 6, 5, 4, 3],
[7, 6, 5, 4, 3, 2],
[6, 5, 4, 3, 2, 1],
- [5, 4, 3, 2, 1, 0]]'''
+ [5, 4, 3, 2, 1, 0]]"""
init = [0, 0]
-goal = [len(grid)-1, len(grid[0])-1] #all coordinates are given in format [y,x]
+goal = [len(grid) - 1, len(grid[0]) - 1] # all coordinates are given in format [y,x]
cost = 1
-#the cost map which pushes the path closer to the goal
+# the cost map which pushes the path closer to the goal
heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
-for i in range(len(grid)):
- for j in range(len(grid[0])):
+for i in range(len(grid)):
+ for j in range(len(grid[0])):
heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
- heuristic[i][j] = 99 #added extra penalty in the heuristic map
+ heuristic[i][j] = 99 # added extra penalty in the heuristic map
-#the actions we can take
-delta = [[-1, 0 ], # go up
- [ 0, -1], # go left
- [ 1, 0 ], # go down
- [ 0, 1 ]] # go right
+# the actions we can take
+delta = [[-1, 0], [0, -1], [1, 0], [0, 1]] # go up # go left # go down # go right
-#function to search the path
-def search(grid,init,goal,cost,heuristic):
+# function to search the path
+def search(grid, init, goal, cost, heuristic):
- closed = [[0 for col in range(len(grid[0]))] for row in range(len(grid))]# the referrence grid
+ closed = [
+ [0 for col in range(len(grid[0]))] for row in range(len(grid))
+ ] # the reference grid
closed[init[0]][init[1]] = 1
- action = [[0 for col in range(len(grid[0]))] for row in range(len(grid))]#the action grid
+ action = [
+ [0 for col in range(len(grid[0]))] for row in range(len(grid))
+ ] # the action grid
x = init[0]
y = init[1]
@@ -47,29 +48,26 @@ def search(grid,init,goal,cost,heuristic):
cell = [[f, g, x, y]]
found = False # flag that is set when search is complete
- resign = False # flag set if we can't find expand
+ resign = False # flag set if we can't find expand
while not found and not resign:
if len(cell) == 0:
- resign = True
return "FAIL"
- else:
- cell.sort()#to choose the least costliest action so as to move closer to the goal
+ else: # to choose the least costliest action so as to move closer to the goal
+ cell.sort()
cell.reverse()
next = cell.pop()
x = next[2]
y = next[3]
g = next[1]
- f = next[0]
-
if x == goal[0] and y == goal[1]:
found = True
else:
- for i in range(len(delta)):#to try out different valid actions
+ for i in range(len(delta)): # to try out different valid actions
x2 = x + delta[i][0]
y2 = y + delta[i][1]
- if x2 >= 0 and x2 < len(grid) and y2 >=0 and y2 < len(grid[0]):
+ if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
if closed[x2][y2] == 0 and grid[x2][y2] == 0:
g2 = g + cost
f2 = g2 + heuristic[x2][y2]
@@ -79,7 +77,7 @@ def search(grid,init,goal,cost,heuristic):
invpath = []
x = goal[0]
y = goal[1]
- invpath.append([x, y])#we get the reverse path from here
+ invpath.append([x, y]) # we get the reverse path from here
while x != init[0] or y != init[1]:
x2 = x - delta[action[x][y]][0]
y2 = y - delta[action[x][y]][1]
@@ -89,14 +87,14 @@ def search(grid,init,goal,cost,heuristic):
path = []
for i in range(len(invpath)):
- path.append(invpath[len(invpath) - 1 - i])
+ path.append(invpath[len(invpath) - 1 - i])
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
-
+
return path
-
-a = search(grid,init,goal,cost,heuristic)
-for i in range(len(a)):
- print(a[i])
+
+a = search(grid, init, goal, cost, heuristic)
+for i in range(len(a)):
+ print(a[i])
diff --git a/graphs/articulation_points.py b/graphs/articulation_points.py
index 1173c4ea373c..7197369de090 100644
--- a/graphs/articulation_points.py
+++ b/graphs/articulation_points.py
@@ -1,5 +1,5 @@
# Finding Articulation Points in Undirected Graph
-def computeAP(l):
+def computeAP(l): # noqa: E741
n = len(l)
outEdgeCount = 0
low = [0] * n
@@ -33,12 +33,23 @@ def dfs(root, at, parent, outEdgeCount):
if not visited[i]:
outEdgeCount = 0
outEdgeCount = dfs(i, i, -1, outEdgeCount)
- isArt[i] = (outEdgeCount > 1)
+ isArt[i] = outEdgeCount > 1
for x in range(len(isArt)):
- if isArt[x] == True:
+ if isArt[x] is True:
print(x)
+
# Adjacency list of graph
-l = {0:[1,2], 1:[0,2], 2:[0,1,3,5], 3:[2,4], 4:[3], 5:[2,6,8], 6:[5,7], 7:[6,8], 8:[5,7]}
-computeAP(l)
+data = {
+ 0: [1, 2],
+ 1: [0, 2],
+ 2: [0, 1, 3, 5],
+ 3: [2, 4],
+ 4: [3],
+ 5: [2, 6, 8],
+ 6: [5, 7],
+ 7: [6, 8],
+ 8: [5, 7],
+}
+computeAP(data)
diff --git a/graphs/basic_graphs.py b/graphs/basic_graphs.py
index 3b3abeb1720d..0f73d8d07b2a 100644
--- a/graphs/basic_graphs.py
+++ b/graphs/basic_graphs.py
@@ -1,51 +1,42 @@
-from __future__ import print_function
-
-try:
- raw_input # Python 2
-except NameError:
- raw_input = input # Python 3
-
-try:
- xrange # Python 2
-except NameError:
- xrange = range # Python 3
-
-# Accept No. of Nodes and edges
-n, m = map(int, raw_input().split(" "))
-
-# Initialising Dictionary of edges
-g = {}
-for i in xrange(n):
- g[i + 1] = []
-
-"""
---------------------------------------------------------------------------------
- Accepting edges of Unweighted Directed Graphs
---------------------------------------------------------------------------------
-"""
-for _ in xrange(m):
- x, y = map(int, raw_input().split(" "))
- g[x].append(y)
-
-"""
---------------------------------------------------------------------------------
- Accepting edges of Unweighted Undirected Graphs
---------------------------------------------------------------------------------
-"""
-for _ in xrange(m):
- x, y = map(int, raw_input().split(" "))
- g[x].append(y)
- g[y].append(x)
+from collections import deque
-"""
---------------------------------------------------------------------------------
- Accepting edges of Weighted Undirected Graphs
---------------------------------------------------------------------------------
-"""
-for _ in xrange(m):
- x, y, r = map(int, raw_input().split(" "))
- g[x].append([y, r])
- g[y].append([x, r])
+if __name__ == "__main__":
+ # Accept No. of Nodes and edges
+ n, m = map(int, input().split(" "))
+
+ # Initialising Dictionary of edges
+ g = {}
+ for i in range(n):
+ g[i + 1] = []
+
+ """
+ ----------------------------------------------------------------------------
+ Accepting edges of Unweighted Directed Graphs
+ ----------------------------------------------------------------------------
+ """
+ for _ in range(m):
+ x, y = map(int, input().strip().split(" "))
+ g[x].append(y)
+
+ """
+ ----------------------------------------------------------------------------
+ Accepting edges of Unweighted Undirected Graphs
+ ----------------------------------------------------------------------------
+ """
+ for _ in range(m):
+ x, y = map(int, input().strip().split(" "))
+ g[x].append(y)
+ g[y].append(x)
+
+ """
+ ----------------------------------------------------------------------------
+ Accepting edges of Weighted Undirected Graphs
+ ----------------------------------------------------------------------------
+ """
+ for _ in range(m):
+ x, y, r = map(int, input().strip().split(" "))
+ g[x].append([y, r])
+ g[y].append([x, r])
"""
--------------------------------------------------------------------------------
@@ -59,7 +50,7 @@
def dfs(G, s):
- vis, S = set([s]), [s]
+ vis, S = {s}, [s]
print(s)
while S:
flag = 0
@@ -80,14 +71,13 @@ def dfs(G, s):
Args : G - Dictionary of edges
s - Starting Node
Vars : vis - Set of visited nodes
- Q - Traveral Stack
+ Q - Traversal Stack
--------------------------------------------------------------------------------
"""
-from collections import deque
def bfs(G, s):
- vis, Q = set([s]), deque([s])
+ vis, Q = {s}, deque([s])
print(s)
while Q:
u = Q.popleft()
@@ -136,10 +126,11 @@ def dijk(G, s):
Topological Sort
--------------------------------------------------------------------------------
"""
-from collections import deque
-def topo(G, ind=None, Q=[1]):
+def topo(G, ind=None, Q=None):
+ if Q is None:
+ Q = [1]
if ind is None:
ind = [0] * (len(G) + 1) # SInce oth Index is ignored
for u in G:
@@ -168,9 +159,10 @@ def topo(G, ind=None, Q=[1]):
def adjm():
- n, a = raw_input(), []
- for i in xrange(n):
- a.append(map(int, raw_input().split()))
+ n = input().strip()
+ a = []
+ for i in range(n):
+ a.append(map(int, input().strip().split()))
return a, n
@@ -190,10 +182,10 @@ def adjm():
def floy(A_and_n):
(A, n) = A_and_n
dist = list(A)
- path = [[0] * n for i in xrange(n)]
- for k in xrange(n):
- for i in xrange(n):
- for j in xrange(n):
+ path = [[0] * n for i in range(n)]
+ for k in range(n):
+ for i in range(n):
+ for j in range(n):
if dist[i][j] > dist[i][k] + dist[k][j]:
dist[i][j] = dist[i][k] + dist[k][j]
path[i][k] = k
@@ -228,6 +220,7 @@ def prim(G, s):
if v[1] < dist.get(v[0], 100000):
dist[v[0]] = v[1]
path[v[0]] = u
+ return dist
"""
@@ -242,11 +235,11 @@ def prim(G, s):
def edglist():
- n, m = map(int, raw_input().split(" "))
- l = []
- for i in xrange(m):
- l.append(map(int, raw_input().split(' ')))
- return l, n
+ n, m = map(int, input().split(" "))
+ edges = []
+ for i in range(m):
+ edges.append(map(int, input().split(" ")))
+ return edges, n
"""
@@ -263,16 +256,16 @@ def krusk(E_and_n):
# Sort edges on the basis of distance
(E, n) = E_and_n
E.sort(reverse=True, key=lambda x: x[2])
- s = [set([i]) for i in range(1, n + 1)]
+ s = [{i} for i in range(1, n + 1)]
while True:
if len(s) == 1:
break
print(s)
x = E.pop()
- for i in xrange(len(s)):
+ for i in range(len(s)):
if x[0] in s[i]:
break
- for j in xrange(len(s)):
+ for j in range(len(s)):
if x[1] in s[j]:
if i == j:
break
diff --git a/graphs/bellman_ford.py b/graphs/bellman_ford.py
index 82db80546b94..ace7985647bb 100644
--- a/graphs/bellman_ford.py
+++ b/graphs/bellman_ford.py
@@ -1,54 +1,56 @@
-from __future__ import print_function
+from __future__ import annotations
+
def printDist(dist, V):
- print("\nVertex Distance")
- for i in range(V):
- if dist[i] != float('inf') :
- print(i,"\t",int(dist[i]),end = "\t")
- else:
- print(i,"\t","INF",end="\t")
- print()
-
-def BellmanFord(graph, V, E, src):
- mdist=[float('inf') for i in range(V)]
- mdist[src] = 0.0
-
- for i in range(V-1):
- for j in range(V):
- u = graph[j]["src"]
- v = graph[j]["dst"]
- w = graph[j]["weight"]
-
- if mdist[u] != float('inf') and mdist[u] + w < mdist[v]:
- mdist[v] = mdist[u] + w
- for j in range(V):
- u = graph[j]["src"]
- v = graph[j]["dst"]
- w = graph[j]["weight"]
-
- if mdist[u] != float('inf') and mdist[u] + w < mdist[v]:
- print("Negative cycle found. Solution not possible.")
- return
-
- printDist(mdist, V)
-
-
-
-#MAIN
-V = int(input("Enter number of vertices: "))
-E = int(input("Enter number of edges: "))
-
-graph = [dict() for j in range(E)]
-
-for i in range(V):
- graph[i][i] = 0.0
-
-for i in range(E):
- print("\nEdge ",i+1)
- src = int(input("Enter source:"))
- dst = int(input("Enter destination:"))
- weight = float(input("Enter weight:"))
- graph[i] = {"src": src,"dst": dst, "weight": weight}
-
-gsrc = int(input("\nEnter shortest path source:"))
-BellmanFord(graph, V, E, gsrc)
+ print("Vertex Distance")
+ distances = ("INF" if d == float("inf") else d for d in dist)
+ print("\t".join(f"{i}\t{d}" for i, d in enumerate(distances)))
+
+
+def BellmanFord(graph: list[dict[str, int]], V: int, E: int, src: int) -> int:
+ """
+ Returns shortest paths from a vertex src to all
+ other vertices.
+ """
+ mdist = [float("inf") for i in range(V)]
+ mdist[src] = 0.0
+
+ for i in range(V - 1):
+ for j in range(E):
+ u = graph[j]["src"]
+ v = graph[j]["dst"]
+ w = graph[j]["weight"]
+
+ if mdist[u] != float("inf") and mdist[u] + w < mdist[v]:
+ mdist[v] = mdist[u] + w
+ for j in range(E):
+ u = graph[j]["src"]
+ v = graph[j]["dst"]
+ w = graph[j]["weight"]
+
+ if mdist[u] != float("inf") and mdist[u] + w < mdist[v]:
+ print("Negative cycle found. Solution not possible.")
+ return
+
+ printDist(mdist, V)
+ return src
+
+
+if __name__ == "__main__":
+ V = int(input("Enter number of vertices: ").strip())
+ E = int(input("Enter number of edges: ").strip())
+
+ graph = [dict() for j in range(E)]
+
+ for i in range(E):
+ graph[i][i] = 0.0
+
+ for i in range(E):
+ print("\nEdge ", i + 1)
+ src = int(input("Enter source:").strip())
+ dst = int(input("Enter destination:").strip())
+ weight = float(input("Enter weight:").strip())
+ graph[i] = {"src": src, "dst": dst, "weight": weight}
+
+ gsrc = int(input("\nEnter shortest path source:").strip())
+ BellmanFord(graph, V, E, gsrc)
diff --git a/graphs/bfs_shortest_path.py b/graphs/bfs_shortest_path.py
new file mode 100644
index 000000000000..754ba403537e
--- /dev/null
+++ b/graphs/bfs_shortest_path.py
@@ -0,0 +1,106 @@
+"""Breadth-first search shortest path implementations.
+ doctest:
+ python -m doctest -v bfs_shortest_path.py
+ Manual test:
+ python bfs_shortest_path.py
+"""
+graph = {
+ "A": ["B", "C", "E"],
+ "B": ["A", "D", "E"],
+ "C": ["A", "F", "G"],
+ "D": ["B"],
+ "E": ["A", "B", "D"],
+ "F": ["C"],
+ "G": ["C"],
+}
+
+
+def bfs_shortest_path(graph: dict, start, goal) -> str:
+ """Find shortest path between `start` and `goal` nodes.
+ Args:
+ graph (dict): node/list of neighboring nodes key/value pairs.
+ start: start node.
+ goal: target node.
+ Returns:
+ Shortest path between `start` and `goal` nodes as a string of nodes.
+ 'Not found' string if no path found.
+ Example:
+ >>> bfs_shortest_path(graph, "G", "D")
+ ['G', 'C', 'A', 'B', 'D']
+ """
+ # keep track of explored nodes
+ explored = set()
+ # keep track of all the paths to be checked
+ queue = [[start]]
+
+ # return path if start is goal
+ if start == goal:
+ return "That was easy! Start = goal"
+
+ # keeps looping until all possible paths have been checked
+ while queue:
+ # pop the first path from the queue
+ path = queue.pop(0)
+ # get the last node from the path
+ node = path[-1]
+ if node not in explored:
+ neighbours = graph[node]
+ # go through all neighbour nodes, construct a new path and
+ # push it into the queue
+ for neighbour in neighbours:
+ new_path = list(path)
+ new_path.append(neighbour)
+ queue.append(new_path)
+ # return path if neighbour is goal
+ if neighbour == goal:
+ return new_path
+
+ # mark node as explored
+ explored.add(node)
+
+ # in case there's no path between the 2 nodes
+ return "So sorry, but a connecting path doesn't exist :("
+
+
+def bfs_shortest_path_distance(graph: dict, start, target) -> int:
+ """Find shortest path distance between `start` and `target` nodes.
+ Args:
+ graph: node/list of neighboring nodes key/value pairs.
+ start: node to start search from.
+ target: node to search for.
+ Returns:
+ Number of edges in shortest path between `start` and `target` nodes.
+ -1 if no path exists.
+ Example:
+ >>> bfs_shortest_path_distance(graph, "G", "D")
+ 4
+ >>> bfs_shortest_path_distance(graph, "A", "A")
+ 0
+ >>> bfs_shortest_path_distance(graph, "A", "H")
+ -1
+ """
+ if not graph or start not in graph or target not in graph:
+ return -1
+ if start == target:
+ return 0
+ queue = [start]
+ visited = set(start)
+ # Keep tab on distances from `start` node.
+ dist = {start: 0, target: -1}
+ while queue:
+ node = queue.pop(0)
+ if node == target:
+ dist[target] = (
+ dist[node] if dist[target] == -1 else min(dist[target], dist[node])
+ )
+ for adjacent in graph[node]:
+ if adjacent not in visited:
+ visited.add(adjacent)
+ queue.append(adjacent)
+ dist[adjacent] = dist[node] + 1
+ return dist[target]
+
+
+if __name__ == "__main__":
+ print(bfs_shortest_path(graph, "G", "D")) # returns ['G', 'C', 'A', 'B', 'D']
+ print(bfs_shortest_path_distance(graph, "G", "D")) # returns 4
diff --git a/graphs/bfs_zero_one_shortest_path.py b/graphs/bfs_zero_one_shortest_path.py
new file mode 100644
index 000000000000..a725fae7e48f
--- /dev/null
+++ b/graphs/bfs_zero_one_shortest_path.py
@@ -0,0 +1,138 @@
+from collections import deque
+from dataclasses import dataclass
+from typing import Iterator, List
+
+"""
+Finding the shortest path in 0-1-graph in O(E + V) which is faster than dijkstra.
+0-1-graph is the weighted graph with the weights equal to 0 or 1.
+Link: https://codeforces.com/blog/entry/22276
+"""
+
+
+@dataclass
+class Edge:
+ """Weighted directed graph edge."""
+
+ destination_vertex: int
+ weight: int
+
+
+class AdjacencyList:
+ """Graph adjacency list."""
+
+ def __init__(self, size: int):
+ self._graph: List[List[Edge]] = [[] for _ in range(size)]
+ self._size = size
+
+ def __getitem__(self, vertex: int) -> Iterator[Edge]:
+ """Get all the vertices adjacent to the given one."""
+ return iter(self._graph[vertex])
+
+ @property
+ def size(self):
+ return self._size
+
+ def add_edge(self, from_vertex: int, to_vertex: int, weight: int):
+ """
+ >>> g = AdjacencyList(2)
+ >>> g.add_edge(0, 1, 0)
+ >>> g.add_edge(1, 0, 1)
+ >>> list(g[0])
+ [Edge(destination_vertex=1, weight=0)]
+ >>> list(g[1])
+ [Edge(destination_vertex=0, weight=1)]
+ >>> g.add_edge(0, 1, 2)
+ Traceback (most recent call last):
+ ...
+ ValueError: Edge weight must be either 0 or 1.
+ >>> g.add_edge(0, 2, 1)
+ Traceback (most recent call last):
+ ...
+ ValueError: Vertex indexes must be in [0; size).
+ """
+ if weight not in (0, 1):
+ raise ValueError("Edge weight must be either 0 or 1.")
+
+ if to_vertex < 0 or to_vertex >= self.size:
+ raise ValueError("Vertex indexes must be in [0; size).")
+
+ self._graph[from_vertex].append(Edge(to_vertex, weight))
+
+ def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
+ """
+ Return the shortest distance from start_vertex to finish_vertex in 0-1-graph.
+ 1 1 1
+ 0--------->3 6--------7>------->8
+ | ^ ^ ^ |1
+ | | | |0 v
+ 0| |0 1| 9-------->10
+ | | | ^ 1
+ v | | |0
+ 1--------->2<-------4------->5
+ 0 1 1
+ >>> g = AdjacencyList(11)
+ >>> g.add_edge(0, 1, 0)
+ >>> g.add_edge(0, 3, 1)
+ >>> g.add_edge(1, 2, 0)
+ >>> g.add_edge(2, 3, 0)
+ >>> g.add_edge(4, 2, 1)
+ >>> g.add_edge(4, 5, 1)
+ >>> g.add_edge(4, 6, 1)
+ >>> g.add_edge(5, 9, 0)
+ >>> g.add_edge(6, 7, 1)
+ >>> g.add_edge(7, 8, 1)
+ >>> g.add_edge(8, 10, 1)
+ >>> g.add_edge(9, 7, 0)
+ >>> g.add_edge(9, 10, 1)
+ >>> g.add_edge(1, 2, 2)
+ Traceback (most recent call last):
+ ...
+ ValueError: Edge weight must be either 0 or 1.
+ >>> g.get_shortest_path(0, 3)
+ 0
+ >>> g.get_shortest_path(0, 4)
+ Traceback (most recent call last):
+ ...
+ ValueError: No path from start_vertex to finish_vertex.
+ >>> g.get_shortest_path(4, 10)
+ 2
+ >>> g.get_shortest_path(4, 8)
+ 2
+ >>> g.get_shortest_path(0, 1)
+ 0
+ >>> g.get_shortest_path(1, 0)
+ Traceback (most recent call last):
+ ...
+ ValueError: No path from start_vertex to finish_vertex.
+ """
+ queue = deque([start_vertex])
+ distances = [None for i in range(self.size)]
+ distances[start_vertex] = 0
+
+ while queue:
+ current_vertex = queue.popleft()
+ current_distance = distances[current_vertex]
+
+ for edge in self[current_vertex]:
+ new_distance = current_distance + edge.weight
+ if (
+ distances[edge.destination_vertex] is not None
+ and new_distance >= distances[edge.destination_vertex]
+ ):
+ continue
+ distances[edge.destination_vertex] = new_distance
+ if edge.weight == 0:
+ queue.appendleft(edge.destination_vertex)
+ else:
+ queue.append(edge.destination_vertex)
+
+ if distances[finish_vertex] is None:
+ raise ValueError("No path from start_vertex to finish_vertex.")
+
+ return distances[finish_vertex]
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/graphs/bidirectional_a_star.py b/graphs/bidirectional_a_star.py
new file mode 100644
index 000000000000..72ff4fa65ff0
--- /dev/null
+++ b/graphs/bidirectional_a_star.py
@@ -0,0 +1,257 @@
+"""
+https://en.wikipedia.org/wiki/Bidirectional_search
+"""
+
+from __future__ import annotations
+
+import time
+from math import sqrt
+
+# 1 for manhattan, 0 for euclidean
+HEURISTIC = 0
+
+grid = [
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 1, 0, 0, 0, 0],
+ [1, 0, 1, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 1, 0, 0],
+]
+
+delta = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
+
+
+class Node:
+ """
+ >>> k = Node(0, 0, 4, 3, 0, None)
+ >>> k.calculate_heuristic()
+ 5.0
+ >>> n = Node(1, 4, 3, 4, 2, None)
+ >>> n.calculate_heuristic()
+ 2.0
+ >>> l = [k, n]
+ >>> n == l[0]
+ False
+ >>> l.sort()
+ >>> n == l[0]
+ True
+ """
+
+ def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
+ self.pos_x = pos_x
+ self.pos_y = pos_y
+ self.pos = (pos_y, pos_x)
+ self.goal_x = goal_x
+ self.goal_y = goal_y
+ self.g_cost = g_cost
+ self.parent = parent
+ self.h_cost = self.calculate_heuristic()
+ self.f_cost = self.g_cost + self.h_cost
+
+ def calculate_heuristic(self) -> float:
+ """
+ Heuristic for the A*
+ """
+ dy = self.pos_x - self.goal_x
+ dx = self.pos_y - self.goal_y
+ if HEURISTIC == 1:
+ return abs(dx) + abs(dy)
+ else:
+ return sqrt(dy ** 2 + dx ** 2)
+
+ def __lt__(self, other) -> bool:
+ return self.f_cost < other.f_cost
+
+
+class AStar:
+ """
+ >>> astar = AStar((0, 0), (len(grid) - 1, len(grid[0]) - 1))
+ >>> (astar.start.pos_y + delta[3][0], astar.start.pos_x + delta[3][1])
+ (0, 1)
+ >>> [x.pos for x in astar.get_successors(astar.start)]
+ [(1, 0), (0, 1)]
+ >>> (astar.start.pos_y + delta[2][0], astar.start.pos_x + delta[2][1])
+ (1, 0)
+ >>> astar.retrace_path(astar.start)
+ [(0, 0)]
+ >>> astar.search() # doctest: +NORMALIZE_WHITESPACE
+ [(0, 0), (1, 0), (2, 0), (2, 1), (2, 2), (2, 3), (3, 3),
+ (4, 3), (4, 4), (5, 4), (5, 5), (6, 5), (6, 6)]
+ """
+
+ def __init__(self, start, goal):
+ self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
+ self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
+
+ self.open_nodes = [self.start]
+ self.closed_nodes = []
+
+ self.reached = False
+
+ def search(self) -> list[tuple[int]]:
+ while self.open_nodes:
+ # Open Nodes are sorted using __lt__
+ self.open_nodes.sort()
+ current_node = self.open_nodes.pop(0)
+
+ if current_node.pos == self.target.pos:
+ self.reached = True
+ return self.retrace_path(current_node)
+
+ self.closed_nodes.append(current_node)
+ successors = self.get_successors(current_node)
+
+ for child_node in successors:
+ if child_node in self.closed_nodes:
+ continue
+
+ if child_node not in self.open_nodes:
+ self.open_nodes.append(child_node)
+ else:
+ # retrieve the best current path
+ better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
+
+ if child_node.g_cost < better_node.g_cost:
+ self.open_nodes.append(child_node)
+ else:
+ self.open_nodes.append(better_node)
+
+ if not (self.reached):
+ return [(self.start.pos)]
+
+ def get_successors(self, parent: Node) -> list[Node]:
+ """
+ Returns a list of successors (both in the grid and free spaces)
+ """
+ successors = []
+ for action in delta:
+ pos_x = parent.pos_x + action[1]
+ pos_y = parent.pos_y + action[0]
+ if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
+ continue
+
+ if grid[pos_y][pos_x] != 0:
+ continue
+
+ successors.append(
+ Node(
+ pos_x,
+ pos_y,
+ self.target.pos_y,
+ self.target.pos_x,
+ parent.g_cost + 1,
+ parent,
+ )
+ )
+ return successors
+
+ def retrace_path(self, node: Node) -> list[tuple[int]]:
+ """
+ Retrace the path from parents to parents until start node
+ """
+ current_node = node
+ path = []
+ while current_node is not None:
+ path.append((current_node.pos_y, current_node.pos_x))
+ current_node = current_node.parent
+ path.reverse()
+ return path
+
+
+class BidirectionalAStar:
+ """
+ >>> bd_astar = BidirectionalAStar((0, 0), (len(grid) - 1, len(grid[0]) - 1))
+ >>> bd_astar.fwd_astar.start.pos == bd_astar.bwd_astar.target.pos
+ True
+ >>> bd_astar.retrace_bidirectional_path(bd_astar.fwd_astar.start,
+ ... bd_astar.bwd_astar.start)
+ [(0, 0)]
+ >>> bd_astar.search() # doctest: +NORMALIZE_WHITESPACE
+ [(0, 0), (0, 1), (0, 2), (1, 2), (1, 3), (2, 3), (2, 4),
+ (2, 5), (3, 5), (4, 5), (5, 5), (5, 6), (6, 6)]
+ """
+
+ def __init__(self, start, goal):
+ self.fwd_astar = AStar(start, goal)
+ self.bwd_astar = AStar(goal, start)
+ self.reached = False
+
+ def search(self) -> list[tuple[int]]:
+ while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
+ self.fwd_astar.open_nodes.sort()
+ self.bwd_astar.open_nodes.sort()
+ current_fwd_node = self.fwd_astar.open_nodes.pop(0)
+ current_bwd_node = self.bwd_astar.open_nodes.pop(0)
+
+ if current_bwd_node.pos == current_fwd_node.pos:
+ self.reached = True
+ return self.retrace_bidirectional_path(
+ current_fwd_node, current_bwd_node
+ )
+
+ self.fwd_astar.closed_nodes.append(current_fwd_node)
+ self.bwd_astar.closed_nodes.append(current_bwd_node)
+
+ self.fwd_astar.target = current_bwd_node
+ self.bwd_astar.target = current_fwd_node
+
+ successors = {
+ self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
+ self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
+ }
+
+ for astar in [self.fwd_astar, self.bwd_astar]:
+ for child_node in successors[astar]:
+ if child_node in astar.closed_nodes:
+ continue
+
+ if child_node not in astar.open_nodes:
+ astar.open_nodes.append(child_node)
+ else:
+ # retrieve the best current path
+ better_node = astar.open_nodes.pop(
+ astar.open_nodes.index(child_node)
+ )
+
+ if child_node.g_cost < better_node.g_cost:
+ astar.open_nodes.append(child_node)
+ else:
+ astar.open_nodes.append(better_node)
+
+ if not self.reached:
+ return [self.fwd_astar.start.pos]
+
+ def retrace_bidirectional_path(
+ self, fwd_node: Node, bwd_node: Node
+ ) -> list[tuple[int]]:
+ fwd_path = self.fwd_astar.retrace_path(fwd_node)
+ bwd_path = self.bwd_astar.retrace_path(bwd_node)
+ bwd_path.pop()
+ bwd_path.reverse()
+ path = fwd_path + bwd_path
+ return path
+
+
+if __name__ == "__main__":
+ # all coordinates are given in format [y,x]
+ import doctest
+
+ doctest.testmod()
+ init = (0, 0)
+ goal = (len(grid) - 1, len(grid[0]) - 1)
+ for elem in grid:
+ print(elem)
+
+ start_time = time.time()
+ a_star = AStar(init, goal)
+ path = a_star.search()
+ end_time = time.time() - start_time
+ print(f"AStar execution time = {end_time:f} seconds")
+
+ bd_start_time = time.time()
+ bidir_astar = BidirectionalAStar(init, goal)
+ path = bidir_astar.search()
+ bd_end_time = time.time() - bd_start_time
+ print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
diff --git a/graphs/bidirectional_breadth_first_search.py b/graphs/bidirectional_breadth_first_search.py
new file mode 100644
index 000000000000..39d8dc7d4187
--- /dev/null
+++ b/graphs/bidirectional_breadth_first_search.py
@@ -0,0 +1,181 @@
+"""
+https://en.wikipedia.org/wiki/Bidirectional_search
+"""
+
+from __future__ import annotations
+
+import time
+
+grid = [
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 1, 0, 0, 0, 0],
+ [1, 0, 1, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 1, 0, 0],
+]
+
+delta = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
+
+
+class Node:
+ def __init__(self, pos_x, pos_y, goal_x, goal_y, parent):
+ self.pos_x = pos_x
+ self.pos_y = pos_y
+ self.pos = (pos_y, pos_x)
+ self.goal_x = goal_x
+ self.goal_y = goal_y
+ self.parent = parent
+
+
+class BreadthFirstSearch:
+ """
+ >>> bfs = BreadthFirstSearch((0, 0), (len(grid) - 1, len(grid[0]) - 1))
+ >>> (bfs.start.pos_y + delta[3][0], bfs.start.pos_x + delta[3][1])
+ (0, 1)
+ >>> [x.pos for x in bfs.get_successors(bfs.start)]
+ [(1, 0), (0, 1)]
+ >>> (bfs.start.pos_y + delta[2][0], bfs.start.pos_x + delta[2][1])
+ (1, 0)
+ >>> bfs.retrace_path(bfs.start)
+ [(0, 0)]
+ >>> bfs.search() # doctest: +NORMALIZE_WHITESPACE
+ [(0, 0), (1, 0), (2, 0), (3, 0), (3, 1), (4, 1),
+ (5, 1), (5, 2), (5, 3), (5, 4), (5, 5), (6, 5), (6, 6)]
+ """
+
+ def __init__(self, start, goal):
+ self.start = Node(start[1], start[0], goal[1], goal[0], None)
+ self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
+
+ self.node_queue = [self.start]
+ self.reached = False
+
+ def search(self) -> list[tuple[int]]:
+ while self.node_queue:
+ current_node = self.node_queue.pop(0)
+
+ if current_node.pos == self.target.pos:
+ self.reached = True
+ return self.retrace_path(current_node)
+
+ successors = self.get_successors(current_node)
+
+ for node in successors:
+ self.node_queue.append(node)
+
+ if not (self.reached):
+ return [(self.start.pos)]
+
+ def get_successors(self, parent: Node) -> list[Node]:
+ """
+ Returns a list of successors (both in the grid and free spaces)
+ """
+ successors = []
+ for action in delta:
+ pos_x = parent.pos_x + action[1]
+ pos_y = parent.pos_y + action[0]
+ if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
+ continue
+
+ if grid[pos_y][pos_x] != 0:
+ continue
+
+ successors.append(
+ Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
+ )
+ return successors
+
+ def retrace_path(self, node: Node) -> list[tuple[int]]:
+ """
+ Retrace the path from parents to parents until start node
+ """
+ current_node = node
+ path = []
+ while current_node is not None:
+ path.append((current_node.pos_y, current_node.pos_x))
+ current_node = current_node.parent
+ path.reverse()
+ return path
+
+
+class BidirectionalBreadthFirstSearch:
+ """
+ >>> bd_bfs = BidirectionalBreadthFirstSearch((0, 0), (len(grid) - 1,
+ ... len(grid[0]) - 1))
+ >>> bd_bfs.fwd_bfs.start.pos == bd_bfs.bwd_bfs.target.pos
+ True
+ >>> bd_bfs.retrace_bidirectional_path(bd_bfs.fwd_bfs.start,
+ ... bd_bfs.bwd_bfs.start)
+ [(0, 0)]
+ >>> bd_bfs.search() # doctest: +NORMALIZE_WHITESPACE
+ [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2), (2, 3),
+ (2, 4), (3, 4), (3, 5), (3, 6), (4, 6), (5, 6), (6, 6)]
+ """
+
+ def __init__(self, start, goal):
+ self.fwd_bfs = BreadthFirstSearch(start, goal)
+ self.bwd_bfs = BreadthFirstSearch(goal, start)
+ self.reached = False
+
+ def search(self) -> list[tuple[int]]:
+ while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
+ current_fwd_node = self.fwd_bfs.node_queue.pop(0)
+ current_bwd_node = self.bwd_bfs.node_queue.pop(0)
+
+ if current_bwd_node.pos == current_fwd_node.pos:
+ self.reached = True
+ return self.retrace_bidirectional_path(
+ current_fwd_node, current_bwd_node
+ )
+
+ self.fwd_bfs.target = current_bwd_node
+ self.bwd_bfs.target = current_fwd_node
+
+ successors = {
+ self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
+ self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
+ }
+
+ for bfs in [self.fwd_bfs, self.bwd_bfs]:
+ for node in successors[bfs]:
+ bfs.node_queue.append(node)
+
+ if not self.reached:
+ return [self.fwd_bfs.start.pos]
+
+ def retrace_bidirectional_path(
+ self, fwd_node: Node, bwd_node: Node
+ ) -> list[tuple[int]]:
+ fwd_path = self.fwd_bfs.retrace_path(fwd_node)
+ bwd_path = self.bwd_bfs.retrace_path(bwd_node)
+ bwd_path.pop()
+ bwd_path.reverse()
+ path = fwd_path + bwd_path
+ return path
+
+
+if __name__ == "__main__":
+ # all coordinates are given in format [y,x]
+ import doctest
+
+ doctest.testmod()
+ init = (0, 0)
+ goal = (len(grid) - 1, len(grid[0]) - 1)
+ for elem in grid:
+ print(elem)
+
+ start_bfs_time = time.time()
+ bfs = BreadthFirstSearch(init, goal)
+ path = bfs.search()
+ bfs_time = time.time() - start_bfs_time
+
+ print("Unidirectional BFS computation time : ", bfs_time)
+
+ start_bd_bfs_time = time.time()
+ bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
+ bd_path = bd_bfs.search()
+ bd_bfs_time = time.time() - start_bd_bfs_time
+
+ print("Bidirectional BFS computation time : ", bd_bfs_time)
diff --git a/graphs/breadth_first_search.py b/graphs/breadth_first_search.py
index 3992e2d4d892..ee9855bd0c2d 100644
--- a/graphs/breadth_first_search.py
+++ b/graphs/breadth_first_search.py
@@ -1,67 +1,91 @@
#!/usr/bin/python
-# encoding=utf8
""" Author: OMKAR PATHAK """
-from __future__ import print_function
+from typing import Set
-class Graph():
- def __init__(self):
- self.vertex = {}
+class Graph:
+ def __init__(self) -> None:
+ self.vertices = {}
- # for printing the Graph vertexes
- def printGraph(self):
- for i in self.vertex.keys():
- print(i,' -> ', ' -> '.join([str(j) for j in self.vertex[i]]))
+ def print_graph(self) -> None:
+ """
+ prints adjacency list representation of graph
+ >>> g = Graph()
+ >>> g.print_graph()
+ >>> g.add_edge(0, 1)
+ >>> g.print_graph()
+ 0 : 1
+ """
+ for i in self.vertices:
+ print(i, " : ", " -> ".join([str(j) for j in self.vertices[i]]))
- # for adding the edge beween two vertexes
- def addEdge(self, fromVertex, toVertex):
- # check if vertex is already present,
- if fromVertex in self.vertex.keys():
- self.vertex[fromVertex].append(toVertex)
+ def add_edge(self, from_vertex: int, to_vertex: int) -> None:
+ """
+ adding the edge between two vertices
+ >>> g = Graph()
+ >>> g.print_graph()
+ >>> g.add_edge(0, 1)
+ >>> g.print_graph()
+ 0 : 1
+ """
+ if from_vertex in self.vertices:
+ self.vertices[from_vertex].append(to_vertex)
else:
- # else make a new vertex
- self.vertex[fromVertex] = [toVertex]
+ self.vertices[from_vertex] = [to_vertex]
- def BFS(self, startVertex):
- # Take a list for stoting already visited vertexes
- visited = [False] * len(self.vertex)
+ def bfs(self, start_vertex: int) -> Set[int]:
+ """
+ >>> g = Graph()
+ >>> g.add_edge(0, 1)
+ >>> g.add_edge(0, 1)
+ >>> g.add_edge(0, 2)
+ >>> g.add_edge(1, 2)
+ >>> g.add_edge(2, 0)
+ >>> g.add_edge(2, 3)
+ >>> g.add_edge(3, 3)
+ >>> sorted(g.bfs(2))
+ [0, 1, 2, 3]
+ """
+ # initialize set for storing already visited vertices
+ visited = set()
- # create a list to store all the vertexes for BFS
+ # create a first in first out queue to store all the vertices for BFS
queue = []
# mark the source node as visited and enqueue it
- visited[startVertex] = True
- queue.append(startVertex)
+ visited.add(start_vertex)
+ queue.append(start_vertex)
while queue:
- startVertex = queue.pop(0)
- print(startVertex, end = ' ')
+ vertex = queue.pop(0)
- # mark all adjacent nodes as visited and print them
- for i in self.vertex[startVertex]:
- if visited[i] == False:
- queue.append(i)
- visited[i] = True
+ # loop through all adjacent vertex and enqueue it if not yet visited
+ for adjacent_vertex in self.vertices[vertex]:
+ if adjacent_vertex not in visited:
+ queue.append(adjacent_vertex)
+ visited.add(adjacent_vertex)
+ return visited
+
+
+if __name__ == "__main__":
+ from doctest import testmod
+
+ testmod(verbose=True)
-if __name__ == '__main__':
g = Graph()
- g.addEdge(0, 1)
- g.addEdge(0, 2)
- g.addEdge(1, 2)
- g.addEdge(2, 0)
- g.addEdge(2, 3)
- g.addEdge(3, 3)
-
- g.printGraph()
- print('BFS:')
- g.BFS(2)
-
- # OUTPUT:
- # 0 -> 1 -> 2
- # 1 -> 2
- # 2 -> 0 -> 3
- # 3 -> 3
- # BFS:
- # 2 0 3 1
+ g.add_edge(0, 1)
+ g.add_edge(0, 2)
+ g.add_edge(1, 2)
+ g.add_edge(2, 0)
+ g.add_edge(2, 3)
+ g.add_edge(3, 3)
+
+ g.print_graph()
+ # 0 : 1 -> 2
+ # 1 : 2
+ # 2 : 0 -> 3
+ # 3 : 3
+
+ assert sorted(g.bfs(2)) == [0, 1, 2, 3]
diff --git a/graphs/breadth_first_search_2.py b/graphs/breadth_first_search_2.py
new file mode 100644
index 000000000000..a90e963a4043
--- /dev/null
+++ b/graphs/breadth_first_search_2.py
@@ -0,0 +1,44 @@
+"""
+https://en.wikipedia.org/wiki/Breadth-first_search
+pseudo-code:
+breadth_first_search(graph G, start vertex s):
+// all nodes initially unexplored
+mark s as explored
+let Q = queue data structure, initialized with s
+while Q is non-empty:
+ remove the first node of Q, call it v
+ for each edge(v, w): // for w in graph[v]
+ if w unexplored:
+ mark w as explored
+ add w to Q (at the end)
+"""
+from __future__ import annotations
+
+G = {
+ "A": ["B", "C"],
+ "B": ["A", "D", "E"],
+ "C": ["A", "F"],
+ "D": ["B"],
+ "E": ["B", "F"],
+ "F": ["C", "E"],
+}
+
+
+def breadth_first_search(graph: dict, start: str) -> set[str]:
+ """
+ >>> ''.join(sorted(breadth_first_search(G, 'A')))
+ 'ABCDEF'
+ """
+ explored = {start}
+ queue = [start]
+ while queue:
+ v = queue.pop(0) # queue.popleft()
+ for w in graph[v]:
+ if w not in explored:
+ explored.add(w)
+ queue.append(w)
+ return explored
+
+
+if __name__ == "__main__":
+ print(breadth_first_search(G, "A"))
diff --git a/graphs/breadth_first_search_shortest_path.py b/graphs/breadth_first_search_shortest_path.py
new file mode 100644
index 000000000000..b43479d4659c
--- /dev/null
+++ b/graphs/breadth_first_search_shortest_path.py
@@ -0,0 +1,82 @@
+"""Breath First Search (BFS) can be used when finding the shortest path
+from a given source node to a target node in an unweighted graph.
+"""
+from __future__ import annotations
+
+graph = {
+ "A": ["B", "C", "E"],
+ "B": ["A", "D", "E"],
+ "C": ["A", "F", "G"],
+ "D": ["B"],
+ "E": ["A", "B", "D"],
+ "F": ["C"],
+ "G": ["C"],
+}
+
+
+class Graph:
+    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
+        """Graph is implemented as dictionary of adjacency lists. Also,
+        the source vertex has to be defined upon initialization.
+        """
+        self.graph = graph
+        # mapping node to its parent in resulting breadth first tree
+        self.parent: dict[str, str | None] = {}
+        self.source_vertex = source_vertex
+
+    def breath_first_search(self) -> None:
+        """This function is a helper for running breath first search on this graph.
+        >>> g = Graph(graph, "G")
+        >>> g.breath_first_search()
+        >>> g.parent
+        {'G': None, 'C': 'G', 'A': 'C', 'F': 'C', 'B': 'A', 'E': 'A', 'D': 'B'}
+        """
+        visited = {self.source_vertex}
+        self.parent[self.source_vertex] = None
+        queue = [self.source_vertex]  # first in first out queue
+
+        while queue:
+            vertex = queue.pop(0)
+            for adjacent_vertex in self.graph[vertex]:
+                if adjacent_vertex not in visited:
+                    visited.add(adjacent_vertex)
+                    self.parent[adjacent_vertex] = vertex
+                    queue.append(adjacent_vertex)
+
+    def shortest_path(self, target_vertex: str) -> str:
+        """This shortest path function returns a string, describing the result:
+        1.) No path is found. The string is a human readable message to indicate this.
+        2.) The shortest path is found. The string is in the form
+        `v1(->v2->v3->...->vn)`, where v1 is the source vertex and vn is the target
+        vertex, if such a path exists.
+
+        >>> g = Graph(graph, "G")
+        >>> g.breath_first_search()
+
+        Case 1 - No path is found.
+        >>> g.shortest_path("Foo")
+        'No path from vertex:G to vertex:Foo'
+
+        Case 2 - The path is found.
+        >>> g.shortest_path("D")
+        'G->C->A->B->D'
+        >>> g.shortest_path("G")
+        'G'
+        """
+        if target_vertex == self.source_vertex:
+            return f"{self.source_vertex}"
+        elif not self.parent.get(target_vertex):
+            return f"No path from vertex:{self.source_vertex} to vertex:{target_vertex}"
+        else:
+            return self.shortest_path(self.parent[target_vertex]) + f"->{target_vertex}"
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
+ g = Graph(graph, "G")
+ g.breath_first_search()
+ print(g.shortest_path("D"))
+ print(g.shortest_path("G"))
+ print(g.shortest_path("Foo"))
diff --git a/graphs/check_bipartite_graph_bfs.py b/graphs/check_bipartite_graph_bfs.py
index 1b9c32c6ccc4..00b771649b5d 100644
--- a/graphs/check_bipartite_graph_bfs.py
+++ b/graphs/check_bipartite_graph_bfs.py
@@ -1,21 +1,22 @@
# Check whether Graph is Bipartite or Not using BFS
+
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
-def checkBipartite(l):
+def checkBipartite(graph):
queue = []
- visited = [False] * len(l)
- color = [-1] * len(l)
+ visited = [False] * len(graph)
+ color = [-1] * len(graph)
def bfs():
- while(queue):
+ while queue:
u = queue.pop(0)
visited[u] = True
- for neighbour in l[u]:
+ for neighbour in graph[u]:
if neighbour == u:
return False
@@ -29,15 +30,16 @@ def bfs():
return True
- for i in range(len(l)):
+ for i in range(len(graph)):
if not visited[i]:
queue.append(i)
color[i] = 0
- if bfs() == False:
+ if bfs() is False:
return False
return True
-# Adjacency List of graph
-l = {0:[1,3], 1:[0,2], 2:[1,3], 3:[0,2]}
-print(checkBipartite(l))
+
+if __name__ == "__main__":
+ # Adjacency List of graph
+ print(checkBipartite({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]}))
diff --git a/graphs/check_bipartite_graph_dfs.py b/graphs/check_bipartite_graph_dfs.py
new file mode 100644
index 000000000000..fd644230449c
--- /dev/null
+++ b/graphs/check_bipartite_graph_dfs.py
@@ -0,0 +1,37 @@
+# Check whether Graph is Bipartite or Not using DFS
+
+
+# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
+# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
+# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
+# or u belongs to V and v to U. We can also say that there is no edge that connects
+# vertices of same set.
+def check_bipartite_dfs(graph: dict) -> bool:
+    """Return True if the given adjacency-list graph is 2-colorable (bipartite)."""
+    visited = [False] * len(graph)
+    color = [-1] * len(graph)
+
+    def dfs(v, c):
+        # color vertex v with c, and its DFS-tree children with the opposite color
+        visited[v] = True
+        color[v] = c
+        for u in graph[v]:
+            if not visited[u]:
+                dfs(u, 1 - c)
+
+    for i in range(len(graph)):
+        if not visited[i]:
+            dfs(i, 0)
+
+    # a graph is bipartite iff no edge joins two vertices of the same color
+    for i in range(len(graph)):
+        for j in graph[i]:
+            if color[i] == color[j]:
+                return False
+
+    return True
+
+
+if __name__ == "__main__":
+    # Adjacency list of graph
+    print(check_bipartite_dfs({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}))
diff --git a/graphs/connected_components.py b/graphs/connected_components.py
new file mode 100644
index 000000000000..4af7803d74a7
--- /dev/null
+++ b/graphs/connected_components.py
@@ -0,0 +1,58 @@
+"""
+https://en.wikipedia.org/wiki/Component_(graph_theory)
+
+Finding connected components in graph
+
+"""
+
+test_graph_1 = {0: [1, 2], 1: [0, 3], 2: [0], 3: [1], 4: [5, 6], 5: [4, 6], 6: [4, 5]}
+
+test_graph_2 = {0: [1, 2, 3], 1: [0, 3], 2: [0], 3: [0, 1], 4: [], 5: []}
+
+
+def dfs(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
+    """
+    Use depth first search to find all vertices
+    being in the same component as initial vertex
+    >>> dfs(test_graph_1, 0, 5 * [False])
+    [0, 1, 3, 2]
+    >>> dfs(test_graph_2, 0, 6 * [False])
+    [0, 1, 3, 2]
+    """
+
+    visited[vert] = True
+    connected_verts = []
+
+    for neighbour in graph[vert]:
+        if not visited[neighbour]:
+            connected_verts += dfs(graph, neighbour, visited)
+
+    return [vert] + connected_verts
+
+
+def connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
+    """
+    This function takes graph as a parameter
+    and then returns the list of connected components
+    >>> connected_components(test_graph_1)
+    [[0, 1, 3, 2], [4, 5, 6]]
+    >>> connected_components(test_graph_2)
+    [[0, 1, 3, 2], [4], [5]]
+    """
+
+    graph_size = len(graph)
+    visited = graph_size * [False]
+    components_list = []
+
+    for i in range(graph_size):
+        if not visited[i]:
+            i_connected = dfs(graph, i, visited)
+            components_list.append(i_connected)
+
+    return components_list
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/graphs/depth_first_search.py b/graphs/depth_first_search.py
index 98faf61354f9..907cc172f253 100644
--- a/graphs/depth_first_search.py
+++ b/graphs/depth_first_search.py
@@ -1,66 +1,49 @@
-#!/usr/bin/python
-# encoding=utf8
-
-""" Author: OMKAR PATHAK """
-from __future__ import print_function
-
-
-class Graph():
- def __init__(self):
- self.vertex = {}
-
- # for printing the Graph vertexes
- def printGraph(self):
- print(self.vertex)
- for i in self.vertex.keys():
- print(i,' -> ', ' -> '.join([str(j) for j in self.vertex[i]]))
-
- # for adding the edge beween two vertexes
- def addEdge(self, fromVertex, toVertex):
- # check if vertex is already present,
- if fromVertex in self.vertex.keys():
- self.vertex[fromVertex].append(toVertex)
- else:
- # else make a new vertex
- self.vertex[fromVertex] = [toVertex]
-
- def DFS(self):
- # visited array for storing already visited nodes
- visited = [False] * len(self.vertex)
-
- # call the recursive helper function
- for i in range(len(self.vertex)):
- if visited[i] == False:
- self.DFSRec(i, visited)
-
- def DFSRec(self, startVertex, visited):
- # mark start vertex as visited
- visited[startVertex] = True
-
- print(startVertex, end = ' ')
-
- # Recur for all the vertexes that are adjacent to this node
- for i in self.vertex.keys():
- if visited[i] == False:
- self.DFSRec(i, visited)
-
-if __name__ == '__main__':
- g = Graph()
- g.addEdge(0, 1)
- g.addEdge(0, 2)
- g.addEdge(1, 2)
- g.addEdge(2, 0)
- g.addEdge(2, 3)
- g.addEdge(3, 3)
-
- g.printGraph()
- print('DFS:')
- g.DFS()
-
- # OUTPUT:
- # 0 -> 1 -> 2
- # 1 -> 2
- # 2 -> 0 -> 3
- # 3 -> 3
- # DFS:
- # 0 1 2 3
+"""Non recursive implementation of a DFS algorithm."""
+
+from __future__ import annotations
+
+
+def depth_first_search(graph: dict, start: str) -> set[str]:
+    """Depth First Search on Graph
+    :param graph: directed graph in dictionary format
+    :param start: starting vertex as a string
+    :returns: the trace of the search
+    >>> G = { "A": ["B", "C", "D"], "B": ["A", "D", "E"],
+    ... "C": ["A", "F"], "D": ["B", "D"], "E": ["B", "F"],
+    ... "F": ["C", "E", "G"], "G": ["F"] }
+    >>> start = "A"
+    >>> output_G = list({'A', 'B', 'C', 'D', 'E', 'F', 'G'})
+    >>> all(x in output_G for x in list(depth_first_search(G, "A")))
+    True
+    >>> all(x in output_G for x in list(depth_first_search(G, "G")))
+    True
+    """
+    explored, stack = {start}, [start]
+
+    while stack:
+        v = stack.pop()
+        explored.add(v)
+        # Differences from BFS:
+        # 1) pop last element instead of first one
+        # 2) add adjacent elements to stack without exploring them
+        for adj in reversed(graph[v]):
+            if adj not in explored:
+                stack.append(adj)
+    return explored
+
+
+G = {
+ "A": ["B", "C", "D"],
+ "B": ["A", "D", "E"],
+ "C": ["A", "F"],
+ "D": ["B", "D"],
+ "E": ["B", "F"],
+ "F": ["C", "E", "G"],
+ "G": ["F"],
+}
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
+ print(depth_first_search(G, "A"))
diff --git a/graphs/depth_first_search_2.py b/graphs/depth_first_search_2.py
new file mode 100644
index 000000000000..3072d527c1c7
--- /dev/null
+++ b/graphs/depth_first_search_2.py
@@ -0,0 +1,65 @@
+#!/usr/bin/python
+
+""" Author: OMKAR PATHAK """
+
+
+class Graph:
+ def __init__(self):
+ self.vertex = {}
+
+ # for printing the Graph vertices
+ def print_graph(self) -> None:
+ print(self.vertex)
+ for i in self.vertex:
+ print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))
+
+ # for adding the edge between two vertices
+ def add_edge(self, from_vertex: int, to_vertex: int) -> None:
+ # check if vertex is already present,
+ if from_vertex in self.vertex:
+ self.vertex[from_vertex].append(to_vertex)
+ else:
+ # else make a new vertex
+ self.vertex[from_vertex] = [to_vertex]
+
+ def dfs(self) -> None:
+ # visited array for storing already visited nodes
+ visited = [False] * len(self.vertex)
+
+ # call the recursive helper function
+ for i in range(len(self.vertex)):
+ if not visited[i]:
+ self.dfs_recursive(i, visited)
+
+    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
+        # mark start vertex as visited
+        visited[start_vertex] = True
+
+        print(start_vertex, end=" ")
+
+        # visit every still-unvisited vertex key (scans all keys, not just neighbors)
+        for i in self.vertex:
+            if not visited[i]:
+                self.dfs_recursive(i, visited)
+
+
+if __name__ == "__main__":
+ g = Graph()
+ g.add_edge(0, 1)
+ g.add_edge(0, 2)
+ g.add_edge(1, 2)
+ g.add_edge(2, 0)
+ g.add_edge(2, 3)
+ g.add_edge(3, 3)
+
+ g.print_graph()
+ print("DFS:")
+ g.dfs()
+
+ # OUTPUT:
+ # 0 -> 1 -> 2
+ # 1 -> 2
+ # 2 -> 0 -> 3
+ # 3 -> 3
+ # DFS:
+ # 0 1 2 3
diff --git a/graphs/dijkstra.py b/graphs/dijkstra.py
index 6b08b28fcfd3..d15fcbbfeef0 100644
--- a/graphs/dijkstra.py
+++ b/graphs/dijkstra.py
@@ -1,47 +1,118 @@
-"""pseudo-code"""
-
"""
-DIJKSTRA(graph G, start vertex s,destination vertex d):
-// all nodes initially unexplored
-let H = min heap data structure, initialized with 0 and s [here 0 indicates the distance from start vertex]
-while H is non-empty:
- remove the first node and cost of H, call it U and cost
- if U is not explored
- mark U as explored
- if U is d:
- return cost // total cost from start to destination vertex
- for each edge(U, V): c=cost of edge(u,V) // for V in graph[U]
- if V unexplored:
- next=cost+c
- add next,V to H (at the end)
+pseudo-code
+
+DIJKSTRA(graph G, start vertex s, destination vertex d):
+
+//all nodes initially unexplored
+
+1 - let H = min heap data structure, initialized with 0 and s [here 0 indicates
+ the distance from start vertex s]
+2 - while H is non-empty:
+3 - remove the first node and cost of H, call it U and cost
+4 - if U has been previously explored:
+5 - go to the while loop, line 2 //Once a node is explored there is no need
+ to make it again
+6 - mark U as explored
+7 - if U is d:
+8 - return cost // total cost from start to destination vertex
+9 - for each edge(U, V): c=cost of edge(U,V) // for V in graph[U]
+10 - if V explored:
+11 - go to next V in line 9
+12 - total_cost = cost + c
+13 - add (total_cost,V) to H
+
+You can think at cost as a distance where Dijkstra finds the shortest distance
+between vertices s and v in a graph G. The use of a min heap as H guarantees
+that if a vertex has already been explored there will be no other path with
+shortest distance, that happens because heapq.heappop will always return the
+next vertex with the shortest distance, considering that the heap stores not
+only the distance between previous vertex and current vertex but the entire
+distance between each vertex that makes up the path from start vertex to target
+vertex.
"""
import heapq
 def dijkstra(graph, start, end):
+    """Return the cost of the shortest path between start and end (-1 if no path).
+
+    >>> dijkstra(G, "E", "C")
+    6
+    >>> dijkstra(G2, "E", "F")
+    3
+    >>> dijkstra(G3, "E", "F")
+    3
+    """
+
     heap = [(0, start)]  # cost from start node,end node
-    visited = []
+    visited = set()
     while heap:
         (cost, u) = heapq.heappop(heap)
         if u in visited:
             continue
-        visited.append(u)
+        visited.add(u)
         if u == end:
             return cost
-        for v, c in G[u]:
+        for v, c in graph[u]:
             if v in visited:
                 continue
             next = cost + c
             heapq.heappush(heap, (next, v))
-    return (-1, -1)
+    return -1
+
+
+G = {
+ "A": [["B", 2], ["C", 5]],
+ "B": [["A", 2], ["D", 3], ["E", 1], ["F", 1]],
+ "C": [["A", 5], ["F", 3]],
+ "D": [["B", 3]],
+ "E": [["B", 4], ["F", 3]],
+ "F": [["C", 3], ["E", 3]],
+}
+
+r"""
+Layout of G2:
+
+E -- 1 --> B -- 1 --> C -- 1 --> D -- 1 --> F
+ \ /\
+ \ ||
+ ----------------- 3 --------------------
+"""
+G2 = {
+ "B": [["C", 1]],
+ "C": [["D", 1]],
+ "D": [["F", 1]],
+ "E": [["B", 1], ["F", 3]],
+ "F": [],
+}
+
+r"""
+Layout of G3:
+
+E -- 1 --> B -- 1 --> C -- 1 --> D -- 1 --> F
+ \ /\
+ \ ||
+ -------- 2 ---------> G ------- 1 ------
+"""
+G3 = {
+ "B": [["C", 1]],
+ "C": [["D", 1]],
+ "D": [["F", 1]],
+ "E": [["B", 1], ["G", 2]],
+ "F": [],
+ "G": [["F", 1]],
+}
+
+shortDistance = dijkstra(G, "E", "C")
+print(shortDistance) # E -- 3 --> F -- 3 --> C == 6
+
+shortDistance = dijkstra(G2, "E", "F")
+print(shortDistance) # E -- 3 --> F == 3
+shortDistance = dijkstra(G3, "E", "F")
+print(shortDistance) # E -- 2 --> G -- 1 --> F == 3
-G = {'A': [['B', 2], ['C', 5]],
- 'B': [['A', 2], ['D', 3], ['E', 1]],
- 'C': [['A', 5], ['F', 3]],
- 'D': [['B', 3]],
- 'E': [['B', 1], ['F', 3]],
- 'F': [['C', 3], ['E', 3]]}
+if __name__ == "__main__":
+ import doctest
-shortDistance = dijkstra(G, 'E', 'C')
-print(shortDistance)
+ doctest.testmod()
diff --git a/graphs/dijkstra_2.py b/graphs/dijkstra_2.py
index a6c340e8a68d..762884136e4a 100644
--- a/graphs/dijkstra_2.py
+++ b/graphs/dijkstra_2.py
@@ -1,57 +1,58 @@
-from __future__ import print_function
-
def printDist(dist, V):
- print("\nVertex Distance")
- for i in range(V):
- if dist[i] != float('inf') :
- print(i,"\t",int(dist[i]),end = "\t")
- else:
- print(i,"\t","INF",end="\t")
- print()
+ print("\nVertex Distance")
+ for i in range(V):
+ if dist[i] != float("inf"):
+ print(i, "\t", int(dist[i]), end="\t")
+ else:
+ print(i, "\t", "INF", end="\t")
+ print()
+
def minDist(mdist, vset, V):
- minVal = float('inf')
- minInd = -1
- for i in range(V):
- if (not vset[i]) and mdist[i] < minVal :
- minInd = i
- minVal = mdist[i]
- return minInd
+ minVal = float("inf")
+ minInd = -1
+ for i in range(V):
+ if (not vset[i]) and mdist[i] < minVal:
+ minInd = i
+ minVal = mdist[i]
+ return minInd
+
def Dijkstra(graph, V, src):
- mdist=[float('inf') for i in range(V)]
- vset = [False for i in range(V)]
- mdist[src] = 0.0
-
- for i in range(V-1):
- u = minDist(mdist, vset, V)
- vset[u] = True
-
- for v in range(V):
- if (not vset[v]) and graph[u][v]!=float('inf') and mdist[u] + graph[u][v] < mdist[v]:
- mdist[v] = mdist[u] + graph[u][v]
-
-
-
- printDist(mdist, V)
-
-
-
-#MAIN
-V = int(input("Enter number of vertices: "))
-E = int(input("Enter number of edges: "))
-
-graph = [[float('inf') for i in range(V)] for j in range(V)]
-
-for i in range(V):
- graph[i][i] = 0.0
-
-for i in range(E):
- print("\nEdge ",i+1)
- src = int(input("Enter source:"))
- dst = int(input("Enter destination:"))
- weight = float(input("Enter weight:"))
- graph[src][dst] = weight
-
-gsrc = int(input("\nEnter shortest path source:"))
-Dijkstra(graph, V, gsrc)
+ mdist = [float("inf") for i in range(V)]
+ vset = [False for i in range(V)]
+ mdist[src] = 0.0
+
+ for i in range(V - 1):
+ u = minDist(mdist, vset, V)
+ vset[u] = True
+
+ for v in range(V):
+ if (
+ (not vset[v])
+ and graph[u][v] != float("inf")
+ and mdist[u] + graph[u][v] < mdist[v]
+ ):
+ mdist[v] = mdist[u] + graph[u][v]
+
+ printDist(mdist, V)
+
+
+if __name__ == "__main__":
+ V = int(input("Enter number of vertices: ").strip())
+ E = int(input("Enter number of edges: ").strip())
+
+ graph = [[float("inf") for i in range(V)] for j in range(V)]
+
+ for i in range(V):
+ graph[i][i] = 0.0
+
+ for i in range(E):
+ print("\nEdge ", i + 1)
+ src = int(input("Enter source:").strip())
+ dst = int(input("Enter destination:").strip())
+ weight = float(input("Enter weight:").strip())
+ graph[src][dst] = weight
+
+ gsrc = int(input("\nEnter shortest path source:").strip())
+ Dijkstra(graph, V, gsrc)
diff --git a/graphs/dijkstra_algorithm.py b/graphs/dijkstra_algorithm.py
index 985c7f6c1301..6b64834acd81 100644
--- a/graphs/dijkstra_algorithm.py
+++ b/graphs/dijkstra_algorithm.py
@@ -2,10 +2,10 @@
# Author: Shubham Malik
# References: https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm
-from __future__ import print_function
import math
import sys
-# For storing the vertex set to retreive node with the lowest distance
+
+# For storing the vertex set to retrieve node with the lowest distance
class PriorityQueue:
@@ -13,7 +13,7 @@ class PriorityQueue:
def __init__(self):
self.cur_size = 0
self.array = []
- self.pos = {} # To store the pos of node in array
+ self.pos = {} # To store the pos of node in array
def isEmpty(self):
return self.cur_size == 0
@@ -79,8 +79,8 @@ def decrease_key(self, tup, new_d):
class Graph:
def __init__(self, num):
- self.adjList = {} # To store graph: u -> (v,w)
- self.num_nodes = num # Number of nodes in graph
+ self.adjList = {} # To store graph: u -> (v,w)
+ self.num_nodes = num # Number of nodes in graph
# To store the distance from source vertex
self.dist = [0] * self.num_nodes
self.par = [-1] * self.num_nodes # To store the path
@@ -103,8 +103,7 @@ def add_edge(self, u, v, w):
def show_graph(self):
# u -> v(w)
for u in self.adjList:
- print(u, '->', ' -> '.join(str("{}({})".format(v, w))
- for v, w in self.adjList[u]))
+ print(u, "->", " -> ".join(str(f"{v}({w})") for v, w in self.adjList[u]))
def dijkstra(self, src):
# Flush old junk values in par[]
@@ -136,9 +135,9 @@ def dijkstra(self, src):
self.show_distances(src)
def show_distances(self, src):
- print("Distance from node: {}".format(src))
+ print(f"Distance from node: {src}")
for u in range(self.num_nodes):
- print('Node {} has distance: {}'.format(u, self.dist[u]))
+ print(f"Node {u} has distance: {self.dist[u]}")
def show_path(self, src, dest):
# To show the shortest path from src to dest
@@ -158,16 +157,16 @@ def show_path(self, src, dest):
path.append(src)
path.reverse()
- print('----Path to reach {} from {}----'.format(dest, src))
+ print(f"----Path to reach {dest} from {src}----")
for u in path:
- print('{}'.format(u), end=' ')
+ print(f"{u}", end=" ")
if u != dest:
- print('-> ', end='')
+ print("-> ", end="")
- print('\nTotal cost of path: ', cost)
+ print("\nTotal cost of path: ", cost)
-if __name__ == '__main__':
+if __name__ == "__main__":
graph = Graph(9)
graph.add_edge(0, 1, 4)
graph.add_edge(0, 7, 8)
diff --git a/graphs/dinic.py b/graphs/dinic.py
new file mode 100644
index 000000000000..aaf3a119525c
--- /dev/null
+++ b/graphs/dinic.py
@@ -0,0 +1,94 @@
+INF = float("inf")
+
+
+class Dinic:
+ def __init__(self, n):
+ self.lvl = [0] * n
+ self.ptr = [0] * n
+ self.q = [0] * n
+ self.adj = [[] for _ in range(n)]
+
+ """
+    Here we will add our edges, each described by the following parameters:
+ vertex closest to source, vertex closest to sink and flow capacity
+ through that edge ...
+ """
+
+ def add_edge(self, a, b, c, rcap=0):
+ self.adj[a].append([b, len(self.adj[b]), c, 0])
+ self.adj[b].append([a, len(self.adj[a]) - 1, rcap, 0])
+
+ # This is a sample depth first search to be used at max_flow
+ def depth_first_search(self, vertex, sink, flow):
+ if vertex == sink or not flow:
+ return flow
+
+ for i in range(self.ptr[vertex], len(self.adj[vertex])):
+ e = self.adj[vertex][i]
+ if self.lvl[e[0]] == self.lvl[vertex] + 1:
+ p = self.depth_first_search(e[0], sink, min(flow, e[2] - e[3]))
+ if p:
+ self.adj[vertex][i][3] += p
+ self.adj[e[0]][e[1]][3] -= p
+ return p
+ self.ptr[vertex] = self.ptr[vertex] + 1
+ return 0
+
+ # Here we calculate the flow that reaches the sink
+ def max_flow(self, source, sink):
+ flow, self.q[0] = 0, source
+ for l in range(31): # noqa: E741 l = 30 maybe faster for random data
+ while True:
+ self.lvl, self.ptr = [0] * len(self.q), [0] * len(self.q)
+ qi, qe, self.lvl[source] = 0, 1, 1
+ while qi < qe and not self.lvl[sink]:
+ v = self.q[qi]
+ qi += 1
+ for e in self.adj[v]:
+ if not self.lvl[e[0]] and (e[2] - e[3]) >> (30 - l):
+ self.q[qe] = e[0]
+ qe += 1
+ self.lvl[e[0]] = self.lvl[v] + 1
+
+ p = self.depth_first_search(source, sink, INF)
+ while p:
+ flow += p
+ p = self.depth_first_search(source, sink, INF)
+
+ if not self.lvl[sink]:
+ break
+
+ return flow
+
+
+# Example to use
+
+"""
+This is a bipartite graph: it has 4 vertices near the source
+and 4 vertices near the sink
+"""
+# Here we make a graph with 10 vertices (source and sink included)
+graph = Dinic(10)
+source = 0
+sink = 9
+"""
+Now we add edges of capacity 1 from the source to each source-side vertex
+(source -> source vertices)
+"""
+for vertex in range(1, 5):
+ graph.add_edge(source, vertex, 1)
+"""
+We will do the same thing for the vertices near the sink, but from vertex to sink
+(sink vertices -> sink)
+"""
+for vertex in range(5, 9):
+ graph.add_edge(vertex, sink, 1)
+"""
+Finally we add edges from the vertices near the source to the vertices near the sink.
+(source vertices -> sink vertices)
+"""
+for vertex in range(1, 5):
+ graph.add_edge(vertex, vertex + 4, 1)
+
+# Now we can compute the maximum flow from source to sink
+print(graph.max_flow(source, sink))
diff --git a/graphs/directed_and_undirected_(weighted)_graph.py b/graphs/directed_and_undirected_(weighted)_graph.py
new file mode 100644
index 000000000000..5cfa9e13edd9
--- /dev/null
+++ b/graphs/directed_and_undirected_(weighted)_graph.py
@@ -0,0 +1,496 @@
+from collections import deque
+from math import floor
+from random import random
+from time import time
+
+# the default weight is 1 if not assigned but all the implementation is weighted
+
+
+class DirectedGraph:
+ def __init__(self):
+ self.graph = {}
+
+ # adding vertices and edges
+ # adding the weight is optional
+ # handles repetition
+ def add_pair(self, u, v, w=1):
+ if self.graph.get(u):
+ if self.graph[u].count([w, v]) == 0:
+ self.graph[u].append([w, v])
+ else:
+ self.graph[u] = [[w, v]]
+ if not self.graph.get(v):
+ self.graph[v] = []
+
+ def all_nodes(self):
+ return list(self.graph)
+
+ # handles if the input does not exist
+ def remove_pair(self, u, v):
+ if self.graph.get(u):
+ for _ in self.graph[u]:
+ if _[1] == v:
+ self.graph[u].remove(_)
+
+    # if no destination is given, the default value is -1
+ def dfs(self, s=-2, d=-1):
+ if s == d:
+ return []
+ stack = []
+ visited = []
+ if s == -2:
+ s = list(self.graph)[0]
+ stack.append(s)
+ visited.append(s)
+ ss = s
+
+ while True:
+ # check if there is any non isolated nodes
+ if len(self.graph[s]) != 0:
+ ss = s
+ for node in self.graph[s]:
+ if visited.count(node[1]) < 1:
+ if node[1] == d:
+ visited.append(d)
+ return visited
+ else:
+ stack.append(node[1])
+ visited.append(node[1])
+ ss = node[1]
+ break
+
+ # check if all the children are visited
+ if s == ss:
+ stack.pop()
+ if len(stack) != 0:
+ s = stack[len(stack) - 1]
+ else:
+ s = ss
+
+            # check if we have reached the starting point
+ if len(stack) == 0:
+ return visited
+
+ # c is the count of nodes you want and if you leave it or pass -1 to the function
+ # the count will be random from 10 to 10000
+ def fill_graph_randomly(self, c=-1):
+ if c == -1:
+ c = floor(random() * 10000) + 10
+ for i in range(c):
+ # every vertex has max 100 edges
+ for _ in range(floor(random() * 102) + 1):
+ n = floor(random() * c) + 1
+ if n != i:
+ self.add_pair(i, n, 1)
+
+ def bfs(self, s=-2):
+ d = deque()
+ visited = []
+ if s == -2:
+ s = list(self.graph)[0]
+ d.append(s)
+ visited.append(s)
+ while d:
+ s = d.popleft()
+ if len(self.graph[s]) != 0:
+ for node in self.graph[s]:
+ if visited.count(node[1]) < 1:
+ d.append(node[1])
+ visited.append(node[1])
+ return visited
+
+ def in_degree(self, u):
+ count = 0
+ for x in self.graph:
+ for y in self.graph[x]:
+ if y[1] == u:
+ count += 1
+ return count
+
+ def out_degree(self, u):
+ return len(self.graph[u])
+
+ def topological_sort(self, s=-2):
+ stack = []
+ visited = []
+ if s == -2:
+ s = list(self.graph)[0]
+ stack.append(s)
+ visited.append(s)
+ ss = s
+ sorted_nodes = []
+
+ while True:
+ # check if there is any non isolated nodes
+ if len(self.graph[s]) != 0:
+ ss = s
+ for node in self.graph[s]:
+ if visited.count(node[1]) < 1:
+ stack.append(node[1])
+ visited.append(node[1])
+ ss = node[1]
+ break
+
+ # check if all the children are visited
+ if s == ss:
+ sorted_nodes.append(stack.pop())
+ if len(stack) != 0:
+ s = stack[len(stack) - 1]
+ else:
+ s = ss
+
+            # check if we have reached the starting point
+ if len(stack) == 0:
+ return sorted_nodes
+
+ def cycle_nodes(self):
+ stack = []
+ visited = []
+ s = list(self.graph)[0]
+ stack.append(s)
+ visited.append(s)
+ parent = -2
+ indirect_parents = []
+ ss = s
+ on_the_way_back = False
+ anticipating_nodes = set()
+
+ while True:
+ # check if there is any non isolated nodes
+ if len(self.graph[s]) != 0:
+ ss = s
+ for node in self.graph[s]:
+ if (
+ visited.count(node[1]) > 0
+ and node[1] != parent
+ and indirect_parents.count(node[1]) > 0
+ and not on_the_way_back
+ ):
+ len_stack = len(stack) - 1
+ while True and len_stack >= 0:
+ if stack[len_stack] == node[1]:
+ anticipating_nodes.add(node[1])
+ break
+ else:
+ anticipating_nodes.add(stack[len_stack])
+ len_stack -= 1
+ if visited.count(node[1]) < 1:
+ stack.append(node[1])
+ visited.append(node[1])
+ ss = node[1]
+ break
+
+ # check if all the children are visited
+ if s == ss:
+ stack.pop()
+ on_the_way_back = True
+ if len(stack) != 0:
+ s = stack[len(stack) - 1]
+ else:
+ on_the_way_back = False
+ indirect_parents.append(parent)
+ parent = s
+ s = ss
+
+            # check if we have reached the starting point
+ if len(stack) == 0:
+ return list(anticipating_nodes)
+
+ def has_cycle(self):
+ stack = []
+ visited = []
+ s = list(self.graph)[0]
+ stack.append(s)
+ visited.append(s)
+ parent = -2
+ indirect_parents = []
+ ss = s
+ on_the_way_back = False
+ anticipating_nodes = set()
+
+ while True:
+ # check if there is any non isolated nodes
+ if len(self.graph[s]) != 0:
+ ss = s
+ for node in self.graph[s]:
+ if (
+ visited.count(node[1]) > 0
+ and node[1] != parent
+ and indirect_parents.count(node[1]) > 0
+ and not on_the_way_back
+ ):
+ len_stack_minus_one = len(stack) - 1
+ while True and len_stack_minus_one >= 0:
+ if stack[len_stack_minus_one] == node[1]:
+ anticipating_nodes.add(node[1])
+ break
+ else:
+ return True
+ # TODO:The following code is unreachable.
+ anticipating_nodes.add(stack[len_stack_minus_one])
+ len_stack_minus_one -= 1
+ if visited.count(node[1]) < 1:
+ stack.append(node[1])
+ visited.append(node[1])
+ ss = node[1]
+ break
+
+ # check if all the children are visited
+ if s == ss:
+ stack.pop()
+ on_the_way_back = True
+ if len(stack) != 0:
+ s = stack[len(stack) - 1]
+ else:
+ on_the_way_back = False
+ indirect_parents.append(parent)
+ parent = s
+ s = ss
+
+            # check if we have reached the starting point
+ if len(stack) == 0:
+ return False
+
+ def dfs_time(self, s=-2, e=-1):
+ begin = time()
+ self.dfs(s, e)
+ end = time()
+ return end - begin
+
+ def bfs_time(self, s=-2):
+ begin = time()
+ self.bfs(s)
+ end = time()
+ return end - begin
+
+
+class Graph:
+ def __init__(self):
+ self.graph = {}
+
+ # adding vertices and edges
+ # adding the weight is optional
+ # handles repetition
+ def add_pair(self, u, v, w=1):
+ # check if the u exists
+ if self.graph.get(u):
+ # if there already is a edge
+ if self.graph[u].count([w, v]) == 0:
+ self.graph[u].append([w, v])
+ else:
+ # if u does not exist
+ self.graph[u] = [[w, v]]
+ # add the other way
+ if self.graph.get(v):
+ # if there already is a edge
+ if self.graph[v].count([w, u]) == 0:
+ self.graph[v].append([w, u])
+ else:
+            # if v does not exist
+ self.graph[v] = [[w, u]]
+
+ # handles if the input does not exist
+ def remove_pair(self, u, v):
+ if self.graph.get(u):
+ for _ in self.graph[u]:
+ if _[1] == v:
+ self.graph[u].remove(_)
+ # the other way round
+ if self.graph.get(v):
+ for _ in self.graph[v]:
+ if _[1] == u:
+ self.graph[v].remove(_)
+
+    # if no destination is given, the default value is -1
+ def dfs(self, s=-2, d=-1):
+ if s == d:
+ return []
+ stack = []
+ visited = []
+ if s == -2:
+ s = list(self.graph)[0]
+ stack.append(s)
+ visited.append(s)
+ ss = s
+
+ while True:
+ # check if there is any non isolated nodes
+ if len(self.graph[s]) != 0:
+ ss = s
+ for node in self.graph[s]:
+ if visited.count(node[1]) < 1:
+ if node[1] == d:
+ visited.append(d)
+ return visited
+ else:
+ stack.append(node[1])
+ visited.append(node[1])
+ ss = node[1]
+ break
+
+ # check if all the children are visited
+ if s == ss:
+ stack.pop()
+ if len(stack) != 0:
+ s = stack[len(stack) - 1]
+ else:
+ s = ss
+
+            # check if we have reached the starting point
+ if len(stack) == 0:
+ return visited
+
+ # c is the count of nodes you want and if you leave it or pass -1 to the function
+ # the count will be random from 10 to 10000
+ def fill_graph_randomly(self, c=-1):
+ if c == -1:
+ c = floor(random() * 10000) + 10
+ for i in range(c):
+ # every vertex has max 100 edges
+ for _ in range(floor(random() * 102) + 1):
+ n = floor(random() * c) + 1
+ if n != i:
+ self.add_pair(i, n, 1)
+
+ def bfs(self, s=-2):
+ d = deque()
+ visited = []
+ if s == -2:
+ s = list(self.graph)[0]
+ d.append(s)
+ visited.append(s)
+ while d:
+ s = d.popleft()
+ if len(self.graph[s]) != 0:
+ for node in self.graph[s]:
+ if visited.count(node[1]) < 1:
+ d.append(node[1])
+ visited.append(node[1])
+ return visited
+
+ def degree(self, u):
+ return len(self.graph[u])
+
+ def cycle_nodes(self):
+ stack = []
+ visited = []
+ s = list(self.graph)[0]
+ stack.append(s)
+ visited.append(s)
+ parent = -2
+ indirect_parents = []
+ ss = s
+ on_the_way_back = False
+ anticipating_nodes = set()
+
+ while True:
+ # check if there is any non isolated nodes
+ if len(self.graph[s]) != 0:
+ ss = s
+ for node in self.graph[s]:
+ if (
+ visited.count(node[1]) > 0
+ and node[1] != parent
+ and indirect_parents.count(node[1]) > 0
+ and not on_the_way_back
+ ):
+ len_stack = len(stack) - 1
+ while True and len_stack >= 0:
+ if stack[len_stack] == node[1]:
+ anticipating_nodes.add(node[1])
+ break
+ else:
+ anticipating_nodes.add(stack[len_stack])
+ len_stack -= 1
+ if visited.count(node[1]) < 1:
+ stack.append(node[1])
+ visited.append(node[1])
+ ss = node[1]
+ break
+
+ # check if all the children are visited
+ if s == ss:
+ stack.pop()
+ on_the_way_back = True
+ if len(stack) != 0:
+ s = stack[len(stack) - 1]
+ else:
+ on_the_way_back = False
+ indirect_parents.append(parent)
+ parent = s
+ s = ss
+
+            # check if we have reached the starting point
+ if len(stack) == 0:
+ return list(anticipating_nodes)
+
+ def has_cycle(self):
+ stack = []
+ visited = []
+ s = list(self.graph)[0]
+ stack.append(s)
+ visited.append(s)
+ parent = -2
+ indirect_parents = []
+ ss = s
+ on_the_way_back = False
+ anticipating_nodes = set()
+
+ while True:
+ # check if there is any non isolated nodes
+ if len(self.graph[s]) != 0:
+ ss = s
+ for node in self.graph[s]:
+ if (
+ visited.count(node[1]) > 0
+ and node[1] != parent
+ and indirect_parents.count(node[1]) > 0
+ and not on_the_way_back
+ ):
+ len_stack_minus_one = len(stack) - 1
+ while True and len_stack_minus_one >= 0:
+ if stack[len_stack_minus_one] == node[1]:
+ anticipating_nodes.add(node[1])
+ break
+ else:
+ return True
+ # TODO: the following code is unreachable
+ # is this meant to be called in the else ?
+ anticipating_nodes.add(stack[len_stack_minus_one])
+ len_stack_minus_one -= 1
+ if visited.count(node[1]) < 1:
+ stack.append(node[1])
+ visited.append(node[1])
+ ss = node[1]
+ break
+
+ # check if all the children are visited
+ if s == ss:
+ stack.pop()
+ on_the_way_back = True
+ if len(stack) != 0:
+ s = stack[len(stack) - 1]
+ else:
+ on_the_way_back = False
+ indirect_parents.append(parent)
+ parent = s
+ s = ss
+
+            # check if we have reached the starting point
+ if len(stack) == 0:
+ return False
+
+ def all_nodes(self):
+ return list(self.graph)
+
+ def dfs_time(self, s=-2, e=-1):
+ begin = time()
+ self.dfs(s, e)
+ end = time()
+ return end - begin
+
+ def bfs_time(self, s=-2):
+ begin = time()
+ self.bfs(s)
+ end = time()
+ return end - begin
diff --git a/Graphs/edmonds_karp_Multiple_SourceAndSink.py b/graphs/edmonds_karp_multiple_source_and_sink.py
similarity index 85%
rename from Graphs/edmonds_karp_Multiple_SourceAndSink.py
rename to graphs/edmonds_karp_multiple_source_and_sink.py
index d231ac2c4cc3..0f359ff1aea3 100644
--- a/Graphs/edmonds_karp_Multiple_SourceAndSink.py
+++ b/graphs/edmonds_karp_multiple_source_and_sink.py
@@ -28,14 +28,13 @@ def _normalizeGraph(self, sources, sinks):
for i in sources:
maxInputFlow += sum(self.graph[i])
-
size = len(self.graph) + 1
for room in self.graph:
room.insert(0, 0)
self.graph.insert(0, [0] * size)
for i in sources:
self.graph[0][i + 1] = maxInputFlow
- self.sourceIndex = 0
+ self.sourceIndex = 0
size = len(self.graph) + 1
for room in self.graph:
@@ -45,7 +44,6 @@ def _normalizeGraph(self, sources, sinks):
self.graph[i + 1][size - 1] = maxInputFlow
self.sinkIndex = size - 1
-
def findMaximumFlow(self):
if self.maximumFlowAlgorithm is None:
raise Exception("You need to set maximum flow algorithm before.")
@@ -59,7 +57,7 @@ def setMaximumFlowAlgorithm(self, Algorithm):
self.maximumFlowAlgorithm = Algorithm(self)
-class FlowNetworkAlgorithmExecutor(object):
+class FlowNetworkAlgorithmExecutor:
def __init__(self, flowNetwork):
self.flowNetwork = flowNetwork
self.verticesCount = flowNetwork.verticesCount
@@ -80,10 +78,9 @@ def _algorithm(self):
pass
-
class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
def __init__(self, flowNetwork):
- super(MaximumFlowAlgorithmExecutor, self).__init__(flowNetwork)
+ super().__init__(flowNetwork)
# use this to save your result
self.maximumFlow = -1
@@ -93,9 +90,10 @@ def getMaximumFlow(self):
return self.maximumFlow
+
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
def __init__(self, flowNetwork):
- super(PushRelabelExecutor, self).__init__(flowNetwork)
+ super().__init__(flowNetwork)
self.preflow = [[0] * self.verticesCount for i in range(self.verticesCount)]
@@ -112,8 +110,11 @@ def _algorithm(self):
self.excesses[nextVertexIndex] += bandwidth
# Relabel-to-front selection rule
- verticesList = [i for i in range(self.verticesCount)
- if i != self.sourceIndex and i != self.sinkIndex]
+ verticesList = [
+ i
+ for i in range(self.verticesCount)
+ if i != self.sourceIndex and i != self.sinkIndex
+ ]
# move through list
i = 0
@@ -135,15 +136,21 @@ def processVertex(self, vertexIndex):
while self.excesses[vertexIndex] > 0:
for neighbourIndex in range(self.verticesCount):
# if it's neighbour and current vertex is higher
- if self.graph[vertexIndex][neighbourIndex] - self.preflow[vertexIndex][neighbourIndex] > 0\
- and self.heights[vertexIndex] > self.heights[neighbourIndex]:
+ if (
+ self.graph[vertexIndex][neighbourIndex]
+ - self.preflow[vertexIndex][neighbourIndex]
+ > 0
+ and self.heights[vertexIndex] > self.heights[neighbourIndex]
+ ):
self.push(vertexIndex, neighbourIndex)
self.relabel(vertexIndex)
def push(self, fromIndex, toIndex):
- preflowDelta = min(self.excesses[fromIndex],
- self.graph[fromIndex][toIndex] - self.preflow[fromIndex][toIndex])
+ preflowDelta = min(
+ self.excesses[fromIndex],
+ self.graph[fromIndex][toIndex] - self.preflow[fromIndex][toIndex],
+ )
self.preflow[fromIndex][toIndex] += preflowDelta
self.preflow[toIndex][fromIndex] -= preflowDelta
self.excesses[fromIndex] -= preflowDelta
@@ -152,14 +159,18 @@ def push(self, fromIndex, toIndex):
def relabel(self, vertexIndex):
minHeight = None
for toIndex in range(self.verticesCount):
- if self.graph[vertexIndex][toIndex] - self.preflow[vertexIndex][toIndex] > 0:
+ if (
+ self.graph[vertexIndex][toIndex] - self.preflow[vertexIndex][toIndex]
+ > 0
+ ):
if minHeight is None or self.heights[toIndex] < minHeight:
minHeight = self.heights[toIndex]
if minHeight is not None:
self.heights[vertexIndex] = minHeight + 1
-if __name__ == '__main__':
+
+if __name__ == "__main__":
entrances = [0]
exits = [3]
# graph = [
@@ -179,4 +190,4 @@ def relabel(self, vertexIndex):
# and calculate
maximumFlow = flowNetwork.findMaximumFlow()
- print("maximum flow is {}".format(maximumFlow))
+ print(f"maximum flow is {maximumFlow}")
diff --git a/graphs/eulerian_path_and_circuit_for_undirected_graph.py b/graphs/eulerian_path_and_circuit_for_undirected_graph.py
new file mode 100644
index 000000000000..7850933b0201
--- /dev/null
+++ b/graphs/eulerian_path_and_circuit_for_undirected_graph.py
@@ -0,0 +1,71 @@
+# Eulerian Path is a path in graph that visits every edge exactly once.
+# Eulerian Circuit is an Eulerian Path which starts and ends on the same
+# vertex.
+# time complexity is O(V+E)
+# space complexity is O(VE)
+
+
+# using dfs for finding eulerian path traversal
+def dfs(u, graph, visited_edge, path=[]):
+ path = path + [u]
+ for v in graph[u]:
+ if visited_edge[u][v] is False:
+ visited_edge[u][v], visited_edge[v][u] = True, True
+ path = dfs(v, graph, visited_edge, path)
+ return path
+
+
+# for checking whether the graph has an Euler path or circuit
+def check_circuit_or_path(graph, max_node):
+ odd_degree_nodes = 0
+ odd_node = -1
+ for i in range(max_node):
+ if i not in graph.keys():
+ continue
+ if len(graph[i]) % 2 == 1:
+ odd_degree_nodes += 1
+ odd_node = i
+ if odd_degree_nodes == 0:
+ return 1, odd_node
+ if odd_degree_nodes == 2:
+ return 2, odd_node
+ return 3, odd_node
+
+
+def check_euler(graph, max_node):
+ visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
+ check, odd_node = check_circuit_or_path(graph, max_node)
+ if check == 3:
+ print("graph is not Eulerian")
+ print("no path")
+ return
+ start_node = 1
+ if check == 2:
+ start_node = odd_node
+ print("graph has a Euler path")
+ if check == 1:
+ print("graph has a Euler cycle")
+ path = dfs(start_node, graph, visited_edge)
+ print(path)
+
+
+def main():
+ G1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
+ G2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
+ G3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
+ G4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
+ G5 = {
+ 1: [],
+ 2: []
+ # all degree is zero
+ }
+ max_node = 10
+ check_euler(G1, max_node)
+ check_euler(G2, max_node)
+ check_euler(G3, max_node)
+ check_euler(G4, max_node)
+ check_euler(G5, max_node)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/graphs/even_tree.py b/graphs/even_tree.py
index 9383ea9a13c1..c9aef6e7861f 100644
--- a/graphs/even_tree.py
+++ b/graphs/even_tree.py
@@ -12,7 +12,6 @@
Note: The tree input will be such that it can always be decomposed into
components containing an even number of nodes.
"""
-from __future__ import print_function
# pylint: disable=invalid-name
from collections import defaultdict
@@ -46,23 +45,13 @@ def even_tree():
dfs(1)
-if __name__ == '__main__':
+if __name__ == "__main__":
n, m = 10, 9
tree = defaultdict(list)
visited = {}
cuts = []
count = 0
- edges = [
- (2, 1),
- (3, 1),
- (4, 3),
- (5, 2),
- (6, 1),
- (7, 2),
- (8, 6),
- (9, 8),
- (10, 8),
- ]
+ edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
diff --git a/graphs/finding_bridges.py b/graphs/finding_bridges.py
index 56533dd48bde..6555dd7bc29e 100644
--- a/graphs/finding_bridges.py
+++ b/graphs/finding_bridges.py
@@ -1,7 +1,7 @@
# Finding Bridges in Undirected Graph
-def computeBridges(l):
+def computeBridges(graph):
id = 0
- n = len(l) # No of vertices in graph
+ n = len(graph) # No of vertices in graph
low = [0] * n
visited = [False] * n
@@ -9,7 +9,7 @@ def dfs(at, parent, bridges, id):
visited[at] = True
low[at] = id
id += 1
- for to in l[at]:
+ for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
@@ -23,9 +23,20 @@ def dfs(at, parent, bridges, id):
bridges = []
for i in range(n):
- if (not visited[i]):
+ if not visited[i]:
dfs(i, -1, bridges, id)
print(bridges)
-
-l = {0:[1,2], 1:[0,2], 2:[0,1,3,5], 3:[2,4], 4:[3], 5:[2,6,8], 6:[5,7], 7:[6,8], 8:[5,7]}
-computeBridges(l)
+
+
+graph = {
+ 0: [1, 2],
+ 1: [0, 2],
+ 2: [0, 1, 3, 5],
+ 3: [2, 4],
+ 4: [3],
+ 5: [2, 6, 8],
+ 6: [5, 7],
+ 7: [6, 8],
+ 8: [5, 7],
+}
+computeBridges(graph)
diff --git a/graphs/floyd_warshall.py b/graphs/floyd_warshall.py
deleted file mode 100644
index fae8b19b351a..000000000000
--- a/graphs/floyd_warshall.py
+++ /dev/null
@@ -1,48 +0,0 @@
-from __future__ import print_function
-
-def printDist(dist, V):
- print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
- for i in range(V):
- for j in range(V):
- if dist[i][j] != float('inf') :
- print(int(dist[i][j]),end = "\t")
- else:
- print("INF",end="\t")
- print()
-
-
-
-def FloydWarshall(graph, V):
- dist=[[float('inf') for i in range(V)] for j in range(V)]
-
- for i in range(V):
- for j in range(V):
- dist[i][j] = graph[i][j]
-
- for k in range(V):
- for i in range(V):
- for j in range(V):
- if dist[i][k]!=float('inf') and dist[k][j]!=float('inf') and dist[i][k]+dist[k][j] < dist[i][j]:
- dist[i][j] = dist[i][k] + dist[k][j]
-
- printDist(dist, V)
-
-
-
-#MAIN
-V = int(input("Enter number of vertices: "))
-E = int(input("Enter number of edges: "))
-
-graph = [[float('inf') for i in range(V)] for j in range(V)]
-
-for i in range(V):
- graph[i][i] = 0.0
-
-for i in range(E):
- print("\nEdge ",i+1)
- src = int(input("Enter source:"))
- dst = int(input("Enter destination:"))
- weight = float(input("Enter weight:"))
- graph[src][dst] = weight
-
-FloydWarshall(graph, V)
diff --git a/graphs/frequent_pattern_graph_miner.py b/graphs/frequent_pattern_graph_miner.py
new file mode 100644
index 000000000000..ff7063082267
--- /dev/null
+++ b/graphs/frequent_pattern_graph_miner.py
@@ -0,0 +1,232 @@
+"""
+FP-GraphMiner - A Fast Frequent Pattern Mining Algorithm for Network Graphs
+
+A novel Frequent Pattern Graph Mining algorithm, FP-GraphMiner, that compactly
+represents a set of network graphs as a Frequent Pattern Graph (or FP-Graph).
+This graph can be used to efficiently mine frequent subgraphs including maximal
+frequent subgraphs and maximum common subgraphs.
+
+URL: https://www.researchgate.net/publication/235255851
+"""
+# fmt: off
+edge_array = [
+ ['ab-e1', 'ac-e3', 'ad-e5', 'bc-e4', 'bd-e2', 'be-e6', 'bh-e12', 'cd-e2', 'ce-e4',
+ 'de-e1', 'df-e8', 'dg-e5', 'dh-e10', 'ef-e3', 'eg-e2', 'fg-e6', 'gh-e6', 'hi-e3'],
+ ['ab-e1', 'ac-e3', 'ad-e5', 'bc-e4', 'bd-e2', 'be-e6', 'cd-e2', 'de-e1', 'df-e8',
+ 'ef-e3', 'eg-e2', 'fg-e6'],
+ ['ab-e1', 'ac-e3', 'bc-e4', 'bd-e2', 'de-e1', 'df-e8', 'dg-e5', 'ef-e3', 'eg-e2',
+ 'eh-e12', 'fg-e6', 'fh-e10', 'gh-e6'],
+ ['ab-e1', 'ac-e3', 'bc-e4', 'bd-e2', 'bh-e12', 'cd-e2', 'df-e8', 'dh-e10'],
+ ['ab-e1', 'ac-e3', 'ad-e5', 'bc-e4', 'bd-e2', 'cd-e2', 'ce-e4', 'de-e1', 'df-e8',
+ 'dg-e5', 'ef-e3', 'eg-e2', 'fg-e6']
+]
+# fmt: on
+
+
+def get_distinct_edge(edge_array):
+ """
+ Return Distinct edges from edge array of multiple graphs
+ >>> sorted(get_distinct_edge(edge_array))
+ ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']
+ """
+ distinct_edge = set()
+ for row in edge_array:
+ for item in row:
+ distinct_edge.add(item[0])
+ return list(distinct_edge)
+
+
+def get_bitcode(edge_array, distinct_edge):
+ """
+ Return bitcode of distinct_edge
+ """
+ bitcode = ["0"] * len(edge_array)
+ for i, row in enumerate(edge_array):
+ for item in row:
+ if distinct_edge in item[0]:
+ bitcode[i] = "1"
+ break
+ return "".join(bitcode)
+
+
+def get_frequency_table(edge_array):
+ """
+ Returns Frequency Table
+ """
+ distinct_edge = get_distinct_edge(edge_array)
+ frequency_table = dict()
+
+ for item in distinct_edge:
+ bit = get_bitcode(edge_array, item)
+ # print('bit',bit)
+ # bt=''.join(bit)
+ s = bit.count("1")
+ frequency_table[item] = [s, bit]
+ # Store [Distinct edge, WT(Bitcode), Bitcode] in descending order
+ sorted_frequency_table = [
+ [k, v[0], v[1]]
+ for k, v in sorted(frequency_table.items(), key=lambda v: v[1][0], reverse=True)
+ ]
+ return sorted_frequency_table
+
+
+def get_nodes(frequency_table):
+ """
+ Returns nodes
+ format nodes={bitcode:edges that represent the bitcode}
+ >>> get_nodes([['ab', 5, '11111'], ['ac', 5, '11111'], ['df', 5, '11111'],
+ ... ['bd', 5, '11111'], ['bc', 5, '11111']])
+ {'11111': ['ab', 'ac', 'df', 'bd', 'bc']}
+ """
+ nodes = {}
+ for i, item in enumerate(frequency_table):
+ nodes.setdefault(item[2], []).append(item[0])
+ return nodes
+
+
+def get_cluster(nodes):
+ """
+ Returns cluster
+ format cluster:{WT(bitcode):nodes with same WT}
+ """
+ cluster = {}
+ for key, value in nodes.items():
+ cluster.setdefault(key.count("1"), {})[key] = value
+ return cluster
+
+
+def get_support(cluster):
+ """
+ Returns support
+ >>> get_support({5: {'11111': ['ab', 'ac', 'df', 'bd', 'bc']},
+ ... 4: {'11101': ['ef', 'eg', 'de', 'fg'], '11011': ['cd']},
+ ... 3: {'11001': ['ad'], '10101': ['dg']},
+ ... 2: {'10010': ['dh', 'bh'], '11000': ['be'], '10100': ['gh'],
+ ... '10001': ['ce']},
+ ... 1: {'00100': ['fh', 'eh'], '10000': ['hi']}})
+ [100.0, 80.0, 60.0, 40.0, 20.0]
+ """
+ return [i * 100 / len(cluster) for i in cluster]
+
+
+def print_all() -> None:
+ print("\nNodes\n")
+ for key, value in nodes.items():
+ print(key, value)
+ print("\nSupport\n")
+ print(support)
+ print("\n Cluster \n")
+ for key, value in sorted(cluster.items(), reverse=True):
+ print(key, value)
+ print("\n Graph\n")
+ for key, value in graph.items():
+ print(key, value)
+ print("\n Edge List of Frequent subgraphs \n")
+ for edge_list in freq_subgraph_edge_list:
+ print(edge_list)
+
+
+def create_edge(nodes, graph, cluster, c1):
+ """
+ create edge between the nodes
+ """
+ for i in cluster[c1].keys():
+ count = 0
+ c2 = c1 + 1
+ while c2 < max(cluster.keys()):
+ for j in cluster[c2].keys():
+ """
+ creates edge only if the condition satisfies
+ """
+ if int(i, 2) & int(j, 2) == int(i, 2):
+ if tuple(nodes[i]) in graph:
+ graph[tuple(nodes[i])].append(nodes[j])
+ else:
+ graph[tuple(nodes[i])] = [nodes[j]]
+ count += 1
+ if count == 0:
+ c2 = c2 + 1
+ else:
+ break
+
+
+def construct_graph(cluster, nodes):
+ X = cluster[max(cluster.keys())]
+ cluster[max(cluster.keys()) + 1] = "Header"
+ graph = {}
+ for i in X:
+ if tuple(["Header"]) in graph:
+ graph[tuple(["Header"])].append(X[i])
+ else:
+ graph[tuple(["Header"])] = [X[i]]
+ for i in X:
+ graph[tuple(X[i])] = [["Header"]]
+ i = 1
+ while i < max(cluster) - 1:
+ create_edge(nodes, graph, cluster, i)
+ i = i + 1
+ return graph
+
+
+def myDFS(graph, start, end, path=[]):
+ """
+ find different DFS walk from given node to Header node
+ """
+ path = path + [start]
+ if start == end:
+ paths.append(path)
+ for node in graph[start]:
+ if tuple(node) not in path:
+ myDFS(graph, tuple(node), end, path)
+
+
+def find_freq_subgraph_given_support(s, cluster, graph):
+ """
+ find edges of multiple frequent subgraphs
+ """
+ k = int(s / 100 * (len(cluster) - 1))
+ for i in cluster[k].keys():
+ myDFS(graph, tuple(cluster[k][i]), tuple(["Header"]))
+
+
+def freq_subgraphs_edge_list(paths):
+ """
+ returns Edge list for frequent subgraphs
+ """
+ freq_sub_EL = []
+ for edges in paths:
+ EL = []
+ for j in range(len(edges) - 1):
+ temp = list(edges[j])
+ for e in temp:
+ edge = (e[0], e[1])
+ EL.append(edge)
+ freq_sub_EL.append(EL)
+ return freq_sub_EL
+
+
+def preprocess(edge_array):
+ """
+ Preprocess the edge array
+ >>> preprocess([['ab-e1', 'ac-e3', 'ad-e5', 'bc-e4', 'bd-e2', 'be-e6', 'bh-e12',
+ ... 'cd-e2', 'ce-e4', 'de-e1', 'df-e8', 'dg-e5', 'dh-e10', 'ef-e3',
+ ... 'eg-e2', 'fg-e6', 'gh-e6', 'hi-e3']])
+
+ """
+ for i in range(len(edge_array)):
+ for j in range(len(edge_array[i])):
+ t = edge_array[i][j].split("-")
+ edge_array[i][j] = t
+
+
+if __name__ == "__main__":
+ preprocess(edge_array)
+ frequency_table = get_frequency_table(edge_array)
+ nodes = get_nodes(frequency_table)
+ cluster = get_cluster(nodes)
+ support = get_support(cluster)
+ graph = construct_graph(cluster, nodes)
+ find_freq_subgraph_given_support(60, cluster, graph)
+ paths = []
+ freq_subgraph_edge_list = freq_subgraphs_edge_list(paths)
+ print_all()
diff --git a/graphs/g_topological_sort.py b/graphs/g_topological_sort.py
new file mode 100644
index 000000000000..77543d51f61d
--- /dev/null
+++ b/graphs/g_topological_sort.py
@@ -0,0 +1,47 @@
+# Author: Phyllipe Bezerra (https://github.com/pmba)
+
+clothes = {
+ 0: "underwear",
+ 1: "pants",
+ 2: "belt",
+ 3: "suit",
+ 4: "shoe",
+ 5: "socks",
+ 6: "shirt",
+ 7: "tie",
+ 8: "watch",
+}
+
+graph = [[1, 4], [2, 4], [3], [], [], [4], [2, 7], [3], []]
+
+visited = [0 for x in range(len(graph))]
+stack = []
+
+
+def print_stack(stack, clothes):
+ order = 1
+ while stack:
+ current_clothing = stack.pop()
+ print(order, clothes[current_clothing])
+ order += 1
+
+
+def depth_first_search(u, visited, graph):
+ visited[u] = 1
+ for v in graph[u]:
+ if not visited[v]:
+ depth_first_search(v, visited, graph)
+
+ stack.append(u)
+
+
+def topological_sort(graph, visited):
+ for v in range(len(graph)):
+ if not visited[v]:
+ depth_first_search(v, visited, graph)
+
+
+if __name__ == "__main__":
+ topological_sort(graph, visited)
+ print(stack)
+ print_stack(stack, clothes)
diff --git a/graphs/gale_shapley_bigraph.py b/graphs/gale_shapley_bigraph.py
new file mode 100644
index 000000000000..59baf8296ea6
--- /dev/null
+++ b/graphs/gale_shapley_bigraph.py
@@ -0,0 +1,44 @@
+from __future__ import annotations
+
+
+def stable_matching(donor_pref: list[int], recipient_pref: list[int]) -> list[int]:
+ """
+ Finds the stable match in any bipartite graph, i.e. a pairing where no 2 objects
+ prefer each other over their partner. The function accepts the preferences of
+ organ donors and recipients (where both are assigned numbers from 0 to n-1) and
+ returns a list where the index position corresponds to the donor and value at the
+ index is the organ recipient.
+
+ To better understand the algorithm, see also:
+ https://github.com/akashvshroff/Gale_Shapley_Stable_Matching (README).
+ https://www.youtube.com/watch?v=Qcv1IqHWAzg&t=13s (Numberphile YouTube).
+
+ >>> donor_pref = [[0, 1, 3, 2], [0, 2, 3, 1], [1, 0, 2, 3], [0, 3, 1, 2]]
+ >>> recipient_pref = [[3, 1, 2, 0], [3, 1, 0, 2], [0, 3, 1, 2], [1, 0, 3, 2]]
+ >>> print(stable_matching(donor_pref, recipient_pref))
+ [1, 2, 3, 0]
+ """
+ assert len(donor_pref) == len(recipient_pref)
+ n = len(donor_pref)
+ unmatched_donors = list(range(n))
+ donor_record = [-1] * n # who the donor has donated to
+ rec_record = [-1] * n # who the recipient has received from
+ num_donations = [0] * n
+ while unmatched_donors:
+ donor = unmatched_donors[0]
+ donor_preference = donor_pref[donor]
+ recipient = donor_preference[num_donations[donor]]
+ num_donations[donor] += 1
+ rec_preference = recipient_pref[recipient]
+ prev_donor = rec_record[recipient]
+ if prev_donor != -1:
+ if rec_preference.index(prev_donor) > rec_preference.index(donor):
+ rec_record[recipient] = donor
+ donor_record[donor] = recipient
+ unmatched_donors.append(prev_donor)
+ unmatched_donors.remove(donor)
+ else:
+ rec_record[recipient] = donor
+ donor_record[donor] = recipient
+ unmatched_donors.remove(donor)
+ return donor_record
diff --git a/graphs/graph.py b/graphs/graph.py
deleted file mode 100644
index 9bd61559dcbf..000000000000
--- a/graphs/graph.py
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/usr/bin/python
-# encoding=utf8
-
-from __future__ import print_function
-# Author: OMKAR PATHAK
-
-# We can use Python's dictionary for constructing the graph
-
-class AdjacencyList(object):
- def __init__(self):
- self.List = {}
-
- def addEdge(self, fromVertex, toVertex):
- # check if vertex is already present
- if fromVertex in self.List.keys():
- self.List[fromVertex].append(toVertex)
- else:
- self.List[fromVertex] = [toVertex]
-
- def printList(self):
- for i in self.List:
- print((i,'->',' -> '.join([str(j) for j in self.List[i]])))
-
-if __name__ == '__main__':
- al = AdjacencyList()
- al.addEdge(0, 1)
- al.addEdge(0, 4)
- al.addEdge(4, 1)
- al.addEdge(4, 3)
- al.addEdge(1, 0)
- al.addEdge(1, 4)
- al.addEdge(1, 3)
- al.addEdge(1, 2)
- al.addEdge(2, 3)
- al.addEdge(3, 4)
-
- al.printList()
-
- # OUTPUT:
- # 0 -> 1 -> 4
- # 1 -> 0 -> 4 -> 3 -> 2
- # 2 -> 3
- # 3 -> 4
- # 4 -> 1 -> 3
diff --git a/graphs/graph_list.py b/graphs/graph_list.py
index d67bc96c4a81..a812fecd961e 100644
--- a/graphs/graph_list.py
+++ b/graphs/graph_list.py
@@ -1,31 +1,44 @@
-from __future__ import print_function
-
-
-class Graph:
- def __init__(self, vertex):
- self.vertex = vertex
- self.graph = [[0] for i in range(vertex)]
-
- def add_edge(self, u, v):
- self.graph[u - 1].append(v - 1)
-
- def show(self):
- for i in range(self.vertex):
- print('%d: '% (i + 1), end=' ')
- for j in self.graph[i]:
- print('%d-> '% (j + 1), end=' ')
- print(' ')
-
-
-
-g = Graph(100)
-
-g.add_edge(1,3)
-g.add_edge(2,3)
-g.add_edge(3,4)
-g.add_edge(3,5)
-g.add_edge(4,5)
-
-
-g.show()
-
+#!/usr/bin/python
+
+# Author: OMKAR PATHAK
+
+# We can use Python's dictionary for constructing the graph.
+
+
+class AdjacencyList:
+ def __init__(self):
+ self.adj_list = {}
+
+ def add_edge(self, from_vertex: int, to_vertex: int) -> None:
+ # check if vertex is already present
+ if from_vertex in self.adj_list:
+ self.adj_list[from_vertex].append(to_vertex)
+ else:
+ self.adj_list[from_vertex] = [to_vertex]
+
+ def print_list(self) -> None:
+ for i in self.adj_list:
+ print((i, "->", " -> ".join([str(j) for j in self.adj_list[i]])))
+
+
+if __name__ == "__main__":
+ al = AdjacencyList()
+ al.add_edge(0, 1)
+ al.add_edge(0, 4)
+ al.add_edge(4, 1)
+ al.add_edge(4, 3)
+ al.add_edge(1, 0)
+ al.add_edge(1, 4)
+ al.add_edge(1, 3)
+ al.add_edge(1, 2)
+ al.add_edge(2, 3)
+ al.add_edge(3, 4)
+
+ al.print_list()
+
+ # OUTPUT:
+ # 0 -> 1 -> 4
+ # 1 -> 0 -> 4 -> 3 -> 2
+ # 2 -> 3
+ # 3 -> 4
+ # 4 -> 1 -> 3
diff --git a/graphs/graph_matrix.py b/graphs/graph_matrix.py
index de25301d6dd1..987168426ba5 100644
--- a/graphs/graph_matrix.py
+++ b/graphs/graph_matrix.py
@@ -1,11 +1,7 @@
-from __future__ import print_function
-
-
class Graph:
-
def __init__(self, vertex):
self.vertex = vertex
- self.graph = [[0] * vertex for i in range(vertex) ]
+ self.graph = [[0] * vertex for i in range(vertex)]
def add_edge(self, u, v):
self.graph[u - 1][v - 1] = 1
@@ -15,18 +11,15 @@ def show(self):
for i in self.graph:
for j in i:
- print(j, end=' ')
- print(' ')
-
-
+ print(j, end=" ")
+ print(" ")
g = Graph(100)
-g.add_edge(1,4)
-g.add_edge(4,2)
-g.add_edge(4,5)
-g.add_edge(2,5)
-g.add_edge(5,3)
+g.add_edge(1, 4)
+g.add_edge(4, 2)
+g.add_edge(4, 5)
+g.add_edge(2, 5)
+g.add_edge(5, 3)
g.show()
-
diff --git a/graphs/graphs_floyd_warshall.py b/graphs/graphs_floyd_warshall.py
new file mode 100644
index 000000000000..56cf8b9e382b
--- /dev/null
+++ b/graphs/graphs_floyd_warshall.py
@@ -0,0 +1,102 @@
+# floyd_warshall.py
+"""
+ The problem is to find the shortest distance between all pairs of vertices in a
+ weighted directed graph that can have negative edge weights.
+"""
+
+
+def _print_dist(dist, v):
+ print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
+ for i in range(v):
+ for j in range(v):
+ if dist[i][j] != float("inf"):
+ print(int(dist[i][j]), end="\t")
+ else:
+ print("INF", end="\t")
+ print()
+
+
+def floyd_warshall(graph, v):
+ """
+ :param graph: 2D array calculated from weight[edge[i, j]]
+ :type graph: List[List[float]]
+ :param v: number of vertices
+ :type v: int
+ :return: shortest distance between all vertex pairs
+ distance[u][v] will contain the shortest distance from vertex u to v.
+
+ 1. For all edges from v to n, distance[i][j] = weight(edge(i, j)).
+ 3. The algorithm then performs distance[i][j] = min(distance[i][j], distance[i][k] +
+ distance[k][j]) for each possible pair i, j of vertices.
+ 4. The above is repeated for each vertex k in the graph.
+ 5. Whenever distance[i][j] is given a new minimum value, next vertex[i][j] is
+ updated to the next vertex[i][k].
+ """
+
+ dist = [[float("inf") for _ in range(v)] for _ in range(v)]
+
+ for i in range(v):
+ for j in range(v):
+ dist[i][j] = graph[i][j]
+
+ # check vertex k against all other vertices (i, j)
+ for k in range(v):
+ # looping through rows of graph array
+ for i in range(v):
+ # looping through columns of graph array
+ for j in range(v):
+ if (
+ dist[i][k] != float("inf")
+ and dist[k][j] != float("inf")
+ and dist[i][k] + dist[k][j] < dist[i][j]
+ ):
+ dist[i][j] = dist[i][k] + dist[k][j]
+
+ _print_dist(dist, v)
+ return dist, v
+
+
+if __name__ == "__main__":
+ v = int(input("Enter number of vertices: "))
+ e = int(input("Enter number of edges: "))
+
+ graph = [[float("inf") for i in range(v)] for j in range(v)]
+
+ for i in range(v):
+ graph[i][i] = 0.0
+
+ # src and dst are indices that must be within the array size graph[e][v]
+ # failure to follow this will result in an error
+ for i in range(e):
+ print("\nEdge ", i + 1)
+ src = int(input("Enter source:"))
+ dst = int(input("Enter destination:"))
+ weight = float(input("Enter weight:"))
+ graph[src][dst] = weight
+
+ floyd_warshall(graph, v)
+
+ # Example Input
+ # Enter number of vertices: 3
+ # Enter number of edges: 2
+
+ # # generated graph from vertex and edge inputs
+ # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
+ # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
+
+ # specify source, destination and weight for edge #1
+ # Edge 1
+ # Enter source:1
+ # Enter destination:2
+ # Enter weight:2
+
+ # specify source, destination and weight for edge #2
+ # Edge 2
+ # Enter source:2
+ # Enter destination:1
+ # Enter weight:1
+
+ # # Expected Output from the vertex, edge and src, dst, weight inputs!!
+ # 0 INF INF
+ # INF 0 2
+ # INF 1 0
diff --git a/graphs/greedy_best_first.py b/graphs/greedy_best_first.py
new file mode 100644
index 000000000000..4b80a6853d3f
--- /dev/null
+++ b/graphs/greedy_best_first.py
@@ -0,0 +1,174 @@
+"""
+https://en.wikipedia.org/wiki/Best-first_search#Greedy_BFS
+"""
+
+from __future__ import annotations
+
+grid = [
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 1, 0, 0, 0, 0],
+ [1, 0, 1, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 1, 0, 0],
+]
+
+delta = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
+
+
+class Node:
+ """
+ >>> k = Node(0, 0, 4, 5, 0, None)
+ >>> k.calculate_heuristic()
+ 9
+ >>> n = Node(1, 4, 3, 4, 2, None)
+ >>> n.calculate_heuristic()
+ 2
+ >>> l = [k, n]
+ >>> n == l[0]
+ False
+ >>> l.sort()
+ >>> n == l[0]
+ True
+ """
+
+ def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
+ self.pos_x = pos_x
+ self.pos_y = pos_y
+ self.pos = (pos_y, pos_x)
+ self.goal_x = goal_x
+ self.goal_y = goal_y
+ self.g_cost = g_cost
+ self.parent = parent
+ self.f_cost = self.calculate_heuristic()
+
+ def calculate_heuristic(self) -> float:
+ """
+ The heuristic here is the Manhattan Distance
+ Could elaborate to offer more than one choice
+ """
+ dy = abs(self.pos_x - self.goal_x)
+ dx = abs(self.pos_y - self.goal_y)
+ return dx + dy
+
+ def __lt__(self, other) -> bool:
+ return self.f_cost < other.f_cost
+
+
+class GreedyBestFirst:
+ """
+ >>> gbf = GreedyBestFirst((0, 0), (len(grid) - 1, len(grid[0]) - 1))
+ >>> [x.pos for x in gbf.get_successors(gbf.start)]
+ [(1, 0), (0, 1)]
+ >>> (gbf.start.pos_y + delta[3][0], gbf.start.pos_x + delta[3][1])
+ (0, 1)
+ >>> (gbf.start.pos_y + delta[2][0], gbf.start.pos_x + delta[2][1])
+ (1, 0)
+ >>> gbf.retrace_path(gbf.start)
+ [(0, 0)]
+ >>> gbf.search() # doctest: +NORMALIZE_WHITESPACE
+ [(0, 0), (1, 0), (2, 0), (3, 0), (3, 1), (4, 1), (5, 1), (6, 1),
+ (6, 2), (6, 3), (5, 3), (5, 4), (5, 5), (6, 5), (6, 6)]
+ """
+
+ def __init__(self, start, goal):
+ self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
+ self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
+
+ self.open_nodes = [self.start]
+ self.closed_nodes = []
+
+ self.reached = False
+
+ def search(self) -> list[tuple[int]]:
+ """
+ Search for the path,
+ if a path is not found, only the starting position is returned
+ """
+ while self.open_nodes:
+ # Open Nodes are sorted using __lt__
+ self.open_nodes.sort()
+ current_node = self.open_nodes.pop(0)
+
+ if current_node.pos == self.target.pos:
+ self.reached = True
+ return self.retrace_path(current_node)
+
+ self.closed_nodes.append(current_node)
+ successors = self.get_successors(current_node)
+
+ for child_node in successors:
+ if child_node in self.closed_nodes:
+ continue
+
+ if child_node not in self.open_nodes:
+ self.open_nodes.append(child_node)
+ else:
+ # retrieve the best current path
+ better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
+
+ if child_node.g_cost < better_node.g_cost:
+ self.open_nodes.append(child_node)
+ else:
+ self.open_nodes.append(better_node)
+
+ if not (self.reached):
+ return [self.start.pos]
+
+ def get_successors(self, parent: Node) -> list[Node]:
+ """
+ Returns a list of successors (both in the grid and free spaces)
+ """
+ successors = []
+ for action in delta:
+ pos_x = parent.pos_x + action[1]
+ pos_y = parent.pos_y + action[0]
+
+ if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
+ continue
+
+ if grid[pos_y][pos_x] != 0:
+ continue
+
+ successors.append(
+ Node(
+ pos_x,
+ pos_y,
+ self.target.pos_y,
+ self.target.pos_x,
+ parent.g_cost + 1,
+ parent,
+ )
+ )
+ return successors
+
+ def retrace_path(self, node: Node) -> list[tuple[int]]:
+ """
+ Retrace the path from parents to parents until start node
+ """
+ current_node = node
+ path = []
+ while current_node is not None:
+ path.append((current_node.pos_y, current_node.pos_x))
+ current_node = current_node.parent
+ path.reverse()
+ return path
+
+
+if __name__ == "__main__":
+ init = (0, 0)
+ goal = (len(grid) - 1, len(grid[0]) - 1)
+ for elem in grid:
+ print(elem)
+
+ print("------")
+
+ greedy_bf = GreedyBestFirst(init, goal)
+ path = greedy_bf.search()
+
+ for elem in path:
+ grid[elem[0]][elem[1]] = 2
+
+ for elem in grid:
+ print(elem)
diff --git a/graphs/kahns_algorithm_long.py b/graphs/kahns_algorithm_long.py
index 453b5706f6da..fed7517a21e2 100644
--- a/graphs/kahns_algorithm_long.py
+++ b/graphs/kahns_algorithm_long.py
@@ -1,10 +1,10 @@
# Finding longest distance in Directed Acyclic Graph using KahnsAlgorithm
-def longestDistance(l):
- indegree = [0] * len(l)
+def longestDistance(graph):
+ indegree = [0] * len(graph)
queue = []
- longDist = [1] * len(l)
+ longDist = [1] * len(graph)
- for key, values in l.items():
+ for key, values in graph.items():
for i in values:
indegree[i] += 1
@@ -12,19 +12,20 @@ def longestDistance(l):
if indegree[i] == 0:
queue.append(i)
- while(queue):
+ while queue:
vertex = queue.pop(0)
- for x in l[vertex]:
+ for x in graph[vertex]:
indegree[x] -= 1
if longDist[vertex] + 1 > longDist[x]:
- longDist[x] = longDist[vertex] + 1
+ longDist[x] = longDist[vertex] + 1
if indegree[x] == 0:
queue.append(x)
print(max(longDist))
+
# Adjacency list of Graph
-l = {0:[2,3,4], 1:[2,7], 2:[5], 3:[5,7], 4:[7], 5:[6], 6:[7], 7:[]}
-longestDistance(l)
+graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
+longestDistance(graph)
diff --git a/graphs/kahns_algorithm_topo.py b/graphs/kahns_algorithm_topo.py
index 8c182c4e902c..bf9f90299361 100644
--- a/graphs/kahns_algorithm_topo.py
+++ b/graphs/kahns_algorithm_topo.py
@@ -1,11 +1,14 @@
-# Kahn's Algorithm is used to find Topological ordering of Directed Acyclic Graph using BFS
-def topologicalSort(l):
- indegree = [0] * len(l)
+def topologicalSort(graph):
+ """
+ Kahn's Algorithm is used to find Topological ordering of Directed Acyclic Graph
+ using BFS
+ """
+ indegree = [0] * len(graph)
queue = []
topo = []
cnt = 0
- for key, values in l.items():
+ for key, values in graph.items():
for i in values:
indegree[i] += 1
@@ -13,20 +16,21 @@ def topologicalSort(l):
if indegree[i] == 0:
queue.append(i)
- while(queue):
+ while queue:
vertex = queue.pop(0)
cnt += 1
topo.append(vertex)
- for x in l[vertex]:
+ for x in graph[vertex]:
indegree[x] -= 1
if indegree[x] == 0:
queue.append(x)
- if cnt != len(l):
+ if cnt != len(graph):
print("Cycle exists")
else:
print(topo)
+
# Adjacency List of Graph
-l = {0:[1,2], 1:[3], 2:[3], 3:[4,5], 4:[], 5:[]}
-topologicalSort(l)
+graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
+topologicalSort(graph)
diff --git a/graphs/karger.py b/graphs/karger.py
new file mode 100644
index 000000000000..f72128c8178a
--- /dev/null
+++ b/graphs/karger.py
@@ -0,0 +1,86 @@
+"""
+An implementation of Karger's Algorithm for partitioning a graph.
+"""
+
+from __future__ import annotations
+
+import random
+
+# Adjacency list representation of this graph:
+# https://en.wikipedia.org/wiki/File:Single_run_of_Karger%E2%80%99s_Mincut_algorithm.svg
+TEST_GRAPH = {
+ "1": ["2", "3", "4", "5"],
+ "2": ["1", "3", "4", "5"],
+ "3": ["1", "2", "4", "5", "10"],
+ "4": ["1", "2", "3", "5", "6"],
+ "5": ["1", "2", "3", "4", "7"],
+ "6": ["7", "8", "9", "10", "4"],
+ "7": ["6", "8", "9", "10", "5"],
+ "8": ["6", "7", "9", "10"],
+ "9": ["6", "7", "8", "10"],
+ "10": ["6", "7", "8", "9", "3"],
+}
+
+
+def partition_graph(graph: dict[str, list[str]]) -> set[tuple[str, str]]:
+ """
+ Partitions a graph using Karger's Algorithm. Implemented from
+ pseudocode found here:
+ https://en.wikipedia.org/wiki/Karger%27s_algorithm.
+ This function involves random choices, meaning it will not give
+ consistent outputs.
+
+ Args:
+        graph: A dictionary containing adjacency lists for the graph.
+ Nodes must be strings.
+
+ Returns:
+ The cutset of the cut found by Karger's Algorithm.
+
+ >>> graph = {'0':['1'], '1':['0']}
+ >>> partition_graph(graph)
+ {('0', '1')}
+ """
+ # Dict that maps contracted nodes to a list of all the nodes it "contains."
+ contracted_nodes = {node: {node} for node in graph}
+
+ graph_copy = {node: graph[node][:] for node in graph}
+
+ while len(graph_copy) > 2:
+
+ # Choose a random edge.
+ u = random.choice(list(graph_copy.keys()))
+ v = random.choice(graph_copy[u])
+
+ # Contract edge (u, v) to new node uv
+ uv = u + v
+ uv_neighbors = list(set(graph_copy[u] + graph_copy[v]))
+ uv_neighbors.remove(u)
+ uv_neighbors.remove(v)
+ graph_copy[uv] = uv_neighbors
+ for neighbor in uv_neighbors:
+ graph_copy[neighbor].append(uv)
+
+ contracted_nodes[uv] = set(contracted_nodes[u].union(contracted_nodes[v]))
+
+ # Remove nodes u and v.
+ del graph_copy[u]
+ del graph_copy[v]
+ for neighbor in uv_neighbors:
+ if u in graph_copy[neighbor]:
+ graph_copy[neighbor].remove(u)
+ if v in graph_copy[neighbor]:
+ graph_copy[neighbor].remove(v)
+
+ # Find cutset.
+ groups = [contracted_nodes[node] for node in graph_copy]
+ return {
+ (node, neighbor)
+ for node in groups[0]
+ for neighbor in graph[node]
+ if neighbor in groups[1]
+ }
+
+
+if __name__ == "__main__":
+ print(partition_graph(TEST_GRAPH))
diff --git a/graphs/minimum_spanning_tree_boruvka.py b/graphs/minimum_spanning_tree_boruvka.py
new file mode 100644
index 000000000000..32548b2ecb6c
--- /dev/null
+++ b/graphs/minimum_spanning_tree_boruvka.py
@@ -0,0 +1,196 @@
+class Graph:
+ """
+ Data structure to store graphs (based on adjacency lists)
+ """
+
+ def __init__(self):
+
+ self.num_vertices = 0
+ self.num_edges = 0
+ self.adjacency = {}
+
+ def add_vertex(self, vertex):
+ """
+ Adds a vertex to the graph
+
+ """
+ if vertex not in self.adjacency:
+ self.adjacency[vertex] = {}
+ self.num_vertices += 1
+
+ def add_edge(self, head, tail, weight):
+ """
+ Adds an edge to the graph
+
+ """
+
+ self.add_vertex(head)
+ self.add_vertex(tail)
+
+ if head == tail:
+ return
+
+ self.adjacency[head][tail] = weight
+ self.adjacency[tail][head] = weight
+
+ def distinct_weight(self):
+ """
+        For Boruvka's algorithm the weights should be distinct
+ Converts the weights to be distinct
+
+ """
+ edges = self.get_edges()
+ for edge in edges:
+ head, tail, weight = edge
+ edges.remove((tail, head, weight))
+ for i in range(len(edges)):
+ edges[i] = list(edges[i])
+
+ edges.sort(key=lambda e: e[2])
+ for i in range(len(edges) - 1):
+ if edges[i][2] >= edges[i + 1][2]:
+ edges[i + 1][2] = edges[i][2] + 1
+ for edge in edges:
+ head, tail, weight = edge
+ self.adjacency[head][tail] = weight
+ self.adjacency[tail][head] = weight
+
+ def __str__(self):
+ """
+ Returns string representation of the graph
+ """
+ string = ""
+ for tail in self.adjacency:
+ for head in self.adjacency[tail]:
+ weight = self.adjacency[head][tail]
+ string += "%d -> %d == %d\n" % (head, tail, weight)
+ return string.rstrip("\n")
+
+ def get_edges(self):
+ """
+        Returns all edges in the graph
+ """
+ output = []
+ for tail in self.adjacency:
+ for head in self.adjacency[tail]:
+ output.append((tail, head, self.adjacency[head][tail]))
+ return output
+
+ def get_vertices(self):
+ """
+ Returns all vertices in the graph
+ """
+ return self.adjacency.keys()
+
+ @staticmethod
+ def build(vertices=None, edges=None):
+ """
+ Builds a graph from the given set of vertices and edges
+
+ """
+ g = Graph()
+ if vertices is None:
+ vertices = []
+ if edges is None:
+            edges = []
+ for vertex in vertices:
+ g.add_vertex(vertex)
+ for edge in edges:
+ g.add_edge(*edge)
+ return g
+
+ class UnionFind:
+ """
+ Disjoint set Union and Find for Boruvka's algorithm
+ """
+
+ def __init__(self):
+ self.parent = {}
+ self.rank = {}
+
+ def __len__(self):
+ return len(self.parent)
+
+ def make_set(self, item):
+ if item in self.parent:
+ return self.find(item)
+
+ self.parent[item] = item
+ self.rank[item] = 0
+ return item
+
+ def find(self, item):
+ if item not in self.parent:
+ return self.make_set(item)
+ if item != self.parent[item]:
+ self.parent[item] = self.find(self.parent[item])
+ return self.parent[item]
+
+ def union(self, item1, item2):
+ root1 = self.find(item1)
+ root2 = self.find(item2)
+
+ if root1 == root2:
+ return root1
+
+ if self.rank[root1] > self.rank[root2]:
+ self.parent[root2] = root1
+ return root1
+
+ if self.rank[root1] < self.rank[root2]:
+ self.parent[root1] = root2
+ return root2
+
+ if self.rank[root1] == self.rank[root2]:
+ self.rank[root1] += 1
+ self.parent[root2] = root1
+ return root1
+
+ @staticmethod
+ def boruvka_mst(graph):
+ """
+ Implementation of Boruvka's algorithm
+ >>> g = Graph()
+ >>> g = Graph.build([0, 1, 2, 3], [[0, 1, 1], [0, 2, 1],[2, 3, 1]])
+ >>> g.distinct_weight()
+ >>> bg = Graph.boruvka_mst(g)
+ >>> print(bg)
+ 1 -> 0 == 1
+ 2 -> 0 == 2
+ 0 -> 1 == 1
+ 0 -> 2 == 2
+ 3 -> 2 == 3
+ 2 -> 3 == 3
+ """
+ num_components = graph.num_vertices
+
+ union_find = Graph.UnionFind()
+ mst_edges = []
+ while num_components > 1:
+ cheap_edge = {}
+ for vertex in graph.get_vertices():
+ cheap_edge[vertex] = -1
+
+ edges = graph.get_edges()
+ for edge in edges:
+ head, tail, weight = edge
+ edges.remove((tail, head, weight))
+ for edge in edges:
+ head, tail, weight = edge
+ set1 = union_find.find(head)
+ set2 = union_find.find(tail)
+ if set1 != set2:
+ if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
+ cheap_edge[set1] = [head, tail, weight]
+
+ if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
+ cheap_edge[set2] = [head, tail, weight]
+ for vertex in cheap_edge:
+ if cheap_edge[vertex] != -1:
+ head, tail, weight = cheap_edge[vertex]
+ if union_find.find(head) != union_find.find(tail):
+ union_find.union(head, tail)
+ mst_edges.append(cheap_edge[vertex])
+ num_components = num_components - 1
+ mst = Graph.build(edges=mst_edges)
+ return mst
diff --git a/graphs/minimum_spanning_tree_kruskal.py b/graphs/minimum_spanning_tree_kruskal.py
index 81d64f421a31..a51f970341f7 100644
--- a/graphs/minimum_spanning_tree_kruskal.py
+++ b/graphs/minimum_spanning_tree_kruskal.py
@@ -1,32 +1,47 @@
-from __future__ import print_function
-num_nodes, num_edges = list(map(int,input().split()))
+from typing import List, Tuple
-edges = []
-for i in range(num_edges):
- node1, node2, cost = list(map(int,input().split()))
- edges.append((i,node1,node2,cost))
+def kruskal(num_nodes: int, num_edges: int, edges: List[Tuple[int, int, int]]) -> int:
+ """
+ >>> kruskal(4, 3, [(0, 1, 3), (1, 2, 5), (2, 3, 1)])
+ [(2, 3, 1), (0, 1, 3), (1, 2, 5)]
-edges = sorted(edges, key=lambda edge: edge[3])
+ >>> kruskal(4, 5, [(0, 1, 3), (1, 2, 5), (2, 3, 1), (0, 2, 1), (0, 3, 2)])
+ [(2, 3, 1), (0, 2, 1), (0, 1, 3)]
-parent = [i for i in range(num_nodes)]
+ >>> kruskal(4, 6, [(0, 1, 3), (1, 2, 5), (2, 3, 1), (0, 2, 1), (0, 3, 2),
+ ... (2, 1, 1)])
+ [(2, 3, 1), (0, 2, 1), (2, 1, 1)]
+ """
+ edges = sorted(edges, key=lambda edge: edge[2])
-def find_parent(i):
- if(i != parent[i]):
- parent[i] = find_parent(parent[i])
- return parent[i]
+ parent = list(range(num_nodes))
-minimum_spanning_tree_cost = 0
-minimum_spanning_tree = []
+ def find_parent(i):
+ if i != parent[i]:
+ parent[i] = find_parent(parent[i])
+ return parent[i]
-for edge in edges:
- parent_a = find_parent(edge[1])
- parent_b = find_parent(edge[2])
- if(parent_a != parent_b):
- minimum_spanning_tree_cost += edge[3]
- minimum_spanning_tree.append(edge)
- parent[parent_a] = parent_b
+ minimum_spanning_tree_cost = 0
+ minimum_spanning_tree = []
-print(minimum_spanning_tree_cost)
-for edge in minimum_spanning_tree:
- print(edge)
+ for edge in edges:
+ parent_a = find_parent(edge[0])
+ parent_b = find_parent(edge[1])
+ if parent_a != parent_b:
+ minimum_spanning_tree_cost += edge[2]
+ minimum_spanning_tree.append(edge)
+ parent[parent_a] = parent_b
+
+ return minimum_spanning_tree
+
+
+if __name__ == "__main__": # pragma: no cover
+ num_nodes, num_edges = list(map(int, input().strip().split()))
+ edges = []
+
+ for _ in range(num_edges):
+ node1, node2, cost = [int(x) for x in input().strip().split()]
+ edges.append((node1, node2, cost))
+
+ kruskal(num_nodes, num_edges, edges)
diff --git a/graphs/minimum_spanning_tree_kruskal2.py b/graphs/minimum_spanning_tree_kruskal2.py
new file mode 100644
index 000000000000..dfb87efeb89a
--- /dev/null
+++ b/graphs/minimum_spanning_tree_kruskal2.py
@@ -0,0 +1,109 @@
+from __future__ import annotations
+
+
+class DisjointSetTreeNode:
+ # Disjoint Set Node to store the parent and rank
+ def __init__(self, key: int) -> None:
+ self.key = key
+ self.parent = self
+ self.rank = 0
+
+
+class DisjointSetTree:
+ # Disjoint Set DataStructure
+ def __init__(self):
+ # map from node name to the node object
+ self.map = {}
+
+ def make_set(self, x: int) -> None:
+ # create a new set with x as its member
+ self.map[x] = DisjointSetTreeNode(x)
+
+ def find_set(self, x: int) -> DisjointSetTreeNode:
+ # find the set x belongs to (with path-compression)
+ elem_ref = self.map[x]
+ if elem_ref != elem_ref.parent:
+ elem_ref.parent = self.find_set(elem_ref.parent.key)
+ return elem_ref.parent
+
+ def link(self, x: int, y: int) -> None:
+ # helper function for union operation
+ if x.rank > y.rank:
+ y.parent = x
+ else:
+ x.parent = y
+ if x.rank == y.rank:
+ y.rank += 1
+
+ def union(self, x: int, y: int) -> None:
+ # merge 2 disjoint sets
+ self.link(self.find_set(x), self.find_set(y))
+
+
+class GraphUndirectedWeighted:
+ def __init__(self):
+ # connections: map from the node to the neighbouring nodes (with weights)
+ self.connections = {}
+
+ def add_node(self, node: int) -> None:
+        # add a node ONLY if it's not present in the graph
+ if node not in self.connections:
+ self.connections[node] = {}
+
+ def add_edge(self, node1: int, node2: int, weight: int) -> None:
+ # add an edge with the given weight
+ self.add_node(node1)
+ self.add_node(node2)
+ self.connections[node1][node2] = weight
+ self.connections[node2][node1] = weight
+
+ def kruskal(self) -> GraphUndirectedWeighted:
+ # Kruskal's Algorithm to generate a Minimum Spanning Tree (MST) of a graph
+ """
+ Details: https://en.wikipedia.org/wiki/Kruskal%27s_algorithm
+
+ Example:
+
+ >>> graph = GraphUndirectedWeighted()
+ >>> graph.add_edge(1, 2, 1)
+ >>> graph.add_edge(2, 3, 2)
+ >>> graph.add_edge(3, 4, 1)
+ >>> graph.add_edge(3, 5, 100) # Removed in MST
+ >>> graph.add_edge(4, 5, 5)
+ >>> assert 5 in graph.connections[3]
+ >>> mst = graph.kruskal()
+ >>> assert 5 not in mst.connections[3]
+ """
+
+ # getting the edges in ascending order of weights
+ edges = []
+ seen = set()
+ for start in self.connections:
+ for end in self.connections[start]:
+ if (start, end) not in seen:
+ seen.add((end, start))
+ edges.append((start, end, self.connections[start][end]))
+ edges.sort(key=lambda x: x[2])
+ # creating the disjoint set
+ disjoint_set = DisjointSetTree()
+ [disjoint_set.make_set(node) for node in self.connections]
+ # MST generation
+ num_edges = 0
+ index = 0
+ graph = GraphUndirectedWeighted()
+ while num_edges < len(self.connections) - 1:
+ u, v, w = edges[index]
+ index += 1
+ parentu = disjoint_set.find_set(u)
+ parentv = disjoint_set.find_set(v)
+ if parentu != parentv:
+ num_edges += 1
+ graph.add_edge(u, v, w)
+ disjoint_set.union(u, v)
+ return graph
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/graphs/minimum_spanning_tree_prims.py b/graphs/minimum_spanning_tree_prims.py
index 7b1ad0e743f7..16b4286140ec 100644
--- a/graphs/minimum_spanning_tree_prims.py
+++ b/graphs/minimum_spanning_tree_prims.py
@@ -1,16 +1,18 @@
import sys
from collections import defaultdict
-def PrimsAlgorithm(l):
+
+def PrimsAlgorithm(l): # noqa: E741
nodePosition = []
- def getPosition(vertex):
+
+ def get_position(vertex):
return nodePosition[vertex]
- def setPosition(vertex, pos):
+ def set_position(vertex, pos):
nodePosition[vertex] = pos
- def topToBottom(heap, start, size, positions):
+ def top_to_bottom(heap, start, size, positions):
if start > size // 2 - 1:
return
else:
@@ -26,52 +28,53 @@ def topToBottom(heap, start, size, positions):
heap[m], positions[m] = heap[start], positions[start]
heap[start], positions[start] = temp, temp1
- temp = getPosition(positions[m])
- setPosition(positions[m], getPosition(positions[start]))
- setPosition(positions[start], temp)
+ temp = get_position(positions[m])
+ set_position(positions[m], get_position(positions[start]))
+ set_position(positions[start], temp)
- topToBottom(heap, m, size, positions)
+ top_to_bottom(heap, m, size, positions)
# Update function if value of any node in min-heap decreases
- def bottomToTop(val, index, heap, position):
+ def bottom_to_top(val, index, heap, position):
temp = position[index]
- while(index != 0):
+ while index != 0:
if index % 2 == 0:
- parent = int( (index-2) / 2 )
+ parent = int((index - 2) / 2)
else:
- parent = int( (index-1) / 2 )
+ parent = int((index - 1) / 2)
if val < heap[parent]:
heap[index] = heap[parent]
position[index] = position[parent]
- setPosition(position[parent], index)
+ set_position(position[parent], index)
else:
heap[index] = val
position[index] = temp
- setPosition(temp, index)
+ set_position(temp, index)
break
index = parent
else:
heap[0] = val
position[0] = temp
- setPosition(temp, 0)
+ set_position(temp, 0)
def heapify(heap, positions):
start = len(heap) // 2 - 1
for i in range(start, -1, -1):
- topToBottom(heap, i, len(heap), positions)
+ top_to_bottom(heap, i, len(heap), positions)
def deleteMinimum(heap, positions):
temp = positions[0]
heap[0] = sys.maxsize
- topToBottom(heap, 0, len(heap), positions)
+ top_to_bottom(heap, 0, len(heap), positions)
return temp
visited = [0 for i in range(len(l))]
- Nbr_TV = [-1 for i in range(len(l))] # Neighboring Tree Vertex of selected vertex
- # Minimum Distance of explored vertex with neighboring vertex of partial tree formed in graph
- Distance_TV = [] # Heap of Distance of vertices from their neighboring vertex
+ Nbr_TV = [-1 for i in range(len(l))] # Neighboring Tree Vertex of selected vertex
+ # Minimum Distance of explored vertex with neighboring vertex of partial tree
+ # formed in graph
+ Distance_TV = [] # Heap of Distance of vertices from their neighboring vertex
Positions = []
for x in range(len(l)):
@@ -84,8 +87,8 @@ def deleteMinimum(heap, positions):
visited[0] = 1
Distance_TV[0] = sys.maxsize
for x in l[0]:
- Nbr_TV[ x[0] ] = 0
- Distance_TV[ x[0] ] = x[1]
+ Nbr_TV[x[0]] = 0
+ Distance_TV[x[0]] = x[1]
heapify(Distance_TV, Positions)
for i in range(1, len(l)):
@@ -94,18 +97,20 @@ def deleteMinimum(heap, positions):
TreeEdges.append((Nbr_TV[vertex], vertex))
visited[vertex] = 1
for v in l[vertex]:
- if visited[v[0]] == 0 and v[1] < Distance_TV[ getPosition(v[0]) ]:
- Distance_TV[ getPosition(v[0]) ] = v[1]
- bottomToTop(v[1], getPosition(v[0]), Distance_TV, Positions)
- Nbr_TV[ v[0] ] = vertex
+ if visited[v[0]] == 0 and v[1] < Distance_TV[get_position(v[0])]:
+ Distance_TV[get_position(v[0])] = v[1]
+ bottom_to_top(v[1], get_position(v[0]), Distance_TV, Positions)
+ Nbr_TV[v[0]] = vertex
return TreeEdges
-# < --------- Prims Algorithm --------- >
-n = int(input("Enter number of vertices: "))
-e = int(input("Enter number of edges: "))
-adjlist = defaultdict(list)
-for x in range(e):
- l = [int(x) for x in input().split()]
- adjlist[l[0]].append([ l[1], l[2] ])
- adjlist[l[1]].append([ l[0], l[2] ])
-print(PrimsAlgorithm(adjlist))
+
+if __name__ == "__main__": # pragma: no cover
+ # < --------- Prims Algorithm --------- >
+ n = int(input("Enter number of vertices: ").strip())
+ e = int(input("Enter number of edges: ").strip())
+ adjlist = defaultdict(list)
+ for x in range(e):
+ l = [int(x) for x in input().strip().split()] # noqa: E741
+ adjlist[l[0]].append([l[1], l[2]])
+ adjlist[l[1]].append([l[0], l[2]])
+ print(PrimsAlgorithm(adjlist))
diff --git a/graphs/minimum_spanning_tree_prims2.py b/graphs/minimum_spanning_tree_prims2.py
new file mode 100644
index 000000000000..10ed736c9d17
--- /dev/null
+++ b/graphs/minimum_spanning_tree_prims2.py
@@ -0,0 +1,271 @@
+"""
+Prim's (also known as Jarník's) algorithm is a greedy algorithm that finds a minimum
+spanning tree for a weighted undirected graph. This means it finds a subset of the
+edges that forms a tree that includes every vertex, where the total weight of all the
+edges in the tree is minimized. The algorithm operates by building this tree one vertex
+at a time, from an arbitrary starting vertex, at each step adding the cheapest possible
+connection from the tree to another vertex.
+"""
+
+from sys import maxsize
+from typing import Dict, Optional, Tuple, Union
+
+
+def get_parent_position(position: int) -> int:
+ """
+ heap helper function get the position of the parent of the current node
+
+ >>> get_parent_position(1)
+ 0
+ >>> get_parent_position(2)
+ 0
+ """
+ return (position - 1) // 2
+
+
+def get_child_left_position(position: int) -> int:
+ """
+ heap helper function get the position of the left child of the current node
+
+ >>> get_child_left_position(0)
+ 1
+ """
+ return (2 * position) + 1
+
+
+def get_child_right_position(position: int) -> int:
+ """
+ heap helper function get the position of the right child of the current node
+
+ >>> get_child_right_position(0)
+ 2
+ """
+ return (2 * position) + 2
+
+
+class MinPriorityQueue:
+ """
+ Minimum Priority Queue Class
+
+ Functions:
+ is_empty: function to check if the priority queue is empty
+ push: function to add an element with given priority to the queue
+ extract_min: function to remove and return the element with lowest weight (highest
+ priority)
+ update_key: function to update the weight of the given key
+ _bubble_up: helper function to place a node at the proper position (upward
+ movement)
+ _bubble_down: helper function to place a node at the proper position (downward
+ movement)
+ _swap_nodes: helper function to swap the nodes at the given positions
+
+ >>> queue = MinPriorityQueue()
+
+ >>> queue.push(1, 1000)
+ >>> queue.push(2, 100)
+ >>> queue.push(3, 4000)
+ >>> queue.push(4, 3000)
+
+ >>> print(queue.extract_min())
+ 2
+
+ >>> queue.update_key(4, 50)
+
+ >>> print(queue.extract_min())
+ 4
+ >>> print(queue.extract_min())
+ 1
+ >>> print(queue.extract_min())
+ 3
+ """
+
+ def __init__(self) -> None:
+ self.heap = []
+ self.position_map = {}
+ self.elements = 0
+
+ def __len__(self) -> int:
+ return self.elements
+
+ def __repr__(self) -> str:
+ return str(self.heap)
+
+ def is_empty(self) -> bool:
+ # Check if the priority queue is empty
+ return self.elements == 0
+
+ def push(self, elem: Union[int, str], weight: int) -> None:
+ # Add an element with given priority to the queue
+ self.heap.append((elem, weight))
+ self.position_map[elem] = self.elements
+ self.elements += 1
+ self._bubble_up(elem)
+
+ def extract_min(self) -> Union[int, str]:
+ # Remove and return the element with lowest weight (highest priority)
+ if self.elements > 1:
+ self._swap_nodes(0, self.elements - 1)
+ elem, _ = self.heap.pop()
+ del self.position_map[elem]
+ self.elements -= 1
+ if self.elements > 0:
+ bubble_down_elem, _ = self.heap[0]
+ self._bubble_down(bubble_down_elem)
+ return elem
+
+ def update_key(self, elem: Union[int, str], weight: int) -> None:
+ # Update the weight of the given key
+ position = self.position_map[elem]
+ self.heap[position] = (elem, weight)
+ if position > 0:
+ parent_position = get_parent_position(position)
+ _, parent_weight = self.heap[parent_position]
+ if parent_weight > weight:
+ self._bubble_up(elem)
+ else:
+ self._bubble_down(elem)
+ else:
+ self._bubble_down(elem)
+
+ def _bubble_up(self, elem: Union[int, str]) -> None:
+ # Place a node at the proper position (upward movement) [to be used internally
+ # only]
+ curr_pos = self.position_map[elem]
+ if curr_pos == 0:
+ return
+ parent_position = get_parent_position(curr_pos)
+ _, weight = self.heap[curr_pos]
+ _, parent_weight = self.heap[parent_position]
+ if parent_weight > weight:
+ self._swap_nodes(parent_position, curr_pos)
+ return self._bubble_up(elem)
+ return
+
+ def _bubble_down(self, elem: Union[int, str]) -> None:
+ # Place a node at the proper position (downward movement) [to be used
+ # internally only]
+ curr_pos = self.position_map[elem]
+ _, weight = self.heap[curr_pos]
+ child_left_position = get_child_left_position(curr_pos)
+ child_right_position = get_child_right_position(curr_pos)
+ if child_left_position < self.elements and child_right_position < self.elements:
+ _, child_left_weight = self.heap[child_left_position]
+ _, child_right_weight = self.heap[child_right_position]
+ if child_right_weight < child_left_weight:
+ if child_right_weight < weight:
+ self._swap_nodes(child_right_position, curr_pos)
+ return self._bubble_down(elem)
+ if child_left_position < self.elements:
+ _, child_left_weight = self.heap[child_left_position]
+ if child_left_weight < weight:
+ self._swap_nodes(child_left_position, curr_pos)
+ return self._bubble_down(elem)
+ else:
+ return
+ if child_right_position < self.elements:
+ _, child_right_weight = self.heap[child_right_position]
+ if child_right_weight < weight:
+ self._swap_nodes(child_right_position, curr_pos)
+ return self._bubble_down(elem)
+ else:
+ return
+
+ def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
+ # Swap the nodes at the given positions
+ node1_elem = self.heap[node1_pos][0]
+ node2_elem = self.heap[node2_pos][0]
+ self.heap[node1_pos], self.heap[node2_pos] = (
+ self.heap[node2_pos],
+ self.heap[node1_pos],
+ )
+ self.position_map[node1_elem] = node2_pos
+ self.position_map[node2_elem] = node1_pos
+
+
+class GraphUndirectedWeighted:
+ """
+ Graph Undirected Weighted Class
+
+ Functions:
+ add_node: function to add a node in the graph
+ add_edge: function to add an edge between 2 nodes in the graph
+ """
+
+ def __init__(self) -> None:
+ self.connections = {}
+ self.nodes = 0
+
+ def __repr__(self) -> str:
+ return str(self.connections)
+
+ def __len__(self) -> int:
+ return self.nodes
+
+ def add_node(self, node: Union[int, str]) -> None:
+ # Add a node in the graph if it is not in the graph
+ if node not in self.connections:
+ self.connections[node] = {}
+ self.nodes += 1
+
+ def add_edge(
+ self, node1: Union[int, str], node2: Union[int, str], weight: int
+ ) -> None:
+ # Add an edge between 2 nodes in the graph
+ self.add_node(node1)
+ self.add_node(node2)
+ self.connections[node1][node2] = weight
+ self.connections[node2][node1] = weight
+
+
+def prims_algo(
+ graph: GraphUndirectedWeighted,
+) -> Tuple[Dict[str, int], Dict[str, Optional[str]]]:
+ """
+ >>> graph = GraphUndirectedWeighted()
+
+ >>> graph.add_edge("a", "b", 3)
+ >>> graph.add_edge("b", "c", 10)
+ >>> graph.add_edge("c", "d", 5)
+ >>> graph.add_edge("a", "c", 15)
+ >>> graph.add_edge("b", "d", 100)
+
+ >>> dist, parent = prims_algo(graph)
+
+ >>> abs(dist["a"] - dist["b"])
+ 3
+ >>> abs(dist["d"] - dist["b"])
+ 15
+ >>> abs(dist["a"] - dist["c"])
+ 13
+ """
+ # prim's algorithm for minimum spanning tree
+ dist = {node: maxsize for node in graph.connections}
+ parent = {node: None for node in graph.connections}
+ priority_queue = MinPriorityQueue()
+ [priority_queue.push(node, weight) for node, weight in dist.items()]
+ if priority_queue.is_empty():
+ return dist, parent
+
+ # initialization
+ node = priority_queue.extract_min()
+ dist[node] = 0
+ for neighbour in graph.connections[node]:
+ if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
+ dist[neighbour] = dist[node] + graph.connections[node][neighbour]
+ priority_queue.update_key(neighbour, dist[neighbour])
+ parent[neighbour] = node
+ # running prim's algorithm
+ while not priority_queue.is_empty():
+ node = priority_queue.extract_min()
+ for neighbour in graph.connections[node]:
+ if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
+ dist[neighbour] = dist[node] + graph.connections[node][neighbour]
+ priority_queue.update_key(neighbour, dist[neighbour])
+ parent[neighbour] = node
+ return dist, parent
+
+
+if __name__ == "__main__":
+ from doctest import testmod
+
+ testmod()
diff --git a/graphs/multi_heuristic_astar.py b/graphs/multi_heuristic_astar.py
new file mode 100644
index 000000000000..77ca5760d5f0
--- /dev/null
+++ b/graphs/multi_heuristic_astar.py
@@ -0,0 +1,313 @@
+import heapq
+
+import numpy as np
+
+
+class PriorityQueue:
+ def __init__(self):
+ self.elements = []
+ self.set = set()
+
+ def minkey(self):
+ if not self.empty():
+ return self.elements[0][0]
+ else:
+ return float("inf")
+
+ def empty(self):
+ return len(self.elements) == 0
+
+ def put(self, item, priority):
+ if item not in self.set:
+ heapq.heappush(self.elements, (priority, item))
+ self.set.add(item)
+ else:
+ # update
+ # print("update", item)
+ temp = []
+ (pri, x) = heapq.heappop(self.elements)
+ while x != item:
+ temp.append((pri, x))
+ (pri, x) = heapq.heappop(self.elements)
+ temp.append((priority, item))
+ for (pro, xxx) in temp:
+ heapq.heappush(self.elements, (pro, xxx))
+
+ def remove_element(self, item):
+ if item in self.set:
+ self.set.remove(item)
+ temp = []
+ (pro, x) = heapq.heappop(self.elements)
+ while x != item:
+ temp.append((pro, x))
+ (pro, x) = heapq.heappop(self.elements)
+ for (prito, yyy) in temp:
+ heapq.heappush(self.elements, (prito, yyy))
+
+ def top_show(self):
+ return self.elements[0][1]
+
+ def get(self):
+ (priority, item) = heapq.heappop(self.elements)
+ self.set.remove(item)
+ return (priority, item)
+
+
+def consistent_heuristic(P, goal):
+ # euclidean distance
+ a = np.array(P)
+ b = np.array(goal)
+ return np.linalg.norm(a - b)
+
+
+def heuristic_2(P, goal):
+ # integer division by time variable
+ return consistent_heuristic(P, goal) // t
+
+
+def heuristic_1(P, goal):
+ # manhattan distance
+ return abs(P[0] - goal[0]) + abs(P[1] - goal[1])
+
+
+def key(start, i, goal, g_function):
+ ans = g_function[start] + W1 * heuristics[i](start, goal)
+ return ans
+
+
+def do_something(back_pointer, goal, start):
+ grid = np.chararray((n, n))
+ for i in range(n):
+ for j in range(n):
+ grid[i][j] = "*"
+
+ for i in range(n):
+ for j in range(n):
+ if (j, (n - 1) - i) in blocks:
+ grid[i][j] = "#"
+
+ grid[0][(n - 1)] = "-"
+ x = back_pointer[goal]
+ while x != start:
+ (x_c, y_c) = x
+ # print(x)
+ grid[(n - 1) - y_c][x_c] = "-"
+ x = back_pointer[x]
+ grid[(n - 1)][0] = "-"
+
+ for i in range(n):
+ for j in range(n):
+ if (i, j) == (0, n - 1):
+ print(grid[i][j], end=" ")
+ print("<-- End position", end=" ")
+ else:
+ print(grid[i][j], end=" ")
+ print()
+ print("^")
+ print("Start position")
+ print()
+ print("# is an obstacle")
+ print("- is the path taken by algorithm")
+ print("PATH TAKEN BY THE ALGORITHM IS:-")
+ x = back_pointer[goal]
+ while x != start:
+ print(x, end=" ")
+ x = back_pointer[x]
+ print(x)
+ quit()
+
+
+def valid(p):
+ if p[0] < 0 or p[0] > n - 1:
+ return False
+ if p[1] < 0 or p[1] > n - 1:
+ return False
+ return True
+
+
+def expand_state(
+ s,
+ j,
+ visited,
+ g_function,
+ close_list_anchor,
+ close_list_inad,
+ open_list,
+ back_pointer,
+):
+ for itera in range(n_heuristic):
+ open_list[itera].remove_element(s)
+ # print("s", s)
+ # print("j", j)
+ (x, y) = s
+ left = (x - 1, y)
+ right = (x + 1, y)
+ up = (x, y + 1)
+ down = (x, y - 1)
+
+ for neighbours in [left, right, up, down]:
+ if neighbours not in blocks:
+ if valid(neighbours) and neighbours not in visited:
+ # print("neighbour", neighbours)
+ visited.add(neighbours)
+ back_pointer[neighbours] = -1
+ g_function[neighbours] = float("inf")
+
+ if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
+ g_function[neighbours] = g_function[s] + 1
+ back_pointer[neighbours] = s
+ if neighbours not in close_list_anchor:
+ open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
+ if neighbours not in close_list_inad:
+ for var in range(1, n_heuristic):
+ if key(neighbours, var, goal, g_function) <= W2 * key(
+ neighbours, 0, goal, g_function
+ ):
+ open_list[j].put(
+ neighbours, key(neighbours, var, goal, g_function)
+ )
+
+
+def make_common_ground():
+ some_list = []
+ for x in range(1, 5):
+ for y in range(1, 6):
+ some_list.append((x, y))
+
+ for x in range(15, 20):
+ some_list.append((x, 17))
+
+ for x in range(10, 19):
+ for y in range(1, 15):
+ some_list.append((x, y))
+
+ # L block
+ for x in range(1, 4):
+ for y in range(12, 19):
+ some_list.append((x, y))
+ for x in range(3, 13):
+ for y in range(16, 19):
+ some_list.append((x, y))
+ return some_list
+
+
+heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}
+
+blocks_blk = [
+ (0, 1),
+ (1, 1),
+ (2, 1),
+ (3, 1),
+ (4, 1),
+ (5, 1),
+ (6, 1),
+ (7, 1),
+ (8, 1),
+ (9, 1),
+ (10, 1),
+ (11, 1),
+ (12, 1),
+ (13, 1),
+ (14, 1),
+ (15, 1),
+ (16, 1),
+ (17, 1),
+ (18, 1),
+ (19, 1),
+]
+blocks_no = []
+blocks_all = make_common_ground()
+
+
+blocks = blocks_blk
+# hyper parameters
+W1 = 1
+W2 = 1
+n = 20
+n_heuristic = 3 # one consistent and two other inconsistent
+
+# start and end destination
+start = (0, 0)
+goal = (n - 1, n - 1)
+
+t = 1
+
+
+def multi_a_star(start, goal, n_heuristic):
+ g_function = {start: 0, goal: float("inf")}
+ back_pointer = {start: -1, goal: -1}
+ open_list = []
+ visited = set()
+
+ for i in range(n_heuristic):
+ open_list.append(PriorityQueue())
+ open_list[i].put(start, key(start, i, goal, g_function))
+
+ close_list_anchor = []
+ close_list_inad = []
+ while open_list[0].minkey() < float("inf"):
+ for i in range(1, n_heuristic):
+ # print(open_list[0].minkey(), open_list[i].minkey())
+ if open_list[i].minkey() <= W2 * open_list[0].minkey():
+ global t
+ t += 1
+ if g_function[goal] <= open_list[i].minkey():
+ if g_function[goal] < float("inf"):
+ do_something(back_pointer, goal, start)
+ else:
+ _, get_s = open_list[i].top_show()
+ visited.add(get_s)
+ expand_state(
+ get_s,
+ i,
+ visited,
+ g_function,
+ close_list_anchor,
+ close_list_inad,
+ open_list,
+ back_pointer,
+ )
+ close_list_inad.append(get_s)
+ else:
+ if g_function[goal] <= open_list[0].minkey():
+ if g_function[goal] < float("inf"):
+ do_something(back_pointer, goal, start)
+ else:
+ get_s = open_list[0].top_show()
+ visited.add(get_s)
+ expand_state(
+ get_s,
+ 0,
+ visited,
+ g_function,
+ close_list_anchor,
+ close_list_inad,
+ open_list,
+ back_pointer,
+ )
+ close_list_anchor.append(get_s)
+ print("No path found to goal")
+ print()
+ for i in range(n - 1, -1, -1):
+ for j in range(n):
+ if (j, i) in blocks:
+ print("#", end=" ")
+ elif (j, i) in back_pointer:
+ if (j, i) == (n - 1, n - 1):
+ print("*", end=" ")
+ else:
+ print("-", end=" ")
+ else:
+ print("*", end=" ")
+ if (j, i) == (n - 1, n - 1):
+ print("<-- End position", end=" ")
+ print()
+ print("^")
+ print("Start position")
+ print()
+ print("# is an obstacle")
+ print("- is the path taken by algorithm")
+
+
+if __name__ == "__main__":
+ multi_a_star(start, goal, n_heuristic)
diff --git a/graphs/multi_hueristic_astar.py b/graphs/multi_hueristic_astar.py
deleted file mode 100644
index 1acd098f327d..000000000000
--- a/graphs/multi_hueristic_astar.py
+++ /dev/null
@@ -1,266 +0,0 @@
-from __future__ import print_function
-import heapq
-import numpy as np
-
-try:
- xrange # Python 2
-except NameError:
- xrange = range # Python 3
-
-
-class PriorityQueue:
- def __init__(self):
- self.elements = []
- self.set = set()
-
- def minkey(self):
- if not self.empty():
- return self.elements[0][0]
- else:
- return float('inf')
-
- def empty(self):
- return len(self.elements) == 0
-
- def put(self, item, priority):
- if item not in self.set:
- heapq.heappush(self.elements, (priority, item))
- self.set.add(item)
- else:
- # update
- # print("update", item)
- temp = []
- (pri, x) = heapq.heappop(self.elements)
- while x != item:
- temp.append((pri, x))
- (pri, x) = heapq.heappop(self.elements)
- temp.append((priority, item))
- for (pro, xxx) in temp:
- heapq.heappush(self.elements, (pro, xxx))
-
- def remove_element(self, item):
- if item in self.set:
- self.set.remove(item)
- temp = []
- (pro, x) = heapq.heappop(self.elements)
- while x != item:
- temp.append((pro, x))
- (pro, x) = heapq.heappop(self.elements)
- for (prito, yyy) in temp:
- heapq.heappush(self.elements, (prito, yyy))
-
- def top_show(self):
- return self.elements[0][1]
-
- def get(self):
- (priority, item) = heapq.heappop(self.elements)
- self.set.remove(item)
- return (priority, item)
-
-def consistent_hueristic(P, goal):
- # euclidean distance
- a = np.array(P)
- b = np.array(goal)
- return np.linalg.norm(a - b)
-
-def hueristic_2(P, goal):
- # integer division by time variable
- return consistent_hueristic(P, goal) // t
-
-def hueristic_1(P, goal):
- # manhattan distance
- return abs(P[0] - goal[0]) + abs(P[1] - goal[1])
-
-def key(start, i, goal, g_function):
- ans = g_function[start] + W1 * hueristics[i](start, goal)
- return ans
-
-def do_something(back_pointer, goal, start):
- grid = np.chararray((n, n))
- for i in range(n):
- for j in range(n):
- grid[i][j] = '*'
-
- for i in range(n):
- for j in range(n):
- if (j, (n-1)-i) in blocks:
- grid[i][j] = "#"
-
- grid[0][(n-1)] = "-"
- x = back_pointer[goal]
- while x != start:
- (x_c, y_c) = x
- # print(x)
- grid[(n-1)-y_c][x_c] = "-"
- x = back_pointer[x]
- grid[(n-1)][0] = "-"
-
-
- for i in xrange(n):
- for j in range(n):
- if (i, j) == (0, n-1):
- print(grid[i][j], end=' ')
- print("<-- End position", end=' ')
- else:
- print(grid[i][j], end=' ')
- print()
- print("^")
- print("Start position")
- print()
- print("# is an obstacle")
- print("- is the path taken by algorithm")
- print("PATH TAKEN BY THE ALGORITHM IS:-")
- x = back_pointer[goal]
- while x != start:
- print(x, end=' ')
- x = back_pointer[x]
- print(x)
- quit()
-
-def valid(p):
- if p[0] < 0 or p[0] > n-1:
- return False
- if p[1] < 0 or p[1] > n-1:
- return False
- return True
-
-def expand_state(s, j, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer):
- for itera in range(n_hueristic):
- open_list[itera].remove_element(s)
- # print("s", s)
- # print("j", j)
- (x, y) = s
- left = (x-1, y)
- right = (x+1, y)
- up = (x, y+1)
- down = (x, y-1)
-
- for neighbours in [left, right, up, down]:
- if neighbours not in blocks:
- if valid(neighbours) and neighbours not in visited:
- # print("neighbour", neighbours)
- visited.add(neighbours)
- back_pointer[neighbours] = -1
- g_function[neighbours] = float('inf')
-
- if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
- g_function[neighbours] = g_function[s] + 1
- back_pointer[neighbours] = s
- if neighbours not in close_list_anchor:
- open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
- if neighbours not in close_list_inad:
- for var in range(1,n_hueristic):
- if key(neighbours, var, goal, g_function) <= W2 * key(neighbours, 0, goal, g_function):
- # print("why not plssssssssss")
- open_list[j].put(neighbours, key(neighbours, var, goal, g_function))
-
-
- # print
-
-def make_common_ground():
- some_list = []
- # block 1
- for x in range(1, 5):
- for y in range(1, 6):
- some_list.append((x, y))
-
- # line
- for x in range(15, 20):
- some_list.append((x, 17))
-
- # block 2 big
- for x in range(10, 19):
- for y in range(1, 15):
- some_list.append((x, y))
-
- # L block
- for x in range(1, 4):
- for y in range(12, 19):
- some_list.append((x, y))
- for x in range(3, 13):
- for y in range(16, 19):
- some_list.append((x, y))
- return some_list
-
-hueristics = {0: consistent_hueristic, 1: hueristic_1, 2: hueristic_2}
-
-blocks_blk = [(0, 1),(1, 1),(2, 1),(3, 1),(4, 1),(5, 1),(6, 1),(7, 1),(8, 1),(9, 1),(10, 1),(11, 1),(12, 1),(13, 1),(14, 1),(15, 1),(16, 1),(17, 1),(18, 1), (19, 1)]
-blocks_no = []
-blocks_all = make_common_ground()
-
-
-
-
-blocks = blocks_blk
-# hyper parameters
-W1 = 1
-W2 = 1
-n = 20
-n_hueristic = 3 # one consistent and two other inconsistent
-
-# start and end destination
-start = (0, 0)
-goal = (n-1, n-1)
-
-t = 1
-def multi_a_star(start, goal, n_hueristic):
- g_function = {start: 0, goal: float('inf')}
- back_pointer = {start:-1, goal:-1}
- open_list = []
- visited = set()
-
- for i in range(n_hueristic):
- open_list.append(PriorityQueue())
- open_list[i].put(start, key(start, i, goal, g_function))
-
- close_list_anchor = []
- close_list_inad = []
- while open_list[0].minkey() < float('inf'):
- for i in range(1, n_hueristic):
- # print("i", i)
- # print(open_list[0].minkey(), open_list[i].minkey())
- if open_list[i].minkey() <= W2 * open_list[0].minkey():
- global t
- t += 1
- # print("less prio")
- if g_function[goal] <= open_list[i].minkey():
- if g_function[goal] < float('inf'):
- do_something(back_pointer, goal, start)
- else:
- _, get_s = open_list[i].top_show()
- visited.add(get_s)
- expand_state(get_s, i, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer)
- close_list_inad.append(get_s)
- else:
- # print("more prio")
- if g_function[goal] <= open_list[0].minkey():
- if g_function[goal] < float('inf'):
- do_something(back_pointer, goal, start)
- else:
- # print("hoolla")
- get_s = open_list[0].top_show()
- visited.add(get_s)
- expand_state(get_s, 0, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer)
- close_list_anchor.append(get_s)
- print("No path found to goal")
- print()
- for i in range(n-1,-1, -1):
- for j in range(n):
- if (j, i) in blocks:
- print('#', end=' ')
- elif (j, i) in back_pointer:
- if (j, i) == (n-1, n-1):
- print('*', end=' ')
- else:
- print('-', end=' ')
- else:
- print('*', end=' ')
- if (j, i) == (n-1, n-1):
- print('<-- End position', end=' ')
- print()
- print("^")
- print("Start position")
- print()
- print("# is an obstacle")
- print("- is the path taken by algorithm")
-multi_a_star(start, goal, n_hueristic)
diff --git a/graphs/page_rank.py b/graphs/page_rank.py
new file mode 100644
index 000000000000..0f5129146ddf
--- /dev/null
+++ b/graphs/page_rank.py
@@ -0,0 +1,70 @@
"""
Author: https://github.com/bhushan-borole
"""
"""
The input graph for the algorithm is:

  A B C
A 0 1 1
B 0 0 1
C 1 0 0

"""

# Adjacency matrix: graph[row][col] == 1 encodes a directed edge row -> col.
graph = [[0, 1, 1], [0, 0, 1], [1, 0, 0]]
+
+
class Node:
    """A graph vertex that records which node names link in and out of it."""

    def __init__(self, name):
        # Public attributes; page_rank() reads them directly.
        self.name = name
        self.inbound = []
        self.outbound = []

    def add_inbound(self, node):
        """Record *node* as linking to this node."""
        self.inbound.append(node)

    def add_outbound(self, node):
        """Record *node* as being linked from this node."""
        self.outbound.append(node)

    def __repr__(self):
        """Human-readable summary used by the demo printout."""
        summary = f"Node {self.name}: Inbound: {self.inbound} ; Outbound: {self.outbound}"
        return summary
+
+
def page_rank(nodes, limit=3, d=0.85):
    """Run a simplified PageRank over *nodes*, printing every iteration.

    Args:
        nodes: iterable of objects exposing ``name``, ``inbound`` and
            ``outbound`` attributes (see ``Node``).
        limit: number of update iterations to run.
        d: damping factor.

    Returns:
        dict mapping node name to its final rank (the per-iteration
        printouts of the original demo are preserved).
    """
    ranks = {node.name: 1 for node in nodes}
    # Out-degree per node. For graphs built by main(), every name appearing
    # in some node's inbound list has at least the one outbound edge that
    # points here, so the division below cannot hit zero — presumably true
    # for other callers too, but worth confirming.
    outbounds = {node.name: len(node.outbound) for node in nodes}

    for i in range(limit):
        print(f"======= Iteration {i + 1} =======")
        for node in nodes:
            # Ranks are updated in place, so later nodes in this pass see
            # the already-updated ranks of earlier ones (Gauss-Seidel style).
            ranks[node.name] = (1 - d) + d * sum(
                ranks[ib] / outbounds[ib] for ib in node.inbound
            )
        print(ranks)
    return ranks
+
+
def main():
    """Build the demo graph interactively and run PageRank on it."""
    node_names = list(input("Enter Names of the Nodes: ").split())
    node_list = [Node(label) for label in node_names]

    # graph[src][dst] == 1 encodes a directed edge src -> dst.
    for src, row in enumerate(graph):
        for dst, cell in enumerate(row):
            if cell == 1:
                node_list[dst].add_inbound(node_names[src])
                node_list[src].add_outbound(node_names[dst])

    print("======= Nodes =======")
    for vertex in node_list:
        print(vertex)

    page_rank(node_list)


if __name__ == "__main__":
    main()
diff --git a/graphs/prim.py b/graphs/prim.py
new file mode 100644
index 000000000000..70329da7e8e2
--- /dev/null
+++ b/graphs/prim.py
@@ -0,0 +1,152 @@
+"""Prim's Algorithm.
+
+ Determines the minimum spanning tree(MST) of a graph using the Prim's Algorithm.
+
+ Details: https://en.wikipedia.org/wiki/Prim%27s_algorithm
+"""
+
+import heapq as hq
+import math
+from typing import Iterator
+
+
class Vertex:
    """A graph vertex used by Prim's algorithm."""

    def __init__(self, id):
        """
        Arguments:
            id - input an id to identify the vertex
        Attributes:
            neighbors - a list of the vertices it is linked to
            edges - a dict to store the edges's weight
        """
        self.id = str(id)
        self.key = None  # cheapest known edge weight connecting to the tree
        self.pi = None  # predecessor vertex in the spanning tree
        self.neighbors = []
        self.edges = {}  # {vertex id: distance}

    def add_neighbor(self, vertex):
        """Register *vertex* as adjacent to this one."""
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        """Store the weight of the edge towards *vertex*."""
        self.edges[vertex.id] = weight

    def __lt__(self, other):
        """Order vertices by key so min()/heapq pick the cheapest one."""
        return self.key < other.key

    def __repr__(self):
        """Display a vertex through its id."""
        return self.id
+
+
def connect(graph, a, b, edge):
    """Link vertices ``a`` and ``b`` (1-based indices into *graph*) with an
    undirected edge of weight *edge*."""
    u, v = graph[a - 1], graph[b - 1]
    # register adjacency in both directions
    u.add_neighbor(v)
    v.add_neighbor(u)
    # and the symmetric edge weights
    u.add_edge(v, edge)
    v.add_edge(u, edge)
+
+
def prim(graph: list, root: Vertex) -> list:
    """Prim's Algorithm.

    Runtime:
        O(mn) with `m` edges and `n` vertices

    Return:
        List with the edges of a Minimum Spanning Tree

    Usage:
        prim(graph, graph[0])
    """
    # Initialise every vertex as unreached, then seed the search at *root*.
    for vertex in graph:
        vertex.key = math.inf
        vertex.pi = None
    root.key = 0

    pending = graph[:]
    while pending:
        # Extract the cheapest vertex still outside the growing tree.
        cheapest = min(pending)
        pending.remove(cheapest)
        for neighbor in cheapest.neighbors:
            if neighbor in pending and cheapest.edges[neighbor.id] < neighbor.key:
                neighbor.pi = cheapest
                neighbor.key = cheapest.edges[neighbor.id]

    # Report each non-root vertex with its tree predecessor, converting the
    # 0-based ids back to 1-based labels.
    return [(int(vertex.id) + 1, int(vertex.pi.id) + 1) for vertex in graph[1:]]
+
+
def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Prim's Algorithm with min heap.

    Runtime:
        O((m + n)log n) with `m` edges and `n` vertices

    Yield:
        Edges of a Minimum Spanning Tree

    Usage:
        prim(graph, graph[0])
    """
    # Reset the per-vertex bookkeeping, then seed the search at *root*.
    for vertex in graph:
        vertex.key = math.inf
        vertex.pi = None
    root.key = 0

    heap = list(graph)
    hq.heapify(heap)

    while heap:
        cheapest = hq.heappop(heap)
        for neighbor in cheapest.neighbors:
            if neighbor in heap and cheapest.edges[neighbor.id] < neighbor.key:
                neighbor.pi = cheapest
                neighbor.key = cheapest.edges[neighbor.id]
                # A key changed, so restore the heap invariant.
                hq.heapify(heap)

    for vertex in graph[1:]:
        yield (int(vertex.id) + 1, int(vertex.pi.id) + 1)
+
+
# The doctests below build the example graph and check that the list-based
# and heap-based implementations produce the same spanning tree.
def test_vector() -> None:
    """
    # Creates a list to store x vertices.
    >>> x = 5
    >>> G = [Vertex(n) for n in range(x)]

    >>> connect(G, 1, 2, 15)
    >>> connect(G, 1, 3, 12)
    >>> connect(G, 2, 4, 13)
    >>> connect(G, 2, 5, 5)
    >>> connect(G, 3, 2, 6)
    >>> connect(G, 3, 4, 6)
    >>> connect(G, 0, 0, 0)  # Generate the minimum spanning tree:
    >>> G_heap = G[:]
    >>> MST = prim(G, G[0])
    >>> MST_heap = prim_heap(G, G[0])
    >>> for i in MST:
    ...     print(i)
    (2, 3)
    (3, 1)
    (4, 3)
    (5, 2)
    >>> for i in MST_heap:
    ...     print(i)
    (2, 3)
    (3, 1)
    (4, 3)
    (5, 2)
    """


if __name__ == "__main__":
    import doctest

    # Running the module executes the doctest examples above.
    doctest.testmod()
diff --git a/graphs/scc_kosaraju.py b/graphs/scc_kosaraju.py
index 1f13ebaba36b..573c1bf5e363 100644
--- a/graphs/scc_kosaraju.py
+++ b/graphs/scc_kosaraju.py
@@ -1,46 +1,51 @@
-from __future__ import print_function
-# n - no of nodes, m - no of edges
-n, m = list(map(int,input().split()))
-
-g = [[] for i in range(n)] #graph
-r = [[] for i in range(n)] #reversed graph
-# input graph data (edges)
-for i in range(m):
- u, v = list(map(int,input().split()))
- g[u].append(v)
- r[v].append(u)
-
-stack = []
-visit = [False]*n
-scc = []
-component = []
-
def dfs(u):
global g, r, scc, component, visit, stack
- if visit[u]: return
+ if visit[u]:
+ return
visit[u] = True
for v in g[u]:
dfs(v)
stack.append(u)
+
def dfs2(u):
global g, r, scc, component, visit, stack
- if visit[u]: return
+ if visit[u]:
+ return
visit[u] = True
component.append(u)
for v in r[u]:
dfs2(v)
+
def kosaraju():
global g, r, scc, component, visit, stack
for i in range(n):
dfs(i)
- visit = [False]*n
+ visit = [False] * n
for i in stack[::-1]:
- if visit[i]: continue
+ if visit[i]:
+ continue
component = []
dfs2(i)
scc.append(component)
return scc
-print(kosaraju())
+
+if __name__ == "__main__":
+ # n - no of nodes, m - no of edges
+ n, m = list(map(int, input().strip().split()))
+
+ g = [[] for i in range(n)] # graph
+ r = [[] for i in range(n)] # reversed graph
+ # input graph data (edges)
+ for i in range(m):
+ u, v = list(map(int, input().strip().split()))
+ g[u].append(v)
+ r[v].append(u)
+
+ stack = []
+ visit = [False] * n
+ scc = []
+ component = []
+ print(kosaraju())
diff --git a/graphs/strongly_connected_components.py b/graphs/strongly_connected_components.py
new file mode 100644
index 000000000000..d469df0c625b
--- /dev/null
+++ b/graphs/strongly_connected_components.py
@@ -0,0 +1,92 @@
+"""
+https://en.wikipedia.org/wiki/Strongly_connected_component
+
+Finding strongly connected components in directed graph
+
+"""
+
# Fixture graphs (adjacency lists) shared by the doctests below.
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}

test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
+
+
def topology_sort(graph: dict, vert: int, visited: list) -> list:
    """
    Use depth first search to sort graph
    At this time graph is the same as input
    >>> topology_sort(test_graph_1, 0, 5 * [False])
    [1, 2, 4, 3, 0]
    >>> topology_sort(test_graph_2, 0, 6 * [False])
    [2, 1, 5, 4, 3, 0]
    """
    visited[vert] = True

    # Post-order DFS: descendants are listed first, the vertex itself last.
    order = []
    for successor in graph[vert]:
        if not visited[successor]:
            order.extend(topology_sort(graph, successor, visited))
    order.append(vert)
    return order
+
+
def find_components(reversed_graph: dict, vert: int, visited: list) -> list:
    """
    Use depth first search to find strongly connected
    vertices. Now graph is reversed
    >>> find_components({0: [1], 1: [2], 2: [0]}, 0, 5 * [False])
    [0, 1, 2]
    >>> find_components({0: [2], 1: [0], 2: [0, 1]}, 0, 6 * [False])
    [0, 2, 1]
    """
    visited[vert] = True

    # Pre-order DFS over the reversed edges collects one whole component.
    component = [vert]
    for predecessor in reversed_graph[vert]:
        if not visited[predecessor]:
            component.extend(find_components(reversed_graph, predecessor, visited))
    return component
+
+
def strongly_connected_components(graph: dict) -> list:
    """
    This function takes graph as a parameter
    and then returns the list of strongly connected components
    >>> strongly_connected_components(test_graph_1)
    [[0, 1, 2], [3], [4]]
    >>> strongly_connected_components(test_graph_2)
    [[0, 2, 1], [3, 5, 4]]
    """
    n = len(graph)

    # Build the transpose graph (every edge flipped).
    reversed_graph = {vert: [] for vert in range(n)}
    for source, destinations in graph.items():
        for destination in destinations:
            reversed_graph[destination].append(source)

    # First pass: record the DFS finishing order over the whole graph.
    visited = n * [False]
    order = []
    for vert in range(n):
        if not visited[vert]:
            order += topology_sort(graph, vert, visited)

    # Second pass: peel components off the transpose in reverse finish order.
    visited = n * [False]
    components_list = []
    for vert in reversed(order):
        if not visited[vert]:
            components_list.append(find_components(reversed_graph, vert, visited))

    return components_list
+
+
if __name__ == "__main__":
    import doctest

    # Run the doctest examples embedded in the functions above.
    doctest.testmod()
diff --git a/graphs/tarjans_scc.py b/graphs/tarjans_scc.py
index 89754e593508..30f8ca8a204f 100644
--- a/graphs/tarjans_scc.py
+++ b/graphs/tarjans_scc.py
@@ -5,19 +5,20 @@ def tarjan(g):
"""
Tarjan's algo for finding strongly connected components in a directed graph
- Uses two main attributes of each node to track reachability, the index of that node within a component(index),
- and the lowest index reachable from that node(lowlink).
+ Uses two main attributes of each node to track reachability, the index of that node
+ within a component(index), and the lowest index reachable from that node(lowlink).
- We then perform a dfs of the each component making sure to update these parameters for each node and saving the
- nodes we visit on the way.
+ We then perform a dfs of the each component making sure to update these parameters
+ for each node and saving the nodes we visit on the way.
- If ever we find that the lowest reachable node from a current node is equal to the index of the current node then it
- must be the root of a strongly connected component and so we save it and it's equireachable vertices as a strongly
+ If ever we find that the lowest reachable node from a current node is equal to the
+ index of the current node then it must be the root of a strongly connected
+ component and so we save it and it's equireachable vertices as a strongly
connected component.
- Complexity: strong_connect() is called at most once for each node and has a complexity of O(|E|) as it is DFS.
+ Complexity: strong_connect() is called at most once for each node and has a
+ complexity of O(|E|) as it is DFS.
Therefore this has complexity O(|V| + |E|) for a graph G = (V, E)
-
"""
n = len(g)
@@ -36,9 +37,13 @@ def strong_connect(v, index, components):
for w in g[v]:
if index_of[w] == -1:
index = strong_connect(w, index, components)
- lowlink_of[v] = lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
+ lowlink_of[v] = (
+ lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
+ )
elif on_stack[w]:
- lowlink_of[v] = lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
+ lowlink_of[v] = (
+ lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
+ )
if lowlink_of[v] == index_of[v]:
component = []
@@ -67,7 +72,7 @@ def create_graph(n, edges):
return g
-if __name__ == '__main__':
+if __name__ == "__main__":
# Test
n_vertices = 7
source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
diff --git a/graphs/tests/test_min_spanning_tree_kruskal.py b/graphs/tests/test_min_spanning_tree_kruskal.py
new file mode 100644
index 000000000000..3a527aef384f
--- /dev/null
+++ b/graphs/tests/test_min_spanning_tree_kruskal.py
@@ -0,0 +1,36 @@
+from graphs.minimum_spanning_tree_kruskal import kruskal
+
+
def test_kruskal_successful_result():
    """Kruskal on a known 9-node graph yields the expected MST edge set."""
    num_nodes, num_edges = 9, 14
    # (u, v, weight) triples describing the input graph.
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    result = kruskal(num_nodes, num_edges, edges)

    # Edge order is an implementation detail; compare as sorted lists.
    assert sorted(expected) == sorted(result)
diff --git a/graphs/tests/test_min_spanning_tree_prim.py b/graphs/tests/test_min_spanning_tree_prim.py
new file mode 100644
index 000000000000..048fbf595fa6
--- /dev/null
+++ b/graphs/tests/test_min_spanning_tree_prim.py
@@ -0,0 +1,46 @@
+from collections import defaultdict
+
+from graphs.minimum_spanning_tree_prims import PrimsAlgorithm as mst
+
+
def test_prim_successful_result():
    """Prim's algorithm on a known 9-node graph finds the expected MST.

    The previously unused ``num_nodes, num_edges`` locals (already flagged
    with ``# noqa: F841``) have been removed.
    """
    # (u, v, weight) triples describing an undirected weighted graph.
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    # Build an undirected adjacency list: node -> [[neighbour, cost], ...].
    adjancency = defaultdict(list)
    for node1, node2, cost in edges:
        adjancency[node1].append([node2, cost])
        adjancency[node2].append([node1, cost])

    result = mst(adjancency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    # Each expected edge must appear in the result in either orientation.
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
diff --git a/hashes/__init__.py b/hashes/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/hashes/adler32.py b/hashes/adler32.py
new file mode 100644
index 000000000000..fad747abe3c3
--- /dev/null
+++ b/hashes/adler32.py
@@ -0,0 +1,29 @@
+"""
+ Adler-32 is a checksum algorithm which was invented by Mark Adler in 1995.
+ Compared to a cyclic redundancy check of the same length, it trades reliability for
+ speed (preferring the latter).
+ Adler-32 is more reliable than Fletcher-16, and slightly less reliable than
+ Fletcher-32.[2]
+
+ source: https://en.wikipedia.org/wiki/Adler-32
+"""
+
+
def adler32(plain_text: str) -> int:
    """
    Function implements adler-32 hash.
    Iterates over each character and folds it into the running checksums.
    (Return annotation fixed: the function returns an int, not a str.)

    >>> adler32('Algorithms')
    363791387

    >>> adler32('go adler em all')
    708642122
    """
    MOD_ADLER = 65521  # largest prime smaller than 2**16
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    # High 16 bits carry checksum b, low 16 bits carry checksum a.
    return (b << 16) | a
diff --git a/hashes/chaos_machine.py b/hashes/chaos_machine.py
index f0a305bfeade..1bdf984b68de 100644
--- a/hashes/chaos_machine.py
+++ b/hashes/chaos_machine.py
@@ -1,13 +1,9 @@
"""example of simple chaos machine"""
-from __future__ import print_function
-
-try:
- input = raw_input # Python 2
-except NameError:
- pass # Python 3
# Chaos Machine (K, t, m)
-K = [0.33, 0.44, 0.55, 0.44, 0.33]; t = 3; m = 5
+K = [0.33, 0.44, 0.55, 0.44, 0.33]
+t = 3
+m = 5
# Buffer Space (with Parameters Space)
buffer_space, params_space = [], []
@@ -15,92 +11,91 @@
# Machine Time
machine_time = 0
+
def push(seed):
- global buffer_space, params_space, machine_time, \
- K, m, t
+ global buffer_space, params_space, machine_time, K, m, t
+
+ # Choosing Dynamical Systems (All)
+ for key, value in enumerate(buffer_space):
+ # Evolution Parameter
+ e = float(seed / value)
- # Choosing Dynamical Systems (All)
- for key, value in enumerate(buffer_space):
- # Evolution Parameter
- e = float(seed / value)
+ # Control Theory: Orbit Change
+ value = (buffer_space[(key + 1) % m] + e) % 1
- # Control Theory: Orbit Change
- value = (buffer_space[(key + 1) % m] + e) % 1
+ # Control Theory: Trajectory Change
+ r = (params_space[key] + e) % 1 + 3
- # Control Theory: Trajectory Change
- r = (params_space[key] + e) % 1 + 3
+ # Modification (Transition Function) - Jumps
+ buffer_space[key] = round(float(r * value * (1 - value)), 10)
+ params_space[key] = r # Saving to Parameters Space
- # Modification (Transition Function) - Jumps
- buffer_space[key] = \
- round(float(r * value * (1 - value)), 10)
- params_space[key] = \
- r # Saving to Parameters Space
+ # Logistic Map
+ assert max(buffer_space) < 1
+ assert max(params_space) < 4
- # Logistic Map
- assert max(buffer_space) < 1
- assert max(params_space) < 4
+ # Machine Time
+ machine_time += 1
- # Machine Time
- machine_time += 1
def pull():
- global buffer_space, params_space, machine_time, \
- K, m, t
+ global buffer_space, params_space, machine_time, K, m, t
- # PRNG (Xorshift by George Marsaglia)
- def xorshift(X, Y):
- X ^= Y >> 13
- Y ^= X << 17
- X ^= Y >> 5
- return X
+ # PRNG (Xorshift by George Marsaglia)
+ def xorshift(X, Y):
+ X ^= Y >> 13
+ Y ^= X << 17
+ X ^= Y >> 5
+ return X
- # Choosing Dynamical Systems (Increment)
- key = machine_time % m
+ # Choosing Dynamical Systems (Increment)
+ key = machine_time % m
- # Evolution (Time Length)
- for i in range(0, t):
- # Variables (Position + Parameters)
- r = params_space[key]
- value = buffer_space[key]
+ # Evolution (Time Length)
+ for i in range(0, t):
+ # Variables (Position + Parameters)
+ r = params_space[key]
+ value = buffer_space[key]
- # Modification (Transition Function) - Flow
- buffer_space[key] = \
- round(float(r * value * (1 - value)), 10)
- params_space[key] = \
- (machine_time * 0.01 + r * 1.01) % 1 + 3
+ # Modification (Transition Function) - Flow
+ buffer_space[key] = round(float(r * value * (1 - value)), 10)
+ params_space[key] = (machine_time * 0.01 + r * 1.01) % 1 + 3
- # Choosing Chaotic Data
- X = int(buffer_space[(key + 2) % m] * (10 ** 10))
- Y = int(buffer_space[(key - 2) % m] * (10 ** 10))
+ # Choosing Chaotic Data
+ X = int(buffer_space[(key + 2) % m] * (10 ** 10))
+ Y = int(buffer_space[(key - 2) % m] * (10 ** 10))
- # Machine Time
- machine_time += 1
+ # Machine Time
+ machine_time += 1
+
+ return xorshift(X, Y) % 0xFFFFFFFF
- return xorshift(X, Y) % 0xFFFFFFFF
def reset():
- global buffer_space, params_space, machine_time, \
- K, m, t
+ global buffer_space, params_space, machine_time, K, m, t
+
+ buffer_space = K
+ params_space = [0] * m
+ machine_time = 0
- buffer_space = K; params_space = [0] * m
- machine_time = 0
-#######################################
+if __name__ == "__main__":
+ # Initialization
+ reset()
-# Initialization
-reset()
+ # Pushing Data (Input)
+ import random
-# Pushing Data (Input)
-import random
-message = random.sample(range(0xFFFFFFFF), 100)
-for chunk in message:
- push(chunk)
+ message = random.sample(range(0xFFFFFFFF), 100)
+ for chunk in message:
+ push(chunk)
-# for controlling
-inp = ""
+ # for controlling
+ inp = ""
-# Pulling Data (Output)
-while inp in ("e", "E"):
- print("%s" % format(pull(), '#04x'))
- print(buffer_space); print(params_space)
- inp = input("(e)exit? ").strip()
+ # Pulling Data (Output)
+ while inp in ("e", "E"):
+ print("%s" % format(pull(), "#04x"))
+ print(buffer_space)
+ print(params_space)
+ inp = input("(e)exit? ").strip()
diff --git a/hashes/djb2.py b/hashes/djb2.py
new file mode 100644
index 000000000000..2d1c9aabb1fb
--- /dev/null
+++ b/hashes/djb2.py
@@ -0,0 +1,35 @@
+"""
+This algorithm (k=33) was first reported by Dan Bernstein many years ago in comp.lang.c
+Another version of this algorithm (now favored by Bernstein) uses xor:
+ hash(i) = hash(i - 1) * 33 ^ str[i];
+
+ First Magic constant 33:
+ It has never been adequately explained.
+ It's magic because it works better than many other constants, prime or not.
+
+ Second Magic Constant 5381:
+
+ 1. odd number
+ 2. prime number
+ 3. deficient number
+ 4. 001/010/100/000/101 b
+
+ source: http://www.cse.yorku.ca/~oz/hash.html
+"""
+
+
def djb2(s: str) -> int:
    """
    Implementation of djb2 hash algorithm that
    is popular because of its magic constants.

    >>> djb2('Algorithms')
    3782405311

    >>> djb2('scramble bits')
    1609059040
    """
    # Renamed from ``hash`` to avoid shadowing the builtin.
    hash_value = 5381
    for x in s:
        # hash * 33 + ord(x), with the multiply expressed as shift-and-add.
        hash_value = ((hash_value << 5) + hash_value) + ord(x)
    # Constrain the result to 32 bits.
    return hash_value & 0xFFFFFFFF
diff --git a/hashes/enigma_machine.py b/hashes/enigma_machine.py
new file mode 100644
index 000000000000..5420bacc1409
--- /dev/null
+++ b/hashes/enigma_machine.py
@@ -0,0 +1,60 @@
# Printable ASCII characters form the machine's alphabet.
alphabets = [chr(i) for i in range(32, 126)]
# Three rotors, each a permutation (initially the identity) of alphabet indices.
gear_one = [i for i in range(len(alphabets))]
gear_two = [i for i in range(len(alphabets))]
gear_three = [i for i in range(len(alphabets))]
# Reflector maps index i to len(alphabets) - 1 - i.
reflector = [i for i in reversed(range(len(alphabets)))]
code = []  # accumulates the encoded characters
gear_one_pos = gear_two_pos = gear_three_pos = 0  # rotor step counters
+
+
def rotator():
    """Advance the rotors by one step, cascading like an odometer.

    gear_one rotates on every call; gear_two steps once per full
    revolution of gear_one; gear_three once per revolution of gear_two.
    """
    global gear_one_pos
    global gear_two_pos
    global gear_three_pos
    # Rotate gear_one left by one position.
    i = gear_one[0]
    gear_one.append(i)
    del gear_one[0]
    gear_one_pos += 1
    if gear_one_pos % int(len(alphabets)) == 0:
        # gear_one completed a full revolution: step gear_two.
        i = gear_two[0]
        gear_two.append(i)
        del gear_two[0]
        gear_two_pos += 1
        if gear_two_pos % int(len(alphabets)) == 0:
            # gear_two completed a revolution as well: step gear_three.
            i = gear_three[0]
            gear_three.append(i)
            del gear_three[0]
            gear_three_pos += 1
+
+
def engine(input_character):
    """Encode one character, append it to ``code``, then step the rotors.

    Raises ValueError (from list.index) if *input_character* is not in
    the printable-ASCII alphabet.
    """
    target = alphabets.index(input_character)
    # Forward pass through the three rotors...
    target = gear_one[target]
    target = gear_two[target]
    target = gear_three[target]
    # ...bounce off the reflector...
    target = reflector[target]
    # ...and back through the rotors in reverse order.
    target = gear_three.index(target)
    target = gear_two.index(target)
    target = gear_one.index(target)
    code.append(alphabets[target])
    rotator()
+
+
if __name__ == "__main__":
    # Read the message, then keep asking until a purely numeric token arrives.
    message = list(input("Type your message:\n"))
    while True:
        try:
            token = int(input("Please set token:(must be only digits)\n"))
            break
        except Exception as error:
            print(error)
    # Pre-spin the rotors so the same token reproduces the same cipher state.
    for _ in range(token):
        rotator()
    for character in message:
        engine(character)
    print("\n" + "".join(code))
    print(
        f"\nYour Token is {token} please write it down.\nIf you want to decode "
        f"this message again you should input same digits as token!"
    )
diff --git a/hashes/hamming_code.py b/hashes/hamming_code.py
new file mode 100644
index 000000000000..4a32bae1a51c
--- /dev/null
+++ b/hashes/hamming_code.py
@@ -0,0 +1,296 @@
+# Author: João Gustavo A. Amorim & Gabriel Kunz
+# Author email: joaogustavoamorim@gmail.com and gabriel-kunz@uergs.edu.br
+# Coding date: apr 2019
+# Black: True
+
+"""
+ * This code implements the Hamming code:
+ https://en.wikipedia.org/wiki/Hamming_code - In telecommunication,
+ Hamming codes are a family of linear error-correcting codes. Hamming
+ codes can detect up to two-bit errors or correct one-bit errors
+ without detection of uncorrected errors. By contrast, the simple
+ parity code cannot correct errors, and can detect only an odd number
+ of bits in error. Hamming codes are perfect codes, that is, they
+ achieve the highest possible rate for codes with their block length
+ and minimum distance of three.
+
+ * the implemented code consists of:
+ * a function responsible for encoding the message (emitterConverter)
+ * return the encoded message
+ * a function responsible for decoding the message (receptorConverter)
+ * return the decoded message and a ack of data integrity
+
+ * how to use:
+ to be used you must declare how many parity bits (sizePari)
+ you want to include in the message.
+ it is desired (for test purposes) to select a bit to be set
+ as an error. This serves to check whether the code is working correctly.
+ Lastly, the variable of the message/word that must be desired to be
+ encoded (text).
+
+ * how this works:
+ declaration of variables (sizePari, be, text)
+
+ converts the message/word (text) to binary using the
+ text_to_bits function
+ encodes the message using the rules of hamming encoding
+ decodes the message using the rules of hamming encoding
+ print the original message, the encoded message and the
+ decoded message
+
+ forces an error in the coded text variable
+ decodes the message that was forced the error
+ print the original message, the encoded message, the bit changed
+ message and the decoded message
+"""
+
+# Imports
+import numpy as np
+
+
+# Functions of binary conversion--------------------------------------
+def text_to_bits(text, encoding="utf-8", errors="surrogatepass"):
+ """
+ >>> text_to_bits("msg")
+ '011011010111001101100111'
+ """
+ bits = bin(int.from_bytes(text.encode(encoding, errors), "big"))[2:]
+ return bits.zfill(8 * ((len(bits) + 7) // 8))
+
+
+def text_from_bits(bits, encoding="utf-8", errors="surrogatepass"):
+ """
+ >>> text_from_bits('011011010111001101100111')
+ 'msg'
+ """
+ n = int(bits, 2)
+ return n.to_bytes((n.bit_length() + 7) // 8, "big").decode(encoding, errors) or "\0"
+
+
+# Functions of hamming code-------------------------------------------
+def emitterConverter(sizePar, data):
+ """
+ :param sizePar: how many parity bits the message must have
+ :param data: information bits
+ :return: message to be transmitted by unreliable medium
+ - bits of information merged with parity bits
+
+ >>> emitterConverter(4, "101010111111")
+ ['1', '1', '1', '1', '0', '1', '0', '0', '1', '0', '1', '1', '1', '1', '1', '1']
+ """
+ if sizePar + len(data) <= 2 ** sizePar - (len(data) - 1):
+ print("ERROR - size of parity don't match with size of data")
+ exit(0)
+
+ dataOut = []
+ parity = []
+ binPos = [bin(x)[2:] for x in range(1, sizePar + len(data) + 1)]
+
+ # sorted information data for the size of the output data
+ dataOrd = []
+ # data position template + parity
+ dataOutGab = []
+ # parity bit counter
+ qtdBP = 0
+ # counter position of data bits
+ contData = 0
+
+ for x in range(1, sizePar + len(data) + 1):
+ # Performs a template of bit positions - who should be given,
+ # and who should be parity
+ if qtdBP < sizePar:
+ if (np.log(x) / np.log(2)).is_integer():
+ dataOutGab.append("P")
+ qtdBP = qtdBP + 1
+ else:
+ dataOutGab.append("D")
+ else:
+ dataOutGab.append("D")
+
+ # Sorts the data to the new output size
+ if dataOutGab[-1] == "D":
+ dataOrd.append(data[contData])
+ contData += 1
+ else:
+ dataOrd.append(None)
+
+ # Calculates parity
+ qtdBP = 0 # parity bit counter
+ for bp in range(1, sizePar + 1):
+ # Bit counter one for a given parity
+ contBO = 0
+ # counter to control the loop reading
+ contLoop = 0
+ for x in dataOrd:
+ if x is not None:
+ try:
+ aux = (binPos[contLoop])[-1 * (bp)]
+ except IndexError:
+ aux = "0"
+ if aux == "1":
+ if x == "1":
+ contBO += 1
+ contLoop += 1
+ parity.append(contBO % 2)
+
+ qtdBP += 1
+
+ # Mount the message
+ ContBP = 0 # parity bit counter
+ for x in range(0, sizePar + len(data)):
+ if dataOrd[x] is None:
+ dataOut.append(str(parity[ContBP]))
+ ContBP += 1
+ else:
+ dataOut.append(dataOrd[x])
+
+ return dataOut
+
+
+def receptorConverter(sizePar, data):
+ """
+ >>> receptorConverter(4, "1111010010111111")
+ (['1', '0', '1', '0', '1', '0', '1', '1', '1', '1', '1', '1'], True)
+ """
+ # data position template + parity
+ dataOutGab = []
+ # Parity bit counter
+ qtdBP = 0
+ # Counter p data bit reading
+ contData = 0
+ # list of parity received
+ parityReceived = []
+ dataOutput = []
+
+ for x in range(1, len(data) + 1):
+ # Performs a template of bit positions - who should be given,
+ # and who should be parity
+ if qtdBP < sizePar and (np.log(x) / np.log(2)).is_integer():
+ dataOutGab.append("P")
+ qtdBP = qtdBP + 1
+ else:
+ dataOutGab.append("D")
+
+ # Sorts the data to the new output size
+ if dataOutGab[-1] == "D":
+ dataOutput.append(data[contData])
+ else:
+ parityReceived.append(data[contData])
+ contData += 1
+
+ # -----------calculates the parity with the data
+ dataOut = []
+ parity = []
+ binPos = [bin(x)[2:] for x in range(1, sizePar + len(dataOutput) + 1)]
+
+ # sorted information data for the size of the output data
+ dataOrd = []
+ # Data position feedback + parity
+ dataOutGab = []
+ # Parity bit counter
+ qtdBP = 0
+ # Counter p data bit reading
+ contData = 0
+
+ for x in range(1, sizePar + len(dataOutput) + 1):
+ # Performs a template position of bits - who should be given,
+ # and who should be parity
+ if qtdBP < sizePar and (np.log(x) / np.log(2)).is_integer():
+ dataOutGab.append("P")
+ qtdBP = qtdBP + 1
+ else:
+ dataOutGab.append("D")
+
+ # Sorts the data to the new output size
+ if dataOutGab[-1] == "D":
+ dataOrd.append(dataOutput[contData])
+ contData += 1
+ else:
+ dataOrd.append(None)
+
+ # Calculates parity
+ qtdBP = 0 # parity bit counter
+ for bp in range(1, sizePar + 1):
+ # Bit counter one for a certain parity
+ contBO = 0
+ # Counter to control loop reading
+ contLoop = 0
+ for x in dataOrd:
+ if x is not None:
+ try:
+ aux = (binPos[contLoop])[-1 * (bp)]
+ except IndexError:
+ aux = "0"
+ if aux == "1" and x == "1":
+ contBO += 1
+ contLoop += 1
+ parity.append(str(contBO % 2))
+
+ qtdBP += 1
+
+ # Mount the message
+ ContBP = 0 # Parity bit counter
+ for x in range(0, sizePar + len(dataOutput)):
+ if dataOrd[x] is None:
+ dataOut.append(str(parity[ContBP]))
+ ContBP += 1
+ else:
+ dataOut.append(dataOrd[x])
+
+ ack = parityReceived == parity
+ return dataOutput, ack
+
+
+# ---------------------------------------------------------------------
+"""
+# Example how to use
+
+# number of parity bits
+sizePari = 4
+
+# location of the bit that will be forced an error
+be = 2
+
+# Message/word to be encoded and decoded with hamming
+# text = input("Enter the word to be read: ")
+text = "Message01"
+
+# Convert the message to binary
+binaryText = text_to_bits(text)
+
+# Prints the binary of the string
+print("Text input in binary is '" + binaryText + "'")
+
+# total transmitted bits
+totalBits = len(binaryText) + sizePari
+print("Size of data is " + str(totalBits))
+
+print("\n --Message exchange--")
+print("Data to send ------------> " + binaryText)
+dataOut = emitterConverter(sizePari, binaryText)
+print("Data converted ----------> " + "".join(dataOut))
+dataReceiv, ack = receptorConverter(sizePari, dataOut)
+print(
+ "Data receive ------------> "
+ + "".join(dataReceiv)
+ + "\t\t -- Data integrity: "
+ + str(ack)
+)
+
+
+print("\n --Force error--")
+print("Data to send ------------> " + binaryText)
+dataOut = emitterConverter(sizePari, binaryText)
+print("Data converted ----------> " + "".join(dataOut))
+
+# forces error
+dataOut[-be] = "1" * (dataOut[-be] == "0") + "0" * (dataOut[-be] == "1")
+print("Data after transmission -> " + "".join(dataOut))
+dataReceiv, ack = receptorConverter(sizePari, dataOut)
+print(
+ "Data receive ------------> "
+ + "".join(dataReceiv)
+ + "\t\t -- Data integrity: "
+ + str(ack)
+)
+"""
diff --git a/hashes/md5.py b/hashes/md5.py
index d3f15510874e..b7888fb610ac 100644
--- a/hashes/md5.py
+++ b/hashes/md5.py
@@ -1,155 +1,239 @@
-from __future__ import print_function
import math
+
def rearrange(bitString32):
- """[summary]
- Regroups the given binary string.
-
- Arguments:
- bitString32 {[string]} -- [32 bit binary]
-
- Raises:
- ValueError -- [if the given string not are 32 bit binary string]
-
- Returns:
- [string] -- [32 bit binary string]
- """
-
- if len(bitString32) != 32:
- raise ValueError("Need length 32")
- newString = ""
- for i in [3,2,1,0]:
- newString += bitString32[8*i:8*i+8]
- return newString
+ """[summary]
+ Regroups the given binary string.
+
+ Arguments:
+ bitString32 {[string]} -- [32 bit binary]
+
+ Raises:
+ ValueError -- [if the given string is not a 32 bit binary string]
+
+ Returns:
+ [string] -- [32 bit binary string]
+ >>> rearrange('1234567890abcdfghijklmnopqrstuvw')
+ 'pqrstuvwhijklmno90abcdfg12345678'
+ """
+
+ if len(bitString32) != 32:
+ raise ValueError("Need length 32")
+ newString = ""
+ for i in [3, 2, 1, 0]:
+ newString += bitString32[8 * i : 8 * i + 8]
+ return newString
+
def reformatHex(i):
- """[summary]
- Converts the given integer into 8-digit hex number.
+ """[summary]
+ Converts the given integer into 8-digit hex number.
+
+ Arguments:
+ i {[int]} -- [integer]
+ >>> reformatHex(666)
+ '9a020000'
+ """
- Arguments:
- i {[int]} -- [integer]
- """
+ hexrep = format(i, "08x")
+ thing = ""
+ for i in [3, 2, 1, 0]:
+ thing += hexrep[2 * i : 2 * i + 2]
+ return thing
- hexrep = format(i,'08x')
- thing = ""
- for i in [3,2,1,0]:
- thing += hexrep[2*i:2*i+2]
- return thing
def pad(bitString):
- """[summary]
- Fills up the binary string to a 512 bit binary string
-
- Arguments:
- bitString {[string]} -- [binary string]
-
- Returns:
- [string] -- [binary string]
- """
-
- startLength = len(bitString)
- bitString += '1'
- while len(bitString) % 512 != 448:
- bitString += '0'
- lastPart = format(startLength,'064b')
- bitString += rearrange(lastPart[32:]) + rearrange(lastPart[:32])
- return bitString
+ """[summary]
+ Fills up the binary string to a 512 bit binary string
+
+ Arguments:
+ bitString {[string]} -- [binary string]
+
+ Returns:
+ [string] -- [binary string]
+ """
+ startLength = len(bitString)
+ bitString += "1"
+ while len(bitString) % 512 != 448:
+ bitString += "0"
+ lastPart = format(startLength, "064b")
+ bitString += rearrange(lastPart[32:]) + rearrange(lastPart[:32])
+ return bitString
+
def getBlock(bitString):
- """[summary]
- Iterator:
- Returns by each call a list of length 16 with the 32 bit
- integer blocks.
-
- Arguments:
- bitString {[string]} -- [binary string >= 512]
- """
-
- currPos = 0
- while currPos < len(bitString):
- currPart = bitString[currPos:currPos+512]
- mySplits = []
- for i in range(16):
- mySplits.append(int(rearrange(currPart[32*i:32*i+32]),2))
- yield mySplits
- currPos += 512
+ """[summary]
+ Iterator:
+ Returns by each call a list of length 16 with the 32 bit
+ integer blocks.
+
+ Arguments:
+ bitString {[string]} -- [binary string >= 512]
+ """
+
+ currPos = 0
+ while currPos < len(bitString):
+ currPart = bitString[currPos : currPos + 512]
+ mySplits = []
+ for i in range(16):
+ mySplits.append(int(rearrange(currPart[32 * i : 32 * i + 32]), 2))
+ yield mySplits
+ currPos += 512
+
def not32(i):
- i_str = format(i,'032b')
- new_str = ''
- for c in i_str:
- new_str += '1' if c=='0' else '0'
- return int(new_str,2)
+ """
+ >>> not32(34)
+ 4294967261
+ """
+ i_str = format(i, "032b")
+ new_str = ""
+ for c in i_str:
+ new_str += "1" if c == "0" else "0"
+ return int(new_str, 2)
+
-def sum32(a,b):
- return (a + b) % 2**32
+def sum32(a, b):
+ """"""
+ return (a + b) % 2 ** 32
+
+
+def leftrot32(i, s):
+ return (i << s) ^ (i >> (32 - s))
-def leftrot32(i,s):
- return (i << s) ^ (i >> (32-s))
def md5me(testString):
- """[summary]
- Returns a 32-bit hash code of the string 'testString'
-
- Arguments:
- testString {[string]} -- [message]
- """
-
- bs =''
- for i in testString:
- bs += format(ord(i),'08b')
- bs = pad(bs)
-
- tvals = [int(2**32 * abs(math.sin(i+1))) for i in range(64)]
-
- a0 = 0x67452301
- b0 = 0xefcdab89
- c0 = 0x98badcfe
- d0 = 0x10325476
-
- s = [7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, \
- 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, \
- 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, \
- 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21 ]
-
- for m in getBlock(bs):
- A = a0
- B = b0
- C = c0
- D = d0
- for i in range(64):
- if i <= 15:
- #f = (B & C) | (not32(B) & D)
- f = D ^ (B & (C ^ D))
- g = i
- elif i<= 31:
- #f = (D & B) | (not32(D) & C)
- f = C ^ (D & (B ^ C))
- g = (5*i+1) % 16
- elif i <= 47:
- f = B ^ C ^ D
- g = (3*i+5) % 16
- else:
- f = C ^ (B | not32(D))
- g = (7*i) % 16
- dtemp = D
- D = C
- C = B
- B = sum32(B,leftrot32((A + f + tvals[i] + m[g]) % 2**32, s[i]))
- A = dtemp
- a0 = sum32(a0, A)
- b0 = sum32(b0, B)
- c0 = sum32(c0, C)
- d0 = sum32(d0, D)
-
- digest = reformatHex(a0) + reformatHex(b0) + reformatHex(c0) + reformatHex(d0)
- return digest
+ """[summary]
+ Returns a 32-bit hash code of the string 'testString'
+
+ Arguments:
+ testString {[string]} -- [message]
+ """
+
+ bs = ""
+ for i in testString:
+ bs += format(ord(i), "08b")
+ bs = pad(bs)
+
+ tvals = [int(2 ** 32 * abs(math.sin(i + 1))) for i in range(64)]
+
+ a0 = 0x67452301
+ b0 = 0xEFCDAB89
+ c0 = 0x98BADCFE
+ d0 = 0x10325476
+
+ s = [
+ 7,
+ 12,
+ 17,
+ 22,
+ 7,
+ 12,
+ 17,
+ 22,
+ 7,
+ 12,
+ 17,
+ 22,
+ 7,
+ 12,
+ 17,
+ 22,
+ 5,
+ 9,
+ 14,
+ 20,
+ 5,
+ 9,
+ 14,
+ 20,
+ 5,
+ 9,
+ 14,
+ 20,
+ 5,
+ 9,
+ 14,
+ 20,
+ 4,
+ 11,
+ 16,
+ 23,
+ 4,
+ 11,
+ 16,
+ 23,
+ 4,
+ 11,
+ 16,
+ 23,
+ 4,
+ 11,
+ 16,
+ 23,
+ 6,
+ 10,
+ 15,
+ 21,
+ 6,
+ 10,
+ 15,
+ 21,
+ 6,
+ 10,
+ 15,
+ 21,
+ 6,
+ 10,
+ 15,
+ 21,
+ ]
+
+ for m in getBlock(bs):
+ A = a0
+ B = b0
+ C = c0
+ D = d0
+ for i in range(64):
+ if i <= 15:
+ # f = (B & C) | (not32(B) & D)
+ f = D ^ (B & (C ^ D))
+ g = i
+ elif i <= 31:
+ # f = (D & B) | (not32(D) & C)
+ f = C ^ (D & (B ^ C))
+ g = (5 * i + 1) % 16
+ elif i <= 47:
+ f = B ^ C ^ D
+ g = (3 * i + 5) % 16
+ else:
+ f = C ^ (B | not32(D))
+ g = (7 * i) % 16
+ dtemp = D
+ D = C
+ C = B
+ B = sum32(B, leftrot32((A + f + tvals[i] + m[g]) % 2 ** 32, s[i]))
+ A = dtemp
+ a0 = sum32(a0, A)
+ b0 = sum32(b0, B)
+ c0 = sum32(c0, C)
+ d0 = sum32(d0, D)
+
+ digest = reformatHex(a0) + reformatHex(b0) + reformatHex(c0) + reformatHex(d0)
+ return digest
+
def test():
- assert md5me("") == "d41d8cd98f00b204e9800998ecf8427e"
- assert md5me("The quick brown fox jumps over the lazy dog") == "9e107d9d372bb6826bd81d3542a419d6"
- print("Success.")
+ assert md5me("") == "d41d8cd98f00b204e9800998ecf8427e"
+ assert (
+ md5me("The quick brown fox jumps over the lazy dog")
+ == "9e107d9d372bb6826bd81d3542a419d6"
+ )
+ print("Success.")
if __name__ == "__main__":
- test()
+ test()
+ import doctest
+
+ doctest.testmod()
diff --git a/hashes/sdbm.py b/hashes/sdbm.py
new file mode 100644
index 000000000000..86d47a1d9967
--- /dev/null
+++ b/hashes/sdbm.py
@@ -0,0 +1,37 @@
+"""
+ This algorithm was created for sdbm (a public-domain reimplementation of ndbm)
+ database library.
+ It was found to do well in scrambling bits, causing better distribution of the keys
+ and fewer splits.
+ It also happens to be a good general hashing function with good distribution.
+ The actual function (pseudo code) is:
+ for i in i..len(str):
+ hash(i) = hash(i - 1) * 65599 + str[i];
+
+ What is included below is the faster version used in gawk. [there is even a faster,
+ duff-device version]
+ The magic constant 65599 was picked out of thin air while experimenting with
+ different constants.
+ It turns out to be a prime.
+ This is one of the algorithms used in berkeley db (see sleepycat) and elsewhere.
+
+ source: http://www.cse.yorku.ca/~oz/hash.html
+"""
+
+
+def sdbm(plain_text: str) -> str:
+ """
+ Function implements sdbm hash, easy to use, great for bits scrambling.
+ iterates over each character in the given string and applies function to each of
+ them.
+
+ >>> sdbm('Algorithms')
+ 1462174910723540325254304520539387479031000036
+
+ >>> sdbm('scramble bits')
+ 730247649148944819640658295400555317318720608290373040936089
+ """
+ hash = 0
+ for plain_chr in plain_text:
+ hash = ord(plain_chr) + (hash << 6) + (hash << 16) - hash
+ return hash
diff --git a/hashes/sha1.py b/hashes/sha1.py
index 4c78ad3a89e5..cca38b7c3fdc 100644
--- a/hashes/sha1.py
+++ b/hashes/sha1.py
@@ -2,13 +2,14 @@
Demonstrates implementation of SHA1 Hash function in a Python class and gives utilities
to find hash of string or hash of text from a file.
Usage: python sha1.py --string "Hello World!!"
- pyhton sha1.py --file "hello_world.txt"
- When run without any arguments, it prints the hash of the string "Hello World!! Welcome to Cryptography"
+ python sha1.py --file "hello_world.txt"
+ When run without any arguments, it prints the hash of the string "Hello World!!
+ Welcome to Cryptography"
Also contains a Test class to verify that the generated Hash is same as that
returned by the hashlib library
-SHA1 hash or SHA1 sum of a string is a crytpographic function which means it is easy
-to calculate forwards but extemely difficult to calculate backwards. What this means
+SHA1 hash or SHA1 sum of a string is a cryptographic function which means it is easy
+to calculate forwards but extremely difficult to calculate backwards. What this means
is, you can easily calculate the hash of a string, but it is extremely difficult to
know the original string if you have its hash. This property is useful to communicate
securely, send encrypted messages and is very useful in payment systems, blockchain
@@ -22,21 +23,24 @@
the final hash.
Reference: https://deadhacker.com/2006/02/21/sha-1-illustrated/
"""
-
import argparse
+import hashlib # hashlib is only used inside the Test class
import struct
-import hashlib #hashlib is only used inside the Test class
import unittest
class SHA1Hash:
"""
Class to contain the entire pipeline for SHA1 Hashing Algorithm
+ >>> SHA1Hash(bytes('Allan', 'utf-8')).final_hash()
+ '872af2d8ac3d8695387e7c804bf0e02c18df9e6e'
"""
+
def __init__(self, data):
"""
Inititates the variables data and h. h is a list of 5 8-digit Hexadecimal
- numbers corresponding to (1732584193, 4023233417, 2562383102, 271733878, 3285377520)
+ numbers corresponding to
+ (1732584193, 4023233417, 2562383102, 271733878, 3285377520)
respectively. We will start with this as a message digest. 0x is how you write
Hexadecimal numbers in Python
"""
@@ -47,42 +51,47 @@ def __init__(self, data):
def rotate(n, b):
"""
Static method to be used inside other methods. Left rotates n by b.
+ >>> SHA1Hash('').rotate(12,2)
+ 48
"""
- return ((n << b) | (n >> (32 - b))) & 0xffffffff
+ return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF
def padding(self):
"""
Pads the input message with zeros so that padded_data has 64 bytes or 512 bits
"""
- padding = b'\x80' + b'\x00'*(63 - (len(self.data) + 8) % 64)
- padded_data = self.data + padding + struct.pack('>Q', 8 * len(self.data))
+ padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
+ padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
return padded_data
def split_blocks(self):
"""
Returns a list of bytestrings each of length 64
"""
- return [self.padded_data[i:i+64] for i in range(0, len(self.padded_data), 64)]
+ return [
+ self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
+ ]
# @staticmethod
def expand_block(self, block):
"""
- Takes a bytestring-block of length 64, unpacks it to a list of integers and returns a
- list of 80 integers pafter some bit operations
+ Takes a bytestring-block of length 64, unpacks it to a list of integers and
+ returns a list of 80 integers after some bit operations
"""
- w = list(struct.unpack('>16L', block)) + [0] * 64
+ w = list(struct.unpack(">16L", block)) + [0] * 64
for i in range(16, 80):
- w[i] = self.rotate((w[i-3] ^ w[i-8] ^ w[i-14] ^ w[i-16]), 1)
+ w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
return w
def final_hash(self):
"""
- Calls all the other methods to process the input. Pads the data, then splits into
- blocks and then does a series of operations for each block (including expansion).
+ Calls all the other methods to process the input. Pads the data, then splits
+ into blocks and then does a series of operations for each block (including
+ expansion).
For each block, the variable h that was initialized is copied to a,b,c,d,e
- and these 5 variables a,b,c,d,e undergo several changes. After all the blocks are
- processed, these 5 variables are pairwise added to h ie a to h[0], b to h[1] and so on.
- This h becomes our final hash which is returned.
+ and these 5 variables a,b,c,d,e undergo several changes. After all the blocks
+ are processed, these 5 variables are pairwise added to h ie a to h[0], b to h[1]
+ and so on. This h becomes our final hash which is returned.
"""
self.padded_data = self.padding()
self.blocks = self.split_blocks()
@@ -102,47 +111,61 @@ def final_hash(self):
elif 60 <= i < 80:
f = b ^ c ^ d
k = 0xCA62C1D6
- a, b, c, d, e = self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xffffffff,\
- a, self.rotate(b, 30), c, d
- self.h = self.h[0] + a & 0xffffffff,\
- self.h[1] + b & 0xffffffff,\
- self.h[2] + c & 0xffffffff,\
- self.h[3] + d & 0xffffffff,\
- self.h[4] + e & 0xffffffff
- return '%08x%08x%08x%08x%08x' %tuple(self.h)
+ a, b, c, d, e = (
+ self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
+ a,
+ self.rotate(b, 30),
+ c,
+ d,
+ )
+ self.h = (
+ self.h[0] + a & 0xFFFFFFFF,
+ self.h[1] + b & 0xFFFFFFFF,
+ self.h[2] + c & 0xFFFFFFFF,
+ self.h[3] + d & 0xFFFFFFFF,
+ self.h[4] + e & 0xFFFFFFFF,
+ )
+ return "%08x%08x%08x%08x%08x" % tuple(self.h)
class SHA1HashTest(unittest.TestCase):
"""
Test class for the SHA1Hash class. Inherits the TestCase class from unittest
"""
+
def testMatchHashes(self):
- msg = bytes('Test String', 'utf-8')
+ msg = bytes("Test String", "utf-8")
self.assertEqual(SHA1Hash(msg).final_hash(), hashlib.sha1(msg).hexdigest())
def main():
"""
- Provides option 'string' or 'file' to take input and prints the calculated SHA1 hash.
- unittest.main() has been commented because we probably dont want to run
+ Provides option 'string' or 'file' to take input and prints the calculated SHA1
+ hash. unittest.main() has been commented because we probably don't want to run
the test each time.
"""
# unittest.main()
- parser = argparse.ArgumentParser(description='Process some strings or files')
- parser.add_argument('--string', dest='input_string',
- default='Hello World!! Welcome to Cryptography',
- help='Hash the string')
- parser.add_argument('--file', dest='input_file', help='Hash contents of a file')
+ parser = argparse.ArgumentParser(description="Process some strings or files")
+ parser.add_argument(
+ "--string",
+ dest="input_string",
+ default="Hello World!! Welcome to Cryptography",
+ help="Hash the string",
+ )
+ parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
args = parser.parse_args()
input_string = args.input_string
- #In any case hash input should be a bytestring
+ # In any case hash input should be a bytestring
if args.input_file:
- with open(args.input_file, 'rb') as f:
+ with open(args.input_file, "rb") as f:
hash_input = f.read()
else:
- hash_input = bytes(input_string, 'utf-8')
+ hash_input = bytes(input_string, "utf-8")
print(SHA1Hash(hash_input).final_hash())
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
+ import doctest
+
+ doctest.testmod()
diff --git a/knapsack/README.md b/knapsack/README.md
new file mode 100644
index 000000000000..6041c1e48eb8
--- /dev/null
+++ b/knapsack/README.md
@@ -0,0 +1,32 @@
+# A naive recursive implementation of 0-1 Knapsack Problem
+
+This overview is taken from:
+
+ https://en.wikipedia.org/wiki/Knapsack_problem
+
+---
+
+## Overview
+
+The knapsack problem is a problem in combinatorial optimization: Given a set of items, each with a weight and a value, determine the number of each item to include in a collection so that the total weight is less than or equal to a given limit and the total value is as large as possible. It derives its name from the problem faced by someone who is constrained by a fixed-size knapsack and must fill it with the most valuable items. The problem often arises in resource allocation where the decision makers have to choose from a set of non-divisible projects or tasks under a fixed budget or time constraint, respectively.
+
+The knapsack problem has been studied for more than a century, with early works dating as far back as 1897. The name "knapsack problem" dates back to the early works of mathematician Tobias Dantzig (1884–1956), and refers to the commonplace problem of packing the most valuable or useful items without overloading the luggage.
+
+---
+
+## Documentation
+
+This module uses docstrings to enable the use of Python's in-built `help(...)` function.
+For instance, try `help(Vector)`, `help(unitBasisVector)`, and `help(CLASSNAME.METHODNAME)`.
+
+---
+
+## Usage
+
+Import the module `knapsack.py` from the **.** directory into your project.
+
+---
+
+## Tests
+
+`.` contains Python unit tests which can be run with `python3 -m unittest -v`.
diff --git a/knapsack/__init__.py b/knapsack/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/knapsack/greedy_knapsack.py b/knapsack/greedy_knapsack.py
new file mode 100644
index 000000000000..ed6399c740c8
--- /dev/null
+++ b/knapsack/greedy_knapsack.py
@@ -0,0 +1,98 @@
+# To get an insight into Greedy Algorithm through the Knapsack problem
+
+
+"""
+A shopkeeper has bags of wheat that each have different weights and different profits.
+eg.
+profit 5 8 7 1 12 3 4
+weight 2 7 1 6 4 2 5
+max_weight 100
+
+Constraints:
+max_weight > 0
+profit[i] >= 0
+weight[i] >= 0
+Calculate the maximum profit that the shopkeeper can make given the maximum weight that can
+be carried.
+"""
+
+
+def calc_profit(profit: list, weight: list, max_weight: int) -> int:
+ """
+ Function description is as follows-
+ :param profit: Take a list of profits
+ :param weight: Take a list of weights of bags corresponding to the profits
+ :param max_weight: Maximum weight that could be carried
+ :return: Maximum expected gain
+
+ >>> calc_profit([1, 2, 3], [3, 4, 5], 15)
+ 6
+ >>> calc_profit([10, 9 , 8], [3 ,4 , 5], 25)
+ 27
+ """
+ if len(profit) != len(weight):
+ raise ValueError("The length of profit and weight must be same.")
+ if max_weight <= 0:
+ raise ValueError("max_weight must greater than zero.")
+ if any(p < 0 for p in profit):
+ raise ValueError("Profit can not be negative.")
+ if any(w < 0 for w in weight):
+ raise ValueError("Weight can not be negative.")
+
+ # List created to store profit gained for the 1kg in case of each weight
+ # respectively. Calculate and append profit/weight for each element.
+ profit_by_weight = [p / w for p, w in zip(profit, weight)]
+
+ # Creating a copy of the list and sorting profit/weight in ascending order
+ sorted_profit_by_weight = sorted(profit_by_weight)
+
+ # declaring useful variables
+ length = len(sorted_profit_by_weight)
+ limit = 0
+ gain = 0
+ i = 0
+
+    # loop till the total weight do not reach max limit e.g. 15 kg and till i < length
+    while limit <= max_weight and i < length:
+        # flag value for encountered greatest element in sorted_profit_by_weight
+        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
+        """
+        Calculate the index of the biggest_profit_by_weight in profit_by_weight list.
+        This will give the index of the first encountered element which is same as of
+        biggest_profit_by_weight. There may be one or more values same as that of
+        biggest_profit_by_weight but index always encounter the very first element
+        only. To curb this alter the values in profit_by_weight once they are used
+        here it is done to -1 because neither profit nor weight can be in negative.
+        """
+        index = profit_by_weight.index(biggest_profit_by_weight)
+        profit_by_weight[index] = -1
+
+        # check if the weight encountered is less than the total weight
+        # encountered before.
+        if max_weight - limit >= weight[index]:
+ limit += weight[index]
+ # Adding profit gained for the given weight 1 ===
+ # weight[index]/weight[index]
+ gain += 1 * profit[index]
+ else:
+ # Since the weight encountered is greater than limit, therefore take the
+ # required number of remaining kgs and calculate profit for it.
+ # weight remaining / weight[index]
+ gain += (max_weight - limit) / weight[index] * profit[index]
+ break
+ i += 1
+ return gain
+
+
+if __name__ == "__main__":
+ print(
+ "Input profits, weights, and then max_weight (all positive ints) separated by "
+ "spaces."
+ )
+
+ profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
+ weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
+ max_weight = int(input("Max weight allowed: "))
+
+ # Function Call
+ calc_profit(profit, weight, max_weight)
diff --git a/knapsack/knapsack.py b/knapsack/knapsack.py
new file mode 100644
index 000000000000..756443ea6163
--- /dev/null
+++ b/knapsack/knapsack.py
@@ -0,0 +1,47 @@
+from typing import List
+
+""" A naive recursive implementation of 0-1 Knapsack Problem
+ https://en.wikipedia.org/wiki/Knapsack_problem
+"""
+
+
+def knapsack(capacity: int, weights: List[int], values: List[int], counter: int) -> int:
+ """
+ Returns the maximum value that can be put in a knapsack of a capacity cap,
+ whereby each weight w has a specific value val.
+
+ >>> cap = 50
+ >>> val = [60, 100, 120]
+ >>> w = [10, 20, 30]
+ >>> c = len(val)
+ >>> knapsack(cap, w, val, c)
+ 220
+
+ The result is 220 cause the values of 100 and 120 got the weight of 50
+ which is the limit of the capacity.
+ """
+
+ # Base Case
+ if counter == 0 or capacity == 0:
+ return 0
+
+ # If weight of the nth item is more than Knapsack of capacity,
+ # then this item cannot be included in the optimal solution,
+ # else return the maximum of two cases:
+ # (1) nth item included
+ # (2) not included
+ if weights[counter - 1] > capacity:
+ return knapsack(capacity, weights, values, counter - 1)
+ else:
+ left_capacity = capacity - weights[counter - 1]
+ new_value_included = values[counter - 1] + knapsack(
+ left_capacity, weights, values, counter - 1
+ )
+ without_new_value = knapsack(capacity, weights, values, counter - 1)
+ return max(new_value_included, without_new_value)
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/knapsack/tests/__init__.py b/knapsack/tests/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/knapsack/tests/test_greedy_knapsack.py b/knapsack/tests/test_greedy_knapsack.py
new file mode 100644
index 000000000000..b7b62d5d80b4
--- /dev/null
+++ b/knapsack/tests/test_greedy_knapsack.py
@@ -0,0 +1,75 @@
+import unittest
+
+from knapsack import greedy_knapsack as kp
+
+
+class TestClass(unittest.TestCase):
+ """
+ Test cases for knapsack
+ """
+
+ def test_sorted(self):
+ """
+ kp.calc_profit takes the required argument (profit, weight, max_weight)
+ and returns whether the answer matches to the expected ones
+ """
+ profit = [10, 20, 30, 40, 50, 60]
+ weight = [2, 4, 6, 8, 10, 12]
+ max_weight = 100
+ self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)
+
+ def test_negative_max_weight(self):
+ """
+ Returns ValueError for any negative max_weight value
+ :return: ValueError
+ """
+ # profit = [10, 20, 30, 40, 50, 60]
+ # weight = [2, 4, 6, 8, 10, 12]
+ # max_weight = -15
+ self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")
+
+ def test_negative_profit_value(self):
+ """
+ Returns ValueError for any negative profit value in the list
+ :return: ValueError
+ """
+ # profit = [10, -20, 30, 40, 50, 60]
+ # weight = [2, 4, 6, 8, 10, 12]
+ # max_weight = 15
+ self.assertRaisesRegex(ValueError, "Weight can not be negative.")
+
+ def test_negative_weight_value(self):
+ """
+ Returns ValueError for any negative weight value in the list
+ :return: ValueError
+ """
+ # profit = [10, 20, 30, 40, 50, 60]
+ # weight = [2, -4, 6, -8, 10, 12]
+ # max_weight = 15
+ self.assertRaisesRegex(ValueError, "Profit can not be negative.")
+
+ def test_null_max_weight(self):
+ """
+ Returns ValueError for any zero max_weight value
+ :return: ValueError
+ """
+ # profit = [10, 20, 30, 40, 50, 60]
+ # weight = [2, 4, 6, 8, 10, 12]
+ # max_weight = null
+ self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")
+
+ def test_unequal_list_length(self):
+ """
+ Returns IndexError if length of lists (profit and weight) are unequal.
+ :return: IndexError
+ """
+ # profit = [10, 20, 30, 40, 50]
+ # weight = [2, 4, 6, 8, 10, 12]
+ # max_weight = 100
+ self.assertRaisesRegex(
+ IndexError, "The length of profit and weight must be same."
+ )
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/knapsack/tests/test_knapsack.py b/knapsack/tests/test_knapsack.py
new file mode 100644
index 000000000000..248855fbce53
--- /dev/null
+++ b/knapsack/tests/test_knapsack.py
@@ -0,0 +1,52 @@
+"""
+Created on Fri Oct 16 09:31:07 2020
+
+@author: Dr. Tobias Schröder
+@license: MIT-license
+
+This file contains the test-suite for the knapsack problem.
+"""
+import unittest
+
+from knapsack import knapsack as k
+
+
+class Test(unittest.TestCase):
+ def test_base_case(self):
+ """
+ test for the base case
+ """
+ cap = 0
+ val = [0]
+ w = [0]
+ c = len(val)
+ self.assertEqual(k.knapsack(cap, w, val, c), 0)
+
+ val = [60]
+ w = [10]
+ c = len(val)
+ self.assertEqual(k.knapsack(cap, w, val, c), 0)
+
+ def test_easy_case(self):
+ """
+ test for an easy case
+ """
+ cap = 3
+ val = [1, 2, 3]
+ w = [3, 2, 1]
+ c = len(val)
+ self.assertEqual(k.knapsack(cap, w, val, c), 5)
+
+ def test_knapsack(self):
+ """
+ test for the knapsack
+ """
+ cap = 50
+ val = [60, 100, 120]
+ w = [10, 20, 30]
+ c = len(val)
+ self.assertEqual(k.knapsack(cap, w, val, c), 220)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/linear_algebra/README.md b/linear_algebra/README.md
new file mode 100644
index 000000000000..dc6085090d02
--- /dev/null
+++ b/linear_algebra/README.md
@@ -0,0 +1,75 @@
+# Linear algebra library for Python
+
+This module contains classes and functions for doing linear algebra.
+
+---
+
+## Overview
+
+### class Vector
+-
+ - This class represents a vector of arbitrary size and related operations.
+
+ **Overview about the methods:**
+
+ - constructor(components : list) : init the vector
+ - set(components : list) : changes the vector components.
+ - \_\_str\_\_() : toString method
+ - component(i : int): gets the i-th component (start by 0)
+ - \_\_len\_\_() : gets the size / length of the vector (number of components)
+ - euclidLength() : returns the euclidean length of the vector.
+ - operator + : vector addition
+ - operator - : vector subtraction
+ - operator * : scalar multiplication and dot product
+ - copy() : copies this vector and returns it.
+ - changeComponent(pos,value) : changes the specified component.
+
+- function zeroVector(dimension)
+ - returns a zero vector of 'dimension'
+- function unitBasisVector(dimension,pos)
+ - returns a unit basis vector with a One at index 'pos' (indexing at 0)
+- function axpy(scalar,vector1,vector2)
+ - computes the axpy operation
+- function randomVector(N,a,b)
+ - returns a random vector of size N, with random integer components between 'a' and 'b'.
+
+### class Matrix
+-
+ - This class represents a matrix of arbitrary size and operations on it.
+
+ **Overview about the methods:**
+
+ - \_\_str\_\_() : returns a string representation
+ - operator * : implements the matrix vector multiplication
+ implements the matrix-scalar multiplication.
+ - changeComponent(x,y,value) : changes the specified component.
+ - component(x,y) : returns the specified component.
+ - width() : returns the width of the matrix
+ - height() : returns the height of the matrix
+ - determinate() : returns the determinant of the matrix if it is square
+ - operator + : implements the matrix-addition.
+ - operator - : implements the matrix-subtraction
+
+- function squareZeroMatrix(N)
+ - returns a square zero-matrix of dimension NxN
+- function randomMatrix(W,H,a,b)
+ - returns a random matrix WxH with integer components between 'a' and 'b'
+---
+
+## Documentation
+
+This module uses docstrings to enable the use of Python's in-built `help(...)` function.
+For instance, try `help(Vector)`, `help(unitBasisVector)`, and `help(CLASSNAME.METHODNAME)`.
+
+---
+
+## Usage
+
+Import the module `lib.py` from the **src** directory into your project.
+Alternatively, you can directly use the Python bytecode file `lib.pyc`.
+
+---
+
+## Tests
+
+`src/tests.py` contains Python unit tests which can be run with `python3 -m unittest -v`.
diff --git a/linear_algebra/__init__.py b/linear_algebra/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/linear_algebra/src/__init__.py b/linear_algebra/src/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/linear_algebra/src/conjugate_gradient.py b/linear_algebra/src/conjugate_gradient.py
new file mode 100644
index 000000000000..1a65b8ccf019
--- /dev/null
+++ b/linear_algebra/src/conjugate_gradient.py
@@ -0,0 +1,173 @@
+"""
+Resources:
+- https://en.wikipedia.org/wiki/Conjugate_gradient_method
+- https://en.wikipedia.org/wiki/Definite_symmetric_matrix
+"""
+import numpy as np
+
+
+def _is_matrix_spd(matrix: np.array) -> bool:
+ """
+ Returns True if input matrix is symmetric positive definite.
+ Returns False otherwise.
+
+ For a matrix to be SPD, all eigenvalues must be positive.
+
+ >>> import numpy as np
+ >>> matrix = np.array([
+ ... [4.12401784, -5.01453636, -0.63865857],
+ ... [-5.01453636, 12.33347422, -3.40493586],
+ ... [-0.63865857, -3.40493586, 5.78591885]])
+ >>> _is_matrix_spd(matrix)
+ True
+ >>> matrix = np.array([
+ ... [0.34634879, 1.96165514, 2.18277744],
+ ... [0.74074469, -1.19648894, -1.34223498],
+ ... [-0.7687067 , 0.06018373, -1.16315631]])
+ >>> _is_matrix_spd(matrix)
+ False
+ """
+ # Ensure matrix is square.
+ assert np.shape(matrix)[0] == np.shape(matrix)[1]
+
+ # If matrix not symmetric, exit right away.
+ if np.allclose(matrix, matrix.T) is False:
+ return False
+
+ # Get eigenvalues and eigenvectors for a symmetric matrix.
+ eigen_values, _ = np.linalg.eigh(matrix)
+
+ # Check sign of all eigenvalues.
+ return np.all(eigen_values > 0)
+
+
+def _create_spd_matrix(dimension: np.int64) -> np.array:
+ """
+ Returns a symmetric positive definite matrix given a dimension.
+
+ Input:
+ dimension gives the square matrix dimension.
+
+ Output:
+ spd_matrix is a dimension x dimension symmetric positive definite (SPD) matrix.
+
+ >>> import numpy as np
+ >>> dimension = 3
+ >>> spd_matrix = _create_spd_matrix(dimension)
+ >>> _is_matrix_spd(spd_matrix)
+ True
+ """
+ random_matrix = np.random.randn(dimension, dimension)
+ spd_matrix = np.dot(random_matrix, random_matrix.T)
+ assert _is_matrix_spd(spd_matrix)
+ return spd_matrix
+
+
+def conjugate_gradient(
+ spd_matrix: np.array,
+ load_vector: np.array,
+ max_iterations: int = 1000,
+ tol: float = 1e-8,
+) -> np.array:
+ """
+ Returns solution to the linear system np.dot(spd_matrix, x) = b.
+
+ Input:
+ spd_matrix is an NxN Symmetric Positive Definite (SPD) matrix.
+ load_vector is an Nx1 vector.
+
+ Output:
+ x is an Nx1 vector that is the solution vector.
+
+ >>> import numpy as np
+ >>> spd_matrix = np.array([
+ ... [8.73256573, -5.02034289, -2.68709226],
+ ... [-5.02034289, 3.78188322, 0.91980451],
+ ... [-2.68709226, 0.91980451, 1.94746467]])
+ >>> b = np.array([
+ ... [-5.80872761],
+ ... [ 3.23807431],
+ ... [ 1.95381422]])
+ >>> conjugate_gradient(spd_matrix, b)
+ array([[-0.63114139],
+ [-0.01561498],
+ [ 0.13979294]])
+ """
+ # Ensure proper dimensionality.
+ assert np.shape(spd_matrix)[0] == np.shape(spd_matrix)[1]
+ assert np.shape(load_vector)[0] == np.shape(spd_matrix)[0]
+ assert _is_matrix_spd(spd_matrix)
+
+ # Initialize solution guess, residual, search direction.
+ x0 = np.zeros((np.shape(load_vector)[0], 1))
+ r0 = np.copy(load_vector)
+ p0 = np.copy(r0)
+
+ # Set initial errors in solution guess and residual.
+ error_residual = 1e9
+ error_x_solution = 1e9
+ error = 1e9
+
+ # Set iteration counter to threshold number of iterations.
+ iterations = 0
+
+ while error > tol:
+
+ # Save this value so we only calculate the matrix-vector product once.
+ w = np.dot(spd_matrix, p0)
+
+ # The main algorithm.
+
+ # Update search direction magnitude.
+ alpha = np.dot(r0.T, r0) / np.dot(p0.T, w)
+ # Update solution guess.
+ x = x0 + alpha * p0
+ # Calculate new residual.
+ r = r0 - alpha * w
+ # Calculate new Krylov subspace scale.
+ beta = np.dot(r.T, r) / np.dot(r0.T, r0)
+ # Calculate new A conjugate search direction.
+ p = r + beta * p0
+
+ # Calculate errors.
+ error_residual = np.linalg.norm(r - r0)
+ error_x_solution = np.linalg.norm(x - x0)
+ error = np.maximum(error_residual, error_x_solution)
+
+ # Update variables.
+ x0 = np.copy(x)
+ r0 = np.copy(r)
+ p0 = np.copy(p)
+
+ # Update number of iterations.
+ iterations += 1
+
+ return x
+
+
+def test_conjugate_gradient() -> None:
+ """
+ >>> test_conjugate_gradient() # self running tests
+ """
+ # Create linear system with SPD matrix and known solution x_true.
+ dimension = 3
+ spd_matrix = _create_spd_matrix(dimension)
+ x_true = np.random.randn(dimension, 1)
+ b = np.dot(spd_matrix, x_true)
+
+ # Numpy solution.
+ x_numpy = np.linalg.solve(spd_matrix, b)
+
+ # Our implementation.
+ x_conjugate_gradient = conjugate_gradient(spd_matrix, b)
+
+ # Ensure both solutions are close to x_true (and therefore one another).
+ assert np.linalg.norm(x_numpy - x_true) <= 1e-6
+ assert np.linalg.norm(x_conjugate_gradient - x_true) <= 1e-6
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
+ test_conjugate_gradient()
diff --git a/linear_algebra/src/lib.py b/linear_algebra/src/lib.py
new file mode 100644
index 000000000000..353c8334093b
--- /dev/null
+++ b/linear_algebra/src/lib.py
@@ -0,0 +1,379 @@
+"""
+Created on Mon Feb 26 14:29:11 2018
+
+@author: Christian Bender
+@license: MIT-license
+
+This module contains some useful classes and functions for dealing
+with linear algebra in python.
+
+Overview:
+
+- class Vector
+- function zeroVector(dimension)
+- function unitBasisVector(dimension,pos)
+- function axpy(scalar,vector1,vector2)
+- function randomVector(N,a,b)
+- class Matrix
+- function squareZeroMatrix(N)
+- function randomMatrix(W,H,a,b)
+"""
+
+
+import math
+import random
+
+
+class Vector:
+ """
+ This class represents a vector of arbitrary size.
+ You need to give the vector components.
+
+ Overview about the methods:
+
+ constructor(components : list) : init the vector
+ set(components : list) : changes the vector components.
+ __str__() : toString method
+ component(i : int): gets the i-th component (start by 0)
+ __len__() : gets the size of the vector (number of components)
+ euclidLength() : returns the euclidean length of the vector.
+ operator + : vector addition
+ operator - : vector subtraction
+ operator * : scalar multiplication and dot product
+ copy() : copies this vector and returns it.
+ changeComponent(pos,value) : changes the specified component.
+ TODO: compare-operator
+ """
+
+ def __init__(self, components=None):
+ """
+ input: components or nothing
+ simple constructor for init the vector
+ """
+ if components is None:
+ components = []
+ self.__components = list(components)
+
+ def set(self, components):
+ """
+ input: new components
+ changes the components of the vector.
+ replace the components with newer one.
+ """
+ if len(components) > 0:
+ self.__components = list(components)
+ else:
+ raise Exception("please give any vector")
+
+ def __str__(self):
+ """
+ returns a string representation of the vector
+ """
+ return "(" + ",".join(map(str, self.__components)) + ")"
+
+ def component(self, i):
+ """
+ input: index (start at 0)
+ output: the i-th component of the vector.
+ """
+ if type(i) is int and -len(self.__components) <= i < len(self.__components):
+ return self.__components[i]
+ else:
+ raise Exception("index out of range")
+
+ def __len__(self):
+ """
+ returns the size of the vector
+ """
+ return len(self.__components)
+
+ def euclidLength(self):
+ """
+ returns the euclidean length of the vector
+ """
+ summe = 0
+ for c in self.__components:
+ summe += c ** 2
+ return math.sqrt(summe)
+
+ def __add__(self, other):
+ """
+ input: other vector
+ assumes: other vector has the same size
+ returns a new vector that represents the sum.
+ """
+ size = len(self)
+ if size == len(other):
+ result = [self.__components[i] + other.component(i) for i in range(size)]
+ return Vector(result)
+ else:
+ raise Exception("must have the same size")
+
+ def __sub__(self, other):
+ """
+ input: other vector
+ assumes: other vector has the same size
+ returns a new vector that represents the difference.
+ """
+ size = len(self)
+ if size == len(other):
+ result = [self.__components[i] - other.component(i) for i in range(size)]
+ return Vector(result)
+ else: # error case
+ raise Exception("must have the same size")
+
+ def __mul__(self, other):
+ """
+ mul implements the scalar multiplication
+ and the dot-product
+ """
+ if isinstance(other, float) or isinstance(other, int):
+ ans = [c * other for c in self.__components]
+ return Vector(ans)
+ elif isinstance(other, Vector) and (len(self) == len(other)):
+ size = len(self)
+ summe = 0
+ for i in range(size):
+ summe += self.__components[i] * other.component(i)
+ return summe
+ else: # error case
+ raise Exception("invalid operand!")
+
+ def copy(self):
+ """
+ copies this vector and returns it.
+ """
+ return Vector(self.__components)
+
+ def changeComponent(self, pos, value):
+ """
+ input: an index (pos) and a value
+ changes the specified component (pos) with the
+ 'value'
+ """
+ # precondition
+ assert -len(self.__components) <= pos < len(self.__components)
+ self.__components[pos] = value
+
+
+def zeroVector(dimension):
+ """
+ returns a zero-vector of size 'dimension'
+ """
+ # precondition
+ assert isinstance(dimension, int)
+ return Vector([0] * dimension)
+
+
+def unitBasisVector(dimension, pos):
+ """
+ returns a unit basis vector with a One
+ at index 'pos' (indexing at 0)
+ """
+ # precondition
+ assert isinstance(dimension, int) and (isinstance(pos, int))
+ ans = [0] * dimension
+ ans[pos] = 1
+ return Vector(ans)
+
+
+def axpy(scalar, x, y):
+ """
+ input: a 'scalar' and two vectors 'x' and 'y'
+ output: a vector
+ computes the axpy operation
+ """
+ # precondition
+ assert (
+ isinstance(x, Vector)
+ and (isinstance(y, Vector))
+ and (isinstance(scalar, int) or isinstance(scalar, float))
+ )
+ return x * scalar + y
+
+
+def randomVector(N, a, b):
+ """
+ input: size (N) of the vector.
+ random range (a,b)
+ output: returns a random vector of size N, with
+ random integer components between 'a' and 'b'.
+ """
+ random.seed(None)
+ ans = [random.randint(a, b) for i in range(N)]
+ return Vector(ans)
+
+
+class Matrix:
+ """
+ class: Matrix
+ This class represents an arbitrary matrix.
+
+ Overview about the methods:
+
+ __str__() : returns a string representation
+ operator * : implements the matrix vector multiplication
+ implements the matrix-scalar multiplication.
+ changeComponent(x,y,value) : changes the specified component.
+ component(x,y) : returns the specified component.
+ width() : returns the width of the matrix
+ height() : returns the height of the matrix
+ operator + : implements the matrix-addition.
+ operator - : implements the matrix-subtraction
+ """
+
+ def __init__(self, matrix, w, h):
+ """
+ simple constructor for initializing
+ the matrix with components.
+ """
+ self.__matrix = matrix
+ self.__width = w
+ self.__height = h
+
+ def __str__(self):
+ """
+ returns a string representation of this
+ matrix.
+ """
+ ans = ""
+ for i in range(self.__height):
+ ans += "|"
+ for j in range(self.__width):
+ if j < self.__width - 1:
+ ans += str(self.__matrix[i][j]) + ","
+ else:
+ ans += str(self.__matrix[i][j]) + "|\n"
+ return ans
+
+ def changeComponent(self, x, y, value):
+ """
+ changes the x-y component of this matrix
+ """
+ if 0 <= x < self.__height and 0 <= y < self.__width:
+ self.__matrix[x][y] = value
+ else:
+ raise Exception("changeComponent: indices out of bounds")
+
+ def component(self, x, y):
+ """
+ returns the specified (x,y) component
+ """
+ if 0 <= x < self.__height and 0 <= y < self.__width:
+ return self.__matrix[x][y]
+ else:
+ raise Exception("changeComponent: indices out of bounds")
+
+ def width(self):
+ """
+ getter for the width
+ """
+ return self.__width
+
+ def height(self):
+ """
+ getter for the height
+ """
+ return self.__height
+
+ def determinate(self) -> float:
+ """
+ returns the determinant of an nxn matrix using Laplace expansion
+ """
+ if self.__height == self.__width and self.__width >= 2:
+ total = 0
+ if self.__width > 2:
+ for x in range(0, self.__width):
+ for y in range(0, self.__height):
+ total += (
+ self.__matrix[x][y]
+ * (-1) ** (x + y)
+ * Matrix(
+ self.__matrix[0:x] + self.__matrix[x + 1 :],
+ self.__width - 1,
+ self.__height - 1,
+ ).determinate()
+ )
+ else:
+ return (
+ self.__matrix[0][0] * self.__matrix[1][1]
+ - self.__matrix[0][1] * self.__matrix[1][0]
+ )
+ return total
+ else:
+ raise Exception("matrix is not square")
+
+ def __mul__(self, other):
+ """
+ implements the matrix-vector multiplication.
+ implements the matrix-scalar multiplication
+ """
+ if isinstance(other, Vector): # vector-matrix
+ if len(other) == self.__width:
+ ans = zeroVector(self.__height)
+ for i in range(self.__height):
+ summe = 0
+ for j in range(self.__width):
+ summe += other.component(j) * self.__matrix[i][j]
+ ans.changeComponent(i, summe)
+ summe = 0
+ return ans
+ else:
+ raise Exception(
+ "vector must have the same size as the "
+ + "number of columns of the matrix!"
+ )
+ elif isinstance(other, int) or isinstance(other, float): # matrix-scalar
+ matrix = [
+ [self.__matrix[i][j] * other for j in range(self.__width)]
+ for i in range(self.__height)
+ ]
+ return Matrix(matrix, self.__width, self.__height)
+
+ def __add__(self, other):
+ """
+ implements the matrix-addition.
+ """
+ if self.__width == other.width() and self.__height == other.height():
+ matrix = []
+ for i in range(self.__height):
+ row = []
+ for j in range(self.__width):
+ row.append(self.__matrix[i][j] + other.component(i, j))
+ matrix.append(row)
+ return Matrix(matrix, self.__width, self.__height)
+ else:
+ raise Exception("matrix must have the same dimension!")
+
+ def __sub__(self, other):
+ """
+ implements the matrix-subtraction.
+ """
+ if self.__width == other.width() and self.__height == other.height():
+ matrix = []
+ for i in range(self.__height):
+ row = []
+ for j in range(self.__width):
+ row.append(self.__matrix[i][j] - other.component(i, j))
+ matrix.append(row)
+ return Matrix(matrix, self.__width, self.__height)
+ else:
+ raise Exception("matrix must have the same dimension!")
+
+
+def squareZeroMatrix(N):
+ """
+ returns a square zero-matrix of dimension NxN
+ """
+ ans = [[0] * N for i in range(N)]
+ return Matrix(ans, N, N)
+
+
+def randomMatrix(W, H, a, b):
+ """
+ returns a random matrix WxH with integer components
+ between 'a' and 'b'
+ """
+ random.seed(None)
+ matrix = [[random.randint(a, b) for j in range(W)] for i in range(H)]
+ return Matrix(matrix, W, H)
diff --git a/linear_algebra/src/polynom_for_points.py b/linear_algebra/src/polynom_for_points.py
new file mode 100644
index 000000000000..7a363723d9d2
--- /dev/null
+++ b/linear_algebra/src/polynom_for_points.py
@@ -0,0 +1,132 @@
+from __future__ import annotations
+
+
+def points_to_polynomial(coordinates: list[list[int]]) -> str:
+ """
+ coordinates is a two dimensional matrix: [[x, y], [x, y], ...]
+ number of points you want to use
+
+ >>> print(points_to_polynomial([]))
+ The program cannot work out a fitting polynomial.
+ >>> print(points_to_polynomial([[]]))
+ The program cannot work out a fitting polynomial.
+ >>> print(points_to_polynomial([[1, 0], [2, 0], [3, 0]]))
+ f(x)=x^2*0.0+x^1*-0.0+x^0*0.0
+ >>> print(points_to_polynomial([[1, 1], [2, 1], [3, 1]]))
+ f(x)=x^2*0.0+x^1*-0.0+x^0*1.0
+ >>> print(points_to_polynomial([[1, 3], [2, 3], [3, 3]]))
+ f(x)=x^2*0.0+x^1*-0.0+x^0*3.0
+ >>> print(points_to_polynomial([[1, 1], [2, 2], [3, 3]]))
+ f(x)=x^2*0.0+x^1*1.0+x^0*0.0
+ >>> print(points_to_polynomial([[1, 1], [2, 4], [3, 9]]))
+ f(x)=x^2*1.0+x^1*-0.0+x^0*0.0
+ >>> print(points_to_polynomial([[1, 3], [2, 6], [3, 11]]))
+ f(x)=x^2*1.0+x^1*-0.0+x^0*2.0
+ >>> print(points_to_polynomial([[1, -3], [2, -6], [3, -11]]))
+ f(x)=x^2*-1.0+x^1*-0.0+x^0*-2.0
+ >>> print(points_to_polynomial([[1, 5], [2, 2], [3, 9]]))
+ f(x)=x^2*5.0+x^1*-18.0+x^0*18.0
+ """
+ try:
+ check = 1
+ more_check = 0
+ d = coordinates[0][0]
+ for j in range(len(coordinates)):
+ if j == 0:
+ continue
+ if d == coordinates[j][0]:
+ more_check += 1
+ solved = "x=" + str(coordinates[j][0])
+ if more_check == len(coordinates) - 1:
+ check = 2
+ break
+ elif more_check > 0 and more_check != len(coordinates) - 1:
+ check = 3
+ else:
+ check = 1
+
+ if len(coordinates) == 1 and coordinates[0][0] == 0:
+ check = 2
+ solved = "x=0"
+ except Exception:
+ check = 3
+
+ x = len(coordinates)
+
+ if check == 1:
+ count_of_line = 0
+ matrix = []
+ # put the x and x to the power values in a matrix
+ while count_of_line < x:
+ count_in_line = 0
+ a = coordinates[count_of_line][0]
+ count_line: list[int] = []
+ while count_in_line < x:
+ count_line.append(a ** (x - (count_in_line + 1)))
+ count_in_line += 1
+ matrix.append(count_line)
+ count_of_line += 1
+
+ count_of_line = 0
+ # put the y values into a vector
+ vector: list[int] = []
+ while count_of_line < x:
+ vector.append(coordinates[count_of_line][1])
+ count_of_line += 1
+
+ count = 0
+
+ while count < x:
+ zahlen = 0
+ while zahlen < x:
+ if count == zahlen:
+ zahlen += 1
+ if zahlen == x:
+ break
+ bruch = matrix[zahlen][count] / matrix[count][count]
+ for counting_columns, item in enumerate(matrix[count]):
+ # manipulating all the values in the matrix
+ matrix[zahlen][counting_columns] -= item * bruch
+ # manipulating the values in the vector
+ vector[zahlen] -= vector[count] * bruch
+ zahlen += 1
+ count += 1
+
+ count = 0
+ # make solutions
+ solution: list[str] = []
+ while count < x:
+ solution.append(vector[count] / matrix[count][count])
+ count += 1
+
+ count = 0
+ solved = "f(x)="
+
+ while count < x:
+ remove_e: list[str] = str(solution[count]).split("E")
+ if len(remove_e) > 1:
+ solution[count] = remove_e[0] + "*10^" + remove_e[1]
+ solved += "x^" + str(x - (count + 1)) + "*" + str(solution[count])
+ if count + 1 != x:
+ solved += "+"
+ count += 1
+
+ return solved
+
+ elif check == 2:
+ return solved
+ else:
+ return "The program cannot work out a fitting polynomial."
+
+
+if __name__ == "__main__":
+ print(points_to_polynomial([]))
+ print(points_to_polynomial([[]]))
+ print(points_to_polynomial([[1, 0], [2, 0], [3, 0]]))
+ print(points_to_polynomial([[1, 1], [2, 1], [3, 1]]))
+ print(points_to_polynomial([[1, 3], [2, 3], [3, 3]]))
+ print(points_to_polynomial([[1, 1], [2, 2], [3, 3]]))
+ print(points_to_polynomial([[1, 1], [2, 4], [3, 9]]))
+ print(points_to_polynomial([[1, 3], [2, 6], [3, 11]]))
+ print(points_to_polynomial([[1, -3], [2, -6], [3, -11]]))
+ print(points_to_polynomial([[1, 5], [2, 2], [3, 9]]))
diff --git a/linear_algebra/src/power_iteration.py b/linear_algebra/src/power_iteration.py
new file mode 100644
index 000000000000..476361e0d433
--- /dev/null
+++ b/linear_algebra/src/power_iteration.py
@@ -0,0 +1,101 @@
+import numpy as np
+
+
+def power_iteration(
+ input_matrix: np.array, vector: np.array, error_tol=1e-12, max_iterations=100
+) -> [float, np.array]:
+ """
+ Power Iteration.
+ Find the largest eigenvalue and corresponding eigenvector
+ of matrix input_matrix given a random vector in the same space.
+ Will work so long as vector has component of largest eigenvector.
+ input_matrix must be symmetric.
+
+ Input
+ input_matrix: input matrix whose largest eigenvalue we will find.
+ Numpy array. np.shape(input_matrix) == (N,N).
+ vector: random initial vector in same space as matrix.
+ Numpy array. np.shape(vector) == (N,) or (N,1)
+
+ Output
+ largest_eigenvalue: largest eigenvalue of the matrix input_matrix.
+ Float. Scalar.
+ largest_eigenvector: eigenvector corresponding to largest_eigenvalue.
+ Numpy array. np.shape(largest_eigenvector) == (N,) or (N,1).
+
+ >>> import numpy as np
+ >>> input_matrix = np.array([
+ ... [41, 4, 20],
+ ... [ 4, 26, 30],
+ ... [20, 30, 50]
+ ... ])
+ >>> vector = np.array([41,4,20])
+ >>> power_iteration(input_matrix,vector)
+ (79.66086378788381, array([0.44472726, 0.46209842, 0.76725662]))
+ """
+
+ # Ensure matrix is square.
+ assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
+ # Ensure proper dimensionality.
+ assert np.shape(input_matrix)[0] == np.shape(vector)[0]
+
+ # Set convergence to False. Will define convergence when we exceed max_iterations
+ # or when we have small changes from one iteration to next.
+
+ convergence = False
+ lamda_previous = 0
+ iterations = 0
+ error = 1e12
+
+ while not convergence:
+ # Multiply the matrix by the vector.
+ w = np.dot(input_matrix, vector)
+ # Normalize the resulting output vector.
+ vector = w / np.linalg.norm(w)
+ # Find rayleigh quotient
+ # (faster than usual b/c we know vector is normalized already)
+ lamda = np.dot(vector.T, np.dot(input_matrix, vector))
+
+ # Check convergence.
+ error = np.abs(lamda - lamda_previous) / lamda
+ iterations += 1
+
+ if error <= error_tol or iterations >= max_iterations:
+ convergence = True
+
+ lamda_previous = lamda
+
+ return lamda, vector
+
+
+def test_power_iteration() -> None:
+ """
+ >>> test_power_iteration() # self running tests
+ """
+ # Our implementation.
+ input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
+ vector = np.array([41, 4, 20])
+ eigen_value, eigen_vector = power_iteration(input_matrix, vector)
+
+ # Numpy implementation.
+
+ # Get eigen values and eigen vectors using built in numpy
+ # eigh (eigh used for symmetric or hermitian matrices).
+ eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
+ # Last eigen value is the maximum one.
+ eigen_value_max = eigen_values[-1]
+ # Last column in this matrix is eigen vector corresponding to largest eigen value.
+ eigen_vector_max = eigen_vectors[:, -1]
+
+ # Check our implementation and numpy gives close answers.
+ assert np.abs(eigen_value - eigen_value_max) <= 1e-6
+ # Take absolute values element wise of each eigenvector.
+ # as they are only unique to a minus sign.
+ assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
+ test_power_iteration()
diff --git a/linear_algebra/src/rayleigh_quotient.py b/linear_algebra/src/rayleigh_quotient.py
new file mode 100644
index 000000000000..69bbbac119e8
--- /dev/null
+++ b/linear_algebra/src/rayleigh_quotient.py
@@ -0,0 +1,64 @@
+"""
+https://en.wikipedia.org/wiki/Rayleigh_quotient
+"""
+import numpy as np
+
+
+def is_hermitian(matrix: np.array) -> bool:
+ """
+ Checks if a matrix is Hermitian.
+ >>> import numpy as np
+ >>> A = np.array([
+ ... [2, 2+1j, 4],
+ ... [2-1j, 3, 1j],
+ ... [4, -1j, 1]])
+ >>> is_hermitian(A)
+ True
+ >>> A = np.array([
+ ... [2, 2+1j, 4+1j],
+ ... [2-1j, 3, 1j],
+ ... [4, -1j, 1]])
+ >>> is_hermitian(A)
+ False
+ """
+ return np.array_equal(matrix, matrix.conjugate().T)
+
+
+def rayleigh_quotient(A: np.array, v: np.array) -> float:
+ """
+ Returns the Rayleigh quotient of a Hermitian matrix A and
+ vector v.
+ >>> import numpy as np
+ >>> A = np.array([
+ ... [1, 2, 4],
+ ... [2, 3, -1],
+ ... [4, -1, 1]
+ ... ])
+ >>> v = np.array([
+ ... [1],
+ ... [2],
+ ... [3]
+ ... ])
+ >>> rayleigh_quotient(A, v)
+ array([[3.]])
+ """
+ v_star = v.conjugate().T
+ return (v_star.dot(A).dot(v)) / (v_star.dot(v))
+
+
+def tests() -> None:
+ A = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
+ v = np.array([[1], [2], [3]])
+ assert is_hermitian(A), f"{A} is not hermitian."
+ print(rayleigh_quotient(A, v))
+
+ A = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
+ assert is_hermitian(A), f"{A} is not hermitian."
+ assert rayleigh_quotient(A, v) == float(3)
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
+ tests()
diff --git a/linear_algebra/src/test_linear_algebra.py b/linear_algebra/src/test_linear_algebra.py
new file mode 100644
index 000000000000..6eba3a1638bd
--- /dev/null
+++ b/linear_algebra/src/test_linear_algebra.py
@@ -0,0 +1,156 @@
+"""
+Created on Mon Feb 26 15:40:07 2018
+
+@author: Christian Bender
+@license: MIT-license
+
+This file contains the test-suite for the linear algebra library.
+"""
+import unittest
+
+from .lib import Matrix, Vector, axpy, squareZeroMatrix, unitBasisVector, zeroVector
+
+
+class Test(unittest.TestCase):
+ def test_component(self):
+ """
+ test for method component
+ """
+ x = Vector([1, 2, 3])
+ self.assertEqual(x.component(0), 1)
+ self.assertEqual(x.component(2), 3)
+ _ = Vector()
+
+ def test_str(self):
+ """
+ test for toString() method
+ """
+ x = Vector([0, 0, 0, 0, 0, 1])
+ self.assertEqual(str(x), "(0,0,0,0,0,1)")
+
+ def test_size(self):
+ """
+ test for size()-method
+ """
+ x = Vector([1, 2, 3, 4])
+ self.assertEqual(len(x), 4)
+
+ def test_euclidLength(self):
+ """
+ test for the euclidean length
+ """
+ x = Vector([1, 2])
+ self.assertAlmostEqual(x.euclidLength(), 2.236, 3)
+
+ def test_add(self):
+ """
+ test for + operator
+ """
+ x = Vector([1, 2, 3])
+ y = Vector([1, 1, 1])
+ self.assertEqual((x + y).component(0), 2)
+ self.assertEqual((x + y).component(1), 3)
+ self.assertEqual((x + y).component(2), 4)
+
+ def test_sub(self):
+ """
+ test for - operator
+ """
+ x = Vector([1, 2, 3])
+ y = Vector([1, 1, 1])
+ self.assertEqual((x - y).component(0), 0)
+ self.assertEqual((x - y).component(1), 1)
+ self.assertEqual((x - y).component(2), 2)
+
+ def test_mul(self):
+ """
+ test for * operator
+ """
+ x = Vector([1, 2, 3])
+ a = Vector([2, -1, 4]) # for test of dot-product
+ b = Vector([1, -2, -1])
+ self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
+ self.assertEqual((a * b), 0)
+
+ def test_zeroVector(self):
+ """
+ test for the global function zeroVector(...)
+ """
+ self.assertTrue(str(zeroVector(10)).count("0") == 10)
+
+ def test_unitBasisVector(self):
+ """
+ test for the global function unitBasisVector(...)
+ """
+ self.assertEqual(str(unitBasisVector(3, 1)), "(0,1,0)")
+
+ def test_axpy(self):
+ """
+ test for the global function axpy(...) (operation)
+ """
+ x = Vector([1, 2, 3])
+ y = Vector([1, 0, 1])
+ self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")
+
+ def test_copy(self):
+ """
+ test for the copy()-method
+ """
+ x = Vector([1, 0, 0, 0, 0, 0])
+ y = x.copy()
+ self.assertEqual(str(x), str(y))
+
+ def test_changeComponent(self):
+ """
+ test for the changeComponent(...)-method
+ """
+ x = Vector([1, 0, 0])
+ x.changeComponent(0, 0)
+ x.changeComponent(1, 1)
+ self.assertEqual(str(x), "(0,1,0)")
+
+ def test_str_matrix(self):
+ A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
+ self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(A))
+
+ def test_determinate(self):
+ """
+ test for determinate()
+ """
+ A = Matrix([[1, 1, 4, 5], [3, 3, 3, 2], [5, 1, 9, 0], [9, 7, 7, 9]], 4, 4)
+ self.assertEqual(-376, A.determinate())
+
+ def test__mul__matrix(self):
+ A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
+ x = Vector([1, 2, 3])
+ self.assertEqual("(14,32,50)", str(A * x))
+ self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(A * 2))
+
+ def test_changeComponent_matrix(self):
+ A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
+ A.changeComponent(0, 2, 5)
+ self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(A))
+
+ def test_component_matrix(self):
+ A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
+ self.assertEqual(7, A.component(2, 1), 0.01)
+
+ def test__add__matrix(self):
+ A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
+ B = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
+ self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(A + B))
+
+ def test__sub__matrix(self):
+ A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
+ B = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
+ self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(A - B))
+
+ def test_squareZeroMatrix(self):
+ self.assertEqual(
+ "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|" + "\n|0,0,0,0,0|\n",
+ str(squareZeroMatrix(5)),
+ )
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/linear_algebra/src/transformations_2d.py b/linear_algebra/src/transformations_2d.py
new file mode 100644
index 000000000000..6a15189c5676
--- /dev/null
+++ b/linear_algebra/src/transformations_2d.py
@@ -0,0 +1,63 @@
+"""
+2D Transformations are regularly used in Linear Algebra.
+
+This module builds 2x2 scaling, rotation, projection and reflection
+matrices.  Note that all angle arguments are interpreted in radians,
+so the examples below pass 45 *radians*, not 45 degrees.
+
+ scaling(5) = [[5.0, 0.0], [0.0, 5.0]]
+ rotation(45) = [[0.5253219888177297, -0.8509035245341184],
+ [0.8509035245341184, 0.5253219888177297]]
+projection(45) = [[0.27596319193541496, 0.446998331800279],
+ [0.446998331800279, 0.7240368080645851]]
+reflection(45) = [[0.05064397763545947, 0.893996663600558],
+ [0.893996663600558, 0.7018070490682369]]
+"""
+from __future__ import annotations
+
+from math import cos, sin
+
+
def scaling(scaling_factor: float) -> list[list[float]]:
    """
    Return the 2x2 matrix that scales uniformly by *scaling_factor*.

    >>> scaling(5)
    [[5.0, 0.0], [0.0, 5.0]]
    """
    factor = float(scaling_factor)
    return [
        [factor if row == col else 0.0 for col in range(2)]
        for row in range(2)
    ]
+
+
def rotation(angle: float) -> list[list[float]]:
    """
    Return the 2x2 rotation matrix for *angle*.

    NOTE(review): *angle* is passed straight to cos/sin, so it is in
    radians — the example below rotates by 45 radians, not 45 degrees.

    >>> rotation(45)  # doctest: +NORMALIZE_WHITESPACE
    [[0.5253219888177297, -0.8509035245341184],
    [0.8509035245341184, 0.5253219888177297]]
    """
    cosine = cos(angle)
    sine = sin(angle)
    return [[cosine, -sine], [sine, cosine]]
+
+
def projection(angle: float) -> list[list[float]]:
    """
    Return the 2x2 matrix projecting onto the line at *angle* radians.

    >>> projection(45)  # doctest: +NORMALIZE_WHITESPACE
    [[0.27596319193541496, 0.446998331800279],
    [0.446998331800279, 0.7240368080645851]]
    """
    cosine, sine = cos(angle), sin(angle)
    cross = cosine * sine
    return [[cosine * cosine, cross], [cross, sine * sine]]
+
+
def reflection(angle: float) -> list[list[float]]:
    """
    Return the 2x2 matrix reflecting across the line through the origin
    at *angle* radians.

    A reflection matrix is [[cos 2a, sin 2a], [sin 2a, -cos 2a]], written
    below via the double-angle identities 2*c*c - 1 == cos(2a) and
    2*c*s == sin(2a).  The previous version returned
    [[2*c - 1, 2*cs], [2*cs, 2*s - 1]], which dropped the squares on c
    and s — that matrix has determinant != -1 and is not a reflection.

    >>> reflection(0)
    [[1.0, 0.0], [0.0, -1.0]]
    """
    c, s = cos(angle), sin(angle)
    cs = c * s
    return [[2 * c * c - 1, 2 * cs], [2 * cs, 2 * s * s - 1]]
+
+
if __name__ == "__main__":
    # Demo output; guarded so that importing this module has no
    # side effects (the original printed unconditionally at import time).
    print(f" {scaling(5) = }")
    print(f" {rotation(45) = }")
    print(f"{projection(45) = }")
    print(f"{reflection(45) = }")
diff --git a/linear_algebra_python/README.md b/linear_algebra_python/README.md
deleted file mode 100644
index 1e34d0bd7805..000000000000
--- a/linear_algebra_python/README.md
+++ /dev/null
@@ -1,76 +0,0 @@
-# Linear algebra library for Python
-
-This module contains some useful classes and functions for dealing with linear algebra in python 2.
-
----
-
-## Overview
-
-- class Vector
- - This class represents a vector of arbitray size and operations on it.
-
- **Overview about the methods:**
-
- - constructor(components : list) : init the vector
- - set(components : list) : changes the vector components.
- - \_\_str\_\_() : toString method
- - component(i : int): gets the i-th component (start by 0)
- - \_\_len\_\_() : gets the size / length of the vector (number of components)
- - euclidLength() : returns the eulidean length of the vector.
- - operator + : vector addition
- - operator - : vector subtraction
- - operator * : scalar multiplication and dot product
- - copy() : copies this vector and returns it.
- - changeComponent(pos,value) : changes the specified component.
-
-- function zeroVector(dimension)
- - returns a zero vector of 'dimension'
-- function unitBasisVector(dimension,pos)
- - returns a unit basis vector with a One at index 'pos' (indexing at 0)
-- function axpy(scalar,vector1,vector2)
- - computes the axpy operation
-- function randomVector(N,a,b)
- - returns a random vector of size N, with random integer components between 'a' and 'b'.
-
-- class Matrix
- - This class represents a matrix of arbitrary size and operations on it.
-
- **Overview about the methods:**
-
- - \_\_str\_\_() : returns a string representation
- - operator * : implements the matrix vector multiplication
- implements the matrix-scalar multiplication.
- - changeComponent(x,y,value) : changes the specified component.
- - component(x,y) : returns the specified component.
- - width() : returns the width of the matrix
- - height() : returns the height of the matrix
- - operator + : implements the matrix-addition.
- - operator - _ implements the matrix-subtraction
-
-- function squareZeroMatrix(N)
- - returns a square zero-matrix of dimension NxN
-- function randomMatrix(W,H,a,b)
- - returns a random matrix WxH with integer components between 'a' and 'b'
----
-
-## Documentation
-
-The module is well documented. You can use the python in-built ```help(...)``` function.
-For instance: ```help(Vector)``` gives you all information about the Vector-class.
-Or ```help(unitBasisVector)``` gives you all information you needed about the
-global function ```unitBasisVector(...)```. If you need informations about a certain
-method you type ```help(CLASSNAME.METHODNAME)```.
-
----
-
-## Usage
-
-You will find the module in the **src** directory its called ```lib.py```. You need to
-import this module in your project. Alternative you can also use the file ```lib.pyc``` in python-bytecode.
-
----
-
-## Tests
-
-In the **src** directory you also find the test-suite, its called ```tests.py```.
-The test-suite uses the built-in python-test-framework **unittest**.
diff --git a/linear_algebra_python/src/lib.py b/linear_algebra_python/src/lib.py
deleted file mode 100644
index 281991a93b2d..000000000000
--- a/linear_algebra_python/src/lib.py
+++ /dev/null
@@ -1,323 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-Created on Mon Feb 26 14:29:11 2018
-
-@author: Christian Bender
-@license: MIT-license
-
-This module contains some useful classes and functions for dealing
-with linear algebra in python.
-
-Overview:
-
-- class Vector
-- function zeroVector(dimension)
-- function unitBasisVector(dimension,pos)
-- function axpy(scalar,vector1,vector2)
-- function randomVector(N,a,b)
-- class Matrix
-- function squareZeroMatrix(N)
-- function randomMatrix(W,H,a,b)
-"""
-
-
-import math
-import random
-
-
-class Vector(object):
- """
- This class represents a vector of arbitray size.
- You need to give the vector components.
-
- Overview about the methods:
-
- constructor(components : list) : init the vector
- set(components : list) : changes the vector components.
- __str__() : toString method
- component(i : int): gets the i-th component (start by 0)
- __len__() : gets the size of the vector (number of components)
- euclidLength() : returns the eulidean length of the vector.
- operator + : vector addition
- operator - : vector subtraction
- operator * : scalar multiplication and dot product
- copy() : copies this vector and returns it.
- changeComponent(pos,value) : changes the specified component.
- TODO: compare-operator
- """
- def __init__(self,components=[]):
- """
- input: components or nothing
- simple constructor for init the vector
- """
- self.__components = list(components)
- def set(self,components):
- """
- input: new components
- changes the components of the vector.
- replace the components with newer one.
- """
- if len(components) > 0:
- self.__components = list(components)
- else:
- raise Exception("please give any vector")
- def __str__(self):
- """
- returns a string representation of the vector
- """
- return "(" + ",".join(map(str, self.__components)) + ")"
- def component(self,i):
- """
- input: index (start at 0)
- output: the i-th component of the vector.
- """
- if type(i) is int and -len(self.__components) <= i < len(self.__components) :
- return self.__components[i]
- else:
- raise Exception("index out of range")
- def __len__(self):
- """
- returns the size of the vector
- """
- return len(self.__components)
- def eulidLength(self):
- """
- returns the eulidean length of the vector
- """
- summe = 0
- for c in self.__components:
- summe += c**2
- return math.sqrt(summe)
- def __add__(self,other):
- """
- input: other vector
- assumes: other vector has the same size
- returns a new vector that represents the sum.
- """
- size = len(self)
- if size == len(other):
- result = [self.__components[i] + other.component(i) for i in range(size)]
- return Vector(result)
- else:
- raise Exception("must have the same size")
- def __sub__(self,other):
- """
- input: other vector
- assumes: other vector has the same size
- returns a new vector that represents the differenz.
- """
- size = len(self)
- if size == len(other):
- result = [self.__components[i] - other.component(i) for i in range(size)]
- return result
- else: # error case
- raise Exception("must have the same size")
- def __mul__(self,other):
- """
- mul implements the scalar multiplication
- and the dot-product
- """
- if isinstance(other,float) or isinstance(other,int):
- ans = [c*other for c in self.__components]
- return ans
- elif (isinstance(other,Vector) and (len(self) == len(other))):
- size = len(self)
- summe = 0
- for i in range(size):
- summe += self.__components[i] * other.component(i)
- return summe
- else: # error case
- raise Exception("invalide operand!")
- def copy(self):
- """
- copies this vector and returns it.
- """
- return Vector(self.__components)
- def changeComponent(self,pos,value):
- """
- input: an index (pos) and a value
- changes the specified component (pos) with the
- 'value'
- """
- #precondition
- assert (-len(self.__components) <= pos < len(self.__components))
- self.__components[pos] = value
-
-def zeroVector(dimension):
- """
- returns a zero-vector of size 'dimension'
- """
- #precondition
- assert(isinstance(dimension,int))
- return Vector([0]*dimension)
-
-
-def unitBasisVector(dimension,pos):
- """
- returns a unit basis vector with a One
- at index 'pos' (indexing at 0)
- """
- #precondition
- assert(isinstance(dimension,int) and (isinstance(pos,int)))
- ans = [0]*dimension
- ans[pos] = 1
- return Vector(ans)
-
-
-def axpy(scalar,x,y):
- """
- input: a 'scalar' and two vectors 'x' and 'y'
- output: a vector
- computes the axpy operation
- """
- # precondition
- assert(isinstance(x,Vector) and (isinstance(y,Vector)) \
- and (isinstance(scalar,int) or isinstance(scalar,float)))
- return (x*scalar + y)
-
-
-def randomVector(N,a,b):
- """
- input: size (N) of the vector.
- random range (a,b)
- output: returns a random vector of size N, with
- random integer components between 'a' and 'b'.
- """
- random.seed(None)
- ans = [random.randint(a,b) for i in range(N)]
- return Vector(ans)
-
-
-class Matrix(object):
- """
- class: Matrix
- This class represents a arbitrary matrix.
-
- Overview about the methods:
-
- __str__() : returns a string representation
- operator * : implements the matrix vector multiplication
- implements the matrix-scalar multiplication.
- changeComponent(x,y,value) : changes the specified component.
- component(x,y) : returns the specified component.
- width() : returns the width of the matrix
- height() : returns the height of the matrix
- operator + : implements the matrix-addition.
- operator - _ implements the matrix-subtraction
- """
- def __init__(self,matrix,w,h):
- """
- simple constructor for initialzes
- the matrix with components.
- """
- self.__matrix = matrix
- self.__width = w
- self.__height = h
- def __str__(self):
- """
- returns a string representation of this
- matrix.
- """
- ans = ""
- for i in range(self.__height):
- ans += "|"
- for j in range(self.__width):
- if j < self.__width -1:
- ans += str(self.__matrix[i][j]) + ","
- else:
- ans += str(self.__matrix[i][j]) + "|\n"
- return ans
- def changeComponent(self,x,y, value):
- """
- changes the x-y component of this matrix
- """
- if x >= 0 and x < self.__height and y >= 0 and y < self.__width:
- self.__matrix[x][y] = value
- else:
- raise Exception ("changeComponent: indices out of bounds")
- def component(self,x,y):
- """
- returns the specified (x,y) component
- """
- if x >= 0 and x < self.__height and y >= 0 and y < self.__width:
- return self.__matrix[x][y]
- else:
- raise Exception ("changeComponent: indices out of bounds")
- def width(self):
- """
- getter for the width
- """
- return self.__width
- def height(self):
- """
- getter for the height
- """
- return self.__height
- def __mul__(self,other):
- """
- implements the matrix-vector multiplication.
- implements the matrix-scalar multiplication
- """
- if isinstance(other, Vector): # vector-matrix
- if (len(other) == self.__width):
- ans = zeroVector(self.__height)
- for i in range(self.__height):
- summe = 0
- for j in range(self.__width):
- summe += other.component(j) * self.__matrix[i][j]
- ans.changeComponent(i,summe)
- summe = 0
- return ans
- else:
- raise Exception("vector must have the same size as the " + "number of columns of the matrix!")
- elif isinstance(other,int) or isinstance(other,float): # matrix-scalar
- matrix = [[self.__matrix[i][j] * other for j in range(self.__width)] for i in range(self.__height)]
- return Matrix(matrix,self.__width,self.__height)
- def __add__(self,other):
- """
- implements the matrix-addition.
- """
- if (self.__width == other.width() and self.__height == other.height()):
- matrix = []
- for i in range(self.__height):
- row = []
- for j in range(self.__width):
- row.append(self.__matrix[i][j] + other.component(i,j))
- matrix.append(row)
- return Matrix(matrix,self.__width,self.__height)
- else:
- raise Exception("matrix must have the same dimension!")
- def __sub__(self,other):
- """
- implements the matrix-subtraction.
- """
- if (self.__width == other.width() and self.__height == other.height()):
- matrix = []
- for i in range(self.__height):
- row = []
- for j in range(self.__width):
- row.append(self.__matrix[i][j] - other.component(i,j))
- matrix.append(row)
- return Matrix(matrix,self.__width,self.__height)
- else:
- raise Exception("matrix must have the same dimension!")
-
-
-def squareZeroMatrix(N):
- """
- returns a square zero-matrix of dimension NxN
- """
- ans = [[0]*N for i in range(N)]
- return Matrix(ans,N,N)
-
-
-def randomMatrix(W,H,a,b):
- """
- returns a random matrix WxH with integer components
- between 'a' and 'b'
- """
- random.seed(None)
- matrix = [[random.randint(a,b) for j in range(W)] for i in range(H)]
- return Matrix(matrix,W,H)
-
-
diff --git a/linear_algebra_python/src/tests.py b/linear_algebra_python/src/tests.py
deleted file mode 100644
index a26eb92653e2..000000000000
--- a/linear_algebra_python/src/tests.py
+++ /dev/null
@@ -1,133 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-Created on Mon Feb 26 15:40:07 2018
-
-@author: Christian Bender
-@license: MIT-license
-
-This file contains the test-suite for the linear algebra library.
-"""
-
-import unittest
-from lib import *
-
-class Test(unittest.TestCase):
- def test_component(self):
- """
- test for method component
- """
- x = Vector([1,2,3])
- self.assertEqual(x.component(0),1)
- self.assertEqual(x.component(2),3)
- try:
- y = Vector()
- self.assertTrue(False)
- except:
- self.assertTrue(True)
- def test_str(self):
- """
- test for toString() method
- """
- x = Vector([0,0,0,0,0,1])
- self.assertEqual(str(x),"(0,0,0,0,0,1)")
- def test_size(self):
- """
- test for size()-method
- """
- x = Vector([1,2,3,4])
- self.assertEqual(len(x),4)
- def test_euclidLength(self):
- """
- test for the eulidean length
- """
- x = Vector([1,2])
- self.assertAlmostEqual(x.eulidLength(),2.236,3)
- def test_add(self):
- """
- test for + operator
- """
- x = Vector([1,2,3])
- y = Vector([1,1,1])
- self.assertEqual((x+y).component(0),2)
- self.assertEqual((x+y).component(1),3)
- self.assertEqual((x+y).component(2),4)
- def test_sub(self):
- """
- test for - operator
- """
- x = Vector([1,2,3])
- y = Vector([1,1,1])
- self.assertEqual((x-y).component(0),0)
- self.assertEqual((x-y).component(1),1)
- self.assertEqual((x-y).component(2),2)
- def test_mul(self):
- """
- test for * operator
- """
- x = Vector([1,2,3])
- a = Vector([2,-1,4]) # for test of dot-product
- b = Vector([1,-2,-1])
- self.assertEqual(str(x*3.0),"(3.0,6.0,9.0)")
- self.assertEqual((a*b),0)
- def test_zeroVector(self):
- """
- test for the global function zeroVector(...)
- """
- self.assertTrue(str(zeroVector(10)).count("0") == 10)
- def test_unitBasisVector(self):
- """
- test for the global function unitBasisVector(...)
- """
- self.assertEqual(str(unitBasisVector(3,1)),"(0,1,0)")
- def test_axpy(self):
- """
- test for the global function axpy(...) (operation)
- """
- x = Vector([1,2,3])
- y = Vector([1,0,1])
- self.assertEqual(str(axpy(2,x,y)),"(3,4,7)")
- def test_copy(self):
- """
- test for the copy()-method
- """
- x = Vector([1,0,0,0,0,0])
- y = x.copy()
- self.assertEqual(str(x),str(y))
- def test_changeComponent(self):
- """
- test for the changeComponent(...)-method
- """
- x = Vector([1,0,0])
- x.changeComponent(0,0)
- x.changeComponent(1,1)
- self.assertEqual(str(x),"(0,1,0)")
- def test_str_matrix(self):
- A = Matrix([[1,2,3],[2,4,5],[6,7,8]],3,3)
- self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n",str(A))
- def test__mul__matrix(self):
- A = Matrix([[1,2,3],[4,5,6],[7,8,9]],3,3)
- x = Vector([1,2,3])
- self.assertEqual("(14,32,50)",str(A*x))
- self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n",str(A*2))
- def test_changeComponent_matrix(self):
- A = Matrix([[1,2,3],[2,4,5],[6,7,8]],3,3)
- A.changeComponent(0,2,5)
- self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n",str(A))
- def test_component_matrix(self):
- A = Matrix([[1,2,3],[2,4,5],[6,7,8]],3,3)
- self.assertEqual(7,A.component(2,1),0.01)
- def test__add__matrix(self):
- A = Matrix([[1,2,3],[2,4,5],[6,7,8]],3,3)
- B = Matrix([[1,2,7],[2,4,5],[6,7,10]],3,3)
- self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n",str(A+B))
- def test__sub__matrix(self):
- A = Matrix([[1,2,3],[2,4,5],[6,7,8]],3,3)
- B = Matrix([[1,2,7],[2,4,5],[6,7,10]],3,3)
- self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n",str(A-B))
- def test_squareZeroMatrix(self):
- self.assertEqual('|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|'
- +'\n|0,0,0,0,0|\n',str(squareZeroMatrix(5)))
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/machine_learning/NaiveBayes.ipynb b/machine_learning/NaiveBayes.ipynb
deleted file mode 100644
index 5a427c5cb965..000000000000
--- a/machine_learning/NaiveBayes.ipynb
+++ /dev/null
@@ -1,1659 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "code",
- "execution_count": 1,
- "metadata": {
- "collapsed": true
- },
- "outputs": [],
- "source": [
- "from sklearn import datasets\n",
- "import pandas as pd"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 2,
- "metadata": {
- "collapsed": true
- },
- "outputs": [],
- "source": [
- "iris = datasets.load_iris()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 3,
- "metadata": {
- "collapsed": true
- },
- "outputs": [],
- "source": [
- "df = pd.DataFrame(iris.data)\n",
- "df.columns = [\"sl\", \"sw\", 'pl', 'pw']"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 4,
- "metadata": {
- "collapsed": true
- },
- "outputs": [],
- "source": [
- "def abc(k, *val):\n",
- " if k < val[0]:\n",
- " return 0\n",
- " else:\n",
- " return 1"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 5,
- "metadata": {},
- "outputs": [
- {
- "data": {
- "text/plain": [
- "0 1\n",
- "1 0\n",
- "2 0\n",
- "3 0\n",
- "4 1\n",
- "5 1\n",
- "6 0\n",
- "7 1\n",
- "8 0\n",
- "9 0\n",
- "10 1\n",
- "11 0\n",
- "12 0\n",
- "13 0\n",
- "14 1\n",
- "15 1\n",
- "16 1\n",
- "17 1\n",
- "18 1\n",
- "19 1\n",
- "20 1\n",
- "21 1\n",
- "22 0\n",
- "23 1\n",
- "24 0\n",
- "25 1\n",
- "26 1\n",
- "27 1\n",
- "28 1\n",
- "29 0\n",
- " ..\n",
- "120 1\n",
- "121 1\n",
- "122 1\n",
- "123 1\n",
- "124 1\n",
- "125 1\n",
- "126 1\n",
- "127 1\n",
- "128 1\n",
- "129 1\n",
- "130 1\n",
- "131 1\n",
- "132 1\n",
- "133 1\n",
- "134 1\n",
- "135 1\n",
- "136 1\n",
- "137 1\n",
- "138 1\n",
- "139 1\n",
- "140 1\n",
- "141 1\n",
- "142 1\n",
- "143 1\n",
- "144 1\n",
- "145 1\n",
- "146 1\n",
- "147 1\n",
- "148 1\n",
- "149 1\n",
- "Name: sl, dtype: int64"
- ]
- },
- "execution_count": 5,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "df.sl.apply(abc, args=(5,))"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": true
- },
- "outputs": [],
- "source": []
- },
- {
- "cell_type": "code",
- "execution_count": 6,
- "metadata": {},
- "outputs": [],
- "source": [
- "def label(val, *boundaries):\n",
- " if (val < boundaries[0]):\n",
- " return 'a'\n",
- " elif (val < boundaries[1]):\n",
- " return 'b'\n",
- " elif (val < boundaries[2]):\n",
- " return 'c'\n",
- " else:\n",
- " return 'd'\n",
- "\n",
- "def toLabel(df, old_feature_name):\n",
- " second = df[old_feature_name].mean()\n",
- " minimum = df[old_feature_name].min()\n",
- " first = (minimum + second)/2\n",
- " maximum = df[old_feature_name].max()\n",
- " third = (maximum + second)/2\n",
- " return df[old_feature_name].apply(label, args= (first, second, third))"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 7,
- "metadata": {},
- "outputs": [
- {
- "data": {
- "text/html": [
- "
\n",
- "
\n",
- " \n",
- "
\n",
- "
\n",
- "
sl
\n",
- "
sw
\n",
- "
pl
\n",
- "
pw
\n",
- "
sl_labeled
\n",
- "
sw_labeled
\n",
- "
pl_labeled
\n",
- "
pw_labeled
\n",
- "
\n",
- " \n",
- " \n",
- "
\n",
- "
0
\n",
- "
5.1
\n",
- "
3.5
\n",
- "
1.4
\n",
- "
0.2
\n",
- "
b
\n",
- "
c
\n",
- "
a
\n",
- "
a
\n",
- "
\n",
- "
\n",
- "
1
\n",
- "
4.9
\n",
- "
3.0
\n",
- "
1.4
\n",
- "
0.2
\n",
- "
a
\n",
- "
b
\n",
- "
a
\n",
- "
a
\n",
- "
\n",
- "
\n",
- "
2
\n",
- "
4.7
\n",
- "
3.2
\n",
- "
1.3
\n",
- "
0.2
\n",
- "
a
\n",
- "
c
\n",
- "
a
\n",
- "
a
\n",
- "
\n",
- "
\n",
- "
3
\n",
- "
4.6
\n",
- "
3.1
\n",
- "
1.5
\n",
- "
0.2
\n",
- "
a
\n",
- "
c
\n",
- "
a
\n",
- "
a
\n",
- "
\n",
- "
\n",
- "
4
\n",
- "
5.0
\n",
- "
3.6
\n",
- "
1.4
\n",
- "
0.2
\n",
- "
a
\n",
- "
c
\n",
- "
a
\n",
- "
a
\n",
- "
\n",
- "
\n",
- "
5
\n",
- "
5.4
\n",
- "
3.9
\n",
- "
1.7
\n",
- "
0.4
\n",
- "
b
\n",
- "
d
\n",
- "
a
\n",
- "
a
\n",
- "
\n",
- "
\n",
- "
6
\n",
- "
4.6
\n",
- "
3.4
\n",
- "
1.4
\n",
- "
0.3
\n",
- "
a
\n",
- "
c
\n",
- "
a
\n",
- "
a
\n",
- "
\n",
- "
\n",
- "
7
\n",
- "
5.0
\n",
- "
3.4
\n",
- "
1.5
\n",
- "
0.2
\n",
- "
a
\n",
- "
c
\n",
- "
a
\n",
- "
a
\n",
- "
\n",
- "
\n",
- "
8
\n",
- "
4.4
\n",
- "
2.9
\n",
- "
1.4
\n",
- "
0.2
\n",
- "
a
\n",
- "
b
\n",
- "
a
\n",
- "
a
\n",
- "
\n",
- "
\n",
- "
9
\n",
- "
4.9
\n",
- "
3.1
\n",
- "
1.5
\n",
- "
0.1
\n",
- "
a
\n",
- "
c
\n",
- "
a
\n",
- "
a
\n",
- "
\n",
- "
\n",
- "
10
\n",
- "
5.4
\n",
- "
3.7
\n",
- "
1.5
\n",
- "
0.2
\n",
- "
b
\n",
- "
c
\n",
- "
a
\n",
- "
a
\n",
- "
\n",
- "
\n",
- "
11
\n",
- "
4.8
\n",
- "
3.4
\n",
- "
1.6
\n",
- "
0.2
\n",
- "
a
\n",
- "
c
\n",
- "
a
\n",
- "
a
\n",
- "
\n",
- "
\n",
- "
12
\n",
- "
4.8
\n",
- "
3.0
\n",
- "
1.4
\n",
- "
0.1
\n",
- "
a
\n",
- "
b
\n",
- "
a
\n",
- "
a
\n",
- "
\n",
- "
\n",
- "
13
\n",
- "
4.3
\n",
- "
3.0
\n",
- "
1.1
\n",
- "
0.1
\n",
- "
a
\n",
- "
b
\n",
- "
a
\n",
- "
a
\n",
- "
\n",
- "
\n",
- "
14
\n",
- "
5.8
\n",
- "
4.0
\n",
- "
1.2
\n",
- "
0.2
\n",
- "
b
\n",
- "
d
\n",
- "
a
\n",
- "
a
\n",
- "
\n",
- "
\n",
- "
15
\n",
- "
5.7
\n",
- "
4.4
\n",
- "
1.5
\n",
- "
0.4
\n",
- "
b
\n",
- "
d
\n",
- "
a
\n",
- "
a
\n",
- "
\n",
- "
\n",
- "
16
\n",
- "
5.4
\n",
- "
3.9
\n",
- "
1.3
\n",
- "
0.4
\n",
- "
b
\n",
- "
d
\n",
- "
a
\n",
- "
a
\n",
- "
\n",
- "
\n",
- "
17
\n",
- "
5.1
\n",
- "
3.5
\n",
- "
1.4
\n",
- "
0.3
\n",
- "
b
\n",
- "
c
\n",
- "
a
\n",
- "
a
\n",
- "
\n",
- "
\n",
- "
18
\n",
- "
5.7
\n",
- "
3.8
\n",
- "
1.7
\n",
- "
0.3
\n",
- "
b
\n",
- "
d
\n",
- "
a
\n",
- "
a
\n",
- "
\n",
- "
\n",
- "
19
\n",
- "
5.1
\n",
- "
3.8
\n",
- "
1.5
\n",
- "
0.3
\n",
- "
b
\n",
- "
d
\n",
- "
a
\n",
- "
a
\n",
- "
\n",
- "
\n",
- "
20
\n",
- "
5.4
\n",
- "
3.4
\n",
- "
1.7
\n",
- "
0.2
\n",
- "
b
\n",
- "
c
\n",
- "
a
\n",
- "
a
\n",
- "
\n",
- "
\n",
- "
21
\n",
- "
5.1
\n",
- "
3.7
\n",
- "
1.5
\n",
- "
0.4
\n",
- "
b
\n",
- "
c
\n",
- "
a
\n",
- "
a
\n",
- "
\n",
- "
\n",
- "
22
\n",
- "
4.6
\n",
- "
3.6
\n",
- "
1.0
\n",
- "
0.2
\n",
- "
a
\n",
- "
c
\n",
- "
a
\n",
- "
a
\n",
- "
\n",
- "
\n",
- "
23
\n",
- "
5.1
\n",
- "
3.3
\n",
- "
1.7
\n",
- "
0.5
\n",
- "
b
\n",
- "
c
\n",
- "
a
\n",
- "
a
\n",
- "
\n",
- "
\n",
- "
24
\n",
- "
4.8
\n",
- "
3.4
\n",
- "
1.9
\n",
- "
0.2
\n",
- "
a
\n",
- "
c
\n",
- "
a
\n",
- "
a
\n",
- "
\n",
- "
\n",
- "
25
\n",
- "
5.0
\n",
- "
3.0
\n",
- "
1.6
\n",
- "
0.2
\n",
- "
a
\n",
- "
b
\n",
- "
a
\n",
- "
a
\n",
- "
\n",
- "
\n",
- "
26
\n",
- "
5.0
\n",
- "
3.4
\n",
- "
1.6
\n",
- "
0.4
\n",
- "
a
\n",
- "
c
\n",
- "
a
\n",
- "
a
\n",
- "
\n",
- "
\n",
- "
27
\n",
- "
5.2
\n",
- "
3.5
\n",
- "
1.5
\n",
- "
0.2
\n",
- "
b
\n",
- "
c
\n",
- "
a
\n",
- "
a
\n",
- "
\n",
- "
\n",
- "
28
\n",
- "
5.2
\n",
- "
3.4
\n",
- "
1.4
\n",
- "
0.2
\n",
- "
b
\n",
- "
c
\n",
- "
a
\n",
- "
a
\n",
- "
\n",
- "
\n",
- "
29
\n",
- "
4.7
\n",
- "
3.2
\n",
- "
1.6
\n",
- "
0.2
\n",
- "
a
\n",
- "
c
\n",
- "
a
\n",
- "
a
\n",
- "
\n",
- "
\n",
- "
...
\n",
- "
...
\n",
- "
...
\n",
- "
...
\n",
- "
...
\n",
- "
...
\n",
- "
...
\n",
- "
...
\n",
- "
...
\n",
- "
\n",
- "
\n",
- "
120
\n",
- "
6.9
\n",
- "
3.2
\n",
- "
5.7
\n",
- "
2.3
\n",
- "
d
\n",
- "
c
\n",
- "
d
\n",
- "
d
\n",
- "
\n",
- "
\n",
- "
121
\n",
- "
5.6
\n",
- "
2.8
\n",
- "
4.9
\n",
- "
2.0
\n",
- "
b
\n",
- "
b
\n",
- "
c
\n",
- "
d
\n",
- "
\n",
- "
\n",
- "
122
\n",
- "
7.7
\n",
- "
2.8
\n",
- "
6.7
\n",
- "
2.0
\n",
- "
d
\n",
- "
b
\n",
- "
d
\n",
- "
d
\n",
- "
\n",
- "
\n",
- "
123
\n",
- "
6.3
\n",
- "
2.7
\n",
- "
4.9
\n",
- "
1.8
\n",
- "
c
\n",
- "
b
\n",
- "
c
\n",
- "
c
\n",
- "
\n",
- "
\n",
- "
124
\n",
- "
6.7
\n",
- "
3.3
\n",
- "
5.7
\n",
- "
2.1
\n",
- "
c
\n",
- "
c
\n",
- "
d
\n",
- "
d
\n",
- "
\n",
- "
\n",
- "
125
\n",
- "
7.2
\n",
- "
3.2
\n",
- "
6.0
\n",
- "
1.8
\n",
- "
d
\n",
- "
c
\n",
- "
d
\n",
- "
c
\n",
- "
\n",
- "
\n",
- "
126
\n",
- "
6.2
\n",
- "
2.8
\n",
- "
4.8
\n",
- "
1.8
\n",
- "
c
\n",
- "
b
\n",
- "
c
\n",
- "
c
\n",
- "
\n",
- "
\n",
- "
127
\n",
- "
6.1
\n",
- "
3.0
\n",
- "
4.9
\n",
- "
1.8
\n",
- "
c
\n",
- "
b
\n",
- "
c
\n",
- "
c
\n",
- "
\n",
- "
\n",
- "
128
\n",
- "
6.4
\n",
- "
2.8
\n",
- "
5.6
\n",
- "
2.1
\n",
- "
c
\n",
- "
b
\n",
- "
d
\n",
- "
d
\n",
- "
\n",
- "
\n",
- "
129
\n",
- "
7.2
\n",
- "
3.0
\n",
- "
5.8
\n",
- "
1.6
\n",
- "
d
\n",
- "
b
\n",
- "
d
\n",
- "
c
\n",
- "
\n",
- "
\n",
- "
130
\n",
- "
7.4
\n",
- "
2.8
\n",
- "
6.1
\n",
- "
1.9
\n",
- "
d
\n",
- "
b
\n",
- "
d
\n",
- "
d
\n",
- "
\n",
- "
\n",
- "
131
\n",
- "
7.9
\n",
- "
3.8
\n",
- "
6.4
\n",
- "
2.0
\n",
- "
d
\n",
- "
d
\n",
- "
d
\n",
- "
d
\n",
- "
\n",
- "
\n",
- "
132
\n",
- "
6.4
\n",
- "
2.8
\n",
- "
5.6
\n",
- "
2.2
\n",
- "
c
\n",
- "
b
\n",
- "
d
\n",
- "
d
\n",
- "
\n",
- "
\n",
- "
133
\n",
- "
6.3
\n",
- "
2.8
\n",
- "
5.1
\n",
- "
1.5
\n",
- "
c
\n",
- "
b
\n",
- "
c
\n",
- "
c
\n",
- "
\n",
- "
\n",
- "
134
\n",
- "
6.1
\n",
- "
2.6
\n",
- "
5.6
\n",
- "
1.4
\n",
- "
c
\n",
- "
b
\n",
- "
d
\n",
- "
c
\n",
- "
\n",
- "
\n",
- "
135
\n",
- "
7.7
\n",
- "
3.0
\n",
- "
6.1
\n",
- "
2.3
\n",
- "
d
\n",
- "
b
\n",
- "
d
\n",
- "
d
\n",
- "
\n",
- "
\n",
- "
136
\n",
- "
6.3
\n",
- "
3.4
\n",
- "
5.6
\n",
- "
2.4
\n",
- "
c
\n",
- "
c
\n",
- "
d
\n",
- "
d
\n",
- "
\n",
- "
\n",
- "
137
\n",
- "
6.4
\n",
- "
3.1
\n",
- "
5.5
\n",
- "
1.8
\n",
- "
c
\n",
- "
c
\n",
- "
d
\n",
- "
c
\n",
- "
\n",
- "
\n",
- "
138
\n",
- "
6.0
\n",
- "
3.0
\n",
- "
4.8
\n",
- "
1.8
\n",
- "
c
\n",
- "
b
\n",
- "
c
\n",
- "
c
\n",
- "
\n",
- "
\n",
- "
139
\n",
- "
6.9
\n",
- "
3.1
\n",
- "
5.4
\n",
- "
2.1
\n",
- "
d
\n",
- "
c
\n",
- "
d
\n",
- "
d
\n",
- "
\n",
- "
\n",
- "
140
\n",
- "
6.7
\n",
- "
3.1
\n",
- "
5.6
\n",
- "
2.4
\n",
- "
c
\n",
- "
c
\n",
- "
d
\n",
- "
d
\n",
- "
\n",
- "
\n",
- "
141
\n",
- "
6.9
\n",
- "
3.1
\n",
- "
5.1
\n",
- "
2.3
\n",
- "
d
\n",
- "
c
\n",
- "
c
\n",
- "
d
\n",
- "
\n",
- "
\n",
- "
142
\n",
- "
5.8
\n",
- "
2.7
\n",
- "
5.1
\n",
- "
1.9
\n",
- "
b
\n",
- "
b
\n",
- "
c
\n",
- "
d
\n",
- "
\n",
- "
\n",
- "
143
\n",
- "
6.8
\n",
- "
3.2
\n",
- "
5.9
\n",
- "
2.3
\n",
- "
c
\n",
- "
c
\n",
- "
d
\n",
- "
d
\n",
- "
\n",
- "
\n",
- "
144
\n",
- "
6.7
\n",
- "
3.3
\n",
- "
5.7
\n",
- "
2.5
\n",
- "
c
\n",
- "
c
\n",
- "
d
\n",
- "
d
\n",
- "
\n",
- "
\n",
- "
145
\n",
- "
6.7
\n",
- "
3.0
\n",
- "
5.2
\n",
- "
2.3
\n",
- "
c
\n",
- "
b
\n",
- "
c
\n",
- "
d
\n",
- "
\n",
- "
\n",
- "
146
\n",
- "
6.3
\n",
- "
2.5
\n",
- "
5.0
\n",
- "
1.9
\n",
- "
c
\n",
- "
a
\n",
- "
c
\n",
- "
d
\n",
- "
\n",
- "
\n",
- "
147
\n",
- "
6.5
\n",
- "
3.0
\n",
- "
5.2
\n",
- "
2.0
\n",
- "
c
\n",
- "
b
\n",
- "
c
\n",
- "
d
\n",
- "
\n",
- "
\n",
- "
148
\n",
- "
6.2
\n",
- "
3.4
\n",
- "
5.4
\n",
- "
2.3
\n",
- "
c
\n",
- "
c
\n",
- "
d
\n",
- "
d
\n",
- "
\n",
- "
\n",
- "
149
\n",
- "
5.9
\n",
- "
3.0
\n",
- "
5.1
\n",
- "
1.8
\n",
- "
c
\n",
- "
b
\n",
- "
c
\n",
- "
c
\n",
- "
\n",
- " \n",
- "
\n",
- "
150 rows × 8 columns
\n",
- "
"
- ],
- "text/plain": [
- " sl sw pl pw sl_labeled sw_labeled pl_labeled pw_labeled\n",
- "0 5.1 3.5 1.4 0.2 b c a a\n",
- "1 4.9 3.0 1.4 0.2 a b a a\n",
- "2 4.7 3.2 1.3 0.2 a c a a\n",
- "3 4.6 3.1 1.5 0.2 a c a a\n",
- "4 5.0 3.6 1.4 0.2 a c a a\n",
- "5 5.4 3.9 1.7 0.4 b d a a\n",
- "6 4.6 3.4 1.4 0.3 a c a a\n",
- "7 5.0 3.4 1.5 0.2 a c a a\n",
- "8 4.4 2.9 1.4 0.2 a b a a\n",
- "9 4.9 3.1 1.5 0.1 a c a a\n",
- "10 5.4 3.7 1.5 0.2 b c a a\n",
- "11 4.8 3.4 1.6 0.2 a c a a\n",
- "12 4.8 3.0 1.4 0.1 a b a a\n",
- "13 4.3 3.0 1.1 0.1 a b a a\n",
- "14 5.8 4.0 1.2 0.2 b d a a\n",
- "15 5.7 4.4 1.5 0.4 b d a a\n",
- "16 5.4 3.9 1.3 0.4 b d a a\n",
- "17 5.1 3.5 1.4 0.3 b c a a\n",
- "18 5.7 3.8 1.7 0.3 b d a a\n",
- "19 5.1 3.8 1.5 0.3 b d a a\n",
- "20 5.4 3.4 1.7 0.2 b c a a\n",
- "21 5.1 3.7 1.5 0.4 b c a a\n",
- "22 4.6 3.6 1.0 0.2 a c a a\n",
- "23 5.1 3.3 1.7 0.5 b c a a\n",
- "24 4.8 3.4 1.9 0.2 a c a a\n",
- "25 5.0 3.0 1.6 0.2 a b a a\n",
- "26 5.0 3.4 1.6 0.4 a c a a\n",
- "27 5.2 3.5 1.5 0.2 b c a a\n",
- "28 5.2 3.4 1.4 0.2 b c a a\n",
- "29 4.7 3.2 1.6 0.2 a c a a\n",
- ".. ... ... ... ... ... ... ... ...\n",
- "120 6.9 3.2 5.7 2.3 d c d d\n",
- "121 5.6 2.8 4.9 2.0 b b c d\n",
- "122 7.7 2.8 6.7 2.0 d b d d\n",
- "123 6.3 2.7 4.9 1.8 c b c c\n",
- "124 6.7 3.3 5.7 2.1 c c d d\n",
- "125 7.2 3.2 6.0 1.8 d c d c\n",
- "126 6.2 2.8 4.8 1.8 c b c c\n",
- "127 6.1 3.0 4.9 1.8 c b c c\n",
- "128 6.4 2.8 5.6 2.1 c b d d\n",
- "129 7.2 3.0 5.8 1.6 d b d c\n",
- "130 7.4 2.8 6.1 1.9 d b d d\n",
- "131 7.9 3.8 6.4 2.0 d d d d\n",
- "132 6.4 2.8 5.6 2.2 c b d d\n",
- "133 6.3 2.8 5.1 1.5 c b c c\n",
- "134 6.1 2.6 5.6 1.4 c b d c\n",
- "135 7.7 3.0 6.1 2.3 d b d d\n",
- "136 6.3 3.4 5.6 2.4 c c d d\n",
- "137 6.4 3.1 5.5 1.8 c c d c\n",
- "138 6.0 3.0 4.8 1.8 c b c c\n",
- "139 6.9 3.1 5.4 2.1 d c d d\n",
- "140 6.7 3.1 5.6 2.4 c c d d\n",
- "141 6.9 3.1 5.1 2.3 d c c d\n",
- "142 5.8 2.7 5.1 1.9 b b c d\n",
- "143 6.8 3.2 5.9 2.3 c c d d\n",
- "144 6.7 3.3 5.7 2.5 c c d d\n",
- "145 6.7 3.0 5.2 2.3 c b c d\n",
- "146 6.3 2.5 5.0 1.9 c a c d\n",
- "147 6.5 3.0 5.2 2.0 c b c d\n",
- "148 6.2 3.4 5.4 2.3 c c d d\n",
- "149 5.9 3.0 5.1 1.8 c b c c\n",
- "\n",
- "[150 rows x 8 columns]"
- ]
- },
- "execution_count": 7,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "df['sl_labeled'] = toLabel(df, 'sl')\n",
- "df['sw_labeled'] = toLabel(df, 'sw')\n",
- "df['pl_labeled'] = toLabel(df, 'pl')\n",
- "df['pw_labeled'] = toLabel(df, 'pw')\n",
- "df"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 8,
- "metadata": {},
- "outputs": [],
- "source": [
- "df.drop(['sl', 'sw', 'pl', 'pw'], axis = 1, inplace = True)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 9,
- "metadata": {},
- "outputs": [
- {
- "data": {
- "text/plain": [
- "{'a', 'b', 'c', 'd'}"
- ]
- },
- "execution_count": 9,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "set(df['sl_labeled'])"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 12,
- "metadata": {},
- "outputs": [],
- "source": [
- "df[\"output\"] = iris.target"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 13,
- "metadata": {},
- "outputs": [
- {
- "data": {
- "text/html": [
- "
\n",
- "
\n",
- " \n",
- "
\n",
- "
\n",
- "
sl_labeled
\n",
- "
sw_labeled
\n",
- "
pl_labeled
\n",
- "
pw_labeled
\n",
- "
output
\n",
- "
\n",
- " \n",
- " \n",
- "
\n",
- "
0
\n",
- "
b
\n",
- "
c
\n",
- "
a
\n",
- "
a
\n",
- "
0
\n",
- "
\n",
- "
\n",
- "
1
\n",
- "
a
\n",
- "
b
\n",
- "
a
\n",
- "
a
\n",
- "
0
\n",
- "
\n",
- "
\n",
- "
2
\n",
- "
a
\n",
- "
c
\n",
- "
a
\n",
- "
a
\n",
- "
0
\n",
- "
\n",
- "
\n",
- "
3
\n",
- "
a
\n",
- "
c
\n",
- "
a
\n",
- "
a
\n",
- "
0
\n",
- "
\n",
- "
\n",
- "
4
\n",
- "
a
\n",
- "
c
\n",
- "
a
\n",
- "
a
\n",
- "
0
\n",
- "
\n",
- "
\n",
- "
5
\n",
- "
b
\n",
- "
d
\n",
- "
a
\n",
- "
a
\n",
- "
0
\n",
- "
\n",
- "
\n",
- "
6
\n",
- "
a
\n",
- "
c
\n",
- "
a
\n",
- "
a
\n",
- "
0
\n",
- "
\n",
- "
\n",
- "
7
\n",
- "
a
\n",
- "
c
\n",
- "
a
\n",
- "
a
\n",
- "
0
\n",
- "
\n",
- "
\n",
- "
8
\n",
- "
a
\n",
- "
b
\n",
- "
a
\n",
- "
a
\n",
- "
0
\n",
- "
\n",
- "
\n",
- "
9
\n",
- "
a
\n",
- "
c
\n",
- "
a
\n",
- "
a
\n",
- "
0
\n",
- "
\n",
- "
\n",
- "
10
\n",
- "
b
\n",
- "
c
\n",
- "
a
\n",
- "
a
\n",
- "
0
\n",
- "
\n",
- "
\n",
- "
11
\n",
- "
a
\n",
- "
c
\n",
- "
a
\n",
- "
a
\n",
- "
0
\n",
- "
\n",
- "
\n",
- "
12
\n",
- "
a
\n",
- "
b
\n",
- "
a
\n",
- "
a
\n",
- "
0
\n",
- "
\n",
- "
\n",
- "
13
\n",
- "
a
\n",
- "
b
\n",
- "
a
\n",
- "
a
\n",
- "
0
\n",
- "
\n",
- "
\n",
- "
14
\n",
- "
b
\n",
- "
d
\n",
- "
a
\n",
- "
a
\n",
- "
0
\n",
- "
\n",
- "
\n",
- "
15
\n",
- "
b
\n",
- "
d
\n",
- "
a
\n",
- "
a
\n",
- "
0
\n",
- "
\n",
- "
\n",
- "
16
\n",
- "
b
\n",
- "
d
\n",
- "
a
\n",
- "
a
\n",
- "
0
\n",
- "
\n",
- "
\n",
- "
17
\n",
- "
b
\n",
- "
c
\n",
- "
a
\n",
- "
a
\n",
- "
0
\n",
- "
\n",
- "
\n",
- "
18
\n",
- "
b
\n",
- "
d
\n",
- "
a
\n",
- "
a
\n",
- "
0
\n",
- "
\n",
- "
\n",
- "
19
\n",
- "
b
\n",
- "
d
\n",
- "
a
\n",
- "
a
\n",
- "
0
\n",
- "
\n",
- "
\n",
- "
20
\n",
- "
b
\n",
- "
c
\n",
- "
a
\n",
- "
a
\n",
- "
0
\n",
- "
\n",
- "
\n",
- "
21
\n",
- "
b
\n",
- "
c
\n",
- "
a
\n",
- "
a
\n",
- "
0
\n",
- "
\n",
- "
\n",
- "
22
\n",
- "
a
\n",
- "
c
\n",
- "
a
\n",
- "
a
\n",
- "
0
\n",
- "
\n",
- "
\n",
- "
23
\n",
- "
b
\n",
- "
c
\n",
- "
a
\n",
- "
a
\n",
- "
0
\n",
- "
\n",
- "
\n",
- "
24
\n",
- "
a
\n",
- "
c
\n",
- "
a
\n",
- "
a
\n",
- "
0
\n",
- "
\n",
- "
\n",
- "
25
\n",
- "
a
\n",
- "
b
\n",
- "
a
\n",
- "
a
\n",
- "
0
\n",
- "
\n",
- "
\n",
- "
26
\n",
- "
a
\n",
- "
c
\n",
- "
a
\n",
- "
a
\n",
- "
0
\n",
- "
\n",
- "
\n",
- "
27
\n",
- "
b
\n",
- "
c
\n",
- "
a
\n",
- "
a
\n",
- "
0
\n",
- "
\n",
- "
\n",
- "
28
\n",
- "
b
\n",
- "
c
\n",
- "
a
\n",
- "
a
\n",
- "
0
\n",
- "
\n",
- "
\n",
- "
29
\n",
- "
a
\n",
- "
c
\n",
- "
a
\n",
- "
a
\n",
- "
0
\n",
- "
\n",
- "
\n",
- "
...
\n",
- "
...
\n",
- "
...
\n",
- "
...
\n",
- "
...
\n",
- "
...
\n",
- "
\n",
- "
\n",
- "
120
\n",
- "
d
\n",
- "
c
\n",
- "
d
\n",
- "
d
\n",
- "
2
\n",
- "
\n",
- "
\n",
- "
121
\n",
- "
b
\n",
- "
b
\n",
- "
c
\n",
- "
d
\n",
- "
2
\n",
- "
\n",
- "
\n",
- "
122
\n",
- "
d
\n",
- "
b
\n",
- "
d
\n",
- "
d
\n",
- "
2
\n",
- "
\n",
- "
\n",
- "
123
\n",
- "
c
\n",
- "
b
\n",
- "
c
\n",
- "
c
\n",
- "
2
\n",
- "
\n",
- "
\n",
- "
124
\n",
- "
c
\n",
- "
c
\n",
- "
d
\n",
- "
d
\n",
- "
2
\n",
- "
\n",
- "
\n",
- "
125
\n",
- "
d
\n",
- "
c
\n",
- "
d
\n",
- "
c
\n",
- "
2
\n",
- "
\n",
- "
\n",
- "
126
\n",
- "
c
\n",
- "
b
\n",
- "
c
\n",
- "
c
\n",
- "
2
\n",
- "
\n",
- "
\n",
- "
127
\n",
- "
c
\n",
- "
b
\n",
- "
c
\n",
- "
c
\n",
- "
2
\n",
- "
\n",
- "
\n",
- "
128
\n",
- "
c
\n",
- "
b
\n",
- "
d
\n",
- "
d
\n",
- "
2
\n",
- "
\n",
- "
\n",
- "
129
\n",
- "
d
\n",
- "
b
\n",
- "
d
\n",
- "
c
\n",
- "
2
\n",
- "
\n",
- "
\n",
- "
130
\n",
- "
d
\n",
- "
b
\n",
- "
d
\n",
- "
d
\n",
- "
2
\n",
- "
\n",
- "
\n",
- "
131
\n",
- "
d
\n",
- "
d
\n",
- "
d
\n",
- "
d
\n",
- "
2
\n",
- "
\n",
- "
\n",
- "
132
\n",
- "
c
\n",
- "
b
\n",
- "
d
\n",
- "
d
\n",
- "
2
\n",
- "
\n",
- "
\n",
- "
133
\n",
- "
c
\n",
- "
b
\n",
- "
c
\n",
- "
c
\n",
- "
2
\n",
- "
\n",
- "
\n",
- "
134
\n",
- "
c
\n",
- "
b
\n",
- "
d
\n",
- "
c
\n",
- "
2
\n",
- "
\n",
- "
\n",
- "
135
\n",
- "
d
\n",
- "
b
\n",
- "
d
\n",
- "
d
\n",
- "
2
\n",
- "
\n",
- "
\n",
- "
136
\n",
- "
c
\n",
- "
c
\n",
- "
d
\n",
- "
d
\n",
- "
2
\n",
- "
\n",
- "
\n",
- "
137
\n",
- "
c
\n",
- "
c
\n",
- "
d
\n",
- "
c
\n",
- "
2
\n",
- "
\n",
- "
\n",
- "
138
\n",
- "
c
\n",
- "
b
\n",
- "
c
\n",
- "
c
\n",
- "
2
\n",
- "
\n",
- "
\n",
- "
139
\n",
- "
d
\n",
- "
c
\n",
- "
d
\n",
- "
d
\n",
- "
2
\n",
- "
\n",
- "
\n",
- "
140
\n",
- "
c
\n",
- "
c
\n",
- "
d
\n",
- "
d
\n",
- "
2
\n",
- "
\n",
- "
\n",
- "
141
\n",
- "
d
\n",
- "
c
\n",
- "
c
\n",
- "
d
\n",
- "
2
\n",
- "
\n",
- "
\n",
- "
142
\n",
- "
b
\n",
- "
b
\n",
- "
c
\n",
- "
d
\n",
- "
2
\n",
- "
\n",
- "
\n",
- "
143
\n",
- "
c
\n",
- "
c
\n",
- "
d
\n",
- "
d
\n",
- "
2
\n",
- "
\n",
- "
\n",
- "
144
\n",
- "
c
\n",
- "
c
\n",
- "
d
\n",
- "
d
\n",
- "
2
\n",
- "
\n",
- "
\n",
- "
145
\n",
- "
c
\n",
- "
b
\n",
- "
c
\n",
- "
d
\n",
- "
2
\n",
- "
\n",
- "
\n",
- "
146
\n",
- "
c
\n",
- "
a
\n",
- "
c
\n",
- "
d
\n",
- "
2
\n",
- "
\n",
- "
\n",
- "
147
\n",
- "
c
\n",
- "
b
\n",
- "
c
\n",
- "
d
\n",
- "
2
\n",
- "
\n",
- "
\n",
- "
148
\n",
- "
c
\n",
- "
c
\n",
- "
d
\n",
- "
d
\n",
- "
2
\n",
- "
\n",
- "
\n",
- "
149
\n",
- "
c
\n",
- "
b
\n",
- "
c
\n",
- "
c
\n",
- "
2
\n",
- "
\n",
- " \n",
- "
\n",
- "
150 rows × 5 columns
\n",
- "
"
- ],
- "text/plain": [
- " sl_labeled sw_labeled pl_labeled pw_labeled output\n",
- "0 b c a a 0\n",
- "1 a b a a 0\n",
- "2 a c a a 0\n",
- "3 a c a a 0\n",
- "4 a c a a 0\n",
- "5 b d a a 0\n",
- "6 a c a a 0\n",
- "7 a c a a 0\n",
- "8 a b a a 0\n",
- "9 a c a a 0\n",
- "10 b c a a 0\n",
- "11 a c a a 0\n",
- "12 a b a a 0\n",
- "13 a b a a 0\n",
- "14 b d a a 0\n",
- "15 b d a a 0\n",
- "16 b d a a 0\n",
- "17 b c a a 0\n",
- "18 b d a a 0\n",
- "19 b d a a 0\n",
- "20 b c a a 0\n",
- "21 b c a a 0\n",
- "22 a c a a 0\n",
- "23 b c a a 0\n",
- "24 a c a a 0\n",
- "25 a b a a 0\n",
- "26 a c a a 0\n",
- "27 b c a a 0\n",
- "28 b c a a 0\n",
- "29 a c a a 0\n",
- ".. ... ... ... ... ...\n",
- "120 d c d d 2\n",
- "121 b b c d 2\n",
- "122 d b d d 2\n",
- "123 c b c c 2\n",
- "124 c c d d 2\n",
- "125 d c d c 2\n",
- "126 c b c c 2\n",
- "127 c b c c 2\n",
- "128 c b d d 2\n",
- "129 d b d c 2\n",
- "130 d b d d 2\n",
- "131 d d d d 2\n",
- "132 c b d d 2\n",
- "133 c b c c 2\n",
- "134 c b d c 2\n",
- "135 d b d d 2\n",
- "136 c c d d 2\n",
- "137 c c d c 2\n",
- "138 c b c c 2\n",
- "139 d c d d 2\n",
- "140 c c d d 2\n",
- "141 d c c d 2\n",
- "142 b b c d 2\n",
- "143 c c d d 2\n",
- "144 c c d d 2\n",
- "145 c b c d 2\n",
- "146 c a c d 2\n",
- "147 c b c d 2\n",
- "148 c c d d 2\n",
- "149 c b c c 2\n",
- "\n",
- "[150 rows x 5 columns]"
- ]
- },
- "execution_count": 13,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "df"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 14,
- "metadata": {
- "collapsed": true
- },
- "outputs": [],
- "source": [
- "def fit(data):\n",
- " output_name = data.columns[-1]\n",
- " features = data.columns[0:-1]\n",
- " counts = {}\n",
- " possible_outputs = set(data[output_name])\n",
- " for output in possible_outputs:\n",
- " counts[output] = {}\n",
- " smallData = data[data[output_name] == output]\n",
- " counts[output][\"total_count\"] = len(smallData)\n",
- " for f in features:\n",
- " counts[output][f] = {}\n",
- " possible_values = set(smallData[f])\n",
- " for value in possible_values:\n",
- " val_count = len(smallData[smallData[f] == value])\n",
- " counts[output][f][value] = val_count\n",
- " return counts"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 15,
- "metadata": {},
- "outputs": [
- {
- "data": {
- "text/plain": [
- "{0: {'pl_labeled': {'a': 50},\n",
- " 'pw_labeled': {'a': 50},\n",
- " 'sl_labeled': {'a': 28, 'b': 22},\n",
- " 'sw_labeled': {'a': 1, 'b': 7, 'c': 32, 'd': 10},\n",
- " 'total_count': 50},\n",
- " 1: {'pl_labeled': {'b': 7, 'c': 43},\n",
- " 'pw_labeled': {'b': 10, 'c': 40},\n",
- " 'sl_labeled': {'a': 3, 'b': 21, 'c': 24, 'd': 2},\n",
- " 'sw_labeled': {'a': 13, 'b': 29, 'c': 8},\n",
- " 'total_count': 50},\n",
- " 2: {'pl_labeled': {'c': 20, 'd': 30},\n",
- " 'pw_labeled': {'c': 16, 'd': 34},\n",
- " 'sl_labeled': {'a': 1, 'b': 5, 'c': 29, 'd': 15},\n",
- " 'sw_labeled': {'a': 5, 'b': 28, 'c': 15, 'd': 2},\n",
- " 'total_count': 50}}"
- ]
- },
- "execution_count": 15,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "fit(df)"
- ]
- }
- ],
- "metadata": {
- "anaconda-cloud": {},
- "kernelspec": {
- "display_name": "Python [default]",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.6.5"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 1
-}
diff --git a/machine_learning/Random Forest Classification/Random Forest Classifier.ipynb b/machine_learning/Random Forest Classification/Random Forest Classifier.ipynb
deleted file mode 100644
index 7ee66124c371..000000000000
--- a/machine_learning/Random Forest Classification/Random Forest Classifier.ipynb
+++ /dev/null
@@ -1,196 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "code",
- "execution_count": 1,
- "metadata": {},
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "C:\\Users\\Satyam\\AppData\\Roaming\\Python\\Python35\\site-packages\\sklearn\\ensemble\\weight_boosting.py:29: DeprecationWarning: numpy.core.umath_tests is an internal NumPy module and should not be imported. It will be removed in a future NumPy release.\n",
- " from numpy.core.umath_tests import inner1d\n"
- ]
- }
- ],
- "source": [
- "# Importing the libraries\n",
- "import numpy as np\n",
- "import matplotlib.pyplot as plt\n",
- "import pandas as pd\n",
- "from sklearn.model_selection import train_test_split\n",
- "from sklearn.preprocessing import StandardScaler\n",
- "from sklearn.metrics import confusion_matrix\n",
- "from matplotlib.colors import ListedColormap\n",
- "from sklearn.ensemble import RandomForestClassifier"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 2,
- "metadata": {
- "collapsed": true
- },
- "outputs": [],
- "source": [
- "# Importing the dataset\n",
- "dataset = pd.read_csv('Social_Network_Ads.csv')\n",
- "X = dataset.iloc[:, [2, 3]].values\n",
- "y = dataset.iloc[:, 4].values"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 3,
- "metadata": {
- "collapsed": true
- },
- "outputs": [],
- "source": [
- "# Splitting the dataset into the Training set and Test set\n",
- "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 4,
- "metadata": {},
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "C:\\Users\\Satyam\\AppData\\Roaming\\Python\\Python35\\site-packages\\sklearn\\utils\\validation.py:475: DataConversionWarning: Data with input dtype int64 was converted to float64 by StandardScaler.\n",
- " warnings.warn(msg, DataConversionWarning)\n"
- ]
- }
- ],
- "source": [
- "# Feature Scaling\n",
- "sc = StandardScaler()\n",
- "X_train = sc.fit_transform(X_train)\n",
- "X_test = sc.transform(X_test)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 5,
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "[[63 5]\n",
- " [ 3 29]]\n"
- ]
- }
- ],
- "source": [
- "# Fitting classifier to the Training set\n",
- "# Create your classifier here\n",
- "classifier = RandomForestClassifier(n_estimators=10,criterion='entropy',random_state=0)\n",
- "classifier.fit(X_train,y_train)\n",
- "# Predicting the Test set results\n",
- "y_pred = classifier.predict(X_test)\n",
- "\n",
- "# Making the Confusion Matrix\n",
- "cm = confusion_matrix(y_test, y_pred)\n",
- "print(cm)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 6,
- "metadata": {},
- "outputs": [
- {
- "data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYQAAAEWCAYAAABmE+CbAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJztnXuYHGWV8H+nZ5JJSGISBsgFCMl8kiEKGgTRIHyJIIgX\nFhV1wairLkbddVXQ9ZZlvaxZddeV9bJ+bgR1lSwoImoQVIhMBI0gYDRiQsAAAZJMyECGTEg6mZnz\n/VHVmb681VM1VdVVPXN+z5Mn3dXVVeft7jnnfc857zmiqhiGYRhGIWsBDMMwjHxgBsEwDMMAzCAY\nhmEYPmYQDMMwDMAMgmEYhuFjBsEwDMMAzCCMCURkiYg8lrUczULan5eIfF1ELi97/h4R6RaRPhFp\n9//vSPB+R4rIJhGZmNQ1q65/v4icmfS5WSAed4vICVnLkgVmEDJCRB4WkX3+H/8OEfm2iEzOWq64\niIiKyF5/XH0isrvB9w+lzEXkNBG5SUR2i8iTInKXiLy9ETKq6rtV9V98OcYBXwTOVdXJqtrj/78l\nwVt+FPi2qu4TkfvKvpsBEdlf9vzjIxxPp6renvS5jUBErhaRT5aeq7cx64vApzITKkPMIGTL+ao6\nGVgInAx8LGN5kuL5vlKbrKrTor5ZRFrTEKrs+ouAXwJrgWcD7cB7gFeked8AZgATgPviXsj1uYlI\nG/A3wNUAqvrc0ncD3A68t+y7+tcw1xwD/Ag4V0SOylqQRmMGIQeo6g7g53iGAQAReZWI/F5EnhaR\nR8tnMSIy15+J/42IbBWRXSKyvOz1if6K4ykR+TPwwvL7icgCEenyZ8f3ichflb32bRH5mojc7M8a\nfy0iM0XkP/3rbRKRk0cyThF5p4g86M/IfyIis8teUxH5exF5AHjAP3aCiNzin3+/iLyx7PxXisif\nRWSPiDwuIh8SkUnAzcDsslnv7BpB4N+B/1HVz6vqLvW4R1Xf6DgXEfmoiPzFv9efReS1Za89W0TW\nikiv/z18zz8uInKFiOz0v8MNInJi2Wf8GRGZD9zvX2q3iPyy7LN4tv+4TUS+4H/P3eK5myb6ry0R\nkcdE5CMisgP4lkP8FwG7VTWUC0xELhGRX4nIl0XkSeCfROR4EbnN/x52ich3RWRq2XseE5El/uPP\niMg1/sx7j4j8SUReMMJzTxWR9f5r14rIdeV/B1Vyz/flLn0P/1v22nNE5FZf/k0icqF//O+AvwY+\n7v9WbgBQ1WeA9cA5YT6zUYWq2r8M/gEPAy/zHx8DbAC+VPb6EuAkPKP9PKAbeI3/2lxAgW8AE4Hn\nA0Vggf/65/Bmf4cDxwJ/Ah7zXxsHPAh8HBgPnAXsATr9178N7AJOwZu5/hJ4CHgr0AJ8BritzrgU\neLbj+Fn+dV8AtAFfAX5V9b5bfJknApOAR4G3A614K6hdwHP887cDZ/qPpwMvKPvcHqsj32HAAPDS\nOudUXAN4AzDb/y7+GtgLzPJfuwZY7r82ATjDP/5y4B5gGiDAgrL3fBv4TNV32er6DIErgJ/4n8sU\nYDXw2TI5+4HP+5/pRMdY/h74acA4u4BLqo5d4l/zPf73PRGYD5zt/16OAn4NfKHsPY8BS/zHnwH2\n+eNvwTO+d0Q91x/PY8B78X6zbwAOAp8MGMt1wEfKvoeX+McnA4/j/X5b8X7XPQz93q92XRP4GvBv\nWeuJRv+zFUK2/EhE9uApvp3AJ0ovqGqXqm5Q1UFV/SOe4llc9f5Pqeo+Vf0D8Ac8wwDwRmCFqj6p\nqo8CXy57z4vx/kg+p6oHVPWXwI3AxWXn3KDejHk/cAOwX1W/o6oDwPfwlHM97vVXH7tFpHTvpcA3\nVfVeVS3iuccWicjcsvd91pd5H/Bq4GFV/Zaq9qvq74Hr8RQDeMrhOSLyLFV9SlXvHUamEtPxlMb2\nkOejqtep6jb/u/ge3grmtDI5jgNmq+p+Vb
2j7PgU4ARAVHWjqoa+J3irDGAZcKn/uewB/hW4qOy0\nQeATqlr0P7dqpuEZ/ChsVdX/p6oD/u9rs6qu8X8vO/GMVPVvsZy1qvpz//fyXcpWvhHOfQkwqKpf\nVdWDqnodnoEN4iCecZ3lfw+/9o9fAGz2f7/9qnoPnkvo9cN8BnvwPrsxhRmEbHmNqk7Bm+mdABxR\nekFEXuQv058QkV7g3eWv++woe/wMnqIHbzb7aNlrj5Q9ng08qqqDVa8fXfa8u+zxPsfz4YLfL1DV\naf6/95Xd95AcqtqHN1Mrv2+5zMcBLyozLLvxjMpM//ULgVcCj/gum0XDyFTiKTwlOivk+YjIW33X\nRUmOExn6Lj6MtwK4Szz32zv88f0S+CrwX8BOEVkpIs8Ke0+fI/FWNPeU3ftn/vEST/iGO4in8AxT\nFMq/B8RzGX7fd809jbfCqf4tllP9u5w0gnNn460QAuWq4oN4K4m7fffc3/jHjwNeUvU7+muG//6n\nAA1NiMgDZhBygKquxfsj+0LZ4f/FcxUcq6pTga/jKZ4wbMdzFZWYU/Z4G3CsiBSqXn88othR2Yb3\nxwmA7+9vr7pveendR/Fmj9PK/k1W1fcAqOrvVPUCPBfGj4DvO65Rg3r+4XV4BmVYROQ4PNfce4F2\n9YLkf8L/LlR1h6q+U1VnA+8Cvlby/6vql1X1FOA5eG6XfwxzzzJ24Rng55Z9BlPVCwgfGtIw1/ij\nf+8oVF/z83guyZNU9VnA2wj/Wxwp26mcLEDlb7oCVd2uqpeo6iw8N9lKEZmH9zta4/gdvbf01oBL\nLsBbdY8pzCDkh/8EzhGRkttnCvCkqu4XkdOAN0W41veBj4nIdBE5BviHstfuxJuJfVhExvkBvvOB\na2OPoD7XAG8XkYXiZb78K3Cnqj4ccP6NwHwReYsv5zgReaF4AfHxIrJURKaq6kHgabxZP3irmfby\noKeDDwNvE5F/FJF2ABF5voi4PoNJeErjCf+8t+OtEPCfv8H/jMGbjSsw6Mv6IvHSSvcC+8tkDIW/\nivsGcIX4GS8icrSIvDzCZe4CpolItXKNwhS8MfSKyLHAh2JcKyx3AK3i7dFo9QPBpwSdLCJvLBvj\nbrzvYQBvUvVcEXlT2e/oNBHp9M/tBjqqrjURz3V1a8Jjyj1mEHKCqj4BfAf4Z//Q3wGf9mMM/8zQ\nDDgMn8JzzzwE/ALPN1u6zwE8A/AKvBno14C3quqmuGOoh6reClyOFwfYDvwfKn3h1efvAc71z9mG\n51ooBU8B3gI87Lsw3o3nTsIfxzXAFt9FUJNlpKq/wQtyn+Wf9ySwErjJce6fgf/AW1V04wX6f112\nyguBO0WkD0/5vF+9PQTPwlPmT+F9Fz14QdOofAQvCeC3/lhvBTrrv6VC/gN4q883j+DeJT6BFzPp\nxRvj9TGuFQo/zvRavO/2Kby42E14KxUXLwJ+JyJ7gR8Cf6+qW1W1Fy9o/Wa8390O4LMM/Y6uBJ4v\nXgbdD/xjrwFuUdVuxhiiag1yDGM0IyJH4mWdnRwQeG4KROQe4D9V9bvDnjzyewjwO+Atqroxrfvk\nFTMIhmHkEt+duRFvdfU3eNly8/xMJyMFxuIuRMMwmoMFeGnOk4C/ABeaMUgXWyEYhmEYgAWVDcMw\nDJ+mchmNmzJOJxwxIWsxDGPU0Ffs45Q9yRbZvWdKHy2FFiaOS6XatjEC+h7u26WqRw53XlMZhAlH\nTODUT56atRiGMWpY+1AXd69N9m9q3JldTJ40hYUz61WsMBpJ19u6Hhn+LHMZGYZhGD5mEAzDMAzA\nDIJhGIbh01QxBMMwjCyY3DKZi+ZcxKyJsyjkdB49yCDb923n2q3X0jfQN6JrmEEwDMMYhovmXMSJ\nx5xI25Q2vOoW+UNVad/TzkVcxJUPXTmia+TT1BmGYeSIWRNn5doYAIgIbVPamDUxdKuPGswgGIZh\nDEOBQq
6NQQkRieXSyswgiMgEEblLRP7gd5r6VFayGIZhGNmuEIrAWar6fLxmFOeJyIszlMcwDCPX\n3L7mds578Xmc+8JzWfmllYlfPzODoB6lUPg4/59V2jMMw3AwMDDApz/6ab5x7Te48dc38tMbfsqD\n9z+Y6D0yjSGISIuIrAd24nUoutNxzjIRuVtE7j6452DjhTQMw4jIlB+spuPks5h/1AI6Tj6LKT9Y\nHfuaf7z3j8yZO4dj5x7L+PHjeeVrXsmam9ckIO0QmRoEVR1Q1YXAMcBpInKi45yVqnqqqp46bsq4\nxgtpGIYRgSk/WM3Myy5n3GPbEFXGPbaNmZddHtsodG/vZtbRQxlEM2fPpHt7sl0+c5FlpKq7gduA\n87KWxTAMIw5HrriCwr79FccK+/Zz5IorMpIoPFlmGR0pItP8xxOBc4BUG70bhmGkTevj2yMdD8uM\nWTPYXnaNHdt2MGPWjFjXrCbLFcIs4DYR+SNeU+tbVPXGDOUxDMOITf/R7o1hQcfDctLJJ/HIQ4/w\n2COPceDAAW760U2cdd5Zsa5ZTWalK1T1j8DJWd3fMAwjDZ5YfikzL7u8wm00OHECTyy/NNZ1W1tb\nufyzl/O3b/xbBgcHufDiCzn+hOPjilt5j0SvZhiGMcbZ8/rzAS+W0Pr4dvqPnsUTyy89dDwOi89Z\nzOJzFse+ThBmEAzDMBJmz+vPT8QANJpcZBkZhmEY2WMGwTAMwwDMIBiGYRg+ZhAMwzAMwAyCYRiG\n4WMGwTAMo0n4+Ps+zukLTuf8M9PJYDKDYBiG0SS89qLX8o1rv5Ha9c0gGIZhJMzqzas563/OYsF/\nLeCs/zmL1Zvjl78GeOHpL2Tq9KmJXMuFbUwzDMNIkNWbV3P5bZezv98rXbGtbxuX33Y5AOfPz/dm\nNVshGIZhJMgV6644ZAxK7O/fzxXrrPy1YRjGmGJ7n7vMddDxPGEGwTAMI0FmTXaXuQ46nifMIBiG\nYSTIpYsuZULrhIpjE1oncOmieOWvAS5bdhkXv+JiHnrwIRY/bzE/uPoHsa9ZjgWVDcMwEqQUOL5i\n3RVs79vOrMmzuHTRpYkElL+48ouxr1EPMwiGYaRCd183W57aQnGgSFtLGx3TO5gxOdmWj3nl/Pnn\n5z6jyIUZBKOpGQ1KZzSMoZpif5H7e+5nUAe95wPec6DpxzaaMYNgNIykFV93X3fTK53RMAYX+/v3\no2jFsUEdZMtTW5pyXIMMoqqISNai1EVVGWRwxO83g2A0hDQU35anthy6Xol6SiePM/GoY2gWqo1B\nieJAscGSJMP2fdtp39NO25S23BoFVaW4p8j2fSNPbzWDYDSENBRfkHJxHc/rTDzKGPLKqqO6Wd6x\nha1tReYU2xgQEMRpFNpa2jKQMD7Xbr2Wi7iIWRNnUchpcuYgg2zft51rt1474muYQTAaQhqKr62l\nzfl+l9LJ60w8yhjyyKqjulnWeT/PtHif7SMTiqAwTloZYKDiMy9IgY7pHVmJGou+gT6ufOjKrMVI\nnXyaOmPUEaTg4ii+jukdFKTyJxykdKIapO6+btY9uo6uh7tY9+g6uvu6RyxnPaKMIY8s79hyyBgc\nQqBf++ls7zz0/ba1tNHZ3tnUbrCxgK0QjIbQMb2jwmUD8RVfSbmEiQtEmYk30r0UZQx5ZGub26Aq\nyozJM2rGkXUcJ+v75x0zCEZDSEvxuZSOiygGqdHupbBjyCNzim2em6gKoTbwmnUcJ+v7NwNmEIyG\nkaXii2KQkoh3jJWZ6IotHRUxBAAUJoybUHNu1nGcrO/fDJhBMMYMYQ1S3EBv081Eu7thyxYoFqGt\nDTo6YEY4OZfu9M4rzzLaOr5IW2vtZ5V1RlXW928GzCAYRhVx4x15n4mufajr0OOLNwD33w+DvrzF\novccIhmFkmEAGHdml/O8rDOqsr5/M5CZQRCRY4HvADMABVaq6peykscw
SsSNd6Q5E03KFTW4ohXO\nOAPWrYPBKrkGB70VQ0iDEJY0Egua6f7NQJYrhH7gg6p6r4hMAe4RkVtU9c8ZymQYQLx4R1oz0SRd\nUYXl/UAX/V1wzUmw/GzYOhXm9MKKNbB0QzrGq7O9M7PYSrNndDWCzAyCqm4HtvuP94jIRuBowAxC\nEzFag6dxxpXWTDQpV9TieUsOPf7yaV0sfxk8M957/sg0WHY+PDERLlvcFep6g2uX1BwLKm7X2d7J\nomMXhZY1aZo5o6sR5CKGICJzgZOBOx2vLQOWAbS1m68vTzRd8DQkcceV1kw0DVfUJ89t5ZnW/opj\nz4z3ji+ed8aw7y+PR5Qz2orbjRUyNwgiMhm4HviAqj5d/bqqrgRWAkyZN8VdMcvIhCRmrFFm4o1a\njSQxrjRmomm4onqrjMFwx8My2orbjRUyNQgiMg7PGKxS1R9mKYsRnbgz1igz8UauRqKOa/OuzWzr\n23bo+ezJs5l/xPxEZYJ0XFFRjMwdW+9wX6QqbfWiabBq4egqbjdWyDLLSICrgI2qmm5fOCMV4s5Y\no8zEG5nKGWVc1cYAOPQ8jlE4+zfdXHL9Fo7qKbKzvY0rL+xgzenJu6LaJ7bXyF86Xs7ah7poGYTJ\nByrP++BvqElb/fpP4dEjW7n9mOSL243WmFVeyHKF8BLgLcAGEVnvH/u4qt4U9Ia+Yl+gz9JoPAoU\nCoUR/9FHmYk3clNRlJm4S5mWjo/UIJz9m24+9O37mXDAu//MniIf+ra3GlpzerKuqJ59Pc7j2/Zs\nY/ueyrEd/KyfqlrOXbVpq5MOwneu6+e8z5xgDZGajCyzjO4AR8GTOpyyZzJ3rz01JYmMqBQWd8VK\nI4wyE2/kpqKs0xPf/L2NTKiaiU84MMhbv7/p0CqhnOpZc7G/GPiHtXjekopJlULgX2FN9pArxlx0\nG+RjdruL28Uh7xv+RgOZB5WN5ibOH32UmXijNxVlmZ44p9d9/JjdtT5516wZPEUft69XoU7a6SFj\n0dbmNAqPTUu+q5iVnkgfMwhGZkSZiWc9aw9i9uTZTrfR7MmzR3zNrVPh13NqN4ud+WitknXNmhFv\n5RSU71++D+GOrXfQP1ibUdTa0soZc9xppxVu246OyhgCsHccfPrltcXt4mKlJ9LHDIKRKVFm4nnc\nVFSKEySZZfSmC2H9TNhXtlnsnefDq/bOqjk37qzZZQzqHS9RvnoY/P6Ciiyjd7+iyI0nt7EwlATh\nsdIT6WMGwTBiMv+I+Ymmmd47r3YmvG88rJ7YQ/WcP+6seSTvL19hrH2oy6t5VFb36NqTupgc6u7R\nyOsqcTRhBsEwckaUWX/cWXOzzbrzuEocTZhBMMY0ecxrjzJrjztrtlm3UY4ZBGPM0t3XzaZdmw7t\nqC0OFNm0axOQbV571Fl73FlzXmfdeTTWox0zCMaY5YEnH6gpr6AoDzz5QKaKZzTM2nv37XZuIi2P\nP9TDNqFlgxkEI3GaZWY30gybRpDXWXsYDt6+xHm83r6GamwTWjaYQTASZSzO7JrFADYTtgktGwpZ\nC2CMLurN7PJGi7REOu6iZABLiqpkALv7uhORcawSlPZqm9DSxQyCkShp9xNe9+g6uh7uYt2j62Ir\n3fnt7r0DQcddNJMBbCY6pndQkEr1lOd02NGCuYyMRGmGfsIlkgjejgbXRh5dXqMhsN6MDGsQROQf\ngKtV9akGyGPkmapGKBcfDtfQVXGKq3pm1JmdS0HlNcjY7PV18hzzaebAerMSZoUwA/idiNwLfBP4\nuapaK8uxRnd3TSOUVT8qsGpjZ0XZgnFndjGubSKDOjiimV2Qgqop4OYTZyaehDLM607fsLP+vBpa\nIxuGNQiq+k8icjlwLvB24Ksi8n3gKlX9S9oCGjlhy5aKipaA93zLlgqDANDW2sbCmeFKm1V3Bjvh\nPQMMttYqqCCiBICrSap3culaeXFt
RDF0o8HlZSRHqBiCqqqI7AB2AP3AdOAHInKLqn44TQGNnBDQ\nCCXweAhcncGejqjfvU6sIyMpZZg310YUQ9fsLi8jWcLEEN4PvBXYBVwJ/KOqHhSRAvAAYAZhLBDQ\nCIW2WsURtEu1mm99j5rOYHN6vXLPYYmziWy0KsNGFseriyPmtG1e/Msa6RFmhTAdeJ2qPlJ+UFUH\nReTV6Yhl5A5HIxQKBe94GUG7VJ30dtUcWrEGlp0Pz4wvu40UKEjBqfyn9rdyzQfXOZvRD0dUZZjH\nbBwX9Qydawxx2qAG4og5rVwNVxzRHfr7MRpP3X0IItICvL7aGJRQ1Y2pSGXkjxkzoLNzaEXQ1uY9\nnxHjj9uxuli6AVbe3MJx+9tAPSXW2d7J8YcfX5OX3jIIX1zdz8yeIgU8l9Ol39rIMavXhhvS5Bl0\ntnceWhGU7uVShs20AS0oh799YrtzDACLjl3EkrlLWHTsomSMnCPmNOkgXHK97c/IM3VXCKo6ICJ/\nEJE5qrq1UUIZOaWqEUpYgmrYXHw4rFztKYoSe8fBTfMG2No2gEBNG8jymeznbiryjj9UXnPSQfjM\nrcorXhpuNh/W/99M2ThBge6GjiEgtnRUjwWr80wYl9Es4D4RuQvYWzqoqn+VmlTG6GD9eujrg8Xu\nKpfb5nkuhPIsoysv7GDb6TNY7LhctfJ+311dztvePofI6aTrd6yn70Bf4FD6B/qdXeuL/flUcC5D\nt3GXe0GfSkZRQMxpZ3tzx2dGO2EMwqdSl8IYlRTev3vYc9acPmPEPuWd7W3MdMw4P3ZObarqcDPh\n3n27mbo/+F6TDsLjz6o9fvSeSCJnSkOD6I6Y095xcOWFVnoiz4TZhxDOIWsYDsLWvx8JV17YUZG2\nCrB/fIHHpoxsE9tTdy4JfG1VT1dNsPuwA/C5W+Cq50USOzMauomu5FosyzJa9qoi2yygnGvCpJ2+\nGPgKsAAYD7QAe1XVMV8yjMZRWllUu5zaWrc4lf9hB+Bb7+9iTi9snQrLz4ZrTgp3r6Wb22B1keVn\ne++d0+tlRL1kK1wVcxyNyl5q+Ca6qpjTNSd1OV2BecjeyoMMeSCMy+irwEXAdcCpeHsSjk9TKGPs\nEPcP0eVy6uijZibcMugFsOf2es/n9uIsvRFIRwdL77ufpRsqVx9LXxdvXI2uJZS3TXR5qKWUBxny\nQtidyg+KSIuqDgDfEpHfpCyXMQZI6w/RNRP+3E1Flm6oOjGg9Ib7orUuEAoFrjlpHydUKf/2ie3s\n2Lsj1LiaKXspDfIw/jzIkBfCGIRnRGQ8sF5E/g3YDkxKVyxjLJDmH2LYjKTBYpHWCK0dh/BcUkpt\nRtO2vm219wkY11ivJZSH8edBhrwQxiC8BS9u8F7gUuBY4MIkbi4i3wReDexU1ROTuKbRPDTyDzEo\nI2lnexuL5y1yvCMc6x5dF1reoAyfpDN/6pUNSTPIH4ZqV1prodW5Az1o/Gn4+kdrCZORMGzHNFV9\nRFX3qerTqvopVb1MVR9M6P7fBs5L6FpGk9HINolXXtjB/vGVP/f94wux0yCjGC/XuNLqDDa4dknl\nvy9FKBCVEqXVVPlO6aBaVO0T22uOpbVb3LqzDRG4QhCRDfj9TlyoauxkO1X9lYjMjXsdozlpZBpk\nUEZS3Lo6QbPLaoLGlcfy2WlSr5R5OT37emqOpeVinDF5Br37eytcfTMnzRy130E96rmMclG4TkSW\nAcsA5jhq3xjNS6OVYZxNcEEEGbWZk2bSs68n1LjylvmTB1xGNqqLMUqm1469OyqO7di7g6kTpo65\n7yXQIAQVtGs0qroSWAlw6pQp1qltlBFFGeYxV3yszfAbhcu9FsXXHyWDzbKMhrCNaUZTkOdccZvh\nh6cghRrlKwha5p0Ocq9FcTFGUfKWZTTEsEFlvI1pF+M1w5kIXIJnIAyjYdT7AzeaA4GacuMLjljA\n
CUecEKoEeZRy5VGUfCOTG/JOphvTROQaYAlwhIg8BnxCVeNWAjBGITaLGx0Eraai9LAOc24U91JD\nazzlnEw3pqnqxUlcxxj9JJErnscYhJEOUZS8xYGGCLsxrUAKG9MMIyxxZ3FpxiByaWiq+hlTCPYO\n51L+mERV8hYH8ghT/voRABEZAH4CPK6qO9MWzDDKiTuLSyuTJI/B7os3UNPPuHS8usl9HuU3sqPe\nxrSvA19R1ftEZCqwDhgADheRD6nqNY0S0jAg3iwurRhEHlMWV6yhpp9x6fjbq/oc5lH+JDBDNzLq\nrRDOVNV3+4/fDmxW1deIyEzgZsAMgtE0RI1BhHWj5DHYPac3/PE8yp8Eo9XQpU29tNMDZY/PAX4E\noKo73KcbRn6JUq8mSs2cPKYsbp0a/nge5U+C0Wro0qaeQdgtIq8WkZOBlwA/AxCRVrz9CIbRNMyY\nPIOZk2ZWHAuqVxNlz0MeC6MtPxtnEHn52bXn5lH+JBithi5t6rmM3gV8GZgJfKBsZXA28NO0BTMy\npDpDpaMjuIlMlHMzJEq9miizyyRSFpPO8vHagg6yYg017UKrW1iO1pRL21swMurVMtqMozS1qv4c\n+HmaQhkZ0t1dm6FyvxeMq1H0Qef29kJPD/1dsLN9Xd2qomf/prumAikkX5U0ik85arwhTrA7jeDn\n4nlL2DavNoDs6mdcuk9Q0bdmNRSj1dCljag2T724U6dM0btPPTVrMUY369YdSlOsoK0NFi0Kd24V\ne8fBsvNrG9pfvMHrczzp4NCxYguowoRBx/ufJ5UXiPDbVfBqJzheqD7sPNe/VelwUo1mghrstLW0\nsejYkTfuiavMu/u62bhrY83x2ZNnV1RxLfYX0bVLKs6Z/qIueie4r5t1g56xStfbuu5R1WGVZ6jS\nFcYYIkjBu46HMAbgKfxVP21j1ZMOg3Kw8hptA+73/8fPYVtVOcXbrm6FM84IJcPcF97BI5Nqm7Ec\n90wrD/+u6hp33MFz3tXPlumegWobgKtWC0une3PswuKuiq5kYZWcS0mnEfxMYtWxuWez83h5z4CS\njKuO6mbpzqHr9o2HqROnsXDmwhHJb2SHGYQsyaP/vbUV+h1drFpba+VtaYEBhwZ3EcOgAMzcC7c9\nXOX0CGcLAFhxq7LsFfDM+KFjhx3wjlOdfXPGGfz5vqpj04ceDpZmxOvXU3j/7lD3D1LSUVtIhiGJ\nlMsBDfm9Cizv2FJhEIzmpd7GtMvqvVFVv5i8OGOIKL76RhLkhhkYqJVXXD6YAFzNjdrawhuFmM2R\nlv5+APoMAgRxAAAgAElEQVS94OrWqV6wdcUaWLphwCuvmDJBSlqQmpLQcYOfjU653NpmqZyjhXor\nhCn+/53AC/HKVgCcD/wqTaHGBFu21O4mHRz0jmdpEIJm/Kq1xkLVWzm0tAytGiZOhN2OWXN7bY9c\nOjoqjQx4Rqb6PoWCd24c2tpYuqHI0g21x+NSr6l9iaAYxsBg7ec9qINsemIjm56o9eGHxnGvKKuO\noJWLiznFtkirJSO/1Msy+hSAiPwCeIGq7vGffxK4riHSjWai+OobSZRZO3jupXI//h13uM/buROm\nTq11kXV21h6D5F1pLuMT19AsXMjg2nCnzn3xOh6ZUPu5Hlds4+Hfjjx47EIWd8VedRx/+PFs2rWp\nonFNdSMbABRWbOnAK4JsQeNmJ0wMYQ6Vu5YPAHNTkWYsEaR4s+4bHaQ4HbVxnLjiD6XjLhdZZ2dt\n9hIkv0oqXS+jmM2KLR0s67yfZ1qGPsfDBgq+Mk0WAQYHa91Tm57YGCqGcMfWOxgY6K9W/agoC45Y\nUBEYL/YX/fjB9sTkN7IjjEH4LnCXiNzgP38N8J30RBojRJ2xbt4M24YyPJg9G+bPD3+/sAHsIMW5\nMYb7okTWLrIZMzJzx5WCrss7trC1rcicYhsrtnSkEowdXLvEWf
668OF9nntLhMVzg3YleEzdD0/d\nueTQ85fOXcva4/SQG0uAA/1FZyZvPZp5b8NYIEz56xUicjNwpn/o7ar6+3TFGgNEmbFWGwMYeh7G\nKEQNYLsUZ0nOaqpXNFEyj0qyjBGW7pzRmGwc1/ddKDD4aYGWFgrL+7lj6x2cMSd8mtZtDy+Gh2OK\nZRVIc0/YtNPDgKdV9VsicqSIzFPVh9IUbEwQdsZabQzKj4cxCFED2K7VRHu7W47qYPH8+e7VRL10\n1tIGt7yk3oL7M4B4LqegVVrS6cdB37e/uXDq/i76Eul5GFEsq0Cae4Y1CCLyCeBUvGyjbwHjgKvx\nCt4ZzUCUAHZ3N2zaNJTpUyx6z4PYubPSKM2Y4ZWuqHZvTZ3qzijq7x8yFGmn3oZVvK4ZdvlnUi0r\nDH/d7u5KQ1kses97e2HHjnjpx9XjKhZZdZIrxTbb1ZhVIM0/YVYIrwVOBu4FUNVtIjKl/luMXBEl\ngP3AA+700iCqZ/3d3Z6CK2fHDs8gVGcU9ffXupfSiitEcZtt2cKq5w5WKVStTVkdHPTceaqB9ZwO\njfXAAZy4Vl1RPgPHuK4+Cd51/tAmvEemeaU/npgIly3uAqBl+CvXEpRBFnK3eBJ9sY10CWMQDqiq\nioiXSi2SwWJzjDN7tltxzJ4d7v1RAthBWUJhqeeeWrSoUsl1dbmvkUZcIYLb7Or5RadCBWqNgite\nMjhY+X2NZDxh3+MY1z+dXbkjG7znnzy3lcXzImzvLuOlc9eydrF7YhA29dYqkOafMAbh+yLy38A0\nEXkn8A7gynTFMioouWRGmmWUZsplS9VcM4p7KmjlkkZcIYJcHz3HrVCXn+0wCGkRNv3YIX9Qg5ze\n1pjG3pGdFGZTXok8VCC1LKf6hMky+oKInAM8jRdH+GdVvSV1yYxK5s+PlmZaTdgAdlCWkGsHcUmu\ncuq5p6p93e3tlf7z0n3SiCtEMD6PBzhEaxRtoQCFAqsW9Dv89SHlCvq8w26Yc4xrTq+3qqk5Na5r\nRjWSAXARp1R4XCzLaXjCBJU/r6ofAW5xHDMaRaMK4QVlCZ1wgvf/cDIEuafa22t9+Dt2wMyZlb72\ntOIKQVlSDuMzfR88dVjtqXP6WqCttWL8q+b0suyUbeHcS9WIeGPavr3S2EapEeX4vP/5Nnj3+XCw\n7K+7ZRCKWjyk0FtaWg+lnVbPmg+V0yj7zd1WioNUrwghUpHBtAgz87csp+EJ4zI6B6hW/q9wHDPS\nopGF8IZzLw13v6D3B/nwe3oqdyqnFVfo6Ql33uAgX7nZU+o1lVFvGazZVb385C3h3UsiMH58zeey\n6kStDWBvDmkAHZ/3O55op+3H22pXLf0LYMYMpr9oKO3UNWsGeP52nHsZOP74fKQFlxF25m9ZTsNT\nr9rpe4C/AzpE5I9lL00Bfp22YEYZ9QKipdeTXDkEuZei7HauPh600zmtjWmOVMywlBR5rRtIayqj\nBlX6dPrxVYfkKBbh4YdZNb9YYXwOrTBWF1kaVuDqz3vdOpZucxiktloj45o1Azx4BNH2rixcCAz1\niQjqh5CGDz/szN+ynIan3grhf4Gbgc8CHy07vkdVn0xVKqOSegHRRq0c4q5SGlm7ySVrRJZuCHD5\nlK9gZs9mzsnwiEP5H/4MzP3AMHGFfftYHpARtPxlsLSsHkC9LmSDVR3LogTQg2bH24ISy+t8loMr\nWnnpmwdYe5w7GyktH37Ymb9lOQ1PvWqnvUAvcDGAiBwFTAAmi8hkVd3aGBHHIFEa0TSqPlDcct1h\nU1+DxuryXUeRNSx+IT/3xq6qc7dtY8Wtte6l8f3wdBv0+G6ZenGFoIygrVXd4frGu89zEsH4Bs2a\nZ++pc20X69dTWN7vxz/EuToImslv7tkca9UQduafhyynvBMmqHw+8EVgNrATOA7YCDw37s1F5Dzg\nS3j7ZK5U1c/FvWbT45rduo
KM9SqQpuGGiVuuO2zqa1BANei4y40VdfwlBVoKFLdudLtxqFXoLvdS\n37ghY1AiKK4QlBE0p1ipzA7eviT8eCLsO3HNmgGevYva31iIcuH1iuYFzeQHdIABfxIwklVDlJl/\nlllOzUCYoPJngBcDt6rqySLyUvxVQxxEpAX4L7yg9WPA70TkJ6r657jXbmpcs1tXI5pSoLZRbpgk\nXD5hUl/rlc+uJsiNFVQ3KYiqQPHHF26MtA+h2r1U+IT7Nq7VwIo1sOw1heHLYq9fz/R31Tageeo/\nHH2lI+w7cc2aDwwc4A+z1N2rIsbKM2gmX03UzB+b+SdHGINwUFV7RKQgIgVVvU1EPp/AvU8DHlTV\nLQAici1wATC2DULQ7La6EQ3U1gwq4epOFpc0Gsy4iOIyCnJjiYTv4eBYeTwa5MYJOF5N4Ky/t/bY\n0g3Ags5hy2KP+4fdDBRq319Y3u/eKRyh1Hdp1rz2oS4O9Jf9/kZQLrxeUDloNeIiauaPzfyTIYxB\n2C0ik/HaZq4SkZ1AzC2PABwNPFr2/DHgRdUnicgyYBnAnKybxzSCKDPxoFTKsCmWUWhUg5koLqMg\n4zkwAAsW1G6CcxnP0v6KMuY808ojk2p/4i6FzsSJsG9fxaEVa2DZX8Ez44aOHXZQWLHGEWxdsCBU\nWexILqMY1ASow1LWPW7cmV3OU1wz+QEdcLbqtMyfbAhjEC4A9gOXAkuBqcCn0xSqHFVdCawEOHXK\nlDpV1kYJUWbiUauYxlXmjWgwE8VlVM94umR1tfB0jGfFI8ezbP4mnmkd+rkd1i+suGcqUOa2KZUP\nqepXsbRnNjwwtXbW34+X+pm3Ut8NpHomX515BJb5kyVhSlfsBRCRZwGrE7z348CxZc+P8Y+NbaLM\nxMOuJhq5sS0uUVxGKbmxArub7QLa9g19L1N9H5KjrMjSDd0s/TFQBNqADoINatxueGnRgN3x5v/P\nF2GyjN4FfApvlTCI1z1P8X7icfgdcLyIzMMzBBcBb4p5zdFB2Jl4WIUYN2W0kURxGUUxnhGNYo0b\nJ8r7o5wbtxteWjRwEmH+//wQxmX0IeBEVd2V5I1VtV9E3gv8HC/t9Juqel+S9xj1hFWIcVNGG0kU\nlxGEN55BRvGBB8IZlChGNcq94nbDS4tmmkQYiRHGIPwFeCaNm6vqTcBNaVx7zBBGITZyl3BUqt0S\nQSmjcWWtl70VprJqFKMa9V55pJkmEUZihDEIHwN+IyJ34nlEAVDV96UmlZEsjUoZHY7hyl+XlE11\nqe0kZA1bzyhoFhylrHfYfRAj3U3dCPI8iTBSI4xB+G/gl8AGvBiC0Ww0KmW0Hi6fdJC7pFDwlGoY\nWcMGPoPSTl24FOHEie7jhULsuklOwnbDS4u8TCKqsAY36RLGIPSr6mWpS2KkSyNSRku4smZ6esLP\niAcG4Mwzhz8vaqA3LK6Mpt21u4SBmj0IkXHtkUgiyyhuhlAeJhFVWIOb9AljEG7zN4etptJlZBVP\njVrqZc2EJWwLzSiBz6DigC6iNKiJS3t7/G541cTIECos7gJg8SPCbSwObwDWr6fw/gCjmRDW4CZ9\nwhiEUirox8qOJZF2aowGYvQdcBKlhWZagc/+/tpxpUUau8pHaYaQNbhJnzAb0+Y1QhCjCYnad8BV\nPTNOC820Ap8tLenEBVzkrDJtmqUr4mINbtKnXse0s1T1lyLyOtfrqvrD9MQyYhHFfxzH1xy170CY\n6plRWmimEfgsFLxVShQXUxxSWH08PBXmOuouPTwVOnyX0NTxk53vLbmMpu6Hp+5ckrhscbAGN+lT\nb4WwGC+76HzHawqYQcgjae2odRFldjt7dvJ7JqIEPqtTWYOYOTNazGPaNHj66ZGlkJaMV8KlK5af\nDVfdWGDCgSGZ9o8vcPVfd7J4XvDnv3jeEgDu2HoHydSvTBYrc5E+9Tqmlaq6f1pVHyp/zS83
YeSR\nJHbUhvU1B9Udqla+URRc1Fl/2OyplpZwewN27Kjfoa6afftqVz71DGVVMx5nCfOYpSuuOQkWHNHJ\nJddv4aieIjvb27jywg7WnN78itPKXKRLmKDy9cALqo79ADgleXGM2CSxozbszD8oG6elpbZ3Q1jS\nSncM2zBncNDLcgrbT6FYrDVK69e701SnTfOb0ZexcaP7ujFLV6w5fcaoMABGY6kXQzgBr03m1Ko4\nwrPweisbeSSKyyVqULY63hC17lBYGrlnwkV/v7eqKZ+5B7mcXJ/VwoW1RmHaNJg1qzad1jByRL0V\nQifwamAalXGEPcA70xTKiEEUl0uUc6NkFDWyvEEaJZpFPNdRmPOClHr1SqBevKaBpLbTtwH7EIz0\nqRdD+DHwYxFZpKrrGiiTEYcoLpco54bNKGpkeYMoQfEoeyRUa1cDrtVBmCB1iaB4TRApbI7r7utm\n464hF1VxoHjoeVJ++VJg2mhOwsQQXisi9wH7gJ8Bzwc+oKpXpyqZUUmUmXAUl0vYc6MEShvl7okS\nFI9SyygKDzyQfEYWOFt7uiiliYZhc8/mwOPlBmHy+Mn0DuyOdG0Whz/VyC9hDMK5qvphEXktXt/j\nNwC3AWYQGkUeOp7VizcsWtQYGaqJEhRPY0cwhI+X1Pv8Ojpiub3CzsoH1J05VX184cyFzvOM0U8Y\ng1BqFf4q4BpVfVIaWevFyEcpgiQ2gSXt748SFI86Qw+bZRSWep9f1kH0UYpVRo1OGIOwWkQ24bmM\n3iMiR+K10zQaRR6alcRNB01jlRPFSEWJIbhm7QcOuGMGrsqoLvJQPVTxGuC6jg/D+h3r6d0XPmic\ndSwhamVUMx4eYWoZfVRE/g3oVdUBEXkGuCB90YxD5KVZSZyZbBqrnChK1mU8XKmkQbP27m73noEs\n21z6hFVm0/fBU4fVvn96iArevft2M7iiNdT+knFndrF+x/pMXU9RKqNaWe0h6u1D+LCq/pv/9GxV\nvQ5AVfeKyHLg440QcFQT1oWS02YlgbjGldYqJ6yRmjGjdlfwrFkwdWryGVkuUooDKYRWZl++Gd5x\nARws+6sf1+8dv+o5IW+YRppvCkSpjGpltYeot0K4CCgZhI8B15W9dh5mEOIRRUHkwd0A4ZRB0LjS\n6pUcJFNQu85yduzwDELYoHjeVkily4RUZi/b1sa3flxk+dmwdSrM6YUVa+DsbW1cFeZGAwPZJzeE\nJKgyKgprH+qqPBQQEh2LZbXrGQQJeOx6bkQlqoLIOvAY1oAFjSsoQDtxYvIy9fbW9mp2pZwmFZgP\nU5yuwXGgkjIrV34fXOwVvVu6obLo3Rfe1lFzrhPX/gzHZzj5APQWdg9/vRQ5JKVUHrz6h7B081Ca\n9FteUWTVQkEdgZSxWFa7nkHQgMeu50ZU8hAojkJYAxZV/qDWlHFkirLfIO7nXa9DXLlRaHAcqFyZ\nDa5dcmgnsavo3f8+D7Y8ug4Fjiu2sWJLB0t3uoxkl/tmVeMKLJu9fv0IRjJyVnUWWf6S/WydoszZ\nI6zoamXpfQMw6MtbLPL1n8Ldc4RNh9eqtPaJ7Q2VNw/UMwjPF5Gn8WzsRP8x/nOrZRSXvASKwxLW\ngCXRNS0sSdwn7ucdZHyqi9OlGAcqSKFuj4DC4q5DG8eqi95VBFQFHplQ5M0LNvLmBRtr3ABbfu/u\ns0BbW03pClejnSxLW2ydorzkLwehav4w6SDsaXWvXnv2pbR3JcfUK10RMp/OGBHNFigOa8CCxpVk\nTn9S1KtFlDRJxYGqYiNvOhx+f1ZnYJbRcOmfroAqwqHrlF/3H9/Wznf/346KPgt7x8G7X1Fk1fOL\nh+639qEuCou7aHF85VHSUZNOBZ3T2+U8vm2K+3yLIRiNIy+B4rCENWBB43LV/QfP354G1UbIlWIa\npRZREsSNAzliJitXwxVHwJrTR7ZbvF42TnX20g8P3wHv
mclXru6pcDnd2LGdqQztcF48bwnrd6yn\n2F9kf/9+FEUQTjgiXDkOSCcVdGd7GzN7asd79B547Fm151sMwWgsWQeKoxC1aF5QplSCncEilYM4\ncMB9jc2b430H1WWyy48njSNmMukgXHL9lhH3PgjMxsGdvbT6iB52/Uel8VlI7b1nTZ7F/T33HwrW\nKhpJoaeRCnrlhR186Nv313SSe/HATH4oO6w1J2YQjCiENWBBqaDz5ye7kStKOYigXs1xeyeXxpOk\noQsiIGZylGPWG5agPsU1bqSSCCHdKHEVepR9BGEpGc3qoPquk2fQ2TfVdiqTkUEQkTcAnwQWAKep\n6t1ZyNFUNMmGoIYW4suL2y1pQxdEwIpoZ/vIXRtBfYpLz6tpLbSy7tF1wyrOuAo9aOUS140T1EnO\nWnN6FDK675+A1wG/yuj+zUVJyZaUQUnJdndnK5eLeumpaTBjhrexbMkS7/8gY9AaMPcJOp5HOjq8\nFVAZe8d5rpDEbzW9g4JU3ksQ+gf7Dynqkl+/u6/2dxikuMMqdNf9x6obp5Fk8tegqhsBrGpqSPJQ\n7TQsed1fcfzxsGlTZSBZxDveLDhWRMteVWRbhPhBdeZO+8R2duzdURO87WzvpLO9Mnupf7C/plR2\nkBsoyBUVVqEHrVzSmsVbcTuPJpoejWHyqmRd5HV/RZB7CWr7HLtKX+TFRVcVG7nmpK7QvWlcmTvb\n+moD4iUlv+jYRRVKsevhLud1Xa6dJBR6FDdOHIVuxe2GSM0giMitwEzHS8v99pxhr7MMWAYwJ2ul\nkhVpKtkoii/MuR0d7pl4HvZXuCqYhi19kdOaPUG4FKRzz0EAQf77KH79Rvnl4yp0K243RGoGQVVf\nltB1VgIrAU6dMmVslsxIaxNblABwPeXZ01NZRK6R+f5xZvJRSl/k1UXnIEhBhjUGQcR1A6VFHjOa\nmhVzGTUDaWXTRIlNhFGeQUXkSu9PWpnGzWiK6nKL66JrkBsqSEHGpdF+/bDkNaOpGckq7fS1wFeA\nI4Gfish6VX15FrI0DWlsYosSm4irDNOId8QNtketuxTHRdfAdNwkZrYt4q5ck8f0zCgK3eVKy+vK\nJwuyyjK6Abghi3sbZUSJTcQtWtfWlvwMOW6wPcgVN3NmZQyhdDyOi66BmWL1dh9HoVkyb8Iq9CBX\nmiujKq9jTRtzGY1GonRiCxsAdinPsBQKXmwh6RlyUNOdoL0Frs+ls9P9WYXtpBaWBmaKtU9sd2YP\nRWFAB5om8yasK6terKE6o2qsYgZhtBHVNRE2AOyKY5S6kLlm2OWB5lJdoaRnyEGyuo4HfS6dne6O\naUm76FLMFKueyVfvFRgpjcq8adRKxILHw2MGIY+kkTkTFCgOukbYonVhZ9KuBvUQb4YcVIfIdTzr\nzX0pZYq5eiqnSdLXT2IPQNhrWPB4eMwg5I20MmeiBIqjKOmwM+k0ZshRrpn15r4U6y6FzSBqkRYG\nddDZLtJ1rmulkbTyTGIPQNhrWPB4eMwg5I2gmezmzeGUSRKB4jQ2AKYxQ262JkMZljsXBBFBHe60\nFmmhtdBa4bIBQivPOC6fJNw49a5RXYjPgsf1MYOQN4JmrAMDQ66QequGKEqyvd29b6A9hV6yacyQ\n81LttAlQlP5BRwAeL4B85rFnOl8bTnnGdfkk4capl1VVXYivs72TRcc6YkYGYAYhf4RN7wzyf0dR\nkj0BPWO7u2uDwkko2TRmyM3UZKjJCLPnIK7LJwk3jusaLsZqOYoomEHIG65U0CCCDEdYJRl3NWLk\nAkEq4wIKuAoJBx2PQb2ZeRhXUlJF8KqvYRlFI8MMQh4JW/snrq8/7mqkmchrFdYEqAkSByj9FoUB\nx2tR3DPVSj4o+NxaaA3tSkpi93P1NUqxg2oso6g+WTXIMYII20gmieBpR4e3ES0MeSy1HQVHc5lc\nB6DjUmUjDjsASwJ+
Wu0Tw8WMSvGCcr+8y01TkAKqGuhKagTWYGdk2Aohb9RTvKVZbhJlqks0ajUS\nhSD54+zPGGMB6PZnYPJB2DoV5vTCijXwkXPc5/bsC4glVeGKFyhKa6GVFmmpcPls3OXed9Iol02Q\nK+qBJx/ggScfqDj3jDlnNESmZsAMQjPh2lFbTZR9DFFXIy6FXLrOSJVs9TWrdz8n2aNglAagC1Ko\nUNQtg/Cln8HSDZXnvfl17veHVdJB5/UP9nPG3EqlGtSTuZEum2o30tqHumgZhMkHhs7pnQDrd6xn\n4cyFDZMrz5hBaHaqFWp/f/gduVFWI1C527hYrN19HFVJu4xXUC+CJu9RkBYCNbn1RS2ytH8BtJX9\nLgoFYJ/zGmGVdJQU0bxuAjv42VY4Y8h4jTuzKzthcogZhLwRJfjpUqhBBF0z6Hj1auT224OvXU4U\nJe3ahBeVZo9tJIBrJlyzGlq/HthXs5qIoqSjKPm89k4w6mMGIW9E2VgWRaG6DEqUewXVDXIRVkkn\nocxHQZZQo3CtJqIo6ahKPo+9E4z6mEHIG1GCn2EVapCSTyvQmrSSTqtHwRgkrpI2JT+6MYOQR+IW\njGtthZaWcEo+6UBrUkralVGVdI8CwzAqMIPQzAS5fI4/vrGKMmw6bND7XMcb0aPAMIwKzCA0M43M\nrZ89253pM3s2zJ8/sms2W7VSwxjlmEFodho1ay4p/XKjEMcYwJjbLGYYeccMghGe+fPjGQAX5gYy\njNxgtYwMwzAMwAyCYRiG4WMGwTAMwwDMIBiGYRg+ZhAMwzAMwAyCYRiG4WMGwTAMwwAyMggi8u8i\nsklE/igiN4jItCzkMAzDMIbIaoVwC3Ciqj4P2Ax8LCM5DMMwDJ9MDIKq/kJV+/2nvwWOyUIOwzAM\nY4g8xBDeAdwc9KKILBORu0Xk7icOHmygWIZhGGOL1GoZicitwEzHS8tV9cf+OcuBfmBV0HVUdSWw\nEuDUKVM0BVENwzAMUjQIqvqyeq+LyNuAVwNnq6opesMwjIzJpNqpiJwHfBhYrKrPZCGDYRiGUUlW\nMYSvAlOAW0RkvYh8PSM5DMMwDJ9MVgiq+uws7msYhmEEk4csI8MwDCMHmEEwDMMwADMIhmEYho8Z\nBMMwDAMwg2AYhmH4mEEwDMMwADMIhmEYho8ZBMMwDAMwg2AYhmH4mEEwDMMwADMIhmEYho8ZBMMw\nDAMwg2AYhmH4mEEwDMMwADMIhmEYho8ZBMMwxiyTD2QtQb6QZmpnLCJ7gPuzliMFjgB2ZS1ECozW\nccHoHdtoHReM3rGFGddxqnrkcBfKpGNaDO5X1VOzFiJpRORuG1dzMVrHNlrHBaN3bEmOy1xGhmEY\nBmAGwTAMw/BpNoOwMmsBUsLG1XyM1rGN1nHB6B1bYuNqqqCyYRiGkR7NtkIwDMMwUsIMgmEYhgE0\nmUEQkX8RkT+KyHoR+YWIzM5apqQQkX8XkU3++G4QkWlZy5QEIvIGEblPRAZFpOlT/kTkPBG5X0Qe\nFJGPZi1PUojIN0Vkp4j8KWtZkkREjhWR20Tkz/7v8P1Zy5QUIjJBRO4SkT/4Y/tU7Gs2UwxBRJ6l\nqk/7j98HPEdV352xWIkgIucCv1TVfhH5PICqfiRjsWIjIguAQeC/gQ+p6t0ZizRiRKQF2AycAzwG\n/A64WFX/nKlgCSAi/xfoA76jqidmLU9SiMgsYJaq3isiU4B7gNeMku9MgEmq2ici44A7gPer6m9H\nes2mWiGUjIHPJKB5rNkwqOovVLXff/pb4Jgs5UkKVd2oqqNld/lpwIOqukVVDwDXAhdkLFMiqOqv\ngCezliNpVHW7qt7rP94DbASOzlaqZFCPPv/pOP9fLJ3YVAYBQERWiMijwFLgn7OWJyXeAdyctRBG\nDUcDj5Y9f4xRolzGAiIyFzgZuDNbSZJDRFpEZD2wE7hFVWONLXcGQURuFZE/Of5dAK
Cqy1X1WGAV\n8N5spY3GcGPzz1kO9OONrykIMy7DyBIRmQxcD3ygytPQ1KjqgKouxPMonCYisdx9uatlpKovC3nq\nKuAm4BMpipMow41NRN4GvBo4W5souBPhO2t2HgeOLXt+jH/MyDG+f/16YJWq/jBredJAVXeLyG3A\necCIEwNyt0Koh4gcX/b0AmBTVrIkjYicB3wY+CtVfSZreQwnvwOOF5F5IjIeuAj4ScYyGXXwA69X\nARtV9YtZy5MkInJkKRtRRCbiJTvE0onNlmV0PdCJl7XyCPBuVR0VMzQReRBoA3r8Q78dDRlUIvJa\n4CvAkcBuYL2qvjxbqUaOiLwS+E+gBfimqq7IWKREEJFrgCV4pZS7gU+o6lWZCpUAInIGcDuwAU9v\nAHxcVW/KTqpkEJHnAf+D91ssAN9X1U/HumYzGQTDMAwjPZrKZWQYhmGkhxkEwzAMAzCDYBiGYfiY\nQTAMwzAAMwiGYRiGjxkEwwiJiLxGRFRETshaFsNIAzMIhhGei/EqSl6ctSCGkQZmEAwjBH4tnDOA\nv4Q1nyMAAAFOSURBVMXboYyIFETka34t+htF5CYReb3/2ikislZE7hGRn/tlmA0j15hBMIxwXAD8\nTFU3Az0icgrwOmAucBJwCbAIDtXO+QrwelU9BfgmMCp2NBujm9wVtzOMnHIx8CX/8bX+81bgOlUd\nBHb4xcXAK69yInCLV0qHFmB7Y8U1jOiYQTCMYRCRw4GzgJNERPEUvAI3BL0FuE9VFzVIRMNIBHMZ\nGcbwvB74rqoep6pz/X4cD+F1GLvQjyXMwCsOB3A/cKSIHHIhichzsxDcMKJgBsEwhudialcD1wMz\n8bqm/Qn4Ol4nrl6/vebrgc+LyB+A9cDpjRPXMEaGVTs1jBiIyGS/yXk7cBfwElXdkbVchjESLIZg\nGPG40W9SMh74FzMGRjNjKwTDMAwDsBiCYRiG4WMGwTAMwwDMIBiGYRg+ZhAMwzAMwAyCYRiG4fP/\nAfyzKuSV3NT5AAAAAElFTkSuQmCC\n",
- "text/plain": [
- ""
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYQAAAEWCAYAAABmE+CbAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzt3XuYHGWZ9/HvPTPJJJqQxEAm4RDirBBR1KAoB8ObCKLo\nyiKiLmzURcWou66IqyhGRF2j664r63pYRURUsrIqooKgIjDRaOQgjiDkADuBcEgmEEjIQDLJzNzv\nH1Wd9PRU91RPV3VVT/8+15Ur3VXVVU91J89dz9ncHRERkZasEyAiIvmggCAiIoACgoiIhBQQREQE\nUEAQEZGQAoKIiAAKCFLCzBab2UNZp6NRpP19mdnXzezCovfvNbNeM+szs5nh350JXu8AM1trZpOT\nOmeWzOzDZvaprNPRKBQQGoCZ3W9mO8P//JvN7HIzm5J1umplZm5mT4X31Wdm2+p8/ViZuZm9zMyu\nM7NtZva4md1qZm+vRxrd/T3u/i9hOiYAXwRe5e5T3H1r+HdPgpf8KHC5u+80s7uLfptBM9tV9P5j\nY72AmV1pZh9PMM2F855iZveVbP4a8C4zm5H09cYjBYTGcaq7TwEWAEcBF2ScnqS8KMzUprj79Go/\nbGZtaSSq6PzHATcBK4HnADOB9wKvSfO6ZXQAk4C7az1R1PdmZu3A3wNXALj78wu/DfBb4H1Fv9Vn\na01DPbj7U8CNwJKs09IIFBAajLtvBn5JEBgAMLO/NrM/mdmTZvagmX2yaN+88En8781so5k9ZmbL\nivZPDkscT5jZPcBLi69nZkeYWVf4dHy3mf1N0b7LzexrZnZ9+NT4OzObbWb/GZ5vrZkdNZb7NLN3\nmdl94RP5z8zswKJ9bmb/aGb3AveG255rZjeEx68zszcXHf9aM7vHzHaY2cNm9iEzeyZwPXBg0VPv\ngSMSAv8OfMfdP+/uj3ngj+7+5ohjMbOPmtn/hde6x8xOL9r3HDNbaWbbw9/hf8PtZmYXm9mW8De8\ny8yOLPqOP2NmhwPrwlNtM7Obir6L54Sv283sC+Hv3GtBddPkcN9iM3vIzD5iZpuBb0ck/xhgm7vH\nrgIzs3eH3/fjZvZzMzso3N5qZl81s0fD+/2zmc03s/cDZwAXht/5DyPOGfnZcN/k8N/XgxaUlr8c\n3vdM4Gqgs+j3nBmesgv467j31NTcXX9y/ge4H3hl+Ppg4C7gS0X7FwMvIAjwLwR6gdeH++YBDnwT\nmAy8COgHjgj3/yvB09+zgEOAvwAPhfsmAPcBHwMmAicCO4D54f7LgceAlxA8ud4EbADeBrQCnwFu\nrnBfDjwnYvuJ4XlfDLQDXwZ+U/K5G8I0TwaeCTwIvB1oIyhBPQY8Lzx+E3BC+HoG8OKi7+2hCul7\nBjAIvKLCMcPOAbwJODD8Lf4WeAqYE+77PrAs3DcJWBhufzXwR2A6YMARRZ+5HPhMyW/ZFvUdAhcD\nPwu/l6nANcDnitI5AHw+/E4nR9zLPwI/L3OfXcA5Jdv+FlgDHB7+W9n7ewOnAauB/cL7fT4wK9x3\nJfDxCt9ppc/+N/Cj8LuaRvBwdFG47xTgvojzHQ88kvX/40b4oxJC4/iJme0gyPi2ABcVdrh7l7vf\n5e5D7n4nQcazqOTzn3L3ne7+Z+DPBIEB4M3Acnd/3N0fBP6r6DPHAlOAf3X33e5+E3AtcFbRMVd7\n8MS8i+AJbZe7f9fdB4H/JcicK7kjLH1sM7PCtZcAl7n7He7eT1A9dpyZzSv63OfCNO8EXgfc7+7f\ndvcBd/8TcBVB5gywB3ieme3n7k+4+x2jpKlgBkGGtCnm8bj7D939kfC3+F+CEszLitJxKHCgu+9y\n91VF26cCzwXM3de4e+xrQlDKAJYC54Xfyw7gs8CZRYcNEWSe/eH3Vmo6QcCP6z0EwWq9u+8BPgUs\nNLOO8J72C+8Jd7/b3bfEPG/kZy2o5noncK
67b3P37QQPNGeWPxWE91R1dWQzUkBoHK9396kET3rP\nBfYv7DCzY8zs5kIRm+A/6v4ln99c9PppgowegqfZB4v2PVD0+kDgQXcfKtl/UNH73qLXOyPej9b4\n/WJ3nx7+eX/Rdfemw937gK0l1y1O86HAMUWBZRtBUJkd7j8DeC3wQFhlc9woaSp4giATnRPzeMzs\nbWbWXZSOI9n3W5xPUAK4Nax+e0d4fzcBXwG+Cmwxs0vMbL+41wwdQFCi+WPRtX8Rbi94NAzc5TxB\nEJjiOhT4etH1HiUohRxMUB33LeAbwGYLqhbjdoQo99kDCUoidxdd8yfArFHONxWoa4eFRqWA0GDc\nfSVBNcIXijb/D0FVwSHuPg34OkHGE8cmgqqigrlFrx8BDjGzlpL9D1eZ7Go9QpDZABDW988suW7x\nNL0PAiuLAst0Dxo+3wvg7re5+2kEGcdPgB9EnGMEd3+aoOrijDiJNrNDCarm3gfM9KCR/C+Ev4W7\nb3b3d7n7gcC7ga8V6v/d/b/c/SXA8wiqYD4c55pFHiMIwM8v+g6medAgvPeWRjnHneG143oQOLvk\ne58clhjd3b/o7kcRVGO+CDg3TjoqfHYTQcD5q5J7LLQVlDvvEQSlYhmFAkJj+k/gZDMrVPtMBR53\n911m9jLg76o41w+AC8xshpkdDPxT0b5bCEoT55vZBDNbDJxKUAecpu8DbzezBRb0fPkscIu731/m\n+GuBw83srWE6J5jZSy1oEJ9oZkvMbFpYrfEkwVM/BKWZmWY2rUJazgfOtqA/+0wAM3uRmUV9B88k\nyJQeDY97O0EJgfD9m8LvGIKncQeGwrQeY0G30qeAXUVpjCUsxX0TuNjMZoXXO8jMXl3FaW4Fphca\nhmP4OvDxogbfGWZ2Rvj6WDM7OqzmeQrYzfDvvezYiXKfDX+/y4Avmdn+FjjEzE4uOu+siJLIIoJS\nh4xCAaEBufujwHeBT4Sb/gH4dNjG8An2PQHH8SmC6pkNwK+A7xVdZzdBAHgNwRPo14C3ufvaWu+h\nEnf/NXAhQTvAJuCvqFBPHNaXvyo85hGC6rFC4ynAW4H7zexJguq0JeHn1hIEn56wCmJELyN3/z1B\nI/eJ4XGPA5cA10Ucew/wHwSlil6Chv7fFR3yUuAWM+sjKNGd68EYgv0IMvMnCH6LrQS9m6r1EYJO\nAH8I7/XXwPy4Hw5/78uBt8Q8/vsEVV0/Dq/XDRQy5+nhubYBPQT39aVw3yXAS8PvPCqwVvrsBwh+\n49uB7QTVYs8J9/2Z4Ht9IDz3s8LS5SsJu9JKZeauBXJEJGBmBxD0OjuqTMNzQzGzDwNT3f0Tox4s\nCggiIhJQlZGIiAAKCCIiElJAEBERIBjm3zAmTJ3gk/aflHUyRMaNvv4+XrIj2Ylz/zi1j9aWViZP\nGBczaI8Lfff3PebuB4x2XEMFhEn7T+LoTx6ddTJExo2VG7q4fWWy/6cmnNDFlGdOZcHsBaMfLHXR\ndXbXA6MfpSojEREJKSCIiAiggCAiIqGGakMQEcnClNYpnDn3TOZMnkNLTp+jhxhi085NXLnxSvoG\n+8Z0DgUEEZFRnDn3TI48+Ejap7YTLD2RP+7OzB0zOZMzuXTDpWM6Rz5DnYhIjsyZPCfXwQDAzGif\n2s6cybGX7xhBAUFEZBQttOQ6GBSYWU1VWpkFBDObZGa3hgto321mn8oqLSIikm0JoR840d1fBCwA\nTjGzYzNMj4hIrv32xt9yyrGn8KqXvopLvnRJ4ufPLCCEy+QVmsInhH80F7eISITBwUE+/dFP880r\nv8m1v7uWn1/9c+5bd1+i18i0DcHMWs2sG9gC3ODut0Qcs9TMbjez2/fs2FP/RIqIVGnqj66h86gT\nOXzWEXQedSJTf3RNzee88447mTtvLofMO4SJEyfy2te/lhuvvzGB1O6TaUBw90F3XwAcDLzMzI6M\nOOYSdz
/a3Y+eMHVC/RMpIlKFqT+6htkfvJAJDz2CuTPhoUeY/cELaw4KvZt6mXPQvh5Esw+cTe+m\n3lqTO0wuehm5+zbgZuCUrNMiIlKLA5ZfTMvOXcO2tezcxQHLL84oRfFl2cvoADObHr6eTLA4d6qL\nt4uIpK3t4U1VbY+rY04Hm4rOsfmRzXTM6ajpnKWyLCHMAW42szuB2wjaEK7NMD0iIjUbOCh6YFi5\n7XG94KgX8MCGB3jogYfYvXs31/3kOk485cSazlkqs6kr3P1O4Kisri8ikoZHl53H7A9eOKzaaGjy\nJB5ddl5N521ra+PCz13IO9/8ToaGhjjjrDM47LmH1Zrc4ddI9GwiIk1uxxtPBYK2hLaHNzFw0Bwe\nXXbe3u21WHTyIhadvKjm85SjgCAikrAdbzw1kQBQb7noZSQiItlTQBAREUABQUREQgoIIiICKCCI\niEhIAUFEpEF87P0f4/gjjufUE9LpwaSAICLSIE4/83S+eeU3Uzu/AoKISMKuWX8NJ37nRI746hGc\n+J0TuWZ97dNfA7z0+Jcybca0RM4VRQPTREQSdM36a7jw5gvZNRBMXfFI3yNcePOFAJx6eL4Hq6mE\nICKSoItXX7w3GBTsGtjFxas1/bWISFPZ1Bc9zXW57XmigCAikqA5U6KnuS63PU8UEEREEnTececx\nqW3SsG2T2iZx3nG1TX8N8MGlH+Ss15zFhvs2sOiFi/jRFT+q+ZzF1KgsIpKgQsPxxasvZlPfJuZM\nmcN5x52XSIPyFy/5Ys3nqEQBQRpGb18vPU/00D/YT3trO50zOumYkuwSgiJJOPXwU3PfoyiKAoI0\nhN6+XtZtXceQDwHQP9jPuq3rABQURBKiNgRpCD1P9OwNBgVDPkTPEz0ZpUiayRBDuHvWyRiVuzPE\n0OgHlqGAIA2hf7C/qu0iSdq0cxP9O/pzHRTcnf4d/WzaOfburaoykobQ3toemfm3t7ancj21V0ix\nKzdeyZmcyZzJc2jJ6XP0EENs2rmJKzdeOeZzKCBIQ+ic0TmsDQGgxVronNGZ+LXUXiGl+gb7uHTD\npVknI3UKCNIQChlx0k/tUSWBSu0VjR4QSu83vxUgtVMpr3oKCNIwOqZ0JPofulxJoDQYFDR6e0XU\n/QKsmNXLki3jK6NUKW9s8lkZJlIH5UoC5aTVXlEvUfeLwbLO8ddTS73SxkYBQZpWpSf+FmsZ8T6N\n9op6Kne/G9sbu+QTRb3SxkYBQZpWuSf+9tZ25s+cv3d/4X2jVzWUu9+5/dHbV8zqZd6xq2lZ1MW8\nY1ezYlZvmslLVKXfVspTG4I0rUo9l5Jur8iDqPvFYXnPyJLPilm9LJ2/jqdbg2MfmNTP0vlBHXwj\ntDfUs1faeJJZCcHMDjGzm83sHjO728zOzSot0pw6pnSMy5JAOYX7xcEcDt3VzhVrjojM4Jd19uwN\nBgVPtw41THtDs/22ScmyhDAA/LO732FmU4E/mtkN7n5PhmmSJjMeSwKVdEzpYO2ja4Cg7eCtR6yJ\nDAjl2hUaqb2h2X7bJGQWENx9E7ApfL3DzNYABwEKCCIpWvTsxXtfr9zQRcuirhHHlBuf4DDi+KGV\ni6MOlQaUizYEM5sHHAXcErFvKbAUoH2mGoREklQcHIqV9uOHoA5+/v7Dq11WbuhKOYVST5n3MjKz\nKcBVwAfc/cnS/e5+ibsf7e5HT5g6of4JFGlCqoNvTpmWEMxsAkEwWOHuP84yLSIyXGkd/KqNq7j3\n8XszTJGkLbOAYGYGfAtY4+7prgsnIjVZuaGL1iGYsnv49gW9lk2CJBVZlhBeDrwVuMvMusNtH3P3\n68p9oK+/T3WWOVOuDloa16qNqxgcHBixfc/n2mDhwgxSJPWSZS+jVUBVjxcv2TGF21cenVKKpFpR\nvVOkduUeeqZNns6C2QvG/HkIAnich6ppu+CJWxYP36hYMO7lopeRiAxX
2pVzwglddb3+9kmVA349\nupqmMX21psSuTAFBZJwZrRqv1mq+elTbpjF9tabEHp0CgkgORT2dx6kuqpfi9KVRWkhjkaLxvPBR\nUhQQRHIm7w31pSOd05DG9NWaEnt0mQ9MExEplcb01ZoSe3QKCCKSO50zOhNfpCiNc443qjISkdwp\n1Okn2SMojXOONwoIIpK47Tu3RbYvVNM+ksb01ZoSuzIFBBFJ1J7fLo7croGM+aeAICINTwPOkqGA\nICINTQPOkqNeRiLS0CoNOJPqKCCISEPTgLPkjFplZGb/BFzh7k/UIT3SYKJ6ksSdlVMkCe2t7ZGZ\nvwacVS9OG0IHcJuZ3QFcBvzS3cutwS1NJGoOmzRn5Tzp972cc1UPs7b2s2VmO5ee0cmNx6uOuNl1\nzuiMXP9ZA86qN2qVkbt/HDiMYHWzs4F7zeyzZvZXKadNZK+Tft/Lhy5fx+yt/bQAs7f286HL13HS\n73uzTppkTOs/JydWLyN3dzPbDGwGBoAZwI/M7AZ3Pz/NBIoAnHNVD5N2D284nLR7iHOu6oksJag0\n0Vw04CwZcdoQzgXeBjwGXAp82N33mFkLcC+ggCDDlBulOhaFka2ztkY3EEZtL5QmCgGkUJoAFBRE\nKohTQpgBvMHdHyje6O5DZva6dJIljarcKNWxKB7ZumVmO7MjMv8tM0c2HFZbmhCRQMWAYGatwBvd\n/ZNR+919TRqJEil16Rmdw576AZ6aAP+8qH9EaWTW1uhzlCtliEigYkBw90Ez+7OZzXX3jfVKlIwv\nScxhU3iyL24X+OdF/Xz/BSOPfXAaHLp95Pao0kReaSoGyUKcKqM5wN1mdivwVGGju/9NaqmS8aG7\nG/r6YFEyq4DdeHzHiCqfRRHHfe9ve0eUJnZNbOHSM8p3Q+ze3E3f7r6q07Rw7sKqPzMaTcUgWYkT\nED6VeipkXGo5d1sm140qTYzWy2j7zm1M21X9tVZu6Ep8yUut/StZGTUguPvKeiRExqes1geOKk2M\n5olbFld3ke7uVIKepmKQrMTpdnos8GXgCGAi0Ao85e77pZw2kcSktRh8GjQVg2QlTpXRV4AzgR8C\nRxOMSTgszUSJpCFqqo08SnMqBjVWSyVxRyrfZ2at7j4IfNvMfp9yukSaVlpr/6qxWkYTJyA8bWYT\ngW4z+zdgE/DMdJMlkrzEl3CM6uKUkDSmYlBjtYwmTkB4K0G7wfuA84BDgDOSuLiZXQa8Dtji7kcm\ncU6RKFk1bseRdDVOufYSB7CR29VYLQVxehkVpqzYSfJdUC8naKP4bsLnFWkIaVXjjGgv6e7GMuoG\nLI2jbEAws7sIHyqiuPsLa724u//GzObVeh6RRqVqHMmTSiWEXExcZ2ZLgaUAc9vV7U7GlzyMOVB3\nVikoGxBKZzfNirtfAlwCcPTUqVqpTcaVeo85aLEWrSwmZY26YpqZHWtmt5lZn5ntNrNBM3uyHokT\nSVtvXy+rH1xN1/1drH5wNb199V2BrXNGJy02/L9hWpm0gVYWk4rGOjDtOWkmSqQe8tAvP60xB5Wu\npwAg5WQ6MM3Mvg8sBvY3s4eAi9z9W0mcW2Q0eWnQVSYteZHpwDR3PyuJ84iMRR4adEXyZNQ2BIKB\naS0EA9OeIsGBadL4VszqZd6xq2lZ1MW8Y1ezYlZ96+BrUa7hVr1upFnFHphmZoPAz4CH3X1L2gmT\n/Fsxq5el89fxdGtQ7fLApH6Wzg/q4JdsyX8VSJqTyIk0orIlBDP7upk9P3w9DfgzwYjiP5mZqnqE\nZZ09e4NBwdOtQyzr7MkoRdXpmNKhXjciRSqVEE5w9/eEr98OrHf315vZbOB64Pupp05ybWN7dF17\nue15pAZdkX0qtSHsLnp9MvATAHffnGqKpGHM7Y+uay+3XUTyrVJA2GZmrzOzo4CXA78AMLM2YHI9\nEif5trynk2cMDv8n9IzBFpb3qA5e
pBFVqjJ6N/BfwGzgA0Ulg5OAn6edMMm/QsPxss4eNrb3M7e/\nneU9nQ3RoDzejVj7ocLaDWmtorZiVu+wfxuadyb/Ks1ltB44JWL7L4FfppkoqaPeXujpgf5+aG+H\nzk7oiJ8ZLLkLlvwU6AfagU5A8SBT1az9kNZo7ageaHhwPbXZ5FeskcqSY7Vk6L29sG4dDIU9hfr7\ng/cQ7xy9vbB2Lbjv+/zatcM+X/Pi9haxokuBp/jMWXrdkmvlecGdaqQxWnvGMV1sm8TIxXiMzKf1\n1prSlSkgNLJaM/Senn2fLRgaCrbH+fy9947MlN1h/Xro6WGoi9GDVKWAtmoVr3jLYNnL33xFGyxc\nOHo6qxVx3eJrtSzqGhboGjk4pDFau28ikSuz1XreWuVh7qq8U0BoFFEZZ60Zen+Z/5zltpcaGIje\nPjgY/Cmcq1yQGi2gLVzIzfeXHF/8HRyWUuN16XUBiuLO3tXIurtpafBVyCpNv13L07RheESrQZaj\nwPMyd1WeVVox7YOVPujuX0w+OQKMzPhmzoTNm0dmnKXBoCBuht7eHn1s0gsRlQtS1QS0WktDEqnc\naO2Zk2fW9DQ9qW0S/YP9uRoFrrmrRlephDA1/Hs+8FKCaSsATgV+k2aimlpUxvfIIyOPKxcMIH6G\n3tkJa9ZEb4+jtXVfSWA0UYGnmhJKraWhFNXcTpKCuNVY5abfrvppuqS0NKWtnXnT5+Wqvr7eixE1\nokq9jD4FYGa/Al7s7jvC958kWBtB0hCV8VXS0jL8+JaW+Bk6BI2nxe0AlRpxS3V0RAerKO3tI0s+\n5QJKVECrtXorDQsWMLQyu8uXM6LL6SiiRmuveSziQYHRn6ZLA1GeqmI6Z3Ry35a17GnZ9+99wpDR\nuX+nGptDcdoQ5jJ81PJuYF4qqZHqMrjitoSx9DLq6YluFI771L11a7zrtLQE1V6lJZ+o4FMuoNWr\nemucKFdqiVNyWLVxFTiRDcON/DT9d3fCwbc6n1wMG6fB3O3wyS7n54u28+NnbVZjM/ECwveAW83s\n6vD96wkmuZNaRTUUl8v4ShUyzo6OsVeZ1PrUXem4wn1UagB3h7a2oKQwWkDr7BzZblJtaahJ7G30\nLhZW6azc0AVmLJpXYaQaMHkPWGvL8MkLHfoH+nNZRRbHOVf1MHsrnN09fPuFJz7CUMlzUbM2NseZ\n/nq5mV0PnBBueru7/yndZDWBco2ks2cPb0CGIOObPTt4Io9TEog7NqHWp+5Knz/uuOHbotoqIOip\nFKfraCH9NQyia2oLFjC0fBUALcsGWLVxFQvnlv/e2wfhK/fNH1ej0GdtjX6AeXhq5OambGyO2+30\nGcCT7v5tMzvAzJ7t7hvSTNi4V66RdOtWmD+/PoPNqn3qHq33U6XPJ1HlU0tpqFo1juCuRukUD6ll\nvGHgnbari74Yax4u2dLR0AGg1JaZ7cyOCAoH7YCH9ht5fCNXj43VqCummdlFwEeAC8JNE4Ar0kxU\nU6hUXdPRETxhL14c/F1NRlSpN06pjo4g+BQy5fb24H3U9QqBppDu/v4gGMyeHe/zM2dGp3fyZFi9\nGrq6gr97c7DiWm8vK9rWMO+9/bRcBPPe28+KtjWppG3FrF6WHr6WByb14xYuMnT42oZaea5RXHpG\nJ7smDs/ydk1s4djBA2mx4duz7iKblTglhNOBo4A7ANz9ETMrU8iS2NJqJK22XSDuU3elEk1p9VCU\ncg3Q24oGduVkbMGKSetZ+hp4emLw/oHpsPRU4Pr1LEl4oqZlh97L023DK7CfbnOWHXrvmJ/O4/Qy\nah3LiVetit6exmjxFNx4fPB9nnNVD7O29rNlZjuXntHJY0d1ML9vmnoZES8g7HZ3NzMHMLMYhU0Z\nVVqNpHkJNGM9Ls2xBTGrgT62aHBvMCh4eiIsWzTIku4Rh9dUvbTxGdGjvcttjyvp6TReMW8lKxdF\n
zx2Vx6635dx4fMfewFBMCyUF4gSEH5jZN4DpZvYu4B3ApekmqwlUaiStpf46r4Embu8pSGdsQRVt\nKw9Oiz7FxqjtNY6gnrs9KIFEbc+diN5JjdrjSKLF6WX0BTM7GXiSYNTyJ9z9htRT1gyiqmtqnaIh\nrd44tQaaqM+Xk8bYgipGOh/0JDwUkflHZtLlzhtO8Dfab7B8ZStLXzO8RPKM3cF2ygSmzLgrAIxz\nowYEM/u8u38EuCFimyQtiSka0uiNU2ugifp8Nb2UalVFldfnfg3vPpWRmfSNQGnbeLnzxpzgb8mu\nw+GaNSw7ad9gqeU3wpKBw2sKCKNl3K2tbRW7nZa6+f5FcEW5NoQqEia5FqfK6GSCXkbFXhOxTZKQ\nxykaCmoNNFGfnzYtd2ML3rK+Hbumf2Qmvb4dStvP41aFlQvqHR0s6YUl/53cdxA5MK3IjGPidTsd\noUEaj2XsKs12+l7gH4BOM7uzaNdU4HdpJ6xpNdsUDfUcWxBXZydL7l7HkrtKSi7zI0ou1VSF1drT\nK88WLAD2rRMxbfJ0FsxekGmSpHqVSgj/A1wPfA74aNH2He7+eKqpamaaoiEd1QTaaqrHoo4dHIxe\nK6KGoD7jmC62T4reN1qJoF6GlrfxircMsvJQrZ7cqCrNdrod2A6cBWBms4BJwBQzm+LuG+uTxCaj\nKRrSUW2greapvfTY0o4Bo10rRq+yvonRH82N7m5alg2EExaaSgcNKk6j8qnAF4EDgS3AocAa4Pm1\nXtzMTgG+RDBO5lJ3/9dazzkujIcqhLypZ6Ct5loxe5Xt+e3i5NOZgtEmzZN8i9Oo/BngWODX7n6U\nmb2CsNRQCzNrBb5K0Gj9EHCbmf3M3e+p9dwNo47z5QixA+0r5q2ku2N4tceCXgt62iR8rdi9yrq7\nmfHukct1PvEfKa0rLU0pTkDY4+5bzazFzFrc/WYz+3wC134ZcJ+79wCY2ZXAaUBzBAQtCZlbUXXg\nKw91uD+Fi8XsVTbhn7YxGDHzWMuygcRGCicxxkCNyo0tTkDYZmZTCJbNXGFmW4DaxtUHDgIeLHr/\nEHBM6UFmthRYCjB3PPW0yfGSkM2uro20MRu761VlNOZ7L1o9bsIJXUklR+ps1NlOCZ7adwLnAb8A\n/o9gXeW6cPdL3P1odz/6gAkT6nXZ9OV5vIHUT2dn0OBcTL3KJCNxpq54CsDM9gOuSfDaDwOHFL0/\nONzWHJptvIFEU68yyZE46yG828w2A3cCtwN/DP+u1W3AYWb2bDObCJwJ/CyB8zYGPRmKSM7EaUP4\nEHCkuz9738tOAAAQ1UlEQVSW5IXdfcDM3gf8kqDb6WXufneS18i1NJ8Mo3ovpXUtqY06F0iOxAkI\n/wc8ncbF3f064Lo0zt0Q0hhvEJXBrFkTDBhy37dNmU5l9eoSrM4FkiNxAsIFwO/N7BZgb6W3u78/\ntVTJ2EVlMLAvGBQo0ymvnk/t6lwgORInIHwDuAm4C4gxg5dkqpqMRJlOtCSe2uNW26lzQSJ6+3q1\nBGYC4gSEAXf/YOopkWRUszKZMp1otT61V1NtN3t2/daEGKd6+3pZt3UdQx58h/2D/azbGpToFBSq\nEycg3BwODruG4VVGmvE0j8pNx1ycGcG+TKfWuvL16+GRR/a9P/BAOPzw2u4ha7U+tVdTbbd1K8yf\nn5sG/5ZFXQAseqDKqTq6u2k5d+TUGvXQ80TP3mBQMORD9DzRo4BQpTgB4e/Cvy8o2uaAHmHyqFzv\npXLbaqkrLw0GsO99HoNC3OBX6xTk1VbbaTLDmvQPRn/f5bZLeXEGpj27HgmRBJXLYEq3rV5dW115\naTAo3p63gFBNQ3GtXYIbuNquEaeuaG9tj8z821vz9d02gkorpp3o7jeZ2Rui9rv7j9NLltRFmj1c\nVq/ORRXIXvXs3llttV3CCtU+lUybOKXiZ6ftgiduWZxcolLUOa
NzWBsCQIu10DlDlRjVqlRCWETQ\nuyhq3iIHFBAaXWvrvoXgS7fXqhBU8jLmoZrgV2u302qq7VL6ThY9e/GYP7Nq4yqSmb+yPgrtBOpl\nVLtKK6ZdFL78tLtvKN5nZqpGGg/Mqtte6sADy1cbFcvDmIdqGoqTKE3ErbaTRHRM6VAASECc2U6v\nitj2o6QTIhmIWve30vZShx8eBIU4sh7zUM3cURosJk2qUhvCcwmWyZxW0o6wH8HaylKrrFdMS2JQ\n1OGHD29ALrQd1HLONORhVtGsf2+RUVRqQ5gPvA6YzvB2hB3Au9JMVFPIw6RmtXavrNc5k5Jl987e\nXli7dvjAtLVr96Wr0WU4DkGSU6kN4afAT83sOHdfXcc0NYc8TGqWxlNzHp7Ey4n7hF6u5NTWNvbe\nU/feO3JgmnuwPQ/fTULG0pgt+RFnYNrpZnY3wappvwBeBHzA3a9INWXjXV7qqdN4aq7mnPWqRqmm\nRBZVyjEL2lYK7SvVluhqba8pI04X02pNmTiF7YPbqjt3FYOaJb/iBIRXufv5ZnY6wbrHbwJuBhQQ\nalHvSc3yWH9dz2qzakpkUaWcgYGRXXTz0HuK5J/KF8xekOj5pHHECQiFhYz/Gvi+uz9ucbslSnn1\nrGvPQ3tFlHpWm1VbIist5XR1Vff50gBcOiitIIkxHyIJiRMQrjGztQRVRu81swOAXekmqwnUs649\nD+0VUepZbVbrILxqSnRRAbjcQ1Tepvgoo3tzN9t3xm80VltCY4ozl9FHzezfgO3uPmhmTwOnpZ+0\nJlCvXi95aa8oVc9qs1oH4VVToosKwO5Bo3Rra76q7WLavnMbQ8vbYOHCUY+dcEIX3Zu7VfXUgMoO\nTDOz84venuTugwDu/hSg1dIaSbkMNuuxAdUMFqtVrY26HR3BNNWF76y9PXgflaGXC7QDA3DccbB4\ncfB3gwQDaR6VSghnAv8Wvr4A+GHRvlOAj6WVKElYXscGpFltVlqHX67KqJqgGLdEl+NV0E76fS/n\nXNXDrK39bJnZzqVndHLj8c0RmFZu6IrcPm3ydJVmQpUCgpV5HfVe8izPYwPSqDaLW4efVlDMUQAu\nzgTPugs+dG0Lk3YH6Zq9tZ8PXR50Lrjx+I6yGWa1puyG7S3bEjtfkkqrvQpTdWsJzkClgOBlXke9\nl7xrpkVYsq7Dz0EALmRwDhza387ym1t4+dqde4NBwaTdQ5xzVc/eUkLF9RBGbz4AKkyb3d0d7wRp\nWjiyJNA/0K8lOEOVAsKLzOxJgtLA5PA14XvNZST5VakOP0ajaCIyDMDD1hg2eGBSP285Bb73NMy7\na+Txs7bu+77GMtBtaOXiEVNXRAWWPExtMbR81Yh/A7sGduElz7jNugRnpakr1EFaGlOO6/DrIWqN\nYQwuOBneEhEQtswMvpexdBVduaGLCSd0MRiOVF707MWs3NBFy6IuWiOWlc6yO+rKDV20LBugdahr\n77bBFkYEg4JmXIIzzjgEkcaSozr8LJTLyB6aCrsmtgyrNto1sYVLzxj797Lo2Yvp3hxUBRUaZou3\nFat3w21pu8BzDziCTX2bRhy3c89OLcEZUkCQ8ScHdfhZKrvGcFs7Xzi7M/FeRlEZfda9doZVm7Gv\nXWD+zPkjqoFKj4XmXYJTAUHGp2ZqRC9RaY3hGw/paIpuplHVZuXaBbQE5z6ZBAQzexPwSeAI4GXu\nfnsW6RAZj5LK4Bq5K2a5arNy27UEZyCrEsJfgDcA38jo+jIWeZwxVSLVmsGVq3IpnDvvylabNWG7\nQDUyCQjuvgZAs6Y2kHrOmKrAk7lqqlyqUa9SR6VqMylPbQgyUlSGXK8ZU/M6Vfc4FpVJV1vlEvc6\n9Sp1qF1gbFILCGb2a2B2xK5l4fKccc+zFFgKMLdJ+pFnqlyGXBoMCpKeMTWvU3XnWC1P3eUy6VZr\nZdBHzv1US5VLWqWOctQuUL
3UAoK7vzKh81wCXAJw9NSpmjIjbeUy5HKSDtJ5nao7pxxqeuoul0m3\ntbTRQkuiVS5plDokWWWnv5YmVSnjLW3zMUt+sFdep+rOsXJP3XGUy4wHhgaYP3P+3hJBe2t7ZB/+\napQrXaihNz+y6nZ6OvBl4ADg52bW7e6vziItUqLctA9tbSPXDohaErJWTT7KOClxn7or9cZJuspF\nDb35l1Uvo6uBq7O4dtOK23OnXIZcLvNPum6/yUcZJyXuU3elTDrpHkFq6M0/9TJqBtX03CmXIa9Z\nE33uNOr2m3iU8Vi02Njr+stl0lBb20Sl6ykA5JcCQh4l3Q+/2p47URlyIT2lVLefKQPmz5xf01N3\nVCa9+sHVde0RJPmggJA3afTDT6LnTqPV7TfR4LY0nrrVI6g5qZdR3lR6mh+rJHruVLPIfNYKQbUQ\n8ApBtbc323Q1EPUIak4qIeRNGv3wk3q6b5S6fQ1uq5l6BDUnBYS8SWO1r2bruaPBbTVTj6DmpICQ\nN2nV1TfK030SKo2lWL163AXFtCaMU4+g5qOAkDfN9jSfhqigahYMrCsMrhsnk+aVm7ri/m330942\nvFSZ9Spmkn8KCHmUxtN8Wr1u8tibJyqoDgzAYMlkbeOkXSGqe+jOPTvZ079z2PaVG7oyXeRe8k8B\noRmkNaV0nqeqLg2qXV3Rx43jdoVB9SGUKikgNIO0et3UuzdPHksj0lBWbVw1YtvCuQszSEk+KSA0\ng7R63dSzN0+eSyMZK526Aocr1hzBki1F30t3Ny3nbqt/4nJk5YYuWodgyu5927ZPgu7N3WpfCalQ\n2QzSmlK6nlNV1zpgb5xOq12YuqJ4mmpgeDCQvfZ8ro0nblm8909rhaU+mpFKCM0gra6saZ03qmqo\n1tJIo029UYXS7qErN3RllxhpaAoIzSCtrqxpnLdc1VDUegwQ/wlf3XlFRqWA0CzSGpiW9HnLVQ2Z\nBU/0tTzhN9PgPJExUBuC5Eu5KqDBwcaZXE+kQamEIPlSaS4nPeGLpEolBMmXzs6gKqjYOGn8Fck7\nlRAkX9T4K5IZBQTJH1UNiWRCVUYiIgIoIIiISEgBQUREAAUEEREJKSCIiAiggCAiIiEFBBERATIK\nCGb272a21szuNLOrzWx6FukQEZF9sioh3AAc6e4vBNYDF2SUDhERCWUSENz9V+5emNz+D8DBWaRD\nRET2yUMbwjuA68vtNLOlZna7md3+6J49dUyWiEhzSW0uIzP7NTA7Ytcyd/9peMwyYABYUe487n4J\ncAnA0VOnegpJFRERUgwI7v7KSvvN7GzgdcBJ7q6MXkQkY5nMdmpmpwDnA4vc/eks0iAiIsNl1Ybw\nFWAqcIOZdZvZ1zNKh4iIhDIpIbj7c7K4roiIlJeHXkYiIpIDCggiIgIoIIiISEgBQUREAAUEEREJ\nKSCIiAiggCAiIiEFBBERARQQREQkpIAgIiKAAoKIiIQUEEREBFBAEBGRkAKCiIgACggiIhJSQBCR\npjVld9YpyBdrpOWMzWwHsC7rdKRgf+CxrBORgvF6XzB+72283heM33uLc1+HuvsBo50okxXTarDO\n3Y/OOhFJM7PbdV+NZbze23i9Lxi/95bkfanKSEREAAUEEREJNVpAuCTrBKRE99V4xuu9jdf7gvF7\nb4ndV0M1KouISHoarYQgIiIpUUAQERGgwQKCmf2Lmd1pZt1m9iszOzDrNCXFzP7dzNaG93e1mU3P\nOk1JMLM3mdndZjZkZg3f5c/MTjGzdWZ2n5l9NOv0JMXMLjOzLWb2l6zTkiQzO8TMbjaze8J/h+dm\nnaakmNkkM7vVzP4c3tunaj5nI7UhmNl+7v5k+Pr9wPPc/T0ZJysRZvYq4CZ3HzCzzwO4+0cyTlbN\nzOwIYAj4BvAhd7894ySNmZm1AuuBk4GHgNuAs9z9nkwTlgAz+39AH/Bddz8y6/QkxczmAHPc
/Q4z\nmwr8EXj9OPnNDHimu/eZ2QRgFXCuu/9hrOdsqBJCIRiEngk0TjQbhbv/yt0Hwrd/AA7OMj1Jcfc1\n7j5eRpe/DLjP3XvcfTdwJXBaxmlKhLv/Bng863Qkzd03ufsd4esdwBrgoGxTlQwP9IVvJ4R/asoT\nGyogAJjZcjN7EFgCfCLr9KTkHcD1WSdCRjgIeLDo/UOMk8ylGZjZPOAo4JZsU5IcM2s1s25gC3CD\nu9d0b7kLCGb2azP7S8Sf0wDcfZm7HwKsAN6XbWqrM9q9hccsAwYI7q8hxLkvkSyZ2RTgKuADJTUN\nDc3dB919AUGNwsvMrKbqvtzNZeTur4x56ArgOuCiFJOTqNHuzczOBl4HnOQN1LhTxW/W6B4GDil6\nf3C4TXIsrF+/Cljh7j/OOj1pcPdtZnYzcAow5o4BuSshVGJmhxW9PQ1Ym1VakmZmpwDnA3/j7k9n\nnR6JdBtwmJk928wmAmcCP8s4TVJB2PD6LWCNu38x6/QkycwOKPRGNLPJBJ0dasoTG62X0VXAfIJe\nKw8A73H3cfGEZmb3Ae3A1nDTH8ZDDyozOx34MnAAsA3odvdXZ5uqsTOz1wL/CbQCl7n78oyTlAgz\n+z6wmGAq5V7gInf/VqaJSoCZLQR+C9xFkG8AfMzdr8suVckwsxcC3yH4t9gC/MDdP13TORspIIiI\nSHoaqspIRETSo4AgIiKAAoKIiIQUEEREBFBAEBGRkAKCSExm9nozczN7btZpEUmDAoJIfGcRzCh5\nVtYJEUmDAoJIDOFcOAuBdxKMUMbMWszsa+Fc9Nea2XVm9sZw30vMbKWZ/dHMfhlOwyySawoIIvGc\nBvzC3dcDW83sJcAbgHnAC4BzgONg79w5Xwbe6O4vAS4DxsWIZhnfcje5nUhOnQV8KXx9Zfi+Dfih\nuw8Bm8PJxSCYXuVI4IZgKh1agU31Ta5I9RQQREZhZs8CTgReYGZOkME7cHW5jwB3u/txdUqiSCJU\nZSQyujcC33P3Q919XrgexwaCFcbOCNsSOggmhwNYBxxgZnurkMzs+VkkXKQaCggiozuLkaWBq4DZ\nBKum/QX4OsFKXNvD5TXfCHzezP4MdAPH1y+5ImOj2U5FamBmU8JFzmcCtwIvd/fNWadLZCzUhiBS\nm2vDRUomAv+iYCCNTCUEEREB1IYgIiIhBQQREQEUEEREJKSAICIigAKCiIiE/j8wn8IRk+gohgAA\nAABJRU5ErkJggg==\n",
- "text/plain": [
- ""
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- }
- ],
- "source": [
- "# Visualising the Training set results\n",
- "X_set, y_set = X_train, y_train\n",
- "X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),\n",
- " np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))\n",
- "plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),\n",
- " alpha = 0.75, cmap = ListedColormap(('red', 'green')))\n",
- "plt.xlim(X1.min(), X1.max())\n",
- "plt.ylim(X2.min(), X2.max())\n",
- "for i, j in enumerate(np.unique(y_set)):\n",
- " plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],\n",
- " c = ListedColormap(('red', 'green'))(i), label = j)\n",
- "plt.title('Random Forest Classifier (Training set)')\n",
- "plt.xlabel('Age')\n",
- "plt.ylabel('Estimated Salary')\n",
- "plt.legend()\n",
- "plt.show()\n",
- "\n",
- "# Visualising the Test set results\n",
- "X_set, y_set = X_test, y_test\n",
- "X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),\n",
- " np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))\n",
- "plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),\n",
- " alpha = 0.75, cmap = ListedColormap(('red', 'green')))\n",
- "plt.xlim(X1.min(), X1.max())\n",
- "plt.ylim(X2.min(), X2.max())\n",
- "for i, j in enumerate(np.unique(y_set)):\n",
- " plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],\n",
- " c = ListedColormap(('red', 'green'))(i), label = j)\n",
- "plt.title('Random Forest Classifier (Test set)')\n",
- "plt.xlabel('Age')\n",
- "plt.ylabel('Estimated Salary')\n",
- "plt.legend()\n",
- "plt.show()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": true
- },
- "outputs": [],
- "source": []
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.5.1"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
diff --git a/machine_learning/Random Forest Classification/Social_Network_Ads.csv b/machine_learning/Random Forest Classification/Social_Network_Ads.csv
deleted file mode 100644
index 4a53849c2baf..000000000000
--- a/machine_learning/Random Forest Classification/Social_Network_Ads.csv
+++ /dev/null
@@ -1,401 +0,0 @@
-User ID,Gender,Age,EstimatedSalary,Purchased
-15624510,Male,19,19000,0
-15810944,Male,35,20000,0
-15668575,Female,26,43000,0
-15603246,Female,27,57000,0
-15804002,Male,19,76000,0
-15728773,Male,27,58000,0
-15598044,Female,27,84000,0
-15694829,Female,32,150000,1
-15600575,Male,25,33000,0
-15727311,Female,35,65000,0
-15570769,Female,26,80000,0
-15606274,Female,26,52000,0
-15746139,Male,20,86000,0
-15704987,Male,32,18000,0
-15628972,Male,18,82000,0
-15697686,Male,29,80000,0
-15733883,Male,47,25000,1
-15617482,Male,45,26000,1
-15704583,Male,46,28000,1
-15621083,Female,48,29000,1
-15649487,Male,45,22000,1
-15736760,Female,47,49000,1
-15714658,Male,48,41000,1
-15599081,Female,45,22000,1
-15705113,Male,46,23000,1
-15631159,Male,47,20000,1
-15792818,Male,49,28000,1
-15633531,Female,47,30000,1
-15744529,Male,29,43000,0
-15669656,Male,31,18000,0
-15581198,Male,31,74000,0
-15729054,Female,27,137000,1
-15573452,Female,21,16000,0
-15776733,Female,28,44000,0
-15724858,Male,27,90000,0
-15713144,Male,35,27000,0
-15690188,Female,33,28000,0
-15689425,Male,30,49000,0
-15671766,Female,26,72000,0
-15782806,Female,27,31000,0
-15764419,Female,27,17000,0
-15591915,Female,33,51000,0
-15772798,Male,35,108000,0
-15792008,Male,30,15000,0
-15715541,Female,28,84000,0
-15639277,Male,23,20000,0
-15798850,Male,25,79000,0
-15776348,Female,27,54000,0
-15727696,Male,30,135000,1
-15793813,Female,31,89000,0
-15694395,Female,24,32000,0
-15764195,Female,18,44000,0
-15744919,Female,29,83000,0
-15671655,Female,35,23000,0
-15654901,Female,27,58000,0
-15649136,Female,24,55000,0
-15775562,Female,23,48000,0
-15807481,Male,28,79000,0
-15642885,Male,22,18000,0
-15789109,Female,32,117000,0
-15814004,Male,27,20000,0
-15673619,Male,25,87000,0
-15595135,Female,23,66000,0
-15583681,Male,32,120000,1
-15605000,Female,59,83000,0
-15718071,Male,24,58000,0
-15679760,Male,24,19000,0
-15654574,Female,23,82000,0
-15577178,Female,22,63000,0
-15595324,Female,31,68000,0
-15756932,Male,25,80000,0
-15726358,Female,24,27000,0
-15595228,Female,20,23000,0
-15782530,Female,33,113000,0
-15592877,Male,32,18000,0
-15651983,Male,34,112000,1
-15746737,Male,18,52000,0
-15774179,Female,22,27000,0
-15667265,Female,28,87000,0
-15655123,Female,26,17000,0
-15595917,Male,30,80000,0
-15668385,Male,39,42000,0
-15709476,Male,20,49000,0
-15711218,Male,35,88000,0
-15798659,Female,30,62000,0
-15663939,Female,31,118000,1
-15694946,Male,24,55000,0
-15631912,Female,28,85000,0
-15768816,Male,26,81000,0
-15682268,Male,35,50000,0
-15684801,Male,22,81000,0
-15636428,Female,30,116000,0
-15809823,Male,26,15000,0
-15699284,Female,29,28000,0
-15786993,Female,29,83000,0
-15709441,Female,35,44000,0
-15710257,Female,35,25000,0
-15582492,Male,28,123000,1
-15575694,Male,35,73000,0
-15756820,Female,28,37000,0
-15766289,Male,27,88000,0
-15593014,Male,28,59000,0
-15584545,Female,32,86000,0
-15675949,Female,33,149000,1
-15672091,Female,19,21000,0
-15801658,Male,21,72000,0
-15706185,Female,26,35000,0
-15789863,Male,27,89000,0
-15720943,Male,26,86000,0
-15697997,Female,38,80000,0
-15665416,Female,39,71000,0
-15660200,Female,37,71000,0
-15619653,Male,38,61000,0
-15773447,Male,37,55000,0
-15739160,Male,42,80000,0
-15689237,Male,40,57000,0
-15679297,Male,35,75000,0
-15591433,Male,36,52000,0
-15642725,Male,40,59000,0
-15701962,Male,41,59000,0
-15811613,Female,36,75000,0
-15741049,Male,37,72000,0
-15724423,Female,40,75000,0
-15574305,Male,35,53000,0
-15678168,Female,41,51000,0
-15697020,Female,39,61000,0
-15610801,Male,42,65000,0
-15745232,Male,26,32000,0
-15722758,Male,30,17000,0
-15792102,Female,26,84000,0
-15675185,Male,31,58000,0
-15801247,Male,33,31000,0
-15725660,Male,30,87000,0
-15638963,Female,21,68000,0
-15800061,Female,28,55000,0
-15578006,Male,23,63000,0
-15668504,Female,20,82000,0
-15687491,Male,30,107000,1
-15610403,Female,28,59000,0
-15741094,Male,19,25000,0
-15807909,Male,19,85000,0
-15666141,Female,18,68000,0
-15617134,Male,35,59000,0
-15783029,Male,30,89000,0
-15622833,Female,34,25000,0
-15746422,Female,24,89000,0
-15750839,Female,27,96000,1
-15749130,Female,41,30000,0
-15779862,Male,29,61000,0
-15767871,Male,20,74000,0
-15679651,Female,26,15000,0
-15576219,Male,41,45000,0
-15699247,Male,31,76000,0
-15619087,Female,36,50000,0
-15605327,Male,40,47000,0
-15610140,Female,31,15000,0
-15791174,Male,46,59000,0
-15602373,Male,29,75000,0
-15762605,Male,26,30000,0
-15598840,Female,32,135000,1
-15744279,Male,32,100000,1
-15670619,Male,25,90000,0
-15599533,Female,37,33000,0
-15757837,Male,35,38000,0
-15697574,Female,33,69000,0
-15578738,Female,18,86000,0
-15762228,Female,22,55000,0
-15614827,Female,35,71000,0
-15789815,Male,29,148000,1
-15579781,Female,29,47000,0
-15587013,Male,21,88000,0
-15570932,Male,34,115000,0
-15794661,Female,26,118000,0
-15581654,Female,34,43000,0
-15644296,Female,34,72000,0
-15614420,Female,23,28000,0
-15609653,Female,35,47000,0
-15594577,Male,25,22000,0
-15584114,Male,24,23000,0
-15673367,Female,31,34000,0
-15685576,Male,26,16000,0
-15774727,Female,31,71000,0
-15694288,Female,32,117000,1
-15603319,Male,33,43000,0
-15759066,Female,33,60000,0
-15814816,Male,31,66000,0
-15724402,Female,20,82000,0
-15571059,Female,33,41000,0
-15674206,Male,35,72000,0
-15715160,Male,28,32000,0
-15730448,Male,24,84000,0
-15662067,Female,19,26000,0
-15779581,Male,29,43000,0
-15662901,Male,19,70000,0
-15689751,Male,28,89000,0
-15667742,Male,34,43000,0
-15738448,Female,30,79000,0
-15680243,Female,20,36000,0
-15745083,Male,26,80000,0
-15708228,Male,35,22000,0
-15628523,Male,35,39000,0
-15708196,Male,49,74000,0
-15735549,Female,39,134000,1
-15809347,Female,41,71000,0
-15660866,Female,58,101000,1
-15766609,Female,47,47000,0
-15654230,Female,55,130000,1
-15794566,Female,52,114000,0
-15800890,Female,40,142000,1
-15697424,Female,46,22000,0
-15724536,Female,48,96000,1
-15735878,Male,52,150000,1
-15707596,Female,59,42000,0
-15657163,Male,35,58000,0
-15622478,Male,47,43000,0
-15779529,Female,60,108000,1
-15636023,Male,49,65000,0
-15582066,Male,40,78000,0
-15666675,Female,46,96000,0
-15732987,Male,59,143000,1
-15789432,Female,41,80000,0
-15663161,Male,35,91000,1
-15694879,Male,37,144000,1
-15593715,Male,60,102000,1
-15575002,Female,35,60000,0
-15622171,Male,37,53000,0
-15795224,Female,36,126000,1
-15685346,Male,56,133000,1
-15691808,Female,40,72000,0
-15721007,Female,42,80000,1
-15794253,Female,35,147000,1
-15694453,Male,39,42000,0
-15813113,Male,40,107000,1
-15614187,Male,49,86000,1
-15619407,Female,38,112000,0
-15646227,Male,46,79000,1
-15660541,Male,40,57000,0
-15753874,Female,37,80000,0
-15617877,Female,46,82000,0
-15772073,Female,53,143000,1
-15701537,Male,42,149000,1
-15736228,Male,38,59000,0
-15780572,Female,50,88000,1
-15769596,Female,56,104000,1
-15586996,Female,41,72000,0
-15722061,Female,51,146000,1
-15638003,Female,35,50000,0
-15775590,Female,57,122000,1
-15730688,Male,41,52000,0
-15753102,Female,35,97000,1
-15810075,Female,44,39000,0
-15723373,Male,37,52000,0
-15795298,Female,48,134000,1
-15584320,Female,37,146000,1
-15724161,Female,50,44000,0
-15750056,Female,52,90000,1
-15609637,Female,41,72000,0
-15794493,Male,40,57000,0
-15569641,Female,58,95000,1
-15815236,Female,45,131000,1
-15811177,Female,35,77000,0
-15680587,Male,36,144000,1
-15672821,Female,55,125000,1
-15767681,Female,35,72000,0
-15600379,Male,48,90000,1
-15801336,Female,42,108000,1
-15721592,Male,40,75000,0
-15581282,Male,37,74000,0
-15746203,Female,47,144000,1
-15583137,Male,40,61000,0
-15680752,Female,43,133000,0
-15688172,Female,59,76000,1
-15791373,Male,60,42000,1
-15589449,Male,39,106000,1
-15692819,Female,57,26000,1
-15727467,Male,57,74000,1
-15734312,Male,38,71000,0
-15764604,Male,49,88000,1
-15613014,Female,52,38000,1
-15759684,Female,50,36000,1
-15609669,Female,59,88000,1
-15685536,Male,35,61000,0
-15750447,Male,37,70000,1
-15663249,Female,52,21000,1
-15638646,Male,48,141000,0
-15734161,Female,37,93000,1
-15631070,Female,37,62000,0
-15761950,Female,48,138000,1
-15649668,Male,41,79000,0
-15713912,Female,37,78000,1
-15586757,Male,39,134000,1
-15596522,Male,49,89000,1
-15625395,Male,55,39000,1
-15760570,Male,37,77000,0
-15566689,Female,35,57000,0
-15725794,Female,36,63000,0
-15673539,Male,42,73000,1
-15705298,Female,43,112000,1
-15675791,Male,45,79000,0
-15747043,Male,46,117000,1
-15736397,Female,58,38000,1
-15678201,Male,48,74000,1
-15720745,Female,37,137000,1
-15637593,Male,37,79000,1
-15598070,Female,40,60000,0
-15787550,Male,42,54000,0
-15603942,Female,51,134000,0
-15733973,Female,47,113000,1
-15596761,Male,36,125000,1
-15652400,Female,38,50000,0
-15717893,Female,42,70000,0
-15622585,Male,39,96000,1
-15733964,Female,38,50000,0
-15753861,Female,49,141000,1
-15747097,Female,39,79000,0
-15594762,Female,39,75000,1
-15667417,Female,54,104000,1
-15684861,Male,35,55000,0
-15742204,Male,45,32000,1
-15623502,Male,36,60000,0
-15774872,Female,52,138000,1
-15611191,Female,53,82000,1
-15674331,Male,41,52000,0
-15619465,Female,48,30000,1
-15575247,Female,48,131000,1
-15695679,Female,41,60000,0
-15713463,Male,41,72000,0
-15785170,Female,42,75000,0
-15796351,Male,36,118000,1
-15639576,Female,47,107000,1
-15693264,Male,38,51000,0
-15589715,Female,48,119000,1
-15769902,Male,42,65000,0
-15587177,Male,40,65000,0
-15814553,Male,57,60000,1
-15601550,Female,36,54000,0
-15664907,Male,58,144000,1
-15612465,Male,35,79000,0
-15810800,Female,38,55000,0
-15665760,Male,39,122000,1
-15588080,Female,53,104000,1
-15776844,Male,35,75000,0
-15717560,Female,38,65000,0
-15629739,Female,47,51000,1
-15729908,Male,47,105000,1
-15716781,Female,41,63000,0
-15646936,Male,53,72000,1
-15768151,Female,54,108000,1
-15579212,Male,39,77000,0
-15721835,Male,38,61000,0
-15800515,Female,38,113000,1
-15591279,Male,37,75000,0
-15587419,Female,42,90000,1
-15750335,Female,37,57000,0
-15699619,Male,36,99000,1
-15606472,Male,60,34000,1
-15778368,Male,54,70000,1
-15671387,Female,41,72000,0
-15573926,Male,40,71000,1
-15709183,Male,42,54000,0
-15577514,Male,43,129000,1
-15778830,Female,53,34000,1
-15768072,Female,47,50000,1
-15768293,Female,42,79000,0
-15654456,Male,42,104000,1
-15807525,Female,59,29000,1
-15574372,Female,58,47000,1
-15671249,Male,46,88000,1
-15779744,Male,38,71000,0
-15624755,Female,54,26000,1
-15611430,Female,60,46000,1
-15774744,Male,60,83000,1
-15629885,Female,39,73000,0
-15708791,Male,59,130000,1
-15793890,Female,37,80000,0
-15646091,Female,46,32000,1
-15596984,Female,46,74000,0
-15800215,Female,42,53000,0
-15577806,Male,41,87000,1
-15749381,Female,58,23000,1
-15683758,Male,42,64000,0
-15670615,Male,48,33000,1
-15715622,Female,44,139000,1
-15707634,Male,49,28000,1
-15806901,Female,57,33000,1
-15775335,Male,56,60000,1
-15724150,Female,49,39000,1
-15627220,Male,39,71000,0
-15672330,Male,47,34000,1
-15668521,Female,48,35000,1
-15807837,Male,48,33000,1
-15592570,Male,47,23000,1
-15748589,Female,45,45000,1
-15635893,Male,60,42000,1
-15757632,Female,39,59000,0
-15691863,Female,46,41000,1
-15706071,Male,51,23000,1
-15654296,Female,50,20000,1
-15755018,Male,36,33000,0
-15594041,Female,49,36000,1
\ No newline at end of file
diff --git a/machine_learning/Random Forest Classification/random_forest_classification.py b/machine_learning/Random Forest Classification/random_forest_classification.py
deleted file mode 100644
index d5dde4b13822..000000000000
--- a/machine_learning/Random Forest Classification/random_forest_classification.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# Random Forest Classification
-
-# Importing the libraries
-import numpy as np
-import matplotlib.pyplot as plt
-import pandas as pd
-
-# Importing the dataset
-dataset = pd.read_csv('Social_Network_Ads.csv')
-X = dataset.iloc[:, [2, 3]].values
-y = dataset.iloc[:, 4].values
-
-# Splitting the dataset into the Training set and Test set
-from sklearn.cross_validation import train_test_split
-X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
-
-# Feature Scaling
-from sklearn.preprocessing import StandardScaler
-sc = StandardScaler()
-X_train = sc.fit_transform(X_train)
-X_test = sc.transform(X_test)
-
-# Fitting Random Forest Classification to the Training set
-from sklearn.ensemble import RandomForestClassifier
-classifier = RandomForestClassifier(n_estimators = 10, criterion = 'entropy', random_state = 0)
-classifier.fit(X_train, y_train)
-
-# Predicting the Test set results
-y_pred = classifier.predict(X_test)
-
-# Making the Confusion Matrix
-from sklearn.metrics import confusion_matrix
-cm = confusion_matrix(y_test, y_pred)
-
-# Visualising the Training set results
-from matplotlib.colors import ListedColormap
-X_set, y_set = X_train, y_train
-X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
- np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
-plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
- alpha = 0.75, cmap = ListedColormap(('red', 'green')))
-plt.xlim(X1.min(), X1.max())
-plt.ylim(X2.min(), X2.max())
-for i, j in enumerate(np.unique(y_set)):
- plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
- c = ListedColormap(('red', 'green'))(i), label = j)
-plt.title('Random Forest Classification (Training set)')
-plt.xlabel('Age')
-plt.ylabel('Estimated Salary')
-plt.legend()
-plt.show()
-
-# Visualising the Test set results
-from matplotlib.colors import ListedColormap
-X_set, y_set = X_test, y_test
-X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
- np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
-plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
- alpha = 0.75, cmap = ListedColormap(('red', 'green')))
-plt.xlim(X1.min(), X1.max())
-plt.ylim(X2.min(), X2.max())
-for i, j in enumerate(np.unique(y_set)):
- plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
- c = ListedColormap(('red', 'green'))(i), label = j)
-plt.title('Random Forest Classification (Test set)')
-plt.xlabel('Age')
-plt.ylabel('Estimated Salary')
-plt.legend()
-plt.show()
\ No newline at end of file
diff --git a/machine_learning/Random Forest Regression/Position_Salaries.csv b/machine_learning/Random Forest Regression/Position_Salaries.csv
deleted file mode 100644
index 0c752c72a1d1..000000000000
--- a/machine_learning/Random Forest Regression/Position_Salaries.csv
+++ /dev/null
@@ -1,11 +0,0 @@
-Position,Level,Salary
-Business Analyst,1,45000
-Junior Consultant,2,50000
-Senior Consultant,3,60000
-Manager,4,80000
-Country Manager,5,110000
-Region Manager,6,150000
-Partner,7,200000
-Senior Partner,8,300000
-C-level,9,500000
-CEO,10,1000000
\ No newline at end of file
diff --git a/machine_learning/Random Forest Regression/Random Forest Regression.ipynb b/machine_learning/Random Forest Regression/Random Forest Regression.ipynb
deleted file mode 100644
index 17f4d42bfb0d..000000000000
--- a/machine_learning/Random Forest Regression/Random Forest Regression.ipynb
+++ /dev/null
@@ -1,147 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "code",
- "execution_count": 1,
- "metadata": {
- "collapsed": true
- },
- "outputs": [],
- "source": [
- "# Importing the libraries\n",
- "import numpy as np\n",
- "import matplotlib.pyplot as plt\n",
- "import pandas as pd\n",
- "from sklearn.ensemble import RandomForestRegressor"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 2,
- "metadata": {
- "collapsed": true
- },
- "outputs": [],
- "source": [
- "# Importing the dataset\n",
- "dataset = pd.read_csv('Position_Salaries.csv')\n",
- "X = dataset.iloc[:, 1:2].values\n",
- "y = dataset.iloc[:, 2].values"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 3,
- "metadata": {},
- "outputs": [
- {
- "data": {
- "text/plain": [
- "RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=None,\n",
- " max_features='auto', max_leaf_nodes=None,\n",
- " min_impurity_split=1e-07, min_samples_leaf=1,\n",
- " min_samples_split=2, min_weight_fraction_leaf=0.0,\n",
- " n_estimators=300, n_jobs=1, oob_score=False, random_state=0,\n",
- " verbose=0, warm_start=False)"
- ]
- },
- "execution_count": 3,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "# Fitting Random Forest Regression to the dataset\n",
- "regressor = RandomForestRegressor(n_estimators = 300, random_state = 0)\n",
- "regressor.fit(X, y)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 4,
- "metadata": {
- "collapsed": true
- },
- "outputs": [],
- "source": [
- "# Predicting a new result\n",
- "y_pred = regressor.predict(6.5)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 5,
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "[ 160333.33333333]\n"
- ]
- }
- ],
- "source": [
- "print(y_pred)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 6,
- "metadata": {},
- "outputs": [
- {
- "data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAAAaEAAAEWCAYAAADPZygPAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzt3XmcXFWd9/HPNx1ICAgJkGHJziSKcUGgBwPMuABCADE4\nIuBEySCYUWEE0UeB+MgaxEEFHBl4MgGBsU1YlYisg7KNsiSIQECGGMiCBALZIB2SdOf3/HFPm0pT\nvVSlum5X6vt+vepVt85dzu/e6q5fnXtPnauIwMzMLA998g7AzMzql5OQmZnlxknIzMxy4yRkZma5\ncRIyM7PcOAmZmVlunISsS5JGS+o1ffklHSLppRKWP1XSa5LekrSDpH+QNC+9/mQH61wi6dSKBV0C\nST+TdG4edVvlSZou6ewKbOfTkpoqEVNv4iRU49IHadtjg6Q1Ba8nlrnNxZI+VuFQS6n/QknrC/bj\nWUlHl7mt/sAPgI9HxHYRsRK4ELg0vb69yDq7Ap8DpqfXh6Rj+5akNyX9SdIJ5e9h7yDpZEmt7f6G\nLqtyDJ0mXEl9JYWk1Sm+xekLQs18dkXEyRFxUQU29UtgH0nvq8C2eo2aeSOtuPRBul1EbAcsBI4q\nKHvHtyZJfasfZcc6iaepYL++CcyQtHMZVewK9IuIuQVlI4C5HSwPcCLwq4h4u6BsYYple+D/ANdI\nGl1GPL3NQ4V/QxFxeqkbqNLf1PvS8T8I+AIwqdIVSOrTm5NbZCMLzAS+lHcsldRrD7hVRmpV3CBp\nhqQ3gc+3//ZZeHpL0gxgd+DO9M3zjILlTkjfRJdKOrOTOgemOpZKeknSWZKU5p0s6UFJP5a0DPhO\nV/sQEXcAa4A9itTV9k15ZEHZzySdK+m9pGST9uWetJ/DC/avoUiVhwMPdBBLRMSvgFXABwrq/Ek6\nNqskPS7pgIJ5F6bj/7PUknpG0j4F8/eV9GSaNwPo124fv5xOH74h6ZeSdmu371+R9Oe0/jmSxkh6\nJMUyQ9JWXRzidyjnPUzlf5K0XNKdkoal8j5p2dckrZT0lKSxkr4KHAecnd6LX3QVV0T8L/A74EPt\nYv2ppFfSe3B+WzKR1CDpsnTs5kv6VxWcWpb0sKQLJP0eWA0M72J77077vlLS65J+3tk+pnnt/9+6\nej//Jc1fLunH7Q7B/cCRJbyVvZ6TUH34NPBzYAfghs4WjIjPAX8BDk/fjH9UMPsAYDRwGHCepDEd\nbOY/gAFkSeMg4CSg8PTVAcBzwGDg+53Fo8ynAAF/6mzZIvvyHLBXmt4uIg6NiJHt9q+1yKofAJ7v\nIJ4+kj4NDALmFcx6FPggsCNwM3CTpMJkcjTwX8BA4E7gx2l7/YDbgGvSurelZdvqOxQ4HzgGGJJi\nb9/C/QTZh/KBwBSy4388WYtvb+DYogeocyW9h5I+Q9ZCnJDKHiX7m4MsqY8DxpAdt+OBZRHxH2R/\njxel9+LTXQWVvlgcyKbH/r/IvqT8LbAv2Yf0iWneV4BDyN6bRuAfi2z2C8AXyVq5i7vY3lTg12k/\nhgJXdLaPReLvzvt5RKp3b7IvjYcUzHsOGC1pQJH9qE0R4ccW8gBeAg5pV3Yh8Jt2ZT8Dzi14fQjw\nUsHrxcDHCl6PBgLYtaDsCeCYIjFsBbQA7y4oOwX47zR9MjC/i/24EFgHrACagVbgG8XiBfqm2EYW\n27+22Nttf5P9K1L/BmB0u/o2pHjWpnhO7WR9AW+SnUJq25+7CuZ/EHgrTR8ELAJUMP+xgvivI/uQ\nbpu3fap/aMG+f7hg/h/bHavLgR90EOfJ6b1aUfBoLOc9BO4FJhW87puO1RDgULIvEB8G+nT2t1gk\nxrZ9XEXWUom0ztZp/hCyhNGvYJ0vAPem6QeBkwrmjS/8ewAeBr5b8Lqr7f0cuBIY0i7Obu1jN9/P\ncQXzbwW+WfB6m7TM7uV8RvTGh1tC9WFRJT
YSEUsKXjYD2xVZ7G+ABmBBQdkCsn/uUuL5eUQMjIgB\nZN8uT5Z0Uokhl2sF8K52ZQsjYiDZh8YVwMGFMyV9K52KWgksB7YFCq9htT9226bp3YHFkT5hksJj\nt3vh64hYlbZfeDxfLZheU+R1sfepzcPpOLc9ZlPeezgCuELSCkkrgNfJEvfQiLgHuIrsw/tVSVdJ\nan98u/JBsvfkn4D92Xj8RpCdvny1oO4rgF3S/N3bxVrsb6+wrKvtfYMsSc+W9LSkSQAl7GN33s/O\n/s/atrmiyLZrkpNQfWjfvXo12amWNrt2sXwpXiP7ZjeioGw48HK524+I+cBdwFFF5rWQfePubH9K\n9RTw7g5iWUt22mkfpe7dkj4OnAF8hux02yDgLbIWUVdeIfsWXGh4wfRfKDiW6YNtEJsez0or5z1c\nRNbiKExo20TEowARcVlE7AO8HxhLdryKbadDEbEhImYAs8lOO7bV2wzsWFDv9hHxwTS//fEdVmzT\n7fajw+1FxCuR9Xbbjax1OE3SqC72sdDmvp/vBeZFRHM3l+/1nITq05PAkZIGpYuiX2s3/1WKdALo\njohYT3ZN5CJJ26V/0K+TnZIoS7rAfRgd92j7IzAxXYQ+Evj7cutK7gA+2tHMlIguBb6bit5Fdvrq\ndbJvyeey8Zt6Vx4G+ij7LVNfSccC+xTMnwGcJOmD6frR98h6tC0uYX9KUuZ7eBUwJV2zaesscEya\n3i89+pJ9AVpH1kqC8v7WLga+LGlwRCwi60TyA0nbp2t2oyV9JC17I3C6pN0lDSL7AtHZvne6PUnH\nSmprtawgS2CtXexjoc19Pz9Kdk1xi+EkVJ+uJbvAuYCshTGz3fyLyDoerJBUcpdd4Ktk/4Qvkf1D\nXwdcX+I2JqYeU2+RXeS+n+zaSjFfI+t8sQL4LDCr9JA3cR1wVLuOBe1NJ7tAfDhZ0vpv4AWyfV5F\n9g28SymhfZqs2+3yNP3Lgvl3kV3I/kXa5nCgrN9/laik9zAibgJ+RNYhYxVZa/KwNHsgcDXZ+/MS\n2X60dXiZDuyVeoLd3J3AIuIPwO/Juu4DfJ4s6T9LdgxvYmNr+Eqyv52ngTlknQrWdVFFZ9v7MPC4\npNVk12tOiYiFXexjYexlv5+SRNbhYVp3lq8V2vRUtJkBSPo3sutAP8k7FqscSUcBl0XE3+YdS6lS\nr8zPRsQ/5R1LJTkJmdkWS9K2wD+QtVR3JWuBPBAR3+x0RasaJyEz22JJ2o7sdOJ7yK7V3A6cHhFv\n5hqY/ZWTkJmZ5cYdE8zMLDe9ajDL3mjnnXeOkSNH5h2GmVlNmTNnzusRMbir5ZyEujBy5Ehmz56d\ndxhmZjVF0oKul/LpODMzy5GTkJmZ5cZJyMzMcuMkZGZmuXESMjOz3PRYEpJ0TbrV7TMFZTtKulfS\nC+l5UCpXujXuvHRb3MJbH09Ky7/Qdu+OVL5vup/HvLSuyq3DzMySpiYYORL69Mmem9rf+LWyerIl\ndC3ZXQwLnQncFxFjgPvSa8hujTsmPSaTjXyLpB2Bc8hGrt0POKctqaRlvlSw3vhy6jAzs6SpCSZP\nhgULICJ7njy5RxNRjyWhiHiQd95jfQLZkPCk56MLyq+PzCPAwHSfm8PIbqu7LCKWk91CeHyat31E\nPJLuSHl9u22VUoeZmQFMmQLN7e6X19yclfeQal8T2iUi2u6zsoSNt8wdwqa32F2cyjorX1ykvJw6\n3kHSZEmzJc1eunRpN3fNzKzGLVxYWnkF5NYxIbVgenT01HLriIhpEdEYEY2DB3c56oSZ2ZZh+PDS\nyiug2kno1bZTYOn5tVT+Mpve+31oKuusfGiR8nLqMDMzgKlTYcCATcsGDMjKe0i1k9AsoK2H2yTg\ntoLyE1IPtnHAynRK7W7gUEmDUoeEQ4G707xVksalXnEntNtWKXWYmRnAxIkwbRqMGAFS9jxtWlbe\nQ3psAF
NJM4CPATtLWkzWy+1i4EZJJwELgGPT4ncARwDzgGbgRICIWCbpAuDxtNz5EdHW2eGrZD3w\ntgHuTA9KrcPMzApMnNijSac939SuC42NjeFRtM3MSiNpTkQ0drWcR0wwM7PcOAmZmVlunITMzCw3\nTkJmZpYbJyEzM8uNk5CZmeXGScjMzHLjJGRmZrlxEjIzs9w4CZmZWW6chMzMLDdOQmZmlhsnITMz\ny42TkJmZ5cZJyMzMcuMkZGZmuXESMjOz3DgJmZlZbpyEzMwsN05CZmaWGychMzPLjZOQmZnlxknI\nzMxy4yRkZma5cRIyM7PcOAmZmVlunITMzCw3TkJmZpYbJyEzM8uNk5CZmeXGScjMzHLjJGRmZrnJ\nJQlJ+rqkuZKekTRDUn9JoyQ9KmmepBskbZ2W7Zdez0vzRxZs56xU/rykwwrKx6eyeZLOLCgvWoeZ\nmeWjb7UrlDQE+BowNiLWSLoROB44Arg0ImZKugo4CbgyPS+PiNGSjge+DxwnaWxa733A7sB/S3p3\nquYK4BPAYuBxSbMi4tm0brE6zMy2GLfdBk89tXnbGDYM/vmfKxJOp6qehArq3UbSemAA8ApwEPBP\naf51wLlkCWJCmga4GfiJJKXymRGxFnhR0jxgv7TcvIiYDyBpJjBB0nOd1GFmtsX44hdh2bLN28aB\nB1YnCVX9dFxEvAz8AFhIlnxWAnOAFRHRkhZbDAxJ00OARWndlrT8ToXl7dbpqHynTuowM9tirF8P\np58OLS3lPx54oDqx5nE6bhBZK2YUsAK4CRhf7Tg6I2kyMBlg+PDhOUdjZlaaDRugb19oaMg7kq7l\n0THhEODFiFgaEeuBW4EDgYGS2pLiUODlNP0yMAwgzd8BeKOwvN06HZW/0Ukdm4iIaRHRGBGNgwcP\n3px9NTOrutZW6FMjfZ/zCHMhME7SgHRt52DgWeC3wDFpmUnAbWl6VnpNmv+biIhUfnzqPTcKGAM8\nBjwOjEk94bYm67wwK63TUR1mZluMDRuchDoUEY+SdTB4Ang6xTAN+DZwRupgsBNwdVrlamCnVH4G\ncGbazlzgRrIEdhdwSkS0pms+pwJ3A88BN6Zl6aQOM7MtRi0lIWUNBOtIY2NjzJ49O+8wzMy6raEB\nzj4bLrggvxgkzYmIxq6Wq5FcaWZm3VVLLaEaCdPMzLqj7eSWk5CZmVVda2v2XAvds8FJyMxsi7Jh\nQ/bslpCZmVWdk5CZmeXGScjMzHLjJGRmZrlxEjIzs9y09Y5zEjIzs6prawm5i7aZmVWdT8eZmVlu\nnITMzCw3TkJmZpYbJyEzM8uNe8eZmVlu3BIyM7PcuIu2mZnlxi0hMzPLjZOQmZnlxknIzMxy495x\nZmaWG7eEzMwsN05CZmaWG3fRNjOz3LglZGZmuXESMjOz3Lh3nJmZ5cYtITMzy42TkJmZ5cZJyMzM\ncuMkZGZmuam13wn1zTsAMzPb6OGH4aGHyl9/3rzsuVZaQrkkIUkDgenA+4EAvgg8D9wAjAReAo6N\niOWSBFwOHAE0A/8cEU+k7UwCvpM2e2FEXJfK9wWuBbYB7gBOi4iQtGOxOnp2b83Muu+00+CJJzZv\nG9tsA0OGVCaenpZXrrwcuCsi9gT2Ap4DzgTui4gxwH3pNcDhwJj0mAxcCZASyjnAh4H9gHMkDUrr\nXAl8qWC98am8ozrMzHqFtWthwgR4++3yH6tWwahRee9J91Q9CUnaAfgIcDVARKyLiBXABOC6tNh1\nwNFpegJwfWQeAQZK2g04DLg3Ipal1sy9wPg0b/uIeCQiAri+3baK1WFm1iu0tsLWW0O/fuU/+tbQ\nhZY8WkKjgKXATyX9QdJ0SdsCu0TEK2mZJcAuaXoIsKhg/cWprLPyxUXK6aSOTUiaLGm2pNlLly4t\nZx/NzMrS2lo7nQoqIY8k1BfYB7gyIvYGVtPutFhqwURPBtFZHRExLSIa
I6Jx8ODBPRmGmdkmnIR6\n3mJgcUQ8ml7fTJaUXk2n0kjPr6X5LwPDCtYfmso6Kx9apJxO6jAz6xWchIqQVLFDEhFLgEWS3pOK\nDgaeBWYBk1LZJOC2ND0LOEGZccDKdErtbuBQSYNSh4RDgbvTvFWSxqWedSe021axOszMeoV6S0Ld\nvXz1gqRbgJ9GxLMVqPdfgSZJWwPzgRPJEuKNkk4CFgDHpmXvIOuePY+si/aJABGxTNIFwONpufMj\nYlma/iobu2jfmR4AF3dQh5lZr9DSUlsdCzZXd3d1L+B4YLqkPsA1wMyIWFVOpRHxJNBYZNbBRZYN\n4JQOtnNNiqV9+Wyy3yC1L3+jWB1mZr1FvbWEunU6LiLejIj/jIgDgG+T/T7nFUnXSRrdoxGamdUR\nJ6EiJDVI+pSkXwCXAT8E9gB+RXa6zMzMKqDeklC3rwkBvwUuiYjfFZTfLOkjlQ/LzKw+OQm1k3rG\nXRsR5xebHxFfq3hUZmZ1qt6SUJen4yKiFfh4FWIxM6t7ra3uHVfM7yT9hGwE6tVthW2jWZuZWWW0\ntNRXS6i7SeiA9Fx4Si6AgyobjplZ/YrIbkrnJNRORPh0nJlZD6u1u6JWQrfPPEo6Engf0L+trKPO\nCmZmVrrW1uy5npJQd38ndBVwHNlwOwI+C4zowbjMzOpOWxKqp44J3R1F+4CIOAFYHhHnAfuz6QjW\nZma2mdwS6tia9NwsaXdgPdnN6czMrEKchDp2u6SBwCXAE8BLwMyeCsrMrB61zLgJgIYzToORI6Gp\nKd+AqqC7veMuSJO3SLod6B8RK3suLDOzOtPUROsZU4DP0kALLFgAkydn8yZOzDW0ntRpEpL0j53M\nIyJurXxIZmZ1aMoUWtesBaCBdF6uuRmmTKnfJAQc1cm8AJyEzMwqYeFCWtkdgL60bFK+Jes0CUXE\nidUKxMysrg0fTuuCAApaQql8S+Yfq5qZ9QZTp9J68kXwdkESGjAApk7NN64e1q0klH6sOoBsNO3p\nwDHAYz0Yl5lZzbnwQrjkknLXnkhrHAvAVrTAiBFZAtqCrwdBCQOYRsQHJT0VEedJ+iG+HmRmtonH\nHoN+/TYnb2xF//5w6Dd/DjtVMrLeq7tJqP2PVZfhH6uamW2ipSX7ec+ll+YdSe3obhJq+7HqvwFz\nUtn0ngnJzKw21dtdUSuhq98J/R2wqO3HqpK2A54G/gQ415uZFWhpqa/BRyuhq2F7/h+wDkDSR4CL\nU9lKYFrPhmZmVlvq7a6oldBVzm6IiGVp+jhgWkTcQjZ8z5M9G5qZWW1pbYX+/btezjbqqiXUIKkt\nUR0M/KZgnhudZmYFfDqudF0drhnAA5JeJ+sh9xCApNFkp+TMzCxxx4TSdTVsz1RJ9wG7AfdERKRZ\nfcjusmpmZolbQqXr8nBFxCNFyv63Z8IxM6td7phQuu7e1M7MzLrQ2uqWUKmchMzMKsSn40rnJGRm\nViHumFC63JKQpAZJf0i3C0fSKEmPSpon6QZJW6fyfun1vDR/ZME2zkrlz0s6rKB8fCqbJ+nMgvKi\ndZiZVYJbQqXLsyV0GvBcwevvA5dGxGhgOXBSKj8JWJ7KL03LIWkscDzZPY7GA/+RElsDcAVwODAW\n+FxatrM6zMw2m1tCpcslCUkaChxJGgRVkoCDgJvTItcBR6fpCek1af7BafkJwMyIWBsRLwLzgP3S\nY15EzI+IdcBMYEIXdZiZbTa3hEqXV0voMuBbwIb0eidgRUS03Vh9MTAkTQ8BFgGk+SvT8n8tb7dO\nR+Wd1bEJSZMlzZY0e+nSpeXuo5nVGXfRLl3Vk5CkTwKvRcScLhfOSURMi4jGiGgcPHhw3uGYWY1w\nF+3S5XG4DgQ+JekIoD+wPXA5MFBS39RSGQq8nJZ/GRgGLE7j2O0AvFFQ3qZwnWLlb3RSh5nZZvPp\nuNJVvSUUEWdFxNCIGEnWseA3ETER
+C1wTFpsEnBbmp6VXpPm/yYNHzQLOD71nhsFjAEeAx4HxqSe\ncFunOmaldTqqw8xss7ljQul60++Evg2cIWke2fWbq1P51cBOqfwM4EyAiJgL3Ag8C9wFnBIRramV\ncypwN1nvuxvTsp3VYWa22dwSKl2uhysi7gfuT9PzyXq2tV/mbeCzHaw/FZhapPwO4I4i5UXrMDOr\nBHdMKF1vagmZmdWsDRsgwi2hUvlwmZkBv/41nHdelkjK0baeW0KlcRIyMwPuuguefBI+8Ynyt3HU\nUXDkkZWLqR44CZmZAevWwU47ZS0iqx5fEzIzI0tCW3tI46pzEjIzA9avdxLKg5OQmRluCeXFScjM\nDCehvDgJmZmRJaGttso7ivrjJGRmhltCeXESMjPDSSgvTkJmZjgJ5cVJyMysqYn1f3iare+eBSNH\nQlNT3hHVDSchM6tvTU0weXLWEmIdLFgAkyc7EVWJk5CZ1bcpU6C5mXVsnSUhgObmrNx6nMeOM7Mt\nwptvZnc2LdmClcAOvE1/tmL9xvKFCysVmnXCScjMat4tt8Axx5S79vK/Tg2geWPx8OGbFZN1j5OQ\nmdW8P/85e/7+98vo4TZnNtxwI1q/lgnclpUNGABT33HTZusBTkJmVvPWpUs5Z5xRzp1NG2H889k1\noIULYfiILAFNnFjpMK0IJyEzq3lr10KfPptxa+2JE510cuLecWZW89auhX798o7CyuEkZGY1z0mo\ndjkJmVnNW7vWQ+7UKichM6t5bgnVLichM6t5TkK1y0nIzGreunVOQrXKScjMap6vCdUuJyEzq3k+\nHVe7/GNVM8vV+vXwq1/BmjXlb2PRIthll8rFZNXjJGRmubr3XvjMZzZ/Ox/60OZvw6rPScjMcrU8\nDWJ9zz3ZTU3LNWJERcKxKnMSMrNcrV6dPY8dC0OG5BuLVZ87JphZrprTLXy23TbfOCwfVU9CkoZJ\n+q2kZyXNlXRaKt9R0r2SXkjPg1K5JP1Y0jxJT0nap2Bbk9LyL0iaVFC+r6Sn0zo/lqTO6jCznDQ1\n0XzevwEwYK8x0NSUc0BWbXm0hFqAb0TEWGAccIqkscCZwH0RMQa4L70GOBwYkx6TgSshSyjAOcCH\ngf2AcwqSypXAlwrWG5/KO6rDzKqtqQkmT2b1ivU00MJWC+fB5MlORHWm6kkoIl6JiCfS9JvAc8AQ\nYAJwXVrsOuDoND0BuD4yjwADJe0GHAbcGxHLImI5cC8wPs3bPiIeiYgArm+3rWJ1mFm1TZkCzc00\nM4BtWY0gOzc3ZUrekVkV5XpNSNJIYG/gUWCXiHglzVoCtPX6HwIsKlhtcSrrrHxxkXI6qaN9XJMl\nzZY0e+nSpaXvmJl1beFCAJoZwACa31Fu9SG33nGStgNuAU6PiFXpsg0AERGSoifr76yOiJgGTANo\nbGzs0TjMatmSJVmvthUrylg5WrIn+jCaFzaWDx9emeCsJuSShCRtRZaAmiLi1lT8qqTdIuKVdErt\ntVT+MjCsYPWhqexl4GPtyu9P5UOLLN9ZHWZWhvnzs9/5fP7zMGpUiSs/PRduvx1a1rM/v8/KBgyA\nqVMrHqf1XlVPQqmn2tXAcxHxo4JZs4BJwMXp+baC8lMlzSTrhLAyJZG7gYsKOiMcCpwVEcskrZI0\njuw03wnAv3dRh5mVYdWq7PmUU2DcuFLX/gA0PZVdA1q4EIaPyBLQxImVDtN6sTxaQgcCXwCelvRk\nKjubLDHcKOkkYAFwbJp3B3AEMA9oBk4ESMnmAuDxtNz5EbEsTX8VuBbYBrgzPeikDjMrQ1sSete7\nytzAxIlOOnWu6kkoIh4G1MHsg4ssH8ApHWzrGuCaIuWzgfcXKX+jWB1mVp62JLT99vnGYbXLIyaY\nWdmchGxzeew4s3rU1MSGs7/DKQu/zcJt3g3vfk9ZA7fNm5c9b7ddheOzuuEkZFZv0kgFf2kexFV8\n
mZFrXmTnp5fAqv6w004lbWr77eHEE6GhoYditS2ek5BZvUkjFbzKngBcytc5esNtsGEEPP5SvrFZ\n3fE1IbN6k0YkeI2/AWAXXt2k3Kya3BIyq1ETJsCjj5axol6FaOVt+gMFScgjFVgOnITMatCGDdlg\nA3vvDY2NJa78wgp48AFoaWE3XmEUL3qkAsuNk5BZDVq5MktEEyfC179e6tpjoOkxj1RgvYKTkFkN\nev317HnnncvcgEcqsF7CScis2pqaeOKbP+exJcNhxx2zizv77VfSJhYsyJ5L7FFt1us4CZlVU/qN\nzgnNjzKX98My4KfpUaKGBhg9utIBmlWXk5BZNU2ZQjQ3M589+DJXcg7nZeVDh8Hjj3e+bjvbbAM7\n7NADMZpVkZOQWYluvz1r0JRlwfdooS9rGMBYnmXXtu7RL78Gu1YsRLOa4SRkVqLLL4f/+R8YNqzr\nZd+h737Q0sIHeIqPcf/Gcv9Gx+qUk5BZiV59FQ49FH75yzJWbnoEJk+G5uaNZf6NjtUxD9tjVqIl\nS2CXXcpceeJEmDYNRowAKXueNs3dpa1uuSVk9aOpif/82tN8Y9nZhPpAv37Qd6uSN/PWW7Dr5ly/\n8W90zP7KScjqQ+oafU/ztfRjLSfE9dCyFXz8E/De95a0qYaG7PYFZrb5nISsZixbBjNnQktLGSuf\n+wI0n8RsGmlkNj/km9ACPDsC7nipwpGaWXc5CVnNmDYNzjqr3LXP/evUCVy/sdi3LzDLlZOQ9bym\nJpgyhdULXmf9sD3gO9+BY48teTNz52bXYubOLSOGvfaCxYsQwUBWbCx312izXDkJWc9K12Lub/47\nDmI+sagP/AvZowwf/Wg23FrJLv6Wu0ab9UJOQluy1ALJhusfXvZw/UuXwic/md0+oGR/Hgctc3iD\nnejP20xlCiJg0I7w3e+WvLmDDy4jBti43xU4HmZWOYqIvGPo1RobG2P27Nmlr1ihBNDSAqtXl149\nN94Ip53GhjVvM52TWczQrDvyQQfBnnuWtKn587Ohaj71qWy8spLcMPOvkx/lAb7CVdkLKbshjplt\nkSTNiYgub7noJNSFspJQUxN/Ofm7nPV2wTf9hr6w//6wxx7d3syGDXDnnfDGG6VVX0xf1rMdb0Gf\nPrB96aNejh0LDz2UrV6SkSM33neg0IgR8NJLJcdhZrWhu0nIp+N6wpQprHm7gQf5yMayVuD3fWFx\naZsaOhROOQUGDiwxhjPOALIvGMNYxGe4BQGEYHkVWyBTp/pajJl1yEmoJyxcyN8SvEi7Vs8GwYtV\nSgCX31rGX8C+AAAGXUlEQVS8BVLt3mC+FmNmnfDYcT2how/6aiaAqVOzFkehvFogEydmp942bMie\nnYDMLHES6gm9IQF4oEwzqwE+HdcTesspKA+UaWa9nJNQT3ECMDPrkk/HmZlZbuouCUkaL+l5SfMk\nnZl3PGZm9ayukpCkBuAK4HBgLPA5SWPzjcrMrH7VVRIC9gPmRcT8iFgHzAQm5ByTmVndqrckNARY\nVPB6cSrbhKTJkmZLmr106dKqBWdmVm/cO66IiJgGTAOQtFRSkaEHasrOwOt5B9GL+Hhs5GOxKR+P\njTb3WIzozkL1loReBoYVvB6ayjoUEYN7NKIqkDS7OwMJ1gsfj418LDbl47FRtY5FvZ2OexwYI2mU\npK2B44FZOcdkZla36qolFBEtkk4F7gYagGsiopybRZuZWQXUVRICiIg7gDvyjqPKpuUdQC/j47GR\nj8WmfDw2qsqx8E3tzMwsN/V2TcjMzHoRJyEzM8uNk9AWTNIwSb+V9KykuZJOyzumvElqkPQHSbfn\nHUveJA2UdLOkP0l6TtL+eceUF0lfT/8jz0iaIal/3jFVk6RrJL0m6ZmCsh0l3SvphfQ8qCfqdhLa\nsrUA34iIscA44BSPlcdpwHN5B9FLXA7cFRF7AntRp8dF0hDga0
BjRLyfrOfs8flGVXXXAuPblZ0J\n3BcRY4D70uuKcxLagkXEKxHxRJp+k+xD5h3DFNULSUOBI4HpeceSN0k7AB8BrgaIiHURsSLfqHLV\nF9hGUl9gAPCXnOOpqoh4EFjWrngCcF2avg44uifqdhKqE5JGAnsDj+YbSa4uA74FbMg7kF5gFLAU\n+Gk6PTld0rZ5B5WHiHgZ+AGwEHgFWBkR9+QbVa+wS0S8kqaXALv0RCVOQnVA0nbALcDpEbEq73jy\nIOmTwGsRMSfvWHqJvsA+wJURsTewmh463dLbpWsdE8gS8+7AtpI+n29UvUtkv+Xpkd/zOAlt4SRt\nRZaAmiLi1rzjydGBwKckvUR2C4+DJP0s35BytRhYHBFtLeObyZJSPToEeDEilkbEeuBW4ICcY+oN\nXpW0G0B6fq0nKnES2oJJEtk5/+ci4kd5x5OniDgrIoZGxEiyi86/iYi6/bYbEUuARZLek4oOBp7N\nMaQ8LQTGSRqQ/mcOpk47abQzC5iUpicBt/VEJU5CW7YDgS+Qfet/Mj2OyDso6zX+FWiS9BTwIeCi\nnOPJRWoN3gw8ATxN9rlYV8P3SJoB/B54j6TFkk4CLgY+IekFstbixT1St4ftMTOzvLglZGZmuXES\nMjOz3DgJmZlZbpyEzMwsN05CZmaWGychszJJak3d3p+RdJOkAWVsY3rboLKSzm4373cVivNaScdU\nYls9uU2rT05CZuVbExEfSiMvrwO+XOoGIuLkiGj7kejZ7eb5V/u2xXMSMquMh4DRAJLOSK2jZySd\nnsq2lfRrSX9M5cel8vslNUq6mGwU5yclNaV5b6VnSbokrfd0wbofS+u33ROoKf3iv0OS9pX0gKQ5\nku6WtJukPSU9VrDMSElPd7R85Q+d1bO+eQdgVuvS8P+HA3dJ2hc4EfgwIOBRSQ8AewB/iYgj0zo7\nFG4jIs6UdGpEfKhIFf9INqLBXsDOwOOSHkzz9gbeR3brgf8hGyXj4Q7i3Ar4d2BCRCxNyWxqRHxR\n0taSRkXEi8BxwA0dLQ98sZzjZFaMk5BZ+baR9GSafohsnL6vAL+IiNUAkm4F/gG4C/ihpO8Dt0fE\nQyXU8/fAjIhoJRtU8gHg74BVwGMRsTjV9SQwkg6SEPAe4P3AvanB1EB26wKAG8mSz8Xp+bguljer\nCCchs/Ktad9y6ehsWET8r6R9gCOA70m6JyLOr0AMawumW+n8f1rA3IgodhvvG4CbUtKMiHhB0gc6\nWd6sInxNyKyyHgKOTiMybwt8GnhI0u5Ac0T8jOwGasVum7A+nQIrts3jJDVIGkx2R9THiizXleeB\nwZL2h+z0nKT3AUTEn8mS2P8lS0idLm9WKW4JmVVQRDwh6Vo2JonpEfEHSYcBl0jaAKwnO23X3jTg\nKUlPRMTEgvJfAPsDfyS7sdi3ImKJpD1LjG1d6lb943RNqi/Z3WbnpkVuAC4hu7lbd5Y322weRdvM\nzHLj03FmZpYbJyEzM8uNk5CZmeXGScjMzHLjJGRmZrlxEjIzs9w4CZmZWW7+P0PNi1lCP0XzAAAA\nAElFTkSuQmCC\n",
- "text/plain": [
- ""
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- }
- ],
- "source": [
- "# Visualising the Random Forest Regression results (higher resolution)\n",
- "X_grid = np.arange(min(X), max(X), 0.01)\n",
- "X_grid = X_grid.reshape((len(X_grid), 1))\n",
- "plt.scatter(X, y, color = 'red')\n",
- "plt.plot(X_grid, regressor.predict(X_grid), color = 'blue')\n",
- "plt.title('Truth or Bluff (Random Forest Regression)')\n",
- "plt.xlabel('Position level')\n",
- "plt.ylabel('Salary')\n",
- "plt.show()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": true
- },
- "outputs": [],
- "source": []
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.5.1"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
diff --git a/machine_learning/Random Forest Regression/random_forest_regression.py b/machine_learning/Random Forest Regression/random_forest_regression.py
deleted file mode 100644
index fce58b1fe283..000000000000
--- a/machine_learning/Random Forest Regression/random_forest_regression.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Random Forest Regression
-
-# Importing the libraries
-import numpy as np
-import matplotlib.pyplot as plt
-import pandas as pd
-
-# Importing the dataset
-dataset = pd.read_csv('Position_Salaries.csv')
-X = dataset.iloc[:, 1:2].values
-y = dataset.iloc[:, 2].values
-
-# Splitting the dataset into the Training set and Test set
-"""from sklearn.cross_validation import train_test_split
-X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)"""
-
-# Feature Scaling
-"""from sklearn.preprocessing import StandardScaler
-sc_X = StandardScaler()
-X_train = sc_X.fit_transform(X_train)
-X_test = sc_X.transform(X_test)
-sc_y = StandardScaler()
-y_train = sc_y.fit_transform(y_train)"""
-
-# Fitting Random Forest Regression to the dataset
-from sklearn.ensemble import RandomForestRegressor
-regressor = RandomForestRegressor(n_estimators = 10, random_state = 0)
-regressor.fit(X, y)
-
-# Predicting a new result
-y_pred = regressor.predict(6.5)
-
-# Visualising the Random Forest Regression results (higher resolution)
-X_grid = np.arange(min(X), max(X), 0.01)
-X_grid = X_grid.reshape((len(X_grid), 1))
-plt.scatter(X, y, color = 'red')
-plt.plot(X_grid, regressor.predict(X_grid), color = 'blue')
-plt.title('Truth or Bluff (Random Forest Regression)')
-plt.xlabel('Position level')
-plt.ylabel('Salary')
-plt.show()
\ No newline at end of file
diff --git a/machine_learning/__init__.py b/machine_learning/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/machine_learning/astar.py b/machine_learning/astar.py
new file mode 100644
index 000000000000..ee3fcff0b7bf
--- /dev/null
+++ b/machine_learning/astar.py
@@ -0,0 +1,150 @@
+"""
+The A* algorithm combines features of uniform-cost search and pure
+heuristic search to efficiently compute optimal solutions.
+A* is a best-first search algorithm in which the cost
+associated with a node is f(n) = g(n) + h(n),
+where g(n) is the cost of the path from the initial state to node n and
+h(n) is the heuristic estimate of the cost of a path
+from node n to a goal. A* introduces a heuristic into a
+regular graph-searching algorithm,
+essentially planning ahead at each step so a more optimal decision
+is made. A* is also known as the algorithm with brains.
+"""
+import numpy as np
+
+
+class Cell:
+    """
+    Class Cell represents a cell in the world, which has the properties:
+    position : The position of the cell, represented by a tuple of x and y
+    coordinates, initially set to (0, 0)
+    parent : This contains the parent cell object which we visited
+    before arriving at this cell
+    g,h,f : The parameters for constructing the heuristic function,
+    which can be any function; for simplicity, straight-line
+    distance is used
+    """
+
+ def __init__(self):
+ self.position = (0, 0)
+ self.parent = None
+
+ self.g = 0
+ self.h = 0
+ self.f = 0
+
+    """
+    Overrides the equals method because otherwise cell comparison would
+    give wrong results.
+    """
+
+ def __eq__(self, cell):
+ return self.position == cell.position
+
+ def showcell(self):
+ print(self.position)
+
+
+class Gridworld:
+ """
+ Gridworld class represents the external world here a grid M*M
+ matrix
+ world_size: create a numpy array with the given world_size default is 5
+ """
+
+ def __init__(self, world_size=(5, 5)):
+ self.w = np.zeros(world_size)
+ self.world_x_limit = world_size[0]
+ self.world_y_limit = world_size[1]
+
+ def show(self):
+ print(self.w)
+
+ def get_neigbours(self, cell):
+ """
+ Return the neighbours of cell
+ """
+ neughbour_cord = [
+ (-1, -1),
+ (-1, 0),
+ (-1, 1),
+ (0, -1),
+ (0, 1),
+ (1, -1),
+ (1, 0),
+ (1, 1),
+ ]
+ current_x = cell.position[0]
+ current_y = cell.position[1]
+ neighbours = []
+ for n in neughbour_cord:
+ x = current_x + n[0]
+ y = current_y + n[1]
+ if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
+ c = Cell()
+ c.position = (x, y)
+ c.parent = cell
+ neighbours.append(c)
+ return neighbours
+
+
+def astar(world, start, goal):
+    """
+    Implementation of the A* algorithm.
+    world : Object of the Gridworld
+    start : Object of the cell as start position
+    goal  : Object of the cell as goal position
+
+ >>> p = Gridworld()
+ >>> start = Cell()
+ >>> start.position = (0,0)
+ >>> goal = Cell()
+ >>> goal.position = (4,4)
+ >>> astar(p, start, goal)
+ [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]
+ """
+ _open = []
+ _closed = []
+ _open.append(start)
+
+ while _open:
+ min_f = np.argmin([n.f for n in _open])
+ current = _open[min_f]
+ _closed.append(_open.pop(min_f))
+ if current == goal:
+ break
+ for n in world.get_neigbours(current):
+ for c in _closed:
+ if c == n:
+ continue
+ n.g = current.g + 1
+ x1, y1 = n.position
+ x2, y2 = goal.position
+ n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
+ n.f = n.h + n.g
+
+ for c in _open:
+ if c == n and c.f < n.f:
+ continue
+ _open.append(n)
+ path = []
+ while current.parent is not None:
+ path.append(current.position)
+ current = current.parent
+ path.append(current.position)
+ return path[::-1]
+
+
+if __name__ == "__main__":
+ world = Gridworld()
+    # start position and goal
+ start = Cell()
+ start.position = (0, 0)
+ goal = Cell()
+ goal.position = (4, 4)
+ print(f"path from {start.position} to {goal.position}")
+ s = astar(world, start, goal)
+ # Just for visual reasons
+ for i in s:
+ world.w[i] = 1
+ print(world.w)
diff --git a/machine_learning/data_transformations.py b/machine_learning/data_transformations.py
new file mode 100644
index 000000000000..9e0d747e93fa
--- /dev/null
+++ b/machine_learning/data_transformations.py
@@ -0,0 +1,62 @@
+"""
+Normalization Wikipedia: https://en.wikipedia.org/wiki/Normalization
+Normalization is the process of converting numerical data to a standard range of values.
+This range is typically between [0, 1] or [-1, 1]. The equation for normalization is
+x_norm = (x - x_min)/(x_max - x_min) where x_norm is the normalized value, x is the
+value, x_min is the minimum value within the column or list of data, and x_max is the
+maximum value within the column or list of data. Normalization is used to speed up the
+training of data and put all of the data on a similar scale. This is useful because
+variance in the range of values of a dataset can heavily impact optimization
+(particularly Gradient Descent).
+
+Standardization Wikipedia: https://en.wikipedia.org/wiki/Standardization
+Standardization is the process of converting numerical data to a normally distributed
+range of values. This range will have a mean of 0 and standard deviation of 1. This is
+also known as z-score normalization. The equation for standardization is
+x_std = (x - mu)/(sigma) where mu is the mean of the column or list of values and sigma
+is the standard deviation of the column or list of values.
+
+Choosing between Normalization & Standardization is more of an art than a science, but
+it is often recommended to run experiments with both to see which performs better.
+Additionally, a few rules of thumb are:
+ 1. gaussian (normal) distributions work better with standardization
+ 2. non-gaussian (non-normal) distributions work better with normalization
+ 3. If a column or list of values has extreme values / outliers, use standardization
+"""
+from statistics import mean, stdev
+
+
+def normalization(data: list, ndigits: int = 3) -> list:
+ """
+ Returns a normalized list of values
+ @params: data, a list of values to normalize
+ @returns: a list of normalized values (rounded to ndigits decimal places)
+ @examples:
+ >>> normalization([2, 7, 10, 20, 30, 50])
+ [0.0, 0.104, 0.167, 0.375, 0.583, 1.0]
+ >>> normalization([5, 10, 15, 20, 25])
+ [0.0, 0.25, 0.5, 0.75, 1.0]
+ """
+ # variables for calculation
+ x_min = min(data)
+ x_max = max(data)
+ # normalize data
+ return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]
+
+
+def standardization(data: list, ndigits: int = 3) -> list:
+ """
+ Returns a standardized list of values
+ @params: data, a list of values to standardize
+ @returns: a list of standardized values (rounded to ndigits decimal places)
+ @examples:
+ >>> standardization([2, 7, 10, 20, 30, 50])
+ [-0.999, -0.719, -0.551, 0.009, 0.57, 1.69]
+ >>> standardization([5, 10, 15, 20, 25])
+ [-1.265, -0.632, 0.0, 0.632, 1.265]
+ """
+ # variables for calculation
+ mu = mean(data)
+ sigma = stdev(data)
+ # standardize data
+ return [round((x - mu) / (sigma), ndigits) for x in data]
diff --git a/machine_learning/decision_tree.py b/machine_learning/decision_tree.py
index 71849904ccf2..ace6fb0fa883 100644
--- a/machine_learning/decision_tree.py
+++ b/machine_learning/decision_tree.py
@@ -1,14 +1,13 @@
"""
Implementation of a basic regression decision tree.
Input data set: The input data set must be 1-dimensional with continuous labels.
-Output: The decision tree maps a real number input to a real number output.
+Output: The decision tree maps a real number input to a real number output.
"""
-from __future__ import print_function
-
import numpy as np
+
class Decision_Tree:
- def __init__(self, depth = 5, min_leaf_size = 5):
+ def __init__(self, depth=5, min_leaf_size=5):
self.depth = depth
self.decision_boundary = 0
self.left = None
@@ -19,9 +18,23 @@ def __init__(self, depth = 5, min_leaf_size = 5):
def mean_squared_error(self, labels, prediction):
"""
mean_squared_error:
- @param labels: a one dimensional numpy array
+ @param labels: a one dimensional numpy array
@param prediction: a floating point value
- return value: mean_squared_error calculates the error if prediction is used to estimate the labels
+ return value: mean_squared_error calculates the error if prediction is used to
+ estimate the labels
+ >>> tester = Decision_Tree()
+ >>> test_labels = np.array([1,2,3,4,5,6,7,8,9,10])
+ >>> test_prediction = np.float(6)
+ >>> tester.mean_squared_error(test_labels, test_prediction) == (
+ ... Test_Decision_Tree.helper_mean_squared_error_test(test_labels,
+ ... test_prediction))
+ True
+ >>> test_labels = np.array([1,2,3])
+ >>> test_prediction = np.float(2)
+ >>> tester.mean_squared_error(test_labels, test_prediction) == (
+ ... Test_Decision_Tree.helper_mean_squared_error_test(test_labels,
+ ... test_prediction))
+ True
"""
if labels.ndim != 1:
print("Error: Input labels must be one dimensional")
@@ -32,14 +45,15 @@ def train(self, X, y):
"""
train:
@param X: a one dimensional numpy array
- @param y: a one dimensional numpy array.
+ @param y: a one dimensional numpy array.
The contents of y are the labels for the corresponding X values
train does not have a return value
"""
"""
- this section is to check that the inputs conform to our dimensionality constraints
+ this section is to check that the inputs conform to our dimensionality
+ constraints
"""
if X.ndim != 1:
print("Error: Input data set must be one dimensional")
@@ -60,13 +74,13 @@ def train(self, X, y):
return
best_split = 0
- min_error = self.mean_squared_error(X,np.mean(y)) * 2
-
+ min_error = self.mean_squared_error(X, np.mean(y)) * 2
"""
loop over all possible splits for the decision tree. find the best split.
if no split exists that is less than 2 * error for the entire array
- then the data set is not split and the average for the entire array is used as the predictor
+ then the data set is not split and the average for the entire array is used as
+ the predictor
"""
for i in range(len(X)):
if len(X[:i]) < self.min_leaf_size:
@@ -88,8 +102,12 @@ def train(self, X, y):
right_y = y[best_split:]
self.decision_boundary = X[best_split]
- self.left = Decision_Tree(depth = self.depth - 1, min_leaf_size = self.min_leaf_size)
- self.right = Decision_Tree(depth = self.depth - 1, min_leaf_size = self.min_leaf_size)
+ self.left = Decision_Tree(
+ depth=self.depth - 1, min_leaf_size=self.min_leaf_size
+ )
+ self.right = Decision_Tree(
+ depth=self.depth - 1, min_leaf_size=self.min_leaf_size
+ )
self.left.train(left_X, left_y)
self.right.train(right_X, right_y)
else:
@@ -115,17 +133,37 @@ def predict(self, x):
print("Error: Decision tree not yet trained")
return None
+
+class Test_Decision_Tree:
+ """Decision Tree test class"""
+
+ @staticmethod
+ def helper_mean_squared_error_test(labels, prediction):
+ """
+ helper_mean_squared_error_test:
+ @param labels: a one dimensional numpy array
+ @param prediction: a floating point value
+ return value: helper_mean_squared_error_test calculates the mean squared error
+ """
+ squared_error_sum = np.float(0)
+ for label in labels:
+ squared_error_sum += (label - prediction) ** 2
+
+ return np.float(squared_error_sum / labels.size)
+
+
def main():
"""
- In this demonstration we're generating a sample data set from the sin function in numpy.
- We then train a decision tree on the data set and use the decision tree to predict the
- label of 10 different test values. Then the mean squared error over this test is displayed.
+ In this demonstration we're generating a sample data set from the sin function in
+ numpy. We then train a decision tree on the data set and use the decision tree to
+ predict the label of 10 different test values. Then the mean squared error over
+ this test is displayed.
"""
- X = np.arange(-1., 1., 0.005)
+ X = np.arange(-1.0, 1.0, 0.005)
y = np.sin(X)
- tree = Decision_Tree(depth = 10, min_leaf_size = 10)
- tree.train(X,y)
+ tree = Decision_Tree(depth=10, min_leaf_size=10)
+ tree.train(X, y)
test_cases = (np.random.rand(10) * 2) - 1
predictions = np.array([tree.predict(x) for x in test_cases])
@@ -135,6 +173,9 @@ def main():
print("Predictions: " + str(predictions))
print("Average error: " + str(avg_error))
-
-if __name__ == '__main__':
- main()
\ No newline at end of file
+
+if __name__ == "__main__":
+ main()
+ import doctest
+
+ doctest.testmod(name="mean_squarred_error", verbose=True)
diff --git a/machine_learning/forecasting/__init__.py b/machine_learning/forecasting/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/machine_learning/forecasting/ex_data.csv b/machine_learning/forecasting/ex_data.csv
new file mode 100644
index 000000000000..1c429e649755
--- /dev/null
+++ b/machine_learning/forecasting/ex_data.csv
@@ -0,0 +1,114 @@
+total_user,total_events,days
+18231,0.0,1
+22621,1.0,2
+15675,0.0,3
+23583,1.0,4
+68351,5.0,5
+34338,3.0,6
+19238,0.0,0
+24192,0.0,1
+70349,0.0,2
+103510,0.0,3
+128355,1.0,4
+148484,6.0,5
+153489,3.0,6
+162667,1.0,0
+311430,3.0,1
+435663,7.0,2
+273526,0.0,3
+628588,2.0,4
+454989,13.0,5
+539040,3.0,6
+52974,1.0,0
+103451,2.0,1
+810020,5.0,2
+580982,3.0,3
+216515,0.0,4
+134694,10.0,5
+93563,1.0,6
+55432,1.0,0
+169634,1.0,1
+254908,4.0,2
+315285,3.0,3
+191764,0.0,4
+514284,7.0,5
+181214,4.0,6
+78459,2.0,0
+161620,3.0,1
+245610,4.0,2
+326722,5.0,3
+214578,0.0,4
+312365,5.0,5
+232454,4.0,6
+178368,1.0,0
+97152,1.0,1
+222813,4.0,2
+285852,4.0,3
+192149,1.0,4
+142241,1.0,5
+173011,2.0,6
+56488,3.0,0
+89572,2.0,1
+356082,2.0,2
+172799,0.0,3
+142300,1.0,4
+78432,2.0,5
+539023,9.0,6
+62389,1.0,0
+70247,1.0,1
+89229,0.0,2
+94583,1.0,3
+102455,0.0,4
+129270,0.0,5
+311409,1.0,6
+1837026,0.0,0
+361824,0.0,1
+111379,2.0,2
+76337,2.0,3
+96747,0.0,4
+92058,0.0,5
+81929,2.0,6
+143423,0.0,0
+82939,0.0,1
+74403,1.0,2
+68234,0.0,3
+94556,1.0,4
+80311,0.0,5
+75283,3.0,6
+77724,0.0,0
+49229,2.0,1
+65708,2.0,2
+273864,1.0,3
+1711281,0.0,4
+1900253,5.0,5
+343071,1.0,6
+1551326,0.0,0
+56636,1.0,1
+272782,2.0,2
+1785678,0.0,3
+241866,0.0,4
+461904,0.0,5
+2191901,2.0,6
+102925,0.0,0
+242778,1.0,1
+298608,0.0,2
+322458,10.0,3
+216027,9.0,4
+916052,12.0,5
+193278,12.0,6
+263207,8.0,0
+672948,10.0,1
+281909,1.0,2
+384562,1.0,3
+1027375,2.0,4
+828905,9.0,5
+624188,22.0,6
+392218,8.0,0
+292581,10.0,1
+299869,12.0,2
+769455,20.0,3
+316443,8.0,4
+1212864,24.0,5
+1397338,28.0,6
+223249,8.0,0
+191264,14.0,1
diff --git a/machine_learning/forecasting/run.py b/machine_learning/forecasting/run.py
new file mode 100644
index 000000000000..0e11f958825f
--- /dev/null
+++ b/machine_learning/forecasting/run.py
@@ -0,0 +1,157 @@
+"""
+this is code for forecasting
+but I modified it and used it as a safety checker for data
+for ex: you have a online shop and for some reason some data are
+missing (the amount of data that u expected are not supposed to be)
+ then we can use it
+*ps : 1. ofc we can use normal statistic method but in this case
+ the data is quite absurd and only a little^^
+ 2. ofc u can use this and modified it for forecasting purpose
+ for the next 3 months sales or something,
+ u can just adjust it for ur own purpose
+"""
+
+import numpy as np
+import pandas as pd
+from sklearn.preprocessing import Normalizer
+from sklearn.svm import SVR
+from statsmodels.tsa.statespace.sarimax import SARIMAX
+
+
+def linear_regression_prediction(
+ train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
+) -> float:
+ """
+ First method: linear regression
+ input : training data (date, total_user, total_event) in list of float
+ output : list of total user prediction in float
+ >>> n = linear_regression_prediction([2,3,4,5], [5,3,4,6], [3,1,2,4], [2,1], [2,2])
+ >>> abs(n - 5.0) < 1e-6 # Checking precision because of floating point errors
+ True
+ """
+ x = [[1, item, train_mtch[i]] for i, item in enumerate(train_dt)]
+ x = np.array(x)
+ y = np.array(train_usr)
+ beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
+ return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])
+
+
+def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
+ """
+ second method: Sarimax
+ sarimax is a statistical method which uses previous input
+ and learns its pattern to predict future data
+ input : training data (total_user, with exog data = total_event) in list of float
+ output : list of total user prediction in float
+ >>> sarimax_predictor([4,2,6,8], [3,1,2,4], [2])
+ 6.6666671111109626
+ """
+ order = (1, 2, 1)
+ seasonal_order = (1, 1, 0, 7)
+ model = SARIMAX(
+ train_user, exog=train_match, order=order, seasonal_order=seasonal_order
+ )
+ model_fit = model.fit(disp=False, maxiter=600, method="nm")
+ result = model_fit.predict(1, len(test_match), exog=[test_match])
+ return result[0]
+
+
+def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
+ """
+ Third method: Support vector regressor
+ svr is quite the same with svm(support vector machine)
+ it uses the same principles as the SVM for classification,
+ with only a few minor differences, and the only difference is that
+ it suits better for regression purpose
+ input : training data (date, total_user, total_event) in list of float
+ where x = list of set (date and total event)
+ output : list of total user prediction in float
+ >>> support_vector_regressor([[5,2],[1,5],[6,2]], [[3,2]], [2,1,4])
+ 1.634932078116079
+ """
+ regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
+ regressor.fit(x_train, train_user)
+ y_pred = regressor.predict(x_test)
+ return y_pred[0]
+
+
+def interquartile_range_checker(train_user: list) -> float:
+ """
+ Optional method: interquartile range
+ input : list of total user in float
+ output : low limit of input in float
+ this method can be used to check whether some data is an outlier or not
+ >>> interquartile_range_checker([1,2,3,4,5,6,7,8,9,10])
+ 2.8
+ """
+ train_user.sort()
+ q1 = np.percentile(train_user, 25)
+ q3 = np.percentile(train_user, 75)
+ iqr = q3 - q1
+ low_lim = q1 - (iqr * 0.1)
+ return low_lim
+
+
+def data_safety_checker(list_vote: list, actual_result: float) -> None:
+ """
+ Used to review all the votes (list result prediction)
+ and compare it to the actual result.
+ input : list of predictions
+ output : print whether it's safe or not
+ >>> data_safety_checker([2,3,4],5.0)
+ Today's data is not safe.
+ """
+ safe = 0
+ not_safe = 0
+ for i in list_vote:
+ if i > actual_result:
+ safe = not_safe + 1
+ else:
+ if abs(abs(i) - abs(actual_result)) <= 0.1:
+ safe = safe + 1
+ else:
+ not_safe = not_safe + 1
+ print(f"Today's data is {'not ' if safe <= not_safe else ''}safe.")
+
+
+# data_input_df = pd.read_csv("ex_data.csv", header=None)
+data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
+data_input_df = pd.DataFrame(data_input, columns=["total_user", "total_even", "days"])
+
+"""
+data column = total user in a day, how much online event held in one day,
+what day is that(sunday-saturday)
+"""
+
+# start normalization
+normalize_df = Normalizer().fit_transform(data_input_df.values)
+# split data
+total_date = normalize_df[:, 2].tolist()
+total_user = normalize_df[:, 0].tolist()
+total_match = normalize_df[:, 1].tolist()
+
+# for svr (input variable = total date and total match)
+x = normalize_df[:, [1, 2]].tolist()
+x_train = x[: len(x) - 1]
+x_test = x[len(x) - 1 :]
+
+# for linear regression & sarimax
+trn_date = total_date[: len(total_date) - 1]
+trn_user = total_user[: len(total_user) - 1]
+trn_match = total_match[: len(total_match) - 1]
+
+tst_date = total_date[len(total_date) - 1 :]
+tst_user = total_user[len(total_user) - 1 :]
+tst_match = total_match[len(total_match) - 1 :]
+
+
+# voting system with forecasting
+res_vote = []
+res_vote.append(
+ linear_regression_prediction(trn_date, trn_user, trn_match, tst_date, tst_match)
+)
+res_vote.append(sarimax_predictor(trn_user, trn_match, tst_match))
+res_vote.append(support_vector_regressor(x_train, x_test, trn_user))
+
+# check the safety of today's data^^
+data_safety_checker(res_vote, tst_user)
diff --git a/machine_learning/gaussian_naive_bayes.py b/machine_learning/gaussian_naive_bayes.py
new file mode 100644
index 000000000000..c200aa5a4d2d
--- /dev/null
+++ b/machine_learning/gaussian_naive_bayes.py
@@ -0,0 +1,44 @@
+# Gaussian Naive Bayes Example
+from matplotlib import pyplot as plt
+from sklearn.datasets import load_iris
+from sklearn.metrics import plot_confusion_matrix
+from sklearn.model_selection import train_test_split
+from sklearn.naive_bayes import GaussianNB
+
+
+def main():
+
+ """
+ Gaussian Naive Bayes Example using sklearn function.
+ Iris type dataset is used to demonstrate algorithm.
+ """
+
+ # Load Iris dataset
+ iris = load_iris()
+
+ # Split dataset into train and test data
+ X = iris["data"] # features
+ Y = iris["target"]
+ x_train, x_test, y_train, y_test = train_test_split(
+ X, Y, test_size=0.3, random_state=1
+ )
+
+ # Gaussian Naive Bayes
+ NB_model = GaussianNB()
+ NB_model.fit(x_train, y_train)
+
+ # Display Confusion Matrix
+ plot_confusion_matrix(
+ NB_model,
+ x_test,
+ y_test,
+ display_labels=iris["target_names"],
+ cmap="Blues",
+ normalize="true",
+ )
+ plt.title("Normalized Confusion Matrix - IRIS Dataset")
+ plt.show()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/machine_learning/gradient_boosting_regressor.py b/machine_learning/gradient_boosting_regressor.py
new file mode 100644
index 000000000000..0aa0e7a10ac5
--- /dev/null
+++ b/machine_learning/gradient_boosting_regressor.py
@@ -0,0 +1,66 @@
+"""Implementation of GradientBoostingRegressor in sklearn using the
+ boston dataset which is very popular for regression problem to
+ predict house price.
+"""
+
+import matplotlib.pyplot as plt
+import pandas as pd
+from sklearn.datasets import load_boston
+from sklearn.ensemble import GradientBoostingRegressor
+from sklearn.metrics import mean_squared_error, r2_score
+from sklearn.model_selection import train_test_split
+
+
+def main():
+
+ # loading the dataset from the sklearn
+ df = load_boston()
+ print(df.keys())
+ # now let construct a data frame
+ df_boston = pd.DataFrame(df.data, columns=df.feature_names)
+ # let add the target to the dataframe
+ df_boston["Price"] = df.target
+ # print the first five rows using the head function
+ print(df_boston.head())
+ # Summary statistics
+ print(df_boston.describe().T)
+ # Feature selection
+
+ X = df_boston.iloc[:, :-1]
+ y = df_boston.iloc[:, -1] # target variable
+ # split the data with 75% train and 25% test sets.
+ X_train, X_test, y_train, y_test = train_test_split(
+ X, y, random_state=0, test_size=0.25
+ )
+
+ model = GradientBoostingRegressor(
+ n_estimators=500, max_depth=5, min_samples_split=4, learning_rate=0.01
+ )
+ # training the model
+ model.fit(X_train, y_train)
+ # to see how good the model fit the data
+ training_score = model.score(X_train, y_train).round(3)
+ test_score = model.score(X_test, y_test).round(3)
+ print("Training score of GradientBoosting is :", training_score)
+ print("The test score of GradientBoosting is :", test_score)
+ # Let us evaluate the model by finding the errors
+ y_pred = model.predict(X_test)
+
+ # The mean squared error
+ print("Mean squared error: %.2f" % mean_squared_error(y_test, y_pred))
+ # Explained variance score: 1 is perfect prediction
+ print("Test Variance score: %.2f" % r2_score(y_test, y_pred))
+
+ # So let's run the model against the test data
+ fig, ax = plt.subplots()
+ ax.scatter(y_test, y_pred, edgecolors=(0, 0, 0))
+ ax.plot([y_test.min(), y_test.max()], [y_test.min(), y_test.max()], "k--", lw=4)
+ ax.set_xlabel("Actual")
+ ax.set_ylabel("Predicted")
+ ax.set_title("Truth vs Predicted")
+ # this show function will display the plotting
+ plt.show()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/machine_learning/gradient_descent.py b/machine_learning/gradient_descent.py
index 6387d4939205..9fa460a07562 100644
--- a/machine_learning/gradient_descent.py
+++ b/machine_learning/gradient_descent.py
@@ -1,25 +1,32 @@
"""
-Implementation of gradient descent algorithm for minimizing cost of a linear hypothesis function.
+Implementation of gradient descent algorithm for minimizing cost of a linear hypothesis
+function.
"""
-from __future__ import print_function, division
import numpy
# List of input, output pairs
-train_data = (((5, 2, 3), 15), ((6, 5, 9), 25),
- ((11, 12, 13), 41), ((1, 1, 1), 8), ((11, 12, 13), 41))
+train_data = (
+ ((5, 2, 3), 15),
+ ((6, 5, 9), 25),
+ ((11, 12, 13), 41),
+ ((1, 1, 1), 8),
+ ((11, 12, 13), 41),
+)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
-def _error(example_no, data_set='train'):
+def _error(example_no, data_set="train"):
"""
:param data_set: train data or test data
:param example_no: example number whose error has to be checked
:return: error in example pointed by example number.
"""
- return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)
+ return calculate_hypothesis_value(example_no, data_set) - output(
+ example_no, data_set
+ )
def _hypothesis_value(data_input_tuple):
@@ -33,7 +40,7 @@ def _hypothesis_value(data_input_tuple):
"""
hyp_val = 0
for i in range(len(parameter_vector) - 1):
- hyp_val += data_input_tuple[i]*parameter_vector[i+1]
+ hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
@@ -44,9 +51,9 @@ def output(example_no, data_set):
:param example_no: example whose output is to be fetched
:return: output for that example
"""
- if data_set == 'train':
+ if data_set == "train":
return train_data[example_no][1]
- elif data_set == 'test':
+ elif data_set == "test":
return test_data[example_no][1]
@@ -69,14 +76,15 @@ def summation_of_cost_derivative(index, end=m):
:param index: index wrt derivative is being calculated
:param end: value where summation ends, default is m, number of examples
:return: Returns the summation of cost derivative
- Note: If index is -1, this means we are calculating summation wrt to biased parameter.
+ Note: If index is -1, this means we are calculating summation wrt to biased
+ parameter.
"""
summation_value = 0
for i in range(end):
if index == -1:
summation_value += _error(i)
else:
- summation_value += _error(i)*train_data[i][0][index]
+ summation_value += _error(i) * train_data[i][0][index]
return summation_value
@@ -84,9 +92,10 @@ def get_cost_derivative(index):
"""
:param index: index of the parameter vector wrt to derivative is to be calculated
:return: derivative wrt to that index
- Note: If index is -1, this means we are calculating summation wrt to biased parameter.
+ Note: If index is -1, this means we are calculating summation wrt to biased
+ parameter.
"""
- cost_derivative_value = summation_of_cost_derivative(index, m)/m
+ cost_derivative_value = summation_of_cost_derivative(index, m) / m
return cost_derivative_value
@@ -100,11 +109,16 @@ def run_gradient_descent():
j += 1
temp_parameter_vector = [0, 0, 0, 0]
for i in range(0, len(parameter_vector)):
- cost_derivative = get_cost_derivative(i-1)
- temp_parameter_vector[i] = parameter_vector[i] - \
- LEARNING_RATE*cost_derivative
- if numpy.allclose(parameter_vector, temp_parameter_vector,
- atol=absolute_error_limit, rtol=relative_error_limit):
+ cost_derivative = get_cost_derivative(i - 1)
+ temp_parameter_vector[i] = (
+ parameter_vector[i] - LEARNING_RATE * cost_derivative
+ )
+ if numpy.allclose(
+ parameter_vector,
+ temp_parameter_vector,
+ atol=absolute_error_limit,
+ rtol=relative_error_limit,
+ ):
break
parameter_vector = temp_parameter_vector
print(("Number of iterations:", j))
@@ -112,11 +126,11 @@ def run_gradient_descent():
def test_gradient_descent():
for i in range(len(test_data)):
- print(("Actual output value:", output(i, 'test')))
- print(("Hypothesis output:", calculate_hypothesis_value(i, 'test')))
+ print(("Actual output value:", output(i, "test")))
+ print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
-if __name__ == '__main__':
+if __name__ == "__main__":
run_gradient_descent()
print("\nTesting gradient descent for a linear hypothesis function.\n")
test_gradient_descent()
diff --git a/machine_learning/k_means_clust.py b/machine_learning/k_means_clust.py
index 368739a45fe9..f155d4845f41 100644
--- a/machine_learning/k_means_clust.py
+++ b/machine_learning/k_means_clust.py
@@ -1,173 +1,353 @@
-'''README, Author - Anurag Kumar(mailto:anuragkumarak95@gmail.com)
-
+"""README, Author - Anurag Kumar(mailto:anuragkumarak95@gmail.com)
Requirements:
- sklearn
- numpy
- matplotlib
-
Python:
- 3.5
-
Inputs:
- X , a 2D numpy array of features.
- k , number of clusters to create.
- - initial_centroids , initial centroid values generated by utility function(mentioned in usage).
+ - initial_centroids , initial centroid values generated by utility function(mentioned
+ in usage).
- maxiter , maximum number of iterations to process.
- - heterogeneity , empty list that will be filled with hetrogeneity values if passed to kmeans func.
-
+ - heterogeneity , empty list that will be filled with heterogeneity values if passed
+ to kmeans func.
Usage:
1. define 'k' value, 'X' features array and 'hetrogeneity' empty list
-
2. create initial_centroids,
initial_centroids = get_initial_centroids(
- X,
- k,
- seed=0 # seed value for initial centroid generation, None for randomness(default=None)
+ X,
+ k,
+ seed=0 # seed value for initial centroid generation,
+ # None for randomness(default=None)
)
-
3. find centroids and clusters using kmeans function.
-
centroids, cluster_assignment = kmeans(
- X,
- k,
- initial_centroids,
+ X,
+ k,
+ initial_centroids,
maxiter=400,
- record_heterogeneity=heterogeneity,
+ record_heterogeneity=heterogeneity,
verbose=True # whether to print logs in console or not.(default=False)
)
-
-
- 4. Plot the loss function, hetrogeneity values for every iteration saved in hetrogeneity list.
+ 4. Plot the loss function, heterogeneity values for every iteration saved in
+ heterogeneity list.
plot_heterogeneity(
- heterogeneity,
+ heterogeneity,
k
)
-
- 5. Have fun..
-
-'''
-from __future__ import print_function
-from sklearn.metrics import pairwise_distances
+ 5. Transfer the DataFrame into excel format; it must have a feature called
+ 'Cluster' with k-means clustering numbers in it.
+"""
+import warnings
+
import numpy as np
+import pandas as pd
+from matplotlib import pyplot as plt
+from sklearn.metrics import pairwise_distances
+
+warnings.filterwarnings("ignore")
+
+TAG = "K-MEANS-CLUST/ "
-TAG = 'K-MEANS-CLUST/ '
def get_initial_centroids(data, k, seed=None):
- '''Randomly choose k data points as initial centroids'''
- if seed is not None: # useful for obtaining consistent results
+ """Randomly choose k data points as initial centroids"""
+ if seed is not None: # useful for obtaining consistent results
np.random.seed(seed)
- n = data.shape[0] # number of data points
-
+ n = data.shape[0] # number of data points
+
# Pick K indices from range [0, N).
rand_indices = np.random.randint(0, n, k)
-
+
# Keep centroids as dense format, as many entries will be nonzero due to averaging.
# As long as at least one document in a cluster contains a word,
# it will carry a nonzero weight in the TF-IDF vector of the centroid.
- centroids = data[rand_indices,:]
-
+ centroids = data[rand_indices, :]
+
return centroids
-def centroid_pairwise_dist(X,centroids):
- return pairwise_distances(X,centroids,metric='euclidean')
+
+def centroid_pairwise_dist(X, centroids):
+ return pairwise_distances(X, centroids, metric="euclidean")
+
def assign_clusters(data, centroids):
-
+
# Compute distances between each data point and the set of centroids:
# Fill in the blank (RHS only)
- distances_from_centroids = centroid_pairwise_dist(data,centroids)
-
+ distances_from_centroids = centroid_pairwise_dist(data, centroids)
+
# Compute cluster assignments for each data point:
# Fill in the blank (RHS only)
- cluster_assignment = np.argmin(distances_from_centroids,axis=1)
-
+ cluster_assignment = np.argmin(distances_from_centroids, axis=1)
+
return cluster_assignment
+
def revise_centroids(data, k, cluster_assignment):
new_centroids = []
for i in range(k):
# Select all data points that belong to cluster i. Fill in the blank (RHS only)
- member_data_points = data[cluster_assignment==i]
+ member_data_points = data[cluster_assignment == i]
# Compute the mean of the data points. Fill in the blank (RHS only)
centroid = member_data_points.mean(axis=0)
new_centroids.append(centroid)
new_centroids = np.array(new_centroids)
-
+
return new_centroids
+
def compute_heterogeneity(data, k, centroids, cluster_assignment):
-
+
heterogeneity = 0.0
for i in range(k):
-
+
# Select all data points that belong to cluster i. Fill in the blank (RHS only)
- member_data_points = data[cluster_assignment==i, :]
-
- if member_data_points.shape[0] > 0: # check if i-th cluster is non-empty
+ member_data_points = data[cluster_assignment == i, :]
+
+ if member_data_points.shape[0] > 0: # check if i-th cluster is non-empty
# Compute distances from centroid to data points (RHS only)
- distances = pairwise_distances(member_data_points, [centroids[i]], metric='euclidean')
- squared_distances = distances**2
+ distances = pairwise_distances(
+ member_data_points, [centroids[i]], metric="euclidean"
+ )
+ squared_distances = distances ** 2
heterogeneity += np.sum(squared_distances)
-
+
return heterogeneity
-from matplotlib import pyplot as plt
+
def plot_heterogeneity(heterogeneity, k):
- plt.figure(figsize=(7,4))
+ plt.figure(figsize=(7, 4))
plt.plot(heterogeneity, linewidth=4)
- plt.xlabel('# Iterations')
- plt.ylabel('Heterogeneity')
- plt.title('Heterogeneity of clustering over time, K={0:d}'.format(k))
- plt.rcParams.update({'font.size': 16})
+ plt.xlabel("# Iterations")
+ plt.ylabel("Heterogeneity")
+ plt.title(f"Heterogeneity of clustering over time, K={k:d}")
+ plt.rcParams.update({"font.size": 16})
plt.show()
-def kmeans(data, k, initial_centroids, maxiter=500, record_heterogeneity=None, verbose=False):
- '''This function runs k-means on given data and initial set of centroids.
- maxiter: maximum number of iterations to run.(default=500)
- record_heterogeneity: (optional) a list, to store the history of heterogeneity as function of iterations
- if None, do not store the history.
- verbose: if True, print how many data points changed their cluster labels in each iteration'''
+
+def kmeans(
+ data, k, initial_centroids, maxiter=500, record_heterogeneity=None, verbose=False
+):
+ """This function runs k-means on given data and initial set of centroids.
+ maxiter: maximum number of iterations to run.(default=500)
+ record_heterogeneity: (optional) a list, to store the history of heterogeneity
+ as function of iterations
+ if None, do not store the history.
+ verbose: if True, print how many data points changed their cluster labels in
+ each iteration"""
centroids = initial_centroids[:]
prev_cluster_assignment = None
-
- for itr in range(maxiter):
+
+ for itr in range(maxiter):
if verbose:
- print(itr, end='')
-
+ print(itr, end="")
+
# 1. Make cluster assignments using nearest centroids
- cluster_assignment = assign_clusters(data,centroids)
-
- # 2. Compute a new centroid for each of the k clusters, averaging all data points assigned to that cluster.
- centroids = revise_centroids(data,k, cluster_assignment)
-
+ cluster_assignment = assign_clusters(data, centroids)
+
+ # 2. Compute a new centroid for each of the k clusters, averaging all data
+ # points assigned to that cluster.
+ centroids = revise_centroids(data, k, cluster_assignment)
+
# Check for convergence: if none of the assignments changed, stop
- if prev_cluster_assignment is not None and \
- (prev_cluster_assignment==cluster_assignment).all():
+ if (
+ prev_cluster_assignment is not None
+ and (prev_cluster_assignment == cluster_assignment).all()
+ ):
break
-
- # Print number of new assignments
+
+ # Print number of new assignments
if prev_cluster_assignment is not None:
- num_changed = np.sum(prev_cluster_assignment!=cluster_assignment)
+ num_changed = np.sum(prev_cluster_assignment != cluster_assignment)
if verbose:
- print(' {0:5d} elements changed their cluster assignment.'.format(num_changed))
-
+ print(
+ " {:5d} elements changed their cluster assignment.".format(
+ num_changed
+ )
+ )
+
# Record heterogeneity convergence metric
if record_heterogeneity is not None:
# YOUR CODE HERE
- score = compute_heterogeneity(data,k,centroids,cluster_assignment)
+ score = compute_heterogeneity(data, k, centroids, cluster_assignment)
record_heterogeneity.append(score)
-
+
prev_cluster_assignment = cluster_assignment[:]
-
+
return centroids, cluster_assignment
+
# Mock test below
-if False: # change to true to run this test case.
- import sklearn.datasets as ds
+if False: # change to true to run this test case.
+ from sklearn import datasets as ds
+
dataset = ds.load_iris()
k = 3
heterogeneity = []
- initial_centroids = get_initial_centroids(dataset['data'], k, seed=0)
- centroids, cluster_assignment = kmeans(dataset['data'], k, initial_centroids, maxiter=400,
- record_heterogeneity=heterogeneity, verbose=True)
+ initial_centroids = get_initial_centroids(dataset["data"], k, seed=0)
+ centroids, cluster_assignment = kmeans(
+ dataset["data"],
+ k,
+ initial_centroids,
+ maxiter=400,
+ record_heterogeneity=heterogeneity,
+ verbose=True,
+ )
plot_heterogeneity(heterogeneity, k)
+
+
+def ReportGenerator(
+ df: pd.DataFrame, ClusteringVariables: np.array, FillMissingReport=None
+) -> pd.DataFrame:
+ """
+ Function generates easy-reading clustering report. It takes 2 arguments as an input:
+ DataFrame - dataframe with predicted cluster column;
+ FillMissingReport - dictionary of rules for how to fill missing
+ values for the final report (not included in modeling);
+ in order to run the function following libraries must be imported:
+ import pandas as pd
+ import numpy as np
+ >>> data = pd.DataFrame()
+ >>> data['numbers'] = [1, 2, 3]
+ >>> data['col1'] = [0.5, 2.5, 4.5]
+ >>> data['col2'] = [100, 200, 300]
+ >>> data['col3'] = [10, 20, 30]
+ >>> data['Cluster'] = [1, 1, 2]
+ >>> ReportGenerator(data, ['col1', 'col2'], 0)
+ Features Type Mark 1 2
+ 0 # of Customers ClusterSize False 2.000000 1.000000
+ 1 % of Customers ClusterProportion False 0.666667 0.333333
+ 2 col1 mean_with_zeros True 1.500000 4.500000
+ 3 col2 mean_with_zeros True 150.000000 300.000000
+ 4 numbers mean_with_zeros False 1.500000 3.000000
+ .. ... ... ... ... ...
+ 99 dummy 5% False 1.000000 1.000000
+ 100 dummy 95% False 1.000000 1.000000
+ 101 dummy stdev False 0.000000 NaN
+ 102 dummy mode False 1.000000 1.000000
+ 103 dummy median False 1.000000 1.000000
+
+ [104 rows x 5 columns]
+ """
+ # Fill missing values with given rules
+ if FillMissingReport:
+ df.fillna(value=FillMissingReport, inplace=True)
+ df["dummy"] = 1
+ numeric_cols = df.select_dtypes(np.number).columns
+ report = (
+ df.groupby(["Cluster"])[ # construct report dataframe
+ numeric_cols
+ ] # group by cluster number
+ .agg(
+ [
+ ("sum", np.sum),
+ ("mean_with_zeros", lambda x: np.mean(np.nan_to_num(x))),
+ ("mean_without_zeros", lambda x: x.replace(0, np.NaN).mean()),
+ (
+ "mean_25-75",
+ lambda x: np.mean(
+ np.nan_to_num(
+ sorted(x)[
+ round(len(x) * 25 / 100) : round(len(x) * 75 / 100)
+ ]
+ )
+ ),
+ ),
+ ("mean_with_na", np.mean),
+ ("min", lambda x: x.min()),
+ ("5%", lambda x: x.quantile(0.05)),
+ ("25%", lambda x: x.quantile(0.25)),
+ ("50%", lambda x: x.quantile(0.50)),
+ ("75%", lambda x: x.quantile(0.75)),
+ ("95%", lambda x: x.quantile(0.95)),
+ ("max", lambda x: x.max()),
+ ("count", lambda x: x.count()),
+ ("stdev", lambda x: x.std()),
+ ("mode", lambda x: x.mode()[0]),
+ ("median", lambda x: x.median()),
+ ("# > 0", lambda x: (x > 0).sum()),
+ ]
+ )
+ .T.reset_index()
+ .rename(index=str, columns={"level_0": "Features", "level_1": "Type"})
+ ) # rename columns
+ # calculate the size of cluster(count of clientID's)
+ clustersize = report[
+ (report["Features"] == "dummy") & (report["Type"] == "count")
+ ].copy() # avoid SettingWithCopyWarning
+ clustersize.Type = (
+ "ClusterSize" # rename created cluster df to match report column names
+ )
+ clustersize.Features = "# of Customers"
+ clusterproportion = pd.DataFrame(
+ clustersize.iloc[:, 2:].values
+ / clustersize.iloc[:, 2:].values.sum() # calculating the proportion of cluster
+ )
+ clusterproportion[
+ "Type"
+ ] = "% of Customers" # rename created cluster df to match report column names
+ clusterproportion["Features"] = "ClusterProportion"
+ cols = clusterproportion.columns.tolist()
+ cols = cols[-2:] + cols[:-2]
+ clusterproportion = clusterproportion[cols] # rearrange columns to match report
+ clusterproportion.columns = report.columns
+ a = pd.DataFrame(
+ abs(
+ report[report["Type"] == "count"].iloc[:, 2:].values
+ - clustersize.iloc[:, 2:].values
+ )
+ ) # generating df with count of nan values
+ a["Features"] = 0
+ a["Type"] = "# of nan"
+ a.Features = report[
+ report["Type"] == "count"
+ ].Features.tolist() # filling values in order to match report
+ cols = a.columns.tolist()
+ cols = cols[-2:] + cols[:-2]
+ a = a[cols] # rearrange columns to match report
+ a.columns = report.columns # rename columns to match report
+ report = report.drop(
+ report[report.Type == "count"].index
+ ) # drop count values except cluster size
+ report = pd.concat(
+ [report, a, clustersize, clusterproportion], axis=0
+ ) # concat report with clustert size and nan values
+ report["Mark"] = report["Features"].isin(ClusteringVariables)
+ cols = report.columns.tolist()
+ cols = cols[0:2] + cols[-1:] + cols[2:-1]
+ report = report[cols]
+ sorter1 = {
+ "ClusterSize": 9,
+ "ClusterProportion": 8,
+ "mean_with_zeros": 7,
+ "mean_with_na": 6,
+ "max": 5,
+ "50%": 4,
+ "min": 3,
+ "25%": 2,
+ "75%": 1,
+ "# of nan": 0,
+ "# > 0": -1,
+ "sum_with_na": -2,
+ }
+ report = (
+ report.assign(
+ Sorter1=lambda x: x.Type.map(sorter1),
+ Sorter2=lambda x: list(reversed(range(len(x)))),
+ )
+ .sort_values(["Sorter1", "Mark", "Sorter2"], ascending=False)
+ .drop(["Sorter1", "Sorter2"], axis=1)
+ )
+ report.columns.name = ""
+ report = report.reset_index()
+ report.drop(columns=["index"], inplace=True)
+ return report
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/machine_learning/k_nearest_neighbours.py b/machine_learning/k_nearest_neighbours.py
new file mode 100644
index 000000000000..e90ea09a58c1
--- /dev/null
+++ b/machine_learning/k_nearest_neighbours.py
@@ -0,0 +1,58 @@
+from collections import Counter
+
+import numpy as np
+from sklearn import datasets
+from sklearn.model_selection import train_test_split
+
+# Module-level demo data: the classic iris dataset (150 samples, 4 features,
+# 3 classes). Loaded at import time so the doctest-free demo below can run.
+data = datasets.load_iris()
+
+X = np.array(data["data"])
+y = np.array(data["target"])
+classes = data["target_names"]
+
+# NOTE(review): train_test_split is not seeded here, so the split (and the
+# demo prediction under __main__) differs between runs — confirm if intended.
+X_train, X_test, y_train, y_test = train_test_split(X, y)
+
+
+def euclidean_distance(a, b):
+    """
+    Gives the euclidean distance between two points.
+    :a: first point, as any sequence of numbers
+    :b: second point, same length as `a`
+    :return: the euclidean (L2) distance between `a` and `b` as a float
+    >>> euclidean_distance([0, 0], [3, 4])
+    5.0
+    >>> euclidean_distance([1, 2, 3], [1, 8, 11])
+    10.0
+    """
+    # np.linalg.norm of the difference vector is the L2 distance.
+    return np.linalg.norm(np.array(a) - np.array(b))
+
+
+def classifier(train_data, train_target, classes, point, k=5):
+    """
+    Classifies the point using the KNN algorithm
+    k closest points are found (ranked in ascending order of euclidean distance)
+    Params:
+    :train_data: Set of points that are classified into two or more classes
+    :train_target: List of classes in the order of train_data points
+    :classes: Labels of the classes
+    :point: The data point that needs to be classified
+    :k: Number of nearest neighbours considered in the vote (default 5)
+
+    >>> X_train = [[0, 0], [1, 0], [0, 1], [0.5, 0.5], [3, 3], [2, 3], [3, 2]]
+    >>> y_train = [0, 0, 0, 0, 1, 1, 1]
+    >>> classes = ['A','B']; point = [1.2,1.2]
+    >>> classifier(X_train, y_train, classes,point)
+    'A'
+    """
+    # Pair every training point with its target label.
+    data = zip(train_data, train_target)
+    # List of distances of all points from the point to be classified
+    distances = []
+    for data_point in data:
+        distance = euclidean_distance(data_point[0], point)
+        distances.append((distance, data_point[1]))
+    # Choosing 'k' points with the least distances.
+    # NOTE(review): equal distances tie-break on the target value, since
+    # sorted() compares the (distance, target) tuples lexicographically.
+    votes = [i[1] for i in sorted(distances)[:k]]
+    # Most commonly occurring class among them
+    # is the class into which the point is classified
+    result = Counter(votes).most_common(1)[0][0]
+    return classes[result]
+
+
+if __name__ == "__main__":
+    # Demo: classify one hand-picked 4-feature iris measurement.
+    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
diff --git a/machine_learning/knn_sklearn.py b/machine_learning/knn_sklearn.py
new file mode 100644
index 000000000000..9a9114102ff3
--- /dev/null
+++ b/machine_learning/knn_sklearn.py
@@ -0,0 +1,31 @@
+from sklearn.datasets import load_iris
+from sklearn.model_selection import train_test_split
+from sklearn.neighbors import KNeighborsClassifier
+
+# Load iris file
+iris = load_iris()
+iris.keys()
+
+
+print(f"Target names: \n {iris.target_names} ")
+print(f"\n Features: \n {iris.feature_names}")
+
+# Train set and Test set (seeded split for reproducibility)
+X_train, X_test, y_train, y_test = train_test_split(
+    iris["data"], iris["target"], random_state=4
+)
+
+# KNN classifier with a single neighbour (1-NN)
+
+knn = KNeighborsClassifier(n_neighbors=1)
+knn.fit(X_train, y_train)
+
+# new array to test: two unlabelled 4-feature samples to classify
+X_new = [[1, 2, 1, 4], [2, 3, 4, 5]]
+
+prediction = knn.predict(X_new)
+
+# Map the predicted integer labels back to species names for display.
+print(
+    "\nNew array: \n {}"
+    "\n\nTarget Names Prediction: \n {}".format(X_new, iris["target_names"][prediction])
+)
diff --git a/machine_learning/linear_discriminant_analysis.py b/machine_learning/linear_discriminant_analysis.py
new file mode 100644
index 000000000000..0d19e970e973
--- /dev/null
+++ b/machine_learning/linear_discriminant_analysis.py
@@ -0,0 +1,405 @@
+"""
+ Linear Discriminant Analysis
+
+
+
+ Assumptions About Data :
+ 1. The input variables has a gaussian distribution.
+ 2. The variance calculated for each input variables by class grouping is the
+ same.
+ 3. The mix of classes in your training set is representative of the problem.
+
+
+ Learning The Model :
+ The LDA model requires the estimation of statistics from the training data :
+ 1. Mean of each input value for each class.
+ 2. Probability of an instance belong to each class.
+ 3. Covariance for the input data for each class
+
+ Calculate the class means :
+ mean(x) = 1/n ( for i = 1 to i = n --> sum(xi))
+
+ Calculate the class probabilities :
+ P(y = 0) = count(y = 0) / (count(y = 0) + count(y = 1))
+ P(y = 1) = count(y = 1) / (count(y = 0) + count(y = 1))
+
+ Calculate the variance :
+ We can calculate the variance for dataset in two steps :
+ 1. Calculate the squared difference for each input variable from the
+ group mean.
+ 2. Calculate the mean of the squared difference.
+ ------------------------------------------------
+ Squared_Difference = (x - mean(k)) ** 2
+ Variance = (1 / (count(x) - count(classes))) *
+ (for i = 1 to i = n --> sum(Squared_Difference(xi)))
+
+ Making Predictions :
+ discriminant(x) = x * (mean / variance) -
+ ((mean ** 2) / (2 * variance)) + Ln(probability)
+ ---------------------------------------------------------------------------
+ After calculating the discriminant value for each class, the class with the
+ largest discriminant value is taken as the prediction.
+
+ Author: @EverLookNeverSee
+"""
+from math import log
+from os import name, system
+from random import gauss, seed
+from typing import Callable, TypeVar
+
+
+# Make a training dataset drawn from a gaussian distribution
+def gaussian_distribution(mean: float, std_dev: float, instance_count: int) -> list:
+    """
+    Generate gaussian distribution instances based-on given mean and standard deviation
+    :param mean: mean value of class
+    :param std_dev: value of standard deviation entered by user or default value of it
+    :param instance_count: instance number of class
+    :return: a list containing generated values based-on given mean, std_dev and
+    instance_count
+
+    >>> gaussian_distribution(5.0, 1.0, 20) # doctest: +NORMALIZE_WHITESPACE
+    [6.288184753155463, 6.4494456086997705, 5.066335808938262, 4.235456349028368,
+     3.9078267848958586, 5.031334516831717, 3.977896829989127, 3.56317055489747,
+     5.199311976483754, 5.133374604658605, 5.546468300338232, 4.086029056264687,
+     5.005005283626573, 4.935258239627312, 3.494170998739258, 5.537997178661033,
+     5.320711100998849, 7.3891120432406865, 5.202969177309964, 4.855297691835079]
+    """
+    # seed(1) on every call makes the output deterministic (required by the
+    # doctest) — every class therefore shares the same underlying noise
+    # sequence, only shifted/scaled by mean and std_dev.
+    seed(1)
+    return [gauss(mean, std_dev) for _ in range(instance_count)]
+
+
+# Make corresponding Y flags to detecting classes
+def y_generator(class_count: int, instance_count: list) -> list:
+    """
+    Generate y values for corresponding classes
+    :param class_count: Number of classes(data groupings) in dataset
+    :param instance_count: list with the number of instances of each class, in
+    class order (must have at least class_count entries)
+    :return: corresponding values for data groupings in dataset
+
+    >>> y_generator(1, [10])
+    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+    >>> y_generator(2, [5, 10])
+    [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+    >>> y_generator(4, [10, 5, 15, 20]) # doctest: +NORMALIZE_WHITESPACE
+    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+     2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]
+    """
+    # Class label k is repeated instance_count[k] times, concatenated in order.
+    return [k for k in range(class_count) for _ in range(instance_count[k])]
+
+
+# Calculate the class means
+def calculate_mean(instance_count: int, items: list) -> float:
+    """
+    Calculate given class mean
+    :param instance_count: Number of instances in class
+    :param items: items that related to specific class(data grouping)
+    :return: calculated actual mean of considered class
+
+    >>> items = gaussian_distribution(5.0, 1.0, 20)
+    >>> calculate_mean(len(items), items)
+    5.011267842911003
+    """
+    # the sum of all items divided by number of instances
+    return sum(items) / instance_count
+
+
+# Calculate the class probabilities
+def calculate_probabilities(instance_count: int, total_count: int) -> float:
+    """
+    Calculate the prior probability that a given instance will belong to a class
+    :param instance_count: number of instances in class
+    :param total_count: the number of all instances
+    :return: value of probability for considered class
+
+    >>> calculate_probabilities(20, 60)
+    0.3333333333333333
+    >>> calculate_probabilities(30, 100)
+    0.3
+    """
+    # number of instances in specific class divided by number of all instances
+    return instance_count / total_count
+
+
+# Calculate the variance
+def calculate_variance(items: list, means: list, total_count: int) -> float:
+    """
+    Calculate the variance shared by all classes (pooled within-class variance:
+    squared deviations from each class's own mean, divided by N - k as in the
+    module header formula).
+    :param items: a list containing all items(gaussian distribution of all classes)
+    :param means: a list containing real mean values of each class
+    :param total_count: the number of all instances
+    :return: calculated variance for considered dataset
+
+    >>> items = gaussian_distribution(5.0, 1.0, 20)
+    >>> means = [5.011267842911003]
+    >>> total_count = 20
+    >>> calculate_variance([items], means, total_count)
+    0.9618530973487491
+    """
+    squared_diff = []  # An empty list to store all squared differences
+    # iterate over number of elements in items (one entry per class)
+    for i in range(len(items)):
+        # for loop iterates over number of elements in inner layer of items
+        for j in range(len(items[i])):
+            # appending squared differences to 'squared_diff' list;
+            # each item deviates from its OWN class mean means[i]
+            squared_diff.append((items[i][j] - means[i]) ** 2)
+
+    # one divided by (the number of all instances - number of classes) multiplied by
+    # sum of all squared differences
+    n_classes = len(means)  # Number of classes in dataset
+    return 1 / (total_count - n_classes) * sum(squared_diff)
+
+
+# Making predictions
+def predict_y_values(
+    x_items: list, means: list, variance: float, probabilities: list
+) -> list:
+    """This function predicts new indexes(groups for our data)
+    :param x_items: a list containing all items(gaussian distribution of all classes)
+    :param means: a list containing real mean values of each class
+    :param variance: calculated value of variance by calculate_variance function
+    :param probabilities: a list containing all probabilities of classes
+    :return: a list containing predicted Y values, flattened in the same order
+    the items appear in x_items (class 0's items first, then class 1's, ...)
+
+    >>> x_items = [[6.288184753155463, 6.4494456086997705, 5.066335808938262,
+    ...            4.235456349028368, 3.9078267848958586, 5.031334516831717,
+    ...            3.977896829989127, 3.56317055489747, 5.199311976483754,
+    ...            5.133374604658605, 5.546468300338232, 4.086029056264687,
+    ...            5.005005283626573, 4.935258239627312, 3.494170998739258,
+    ...            5.537997178661033, 5.320711100998849, 7.3891120432406865,
+    ...            5.202969177309964, 4.855297691835079], [11.288184753155463,
+    ...            11.44944560869977, 10.066335808938263, 9.235456349028368,
+    ...            8.907826784895859, 10.031334516831716, 8.977896829989128,
+    ...            8.56317055489747, 10.199311976483754, 10.133374604658606,
+    ...            10.546468300338232, 9.086029056264687, 10.005005283626572,
+    ...            9.935258239627313, 8.494170998739259, 10.537997178661033,
+    ...            10.320711100998848, 12.389112043240686, 10.202969177309964,
+    ...            9.85529769183508], [16.288184753155463, 16.449445608699772,
+    ...            15.066335808938263, 14.235456349028368, 13.907826784895859,
+    ...            15.031334516831716, 13.977896829989128, 13.56317055489747,
+    ...            15.199311976483754, 15.133374604658606, 15.546468300338232,
+    ...            14.086029056264687, 15.005005283626572, 14.935258239627313,
+    ...            13.494170998739259, 15.537997178661033, 15.320711100998848,
+    ...            17.389112043240686, 15.202969177309964, 14.85529769183508]]
+
+    >>> means = [5.011267842911003, 10.011267842911003, 15.011267842911002]
+    >>> variance = 0.9618530973487494
+    >>> probabilities = [0.3333333333333333, 0.3333333333333333, 0.3333333333333333]
+    >>> predict_y_values(x_items, means, variance,
+    ...                  probabilities)  # doctest: +NORMALIZE_WHITESPACE
+    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
+     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+     2, 2, 2, 2, 2, 2, 2, 2, 2]
+
+    """
+    # An empty list to store generated discriminant values of all items in dataset for
+    # each class
+    results = []
+    # for loop iterates over number of elements in list
+    for i in range(len(x_items)):
+        # for loop iterates over number of inner items of each element
+        for j in range(len(x_items[i])):
+            temp = []  # to store all discriminant values of each item as a list
+            # for loop iterates over number of classes we have in our dataset
+            for k in range(len(x_items)):
+                # appending values of discriminants for each class to 'temp' list;
+                # this is the LDA discriminant from the module header:
+                # x * mean/var - mean^2/(2*var) + ln(prior), sharing one variance
+                temp.append(
+                    x_items[i][j] * (means[k] / variance)
+                    - (means[k] ** 2 / (2 * variance))
+                    + log(probabilities[k])
+                )
+            # appending discriminant values of each item to 'results' list
+            results.append(temp)
+
+    # Predicted class = index of the largest discriminant value per item.
+    return [result.index(max(result)) for result in results]
+
+
+# Calculating Accuracy
+def accuracy(actual_y: list, predicted_y: list) -> float:
+    """
+    Calculate the value of accuracy based-on predictions
+    :param actual_y:a list containing initial Y values generated by 'y_generator'
+    function
+    :param predicted_y: a list containing predicted Y values generated by
+    'predict_y_values' function
+    :return: percentage of accuracy
+
+    >>> actual_y = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
+    ...             1, 1 ,1 ,1 ,1 ,1 ,1]
+    >>> predicted_y = [0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0,
+    ...                0, 0, 1, 1, 1, 0, 1, 1, 1]
+    >>> accuracy(actual_y, predicted_y)
+    50.0
+
+    >>> actual_y = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
+    ...             1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
+    >>> predicted_y = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
+    ...                1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
+    >>> accuracy(actual_y, predicted_y)
+    100.0
+    """
+    # iterate over one element of each list at a time (zip mode)
+    # prediction is correct if actual Y value equals to predicted Y value
+    # NOTE(review): zip silently truncates to the shorter list (the first
+    # doctest's predicted_y has one extra element), and the percentage is
+    # always divided by len(actual_y) — confirm that mismatched lengths are
+    # intended to be tolerated.
+    correct = sum(1 for i, j in zip(actual_y, predicted_y) if i == j)
+    # percentage of accuracy equals to number of correct predictions divided by number
+    # of all data and multiplied by 100
+    return (correct / len(actual_y)) * 100
+
+
+# Type variable for the value produced by `input_type` (usually int or float).
+num = TypeVar("num")
+
+
+def valid_input(
+    input_type: Callable[[object], num],  # Usually float or int
+    input_msg: str,
+    err_msg: str,
+    condition: Callable[[num], bool] = lambda x: True,
+    default: str = None,
+) -> num:
+    """
+    Ask for user value and validate that it fulfill a condition.
+
+    :input_type: user input expected type of value
+    :input_msg: message to show user in the screen
+    :err_msg: message to show in the screen in case of error
+    :condition: function that represents the condition that user input is valid.
+    :default: Default value in case the user does not type anything
+    :return: user's input
+    """
+    while True:
+        try:
+            # Empty input falls back to `default`; when default is None too,
+            # input_type(None) is called — for int/float that raises TypeError,
+            # which is NOT caught below. TODO(review): confirm intended.
+            user_input = input_type(input(input_msg).strip() or default)
+            if condition(user_input):
+                return user_input
+            else:
+                print(f"{user_input}: {err_msg}")
+                continue
+        except ValueError:
+            # NOTE(review): if input_type(...) raised, user_input was never
+            # assigned, so this f-string raises NameError on the first failed
+            # attempt of a call — latent bug, left unchanged in this patch.
+            print(
+                f"{user_input}: Incorrect input type, expected {input_type.__name__!r}"
+            )
+
+
+# Main Function
+def main():
+    """Interactive driver: repeatedly builds a synthetic gaussian dataset from
+    user-entered parameters, fits the LDA statistics (means, priors, pooled
+    variance), predicts classes and reports accuracy, until the user quits."""
+    while True:
+        print(" Linear Discriminant Analysis ".center(50, "*"))
+        print("*" * 50, "\n")
+        print("First of all we should specify the number of classes that")
+        print("we want to generate as training dataset")
+        # Trying to get number of classes
+        n_classes = valid_input(
+            input_type=int,
+            condition=lambda x: x > 0,
+            input_msg="Enter the number of classes (Data Groupings): ",
+            err_msg="Number of classes should be positive!",
+        )
+
+        print("-" * 100)
+
+        # Trying to get the value of standard deviation (one std_dev shared by
+        # all classes). NOTE(review): the two prompt fragments concatenate
+        # without a space ("deviation(Default") — cosmetic, left as committed.
+        std_dev = valid_input(
+            input_type=float,
+            condition=lambda x: x >= 0,
+            input_msg=(
+                "Enter the value of standard deviation"
+                "(Default value is 1.0 for all classes): "
+            ),
+            err_msg="Standard deviation should not be negative!",
+            default="1.0",
+        )
+
+        print("-" * 100)
+
+        # Trying to get number of instances in classes and theirs means to generate
+        # dataset
+        counts = []  # An empty list to store instance counts of classes in dataset
+        for i in range(n_classes):
+            user_count = valid_input(
+                input_type=int,
+                condition=lambda x: x > 0,
+                input_msg=(f"Enter The number of instances for class_{i+1}: "),
+                err_msg="Number of instances should be positive!",
+            )
+            counts.append(user_count)
+        print("-" * 100)
+
+        # An empty list to store values of user-entered means of classes
+        user_means = []
+        for a in range(n_classes):
+            user_mean = valid_input(
+                input_type=float,
+                input_msg=(f"Enter the value of mean for class_{a+1}: "),
+                err_msg="This is an invalid value.",
+            )
+            user_means.append(user_mean)
+        print("-" * 100)
+
+        print("Standard deviation: ", std_dev)
+        # print out the number of instances in classes in separated line
+        for i, count in enumerate(counts, 1):
+            print(f"Number of instances in class_{i} is: {count}")
+        print("-" * 100)
+
+        # print out mean values of classes separated line
+        for i, user_mean in enumerate(user_means, 1):
+            print(f"Mean of class_{i} is: {user_mean}")
+        print("-" * 100)
+
+        # Generating training dataset drawn from gaussian distribution.
+        # gaussian_distribution reseeds with seed(1) on every call, so all
+        # classes share the same noise pattern around their means.
+        x = [
+            gaussian_distribution(user_means[j], std_dev, counts[j])
+            for j in range(n_classes)
+        ]
+        print("Generated Normal Distribution: \n", x)
+        print("-" * 100)
+
+        # Generating Ys to detecting corresponding classes
+        y = y_generator(n_classes, counts)
+        print("Generated Corresponding Ys: \n", y)
+        print("-" * 100)
+
+        # Calculating the value of actual mean for each class
+        actual_means = [calculate_mean(counts[k], x[k]) for k in range(n_classes)]
+        # for loop iterates over number of elements in 'actual_means' list and print
+        # out them in separated line
+        for i, actual_mean in enumerate(actual_means, 1):
+            print(f"Actual(Real) mean of class_{i} is: {actual_mean}")
+        print("-" * 100)
+
+        # Calculating the value of probabilities (class priors) for each class
+        probabilities = [
+            calculate_probabilities(counts[i], sum(counts)) for i in range(n_classes)
+        ]
+
+        # for loop iterates over number of elements in 'probabilities' list and print
+        # out them in separated line
+        for i, probability in enumerate(probabilities, 1):
+            print(f"Probability of class_{i} is: {probability}")
+        print("-" * 100)
+
+        # Calculating the pooled variance shared by all classes
+        variance = calculate_variance(x, actual_means, sum(counts))
+        print("Variance: ", variance)
+        print("-" * 100)
+
+        # Predicting Y values
+        # storing predicted Y values in 'pre_indexes' variable
+        pre_indexes = predict_y_values(x, actual_means, variance, probabilities)
+        print("-" * 100)
+
+        # Calculating Accuracy of the model
+        print(f"Accuracy: {accuracy(y, pre_indexes)}")
+        print("-" * 100)
+        print(" DONE ".center(100, "+"))
+
+        if input("Press any key to restart or 'q' for quit: ").strip().lower() == "q":
+            print("\n" + "GoodBye!".center(100, "-") + "\n")
+            break
+        # Clear the console between runs; "cls" on Windows, "clear" elsewhere.
+        system("cls" if name == "nt" else "clear")
+
+
+if __name__ == "__main__":
+    # Run the interactive LDA demo only when executed as a script.
+    main()
diff --git a/machine_learning/linear_regression.py b/machine_learning/linear_regression.py
index 8c23f1f77908..a726629efe00 100644
--- a/machine_learning/linear_regression.py
+++ b/machine_learning/linear_regression.py
@@ -1,39 +1,38 @@
"""
Linear regression is the most basic type of regression commonly used for
-predictive analysis. The idea is preety simple, we have a dataset and we have
-a feature's associated with it. The Features should be choose very cautiously
-as they determine, how much our model will be able to make future predictions.
-We try to set these Feature weights, over many iterations, so that they best
-fits our dataset. In this particular code, i had used a CSGO dataset (ADR vs
+predictive analysis. The idea is pretty simple: we have a dataset and we have
+features associated with it. Features should be chosen very cautiously
+as they determine how much our model will be able to make future predictions.
+We try to set the weight of these features, over many iterations, so that they best
+fit our dataset. In this particular code, I had used a CSGO dataset (ADR vs
Rating). We try to best fit a line through dataset and estimate the parameters.
"""
-from __future__ import print_function
-
-import requests
import numpy as np
+import requests
def collect_dataset():
- """ Collect dataset of CSGO
+ """Collect dataset of CSGO
The dataset contains ADR vs Rating of a Player
:return : dataset obtained from the link, as matrix
"""
- response = requests.get('https://raw.githubusercontent.com/yashLadha/' +
- 'The_Math_of_Intelligence/master/Week1/ADRvs' +
- 'Rating.csv')
+ response = requests.get(
+ "https://raw.githubusercontent.com/yashLadha/"
+ + "The_Math_of_Intelligence/master/Week1/ADRvs"
+ + "Rating.csv"
+ )
lines = response.text.splitlines()
data = []
for item in lines:
- item = item.split(',')
+ item = item.split(",")
data.append(item)
data.pop(0) # This is for removing the labels from the list
dataset = np.matrix(data)
return dataset
-def run_steep_gradient_descent(data_x, data_y,
- len_data, alpha, theta):
- """ Run steep gradient descent and updates the Feature vector accordingly_
+def run_steep_gradient_descent(data_x, data_y, len_data, alpha, theta):
+ """Run steep gradient descent and updates the Feature vector accordingly_
:param data_x : contains the dataset
:param data_y : contains the output associated with each data-entry
:param len_data : length of the data_
@@ -52,7 +51,7 @@ def run_steep_gradient_descent(data_x, data_y,
def sum_of_square_error(data_x, data_y, len_data, theta):
- """ Return sum of square error for error calculation
+ """Return sum of square error for error calculation
:param data_x : contains our dataset
:param data_y : contains the output (result vector)
:param len_data : len of the dataset
@@ -67,7 +66,7 @@ def sum_of_square_error(data_x, data_y, len_data, theta):
def run_linear_regression(data_x, data_y):
- """ Implement Linear regression over the dataset
+ """Implement Linear regression over the dataset
:param data_x : contains our dataset
:param data_y : contains the output (result vector)
:return : feature for line of best fit (Feature vector)
@@ -81,10 +80,9 @@ def run_linear_regression(data_x, data_y):
theta = np.zeros((1, no_features))
for i in range(0, iterations):
- theta = run_steep_gradient_descent(data_x, data_y,
- len_data, alpha, theta)
+ theta = run_steep_gradient_descent(data_x, data_y, len_data, alpha, theta)
error = sum_of_square_error(data_x, data_y, len_data, theta)
- print('At Iteration %d - Error is %.5f ' % (i + 1, error))
+ print("At Iteration %d - Error is %.5f " % (i + 1, error))
return theta
@@ -99,10 +97,10 @@ def main():
theta = run_linear_regression(data_x, data_y)
len_result = theta.shape[1]
- print('Resultant Feature vector : ')
+ print("Resultant Feature vector : ")
for i in range(0, len_result):
- print('%.5f' % (theta[0, i]))
+ print("%.5f" % (theta[0, i]))
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/machine_learning/logistic_regression.py b/machine_learning/logistic_regression.py
index 71952e792e81..48d88ef61185 100644
--- a/machine_learning/logistic_regression.py
+++ b/machine_learning/logistic_regression.py
@@ -1,7 +1,6 @@
#!/usr/bin/python
-# -*- coding: utf-8 -*-
-## Logistic Regression from scratch
+# Logistic Regression from scratch
# In[62]:
@@ -9,20 +8,24 @@
# importing all the required libraries
-''' Implementing logistic regression for classification problem
- Helpful resources : 1.Coursera ML course 2.https://medium.com/@martinpella/logistic-regression-from-scratch-in-python-124c5636b8ac'''
-
+"""
+Implementing logistic regression for classification problem
+Helpful resources:
+Coursera ML course
+https://medium.com/@martinpella/logistic-regression-from-scratch-in-python-124c5636b8ac
+"""
import numpy as np
-import matplotlib.pyplot as plt
+from matplotlib import pyplot as plt
+from sklearn import datasets
# get_ipython().run_line_magic('matplotlib', 'inline')
-from sklearn import datasets
-
# In[67]:
-# sigmoid function or logistic function is used as a hypothesis function in classification problems
+# sigmoid function or logistic function is used as a hypothesis function in
+# classification problems
+
def sigmoid_function(z):
return 1 / (1 + np.exp(-z))
@@ -32,70 +35,53 @@ def cost_function(h, y):
return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()
-# here alpha is the learning rate, X is the feature matrix,y is the target matrix
+def log_likelihood(X, Y, weights):
+ scores = np.dot(X, weights)
+ return np.sum(Y * scores - np.log(1 + np.exp(scores)))
+
-def logistic_reg(
- alpha,
- X,
- y,
- max_iterations=70000,
- ):
- converged = False
- iterations = 0
+# here alpha is the learning rate, X is the feature matrix,y is the target matrix
+def logistic_reg(alpha, X, y, max_iterations=70000):
theta = np.zeros(X.shape[1])
- while not converged:
+ for iterations in range(max_iterations):
z = np.dot(X, theta)
h = sigmoid_function(z)
gradient = np.dot(X.T, h - y) / y.size
- theta = theta - alpha * gradient
-
+ theta = theta - alpha * gradient # updating the weights
z = np.dot(X, theta)
h = sigmoid_function(z)
J = cost_function(h, y)
-
- iterations += 1 # update iterations
-
- if iterations == max_iterations:
- print ('Maximum iterations exceeded!')
- print ('Minimal cost function J=', J)
- converged = True
-
+ if iterations % 100 == 0:
+ print(f"loss: {J} \t") # printing the loss after every 100 iterations
return theta
# In[68]:
-if __name__ == '__main__':
+if __name__ == "__main__":
iris = datasets.load_iris()
X = iris.data[:, :2]
y = (iris.target != 0) * 1
alpha = 0.1
theta = logistic_reg(alpha, X, y, max_iterations=70000)
- print (theta)
-
+ print("theta: ", theta) # printing the theta i.e our weights vector
def predict_prob(X):
- return sigmoid_function(np.dot(X, theta)) # predicting the value of probability from the logistic regression algorithm
-
+ return sigmoid_function(
+ np.dot(X, theta)
+ ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
- plt.scatter(X[y == 0][:, 0], X[y == 0][:, 1], color='b', label='0')
- plt.scatter(X[y == 1][:, 0], X[y == 1][:, 1], color='r', label='1')
+ plt.scatter(X[y == 0][:, 0], X[y == 0][:, 1], color="b", label="0")
+ plt.scatter(X[y == 1][:, 0], X[y == 1][:, 1], color="r", label="1")
(x1_min, x1_max) = (X[:, 0].min(), X[:, 0].max())
(x2_min, x2_max) = (X[:, 1].min(), X[:, 1].max())
- (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max),
- np.linspace(x2_min, x2_max))
+ (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
grid = np.c_[xx1.ravel(), xx2.ravel()]
probs = predict_prob(grid).reshape(xx1.shape)
- plt.contour(
- xx1,
- xx2,
- probs,
- [0.5],
- linewidths=1,
- colors='black',
- )
+ plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
plt.legend()
+ plt.show()
diff --git a/machine_learning/lstm/__init__.py b/machine_learning/lstm/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/machine_learning/lstm/lstm_prediction.py_tf b/machine_learning/lstm/lstm_prediction.py_tf
new file mode 100644
index 000000000000..5452f0443f62
--- /dev/null
+++ b/machine_learning/lstm/lstm_prediction.py_tf
@@ -0,0 +1,54 @@
+"""
+    Create a Long Short Term Memory (LSTM) network model
+    An LSTM is a type of Recurrent Neural Network (RNN) as discussed at:
+    * http://colah.github.io/posts/2015-08-Understanding-LSTMs
+    * https://en.wikipedia.org/wiki/Long_short-term_memory
+"""
+import numpy as np
+import pandas as pd
+from keras.layers import LSTM, Dense
+from keras.models import Sequential
+from sklearn.preprocessing import MinMaxScaler
+
+if __name__ == "__main__":
+    """
+    First part of building a model is to get the data and prepare
+    it for our model. You can use any dataset for stock prediction
+    make sure you set the price column on line number 21. Here we
+    use a dataset which have the price on 3rd column.
+    """
+    # Headerless CSV; rows are trading days, columns are date/price/volume/...
+    df = pd.read_csv("sample_data.csv", header=None)
+    len_data = df.shape[:1][0]  # number of rows in the dataset
+    # If you're using some other dataset input the target column.
+    # NOTE(review): iloc[:, 1:2] selects the SECOND column (0-indexed col 1),
+    # while the comment above says "3rd column" — confirm against sample_data.csv.
+    actual_data = df.iloc[:, 1:2]
+    actual_data = actual_data.values.reshape(len_data, 1)
+    # Scale the target to [0, 1] before feeding the network.
+    actual_data = MinMaxScaler().fit_transform(actual_data)
+    look_back = 10  # timesteps of history per training sample
+    forward_days = 5  # timesteps predicted per sample
+    periods = 20  # number of look_back windows reserved for testing
+    division = len_data - periods * look_back
+    train_data = actual_data[:division]
+    # test set starts look_back rows early so the first test window has history
+    test_data = actual_data[division - look_back :]
+    train_x, train_y = [], []
+    test_x, test_y = [], []
+
+    # Slide a (look_back -> forward_days) window over each split.
+    for i in range(0, len(train_data) - forward_days - look_back + 1):
+        train_x.append(train_data[i : i + look_back])
+        train_y.append(train_data[i + look_back : i + look_back + forward_days])
+    for i in range(0, len(test_data) - forward_days - look_back + 1):
+        test_x.append(test_data[i : i + look_back])
+        test_y.append(test_data[i + look_back : i + look_back + forward_days])
+    x_train = np.array(train_x)
+    x_test = np.array(test_x)
+    # Flatten each forward_days x 1 target window into a flat row vector.
+    y_train = np.array([list(i.ravel()) for i in train_y])
+    y_test = np.array([list(i.ravel()) for i in test_y])
+
+    # Two stacked LSTMs feeding a dense layer that emits forward_days values.
+    model = Sequential()
+    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
+    model.add(LSTM(64, input_shape=(128, 1)))
+    model.add(Dense(forward_days))
+    model.compile(loss="mean_squared_error", optimizer="adam")
+    history = model.fit(
+        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
+    )
+    pred = model.predict(x_test)
diff --git a/machine_learning/lstm/sample_data.csv b/machine_learning/lstm/sample_data.csv
new file mode 100644
index 000000000000..f94db621f619
--- /dev/null
+++ b/machine_learning/lstm/sample_data.csv
@@ -0,0 +1,1259 @@
+04/24/2020, 1279.31, 1640394, 1261.17, 1280.4, 1249.45
+04/23/2020, 1276.31, 1566203, 1271.55, 1293.31, 1265.67
+04/22/2020, 1263.21, 2093140, 1245.54, 1285.6133, 1242
+04/21/2020, 1216.34, 2153003, 1247, 1254.27, 1209.71
+04/20/2020, 1266.61, 1695488, 1271, 1281.6, 1261.37
+04/17/2020, 1283.25, 1949042, 1284.85, 1294.43, 1271.23
+04/16/2020, 1263.47, 2518099, 1274.1, 1279, 1242.62
+04/15/2020, 1262.47, 1671703, 1245.61, 1280.46, 1240.4
+04/14/2020, 1269.23, 2470353, 1245.09, 1282.07, 1236.93
+04/13/2020, 1217.56, 1739828, 1209.18, 1220.51, 1187.5984
+04/09/2020, 1211.45, 2175421, 1224.08, 1225.57, 1196.7351
+04/08/2020, 1210.28, 1975135, 1206.5, 1219.07, 1188.16
+04/07/2020, 1186.51, 2387329, 1221, 1225, 1182.23
+04/06/2020, 1186.92, 2664723, 1138, 1194.66, 1130.94
+04/03/2020, 1097.88, 2313400, 1119.015, 1123.54, 1079.81
+04/02/2020, 1120.84, 1964881, 1098.26, 1126.86, 1096.4
+04/01/2020, 1105.62, 2344173, 1122, 1129.69, 1097.45
+03/31/2020, 1162.81, 2487983, 1147.3, 1175.31, 1138.14
+03/30/2020, 1146.82, 2574061, 1125.04, 1151.63, 1096.48
+03/27/2020, 1110.71, 3208495, 1125.67, 1150.6702, 1105.91
+03/26/2020, 1161.75, 3573755, 1111.8, 1169.97, 1093.53
+03/25/2020, 1102.49, 4081528, 1126.47, 1148.9, 1086.01
+03/24/2020, 1134.46, 3344450, 1103.77, 1135, 1090.62
+03/23/2020, 1056.62, 4044137, 1061.32, 1071.32, 1013.5361
+03/20/2020, 1072.32, 3601750, 1135.72, 1143.99, 1065.49
+03/19/2020, 1115.29, 3651106, 1093.05, 1157.9699, 1060.1075
+03/18/2020, 1096.8, 4233435, 1056.51, 1106.5, 1037.28
+03/17/2020, 1119.8, 3861489, 1093.11, 1130.86, 1056.01
+03/16/2020, 1084.33, 4252365, 1096, 1152.2665, 1074.44
+03/13/2020, 1219.73, 3700125, 1179, 1219.76, 1117.1432
+03/12/2020, 1114.91, 4226748, 1126, 1193.87, 1113.3
+03/11/2020, 1215.41, 2611229, 1249.7, 1260.96, 1196.07
+03/10/2020, 1280.39, 2611373, 1260, 1281.15, 1218.77
+03/09/2020, 1215.56, 3365365, 1205.3, 1254.7599, 1200
+03/06/2020, 1298.41, 2660628, 1277.06, 1306.22, 1261.05
+03/05/2020, 1319.04, 2561288, 1350.2, 1358.91, 1305.1
+03/04/2020, 1386.52, 1913315, 1359.23, 1388.09, 1343.11
+03/03/2020, 1341.39, 2402326, 1399.42, 1410.15, 1332
+03/02/2020, 1389.11, 2431468, 1351.61, 1390.87, 1326.815
+02/28/2020, 1339.33, 3790618, 1277.5, 1341.14, 1271
+02/27/2020, 1318.09, 2978300, 1362.06, 1371.7037, 1317.17
+02/26/2020, 1393.18, 2204037, 1396.14, 1415.7, 1379
+02/25/2020, 1388.45, 2478278, 1433, 1438.14, 1382.4
+02/24/2020, 1421.59, 2867053, 1426.11, 1436.97, 1411.39
+02/21/2020, 1485.11, 1732273, 1508.03, 1512.215, 1480.44
+02/20/2020, 1518.15, 1096552, 1522, 1529.64, 1506.82
+02/19/2020, 1526.69, 949268, 1525.07, 1532.1063, 1521.4
+02/18/2020, 1519.67, 1121140, 1515, 1531.63, 1512.59
+02/14/2020, 1520.74, 1197836, 1515.6, 1520.74, 1507.34
+02/13/2020, 1514.66, 929730, 1512.69, 1527.18, 1504.6
+02/12/2020, 1518.27, 1167565, 1514.48, 1520.695, 1508.11
+02/11/2020, 1508.79, 1344633, 1511.81, 1529.63, 1505.6378
+02/10/2020, 1508.68, 1419876, 1474.32, 1509.5, 1474.32
+02/07/2020, 1479.23, 1172270, 1467.3, 1485.84, 1466.35
+02/06/2020, 1476.23, 1679384, 1450.33, 1481.9997, 1449.57
+02/05/2020, 1448.23, 1986157, 1462.42, 1463.84, 1430.56
+02/04/2020, 1447.07, 3932954, 1457.07, 1469.5, 1426.3
+02/03/2020, 1485.94, 3055216, 1462, 1490, 1458.99
+01/31/2020, 1434.23, 2417214, 1468.9, 1470.13, 1428.53
+01/30/2020, 1455.84, 1339421, 1439.96, 1457.28, 1436.4
+01/29/2020, 1458.63, 1078667, 1458.8, 1465.43, 1446.74
+01/28/2020, 1452.56, 1577422, 1443, 1456, 1432.47
+01/27/2020, 1433.9, 1755201, 1431, 1438.07, 1421.2
+01/24/2020, 1466.71, 1784644, 1493.59, 1495.495, 1465.25
+01/23/2020, 1486.65, 1351354, 1487.64, 1495.52, 1482.1
+01/22/2020, 1485.95, 1610846, 1491, 1503.2143, 1484.93
+01/21/2020, 1484.4, 2036780, 1479.12, 1491.85, 1471.2
+01/17/2020, 1480.39, 2396215, 1462.91, 1481.2954, 1458.22
+01/16/2020, 1451.7, 1173688, 1447.44, 1451.99, 1440.92
+01/15/2020, 1439.2, 1282685, 1430.21, 1441.395, 1430.21
+01/14/2020, 1430.88, 1560453, 1439.01, 1441.8, 1428.37
+01/13/2020, 1439.23, 1653482, 1436.13, 1440.52, 1426.02
+01/10/2020, 1429.73, 1821566, 1427.56, 1434.9292, 1418.35
+01/09/2020, 1419.83, 1502664, 1420.57, 1427.33, 1410.27
+01/08/2020, 1404.32, 1529177, 1392.08, 1411.58, 1390.84
+01/07/2020, 1393.34, 1511693, 1397.94, 1402.99, 1390.38
+01/06/2020, 1394.21, 1733149, 1350, 1396.5, 1350
+01/03/2020, 1360.66, 1187006, 1347.86, 1372.5, 1345.5436
+01/02/2020, 1367.37, 1406731, 1341.55, 1368.14, 1341.55
+12/31/2019, 1337.02, 962468, 1330.11, 1338, 1329.085
+12/30/2019, 1336.14, 1051323, 1350, 1353, 1334.02
+12/27/2019, 1351.89, 1038718, 1362.99, 1364.53, 1349.31
+12/26/2019, 1360.4, 667754, 1346.17, 1361.3269, 1344.47
+12/24/2019, 1343.56, 347518, 1348.5, 1350.26, 1342.78
+12/23/2019, 1348.84, 883200, 1355.87, 1359.7999, 1346.51
+12/20/2019, 1349.59, 3316905, 1363.35, 1363.64, 1349
+12/19/2019, 1356.04, 1470112, 1351.82, 1358.1, 1348.985
+12/18/2019, 1352.62, 1657069, 1356.6, 1360.47, 1351
+12/17/2019, 1355.12, 1855259, 1362.89, 1365, 1351.3231
+12/16/2019, 1361.17, 1397451, 1356.5, 1364.68, 1352.67
+12/13/2019, 1347.83, 1550028, 1347.95, 1353.0931, 1343.87
+12/12/2019, 1350.27, 1281722, 1345.94, 1355.775, 1340.5
+12/11/2019, 1345.02, 850796, 1350.84, 1351.2, 1342.67
+12/10/2019, 1344.66, 1094653, 1341.5, 1349.975, 1336.04
+12/09/2019, 1343.56, 1355795, 1338.04, 1359.45, 1337.84
+12/06/2019, 1340.62, 1315510, 1333.44, 1344, 1333.44
+12/05/2019, 1328.13, 1212818, 1328, 1329.3579, 1316.44
+12/04/2019, 1320.54, 1538110, 1307.01, 1325.8, 1304.87
+12/03/2019, 1295.28, 1268647, 1279.57, 1298.461, 1279
+12/02/2019, 1289.92, 1511851, 1301, 1305.83, 1281
+11/29/2019, 1304.96, 586981, 1307.12, 1310.205, 1303.97
+11/27/2019, 1312.99, 996329, 1315, 1318.36, 1309.63
+11/26/2019, 1313.55, 1069795, 1309.86, 1314.8, 1305.09
+11/25/2019, 1306.69, 1036487, 1299.18, 1311.31, 1298.13
+11/22/2019, 1295.34, 1386506, 1305.62, 1308.73, 1291.41
+11/21/2019, 1301.35, 995499, 1301.48, 1312.59, 1293
+11/20/2019, 1303.05, 1309835, 1311.74, 1315, 1291.15
+11/19/2019, 1315.46, 1269372, 1327.7, 1327.7, 1312.8
+11/18/2019, 1320.7, 1488083, 1332.22, 1335.5288, 1317.5
+11/15/2019, 1334.87, 1782955, 1318.94, 1334.88, 1314.2796
+11/14/2019, 1311.46, 1194305, 1297.5, 1317, 1295.65
+11/13/2019, 1298, 853861, 1294.07, 1304.3, 1293.51
+11/12/2019, 1298.8, 1085859, 1300, 1310, 1295.77
+11/11/2019, 1299.19, 1012429, 1303.18, 1306.425, 1297.41
+11/08/2019, 1311.37, 1251916, 1305.28, 1318, 1304.365
+11/07/2019, 1308.86, 2029970, 1294.28, 1323.74, 1294.245
+11/06/2019, 1291.8, 1152977, 1289.46, 1293.73, 1282.5
+11/05/2019, 1292.03, 1282711, 1292.89, 1298.93, 1291.2289
+11/04/2019, 1291.37, 1500964, 1276.45, 1294.13, 1276.355
+11/01/2019, 1273.74, 1670072, 1265, 1274.62, 1260.5
+10/31/2019, 1260.11, 1455651, 1261.28, 1267.67, 1250.8428
+10/30/2019, 1261.29, 1408851, 1252.97, 1269.36, 1252
+10/29/2019, 1262.62, 1886380, 1276.23, 1281.59, 1257.2119
+10/28/2019, 1290, 2613237, 1275.45, 1299.31, 1272.54
+10/25/2019, 1265.13, 1213051, 1251.03, 1269.6, 1250.01
+10/24/2019, 1260.99, 1039868, 1260.9, 1264, 1253.715
+10/23/2019, 1259.13, 928595, 1242.36, 1259.89, 1242.36
+10/22/2019, 1242.8, 1047851, 1247.85, 1250.6, 1241.38
+10/21/2019, 1246.15, 1038042, 1252.26, 1254.6287, 1240.6
+10/18/2019, 1245.49, 1352839, 1253.46, 1258.89, 1241.08
+10/17/2019, 1253.07, 980510, 1250.93, 1263.325, 1249.94
+10/16/2019, 1243.64, 1168174, 1241.17, 1254.74, 1238.45
+10/15/2019, 1243.01, 1395259, 1220.4, 1247.33, 1220.4
+10/14/2019, 1217.14, 882039, 1212.34, 1226.33, 1211.76
+10/11/2019, 1215.45, 1277144, 1222.21, 1228.39, 1213.74
+10/10/2019, 1208.67, 932531, 1198.58, 1215, 1197.34
+10/09/2019, 1202.31, 876632, 1199.35, 1208.35, 1197.63
+10/08/2019, 1189.13, 1141784, 1197.59, 1206.08, 1189.01
+10/07/2019, 1207.68, 867149, 1204.4, 1218.2036, 1203.75
+10/04/2019, 1209, 1183264, 1191.89, 1211.44, 1189.17
+10/03/2019, 1187.83, 1663656, 1180, 1189.06, 1162.43
+10/02/2019, 1176.63, 1639237, 1196.98, 1196.99, 1171.29
+10/01/2019, 1205.1, 1358279, 1219, 1231.23, 1203.58
+09/30/2019, 1219, 1419676, 1220.97, 1226, 1212.3
+09/27/2019, 1225.09, 1354432, 1243.01, 1244.02, 1214.45
+09/26/2019, 1241.39, 1561882, 1241.96, 1245, 1232.268
+09/25/2019, 1246.52, 1593875, 1215.82, 1248.3, 1210.09
+09/24/2019, 1218.76, 1591786, 1240, 1246.74, 1210.68
+09/23/2019, 1234.03, 1075253, 1226, 1239.09, 1224.17
+09/20/2019, 1229.93, 2337269, 1233.12, 1243.32, 1223.08
+09/19/2019, 1238.71, 1000155, 1232.06, 1244.44, 1232.02
+09/18/2019, 1232.41, 1144333, 1227.51, 1235.61, 1216.53
+09/17/2019, 1229.15, 958112, 1230.4, 1235, 1223.69
+09/16/2019, 1231.3, 1053299, 1229.52, 1239.56, 1225.61
+09/13/2019, 1239.56, 1301350, 1231.35, 1240.88, 1227.01
+09/12/2019, 1234.25, 1725908, 1224.3, 1241.86, 1223.02
+09/11/2019, 1220.17, 1307033, 1203.41, 1222.6, 1202.2
+09/10/2019, 1206, 1260115, 1195.15, 1210, 1194.58
+09/09/2019, 1204.41, 1471880, 1204, 1220, 1192.62
+09/06/2019, 1204.93, 1072143, 1208.13, 1212.015, 1202.5222
+09/05/2019, 1211.38, 1408601, 1191.53, 1213.04, 1191.53
+09/04/2019, 1181.41, 1068968, 1176.71, 1183.48, 1171
+09/03/2019, 1168.39, 1480420, 1177.03, 1186.89, 1163.2
+08/30/2019, 1188.1, 1129959, 1198.5, 1198.5, 1183.8026
+08/29/2019, 1192.85, 1088858, 1181.12, 1196.06, 1181.12
+08/28/2019, 1171.02, 802243, 1161.71, 1176.4199, 1157.3
+08/27/2019, 1167.84, 1077452, 1180.53, 1182.4, 1161.45
+08/26/2019, 1168.89, 1226441, 1157.26, 1169.47, 1152.96
+08/23/2019, 1151.29, 1688271, 1181.99, 1194.08, 1147.75
+08/22/2019, 1189.53, 947906, 1194.07, 1198.0115, 1178.58
+08/21/2019, 1191.25, 741053, 1193.15, 1199, 1187.43
+08/20/2019, 1182.69, 915605, 1195.25, 1196.06, 1182.11
+08/19/2019, 1198.45, 1232517, 1190.09, 1206.99, 1190.09
+08/16/2019, 1177.6, 1349436, 1179.55, 1182.72, 1171.81
+08/15/2019, 1167.26, 1224739, 1163.5, 1175.84, 1162.11
+08/14/2019, 1164.29, 1578668, 1176.31, 1182.3, 1160.54
+08/13/2019, 1197.27, 1318009, 1171.46, 1204.78, 1171.46
+08/12/2019, 1174.71, 1003187, 1179.21, 1184.96, 1167.6723
+08/09/2019, 1188.01, 1065658, 1197.99, 1203.88, 1183.603
+08/08/2019, 1204.8, 1467997, 1182.83, 1205.01, 1173.02
+08/07/2019, 1173.99, 1444324, 1156, 1178.4451, 1149.6239
+08/06/2019, 1169.95, 1709374, 1163.31, 1179.96, 1160
+08/05/2019, 1152.32, 2597455, 1170.04, 1175.24, 1140.14
+08/02/2019, 1193.99, 1645067, 1200.74, 1206.9, 1188.94
+08/01/2019, 1209.01, 1698510, 1214.03, 1234.11, 1205.72
+07/31/2019, 1216.68, 1725454, 1223, 1234, 1207.7635
+07/30/2019, 1225.14, 1453263, 1225.41, 1234.87, 1223.3
+07/29/2019, 1239.41, 2223731, 1241.05, 1247.37, 1228.23
+07/26/2019, 1250.41, 4805752, 1224.04, 1265.5499, 1224
+07/25/2019, 1132.12, 2209823, 1137.82, 1141.7, 1120.92
+07/24/2019, 1137.81, 1590101, 1131.9, 1144, 1126.99
+07/23/2019, 1146.21, 1093688, 1144, 1146.9, 1131.8
+07/22/2019, 1138.07, 1301846, 1133.45, 1139.25, 1124.24
+07/19/2019, 1130.1, 1647245, 1148.19, 1151.14, 1129.62
+07/18/2019, 1146.33, 1291281, 1141.74, 1147.605, 1132.73
+07/17/2019, 1146.35, 1170047, 1150.97, 1158.36, 1145.77
+07/16/2019, 1153.58, 1238807, 1146, 1158.58, 1145
+07/15/2019, 1150.34, 903780, 1146.86, 1150.82, 1139.4
+07/12/2019, 1144.9, 863973, 1143.99, 1147.34, 1138.78
+07/11/2019, 1144.21, 1195569, 1143.25, 1153.07, 1139.58
+07/10/2019, 1140.48, 1209466, 1131.22, 1142.05, 1130.97
+07/09/2019, 1124.83, 1330370, 1111.8, 1128.025, 1107.17
+07/08/2019, 1116.35, 1236419, 1125.17, 1125.98, 1111.21
+07/05/2019, 1131.59, 1264540, 1117.8, 1132.88, 1116.14
+07/03/2019, 1121.58, 767011, 1117.41, 1126.76, 1113.86
+07/02/2019, 1111.25, 991755, 1102.24, 1111.77, 1098.17
+07/01/2019, 1097.95, 1438504, 1098, 1107.58, 1093.703
+06/28/2019, 1080.91, 1693450, 1076.39, 1081, 1073.37
+06/27/2019, 1076.01, 1004477, 1084, 1087.1, 1075.29
+06/26/2019, 1079.8, 1810869, 1086.5, 1092.97, 1072.24
+06/25/2019, 1086.35, 1546913, 1112.66, 1114.35, 1083.8
+06/24/2019, 1115.52, 1395696, 1119.61, 1122, 1111.01
+06/21/2019, 1121.88, 1947591, 1109.24, 1124.11, 1108.08
+06/20/2019, 1111.42, 1262011, 1119.99, 1120.12, 1104.74
+06/19/2019, 1102.33, 1339218, 1105.6, 1107, 1093.48
+06/18/2019, 1103.6, 1386684, 1109.69, 1116.39, 1098.99
+06/17/2019, 1092.5, 941602, 1086.28, 1099.18, 1086.28
+06/14/2019, 1085.35, 1111643, 1086.42, 1092.69, 1080.1721
+06/13/2019, 1088.77, 1058000, 1083.64, 1094.17, 1080.15
+06/12/2019, 1077.03, 1061255, 1078, 1080.93, 1067.54
+06/11/2019, 1078.72, 1437063, 1093.98, 1101.99, 1077.6025
+06/10/2019, 1080.38, 1464248, 1072.98, 1092.66, 1072.3216
+06/07/2019, 1066.04, 1802370, 1050.63, 1070.92, 1048.4
+06/06/2019, 1044.34, 1703244, 1044.99, 1047.49, 1033.7
+06/05/2019, 1042.22, 2168439, 1051.54, 1053.55, 1030.49
+06/04/2019, 1053.05, 2833483, 1042.9, 1056.05, 1033.69
+06/03/2019, 1036.23, 5130576, 1065.5, 1065.5, 1025
+05/31/2019, 1103.63, 1508203, 1101.29, 1109.6, 1100.18
+05/30/2019, 1117.95, 951873, 1115.54, 1123.13, 1112.12
+05/29/2019, 1116.46, 1538212, 1127.52, 1129.1, 1108.2201
+05/28/2019, 1134.15, 1365166, 1134, 1151.5871, 1133.12
+05/24/2019, 1133.47, 1112341, 1147.36, 1149.765, 1131.66
+05/23/2019, 1140.77, 1199300, 1140.5, 1145.9725, 1129.224
+05/22/2019, 1151.42, 914839, 1146.75, 1158.52, 1145.89
+05/21/2019, 1149.63, 1160158, 1148.49, 1152.7077, 1137.94
+05/20/2019, 1138.85, 1353292, 1144.5, 1146.7967, 1131.4425
+05/17/2019, 1162.3, 1208623, 1168.47, 1180.15, 1160.01
+05/16/2019, 1178.98, 1531404, 1164.51, 1188.16, 1162.84
+05/15/2019, 1164.21, 2289302, 1117.87, 1171.33, 1116.6657
+05/14/2019, 1120.44, 1836604, 1137.21, 1140.42, 1119.55
+05/13/2019, 1132.03, 1860648, 1141.96, 1147.94, 1122.11
+05/10/2019, 1164.27, 1314546, 1163.59, 1172.6, 1142.5
+05/09/2019, 1162.38, 1185973, 1159.03, 1169.66, 1150.85
+05/08/2019, 1166.27, 1309514, 1172.01, 1180.4243, 1165.74
+05/07/2019, 1174.1, 1551368, 1180.47, 1190.44, 1161.04
+05/06/2019, 1189.39, 1563943, 1166.26, 1190.85, 1166.26
+05/03/2019, 1185.4, 1980653, 1173.65, 1186.8, 1169
+05/02/2019, 1162.61, 1944817, 1167.76, 1174.1895, 1155.0018
+05/01/2019, 1168.08, 2642983, 1188.05, 1188.05, 1167.18
+04/30/2019, 1188.48, 6194691, 1185, 1192.81, 1175
+04/29/2019, 1287.58, 2412788, 1274, 1289.27, 1266.2949
+04/26/2019, 1272.18, 1228276, 1269, 1273.07, 1260.32
+04/25/2019, 1263.45, 1099614, 1264.77, 1267.4083, 1252.03
+04/24/2019, 1256, 1015006, 1264.12, 1268.01, 1255
+04/23/2019, 1264.55, 1271195, 1250.69, 1269, 1246.38
+04/22/2019, 1248.84, 806577, 1235.99, 1249.09, 1228.31
+04/18/2019, 1236.37, 1315676, 1239.18, 1242, 1234.61
+04/17/2019, 1236.34, 1211866, 1233, 1240.56, 1227.82
+04/16/2019, 1227.13, 855258, 1225, 1230.82, 1220.12
+04/15/2019, 1221.1, 1187353, 1218, 1224.2, 1209.1101
+04/12/2019, 1217.87, 926799, 1210, 1218.35, 1208.11
+04/11/2019, 1204.62, 709417, 1203.96, 1207.96, 1200.13
+04/10/2019, 1202.16, 724524, 1200.68, 1203.785, 1196.435
+04/09/2019, 1197.25, 865416, 1196, 1202.29, 1193.08
+04/08/2019, 1203.84, 859969, 1207.89, 1208.69, 1199.86
+04/05/2019, 1207.15, 900950, 1214.99, 1216.22, 1205.03
+04/04/2019, 1215, 949962, 1205.94, 1215.67, 1204.13
+04/03/2019, 1205.92, 1014195, 1207.48, 1216.3, 1200.5
+04/02/2019, 1200.49, 800820, 1195.32, 1201.35, 1185.71
+04/01/2019, 1194.43, 1188235, 1184.1, 1196.66, 1182
+03/29/2019, 1173.31, 1269573, 1174.9, 1178.99, 1162.88
+03/28/2019, 1168.49, 966843, 1171.54, 1171.565, 1159.4312
+03/27/2019, 1173.02, 1362217, 1185.5, 1187.559, 1159.37
+03/26/2019, 1184.62, 1894639, 1198.53, 1202.83, 1176.72
+03/25/2019, 1193, 1493841, 1196.93, 1206.3975, 1187.04
+03/22/2019, 1205.5, 1668910, 1226.32, 1230, 1202.825
+03/21/2019, 1231.54, 1195899, 1216, 1231.79, 1213.15
+03/20/2019, 1223.97, 2089367, 1197.35, 1227.14, 1196.17
+03/19/2019, 1198.85, 1404863, 1188.81, 1200, 1185.87
+03/18/2019, 1184.26, 1212506, 1183.3, 1190, 1177.4211
+03/15/2019, 1184.46, 2457597, 1193.38, 1196.57, 1182.61
+03/14/2019, 1185.55, 1150950, 1194.51, 1197.88, 1184.48
+03/13/2019, 1193.32, 1434816, 1200.645, 1200.93, 1191.94
+03/12/2019, 1193.2, 2012306, 1178.26, 1200, 1178.26
+03/11/2019, 1175.76, 1569332, 1144.45, 1176.19, 1144.45
+03/08/2019, 1142.32, 1212271, 1126.73, 1147.08, 1123.3
+03/07/2019, 1143.3, 1166076, 1155.72, 1156.755, 1134.91
+03/06/2019, 1157.86, 1094100, 1162.49, 1167.5658, 1155.49
+03/05/2019, 1162.03, 1422357, 1150.06, 1169.61, 1146.195
+03/04/2019, 1147.8, 1444774, 1146.99, 1158.2804, 1130.69
+03/01/2019, 1140.99, 1447454, 1124.9, 1142.97, 1124.75
+02/28/2019, 1119.92, 1541068, 1111.3, 1127.65, 1111.01
+02/27/2019, 1116.05, 968362, 1106.95, 1117.98, 1101
+02/26/2019, 1115.13, 1469761, 1105.75, 1119.51, 1099.92
+02/25/2019, 1109.4, 1395281, 1116, 1118.54, 1107.27
+02/22/2019, 1110.37, 1048361, 1100.9, 1111.24, 1095.6
+02/21/2019, 1096.97, 1414744, 1110.84, 1111.94, 1092.52
+02/20/2019, 1113.8, 1080144, 1119.99, 1123.41, 1105.28
+02/19/2019, 1118.56, 1046315, 1110, 1121.89, 1110
+02/15/2019, 1113.65, 1442461, 1130.08, 1131.67, 1110.65
+02/14/2019, 1121.67, 941678, 1118.05, 1128.23, 1110.445
+02/13/2019, 1120.16, 1048630, 1124.99, 1134.73, 1118.5
+02/12/2019, 1121.37, 1608658, 1106.8, 1125.295, 1105.85
+02/11/2019, 1095.01, 1063825, 1096.95, 1105.945, 1092.86
+02/08/2019, 1095.06, 1072031, 1087, 1098.91, 1086.55
+02/07/2019, 1098.71, 2040615, 1104.16, 1104.84, 1086
+02/06/2019, 1115.23, 2101674, 1139.57, 1147, 1112.77
+02/05/2019, 1145.99, 3529974, 1124.84, 1146.85, 1117.248
+02/04/2019, 1132.8, 2518184, 1112.66, 1132.8, 1109.02
+02/01/2019, 1110.75, 1455609, 1112.4, 1125, 1104.89
+01/31/2019, 1116.37, 1531463, 1103, 1117.33, 1095.41
+01/30/2019, 1089.06, 1241760, 1068.43, 1091, 1066.85
+01/29/2019, 1060.62, 1006731, 1072.68, 1075.15, 1055.8647
+01/28/2019, 1070.08, 1277745, 1080.11, 1083, 1063.8
+01/25/2019, 1090.99, 1114785, 1085, 1094, 1081.82
+01/24/2019, 1073.9, 1317718, 1076.48, 1079.475, 1060.7
+01/23/2019, 1075.57, 956526, 1077.35, 1084.93, 1059.75
+01/22/2019, 1070.52, 1607398, 1088, 1091.51, 1063.47
+01/18/2019, 1098.26, 1933754, 1100, 1108.352, 1090.9
+01/17/2019, 1089.9, 1223674, 1079.47, 1091.8, 1073.5
+01/16/2019, 1080.97, 1320530, 1080, 1092.375, 1079.34
+01/15/2019, 1077.15, 1452238, 1050.17, 1080.05, 1047.34
+01/14/2019, 1044.69, 1127417, 1046.92, 1051.53, 1041.255
+01/11/2019, 1057.19, 1512651, 1063.18, 1063.775, 1048.48
+01/10/2019, 1070.33, 1444976, 1067.66, 1071.15, 1057.71
+01/09/2019, 1074.66, 1198369, 1081.65, 1082.63, 1066.4
+01/08/2019, 1076.28, 1748371, 1076.11, 1084.56, 1060.53
+01/07/2019, 1068.39, 1978077, 1071.5, 1073.9999, 1054.76
+01/04/2019, 1070.71, 2080144, 1032.59, 1070.84, 1027.4179
+01/03/2019, 1016.06, 1829379, 1041, 1056.98, 1014.07
+01/02/2019, 1045.85, 1516681, 1016.57, 1052.32, 1015.71
+12/31/2018, 1035.61, 1492541, 1050.96, 1052.7, 1023.59
+12/28/2018, 1037.08, 1399218, 1049.62, 1055.56, 1033.1
+12/27/2018, 1043.88, 2102069, 1017.15, 1043.89, 997
+12/26/2018, 1039.46, 2337212, 989.01, 1040, 983
+12/24/2018, 976.22, 1590328, 973.9, 1003.54, 970.11
+12/21/2018, 979.54, 4560424, 1015.3, 1024.02, 973.69
+12/20/2018, 1009.41, 2659047, 1018.13, 1034.22, 996.36
+12/19/2018, 1023.01, 2419322, 1033.99, 1062, 1008.05
+12/18/2018, 1028.71, 2101854, 1026.09, 1049.48, 1021.44
+12/17/2018, 1016.53, 2337631, 1037.51, 1053.15, 1007.9
+12/14/2018, 1042.1, 1685802, 1049.98, 1062.6, 1040.79
+12/13/2018, 1061.9, 1329198, 1068.07, 1079.7597, 1053.93
+12/12/2018, 1063.68, 1523276, 1068, 1081.65, 1062.79
+12/11/2018, 1051.75, 1354751, 1056.49, 1060.6, 1039.84
+12/10/2018, 1039.55, 1793465, 1035.05, 1048.45, 1023.29
+12/07/2018, 1036.58, 2098526, 1060.01, 1075.26, 1028.5
+12/06/2018, 1068.73, 2758098, 1034.26, 1071.2, 1030.7701
+12/04/2018, 1050.82, 2278200, 1103.12, 1104.42, 1049.98
+12/03/2018, 1106.43, 1900355, 1123.14, 1124.65, 1103.6645
+11/30/2018, 1094.43, 2554416, 1089.07, 1095.57, 1077.88
+11/29/2018, 1088.3, 1403540, 1076.08, 1094.245, 1076
+11/28/2018, 1086.23, 2399374, 1048.76, 1086.84, 1035.76
+11/27/2018, 1044.41, 1801334, 1041, 1057.58, 1038.49
+11/26/2018, 1048.62, 1846430, 1038.35, 1049.31, 1033.91
+11/23/2018, 1023.88, 691462, 1030, 1037.59, 1022.3992
+11/21/2018, 1037.61, 1531676, 1036.76, 1048.56, 1033.47
+11/20/2018, 1025.76, 2447254, 1000, 1031.74, 996.02
+11/19/2018, 1020, 1837207, 1057.2, 1060.79, 1016.2601
+11/16/2018, 1061.49, 1641232, 1059.41, 1067, 1048.98
+11/15/2018, 1064.71, 1819132, 1044.71, 1071.85, 1031.78
+11/14/2018, 1043.66, 1561656, 1050, 1054.5643, 1031
+11/13/2018, 1036.05, 1496534, 1043.29, 1056.605, 1031.15
+11/12/2018, 1038.63, 1429319, 1061.39, 1062.12, 1031
+11/09/2018, 1066.15, 1343154, 1073.99, 1075.56, 1053.11
+11/08/2018, 1082.4, 1463022, 1091.38, 1093.27, 1072.2048
+11/07/2018, 1093.39, 2057155, 1069, 1095.46, 1065.9
+11/06/2018, 1055.81, 1225197, 1039.48, 1064.345, 1038.07
+11/05/2018, 1040.09, 2436742, 1055, 1058.47, 1021.24
+11/02/2018, 1057.79, 1829295, 1073.73, 1082.975, 1054.61
+11/01/2018, 1070, 1456222, 1075.8, 1083.975, 1062.46
+10/31/2018, 1076.77, 2528584, 1059.81, 1091.94, 1057
+10/30/2018, 1036.21, 3209126, 1008.46, 1037.49, 1000.75
+10/29/2018, 1020.08, 3873644, 1082.47, 1097.04, 995.83
+10/26/2018, 1071.47, 4185201, 1037.03, 1106.53, 1034.09
+10/25/2018, 1095.57, 2511884, 1071.79, 1110.98, 1069.55
+10/24/2018, 1050.71, 1910060, 1104.25, 1106.12, 1048.74
+10/23/2018, 1103.69, 1847798, 1080.89, 1107.89, 1070
+10/22/2018, 1101.16, 1494285, 1103.06, 1112.23, 1091
+10/19/2018, 1096.46, 1264605, 1093.37, 1110.36, 1087.75
+10/18/2018, 1087.97, 2056606, 1121.84, 1121.84, 1077.09
+10/17/2018, 1115.69, 1397613, 1126.46, 1128.99, 1102.19
+10/16/2018, 1121.28, 1845491, 1104.59, 1124.22, 1102.5
+10/15/2018, 1092.25, 1343231, 1108.91, 1113.4464, 1089
+10/12/2018, 1110.08, 2029872, 1108, 1115, 1086.402
+10/11/2018, 1079.32, 2939514, 1072.94, 1106.4, 1068.27
+10/10/2018, 1081.22, 2574985, 1131.08, 1132.17, 1081.13
+10/09/2018, 1138.82, 1308706, 1146.15, 1154.35, 1137.572
+10/08/2018, 1148.97, 1877142, 1150.11, 1168, 1127.3636
+10/05/2018, 1157.35, 1184245, 1167.5, 1173.4999, 1145.12
+10/04/2018, 1168.19, 2151762, 1195.33, 1197.51, 1155.576
+10/03/2018, 1202.95, 1207280, 1205, 1206.41, 1193.83
+10/02/2018, 1200.11, 1655602, 1190.96, 1209.96, 1186.63
+10/01/2018, 1195.31, 1345250, 1199.89, 1209.9, 1190.3
+09/28/2018, 1193.47, 1306822, 1191.87, 1195.41, 1184.5
+09/27/2018, 1194.64, 1244278, 1186.73, 1202.1, 1183.63
+09/26/2018, 1180.49, 1346434, 1185.15, 1194.23, 1174.765
+09/25/2018, 1184.65, 937577, 1176.15, 1186.88, 1168
+09/24/2018, 1173.37, 1218532, 1157.17, 1178, 1146.91
+09/21/2018, 1166.09, 4363929, 1192, 1192.21, 1166.04
+09/20/2018, 1186.87, 1209855, 1179.99, 1189.89, 1173.36
+09/19/2018, 1171.09, 1185321, 1164.98, 1173.21, 1154.58
+09/18/2018, 1161.22, 1184407, 1157.09, 1176.08, 1157.09
+09/17/2018, 1156.05, 1279147, 1170.14, 1177.24, 1154.03
+09/14/2018, 1172.53, 934300, 1179.1, 1180.425, 1168.3295
+09/13/2018, 1175.33, 1402005, 1170.74, 1178.61, 1162.85
+09/12/2018, 1162.82, 1291304, 1172.72, 1178.61, 1158.36
+09/11/2018, 1177.36, 1209171, 1161.63, 1178.68, 1156.24
+09/10/2018, 1164.64, 1115259, 1172.19, 1174.54, 1160.11
+09/07/2018, 1164.83, 1401034, 1158.67, 1175.26, 1157.215
+09/06/2018, 1171.44, 1886690, 1186.3, 1186.3, 1152
+09/05/2018, 1186.48, 2043732, 1193.8, 1199.0096, 1162
+09/04/2018, 1197, 1800509, 1204.27, 1212.99, 1192.5
+08/31/2018, 1218.19, 1812366, 1234.98, 1238.66, 1211.2854
+08/30/2018, 1239.12, 1320261, 1244.23, 1253.635, 1232.59
+08/29/2018, 1249.3, 1295939, 1237.45, 1250.66, 1236.3588
+08/28/2018, 1231.15, 1296532, 1241.29, 1242.545, 1228.69
+08/27/2018, 1241.82, 1154962, 1227.6, 1243.09, 1225.716
+08/24/2018, 1220.65, 946529, 1208.82, 1221.65, 1206.3588
+08/23/2018, 1205.38, 988509, 1207.14, 1221.28, 1204.24
+08/22/2018, 1207.33, 881463, 1200, 1211.84, 1199
+08/21/2018, 1201.62, 1187884, 1208, 1217.26, 1200.3537
+08/20/2018, 1207.77, 864462, 1205.02, 1211, 1194.6264
+08/17/2018, 1200.96, 1381724, 1202.03, 1209.02, 1188.24
+08/16/2018, 1206.49, 1319985, 1224.73, 1225.9999, 1202.55
+08/15/2018, 1214.38, 1815642, 1229.26, 1235.24, 1209.51
+08/14/2018, 1242.1, 1342534, 1235.19, 1245.8695, 1225.11
+08/13/2018, 1235.01, 957153, 1236.98, 1249.2728, 1233.6405
+08/10/2018, 1237.61, 1107323, 1243, 1245.695, 1232
+08/09/2018, 1249.1, 805227, 1249.9, 1255.542, 1246.01
+08/08/2018, 1245.61, 1369650, 1240.47, 1256.5, 1238.0083
+08/07/2018, 1242.22, 1493073, 1237, 1251.17, 1236.17
+08/06/2018, 1224.77, 1080923, 1225, 1226.0876, 1215.7965
+08/03/2018, 1223.71, 1072524, 1229.62, 1230, 1215.06
+08/02/2018, 1226.15, 1520488, 1205.9, 1229.88, 1204.79
+08/01/2018, 1220.01, 1567142, 1228, 1233.47, 1210.21
+07/31/2018, 1217.26, 1632823, 1220.01, 1227.5877, 1205.6
+07/30/2018, 1219.74, 1822782, 1228.01, 1234.916, 1211.47
+07/27/2018, 1238.5, 2115802, 1271, 1273.89, 1231
+07/26/2018, 1268.33, 2334881, 1251, 1269.7707, 1249.02
+07/25/2018, 1263.7, 2115890, 1239.13, 1265.86, 1239.13
+07/24/2018, 1248.08, 3303268, 1262.59, 1266, 1235.56
+07/23/2018, 1205.5, 2584034, 1181.01, 1206.49, 1181
+07/20/2018, 1184.91, 1246898, 1186.96, 1196.86, 1184.22
+07/19/2018, 1186.96, 1256113, 1191, 1200, 1183.32
+07/18/2018, 1195.88, 1391232, 1196.56, 1204.5, 1190.34
+07/17/2018, 1198.8, 1585091, 1172.22, 1203.04, 1170.6
+07/16/2018, 1183.86, 1049560, 1189.39, 1191, 1179.28
+07/13/2018, 1188.82, 1221687, 1185, 1195.4173, 1180
+07/12/2018, 1183.48, 1251083, 1159.89, 1184.41, 1155.935
+07/11/2018, 1153.9, 1094301, 1144.59, 1164.29, 1141.0003
+07/10/2018, 1152.84, 789249, 1156.98, 1159.59, 1149.59
+07/09/2018, 1154.05, 906073, 1148.48, 1154.67, 1143.42
+07/06/2018, 1140.17, 966155, 1123.58, 1140.93, 1120.7371
+07/05/2018, 1124.27, 1060752, 1110.53, 1127.5, 1108.48
+07/03/2018, 1102.89, 679034, 1135.82, 1135.82, 1100.02
+07/02/2018, 1127.46, 1188616, 1099, 1128, 1093.8
+06/29/2018, 1115.65, 1275979, 1120, 1128.2265, 1115
+06/28/2018, 1114.22, 1072438, 1102.09, 1122.31, 1096.01
+06/27/2018, 1103.98, 1287698, 1121.34, 1131.8362, 1103.62
+06/26/2018, 1118.46, 1559791, 1128, 1133.21, 1116.6589
+06/25/2018, 1124.81, 2155276, 1143.6, 1143.91, 1112.78
+06/22/2018, 1155.48, 1310164, 1159.14, 1162.4965, 1147.26
+06/21/2018, 1157.66, 1232352, 1174.85, 1177.295, 1152.232
+06/20/2018, 1169.84, 1648248, 1175.31, 1186.2856, 1169.16
+06/19/2018, 1168.06, 1616125, 1158.5, 1171.27, 1154.01
+06/18/2018, 1173.46, 1400641, 1143.65, 1174.31, 1143.59
+06/15/2018, 1152.26, 2119134, 1148.86, 1153.42, 1143.485
+06/14/2018, 1152.12, 1350085, 1143.85, 1155.47, 1140.64
+06/13/2018, 1134.79, 1490017, 1141.12, 1146.5, 1133.38
+06/12/2018, 1139.32, 899231, 1131.07, 1139.79, 1130.735
+06/11/2018, 1129.99, 1071114, 1118.6, 1137.26, 1118.6
+06/08/2018, 1120.87, 1289859, 1118.18, 1126.67, 1112.15
+06/07/2018, 1123.86, 1519860, 1131.32, 1135.82, 1116.52
+06/06/2018, 1136.88, 1697489, 1142.17, 1143, 1125.7429
+06/05/2018, 1139.66, 1538169, 1140.99, 1145.738, 1133.19
+06/04/2018, 1139.29, 1881046, 1122.33, 1141.89, 1122.005
+06/01/2018, 1119.5, 2416755, 1099.35, 1120, 1098.5
+05/31/2018, 1084.99, 3085325, 1067.56, 1097.19, 1067.56
+05/30/2018, 1067.8, 1129958, 1063.03, 1069.21, 1056.83
+05/29/2018, 1060.32, 1858676, 1064.89, 1073.37, 1055.22
+05/25/2018, 1075.66, 878903, 1079.02, 1082.56, 1073.775
+05/24/2018, 1079.24, 757752, 1079, 1080.47, 1066.15
+05/23/2018, 1079.69, 1057712, 1065.13, 1080.78, 1061.71
+05/22/2018, 1069.73, 1088700, 1083.56, 1086.59, 1066.69
+05/21/2018, 1079.58, 1012258, 1074.06, 1088, 1073.65
+05/18/2018, 1066.36, 1496448, 1061.86, 1069.94, 1060.68
+05/17/2018, 1078.59, 1031190, 1079.89, 1086.87, 1073.5
+05/16/2018, 1081.77, 989819, 1077.31, 1089.27, 1076.26
+05/15/2018, 1079.23, 1494306, 1090, 1090.05, 1073.47
+05/14/2018, 1100.2, 1450140, 1100, 1110.75, 1099.11
+05/11/2018, 1098.26, 1253205, 1093.6, 1101.3295, 1090.91
+05/10/2018, 1097.57, 1441456, 1086.03, 1100.44, 1085.64
+05/09/2018, 1082.76, 2032319, 1058.1, 1085.44, 1056.365
+05/08/2018, 1053.91, 1217260, 1058.54, 1060.55, 1047.145
+05/07/2018, 1054.79, 1464008, 1049.23, 1061.68, 1047.1
+05/04/2018, 1048.21, 1936797, 1016.9, 1048.51, 1016.9
+05/03/2018, 1023.72, 1813623, 1019, 1029.675, 1006.29
+05/02/2018, 1024.38, 1534094, 1028.1, 1040.389, 1022.87
+05/01/2018, 1037.31, 1427171, 1013.66, 1038.47, 1008.21
+04/30/2018, 1017.33, 1664084, 1030.01, 1037, 1016.85
+04/27/2018, 1030.05, 1617452, 1046, 1049.5, 1025.59
+04/26/2018, 1040.04, 1984448, 1029.51, 1047.98, 1018.19
+04/25/2018, 1021.18, 2225495, 1025.52, 1032.49, 1015.31
+04/24/2018, 1019.98, 4750851, 1052, 1057, 1010.59
+04/23/2018, 1067.45, 2278846, 1077.86, 1082.72, 1060.7
+04/20/2018, 1072.96, 1887698, 1082, 1092.35, 1069.57
+04/19/2018, 1087.7, 1741907, 1069.4, 1094.165, 1068.18
+04/18/2018, 1072.08, 1336678, 1077.43, 1077.43, 1066.225
+04/17/2018, 1074.16, 2311903, 1051.37, 1077.88, 1048.26
+04/16/2018, 1037.98, 1194144, 1037, 1043.24, 1026.74
+04/13/2018, 1029.27, 1175754, 1040.88, 1046.42, 1022.98
+04/12/2018, 1032.51, 1357599, 1025.04, 1040.69, 1021.4347
+04/11/2018, 1019.97, 1476133, 1027.99, 1031.3641, 1015.87
+04/10/2018, 1031.64, 1983510, 1026.44, 1036.28, 1011.34
+04/09/2018, 1015.45, 1738682, 1016.8, 1039.6, 1014.08
+04/06/2018, 1007.04, 1740896, 1020, 1031.42, 1003.03
+04/05/2018, 1027.81, 1345681, 1041.33, 1042.79, 1020.1311
+04/04/2018, 1025.14, 2464418, 993.41, 1028.7175, 993
+04/03/2018, 1013.41, 2271858, 1013.91, 1020.99, 994.07
+04/02/2018, 1006.47, 2679214, 1022.82, 1034.8, 990.37
+03/29/2018, 1031.79, 2714402, 1011.63, 1043, 1002.9
+03/28/2018, 1004.56, 3345046, 998, 1024.23, 980.64
+03/27/2018, 1005.1, 3081612, 1063, 1064.8393, 996.92
+03/26/2018, 1053.21, 2593808, 1046, 1055.63, 1008.4
+03/23/2018, 1021.57, 2147097, 1047.03, 1063.36, 1021.22
+03/22/2018, 1049.08, 2584639, 1081.88, 1082.9, 1045.91
+03/21/2018, 1090.88, 1878294, 1092.74, 1106.2999, 1085.15
+03/20/2018, 1097.71, 1802209, 1099, 1105.2, 1083.46
+03/19/2018, 1099.82, 2355186, 1120.01, 1121.99, 1089.01
+03/16/2018, 1135.73, 2614871, 1154.14, 1155.88, 1131.96
+03/15/2018, 1149.58, 1397767, 1149.96, 1161.08, 1134.54
+03/14/2018, 1149.49, 1290638, 1145.21, 1158.59, 1141.44
+03/13/2018, 1138.17, 1874176, 1170, 1176.76, 1133.33
+03/12/2018, 1164.5, 2106548, 1163.85, 1177.05, 1157.42
+03/09/2018, 1160.04, 2121425, 1136, 1160.8, 1132.4606
+03/08/2018, 1126, 1393529, 1115.32, 1127.6, 1112.8
+03/07/2018, 1109.64, 1277439, 1089.19, 1112.22, 1085.4823
+03/06/2018, 1095.06, 1497087, 1099.22, 1101.85, 1089.775
+03/05/2018, 1090.93, 1141932, 1075.14, 1097.1, 1069.0001
+03/02/2018, 1078.92, 2271394, 1053.08, 1081.9986, 1048.115
+03/01/2018, 1069.52, 2511872, 1107.87, 1110.12, 1067.001
+02/28/2018, 1104.73, 1873737, 1123.03, 1127.53, 1103.24
+02/27/2018, 1118.29, 1772866, 1141.24, 1144.04, 1118
+02/26/2018, 1143.75, 1514920, 1127.8, 1143.96, 1126.695
+02/23/2018, 1126.79, 1190432, 1112.64, 1127.28, 1104.7135
+02/22/2018, 1106.63, 1309536, 1116.19, 1122.82, 1102.59
+02/21/2018, 1111.34, 1507152, 1106.47, 1133.97, 1106.33
+02/20/2018, 1102.46, 1389491, 1090.57, 1113.95, 1088.52
+02/16/2018, 1094.8, 1680283, 1088.41, 1104.67, 1088.3134
+02/15/2018, 1089.52, 1785552, 1079.07, 1091.4794, 1064.34
+02/14/2018, 1069.7, 1547665, 1048.95, 1071.72, 1046.75
+02/13/2018, 1052.1, 1213800, 1045, 1058.37, 1044.0872
+02/12/2018, 1051.94, 2054002, 1048, 1061.5, 1040.928
+02/09/2018, 1037.78, 3503970, 1017.25, 1043.97, 992.56
+02/08/2018, 1001.52, 2809890, 1055.41, 1058.62, 1000.66
+02/07/2018, 1048.58, 2353003, 1081.54, 1081.78, 1048.26
+02/06/2018, 1080.6, 3432313, 1027.18, 1081.71, 1023.1367
+02/05/2018, 1055.8, 3769453, 1090.6, 1110, 1052.03
+02/02/2018, 1111.9, 4837979, 1122, 1123.07, 1107.2779
+02/01/2018, 1167.7, 2380221, 1162.61, 1174, 1157.52
+01/31/2018, 1169.94, 1523820, 1170.57, 1173, 1159.13
+01/30/2018, 1163.69, 1541771, 1167.83, 1176.52, 1163.52
+01/29/2018, 1175.58, 1337324, 1176.48, 1186.89, 1171.98
+01/26/2018, 1175.84, 1981173, 1175.08, 1175.84, 1158.11
+01/25/2018, 1170.37, 1461518, 1172.53, 1175.94, 1162.76
+01/24/2018, 1164.24, 1382904, 1177.33, 1179.86, 1161.05
+01/23/2018, 1169.97, 1309862, 1159.85, 1171.6266, 1158.75
+01/22/2018, 1155.81, 1616120, 1137.49, 1159.88, 1135.1101
+01/19/2018, 1137.51, 1390118, 1131.83, 1137.86, 1128.3
+01/18/2018, 1129.79, 1194943, 1131.41, 1132.51, 1117.5
+01/17/2018, 1131.98, 1200476, 1126.22, 1132.6, 1117.01
+01/16/2018, 1121.76, 1566662, 1132.51, 1139.91, 1117.8316
+01/12/2018, 1122.26, 1718491, 1102.41, 1124.29, 1101.15
+01/11/2018, 1105.52, 977727, 1106.3, 1106.525, 1099.59
+01/10/2018, 1102.61, 1042273, 1097.1, 1104.6, 1096.11
+01/09/2018, 1106.26, 900089, 1109.4, 1110.57, 1101.2307
+01/08/2018, 1106.94, 1046767, 1102.23, 1111.27, 1101.62
+01/05/2018, 1102.23, 1279990, 1094, 1104.25, 1092
+01/04/2018, 1086.4, 1002945, 1088, 1093.5699, 1084.0017
+01/03/2018, 1082.48, 1429757, 1064.31, 1086.29, 1063.21
+01/02/2018, 1065, 1236401, 1048.34, 1066.94, 1045.23
+12/29/2017, 1046.4, 886845, 1046.72, 1049.7, 1044.9
+12/28/2017, 1048.14, 833011, 1051.6, 1054.75, 1044.77
+12/27/2017, 1049.37, 1271780, 1057.39, 1058.37, 1048.05
+12/26/2017, 1056.74, 761097, 1058.07, 1060.12, 1050.2
+12/22/2017, 1060.12, 755089, 1061.11, 1064.2, 1059.44
+12/21/2017, 1063.63, 986548, 1064.95, 1069.33, 1061.7938
+12/20/2017, 1064.95, 1268285, 1071.78, 1073.38, 1061.52
+12/19/2017, 1070.68, 1307894, 1075.2, 1076.84, 1063.55
+12/18/2017, 1077.14, 1552016, 1066.08, 1078.49, 1062
+12/15/2017, 1064.19, 3275091, 1054.61, 1067.62, 1049.5
+12/14/2017, 1049.15, 1558684, 1045, 1058.5, 1043.11
+12/13/2017, 1040.61, 1220364, 1046.12, 1046.665, 1038.38
+12/12/2017, 1040.48, 1279511, 1039.63, 1050.31, 1033.6897
+12/11/2017, 1041.1, 1190527, 1035.5, 1043.8, 1032.0504
+12/08/2017, 1037.05, 1288419, 1037.49, 1042.05, 1032.5222
+12/07/2017, 1030.93, 1458145, 1020.43, 1034.24, 1018.071
+12/06/2017, 1018.38, 1258496, 1001.5, 1024.97, 1001.14
+12/05/2017, 1005.15, 2066247, 995.94, 1020.61, 988.28
+12/04/2017, 998.68, 1906058, 1012.66, 1016.1, 995.57
+12/01/2017, 1010.17, 1908962, 1015.8, 1022.4897, 1002.02
+11/30/2017, 1021.41, 1723003, 1022.37, 1028.4899, 1015
+11/29/2017, 1021.66, 2442974, 1042.68, 1044.08, 1015.65
+11/28/2017, 1047.41, 1421027, 1055.09, 1062.375, 1040
+11/27/2017, 1054.21, 1307471, 1040, 1055.46, 1038.44
+11/24/2017, 1040.61, 536996, 1035.87, 1043.178, 1035
+11/22/2017, 1035.96, 746351, 1035, 1039.706, 1031.43
+11/21/2017, 1034.49, 1096161, 1023.31, 1035.11, 1022.655
+11/20/2017, 1018.38, 898389, 1020.26, 1022.61, 1017.5
+11/17/2017, 1019.09, 1366936, 1034.01, 1034.42, 1017.75
+11/16/2017, 1032.5, 1129424, 1022.52, 1035.92, 1022.52
+11/15/2017, 1020.91, 847932, 1019.21, 1024.09, 1015.42
+11/14/2017, 1026, 958708, 1022.59, 1026.81, 1014.15
+11/13/2017, 1025.75, 885565, 1023.42, 1031.58, 1022.57
+11/10/2017, 1028.07, 720674, 1026.46, 1030.76, 1025.28
+11/09/2017, 1031.26, 1244701, 1033.99, 1033.99, 1019.6656
+11/08/2017, 1039.85, 1088395, 1030.52, 1043.522, 1028.45
+11/07/2017, 1033.33, 1112123, 1027.27, 1033.97, 1025.13
+11/06/2017, 1025.9, 1124757, 1028.99, 1034.87, 1025
+11/03/2017, 1032.48, 1075134, 1022.11, 1032.65, 1020.31
+11/02/2017, 1025.58, 1048584, 1021.76, 1028.09, 1013.01
+11/01/2017, 1025.5, 1371619, 1017.21, 1029.67, 1016.95
+10/31/2017, 1016.64, 1331265, 1015.22, 1024, 1010.42
+10/30/2017, 1017.11, 2083490, 1014, 1024.97, 1007.5
+10/27/2017, 1019.27, 5165922, 1009.19, 1048.39, 1008.2
+10/26/2017, 972.56, 2027218, 980, 987.6, 972.2
+10/25/2017, 973.33, 1210368, 968.37, 976.09, 960.5201
+10/24/2017, 970.54, 1206074, 970, 972.23, 961
+10/23/2017, 968.45, 1471544, 989.52, 989.52, 966.12
+10/20/2017, 988.2, 1176177, 989.44, 991, 984.58
+10/19/2017, 984.45, 1312706, 986, 988.88, 978.39
+10/18/2017, 992.81, 1057285, 991.77, 996.72, 986.9747
+10/17/2017, 992.18, 1290152, 990.29, 996.44, 988.59
+10/16/2017, 992, 910246, 992.1, 993.9065, 984
+10/13/2017, 989.68, 1169584, 992, 997.21, 989
+10/12/2017, 987.83, 1278357, 987.45, 994.12, 985
+10/11/2017, 989.25, 1692843, 973.72, 990.71, 972.25
+10/10/2017, 972.6, 968113, 980, 981.57, 966.0801
+10/09/2017, 977, 890620, 980, 985.425, 976.11
+10/06/2017, 978.89, 1146207, 966.7, 979.46, 963.36
+10/05/2017, 969.96, 1210427, 955.49, 970.91, 955.18
+10/04/2017, 951.68, 951766, 957, 960.39, 950.69
+10/03/2017, 957.79, 888303, 954, 958, 949.14
+10/02/2017, 953.27, 1282850, 959.98, 962.54, 947.84
+09/29/2017, 959.11, 1576365, 952, 959.7864, 951.51
+09/28/2017, 949.5, 997036, 941.36, 950.69, 940.55
+09/27/2017, 944.49, 2237538, 927.74, 949.9, 927.74
+09/26/2017, 924.86, 1666749, 923.72, 930.82, 921.14
+09/25/2017, 920.97, 1855742, 925.45, 926.4, 909.7
+09/22/2017, 928.53, 1052170, 927.75, 934.73, 926.48
+09/21/2017, 932.45, 1227059, 933, 936.53, 923.83
+09/20/2017, 931.58, 1535626, 922.98, 933.88, 922
+09/19/2017, 921.81, 912967, 917.42, 922.4199, 912.55
+09/18/2017, 915, 1300759, 920.01, 922.08, 910.6
+09/15/2017, 920.29, 2499466, 924.66, 926.49, 916.36
+09/14/2017, 925.11, 1395497, 931.25, 932.77, 924
+09/13/2017, 935.09, 1101145, 930.66, 937.25, 929.86
+09/12/2017, 932.07, 1133638, 932.59, 933.48, 923.861
+09/11/2017, 929.08, 1266020, 934.25, 938.38, 926.92
+09/08/2017, 926.5, 997699, 936.49, 936.99, 924.88
+09/07/2017, 935.95, 1211472, 931.73, 936.41, 923.62
+09/06/2017, 927.81, 1526209, 930.15, 930.915, 919.27
+09/05/2017, 928.45, 1346791, 933.08, 937, 921.96
+09/01/2017, 937.34, 943657, 941.13, 942.48, 935.15
+08/31/2017, 939.33, 1566888, 931.76, 941.98, 931.76
+08/30/2017, 929.57, 1300616, 920.05, 930.819, 919.65
+08/29/2017, 921.29, 1181391, 905.1, 923.33, 905
+08/28/2017, 913.81, 1085014, 916, 919.245, 911.87
+08/25/2017, 915.89, 1052764, 923.49, 925.555, 915.5
+08/24/2017, 921.28, 1266191, 928.66, 930.84, 915.5
+08/23/2017, 927, 1088575, 921.93, 929.93, 919.36
+08/22/2017, 924.69, 1166320, 912.72, 925.86, 911.4751
+08/21/2017, 906.66, 942328, 910, 913, 903.4
+08/18/2017, 910.67, 1341990, 910.31, 915.275, 907.1543
+08/17/2017, 910.98, 1241782, 925.78, 926.86, 910.98
+08/16/2017, 926.96, 1005261, 925.29, 932.7, 923.445
+08/15/2017, 922.22, 882479, 924.23, 926.5499, 919.82
+08/14/2017, 922.67, 1063404, 922.53, 924.668, 918.19
+08/11/2017, 914.39, 1205652, 907.97, 917.78, 905.58
+08/10/2017, 907.24, 1755521, 917.55, 919.26, 906.13
+08/09/2017, 922.9, 1191332, 920.61, 925.98, 917.2501
+08/08/2017, 926.79, 1057351, 927.09, 935.814, 925.6095
+08/07/2017, 929.36, 1031710, 929.06, 931.7, 926.5
+08/04/2017, 927.96, 1081814, 926.75, 930.3068, 923.03
+08/03/2017, 923.65, 1201519, 930.34, 932.24, 922.24
+08/02/2017, 930.39, 1822272, 928.61, 932.6, 916.68
+08/01/2017, 930.83, 1234612, 932.38, 937.447, 929.26
+07/31/2017, 930.5, 1964748, 941.89, 943.59, 926.04
+07/28/2017, 941.53, 1802343, 929.4, 943.83, 927.5
+07/27/2017, 934.09, 3128819, 951.78, 951.78, 920
+07/26/2017, 947.8, 2069349, 954.68, 955, 942.2788
+07/25/2017, 950.7, 4656609, 953.81, 959.7, 945.4
+07/24/2017, 980.34, 3205374, 972.22, 986.2, 970.77
+07/21/2017, 972.92, 1697190, 962.25, 973.23, 960.15
+07/20/2017, 968.15, 1620636, 975, 975.9, 961.51
+07/19/2017, 970.89, 1221155, 967.84, 973.04, 964.03
+07/18/2017, 965.4, 1152741, 953, 968.04, 950.6
+07/17/2017, 953.42, 1164141, 957, 960.74, 949.2407
+07/14/2017, 955.99, 1052855, 952, 956.91, 948.005
+07/13/2017, 947.16, 1294674, 946.29, 954.45, 943.01
+07/12/2017, 943.83, 1517168, 938.68, 946.3, 934.47
+07/11/2017, 930.09, 1112417, 929.54, 931.43, 922
+07/10/2017, 928.8, 1190237, 921.77, 930.38, 919.59
+07/07/2017, 918.59, 1590456, 908.85, 921.54, 908.85
+07/06/2017, 906.69, 1424290, 904.12, 914.9444, 899.7
+07/05/2017, 911.71, 1813309, 901.76, 914.51, 898.5
+07/03/2017, 898.7, 1710373, 912.18, 913.94, 894.79
+06/30/2017, 908.73, 2086340, 926.05, 926.05, 908.31
+06/29/2017, 917.79, 3287991, 929.92, 931.26, 910.62
+06/28/2017, 940.49, 2719213, 929, 942.75, 916
+06/27/2017, 927.33, 2566047, 942.46, 948.29, 926.85
+06/26/2017, 952.27, 1596664, 969.9, 973.31, 950.79
+06/23/2017, 965.59, 1527513, 956.83, 966, 954.2
+06/22/2017, 957.09, 941639, 958.7, 960.72, 954.55
+06/21/2017, 959.45, 1201971, 953.64, 960.1, 950.76
+06/20/2017, 950.63, 1125520, 957.52, 961.62, 950.01
+06/19/2017, 957.37, 1520715, 949.96, 959.99, 949.05
+06/16/2017, 939.78, 3061794, 940, 942.04, 931.595
+06/15/2017, 942.31, 2065271, 933.97, 943.339, 924.44
+06/14/2017, 950.76, 1487378, 959.92, 961.15, 942.25
+06/13/2017, 953.4, 2012980, 951.91, 959.98, 944.09
+06/12/2017, 942.9, 3762434, 939.56, 949.355, 915.2328
+06/09/2017, 949.83, 3305545, 984.5, 984.5, 935.63
+06/08/2017, 983.41, 1477151, 982.35, 984.57, 977.2
+06/07/2017, 981.08, 1447172, 979.65, 984.15, 975.77
+06/06/2017, 976.57, 1814323, 983.16, 988.25, 975.14
+06/05/2017, 983.68, 1251903, 976.55, 986.91, 975.1
+06/02/2017, 975.6, 1750723, 969.46, 975.88, 966
+06/01/2017, 966.95, 1408958, 968.95, 971.5, 960.01
+05/31/2017, 964.86, 2447176, 975.02, 979.27, 960.18
+05/30/2017, 975.88, 1466288, 970.31, 976.2, 969.49
+05/26/2017, 971.47, 1251425, 969.7, 974.98, 965.03
+05/25/2017, 969.54, 1659422, 957.33, 972.629, 955.47
+05/24/2017, 954.96, 1031408, 952.98, 955.09, 949.5
+05/23/2017, 948.82, 1269438, 947.92, 951.4666, 942.575
+05/22/2017, 941.86, 1118456, 935, 941.8828, 935
+05/19/2017, 934.01, 1389848, 931.47, 937.755, 931
+05/18/2017, 930.24, 1596058, 921, 933.17, 918.75
+05/17/2017, 919.62, 2357922, 935.67, 939.3325, 918.14
+05/16/2017, 943, 968288, 940, 943.11, 937.58
+05/15/2017, 937.08, 1104595, 932.95, 938.25, 929.34
+05/12/2017, 932.22, 1050377, 931.53, 933.44, 927.85
+05/11/2017, 930.6, 834997, 925.32, 932.53, 923.0301
+05/10/2017, 928.78, 1173887, 931.98, 932, 925.16
+05/09/2017, 932.17, 1581236, 936.95, 937.5, 929.53
+05/08/2017, 934.3, 1328885, 926.12, 936.925, 925.26
+05/05/2017, 927.13, 1910317, 933.54, 934.9, 925.2
+05/04/2017, 931.66, 1421938, 926.07, 935.93, 924.59
+05/03/2017, 927.04, 1497565, 914.86, 928.1, 912.5426
+05/02/2017, 916.44, 1543696, 909.62, 920.77, 909.4526
+05/01/2017, 912.57, 2114629, 901.94, 915.68, 901.45
+04/28/2017, 905.96, 3223850, 910.66, 916.85, 905.77
+04/27/2017, 874.25, 2009509, 873.6, 875.4, 870.38
+04/26/2017, 871.73, 1233724, 874.23, 876.05, 867.7481
+04/25/2017, 872.3, 1670095, 865, 875, 862.81
+04/24/2017, 862.76, 1371722, 851.2, 863.45, 849.86
+04/21/2017, 843.19, 1323364, 842.88, 843.88, 840.6
+04/20/2017, 841.65, 957994, 841.44, 845.2, 839.32
+04/19/2017, 838.21, 954324, 839.79, 842.22, 836.29
+04/18/2017, 836.82, 835433, 834.22, 838.93, 832.71
+04/17/2017, 837.17, 894540, 825.01, 837.75, 824.47
+04/13/2017, 823.56, 1118221, 822.14, 826.38, 821.44
+04/12/2017, 824.32, 900059, 821.93, 826.66, 821.02
+04/11/2017, 823.35, 1078951, 824.71, 827.4267, 817.0201
+04/10/2017, 824.73, 978825, 825.39, 829.35, 823.77
+04/07/2017, 824.67, 1056692, 827.96, 828.485, 820.5127
+04/06/2017, 827.88, 1254235, 832.4, 836.39, 826.46
+04/05/2017, 831.41, 1553163, 835.51, 842.45, 830.72
+04/04/2017, 834.57, 1044455, 831.36, 835.18, 829.0363
+04/03/2017, 838.55, 1670349, 829.22, 840.85, 829.22
+03/31/2017, 829.56, 1401756, 828.97, 831.64, 827.39
+03/30/2017, 831.5, 1055263, 833.5, 833.68, 829
+03/29/2017, 831.41, 1785006, 825, 832.765, 822.3801
+03/28/2017, 820.92, 1620532, 820.41, 825.99, 814.027
+03/27/2017, 819.51, 1894735, 806.95, 821.63, 803.37
+03/24/2017, 814.43, 1980415, 820.08, 821.93, 808.89
+03/23/2017, 817.58, 3485390, 821, 822.57, 812.257
+03/22/2017, 829.59, 1399409, 831.91, 835.55, 827.1801
+03/21/2017, 830.46, 2461375, 851.4, 853.5, 829.02
+03/20/2017, 848.4, 1217560, 850.01, 850.22, 845.15
+03/17/2017, 852.12, 1712397, 851.61, 853.4, 847.11
+03/16/2017, 848.78, 977384, 849.03, 850.85, 846.13
+03/15/2017, 847.2, 1381328, 847.59, 848.63, 840.77
+03/14/2017, 845.62, 779920, 843.64, 847.24, 840.8
+03/13/2017, 845.54, 1149928, 844, 848.685, 843.25
+03/10/2017, 843.25, 1702731, 843.28, 844.91, 839.5
+03/09/2017, 838.68, 1261393, 836, 842, 834.21
+03/08/2017, 835.37, 988900, 833.51, 838.15, 831.79
+03/07/2017, 831.91, 1037573, 827.4, 833.41, 826.52
+03/06/2017, 827.78, 1108799, 826.95, 828.88, 822.4
+03/03/2017, 829.08, 890640, 830.56, 831.36, 825.751
+03/02/2017, 830.63, 937824, 833.85, 834.51, 829.64
+03/01/2017, 835.24, 1495934, 828.85, 836.255, 827.26
+02/28/2017, 823.21, 2258695, 825.61, 828.54, 820.2
+02/27/2017, 829.28, 1101120, 824.55, 830.5, 824
+02/24/2017, 828.64, 1392039, 827.73, 829, 824.2
+02/23/2017, 831.33, 1471342, 830.12, 832.46, 822.88
+02/22/2017, 830.76, 983058, 828.66, 833.25, 828.64
+02/21/2017, 831.66, 1259841, 828.66, 833.45, 828.35
+02/17/2017, 828.07, 1602549, 823.02, 828.07, 821.655
+02/16/2017, 824.16, 1285919, 819.93, 824.4, 818.98
+02/15/2017, 818.98, 1311316, 819.36, 823, 818.47
+02/14/2017, 820.45, 1054472, 819, 823, 816
+02/13/2017, 819.24, 1205835, 816, 820.959, 815.49
+02/10/2017, 813.67, 1134701, 811.7, 815.25, 809.78
+02/09/2017, 809.56, 990260, 809.51, 810.66, 804.54
+02/08/2017, 808.38, 1155892, 807, 811.84, 803.1903
+02/07/2017, 806.97, 1240257, 803.99, 810.5, 801.78
+02/06/2017, 801.34, 1182882, 799.7, 801.67, 795.2501
+02/03/2017, 801.49, 1461217, 802.99, 806, 800.37
+02/02/2017, 798.53, 1530827, 793.8, 802.7, 792
+02/01/2017, 795.695, 2027708, 799.68, 801.19, 791.19
+01/31/2017, 796.79, 2153957, 796.86, 801.25, 790.52
+01/30/2017, 802.32, 3243568, 814.66, 815.84, 799.8
+01/27/2017, 823.31, 2964989, 834.71, 841.95, 820.44
+01/26/2017, 832.15, 2944642, 837.81, 838, 827.01
+01/25/2017, 835.67, 1612854, 829.62, 835.77, 825.06
+01/24/2017, 823.87, 1472228, 822.3, 825.9, 817.821
+01/23/2017, 819.31, 1962506, 807.25, 820.87, 803.74
+01/20/2017, 805.02, 1668638, 806.91, 806.91, 801.69
+01/19/2017, 802.175, 917085, 805.12, 809.48, 801.8
+01/18/2017, 806.07, 1293893, 805.81, 806.205, 800.99
+01/17/2017, 804.61, 1361935, 807.08, 807.14, 800.37
+01/13/2017, 807.88, 1098154, 807.48, 811.2244, 806.69
+01/12/2017, 806.36, 1352872, 807.14, 807.39, 799.17
+01/11/2017, 807.91, 1065360, 805, 808.15, 801.37
+01/10/2017, 804.79, 1176637, 807.86, 809.1299, 803.51
+01/09/2017, 806.65, 1274318, 806.4, 809.9664, 802.83
+01/06/2017, 806.15, 1639246, 795.26, 807.9, 792.2041
+01/05/2017, 794.02, 1334028, 786.08, 794.48, 785.02
+01/04/2017, 786.9, 1071198, 788.36, 791.34, 783.16
+01/03/2017, 786.14, 1657291, 778.81, 789.63, 775.8
+12/30/2016, 771.82, 1769809, 782.75, 782.78, 770.41
+12/29/2016, 782.79, 743808, 783.33, 785.93, 778.92
+12/28/2016, 785.05, 1142148, 793.7, 794.23, 783.2
+12/27/2016, 791.55, 789151, 790.68, 797.86, 787.657
+12/23/2016, 789.91, 623682, 790.9, 792.74, 787.28
+12/22/2016, 791.26, 972147, 792.36, 793.32, 788.58
+12/21/2016, 794.56, 1208770, 795.84, 796.6757, 787.1
+12/20/2016, 796.42, 950345, 796.76, 798.65, 793.27
+12/19/2016, 794.2, 1231966, 790.22, 797.66, 786.27
+12/16/2016, 790.8, 2435100, 800.4, 800.8558, 790.29
+12/15/2016, 797.85, 1623709, 797.34, 803, 792.92
+12/14/2016, 797.07, 1700875, 797.4, 804, 794.01
+12/13/2016, 796.1, 2122735, 793.9, 804.3799, 793.34
+12/12/2016, 789.27, 2102288, 785.04, 791.25, 784.3554
+12/09/2016, 789.29, 1821146, 780, 789.43, 779.021
+12/08/2016, 776.42, 1487517, 772.48, 778.18, 767.23
+12/07/2016, 771.19, 1757710, 761, 771.36, 755.8
+12/06/2016, 759.11, 1690365, 764.73, 768.83, 757.34
+12/05/2016, 762.52, 1393566, 757.71, 763.9, 752.9
+12/02/2016, 750.5, 1452181, 744.59, 754, 743.1
+12/01/2016, 747.92, 3017001, 757.44, 759.85, 737.0245
+11/30/2016, 758.04, 2386628, 770.07, 772.99, 754.83
+11/29/2016, 770.84, 1616427, 771.53, 778.5, 768.24
+11/28/2016, 768.24, 2177039, 760, 779.53, 759.8
+11/25/2016, 761.68, 587421, 764.26, 765, 760.52
+11/23/2016, 760.99, 1477501, 767.73, 768.2825, 755.25
+11/22/2016, 768.27, 1592372, 772.63, 776.96, 767
+11/21/2016, 769.2, 1324431, 762.61, 769.7, 760.6
+11/18/2016, 760.54, 1528555, 771.37, 775, 760
+11/17/2016, 771.23, 1298484, 766.92, 772.7, 764.23
+11/16/2016, 764.48, 1468196, 755.2, 766.36, 750.51
+11/15/2016, 758.49, 2375056, 746.97, 764.4162, 746.97
+11/14/2016, 736.08, 3644965, 755.6, 757.85, 727.54
+11/11/2016, 754.02, 2421889, 756.54, 760.78, 750.38
+11/10/2016, 762.56, 4733916, 791.17, 791.17, 752.18
+11/09/2016, 785.31, 2603860, 779.94, 791.2265, 771.67
+11/08/2016, 790.51, 1361472, 783.4, 795.633, 780.19
+11/07/2016, 782.52, 1574426, 774.5, 785.19, 772.55
+11/04/2016, 762.02, 2131948, 750.66, 770.36, 750.5611
+11/03/2016, 762.13, 1933937, 767.25, 769.95, 759.03
+11/02/2016, 768.7, 1905814, 778.2, 781.65, 763.4496
+11/01/2016, 783.61, 2404898, 782.89, 789.49, 775.54
+10/31/2016, 784.54, 2420892, 795.47, 796.86, 784
+10/28/2016, 795.37, 4261912, 808.35, 815.49, 793.59
+10/27/2016, 795.35, 2723097, 801, 803.49, 791.5
+10/26/2016, 799.07, 1645403, 806.34, 806.98, 796.32
+10/25/2016, 807.67, 1575020, 816.68, 816.68, 805.14
+10/24/2016, 813.11, 1693162, 804.9, 815.18, 804.82
+10/21/2016, 799.37, 1262042, 795, 799.5, 794
+10/20/2016, 796.97, 1755546, 803.3, 803.97, 796.03
+10/19/2016, 801.56, 1762990, 798.86, 804.63, 797.635
+10/18/2016, 795.26, 2046338, 787.85, 801.61, 785.565
+10/17/2016, 779.96, 1091524, 779.8, 785.85, 777.5
+10/14/2016, 778.53, 851512, 781.65, 783.95, 776
+10/13/2016, 778.19, 1360619, 781.22, 781.22, 773
+10/12/2016, 786.14, 935138, 783.76, 788.13, 782.06
+10/11/2016, 783.07, 1371461, 786.66, 792.28, 780.58
+10/10/2016, 785.94, 1161410, 777.71, 789.38, 775.87
+10/07/2016, 775.08, 932444, 779.66, 779.66, 770.75
+10/06/2016, 776.86, 1066910, 779, 780.48, 775.54
+10/05/2016, 776.47, 1457661, 779.31, 782.07, 775.65
+10/04/2016, 776.43, 1198361, 776.03, 778.71, 772.89
+10/03/2016, 772.56, 1276614, 774.25, 776.065, 769.5
+09/30/2016, 777.29, 1583293, 776.33, 780.94, 774.09
+09/29/2016, 775.01, 1310252, 781.44, 785.8, 774.232
+09/28/2016, 781.56, 1108249, 777.85, 781.81, 774.97
+09/27/2016, 783.01, 1152760, 775.5, 785.9899, 774.308
+09/26/2016, 774.21, 1531788, 782.74, 782.74, 773.07
+09/23/2016, 786.9, 1411439, 786.59, 788.93, 784.15
+09/22/2016, 787.21, 1483899, 780, 789.85, 778.44
+09/21/2016, 776.22, 1166290, 772.66, 777.16, 768.301
+09/20/2016, 771.41, 975434, 769, 773.33, 768.53
+09/19/2016, 765.7, 1171969, 772.42, 774, 764.4406
+09/16/2016, 768.88, 2047036, 769.75, 769.75, 764.66
+09/15/2016, 771.76, 1344945, 762.89, 773.8, 759.96
+09/14/2016, 762.49, 1093723, 759.61, 767.68, 759.11
+09/13/2016, 759.69, 1394158, 764.48, 766.2195, 755.8
+09/12/2016, 769.02, 1310493, 755.13, 770.29, 754.0001
+09/09/2016, 759.66, 1879903, 770.1, 773.245, 759.66
+09/08/2016, 775.32, 1268663, 778.59, 780.35, 773.58
+09/07/2016, 780.35, 893874, 780, 782.73, 776.2
+09/06/2016, 780.08, 1441864, 773.45, 782, 771
+09/02/2016, 771.46, 1070725, 773.01, 773.9199, 768.41
+09/01/2016, 768.78, 925019, 769.25, 771.02, 764.3
+08/31/2016, 767.05, 1247937, 767.01, 769.09, 765.38
+08/30/2016, 769.09, 1129932, 769.33, 774.466, 766.84
+08/29/2016, 772.15, 847537, 768.74, 774.99, 766.615
+08/26/2016, 769.54, 1164713, 769, 776.0799, 765.85
+08/25/2016, 769.41, 926856, 767, 771.89, 763.1846
+08/24/2016, 769.64, 1071569, 770.58, 774.5, 767.07
+08/23/2016, 772.08, 925356, 775.48, 776.44, 771.785
+08/22/2016, 772.15, 950417, 773.27, 774.54, 770.0502
+08/19/2016, 775.42, 860899, 775, 777.1, 773.13
+08/18/2016, 777.5, 718882, 780.01, 782.86, 777
+08/17/2016, 779.91, 921666, 777.32, 780.81, 773.53
+08/16/2016, 777.14, 1027836, 780.3, 780.98, 773.444
+08/15/2016, 782.44, 938183, 783.75, 787.49, 780.11
+08/12/2016, 783.22, 739761, 781.5, 783.395, 780.4
+08/11/2016, 784.85, 971742, 785, 789.75, 782.97
+08/10/2016, 784.68, 784559, 783.75, 786.8123, 782.778
+08/09/2016, 784.26, 1318457, 781.1, 788.94, 780.57
+08/08/2016, 781.76, 1106693, 782, 782.63, 778.091
+08/05/2016, 782.22, 1799478, 773.78, 783.04, 772.34
+08/04/2016, 771.61, 1139972, 772.22, 774.07, 768.795
+08/03/2016, 773.18, 1283186, 767.18, 773.21, 766.82
+08/02/2016, 771.07, 1782822, 768.69, 775.84, 767.85
+08/01/2016, 772.88, 2697699, 761.09, 780.43, 761.09
+07/29/2016, 768.79, 3830103, 772.71, 778.55, 766.77
+07/28/2016, 745.91, 3473040, 747.04, 748.65, 739.3
+07/27/2016, 741.77, 1509133, 738.28, 744.46, 737
+07/26/2016, 738.42, 1182993, 739.04, 741.69, 734.27
+07/25/2016, 739.77, 1031643, 740.67, 742.61, 737.5
+07/22/2016, 742.74, 1256741, 741.86, 743.24, 736.56
+07/21/2016, 738.63, 1022229, 740.36, 741.69, 735.831
+07/20/2016, 741.19, 1283931, 737.33, 742.13, 737.1
+07/19/2016, 736.96, 1225467, 729.89, 736.99, 729
+07/18/2016, 733.78, 1284740, 722.71, 736.13, 721.19
+07/15/2016, 719.85, 1277514, 725.73, 725.74, 719.055
+07/14/2016, 720.95, 949456, 721.58, 722.21, 718.03
+07/13/2016, 716.98, 933352, 723.62, 724, 716.85
+07/12/2016, 720.64, 1336112, 719.12, 722.94, 715.91
+07/11/2016, 715.09, 1107039, 708.05, 716.51, 707.24
+07/08/2016, 705.63, 1573909, 699.5, 705.71, 696.435
+07/07/2016, 695.36, 1303661, 698.08, 698.2, 688.215
+07/06/2016, 697.77, 1411080, 689.98, 701.68, 689.09
+07/05/2016, 694.49, 1462879, 696.06, 696.94, 688.88
+07/01/2016, 699.21, 1344387, 692.2, 700.65, 692.1301
+06/30/2016, 692.1, 1597298, 685.47, 692.32, 683.65
+06/29/2016, 684.11, 1931436, 683, 687.4292, 681.41
+06/28/2016, 680.04, 2169704, 678.97, 680.33, 673
+06/27/2016, 668.26, 2632011, 671, 672.3, 663.284
+06/24/2016, 675.22, 4442943, 675.17, 689.4, 673.45
+06/23/2016, 701.87, 2166183, 697.45, 701.95, 687
+06/22/2016, 697.46, 1182161, 699.06, 700.86, 693.0819
+06/21/2016, 695.94, 1464836, 698.4, 702.77, 692.01
+06/20/2016, 693.71, 2080645, 698.77, 702.48, 693.41
+06/17/2016, 691.72, 3397720, 708.65, 708.82, 688.4515
+06/16/2016, 710.36, 1981657, 714.91, 716.65, 703.26
+06/15/2016, 718.92, 1213386, 719, 722.98, 717.31
+06/14/2016, 718.27, 1303808, 716.48, 722.47, 713.12
+06/13/2016, 718.36, 1255199, 716.51, 725.44, 716.51
+06/10/2016, 719.41, 1213989, 719.47, 725.89, 716.43
+06/09/2016, 728.58, 987635, 722.87, 729.54, 722.3361
+06/08/2016, 728.28, 1583325, 723.96, 728.57, 720.58
+06/07/2016, 716.65, 1336348, 719.84, 721.98, 716.55
+06/06/2016, 716.55, 1565955, 724.91, 724.91, 714.61
+06/03/2016, 722.34, 1225924, 729.27, 729.49, 720.56
+06/02/2016, 730.4, 1340664, 732.5, 733.02, 724.17
+06/01/2016, 734.15, 1251468, 734.53, 737.21, 730.66
+05/31/2016, 735.72, 2128358, 731.74, 739.73, 731.26
+05/27/2016, 732.66, 1974425, 724.01, 733.936, 724
+05/26/2016, 724.12, 1573635, 722.87, 728.33, 720.28
+05/25/2016, 725.27, 1629790, 720.76, 727.51, 719.7047
+05/24/2016, 720.09, 1926828, 706.86, 720.97, 706.86
+05/23/2016, 704.24, 1326386, 706.53, 711.4781, 704.18
+05/20/2016, 709.74, 1825830, 701.62, 714.58, 700.52
+05/19/2016, 700.32, 1668887, 702.36, 706, 696.8
+05/18/2016, 706.63, 1765632, 703.67, 711.6, 700.63
+05/17/2016, 706.23, 1999883, 715.99, 721.52, 704.11
+05/16/2016, 716.49, 1316719, 709.13, 718.48, 705.65
+05/13/2016, 710.83, 1307559, 711.93, 716.6619, 709.26
+05/12/2016, 713.31, 1361170, 717.06, 719.25, 709
+05/11/2016, 715.29, 1690862, 723.41, 724.48, 712.8
+05/10/2016, 723.18, 1568621, 716.75, 723.5, 715.72
+05/09/2016, 712.9, 1509892, 712, 718.71, 710
+05/06/2016, 711.12, 1828508, 698.38, 711.86, 698.1067
+05/05/2016, 701.43, 1680220, 697.7, 702.3199, 695.72
+05/04/2016, 695.7, 1692757, 690.49, 699.75, 689.01
+05/03/2016, 692.36, 1541297, 696.87, 697.84, 692
+05/02/2016, 698.21, 1645013, 697.63, 700.64, 691
+04/29/2016, 693.01, 2486584, 690.7, 697.62, 689
+04/28/2016, 691.02, 2859790, 708.26, 714.17, 689.55
+04/27/2016, 705.84, 3094905, 707.29, 708.98, 692.3651
+04/26/2016, 708.14, 2739133, 725.42, 725.766, 703.0264
+04/25/2016, 723.15, 1956956, 716.1, 723.93, 715.59
+04/22/2016, 718.77, 5949699, 726.3, 736.12, 713.61
+04/21/2016, 759.14, 2995094, 755.38, 760.45, 749.55
+04/20/2016, 752.67, 1526776, 758, 758.1315, 750.01
+04/19/2016, 753.93, 2027962, 769.51, 769.9, 749.33
+04/18/2016, 766.61, 1557199, 760.46, 768.05, 757.3
+04/15/2016, 759, 1807062, 753.98, 761, 752.6938
+04/14/2016, 753.2, 1134056, 754.01, 757.31, 752.705
+04/13/2016, 751.72, 1707397, 749.16, 754.38, 744.261
+04/12/2016, 743.09, 1349780, 738, 743.83, 731.01
+04/11/2016, 736.1, 1218789, 743.02, 745, 736.05
+04/08/2016, 739.15, 1289869, 743.97, 745.45, 735.55
+04/07/2016, 740.28, 1452369, 745.37, 746.9999, 736.28
+04/06/2016, 745.69, 1052171, 735.77, 746.24, 735.56
+04/05/2016, 737.8, 1130817, 738, 742.8, 735.37
+04/04/2016, 745.29, 1134214, 750.06, 752.8, 742.43
+04/01/2016, 749.91, 1576240, 738.6, 750.34, 737
+03/31/2016, 744.95, 1718638, 749.25, 750.85, 740.94
+03/30/2016, 750.53, 1782278, 750.1, 757.88, 748.74
+03/29/2016, 744.77, 1902254, 734.59, 747.25, 728.76
+03/28/2016, 733.53, 1300817, 736.79, 738.99, 732.5
+03/24/2016, 735.3, 1570474, 732.01, 737.747, 731
+03/23/2016, 738.06, 1431130, 742.36, 745.7199, 736.15
+03/22/2016, 740.75, 1269263, 737.46, 745, 737.46
+03/21/2016, 742.09, 1835963, 736.5, 742.5, 733.5157
+03/18/2016, 737.6, 2982194, 741.86, 742, 731.83
+03/17/2016, 737.78, 1859562, 736.45, 743.07, 736
+03/16/2016, 736.09, 1621412, 726.37, 737.47, 724.51
+03/15/2016, 728.33, 1720790, 726.92, 732.29, 724.77
+03/14/2016, 730.49, 1717002, 726.81, 735.5, 725.15
+03/11/2016, 726.82, 1968164, 720, 726.92, 717.125
+03/10/2016, 712.82, 2830630, 708.12, 716.44, 703.36
+03/09/2016, 705.24, 1419661, 698.47, 705.68, 694
+03/08/2016, 693.97, 2075305, 688.59, 703.79, 685.34
+03/07/2016, 695.16, 2986064, 706.9, 708.0912, 686.9
+03/04/2016, 710.89, 1971379, 714.99, 716.49, 706.02
+03/03/2016, 712.42, 1956958, 718.68, 719.45, 706.02
+03/02/2016, 718.85, 1629501, 719, 720, 712
+03/01/2016, 718.81, 2148608, 703.62, 718.81, 699.77
+02/29/2016, 697.77, 2478214, 700.32, 710.89, 697.68
+02/26/2016, 705.07, 2241785, 708.58, 713.43, 700.86
+02/25/2016, 705.75, 1640430, 700.01, 705.98, 690.585
+02/24/2016, 699.56, 1961258, 688.92, 700, 680.78
+02/23/2016, 695.85, 2006572, 701.45, 708.4, 693.58
+02/22/2016, 706.46, 1949046, 707.45, 713.24, 702.51
+02/19/2016, 700.91, 1585152, 695.03, 703.0805, 694.05
+02/18/2016, 697.35, 1880306, 710, 712.35, 696.03
+02/17/2016, 708.4, 2490021, 699, 709.75, 691.38
+02/16/2016, 691, 2517324, 692.98, 698, 685.05
+02/12/2016, 682.4, 2138937, 690.26, 693.75, 678.6
+02/11/2016, 683.11, 3021587, 675, 689.35, 668.8675
+02/10/2016, 684.12, 2629130, 686.86, 701.31, 682.13
+02/09/2016, 678.11, 3605792, 672.32, 699.9, 668.77
+02/08/2016, 682.74, 4241416, 667.85, 684.03, 663.06
+02/05/2016, 683.57, 5098357, 703.87, 703.99, 680.15
+02/04/2016, 708.01, 5157988, 722.81, 727, 701.86
+02/03/2016, 726.95, 6166731, 770.22, 774.5, 720.5
+02/02/2016, 764.65, 6340548, 784.5, 789.8699, 764.65
+02/01/2016, 752, 5065235, 750.46, 757.86, 743.27
+01/29/2016, 742.95, 3464432, 731.53, 744.9899, 726.8
+01/28/2016, 730.96, 2664956, 722.22, 733.69, 712.35
+01/27/2016, 699.99, 2175913, 713.67, 718.235, 694.39
+01/26/2016, 713.04, 1329141, 713.85, 718.28, 706.48
+01/25/2016, 711.67, 1709777, 723.58, 729.68, 710.01
+01/22/2016, 725.25, 2009951, 723.6, 728.13, 720.121
+01/21/2016, 706.59, 2411079, 702.18, 719.19, 694.46
+01/20/2016, 698.45, 3441642, 688.61, 706.85, 673.26
+01/19/2016, 701.79, 2264747, 703.3, 709.98, 693.4101
+01/15/2016, 694.45, 3604137, 692.29, 706.74, 685.37
+01/14/2016, 714.72, 2225495, 705.38, 721.925, 689.1
+01/13/2016, 700.56, 2497086, 730.85, 734.74, 698.61
+01/12/2016, 726.07, 2010026, 721.68, 728.75, 717.3165
+01/11/2016, 716.03, 2089495, 716.61, 718.855, 703.54
+01/08/2016, 714.47, 2449420, 731.45, 733.23, 713
+01/07/2016, 726.39, 2960578, 730.31, 738.5, 719.06
+01/06/2016, 743.62, 1943685, 730, 747.18, 728.92
+01/05/2016, 742.58, 1949386, 746.45, 752, 738.64
+01/04/2016, 741.84, 3271348, 743, 744.06, 731.2577
+12/31/2015, 758.88, 1500129, 769.5, 769.5, 758.34
+12/30/2015, 771, 1293514, 776.6, 777.6, 766.9
+12/29/2015, 776.6, 1764044, 766.69, 779.98, 766.43
+12/28/2015, 762.51, 1515574, 752.92, 762.99, 749.52
+12/24/2015, 748.4, 527223, 749.55, 751.35, 746.62
+12/23/2015, 750.31, 1566723, 753.47, 754.21, 744
+12/22/2015, 750, 1365420, 751.65, 754.85, 745.53
+12/21/2015, 747.77, 1524535, 746.13, 750, 740
+12/18/2015, 739.31, 3140906, 746.51, 754.13, 738.15
+12/17/2015, 749.43, 1551087, 762.42, 762.68, 749
+12/16/2015, 758.09, 1986319, 750, 760.59, 739.435
+12/15/2015, 743.4, 2661199, 753, 758.08, 743.01
+12/14/2015, 747.77, 2417778, 741.79, 748.73, 724.17
+12/11/2015, 738.87, 2223284, 741.16, 745.71, 736.75
+12/10/2015, 749.46, 1988035, 752.85, 755.85, 743.83
+12/09/2015, 751.61, 2697978, 759.17, 764.23, 737.001
+12/08/2015, 762.37, 1829004, 757.89, 764.8, 754.2
+12/07/2015, 763.25, 1811336, 767.77, 768.73, 755.09
+12/04/2015, 766.81, 2756194, 753.1, 768.49, 750
+12/03/2015, 752.54, 2589641, 766.01, 768.995, 745.63
+12/02/2015, 762.38, 2196721, 768.9, 775.955, 758.96
+12/01/2015, 767.04, 2131827, 747.11, 768.95, 746.7
+11/30/2015, 742.6, 2045584, 748.81, 754.93, 741.27
+11/27/2015, 750.26, 838528, 748.46, 753.41, 747.49
+11/25/2015, 748.15, 1122224, 748.14, 752, 746.06
+11/24/2015, 748.28, 2333700, 752, 755.279, 737.63
+11/23/2015, 755.98, 1414640, 757.45, 762.7075, 751.82
+11/20/2015, 756.6, 2212934, 746.53, 757.92, 743
+11/19/2015, 738.41, 1327265, 738.74, 742, 737.43
+11/18/2015, 740, 1683978, 727.58, 741.41, 727
+11/17/2015, 725.3, 1507449, 729.29, 731.845, 723.027
+11/16/2015, 728.96, 1904395, 715.6, 729.49, 711.33
+11/13/2015, 717, 2072392, 729.17, 731.15, 716.73
+11/12/2015, 731.23, 1836567, 731, 737.8, 728.645
+11/11/2015, 735.4, 1366611, 732.46, 741, 730.23
+11/10/2015, 728.32, 1606499, 724.4, 730.59, 718.5001
+11/09/2015, 724.89, 2068920, 730.2, 734.71, 719.43
+11/06/2015, 733.76, 1510586, 731.5, 735.41, 727.01
+11/05/2015, 731.25, 1861100, 729.47, 739.48, 729.47
+11/04/2015, 728.11, 1705745, 722, 733.1, 721.9
+11/03/2015, 722.16, 1565355, 718.86, 724.65, 714.72
+11/02/2015, 721.11, 1885155, 711.06, 721.62, 705.85
+10/30/2015, 710.81, 1907732, 715.73, 718, 710.05
+10/29/2015, 716.92, 1455508, 710.5, 718.26, 710.01
+10/28/2015, 712.95, 2178841, 707.33, 712.98, 703.08
+10/27/2015, 708.49, 2232183, 707.38, 713.62, 704.55
+10/26/2015, 712.78, 2709292, 701.55, 719.15, 701.26
+10/23/2015, 702, 6651909, 727.5, 730, 701.5
+10/22/2015, 651.79, 3994360, 646.7, 657.8, 644.01
+10/21/2015, 642.61, 1792869, 654.15, 655.87, 641.73
+10/20/2015, 650.28, 2498077, 664.04, 664.7197, 644.195
+10/19/2015, 666.1, 1465691, 661.18, 666.82, 659.58
+10/16/2015, 662.2, 1610712, 664.11, 664.97, 657.2
+10/15/2015, 661.74, 1832832, 654.66, 663.13, 654.46
+10/14/2015, 651.16, 1413798, 653.21, 659.39, 648.85
+10/13/2015, 652.3, 1806003, 643.15, 657.8125, 643.15
+10/12/2015, 646.67, 1275565, 642.09, 648.5, 639.01
+10/09/2015, 643.61, 1648656, 640, 645.99, 635.318
+10/08/2015, 639.16, 2181990, 641.36, 644.45, 625.56
+10/07/2015, 642.36, 2092536, 649.24, 650.609, 632.15
+10/06/2015, 645.44, 2235078, 638.84, 649.25, 636.5295
+10/05/2015, 641.47, 1802263, 632, 643.01, 627
+10/02/2015, 626.91, 2681241, 607.2, 627.34, 603.13
+10/01/2015, 611.29, 1866223, 608.37, 612.09, 599.85
+09/30/2015, 608.42, 2412754, 603.28, 608.76, 600.73
+09/29/2015, 594.97, 2310065, 597.28, 605, 590.22
+09/28/2015, 594.89, 3118693, 610.34, 614.605, 589.38
+09/25/2015, 611.97, 2173134, 629.77, 629.77, 611
+09/24/2015, 625.8, 2238097, 616.64, 627.32, 612.4
+09/23/2015, 622.36, 1470633, 622.05, 628.93, 620
+09/22/2015, 622.69, 2561551, 627, 627.55, 615.43
+09/21/2015, 635.44, 1786543, 634.4, 636.49, 625.94
+09/18/2015, 629.25, 5123314, 636.79, 640, 627.02
+09/17/2015, 642.9, 2259404, 637.79, 650.9, 635.02
+09/16/2015, 635.98, 1276250, 635.47, 637.95, 632.32
+09/15/2015, 635.14, 2082426, 626.7, 638.7, 623.78
+09/14/2015, 623.24, 1701618, 625.7, 625.86, 619.43
+09/11/2015, 625.77, 1372803, 619.75, 625.78, 617.42
+09/10/2015, 621.35, 1903334, 613.1, 624.16, 611.43
+09/09/2015, 612.72, 1699686, 621.22, 626.52, 609.6
+09/08/2015, 614.66, 2277487, 612.49, 616.31, 604.12
+09/04/2015, 600.7, 2087028, 600, 603.47, 595.25
+09/03/2015, 606.25, 1757851, 617, 619.71, 602.8213
+09/02/2015, 614.34, 2573982, 605.59, 614.34, 599.71
+09/01/2015, 597.79, 3699844, 602.36, 612.86, 594.1
+08/31/2015, 618.25, 2172168, 627.54, 635.8, 617.68
+08/28/2015, 630.38, 1975818, 632.82, 636.88, 624.56
+08/27/2015, 637.61, 3485906, 639.4, 643.59, 622
+08/26/2015, 628.62, 4187276, 610.35, 631.71, 599.05
+08/25/2015, 582.06, 3521916, 614.91, 617.45, 581.11
+08/24/2015, 589.61, 5727282, 573, 614, 565.05
+08/21/2015, 612.48, 4261666, 639.78, 640.05, 612.33
+08/20/2015, 646.83, 2854028, 655.46, 662.99, 642.9
+08/19/2015, 660.9, 2132265, 656.6, 667, 654.19
+08/18/2015, 656.13, 1455664, 661.9, 664, 653.46
+08/17/2015, 660.87, 1050553, 656.8, 661.38, 651.24
+08/14/2015, 657.12, 1071333, 655.01, 659.855, 652.66
+08/13/2015, 656.45, 1807182, 659.323, 664.5, 651.661
+08/12/2015, 659.56, 2938651, 663.08, 665, 652.29
+08/11/2015, 660.78, 5016425, 669.2, 674.9, 654.27
+08/10/2015, 633.73, 1653836, 639.48, 643.44, 631.249
+08/07/2015, 635.3, 1403441, 640.23, 642.68, 629.71
+08/06/2015, 642.68, 1572150, 645, 645.379, 632.25
+08/05/2015, 643.78, 2331720, 634.33, 647.86, 633.16
+08/04/2015, 629.25, 1486858, 628.42, 634.81, 627.16
+08/03/2015, 631.21, 1301439, 625.34, 633.0556, 625.34
+07/31/2015, 625.61, 1705286, 631.38, 632.91, 625.5
+07/30/2015, 632.59, 1472286, 630, 635.22, 622.05
+07/29/2015, 631.93, 1573146, 628.8, 633.36, 622.65
+07/28/2015, 628, 1713684, 632.83, 632.83, 623.31
+07/27/2015, 627.26, 2673801, 621, 634.3, 620.5
+07/24/2015, 623.56, 3622089, 647, 648.17, 622.52
+07/23/2015, 644.28, 3014035, 661.27, 663.63, 641
+07/22/2015, 662.1, 3707818, 660.89, 678.64, 659
+07/21/2015, 662.3, 3363342, 655.21, 673, 654.3
+07/20/2015, 663.02, 5857092, 659.24, 668.88, 653.01
+07/17/2015, 672.93, 11153500, 649, 674.468, 645
+07/16/2015, 579.85, 4559712, 565.12, 580.68, 565
+07/15/2015, 560.22, 1782264, 560.13, 566.5029, 556.79
+07/14/2015, 561.1, 3231284, 546.76, 565.8487, 546.71
+07/13/2015, 546.55, 2204610, 532.88, 547.11, 532.4001
+07/10/2015, 530.13, 1954951, 526.29, 532.56, 525.55
+07/09/2015, 520.68, 1840155, 523.12, 523.77, 520.35
+07/08/2015, 516.83, 1293372, 521.05, 522.734, 516.11
+07/07/2015, 525.02, 1595672, 523.13, 526.18, 515.18
+07/06/2015, 522.86, 1278587, 519.5, 525.25, 519
+07/02/2015, 523.4, 1235773, 521.08, 524.65, 521.08
+07/01/2015, 521.84, 1961197, 524.73, 525.69, 518.2305
+06/30/2015, 520.51, 2234284, 526.02, 526.25, 520.5
+06/29/2015, 521.52, 1935361, 525.01, 528.61, 520.54
+06/26/2015, 531.69, 2108629, 537.26, 537.76, 531.35
+06/25/2015, 535.23, 1332412, 538.87, 540.9, 535.23
+06/24/2015, 537.84, 1286576, 540, 540, 535.66
+06/23/2015, 540.48, 1196115, 539.64, 541.499, 535.25
+06/22/2015, 538.19, 1243535, 539.59, 543.74, 537.53
+06/19/2015, 536.69, 1890916, 537.21, 538.25, 533.01
+06/18/2015, 536.73, 1832450, 531, 538.15, 530.79
+06/17/2015, 529.26, 1269113, 529.37, 530.98, 525.1
+06/16/2015, 528.15, 1071728, 528.4, 529.6399, 525.56
+06/15/2015, 527.2, 1632675, 528, 528.3, 524
+06/12/2015, 532.33, 955489, 531.6, 533.12, 530.16
+06/11/2015, 534.61, 1208632, 538.425, 538.98, 533.02
+06/10/2015, 536.69, 1813775, 529.36, 538.36, 529.35
+06/09/2015, 526.69, 1454172, 527.56, 529.2, 523.01
+06/08/2015, 526.83, 1523960, 533.31, 534.12, 526.24
+06/05/2015, 533.33, 1375008, 536.35, 537.2, 532.52
+06/04/2015, 536.7, 1346044, 537.76, 540.59, 534.32
+06/03/2015, 540.31, 1716836, 539.91, 543.5, 537.11
+06/02/2015, 539.18, 1936721, 532.93, 543, 531.33
+06/01/2015, 533.99, 1900257, 536.79, 536.79, 529.76
+05/29/2015, 532.11, 2590445, 537.37, 538.63, 531.45
+05/28/2015, 539.78, 1029764, 538.01, 540.61, 536.25
+05/27/2015, 539.79, 1524783, 532.8, 540.55, 531.71
+05/26/2015, 532.32, 2404462, 538.12, 539, 529.88
+05/22/2015, 540.11, 1175065, 540.15, 544.19, 539.51
+05/21/2015, 542.51, 1461431, 537.95, 543.8399, 535.98
+05/20/2015, 539.27, 1430565, 538.49, 542.92, 532.972
+05/19/2015, 537.36, 1964037, 533.98, 540.66, 533.04
+05/18/2015, 532.3, 2001117, 532.01, 534.82, 528.85
+05/15/2015, 533.85, 1965088, 539.18, 539.2743, 530.38
+05/14/2015, 538.4, 1401005, 533.77, 539, 532.41
+05/13/2015, 529.62, 1253005, 530.56, 534.3215, 528.655
+05/12/2015, 529.04, 1633180, 531.6, 533.2089, 525.26
+05/11/2015, 535.7, 904465, 538.37, 541.98, 535.4
+05/08/2015, 538.22, 1527181, 536.65, 541.15, 536
+05/07/2015, 530.7, 1543986, 523.99, 533.46, 521.75
+05/06/2015, 524.22, 1566865, 531.24, 532.38, 521.085
+05/05/2015, 530.8, 1380519, 538.21, 539.74, 530.3906
+05/04/2015, 540.78, 1303830, 538.53, 544.07, 535.06
+05/01/2015, 537.9, 1758085, 538.43, 539.54, 532.1
+04/30/2015, 537.34, 2080834, 547.87, 548.59, 535.05
+04/29/2015, 549.08, 1696886, 550.47, 553.68, 546.905
+04/28/2015, 553.68, 1490735, 554.64, 556.02, 550.366
+04/27/2015, 555.37, 2390696, 563.39, 565.95, 553.2001
diff --git a/machine_learning/multilayer_perceptron_classifier.py b/machine_learning/multilayer_perceptron_classifier.py
new file mode 100644
index 000000000000..604185cef677
--- /dev/null
+++ b/machine_learning/multilayer_perceptron_classifier.py
@@ -0,0 +1,29 @@
from sklearn.neural_network import MLPClassifier

# Training data: four 2-D points; only [1.0, 1.0] carries label 1.
X = [[0.0, 0.0], [1.0, 1.0], [1.0, 0.0], [0.0, 1.0]]
y = [0, 1, 0, 0]


# Multilayer perceptron with two hidden layers (5 and 2 units).  The lbfgs
# solver converges quickly on a dataset this tiny, and random_state pins the
# weight initialisation so the doctest below is deterministic.
clf = MLPClassifier(
    solver="lbfgs", alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=1
)

clf.fit(X, y)


# Points to classify with the fitted network.
test = [[0.0, 0.0], [0.0, 1.0], [1.0, 1.0]]
Y = clf.predict(test)


def wrapper(Y):
    """Convert a prediction array into a plain list so doctest can compare it.

    >>> wrapper(Y)
    [0, 0, 1]
    """
    return [*Y]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
diff --git a/machine_learning/perceptron.py b/machine_learning/perceptron.py
deleted file mode 100644
index fe1032aff4af..000000000000
--- a/machine_learning/perceptron.py
+++ /dev/null
@@ -1,124 +0,0 @@
-'''
-
- Perceptron
- w = w + N * (d(k) - y) * x(k)
-
- Using perceptron network for oil analysis,
- with Measuring of 3 parameters that represent chemical characteristics we can classify the oil, in p1 or p2
- p1 = -1
- p2 = 1
-
-'''
-from __future__ import print_function
-
-import random
-
-
-class Perceptron:
- def __init__(self, sample, exit, learn_rate=0.01, epoch_number=1000, bias=-1):
- self.sample = sample
- self.exit = exit
- self.learn_rate = learn_rate
- self.epoch_number = epoch_number
- self.bias = bias
- self.number_sample = len(sample)
- self.col_sample = len(sample[0])
- self.weight = []
-
- def trannig(self):
- for sample in self.sample:
- sample.insert(0, self.bias)
-
- for i in range(self.col_sample):
- self.weight.append(random.random())
-
- self.weight.insert(0, self.bias)
-
- epoch_count = 0
-
- while True:
- erro = False
- for i in range(self.number_sample):
- u = 0
- for j in range(self.col_sample + 1):
- u = u + self.weight[j] * self.sample[i][j]
- y = self.sign(u)
- if y != self.exit[i]:
-
- for j in range(self.col_sample + 1):
-
- self.weight[j] = self.weight[j] + self.learn_rate * (self.exit[i] - y) * self.sample[i][j]
- erro = True
- #print('Epoch: \n',epoch_count)
- epoch_count = epoch_count + 1
- # if you want controle the epoch or just by erro
- if erro == False:
- print(('\nEpoch:\n',epoch_count))
- print('------------------------\n')
- #if epoch_count > self.epoch_number or not erro:
- break
-
- def sort(self, sample):
- sample.insert(0, self.bias)
- u = 0
- for i in range(self.col_sample + 1):
- u = u + self.weight[i] * sample[i]
-
- y = self.sign(u)
-
- if y == -1:
- print(('Sample: ', sample))
- print('classification: P1')
- else:
- print(('Sample: ', sample))
- print('classification: P2')
-
- def sign(self, u):
- return 1 if u >= 0 else -1
-
-
-samples = [
- [-0.6508, 0.1097, 4.0009],
- [-1.4492, 0.8896, 4.4005],
- [2.0850, 0.6876, 12.0710],
- [0.2626, 1.1476, 7.7985],
- [0.6418, 1.0234, 7.0427],
- [0.2569, 0.6730, 8.3265],
- [1.1155, 0.6043, 7.4446],
- [0.0914, 0.3399, 7.0677],
- [0.0121, 0.5256, 4.6316],
- [-0.0429, 0.4660, 5.4323],
- [0.4340, 0.6870, 8.2287],
- [0.2735, 1.0287, 7.1934],
- [0.4839, 0.4851, 7.4850],
- [0.4089, -0.1267, 5.5019],
- [1.4391, 0.1614, 8.5843],
- [-0.9115, -0.1973, 2.1962],
- [0.3654, 1.0475, 7.4858],
- [0.2144, 0.7515, 7.1699],
- [0.2013, 1.0014, 6.5489],
- [0.6483, 0.2183, 5.8991],
- [-0.1147, 0.2242, 7.2435],
- [-0.7970, 0.8795, 3.8762],
- [-1.0625, 0.6366, 2.4707],
- [0.5307, 0.1285, 5.6883],
- [-1.2200, 0.7777, 1.7252],
- [0.3957, 0.1076, 5.6623],
- [-0.1013, 0.5989, 7.1812],
- [2.4482, 0.9455, 11.2095],
- [2.0149, 0.6192, 10.9263],
- [0.2012, 0.2611, 5.4631]
-
-]
-
-exit = [-1, -1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, 1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1]
-
-network = Perceptron(sample=samples, exit = exit, learn_rate=0.01, epoch_number=1000, bias=-1)
-
-network.trannig()
-
-while True:
- sample = []
- for i in range(3):
- sample.insert(i, float(input('value: ')))
- network.sort(sample)
diff --git a/machine_learning/polymonial_regression.py b/machine_learning/polymonial_regression.py
new file mode 100644
index 000000000000..374c35f7f905
--- /dev/null
+++ b/machine_learning/polymonial_regression.py
@@ -0,0 +1,45 @@
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split

# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures

# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values  # position level, kept 2-D for sklearn
y = dataset.iloc[:, 2].values  # salary

# Hold out 20% of the rows; random_state makes the split reproducible.
# NOTE(review): the split is currently unused below -- the model is fitted on
# the full dataset, which is acceptable for this visual demo but worth knowing.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

# Expand the single feature into polynomial terms up to degree 4, then fit an
# ordinary linear regression on the expanded feature matrix.
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


# Visualizing the Polynomial Regression results
def viz_polymonial():
    """Scatter the raw data and overlay the fitted degree-4 polynomial curve."""
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    # Fixed title: this plot shows the polynomial fit, not a linear one.
    plt.title("Truth or Bluff (Polynomial Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


if __name__ == "__main__":
    viz_polymonial()

    # Predicting a new result with Polynomial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
diff --git a/machine_learning/random_forest_classifier.py b/machine_learning/random_forest_classifier.py
new file mode 100644
index 000000000000..6370254090f7
--- /dev/null
+++ b/machine_learning/random_forest_classifier.py
@@ -0,0 +1,44 @@
# Random Forest Classifier Example
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split


def main():
    """
    Random Forest Classifier Example using sklearn function.
    Iris type dataset is used to demonstrate algorithm.
    """
    # Load Iris dataset
    iris = load_iris()

    # Split dataset into train and test data (70/30); random_state fixes the
    # shuffle so the demo is reproducible.
    features = iris["data"]
    targets = iris["target"]
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.3, random_state=1
    )

    # Random Forest Classifier; random_state pins the bootstrap sampling.
    rand_for = RandomForestClassifier(random_state=42, n_estimators=100)
    rand_for.fit(x_train, y_train)

    # Display row-normalized confusion matrix of the classifier on test data.
    # sklearn.metrics.plot_confusion_matrix was deprecated in 1.0 and removed
    # in 1.2; ConfusionMatrixDisplay.from_estimator is the direct replacement
    # and accepts the same keyword arguments.
    ConfusionMatrixDisplay.from_estimator(
        rand_for,
        x_test,
        y_test,
        display_labels=iris["target_names"],
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()


if __name__ == "__main__":
    main()
diff --git a/machine_learning/random_forest_regressor.py b/machine_learning/random_forest_regressor.py
new file mode 100644
index 000000000000..0aade626b038
--- /dev/null
+++ b/machine_learning/random_forest_regressor.py
@@ -0,0 +1,40 @@
# Random Forest Regressor Example
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split


def main():
    """
    Random Forest Regressor Example using sklearn function.
    Boston house price dataset is used to demonstrate the algorithm.
    """
    # Load Boston house price dataset.
    # NOTE(review): load_boston is deprecated and was removed in scikit-learn
    # 1.2; for newer sklearn versions switch this demo to
    # sklearn.datasets.fetch_california_housing.
    boston = load_boston()
    print(boston.keys())

    # Split dataset into train and test data (70/30); random_state fixes the
    # shuffle so the printed errors are reproducible.
    features = boston["data"]
    targets = boston["target"]
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.3, random_state=1
    )

    # Random Forest Regressor; random_state pins the bootstrap sampling.
    rand_for = RandomForestRegressor(random_state=42, n_estimators=300)
    rand_for.fit(x_train, y_train)

    # Predict target for test data.  predict() already returns a 1-D array;
    # the previous reshape to a column vector was unnecessary for the metrics
    # below and has been dropped (metric values are unchanged).
    predictions = rand_for.predict(x_test)

    # Error printing
    print(f"Mean Absolute Error:\t {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error :\t {mean_squared_error(y_test, predictions)}")


if __name__ == "__main__":
    main()
diff --git a/machine_learning/reuters_one_vs_rest_classifier.ipynb b/machine_learning/reuters_one_vs_rest_classifier.ipynb
deleted file mode 100644
index 968130a6053a..000000000000
--- a/machine_learning/reuters_one_vs_rest_classifier.ipynb
+++ /dev/null
@@ -1,405 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "code",
- "execution_count": 1,
- "metadata": {},
- "outputs": [],
- "source": [
- "try:\n",
- " import nltk\n",
- "except ModuleNotFoundError:\n",
- " !pip install nltk"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 2,
- "metadata": {},
- "outputs": [],
- "source": [
- "## This code downloads the required packages.\n",
- "## You can run `nltk.download('all')` to download everything.\n",
- "\n",
- "nltk_packages = [\n",
- " (\"reuters\", \"corpora/reuters.zip\")\n",
- "]\n",
- "\n",
- "for pid, fid in nltk_packages:\n",
- " try:\n",
- " nltk.data.find(fid)\n",
- " except LookupError:\n",
- " nltk.download(pid)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Setting up corpus"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 3,
- "metadata": {},
- "outputs": [],
- "source": [
- "from nltk.corpus import reuters"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Setting up train/test data"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 4,
- "metadata": {},
- "outputs": [],
- "source": [
- "train_documents, train_categories = zip(*[(reuters.raw(i), reuters.categories(i)) for i in reuters.fileids() if i.startswith('training/')])\n",
- "test_documents, test_categories = zip(*[(reuters.raw(i), reuters.categories(i)) for i in reuters.fileids() if i.startswith('test/')])"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 5,
- "metadata": {},
- "outputs": [],
- "source": [
- "all_categories = sorted(list(set(reuters.categories())))"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "The following cell defines a function **tokenize** that performs following actions:\n",
- "- Receive a document as an argument to the function\n",
- "- Tokenize the document using `nltk.word_tokenize()`\n",
- "- Use `PorterStemmer` provided by the `nltk` to remove morphological affixes from each token\n",
- "- Append stemmed token to an already defined list `stems`\n",
- "- Return the list `stems`"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 6,
- "metadata": {},
- "outputs": [],
- "source": [
- "from nltk.stem.porter import PorterStemmer\n",
- "def tokenize(text):\n",
- " tokens = nltk.word_tokenize(text)\n",
- " stems = []\n",
- " for item in tokens:\n",
- " stems.append(PorterStemmer().stem(item))\n",
- " return stems"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "To begin, I first used TF-IDF for feature selection on both train as well as test data using `TfidfVectorizer`.\n",
- "\n",
- "But first, What `TfidfVectorizer` actually does?\n",
- "- `TfidfVectorizer` converts a collection of raw documents to a matrix of **TF-IDF** features.\n",
- "\n",
- "**TF-IDF**?\n",
- "- TFIDF (abbreviation of the term *frequency–inverse document frequency*) is a numerical statistic that is intended to reflect how important a word is to a document in a collection or corpus. [tf–idf](https://en.wikipedia.org/wiki/Tf%E2%80%93idf)\n",
- "\n",
- "**Why `TfidfVectorizer`**?\n",
- "- `TfidfVectorizer` scale down the impact of tokens that occur very frequently (e.g., “a”, “the”, and “of”) in a given corpus. [Feature Extraction and Transformation](https://spark.apache.org/docs/latest/mllib-feature-extraction.html#tf-idf)\n",
- "\n",
- "I gave following two arguments to `TfidfVectorizer`:\n",
- "- tokenizer: `tokenize` function\n",
- "- stop_words\n",
- "\n",
- "Then I used `fit_transform` and `transform` on the train and test documents repectively.\n",
- "\n",
- "**Why `fit_transform` for training data while `transform` for test data**?\n",
- "\n",
- "To avoid data leakage during cross-validation, imputer computes the statistic on the train data during the `fit`, **stores it** and uses the same on the test data, during the `transform`. This also prevents the test data from appearing in `fit` operation."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 7,
- "metadata": {},
- "outputs": [],
- "source": [
- "from sklearn.feature_extraction.text import TfidfVectorizer\n",
- "\n",
- "vectorizer = TfidfVectorizer(tokenizer = tokenize, stop_words = 'english')\n",
- "\n",
- "vectorised_train_documents = vectorizer.fit_transform(train_documents)\n",
- "vectorised_test_documents = vectorizer.transform(test_documents)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "For the **efficient implementation** of machine learning algorithms, many machine learning algorithms **requires all input variables and output variables to be numeric**. This means that categorical data must be converted to a numerical form.\n",
- "\n",
- "For this purpose, I used `MultiLabelBinarizer` from `sklearn.preprocessing`."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 8,
- "metadata": {},
- "outputs": [],
- "source": [
- "from sklearn.preprocessing import MultiLabelBinarizer\n",
- "\n",
- "mlb = MultiLabelBinarizer()\n",
- "train_labels = mlb.fit_transform(train_categories)\n",
- "test_labels = mlb.transform(test_categories)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Now, To **train** the classifier, I used `LinearSVC` in combination with the `OneVsRestClassifier` function in the scikit-learn package.\n",
- "\n",
- "The strategy of `OneVsRestClassifier` is of **fitting one classifier per label** and the `OneVsRestClassifier` can efficiently do this task and also outputs are easy to interpret. Since each label is represented by **one and only one classifier**, it is possible to gain knowledge about the label by inspecting its corresponding classifier. [OneVsRestClassifier](http://scikit-learn.org/stable/modules/multiclass.html#one-vs-the-rest)\n",
- "\n",
- "The reason I combined `LinearSVC` with `OneVsRestClassifier` is because `LinearSVC` supports **Multi-class**, while we want to perform **Multi-label** classification."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 9,
- "metadata": {},
- "outputs": [],
- "source": [
- "%%capture\n",
- "from sklearn.multiclass import OneVsRestClassifier\n",
- "from sklearn.svm import LinearSVC\n",
- "\n",
- "classifier = OneVsRestClassifier(LinearSVC())\n",
- "classifier.fit(vectorised_train_documents, train_labels)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "After fitting the classifier, I decided to use `cross_val_score` to **measure score** of the classifier by **cross validation** on the training data. But the only problem was, I wanted to **shuffle** data to use with `cross_val_score`, but it does not support shuffle argument.\n",
- "\n",
- "So, I decided to use `KFold` with `cross_val_score` as `KFold` supports shuffling the data.\n",
- "\n",
- "I also enabled `random_state`, because `random_state` will guarantee the same output in each run. By setting the `random_state`, it is guaranteed that the pseudorandom number generator will generate the same sequence of random integers each time, which in turn will affect the split.\n",
- "\n",
- "Why **42**?\n",
- "- [Why '42' is the preferred number when indicating something random?](https://softwareengineering.stackexchange.com/questions/507/why-42-is-the-preferred-number-when-indicating-something-random)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 10,
- "metadata": {},
- "outputs": [],
- "source": [
- "%%capture\n",
- "from sklearn.model_selection import KFold, cross_val_score\n",
- "\n",
- "kf = KFold(n_splits=10, random_state = 42, shuffle = True)\n",
- "scores = cross_val_score(classifier, vectorised_train_documents, train_labels, cv = kf)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 11,
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Cross-validation scores: [0.83655084 0.86743887 0.8043758 0.83011583 0.83655084 0.81724582\n",
- " 0.82754183 0.8030888 0.80694981 0.82731959]\n",
- "Cross-validation accuracy: 0.8257 (+/- 0.0368)\n"
- ]
- }
- ],
- "source": [
- "print('Cross-validation scores:', scores)\n",
- "print('Cross-validation accuracy: {:.4f} (+/- {:.4f})'.format(scores.mean(), scores.std() * 2))"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "In the end, I used different methods (`accuracy_score`, `precision_score`, `recall_score`, `f1_score` and `confusion_matrix`) provided by scikit-learn **to evaluate** the classifier. (both *Macro-* and *Micro-averages*)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 12,
- "metadata": {},
- "outputs": [],
- "source": [
- "%%capture\n",
- "from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, confusion_matrix\n",
- "\n",
- "predictions = classifier.predict(vectorised_test_documents)\n",
- "\n",
- "accuracy = accuracy_score(test_labels, predictions)\n",
- "\n",
- "macro_precision = precision_score(test_labels, predictions, average='macro')\n",
- "macro_recall = recall_score(test_labels, predictions, average='macro')\n",
- "macro_f1 = f1_score(test_labels, predictions, average='macro')\n",
- "\n",
- "micro_precision = precision_score(test_labels, predictions, average='micro')\n",
- "micro_recall = recall_score(test_labels, predictions, average='micro')\n",
- "micro_f1 = f1_score(test_labels, predictions, average='micro')\n",
- "\n",
- "cm = confusion_matrix(test_labels.argmax(axis = 1), predictions.argmax(axis = 1))"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 13,
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Accuracy: 0.8099\n",
- "Precision:\n",
- "- Macro: 0.6076\n",
- "- Micro: 0.9471\n",
- "Recall:\n",
- "- Macro: 0.3708\n",
- "- Micro: 0.7981\n",
- "F1-measure:\n",
- "- Macro: 0.4410\n",
- "- Micro: 0.8662\n"
- ]
- }
- ],
- "source": [
- "print(\"Accuracy: {:.4f}\\nPrecision:\\n- Macro: {:.4f}\\n- Micro: {:.4f}\\nRecall:\\n- Macro: {:.4f}\\n- Micro: {:.4f}\\nF1-measure:\\n- Macro: {:.4f}\\n- Micro: {:.4f}\".format(accuracy, macro_precision, micro_precision, macro_recall, micro_recall, macro_f1, micro_f1))"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "In below cell, I used `matplotlib.pyplot` to **plot the confusion matrix** (of first *few results only* to keep the readings readable) using `heatmap` of `seaborn`."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 16,
- "metadata": {},
- "outputs": [
- {
- "data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAABSUAAAV0CAYAAAAhI3i0AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvNQv5yAAAIABJREFUeJzs3Xl8lOW5//HvPUlYVRRRIQkVW1xarYUWUKsiFQvUqnSlP0+1ttXD6XGptlW7aGu1p9upnurpplgFl8qiPXUFi2AtUBGIEiAQQBCKCRFXVHAhJPfvjxnoCDPPMpPMM3fuz/v1mhfJJN9c1/XMTeaZJ8/MGGutAAAAAAAAAKBUUkk3AAAAAAAAAMAvHJQEAAAAAAAAUFIclAQAAAAAAABQUhyUBAAAAAAAAFBSHJQEAAAAAAAAUFIclAQAAAAAAABQUokdlDTGjDPGrDHGrDPGfC9m9nZjzIvGmIYC6g40xvzNGNNojFlpjLk0RraHMWaxMWZZJnttAfUrjDFLjTEPF5DdaIxZYYypN8bUxczub4y5zxizOjP7CRFzR2bq7bq8YYy5LGbtb2W2V4MxZqoxpkeM7KWZ3MoodXOtDWNMX2PMY8aYZzP/HhAj+8VM7XZjzLCYdX+V2d7LjTF/McbsHyP7k0yu3hgz2xhTHad21tcuN8ZYY0y/GLV/bIxpzrrNT49T1xhzSeb/9kpjzH/HqDs9q+ZGY0x9nJmNMUOMMU/t+v9hjBkRI/sRY8zCzP+vh4wx++XJ5vz9EWWNBWSjrrF8+dB1FpANXWf5sllfz7vGAupGXWN5a4ets4DaoessIBt1jeXLh64zk+d+xhhzmDFmUWaNTTfGdIuRvdik72uDfhfky/4ps50bTPr/TlXM/G2Z65ab9H3QPlGzWV//jTFmW8y6U4wxG7Ju6yEx88YY81NjzNrM7fjNGNn5WXU3G2Puj5EdbYx5JpNdYIwZHCN7aibbYIy5wxhTmWvmrJ/znv2RKGssIBu6xgKykdZYnmzo+grKZ12fd40F1I60xvJkQ9dXQDZ0fYXkQ9dYQDbyGjM59llN9P2xXNmo95W5spH2xwLykfbJcmWzvha2P5arbtT7ypx1TYT9sYDakfbJ8mSj3lfmykbaH8t8716PbWKssVzZqGssVzbqPn+ubJx9/ryP5yKssVy1o66xnHWjrLE8dePs8+fKR11jubJR9sVyPv6Nsb7y5UPXWEA28u8xwDnW2pJfJFVIWi/p/ZK6SVom6UMx8iMlfVRSQwG1B0j6aObjfSWtjVpbkpG0T+bjKkmLJB0fs/63Jd0j6eECet8oqV+B2/wOSRdkPu4maf8Cb7cXJB0aI1MjaYOknpnPZ0j6asTsMZIaJPWSVClpjqTD464NSf8t6XuZj78n6Zcxsh+UdKSkJyQNi1l3jKTKzMe/jFl3v6yPvynp5ji1M9cPlPRXSf/Mt27y1P6xpMsj3D65sp/I3E7dM58fHKfnrK/fIOlHMWvPlvSpzMenS3oiRnaJpFMyH39d0k/yZHP+/oiyxgKyUddYvnzoOgvIhq6zfNkoayygbtQ1li8fus6C+g5bZwF1o66xfPnQdaY89zNK/+78f5nrb5b0nzGyQyUNUsB9SED29MzXjKSpueqG5LPX2P8o8/8kSjbz+TBJd0naFrPuFElfiLDG8uW/JulOSamANRa6TyDpz5K+EqPuWkkfzFx/oaQpEbMfl/S8pCMy118n6fyQ2d+zPxJljQVkQ9dYQDbSGsuTDV1fQfkoayygdqQ1licbur6Ceg5bXyG1Q9dYrqzSJzJEXmO51oKi74/lyka9r8yVjbQ/FpCPtE+Wb/0r2v5Yrro/VrT7ylzZSPtjQX1nfT3vPlme2lHvK3NlI+2PZb6+12ObGGssVzbqGsuVjbrPnysbZ58/5+O5iGssV+2oayxXNuo+f+Bj0KD1FVA76hrLlY28xjLfs/vxb9T1
FZCPtMbyZCP/HuPCxbVLUmdKjpC0zlr7nLV2h6RpksZHDVtr50l6tZDC1toWa+0zmY/flNSo9IGzKFlrrd31l/SqzMVGrW2MqZX0aUl/jNV0kTJ/ARop6TZJstbusNZuLeBHjZa03lr7z5i5Skk9Tfov6r0kbY6Y+6Ckp6y1b1lrd0r6u6TPBgXyrI3xSt8pKfPvZ6JmrbWN1to1YY3myc7O9C1JT0mqjZF9I+vT3gpYZwH/H34t6coCs6HyZP9T0i+ste9mvufFuHWNMUbSBKUfnMapbSXt+mtnH+VZZ3myR0qal/n4MUmfz5PN9/sjdI3ly8ZYY/nyoessIBu6zkJ+ZwausWJ+34bkQ9dZWO2gdRaQjbrG8uVD11nA/cypku7LXJ9vjeXMWmuXWms35uo1QnZm5mtW0mLl/z2WL/+GtHt791TuNZYza4ypkPQrpddYrL6DZo2Y/09J11lr2zPfl2uNBdY2xuyr9O2215lsAdnQNZYn2ybpXWvt2sz1eX+PZXp7z/5I5vYJXWO5spmeQtdYQDbSGsuTDV1fQfkoayxfNqo82dD1FVY3aH2F5CP9HsuRPVAx1lgekfbHcol6X5knG2l/LCAfeZ8sj9D9sU4QaX8sTJR9shwirbE8Iu2PBTy2CV1j+bJR1lhANnSNBWQjra+Qx3OBa6yYx4IB2dA1FlY3bH0F5EPXWEA20hrLkv34t5DfYbvzBfwey84W9XsMKGdJHZSsUfqvrbs0KcYD1Y5ijBmk9F/3F8XIVGROMX9R0mPW2shZSTcqfYfRHiOTzUqabYx52hgzMUbu/ZJekjTZpJ+G80djTO8C6v8/xdspkbW2WdL1kjZJapH0urV2dsR4g6SRxpgDjTG9lP5L2MA49TMOsda2ZPppkXRwAT+jWF+XNCtOwKSf2vW8pC9L+lHM7FmSmq21y+LkslyceXrA7fmempDHEZJONumnAP7dGDO8gNonS9pirX02Zu4ySb/KbLPrJX0/RrZB0lmZj7+oCOtsj98fsdZYIb97IuZD19me2TjrLDsbd43l6DnWGtsjH2ud5dlekdbZHtnYa2yPfKR1tuf9jNLPLNiatTOa9z6zmPuooKxJP6X2XEmPxs0bYyYr/Zf+oyT9Jkb2YkkP7vq/VUDfP82ssV8bY7rHzH9A0pdM+mlhs4wxh8esLaX/iDZ3jwecYdkLJM00xjQpvb1/ESWr9MG8qqyng31Bwb/H9twfOVAR11iObBx5sxHWWM5slPUVkI+0xgL6jrLGcmUjra+AulLI+grIR1pjObIvK94ay7XPGvW+stD93SjZsPvJnPmI95V7ZWPcV+brO8p9Za5snPvJoG0Wdl+ZKxv1vjJXNur+WL7HNlHWWDGPi6Jk862xvNmI6ytnPuIaC+o7bI3ly0ZZY2HbK2x95ctHWWP5snH3+bMf/xbymDL24+cI2diPK4FyltRBSZPjulL+9VAm/bpDf5Z0WcgO3XtYa9ustUOU/uvECGPMMRHrnSHpRWvt0wU1nHaitfajkj4l6SJjzMiIuUqln676B2vtUEnblT7lPDKTfm2psyTdGzN3gNJ/VTpMUrWk3saYc6JkrbWNSp+e/pjSD1KWSdoZGCpDxpirlO77T3Fy1tqrrLUDM7mLY9TrJekqxTyQmeUPSj9gGqL0geQbYmQrJR2g9NMQr5A0wxiT6/97kLNV2J33f0r6VmabfUuZv4xG9HWl/089rfTTbXcEfXOhvz+KzQblo6yzXNmo6yw7m6kTeY3lqBtrjeXIR15nAds7dJ3lyMZaYznykdbZnvczSp81vte3RclGvY+KkP29pHnW2vlx89baryn9+79R0pciZkcq/WAh6CBTUN3vK32QarikvpK+GzPfXdI71tphkm6VdHucmTMC11ie7LcknW6trZU0WemnJIdmJR2t9IOXXxtjFkt6U3nuL/Psj0TaLytmXyZCNu8aC8pGWV+58ib9um2hayygdugaC8iGrq8I2ytwfQXk
Q9dYrqy11iriGssodJ+107IR98dy5iPeV+bKRr2vzJWNel+ZKxtnfyxoe4fdV+bKRr2vzJWNuj9WzGObTsuGrLG82YjrK1f+x4q2xvLVjrLG8mWjrLGwbR22vvLlo6yxfNnI+/yFPv7tiHy+bKGPK4GyZhN4zrikEyT9Nevz70v6fsyfMUgFvKZkJlul9OtufLvIOa5RhNfhyHzvz5U+82Cj0n/Rf0vS3UXU/nGM2v0lbcz6/GRJj8SsN17S7AL6/KKk27I+/4qk3xc4888kXRh3bUhaI2lA5uMBktbEXVeK8NofubKSzpO0UFKvuNmsrx0attaz85I+rPTZMxszl51Kn6nav4Dagf/PcmzrRyWNyvp8vaSDYmyvSklbJNUWcDu/LslkPjaS3ihwex8haXFAdq/fH1HXWK5szDWWMx9lnQXVDltne2bjrLEIdcPWWK7tHWmdBWyv0HWWp26cNRY2d+A6y/q+a5Te2X9Z/3otoffch4ZkL8/6fKMivi5xdjbz8f3KvP5d3HzWdacowuspZ7LXKH1fuWuNtSv9si+F1B0VpW52XtJqSYOybuvXY26zAyW9IqlHjLpXKP00rV3XvU/SqgJnHiNpRp7vz7U/8qcoayxP9u6sr+ddY0HZsDUWVjdsfeXJvxZljUWsnXON5ctGWV8h2yt0feXJPxJljUWcOe8ay/Hzfqz0/6vI+2N7ZrM+f0IRXottz6wi7o8F1c5cF7pPlpX9oWLsj4XUHRSj7uWKsT8WsM0i75PtUTvyfWXIzHnvJ5XnsU2UNZYvG2WNBWXD1lhY3bD1lSc/N8oai1g75xoL2Nahayxke0XZF8tXO3SNRZw5bJ//PY9/o6yvoHyUNRaUDVtjXLi4eknqTMklkg436Xd67Kb0X14fLEXhzF9wbpPUaK3NeQZCQPYgk3mnK2NMT0mnKb1jGcpa+31rba21dpDS8z5urY10xmCmXm+Tfv0gZU49H6P06edRar8g6XljzJGZq0ZLWhW1dkahZ69tknS8MaZXZtuPVvpshkiMMQdn/n2fpM8V2MODSv8SV+bfBwr4GbEZY8YpfebEWdbat2Jms5/KdZYirjNJstausNYebK0dlFlvTUq/6cYLEWsPyPr0s4q4zjLuV/o1rmSMOULpF5V+OUb+NEmrrbVNMTK7bFb6QakyPUR++nfWOktJulrpN3nI9X35fn+ErrFifvcE5aOss4Bs6DrLlY26xgLqRlpjAdssdJ2FbO/AdRaQjbTGAuYOXWd57mcaJf1N6adLSvnXWMH3UfmyxpgLJI2VdLbNvP5djPwak3ln38w2OTNXP3myT1tr+2etsbestbneiTpf3wOy6n5G+ddYvm22e40pfZuvjZGV0n+Qe9ha+06Muo2S+mTWtCR9UjnuLwNm3rW+uiv9OyHn77E8+yNfVoQ1Vsy+TL5slDWWKyvp3CjrK6D2AVHWWEDfoWssYHuFrq+QbR24vgK22XhFWGMBM0daYwH7rFHuKwve382Xjbo/FpCPcl+ZK7sk4n1lvrqh95UB2yvS/ljI9g67r8yXDb2vDJg50v5YwGOb0DVWzOOifNkoaywgG2mfP0/+mShrLKB26BoL2F6hayxkW4fu8wfkQ9dYwMyR1ljGno9/4z6mLPTx817ZqL/HACeV4shnrovSrw+4Vum/qlwVMztV6VPMW5X+5Rv4DpN7ZE9S+ilJyyXVZy6nR8weK2lpJtuggHcKC/k5oxTz3beVfl2MZZnLygK22RBJdZne75d0QIxsL6X/It+nwHmvVfoOtkHpd7jsHiM7X+k7n2WSRheyNpQ+o2Cu0ndYcyX1jZH9bObjd5X+a17Os5PyZNcp/dqpu9ZZvndrzJX9c2Z7LZf0kNJvSlLQ/wcFn7mSq/ZdklZkaj+ozF8EI2a7KX0WSIOkZySdGqdnpd/N9BsF3s4nSXo6s1YWSfpYjOylSv8+Wqv062uZPNmcvz+irLGAbNQ1li8fus4CsqHrLF82yhoL
qBt1jeXLh66zoL7D1llA3ahrLF8+dJ0pz/2M0vcBizO3973K8Xs0IPvNzBrbqfSO/B9jZHcqfT+9a45878C6V17pl4j5R+a2blD6bLz9otbe43vyvft2vr4fz6p7tzLvVh0jv7/SZ2OsUPqshI/E6VvpsyDGBayxfHU/m6m5LPMz3h8j+yulDzCtUfolAwJ/j2Yyo/Svd2UOXWMB2dA1FpCNtMb2zEZdX0G1o6yxgL4jrbE82dD1FdRz2PoKqR26xgKykdaY8uyzKtp9Zb5s6H1lQDbq/li+fJT7ytD9dOW/r8xXN/S+MiAbdX8sb98Kv6/MVzv0vjIgG2l/LPO9ez22ibLGArJR98dyZaOusVzZOPv8gY/n8q2xgNpR98dyZaOusZw9h62vkNpR98dyZaPu8+/1+Dfq+grIR11jubKR1hgXLi5edp32DAAAAAAAAAAlkdTTtwEAAAAAAAB4ioOSAAAAAAAAAEqKg5IAAAAAAAAASoqDkgAAAAAAAABKioOSAAAAAAAAAEoq8YOSxpiJrmWTrO1j3z7OnGRtZnanNjO7U5uZ3anNzO7U9rFvH2dOsjYzu1Obmd2pzcylzwPlLPGDkpKK+Q+WVDbJ2j727ePMSdZmZndqM7M7tZnZndrM7E5tH/v2ceYkazOzO7WZ2Z3azFz6PFC2yuGgJAAAAAAAAACPGGttpxZ4/WunBRaYsqZZXz2yJufXDvxTY+DPbm/frlSqd0F9FZNNsraPffs4c5K1u+LMpohslN+QLm7vcr2tOjObZG1mdqc2M7tT28e+fZw5ydrM7E5tZnanNjN3bH7njuawhzpean35uc490OWYqn7vL9t1kvhBySBhByUBIIpifgNzbwYAAACgHHFQMjcOSr5XOR+U5OnbAAAAAAAAAEqKg5IAAAAAAAAASoqDkgAAAAAAAABKKu5BySMl1Wdd3pB02R7fc5SkhZLelXR5sQ1KUrdu3XTPn/6g1asW6MkFD2nqPTdrc9MyrVv7lBY9NUtLn5mjRU/N0idGnRjp540dM0orG+Zp9aoFuvKKi2L1klQ2ydqu9n3rpBu0uWmZ6pfOjZXriNouZrt3766F/3hYT9c9pmX1j+uaH32nZLWTXGPPrn1KS5+Zo7ols/XUwpklq+vq/ysf+/Zx5mLyrv7uTbK2j327OrOr69vH28rHvn2cOcnazOxH367ODDjDWlvopcJa+4K19tA9rj/YWjvcWvtTa+3lW7862ka9vP6df7OtjfW7P6+oqrYVVdX2oou/b2++5U5bUVVtz/7yN+zcx+fbYcPH2HXrNtja9w21FVXV9tghn7BNTZt3Z/JdqrrX2nXrNtjBRxxve/Q61NYvW2mPOfaU0FySWfourPaoT3zWDhs+xq5oaIycSbrvJLdXRVW13W//wbaiqtp27/k+u2jR0/bjJ55R9n1HyVcGXDZs2GQP6X903q+7OnO5ZV3t28eZi827+LvX19vKxWzStV1c3z7eVj727ePMrvbt48yu9u3CzEUcz+nSlx1b1lou/7okfXsEXULPlDTGHGWM+a4x5n+NMTdlPv6gpNGS1kv65x6RFyUtkdS658+qOmG0ev/wt9rn2pvV47zLJBPtRM2zzhyju+66V5L05z8/omM//CG9+tpWvf3OO2pp2SJJWrlyjXr06KFu3boF/qwRw4dq/fqN2rBhk1pbWzVjxgM668yxkfpIKkvfhdWev2CRXn1ta+TvL4e+k9xekrR9+1uSpKqqSlVWVcnaaG9a5uoaK4arM9M3M3d23sXfvUnW9rFvV2eW3FzfPt5WPvbt48yu9u3jzK727erMgEsCjwoaY74raZokI2mx0gcbjaSpTz755E8lTY1caMD7VDVilLb/7FJtu+YbUnu7qk4YHSlbXdNfzzdtliS1tbXp9dff0AH793nP93zuc59WfX2DduzYEflnSVJTc4uqq/vH7qOU2SRru9p3sVzc3h2xvVKplOqWzFZL83LNnTtPi5csLfu+
i81bazVr5lQtemqWLjj/yyWp6+r/Kx/79nHmjsgXytWZ6duPmYvl4vZ29bbysW8fZ06yNjP70berMwMuqQz5+vmSjrbWvuesx+uuu+43Rx111BuSzsgVMsZMvOGGGyZu27atrc+aZn31yBpVfmioKg49XPv86Hfpb6rqLvtG+i/NvS7+sVIH9ZcqqpQ68GDtc+3NkqTzKn6nO+6cIWPMXjWyz9/60IeO0M9/+gN96tP/Fjpwzp8V8WywpLJJ1na172K5uL07Ynu1t7dr2PAx6tNnP/353tt09NFHauXKNZ1aO8k1JkmnjPqMWlq26KCDDtSjs6Zp9Zp1WrBgUafWdfX/lY99+zhzR+QL5erM9F26bNK1i+Hi9nb1tvKxbx9nTrI2M8fLJlnbx5kBl4QdlGyXVK09nqI9duzYsxsaGt4ZOXLkllwha+2kTG7b6xvm/Sp9rdGOJx/Tu/fdttf3v/XbH6e/48BD1OuCK7X9l+k32LjjT42SpOamFg2srVZzc4sqKirUp89+2rr1dUlSTc0A3Xfvbfra1y/Vc8/t+Uzyve36WbvU1gzY/RTwcs0mWdvVvovl4vbuyO31+utv6O/znky/uHKEg5KurjFJu7/3pZde0f0PzNLw4UMiHZR0dWb6ZuZS5Avl6sz07cfMxXJxe7t6W/nYt48zJ1mbmf3o29WZAZeEvajjZZLmGmNmGWMmZS6PtrS0/PqVV165OU6hnY3PqGrYyTL77i9JMr33lTnw4EjZhx6erXPP/aIk6fOf/7T+9sQ/JEkVqQo9+MCduurqn+vJhXWRftaSunoNHnyYBg0aqKqqKk2YMF4PPTy7rLP0XVjtYri4vYvdXv369VWfPvtJknr06KHRp56sNWvWl33fxeR79eqpffbpvfvjT552SqSDsMXWdfX/lY99+zhzR+QL5erM9O3HzMVycXu7elv52LePM7vat48zu9q3qzNDkm3nkn0pY4FnSlprHzXGHCFphKQaSebII498afz48f9njLku61u/kfn3Zkn9JdVJ2k9S+743TNWbV52v9s2b9O7/TVHvy3+RfoObtp16+67fqO2VF0ObvH3yNN0x5X+1etUCvfbaVm3Z8pIWzHtQBx/cT8YY3XD9j3XVDy6TJH3q9LP10kuv5P1ZbW1tuvSyqzXzkXtUkUppyh3TtWrV2tAekszSd2G1777rdzpl5Anq16+vNj5Xp2uvu16Tp0wr676T3F4DBhyi22+7URUVKaVSKd1330N6ZOacsu+7mPwhhxyk++5Nn71dUVmhadPu1+zZT3R6XVf/X/nYt48zF5t38XdvkrV97NvVmSU317ePt5WPffs4s6t9+zizq327OjPgEtPZr0vw+tdOK7jAgZmnbwNAMfZ+RZboeOUWAAAAAOVo547mYh7qdFmtW9bwMC5L1SFHlu06CXv6NgAAAAAAAAB0KA5KAgAAAAAAACipsHffBgAAAAAAANzQXt5v7oJ/4UxJAAAAAAAAACXV6WdKHnTP6oKzKVP4a3G2d/Ib+ABwB78NAAAAAAAoL5wpCQAAAAAAAKCkOCgJAAAAAAAAoKQ4KAkAAAAAAACgpHj3bQAAAAAAAHQJ1vLu265I7EzJiy8+X0ufmaP6pXN1ySXnh37/pFuuV9Pz9Vr6zJzd133+c59W/dK5euftTfroR4+NXHvsmFFa2TBPq1ct0JVXXBSr76SySdamb3f69nHmJGszsx99+zhzkrWZ2Y++fZw5ydrM7EffPs5cW1utObPv1YrlT2hZ/eO65OLwx5UdVZvbyo++XZ0ZcIa1tlMvVd1q7J6XIUNOtQ0NjXa/Ph+wPXq+z86ZO89+8EMn7fV92ZdPnPo5O3zEWNvQ0Lj7ug8fe4o9+piT7RNPPGmPO/5T7/n+iqrqnJeq7rV23boNdvARx9sevQ619ctW2mOOPSXv95dDlr7pm5nLrzYz+9G3jzO72rePM7vat48zu9q3jzO72rePM1dUVduagUPssOFjbEVV
te1zwOF2zdr1Zd+3r7eVi327MHNnH89x9fJuc4Pl8q9L0rdH0CWRMyWPOmqwFi1aqrfffkdtbW2aP+8pjR8/LjCzYMEivfba1vdct3r1Oq1d+1ys2iOGD9X69Ru1YcMmtba2asaMB3TWmWPLOkvf9N3ZWfp2J0vf7mTp250sfbuTpW93svTtTtblvl944UUtrW+QJG3btl2rVz+rmur+Zd23r7eVi327OjPgkoIPShpjvlZoduWqNTr55OPUt+/+6tmzh8aNO1W1tdWF/rhYqmv66/mmzbs/b2puUXXEO66ksknWpu/S1mZmP/r2ceYkazOzH337OHOStZnZj759nDnJ2swcv+9shx5aqyEfOUaLFi/t9NrcVn707erMgEuKeaObayVNLiS4evU6/er632vWzKnatm27lq9YpZ07dxbRSnTGmL2us9aWdTbJ2vRd2trMHC+bZG1mjpdNsjYzx8smWZuZ42WTrM3M8bJJ1mbmeNkkazNzvGy23r17acb0W/Xty6/Rm29u6/Ta3FbxsknW9nFmSGrnjW5cEXhQ0hizPN+XJB0SkJsoaaIkVVTsr1RF772+Z8qUaZoyZZok6SfXfVdNzS0RWy5Oc1OLBmadlVlbM0AtLVvKOptkbfoubW1m9qNvH2dOsjYz+9G3jzMnWZuZ/ejbx5mTrM3M8fuWpMrKSt07/VZNnfoX3X//rMg5V2embzeySdcGXBH29O1DJH1F0pk5Lq/kC1lrJ1lrh1lrh+U6IClJBx10oCRp4MBqfeYzn9L06Q/E774AS+rqNXjwYRo0aKCqqqo0YcJ4PfTw7LLO0jd9d3aWvt3J0rc7Wfp2J0vf7mTp250sfbuTdblvSbp10g1qXL1ON940KVbO1Znp241s0rUBV4Q9ffthSftYa+v3/IIx5oliCk+fNkkHHniAWlt36puXXqWtW18P/P677vytRo48Qf369dVz65foup/coNde3apf//onOuigvnrg/ju0bPlKnXHGOYE/p62tTZdedrVmPnKPKlIpTbljulatWhup56Sy9E3fnZ2lb3ey9O1Olr7dydK3O1n6didL3+5kXe77xI8P17nnfEHLV6xS3ZL0AZsf/vAXmvXo42Xbt6+3lYt9uzoz4BI2RzUVAAAgAElEQVTT2a9L0K17bSIvfNDO6y0AAAAAAIAuaueO5r1ffBLa0bSCA0JZutV+uGzXSTFvdAMAAAAAAACUD8sb3bgi7DUlAQAAAAAAAKBDcVASAAAAAAAAQElxUBIAAAAAAABASXFQEgAAAAAAAEBJdfob3ST1LtgpU9ybC/Hu3QAAAAAAAI5pb0u6A0TEmZIAAAAAAAAASoqDkgAAAAAAAABKioOSAAAAAAAAAEqKg5IAAAAAAAAASiqxg5K3TrpBm5uWqX7p3ILyY8eM0sqGeVq9aoGuvOKiWNmLLz5fS5+Zo/qlc3XJJeeXrG4x2SRr+9h3kuuT28qPvl2cuba2WnNm36sVy5/QsvrHdcnF8X5/FlPb1WyStX3s28eZk6zNzH707ePMSdZmZvbZy7m2j327OjPgDGttp14qqqptrsuoT3zWDhs+xq5oaMz59aBLVfdau27dBjv4iONtj16H2vplK+0xx57y3u/pVpPzMmTIqbahodHu1+cDtkfP99k5c+fZD37opL2+r9C6xfTcWXn6jt93Z6/PcsvStzvZJGvXDBxihw0fYyuqqm2fAw63a9aud6JvH28rH/v2cWZX+/ZxZlf79nFmV/v2ceaKKvbZ6bt8s6Wq3dnHc1y9vLthieXyr0vSt0fQJfRMSWPMUcaY0caYffa4flwxB0PnL1ikV1/bWlB2xPChWr9+ozZs2KTW1lbNmPGAzjpzbKTsUUcN1qJFS/X22++ora1N8+c9pfHjo41STN1isknW9rXvpNYnt5Uffbs68wsvvKil9Q2SpG3btmv16mdVU92/7Pv28bbysW8fZ3a1bx9ndrVvH2d2tW8fZ5bYZ6fv8s0mXRtw
ReBBSWPMNyU9IOkSSQ3GmPFZX/5ZZzYWpLqmv55v2rz786bmFlVHfGC8ctUanXzycerbd3/17NlD48adqtra6k6vW0w2ydq+9l0MV2embzeySdfe5dBDazXkI8do0eKlkTMubm9Xbysf+/Zx5iRrM7Mfffs4c5K1mZl99nKu7WPfrs4MuKQy5Ov/Lulj1tptxphBku4zxgyy1t4kyeQLGWMmSpooSaaij1Kp3h3U7u6fv9d11tpI2dWr1+lX1/9es2ZO1bZt27V8xSrt3Lmz0+sWk02ytq99F8PVmenbjWzStSWpd+9emjH9Vn378mv05pvbIudc3N6u3lY+9u3jzEnWZuZ42SRrM3O8bJK1mTletliuzkzfbmSTrg24Iuzp2xXW2m2SZK3dKGmUpE8ZY/5HAQclrbWTrLXDrLXDOvqApCQ1N7VoYNbZjbU1A9TSsiVyfsqUaTru+E9p9Glf0GuvbtW6dRs6vW6xPSdV29e+i+HqzPTtRjbp2pWVlbp3+q2aOvUvuv/+WZFzxdZ2MZtkbR/79nHmJGszsx99+zhzkrWZmX32cq7tY9+uzgy4JOyg5AvGmCG7PskcoDxDUj9JH+7MxoIsqavX4MGHadCggaqqqtKECeP10MOzI+cPOuhASdLAgdX6zGc+penTH+j0usX2nFRtX/suhqsz07cb2aRr3zrpBjWuXqcbb5oUOZN03z7eVj727ePMrvbt48yu9u3jzK727ePMxXJ1Zvp2I5t0be+1t3PJvpSxsKdvf0XSe57bbK3dKekrxphbiil8912/0ykjT1C/fn218bk6XXvd9Zo8ZVqkbFtbmy697GrNfOQeVaRSmnLHdK1atTZy7enTJunAAw9Qa+tOffPSq7R16+udXrfYnpOq7WvfSa1Pbis/+nZ15hM/PlznnvMFLV+xSnVL0jtFP/zhLzTr0cfLum8fbysf+/ZxZlf79nFmV/v2cWZX+/ZxZol9dvou32zStQFXmM5+XYLKbjWJvPBByuR9dnkk7bxeAwAAAAAAKFM7dzQXd+Cji9rx3GIO6GTp9v4RZbtOwp6+DQAAAAAAAAAdioOSAAAAAAAAAEoq7DUlAQAAAAAAACdYW95v7oJ/4UxJAAAAAAAAACXVZc+ULPaNaipTFQVnd7a3FVUbACSpmFcj5pWdAQAAAADljDMlAQAAAAAAAJQUByUBAAAAAAAAlBQHJQEAAAAAAACUVJd9TUkAAAAAAAB4pp1333ZFImdK1tZWa87se7Vi+RNaVv+4Lrn4/Ng/Y+yYUVrZME+rVy3QlVdc1GnZW275lTZtekZPP/3Y7us+/OEP6okn/qK6utn6859v17777tPpPRebTyqbZG0f+3Z15lsn3aDNTctUv3RurFxH1HYxK0l9+uynadMmacWKv2v58id0/HEfK0ltV9cYM/vRt48zJ1mbmf3o28eZk6zNzH707ePMxeSLPX7g4swdURtwgrW2Uy8VVdV2z0vNwCF22PAxtqKq2vY54HC7Zu16e8yxp+z1ffkuVd1r7bp1G+zgI463PXodauuXrYycj5rt3n2g7d59oB09+vP2uOM+ZRsaVu++bsmSenvaaV+w3bsPtBMnfsf+7Gc37v5a9+4DO7znUs1M38nX9nHmiqpqO+oTn7XDho+xKxoaI2eS7rsU2cqAy513zrATJ37HVlZV2569DrUH9jvqPV8vt5ld2N7MnHxtZvajbx9ndrVvH2d2tW8fZ3a1bx9nLjZfzPEDV2eOmu3s4zmuXt5Z+w/L5V+XpG+PoEsiZ0q+8MKLWlrfIEnatm27Vq9+VjXV/SPnRwwfqvXrN2rDhk1qbW3VjBkP6Kwzx3ZKdsGCxXrtta3vue6II96v+fMXSZLmzp2vz3zm9E7tudh8Uln6diebdO35Cxbp1T3+n5V730lur3333UcnnXScbp88VZLU2tqq119/o+z7dnF7+zizq337OLOrffs4s6t9+zizq337OLOrffs4c7H5Yo4fuDpz
sbUBV4QelDTGjDDGDM98/CFjzLeNMeFH4SI69NBaDfnIMVq0eGnkTHVNfz3ftHn3503NLaqO+EupmOwuK1eu0RlnfFKS9LnPfVq1tQM6vW5SM9N3aWv7OHOxXNzexW6v97//UL388iu67Y+/1pLFf9UtN/9KvXr1LPu+XdzePs6cZG1m9qNvH2dOsjYz+9G3jzMnWZuZS9t3trjHD1ydOcnHV0ApBR6UNMZcI+l/Jf3BGPNzSb+VtI+k7xljriq2eO/evTRj+q369uXX6M03t0XOGWP2us5a2+nZXf7jP67QN75xnp588hHtu+8+2rGjtdPrJjUzfZe2to8zF8vF7V3s9qqsqNDQoR/WLbfcqeEjxmr79rd05ZUXd3ptV9cYM8fLJlmbmeNlk6zNzPGySdZm5njZJGszc7xskrWZOV62I/JSYccPXJ05ycdXXYJt55J9KWNh7779BUlDJHWX9IKkWmvtG8aYX0laJOmnuULGmImSJkqSqeijVKr33oUrK3Xv9Fs1depfdP/9s2I13dzUooG11bs/r60ZoJaWLZ2e3WXt2vU644xzJEmDBx+mceNO7fS6Sc1M36Wt7ePMxXJxexe7vZqaW9TU1KLFS9J/If7z/z2iK6+IdlDSxzXGzH707ePMSdZmZj/69nHmJGszsx99+zhzR+QLPX7g6sxJPr4CSins6ds7rbVt1tq3JK231r4hSdbatyXlPdxqrZ1krR1mrR2W64CklH633cbV63TjTZNiN72krl6DBx+mQYMGqqqqShMmjNdDD8/u9OwuBx10oKT0Xy++//1v6o9/vLvT6yY1M32707erMxfLxe1d7PbasuUlNTVt1hFHfECSdOqpJ6mxcW3Z9+3i9vZxZlf79nFmV/v2cWZX+/ZxZlf79nFmV/v2ceaOyBd6/MDVmZN8fAWUUtiZkjuMMb0yByU/tutKY0wfBRyUDHPix4fr3HO+oOUrVqluSfo/1g9/+AvNevTxSPm2tjZdetnVmvnIPapIpTTljulatSraA/K42Tvv/I1OPvkE9et3gNatW6T/+q//Ue/evfWNb3xFknT//Y/qjjtmdGrPxeaTytK3O9mka9991+90ysgT1K9fX218rk7XXne9Jk+ZVtZ9J7m9JOmyb/1Qd97xG3XrVqXnNmzSBRd8u+z7dnF7+zizq337OLOrffs4s6t9+zizq337OLOrffs4c7H5Yo4fuDpzRzxeAFxggl6XwBjT3Vr7bo7r+0kaYK1dEVagsluNky98UJmqKDi7s72tAzsB4Ku9X0kmOid/8QIAAACIbOeO5mIeMnRZ765dwMOhLN2POKls10ngmZK5Dkhmrn9Z0sud0hEAAAAAAABQCE4Uc0bYa0oCAAAAAAAAQIfioCQAAAAAAACAkuKgJAAAAAAAAICS4qAkAAAAAAAAgJIKfKMbnxXzDtq8Yy6AjsDvAwAAAABAV8VBSQAAAAAAAHQNtj3pDhART98GAAAAAAAAUFIclAQAAAAAAABQUhyUBAAAAAAAADxkjLndGPOiMaYh67q+xpjHjDHPZv49IHO9Mcb8rzFmnTFmuTHmo1mZ8zLf/6wx5rwotRM5KNm9e3ct/MfDerruMS2rf1zX/Og7sX/G2DGjtLJhnlavWqArr7jIiewRR3xAdUtm77688vJqffOSC8q+72KySdYuJnvrpBu0uWmZ6pfOjZXriNrcVn707ePMSdZmZj/69nHmYvepXJw5ydo+9u3jzEnWZmY/+vZx5mLyrt7XJV0biGGKpHF7XPc9SXOttYdLmpv5XJI+JenwzGWipD9I6YOYkq6RdJykEZKu2XUgM4ixtnPf37WyW03OAr1799L27W+psrJS8574i7717Wu0aPEzkX5mKpVS48r5Gnf62WpqatFTC2fqnHMvVGPjs2WRjfLu26lUSv/c+LROPOkMbdrUvPv6fLdGuc9cbrWL7fvkk47Ttm3bNXnyTRoydHSkTNJ9+3pbudi3jzO72rePM7va
t48z71LoPpWrM9O3G1n6didL3+5kfe1bcu++rlS1d+5ojnL4wTvvrpzbuQe6HNP96NGh68QYM0jSw9baYzKfr5E0ylrbYowZIOkJa+2RxphbMh9Pzf6+XRdr7X9krn/P9+UT+0xJY8ydcTO5bN/+liSpqqpSlVVVinNwdMTwoVq/fqM2bNik1tZWzZjxgM46c2xZZ/d06qkn6bnn/vmeA5Ll2HexM7va9/wFi/Tqa1sjf3859O3rbeVi3z7O7GrfPs7sat8+zrxLoftUrs5M325k6dudLH27k/W1b8m9+7qkawMd4BBrbYskZf49OHN9jaTns76vKXNdvusDBR6UNMY8uMflIUmf2/V59FlyFE6lVLdktlqal2vu3HlavGRp5Gx1TX8937R59+dNzS2qru5f1tk9fWnCeE2ffn/k73d1Zlf7LoarM9O3G9kka/vYt48zJ1mbmQu7vyp0n8rVmenbjWyStX3s28eZk6zNzKXtW3Lvvi7p2kA2Y8xEY0xd1mViMT8ux3U24PpAYWdK1kp6Q9L/SLohc3kz6+OCtbe3a9jwMTr0sGEaPmyojj76yMhZY/aeNepfSpLKZquqqtIZZ4zRfX9+OHLG1Zld7bsYrs5M325kk6ztY98+zpxkbWaOl92l0H0qV2embzeySdb2sW8fZ06yNjPHy3ZE3rX7uqRrA9mstZOstcOyLpMixLZknratzL8vZq5vkjQw6/tqJW0OuD5Q2EHJYZKelnSVpNettU9Ietta+3dr7d/zhbKPwra3bw8s8Prrb+jv857U2DGjwnrdrbmpRQNrq3d/XlszQC0tW8o6m23cuE9o6dIVevHFlyNnXJ3Z1b6L4erM9O1GNsnaPvbt48xJ1mbm4u6v4u5TuTozfbuRTbK2j337OHOStZm5tH1nc+W+LunaQAd4UNKud9A+T9IDWdd/JfMu3McrfaywRdJfJY0xxhyQeYObMZnrAgUelLTWtltrfy3pa5KuMsb8VlJl2A/NPgqbSvXe6+v9+vVVnz77SZJ69Oih0aeerDVr1of92N2W1NVr8ODDNGjQQFVVVWnChPF66OHZZZ3N9qUvfSbWU7eT7LvYmV3tuxiuzkzfbmTp250sfbuTdbnvYvapXJ2Zvt3I0rc7Wfp2J+tr3y7e1yVd23u2nUv2JYQxZqqkhZKONMY0GWPOl/QLSZ80xjwr6ZOZzyVppqTnJK2TdKukCyXJWvuqpJ9IWpK5XJe5LlDoAcbMD2+S9EVjzKeVfjp3UQYMOES333ajKipSSqVSuu++h/TIzDmR821tbbr0sqs185F7VJFKacod07Vq1dqyzu7Ss2cPnTZ6pC688Luxcq7O7Grfd9/1O50y8gT169dXG5+r07XXXa/JU6aVdd++3lYu9u3jzK727ePMrvbt48xScftUrs5M325k6dudLH27k/W1bxfv65KuDcRhrT07z5dG5/heK+miPD/ndkm3x6ltOvt1CSq71Xj3wgeh77UewLuNBQAAAAAAYtu5o7mYww9d1rsNj3FoJUv3Yz5Ztusk7DUlAQAAAAAAAKBDcVASAAAAAAAAQElxUBIAAAAAAABASUV6oxsAAAAAAACg7LWHv+M0ygMHJTsBr6gKAAAAAAAA5MfTtwEAAAAAAACUFAclAQAAAAAAAJQUByUBAAAAAAAAlBSvKQkAAAAAAIAuwdq2pFtARJwpCQAAAAAAAKCkEjsoeeukG7S5aZnql84tKD92zCitbJin1asW6MorLury2SRr07c7ffs4c5K1mdmPvn2cOcnazOxH3z7OnGRtZvajbx9nTrI2M/vRt6szA64w1tpOLVDZrSZngZNPOk7btm3X5Mk3acjQ0bF+ZiqVUuPK+Rp3+tlqamrRUwtn6pxzL1Rj47NdMkvf9M3M5Vebmf3o28eZXe3bx5ld7dvHmV3t28eZXe3bx5ld7dvHmV3t24WZd+5oNpGa8cw7y2Z27oEux/T4yOllu05inSlpjDnJGPNtY8yY
YgvPX7BIr762taDsiOFDtX79Rm3YsEmtra2aMeMBnXXm2C6bpW/67uwsfbuTpW93svTtTpa+3cnStztZ+nYnS9/uZOnbnWzStQFXBB6UNMYszvr43yX9VtK+kq4xxnyvk3vLq7qmv55v2rz786bmFlVX9++y2SRr03dpazOzH337OHOStZnZj759nDnJ2szsR98+zpxkbWb2o28fZ06yto8zQ5Jt55J9KWNhZ0pWZX08UdInrbXXShoj6cv5QsaYicaYOmNMXXv79g5oc6+fv9d1UZ+G7mI2ydr0XdrazBwvm2RtZo6XTbI2M8fLJlmbmeNlk6zNzPGySdZm5njZJGszc7xskrWZOV42ydo+zgy4pDLk6yljzAFKH7w01tqXJMlau90YszNfyFo7SdIkKf9rShajualFA2urd39eWzNALS1bumw2ydr0XdrazOxH3z7OnGRtZvajbx9nTrI2M/vRt48zJ1mbmf3o28eZk6zt48yAS8LOlOwj6WlJdZL6GmP6S5IxZh9Jib1Q5pK6eg0efJgGDRqoqqoqTZgwXg89PLvLZumbvjs7S9/uZOnbnSx9u5Olb3ey9O1Olr7dydK3O1n6diebdG3AFYFnSlprB+X5UrukzxZT+O67fqdTRp6gfv36auNzdbr2uus1ecq0SNm2tjZdetnVmvnIPapIpTTljulatWptl83SN313dpa+3cnStztZ+nYnS9/uZOnbnSx9u5Olb3ey9O1ONunagCtMZ78uQWc8fRsAAAAAAMBnO3c0J/YM1nL2zjMPchwqS4+PnlW26yTs6dsAAAAAAAAA0KE4KAkAAAAAAACgpDgoCQAAAAAAAKCkOCgJAAAAAAAAoKQC330bbqlIFXeMua29vYM6AQAAAAAAAPLjoCQAAAAAAAC6BssJV67g6dsAAAAAAAAASoqDkgAAAAAAAABKioOSAAAAAAAAAEoqsYOSt066QZublql+6dyC8mPHjNLKhnlavWqBrrzioi6fjZu/5Zbr9fympXrm6Tnvuf7C//yqVix/QkufmaOf/fQHZdd3uWSTrM3MfvTt48xJ1mZmP/r2ceYkazOzH337OHOStZnZj759nDnJ2j7ODLjCWGs7tUBlt5qcBU4+6Tht27ZdkyffpCFDR8f6malUSo0r52vc6WerqalFTy2cqXPOvVCNjc92yWzUfPa7b5+U2b6333ajPvqx0yRJp5xygr733Us0/jNf1Y4dO3TQQQfqpZde2Z3J9e7bpei73LKu9u3jzK727ePMrvbt48yu9u3jzK727ePMrvbt48yu9u3jzK727ePMrvbtwsw7dzSbSM145p0lf+7cA12O6TH882W7ThI7U3L+gkV69bWtBWVHDB+q9es3asOGTWptbdWMGQ/orDPHdtlsIfkFCxbptT2278R/P1e/uv732rFjhyS954BkufRdDllX+/ZxZlf79nFmV/v2cWZX+/ZxZlf79nFmV/v2cWZX+/ZxZlf79nFmV/t2dWbAJYEHJY0xxxlj9st83NMYc60x5iFjzC+NMX1K0+Leqmv66/mmzbs/b2puUXV1/y6b7Yi8JB1++Pt14okjNH/eg3rssXv1sY99pKz7dnV7u5hNsraPffs4c5K1mdmPvn2cOcnazOxH3z7OnGRtZvajbx9nTrK2jzMDLgk7U/J2SW9lPr5JUh9Jv8xcN7kT+wpkzN5nnkZ9GrqL2Y7IS1JlZaUO2L+PTh55lr7//Z/qnj/9vtPr+ri9XcwmWdvHvn2cOcnazBwvm2RtZo6XTbI2M8fLJlmbmeNlk6zNzPGySdZm5njZJGv7ODPgksqQr6estTszHw+z1n408/ECY0x9vpAxZqKkiZJkKvoolepdfKdZmptaNLC2evfntTUD1NKypctmOyIvSc3NLbr/gVmSpLq6erW3W/Xr11cvv/xqWfbt6vZ2MZtkbR/79nHmJGszsx99+zhzkrWZ2Y++fZw5ydrM7EffPs6cZG0fZwZcEnamZIMx
5muZj5cZY4ZJkjHmCEmt+ULW2knW2mHW2mEdfUBSkpbU1Wvw4MM0aNBAVVVVacKE8Xro4dldNtsReUl68MG/atSoEyVJhw8+TFXdqgIPSCbdt6vb28UsfbuTpW93svTtTpa+3cnStztZ+nYnS9/uZOnbnWzStb1n27lkX8pY2JmSF0i6yRhztaSXJS00xjwv6fnM1wp2912/0ykjT1C/fn218bk6XXvd9Zo8ZVqkbFtbmy697GrNfOQeVaRSmnLHdK1atbbLZgvJ33nnbzXy5OPVr19frV+3WD/5rxs05Y7pmjTpej3z9Bzt2LFDF1zwrbLruxyyrvbt48yu9u3jzK727ePMrvbt48yu9u3jzK727ePMrvbt48yu9u3jzK727erMgEtMlNclMMbsK+n9Sh/EbLLWRj5vuLJbDS98UCIVqeLeTL2tvbyPoAMAAAAAgLSdO5r3fvFJ6J3F93IcKkuPEV8s23USdqakJMla+6akZZ3cCwAAAAAAAAAPFHdqHQAAAAAAAADExEFJAAAAAAAAACUV6enbAAAAAAAAQNnj/TKcwZmSAAAAAAAAAEqKMyW7EN49GwAAAAAAAC7gTEkAAAAAAAAAJcVBSQAAAAAAAAAlxdO3AQAAAAAA0DVYXtrOFZwpCQAAAAAAAKCkEjsoeeukG7S5aZnql84tKD92zCitbJin1asW6MorLury2SRr+3hbJVmbmf3o28eZk6zNzH707ePMSdZmZj/69nHmJGszsx99+zhzkrV9nBlwhbHWdmqBym41OQucfNJx2rZtuyZPvklDho6O9TNTqZQaV87XuNPPVlNTi55aOFPnnHuhGhuf7ZLZpGv7dlu52rePM7vat48zu9q3jzO72rePM7vat48zu9q3jzO72rePM7vat48zu9q3CzPv3NFsIjXjmXcWTu3cA12O6XHC2WW7TgLPlDTGfNMYM7AzCs9fsEivvra1oOyI4UO1fv1GbdiwSa2trZox4wGddebYLptNurZvt5Wrffs4s6t9+zizq337OLOrffs4s6t9+zizq337OLOrffs4s6t9+zizq327OjPgkrCnb/9E0iJjzHxjzIXGmINK0VSY6pr+er5p8+7Pm5pbVF3dv8tmk65dDLa3G9kka/vYt48zJ1mbmf3o28eZk6zNzH707ePMSdZmZj/69nHmJGv7ODMktbdzyb6UsbCDks9JqlX64OTHJK0yxjxqjDnPGLNvvpAxZqIxps4YU9fevr0D29398/e6LurT0F3MJl27GGxvN7JJ1vaxbx9nTrI2M8fLJlmbmeNlk6zNzPGySdZm5njZJGszc7xskrWZOV42ydo+zgy4JOygpLXWtltrZ1trz5dULen3ksYpfcAyX2iStXaYtXZYKtW7A9tNa25q0cDa6t2f19YMUEvLli6bTbp2MdjebmSTrO1j3z7OnGRtZvajbx9nTrI2M/vRt48zJ1mbmf3o28eZk6zt48yAS8IOSr7n8Ly1ttVa+6C19mxJ7+u8toItqavX4MGHadCggaqqqtKECeP10MOzu2w26drFYHu7kaVvd7L07U6Wvt3J0rc7Wfp2J0vf7mTp250sfbuTTbo24IrKkK9/Kd8XrLVvF1P47rt+p1NGnqB+/fpq43N1uva66zV5yrRI2ba2Nl162dWa+cg9qkilNOWO6Vq1am2XzSZd27fbytW+fZzZ1b59nNnVvn2c2dW+fZzZ1b59nNnVvn2c2dW+fZzZ1b59nNnVvl2dGXCJ6ezXJajsVsMLHwAAAAAAAHSgnTua937xSeidf/yJ41BZepz45bJdJ2FnSgIAAAAAAABuKPN3nMa/hL2mJAAAAAAAAAB0KA5KAgAAAAAAACgpDkoCAAAAAAAAKCkOSgIAAAAAAAAoKd7oBgAAAAAAAF2CtW1Jt4CIOFMSAAAAAAAAQElxUBIAAAAAAABASXFQEgAAAAAAAEBJJXZQcuyYUVrZME+rVy3QlVdcVNK8i9kka9O3O337OHOS
tZnZj759nDnJ2szsR98+zlxMvra2WnNm36sVy5/QsvrHdcnF55ekbrHZJGv72LePMydZm5n96NYHzSAAACAASURBVNvVmQFXGGttpxao7FazV4FUKqXGlfM17vSz1dTUoqcWztQ5516oxsZnI/3MYvIuZumbvpm5/Gozsx99+zizq337OLOrffs4c7H5/v0P1oD+B2tpfYP22ae3Fi96VJ//wte79Mz0zcxdtW8fZ3a1bxdm3rmj2URqxjNvP3F75x7ockzPUV8v23WSyJmSI4YP1fr1G7Vhwya1trZqxowHdNaZY0uSdzFL3/Td2Vn6didL3+5k6dudLH27k/W17xdeeFFL6xskSdu2bdfq1c+qprp/p9fltnKnbx9ndrVvH2d2tW9XZwZcEnhQ0hjTzRjzFWPMaZnP/80Y81tjzEXGmKpCi1bX9NfzTZt3f97U3KLqiDtWxeZdzCZZm75LW5uZ/ejbx5mTrM3MfvTt48xJ1mbm0vad7dBDazXkI8do0eKlnV6X26q0tZnZj759nDnJ2j7ODLikMuTrkzPf08sYc56kfST9n6TRkkZIOq+QosbsfeZonKeRF5N3MZtkbfoubW1mjpdNsjYzx8smWZuZ42WTrM3M8bJJ1mbmeNmOyEtS7969NGP6rfr25dfozTe3dXpdbqvS1mbmeNkkazNzvGyStX2cGXBJ2EHJD1trjzXGVEpqllRtrW0zxtwtaVm+kDFmoqSJkmQq+iiV6v2erzc3tWhgbfXuz2trBqilZUvkpovJu5hNsjZ9l7Y2M/vRt48zJ1mbmf3o28eZk6zNzKXtW5IqKyt17/RbNXXqX3T//bNKUpfbqrS1mdmPvn2cOcnaPs4MuCTsNSVTxphukvaV1EtSn8z13SXlffq2tXaStXaYtXbYngckJWlJXb0GDz5MgwYNVFVVlSZMGK+HHp4dueli8i5m6Zu+OztL3+5k6dudLH27k6Vvd7K+9i1Jt066QY2r1+nGmyZFzhRbl9vKnb59nNnVvn2c2dW+XZ0ZcEnYmZK3SVotqULSVZLuNcY8J+l4SdMKLdrW1qZLL7taMx+5RxWplKbcMV2rVq0tSd7FLH3Td2dn6dudLH27k6Vvd7L07U7W175P/PhwnXvOF7R8xSrVLUk/KP3hD3+hWY8+3ql1ua3c6dvHmV3t28eZXe3b1ZkhybYn3QEiMmGvS2CMqZYka+1mY8z+kk6TtMlauzhKgcpuNbzwAQAAAAAAQAfauaN57xefhN7+2x85DpWl5ycuKNt1EnampKy1m7M+3irpvk7tCAAAAAAAAECXFvaakgAAAAAAAADQoTgoCQAAAAAAAKCkQp++DQAAAAAAADihnTe6cQVnSgIAAAAAAAAoKc6UROK6V1YVlX93Z2sHdQIAAAAAAIBS4ExJAAAAAAAAACXFQUkAAAAAAAAAJcXTtwEAAAAAANA1WN7oxhWcKQkAAAAAAACgpBI7KDl2zCitbJin1asW6MorLipp3sVskrVL2XdNzQDNnDVVTz8zR0vqZuvCC78mSfrBVZfp2XVPaeFTM7XwqZkaO3ZUWfXdFbJJ1vaxbx9nTrI2M/vRt48zJ1mbmf3o28eZk6zNzH707ePMSdb2cWbAFcZa26kFKrvV7FUglUqpceV8jTv9bDU1teiphTN1zrkXqrHx2Ug/s5i8i9mu3nf2u2/373+Q+vc/WPX1K7XPPr214B8P6f99aaI+9/kztH3bdt1006171cj17ts+bm8fZ3a1bx9ndrVvH2d2tW8fZ3a1bx9ndrVvH2d2tW8fZ3a1bx9ndrVvF2beuaPZRGrGM2/PublzD3Q5pudp3yjbdRJ6pqQx5gPGmMuNMTcZY24wxnzDGNOnmKIjhg/V+vUbtWHDJrW2tmrGjAd01pljS5J3MetT3y+88JLq61dKkrZt2641a9arurp/5HpJ9e16lr7dydK3O1n6didL3+5k6dudLH27k6Vvd7L07U426dqAKwIPShpjvinp
Zkk9JA2X1FPSQEkLjTGjCi1aXdNfzzdt3v15U3NLrANPxeRdzCZZO8m+3/e+Wn3kIx/SkiX1kqT/+MZ5WrRolv5w839r//33K9u+XcwmWdvHvn2cOcnazOxH3z7OnGRtZvajbx9nTrI2M/vRt48zJ1nbx5khqb2dS/aljIWdKfnvksZZa/9L0mmSPmStvUrSOEm/LrSoMXufORrnaeTF5F3MJlk7qb579+6le6b+QVdeeZ3efHOb/njr3Trm6JE6/vjT9cILL+rnv7i6LPt2NZtkbR/79nHmJGszc7xskrWZOV42ydrMHC+bZG1mjpdNsjYzx8smWZuZ42WTrO3jzIBLorzRTWXm3+6S9pUka+0mSVX5AsaYicaYOmNMXXv79r2+3tzUooG11bs/r60ZoJaWLZGbLibvYjbJ2kn0XVlZqXvuuVnTp92vBx/4qyTpxRdfVnt7u6y1mnz7NA372EfKrm+Xs0nW9rFvH2dOsjYz+9G3jzMnWZuZ/ejbx5mTrM3MfvTt48xJ1vZxZsAlYQcl/yhpiTFmkqSFkn4rScaYgyS9mi9krZ1krR1mrR2WSvXe6+tL6uo1ePBhGjRooKqqqjRhwng99PDsyE0Xk3cx61vff/jDL7VmzTr95je37b6uf/+Ddn981lljtXLV2rLr2+UsfbuTpW93svTtTpa+3cnStztZ+nYnS9/uZOnbnWzStQFXVAZ90Vp7kzFmjqQPSvofa+3qzPUvSRpZaNG2tjZdetnVmvnIPapIpTTljulaFXKQqaPyLmZ96vuEE4bp3778eTWsaNTCp2ZKkn58zX/ri188S8ce+yFZa/XPTU365iU/KKu+Xc/StztZ+nYnS9/uZOnbnSx9u5Olb3ey9O1Olr7dySZdG3CF6ezXJajsVsMLHyBQ98q8rwQQybs7WzuoEwAAAAAA3LBzR/PeLz4JvT379xyHytJzzIVlu04Cz5QEAAAAAAAAnGHL+x2n8S9R3ugGAAAAAAAAADoMByUBAAAAAAAAlBQHJQEAAAAAAACUFK8picQV+0Y1KVP4a7a2d/IbPQEAAAAAAGBvHJQEAAAAAABA19DOG924gqdvAwAAAAAAACgpDkoCAAAAAAAAKCkOSgIAAAAAAAAoKQ5KAgAAAAAAACipRA5Kdu/eXQv/8bCerntMy+of1zU/+k7snzF2zCitbJin1asW6MorLury2SRru9L3pFuuV9Pz9Vr6zJzd1/3851drxfIn9HTdY7p3xh/Vp89+Zdd3uWSTrO1j3z7OnGRtZvajbx9nTrI2M/vRt48zJ1mbmf3o28eZk6zt48zea2/nkn0pY8Za26kFKrvV5CzQu3cvbd/+liorKzXvib/oW9++RosWPxPpZ6ZSKTWunK9xp5+tpqYWPbVwps4590I1Nj7bJbP0HZxNGSNJOumk47Rt23ZNvv1GDf3oaZKk004bqb/97R9qa2vTz376A0nSD6762e5se5717+L2duG2om9/Z3a1bx9ndrVvH2d2tW8fZ3a1bx9ndrVvH2d2tW8fZ3a1bxdm3rmj2URqxjNvP3Jj5x7ockzPT19Wtusksadvb9/+liSpqqpSlVVVinNwdMTwoVq/fqM2bNik1tZWzZjxgM46c2yXzdJ3tOyCBYv02mtb33PdnDnz1NbWJklatOgZ1dQMKLu+yyFL3+5k6dudLH27k6Vvd7L07U6Wvt3J0rc7Wfp2J5t0bcAViR2UTKVSqlsyWy3NyzV37jwtXrI0cra6pr+eb9q8+/Om5hZVV/fvstkka7vady5f/eqX9Ne//q3Ta7uYTbK2j337OHOStZnZj759nDnJ2szsR98+zpxkbWb2o28fZ06yto8zAy4JPChpjOljjPmFMWa1MeaVzKUxc93+AbmJxpg6Y0xde/v2nN/T3t6uYcPH6NDDhmn4sKE6+ugjIzdtzN5nnkY909LFbJK1Xe17T9/77iXaubNN90z9v06v7WI2ydo+9u3jzEnWZuZ42SRrM3O8
bJK1mTleNsnazBwvm2RtZo6XTbI2M8fLJlnbx5kBl4SdKTlD0muSRllrD7TWHijpE5nr7s0XstZOstYOs9YOS6V6BxZ4/fU39Pd5T2rsmFGRm25uatHA2urdn9fWDFBLy5Yum02ytqt9Zzv3nC/o9NNP01fOuzhyxsXt7ept5WPfPs6cZG1m9qNvH2dOsjYz+9G3jzMnWZuZ/ejbx5mTrO3jzIBLwg5KDrLW/tJa+8KuK6y1L1hrfynpfYUW7dev7+53Qe7Ro4dGn3qy1qxZHzm/pK5egwcfpkGDBqqqqkoTJozXQw/P7rJZ+i6stiSNGTNKl19+oT73+a/p7bffKfu+fbytfOzbx5ld7dvHmV3t28eZXe3bx5ld7dvHmV3t28eZXe3bx5ld7dvVmSHJtnPJvpSxypCv/9MYc6WkO6y1WyTJGHOIpK9Ker7QogMGHKLbb7tRFRUppVIp3XffQ3pk5pzI+ba2Nl162dWa+cg9qkilNOWO6Vq1am2XzdJ3tOxdd/5WI0eeoH79+uq59Ut03U9u0JVXXqzu3bpp1sypkqRFi5/RxRd/v6z6LocsfbuTpW93svTtTpa+3cnStztZ+nYnS9/uZOnbnWzStQFXmKDXJTDGHCDpe5LG6/+zd+fhUZZn+8fPe5KwK5YihCQUbKltf31tQcGtiLgUcKV9tbRaUFv70hZ3q9RWraKt2gp1aW0VqoBYEdQqZZHiRiFVQqJElgQXlsKEgBtaElGSzP37g5BGQmbJZOaZe+7v5zhySGa4cp1nnmHxYWYeqVfjzTsk/V3SHdbanbEW5HYo5I0PkFIh0/ar20d4Xw4AAAAAgIPq91S1/X+Gs9juBb/nf/Sb6Xzm1Rn7OIn6TMnGk44/b/z4FGPMDyRNT1EuAAAAAAAAAFkq1ntKRjOp3VIAAAAAAAAA8EbUZ0oaY1a3dpek3u0fBwAAAAAAAGijSGZf3AX/FetCN70ljZS0/3tHGkkvpSQRAAAAAAAAgKwW66TkAkndrLXl+99hjFmakkRAgpK5WE2XvI5tnv2o7pM2zwKZjgtIAQAAAABSKdaFbi6Oct/57R8HAAAAAAAAQLZL5kI3AAAAAAAAAJCwWC/fBgAAAAAAANxgudCNK3imJAAAAAAAAIC0Cuyk5MgRw7Vu7TKtryjWxGsvSeu8i7NB7vYhd8eOHfTiP5/Sv1YsVEnpYv3y+islSYuXzFHxywtU/PICvf7Wy3r0sfszKnd7zga528fcLnWe+sBkhbeWa9WrzzXd9pnPHKJFix7VunXLtWjRozrkkO4ZlzsTZoPc7WNuHzsHuZvOfuT2sXOQu+nsR24fOyczP23qFG0Lv6byVc8nvDOZvcnOBr0bcIGxKb5Kam6HwhYLQqGQKtct16jTz1M4XK0VLy/S2HETVFn5ZlxfM5l5F2fJnbrZ5lff7tq1i2prP1Jubq6WPDdXP7/2FpWW/vfC87P++ictWvisZj/6lKTWr76d6Z0zbbePuV3o3Pzq20OHHqOamlpNf+huDTryVEnS7bddr/ff/0B3Tr5P115ziT7zme765fW3SWr96tsufr9dOFbk9rezq7l97Oxqbh87u5rbx86u5vaxc7LzJ+z7u+j0ezRw0Clx7WuPvS4cq/o9VaaVL+G13fN+l9oTXY7pPHpixj5OAnmm5NFDBmnDhs3atGmL6urqNHfuPJ191si0zLs4S+70zNbWfiRJysvLVW5erpqfsO/WrauGnXicFsx/NuNyt8csud2ZDWJ3cXGJdu784FO3nXXWCM165HFJ0qxHHtfZZ8fe7+L327Vj5XNuHzu7mtvHzq7m9rGzq7l97Oxqbh87Jzu/vLhE7+/3d9F07HX1WAEuCeSkZEFhvraGtzV9Hq6qVkFBflrmXZwNcrdPuUOhkIpfXqANm0v14gv/UlnZa033nXX2CP1z6Uvatasm43K3x2yQu33M7Wrn5nr16qnt29+WJG3f/rYOPfSzKd3t4myQu33M7WPnIHfT
2Y/cPnYOcjed/cjtY+f2mG8rVzsH9f0C0i2Qq28b0/KZo4m8jDyZeRdng9ztU+5IJKKhx52p7t0P0l9n36+v/L/DVVnxhiTp3O+cpZkz5qZsd9CzQe72MbernZPl4vfb1WPlY24fOwe5m86JzQa5m86JzQa5m86JzQa5m86JzbbHfFu52jnIv7NnhQhX33ZFm58paYx5Jsp9440xZcaYskiktsX9VeFq9S0qaPq8qLCPqqt3xL07mXkXZ4Pc7WPuDz/cpeLlJTr1m8MkST16HKKjjvq6/rH4hYzO7eOxCnK3j52be/vtd5Wf30uSlJ/fS++8815Kd7s4G+RuH3P72DnI3XT2I7ePnYPcTWc/cvvYuT3m28rVzkF9v4B0i3pS0hhzZCsfR0ka2NqctXaqtXawtXZwKNS1xf2lZeUaMOAw9e/fV3l5eRozZrTmL1gSd+hk5l2cJXfqZz/bs4e6dz9IktSpU0cNP+kbevP1jZKkb337dC1e/II++WRPxuVur1lyuzMb9O595i94VuPGfkeSNG7sdzR/fuyv4eL329Vj5WNuHzu7mtvHzq7m9rGzq7l97Oxqbh87t8d8W7naOajvF5BusV6+XSrpn5IOdKWeQ9q6tKGhQVdceYMWLXxUOaGQZsyco4rGl8mmet7FWXKnfjY/v5fun3qncnJyFAoZPfXkIi1ufGbkOeeeqbt+f39ce9Odu71mye3ObBC7Zz38Rw0bdpx69uyhjRtKdcutU3TnnX/Uo4/er4t+8D1t3Vql8877ScblDnqW3O7MktudWXK7M0tud2bJ7c6sr7kfmXWfTmz8u+jmjWWadMtkTZ/xWMr3unqsAJeYaO9LYIxZK+nb1toW16w3xmy11vaNtSC3QyFvfICM1SWvY5tnP6r7pB2TAJklZA70b1HxifB+NwAAAEDK1e+pavtf2rPY7qfu4H9Imun87esy9nES65mSN6v1l3hf1r5RAAAAAAAAgCRYLnTjiqgnJa21T0S5+zPtnAUAAAAAAACAB9p89W1Jk9otBQAAAAAAAABvRH2mpDFmdWt3Serd/nEAAAAAAAAAZLtY7ynZW9JISTv3u91IeikliQAAAAAAAABktVgnJRdI6matLd//DmPM0pQkAtIomStoc3ViZDMeowAAAACcFOFCN66IdaGbi6Pcd377xwEAAAAAAACQ7ZK50A0AAAAAAAAAJIyTkgAAAAAAAADSipOSAAAAAAAAANIqsJOSI0cM17q1y7S+olgTr70krfMuzga5m9yx56c+MFnhreVa9epzTbed879nqHzV8/p49xYdeeTX0pKbY5XY/LSpU7Qt/JrKVz2f8M5k9iY7G+RuV79nPh4rH3P72DnI3fxe4sex8rFzkLvp7EduHzsHudvHzoArjE3xFVZzOxS2WBAKhVS5brlGnX6ewuFqrXh5kcaOm6DKyjfj+prJzLs4S+7MzN386ttDhx6jmppaTX/obg068lRJ0pe/PECRSET3/fG3+vl1t+rVV1c3/fzWrmyc6Z0zbTbZ+RP2Hbfp92jgoFPi2tcee109VpKb3zMfj5WPuX3s7HJu334vcTW3j51dze1jZ1dz+9jZ1dwudK7fU2Va+RJe2z33ltSe6HJM5zG/ytjHSSDPlDx6yCBt2LBZmzZtUV1dnebOnaezzxqZlnkXZ8md+bmLi0u0c+cHn7pt/fq39MYbG+PemWxujlXi88uLS/T+fsctHXtdPVaSm98zH4+Vj7l97Oxybt9+L3E1t4+dXc3tY2dXc/vY2dXcrnYGXBLIScmCwnxtDW9r+jxcVa2Cgvy0zLs4G+Rucrdtvq1c7exq7mS42jmo71eyu12cDXK3j7l97Bzkbn4v8eNY+dg5yN109iO3j52D3O1jZ8AlgZyUNKblM0cTeRl5MvMuzga5m9xtm28rVzu7mjsZrnYO6vuV7G4XZ4Pc7WNuHzsHuZvfSxKbDXI3nRObDXI3nRObDXI3nRObDXK3j50Bl0Q9KWmM
OdgYc7sxZpYx5vz97vtTlLnxxpgyY0xZJFLb4v6qcLX6FhU0fV5U2EfV1TviDp3MvIuzQe4md9vm28rVzq7mToarnYP6fiW728XZIHf7mNvHzkHu5vcSP46Vj52D3E1nP3L72DnI3T52BlwS65mS0yUZSU9K+p4x5kljTMfG+45tbchaO9VaO9haOzgU6tri/tKycg0YcJj69++rvLw8jRkzWvMXLIk7dDLzLs6S263cyXC1s6u5k+Fq56C+X8nudnGW3O7MkpvfS1I962puHzu7mtvHzq7m9rGzq7ld7QxJ1vLR/COD5ca4/wvW2nMaf/y0MeZ6SS8YY85OZmlDQ4OuuPIGLVr4qHJCIc2YOUcVFW+kZd7FWXJnfu5ZD/9Rw4Ydp549e2jjhlLdcusU7Xz/A91116069NAemvf0TL22ep3OPHNs1nTOhNlk5x+ZdZ9ObDxumzeWadItkzV9xmMp3+vqsZLc/J75eKx8zO1jZ5dz+/Z7iau5fezsam4fO7ua28fOruZ2tTPgEhPtfQmMMZWSvmqtjTS77UJJEyV1s9b2i7Ugt0NhZp+WBdooZFq+z0e8Ihn+rxUAAAAAgMxWv6eq7f9TmsV2z5nE/3A30/m7N2Xs4yTWy7fnSzq5+Q3W2pmSfiZpT6pCAQAAAAAAAMheUV++ba2d2Mrti40xt6UmEgAAAAAAAIBsFus9JaOZpL0XwgEAAAAAAACCF4nE/jnICFFPShpjVrd2l6Te7R8HAAAAAAAAQLaL9UzJ3pJGStq53+1G0kspSQQ4IpmL1SRzkZxkd8MdyTxKeIQAAAAAADJZrJOSC7T3Ktvl+99hjFmakkQAAAAAAAAAslqsC91cHOW+89s/DgAAAAAAAIBsl8yFbgAAAAAAAIDMwYVunBEKOgAAAAAAAAAAv3BSEgAAAAAAAEBaBXpSMhQKqXTlPzTvqZkJz44cMVzr1i7T+opiTbz2kqyfDXI3uVO7e+oDkxXeWq5Vrz7XdNvtt9+gNauX6pWyZ/X43L+oe/eDU5552tQp2hZ+TeWrnk9orj12u3KsMmVWkt58Y4VWvfqcykqXaMXLi9K2m2PlTmdXf037eKx8zO1j5yB309mP3D52DnI3ndOX29W/0wS9G3CBsdamdEFuh8JWF1x5xXgdddTXdPBBB2n0ty+M+2uGQiFVrluuUaefp3C4WiteXqSx4yaosvLNrJwld/blDhnT9OOhQ49RTU2tpj90twYdeaok6dRTh+nFF/+lhoYG3fabX0qSfnn9bU0zkQP8uk228wn7cky/RwMHnRLXTHvszvRjFeSsaWVe2ntS8tjjTtN77+084P2t/cbLsfKjs+Tmr2kfj5WPuX3s7GpuHzu7mtvHzq7m9rFzsvMu/p0mXbvr91RF+18Gb+3+642pPdHlmM7fvzVjHyeBPVOysLCPTj/tFD300OyEZ48eMkgbNmzWpk1bVFdXp7lz5+nss0Zm7Sy5szt3cXGJdu784FO3PffcMjU0NEiSSkpeVWFhn5RmlqTlxSV6f78c8fLlWGXCbLI4Vn50ltz8Ne3jsfIxt4+dXc3tY2dXc/vY2dXcPnZOdt7Fv9MEvRtwRWAnJX8/ZZKu+8WvFWnDVZEKCvO1Nbyt6fNwVbUKCvKzdjbI3eRO/+79XXTRd/WPf7yY9r2J8PFYBf0YsdbqmUWzVbLiGf3o4u/HPcex8qNzslz8frt6rHzM7WPnIHfT2Y/cPnYOcjed05s7Ga52DvLvgVnBRvho/pHBop6UNMbkG2P+bIy5zxjzWWPMzcaYNcaYucaYVp+6ZYwZb4wpM8aURSK1Le4/4/RT9fbb7+rVVWvaFNqYls88jfdl6C7OBrmb3Onf3dx1P79M9fUNenT239K6N1E+HqugHyMnDv+Wjj5mlM48a6x++tOLNHToMSnfzbFKbDbo3clw8fvt6rHyMbePnYPcTefEZoPcTefEZoPcTefEZttjvq1c7Rzk3wOBdIr1TMkZkiokbZX0
oqTdks6QtFzS/a0NWWunWmsHW2sHh0JdW9x//PGDddaZI/TWGyv010f+pJNO+oZmzrg37tBV4Wr1LSpo+ryosI+qq3dk7WyQu8md/t37jBt7rk4//VRdcOGlad3bFj4eq6AfI/t+/jvvvKen5z2jIUMGpnw3x8qdzsly8fvt6rHyMbePnYPcTWc/cvvYOcjddE5v7mS42jnIvwcC6RTrpGRva+0frLV3SDrEWvtba+0Wa+0fJPVr69Lrb7hD/T8/WAMOP1bfHztBL774L1140eVxz5eWlWvAgMPUv39f5eXlacyY0Zq/YEnWzpLbn9z7jBgxXNdcM0H/e84PtHv3x2nb21Y+HqsgO3fp0lndunVt+vE3Tz1R69a9nvG5Xfx+u9o5WS5+v109Vj7m9rGzq7l97Oxqbh87u5rbx87tMd9WrnYO8u+BQDrlxri/+UnLh/e7L6eds8StoaFBV1x5gxYtfFQ5oZBmzJyjioo3snaW3Nmde9bDf9SwYcepZ88e2rihVLfcOkUTJ16qjh066JlFey8EVbLyVV166S9S2vmRWffpxMYcmzeWadItkzV9xmMp6dyeuV18jCXbuXfvQ/XE4w9KknJyc/TYY09ryZKlGZ/bxe+3q50lN39N+3isfMztY2dXc/vY2dXcPnZ2NbePnZOdd/HvNEHvBlxhor0vgTHmFkm/s9bW7Hf7AEl3WGvPjbUgt0Mhb3wA7CdkWr5HSCIivJ+IF5J5lPAIAQAAALJb/Z6q5P7HMkvtfvgX/O9QM50vuD1jHydRnylprf1VK7e/ZYxZmJpIAAAAAAAAALJZrPeUjGZSu6UAAAAAAAAA4I2oz5Q0xqxu7S5Jvds/DgAAAAAAAIBsF+tCN70ljZS0c7/bjaSXUpIIAAAAAAAAQFaLdVJygaRu1try/e8wxixNSSLAA1yoBvHgUQIAAAAACeL/t50R60I3F0e57/z2jwMAAAAAAAAg2yVzoRsAAAAAAAAASBgnJQEAAAAAAACkFScl1/yoWAAAIABJREFUAQAAAAAAAKRVYCclp02dom3h11S+6vk2zY8cMVzr1i7T+opiTbz2kqyfDXI3ud3J7WPnIHfT2Y/cPnYOcjed/cjtY+cgd9PZj9w+dg5yN539yO1qZ8AVxqb4qkS5HQoPuOCEoceopqZW06ffo4GDTknoa4ZCIVWuW65Rp5+ncLhaK15epLHjJqiy8s2snCU3uemcebvp7EduHzu7mtvHzq7m9rGzq7l97Oxqbh87u5rbx86u5nahc/2eKhNXGM/snj6Ry2830/kHv8vYx0lgz5RcXlyi93d+0KbZo4cM0oYNm7Vp0xbV1dVp7tx5OvuskVk7S25yp3qW3O7MktudWXK7M0tud2bJ7c4sud2ZJbc7s+R2Zzbo3YArnHxPyYLCfG0Nb2v6PFxVrYKC/KydDXI3udO7m85+5Paxc5C76exHbh87B7mbzn7k9rFzkLvp7EduHzsHudvHzoBLEj4paYzplYogCWZocVu8L0N3cTbI3eRO7246JzYb5G46JzYb5G46JzYb5G46JzYb5G46JzYb5G46JzYb5G46JzYb5G46JzYb5G4fOwMuyY12pzGmx/43SVppjBmkve9H+X4rc+MljZckk9NdoVDX9sjapCpcrb5FBU2fFxX2UXX1jqydDXI3udO7m85+5Paxc5C76exHbh87B7mbzn7k9rFzkLvp7EduHzsHudvHzoBLYj1T8l1JrzT7KJNUKOnVxh8fkLV2qrV2sLV2cHufkJSk0rJyDRhwmPr376u8vDyNGTNa8xcsydpZcpM71bPkdmeW3O7MktudWXK7M0tud2bJ7c4sud2ZJbc7s0Hv9l4kwkfzjwwW9ZmSkiZKOlXStdbaNZJkjNlkrT0s2cWPzLpPJw47Tj179tDmjWWadMtkTZ/xWFyzDQ0NuuLKG7Ro4aPKCYU0Y+YcVVS8kbWz5CZ3qmfJ7c4sud2ZJbc7s+R2Z5bc7syS251Zcrsz
S253ZoPeDbjCxHpfAmNMkaS7JG2VdJOk16y1n493QW6HQt74AAAAAAAAoB3V76lq+eaT0O4Hr+E8VDOdL56csY+TmBe6sdaGrbXfkfSipGcldUl5KgAAAAAAAABZK+6rb1tr50s6SXtfzi1jzA9SFQoAAAAAAABA9or1npKfYq3dLWlt46eTJE1v90QAAAAAAABAW9jMvrgL/ivqSUljzOrW7pLUu/3jAAAAAAAAAMh2sZ4p2VvSSEk797vdSHopJYkAAAAAAAAAZLVYJyUXSOpmrS3f/w5jzNJ4FuTlJPQK8U+pa6hv8yyAAxvc84ttni179812TAIAAAAAAHwV9YyhtfbiKPed3/5xAAAAAAAAAGS7uK++DQAAAAAAAADtoe2vrQYAAAAAAAAyiI3YoCMgTjxTEgAAAAAAAEBape2kZFFRHy1e/JhWrXper7zyrC655AeSpF/96mdauXKxVqxYpPnzZ6lPn15xfb2RI4Zr3dplWl9RrInXXpJQFhdng9xN7szOPW3qFG0Lv6byVc8fcPbEYcfpvXcqVVa6RGWlS/TDqy5IKM+BdOjQQY/+9c9aX1Gsl4rnq1+/Io0cMVybN5aq5j8bFd6ySiUrntFJw78R19fz5Vi112yQu33M7WPnIHfT2Y/crnZu/mduW7j4/Xb1WPmY28fOkhQKhVS68h+a99TMhGdd7UxuN2aD3g24wFib2qe1du7cz0pSfn4v5ef3Unn5WnXr1lUvvbRAY8aMV1VVtXbtqpEkTZhwkb785S/q8suvl9T61bdDoZAq1y3XqNPPUzhcrRUvL9LYcRNUWRn7ysAuzpKb3NFmTxh6jGpqajVj+r3q1Klji9leh/bU1Vf9RKO/faGkxK6+3acoXzfefZ0mnHulpP9effsnP75QRxzxFV1y6XUaM+ZsfXv0aTryyK/p6p/9SmvWrtdTf5uhmyfdqT/ee5v6HTa43Tu317yLs+R2Z5bc7syS253ZoHfv+zN3+vR7NHDQKXHNBJ3bx2PlY24fO+9z5RXjddRRX9PBBx3U9PfdeLjamdxuzKZrd/2eKhNXGM98NPUqXr/dTJfxd2Xs4yRtz5Tcvv1tlZevlSTV1NRq/fq3VFDQu+mEpCR16dJF8ZwkPXrIIG3YsFmbNm1RXV2d5s6dp7PPGhlXDhdnyU3uaJYXl+j9nR+oc+dOCc+O+t9v6sGFf9bDz/5FP//t1QqF4vst4eyzRmjWrMclSU8+uVDf/OZwbdiwWQsXPa8tW6o0d+48/b+vHK5OnTqpQ4cO7d65veZdnCW3O7PkdmeW3O7MBr1735+5beHi99vVY+Vjbh87S1JhYR+dftopeuih2XHPBJ3b12PlYm5XOwMuCeQ9JT/3uSINHPhVlZaWS5Juvvlavfnmy/re976lW2/9fcz5gsJ8bQ1va/o8XFWtgoL8uHa7OBvkbnKnd3cys7l5ua3OHnvsUXql7Fkt+PssHXZ4f0lS/wGf06mjT9L40Zfqgm/+SJGGiEb+76kJ52xoaNDHH3+sd95571O7TzjhWJWXr9WePXtS1jnZeRdng9ztY24fOwe5m85+5Ha1c7Jc/H67eqx8zO1jZ0n6/ZRJuu4Xv1YkEol7pj12c6z8yO1qZ0iKRPho/pHBUnL1bWPMeEnjJSk3t4dyc7s13de1axfNnn2/rr32lqZnSd588526+eY7dc01E/STn1yoX//6rlhfv8Vt8b4M3cXZIHeTO727k5o9wG3WWr26ao0+P+Bo1dZ+pNNGnaw/PXS7vjN0rAafcJS+dMThmv7MA5Kkjp06aOd7e5/9cceDt6rgc32Ul5er3oW99fCzf5Ek/fau+zTz4bkHztnsx4UF+Tru2KM05JhRsXP7eKw87BzkbjonNhvkbjonNhvkbh87J8vF77erx8rH3D52PuP0U/X22+/q1VVrdOKw4+Kaaa/dHKvEZoPc7WNnwCVRT0oaY0ZZaxc3/ri7pN9LGiJpraSrrLU7DjRnrZ0qaar0
3/eUlKTc3FzNnn2/5sx5WvPmLW4xN3fuPP3tb9NjnpSsClerb1FB0+dFhX1UXX3AKFkxG+Rucqd3dzKzdXX1B5xt/hYJzyx+Qbl5uereo7uMkRY9/g/9+fZpLb7WdRffKKn195Tcl7Oqqlo5OTnq1KmTeh36WUl7X0Zz1VU/0WNzntbGjf9Oaedk512cDXK3j7l97Bzkbjr7kdvVzsly8fvt6rHyMbePnY8/frDOOnOETht1sjp16qiDDz5IM2fcqwsvujyjc/t4rILc7WNnwCWxXr59W7MfT5FULeksSaWSHkh02f33/06vv/6W7r33L023feEL/Zt+fMYZ39Qbb2yI+XVKy8o1YMBh6t+/r/Ly8jRmzGjNX7AkrgwuzpKb3PHYvfvjA8727n1o088ZMnigTMjow/c/VOnyV3XyGSfqM589RJJ08CEHKb+wd1y75i9YonHjviNJOuecM/Tc88s0YMBhOuKIr2j+3x9WbW2t7vvT9JR3TnbexVlyuzNLbndmye3ObNC7k+Hi99vVY+Vjbh87X3/DHer/+cEacPix+v7YCXrxxX/FfUIyyNw+HitXc7vaGXBJIi/fHmytHdj447uMMfFf2kx7/yXr+98/R2vWVGrFikWSpJtuulMXXfRdffGLn1ckEtGWLVW6/PJfxvxaDQ0NuuLKG7Ro4aPKCYU0Y+YcVVS8EVcOF2fJTe5oHpl1n04cdpx69uyhDz/cpeLl85UTCunll8tUUfGGJvz0Iv34xxeovr5BH+/+WDf+9BZJ0uY3/60Hfveg7nlsskLGqL6+Xnf+8h5tr4r9L3APTX9MM2fcq/UVxdq58wOdP3aCvvylAXr2H3PVo8ch2rHjHT0884+SpNNOP+9T7zfZnt+vZOddnCW3O7PkdmeW3O7MBr27+Z+5mzeWadItkzV9xmMZndvHY+Vjbh87J8vVzuR2Yzbo3YArTLT3JTDGhLX3JdtG0iWSvmAbB4wxq621X4u1oPnLtxNV11Df1lEArRjc84ttnt338m0AAAAAQLDq91Qd6NIC3vvoz5fxBpzNdPnpHzL2cRLr5dvTJB0kqZukmZJ6SpIxJl9SeWqjAQAAAAAAAMhGUV++ba2d1Mrt240xL6YmEgAAAAAAAIBsFuuZktEc8IQlAAAAAAAAAEQT9ZmSxpjVrd0lKb7L9AIAAAAAAABAM7Guvt1b0khJO/e73Uh6KZ4FXKwGyCxcrAYAAAAAAAQt1knJBZK6WWtbXNTGGLM0JYkAAAAAAACAtohw8W1XxLrQzcVR7ju//eMAAAAAAAAAyHbJXOgGAAAAAAAAABLGSUkAAAAAAAAAaRXYScmRI4Zr3dplWl9RrInXXpLWeRdng9xNbndy+9g5yN109iO3q52nTZ2ibeHXVL7q+YTm2mO3i7NB7vYxt4+dg9xNZz9y+9g5yN109iO3q50BVxhrU/sGoLkdClssCIVCqly3XKNOP0/hcLVWvLxIY8dNUGVlfFcFTmbexVlyk5vOmbebzn7kdrWzJJ0w9BjV1NRq+vR7NHDQKXHNBJ3bx2PlY24fO7ua28fOrub2sbOruX3s7GpuFzrX76kycYXxzEd/mMCVbprpctmfMvZxEsgzJY8eMkgbNmzWpk1bVFdXp7lz5+nss0amZd7FWXKTO9Wz5HZnltzuzAa9e3lxid7f+UHcPz8Tcvt4rHzM7WNnV3P72NnV3D52djW3j51dze1qZ8AlgZyULCjM19bwtqbPw1XVKijIT8u8i7NB7iZ3enfT2Y/cPnYOcrePnZPl4vfb1WPlY24fOwe5m85+5Paxc5C76exHblc7Ay5J+KSkMeazyS41puUzRxN5GXky8y7OBrmb3OndTefEZoPcTefEZoPc7WPnZLn4/Xb1WPmY28fOQe6mc2KzQe6mc2KzQe6mc2KzQe72sTPgkqgnJY0xdxhjejb+eLAxZqOkEmPMv40xJ0aZG2+MKTPGlEUitS3urwpXq29RQdPnRYV9VF29I+7Q
ycy7OBvkbnKndzed/cjtY+cgd/vYOVkufr9dPVY+5vaxc5C76exHbh87B7mbzn7kdrUz4JJYz5Q8w1r7buOP75T0XWvtAEnflDSltSFr7VRr7WBr7eBQqGuL+0vLyjVgwGHq37+v8vLyNGbMaM1fsCTu0MnMuzhLbnKnepbc7syS253ZoHcnw8Xvt6vHysfcPnZ2NbePnV3N7WNnV3P72NnV3K52hqRIhI/mHxksN8b9ecaYXGttvaTO1tpSSbLWvmGM6djWpQ0NDbriyhu0aOGjygmFNGPmHFVUvJGWeRdnyU3uVM+S251ZcrszG/TuR2bdpxOHHaeePXto88YyTbplsqbPeCyjc/t4rHzM7WNnV3P72NnV3D52djW3j51dze1qZ8AlJtr7EhhjLpN0lqQ7JA2TdIikv0k6RdLnrbXjYi3I7VDIGx8AAAAAAAC0o/o9VS3ffBL66J6fcB6qmS5X3J+xj5Ooz5S01v7BGLNG0k8lHd748w+X9LSkW1MfDwAAAAAAAEC2ifXybVlrl0pauv/txpgfSJre/pEAAAAAAAAAZLNYF7qJZlK7pQAAAAAAAADgjajPlDTGrG7tLkm92z8OAAAAAAAA0EZRrp2CzBLr5du9JY2UtHO/242kl1KSCAAAAAAAAEBWi3VScoGkbtba8v3vMMYsTUkiAFmrU26HNs9+XL+nHZMAANIhmUs98hwHAACA7Bbr6tsXR7nv/PaPAwAAAAAAACDbJXOhGwAAAAAAAABIWKyXbwMAAAAAAABuiESCToA48UxJAAAAAAAAAGkVyEnJoqICPbfkca1ZvVSvlb+gyy5t9a0rWzVyxHCtW7tM6yuKNfHaS7J+Nsjd5E7v7mlTp2hb+DWVr3o+oblk9yY7n8hsx44dtHTZ03p5xSKVlv1D199wpSSpX78ivfjPp1S++gXNfPgPysvLy6jcmTIb5G4fc7vYuWPHjnr5Xwv0Stmzeq38Bd30q58lGtvJ77eLxyrZ2SB3JzPbvfvBeuyxqVqz5p9avXqpjj3mqLhnk/lzUuJY0Tmzd9PZj9w+dg5yt4+dAVcYa1N7bcPcDoUtFuTn91Kf/F5aVb5W3bp11cqSxTrn3B+qsvLNuL5mKBRS5brlGnX6eQqHq7Xi5UUaO25CXPMuzpLbn9ySdMLQY1RTU6vp0+/RwEGnxDXTHnvT0bn51be7du2i2tqPlJubq2eff1wTr5mkyy7/kf4+b7GeeGKB7rn311qzplJ/mfZXSa1ffdvFx5gLx4rcbneWPv1rbNnSp3TV1TepZOWrGZ3bx2OV7blbu/r2Qw/ereLiEj00fbby8vLUpUtnffjhfz71c1r7G2pb/5xMJHd7zwa5m85+5Paxs6u5fezsam4XOtfvqWrtj1qvffT7/0vtiS7HdLl6WsY+TgJ5puT27W9rVflaSVJNTa3Wr39ThQX5cc8fPWSQNmzYrE2btqiurk5z587T2WeNzNpZcvuTW5KWF5fo/Z0fxP3z22tvujvX1n4kScrLy1VeXq6spBNPPE5PPfWMJOmvjzypM88ckXG5g54ltzuzQe9u/mssNy9PifwjpIvfb1ePlY+5Dzqom4YOPUYPTZ8tSaqrq2txQjKatv45mWxuH4+Vj51dze1jZ1dz+9jZ1dyudgZcEvh7SvbrV6SBX/8flaxcFfdMQWG+toa3NX0erqpWQZwnNV2cDXI3udO/u61c6xwKhfTSioXa9O8yvfB8sTZt/Lc++PA/amhokCRVVW1XQUHvjMsd9GyQu33M7Wpnae+vsbLSJaquWq3nn1+mlaX8OZuJu33M/fnP99O7776nB/9yl0pX/kMP3H+nunTpHNdssjhWdM7k3XT2I7ePnYPc7WNnSIpYPpp/ZLBAT0p27dpFc+dM09XX3KRdu2rinjOm5TNP430GiIuzQe4md/p3t5VrnSORiI4/9gx96YvHafDgr+tLXxrQpv0uPsZcO1btMRvkbh87S3t/jQ0eMkL9DhusIYMH
6atf/VLcsy5+v109Vj7mzs3J0aBBR+iBBx7WkKNHqrb2I02ceGlcs8niWKVvNsjdPub2sXOQu+mc2GyQu33sDLgk6klJY8yrxpgbjDFfSOSLGmPGG2PKjDFlkUjtAX9Obm6uHp8zTbNnP6Wnn34mkS+vqnC1+hYVNH1eVNhH1dU7snY2yN3kTv/utnK184cf7tLy5Ss05OhBOqT7wcrJyZEkFRbmq7r67YzN7ePj08fcrnZu7sMP/6N/LntJI0cMj3vGxe+3q8fKx9zhqmqFw9VNz9598m8LNWjgEXHNJotjRedM3k1nP3L72DnI3T52BlwS65mSn5F0iKQXjTErjTFXGWMKYszIWjvVWjvYWjs4FOp6wJ8zbeoUVa5/S3ffMzXh0KVl5Row4DD1799XeXl5GjNmtOYvWJK1s+T2J3cyXOrcs2cPde9+kCSpU6eOOumkoXr99be0bNkKffvbp0mSvj/2HC1c+GxG5c6EWXK7Mxvk7r2/xg6WJHXq1EmnnHyCXn99Q8bn9vFY+Zh7x453FA5v0+GH7/0375NPHqrKyjfimk0Wx4rOmbybzn7k9rGzq7ld7Qy4JDfG/TuttddIusYYc4Kk8yS9aoyplDTbWpv4GUVJ3zh+iMaNPVer11SorHTvL6wbb7xDzyx+Ia75hoYGXXHlDVq08FHlhEKaMXOOKiri+8usi7Pk9ie3JD0y6z6dOOw49ezZQ5s3lmnSLZM1fcZjKd+bzs6983tp6rTJygnlKBQy+tvfFmrxMy9ofeWbmvHwH3TjTT/T6tcqNHPG3IzKnQmz5HZnNsjdffr01kMP3q2cnJBCoZCeeGK+Fi56LuNz+3isfM195VU36uGZf1CHDnnauGmLfvSjq+Oebeufk8nm9vFY+djZ1dw+dnY1t4+dXc3tamfAJSba+xIYY1611h653205kr4p6bvW2h/EWpDboZA3PgAgSeqU26HNsx/X72nHJACAdGj5jljx4y+QAABEV7+nKpk/arPWR3f+kL9GNNPl2ocy9nES65mSLU7FW2sbJC1u/AAAAAAAAACAhER9T0lr7fdau88YE/NZkgAAAAAAAACwv1gXuolmUrulAAAAAAAAAOCNqC/fNsasbu0uSb3bPw4AAAAAAACAdDHGXCXpR9r7tt5rJP1AUh9Jj0nqIelVSeOstXuMMR0lPSzpKEnvae81Zza3ZW+s95TsLWmkpJ3755X0UlsWAvAXF6sBAL/wLvMAAACZzRhTKOlySf/PWrvbGDNX0vcknS7pLmvtY8aY+yVdLOnPjf/daa0dYIz5nqTfSvpuW3bHOim5QFI3a235AUIvbctCAAAAAAAAICUi/LNoG+RK6myMqZPURVK1pJMlnd94/0xJN2vvScnRjT+WpCck/dEYY6y1CX/jY13o5mJrbXEr951/oNsBAAAAAAAAZD5rbZWkyZK2aO/JyA8lvSLpA2ttfeNPC0sqbPxxoaStjbP1jT//s23ZncyFbgAAAAAAAABkKGPMeGNMWbOP8fvd/xntffbjYZIKJHWVdNoBvtS+Z0KaKPclJNbLtwEAAAAAAAA4yFo7VdLUKD/lVEmbrLXvSJIx5m+Sjpd0iDEmt/HZkEWStjX+/LCkvpLCxphcSd0lvd+WbDxTEgAAAAAAAPDTFknHGmO6GGOMpFMkVUh6UdK5jT/nQknzGn/898bP1Xj/C215P0kpwJOSI0cM17q1y7S+olgTr70krfMuzga5m9zu5Paxc5C76exHbh87B7mbzn7knjZ1iraFX1P5qucTmmuP3RwrOmfybjr7kdvHzkHu9rGz72wkwkezj5jfL2tLtPeCNa9KWqO95wqnSvq5pKuNMW9p73tGPtg48qCkzzbefrWk69p6rEwbT2bGLbdDYYsFoVBIleuWa9Tp5ykcrtaKlxdp7LgJqqx8M66vmcy8i7PkJjedM283nf3I7WNnV3P72Nnl3CcMPUY1NbWaPv0eDRx0SlwzQef28Vj52NnV3D52
djW3j51dze1C5/o9VQd6bz/v1d5+IZffbqbrL2Zm7OMkkGdKHj1kkDZs2KxNm7aorq5Oc+fO09lnjUzLvIuz5CZ3qmfJ7c4sud2ZJbc7s+ROf+7lxSV6f+cHcf/8TMjt47HysbOruX3s7GpuHzu7mtvVzoBLAjkpWVCYr63hbU2fh6uqVVCQn5Z5F2eD3E3u9O6msx+5fewc5G46+5Hbx85B7k42dzJc7exibh87B7mbzn7k9rFzkLt97Ay4JOpJSWPMYGPMi8aYR4wxfY0xzxpjPjTGlBpjBkWZa7rceCRSe6D7W9yWyMvIk5l3cTbI3eRO7246JzYb5G46JzYb5G46JzYb5G46JzYb5O5kcyfD1c4u5vaxc5C76ZzYbJC76ZzYbJC7fewMuCQ3xv1/knSTpEMkvSTpKmvtN40xpzTed9yBhppfbvxA7ylZFa5W36KCps+LCvuounpH3KGTmXdxNsjd5E7vbjr7kdvHzkHuprMfuX3sHOTuZHMnw9XOLub2sXOQu+nsR24fOwe528fOkBThBK4rYr18O89a+4y1drYka619Qnt/8LykTm1dWlpWrgEDDlP//n2Vl5enMWNGa/6CJWmZd3GW3ORO9Sy53Zkltzuz5HZnltzpz50MVzu7mNvHzq7m9rGzq7l97Oxqblc7Ay6J9UzJj40xIyR1l2SNMd+y1j5tjDlRUkNblzY0NOiKK2/QooWPKicU0oyZc1RR8UZa5l2cJTe5Uz1Lbndmye3OLLndmSV3+nM/Mus+nTjsOPXs2UObN5Zp0i2TNX3GYxmd28dj5WNnV3P72NnV3D52djW3q50Bl5ho70tgjPm6pN9Jiki6StJPJV0oqUrS/1lrX4q14EAv3wYAAAAAAEDb1e+pavnmk1Dtby7gPFQzXa9/OGMfJ1Ffvm2tfc1aO9Jae5q1dr219gpr7SHW2q9K+lKaMgIAAAAAAADIIrHeUzKaSe2WAgAAAAAAAIA3or6npDFmdWt3Serd/nEAAAAAAACANrKRoBMgTrEudNNb0khJO/e73UiK+X6SAAAAAAAAALC/WCclF0jqZq0t3/8OY8zSuBaEctoQa6/6SJsv8A0ATXZvW97m2c4FJ7RjEgAAAAAAIMU4KWmtvTjKfee3fxwAAAAAAAAA2S6ZC90AAAAAAAAAQMJivXwbAAAAAAAAcEPEBp0AceKZkgAAAAAAAADSKq0nJR944E5t2fKqXnnl2abbZs26TyUlz6ik5Bm9/vq/VFLyTFxfa+SI4Vq3dpnWVxRr4rWXJJTDxdkgd5PbndzJzBYVFei5JY9rzeqleq38BV12aatvKdvuuxOdPbRnR/X/XBf1Lex8wPm8PKPCPp31+f5d1f3gvISyRNP70I76XFEXFfbprNxco5Ejhqti7TK99cZL+s2tV6qooLO6donv4l4+Pj6D3E1nP3L72DnI3XT2I7ePnYPcTWc/cvvYOcjdPnYGXGGsTe3TWjt1+lzTgqFDj1ZNzUd68MG7dNRR32zxc++44wb95z+7dNtt90hq/erboVBIleuWa9Tp5ykcrtaKlxdp7LgJqqx8M2YeF2fJTe50dM7P76U++b20qnytunXrqpUli3XOuT/MyNydOoUUiew9SVhV/UmL+e+P/bE2bdysrl1z1dBgtX39C3F9DySpqnqHrv/NFM344+8k/ffq2wcflKsOHXL07nufqFvXXB3ULU9Llz6n0844T1u37t17wYWXqHZXWJu3fJSy71ey8/y6onO25vaxs6u5fezsam4fO7ua28fOrub2sbOruV3oXL+nysQVxjO1t3yf12830/VXf83Yx0lanylZXLxSO3d+0Or95557pubMmRfz6xw9ZJA2bNisTZu2qK6uTnPnztPZZ42MK4OLs+Qmd6pnJWn79re1qnytJKmmplbr17+pwoL8jMz98ccRRRrfJ+RA86PPPl2f7IkO+TCkAAAgAElEQVToQP/mMv8fL+h7P7pC51x4iSb97l41NBz4
Hz/217VLrnbV1EmSamrrdeyxR2rDhs3auLF57hGK508/Hx+frub2sbOruX3s7GpuHzu7mtvHzq7m9rGzq7l97Oxqblc7Ay7JmPeUHDr0aO3Y8a42bNgc8+cWFOZra3hb0+fhqmoVxHnyxMXZIHeTO727g+zcXL9+RRr49f9RycpVKd+dzmO1YfMWLX7+n5p1/xQ9OfM+hUIhLVjyYlx7cnON6uv/e8qxV6/eClft3duxY0i7P3pPh3+xr95995N2zdze864cq2yYDXK3j7l97Bzkbjr7kdvHzkHuprMfuX3sHORuHztDUiTCR/OPDJYxV98eM2a05s6N/SxJSTKm5TNP430ZuouzQe4md3p3B9l5n65du2junGm6+pqbtGtXTcp3p/NYlZSVq2L9W/rexVdIkj755BP1+MwhkqTLf3GLqrbtUF19nap3vKNzLtz7vi0HdcvVrpr6qBk++SSi93buUU1NnQ45pIM+2r37gM/SbEvm9p535Vhlw2yQu33M7WPnIHfTObHZIHfTObHZIHfTObHZIHfTObHZIHf72BlwSdSTksaYbpImSjpHUpGkPZI2SLrfWjsjytx4SeMlKTf3M8rJ6RY1RE5OjkaPHqXjjz8jrtBV4Wr1LSpo+ryosI+qq3dk7WyQu8md3t1Bdpak3NxcPT5nmmbPfkpPPx3fRaeS3Z3OY2Wt1dmnnaqrfvqDFvfde/uv9n69Vt5Tsr7eKjfXqKFh718G3n57h4oKP703XLVdNmLVIS+kT/a0/i9SPj4+g9xNZz9y+9g5yN109iO3j52D3E1nP3L72DnI3T52BlwS6+Xbf5W0UdJISZMk3StpnKSTjDG3tTZkrZ1qrR1srR0c64SkJJ188lC98cYGVVVtjyt0aVm5Bgw4TP3791VeXp7GjBmt+QuWZO0sucmd6tl9pk2dosr1b+nue6YmNOfKsTp28EA9u7RY7zW+t+2H/9mlbdvj+8O99qMGHdRt75W8u3XNVUnJKg0YcJgGfOFzTXsXPfOs8vJCqquP/hR5Hx+frub2sbOruX3s7GpuHzu7mtvHzq7m9rGzq7l97Oxqblc7Ay6J9fLt/s2eEfl7Y0yptfZWY8wPJFVI+mUiyx5++A864YTj1LPnZ/TWWyX69a9/rxkz5mjMmLM1Z87f4/46DQ0NuuLKG7Ro4aPKCYU0Y+YcVVS8kbWz5CZ3qmcl6RvHD9G4sedq9ZoKlZXu/QPvxhvv0DOLY1+5Ot25ex3aUZ075Sgnx6iooKMuuujHuuyS78sYoyeemK/XX39T/fp2UShkZK10yrfGat5fH9AXDuuny/7vAo2/8npFbER5ubm6/uoJKsjvHTPnrpo69Tq0kz5X1EUNEasdb+/WFVfeoAXz/6q8vFzNnfu4dr63Re9/sCfm23b4+Ph0NbePnV3N7WNnV3P72NnV3D52djW3j51dze1jZ1dzu9oZcImJ9r4ExpiXJE201hYbY86SdKm1dmTjfa9ba78Ua0GnTp9r8xsf1EfiuyouAESze9vyNs/ue/k2AAAAAGSS+j1VLd98Eqq9+TzegLOZrjfPztjHSaxnSv5E0l+MMYdLWivph5JkjDlU0n0pzgYAAAAAAADEL8I5SVdEPSlprV0t6egD3P6OMWZXylIBAAAAAAAAyFqxLnQTzaR2SwEAAAAAAADAG1GfKWmMWd3aXZJiXxkCAAAAAAAAAPYT6z0le0saKWnnfrcbSS+lJBEAAAAAAACArBbrpOQCSd2steX732GMWRrPggauoA0gYFxBGwAAAAA8YSNBJ0CcYl3o5uIo953f/nEAAAAAAAAAZLtkLnQDAAAAAAAAAAnjpCQAAAAAAACAtOKkJAAAAAAAAIC0CuykZPfuB+uxx6ZqzZp/avXqpTr2mKMSmh85YrjWrV2m9RXFmnjtJVk/G+RucruTO5nZaVOnaFv4NZWvej6hufbY7eKxKioq0HNLHtea1Uv1WvkLuuzSVt+Ct90zJzvv27EKcjbI
3T7m9rFzkLvp7EduHzsHuZvOfuT2sXOQu33s7L2I5aP5RwYz1qY2YF6HwgMueOjBu1VcXKKHps9WXl6eunTprA8//M+nfk5ryUKhkCrXLdeo089TOFytFS8v0thxE1RZ+WbMPC7Okpvc6eh8wtBjVFNTq+nT79HAQafENZMJuYPanZ/fS33ye2lV+Vp169ZVK0sW65xzf5jVnX3M7WNnV3P72NnV3D52djW3j51dze1jZ1dz+9jZ1dwudK7fU2XiCuOZ2uu/k9ln4tKs628ez9jHSSDPlDzooG4aOvQYPTR9tiSprq6uxQnJaI4eMkgbNmzWpk1bVFdXp7lz5+nss0Zm7Sy5yZ3qWUlaXlyi93d+EPfPz5TcQe3evv1trSpfK0mqqanV+vVvqrAgP+V7k5338VjR2Y/cPnZ2NbePnV3N7WNnV3P72NnV3D52djW3q50Bl0Q9KWmM6W6MucMYs94Y817jR2XjbYe0dennP99P7777nh78y10qXfkPPXD/nerSpXPc8wWF+doa3tb0ebiqWgVxngxwcTbI3eRO7+4gOyfDx2PVXL9+RRr49f9RycpVadnr6mPMxdw+dg5yN539yO1j5yB309mP3D52DnI3nf3I7WpnwCWxnik5V9JOScOttZ+11n5W0kmNtz3e2pAxZrwxpswYUxaJ1La4PzcnR4MGHaEHHnhYQ44eqdrajzRx4qVxhzam5TNP430ZuouzQe4md3p3B9k5GT4eq326du2iuXOm6eprbtKuXTVp2evqY8zF3D52DnI3nRObDXI3nRObDXI3nRObDXI3nRObDXI3nRObDXK3j50Bl8Q6KdnfWvtba+32fTdYa7dba38r6XOtDVlrp1prB1trB4dCXVvcH66qVjhcrZWle59V9OTfFmrQwCPiDl0VrlbfooKmz4sK+6i6ekfWzga5m9zp3R1k52T4eKwkKTc3V4/PmabZs5/S008/k5bMyc77eKzo7EduHzsHuZvOfuT2sXOQu+nsR24fOwe528fOgEtinZT8tzFmojGm974bjDG9jTE/l7S1rUt37HhH4fA2HX74FyRJJ588VJWVb8Q9X1pWrgEDDlP//n2Vl5enMWNGa/6CJVk7S25yp3o2WT4eK2nvFcsr17+lu++ZGvdMe+x19THmYm4fO7ua28fOrub2sbOruX3s7GpuHzu7mtvHzq7mdrUzJBuJ8NHsI5Plxrj/u5Kuk/TPxhOTVtIOSX+XNCaZxVdedaMenvkHdeiQp42btuhHP7o67tmGhgZdceUNWrTwUeWEQpoxc44qKuI7qeniLLnJnepZSXpk1n06cdhx6tmzhzZvLNOkWyZr+ozHMj53ULu/cfwQjRt7rlavqVBZ6d6/INx44x16ZvELKd2b7LyPx4rOfuT2sbOruX3s7GpuHzu7mtvHzq7m9rGzq7ld7Qy4xMR6XwJjzJclFUlaYa2taXb7KGvt4lgL8joUtvmND3jHBAAAAAAAgJbq91S1fPNJqOYX53A6qZlutz+ZsY+TWFffvlzSPEmXSlprjBnd7O7bUhkMAAAAAAAAQHaK9fLt/5N0lLW2xhjTX9ITxpj+1tp7JGXsmVYAAAAAAAAAmSvWScmcfS/ZttZuNsYM194Tk/3ESUkAAAAAAABkkgiv3nZFrKtvbzfGDNz3SeMJyjMl9ZR0RCqDAQAAAAAAAMhOsU5KXiBpe/MbrLX11toLJA1LWSoAAAAAAAAAWSvqy7etteEo9/2r/eMAAAAAAAAAyHaxnikJAAAAAAAAAO0q1oVuAAAAAAAAADdwoRtn8ExJAAAAAAAAAGkVyEnJww//gspKlzR9vPfuel1+2Y8S+hojRwzXurXLtL6iWBOvvSTrZ4PcTW53cvvYOcjdbZ0tKirQc0se15rVS/Va+Qu67NKLE9qbzO4gZ4PcTWc/cvvYOcjddPYjt4+dg9xNZz9y+9g5yN0+dgZcYaxN7dNa8zoURl0QCoX0782v6BtDz9SWLVWfuq+1
wVAopMp1yzXq9PMUDldrxcuLNHbcBFVWvhkzj4uz5CY3nTNvdzKz+fm91Ce/l1aVr1W3bl21smSxzjn3h1nd2dXcPnZ2NbePnV3N7WNnV3P72NnV3D52djW3j51dze1C5/o9VSauMJ6pufbbvH67mW53PpWxj5PAX7598slDtXHjv1uckIzm6CGDtGHDZm3atEV1dXWaO3eezj5rZNbOkpvcqZ4ld3pnt29/W6vK10qSampqtX79myosyI9rNsjcPh4rHzu7mtvHzq7m9rGzq7l97Oxqbh87u5rbx86u5na1M+CSwE9KfnfMaM2Z83RCMwWF+doa3tb0ebiqWgVx/g+9i7NB7iZ3enfT2Z/c+/TrV6SBX/8flaxcFfeMq51dzO1j5yB309mP3D52DnI3nf3I7WPnIHfT2Y/crnaGJBvho/lHBmvzSUljzDPJLs/Ly9OZZ47QE08uSHR3i9vifRm6i7NB7iZ3enfTObHZIHcnm1uSunbtorlzpunqa27Srl01cc+52tnF3D52DnI3nRObDXI3nRObDXI3nRObDXI3nRObDXI3nRObDXK3j50Bl+RGu9MYc2Rrd0kaGGVuvKTxkhTK6a5QqOsBf96oUSdp1ao1evvtd+NL26gqXK2+RQVNnxcV9lF19Y6snQ1yN7nTu5vO/uTOzc3V43Omafbsp/T004n9G4+rnV3M7WPnIHfT2Y/cPnYOcjed/cjtY+cgd9PZj9yudgZcEuuZkqWSJkuast/HZEmHtDZkrZ1qrR1srR3c2glJSfrud7+V8Eu3Jam0rFwDBhym/v37Ki8vT2PGjNb8BUuydpbc5E71LLnTn3va1CmqXP+W7r5natwzQef28Vj52NnV3D52djW3j51dze1jZ1dz+9jZ1dw+dnY1t6udAZdEfaakpEpJP7bWtrg8lDFmazKLO3fupFNPGaYJE36e8GxDQ4OuuPIGLVr4qHJCIc2YOUcVFW9k7Sy5yZ3qWXKnd/Ybxw/RuLHnavWaCpWV7v3LxY033qFnFr+Q0bl9PFY+dnY1t4+dXc3tY2dXc/vY2dXcPnZ2NbePnV3N7WpnwCUm2vsSGGPOlbTGWvv6Ae77lrU25tMc8zoUtvmND3jHBAAAAAAAgJbq91S1fPNJqOaa0ZxOaqbb5HkZ+ziJ+kxJa+0TxpgvG2NOkVRirW1+JYaPUxsNAAAAAAAASECEc5KuiPqeksaYyyXNk3SZpLXGmNHN7r4tlcEAAAAAAAAAZKdY7yn5f5KOstbWGGP6S3rCGNPfWnuP9l6BGwAAAAAAAAASEuukZM6+l2xbazcbY4Zr74nJfuKkJAAAAAAAAIA2iPrybUnbjTED933SeILyTEk9JR2RymAAAAAAAAAAslOsZ0peIKm++Q3W2npJFxhjHohnAW8vCgBtkxOK9e9GrWuIRNoxCQAAAAC4wXKhG2fEuvp2OMp9/2r/OAAAAAAAAACyXdufhgMAAAAAAAAAbcBJSQAAAAAAAABpxUlJAAAAAAAAAGkVyEnJjh076uV/LdArZc/qtfIXdNOvfpbw1xg5YrjWrV2m9RXFmnjtJVk/G+RucruT28fOQe5O5+wDD0zW1i2r9OorzzXddsMNV2njhlKtLFmslSWLNWrkSRmXO1N209mP3D52DnJ3MrPTpk7RtvBrKl/1fEJz7bGbY0XnTN5NZz9y+9g5yN0+dvZexPLR/CODGWtTGzC3Q+EBF3Tt2kW1tR8pNzdXy5Y+pauuvkklK1+N62uGQiFVrluuUaefp3C4WiteXqSx4yaosvLNrJwlN7npnHm70zHb/OrbQ4ceo5qaWj304N068qhTJe09KVlb85HuuvuBFjtau/o2x4rO2Zrbx84u5z6h8fe06dPv0cBBp8Q1E3RuH4+Vj51dze1jZ1dz+9jZ1dwudK7fU2XiCuOZXZefmdln4tLsoHsXZOzjJLCXb9fWfiRJysvLVW5enhI5OXr0kEHasGGzNm3aorq6Os2dO09nnzUy
a2fJTe5Uz5I782eLi0u0c+cHcX39TMqdCbvp7EduHzu7nHt5cYneb+Pvaa52djG3j51dze1jZ1dz+9jZ1dyudgZcEthJyVAopLLSJaquWq3nn1+mlaWr4p4tKMzX1vC2ps/DVdUqKMjP2tkgd5M7vbvp7EfuZDs395OfXqiy0iV64IHJOuSQ7indzbHyo3OQu+nsT+5kuNrZxdw+dg5yN539yO1j5yB3+9gZcEnUk5LGmIONMbcbY2YZY87f774/JbM4Eolo8JAR6nfYYA0ZPEhf/eqX4p41puUzT+N9pqWLs0HuJnd6d9M5sdkgdwfZeZ+pU2fpK18ZqiFHj9T27W/rt7+9MaW7OVaJzQa528fcPnYOcnd7/T7WFq52djG3j52D3E3nxGaD3E3nxGaD3O1jZ8AlsZ4pOV2SkfSkpO8ZY540xnRsvO/Y1oaMMeONMWXGmLJIpDbqgg8//I/+uewljRwxPO7QVeFq9S0qaPq8qLCPqqt3ZO1skLvJnd7ddPYjd7Kd93n77XcViURkrdVDDz2qIYMHZnRuF7/fPnYOcjed/cmdDFc7u5jbx85B7qazH7l97Bzkbh87Ay6JdVLyC9ba66y1T1trz5b0qqQXjDGfjTZkrZ1qrR1srR0cCnVtcX/Pnj3UvfvBkqROnTrplJNP0Ouvb4g7dGlZuQYMOEz9+/dVXl6exowZrfkLlmTtLLnJnepZcrsz21x+fq+mH48+e5TWrXs9o3O7+P32sbOruX3s7HLuZLja2cXcPnZ2NbePnV3N7WNnV3O72hmSIhE+mn9ksNwY93c0xoSstRFJstb+xhgTlrRMUre2Lu3Tp7ceevBu5eSEFAqF9MQT87Vw0XNxzzc0NOiKK2/QooWPKicU0oyZc1RR8UbWzpKb3KmeJXfmzz788B817IRj1bNnD214a6Vu/fUUDRt2nL7+ta/KWqt//zusSy69LuNyZ8JuOvuR28fOLud+ZNZ9OnHYcerZs4c2byzTpFsma/qMxzI6t4/HysfOrub2sbOruX3s7GpuVzsDLjHR3pfAGPM7SUustc/td/soSX+w1n4x1oLcDoW88QEAtEFOqO3XImvI8H8RAwAAAJCc+j1VLd98Etp16emch2rmoD8uytjHSdT/47XWTpQUNsacYozp1uz2xZIuT3U4AAAAAAAAANkn1tW3L5M0T9JlktYaY0Y3u/s3qQwGAAAAAAAAIDvFek/J8ZKOstbWGGP6S3rCGNPfWnuP9l6VGwAAAAAAAMgMEV697YpYJyVzrLU1kmSt3WyMGa69Jyb7iZOSAAAAAAAAANog1knJ7caYgdbacklqfMbkmZIeknREytMBgMeSuVhNXk6s396jq2uoT2oeAAAAAIBoYl3a9QJJ25vfYK2tt9ZeIGlYylIBAAAAAAAAyFpRn0pjrQ1Hue9f7R8HAAAAAAAAQLZL7vV9AAAAAADg/7N393FWl3X+x9+fwwwqYCoiDjNDjMW2bmVCguYdUpSgibRqtBZo5cavVNJt06z050+3XEspqWwVKiANBN2EVZFQ1ASTgdEZuZkZQYSFGQbvwHTIYm6u3x/cNArMOWfOnHOd61yv5+MxD5kz85nP+z1nRLj8nnMA5Ate6CYYyR6+DQAAAAAAAADdikNJAAAAAAAAADnl5VCyvLxUjy++X6tXPaUXap7Q5CsvS/trjD57pNaueVr1tct07TVXFPysz93kDid3jJ197g6lc3n5AC1adJ+qq5fouece0xVXfOVdH7/66kl6553/1dFHH5VXuQth1ufuGHPH2NnnbjrHkTvGzj530zmO3DF29rk7xs5AKMy57D7Wvqhn2X4LSkr6a0BJf1XXrFGfPr21onKRLrzoq6qrW5/S10wkEqpbu1Rjzr1YDQ1NWv7sQk2YeHlK8yHOkpvcdM6/3fneubjH358yuKSkv0pK+qtmz++5f/rTwxo/fpLq69ervHyAfvnLH+kf//GDOu208/TGGzskSS1trV5yF9Is
ucOZJXc4s+QOZ5bc4cySO5xZcoczm6vdrbsaLaUwkXn762N4UskODr9rUd7+nHi5UnLbtldVXbNGktTcvFP19etVVlqS8vzJw4dqw4ZN2rhxs1paWjRv3gKdP3Z0wc6Sm9zZniV3OLNdmd+27VXVvOv33JdUWnqsJOnHP/6/+v73/1Op/A8q7qs4OoeaO8bOoeaOsXOouWPsHGruGDuHmjvGzqHmDrUzEJJODyXNrMTM/svM7jSzo83s/5nZajObZ2YDuiPAoEHlGnLiR1W5ojrlmdKyEm1p2Lrv/YbGJpWmeKgZ4qzP3eTO7W46x5HbZ+f3v79cQ4Z8RCtX1uizn/20tm7dptWr6/I+d4izPnfHmDvGzj530zmO3DF29rmbznHkjrGzz90xdobknOOtw1s+S3al5ExJtZK2SHpS0juSPitpqaS7DjZkZpPMrMrMqtrbdx70i/fu3Uvz5k7Xt759o95+uznl0Gb7X3ma6jc6xFmfu8md2910Tm/W5+4QO/fu3Utz5tyla665Wa2trfrOd67UzTf/JOt7u2M+xFmfu2PMHWNnn7vpnN6sz910Tm/W5246pzfrczed05v1uTvGzkBIkh1KHuuc+7lz7lZJRzrnfuSc2+yc+7mkQQcbcs5Nc84Nc84NSyR6H/BzioqKdP/c6Zoz50HNn/9oWqEbG5o0sLx03/vlZQPU1PRKwc763E3u3O6mcxy5fXQuKirSnDl3ae7c+VqwYJE+8IFBGjRooFaseFT19ctUVjZAzz77iI499pi8yh3yrM/dMeaOsbPP3XSOI3eMnX3upnMcuWPs7HN3jJ2BkCQ7lOz48d++52M9Mlk8fdoU1dW/pDumTkt7dmVVjQYPPk4VFQNVXFys8ePH6aGHFxfsLLnJne1Zcocz29X5u+76sV588SX97Ge/kiStXfuiBg06Sccff4aOP/4MNTY26dRTP6tXXnktr3KHPEvucGbJHc4sucOZJXc4s+QOZ5bc4cz63g2EoijJxxeYWR/nXLNz7vq9N5rZYEkvdnXp6acN18QJF2nV6lpVrdz9L9YNN9yqRxc9kdJ8W1ubrrr6ei18ZLZ6JBKaOWuuamvXFewsucmd7VlyhzPblfnTThumL33pQq1eXaflyxdKkm688Tb94Q9PprzTR+7QZ8kdziy5w5kldziz5A5nltzhzJI7nFnfu4FQWLLnJTCz4yWVSap0zjV3uH2Mc25RsgVFPct44gMAyLHiHsn+n1PnWtpauykJAAAAgGxo3dW4/5NPQm997WzOoTp43/TFeftzkuzVtydLWiBpsqQ1Zjauw4dvyWYwAAAAAAAAAIUp2aU0kySd5JxrNrMKSQ+YWYVzbqqkvD1pBQAAAAAAAJC/kh1K9tj7kG3n3CYzG6ndB5ODxKEkAAAAAAAAgC5I9urb28xsyN539hxQniepn6QTshkMAAAAAAAAQGFKdqXkJZLe9WoHzrlWSZeY2d2pLEhY1y+obE/yIjwAgAPL9IVqjj7s8C7PvvHO2xntBgAAAIAua+csKRSdHko65xo6+dgz3R8HAAAAAAAAQKFL9vBtAAAAAAAAAOhWHEoCAAAAAAAAyCkOJQEAAAAAAADkVE4PJafdfbsattSo+vnH99121FFHauHC2Vq7dqkWLpytI488IqWvNfrskVq75mnV1y7TtddckVaOEGd97o4xd3l5qR5ffL9Wr3pKL9Q8oclXXpaz3dxXceQOqfP7jjhcv/rtVC1buVBLVzyiYcOH6MijjtC8+b/Ws88v0rz5v9YRR74v73Lnw6zP3THmjrGzz910jiN3jJ197g6x8/RpU7S14QXVVC9Je2cmezPd7TO3r/vK599xMp0Pcdb3biAE5rL8Ctc9Dynft+CMM05Rc/NOzfjNHRr68U9Lkv7zlu9r+/Y3ddvtd+qab1+ho446Qt/7/i2SDv7q24lEQnVrl2rMuReroaFJy59dqAkTL1dd3fqkeUKcJXfuc5eU9NeAkv6qrlmj
Pn16a0XlIl140VfzOnes91WIuUPo3PHVt3/2X7eq8tkq/e63D6i4uFiH9TpUV/37/9GbO/6sn/90uib/29d0xJHv0w9unCLp4K++HeL3O4T7itzxdg41d4ydQ80dY+dQc/vsfObev+PNmKohQ0eltK+7cmey21dun/eVr7/jZDof4myudrfuarSUwkTmz1/5NC+/3cERMx7P25+TnF4puWxZpXbsePNdt40de7buufd+SdI9996v888fnfTrnDx8qDZs2KSNGzerpaVF8+Yt0Pljk8+FOkvu3Ofetu1VVdeskSQ1N+9Uff16lZWW5HXuWO+rEHOH1LnP4b116unD9LvfPiBJamlp0Vt/fltjzh2lubPnS5Lmzp6vcz776bzKnQ+z5A5nltzhzJI7nFlyhzOb6fzSZZXa/p6/4+Vib6a7feX2eV/5+jtOpvMhzvreDYQi7UNJM+vfnQH69++nbdtelbT7N8ljjjk66UxpWYm2NGzd935DY5NKU/zNNMRZn7tjzd3RoEHlGnLiR1W5ojrru7mv4sgdUudBFQP1xuvbNfWX/6nHl/5eP/n5f6hXr8N0zDFH69VXXpMkvfrKa+p3TN+8yp0Psz53x5g7xs4+d9M5jtwxdva5O9TOmfC1N1OFcF/l8u84mc6HOOt7NxCKTg8lzazve96OlrTCzI4ys+R/A80Ss/2vPE31YeghzvrcHWvuvXr37qV5c6frW9++UW+/3Zz13dxX6c363B1L56KiIp1w4oc169dz9OkzL9Bfdr6jyf/2tZSzZrI79Fmfu2PMHWNnn7vpnN6sz910Tm/W5+5QO2fC195MhX5f5frvOJnOhzjrezcQimRXSr4u6bkOb1WSyiQ9v+fXB2Rmk8ysysyq2tt2dqdQB78AACAASURBVLrg1VdfV0nJ7osvS0r667XX3kgaurGhSQPLS/e9X142QE1NrySdC3XW5+5Yc0u7D2Punztdc+Y8qPnzH015LtTO5A5jNte7tzZu09bGV/T8c6skSQ8t+INOOPHDeu21N9T/2GMkSf2PPUavv7Y9r3Lnw6zP3THmjrGzz910jiN3jJ197g61cyZ87c1UyPeVj7/jZDof4qzv3UAokh1KXivpRUnnO+eOc84dJ6lhz68/cLAh59w059ww59ywRI/enS546OHHNHHC5yVJEyd8Xg89tDhp6JVVNRo8+DhVVAxUcXGxxo8fp4ceTj4X6iy5c59b2v2KfHX1L+mOqdPSmgu1M7nDmM317tdefV1bG5v0wcHHSZLOPOtUrXtxg/7w6BP6whc/J0n6whc/p0ULk79yZYjf75Duq9hzx9g51Nwxdg41d4ydQ83ts3MmfO3NVMj3lY+/42Q6H+Ks793Ra3e8dXzLY0WdfdA5d7uZ3Sfpp2a2RdKNkrrc6J7f/kIjRpyqfv366uUNK3Xzf0zRbbf9QrNn36Uvf+VftGVLoy6++OtJv05bW5uuuvp6LXxktnokEpo5a65qa9ellCHEWXLnPvfppw3XxAkXadXqWlWt3P2b/w033KpHFz2Rt7ljva9CzB1a5+9d+wP98le3qWdxsf530xZddcX3lLCEps/6qb448UI1NjTpXy+9Ou9y+54ldziz5A5nltzhzJI7nNlM5++9506dtefveJtertJNN9+uGTPvy0nuTHb7yu3zvvL1d5xM50Oc9b0bCIWl8ZwGYyV9X1KFcy7lZ1jteUh5lw8x23nOBADw4ujDDu/y7BvvvN2NSQAAAAAcSOuuxv2ffBL686WjOEzq4IhZS/L25yTpq2+b2fFmNkrSk5I+KenTe24fk+VsAAAAAAAAAApQslff/qakBZImS1oj6Wzn3Jo9H74ly9kAAAAAAAAAFKBOn1NS0tckneScazazCkkPmFmFc26qpLy9/BMAAAAAAAARavcdAKlKdijZwznXLEnOuU1mNlK7DyYHiUNJAAAAAAAAAF2Q7Dklt5nZkL3v7DmgPE9SP0knZDMYAAAAAAAAgMKU
7ErJSyS1drzBOdcq6RIzuzuVBbyCNgCEh1fQBgAAAABkU6eHks65hk4+9kz3xwEAAAAAAABQ6JJdKQkAAAAAAAAEwbXziN1QJHtOSQAAAAAAAADoVhxKAgAAAAAAAMgpb4eSo88eqbVrnlZ97TJde80VOZ0PcdbnbnKHkzvGzj530zmO3DF29rmbznHkjrFzJvPTp03R1oYXVFO9JO2dmezNdNbn7hhzx9jZ52465y53eXmpHl98v1avekov1DyhyVdelpO9mc763g2EwFyWXx27qGfZfgsSiYTq1i7VmHMvVkNDk5Y/u1ATJl6uurr1KX3NTOZDnCU3uemcf7vpHEfuGDuHmjvGzqHmjrFzpvNnnnGKmpt3asaMqRoydFRK+7pjL/dVOLlj7Bxq7hg7ZzpfUtJfA0r6q7pmjfr06a0VlYt04UVfLejOqc627mq0lMJE5s0vfYonlezgyN89kbc/J16ulDx5+FBt2LBJGzduVktLi+bNW6Dzx47OyXyIs+Qmd7ZnyR3OLLnDmSV3OLPkDmc21txLl1Vq+443U97VXXu5r8LJHWPnUHPH2DnT+W3bXlV1zRpJUnPzTtXXr1dZaUnW94Z6XwEh8XIoWVpWoi0NW/e939DYpNIUf1PJdD7EWZ+7yZ3b3XSOI3eMnX3upnMcuWPs7HM3nXObOxOhdiY3nfN5N539/R44aFC5hpz4UVWuqM763lDvK0hqd7x1fMtjnR5KmtmYDr8+wsx+bWarzGy2mR3b1aVm+185ms7DyDOZD3HW525y53Y3ndOb9bmbzunN+txN5/Rmfe6mc3qzPnfTOb3Z7pjvqlA7kzt3sz53x5g7xs7dMS9JvXv30ry50/Wtb9+ot99uzvreUO8rICTJrpS8pcOvp0hqkjRW0kpJdx9syMwmmVmVmVW1t+/c7+ONDU0aWF667/3ysgFqanol5dCZzIc463M3uXO7m85x5I6xs8/ddI4jd4ydfe6mc25zZyLUzuSmcz7vpnPufw8sKirS/XOna86cBzV//qM52RvqfQWEJJ2Hbw9zzl3vnPtf59xPJVUc7BOdc9Occ8Occ8MSid77fXxlVY0GDz5OFRUDVVxcrPHjx+mhhxenHCST+RBnyU3ubM+SO5xZcoczS+5wZskdzmysuTMRamdy0zmfd9M5978HTp82RXX1L+mOqdNSnsl0b6j3FRCSoiQf729m35Jkkt5nZub+fs1wl5+Psq2tTVddfb0WPjJbPRIJzZw1V7W163IyH+Isucmd7VlyhzNL7nBmyR3OLLnDmY0197333KmzRpyqfv36atPLVbrp5ts1Y+Z9Wd/LfRVO7hg7h5o7xs6Zzp9+2nBNnHCRVq2uVdXK3QdzN9xwqx5d9ERW94Z6XwEhsc6el8DMbnzPTb90zr1mZiWSfuycuyTZgqKeZTzxAQAAAAAAQDdq3dW4/5NPQm9+4ZOcQ3Vw5Nwn8/bnpNMrJZ1zN5nZ8ZLKJFU655r33L7NzGbnIiAAAAAAAACAwpLs1bcnS1ogabKkNWY2rsOHbznwFAAAAAAAAAAcXLLnlJwk6STnXLOZVUh6wMwqnHNTtft5JgEAAAAAAAAgLckOJXt0eMj2JjMbqd0Hk4PEoSQAAAAAAACALkh2KLnNzIY452okac8Vk+dJ+o2kE7KeDgAAAAAAAEiRa+d1bkLR6XNKSrpE0raONzjnWve86vaIrKUCAAAAAAAAULCSvfp2Qycfe6b74wAAAAAAAAAodMmulAQAAAAAAACAbsWhJAAAAAAAAICc4lASAAAAAAAAQE55O5QcffZIrV3ztOprl+naa67I6XyIsz53kzuc3JnMTp82RVsbXlBN9ZK05rpjN/dVHJ0zmT/kkEP07DMP67mqx/RCzRO68f/+e072Zjrrc3eMuWPs7HM3nePIHWNnn7vpHEfuGDv73B1j5+i18/autzxmzmX3pdKLepbttyCRSKhu7VKNOfdiNTQ0afmzCzVh4uWqq1uf0tfM
ZD7EWXKTOxedzzzjFDU379SMGVM1ZOiolGbyIXeI3+8YO3fHfO/evbRz519UVFSkp596UP/2rRtVueL5rO7lvgond4ydQ80dY+dQc8fYOdTcMXYONXeMnUPNHULn1l2NllKYyOy4cGR2D7oCc9R/P5W3PydpXylpZkdnuvTk4UO1YcMmbdy4WS0tLZo3b4HOHzs6J/MhzpKb3NmelaSlyyq1fcebKX9+vuQO8fsdY+fumN+58y+SpOLiIhUVFyvV/6kWamdy0zmfd9M5jtwxdg41d4ydQ80dY+dQc4faGQhJp4eSZnarmfXb8+thZvaypEoz+18zO6urS0vLSrSlYeu+9xsam1RaWpKT+RBnfe4md253++ycCe4rOudiPpFIqGrlYjU1rtKSJU9rxcrqrO/lvsrtbjrHkTvGzj530zmO3DF29rmbznHkDrUzEJJkV0p+1jn3+p5f3ybpC865wZI+I2nKwYbMbJKZVZlZVXv7zgN9fL/b0nkYeSbzIc763E3u3O722TkT3Fe5m/W522duSWpvb9ew4Wdr0HHDNHzYUH3kI/+Y9b3cV7ndTef0Zn3upnN6sz530zm9WZ+76ZzerM/ddE5v1ufuGDsDISlK8vFiMytyzrVKOsw5t1KSnHPrzOyQgw0556ZJmiYd+DklGxuaNLC8dN/75WUD1NT0SsqhM5kPcdbnbnLndrfPzpngvqJzLub3+vOf39Ifn/7T7if/XvtiVvdyX+V2N53jyB1jZ5+76RxH7hg7+9xN5zhyh9oZkmvnADcUya6UvFPSQjP7lKRFZnaHmY0ws5sk1XR16cqqGg0efJwqKgaquLhY48eP00MPL87JfIiz5CZ3tmczxX1F52zP9+vXV0cc8T5J0qGHHqpRnzpTL764Iet7ua/CyR1j51Bzx9g51Nwxdg41d4ydQ80dY+dQc4faGQhJp1dKOud+bmarJX1D0of2fP6HJM2X9IOuLm1ra9NVV1+vhY/MVo9EQjNnzVVt7bqczIc4S25yZ3tWku69506dNeJU9evXV5tertJNN9+uGTPvy/vcIX6/Y+yc6fyAAcfqN7++Qz16JJRIJPTAAw/pkYWPZ30v91U4uWPsHGruGDuHmjvGzqHmjrFzqLlj7Bxq7lA7AyGxZM9LYGbHSyqTVOmca+5w+xjn3KJkCw708G0AAAAAAAB0Xeuuxv2ffBLa/s9ncQ7VQd8H/5i3PyfJXn37m5IWSJosaY2Zjevw4VuyGQwAAAAAAABAYUr2Qjdfk3SSc67ZzCokPWBmFc65qZLy9qQVAAAAAAAAEWr3HQCpSnYo2WPvQ7adc5vMbKR2H0wOEoeSAAAAAAAAALog2atvbzOzIXvf2XNAeZ6kfpJOyGYwAAAAAAAAAIUp2aHkJZK2dbzBOdfqnLtE0oispQIAAAAAAABQsDp9+LZzrqGTjz3T/XEAAAAAAAAAFLpkV0oCAAAAAAAAQLdK9kI3AAAAAAAAQBAcr74dDK6UBAAAAAAAAJBT3g4lp0+boq0NL6imekmX5kefPVJr1zyt+tpluvaaK4KYjbGzz90x5o6xs8/ddA4jd3l5qR5ffL9Wr3pKL9Q8oclXXpazzJnOx3Zf+Zz1uTvG3DF29rmbznHkjrGzz910jiN3qJ2BUJhzLqsLinqWHXDBmWecoubmnZoxY6qGDB2V1tdMJBKqW7tUY869WA0NTVr+7EJNmHi56urW5+2sFGdncocxS+5wZsmd/mxJSX8NKOmv6po16tOnt1ZULtKFF321oDvHmDvGzqHmjrFzqLlj7Bxq7hg7h5o7xs6h5g6hc+uuRkspTGTeGHtWdg+6AnP0Q3/M258Tb1dKLl1Wqe073uzS7MnDh2rDhk3auHGzWlpaNG/eAp0/dnRez0pxdiZ3GLPkDmeW3OnPbtv2qqpr1kiSmpt3qr5+vcpKS7K+N9P5GO8rOseRO8bOoeaOsXOouWPsHGruGDuHmjvUzkBIgnxOydKyEm1p2Lrv/YbG
JpWm+BdMX7OZCrUzucOY9bk7xtwxdva9e69Bg8o15MSPqnJFdU72cl+FMetzd4y5Y+zsczed48gdY2efu+kcR+5QO0NSO2/vestjnR5KmtnzZna9mX0wV4FSYbb/laepPgzd12ymQu1M7jBmfe6OMXeMnX3vlqTevXtp3tzp+ta3b9TbbzfnZC/3VRizPnfHmDvGzj530zm9WZ+76ZzerM/ddE5v1ufuGDsDIUl2peRRko6U9KSZrTCzfzOz0mRf1MwmmVmVmVW1t+/slqAdNTY0aWD532OUlw1QU9MreT2bqVA7kzuMWZ+7Y8wdY2ffu4uKinT/3OmaM+dBzZ//aE4yZzof431F5zhyx9jZ5246x5E7xs4+d9M5jtyhdgZCkuxQcodz7tvOufdL+ndJ/yDpeTN70swmHWzIOTfNOTfMOTcskejdnXklSSurajR48HGqqBio4uJijR8/Tg89vDivZzMVamdyhzFL7nBmyd213dOnTVFd/Uu6Y+q0lGe6Yy/3VRiz5A5nltzhzJI7nFlyhzNL7nBmfe8GQlGU6ic655ZKWmpmkyV9RtIXJKX3t7sO7r3nTp014lT169dXm16u0k03364ZM+9LabatrU1XXX29Fj4yWz0SCc2cNVe1tevyelaKszO5w5gldziz5E5/9vTThmvihIu0anWtqlbu/sPcDTfcqkcXPZHVvZnOx3hf0TmO3DF2DjV3jJ1DzR1j51Bzx9g51NyhdgZCYp09L4GZ3eec+5dMFhT1LOOJDwAAAAAAALpR667G/Z98Enr9nLM4h+qg36N/zNufk04fvu2c+xczO97MRplZn44fM7Mx2Y0GAAAAAAAAoBAle/XtyZIWSJosaY2Zjevw4VuyGQwAAAAAAABAYUr2nJKTJJ3knGs2swpJD5hZhXNuqqS8vfwTAAAAAAAAQP5KdijZwznXLEnOuU1mNlK7DyYHiUNJAAAAAAAAAF3Q6cO3JW0zsyF739lzQHmepH6STshmMAAAAAAAAACFKdmVkpdIau14g3OuVdIlZnZ31lIBAAAAAAAA6Wr3HQCp6vRQ0jnX0MnHnun+OAAAAAAAAAAKXbKHbwMAAAAAAABAt+JQEgAAAAAAAEBOcSgJAAAAAAAAIKe8HUpOnzZFWxteUE31ki7Njz57pNaueVr1tct07TVXFPysz93kDid3jJ197qZz4ecuLy/V44vv1+pVT+mFmic0+crL0tqbyW6fsz530zmO3DF29rmbznHkjrGzz910jiN3qJ1j59p56/iWz8w5l9UFRT3LDrjgzDNOUXPzTs2YMVVDho5K62smEgnVrV2qMederIaGJi1/dqEmTLxcdXXrC3KW3OSmc/7tpnMcuUtK+mtASX9V16xRnz69taJykS686KsF3TnU3DF2DjV3jJ1DzR1j51Bzx9g51Nwxdg41dwidW3c1WkphIvPaZ87K7kFXYI557I95+3Pi7UrJpcsqtX3Hm12aPXn4UG3YsEkbN25WS0uL5s1boPPHji7YWXKTO9uz5A5nlty5nd227VVV16yRJDU371R9/XqVlZakNOszd4z3VYydQ80dY+dQc8fYOdTcMXYONXeMnUPNHWpnICSdHkqa2TAze9LM7jWzgWb2mJn92cxWmtnQXIV8r9KyEm1p2Lrv/YbGJpWm+JfEEGd97iZ3bnfTOY7cMXb2uTvT3HsNGlSuISd+VJUrqlOeCbVziLlj7OxzN53jyB1jZ5+76RxH7hg7+9wdY2cgJMmulPylpB9LekTSnyTd7Zw7QtJ1ez52QGY2ycyqzKyqvX1nt4Xt8PX3uy3Vh6GHOOtzN7lzu5vO6c363E3n9GZ97s40tyT17t1L8+ZO17e+faPefrs55blQO4eYO8bOPnfTOb1Zn7vpnN6sz910Tm/W5246pzfrc3eMnYGQFCX5eLFz7lFJMrMfOecekCTn3BIzu/1gQ865aZKmSQd/TslMNDY0aWB56b73y8sGqKnplYKd9bmb3Lnd
Tec4csfY2efuTHMXFRXp/rnTNWfOg5o//9GU5zLdzX1F53zeTec4csfY2eduOseRO8bOPnfH2Bn5/+Iu+LtkV0r+1czONrPPS3Jm9jlJMrOzJLVlPd1BrKyq0eDBx6miYqCKi4s1fvw4PfTw4oKdJTe5sz1L7nBmyZ373NOnTVFd/Uu6Y+q0lGd8547xvoqxc6i5Y+wcau4YO4eaO8bOoeaOsXOouUPtDIQk2ZWSX9fuh2+3Sxot6RtmNlNSo6SvZbL43nvu1FkjTlW/fn216eUq3XTz7Zox876UZtva2nTV1ddr4SOz1SOR0MxZc1Vbu65gZ8lN7mzPkjucWXLndvb004Zr4oSLtGp1rapW7v6D4A033KpHFz2R17ljvK9i7Bxq7hg7h5o7xs6h5o6xc6i5Y+wcau5QOwMhsWTPS2Bm/ySpVFKlc665w+1jnHOLki3IxsO3AQAAAAAAYta6q3H/J5+EXh11FudQHfRf8se8/TlJ9urb35T0oKTJktaY2bgOH74lm8EAAAAAAAAAFKZkD9/+mqRhzrlmM6uQ9ICZVTjnpkrK25NWAAAAAAAAxIcXuglHskPJHnsfsu2c22RmI7X7YHKQOJQEAAAAAAAA0AXJXn17m5kN2fvOngPK8yT1k3RCNoMBAAAAAAAAKEzJDiUvkbSt4w3OuVbn3CWSRmQtFQAAAAAAAICC1enDt51zDZ187JnujwMAAAAAAACg0CW7UhIAAAAAAAAAulWyF7oBAAAAAAAAwuB4XeZQcKUkAAAAAAAAgJzyeiiZSCS0csUftODBWWnPjj57pNaueVr1tct07TVXFPysz93kDid3jJ197qZzHLlD7Tx92hRtbXhBNdVL0prrjt0hzvrcHWPuGDv73E3nOHLH2NnnbjrHkTvUzkAozDmX1QVFPcsOuuDqqybppJM+pvcdfrjG/fOlKX/NRCKhurVLNebci9XQ0KTlzy7UhImXq65ufUHOkpvcdM6/3XSOI3eonSXpzDNOUXPzTs2YMVVDho5KacZ37hjvqxhzx9g51Nwxdg41d4ydQ80dY+dQc4fQuXVXI49TPoBXRo7M7kFXYI596qm8/TnxdqVkWdkAnXvOKP3mN3PSnj15+FBt2LBJGzduVktLi+bNW6Dzx44u2Flykzvbs+QOZ5bc4cz63r10WaW273gz5c/Ph9wx3lcx5o6xc6i5Y+wcau4YO4eaO8bOoeYOtTMQEm+Hkj+ZcpOu++4P1N7envZsaVmJtjRs3fd+Q2OTSktLCnbW525y53Y3nePIHWNnn7tj7JypEL/fod5XMeaOsbPP3XSOI3eMnX3upnMcuUPtDMm189bxLZ91eihpZn3M7GYzW2tmfzaz18xsuZl9OZOlnz3303r11df1fPXqLs2b7X/laaoPQw9x1uducud2N53Tm/W5m87pzfrcHWPnTIX4/Q71vooxd4ydfe6mc3qzPnfTOb1Zn7vpnN6sz90xdgZCUpTk47+T9KCk0ZLGS+ot6T5J15vZh5xz3zvQkJlNkjRJkqzHEUoker/r46edNkxjzztb54z5lA499BC9732Ha9bMn+nSL38zpdCNDU0aWF667/3ysgFqanqlYGd97iZ3bnfTOY7cMXb2uTvGzpkK8fsd6n0VY+4YO/vcTec4csfY2eduOseRO9TOQEiSPXy7wjk30znX4Jz7iaTznXPrJX1F0gUHG3LOTXPODXPODXvvgaQkff/6W1XxgWEa/KFP6EsTLteTTz6T8oGkJK2sqtHgwcepomKgiouLNX78OD308OKCnSU3ubM9S+5wZskdzqzv3ZkI8fsd6n0VY+4YO4eaO8bOoeaOsXOouWPsHGruUDsDIUl2peROMzvDObfMzMZK2i5Jzrl2O9D1xDnS1tamq66+Xgsfma0eiYRmzpqr2tp1BTtLbnJne5bc4cySO5xZ37vvvedOnTXiVPXr11ebXq7STTffrhkz78vr3DHeVzHmjrFzqLlj7Bxq7hg7h5o7
xs6h5g61MxAS6+x5CczsREnTJX1I0hpJlznnXjSzYyRd7Jz7WbIFRT3LeOIDAAAAAACAbtS6q9HbxWL5rOmMT3IO1cGAZU/m7c9Jp1dKOudeMLNLJZVJWu6ca95z+2tmxjE9AAAAAAAAgLQle/Xtb2r3C91cKWmNmY3r8OFbshkMAAAAAAAAQGFK9pySX5M0zDnXbGYVkh4wswrn3FRJeXv5JwAAAAAAAID8lexQskeHh2xvMrOR2n0wOUgcSgIAAAAAAADogk4fvi1pm5kN2fvOngPK8yT1k3RCNoMBAAAAAAAAKEzJrpS8RFJrxxucc62SLjGzu7OWCgAAAAAAAEiTa/edAKlK9urbDZ187JnujwMAAAAAAACg0CV7+DYAAAAAAAAAdCsOJQEAAAAAAADkFIeSAAAAAAAAAHLK26Hk9GlTtLXhBdVUL+nS/OizR2rtmqdVX7tM115zRcHP+txN7nByx9jZ5246x5E7xs4+d9M5jtwxdva5m85x5I6xs8/ddI4jd6idY+ec8dbhLZ+Zcy6rC4p6lh1wwZlnnKLm5p2aMWOqhgwdldbXTCQSqlu7VGPOvVgNDU1a/uxCTZh4uerq1hfkLLnJTef8203nOHLH2DnU3DF2DjV3jJ1DzR1j51Bzx9g51Nwxdg41dwidW3c15veJkyeNp34quwddgSl79om8/TnxdqXk0mWV2r7jzS7Nnjx8qDZs2KSNGzerpaVF8+Yt0PljRxfsLLnJne1ZcoczS+5wZskdziy5w5kldziz5A5nltzhzJI7nFnfu4FQBPmckqVlJdrSsHXf+w2NTSotLSnYWZ+7yZ3b3XSOI3eMnX3upnMcuWPs7HM3nePIHWNnn7vpHEfuGDv73B1jZyAkRZ190MyKJF0m6Z8llUpykrZKWiDp1865lqwnPHCu/W5L9WHoIc763E3u3O6mc3qzPnfTOb1Zn7vpnN6sz910Tm/W5246pzfrczed05v1uZvO6c363E3n9GZ97o6xMxCSTg8lJd0j6U1J/09Sw57byiVdKuleSV840JCZTZI0SZKsxxFKJHp3R9Z9GhuaNLC8dN/75WUD1NT0SsHO+txN7tzupnMcuWPs7HM3nePIHWNnn7vpHEfuGDv73E3nOHLH2Nnn7hg7Q3LtvhMgVckevv1x59w3nHPLnXMNe96WO+e+IWnowYacc9Occ8Occ8O6+0BSklZW1Wjw4ONUUTFQxcXFGj9+nB56eHHBzpKb3NmeJXc4s+QOZ5bc4cySO5xZcoczS+5wZskdziy5w5n1vRsIRbIrJXeY2ecl/bdzu8+azSwh6fOSdmSy+N577tRZI05Vv359tenlKt108+2aMfO+lGbb2tp01dXXa+Ejs9UjkdDMWXNVW7uuYGfJTe5sz5I7nFlyhzNL7nBmyR3OLLnDmSV3OLPkDmeW3OHM+t4NhMI6e14CM6uQ9CNJn9Tuh3FL0pGSnpR0nXNuY7IFRT3LeOIDAAAAAACAbtS6q3H/J5+EGk75FOdQHZRXPpG3PyedXinpnNtkZj+RNEXSBkn/JOkTkmpTOZAEAAAAAAAAgPdK9urbN0o6Z8/nPSbpZEl/lHSdmQ11zv0w+xEBAAAAAAAAFJJkzyl5kaQhkg6RtE1SuXPuLTO7TVKlJA4lAQAAAAAAkBdce94+WhnvkezVt1udc23Oub9I2uCce0uSnHPvSOJF1gEAAAAAAACkLdmVkrvMrNeeQ8mT9t5oZkcoxUPJokSPLodrbW/r8iwAADEo7dO3y7Nbm7d3YxJgf8U9kv1R8+Ba2lq7MQkAAADyTbI/KY5wzv1NkpxzHQ8hiyVd8/PJ6gAAIABJREFUmrVUAAAAAAAAAApWslff/ttBbn9d0utZSQQAAAAAAACgoHX9MTUAAAAAAABAHnHOdwKkKtkL3QAAAAAAAABAt+JQEgAAAAAAAEBO5fRQ8u67b9Pmzc/ruece23fbCSf8k5566kFVVS3Wf//3b3T44X1S+lqj
zx6ptWueVn3tMl17zRVp5Qhx1uducoeTO9TO06dN0daGF1RTvSStue7YHeJsqN8vn7tj6XzZNybq8T89qMee+b1+Pv1HOuSQnpp69616svJ/9Ngzv9dtP79ZRUWpPXNLiN/vkO6r7pr1uTud2fLyAVq06D5VVy/Rc889piuu+Iok6YILztVzzz2mnTs36uMfPyHvcnfnrM/ddI4jd4ydfe6mcxy5Q+0MhMJclh9sf+ih79+34IwzTlZz81/061//VCed9BlJ0rJlD+m73/2Bli6t1KWXjldFxUDddNMUSVJre9sBv2YikVDd2qUac+7Famho0vJnF2rCxMtVV7c+aZ4QZ8lN7kLuLElnnnGKmpt3asaMqRoydFRKM75z8/0K52es0DuX9ukrSTp2QH/998JZGnXq5/S3v/5Nv/zN7XrisaV647XtevLxpZKkn0//kSr/9JzunTFPkrS1ebu33Pk0S+7szRb32H0IXlLSXyUl/VVTs0Z9+vTWn/70sMaPnyTnnNrb2/WLX9yi7373h3r++dX7ZlvaWoPsnG+76RxH7hg7h5o7xs6h5g6hc+uuRkspTGQ2DxvFs0p28P6qJXn7c5LTKyWXLVuhHTvefNdtH/rQB7R0aaUkacmSpfrc585N+nVOHj5UGzZs0saNm9XS0qJ58xbo/LGjU8oQ4iy5yZ3tWd+7ly6r1Pb3/N6Q77n5foXzMxZT56KiIh166CHq0aOHDjvsUL2y7dV9B5KSVPP8Gg0oPTbvcvueJXf2Z7dte1U1NWskSc3NO1Vf/5JKS4/Viy++pPXrX05pp4/c3TUbau4YO4eaO8bOoeaOsXOouUPtDMm1G28d3vJZlw8lzWxadwRYu/ZFnXfe7qsmL7jgsyovH5B0prSsRFsatu57v6GxSaWlJSntC3HW525y53Z3jJ0zFeL3O8bvl8/dsXR+pelVTfvFTC1f9Ziq6p7QW281a+mTz+77eFFRkS4Yf57+uOSZvMqdD7M+d8eY+/3vL9eQIR/RypU1KX1+d+7mvqJzPu+mcxy5Y+zsc3eMnYGQdHooaWZ9D/J2tKSDXtJoZpPMrMrMqtramjsN8H/+zzX6+tcv1Z/+9IgOP7yPdu1qSRrabP+T3lQfhh7irM/d5M7t7hg7ZyrE73eM3y+fu2PpfMQR79NnzvmkTh86RsM/PEq9eh2mf/78efs+/sPbv68Vzz6nFcufz6vc+TDrc3dsuXv37qU5c+7SNdfcrLff7vzPiN292+esz910Tm/W5246pzfrczed05v1uTvGzkBIkj3b/WuS/ldSx38j3J73+x9syDk3TdI06d3PKXkg69Zt0HnnTZAkDR58nMaM+VTS0I0NTRpYXrrv/fKyAWpqeiXpXKizPneTO7e7Y+ycqRC/3zF+v3zujqXzGSM/oS2bG7X9jR2SpEUPP66TTj5RD97/sK6+9uvqe3RfXfdvV+dd7nyY9bk7ptxFRUWaM+cuzZ07XwsWLEppT3ft9j3rczed48gdY2efu+kcR+5QOwMhSfbw7ZcljXTOHdfh7QPOueMkdcu/Ecccc7Sk3f8n4Lvf/aZ+9at7k86srKrR4MHHqaJioIqLizV+/Dg99PDilPaFOEtucmd71vfuTIT4/Y7x++VzdyydGxua9PFhH9Ohhx0qSTp9xCl6ad1G/cvECzTiU6fryq9dm/L/YQ/x+x3SfRVr7rvu+rFefPEl/exnv0ppR77k7o7ZUHPH2DnU3DF2DjV3jJ1DzR1qZyAkya6UvEPSUZI2H+BjP0532W9/+3Odeeap6tfvKL30UqV+8IOfqHfv3vr61y+RJM2fv0izZs1L+nXa2tp01dXXa+Ejs9UjkdDMWXNVW7supQwhzpKb3Nme9b373nvu1FkjTlW/fn216eUq3XTz7Zox8768zs33K5yfsVg61zy3Wgv/5zEtfHKe2tpatXZVvWbPul/1DSvUuKVJ8/+w+3/6LXp4iabedlfe5M6HWXJnf/a004bpS1+6
UKtX12n58oWSpBtvvE2HHNJTP/nJTerXr69+//sZWrWqVueff0ne5O6u2VBzx9g51Nwxdg41d4ydQ80damegK8zsSEm/kvRR7X6E9FclvShprqQKSZskjXfO7bDdzy8wVbuf1vEvkr7snEv+HFEH2pvsqgkzO1mSc86tNLMPSxojqd45tzCVBckevt2Z1va2ro4CABCF0j59uzy7tXl7NyYB9lfcI9n//z64lrbWbkwCAEDhad3VmN8vrezJpiGf4Qk4O6ioeSzpz4mZzZK01Dn3KzPrKamXpO9J2u6cu9XMrpN0lHPuO2Z2rqTJ2n0oeYqkqc65U7qSrdM/KZrZjZLOkVRkZo/tWfaUpOvMbKhz7oddWQoAAAAAAADALzN7n6QRkr4sSc65XZJ2mdk4SSP3fNos7T4P/I6kcZJ+63Zf5bjczI40swHOuaZ0dyf739cXSRoi6RBJ2ySVO+feMrPbJFVK4lASAAAAAAAAyENmNknSpA43TdvzAtV7fUC7X+h6hpmdKOk5SVdJOnbvQaNzrsnM9r7gdZmkLR3mG/bc1u2Hkq3OuTZJfzGzDc65t/aEecfM2tNdBgAAAAAAACA39hxATuvkU4okfVzSZOdcpZlNlXRdJ59/oIeDd+kh88lefXuXmfXa8+uT9m03O0ISh5IAAAAAAABAuBokNTjnKve8/4B2H1K+YmYDJGnPP1/t8PkDO8yXS9ralcXJrpQc4Zz7myQ55zoeQhZLujSVBbxYDQAA2ZPJi9UkrOvPjd6e5IXyAIkXqwEAALnHH1PT45zbZmZbzOwfnXMvSholqXbP26WSbt3zzwV7Rv5H0pVmdp92v/bMn7vyfJJSkkPJvQeSB7j9dUmvd2UhAAAAAAAAgLwxWdLv9rzy9suSvqLdj66eZ2aXSdos6fN7Pnehdr/y9kuS/rLnc7sk2ZWSAAAAAAAAAAqUc65G0rADfGjUAT7XSbqiO/Yme05JAAAAAAAAAOhWHEoCAAAAAAAAyCkvh5Ll5aV6fPH9Wr3qKb1Q84QmX3lZ2l9j9NkjtXbN06qvXaZrr0nvqtEQZ33uJnc4uWPs7HM3nePIHWPnK6+8TNXPP66a6iWaPDmO/0b73B1j7hg7+9xN5zhyx9jZ5246x5E71M6xc+3GW4e3fGYuyy9LVNSzbL8FJSX9NaCkv6pr1qhPn95aUblIF170VdXVrU/payYSCdWtXaox516shoYmLX92oSZMvDyl+RBnyU1uOuffbjrHkbvQOx/o1bc/8uF/1L333qnTTj9Pu3a16OGH79Xkyd/TSy9tfNfnHezVt/O9c77tjjF3jJ1DzR1j51Bzx9g51Nwxdg41dwidW3c15veJkycvn3A2r7/dwQdWL87bnxMvV0pu2/aqqmvWSJKam3eqvn69ykpLUp4/efhQbdiwSRs3blZLS4vmzVug88eOLthZcpM727PkDmeW3OHMhpr7+OMHq7KyWu+881e1tbVp6dPLNW7cmJRmfeaO8b4KNXeMnUPNHWPnUHPH2DnU3DF2DjV3qJ2BkHh/TslBg8o15MSPqnJFdcozpWUl2tKwdd/7DY1NKk3xUDPEWZ+7yZ3b3XSOI3eMnX3upnN6s2trX9SZZ56ivn2P1GGHHaoxYz6l8vLSlGZ95o7xvvK5m85x5I6xs8/ddI4jd4ydfe6OsTMQkqLOPmhmPST9q6RySYucc890+Nj1zrkfZLK8d+9emjd3ur717Rv19tvNKc/ZAR5ulurD0EOc9bmb3LndTef0Zn3upnN6sz530zm92fr6l3Tb7b/UowvnqLl5p1atrlVra2tKs5nu5r5Kb9bnbjqnN+tzN53Tm/W5m87pzfrcTef0Zn3ujrEzEJJkV0reLeksSW9I+pmZ/aTDxy442JCZTTKzKjOram/fecDPKSoq0v1zp2vOnAc1f/6jaYVubGjSwA5XbZSXDVBT0ysFO+tzN7lzu5vOceSOsbPP3XROP/fMmffplE+co1Gfvkg7tr+5
3/NJ5mPuWO+rEHPH2NnnbjrHkTvGzj530zmO3KF2huSc8dbhLZ8lO5Q82Tn3RefcHZJOkdTHzH5vZodIOmgz59w059ww59ywRKL3AT9n+rQpqqt/SXdMnZZ26JVVNRo8+DhVVAxUcXGxxo8fp4ceXlyws+Qmd7ZnyR3OLLnDmQ059zHHHC1JGjiwVJ/73DmaO3dByrOhdiZ3GLPkDmeW3OHMkjucWXKHM+t7NxCKTh++Lann3l8451olTTKzGyU9IalPV5eeftpwTZxwkVatrlXVyt3/Yt1ww616dNETKc23tbXpqquv18JHZqtHIqGZs+aqtnZdwc6Sm9zZniV3OLPkDmc25Nxz75umo48+Si0trfrmVd/Xm2/+OeXZUDuTO4xZcoczS+5wZskdziy5w5n1vRsIhXX2vARmdq+ke51zi95z+79K+i/nXHGyBUU9y3jiAwAA8lDCuv5wjnae1wgAAMCr1l2N+f3YXE82fHQ0f1Dt4INr/pC3PyedPnzbOTdB0nYzGy5JZvZhM/uWpK2pHEgCAAAAAAAAwHsle/XtGyWdI6nIzB7T7ueVfErSdWY21Dn3w+xHBAAAAAAAAFBIkj2n5EWShkg6RNI2SeXOubfM7DZJlZI4lAQAAAAAAEBecO2+EyBVyV59u9U51+ac+4ukDc65tyTJOfeOJO5mAAAAAAAAAGlLdii5y8x67fn1SXtvNLMjxKEkAAAAAAAAgC5I9vDtEc65v0mSc++6ALZY0qWpLMjkJX54uSQAALInk1fQ5r/vAAAAADLR6aHk3gPJA9z+uqTXs5IIAAAAAAAAQEFLdqUkAAAAAAAAEIR2l8ljepBLyZ5TEgAAAAAAAAC6FYeSAAAAAAAAAHLK26Hk+nXLVf3846pauVjLn12Y9vzos0dq7ZqnVV+7TNdec0XBz/rcTe5wcsfYOZP56dOmaGvDC6qpXpL2zkz2Zjrrc3eMuWPsnOn8Vd/8mmpqnlB19RLdc8+dOuSQQ3Kyl/sqnNwxdva5m85x5I6xs8/ddI4jd6idgVCYy+CVN1NR3LPsgAvWr1uuT5x6jt54Y8dBZw+WLJFIqG7tUo0592I1NDRp+bMLNWHi5aqrW580T4iz5CY3nbMzf+YZp6i5eadmzJiqIUNHpbSvO/ZyX4WTO8bOqc4f7Jl6SktL9NSTD+pjJ35Sf/3rXzV79l1a9OgT+u098/Z9Tr79993n7hhzx9g51Nwxdg41d4ydQ80dY+dQc4fQuXVXI0+eeADr/mlMdg+6AvOhukV5+3MS5MO3Tx4+VBs2bNLGjZvV0tKiefMW6Pyxowt2ltzkzvZsrLmXLqvU9h1vpryru/ZyX4WTO8bO3TFfVFSkww47VD169FCvww7T1qZtWd/LfRVO7hg7h5o7xs6h5o6xc6i5Y+wcau5QO0Nyznjr8JbPOj2UNLNeZnatmV1jZoea2ZfN7H/M7Mdm1ieTxc45PbpwjiqXP6p/vexLac2WlpVoS8PWfe83NDaptLSkYGd97iZ3bnfTObe5MxFqZ3LTOdvzW7du009/epde3rBCWzZX66233tLjjz+d9b3cV7ndTec4csfY2eduOseRO8bOPnfH2BkISbIrJWdKOlbScZIekTRM0u3a/ait/zrYkJlNMrMqM6tqb995wM85a+TndPIpY3Te2An6xje+rDPOOCXl0Gb7n/Sm+jD0EGd97iZ3bnfTOb3Z7pjvqlA7kzt3sz53+8x95JFHaOzY0fqHD31C7x/0cfXq3Utf/OIFWd/LfZXb3XROb9bnbjqnN+tzN53Tm/W5m87pzfrcHWNnICTJDiU/5Jz7d0lXSPqIpMnOuaclXSvpxIMNOeemOeeGOeeGJRK9D/g5TU2vSJJee+0NzV/wqIYPH5Jy6MaGJg0sL933fnnZgH1frxBnfe4md2530zm3uTMRamdy0znb86NGnalNmzbr9de3q7W1VfPnP6pTPzEs63u5r3K7m85x5I6xs8/ddI4jd4ydfe6O
sTMQkpSeU9LtPpJfuOefe9/v8jF9r16HqU+f3vt+/ZlPn6W1a19MeX5lVY0GDz5OFRUDVVxcrPHjx+mhhxcX7Cy5yZ3t2VhzZyLUzuSmc7bnt2xu1MmnfFyHHXaoJOlTnzxD9fWpPSF8qJ3JTed83k3nOHLH2DnU3DF2DjV3qJ2BkBQl+XiVmfVxzjU7576690Yz+6Ckt7u69Nhjj9ED9/9aktSjqIfuu2++Fi9+KuX5trY2XXX19Vr4yGz1SCQ0c9Zc1dauK9hZcpM727Ox5r73njt11ohT1a9fX216uUo33Xy7Zsy8L+t7ua/CyR1j50znV6ys1u9//4hWrPiDWltb9ULNWk3/1e+yvpf7KpzcMXYONXeMnUPNHWPnUHPH2DnU3KF2BkJiyZ6XwMxO1u6LI1ea2YcljZH0ojpcOdmZ4p5lXb6ikmdMAAAgP2XyOn789x0AACBzrbsa8/ullT2p/9C5/HGzg+PXLczbn5NOr5Q0sxslnSOpyMwek3SKpKckfUfSEEk/zHZAAAAAAAAAAIUl2cO3L9Luw8dDJG2TVO6ce8vMbpNUKQ4lAQAAAAAAAKQp2QvdtDrn2pxzf5G0wTn3liQ5596R1J71dAAAAAAAAAAKTrJDyV1m1mvPr0/ae6OZHSEOJQEAAAAAAAB0QbKHb49wzv1NkpxzHQ8hiyVdmsoCnl0UAIDCw3/fAQAAkI+SvyQz8kWnh5J7DyQPcPvrkl7PSiIAAAAAAAAABS3Zw7cBAAAAAAAAoFtxKAkAAAAAAAAgpziUBAAAAAAAAJBT3g4lp0+boq0NL6imekmX5kefPVJr1zyt+tpluvaaKwp+1uducoeTO8bOPnfTOY7cMXb2uZvOceSOsbPP3XSOI3eMnX3upnMcuUPtHDvXbrx1eMtn5rL8skRFPcsOuODMM05Rc/NOzZgxVUOGjkrrayYSCdWtXaox516shoYmLX92oSZMvFx1desLcpbc5KZz/u2mcxy5Y+wcau4YO4eaO8bOoeaOsXOouWPsHGruGDuHmjuEzq27GvP7xMmT2g9+ltff7uDDGx7J258Tb1dKLl1Wqe073uzS7MnDh2rDhk3auHGzWlpaNG/eAp0/dnTBzpKb3NmeJXc4s+QOZ5bc4cySO5xZcoczS+5wZskdziy5w5n1vRsIRZDPKVlaVqItDVv3vd/Q2KTS0pKCnfW5m9y53U3nOHLH2NnnbjrHkTvGzj530zmO3DF29rmbznHkjrGzz90xdgZCkvahpJmty0aQNDPsd1uqD0MPcdbnbnLndjed05v1uZvO6c363E3n9GZ97qZzerM+d9M5vVmfu+mc3qzP3XROb9bnbjqnN+tzd4ydgZAUdfZBM3tb0t6f/L3/VvTae7tz7n0HmZskaZIkWY8jlEj07qa4uzU2NGlgeem+98vLBqip6ZWCnfW5m9y53U3nOHLH2NnnbjrHkTvGzj530zmO3DF29rmbznHkjrGzz90xdgZCkuxKyZmS5kv6B+fc4c65wyVt3vPrAx5ISpJzbppzbphzblh3H0hK0sqqGg0efJwqKgaquLhY48eP00MPLy7YWXKTO9uz5A5nltzhzJI7nFlyhzNL7nBmyR3OLLnDmSV3OLO+d8eu3RlvHd7yWadXSjrnJpvZSZLmmNl8Sb/Q36+czMi999yps0acqn79+mrTy1W66ebbNWPmfSnNtrW16aqrr9fCR2arRyKhmbPmqrZ2XcHOkpvc2Z4ldziz5A5nltzhzJI7nFlyhzNL7nBmyR3OLLnDmfW9GwiFpfK8BGaWkHSlpM9L+qBzrjTVBUU9y3jiAwAAAAAAgG7Uuqsxvy+D82TNB87jHKqDj778cN7+nHR6paQkmdnJ2v38kT8zs2pJnzSzc51zC7MfDwAAAAAAAEChSfZCNzdKOkdSkZk9JulkSX+UdJ2ZDXXO/TAHGQEAAAAAAAAUkGRXSl4kaYikQyRtk1TunHvLzG6TVCmJQ0kAAAAAAADkBZfn
L+6Cv0v26tutzrk259xfJG1wzr0lSc65dyS1Zz0dAAAAAAAAgIKT7FByl5n12vPrk/beaGZHiENJAAAAAAAAAF2Q7OHbI5xzf5Mk51zHQ8hiSZdmLRUAAMBBJKzrD8lpd7wYIwAAAJAPOj2U3HsgeYDbX5f0elYSAQAAAAAAAChoya6UBAAAAAAAAILAA2PCkew5JQEAAAAAAACgW3EoCQAAAAAAACCnvBxKlpeX6vHF92v1qqf0Qs0TmnzlZWl/jdFnj9TaNU+rvnaZrr3mioKf9bmb3OHkjrGzz910jiN3jJ197k53dtrdt6thS42qn398320XXvBZ1VQv0V/f2ayPf/xjeZm7u2Z97qZzHLlj7OxzN53jyB1jZ5+7Y+wMhMJclh9sX9SzbL8FJSX9NaCkv6pr1qhPn95aUblIF170VdXVrU/payYSCdWtXaox516shoYmLX92oSZMvDyl+RBnyU1uOuffbjrHkTvGziHk7vjq22eccYqam3dqxm/u0NCPf1qSdPzxg9Xe3q47f/Ejfee6/9Dzz6/a9/kHe/XtfO+cb7vpHEfuGDuHmjvGzqHmjrFzqLlD6Ny6q9EO8iWitqpiLM8q2cHHNj2Utz8nXq6U3LbtVVXXrJEkNTfvVH39epWVlqQ8f/LwodqwYZM2btyslpYWzZu3QOePHV2ws+Qmd7ZnyR3OLLnDmSV3bmaXLavUjh1vvuu2+vqXtG7dyynt9JW7O2ZDzR1j51Bzx9g51Nwxdg41d4ydQ80damcgJN6fU3LQoHINOfGjqlxRnfJMaVmJtjRs3fd+Q2OTSlM81Axx1uducud2N53jyB1jZ5+76RxP7kyE2jnE3DF29rmbznHkjrGzz910jiN3qJ0htTvjrcNbPuv0UNLMPtbh18Vmdr2Z/Y+Z3WJmvTJd3rt3L82bO13f+vaNevvt5pTnzPb/pqb6MPQQZ33uJndud9M5vVmfu+mc3qzP3XROb9bn7kxzZyLUziHmjrGzz910Tm/W5246pzfrczed05v1uTvGzkBIkl0pObPDr2+VNFjSFEmHSbrrYENmNsnMqsysqr195wE/p6ioSPfPna45cx7U/PmPphW6saFJA8tL971fXjZATU2vFOysz93kzu1uOseRO8bOPnfTOZ7cmQi1c4i5Y+zsczed48gdY2efu+kcR+5QOwMhSXYo2fF4fpSkrznn/ijpW5KGHGzIOTfNOTfMOTcskeh9wM+ZPm2K6upf0h1Tp6WbWSurajR48HGqqBio4uJijR8/Tg89vLhgZ8lN7mzPkjucWXKHM0vu3OfORKidQ8wdY+dQc8fYOdTcMXYONXeMnUPNHWpnICRFST5+hJldoN2Hk4c451okyTnnzKzL1w6fftpwTZxwkVatrlXVyt3/Yt1ww616dNETKc23tbXpqquv18JHZqtHIqGZs+aqtnZdwc6Sm9zZniV3OLPkDmeW3LmZvee3v9CIEaeqX7++ennDSt38H1O0Y/ub+ulP/0PHHNNXC+bP0gur1uq88ybkVe7umA01d4ydQ80dY+dQc8fYOdTcMXYONXeonYGQWGfPS2BmM95z03XOuVfMrETS75xzo5ItKOpZxhMfAACAbpOwrj9hdzvPxwQAAApE667G/H4VE0+q3z+OP/B1MHTzgrz9Oen0Sknn3FfM7BRJ7c65lWb2YTP7kqT6VA4kAQAAAAAAAOC9Oj2UNLMbJZ0jqcjMHpN0sqQ/SrrOzIY6536Yg4wAAAAAAAAACkiy55S8SLtf0OYQSdsklbv/z969x0dZ3nkf//6GBBBUFFFDEip22XZf9dmu1IDVeqDiAmJB21W2Vqx23fLseqhuu7q2sPXR7bbdVVbtrl2LB7BQ5OCqCESLopxUDlFiNQkeEAoTAtYqUlBLDtfzB4GNEjIzSWauueb6vF+vvCQTfvl9v7mnSG/vmdu5XWZ2m6Q1kjgpCQAAAAAAACAjqe6+3eSca3bOfSBpo3NulyQ55z6U1JL1
dAAAAAAAAAAKTqorJfeaWZ/Wk5Kn7H/QzPqJk5IAAMADblYDAACAQ+GviuFIdVLyLOfcHyXJOdf2JGSxpMuzlgoAAAAAAABAwUp19+0/HuLxdyS9k5VEAAAAAAAAAApaqveUBAAAAAAAAIBuxUlJAAAAAAAAADmV6j0lAQAAAAAAgCC0OPMdAWnydqXkvdOmalvyZVWvX9qp+dGjRqjm1RXaULtKN95wdcHP+txN7nByx9jZ5246x5E7xs4+d9M5s9kY/z7lc3eMuWPs7HM3nePIHWNnn7tj7AyEwlyW75Ve1LOs3QVnnnGqdu/eo+nT79LJQ0dm9D0TiYTqalZqzNhLlEw2aPULlZp42VWqq3ujIGfJTW46599uOseRO8bOoeaOsbMU39+nyB3OLLnDmSV3OLPkDmc2V7ub9tZzSWA7qsovzO6JrsBUJB/L2+eJtyslV65ao3ff29mp2eHDhmrjxs3atGmLGhsbNW/eAo0fN7pgZ8lN7mzPkjucWXKHM0vucGZDzh3b36fIHc4sucOZJXc4s+QOZ9b3biAUHZ6UNLNrzGxA66+HmNkKM9tpZmvM7M9zE/FgpWUl2prcduDzZH2DSktLCnbW525y53Y3nePIHWNnn7vpHEfuGDt3VaidyR3GrM/dMeaOsbPP3XSOI3eonYGQpLpS8u+dc++0/vouSXc4546S9E+S7jnUkJlNMrMqM6tqadnTTVE/9v0Peizdl6GHOOtzN7lzu5vOmc363E3nzGZ97qZzZrPcbKrwAAAgAElEQVQ+d9M5s9muCrUzucOY9bk7xtwxdva5m86ZzfrcHWNnICSp7r7d9uvHOecelSTn3DIzO+JQQ865aZKmSYd+T8muqE82aFB56YHPy8sGqqFhR8HO+txN7tzupnMcuWPs7HM3nePIHWPnrgq1M7nDmPW5O8bcMXb2uZvOceQOtTMkx923g5HqSsmHzWyGmX1a0qNmdr2ZfcrMviVpSw7ytWtdVbWGDDlRgwcPUnFxsSZMuEALFy0p2Flykzvbs+QOZ5bc4cySO5zZkHN3RaidyR3GLLnDmSV3OLPkDmfW924gFB1eKemcm2xmV0h6SNKfSOolaZKkxyRd2pXFs2berbPPOk0DBvTX5reqdMutt2v6jDlpzTY3N+u666eocvFs9UgkNOPBuaqtfb1gZ8lN7mzPkjucWXKHM0vucGZDzh3b36fIHc4sucOZJXc4s+QOZ9b3biAUlup9CcxsuCTnnFtnZidJGiOpzjlXmc6CbLx8GwAAAAAAIGZNe+t5nXI71pV9lfNQbQyrfzRvnycdXilpZjdLOk9SkZk9JWm4pOWSbjKzoc65f81BRgAAAAAAAAAFJNWNbi6SdLL2vWx7u6Ry59wuM7tN0hpJnJQEAAAAAABAXmjhRjfBSHWjmybnXLNz7gNJG51zuyTJOfehpJaspwMAAAAAAABQcFKdlNxrZn1af33K/gfNrJ84KQkAAAAAAACgE1K9fPss59wfJck51/YkZLGky9NZkLDOXzbbkuImPAAAAAAAAADC0+FJyf0nJNt5/B1J72QlEQAAAAAAAICClupKSQAAAAAAACAIvOY2HKneUxIAAAAAAAAAuhUnJQEAAAAAAADkVE5PSk77xe1Kbq3W+peePvDY0UcfpcrK2aqpWanKytk66qh+aX2v0aNGqObVFdpQu0o33nB1RjlCnPW5m9zh5I6xs8/ddI4jd4ydfe6mcxy5Y+zsczed48gdY+d7p03VtuTLql6/NKO57tjNsYojt6/OXX1uA6Ewl+U7XPfsVX5gwRlnnKrdu/do+gN3augXzpUk/eTHk/Xuuzt12+1364Z/vFpHH91PP5j8Y0mHvvt2IpFQXc1KjRl7iZLJBq1+oVITL7tKdXVvpMwT4iy5yU3n/NtN5zhyx9g51Nwxdg41d4ydQ80dY+dQc8fYWZLO3P//L6ffpZOHjkxrxnfuWI9ViLl9dk73ud20t97SChOZ1aVf420l
2/jitkfy9nmS0yslV61ao/fe2/mxx8aNG6WZs+ZLkmbOmq/x40en/D7Dhw3Vxo2btWnTFjU2NmrevAUaPy71XKiz5CZ3tmfJHc4sucOZJXc4s+QOZ5bc4cySO5zZkHOvXLVG737i/1+mK9TO5A5jtqvzXXluAyHx/p6Sxx03QNu3vy1J2r79bR177DEpZ0rLSrQ1ue3A58n6BpWWlqS1L8RZn7vJndvddI4jd4ydfe6mcxy5Y+zsczed48gdY2efu+mcee6uCLUzucOY7Y55dF6LMz7afOSzDk9KmtkjZjbRzA7PVaB0mB38Q033ZeghzvrcTe7c7qZzZrM+d9M5s1mfu+mc2azP3XTObNbnbjpnNutzN50zm/W5m86ZzXZVqJ3JHcZsd8wDMUh1peSpki6UtMXM5pnZV82sZ6pvamaTzKzKzKpamvd0+HvffvsdlZQcJ0kqKTlOv/vd71OGrk82aFB56YHPy8sGqqFhR8q5UGd97iZ3bnfTOY7cMXb2uZvOceSOsbPP3XSOI3eMnX3upnPmubsi1M7kDmO2O+aBGKQ6Kfm2c+4iSSdIWijp25LqzWy6mY061JBzbppzrsI5V5Ho0bfDBQsXPaXLJl4sSbps4sVauHBJytDrqqo1ZMiJGjx4kIqLizVhwgVauCj1XKiz5CZ3tmfJHc4sucOZJXc4s+QOZ5bc4cySO5zZkHN3RaidyR3GbHfMAzEoSvF1J0nOuT9Imilpppn1lzRB0k2SMvpf1Mxf/pfOOus0DRjQX29tXKdb/2WqbrvtvzR79j264ltf19at9brkkr9L+X2am5t13fVTVLl4tnokEprx4FzV1r6eVoYQZ8lN7mzPkjucWXKHM0vucGbJHc4sucOZJXc4syHnnjXzbp3d+v8vN79VpVtuvV3TZ8zJ69yxHqsQc/vs3JXnNhAS6+g9DcxshXPurK4s6NmrvNNvmtDC+y0AAAAAAAAcpGlvfX7fxcST50ou4mRSG1/a/nDePk86vFLSOXeWmQ3f90u3zsw+J2mMpA3OucqcJAQAAAAAAABQUDo8KWlmN0s6T1KRmT2lfTe+WSbpJjMb6pz71+xHBAAAAAAAAFBIUr2n5EWSTpbUS9J2SeXOuV1mdpukNZI4KQkAAAAAAAAgI6nuvt3knGt2zn0gaaNzbpckOec+lNSS9XQAAAAAAAAACk6qKyX3mlmf1pOSp+x/0Mz6iZOSAAAAAAAAyCOcrApHqpOSZznn/ihJzrm2x7VY0uXpLOAO2gAAAAAAAADaSnX37T8e4vF3JL2TlUQAAAAAAAAAClqq95QEAAAAAAAAgG7FSUkAAAAAAAAAOcVJSQAAAAAAAAA55e2k5OhRI1Tz6gptqF2lG2+4OqfzIc763E3ucHLH2NnnbjrHkTvGzj53x9a5V69eeuG5RXqx6im9XP2Mbv7h9zKNHeTPO8Rj1dVZn7vpHEfuGDv73E3nOHKH2jl2TsZHm498Zi7Ld8cu6ll20IJEIqG6mpUaM/YSJZMNWv1CpSZedpXq6t5I63t2ZT7EWXKTm875t5vOceSOsXOouUPtLEl9+/bRnj0fqKioSCuWPap/+O7NWrP2pbzOHeOxijF3jJ1DzR1j51Bzx9g51NwhdG7aW5/fZ5w8WVFycXZPdAXmrO3z8/Z54uVKyeHDhmrjxs3atGmLGhsbNW/eAo0fNzon8yHOkpvc2Z4ldziz5A5nltzhzPrevWfPB5Kk4uIiFRUXK5P/YBzizzvUYxVj7hg7h5o7xs6h5o6xc6i5Q+0MhKTDk5Jm9mkze8DMfmRmh5vZvWb2qpnNN7PBnV1aWlaircltBz5P1jeotLQkJ/MhzvrcTe7c7qZzHLlj7OxzN53jyB1qZ2nf1RBV65aoof43Wrp0hdauW5/3uWM8VjHmjrGzz910jiN3jJ197o6xMxCSVFdKzpC0TtJuSaslbZB0nqQnJT3Q2aVmB185mslVAV2ZD3HW525y53Y3nTOb9bmbzpnN
+txN58xmfe6OsbMktbS0qGLYKJ1wYoWGVQzVSSd9Nu3ZEH/eoR6rGHPH2NnnbjpnNutzN50zm/W5O8bOQEiKUnz9COfcf0uSmV3lnJva+vj9ZnbNoYbMbJKkSZJkPfopkej7sa/XJxs0qLz0wOflZQPV0LAj7dBdmQ9x1uducud2N53jyB1jZ5+76RxH7lA7t/X++7u0fMXz+97Yvua1rO8Ocdbn7hhzx9jZ5246x5E7xs4+d8fYGVIL52+DkepKyRYz+4yZDZfUx8wqJMnMhkjqcagh59w051yFc67ikyckJWldVbWGDDlRgwcPUnFxsSZMuEALFy1JO3RX5kOcJTe5sz1L7nBmyR3OLLnDmfW5e8CA/urX70hJUu/evTXynDP12msb8z53jMcqxtwxdg41d4ydQ80dY+dQc4faGQhJqislb5S0UFKLpAslfd/MPi+pn6Rvd3Zpc3Ozrrt+iioXz1aPREIzHpyr2trXczIf4iy5yZ3tWXKHM0vucGbJHc6sz90DBx6vB+6/Uz16JJRIJPTwwwu1uPLpvM8d47GKMXeMnUPNHWPnUHPH2DnU3KF2BkJiqd6XwMxOldTinFtnZidp33tK1jrnKtNZUNSzjAtnAQAAAAAAulHT3vqD33wSWnb8xZyHamPEjvl5+zzp8EpJM7tZ+05CFpnZU5KGS1ou6SYzG+qc+9ccZAQAAAAAAABQQFK9fPsiSSdL6iVpu6Ry59wuM7tN0hpJnJQEAAAAAABAXmhR3l4YiE9IdaObJudcs3PuA0kbnXO7JMk596H2vc8kAAAAAAAAAGQk1UnJvWbWp/XXp+x/0Mz6iZOSAAAAAAAAADoh1cu3z3LO/VGSnHNtT0IWS7o8a6kAAAAAAAAAFKwOT0ruPyHZzuPvSHonK4kAAAAAAAAAFLRUL98GAAAAAAAAgG6V6uXbAAAAAAAAQBAcd98OBldKAgAAAAAAAMgpTkoCAAAAAAAAyCmvJyUTiYTWrf21Fjz6YMazo0eNUM2rK7ShdpVuvOHqgp/1uZvc4eSOsbPP3XSOI3eMnX3upnNms+XlpXp6yXy98ptlern6GV17zZU5282xiiN3jJ197qZzHLlj7Oxzd4ydgVCYcy6rC4p6lh1ywfXXTdIpp3xeRx5xhC746uVpf89EIqG6mpUaM/YSJZMNWv1CpSZedpXq6t4oyFlyk5vO+bebznHkjrFzqLlj7CxJJSXHaWDJcVpf/aoOP7yv1q55Un910d/kde5Yj1WIuWPsHGruGDuHmjvGzqHmDqFz09563jyxHUuP/+vsnugKzMgdc/P2eeLtSsmysoEae95IPfDAQxnPDh82VBs3btamTVvU2NioefMWaPy40QU7S25yZ3uW3OHMkjucWXKHMxty7u3b39b66lclSbt379GGDW+orLQkr3PHeqxCzB1j51Bzx9g51Nwxdg41d6idIbXw8bGPfNbhSUkzS5jZ35jZYjN72cxeNLM5Zjaiq4v/Y+otuun7P1JLS+Y/otKyEm1NbjvwebK+QaVp/gU8xFmfu8md2910jiN3jJ197qZzHLlj7PxJJ5xQrpP/4v9ozdr1Wd/NsYojd4ydfe6mcxy5Y+zsc3eMnYGQpLpS8n5Jn5L0E0nPSlrc+tgUM7v2UENmNsnMqsysqqVlz0FfP3/suXr77Xf00vpXOhXa7OArT9N9GXqIsz53kzu3u+mc2azP3XTObNbnbjpnNutzN50zm22rb98+mjf3Xn33H2/WH/6wO+u7OVaZzfrcTefMZn3upnNmsz530zmzWZ+7Y+wMhKQoxddPcc59q/XXq8xstXPuh2a2QlK1pP9sb8g5N03SNKn995Q8/fQKjfvKKJ035hz17t1LRx55hB6c8TNdfsV30gpdn2zQoPLSA5+Xlw1UQ8OOgp31uZvcud1N5zhyx9jZ5246x5E7xs77FRUVaf7ce/XQQ4/qsceeSHsu1M7kDmPW5+4Yc8fY2eduOseRO9TOQEhSXSnZ
aGZ/Iklm9gVJeyXJOfdHSZ0+TT95yk81+NMVGvKZL+rSiVfp2WefS/uEpCStq6rWkCEnavDgQSouLtaECRdo4aIlBTtLbnJne5bc4cySO5xZcoczG3JuSbp32lTVbXhTd941LaO5UDuTO4xZcoczS+5wZskdzqzv3UAoUl0peYOkZ83sI0nFkr4uSWZ2rKRFWc52SM3Nzbru+imqXDxbPRIJzXhwrmprXy/YWXKTO9uz5A5nltzhzJI7nNmQc3/p9GG6bOJF+s0rtapat+//rPzzP/9UTzz5TN7mjvVYhZg7xs6h5o6xc6i5Y+wcau5QO0NyytubTeMTLNX7EpjZaZKanHPrzOxzksZI2uCcq0xnQXsv3wYAAAAAAEDnNe2t5+xbO5Yc/3XOQ7UxasecvH2edHilpJndLOk8SUVm9pSk4ZKWS7rJzIY65/41BxkBAAAAAAAAFJBUL9++SNLJknpJ2i6p3Dm3y8xuk7RGEiclAQAAAAAAAGQk1Y1umpxzzc65DyRtdM7tkiTn3IeSWrKeDgAAAAAAAEDBSXVScq+Z9Wn99Sn7HzSzfuKkJAAAAAAAAIBOSPXy7bOcc3+UJOdc25OQxZIuz1oqAAAAAAAAIENcQReODk9K7j8h2c7j70h6JyuJAAAAAAAAABS0VC/fBgAAAAAAAIBuxUlJAAAAAAAAADnFSUkAAAAAAAAAOeX1pGQikdC6tb/WgkcfzHh29KgRqnl1hTbUrtKNN1xd8LM+d5M7nNwxdva5m85x5I6xs8/ddM5d7nunTdW25MuqXr80451d2dvVWZ+7Y8wdY2efu+kcR+4YO/vcHWPn2LXw8bGPfGbOuawuKOpZdsgF1183Saec8nkdecQRuuCr6d/MO5FIqK5mpcaMvUTJZINWv1CpiZddpbq6NwpyltzkpnP+7aZzHLlj7Bxq7hg7d3X+zDNO1e7dezR9+l06eejItPZ1x16OVTi5Y+wcau4YO4eaO8bOoeYOoXPT3npLK0xkKo//enZPdAVm7I45efs88XalZFnZQI09b6QeeOChjGeHDxuqjRs3a9OmLWpsbNS8eQs0ftzogp0lN7mzPUvucGbJHc4sucOZjTX3ylVr9O57O9Pe1V17OVbh5I6xc6i5Y+wcau4YO4eaO9TOQEi8nZT8j6m36Kbv/0gtLZlfTFpaVqKtyW0HPk/WN6i0tKRgZ33uJndud9M5jtwxdva5m85x5I6xc3fMd1aonclN53zeTec4csfY2efuGDsDIenwpKSZFZnZ/zWzJ83sN2b2spk9YWZ/Z2bFnV16/thz9fbb7+il9a90at7s4CtP030ZeoizPneTO7e76ZzZrM/ddM5s1uduOmc263M3nTOb7Y75zgq1M7lzN+tzd4y5Y+zsczedM5v1uTvGzkBIilJ8faaknZL+n6Rk62Plki6XNEvSX7c3ZGaTJE2SJOvRT4lE3499/fTTKzTuK6N03phz1Lt3Lx155BF6cMbPdPkV30krdH2yQYPKSw98Xl42UA0NOwp21uducud2N53jyB1jZ5+76RxH7hg7d8d8Z4Xamdx0zufddI4jd4ydfe6OsTMkp7x9C0V8QqqXb3/BOff3zrnVzrlk68dq59zfSxp6qCHn3DTnXIVzruKTJyQlafKUn2rwpys05DNf1KUTr9Kzzz6X9glJSVpXVa0hQ07U4MGDVFxcrAkTLtDCRUsKdpbc5M72LLnDmSV3OLPkDmc21txdEWpnctM5n3fTOY7cMXYONXeonYGQpLpS8j0zu1jS/zjnWiTJzBKSLpb0XrbDHUpzc7Ouu36KKhfPVo9EQjMenKva2tcLdpbc5M72LLnDmSV3OLPkDmc21tyzZt6ts886TQMG9Nfmt6p0y623a/qMOVnfy7EKJ3eMnUPNHWPnUHPH2DnU3KF2BkJiHb0vgZkNlvRvks7RvpOQJqmfpGcl3eSc25RqQVHPMt74AAAAAAAAoBs17a3ndcrtWHz8JZyHauP8HQ/l
7fOkwyslnXOb1fq+kWZ2jPadlLzTOTcx+9EAAAAAAAAAFKIOT0qa2ePtPHzO/sedc+OzkgoAAAAAAADIUEveXheIT0r1npLlkmol3SfJad+VksMkTc1yLgAAAAAAAAAFKtXdtyskvShpsqT3nXPLJH3onFvunFue7XAAAAAAAAAACk+q95RskXSHmc1v/eeOVDMAAAAAAAAA0JG0TjA655KSLjaz8yXtym4kAAAAAAAAAIUso6senXOLJS3OUhYAAAAAAAAAEeCl2AAAAAAAACgILeL226FIdaMbAAAAAAAAAOhWnJQEAAAAAAAAkFPeTkqOHjVCNa+u0IbaVbrxhqtzOh/irM/d5A4nd4ydfe6OsfO906ZqW/JlVa9fmtFcd+wOcdbn7hhzx9jZ9+5EIqF1a3+tBY8+mNO9sR2rUP/s9bk7xtwxdva5m85x5A61MxAKc85ldUFRz7KDFiQSCdXVrNSYsZcomWzQ6hcqNfGyq1RX90Za37Mr8yHOkpvcdM6/3TF2lqQzzzhVu3fv0fTpd+nkoSPTmvGdO8ZjFWPuGDv73i1J1183Saec8nkdecQRuuCrl2c9c1fnQz1WIf7Z63N3jLlj7Bxq7hg7h5o7hM5Ne+t588R2LCj5RnZPdAXmgu2z8/Z54uVKyeHDhmrjxs3atGmLGhsbNW/eAo0fNzon8yHOkpvc2Z4ldzizvnevXLVG7763M+3fnw+5YzxWMeaOsbPv3WVlAzX2vJF64IGH0p7pjr0xHqsQ/+z1uTvG3DF2DjV3jJ1DzR1qZ0iOj4995LNOn5Q0s2mdnS0tK9HW5LYDnyfrG1RaWpKT+RBnfe4md2530zmO3KF27qoQf96hHqsYc8fY2ffu/5h6i276/o/U0tKS9kx37I3xWHVFqJ3JTed83k3nOHKH2hkISYcnJc2s/yE+jpE0toO5SWZWZWZVLS172vv6QY9l8jLyrsyHOOtzN7lzu5vOmc363B1j564K8ecd6rGKMXeMnX3uPn/suXr77Xf00vpX0vr93bW3q/OhHquuCLUzuXM363N3jLlj7Oxzd4ydgZAUpfj67yT9VlLb/0W41s+PO9SQc26apGlS++8pWZ9s0KDy0gOfl5cNVEPDjrRDd2U+xFmfu8md2910jiN3qJ27KsSfd6jHKsbcMXb2ufv00ys07iujdN6Yc9S7dy8deeQRenDGz3T5Fd/J6t6uzod6rLoi1M7kpnM+76ZzHLlD7QyEJNXLt9+SNMI5d2Kbj087506U1On/RayrqtaQISdq8OBBKi4u1oQJF2jhoiU5mQ9xltzkzvYsucOZ9b27K0L8eYd6rGLMHWNnn7snT/mpBn+6QkM+80VdOvEqPfvsc2mdkOzq3q7Oh3qsuiLUzuSmcz7vpnMcuUPtDIQk1ZWSd0o6WtKWdr72751d2tzcrOuun6LKxbPVI5HQjAfnqrb29ZzMhzhLbnJne5bc4cz63j1r5t06+6zTNGBAf21+q0q33Hq7ps+Yk9e5YzxWMeaOsbPv3Z0VamefuUP8s9fn7hhzx9g51Nwxdg41d6idIWX2btfwyTJ9XwIz+6Vz7pvp/v72Xr4NAAAAAACAzmvaW3/wm09Cj5R8g/NQbXxt++y8fZ50eKWkmT3+yYckfdnMjpIk59z4bAUDAAAAAAAAUJhSvXx7kKQaSffpf29wUyFpapZzAQAAAAAAAChQqW50c4qkFyVNlvS+c26ZpA+dc8udc8uzHQ4AAAAAAABA4enwSknnXIukO8xsfus/d6SaAQAAAAAAAICOpHWC0TmXlHSxmZ0vaVcmC3oX9exMLknSR017Oz0LAEAMEtb5961uyfBmd0Cmjurdt9OzOz/a041JAABALFq68Pdj5FZGVz065xZLWpylLAAAAAAAAAAikOo9JQEAAAAAAACgW3FSEgAAAAAAAEBOcVISAAAAAAAAQE7l7KRkr149tWzFY3phdaXWVf1ak6dcL0m6/4E79FL1Uq1d96R+fs+/
qagovbe5HD1qhGpeXaENtat04w1XZ5QlxFmfu8kdTu4QO/fq1UsvPLdIL1Y9pZern9HNP/xeprGD/HmHeKy6Outzd4ydr7nmSq1/6WlVr1+qa6+9MqPZru4Ocdbn7lhyv/TKM1rxwkI9u2qBnl72P5Kk8ReO0ao1i/X2zg06eej/SWvvvdOmalvyZVWvX5pR3s7m7q5Zn7vpHEfuGDv73E3nOHKH2jl2jo+PfeQzc1m+8+bhfU48sKBv3z7as+cDFRUV6aml83XjP96io/sfpSW/XiZJmj7jLj333Frdd++vJB367tuJREJ1NSs1ZuwlSiYbtPqFSk287CrV1b2RMk+Is+QmdyF3lj7+Z8OKZY/qH757s9asfSmvc8d4rGLMHULn9u6+fdLnPqtZs+7W6V/6ivbubdSiRbN07bU/0JtvbvrY7zvU3bdD/HmHcKxizN327tsvvfKMzj37r/Tuu+8deOxPP/Mnci0tmnrXrbp5yr+pev2rB752qLtvn3nGqdq9e4+mT79LJw8dmTJrrjvn2246x5E7xs6h5o6xc6i5Q+jctLee20y3Y/7AS/P9XFxOXdzwq7x9nuT05dt79nwgSSouLlJxcZGcdOCEpCRVVb2ssrKBKb/P8GFDtXHjZm3atEWNjY2aN2+Bxo8bnVaGEGfJTe5sz/re3fbPhqLiYmXyH0tC/HmHeqxizB1q5z/7syFas2a9PvzwIzU3N2vlitW64IIxeZ87xmMVa+793nh940Eny1NZuWqN3n1vZ8a7JI4VnQs3d4ydQ80dY+dQc4faGQhJTk9KJhIJPb96sTb9tkrPLF2lqnXVB75WVFSkS77xVT21ZHnK71NaVqKtyW0HPk/WN6i0tCStDCHO+txN7tzujrGztO/Phqp1S9RQ/xstXbpCa9etz/vcMR6rGHOH2rmm9jWdeeap6t//KB12WG+NGXOOystL8z53jMcqptzOOT382ANauvwRffOKv05rT3fjWNE5n3fTOY7cMXb2uTvGzkBIOnwDRzPrIelvJZVLetI591ybr01xzv0ok2UtLS06/Yvnq1+/I/TQnF/oc5/7jGprX5ck3XHXv+i5VWv1/PPrUn4fa+elauleWRXirM/d5M7t7hg7S/v+bKgYNkr9+h2p/5l/v0466bOqqXkt67tDnPW5O8bcoXbesOFN3Xb7z/VE5UPavXuPfvNKrZqamtKa7eruEGd97o4p9/mjLtH27W9rwID+enjBDL3x+ka98HxVWvu6C8cqd7M+d8eYO8bOPnfTObNZn7tj7AyEJNWVkr+QdLak30v6mZn9R5uvfe1QQ2Y2ycyqzKyqsekPB339/ff/oJUrV+vcvzxbkvT9H3xHAwb0103/lN45zvpkgwa1ueKjvGygGhp2FOysz93kzu3uGDu39f77u7R8xfMaPWpE2jMh/rxDPVYx5g61syTNmDFHp37xPI089yK99+7OjF4iG+LPO9RjFVPu7dvfliS98867qlz0lL5wyufT2tWdOFZ0zufddI4jd4ydfe6OsTOkFj4+9pHPUp2UHO6c+4Zz7k5Jp0o63MweMbNekg75RpnOuWnOuQrnXEVx0RGSpAED+qtfv32/7t27l7785TP0+usbdfkVf62R556lb13+nbTP/K+rqtaQISdq8OBBKi4u1oQJF2jhoiUFO0tucmd71ufufX82HClJ6t27t0aec6Zee+4HO8wAACAASURBVG1j3ueO8VjFmDvUzpJ07LHHSJIGDSrVhReep7lzF+R97hiPVSy5+/Q5TIcf3vfAr0ec86W0bxTQnThWdM7n3XSOI3eMnUPNHWpnICQdvnxbUs/9v3DONUmaZGY3S3pG0uGZLDq+5DhNu/d29Uj0UCJheuSRxXryiWe0c9cb2rKlXs8se0SS9PiCJ/XTn/xnh9+rublZ110/RZWLZ6tHIqEZD8498DLwVEKcJTe5sz3rc/fAgcfrgfvvVI8eCSUSCT388EItrnw673PHeKxizB1q
Z0maO2eajjnmaDU2Nuk7103Wzp3v533uGI9VLLmPPW6AHvzV3ZKkoqIe+p/5C/XM0ys19it/qZ/e9s86ZkB/zZ4/Ta++UqcJX72yw92zZt6ts886TQMG9Nfmt6p0y623a/qMOXnXOV920zmO3DF2DjV3jJ1DzR1qZyAk1tHViWY2S9Is59yTn3j8byX9t3OuONWCw/uc2Ok3PvioaW9nRwEAiELCDvnChZRaeG8iZNlRvft2enbnR3u6MQkAAIWnaW995/8iWMDmDryUv+S28dcNv8rb50mHL992zk1s54TkL51z96VzQhIAAAAAAAAAPinV3bcf/+RDkr5sZkdJknNufLaCAQAAAAAAAChMqd5TcpCkGkn3SXLad1KyQtLULOcCAAAAAAAAMtKSty9Wxieluvv2KZJelDRZ0vvOuWWSPnTOLXfOLc92OAAAAAAAAACFp8MrJZ1zLZLuMLP5rf/ckWoGAAAAAAAAADqS1glG51xS0sVmdr6kXZks+CN30AYAIGu4gzbyWVfuoF3co/P/HbyxuanTswAAAMiNjP6255xbLGlxlrIAAAAAAAAAiAAvxQYAAAAAAEBBaBF3uglFqhvdAAAAAAAAAEC34qQkAAAAAAAAgJzydlLyjddXa/1LT6tq3RKtfqEy4/nRo0ao5tUV2lC7SjfecHXBz/rcTe5wcsfY2eduOseRO9TO906bqm3Jl1W9fmlGc92xO8RZn7tjzJ3p87O8fKCefHKO1q9fqhdffEpXX/0tSdKPf/wDVVcv1dq1T2ru3F+oX78js5o7xmMVY2efu+kcR+4YO/vcHWNnIBTmsnzXzuKeZe0ueOP11friaefp979/75Czh0qWSCRUV7NSY8ZeomSyQatfqNTEy65SXd0bKfOEOEtuctM5/3bTOY7coXaWpDPPOFW7d+/R9Ol36eShI9Oa8Z07xmMVa+50np9t775dUnKcSkqOU3X1qzr88L56/vlFmjBhksrKSrRs2fNqbm7Wj350kyRpypSfHvLu2xwrOhdq7hg7h5o7xs6h5g6hc9Peet48sR2/Kp2Y3RNdgbl026y8fZ4E+fLt4cOGauPGzdq0aYsaGxs1b94CjR83umBnyU3ubM+SO5xZcocz63v3ylVr9O57O9P+/fmQO8ZjFWvuTJ+f27e/rerqVyVJu3fv0YYNb6q09HgtXbpSzc3NkqS1a9errGxg1nLHeKxi7Bxq7hg7h5o7xs6h5g61M/Zd4MbH/37kM28nJZ1zeqLyIa1Z/YT+9spLM5otLSvR1uS2A58n6xtUWlpSsLM+d5M7t7vpHEfuGDv73B1j564K8ecd6rGKNXdXfOpT5Tr55JO0bl31xx7/5jcn6Ne/XtbhLMeKzvm8m85x5I6xs8/dMXYGQlLU0RfNrI+ka7Tv5Op/Svq6pK9J2iDpVufc7s4uPnvEhWpo2KFjjz1GTz4xRxtee1OrVq1Ja9bs4CtP030ZeoizPneTO7e76ZzZrM/ddM5s1ufuGDt3VYg/71CPVay5O6tv3z566KF7dMMNt+oPf/jfv4beeOM1am5u0pw5j3Y4z7HK3azP3THmjrGzz910zmzW5+4YOwMhSXWl5AxJx0s6UdJiSRWSbpdkkv77UENmNsnMqsysqqVlT7u/p6FhhyTpd7/7vR5b8ISGDTs57dD1yQYNKi898Hl52cAD368QZ33uJndud9M5jtwxdva5O8bOXRXizzvUYxVr7s4oKirSQw/do7lzH9OCBU8eePzSS/9KY8eO1BVXXJfye3Cs6JzPu+kcR+4YO/vcHWNnICSpTkp+xjn3PUlXSzpJ0rXOuRWSbpT0F4cacs5Nc85VOOcqEom+B329T5/DdPjhfQ/8+i/PPVs1Na+lHXpdVbWGDDlRgwcPUnFxsSZMuEALFy0p2Flykzvbs+QOZ5bc4cz63t0VIf68Qz1WsebujHvu+Xe99tqb+tnP7jvw2F/+5dn63vf+XhdddKU+/PCjlN+DY0XnfN5N
5zhyx9g51NyhdgZC0uHLt/dzzjkzq3St1wu3ft7pa4ePP/5YPTz/fklSj6IemjPnMS1Zsizt+ebmZl13/RRVLp6tHomEZjw4V7W1rxfsLLnJne1ZcoczS+5wZn3vnjXzbp191mkaMKC/Nr9VpVtuvV3TZ8zJ69wxHqtYc2f6/Dz99Apdeulf6ZVX6rR6daUk6eabb9PUqf9PvXr11KJFsyTtu9nNd74zOS87h3isYuwcau4YO4eaO8bOoeYOtTMQEuvofQnM7D5J13/yvSPN7E8kPeicOyPVguKeZZ0+eck7JgAAAMSpuEda/+28XY3NTd2YBACA/NS0t/7gN5+Eflk2kdNJbXyzflbePk86fPm2c+5v2zkh+Uvn3EZJZ2Y1GQAAAAAAAICClOru249/8iFJXzazo1o/H5+VVAAAAAAAAAAKVqrXxQySVCPpPu17NbVp3x24p2Y5FwAAAAAAAIACleru26dIelHSZEnvO+eWSfrQObfcObc82+EAAAAAAAAAFJ4Or5R0zrVIusPM5rf+c0eqmYO+RxfCAQAAIE7crAYAAHRGi+8ASFtaJxidc0lJF5vZ+ZJ2ZTcSAAAAAAAAgEKW2VWPzi2WtDhLWQAAAAAAAABEINV7SgIAAAAAAABAt+KkJAAAAAAAAICcyujl2wAAAAAAAEC+4obL4fB2peS906ZqW/JlVa9f2qn50aNGqObVFdpQu0o33nB1wc/63E3ucHLH2NnnbjrHkTvGzj530zmO3DF29rmbznHkjrGzz910jiN3qJ2BUJhz2T2HXNSzrN0FZ55xqnbv3qPp0+/SyUNHZvQ9E4mE6mpWaszYS5RMNmj1C5WaeNlVqqt7oyBnyU1uOuffbjrHkTvGzqHmjrFzqLlj7Bxq7hg7h5o7xs6h5o6xc6i5Q+jctLfe0goTmellE7lYso1v1c/K2+eJtyslV65ao3ff29mp2eHDhmrjxs3atGmLGhsbNW/eAo0fN7pgZ8lN7mzPkjucWXKHM0vucGbJHc4sucOZJXc4s+QOZ5bc4cz63g2EIuOTkmb2ejaCZKK0rERbk9sOfJ6sb1BpaUnBzvrcTe7c7qZzHLlj7OxzN53jyB1jZ5+76RxH7hg7+9xN5zhyx9jZ5+4YOwMh6fBGN2b2B/3ve4Tuv9yzz/7HnXNHHmJukqRJkmQ9+imR6NtNcQ98/4MeS/dl6CHO+txN7tzupnNmsz530zmzWZ+76ZzZrM/ddM5s1uduOmc263M3nTOb9bmbzpnN+txN58xmfe6OsTOklrx9sTI+KdWVkjMkPSbpT51zRzjnjpC0pfXX7Z6QlCTn3DTnXIVzrqK7T0hKUn2yQYPKSw98Xl42UA0NOwp21uducud2N53jyB1jZ5+76RxH7hg7+9xN5zhyx9jZ5246x5E7xs4+d8fYGQhJhyclnXPXSrpL0kNm9h0zSygP7q6+rqpaQ4acqMGDB6m4uFgTJlyghYuWFOwsucmd7VlyhzNL7nBmyR3OLLnDmSV3OLPkDmeW3OHMkjucWd+7gVB0+PJtSXLOvWhm50q6RtJySb27Y/GsmXfr7LNO04AB/bX5rSrdcuvtmj5jTlqzzc3Nuu76KapcPFs9EgnNeHCuamtfL9hZcpM727PkDmeW3OHMkjucWXKHM0vucGbJHc4sucOZJXc4s753A6GwDN8TYaCkV51zx6Q7U9SzzPuVlQAAAAAAAIWkaW89757YjvvLJ3Ieqo0rk7Py9nmS6kY3j7fzcK/9jzvnxmclFQAAAAAAAICClerl2+WSaiXdp33vJWmShkmamuVcAAAAAAAAQEZafAdA2lLdfbtC0ouSJkt63zm3TNKHzrnlzrnl2Q4HAAAAAAAAoPB0eKWkc65F0h1mNr/1nztSzQAAAAAAAABAR9I6weicS0q62MzOl7Qru5EAAAAAf7rybvC8sz4AAEB6Mrrq0Tm3WNLiLGUBAAAAAAAAEAFeig0AAAAAAICCwI1uwpHqRjcAAAAA
AAAA0K04KQkAAAAAAAAgp7yclOzVq5deeG6RXqx6Si9XP6Obf/i9jL/H6FEjVPPqCm2oXaUbb7i64Gd97iZ3OLm7MnvvtKnalnxZ1euXZjTXHbs5VnF09rmbznHkjrGzz90xdr7uO99WdfUzWr9+qWbOvFu9evXK2e4QZ33ujjF3jJ1D/ftrjMfK5+4YOwOhMOeye4/Aop5l7S7o27eP9uz5QEVFRVqx7FH9w3dv1pq1L6X1PROJhOpqVmrM2EuUTDZo9QuVmnjZVaqre6MgZ8lN7lx0PvOMU7V79x5Nn36XTh46Mq2ZfMgd4s87xs6h5o6xc6i5Y+wcau4QOrd39+3S0hIte/ZRff4vvqyPPvpIs2ffoyefeEa/nDnvY7/vUH+zDvHnHcKxIne8naUw//4a67EKMXcInZv21rf3r6zo/aJ8YnZPdAXm/yZn5e3zxNvLt/fs+UCSVFxcpKLiYmVycnT4sKHauHGzNm3aosbGRs2bt0Djx40u2Flykzvbs5K0ctUavfvezrR/f77kDvHnHWPnUHPH2DnU3DF2DjV3qJ0lqaioSIcd1ls9evRQn8MO07aG7XmfO8ZjFWPuGDtLYf79NdZjFWLuUDtDcsZH2490mFkPM1tvZotaPz/RzNaY2RtmNtfMerY+3qv18zdbvz64K8fK20nJRCKhqnVL1FD/Gy1dukJr161Pe7a0rERbk9sOfJ6sb1BpaUnBzvrcTe7c7vbZuSs4VnTO5910jiN3jJ197o6x87Zt23XHHfforY1rtXXLeu3atUtPP70i73PHeKxizB1j564KtTO5w5j1vRvohOsk1bX5/N8k3eGc+1NJ70m6svXxKyW955wbIumO1t/XaR2elDSzz7f5dbGZTTGzx83sx2bWpyuLW1paVDFslE44sULDKobqpJM+m/as2cGnetO90jLEWZ+7yZ3b3T47dwXHKnezPnfHmDvGzj530zmzWZ+7Y+x81FH9NG7caP3pZ76oT53wBfXp20ff+MbX0prt6u4QZ33ujjF3jJ27KtTO5A5j1vduIBNmVi7pfEn3tX5uks6R9HDrb3lQ0oWtv76g9XO1fn2ktfeETVOqKyVntPn1TyUNkTRV0mGS7jnUkJlNMrMqM6tqadnT4YL339+l5Sue1+hRI9IKLEn1yQYNKi898Hl52UA1NOwo2Fmfu8md290+O3cFx4rO+bybznHkjrGzz90xdh458kxt3rxF77zzrpqamvTYY0/otC9W5H3uGI9VjLlj7NxVoXYmdxizvncDGbpT0o2SWlo/P0bSTudcU+vnSUllrb8uk7RVklq//n7r7++UVCcl257tHCnp28655ZK+K+nkQw0556Y55yqccxWJRN+Dvj5gQH/163ekJKl3794aec6Zeu21jWmHXldVrSFDTtTgwYNUXFysCRMu0MJFSwp2ltzkzvZsV3Gs6JzPu+kcR+4YO4eaO9TOW7fUa/ipX9Bhh/WWJJ3z5TO0YUN6NzvwmTvGYxVj7hg7d1Wonckdxqzv3UBbbS8cbP2Y1OZrX5H0tnPuxbYj7Xwbl8bXMlaU4uv9zOyr2nfyspdzrlGSnHPOzDq9dODA4/XA/XeqR4+EEomEHn54oRZXPp32fHNzs667fooqF89Wj0RCMx6cq9ra1wt2ltzkzvasJM2aebfOPus0DRjQX5vfqtItt96u6TPm5H3uEH/eMXYONXeMnUPNHWPnUHOH2nntuvV65JHFWrv212pqatLL1TW6975f5X3uGI9VjLlj7CyF+ffXWI9ViLlD7Qx8knNumqRph/jylySNN7OxknpLOlL7rpw8ysyKWq+GLJe0/01Ok5IGSUqaWZGkfpLe7Ww26+h9Ccxshj5+xvMm59wOMyuR9Cvn3MhUC4p6lvHGBwAAAAhGp98YSV24VAAAgAw17a3vyr+yCtbPB03kX8dtXLV1VlrPEzMbIekfnXNfMbP5kv7HOTfHzO6R9Bvn3M/N7GpJf+6c+zsz+7qkrznn
JnQ2W4dXSjrnrmgn5C+dc9/UvpdzAwAAAAAAACgc/yRpjpn9SNJ6Sfe3Pn6/pJlm9qb2XSH59a4s6fCkpJk93s7D55jZUZLknBvfleUAAAAAAAAA/HLOLZO0rPXXb0ka3s7v+UjSxd21M9V7Sg6SVKN9twV32vdqlmHadwduAAAAAAAAAMhYqrtvnyLpRUmTJb3fetb0Q+fc8ta7cAMAAAAAAABARlK9p2SLpDta3+DyDjPbkWoGAAAAAAAA8KHFdwCkLa0TjM65pKSLzex8SbuyGwkAAADwh1t2AgAAZF9GVz065xZLWpylLAAAAAAAAAAikOo9JQEAAAAAAACgW3FSEgAAAAAAAEBOcdMaAAAAAAAAFATeGzoc3q6UvHfaVG1Lvqzq9Us7NT961AjVvLpCG2pX6cYbri74WZ+7yR1O7hg7+9xN5zhyx9jZ5246x5E7xs4+d9M5jtwxdva5m85x5A61MxAKcy6755CLepa1u+DMM07V7t17NH36XTp56MiMvmcikVBdzUqNGXuJkskGrX6hUhMvu0p1dW8U5Cy5yU3n/NtN5zhyx9g51Nwxdg41d4ydQ80dY+dQc8fYOdTcMXYONXcInZv21ltaYSLzn4MmcrFkG9dunZW3zxNvV0quXLVG7763s1Ozw4cN1caNm7Vp0xY1NjZq3rwFGj9udMHOkpvc2Z4ldziz5A5nltzhzJI7nFlyhzNL7nBmyR3OLLnDmfW9GwhFhyclzewaMxvQ+ushZrbCzHaa2Roz+/PcRDxYaVmJtia3Hfg8Wd+g0tKSgp31uZvcud1N5zhyx9jZ5246x5E7xs4+d9M5jtwxdva5m85x5I6xs8/dMXYGQpLqSsm/d8690/rruyTd4Zw7StI/SbrnUENmNsnMqsysqqVlTzdF/dj3P+ixdF+GHuKsz93kzu1uOmc263M3nTOb9bmbzpnN+txN58xmfe6mc2azPnfTObNZn7vpnNmsz910zmzW5+4YOwMhSXX37bZfP84596gkOeeWmdkRhxpyzk2TNE069HtKdkV9skGDyksPfF5eNlANDTsKdtbnbnLndjed48gdY2efu+kcR+4YO/vcTec4csfY2eduOseRO8bOPnfH2BlSS96+gyI+KdWVkg+b2Qwz+7SkR83sejP7lJl9S9KWHORr17qqag0ZcqIGDx6k4uJiTZhwgRYuWlKws+Qmd7ZnyR3OLLnDmSV3OLPkDmeW3OHMkjucWXKHM0vucGZ97wZC0eGVks65ya0nIB+S9CeSekmaJOkxSZd2ZfGsmXfr7LNO04AB/bX5rSrdcuvtmj5jTlqzzc3Nuu76KapcPFs9EgnNeHCuamtfL9hZcpM727PkDmeW3OHMkjucWXKHM0vucGbJHc4sucOZJXc4s753A6GwTN+XwMxmOucuS/f3Z+Pl2wAAAAAAADFr2lvPC5XbcdenJnIeqo3rtszK2+dJh1dKmtnj7Tx8zv7HnXPjs5IKAAAAAAAAQMFKdaObckm1ku6T5CSZpGGSpmY5FwAAAAAAAJCRFt8BkLZUN7qpkPSipMmS3nfOLZP0oXNuuXNuebbDAQAAAAAAACg8qW500yLpDjOb3/rPHalmAAAAAAAAAKAjaZ1gdM4lJV1sZudL2pXdSAAAAEB8Eta196FvyfAGlgAAAD5ldNWjc26xpMVZygIAAAAAAAAgArwUGwAAAAAAAAWBG92EI9WNbgAAAAAAAACgW3FSEgAAAAAAAEBOeTkp2atXL73w3CK9WPWUXq5+Rjf/8HsZf4/Ro0ao5tUV2lC7SjfecHXBz/rcTe5wcsfY2eduOseRO8bOPnfTOY7cMXbOdH7aL25Xcmu11r/09IHHjj76KFVWzlZNzUpVVs7WUUf1y3pujlU4uWPs7HM3nePIHWpnIBTmsnyXvqKeZe0u6Nu3j/bs+UBFRUVasexR/cN3b9aatS+l9T0TiYTqalZqzNhLlEw2aPULlZp42VWqq3ujIGfJTW46599u
OseRO8bOoeaOsXOouWPsnO5827tvn3HGqdq9e4+mP3Cnhn7hXEnST348We++u1O33X63bvjHq3X00f30g8k/PjDT3t23871zvs2GmjvGzqHmjrFzqLlD6Ny0t94O8S2iNvVTE7N7oisw39syK2+fJ95evr1nzweSpOLiIhUVFyuTk6PDhw3Vxo2btWnTFjU2NmrevAUaP250wc6Sm9zZniV3OLPkDmeW3OHMkjuc2Zhyr1q1Ru+9t/Njj40bN0ozZ82XJM2cNV/jx6feH1LnfJgNNXeMnUPNHWPnUHOH2hkIibeTkolEQlXrlqih/jdaunSF1q5bn/ZsaVmJtia3Hfg8Wd+g0tKSgp31uZvcud1N5zhyx9jZ5246x5E7xs4+d9M5t7n3O+64Adq+/W1J0vbtb+vYY4/J6l6OVW530zmO3DF29rk7xs6QHB8f+8hnHZ6UNLNHzGyimR3e3YtbWlpUMWyUTjixQsMqhuqkkz6b9qzZwVeepnulZYizPneTO7e76ZzZrM/ddM5s1uduOmc263M3nTOb9bmbzpnNdsd8Z4Xamdy5m/W5O8bcMXb2uTvGzkBIUl0peaqkCyVtMbN5ZvZVM+uZ6pua2SQzqzKzqpaWPR3+3vff36XlK57X6FEj0g5dn2zQoPLSA5+Xlw1UQ8OOgp31uZvcud1N5zhyx9jZ5246x5E7xs4+d9M5t7n3e/vtd1RScpwkqaTkOP3ud7/P6l6OVW530zmO3DF29rk7xs5ASFKdlHzbOXeRpBMkLZT0bUn1ZjbdzEYdasg5N805V+Gcq0gk+h709QED+qtfvyMlSb1799bIc87Ua69tTDv0uqpqDRlyogYPHqTi4mJNmHCBFi5aUrCz5CZ3tmfJHc4sucOZJXc4s+QOZzbW3PstXPSULpt4sSTpsokXa+HC1POhdiY3nfN5N53jyB1qZyAkRSm+7iTJOfcHSTMlzTSz/pImSLpJUqf+VzFw4PF64P471aNHQolEQg8/vFCLK59Oe765uVnXXT9FlYtnq0cioRkPzlVt7esFO0tucmd7ltzhzJI7nFlyhzNL7nBmY8o985f/pbPOOk0DBvTXWxvX6dZ/marbbvsvzZ59j6741te1dWu9Lrnk7wqqcz7Mhpo7xs6h5o6xc6i5Q+0MhMQ6el8CM1vhnDurKwuKepbxxgcAAABACgk7+D3EMtHC+40BQFSa9tZ37V8cBerfT5jIvxDbuPG3s/L2edLhy7fbOyFpZr/MXhwAAAAAAAAAha7Dl2+b2eOffEjSl83sKElyzo3PVjAAAAAAAAAAhSnVe0oOklQj6T7te39Jk1QhaWqWcwEAAAAAAAAoUKnuvn2KpBclTZb0vnNumaQPnXPLnXPLsx0OAAAAAAAAQOHp8EpJ51yLpDvMbH7rP3ekmgEAAAAAAAB8aPEdAGlL6wSjcy4p6WIzO1/SruxGAgAAAOLT1btnd+Xu3dy5GwAA5FpGVz065xZLWpylLAAAAAAAAAAikOo9JQEAAAAAAACgW3FSEgAAAAAAAEBOcdMaAAAAAAAAFATeJTkc3q6UHD1qhGpeXaENtat04w1X53Q+xFmfu8kdTu4YO/vcTec4csfY2eduOseRO8bOPndfc82VWv/S06pev1TXXntlzvZ2dT7GY0XnOHLH2Nnn7hg7A6Ewl+U77RX1LDtoQSKRUF3NSo0Ze4mSyQatfqFSEy+7SnV1b6T1PbsyH+IsuclN5/zbTec4csfYOdTcMXYONXeMnXO1u727b5/0uc9q1qy7dfqXvqK9exu1aNEsXXvtD/Tmm5s+9vvau/t2CJ27ezbU3DF2DjV3jJ1DzR1C56a99Qf/wQ/95ISJXCzZxvd/OytvnyderpQcPmyoNm7crE2btqixsVHz5i3Q+HGjczIf4iy5yZ3tWXKHM0vucGbJHc4sucOZJXfms3/2Z0O0Zs16ffjhR2pubtbKFat1wQVjsr63q/MxHis6x5E7xs6h5g61MxASLycl
S8tKtDW57cDnyfoGlZaW5GQ+xFmfu8md2910jiN3jJ197qZzHLlj7OxzN53DyV1T+5rOPPNU9e9/lA47rLfGjDlH5eWlWd/b1fkYjxWd48gdY2efu2PsDISkwxvdmNmnJU2RtE3STyXdIek0SXWSbnDObe7MUmvnpSWZvIy8K/MhzvrcTe7c7qZzZrM+d9M5s1mfu+mc2azP3XTObNbnbjpnNutz94YNb+q223+uJyof0u7de/SbV2rV1NSU9b1dnY/xWNE5s1mfu+mc2azP3TF2BkKS6krJGZLWSdotabWkDZLOk/SkpAcONWRmk8ysysyqWlr2HPT1+mSDBrX5L7TlZQPV0LAj7dBdmQ9x1uducud2N53jyB1jZ5+76RxH7hg7+9xN53ByS9KMGXN06hfP08hzL9J77+486P0ks7WXYxXGrM/dMeaOsbPP3TF2htQix0ebj3yW6qTkEc65/3bO/VTSkc65qc65rc65+yUdfagh59w051yFc64ikeh70NfXVVVryJATNXjwIBUXF2vChAu0cNGStEN3ZT7EWXKTO9uz5A5nltzhzJI7nFlyhzNL7s7tPvbYYyRJgwaV6sILz9PcuQtyspdjFcYsucOZJXc4s753A6Ho8OXbklrM7DOSMqASaAAAIABJREFU+knqY2YVzrkqMxsiqUdnlzY3N+u666eocvFs9UgkNOPBuaqtfT0n8yHOkpvc2Z4ldziz5A5nltzhzJI7nFlyd2733DnTdMwxR6uxsUnfuW6ydu58Pyd7OVZhzJI7nFlyhzPrezcQCuvofQnMbKSkn0tqkfRtSf8g6fPad5JyknPusVQLinqW5fe1ogAAAEABSNjB70GWrhbeqwwAgtO0t77zf/AXsH894VL+pdbG5N/+Km+fJx1eKemcWyrps20eWmVmiySNd861ZDUZAAAAAAAAgIKU6u7bj7fz8AhJj5mZnHPjs5IKAAAAAAAAyBBX0IUj1XtKDpJUI+k+SU6SSRomaWqWcwEAAAAAAAAoUKnuvn2KpBclTZb0vnNumaQPnXPLnXPLsx0OAAAAAAAAQOFJ9Z6SLZLuMLP5rf/ckWoGAAAAAAAAADqS1glG51xS0sVmdr6kXdmNBAAAACBT3EEbAACEJKOrHp1ziyUtzlIWAAAAAAAAoNP4T3ThSPWekgAAAAAAAADQrTgpCQAAAAAAACCnOCkJAAAAAAAAIKe8nZS8d9pUbUu+rOr1Szs1P3rUCNW8ukIbalfpxhuuLvhZn7vJHU7uGDv73E3nOHLH2NnnbjrHkTvGzj530zmO3DF29rmbznHkDrUzEApzWb5LX1HPsnYXnHnGqdq9e4+mT79LJw8dmdH3TCQSqqtZqTFjL1Ey2aDVL1Rq4mVXqa7ujYKcJTe56Zx/u+kcR+4YO4eaO8bOoeaOsXOouWPsHGruGDuHmjvGzqHmDqFz0956SytMZG494VLuddPGD3/7q7x9nni7UnLlqjV6972dnZodPmyoNm7crE2btqixsVHz5i3Q+HGjC3aW3OTO9iy5w5kldziz5A5nltzhzJI7nFlyhzNL7nBmyR3OrO/dsWvh42Mf+azDk5JmljCzvzGzxWb2spm9aGZzzGxEjvK1q7SsRFuT2w58nqxvUGlpScHO+txN7tzupnMcuWPs7HM3nePIHWNnn7vpHEfuGDv73E3nOHLH2Nnn7hg7AyEpSvH1+yX9VtJPJF0kaZeklZKmmNmfO+f+s70hM5skaZIkWY9+SiT6dl/ifd//oMfSfRl6iLM+d5M7t7v/P3t3Hyd1fd77/30NuxhFgzdolt0lrin19OT00YhF1MS7xArGBIiNoVWxuTPkdzSpNid67KmJR/trqy02MYlpAm2AaBAwTbACGuJdhP7kZpVFYZeKCIFZFjRVE8Gk7M31+4OVrC67M7OzM5/5zOf19LGPzO7stdf7vTPywG++M186FzYbcjedC5sNuZvOhc2G3E3nwmZD7qZzYbMhd9O5sNmQ
u+lc2GzI3XQubDbk7hQ7AzHJdVDyD9390723V5vZGnf/qpk9KalF0mEPSrr7HElzpIHfU7IY7dkOjWusP/R5Y8NYdXTsrdrZkLvJXd7ddE4jd4qdQ+6mcxq5U+wccjed08idYueQu+mcRu4UO4fcnWJnICa53lOy08x+R5LM7HRJByTJ3f9LUrDD9OubWzR+/Clqahqn2tpazZgxXQ8uW1m1s+Qmd6lnyR3PLLnjmSV3PLPkjmeW3PHMkjueWXLHM0vueGZD7wZiketMyRskPW5m/9X7vZdLkpmdKGlZMYvvvedunX/e2Roz5njteLFZt942W/PmL8prtru7W9ddf7NWLF+oEZmM5i9YrNbW56t2ltzkLvUsueOZJXc8s+SOZ5bc8cySO55ZcsczS+54Zskdz2zo3anrqdhrTePtLNf7EtjBNzM4wd1/0fv59939z/JdUIqXbwMAAAAAAKSs60A7h98O46tNV3Icqo/bdvygYp8ng54paWb/1uf2mzc/ZGbHSpK7TytdNAAAAAAAAADVKNfLt8dJ2izpn3XwPSRN0hmS7ixxLgAAAAAAAABVKteFbv5Q0tOS/krSL939CUm/dvefufvPSh0OAAAAAAAAQPUZ9ExJd++R9DUzu7/3f/fmmgEAAAAAAABC6BFvKRmLvA4wuntW0ifM7COSflXaSAAAAAAAAACqWUFnPbr7cknLS5QFAAAAAAAAQAJyvackAAAAAAAAAAwrDkoCAAAAAAAAKCsOSgIAAAAAAAAoq2AHJadMvkCbNz2pLa2rdeMN15Z1PsbZkLvJHU/uFDuH3E3nNHKn2DnkbjqnkTvFziF3h+wsSZlMRuvX/UQP/HhB2XbzWKXROeRuOqeRO9bOqXM+3vJRycy9tBFrRjb0W5DJZNS2eZUuvuRyZbMdWvPUCs286hq1tW3N62cWMx/jLLnJTefK203nNHKn2DnW3Cl2jjV3ip1jzV1s5zddf90s/eEf/oHeecwxmn7pJ/Oa4bGic7XmTrFzrLlj6Nx1oN3yCpOYv2q6otKPxZXV3+xYWLHPkyBnSk46Y4K2bduh7dt3qrOzU0uWPKBpU6eUZT7GWXKTu9Sz5I5nltzxzJI7nllyxzNL7nhm39TQMFaXfPhCfe979xU0x2NF50reTec0csfaGYhJkIOS9Q112pXdfejzbHuH6uvryjIf42zI3eQu7246p5E7xc4hd9M5jdwpdg65m85p5C62syT945236qa//H/V09NT0ByPFZ0reTed08gda2cgJoMelDSz0WZ2u5ltMbP/7P1o6/3asUNdatb/zNFCXkZezHyMsyF3k7u8u+lc2GzI3XQubDbkbjoXNhtyN50Lmw25m86FzYbcHbLzRy75I7300i/0zIbn8p4Zjt08VoXNhtydYu4UO4fcnWJnICY1Oe5fIukxSRe4+x5JMrM6SZ+UdL+kiw43ZGazJM2SJBsxWpnMqLfc357t0LjG+kOfNzaMVUfH3rxDFzMf42zI3eQu7246p5E7xc4hd9M5jdwpdg65m85p5C628/vfP1FTPzpZH774Q3rHO47QO995jBbM/4Y++ak/r+jcMf6+U+wccjed08gda2dIhZ2bj5ByvXy7yd3vePOApCS5+x53v0PSuwcacvc57j7R3Se+/YCkJK1vbtH48aeoqWmcamtrNWPGdD24bGXeoYuZj3GW3OQu9Sy545kldzyz5I5nltzxzJI7nllJ+qubb1fTeyZq/Kln6cqZ1+jxx/89rwOSoXPH+PtOsXOsuVPsHGvuWDsDMcl1puTPzexGSQvcfa8kmdm7JH1K0q6hLu3u7tZ119+sFcsXakQmo/kLFqu19fmyzMc4S25yl3qW3PHMkjueWXLHM0vueGbJHc9ssXis6FzJu+mcRu5YOwMxscHel8DMjpN0k6Tpkt4lySXtlfRvku5w91dyLagZ2cAbHwAAAAAAAAyjrgPt/d98EvrLpis4DtXH3+1YWLHPk0HPlHT3VyX9
794Pmdm5kiZJei6fA5IAAAAAAAAA8HaDHpQ0s3XuPqn39tWSrpW0VNItZna6u99ehowAAAAAAABATj3iRMlY5LrQTW2f25+XNNndb5U0WdKVJUsFAAAAAAAAoGrlutBNpvd9JTM6+P6TL0uSu+83s66SpwMAAAAAAABQdXIdlBwt6WlJJsnNrM7d95jZ0b1fAwAAAAAAAICC5LrQTdMAd/VIujSfBcUcueRdAAAAAAAAAIDqk+tMycNy9zckbR/mLAAAAAAAAAASMKSDkgAAAAAAAECl4VW38ch19W0AAAAAAAAAGFYclAQAAAAAAABQVsEOSm59fo02PPOImtev1JqnVhQ8P2XyBdq86UltaV2tG2+4tupnQ+4mdzy5U+wccjed08idYueQu+mcRu4UO4fcHWPnuXPu1O7sRrVseLTgncXsHY75GGdD7k4xd4qdQ+5OsTMQC3Mv7avta0c2HHbB1ufX6KyzP6z//M9XB5wdKFkmk1Hb5lW6+JLLlc12aM1TKzTzqmvU1rY1Z54YZ8lNbjpX3m46p5E7xc6x5k6xc6y5U+wca+6Qnc8950zt27df8+bdpdMmXJjXvkrIHeMsueOZJXc8s+Xa3XWg3fIKk5gbmy7nbSX7+Psd91Xs8yTKl29POmOCtm3boe3bd6qzs1NLljygaVOnVO0sucld6llyxzNL7nhmyR3PLLnjmSV3PLPFzq9avVavvPpa3rsqJXeMs+SOZ5bc8cyG3p26Hj7e8lHJgh2UdHc9tOI+rV3zkK7+7JUFzdY31GlXdvehz7PtHaqvr6va2ZC7yV3e3XROI3eKnUPupnMauVPsHHI3ndPIHbJzMXis0ugccjed08gda2cgJjVDHTSzh9z9w0OdP/+Cj6mjY69OPPEEPfzQIm35jxe0evXafHf3+1q+L0OPcTbkbnKXdzedC5sNuZvOhc2G3E3nwmZD7qZzYbMhd9O5sNmQu2PtXAweq8JmQ+5OMXeKnUPuTrEzEJNBD0qa2ekD3SXptEHmZkmaJUmZEaOVyYzq9z0dHXslSS+//J9a+sBDOuOM0/I+KNme7dC4xvpDnzc2jD3086pxNuRucpd3N53TyJ1i55C76ZxG7hQ7h9xN5zRyh+xcDB6rNDqH3E3nNHLH2hmISa6Xb6+XNFvSnW/7mC3p2IGG3H2Ou09094mHOyB51FFH6uijRx26fdEfna/Nm/8j79Drm1s0fvwpamoap9raWs2YMV0PLltZtbPkJnepZ8kdzyy545kldzyz5I5nltzxzA7H/FDxWKXROdbcKXaONXesnYGY5Hr5dpukz7t7v8tDmdmuoS5917tO1A/v/xdJ0oiaEVq0aKlWrnwi7/nu7m5dd/3NWrF8oUZkMpq/YLFaW5+v2llyk7vUs+SOZ5bc8cySO55ZcsczS+54Zoudv/eeu3X+eWdrzJjjtePFZt1622zNm7+o4nPHOEvueGbJHc9s6N2p6xEvdY+FDfa+BGZ2maTn3L3faYxm9jF3X5prQe3IhiE/G3gaAQAAAAAA9Nd1oL3/m09CX2r6Uw4n9fGPOxZV7PNk0DMl3f2HfT83s3MkTZK0KZ8DkgAAAAAAAADwdoO+p6SZretz+3OSviXpGEm3mNlNJc4GAAAAAAAAoArlutBNbZ/bsyRd5O63Spos6cqSpQIAAAAAAABQtXJd6CZjZsfp4MFLc/eXJcnd95tZV8nTAQAAAAAAAHniDSXjkeug5GhJT0sySW5mde6+x8yO7v1aTjwZAAAAAAAAAPSV60I3TQPc1SPp0mFPAwAAAAAAAKDq5TpT8rDc/Q1J24c5CwAAAAAAAIAE5LrQDQAAAAAAAAAMKw5KAgAAAAAAACirIb18GwAAAAAAAKg0PaEDIG9BzpRsbKzXIyvv13PPPqGNLY/pi1/4bME/Y8rkC7R505Pa0rpaN95wbdXPhtxN7nhyp9g55O4YO8+dc6d2ZzeqZcOjBe8sZu9wzMc4G3J3irlT7BxyN53T
yJ1i55C76ZxG7hQ7h9ydYmcgFubuJV1QM7Kh34K6upM0tu4kbWjZpKOPHqV1ax/Wxy/7jNratub1MzOZjNo2r9LFl1yubLZDa55aoZlXXZPXfIyz5CY3nStvd6ydzz3nTO3bt1/z5t2l0yZcmNe+Ssgd4yy545kldzyz5I5nltzxzJI7nllyxzNbrt1dB9otrzCJua7pT0t7oCsyd+1YVLHPkyBnSu7Z85I2tGySJO3bt19btmxVQ31d3vOTzpigbdt2aPv2ners7NSSJQ9o2tQpVTtLbnKXepbc8cwWO79q9Vq98upree+qlNwxzpI7nllyxzNL7nhmyR3PLLnjmSV3PLOhdwOxGPSgpJm908z+zszuMbMr3nbft4cjwMknN+q09/2+1q7bkPdMfUOddmV3H/o8296h+jwPasY4G3I3ucu7m85p5A7ZuRg8Vml0DrmbzmnkTrFzyN10TiN3ip1D7qZzGrlj7QzEJNeZkvMkmaR/lfSnZvavZnZE731nDTRkZrPMrNnMmnt69g/4w0eNOkpLFs/Vl758i15/fV/eoc36n3ma78vQY5wNuZvc5d1N58JmQ+6OtXMxeKwKmw25O8XcKXYOuZvOhc2G3E3nwmZD7qZzYbMhd9O5sNmQu1PsDMn55y3/VLJcByV/x91vcvel7j5N0jOSHjOzEwYbcvc57j7R3SdmMqMO+z01NTW6f/Fc3Xffj7V06UMFhW7PdmhcY/2hzxsbxqqjY2/VzobcTe7y7qZzGrlDdi4Gj1UanUPupnMauVPsHHI3ndPInWLnkLvpnEbuWDsDMcl1UPIIMzv0Pe7+N5LmSHpS0qAHJnOZO+dOtW15QV+/a07Bs+ubWzR+/Clqahqn2tpazZgxXQ8uW1m1s+Qmd6lnyR3P7HDMDxWPVRqdY82dYudYc6fYOdbcKXaONXeKnWPNnWLnWHPH2hmISU2O+x+U9CFJj7z5BXdfYGZ7JX1zqEs/8P4zdNXMy/Tsc61qXn/wX6yvfOV2PfTwY3nNd3d367rrb9aK5Qs1IpPR/AWL1dr6fNXOkpvcpZ4ldzyzxc7fe8/dOv+8szVmzPHa8WKzbr1ttubNX1TxuWOcJXc8s+SOZ5bc8cySO55ZcsczS+54ZkPvBmJhBb4nwjmSJkna5O55HaavGdlQ2S9gBwAAAAAAiEzXgfb+bz4J/XnTn3Acqo9v7Fhcsc+TXFffXtfn9uckfUvSMZJuMbObSpwNAAAAAAAAyFsPH2/5qGS53lOyts/tWZIucvdbJU2WdGXJUgEAAAAAAACoWrneUzJjZsfp4MFLc/eXJcnd95tZV8nTAQAAAAAAAKg6uQ5Kjpb0tCST5GZW5+57zOzo3q8BAAAAAAAAQEEGPSjp7k0D3NUj6dJhTwMAAAAAAACg6uU6U/Kw3P0NSduHOQsAAAAAAACABAzpoCQAAAAAAABQaXrkoSMgT7muvg0AAAAAAAAAw4qDkgAAAAAAAADKKshBycbGej2y8n499+wT2tjymL74hc8W/DOmTL5Amzc9qS2tq3XjDddW/WzI3eSOJ3eKnUPupnMauVPsHHI3ndPInWLnkLvpXNjs3Dl3and2o1o2PFrQ3HDs5rFKI3eKnUPuTrEzEAtzL+1r7WtGNvRbUFd3ksbWnaQNLZt09NGjtG7tw/r4ZZ9RW9vWvH5mJpNR2+ZVuviSy5XNdmjNUys086pr8pqPcZbc5KZz5e2mcxq5U+wca+4UO8eaO8XOseZOsbMknXvOmdq3b7/mzbtLp024MK+Z0LlTfaxizJ1i51hzx9C560C75RUmMdc0zeBNJfv49o4lFfs8CXKm5J49L2lDyyZJ0r59+7Vly1Y11NflPT/pjAnatm2Htm/fqc7OTi1Z8oCmTZ1StbPkJnepZ8kdzyy545kldzyz5I5nltzxzMace9XqtXrl1dfy/v5KyJ3qYxVj7hQ7x5o71s6QnI+3fFSy4O8pefLJjTrtfb+vtes25D1T31CnXdnd
hz7PtneoPs+DmjHOhtxN7vLupnMauVPsHHI3ndPInWLnkLvpnEbuFDsXK9bO5I5jNuTuFHPH2hmIyaAHJc2szsz+yczuNrMTzOz/mtlzZrbEzMYWu3zUqKO0ZPFcfenLt+j11/flPWfW/8zTfF+GHuNsyN3kLu9uOhc2G3I3nQubDbmbzoXNhtxN58JmQ+6mc2GzIXfTubDZYsXamdxxzIbcnWLuWDsDMcl1puR8Sa2Sdkl6XNKvJX1E0ipJ3xloyMxmmVmzmTX39Ow/7PfU1NTo/sVzdd99P9bSpQ8VFLo926FxjfWHPm9sGKuOjr1VOxtyN7nLu5vOaeROsXPI3XROI3eKnUPupnMauVPsXKxYO5M7jtmQu1PMHWtnICa5Dkq+y92/6e63SzrW3e9w953u/k1JJw805O5z3H2iu0/MZEYd9nvmzrlTbVte0NfvmlNw6PXNLRo//hQ1NY1TbW2tZsyYrgeXrazaWXKTu9Sz5I5nltzxzJI7nllyxzNL7nhmY85djFg7kzuOWXLHMxt6NxCLmhz39z1o+f1B7ivIB95/hq6aeZmefa5VzesP/ov1la/crocefiyv+e7ubl13/c1asXyhRmQymr9gsVpbn6/aWXKTu9Sz5I5nltzxzJI7nllyxzNL7nhmY8597z136/zzztaYMcdrx4vNuvW22Zo3f1FF5071sYoxd4qdY80da2dIPRV/eRe8yQZ7XwIzu03S37v7vrd9fbyk2939slwLakY28GwAAAAAAAAYRl0H2vu/+ST0+aZPcByqj+/uuL9inyeDninp7l/t+7mZnSNpkqRN+RyQBAAAAAAAAIC3y3X17XV9bn9O0rckHSPpFjO7qcTZAAAAAAAAAFShXO8LWdvn9ixJF7n7rZImS7qyZKkAAAAAAAAAVK2cF7oxs+N08OClufvLkuTu+82sq+TpAAAAAAAAAFSdXAclR0t6WpJJcjOrc/c9ZnZ079cAAAAAAACAitATOgDylutCN00D3NUj6dJhTwMAAAAAAACg6uU6U/Kw3P0NSduHOQsAAAAAAACABOS60A0AAAAAAAAADCsOSgIAAAAAAAAoqyG9fBsAAAAAAACoNC4PHQF5Cnam5Nw5d2p3dqNaNjw6pPkpky/Q5k1Pakvrat14w7VVPxtyN7njyZ1i55C76ZxG7hQ7h9xN5zRyx9qZv7/G81ilmDvFziF30zmN3LF2BmJh7qU9glwzsuGwC84950zt27df8+bdpdMmXFjQz8xkMmrbvEoXX3K5stkOrXlqhWZedY3a2rZW5Sy5yU3nyttN5zRyp9g51twpdo41d6ydJf7+GstjlWLuFDvHmjvFzrHmjqFz14F2yytMYq5uuoxTJfv45x0/rNjnScFnSprZScOxeNXqtXrl1deGNDvpjAnatm2Htm/fqc7OTi1Z8oCmTZ1StbPkJnepZ8kdzyy545kldzyz5I5nNvRu/v4ax2OVYu4UO8eaO8XOseaOtTMQk0EPSprZ8W/7OEHSOjM7zsyOL1PGfuob6rQru/vQ59n2DtXX11XtbMjd5C7vbjqnkTvFziF30zmN3Cl2Drk7xc7FivH3HetjlWLuFDuH3E3nNHLH2hmISa4L3fxC0s/f9rUGSc9IcknvOdyQmc2SNEuSbMRoZTKjiozZ7+f3+1q+L0OPcTbkbnKXdzedC5sNuZvOhc2G3E3nwmZD7qZzYbMhd6fYuVgx/r5jfaxSzJ1i55C76VzYbMjdKXaG1BM6APKW6+XbN0r6D0nT3P0Udz9FUrb39mEPSEqSu89x94nuPnG4D0hKUnu2Q+Ma6w993tgwVh0de6t2NuRucpd3N53TyJ1i55C76ZxG7hQ7h9ydYudixfj7jvWxSjF3ip1D7qZzGrlj7QzEZNCDku4+W9LVkr5qZv9oZsdI4a+tvr65RePHn6KmpnGqra3VjBnT9eCylVU7S25yl3qW3PHMkjueWXLHM0vueGZD7y5GjL/vWB+rFHOn2DnW
3Cl2jjV3rJ2BmOR6+bbcPSvpE2Y2VdJPJR01HIvvvedunX/e2Roz5njteLFZt942W/PmL8prtru7W9ddf7NWLF+oEZmM5i9YrNbW56t2ltzkLvUsueOZJXc8s+SOZ5bc8cyG3s3fX+N4rFLMnWLnWHOn2DnW3LF2BmJiBb4nwrmSzpe0zt3zOkxfM7Ih+JmVAAAAAAAA1aTrQHv/N5+EPtN0Gceh+vjejh9W7PMk19W31/W5/TlJ35A0QtItZnZTibMBAAAAAAAAqEK5Xr5d2+f2LEmT3f1lM5staY2k20uWDAAAAAAAACiAh78UCvKU66BkxsyO08EzKs3dX5Ykd99vZl0lTwcAAAAAAACg6uQ6KDla0tOSTJKbWZ277zGzo3u/BgAAAAAAAAAFGfSgpLs3DXBXj6RLhz0NAAAAAAAAgKqX60zJw3L3NyRtH+YsAAAAAAAAABIwpIOSAAAAAAAAQKXpCR0AecuEDgAAAAAAAAAgLRyUBAAAAAAAAFBWwQ5KTpl8gTZvelJbWlfrxhuuLet8jLMhd5M7ntzFzM6dc6d2ZzeqZcOjBc0Nx24eqzQ6h9xN5zRyp9g55G46p5E7xc4hd9M5jdwpdg65O8XOQCzM3Uu6oGZkQ78FmUxGbZtX6eJLLlc226E1T63QzKuuUVvb1rx+ZjHzMc6Sm9zl6HzuOWdq3779mjfvLp024cK8Ziohd4y/7xQ7x5o7xc6x5k6xc6y5U+wca+4UO8eaO8XOseZOsXOsuWPo3HWg3fIKk5hPNn28tAe6IrNgx79W7PMkyJmSk86YoG3bdmj79p3q7OzUkiUPaNrUKWWZj3GW3OQu9awkrVq9Vq+8+lre318puWP8fafYOdbcKXaONXeKnWPNnWLnWHOn2DnW3Cl2jjV3ip1jzR1rZ0g97nz0+ahkQQ5K1jfUaVd296HPs+0dqq+vK8t8jLMhd5O7vLtDdi4GjxWdK3k3ndPInWLnkLvpnEbuFDuH3E3nNHKn2Dnk7hQ7AzEZ9KCkmV3c5/ZoM/sXM3vWzBaa2buGutSs/5mjhbyMvJj5GGdD7iZ3eXeH7FwMHqvyzYbcnWLuFDuH3E3nwmZD7qZzYbMhd9O5sNmQu+lc2GzI3XQubDbk7hQ7AzHJdabk3/a5faekDklTJa2X9N2Bhsxslpk1m1lzT8/+fve3Zzs0rrH+0OeNDWPV0bE379DFzMc4G3I3ucu7O2TnYvBY0bmSd9M5jdwpdg65m85p5E6xc8jddE4jd4qdQ+5OsTMQk0Jevj3R3W9295+7+9ckNQ30je4+x90nuvvETGZUv/vXN7do/PhT1NQ0TrW1tZoxY7oeXLYy7yDFzMc4S25yl3q2WDxWdK7k3XROI3eKnWPNnWLnWHOn2DnW3Cl2jjV3ip1jzR1rZyAmNTnuP8nMviTJJL3TzMx/e87wkN+Psru7W9ddf7NWLF+oEZmM5i9YrNbW58syH+Msucld6llJuveeu3X+eWdrzJjjtePFZt1622zNm7+o4nPH+PtOsXOsuVPsHGvuFDvHmjvFzrHmTrFzrLlT7Bxr7hQ7x5o71s6QeKF7PGyw9yXz5hVrAAAgAElEQVQws1ve9qVvu/vLZlYn6e/d/c9yLagZ2cDzAQAAAAAAYBh1HWjv/+aT0MyT/5jjUH3c+/MfVezzZNAzJd391r6fm9k5ZnaVpE35HJAEAAAAAAAAgLfLdfXtdX1uXy3pW5KOkXSLmd1U4mwAAAAAAAAAqlCu94Ws7XP785Iu6j17crKkK0uWCgAAAAAAAEDVynWhm4yZHaeDBy/N3V+WJHffb2ZdJU8HAAAAAAAAoOrkOig5WtLTOnj1bTezOnffY2ZH934NAAAAAAAAqAg9XH87GrkudNM0wF09ki4d9jQAAAAAgAFlbOjnhvQ4/6EOAKgcuc6UPCx3f0PS9mHOAgAAAAAAACABuS50AwAAAAAAAADDioOSAAAAAAAAAMpqSC/fBgAAAAAAACqNc6Gb
aAQ7U3LunDu1O7tRLRseHdL8lMkXaPOmJ7WldbVuvOHaqp8NuZvc8eQuZjbWfydD7qZzGrlT7BxyN53TyJ1i55C76Vy9ued8d7ayu1q04ZlHDn3t43/8EbVseFS/+fVOnX76H1Rk7uGaDbmbzuXLzX+nDG03EAPzEl+BrWZkw2EXnHvOmdq3b7/mzbtLp024sKCfmclk1LZ5lS6+5HJlsx1a89QKzbzqGrW1ba3KWXKTuxydY/x3MuRuOqeRO8XOseZOsXOsuVPsHGvuFDvHkLvv1bfPefPvb9/7uiac/keSpN/7vfHq6enR3d+6Q//7pr/WM888e+j7B7r6dqV3rrTddC5vbv47ZeDZrgPtNsCPSNrlJ3+MUyX7uO/nSyv2eRLsTMlVq9fqlVdfG9LspDMmaNu2Hdq+fac6Ozu1ZMkDmjZ1StXOkpvcpZ6V4vx3MuRuOqeRO8XOseZOsXOsuVPsHGvuFDvHlnv16rV69W1/f9uy5QU9//yLee0MlXs4ZmPNnWLnYuf575TCdwOxKPigpJmdUIoghahvqNOu7O5Dn2fbO1RfX1e1syF3k7u8u0N2LgaPFZ0reTed08idYueQu+mcRu4UO4fcneLf5VJ8rFLsPBzzQxVr55B/HgDlNOhBSTO73czG9N6eaGYvSlprZj83s/PLkvDwufp9Ld+Xocc4G3I3ucu7O2TnYvBYlW825O4Uc6fYOeRuOhc2G3I3nQubDbmbzoXNhtyd4t/lUnysUuw8HPNDFWvnkH8eVIMePt7yUclynSn5EXf/Re/tf5D0J+4+XtJFku4caMjMZplZs5k19/TsH6aov9We7dC4xvpDnzc2jFVHx96qnQ25m9zl3R2yczF4rOhcybvpnEbuFDuH3E3nNHKn2Dnk7hT/LpfiY5Vi5+GYH6pYO4f88wAop1wHJWvNrKb39pHuvl6S3P15SUcMNOTuc9x9ortPzGRGDVPU31rf3KLx409RU9M41dbWasaM6Xpw2cqqnSU3uUs9WyweKzpX8m46p5E7xc6x5k6xc6y5U+wcc+5ixNo5xtwpdh6O+aGKtXPIPw+AcqrJcf/dklaY2e2SHjazr0v6kaQLJbUUs/jee+7W+eedrTFjjteOF5t1622zNW/+orxmu7u7dd31N2vF8oUakclo/oLFam19vmpnyU3uUs9Kcf47GXI3ndPInWLnWHOn2DnW3Cl2jjV3ip1jy33P97+l83r//vbitvW67a/v1KuvvKavfe2vdeKJx+uBpQu08dnN+uhHZ1ZU7uGYjTV3ip2Lnee/UwrfDcTCcr0vgZldIOl/SjpVBw9i7pK0VNI8d+/MtaBmZANvfAAAAAAAwyBj/d9rLl89vCcdUFW6DrQP/Q+EKvYnJ3+MP+z6WPzzpRX7PMl1pqTc/QlJT0iSmZ0raZKkHfkckAQAAAAAAACAtxv0oKSZrXP3Sb23r5Z0rQ6eJXmLmZ3u7reXISMAAAAAAACQU484UTIWOS900+f25yVNdvdbJU2WdGXJUgEAAAAAAACoWrlevp0xs+N08OClufvLkuTu+82sq+TpAAAAAAAAAFSdXAclR0t6WpJJcjOrc/c9ZnZ079cAAAAAAGXCxWoAANVi0IOS7t40wF09ki4d9jQAAAAAAAAAql7Oq28fjru/IWn7MGcBAAAAAAAAhsy50E00cl3oBgAAAAAAAACGFQclAQAAAAAAAJQVByUBAAAAAAAAlFWwg5JTJl+gzZue1JbW1brxhmvLOh/jbMjd5I4nd4qdQ+6mcxq5U+wccjed08idYueQu+mcRu5iZufOuVO7sxvVsuHRguaGYzePVRqdQ+5OsTMQC3Mv7RuA1oxs6Lcgk8mobfMqXXzJ5cpmO7TmqRWaedU1amvbmtfPLGY+xllyk5vOlbebzmnkTrFzrLlT7Bxr7hQ7x5o7xc6x5i6287nnnKl9+/Zr3ry7dNqEC/OaqYTcMf6+U+wca+4YOncdaLe8wiTmj0+expVu+vjR
z/+tYp8nQc6UnHTGBG3btkPbt+9UZ2enlix5QNOmTinLfIyz5CZ3qWfJHc8sueOZJXc8s+SOZ5bc8cySO55ZSVq1eq1eefW1vL+/UnLH+PtOsXOsuWPtDMRk0IOSZvaMmd1sZr8znEvrG+q0K7v70OfZ9g7V19eVZT7G2ZC7yV3e3XROI3eKnUPupnMauVPsHHI3ndPInWLnkLtDdi4GjxWdK3l3ip2BmOQ6U/I4ScdKetzM1pnZX5hZfa4famazzKzZzJp7evYf7v5+XyvkZeTFzMc4G3I3ucu7m86FzYbcTefCZkPupnNhsyF307mw2ZC76VzYbMjddC5sNuTukJ2LwWNVvtmQu1PMHWtnICa5Dkq+6u5fdvd3S/pfkn5X0jNm9riZzRpoyN3nuPtEd5+YyYzqd397tkPjGn97bLOxYaw6OvbmHbqY+RhnQ+4md3l30zmN3Cl2DrmbzmnkTrFzyN10TiN3ip1D7g7ZuRg8VnSu5N0pdgZikvd7Srr7Kne/RlKDpDsknT3UpeubWzR+/Clqahqn2tpazZgxXQ8uW1mW+RhnyU3uUs+SO55ZcsczS+54Zskdzyy545kldzyzxeKxonMl706xMxCTmhz3P//2L7h7t6SHez+GpLu7W9ddf7NWLF+oEZmM5i9YrNbWfqtKMh/jLLnJXepZcsczS+54Zskdzyy545kldzyz5I5nVpLuvedunX/e2Roz5njteLFZt942W/PmL6r43DH+vlPsHGvuWDuDl7rHxAp8T4RzJE2StMnd8zpMXzOygWcDAAAAAADAMOo60N7/zSehS989leNQffx454MV+zzJdfXtdX1uf07StyQdI+kWM7upxNkAAAAAAAAAVKFc7ylZ2+f2LEkXufutkiZLurJkqQAAAAAAAABUrVzvKZkxs+N08OClufvLkuTu+82sq+TpAAAAAAAAAFSdXAclR0t6WpJJcjOrc/c9ZnZ079cAAAAAAACAitAj3lIyFoMelHT3pgHu6pF06bCnAQAAAAAAAFD1cp0peVju/oak7cOcBQAAAAAAAEACcl3oBgAAAAAAAACGFQclAQAAAAAAAJTVkF6+DQAAAAAAAFSantABkLdgZ0pOmXyBNm96UltaV+vGG64t63yMsyF3kzue3Cl2DrmbzmnkTrFzyN10TiN3ip1D7qZzGrlT7BxyN53Ll3vunDu1O7tRLRseLXhnMXuLnQ29G4iBuZf2Uuk1Ixv6LchkMmrbvEoXX3K5stkOrXlqhWZedY3a2rbm9TOLmY9xltzkpnPl7aZzGrlT7Bxr7hQ7x5o7xc6x5k6xc6y5U+wca+4UOxc7f+45Z2rfvv2aN+8unTbhwrz2DcfeGB6rrgPtlleYxEx990dLe6ArMg/uXFaxz5MgZ0pOOmOCtm3boe3bd6qzs1NLljygaVOnlGU+xllyk7vUs+SOZ5bc8cySO55ZcsczS+54Zskdzyy545lNNfeq1Wv1yquv5b1ruPbG+lgBMQlyULK+oU67srsPfZ5t71B9fV1Z5mOcDbmb3OXdTec0cqfYOeRuOqeRO8XOIXfTOY3cKXYOuZvOaeROsfNwzA9VrJ1D/b6Achv0oKSZTTSzx83sXjMbZ2Y/NbNfmtl6M5sw1KVm/c8cLeRl5MXMxzgbcje5y7ubzoXNhtxN58JmQ+6mc2GzIXfTubDZkLvpXNhsyN10Lmw25G46FzYbcjedC5sdjvmhirVzqN8XUG65rr79bUm3SDpW0v8n6S/c/SIzu7D3vrMPN2RmsyTNkiQbMVqZzKi33N+e7dC4xvpDnzc2jFVHx968QxczH+NsyN3kLu9uOqeRO8XOIXfTOY3cKXYOuZvOaeROsXPI3XROI3eKnYdjfqhi7Rzq91UtXBzAjUWul2/XuvtD7n6fJHf3H+rgjUclvWOgIXef4+4T3X3i2w9IStL65haNH3+KmprGqba2VjNmTNeDy1bmHbqY+RhnyU3uUs+SO55ZcsczS+54Zskd
zyy545kldzyz5I5nNtXcxYi1c6jfF1Buuc6U/I2ZTZY0WpKb2cfcfamZnS+pe6hLu7u7dd31N2vF8oUakclo/oLFam19vizzMc6Sm9ylniV3PLPkjmeW3PHMkjueWXLHM0vueGbJHc9sqrnvvedunX/e2Roz5njteLFZt942W/PmLyr53lgfKyAmNtj7EpjZaZLukNQj6S8k/U9JfyZpt6RZ7v7vuRbUjGzgvFkAAAAAAIBh1HWgvf+bT0IfffdHOA7Vx7Kdyyv2eTLomZLu3iLp0HXnzeyHknZKei6fA5IAAAAAAAAA8HaDHpQ0s3XuPqn39uckXSNpqaRbzOx0d7+9DBkBAAAAAACAnHq40E00cl7ops/tWZImu/utkiZLurJkqQAAAAAAAABUrVwXusmY2XE6ePDS3P1lSXL3/WbWVfJ0AAAAAAAAAKpOroOSoyU9Lcl08Orbde6+x8yO7v0aAAAAAAAAABQk14Vumga4q0fSpcOeBgAAAABQdYo5o4V3hwOA6pTrTMnDcvc3JG0f5iwAAAAAAADAkLnzf2XEIteFbgAAAAAAAABUITMbZ2aPm1mbmW02s+t6v368mf3UzLb2/u9xvV83M/uGmb1gZs+a2elD3c1BSQAAAAAAACBNXZL+l7v/d0lnSbrWzN4r6SZJj7r770p6tPdzSfqwpN/t/Zgl6Z+GupiDkgAAAAAAAECC3L3D3Z/pvf26pDZJDZKmS1rQ+20LJH2s9/Z0Sd/3g9ZIOtbMxg5ld7CDknPn3Knd2Y1q2fDokOanTL5Amzc9qS2tq3XjDddW/WzI3eSOJ3eKnUPupnMauVPsHHI3ndPInWLnkLvpnEbuWDtf9+efU0vLY9qw4VHdc8/dOuKII8q2O8bZkLtTzB1rZ6AvM5tlZs19PmYN8r1NkiZIWivpXe7eIR08cCnppN5va5C0q89YtvdrhWcr9RuA1oxsOOyCc885U/v27de8eXfptAkXFvQzM5mM2jav0sWXXK5stkNrnlqhmVddo7a2rVU5S25y07nydtM5jdwpdo41d4qdY82dYudYc6fYOdbcMXQ+3NW36+vr9MTjP9YfvO+D+s1vfqOFC7+jhx96TN+/Z8lbvm+g/2KN8fcdw2NF7ng6dx1oL+bC9lVryrgPc6WbPn6y66G8nidmdrSkn0n6G3f/kZm95u7H9rn/VXc/zsyWS/o7d1/d+/VHJd3o7k8Xmi3YmZKrVq/VK6++NqTZSWdM0LZtO7R9+051dnZqyZIHNG3qlKqdJTe5Sz1L7nhmyR3PLLnjmSV3PLPkjmeW3PHMht5dU1OjI498h0aMGKGjjjxSuzv2VHzuFB+rFHPH2hkYCjOrlfSvkn7g7j/q/fLeN1+W3fu/L/V+PStpXJ/xRkm7h7I3yveUrG+o067sb/tm2ztUX19XtbMhd5O7vLvpnEbuFDuH3E3nNHKn2DnkbjqnkTvFziF3p9h59+49+trXvqMXt63Trp0b9Ktf/UqPPPJkxedO8bFKMXesnYFCmZlJ+hdJbe7+j33u+jdJn+y9/UlJD/T5+p/1XoX7LEm/fPNl3oUa9KCkmR1tZrf1XhL8l2b2spmtMbNP5Zg79Hr1np79Q8k1qIO/r7fK92XoMc6G3E3u8u6mc2GzIXfTubDZkLvpXNhsyN10Lmw25G46FzYbcjedC5sNuTvFzsceO1pTp07R7556lt598uk6atRRuuKKP85rttjdMc6G3J1i7lg7A0PwAUlXSfqQmbX0flwi6XZJF5nZVkkX9X4uSSskvSjpBUlzJV0z1MU1Oe7/gaQfS5oiaYakUZIWSbrZzE519/9zuCF3nyNpjjTwe0oWoz3boXGN9Yc+b2wYq46OvVU7G3I3ucu7m85p5E6xc8jddE4jd4qdQ+6mcxq5U+wccneKnS+88Fzt2LFTv/jFK5KkpUsf0tlnTdTChT/KMRk2d4qPVYq5Y+0MFKr3vSEHet/JfheB8YNHyIfl6ku5Xr7d5O7z
3T3bewrnNHffKunTkvL/v7CG2frmFo0ff4qamsaptrZWM2ZM14PLVlbtLLnJXepZcsczS+54Zskdzyy545kldzyz5I5nNuTuXTvbNenM03Xkke+QJH3og+doy5b8LiISMneKj1WKuWPtDMQk15mS+83sHHdfbWZTJb0iSe7eY4c7n7gA995zt84/72yNGXO8drzYrFtvm6158xflNdvd3a3rrr9ZK5Yv1IhMRvMXLFZr6/NVO0tucpd6ltzxzJI7nllyxzNL7nhmyR3PLLnjmQ25e936DfrRj5Zr3bqfqKurSxtbNmvuP/+g4nOn+FilmDvWzpBcvNQ9FjbY+xKY2ft08PXhp0raJOkz7v68mZ0o6XJ3/0auBaV4+TYAAAAAIB7FnNHCf1ACh9d1oL2ok8Wq1eRxF/PHRh8rdz1csc+TQc+UdPeNkia9+bmZnWNmH5W0KZ8DkgAAAAAAAADwdrmuvr2uz+2rJX1L0jGSbjGzm0qcDQAAAAAAAEAVynWhm9o+tz8v6SJ3v1XSZElXliwVAAAAAAAAgKqV60I3GTM7TgcPXpq7vyxJ7r7fzLpKng4AAAAAAADIUw/vRBuNXAclR0t6Wgffl9jNrM7d95jZ0crzvYp5Q2OgsmRs6P9W9gxyYSwAAABgIMX8LbImM2LIs1093UVsBgCUUq4L3TQNcFePpEuHPQ0AAAAAAACAqpfrTMnDcvc3JG0f5iwAAAAAAAAAEpDrQjcAAAAAAAAAMKyGdKYkAAAAAAAAUGmcayFEI9iZktf9+efU0vKYNmx4VPfcc7eOOOKIguanTL5Amzc9qS2tq3XjDddW/WzI3eSOJ3exnb/whc9qwzOPqGXDo/riFz9btt08Vml0DrmbzmnkTrFzyN10TiN3ip1D7qZz7tnGxrH6yU8WqaXlUT3zzCO69trPSJKOO260li//gTZt+pmWL/+Bjj12dEXlHq7ZYuYbG+v1yMr79dyzT2hjy2P64hfK9/f9YudjnA29G4iBlfoIcu3Ihn4L6uvr9MTjP9YfvO+D+s1vfqOFC7+jhx96TN+/Z8lbvm+gZJlMRm2bV+niSy5XNtuhNU+t0MyrrlFb29aceWKcJTe5h3N2oKtv/4/3/jfde+/dev8HPqoDBzq1bNm9+uIX/49eeOG3bx870NW3eazoXK25U+wca+4UO8eaO8XOseZOsXOsuau9c9+rb9fVnaS6upPU0rJJRx89Sk89tVyf+MTndNVVn9Crr76m2bO/rS9/+Rode+xo3Xzz3w149e1K71yK+bq6kzS27iRt6P3drVv7sD5+2WcqPneMs+Xa3XWg/fD/cZe4Cxsnc6pkH49mV1bs8yTYmZI1NTU68sh3aMSIETrqyCO1u2NP3rOTzpigbdt2aPv2ners7NSSJQ9o2tQpVTtLbnKXelaSfu/3xmvt2g369a9/o+7ubq16co2mT7+44nPH+PtOsXOsuVPsHGvuFDvHmjvFzrHmTrFzrLlT6rxnz0tqadkkSdq3b7+2bHlBDQ11mjr1It177w8lSffe+0NNmza5onIPx2yx83v2vKQNb/ndbVVDfV3F545xNvRuIBZBDkru3r1HX/vad/TitnXatXODfvWrX+mRR57Me76+oU67srsPfZ5t71B9nn+Yxjgbcje5y7s7ZOfNrf+hc889U8cff6yOPPIduvjiD6mxsb7ic8f4+06xc8jddE4jd4qdQ+6mcxq5U+wccjedC8998smNOu20/6F16zbopJPGaM+elyQdPPh24oljKjJ3yMeqr5NPbtRp7/t9rV23oSx7Y/x9x9oZiMmgByXNbLSZ3W5mW8zsP3s/2nq/duxQlx577GhNnTpFv3vqWXr3yafrqFFH6Yor/jjveTvMy0/zfRl6jLMhd5O7vLtDdt6y5QX9w+xv66EV92nZg/fq2eda1dXVVfLdPFaFzYbcnWLuFDuH3E3nwmZD7qZzYbMhd9O5sNmQu+lc2OyoUUfpvvu+qy9/+Va9/vq+
vGaGa3esj9WbRo06SksWz9WXvnxL3r+7FJ9jsXYGYpLrTMklkl6VdIG7n+DuJ0j6YO/X7h9oyMxmmVmzmTX39Ozvd/+FF56rHTt26he/eEVdXV1auvQhnX3WxLxDt2c7NK7PGVyNDWPV0bG3amdD7iZ3eXeH7CxJ8+cv0plnfVgX/tFlevWV197yfpKVmjvG33eKnUPupnMauVPsHHI3ndPInWLnkLvpnP9sTU2NFi36rhYt+rEeeOBhSdJLL/1CdXUnSTr43okvv/yListd7OxwzNfU1Oj+xXN1330/1tKlD5Vtb4y/71g7Q+qR89Hno5LlOijZ5O53uPuhN3x09z3ufoekdw805O5z3H2iu0/MZEb1u3/XznZNOvN0HXnkOyRJH/rgOdqyJb83i5Wk9c0tGj/+FDU1jVNtba1mzJiuB5etrNpZcpO71LNvOvHEEyRJ48bV62Mf+7AWL36g4nPH+PtOsXOsuVPsHGvuFDvHmjvFzrHmTrFzrLlT6/zd7/6Dtmx5Qd/4xj8f+tqyZT/VzJmXSZJmzrxMDz7404rLXezscMzPnXOn2ra8oK/fNSfvmdC5Y5wNvRuIRU2O+39uZjdKWuDueyXJzN4l6VOSdg116br1G/SjHy3XunU/UVdXlza2bNbcf/5B3vPd3d267vqbtWL5Qo3IZDR/wWK1tj5ftbPkJnepZ9+0eNEcnXDCcers7NKfX/dXeu21X1Z87hh/3yl2jjV3ip1jzZ1i51hzp9g51twpdo41d0qd3//+M3TllR/Xc8+1ae3ag2f6ffWrf6/Zs7+tH/zgn/SpT/2Jdu3arSuu+H8qKvdwzBY7/4H3n6GrZl6mZ59rVfP6gwe4vvKV2/XQw49VdO4YZ0PvBmJhg70vgZkdJ+kmSdMlvUuSS9or6d8k3eHur+RaUDuyYcjnilb2SaZAnDLW//1J8tXD+5gAAACgzGoyI4Y829XTPYxJgMrSdaB96P9xV8U+2HgR/+Hax+PZn1bs8yTXy7dPlfS37v57khokfUvStt77+NMdAAAAAAAAQMFyHZT8nqQ3r1TzdUnHSLpd0huS5pUwFwAAAAAAAFAQ55+3/FPJcr2nZMbdu3pvT3T303tvrzazlhLmAgAAAAAAAFClcp0pucnMPt17e6OZTZQkMztVUmdJkwEAAAAAAACoSrkOSl4t6Xwz2ybpvZKeMrMXJc3tvQ8AAAAAAAAACjLoy7fd/ZeSPmVmx0h6T+/3Z919b74LKvvV60B6uII2AAAAYsIVtAGgOuV6T0lJkru/LmljibMAAAAAAAAAQ8aJOPHI9fJtAAAAAAAAABhWHJQEAAAAAAAAUFYclAQAAAAAAABQVsEOSk6ZfIE2b3pSW1pX68Ybri3rfIyzIXeTO57cKXYOuZvOaeROsXPI3XROI3eKnUPupnMauVPsHHI3ndPIHWtnIBbmJX4D0JqRDf0WZDIZtW1epYsvuVzZbIfWPLVCM6+6Rm1tW/P6mcXMxzhLbnLTufJ20zmN3Cl2jjV3ip1jzZ1i51hzp9g51twpdo41d4qdY80dQ+euA+2WV5jEnNdwIVe66ePJ9kcr9nkS5EzJSWdM0LZtO7R9+051dnZqyZIHNG3qlLLMxzhLbnKXepbc8cySO55ZcsczS+54Zskdzyy545kldzyz5I5nNvTu1Dkfb/moZEEOStY31GlXdvehz7PtHaqvryvLfIyzIXeTu7y76ZxG7hQ7h9xN5zRyp9g55G46p5E7xc4hd9M5jdwpdg65O8XOQEyGfFDSzB4qYrbf1wp5GXkx8zHOhtxN7vLupnNhsyF307mw2ZC76VzYbMjddC5sNuRuOhc2G3I3nQubDbmbzoXNhtxN58JmQ+5OsTMQk5rB7jSz0we6S9Jpg8zNkjRLkmzEaGUyo95yf3u2Q+Ma6w993tgwVh0de/OMXNx8jLMhd5O7vLvpnEbuFDuH3E3nNHKn2DnkbjqnkTvFziF30zmN3Cl2Drk7xc5ATHKd
Kble0mxJd77tY7akYwcacvc57j7R3Se+/YCkJK1vbtH48aeoqWmcamtrNWPGdD24bGXeoYuZj3GW3OQu9Sy545kldzyz5I5nltzxzJI7nllyxzNL7nhmyR3PbOjdQCwGPVNSUpukz7t7v8tDmdmuoS7t7u7WddffrBXLF2pEJqP5CxartfX5sszHOEtucpd6ltzxzJI7nllyxzNL7nhmyR3PLLnjmSV3PLPkjmc29O7U9VT85V3wJhvsfQnM7DJJz7n7fxzmvo+5+9JcC2pGNvBsAAAAAAAAGEZdB9r7v/kk9IGGD3Ecqo9/b3+sYp8nuc6U3CWpQ5LM7EhJfylpgqRWSX9b2mgAAAAAAAAAqlGu95T8nqQ3em/fJemdku7o/dq8EuYCAAAAAAAAUKVynSmZcfeu3tsT3f3Nq3GvNrOWEuYCAAAAAAAAUKVyHZTcZGafdvd5kjaa2UR3bzazUyV1liEfAAAAAAAAkBcudBOPXC/fvlrS+Wa2TdJ7JT1lZi9Kmtt7H0Q7nxMAACAASURBVAAAAAAAAAAUZNAzJd39l5I+ZWbHSHpP7/dn3X1vOcIBAAAAAAAAqD65Xr4tSXL31yVtLHEWAAAAAAAAAAnI9fJtAAAAAAAAABhWHJQEAAAAAAAAUFZ5vXwbAAAAAAAAqHTuXH07FkHOlGxsrNcjK+/Xc88+oY0tj+mLX/hswT9jyuQLtHnTk9rSulo33nBt1c+G3E3ueHKn2DnkbjqnkTvFziF30zmN3Cl2DrmbzmnkTrFzyN10TiN3rJ2BWFipjyDXjGzot6Cu7iSNrTtJG1o26eijR2nd2of18cs+o7a2rXn9zEwmo7bNq3TxJZcrm+3QmqdWaOZV1+Q1H+MsuclN58rbTec0cqfYOdbcKXaONXeKnWPNnWLnWHOn2DnW3Cl2jjV3DJ27DrRbXmESc1b9BZwq2cea3U9U7PMkyJmSe/a8pA0tmyRJ+/bt15YtW9VQX5f3/KQzJmjbth3avn2nOjs7tWTJA5o2dUrVzpKb3KWeJXc8s+SOZ5bc8cySO55ZcsczS+54Zskdzyy545kNvRuIxaAHJc3snWb2d2Z2j5ld8bb7vj0cAU4+uVGnve/3tXbdhrxn6hvqtCu7+9Dn2fYO1ed5UDPG2ZC7yV3e3XROI3eKnUPupnMauVPsHHI3ndPInWLnkLvpnEbuFDuH3J1iZyAmuS50M0/SVkn/KukzZvZxSVe4+39JOmugITObJWmWJNmI0cpkRh32+0aNOkpLFs/Vl758i15/fV/eoc36n3ma78vQY5wNuZvc5d1N58JmQ+6mc2GzIXfTubDZkLvpXNhsyN10Lmw25G46FzYbcjedC5sNuZvOhc2G3J1iZ0g94ncVi1wv3/4dd7/J3Ze6+zRJz0h6zMxOGGzI3ee4+0R3nzjQAcmamhrdv3iu7rvvx1q69KGCQrdnOzSusf7Q540NY9XRsbdqZ0PuJnd5d9M5jdwpdg65m85p5E6xc8jddE4jd4qdQ+6mcxq5U+wccneKnYGY5DooeYSZHfoed/8bSXMkPSlp0AOTucydc6fatrygr981p+DZ9c0tGj/+FDU1jVNtba1mzJiuB5etrNpZcpO71LPkjmeW3PHMkjueWXLHM0vueGbJHc8sueOZJXc8s6F3A7HI9fLtByV9SNIjb37B3ReY2V5J3xzq0g+8/wxdNfMyPftcq5rXH/wX6ytfuV0PPfxYXvPd3d267vqbtWL5Qo3IZDR/wWK1tj5ftbPkJnepZ8kdzyy545kldzyz5I5nltzxzJI7nllyxzNL7nhmQ+8GYmGDvS+BmZ0paYu7/9LMjpT0l5ImSGqV9Lfu/stcC2pGNvBifgAAAAAAgGHUdaC9/5tPQpPqz+c4VB/rdv+sYp8nuV6+/T1J+3tv3yXpnZLukPSGDl4EBwAAAAAAAKgIzj9v+aeS5Xr5dsbdu3pvT3T303tvrzazlhLmAgAAAAAAAFClcp0pucnMPt17
e6OZTZQkMztVUmdJkwEAAAAAAACoSrkOSl4t6Xwz2ybpvZKeMrMXJc3tvQ8AAAAAAAAACjLoy7d7L2TzKTM7RtJ7er8/6+57yxEOAAAAAAAAQPXJ9Z6SkiR3f13SxhJnAQAAAAAAAIbMvbIv7oLfyvXybQAAAAAAAAAYVhyUBAAAAAAAAFBWHJQEAAAAAAAAUFbBDkpOmXyBNm96UltaV+vGG64t63yMsyF3kzue3Cl2Drk7xc5z59yp3dmNatnwaEFzw7E7xtmQu1PMnWLnkLvpXL7csf7ZG3J3irlT7BxyN53TyB1rZyAWVuo3AK0Z2dBvQSaTUdvmVbr4ksuVzXZozVMrNPOqa9TWtjWvn1nMfIyz5CY3nStvd4qdJencc87Uvn37NW/eXTptwoV5zYTOneJjlWLuFDvHmjvFzsXOx/hnb8jdKeZOsXOsuVPsHGvuGDp3HWi3vMIkZuLYc7nSTR/NHasq9nkS5EzJSWdM0LZtO7R9+051dnZqyZIHNG3qlLLMxzhLbnKXepbc8cyG3r1q9Vq98upreX9/JeRO8bFKMXeKnWPNnWLnYudj/LM35O4Uc6fYOdbcKXaONXesnSH1yPno81HJghyUrG+o067s7kOfZ9s7VF9fV5b5GGdD7iZ3eXfTOY3csXYuVoy/71gfqxRzp9g55G46lzd3MWLtTG46V/JuOqeRO9bOQEwGPShpZnVm9k9mdreZnWBm/9fMnjOzJWY2dqhLzfqfOVrIy8iLmY9xNuRucpd3N50Lmw25O8XOxYrx9x3rY5Vi7hQ7h9xN58Jmh2N+qGLtTO7yzYbcnWLuFDuH3J1iZyAmuc6UnC+pVdIuSY9L+rWkj0haJek7Aw2Z2Swzazaz5p6e/f3ub892aFxj/aHPGxvGqqNjb96hi5mPcTbkbnKXdzed08gda+dixfj7jvWxSjF3ip1D7qZzeXMXI9bO5KZzJe+mcxq5Y+0MxCTXQcl3ufs33f12Sce6+x3uvtPdvynp5IGG3H2Ou09094mZzKh+969vbtH48aeoqWmcamtrNWPGdD24bGXeoYuZj3GW3OQu9Sy545kNvbsYMf6+Y32sUsydYudYc6fYeTjmhyrWzuSmcyXvpnMauWPtDMSkJsf9fQ9afn+Q+wrS3d2t666/WSuWL9SITEbzFyxWa+vzZZmPcZbc5C71LLnjmQ29+9577tb5552tMWOO144Xm3XrbbM1b/6iis6d4mOVYu4UO8eaO8XOxc7H+GdvyN0p5k6xc6y5U+wca+5YO4OXusfEBnuwzOw2SX/v7vve9vXxkm5398tyLagZ2cCzAQAAAAAAYBh1HWjv/+aT0IS6D3Acqo8Ne/69Yp8nuc6UXK7eMyLN7EhJN0k6XQffZ/KzpY0GAAAAAAAAoBrlegn29yS90Xv7LkmjJd3R+7V5JcwFAAAAAAAAoErlfE9Jd+/qvT3R3U/vvb3azFpKmAsAAAAAAABAlcp1UHKTmX3a3edJ2mhmE9292cxOldRZhnwAAAAAAABAXnrEW0rGItfLt6+WdL6ZbZP0XklPmdmLkub23gcAAAAAAAAABRn0TEl3/6WkT5nZMZLe0/v9WXffm++CYi7xw7FtAAAAAAAAoPrkevm2JMndX5e0scRZAAAAAAAAACQg18u3AQAAAAAAAGBYcVASAAAAAAAAQFnl9fJtAAAAAAAAoNI5VyiJRrAzJUePfqcWLZqj5577mZ599gmddeYfFjQ/ZfIF2rzpSW1pXa0bb7i26mdD7iZ3PLlT7BxyN53TyJ1i55C76ZxG7hQ7h9xN5zRyp9g55G46p5E71s5ALMy9tEeQa0c2HHbB9/7l61q9eq2+N+8+1dbW/v/t3Xt03XWZ7/HPs5PdpK1tEcqhTdIrsc4IgxRTrCi2gLaoFHTGU2aGmRFHDmeNKOAwdJyxI96OB0cY0VmytEgpBwbaigr2AhaKYylC20BT6N3ebJOGYkUKtLpIk+f8QSyFptl7J9n7m+/+
vl+u31pJ9n76fD7ZlYVf90WDBg3UgQMvveE+x0uWyWS0acNjuvDDf6Xm5lY9+cRS/c3fflqbNv0qZ54YZ8lNbjr3v910TiN3ip1jzZ1i51hzp9g51twpdo41d4qdY82dYudYc8fQ+fCrLZZXmMScMeI9PFXyKM8890S//XsS5JmSQ4a8Re9737s19457JUltbW3HHEh25+xJE7V9+y7t3LlbbW1tWrjwAV08Y3rZzpKb3MWeJXc8s+SOZ5bc8cySO55ZcsczS+54Zskdzyy545kNvRuIRcGHkmb2P3q7dPz4Mdq//7e6/Qff0prVP9P3v/dNDRo0MO/5mtoR2tO898j3zS2tqqkZUbazIXeTu7S76ZxG7hQ7h9xN5zRyp9g55G46p5E7xc4hd9M5jdwpdg65O8XOQEy6PZQ0sxPfdJ0kabWZvdXMTuzp0sqKCk2c+Gf6/vf/nyadPV0HDx7SrFmfyXve7Nhnnub7MvQYZ0PuJndpd9O5sNmQu+lc2GzI3XQubDbkbjoXNhtyN50Lmw25m86FzYbcTefCZkPupnNhsyF3p9gZUoc711FXf5brmZL7JT111NUoqVbS051fd8nMrjSzRjNr7Og4eMztzS2tam5u1eo1ayVJP/rxEk0888/yDt3S3KpRdTVHvq+rHanW1n1lOxtyN7lLu5vOaeROsXPI3XROI3eKnUPupnMauVPsHHI3ndPInWLnkLtT7AzEJNeh5CxJWyRd7O7j3H2cpObOr8cfb8jd57h7g7s3ZDKDj7l9377fqLl5ryZMOFWSdP7579OmTVvzDr2msUn19eM0duwoZbNZzZx5iRYtXla2s+Qmd7FnyR3PLLnjmSV3PLPkjmeW3PHMkjueWXLHM0vueGZD7wZiUdndje5+k5nNl/QtM9sj6QYd/0OxC3Lt5/5N/+/O/9SAAVnt2LlbV1zxj3nPtre365prZ2vpkntUkclo3p0LtHFjfoeaMc6Sm9zFniV3PLPkjmeW3PHMkjueWXLHM0vueGbJHc8sueOZDb0biIUV8J4GMyR9QdJYd8/7HVazA2p7fIjZv1/5DgAAAAAAEMbhV1uOffNJ6PRTJnOcdJT1+57st39Pun2mpJm9W9Imd39J0nJJ50p6xcy+Ienr7n6gBBkBAAAAAACAnJynuEUj13tKzpV0qPPrWyRlJX2p82d3FC8WAAAAAAAAgHLV7TMlJWXc/XDn1w3uflbn1yvNrKmIuQAAAAAAAACUqVzPlFxvZp/s/HqdmTVIkplNkNRW1GQAAAAAAAAAylKuZ0peIenbZjZb0n5JT3R+Cveeztty4pX8AAAAAACURrYi1//M715b++HcdwKAPtDtP606P8jmcjMbIml85/2b3X1fKcIBAAAAAAAAKD95/V8o7v6ypHVFzgIAAAAAAAD0WIfzmt1Y5HpPSQAAAAAAAADoUxxKAgAAAAAAACgpDiUBAAAAAAAAlFSQQ8mqqio98fhiPdX4sNY1PaobvnhdwX/G9GlTtWH9Cm3euFKzrr+q7GdD7iZ3PLlT7BxyN53TyJ1i55C76ZxG7hQ7h9xN5zRyp9g55O5YOtfVjdRDD83X2rXL9dRTD+uqqz4pSfr61/9VTU3LtXr1Q1qw4PsaNmxov8pdDrOhdwNRcPeiXhXZGu/qGnpCvVdka7xq4GhfteopP+e9F3V5v66ubFWdb9u20+snTPbqQWO8ad0GP/2MKWU7S25y07n/7aZzGrlT7Bxr7hQ7x5o7xc6x5k6xc6y5U+wca+5SdK6uHn3kGju2wSdP/rBXV4/24cP/1Ldu3e5nnnmBf+Qjl/ngweO8unq033TTrX7TTbcemeGxiqdzsc9zYr3efnKDc71+hX48uruCvXz74MFDkqRstlKV2azc8/90pLMnTdT27bu0c+dutbW1aeHCB3TxjOllO0tuchd7ltzxzJI7nllyxzNL7nhmyR3PLLnjmSV3PLM9mX/uuefV1LRekvTKKwe1efM21dScouXL
H1N7e7skafXqtaqtHdmvcsc+G3o3EItuDyXN7MKjvh5mZreb2TNmdo+ZndKrxZmMGtcsU2vLM1q+fIVWr1mb92xN7Qjtad575PvmllbV1Iwo29mQu8ld2t10TiN3ip1D7qZzGrlT7BxyN53TyJ1i55C76ZxG7pCdR4+u05lnnqY1a5re8PO/+7uZ+tnP/rvf5o5xNvRuIBa5nin59aO+vllSq6QZktZI+v7xhszsSjNrNLPGjo6DXd6no6NDDZOmacy4Bk1qmKjTTnt73qHN7Jif5ftMyxhnQ+4md2l307mw2ZC76VzYbMjddC5sNuRuOhc2G3I3nQubDbmbzoXNhtxN58JmQ+6OsfPgwYN0773f0/XXf0Uvv/zKkZ/PmvUZtbcf1vz5PynK3r6Yj3E29G4gFoW8fLvB3We7+6/d/VuSxh7vju4+x90b3L0hkxnc7R964MBL+sWKX2r6tKl5B2lpbtWoupoj39fVjlRr676ynQ25m9yl3U3nNHKn2DnkbjqnkTvFziF30zmN3Cl2DrmbzmnkDtG5srJS9977PS1YcL8eeOChIz+/7LK/0Ic/fIEuv/yafpk75tnQu4FY5DqU/B9m9o9mdp2kofbG4/oevx/l8OEnHvl0r+rqal1w/rnasmV73vNrGptUXz9OY8eOUjab1cyZl2jR4mVlO0tuchd7ltzxzJI7nllyxzNL7nhmyR3PLLnjmSV3PLM9nf/e9/5dW7Zs03e+84MjP/vgB6fouuv+QR//+Kf0+9//oV/mjnk29O7UdbhzHXX1Z5U5br9N0pDOr++UNFzSb8xshKSm407lMHLkKZp7+y2qqMgok8novvsWacnSR/Keb29v1zXXztbSJfeoIpPRvDsXaOPGrWU7S25yF3uW3PHMkjueWXLHM0vueGbJHc8sueOZJXc8sz2ZP+ecBl122V/o2Wc36cknl0qSbrjhm7r55i+pqmqAFi++W9JrH3Zz9dVf6De5Y58NvRuIhXX3vgRm9m5Jm939gJkNkvR5SRMlbZT0dXc/kGtB5YDa/n0sCwAAAABAmchW5HruUffa2g/3URIU2+FXW45980lowskNnEMdZetvGvvt35NcL8GeK+mPn1Rzi6Shkr4h6ZCkO4qYCwAAAAAAAECZyvV/oWTc/Y//N0mDu5/V+fVKM+vxy7cBAAAAAAAApCvXMyXXm9knO79eZ2YNkmRmEyS1FTUZAAAAAAAAgLKU65mSV0j6tpnNlrRf0hNmtkfSns7bAAAAAAAAgH7BxVtKxqLbQ8nOD7K53MyGSBrfef9md99XinAAAAAAACB/fFANgFjk9bFc7v6ypHVFzgIAAAAAAAAgAbneUxIAAAAAAAAA+hSHkgAAAAAAAABKKq+XbwMAAAAAAAD9XYfzQTexCPJMybq6Gj2y7Id69pn/1rqmR/XZz3yq4D9j+rSp2rB+hTZvXKlZ119V9rMhd5M7ntwpdg65m85p5E6xc8jddE4jd4qdQ+6mcxq5U+wccneKnW+bc7P2Nq9T09rlBc31xe4YZ0PvBqLg7kW9KrI1/uardtSZ3jBpmldka3zYW9/mW7Zu99PPmHLM/Y53ZavqfNu2nV4/YbJXDxrjTes25D0f4yy5yU3n/rebzmnkTrFzrLlT7Bxr7hQ7x5o7xc6x5k6xc6y5Y+1cka3xqed9zBsmTfNn12/KeyZ07nJ/rIp9nhPrNf6kic71+hX68ejuCvJMyeeee15rm9ZLkl555aA2b/6VamtG5D1/9qSJ2r59l3bu3K22tjYtXPiALp4xvWxnyU3uYs+SO55ZcsczS+54Zskdzyy545kldzyz5I5nNvTux1au0gu/ezHv+/eH3Kk+VkAsCj6UNLOT+jLAmDF1OvOdp2vV6rV5z9TUjtCe5r1Hvm9uaVVNnoeaMc6G3E3u0u6mcxq5U+wccjed08idYueQu+mcRu4UO4fcTec0csfaubdi/H2n+lgBpdTtoaSZ3Whmwzu/bjCzHZJWmdmv
zWxKb5cPHjxICxfcpn/8pxv08suv5D1nZsf8zD2/NzKNcTbkbnKXdjedC5sNuZvOhc2G3E3nwmZD7qZzYbMhd9O5sNmQu+lc2GzI3XQubDbk7hQ791aMv+9UH6ty4PznDf/pz3I9U/Ij7r6/8+tvSrrU3eslfVDSzccbMrMrzazRzBo7Og52eZ/Kykr9cMFtuvfen+j++x8sKHRLc6tG1dUc+b6udqRaW/eV7WzI3eQu7W46p5E7xc4hd9M5jdwpdg65m85p5E6xc8jddE4jd6ydeyvG33eqjxVQSrkOJbNmVtn59UB3XyNJ7r5VUtXxhtx9jrs3uHtDJjO4y/vcNudmbdq8Tbd8e07Bodc0Nqm+fpzGjh2lbDarmTMv0aLFy8p2ltzkLvYsueOZJXc8s+SOZ5bc8cySO55ZcsczS+54ZkPv7o0Yf9+pPlZAKVXmuP27kpaa2Y2SHjKzWyT9WNIFkpp6uvS950zS3/7Nx/XMsxvVuOa1/2L927/dqAcfejSv+fb2dl1z7WwtXXKPKjIZzbtzgTZu3Fq2s+Qmd7FnyR3PLLnjmSV3PLPkjmeW3PHMkjueWXLHMxt69913fVdT3v8eDR9+onbtaNSXv3KT7pg3v1/nTvWxAmJhud6XwMymSvoHSRP02iHmHkn3S7rD3dtyLagcUNu/X8AOAAAAAAAQmcOvthz75pPQ+OETOYc6yo79a/vt35NcH3TzbklPu/ulkt4r6SeSOiSdKmlQ8eMBAAAAAAAAKDe5Xr49V9I7O7++RdJBSTfqtZdv3yHpz4sXDQAAAAAAAMife0foCMhTrkPJjLsf7vy6wd3P6vx6pZn1+D0lAQAAAAAAAKQr16dvrzezT3Z+vc7MGiTJzCZIyvl+kgAAAAAAAADwZrkOJa+QNMXMtkt6h6QnzGyHpNs6bwMAAAAAAACAgnT78m13PyDpcjMbIml85/2b3X1fKcIBAAAAAIA49OYjfvm4ZCA9ud5TUpLk7i9LWlfkLAAAAAAAAECPdXDEHY1cL98GAAAAAAAAgD7FoSQAAAAAAACAkuJQEgAAAAAAAEBJBTmUrKqq0hOPL9ZTjQ9rXdOjuuGL1xX8Z0yfNlUb1q/Q5o0rNev6q8p+NuRucseTO8XOIXfTOY3cKXYOuZvOaeROsXPI3XROI3eKnUPupnNhsxMmnKrGNcuOXL/dv1lXf/aKfp871scKiIa7F/WqyNZ4V9fQE+q9IlvjVQNH+6pVT/k5772oy/t1dWWr6nzbtp1eP2GyVw8a403rNvjpZ0wp21lyk5vO/W83ndPInWLnWHOn2DnW3Cl2jjV3ip1jzZ1i51hzl3vnyjyuAVV13tq6z8efOukNP4+1c8jdxT7PifUa9dbTnev1K/Tj0d0V7OXbBw8ekiRls5WqzGblnv+nI509aaK2b9+lnTt3q62tTQsXPqCLZ0wv21lyk7vYs+SOZ5bc8cySO55ZcsczS+54Zskdzyy545mNOffRzj//fdqx49favbulX+eO9bECYhLsUDKTyahxzTK1tjyj5ctXaPWatXnP1tSO0J7mvUe+b25pVU3NiLKdDbmb3KXdTec0cqfYOeRuOqeRO8XOIXfTOY3cKXYOuZvOaeROsfObXTrzEi1YcH/e94+1c3/5fQP9WbeHkmb2tJnNNrNTC/lDzexKM2s0s8aOjoNd3qejo0MNk6ZpzLgGTWqYqNNOe3shf/4xP8v3mZYxzobcTe7S7qZzYbMhd9O5sNmQu+lc2GzI3XQubDbkbjoXNhtyN50Lmw25m86FzYbcTefCZo+WzWZ10UXTdN+PFuc9E2vn/vD7Bvq7XM+UfKukEyT93MxWm9nnzKwm1x/q7nPcvcHdGzKZwd3e98CBl/SLFb/U9GlT8w7d0tyqUXWvx6irHanW1n1lOxtyN7lLu5vOaeROsXPI3XROI3eKnUPupnMauVPsHHI3ndPInWLno1144Xlau/ZZPf/8/rxnYu3cH37fQH+X61Dyd+7+
T+4+WtJ1kt4m6Wkz+7mZXdnTpcOHn6hhw4ZKkqqrq3XB+edqy5btec+vaWxSff04jR07StlsVjNnXqJFi5eV7Sy5yV3sWXLHM0vueGbJHc8sueOZJXc8s+SOZ5bc8czGnPuPLr30owW9dDtk7lgfK0gdcq6jrv6sMsftR54z7O6PSXrMzD4r6YOSLpU0pydLR448RXNvv0UVFRllMhndd98iLVn6SN7z7e3tuuba2Vq65B5VZDKad+cCbdy4tWxnyU3uYs+SO55ZcsczS+54Zskdzyy545kldzyz5I5nNubckjRwYLU+cMH79elP/3NBc7F2Dv37BmJg3b0vgZnNd/e/7M2CygG1/ftYFgAAAAAA9Nqx74SYPw4OCnf41Zbe/MrLVt2Jp/PX6SjNL6zvt39Pcr18+1tmNlSSzGygmX3FzBaZ2TfMbFgJ8gEAAAAAAAAoM7kOJedKOtT59bclDZX0jc6f3VHEXAAAAAAAAADKVK73lMy4++HOrxvc/azOr1eaWVMRcwEAAAAAAAAoU7kOJdeb2Sfd/Q5J68yswd0bzWyCpLYS5AMAAAAAAADy0t1np6B/yXUoeYWkb5vZbEn7JT1hZnsk7em8LaeM9fz9NDv4iwQAAAAAQBR687/ghw8a2qvd+w+91Kt5AKXX7aGkux+QdLmZDZE0vvP+ze6+rxThAAAAAAAAAJSfXM+UlCS5+8uS1hU5CwAAAAAAAIAE5Pr0bQAAAAAAAADoU3k9UxIAAAAAAADo7/h8kngEe6bkZz7zKa19+hE1rV2uz372UwXPT582VRvWr9DmjSs16/qrSjJbVVWlJx5frKcaH9a6pkd1wxevK1nm3s6Hmg25O8XcKXYOuZvOaeROsXPI3XROI/dtc27W3uZ1alq7vKC5vtjNY0Xn/rybzmnkTrFzT+aHDhuiH9x5ix5bvUQrVi3WuyadqVlfuFqPPn6/Hnnsx5r/4x/olBEnFzV3rI8VEA13L+qVHVDrb77OPPN8X79+kw8ddqpXDxztjyxf4X/6jvcdc7+KbE2XV7aqzrdt2+n1EyZ79aAx3rRug59+xpTj3r+vZiuyNT70hHqvyNZ41cDRvmrVU37Oey8qyd5QnckdT+4UO8eaO8XOseZOsXOsuVPsHHPuqed9zBsmTfNn12/KeyZ07hQfqxQ7x5o7xc6x5k6xc77zpwz7kzdcC+75iX/uM7P9lGF/4nXD/8zfNnqSn1r3riO3/+usr/m82+898n2MnXs7W+zznFivEcP+1Llev0I/Ht1dQZ4p+Sd/Uq9Vq9bq97//g9rb2/XYiid1ySUX5j1/9qSJ2r59l3bu3K22tjYtXPiALp4xveizknTw4CFJUjZbqcpsVu75PS24t3tDdSZ3PLlT7Bxr7hQ7x5o7xc6x5k6xc8y5H1u5Si/87sW8798fcqf4WKXYOdbcKXaONXeKnXsy/5YhgzX5nAbdFPeRlAAAGdZJREFUc9d9kqS2tja9dOBlvfLywSP3GTRooJTjf47H1LkvdwOxCHIouWHjFp177rt14oknaODAal144fmqq6vJe76mdoT2NO898n1zS6tqakYUfVaSMpmMGtcsU2vLM1q+fIVWr1lbkr2hOpO7tLvpnEbuFDuH3E3nNHKn2Dnk7t7m7o1YO8eYO8XOIXfTOY3cKXbuyfyYsaP02/0v6Nu3fl0Pr/iRbv7OV187hJT0+dnX6Kn1j+ov/ucM/fvXv1O03LE+VkBMuj2UNLMGM/u5md1tZqPM7GEzO2Bma8xsYk+Xbt68Td+86VY9uPReLV50t555dqMOHz6c97yZHfOzfJ+x2JtZSero6FDDpGkaM65Bkxom6rTT3l6SvaE6k7u0u+lc2GzI3XQubDbkbjoXNhtyN50Lmw25u7e5eyPWzjHmTrFzyN10Lmw25G46Fzbbk/nKigr92TvfoXm3z9cH3/8XOnTokD7zuf8lSbrxa9/Wu04/Xz/64SL9/ZWXFS13rI8VJOc/b/hPf5br
mZK3Svp3SUsk/VLS9919mKTPd97WJTO70swazayxo/1gl/eZN2++3j35Q7rgAx/X7154Udu27cw7dEtzq0Yd9czKutqRam3dV/TZox048JJ+seKXmj5takn2hupM7tLupnMauVPsHHI3ndPInWLnkLv76t+neiLWzjHmTrFzyN10TiN3ip17Mr937z617t2ntU89I0la/MAynXHGO95wn5/ct0QfmTGtaLljfayAmOQ6lMy6+4Pufq8kd/f79NoXyyVVH2/I3ee4e4O7N2QqBnd5n5NPPkmSNGpUjT760Q9pwYIH8g69prFJ9fXjNHbsKGWzWc2ceYkWLV5W9Nnhw0/UsGFDJUnV1dW64PxztWXL9qLv7e18qFlyxzNL7nhmyR3PLLnjmSV36XP3RqydY8ydYudYc6fYOdbcKXbuyfxvnt+vluZWnVo/VpJ07pTJ2rplm8aNH3PkPtM/dJ62/WpH0XLH+lgBManMcfsfzGyapGGS3Mw+6u73m9kUSe29Wbxg/hyddNJb1dZ2WFdf8wW9+OKBvGfb29t1zbWztXTJParIZDTvzgXauHFr0WdHjjxFc2+/RRUVGWUyGd133yItWfpI0ff2dj7ULLnjmSV3PLPkjmeW3PHMkrv0ue++67ua8v73aPjwE7VrR6O+/JWbdMe8+f06d4qPVYqdY82dYudYc6fYuafzX/jn/6Nbb/umsgOy+vWuPbr201/Qzf/5VdXXj1OHd6h5z17N+tyXipY71scKiIl1974EZvZOvfby7Q5Jn5P0D5L+TtJeSVe6++O5FgyoquvxC9g7eM8EAAAAAADK3vBBQ3s1v//QS32UJB6HX2059s0noREn/CmHSUd57sVN/fbvSa5nSlZLmunuB8xsoKQDkh6XtEHS+mKHAwAAAAAAAFB+ch1KzpX0zs6vvy3poKQbJV0g6Q5Jf168aAAAAAAAAED++KTyeOQ6lMy4++HOrxvc/azOr1eaWVMRcwEAAAAAAAAoU7k+fXu9mX2y8+t1ZtYgSWY2QVJbUZMBAAAAAAAAKEu5DiWvkDTFzLZLeoekJ8xsh6TbOm8DAAAAAAAAgIJ0+/Jtdz8g6XIzGyJpfOf9m919X74L+ARtAAAAAADQnd5+enZvPl6YUwsgjFzvKSlJcveXJa0rchYAAAAAAACgxzo4Zo5GrpdvAwAAAAAAAECf4lASAAAAAAAAQElxKAkAAAAAAACgpIIcSlZVVemJxxfrqcaHta7pUd3wxesK/jOmT5uqDetXaPPGlZp1/VVlPxtyN7njyZ1i55C76ZxG7hQ7h9xN5zRyp9g55G46p5E7xc4hd9M5ntzXXP2/1NT0qNauXa677vquqqqqSrK3t/O93Q1Ewd2LelVka7yra+gJ9V6RrfGqgaN91aqn/Jz3XtTl/bq6slV1vm3bTq+fMNmrB43xpnUb/PQzppTtLLnJTef+t5vOaeROsXOsuVPsHGvuFDvHmjvFzrHmTrFzrLlT7Fyq3ZVdXKPHnOU7dvza3zJkvFdma3zhD3/qf//31x5zv1g7F/s8J9brpCFvc67Xr9CPR3dXsJdvHzx4SJKUzVaqMpuVe/6fjnT2pInavn2Xdu7crba2Ni1c+IAunjG9bGfJTe5iz5I7nllyxzNL7nhmyR3PLLnjmSV3PLPkjmeW3D3bXVlZqYEDq1VRUaFBAwdqb+tzJdkbsjMQi2CHkplMRo1rlqm15RktX75Cq9eszXu2pnaE9jTvPfJ9c0urampGlO1syN3kLu1uOqeRO8XOIXfTOY3cKXYOuZvOaeROsXPI3XROI3eKnUPu3rv3OX3rW9/Tju2rtWf3Wr300kt65JEVRd/b2/ne7gZi0e2hpJm9xcy+YmYbzOyAmf3GzJ40s8t7u7ijo0MNk6ZpzLgGTWqYqNNOe3ves2Z2zM/yfaZljLMhd5O7tLvpXNhsyN10Lmw25G46FzYbcjedC5sNuZvOhc2G3E3nwmZD7qZzYbMhd9O5sNmQu084
YZhmzJiut02YrNFjztKgwYP013/950Xf29v53u4GYpHrmZL/JWmHpOmSvizpO5L+VtJ5Zvb14w2Z2ZVm1mhmjR0dB7tdcODAS/rFil9q+rSpeYduaW7VqLqaI9/X1Y5Ua+u+sp0NuZvcpd1N5zRyp9g55G46p5E7xc4hd9M5jdwpdg65m85p5E6xc8jdF1xwrnbt2q39+1/Q4cOHdf/9D+o9kxuKvre3873dDcQi16HkWHef5+7N7v4fki52919J+qSk4/7fC+4+x90b3L0hkxl8zO3Dh5+oYcOGSpKqq6t1wfnnasuW7XmHXtPYpPr6cRo7dpSy2axmzrxEixYvK9tZcpO72LPkjmeW3PHMkjueWXLHM0vueGbJHc8sueOZJXfhs3t2t+jsd5+lgQOrJUnnn/c+bd78q6Lv7e18b3cDsajMcftBM3ufu680sxmSXpAkd++wrp5PnKeRI0/R3NtvUUVFRplMRvfdt0hLlj6S93x7e7uuuXa2li65RxWZjObduUAbN24t21lyk7vYs+SOZ5bc8cySO55ZcsczS+54Zskdzyy545kld+Gzq9es1Y9/vESrV/9Mhw8f1rqmDbrtB/9V9L29ne/t7tR18FL3aFh370tgZmdI+oGkCZLWS/p7d99qZidL+it3/06uBZUDavnbAAAAAAAAiqbHz5qSFOuhxeFXW3pTu2ydOORtsT6kRfHCy7/qt39Pcj1TcqCkD7r7ATMbJOmfzewsSRslHfc9JQEAAAAAAADgeHK9p+RcSX/8pJpbJA2T9A1JhyTdUcRcAAAAAAAAAMpUrmdKZtz9cOfXDe5+VufXK82sqYi5AAAAAAAAAJSpXIeS683sk+5+h6R1Ztbg7o1mNkFSWwnyAQAAAAAAAHnp7rNT0L/kevn2FZKmmNl2Se+Q9ISZ7ZB0W+dtAAAAAAAAQXkvLuvFBaDnun2mpLsfkHS5mQ2RNL7z/s3uvq8U4QAAAAAAAACUn1wv35YkufvLktYVOQsAAAAAAACABOR6+TYAAAAAAAAA9Km8nikJAAAAAAAA9Hcd4oNuYsEzJQEAAAAAAACUVJBDyaqqKj3x+GI91fiw1jU9qhu+eF3Bf8b0aVO1Yf0Kbd64UrOuv6rsZ0PuJnc8uVPsHHJ3ip1vm3Oz9javU9Pa5QXN9cXuGGdD7k4xd4qdQ+6mcxq5U+wccjed08idYueQu3ub+1dbn9Tapx9R45plevKJpSXb3dvcQBTcvahXRbbGu7qGnlDvFdkarxo42letesrPee9FXd6vqytbVefbtu30+gmTvXrQGG9at8FPP2NK2c6Sm9x07n+7U+xcka3xqed9zBsmTfNn12/KeyZ07hQfqxRzp9g51twpdo41d4qdY82dYudYc6fYOYbcld1cO3fu9lNGnHbc20PmLvZ5TqzX0MHjnev1K/Tj0d0V7OXbBw8ekiRls5WqzGblnv9r/s+eNFHbt+/Szp271dbWpoULH9DFM6aX7Sy5yV3sWXLHMxt692MrV+mF372Y9/37Q+4UH6sUc6fYOdbcKXaONXeKnWPNnWLnWHOn2Dnm3L0Ra26glLo9lDSzYWZ2o5ltNrPfdl6bOn92Qq8WZzJqXLNMrS3PaPnyFVq9Zm3eszW1I7Snee+R75tbWlVTM6JsZ0PuJndpd9M5jdyxdu6tGH/fsT5WKeZOsXPI3XROI3eKnUPupnMauVPsHHJ3X/y7r7vrwaX3atWTD+qKT12W91zo3EAMcn369kJJj0qa6u7PSZKZjZD0CUk/lPTBrobM7EpJV0qSVQxTJjP4mPt0dHSoYdI0DRs2VD/64e067bS3a8OGLXmFNrNjfpbvMy1jnA25m9yl3U3nwmZD7k6xc2/F+PuO9bFKMXeKnUPupnNhsyF307mw2ZC76VzYbMjddC5sNuTuvvh33ylTP6rW1n06+eST9NCD87V5yzatXLmqqLtD/jt7OeB3FY9cL98e6+7f+OOBpCS5+3Pu/g1Jo4835O5z
3L3B3Ru6OpA82oEDL+kXK36p6dOm5h26pblVo+pqjnxfVztSra37ynY25G5yl3Y3ndPIHWvn3orx9x3rY5Vi7hQ7h9xN5zRyp9g55G46p5E7xc4hd/fFv/v+8f6/+c1vdf8DD2rSpDOLvjvkv7MDpZTrUPLXZjbLzE754w/M7BQz+2dJe3q6dPjwEzVs2FBJUnV1tS44/1xt2bI97/k1jU2qrx+nsWNHKZvNaubMS7Ro8bKynSU3uYs9S+54ZkPv7o0Yf9+xPlYp5k6xc6y5U+wca+4UO8eaO8XOseZOsXPMuQcNGqi3vGXwka8/+IEpeb/CM9Z/ZwdKKdfLty+V9HlJv+g8mHRJ+yT9VNLMni4dOfIUzb39FlVUZJTJZHTffYu0ZOkjec+3t7frmmtna+mSe1SRyWjenQu0cePWsp0lN7mLPUvueGZD7777ru9qyvvfo+HDT9SuHY368ldu0h3z5vfr3Ck+VinmTrFzrLlT7Bxr7hQ7x5o7xc6x5k6xc8y5TznlZN33w9slSRWVFZo//34tW/bf/T43EAvr7rX2Zna1pJ+4e4+fFVk5oJYX8wMAAAAAgH7p2HdwzF/IA4/Dr7b0JnrZGjp4POdQR3np4I5++/ck16HkAUkHJW2XdI+kH7r7/kIWcCgJAAAAAAD6Kw4ly8tbBo3jHOoorxza2W//nuR6T8kdkuokfVVSg6RNZvaQmX3CzIYUPR0AAAAAAACAspPrUNLdvcPdl7n7pyTVSLpV0oV67cASAAAAAAAAAAqS64Nu3vAUT3dv02sfcvNTMxtYtFQAAAAAAAAAylY+n77dJXf/fR9nAQAAAAAAKCnegBAIo9tDSXfnM+cBAAAAAAAQBeeYORq53lMSAAAAAAAAAPoUh5IAAAAAAAAASopDSQAAAAAAAAAlFexQ8rY5N2tv8zo1rV3eo/np06Zqw/oV2rxxpWZdf1XZz4bcTe54csfamX8exPNYpZg7xc4hd9M5ndyZTEZrVv9MD/zkzoJnY+0cY+4UO4fcTec0cqfYOeTuFDsD0XD3ol4V2Rrv6pp63se8YdI0f3b9pi5v7+7KVtX5tm07vX7CZK8eNMab1m3w08+YUraz5CZ3OXeuyPLPg1geqxRzp9g51twpdo45d0W2xq/7py/5Pff+2BcvfriguVg7x5g7xc6x5k6xc6y5U+wca+4YOhf7PCfWq7p6tHO9foV+PLq7gj1T8rGVq/TC717s0ezZkyZq+/Zd2rlzt9ra2rRw4QO6eMb0sp0lN7mLPRt6N/88iOOxSjF3ip1jzZ1i55hz19aO1Ic/dIHmzr0375nQuVN8rFLsHGvuFDvHmjvFzrHmjrUzEJMo31OypnaE9jTvPfJ9c0urampGlO1syN3kLu3uFDv3Voy/71gfqxRzp9g55G46p5P7P27+sj7/L19TR0dH3jN9sZvHis79eTed08idYueQu1PsDMSkx4eSZvZgXwYpcPcxP3P3sp0NuZvcpd2dYufeivH3HetjlWLuFDuH3E3nwmZD7u7N7Ec+/AE9//x+Pb322bzu35e7eaxKNxtyd4q5U+wccjedC5sNuTvFzkBMKru70czOOt5Nks7sZu5KSVdKklUMUyYzuMcBu9LS3KpRdTVHvq+rHanW1n1lOxtyN7lLuzvFzr0V4+871scqxdwpdg65m85p5D7nnAbNuGiaPnTh+aqurtLQoUN057zv6BOXX92vc6f4WKXYOeRuOqeRO8XOIXen2BmISa5nSq6RdJOkm9903STphOMNufscd29w94a+PpCUpDWNTaqvH6exY0cpm81q5sxLtGjxsrKdJTe5iz0bendvxPj7jvWxSjF3ip1jzZ1i51hzf2H2jRo7vkH1Eybrsr/5tH7+88fzPpAMmTvFxyrFzrHmTrFzrLlT7Bxr7lg7AzHp9pmSkjZJ+t/u/qs332Bme3qz+O67vqsp73+Phg8/Ubt2NOrLX7lJd8ybn9dse3u7rrl2tpYuuUcVmYzm
3blAGzduLdtZcpO72LOhd/PPgzgeqxRzp9g51twpdo45d2/E2jnG3Cl2jjV3ip1jzZ1i51hzx9oZvNQ9Jtbdg2VmH5f0rLtv6eK2j7r7/bkWVA6o5W8DAAAAAABAHzr8asuxbz4JVVeP5hzqKH/4w+5++/ck18u3ayQd6uqGfA4kAQAAAAAAAODNch1KflXSKjN7zMw+bWYnlyIUAAAAAAAAgPKV61Byh6Q6vXY4+S5JG83sITP7hJkNKXo6AAAAAAAAAGUn1wfduLt3SFomaZmZZSV9SNJf6bVP4OaZkwAAAAAAAOgXXLylZCxyHUq+4c0w3b1N0k8l/dTMBhYtFQAAAAAAAICylevl25ce7wZ3/30fZwEAAAAAAACQgG4PJd19a6mCAAAAAAAAAEhDrmdKAgAAAAAAAECfyvWekgAAAAAAAEAU3Pmgm1jwTEkAAAAAAAAAJRXsUPK2OTdrb/M6Na1d3qP56dOmasP6Fdq8caVmXX9V2c+G3E3ueHKn2DnkbjqnkTvFziF30zmN3Cl2DrmbzmnkTrFzyN10TiN3yM6SlMlktGb1z/TAT+4seBaIgrsX9arI1nhX19TzPuYNk6b5s+s3dXl7d1e2qs63bdvp9RMme/WgMd60boOffsaUsp0lN7np3P920zmN3Cl2jjV3ip1jzZ1i51hzp9g51twpdo41d4qdY80dsvMfr+v+6Ut+z70/9sWLH+7y9mKf58R6ZQfUOtfrV+jHo7sr2DMlH1u5Si/87sUezZ49aaK2b9+lnTt3q62tTQsXPqCLZ0wv21lyk7vYs+SOZ5bc8cySO55ZcsczS+54Zskdzyy545kldzyzfTFfWztSH/7QBZo79968Z4DYRPmekjW1I7Snee+R75tbWlVTM6JsZ0PuJndpd9M5jdwpdg65m85p5E6xc8jddE4jd4qdQ+6mcxq5U+wccnesnSXpP27+sj7/L19TR0dH3jNAbLo9lDSzoWb2f83sLjP76zfddms3c1eaWaOZNXZ0HOyrrEf/+cf8zD2/T1eKcTbkbnKXdjedC5sNuZvOhc2G3E3nwmZD7qZzYbMhd9O5sNmQu+lc2GzI3XQubDbkbjoXNhtyd6ydP/LhD+j55/fr6bXP5r0Prwv9kuT+dvVnuZ4peYckk/QjSX9pZj8ys6rO2yYfb8jd57h7g7s3ZDKD+yjq61qaWzWqrubI93W1I9Xauq9sZ0PuJndpd9M5jdwpdg65m85p5E6xc8jddE4jd4qdQ+6mcxq5U+wccnesnc85p0EzLpqmbVuf1H/dfavOO++9unPed/LeDcQi16Hkqe7+eXe/390vlvS0pEfN7KQSZDuuNY1Nqq8fp7FjRymbzWrmzEu0aPGysp0lN7mLPUvueGbJHc8sueOZJXc8s+SOZ5bc8cySO55Zcscz29v5L8y+UWPHN6h+wmRd9jef1s9//rg+cfnVee8GYlGZ4/YqM8u4e4ckufv/MbNmSSskvaU3i+++67ua8v73aPjwE7VrR6O+/JWbdMe8+XnNtre365prZ2vpkntUkclo3p0LtHHj1rKdJTe5iz1L7nhmyR3PLLnjmSV3PLPkjmeW3PHMkjueWXLHM9sX80AKrLvXl5vZv0ta5u6PvOnnF0r6T3d/W64FlQNq+/cL2AEAAAAAACJz+NWWY9+4EspyDvUGbf3470muZ0o2S9ry5h+6+0OSch5IAgAAAAAAAKXCiWQ8cr2n5FclrTKzx8zs02Z2cilCAQAAAAAAACg+M7vQzLaY2TYz+3yp9uY6lNwhqU6vHU6+S9JGM3vIzD5hZkOKng4AAAAAAABAUZhZhaTvSvqQpHdI+isze0cpduc6lHR373D3Ze7+KUk1km6VdKFeO7AEAAAAAAAAEKezJW1z9x3u/qqk+ZIuKcXiXO8p+YY3w3T3Nkk/lfRTMxtYtFQAAAAAAAAAiq1W0p6jvm+W9O5SLM51KHnp8W5w99/ns4BPgwIAAAAAAEApcA71RmZ2
paQrj/rRHHefc/RduhgryecFdXso6e5bSxECAAAAAAAAQN/qPICc081dmiWNOur7Okl7ixqqU673lAQAAAAAAABQntZIepuZjTOzAZL+Uq+9dWPR5Xr5NgAAAAAAAIAy5O6Hzewzkn4mqULSXHffUIrd5l6Sl4kDAAAAAAAAgCRevg0AAAAAAACgxDiUBAAAAAAAAFBSHEoCAAAAAAAAKCkOJQEAAAAAAACUFIeSAAAAAAAAAEqKQ0kAAAAAAAAAJcWhJAAAAAAAAICS4lASAAAAAAAAQEn9fymVlo281J4BAAAAAElFTkSuQmCC\n",
- "text/plain": [
- ""
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- }
- ],
- "source": [
- "import matplotlib.pyplot as plt\n",
- "import seaborn as sb\n",
- "import pandas as pd\n",
- "\n",
- "cm_plt = pd.DataFrame(cm[:73])\n",
- "\n",
- "plt.figure(figsize = (25, 25))\n",
- "ax = plt.axes()\n",
- "\n",
- "sb.heatmap(cm_plt, annot=True)\n",
- "\n",
- "ax.xaxis.set_ticks_position('top')\n",
- "\n",
- "plt.show()"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Pipeline"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Now, I took the data from [Coconut - Wikipedia](https://en.wikipedia.org/wiki/Coconut) to check if the classifier is able to **correctly** predict the label(s) or not.\n",
- "\n",
- "And here is the output:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 15,
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Example labels: [('coconut', 'oilseed')]\n"
- ]
- }
- ],
- "source": [
- "example_text = '''The coconut tree (Cocos nucifera) is a member of the family Arecaceae (palm family) and the only species of the genus Cocos.\n",
- "The term coconut can refer to the whole coconut palm or the seed, or the fruit, which, botanically, is a drupe, not a nut.\n",
- "The spelling cocoanut is an archaic form of the word.\n",
- "The term is derived from the 16th-century Portuguese and Spanish word coco meaning \"head\" or \"skull\", from the three indentations on the coconut shell that resemble facial features.\n",
- "Coconuts are known for their versatility ranging from food to cosmetics.\n",
- "They form a regular part of the diets of many people in the tropics and subtropics.\n",
- "Coconuts are distinct from other fruits for their endosperm containing a large quantity of water (also called \"milk\"), and when immature, may be harvested for the potable coconut water.\n",
- "When mature, they can be used as seed nuts or processed for oil, charcoal from the hard shell, and coir from the fibrous husk.\n",
- "When dried, the coconut flesh is called copra.\n",
- "The oil and milk derived from it are commonly used in cooking and frying, as well as in soaps and cosmetics.\n",
- "The husks and leaves can be used as material to make a variety of products for furnishing and decorating.\n",
- "The coconut also has cultural and religious significance in certain societies, particularly in India, where it is used in Hindu rituals.'''\n",
- "\n",
- "example_preds = classifier.predict(vectorizer.transform([example_text]))\n",
- "example_labels = mlb.inverse_transform(example_preds)\n",
- "print(\"Example labels: {}\".format(example_labels))"
- ]
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.6.6"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
diff --git a/machine_learning/scoring_functions.py b/machine_learning/scoring_functions.py
old mode 100755
new mode 100644
index a2d97b09ded2..08b969a95c3b
--- a/machine_learning/scoring_functions.py
+++ b/machine_learning/scoring_functions.py
@@ -10,12 +10,23 @@
even log is used.
Using log and roots can be perceived as tools for penalizing big
- erors. However, using appropriate metrics depends on the situations,
+ errors. However, using appropriate metrics depends on the situations,
and types of data
"""
-#Mean Absolute Error
+
+# Mean Absolute Error
def mae(predict, actual):
+ """
+ Examples(rounded for precision):
+ >>> actual = [1,2,3];predict = [1,4,3]
+ >>> np.around(mae(predict,actual),decimals = 2)
+ 0.67
+
+ >>> actual = [1,1,1];predict = [1,1,1]
+ >>> mae(predict,actual)
+ 0.0
+ """
predict = np.array(predict)
actual = np.array(actual)
@@ -24,8 +35,19 @@ def mae(predict, actual):
return score
-#Mean Squared Error
+
+# Mean Squared Error
def mse(predict, actual):
+ """
+ Examples(rounded for precision):
+ >>> actual = [1,2,3];predict = [1,4,3]
+ >>> np.around(mse(predict,actual),decimals = 2)
+ 1.33
+
+ >>> actual = [1,1,1];predict = [1,1,1]
+ >>> mse(predict,actual)
+ 0.0
+ """
predict = np.array(predict)
actual = np.array(actual)
@@ -35,8 +57,19 @@ def mse(predict, actual):
score = square_diff.mean()
return score
-#Root Mean Squared Error
+
+# Root Mean Squared Error
def rmse(predict, actual):
+ """
+ Examples(rounded for precision):
+ >>> actual = [1,2,3];predict = [1,4,3]
+ >>> np.around(rmse(predict,actual),decimals = 2)
+ 1.15
+
+ >>> actual = [1,1,1];predict = [1,1,1]
+ >>> rmse(predict,actual)
+ 0.0
+ """
predict = np.array(predict)
actual = np.array(actual)
@@ -46,13 +79,24 @@ def rmse(predict, actual):
score = np.sqrt(mean_square_diff)
return score
-#Root Mean Square Logarithmic Error
+
+# Root Mean Square Logarithmic Error
def rmsle(predict, actual):
+ """
+ Examples(rounded for precision):
+ >>> actual = [10,10,30];predict = [10,2,30]
+ >>> np.around(rmsle(predict,actual),decimals = 2)
+ 0.75
+
+ >>> actual = [1,1,1];predict = [1,1,1]
+ >>> rmsle(predict,actual)
+ 0.0
+ """
predict = np.array(predict)
actual = np.array(actual)
- log_predict = np.log(predict+1)
- log_actual = np.log(actual+1)
+ log_predict = np.log(predict + 1)
+ log_actual = np.log(actual + 1)
difference = log_predict - log_actual
square_diff = np.square(difference)
@@ -62,17 +106,36 @@ def rmsle(predict, actual):
return score
-#Mean Bias Deviation
+
+# Mean Bias Deviation
def mbd(predict, actual):
+ """
+ This value is Negative, if the model underpredicts,
+ positive, if it overpredicts.
+
+ Example(rounded for precision):
+
+ Here the model overpredicts
+ >>> actual = [1,2,3];predict = [2,3,4]
+ >>> np.around(mbd(predict,actual),decimals = 2)
+ 50.0
+
+ Here the model underpredicts
+ >>> actual = [1,2,3];predict = [0,1,1]
+ >>> np.around(mbd(predict,actual),decimals = 2)
+ -66.67
+ """
predict = np.array(predict)
actual = np.array(actual)
difference = predict - actual
- numerator = np.sum(difference) / len(predict)
- denumerator = np.sum(actual) / len(predict)
- print(numerator)
- print(denumerator)
-
+ numerator = np.sum(difference) / len(predict)
+ denumerator = np.sum(actual) / len(predict)
+ # print(numerator, denumerator)
score = float(numerator) / denumerator * 100
return score
+
+
+def manual_accuracy(predict, actual):
+ return np.mean(np.array(actual) == np.array(predict))
diff --git a/machine_learning/sequential_minimum_optimization.py b/machine_learning/sequential_minimum_optimization.py
new file mode 100644
index 000000000000..98ce05c46cff
--- /dev/null
+++ b/machine_learning/sequential_minimum_optimization.py
@@ -0,0 +1,632 @@
+"""
+ Implementation of sequential minimal optimization (SMO) for support vector machines
+ (SVM).
+
+ Sequential minimal optimization (SMO) is an algorithm for solving the quadratic
+ programming (QP) problem that arises during the training of support vector
+ machines.
+ It was invented by John Platt in 1998.
+
+Input:
+ 0: type: numpy.ndarray.
+ 1: first column of ndarray must be tags of samples, must be 1 or -1.
+ 2: rows of ndarray represent samples.
+
+Usage:
+ Command:
+ python3 sequential_minimum_optimization.py
+ Code:
+ from sequential_minimum_optimization import SmoSVM, Kernel
+
+ kernel = Kernel(kernel='poly', degree=3., coef0=1., gamma=0.5)
+ init_alphas = np.zeros(train.shape[0])
+ SVM = SmoSVM(train=train, alpha_list=init_alphas, kernel_func=kernel, cost=0.4,
+ b=0.0, tolerance=0.001)
+ SVM.fit()
+ predict = SVM.predict(test_samples)
+
+Reference:
+ https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/smo-book.pdf
+ https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/tr-98-14.pdf
+ http://web.cs.iastate.edu/~honavar/smo-svm.pdf
+"""
+
+
+import os
+import sys
+import urllib.request
+
+import numpy as np
+import pandas as pd
+from matplotlib import pyplot as plt
+from sklearn.datasets import make_blobs, make_circles
+from sklearn.preprocessing import StandardScaler
+
+CANCER_DATASET_URL = (
+ "http://archive.ics.uci.edu/ml/machine-learning-databases/"
+ "breast-cancer-wisconsin/wdbc.data"
+)
+
+
+class SmoSVM:
+ def __init__(
+ self,
+ train,
+ kernel_func,
+ alpha_list=None,
+ cost=0.4,
+ b=0.0,
+ tolerance=0.001,
+ auto_norm=True,
+ ):
+ self._init = True
+ self._auto_norm = auto_norm
+ self._c = np.float64(cost)
+ self._b = np.float64(b)
+ self._tol = np.float64(tolerance) if tolerance > 0.0001 else np.float64(0.001)
+
+ self.tags = train[:, 0]
+ self.samples = self._norm(train[:, 1:]) if self._auto_norm else train[:, 1:]
+ self.alphas = alpha_list if alpha_list is not None else np.zeros(train.shape[0])
+ self.Kernel = kernel_func
+
+ self._eps = 0.001
+ self._all_samples = list(range(self.length))
+ self._K_matrix = self._calculate_k_matrix()
+ self._error = np.zeros(self.length)
+ self._unbound = []
+
+ self.choose_alpha = self._choose_alphas()
+
+ # Calculate alphas using SMO algorithm
+ def fit(self):
+ K = self._k
+ state = None
+ while True:
+
+ # 1: Find alpha1, alpha2
+ try:
+ i1, i2 = self.choose_alpha.send(state)
+ state = None
+ except StopIteration:
+ print("Optimization done!\nEvery sample satisfy the KKT condition!")
+ break
+
+ # 2: calculate new alpha2 and new alpha1
+ y1, y2 = self.tags[i1], self.tags[i2]
+ a1, a2 = self.alphas[i1].copy(), self.alphas[i2].copy()
+ e1, e2 = self._e(i1), self._e(i2)
+ args = (i1, i2, a1, a2, e1, e2, y1, y2)
+ a1_new, a2_new = self._get_new_alpha(*args)
+ if not a1_new and not a2_new:
+ state = False
+ continue
+ self.alphas[i1], self.alphas[i2] = a1_new, a2_new
+
+ # 3: update threshold(b)
+ b1_new = np.float64(
+ -e1
+ - y1 * K(i1, i1) * (a1_new - a1)
+ - y2 * K(i2, i1) * (a2_new - a2)
+ + self._b
+ )
+ b2_new = np.float64(
+ -e2
+ - y2 * K(i2, i2) * (a2_new - a2)
+ - y1 * K(i1, i2) * (a1_new - a1)
+ + self._b
+ )
+ if 0.0 < a1_new < self._c:
+ b = b1_new
+ if 0.0 < a2_new < self._c:
+ b = b2_new
+ if not (np.float64(0) < a2_new < self._c) and not (
+ np.float64(0) < a1_new < self._c
+ ):
+ b = (b1_new + b2_new) / 2.0
+ b_old = self._b
+ self._b = b
+
+ # 4: update error value,here we only calculate those non-bound samples'
+ # error
+ self._unbound = [i for i in self._all_samples if self._is_unbound(i)]
+ for s in self.unbound:
+ if s == i1 or s == i2:
+ continue
+ self._error[s] += (
+ y1 * (a1_new - a1) * K(i1, s)
+ + y2 * (a2_new - a2) * K(i2, s)
+ + (self._b - b_old)
+ )
+
+            # if i1 or i2 is non-bound, update their error value to zero
+ if self._is_unbound(i1):
+ self._error[i1] = 0
+ if self._is_unbound(i2):
+ self._error[i2] = 0
+
+    # Predict test samples
+ def predict(self, test_samples, classify=True):
+
+ if test_samples.shape[1] > self.samples.shape[1]:
+ raise ValueError(
+ "Test samples' feature length does not equal to that of train samples"
+ )
+
+ if self._auto_norm:
+ test_samples = self._norm(test_samples)
+
+ results = []
+ for test_sample in test_samples:
+ result = self._predict(test_sample)
+ if classify:
+ results.append(1 if result > 0 else -1)
+ else:
+ results.append(result)
+ return np.array(results)
+
+ # Check if alpha violate KKT condition
+ def _check_obey_kkt(self, index):
+ alphas = self.alphas
+ tol = self._tol
+ r = self._e(index) * self.tags[index]
+ c = self._c
+
+ return (r < -tol and alphas[index] < c) or (r > tol and alphas[index] > 0.0)
+
+ # Get value calculated from kernel function
+ def _k(self, i1, i2):
+ # for test samples,use Kernel function
+ if isinstance(i2, np.ndarray):
+ return self.Kernel(self.samples[i1], i2)
+ # for train samples,Kernel values have been saved in matrix
+ else:
+ return self._K_matrix[i1, i2]
+
+ # Get sample's error
+ def _e(self, index):
+ """
+ Two cases:
+        1: Sample[index] is non-bound; fetch error from list: _error
+        2: Sample[index] is bound; use predicted value minus true value: g(xi) - yi
+
+ """
+ # get from error data
+ if self._is_unbound(index):
+ return self._error[index]
+ # get by g(xi) - yi
+ else:
+ gx = np.dot(self.alphas * self.tags, self._K_matrix[:, index]) + self._b
+ yi = self.tags[index]
+ return gx - yi
+
+    # Calculate the kernel matrix of all possible i1, i2, saving time
+ def _calculate_k_matrix(self):
+ k_matrix = np.zeros([self.length, self.length])
+ for i in self._all_samples:
+ for j in self._all_samples:
+ k_matrix[i, j] = np.float64(
+ self.Kernel(self.samples[i, :], self.samples[j, :])
+ )
+ return k_matrix
+
+ # Predict test sample's tag
+ def _predict(self, sample):
+ k = self._k
+ predicted_value = (
+ np.sum(
+ [
+ self.alphas[i1] * self.tags[i1] * k(i1, sample)
+ for i1 in self._all_samples
+ ]
+ )
+ + self._b
+ )
+ return predicted_value
+
+ # Choose alpha1 and alpha2
+ def _choose_alphas(self):
+ locis = yield from self._choose_a1()
+ if not locis:
+ return
+ return locis
+
+ def _choose_a1(self):
+ """
+        Choose first alpha; steps:
+        1: First loop over all samples.
+        2: Second loop over all non-bound samples until no non-bound sample
+        violates the KKT condition.
+        3: Repeat these two processes endlessly, until no sample violates the
+        KKT condition after the first loop.
+ """
+ while True:
+ all_not_obey = True
+ # all sample
+ print("scanning all sample!")
+ for i1 in [i for i in self._all_samples if self._check_obey_kkt(i)]:
+ all_not_obey = False
+ yield from self._choose_a2(i1)
+
+ # non-bound sample
+ print("scanning non-bound sample!")
+ while True:
+ not_obey = True
+ for i1 in [
+ i
+ for i in self._all_samples
+ if self._check_obey_kkt(i) and self._is_unbound(i)
+ ]:
+ not_obey = False
+ yield from self._choose_a2(i1)
+ if not_obey:
+ print("all non-bound samples fit the KKT condition!")
+ break
+ if all_not_obey:
+ print("all samples fit the KKT condition! Optimization done!")
+ break
+ return False
+
+ def _choose_a2(self, i1):
+ """
+        Choose the second alpha using a heuristic algorithm; steps:
+        1: Choose alpha2 which gets the maximum step size (|E1 - E2|).
+        2: Start at a random point, loop over all non-bound samples until alpha1
+        and alpha2 are optimized.
+        3: Start at a random point, loop over all samples until alpha1 and alpha2
+        are optimized.
+ """
+ self._unbound = [i for i in self._all_samples if self._is_unbound(i)]
+
+ if len(self.unbound) > 0:
+ tmp_error = self._error.copy().tolist()
+ tmp_error_dict = {
+ index: value
+ for index, value in enumerate(tmp_error)
+ if self._is_unbound(index)
+ }
+ if self._e(i1) >= 0:
+ i2 = min(tmp_error_dict, key=lambda index: tmp_error_dict[index])
+ else:
+ i2 = max(tmp_error_dict, key=lambda index: tmp_error_dict[index])
+ cmd = yield i1, i2
+ if cmd is None:
+ return
+
+ for i2 in np.roll(self.unbound, np.random.choice(self.length)):
+ cmd = yield i1, i2
+ if cmd is None:
+ return
+
+ for i2 in np.roll(self._all_samples, np.random.choice(self.length)):
+ cmd = yield i1, i2
+ if cmd is None:
+ return
+
+ # Get the new alpha2 and new alpha1
+ def _get_new_alpha(self, i1, i2, a1, a2, e1, e2, y1, y2):
+ K = self._k
+ if i1 == i2:
+ return None, None
+
+ # calculate L and H which bound the new alpha2
+ s = y1 * y2
+ if s == -1:
+ L, H = max(0.0, a2 - a1), min(self._c, self._c + a2 - a1)
+ else:
+ L, H = max(0.0, a2 + a1 - self._c), min(self._c, a2 + a1)
+ if L == H:
+ return None, None
+
+ # calculate eta
+ k11 = K(i1, i1)
+ k22 = K(i2, i2)
+ k12 = K(i1, i2)
+ eta = k11 + k22 - 2.0 * k12
+
+ # select the new alpha2 which could get the minimal objectives
+ if eta > 0.0:
+ a2_new_unc = a2 + (y2 * (e1 - e2)) / eta
+ # a2_new has a boundary
+ if a2_new_unc >= H:
+ a2_new = H
+ elif a2_new_unc <= L:
+ a2_new = L
+ else:
+ a2_new = a2_new_unc
+ else:
+ b = self._b
+ l1 = a1 + s * (a2 - L)
+ h1 = a1 + s * (a2 - H)
+
+ # way 1
+ f1 = y1 * (e1 + b) - a1 * K(i1, i1) - s * a2 * K(i1, i2)
+ f2 = y2 * (e2 + b) - a2 * K(i2, i2) - s * a1 * K(i1, i2)
+ ol = (
+ l1 * f1
+ + L * f2
+ + 1 / 2 * l1 ** 2 * K(i1, i1)
+ + 1 / 2 * L ** 2 * K(i2, i2)
+ + s * L * l1 * K(i1, i2)
+ )
+ oh = (
+ h1 * f1
+ + H * f2
+ + 1 / 2 * h1 ** 2 * K(i1, i1)
+ + 1 / 2 * H ** 2 * K(i2, i2)
+ + s * H * h1 * K(i1, i2)
+ )
+ """
+ # way 2
+ Use objective function check which alpha2 new could get the minimal
+ objectives
+ """
+ if ol < (oh - self._eps):
+ a2_new = L
+ elif ol > oh + self._eps:
+ a2_new = H
+ else:
+ a2_new = a2
+
+ # a1_new has a boundary too
+ a1_new = a1 + s * (a2 - a2_new)
+ if a1_new < 0:
+ a2_new += s * a1_new
+ a1_new = 0
+ if a1_new > self._c:
+ a2_new += s * (a1_new - self._c)
+ a1_new = self._c
+
+ return a1_new, a2_new
+
+ # Normalise data using min_max way
+ def _norm(self, data):
+ if self._init:
+ self._min = np.min(data, axis=0)
+ self._max = np.max(data, axis=0)
+ self._init = False
+ return (data - self._min) / (self._max - self._min)
+ else:
+ return (data - self._min) / (self._max - self._min)
+
+ def _is_unbound(self, index):
+ if 0.0 < self.alphas[index] < self._c:
+ return True
+ else:
+ return False
+
+ def _is_support(self, index):
+ if self.alphas[index] > 0:
+ return True
+ else:
+ return False
+
+ @property
+ def unbound(self):
+ return self._unbound
+
+ @property
+ def support(self):
+ return [i for i in range(self.length) if self._is_support(i)]
+
+ @property
+ def length(self):
+ return self.samples.shape[0]
+
+
+class Kernel:
+ def __init__(self, kernel, degree=1.0, coef0=0.0, gamma=1.0):
+ self.degree = np.float64(degree)
+ self.coef0 = np.float64(coef0)
+ self.gamma = np.float64(gamma)
+ self._kernel_name = kernel
+ self._kernel = self._get_kernel(kernel_name=kernel)
+ self._check()
+
+ def _polynomial(self, v1, v2):
+ return (self.gamma * np.inner(v1, v2) + self.coef0) ** self.degree
+
+ def _linear(self, v1, v2):
+ return np.inner(v1, v2) + self.coef0
+
+ def _rbf(self, v1, v2):
+ return np.exp(-1 * (self.gamma * np.linalg.norm(v1 - v2) ** 2))
+
+ def _check(self):
+ if self._kernel == self._rbf:
+ if self.gamma < 0:
+ raise ValueError("gamma value must greater than 0")
+
+ def _get_kernel(self, kernel_name):
+ maps = {"linear": self._linear, "poly": self._polynomial, "rbf": self._rbf}
+ return maps[kernel_name]
+
+ def __call__(self, v1, v2):
+ return self._kernel(v1, v2)
+
+ def __repr__(self):
+ return self._kernel_name
+
+
+def count_time(func):
+ def call_func(*args, **kwargs):
+ import time
+
+ start_time = time.time()
+ func(*args, **kwargs)
+ end_time = time.time()
+ print(f"smo algorithm cost {end_time - start_time} seconds")
+
+ return call_func
+
+
+@count_time
+def test_cancel_data():
+ print("Hello!\nStart test svm by smo algorithm!")
+ # 0: download dataset and load into pandas' dataframe
+ if not os.path.exists(r"cancel_data.csv"):
+ request = urllib.request.Request(
+ CANCER_DATASET_URL,
+ headers={"User-Agent": "Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)"},
+ )
+ response = urllib.request.urlopen(request)
+ content = response.read().decode("utf-8")
+ with open(r"cancel_data.csv", "w") as f:
+ f.write(content)
+
+ data = pd.read_csv(r"cancel_data.csv", header=None)
+
+ # 1: pre-processing data
+ del data[data.columns.tolist()[0]]
+ data = data.dropna(axis=0)
+ data = data.replace({"M": np.float64(1), "B": np.float64(-1)})
+ samples = np.array(data)[:, :]
+
+ # 2: dividing data into train_data data and test_data data
+ train_data, test_data = samples[:328, :], samples[328:, :]
+ test_tags, test_samples = test_data[:, 0], test_data[:, 1:]
+
+ # 3: choose kernel function,and set initial alphas to zero(optional)
+ mykernel = Kernel(kernel="rbf", degree=5, coef0=1, gamma=0.5)
+ al = np.zeros(train_data.shape[0])
+
+ # 4: calculating best alphas using SMO algorithm and predict test_data samples
+ mysvm = SmoSVM(
+ train=train_data,
+ kernel_func=mykernel,
+ alpha_list=al,
+ cost=0.4,
+ b=0.0,
+ tolerance=0.001,
+ )
+ mysvm.fit()
+ predict = mysvm.predict(test_samples)
+
+ # 5: check accuracy
+ score = 0
+ test_num = test_tags.shape[0]
+ for i in range(test_tags.shape[0]):
+ if test_tags[i] == predict[i]:
+ score += 1
+ print(f"\nall: {test_num}\nright: {score}\nfalse: {test_num - score}")
+ print(f"Rough Accuracy: {score / test_tags.shape[0]}")
+
+
+def test_demonstration():
+ # change stdout
+ print("\nStart plot,please wait!!!")
+ sys.stdout = open(os.devnull, "w")
+
+ ax1 = plt.subplot2grid((2, 2), (0, 0))
+ ax2 = plt.subplot2grid((2, 2), (0, 1))
+ ax3 = plt.subplot2grid((2, 2), (1, 0))
+ ax4 = plt.subplot2grid((2, 2), (1, 1))
+ ax1.set_title("linear svm,cost:0.1")
+ test_linear_kernel(ax1, cost=0.1)
+ ax2.set_title("linear svm,cost:500")
+ test_linear_kernel(ax2, cost=500)
+ ax3.set_title("rbf kernel svm,cost:0.1")
+ test_rbf_kernel(ax3, cost=0.1)
+ ax4.set_title("rbf kernel svm,cost:500")
+ test_rbf_kernel(ax4, cost=500)
+
+ sys.stdout = sys.__stdout__
+ print("Plot done!!!")
+
+
+def test_linear_kernel(ax, cost):
+ train_x, train_y = make_blobs(
+ n_samples=500, centers=2, n_features=2, random_state=1
+ )
+ train_y[train_y == 0] = -1
+ scaler = StandardScaler()
+ train_x_scaled = scaler.fit_transform(train_x, train_y)
+ train_data = np.hstack((train_y.reshape(500, 1), train_x_scaled))
+ mykernel = Kernel(kernel="linear", degree=5, coef0=1, gamma=0.5)
+ mysvm = SmoSVM(
+ train=train_data,
+ kernel_func=mykernel,
+ cost=cost,
+ tolerance=0.001,
+ auto_norm=False,
+ )
+ mysvm.fit()
+ plot_partition_boundary(mysvm, train_data, ax=ax)
+
+
+def test_rbf_kernel(ax, cost):
+ train_x, train_y = make_circles(
+ n_samples=500, noise=0.1, factor=0.1, random_state=1
+ )
+ train_y[train_y == 0] = -1
+ scaler = StandardScaler()
+ train_x_scaled = scaler.fit_transform(train_x, train_y)
+ train_data = np.hstack((train_y.reshape(500, 1), train_x_scaled))
+ mykernel = Kernel(kernel="rbf", degree=5, coef0=1, gamma=0.5)
+ mysvm = SmoSVM(
+ train=train_data,
+ kernel_func=mykernel,
+ cost=cost,
+ tolerance=0.001,
+ auto_norm=False,
+ )
+ mysvm.fit()
+ plot_partition_boundary(mysvm, train_data, ax=ax)
+
+
+def plot_partition_boundary(
+ model, train_data, ax, resolution=100, colors=("b", "k", "r")
+):
+ """
+    We cannot get the optimum w of our kernel svm model, which is different from
+    a linear svm. For this reason, we generate randomly distributed points with
+    high density, and the predicted values of these points are calculated using
+    our trained model. Then we can use these predicted values to draw a contour
+    map, which represents the svm's partition boundary.
+ """
+ train_data_x = train_data[:, 1]
+ train_data_y = train_data[:, 2]
+ train_data_tags = train_data[:, 0]
+ xrange = np.linspace(train_data_x.min(), train_data_x.max(), resolution)
+ yrange = np.linspace(train_data_y.min(), train_data_y.max(), resolution)
+ test_samples = np.array([(x, y) for x in xrange for y in yrange]).reshape(
+ resolution * resolution, 2
+ )
+
+ test_tags = model.predict(test_samples, classify=False)
+ grid = test_tags.reshape((len(xrange), len(yrange)))
+
+ # Plot contour map which represents the partition boundary
+ ax.contour(
+ xrange,
+ yrange,
+ np.mat(grid).T,
+ levels=(-1, 0, 1),
+ linestyles=("--", "-", "--"),
+ linewidths=(1, 1, 1),
+ colors=colors,
+ )
+ # Plot all train samples
+ ax.scatter(
+ train_data_x,
+ train_data_y,
+ c=train_data_tags,
+ cmap=plt.cm.Dark2,
+ lw=0,
+ alpha=0.5,
+ )
+
+ # Plot support vectors
+ support = model.support
+ ax.scatter(
+ train_data_x[support],
+ train_data_y[support],
+ c=train_data_tags[support],
+ cmap=plt.cm.Dark2,
+ )
+
+
+if __name__ == "__main__":
+ test_cancel_data()
+ test_demonstration()
+ plt.show()
diff --git a/machine_learning/similarity_search.py b/machine_learning/similarity_search.py
new file mode 100644
index 000000000000..6bfb12ed88cb
--- /dev/null
+++ b/machine_learning/similarity_search.py
@@ -0,0 +1,137 @@
+"""
+Similarity Search : https://en.wikipedia.org/wiki/Similarity_search
+Similarity search is a search algorithm for finding the nearest vector from
+vectors, used in natural language processing.
+This algorithm calculates the euclidean distance between vectors and
+returns a list containing two pieces of data for each vector:
+ 1. the nearest vector
+ 2. distance between the vector and the nearest vector (float)
+"""
+import math
+
+import numpy as np
+
+
+def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
+ """
+    Calculates the euclidean distance between two vectors.
+ :param input_a: ndarray of first vector.
+ :param input_b: ndarray of second vector.
+ :return: Euclidean distance of input_a and input_b. By using math.sqrt(),
+ result will be float.
+
+ >>> euclidean(np.array([0]), np.array([1]))
+ 1.0
+ >>> euclidean(np.array([0, 1]), np.array([1, 1]))
+ 1.0
+ >>> euclidean(np.array([0, 0, 0]), np.array([0, 0, 1]))
+ 1.0
+ """
+ return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))
+
+
+def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
+ """
+ :param dataset: Set containing the vectors. Should be ndarray.
+ :param value_array: vector/vectors we want to know the nearest vector from dataset.
+ :return: Result will be a list containing
+ 1. the nearest vector
+ 2. distance from the vector
+
+ >>> dataset = np.array([[0], [1], [2]])
+ >>> value_array = np.array([[0]])
+ >>> similarity_search(dataset, value_array)
+ [[[0], 0.0]]
+
+ >>> dataset = np.array([[0, 0], [1, 1], [2, 2]])
+ >>> value_array = np.array([[0, 1]])
+ >>> similarity_search(dataset, value_array)
+ [[[0, 0], 1.0]]
+
+ >>> dataset = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2]])
+ >>> value_array = np.array([[0, 0, 1]])
+ >>> similarity_search(dataset, value_array)
+ [[[0, 0, 0], 1.0]]
+
+ >>> dataset = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2]])
+ >>> value_array = np.array([[0, 0, 0], [0, 0, 1]])
+ >>> similarity_search(dataset, value_array)
+ [[[0, 0, 0], 0.0], [[0, 0, 0], 1.0]]
+
+ These are the errors that might occur:
+
+ 1. If dimensions are different.
+ For example, dataset has 2d array and value_array has 1d array:
+ >>> dataset = np.array([[1]])
+ >>> value_array = np.array([1])
+ >>> similarity_search(dataset, value_array)
+ Traceback (most recent call last):
+ ...
+ ValueError: Wrong input data's dimensions... dataset : 2, value_array : 1
+
+ 2. If data's shapes are different.
+ For example, dataset has shape of (3, 2) and value_array has (2, 3).
+ We are expecting same shapes of two arrays, so it is wrong.
+ >>> dataset = np.array([[0, 0], [1, 1], [2, 2]])
+ >>> value_array = np.array([[0, 0, 0], [0, 0, 1]])
+ >>> similarity_search(dataset, value_array)
+ Traceback (most recent call last):
+ ...
+ ValueError: Wrong input data's shape... dataset : 2, value_array : 3
+
+ 3. If data types are different.
+ When trying to compare, we are expecting same types so they should be same.
+ If not, it'll come up with errors.
+ >>> dataset = np.array([[0, 0], [1, 1], [2, 2]], dtype=np.float32)
+ >>> value_array = np.array([[0, 0], [0, 1]], dtype=np.int32)
+ >>> similarity_search(dataset, value_array) # doctest: +NORMALIZE_WHITESPACE
+ Traceback (most recent call last):
+ ...
+ TypeError: Input data have different datatype...
+ dataset : float32, value_array : int32
+ """
+
+ if dataset.ndim != value_array.ndim:
+ raise ValueError(
+ f"Wrong input data's dimensions... dataset : {dataset.ndim}, "
+ f"value_array : {value_array.ndim}"
+ )
+
+ try:
+ if dataset.shape[1] != value_array.shape[1]:
+ raise ValueError(
+ f"Wrong input data's shape... dataset : {dataset.shape[1]}, "
+ f"value_array : {value_array.shape[1]}"
+ )
+ except IndexError:
+ if dataset.ndim != value_array.ndim:
+ raise TypeError("Wrong shape")
+
+ if dataset.dtype != value_array.dtype:
+ raise TypeError(
+ f"Input data have different datatype... dataset : {dataset.dtype}, "
+ f"value_array : {value_array.dtype}"
+ )
+
+ answer = []
+
+ for value in value_array:
+ dist = euclidean(value, dataset[0])
+ vector = dataset[0].tolist()
+
+ for dataset_value in dataset[1:]:
+ temp_dist = euclidean(value, dataset_value)
+
+ if dist > temp_dist:
+ dist = temp_dist
+ vector = dataset_value.tolist()
+
+ answer.append([vector, dist])
+
+ return answer
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/machine_learning/support_vector_machines.py b/machine_learning/support_vector_machines.py
new file mode 100644
index 000000000000..c5e5085d8748
--- /dev/null
+++ b/machine_learning/support_vector_machines.py
@@ -0,0 +1,58 @@
+from sklearn import svm
+from sklearn.datasets import load_iris
+from sklearn.model_selection import train_test_split
+
+
+# different functions implementing different types of SVM's
+def NuSVC(train_x, train_y):
+ svc_NuSVC = svm.NuSVC()
+ svc_NuSVC.fit(train_x, train_y)
+ return svc_NuSVC
+
+
+def Linearsvc(train_x, train_y):
+ svc_linear = svm.LinearSVC(tol=10e-2)
+ svc_linear.fit(train_x, train_y)
+ return svc_linear
+
+
+def SVC(train_x, train_y):
+ # svm.SVC(C=1.0, kernel='rbf', degree=3, gamma=0.0, coef0=0.0, shrinking=True,
+ # probability=False,tol=0.001, cache_size=200, class_weight=None, verbose=False,
+ # max_iter=1000, random_state=None)
+    # various parameters like "kernel", "gamma", "C" can be effectively tuned for
+    # a given machine learning model.
+ SVC = svm.SVC(gamma="auto")
+ SVC.fit(train_x, train_y)
+ return SVC
+
+
+def test(X_new):
+ """
+ 3 test cases to be passed
+ an array containing the sepal length (cm), sepal width (cm), petal length (cm),
+ petal width (cm) based on which the target name will be predicted
+ >>> test([1,2,1,4])
+ 'virginica'
+ >>> test([5, 2, 4, 1])
+ 'versicolor'
+ >>> test([6,3,4,1])
+ 'versicolor'
+ """
+ iris = load_iris()
+ # splitting the dataset to test and train
+ train_x, test_x, train_y, test_y = train_test_split(
+ iris["data"], iris["target"], random_state=4
+ )
+ # any of the 3 types of SVM can be used
+ # current_model=SVC(train_x, train_y)
+ # current_model=NuSVC(train_x, train_y)
+ current_model = Linearsvc(train_x, train_y)
+ prediction = current_model.predict([X_new])
+ return iris["target_names"][prediction][0]
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/machine_learning/word_frequency_functions.py b/machine_learning/word_frequency_functions.py
new file mode 100644
index 000000000000..9cf7b694c6be
--- /dev/null
+++ b/machine_learning/word_frequency_functions.py
@@ -0,0 +1,137 @@
+import string
+from math import log10
+
+"""
+ tf-idf Wikipedia: https://en.wikipedia.org/wiki/Tf%E2%80%93idf
+ tf-idf and other word frequency algorithms are often used
+ as a weighting factor in information retrieval and text
+ mining. 83% of text-based recommender systems use
+ tf-idf for term weighting. In Layman's terms, tf-idf
+ is a statistic intended to reflect how important a word
+ is to a document in a corpus (a collection of documents)
+
+
+ Here I've implemented several word frequency algorithms
+ that are commonly used in information retrieval: Term Frequency,
+ Document Frequency, and TF-IDF (Term-Frequency*Inverse-Document-Frequency)
+ are included.
+
+ Term Frequency is a statistical function that
+ returns a number representing how frequently
+ an expression occurs in a document. This
+ indicates how significant a particular term is in
+ a given document.
+
+ Document Frequency is a statistical function that returns
+ an integer representing the number of documents in a
+ corpus that a term occurs in (where the max number returned
+ would be the number of documents in the corpus).
+
+ Inverse Document Frequency is mathematically written as
+ log10(N/df), where N is the number of documents in your
+ corpus and df is the Document Frequency. If df is 0, a
+ ZeroDivisionError will be thrown.
+
+ Term-Frequency*Inverse-Document-Frequency is a measure
+ of the originality of a term. It is mathematically written
+ as tf*log10(N/df). It compares the number of times
+ a term appears in a document with the number of documents
+ the term appears in. If df is 0, a ZeroDivisionError will be thrown.
+"""
+
+
def term_frequency(term: str, document: str) -> int:
    """
    Return the number of times a term occurs within
    a given document.
    @params: term, the term to search a document for, and document,
        the document to search within
    @returns: an integer representing the number of times a term is
        found within the document

    @examples:
    >>> term_frequency("to", "To be, or not to be")
    2
    """
    # Remove punctuation and newlines before tokenizing on spaces.
    cleaned = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    target = term.lower()
    # Case-insensitive token-by-token comparison.
    return sum(1 for token in cleaned.split(" ") if token.lower() == target)
+
+
def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """
    Calculate the number of documents in a corpus that contain a
    given term.
    @params : term, the term to search each document for, and corpus, a collection of
        documents. Each document should be separated by a newline.
    @returns : the number of documents in the corpus that contain the term you are
        searching for and the number of documents in the corpus
    @examples :
    >>> corpus = ("This is the first document in the corpus.\\nThIs is the second "
    ...           "document in the corpus.\\nTHIS is the third document in the corpus.")
    >>> document_frequency("first", corpus)
    (1, 3)
    """
    # Strip punctuation and lowercase so matching is case-insensitive.
    clean_corpus = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )
    docs = clean_corpus.split("\n")
    term = term.lower()
    # Bug fix: match whole words via tokenization, not substrings — previously
    # e.g. "is" matched inside "this", inconsistent with term_frequency().
    return (len([doc for doc in docs if term in doc.split()]), len(docs))
+
+
def inverse_document_frequency(df: int, N: int, smoothing: bool = False) -> float:
    """
    Return a float denoting the importance of a word, computed as
    log10(N/df) where N is the number of documents in the corpus and
    df is the Document Frequency.
    @params : df, the Document Frequency, N,
        the number of documents in the corpus and
        smoothing, if True return the idf-smooth
    @returns : log10(N/df) or 1+log10(N/1+df)
    @examples :
    >>> inverse_document_frequency(3, 0)
    Traceback (most recent call last):
     ...
    ValueError: log10(0) is undefined.
    >>> inverse_document_frequency(1, 3)
    0.477
    >>> inverse_document_frequency(0, 3)
    Traceback (most recent call last):
     ...
    ZeroDivisionError: df must be > 0
    >>> inverse_document_frequency(0, 3,True)
    1.477
    """
    # Smoothed variant never divides by zero, but log10(0) is still undefined.
    if smoothing:
        if N == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(N / (1 + df)), 3)

    # Unsmoothed: df must be strictly positive, N non-zero.
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    if N == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(N / df), 3)
+
+
def tf_idf(tf: int, idf: int) -> float:
    """
    Combine the term frequency and inverse document frequency to measure
    the 'originality' of a term: tf-idf = TF * IDF, rounded to 3 places.
    @params : tf, the term frequency, and idf, the inverse document
        frequency
    @examples :
    >>> tf_idf(2, 0.477)
    0.954
    """
    product = tf * idf
    return round(product, 3)
diff --git a/maths/3n+1.py b/maths/3n+1.py
deleted file mode 100644
index 6424fe0d8f15..000000000000
--- a/maths/3n+1.py
+++ /dev/null
@@ -1,19 +0,0 @@
-def main():
- def n31(a):# a = initial number
- c = 0
- l = [a]
- while a != 1:
- if a % 2 == 0:#if even divide it by 2
- a = a // 2
- elif a % 2 == 1:#if odd 3n+1
- a = 3*a +1
- c += 1#counter
- l += [a]
-
- return l , c
- print(n31(43))
- print(n31(98)[0][-1])# = a
- print("It took {0} steps.".format(n31(13)[1]))#optional finish
-
-if __name__ == '__main__':
- main()
diff --git a/maths/3n_plus_1.py b/maths/3n_plus_1.py
new file mode 100644
index 000000000000..28c9fd7b426f
--- /dev/null
+++ b/maths/3n_plus_1.py
@@ -0,0 +1,149 @@
+from __future__ import annotations
+
+
def n31(a: int) -> tuple[list[int], int]:
    """
    Return the Collatz sequence starting at a positive integer, together
    with the sequence's length.

    >>> n31(4)
    ([4, 2, 1], 3)

    :raises TypeError: if a is not an int
    :raises ValueError: if a is not positive (a >= 1 is required)
    """
    if not isinstance(a, int):
        # f-string for consistency with the ValueError below.
        raise TypeError(f"Must be int, not {type(a).__name__}")
    if a < 1:
        # Bug fix: the old message said "greater than 1", but the check
        # accepts a == 1 — any positive integer is valid.
        raise ValueError(f"Given integer must be positive, not {a}")

    path = [a]
    while a != 1:
        # Halve even values; apply 3n + 1 to odd values.
        a = a // 2 if a % 2 == 0 else 3 * a + 1
        path.append(a)
    return path, len(path)
+
+
def test_n31():
    """
    >>> test_n31()
    """
    assert n31(4) == ([4, 2, 1], 3)
    assert n31(11) == ([11, 34, 17, 52, 26, 13, 40, 20, 10, 5, 16, 8, 4, 2, 1], 15)
    # Known Collatz path of 31 (107 terms).
    expected_path = [
        31, 94, 47, 142, 71, 214, 107, 322, 161, 484,
        242, 121, 364, 182, 91, 274, 137, 412, 206, 103,
        310, 155, 466, 233, 700, 350, 175, 526, 263, 790,
        395, 1186, 593, 1780, 890, 445, 1336, 668, 334, 167,
        502, 251, 754, 377, 1132, 566, 283, 850, 425, 1276,
        638, 319, 958, 479, 1438, 719, 2158, 1079, 3238, 1619,
        4858, 2429, 7288, 3644, 1822, 911, 2734, 1367, 4102, 2051,
        6154, 3077, 9232, 4616, 2308, 1154, 577, 1732, 866, 433,
        1300, 650, 325, 976, 488, 244, 122, 61, 184, 92,
        46, 23, 70, 35, 106, 53, 160, 80, 40, 20,
        10, 5, 16, 8, 4, 2, 1,
    ]
    assert n31(31) == (expected_path, 107)
+
+
+if __name__ == "__main__":
+ num = 4
+ path, length = n31(num)
+ print(f"The Collatz sequence of {num} took {length} steps. \nPath: {path}")
diff --git a/maths/FindMax.py b/maths/FindMax.py
deleted file mode 100644
index 0ce49a68c348..000000000000
--- a/maths/FindMax.py
+++ /dev/null
@@ -1,14 +0,0 @@
-# NguyenU
-
-def find_max(nums):
- max = nums[0]
- for x in nums:
- if x > max:
- max = x
- print(max)
-
-def main():
- find_max([2, 4, 9, 7, 19, 94, 5])
-
-if __name__ == '__main__':
- main()
diff --git a/maths/FindMin.py b/maths/FindMin.py
deleted file mode 100644
index 86207984e3da..000000000000
--- a/maths/FindMin.py
+++ /dev/null
@@ -1,12 +0,0 @@
-def main():
- def findMin(x):
- minNum = x[0]
- for i in x:
- if minNum > i:
- minNum = i
- return minNum
-
- print(findMin([0,1,2,3,4,5,-3,24,-56])) # = -56
-
-if __name__ == '__main__':
- main()
diff --git a/maths/Hanoi.py b/maths/Hanoi.py
deleted file mode 100644
index dd04d0fa58d8..000000000000
--- a/maths/Hanoi.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# @author willx75
-# Tower of Hanoi recursion game algorithm is a game, it consists of three rods and a number of disks of different sizes, which can slide onto any rod
-
-import logging
-
-log = logging.getLogger()
-logging.basicConfig(level=logging.DEBUG)
-
-
-def Tower_Of_Hanoi(n, source, dest, by, mouvement):
- if n == 0:
- return n
- elif n == 1:
- mouvement += 1
- # no print statement (you could make it an optional flag for printing logs)
- logging.debug('Move the plate from', source, 'to', dest)
- return mouvement
- else:
-
- mouvement = mouvement + Tower_Of_Hanoi(n-1, source, by, dest, 0)
- logging.debug('Move the plate from', source, 'to', dest)
-
- mouvement = mouvement + 1 + Tower_Of_Hanoi(n-1, by, dest, source, 0)
- return mouvement
diff --git a/maths/PrimeCheck.py b/maths/PrimeCheck.py
deleted file mode 100644
index e0c51d77a038..000000000000
--- a/maths/PrimeCheck.py
+++ /dev/null
@@ -1,13 +0,0 @@
-import math
-def primeCheck(number):
- if number % 2 == 0 and number > 2:
- return False
- return all(number % i for i in range(3, int(math.sqrt(number)) + 1, 2))
-
-def main():
- print(primeCheck(37))
- print(primeCheck(100))
- print(primeCheck(77))
-
-if __name__ == '__main__':
- main()
diff --git a/maths/__init__.py b/maths/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/maths/abs.py b/maths/abs.py
index 6d0596478d5f..68c99a1d51d8 100644
--- a/maths/abs.py
+++ b/maths/abs.py
@@ -1,18 +1,28 @@
-def absVal(num):
+"""Absolute Value."""
+
+
def abs_val(num):
    """
    Find the absolute value of a number.

    >>> abs_val(-5.1)
    5.1
    >>> abs_val(-5) == abs_val(5)
    True
    >>> abs_val(0)
    0
    """
    # Negate only when the value is below zero.
    if num < 0:
        return -num
    return num
+
+
def test_abs_val():
    """
    >>> test_abs_val()
    """
    assert abs_val(0) == 0
    assert abs_val(34) == 34
    assert abs_val(-100000000000) == 100000000000
-def main():
- print(absVal(-34)) # = 34
-if __name__ == '__main__':
- main()
+if __name__ == "__main__":
+ print(abs_val(-34)) # --> 34
diff --git a/maths/absMax.py b/maths/absMax.py
deleted file mode 100644
index 7ff9e4d3ca09..000000000000
--- a/maths/absMax.py
+++ /dev/null
@@ -1,25 +0,0 @@
-def absMax(x):
- """
- #>>>absMax([0,5,1,11])
- 11
- >>absMax([3,-10,-2])
- -10
- """
- j =x[0]
- for i in x:
- if abs(i) > abs(j):
- j = i
- return j
-
-
-def main():
- a = [1,2,-11]
- print(absMax(a)) # = -11
-
-
-if __name__ == '__main__':
- main()
-
-"""
-print abs Max
-"""
diff --git a/maths/absMin.py b/maths/absMin.py
deleted file mode 100644
index 67d510551907..000000000000
--- a/maths/absMin.py
+++ /dev/null
@@ -1,20 +0,0 @@
-from Maths.abs import absVal
-def absMin(x):
- """
- # >>>absMin([0,5,1,11])
- 0
- # >>absMin([3,-10,-2])
- -2
- """
- j = x[0]
- for i in x:
- if absVal(i) < absVal(j):
- j = i
- return j
-
-def main():
- a = [-3,-1,2,-11]
- print(absMin(a)) # = -1
-
-if __name__ == '__main__':
- main()
\ No newline at end of file
diff --git a/maths/abs_max.py b/maths/abs_max.py
new file mode 100644
index 000000000000..e5a8219657ac
--- /dev/null
+++ b/maths/abs_max.py
@@ -0,0 +1,35 @@
+from __future__ import annotations
+
+
def abs_max(x: list[int]) -> int:
    """
    Return the element of x with the largest absolute value
    (first one wins on ties).

    >>> abs_max([0,5,1,11])
    11
    >>> abs_max([3,-10,-2])
    -10
    """
    # max with key=abs keeps the first element among equal magnitudes,
    # matching the original strict-comparison loop.
    return max(x, key=abs)
+
+
def abs_max_sort(x):
    """
    Return the element of x with the largest absolute value, found by
    sorting on magnitude (last one wins on ties).

    >>> abs_max_sort([0,5,1,11])
    11
    >>> abs_max_sort([3,-10,-2])
    -10
    """
    by_magnitude = sorted(x, key=abs)
    return by_magnitude[-1]
+
+
def main():
    """Smoke-test both implementations on a sample list."""
    sample = [1, 2, -11]
    assert abs_max(sample) == -11
    assert abs_max_sort(sample) == -11
+
+
+if __name__ == "__main__":
+ main()
diff --git a/maths/abs_min.py b/maths/abs_min.py
new file mode 100644
index 000000000000..eb84de37ce23
--- /dev/null
+++ b/maths/abs_min.py
@@ -0,0 +1,24 @@
+from .abs import abs_val
+
+
def absMin(x):
    """
    Return the element of x with the smallest absolute value
    (first one wins on ties).

    >>> absMin([0,5,1,11])
    0
    >>> absMin([3,-10,-2])
    -2
    """
    # Equivalent to the manual scan with abs_val(): the builtin abs gives
    # the same magnitudes, and min keeps the first minimum.
    return min(x, key=abs)
+
+
def main():
    """Demo: print the minimum-magnitude element of a sample list."""
    sample = [-3, -1, 2, -11]
    print(absMin(sample))  # = -1
+
+
+if __name__ == "__main__":
+ main()
diff --git a/maths/add.py b/maths/add.py
new file mode 100644
index 000000000000..0bc7da9697d3
--- /dev/null
+++ b/maths/add.py
@@ -0,0 +1,19 @@
+"""
+Just to check
+"""
+
+
def add(a, b):
    """
    Return the sum of two values.

    >>> add(2, 2)
    4
    >>> add(2, -2)
    0
    """
    total = a + b
    return total
+
+
+if __name__ == "__main__":
+ a = 5
+ b = 6
+ print(f"The sum of {a} + {b} is {add(a, b)}")
diff --git a/maths/aliquot_sum.py b/maths/aliquot_sum.py
new file mode 100644
index 000000000000..9c58aa61d19e
--- /dev/null
+++ b/maths/aliquot_sum.py
@@ -0,0 +1,48 @@
def aliquot_sum(input_num: int) -> int:
    """
    Return the aliquot sum of an input integer: the sum of all natural
    numbers less than n that divide n evenly. For example, the aliquot
    sum of 15 is 1 + 3 + 5 = 9. Simple O(n) implementation.
    @param input_num: a positive integer whose aliquot sum is to be found
    @return: the aliquot sum of input_num, if input_num is positive.
    Otherwise, raise a ValueError
    Wikipedia Explanation: https://en.wikipedia.org/wiki/Aliquot_sum

    >>> aliquot_sum(15)
    9
    >>> aliquot_sum(6)
    6
    >>> aliquot_sum(-1)
    Traceback (most recent call last):
     ...
    ValueError: Input must be positive
    >>> aliquot_sum(0)
    Traceback (most recent call last):
     ...
    ValueError: Input must be positive
    >>> aliquot_sum(1.6)
    Traceback (most recent call last):
     ...
    ValueError: Input must be an integer
    >>> aliquot_sum(12)
    16
    >>> aliquot_sum(1)
    0
    >>> aliquot_sum(19)
    1
    """
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    # No proper divisor can exceed n // 2.
    total = 0
    for divisor in range(1, input_num // 2 + 1):
        if input_num % divisor == 0:
            total += divisor
    return total
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/maths/allocation_number.py b/maths/allocation_number.py
new file mode 100644
index 000000000000..d419e74d01ff
--- /dev/null
+++ b/maths/allocation_number.py
@@ -0,0 +1,49 @@
+"""
+In a multi-threaded download, this algorithm could be used to provide
+each worker thread with a block of non-overlapping bytes to download.
+For example:
+ for i in allocation_list:
+ requests.get(url,headers={'Range':f'bytes={i}'})
+"""
+from __future__ import annotations
+
+
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """
    Divide a number of bytes into x partitions.
    :param number_of_bytes: the total of bytes.
    :param partitions: the number of partition need to be allocated.
    :return: list of bytes to be assigned to each worker thread

    >>> allocation_num(16647, 4)
    ['1-4161', '4162-8322', '8323-12483', '12484-16647']
    >>> allocation_num(50000, 5)
    ['1-10000', '10001-20000', '20001-30000', '30001-40000', '40001-50000']
    >>> allocation_num(888, 999)
    Traceback (most recent call last):
     ...
    ValueError: partitions can not > number_of_bytes!
    >>> allocation_num(888, -4)
    Traceback (most recent call last):
     ...
    ValueError: partitions must be a positive number!
    """
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    chunk = number_of_bytes // partitions
    # All partitions but the last take exactly `chunk` bytes.
    allocation_list = [f"{i * chunk + 1}-{(i + 1) * chunk}" for i in range(partitions - 1)]
    # The last partition absorbs any remainder.
    allocation_list.append(f"{(partitions - 1) * chunk + 1}-{number_of_bytes}")
    return allocation_list
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/maths/area.py b/maths/area.py
new file mode 100644
index 000000000000..8689f323cc9a
--- /dev/null
+++ b/maths/area.py
@@ -0,0 +1,288 @@
+"""
+Find the area of various geometric shapes
+"""
+from math import pi, sqrt
+
+
def surface_area_cube(side_length: float) -> float:
    """
    Compute the total surface area of a cube from its edge length.

    >>> surface_area_cube(1)
    6
    >>> surface_area_cube(3)
    54
    >>> surface_area_cube(-1)
    Traceback (most recent call last):
     ...
    ValueError: surface_area_cube() only accepts non-negative values
    """
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    face_area = side_length ** 2
    return 6 * face_area
+
+
def surface_area_sphere(radius: float) -> float:
    """
    Compute the surface area of a sphere: 4 * pi * r^2.
    Wikipedia reference: https://en.wikipedia.org/wiki/Sphere

    >>> surface_area_sphere(5)
    314.1592653589793
    >>> surface_area_sphere(1)
    12.566370614359172
    >>> surface_area_sphere(-1)
    Traceback (most recent call last):
     ...
    ValueError: surface_area_sphere() only accepts non-negative values
    """
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    surface = 4 * pi * radius ** 2
    return surface
+
+
def area_rectangle(length: float, width: float) -> float:
    """
    Compute the area of a rectangle from its two side lengths.

    >>> area_rectangle(10, 20)
    200
    >>> area_rectangle(-1, -2)
    Traceback (most recent call last):
     ...
    ValueError: area_rectangle() only accepts non-negative values
    >>> area_rectangle(1, -2)
    Traceback (most recent call last):
     ...
    ValueError: area_rectangle() only accepts non-negative values
    >>> area_rectangle(-1, 2)
    Traceback (most recent call last):
     ...
    ValueError: area_rectangle() only accepts non-negative values
    """
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    result = length * width
    return result
+
+
def area_square(side_length: float) -> float:
    """
    Compute the area of a square from its side length.

    >>> area_square(10)
    100
    >>> area_square(-1)
    Traceback (most recent call last):
     ...
    ValueError: area_square() only accepts non-negative values
    """
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    result = side_length ** 2
    return result
+
+
def area_triangle(base: float, height: float) -> float:
    """
    Compute the area of a triangle from its base and height.

    >>> area_triangle(10, 10)
    50.0
    >>> area_triangle(-1, -2)
    Traceback (most recent call last):
     ...
    ValueError: area_triangle() only accepts non-negative values
    >>> area_triangle(1, -2)
    Traceback (most recent call last):
     ...
    ValueError: area_triangle() only accepts non-negative values
    >>> area_triangle(-1, 2)
    Traceback (most recent call last):
     ...
    ValueError: area_triangle() only accepts non-negative values
    """
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    result = (base * height) / 2
    return result
+
+
def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    """
    Compute the area of a triangle from its three side lengths using
    Heron's formula: https://en.wikipedia.org/wiki/Heron%27s_formula

    >>> area_triangle_three_sides(5, 12, 13)
    30.0
    >>> area_triangle_three_sides(10, 11, 12)
    51.521233486786784
    >>> area_triangle_three_sides(-1, -2, -1)
    Traceback (most recent call last):
     ...
    ValueError: area_triangle_three_sides() only accepts non-negative values
    >>> area_triangle_three_sides(1, -2, 1)
    Traceback (most recent call last):
     ...
    ValueError: area_triangle_three_sides() only accepts non-negative values
    """
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    # Triangle inequality: each side must not exceed the sum of the others.
    if side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError("Given three sides do not form a triangle")
    s = (side1 + side2 + side3) / 2
    return sqrt(s * (s - side1) * (s - side2) * (s - side3))
+
+
def area_parallelogram(base: float, height: float) -> float:
    """
    Compute the area of a parallelogram from its base and height.

    >>> area_parallelogram(10, 20)
    200
    >>> area_parallelogram(-1, -2)
    Traceback (most recent call last):
     ...
    ValueError: area_parallelogram() only accepts non-negative values
    >>> area_parallelogram(1, -2)
    Traceback (most recent call last):
     ...
    ValueError: area_parallelogram() only accepts non-negative values
    >>> area_parallelogram(-1, 2)
    Traceback (most recent call last):
     ...
    ValueError: area_parallelogram() only accepts non-negative values
    """
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    result = base * height
    return result
+
+
def area_trapezium(base1: float, base2: float, height: float) -> float:
    """
    Compute the area of a trapezium from its two parallel bases and height.

    >>> area_trapezium(10, 20, 30)
    450.0
    >>> area_trapezium(-1, -2, -3)
    Traceback (most recent call last):
     ...
    ValueError: area_trapezium() only accepts non-negative values
    >>> area_trapezium(-1, 2, 3)
    Traceback (most recent call last):
     ...
    ValueError: area_trapezium() only accepts non-negative values
    >>> area_trapezium(1, -2, 3)
    Traceback (most recent call last):
     ...
    ValueError: area_trapezium() only accepts non-negative values
    >>> area_trapezium(1, 2, -3)
    Traceback (most recent call last):
     ...
    ValueError: area_trapezium() only accepts non-negative values
    """
    if base1 < 0 or base2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    result = 1 / 2 * (base1 + base2) * height
    return result
+
+
def area_circle(radius: float) -> float:
    """
    Compute the area of a circle from its radius.

    >>> area_circle(20)
    1256.6370614359173
    >>> area_circle(-1)
    Traceback (most recent call last):
     ...
    ValueError: area_circle() only accepts non-negative values
    """
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    result = pi * radius ** 2
    return result
+
+
def area_ellipse(radius_x: float, radius_y: float) -> float:
    """
    Compute the area of an ellipse from its two semi-axes.

    >>> area_ellipse(10, 10)
    314.1592653589793
    >>> area_ellipse(10, 20)
    628.3185307179587
    >>> area_ellipse(-10, 20)
    Traceback (most recent call last):
     ...
    ValueError: area_ellipse() only accepts non-negative values
    >>> area_ellipse(10, -20)
    Traceback (most recent call last):
     ...
    ValueError: area_ellipse() only accepts non-negative values
    >>> area_ellipse(-10, -20)
    Traceback (most recent call last):
     ...
    ValueError: area_ellipse() only accepts non-negative values
    """
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    result = pi * radius_x * radius_y
    return result
+
+
def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    """
    Compute the area of a rhombus from its two diagonals.

    >>> area_rhombus(10, 20)
    100.0
    >>> area_rhombus(-1, -2)
    Traceback (most recent call last):
     ...
    ValueError: area_rhombus() only accepts non-negative values
    >>> area_rhombus(1, -2)
    Traceback (most recent call last):
     ...
    ValueError: area_rhombus() only accepts non-negative values
    >>> area_rhombus(-1, 2)
    Traceback (most recent call last):
     ...
    ValueError: area_rhombus() only accepts non-negative values
    """
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    result = 1 / 2 * diagonal_1 * diagonal_2
    return result
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod(verbose=True) # verbose so we can see methods missing tests
+
+ print("[DEMO] Areas of various geometric shapes: \n")
+ print(f"Rectangle: {area_rectangle(10, 20) = }")
+ print(f"Square: {area_square(10) = }")
+ print(f"Triangle: {area_triangle(10, 10) = }")
+ print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
+ print(f"Parallelogram: {area_parallelogram(10, 20) = }")
+ print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
+ print(f"Circle: {area_circle(20) = }")
+ print("\nSurface Areas of various geometric shapes: \n")
+ print(f"Cube: {surface_area_cube(20) = }")
+ print(f"Sphere: {surface_area_sphere(20) = }")
+ print(f"Rhombus: {area_rhombus(10, 20) = }")
diff --git a/maths/area_under_curve.py b/maths/area_under_curve.py
new file mode 100644
index 000000000000..2d01e414b63b
--- /dev/null
+++ b/maths/area_under_curve.py
@@ -0,0 +1,59 @@
+"""
+Approximates the area under the curve using the trapezoidal rule
+"""
+
+from typing import Callable, Union
+
+
def trapezoidal_area(
    fnc: Callable[[Union[int, float]], Union[int, float]],
    x_start: Union[int, float],
    x_end: Union[int, float],
    steps: int = 100,
) -> float:
    """
    Treats curve as a collection of linear lines and sums the area of the
    trapezium shape they form
    :param fnc: a function which defines a curve
    :param x_start: left end point to indicate the start of line segment
    :param x_end: right end point to indicate end of line segment
    :param steps: an accuracy gauge; more steps increases the accuracy
    :return: a float representing the length of the curve

    >>> def f(x):
    ...     return 5
    >>> f"{trapezoidal_area(f, 12.0, 14.0, 1000):.3f}"
    '10.000'
    >>> def f(x):
    ...     return 9*x**2
    >>> f"{trapezoidal_area(f, -4.0, 0, 10000):.4f}"
    '192.0000'
    >>> f"{trapezoidal_area(f, -4.0, 4.0, 10000):.4f}"
    '384.0000'
    """
    step_size = (x_end - x_start) / steps
    area = 0.0
    x1, fx1 = x_start, fnc(x_start)
    for _ in range(steps):
        # Treat each segment as a trapezium and accumulate its area.
        x2 = step_size + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        x1, fx1 = x2, fx2
    return area
+
+
+if __name__ == "__main__":
+
+ def f(x):
+ return x ** 3 + x ** 2
+
+ print("f(x) = x^3 + x^2")
+ print("The area between the curve, x = -5, x = 5 and the x axis is:")
+ i = 10
+ while i <= 100000:
+ print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
+ i *= 10
diff --git a/maths/armstrong_numbers.py b/maths/armstrong_numbers.py
new file mode 100644
index 000000000000..d30ed2e430a0
--- /dev/null
+++ b/maths/armstrong_numbers.py
@@ -0,0 +1,69 @@
+"""
+An Armstrong number is equal to the sum of the cubes of its digits.
+For example, 370 is an Armstrong number because 3*3*3 + 7*7*7 + 0*0*0 = 370.
+An Armstrong number is often called Narcissistic number.
+"""
+
+
def armstrong_number(n: int) -> bool:
    """
    Return True if n is an Armstrong (narcissistic) number: an integer equal
    to the sum of its own digits, each raised to the power of the number of
    digits; False otherwise.

    >>> armstrong_number(153)
    True
    >>> armstrong_number(200)
    False
    >>> armstrong_number(1634)
    True
    >>> armstrong_number(0)
    False
    >>> armstrong_number(-1)
    False
    >>> armstrong_number(1.2)
    False
    """
    # Only positive integers can be Armstrong numbers.
    if not isinstance(n, int) or n < 1:
        return False

    # Count the digits of n.
    number_of_digits = 0
    temp = n
    while temp > 0:
        number_of_digits += 1
        temp //= 10

    # Sum each digit raised to the digit count.
    # (Renamed from `sum`, which shadowed the builtin.)
    digit_sum = 0
    temp = n
    while temp > 0:
        temp, rem = divmod(temp, 10)
        digit_sum += rem ** number_of_digits
    return n == digit_sum
+
+
def narcissistic_number(n: int) -> bool:
    """Return True if n is a narcissistic number or False if it is not."""
    digits = str(n)
    # Every digit is raised to the total number of digits.
    power = len(digits)
    return n == sum(int(digit) ** power for digit in digits)
+
+
def main():
    """
    Request that user input an integer and tell them if it is an Armstrong
    number, then cross-check with the narcissistic_number() implementation.
    """
    num = int(input("Enter an integer to see if it is an Armstrong number: ").strip())
    print(f"{num} is {'' if armstrong_number(num) else 'not '}an Armstrong number.")
    # Bug fix: this line reports the narcissistic_number() check but
    # previously claimed "Armstrong number" as well.
    print(f"{num} is {'' if narcissistic_number(num) else 'not '}a narcissistic number.")
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
+ main()
diff --git a/maths/average.py b/maths/average.py
deleted file mode 100644
index dc70836b5e83..000000000000
--- a/maths/average.py
+++ /dev/null
@@ -1,14 +0,0 @@
-def average(nums):
- sum = 0
- n = 0
- for x in nums:
- sum += x
- n += 1
- avg = sum / n
- print(avg)
-
-def main():
- average([2, 4, 6, 8, 20, 50, 70])
-
-if __name__ == '__main__':
- main()
diff --git a/maths/average_mean.py b/maths/average_mean.py
new file mode 100644
index 000000000000..4beca1f741a0
--- /dev/null
+++ b/maths/average_mean.py
@@ -0,0 +1,20 @@
+"""Find mean of a list of numbers."""
+
+
def average(nums):
    """Find mean of a list of numbers."""
    total = sum(nums)
    return total / len(nums)
+
+
def test_average():
    """
    >>> test_average()
    """
    assert average([3, 6, 9, 12, 15, 18, 21]) == 12.0
    assert average([5, 10, 15, 20, 25, 30, 35]) == 20
    assert average([1, 2, 3, 4, 5, 6, 7, 8]) == 4.5
+
+
+if __name__ == "__main__":
+ """Call average module to find mean of a specific list of numbers."""
+ print(average([2, 4, 6, 8, 20, 50, 70]))
diff --git a/maths/average_median.py b/maths/average_median.py
new file mode 100644
index 000000000000..0257e3f76f1a
--- /dev/null
+++ b/maths/average_median.py
@@ -0,0 +1,35 @@
def median(nums):
    """
    Find median of a list of numbers.

    >>> median([0])
    0
    >>> median([4,1,3,2])
    2.5
    >>> median([2, 70, 6, 50, 20, 8, 4])
    8

    Args:
        nums: List of nums

    Returns:
        Median.
    """
    ordered = sorted(nums)
    count = len(ordered)
    middle = count // 2
    if count % 2:
        # Odd length: single middle element.
        return ordered[middle]
    # Even length: mean of the two middle elements.
    return (ordered[middle] + ordered[middle - 1]) / 2
+
+
def main():
    # Run the doctests embedded in median()'s docstring.
    import doctest

    doctest.testmod()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/maths/average_mode.py b/maths/average_mode.py
new file mode 100644
index 000000000000..d472dc04d4bf
--- /dev/null
+++ b/maths/average_mode.py
@@ -0,0 +1,31 @@
+import statistics
+
+
+def mode(input_list): # Defining function "mode."
+ """This function returns the mode(Mode as in the measures of
+ central tendency) of the input data.
+
+ The input list may contain any Datastructure or any Datatype.
+
+ >>> input_list = [2, 3, 4, 5, 3, 4, 2, 5, 2, 2, 4, 2, 2, 2]
+ >>> mode(input_list)
+ 2
+ >>> input_list = [2, 3, 4, 5, 3, 4, 2, 5, 2, 2, 4, 2, 2, 2]
+ >>> mode(input_list) == statistics.mode(input_list)
+ True
+ """
+ # Copying input_list to check with the index number later.
+ check_list = input_list.copy()
+ result = list() # Empty list to store the counts of elements in input_list
+ for x in input_list:
+ result.append(input_list.count(x))
+ input_list.remove(x)
+ y = max(result) # Gets the maximum value in the result list.
+ # Returns the value with the maximum number of repetitions.
+ return check_list[result.index(y)]
+
+
+if __name__ == "__main__":
+ data = [2, 3, 4, 5, 3, 4, 2, 5, 2, 2, 4, 2, 2, 2]
+ print(mode(data))
+ print(statistics.mode(data))
diff --git a/maths/bailey_borwein_plouffe.py b/maths/bailey_borwein_plouffe.py
new file mode 100644
index 000000000000..febf7e975516
--- /dev/null
+++ b/maths/bailey_borwein_plouffe.py
@@ -0,0 +1,90 @@
def bailey_borwein_plouffe(digit_position: int, precision: int = 1000) -> str:
    """
    Return the nth hexadecimal digit of pi via the Bailey-Borwein-Plouffe
    (BBP) digit-extraction formula.
    https://en.wikipedia.org/wiki/Bailey%E2%80%93Borwein%E2%80%93Plouffe_formula

    @param digit_position: a positive integer; position 1 is the first digit
        after the (hexadecimal) point.
    @param precision: number of terms in the second summation to calculate.
        A higher number reduces the chance of an error but increases runtime.
    @return: one hexadecimal digit (as a string).

    >>> "".join(bailey_borwein_plouffe(i) for i in range(1, 11))
    '243f6a8885'
    >>> bailey_borwein_plouffe(5, 10000)
    '6'
    >>> bailey_borwein_plouffe(-10)
    Traceback (most recent call last):
    ...
    ValueError: Digit position must be a positive integer
    >>> bailey_borwein_plouffe(0)
    Traceback (most recent call last):
    ...
    ValueError: Digit position must be a positive integer
    >>> bailey_borwein_plouffe(1.7)
    Traceback (most recent call last):
    ...
    ValueError: Digit position must be a positive integer
    >>> bailey_borwein_plouffe(2, -10)
    Traceback (most recent call last):
    ...
    ValueError: Precision must be a nonnegative integer
    >>> bailey_borwein_plouffe(2, 1.6)
    Traceback (most recent call last):
    ...
    ValueError: Precision must be a nonnegative integer
    """
    if not isinstance(digit_position, int) or digit_position <= 0:
        raise ValueError("Digit position must be a positive integer")
    if not isinstance(precision, int) or precision < 0:
        raise ValueError("Precision must be a nonnegative integer")

    # Approximation of (16 ** (n - 1)) * pi; only its fractional part is
    # accurate, and that is all we need to read off one hex digit.
    total = (
        4 * _subsum(digit_position, 1, precision)
        - 2 * _subsum(digit_position, 4, precision)
        - _subsum(digit_position, 5, precision)
        - _subsum(digit_position, 6, precision)
    )

    # First hex digit of the fractional part of the result.
    return hex(int((total % 1) * 16))[2:]
+
+
+def _subsum(
+ digit_pos_to_extract: int, denominator_addend: int, precision: int
+) -> float:
+ # only care about first digit of fractional part; don't need decimal
+ """
+ Private helper function to implement the summation
+ functionality.
+ @param digit_pos_to_extract: digit position to extract
+ @param denominator_addend: added to denominator of fractions in the formula
+ @param precision: same as precision in main function
+ @return: floating-point number whose integer part is not important
+ """
+ sum = 0.0
+ for sum_index in range(digit_pos_to_extract + precision):
+ denominator = 8 * sum_index + denominator_addend
+ exponential_term = 0.0
+ if sum_index < digit_pos_to_extract:
+ # if the exponential term is an integer and we mod it by the denominator
+ # before dividing, only the integer part of the sum will change;
+ # the fractional part will not
+ exponential_term = pow(
+ 16, digit_pos_to_extract - 1 - sum_index, denominator
+ )
+ else:
+ exponential_term = pow(16, digit_pos_to_extract - 1 - sum_index)
+ sum += exponential_term / denominator
+ return sum
+
+
if __name__ == "__main__":
    import doctest

    # Run the doctests embedded in the functions above.
    doctest.testmod()
diff --git a/maths/basic_maths.py b/maths/basic_maths.py
index 6e8c919a001d..07ee3b3df296 100644
--- a/maths/basic_maths.py
+++ b/maths/basic_maths.py
@@ -1,74 +1,80 @@
+"""Implementation of Basic Math in Python."""
import math
-def primeFactors(n):
+
def prime_factors(n: int) -> list:
    """Find Prime Factors (with multiplicity, in ascending order).
    >>> prime_factors(100)
    [2, 2, 5, 5]
    >>> prime_factors(97)
    [97]
    """
    pf = []
    # Use floor division: the old `int(n / 2)` round-trips through a float
    # and silently corrupts values above 2**53.
    while n % 2 == 0:
        pf.append(2)
        n //= 2
    for i in range(3, int(math.sqrt(n)) + 1, 2):
        while n % i == 0:
            pf.append(i)
            n //= i
    # Whatever is left above the sqrt bound is itself a prime factor.
    if n > 2:
        pf.append(n)
    return pf
def number_of_divisors(n: int) -> int:
    """Calculate Number of Divisors of an Integer.
    >>> number_of_divisors(100)
    9
    >>> number_of_divisors(6)
    4
    """
    div = 1
    temp = 1
    # Floor division keeps n an exact int; int(n / 2) went through a float
    # and corrupted values above 2**53.
    while n % 2 == 0:
        temp += 1
        n //= 2
    div *= temp
    for i in range(3, int(math.sqrt(n)) + 1, 2):
        temp = 1
        while n % i == 0:
            temp += 1
            n //= i
        div *= temp
    # Fix: a leftover prime factor larger than sqrt(n) contributes one more
    # exponent (the old code dropped it, e.g. number_of_divisors(6) gave 2).
    if n > 2:
        div *= 2
    return div
def sum_of_divisors(n: int) -> int:
    """Calculate Sum of Divisors.
    >>> sum_of_divisors(100)
    217
    >>> sum_of_divisors(6)
    12
    """
    s = 1
    temp = 1
    while n % 2 == 0:
        temp += 1
        n //= 2  # exact integer halving instead of int(n / 2) via float
    if temp > 1:
        # Geometric series (p**k - 1) // (p - 1) is always an exact integer,
        # so stay in int arithmetic (the old float division lost precision).
        s *= (2 ** temp - 1) // (2 - 1)
    for i in range(3, int(math.sqrt(n)) + 1, 2):
        temp = 1
        while n % i == 0:
            temp += 1
            n //= i
        if temp > 1:
            s *= (i ** temp - 1) // (i - 1)
    # Fix: a leftover prime factor p > sqrt(n) contributes (1 + p); the old
    # code dropped it, e.g. sum_of_divisors(6) returned 3 instead of 12.
    if n > 2:
        s *= n + 1
    return s
def euler_phi(n: int) -> int:
    """Calculate Euler's Phi Function.
    >>> euler_phi(100)
    40
    """
    s = n
    for x in set(prime_factors(n)):
        # n is divisible by each of its prime factors, so dividing first
        # keeps everything in exact integer arithmetic. The old form
        # `s *= (x - 1) / x` went through floats and int() truncation,
        # which can be off by one for large n.
        s = s // x * (x - 1)
    return s
+
-def main():
- print(primeFactors(100))
- print(numberOfDivisors(100))
- print(sumOfDivisors(100))
- print(eulerPhi(100))
-
-if __name__ == '__main__':
- main()
-
-
\ No newline at end of file
if __name__ == "__main__":
    # Quick demo of each helper using n = 100.
    print(prime_factors(100))
    print(number_of_divisors(100))
    print(sum_of_divisors(100))
    print(euler_phi(100))
diff --git a/maths/binary_exp_mod.py b/maths/binary_exp_mod.py
new file mode 100644
index 000000000000..67dd1e728b18
--- /dev/null
+++ b/maths/binary_exp_mod.py
@@ -0,0 +1,28 @@
def bin_exp_mod(a, n, b):
    """
    Compute (a ** n) % b with O(log n) multiplications (binary exponentiation).

    >>> bin_exp_mod(3, 4, 5)
    1
    >>> bin_exp_mod(7, 13, 10)
    7
    """
    # mod b
    assert not (b == 0), "This cannot accept modulo that is == 0"
    if n == 0:
        return 1

    if n % 2 == 1:
        return (bin_exp_mod(a, n - 1, b) * a) % b

    # n // 2, not n / 2: true division produces a float, which loses
    # precision for large exponents and makes the recursion type-unstable.
    r = bin_exp_mod(a, n // 2, b)
    return (r * r) % b
+
+
if __name__ == "__main__":
    try:
        BASE = int(input("Enter Base : ").strip())
        POWER = int(input("Enter Power : ").strip())
        MODULO = int(input("Enter Modulo : ").strip())
    except ValueError:
        print("Invalid literal for integer")
        # Exit here: the old code fell through and the print below then
        # raised a confusing NameError on the unbound names.
        raise SystemExit(1)

    print(bin_exp_mod(BASE, POWER, MODULO))
diff --git a/maths/binary_exponentiation.py b/maths/binary_exponentiation.py
new file mode 100644
index 000000000000..8dda5245cf44
--- /dev/null
+++ b/maths/binary_exponentiation.py
@@ -0,0 +1,28 @@
+"""Binary Exponentiation."""
+
+# Author : Junth Basnet
+# Time Complexity : O(logn)
+
+
def binary_exponentiation(a, n):
    """
    Compute a ** n with O(log n) multiplications.

    >>> binary_exponentiation(2, 10)
    1024
    >>> binary_exponentiation(3, 0)
    1
    """
    if n == 0:
        return 1

    if n % 2 == 1:
        return binary_exponentiation(a, n - 1) * a

    # n // 2 keeps the exponent an int; the old n / 2 produced floats and
    # wrong results once n exceeded float precision.
    half = binary_exponentiation(a, n // 2)
    return half * half
+
+
if __name__ == "__main__":
    try:
        BASE = int(input("Enter Base : ").strip())
        POWER = int(input("Enter Power : ").strip())
    except ValueError:
        print("Invalid literal for integer")
        # Exit here: the old code fell through and then hit a NameError on
        # the unbound BASE/POWER below.
        raise SystemExit(1)

    RESULT = binary_exponentiation(BASE, POWER)
    print(f"{BASE}^({POWER}) : {RESULT}")
diff --git a/maths/binomial_coefficient.py b/maths/binomial_coefficient.py
new file mode 100644
index 000000000000..4def041492f3
--- /dev/null
+++ b/maths/binomial_coefficient.py
@@ -0,0 +1,20 @@
def binomial_coefficient(n, r):
    """
    Find binomial coefficient using pascals triangle.

    >>> binomial_coefficient(10, 5)
    252
    """
    # Rolling row of Pascal's triangle, truncated at column r.
    row = [1] + [0] * r
    for i in range(1, n + 1):
        # Update right-to-left so each cell still holds the previous row's value.
        for j in range(min(i, r), 0, -1):
            row[j] += row[j - 1]
    return row[r]
+
+
+print(binomial_coefficient(n=10, r=5))
diff --git a/maths/binomial_distribution.py b/maths/binomial_distribution.py
new file mode 100644
index 000000000000..a74a5a7ed994
--- /dev/null
+++ b/maths/binomial_distribution.py
@@ -0,0 +1,40 @@
+"""For more information about the Binomial Distribution -
+ https://en.wikipedia.org/wiki/Binomial_distribution"""
+from math import factorial
+
+
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """
    Return probability of k successes out of n tries, with p probability for one
    success

    The function uses the factorial function in order to calculate the binomial
    coefficient

    >>> binomial_distribution(3, 5, 0.7)
    0.30870000000000003
    >>> binomial_distribution (2, 4, 0.5)
    0.375
    """
    # Validate inputs (same checks and messages, in the same order).
    if successes > trials:
        raise ValueError("""successes must be lower or equal to trials""")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    # P(exactly this arrangement of successes/failures).
    probability = (prob ** successes) * ((1 - prob) ** (trials - successes))
    # Binomial coefficient n! / (k! (n - k)!), computed in floats as before.
    numerator = float(factorial(trials))
    denominator = factorial(successes) * factorial(trials - successes)
    return probability * (numerator / denominator)
+
+
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # Fixed typo in the printed message: "trails" -> "trials".
    print("Probability of 2 successes out of 4 trials")
    print("with probability of 0.75 is:", end=" ")
    print(binomial_distribution(2, 4, 0.75))
diff --git a/maths/bisection.py b/maths/bisection.py
new file mode 100644
index 000000000000..93cc2247b64e
--- /dev/null
+++ b/maths/bisection.py
@@ -0,0 +1,63 @@
+"""
+Given a function on floating number f(x) and two floating numbers ‘a’ and ‘b’ such that
+f(a) * f(b) < 0 and f(x) is continuous in [a, b].
+Here f(x) represents algebraic or transcendental equation.
+Find root of function in interval [a, b] (Or find a value of x such that f(x) is 0)
+
+https://en.wikipedia.org/wiki/Bisection_method
+"""
+
+
def equation(x: float) -> float:
    """
    Sample function f(x) = 10 - x^2 whose roots bisection() searches for.

    >>> equation(5)
    -15
    >>> equation(0)
    10
    >>> equation(-5)
    -15
    >>> equation(0.1)
    9.99
    >>> equation(-0.1)
    9.99
    """
    square = x * x
    return 10 - square
+
+
def bisection(a: float, b: float) -> float:
    """
    Locate a root of `equation` inside [a, b] by repeated interval halving.

    >>> bisection(-2, 5)
    3.1611328125
    >>> bisection(0, 6)
    3.158203125
    >>> bisection(2, 3)
    Traceback (most recent call last):
    ...
    ValueError: Wrong space!
    """
    # Bolzano / intermediate value theorem: a sign change is required.
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    midpoint = a
    # Stop once the bracket is narrower than 0.01.
    while b - a >= 0.01:
        midpoint = (a + b) / 2
        f_mid = equation(midpoint)
        if f_mid == 0.0:
            break
        # Keep the half-interval that still brackets the sign change.
        if f_mid * equation(a) < 0:
            b = midpoint
        else:
            a = midpoint
    return midpoint
+
+
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Demo: two bracketing intervals around the root sqrt(10).
    print(bisection(-2, 5))
    print(bisection(0, 6))
diff --git a/maths/ceil.py b/maths/ceil.py
new file mode 100644
index 000000000000..97578265c1a9
--- /dev/null
+++ b/maths/ceil.py
@@ -0,0 +1,24 @@
+"""
+https://en.wikipedia.org/wiki/Floor_and_ceiling_functions
+"""
+
+
def ceil(x) -> int:
    """
    Return the ceiling of x as an Integral.

    :param x: the number
    :return: the smallest integer >= x.

    >>> import math
    >>> all(ceil(n) == math.ceil(n) for n
    ...     in (1, -1, 0, -0, 1.1, -1.1, 1.0, -1.0, 1_000_000_000))
    True
    """
    truncated = int(x)
    # int() truncates toward zero, so round up only when a positive
    # fractional remainder was cut off.
    if x - truncated > 0:
        return truncated + 1
    return truncated
+
+
if __name__ == "__main__":
    import doctest

    # Run the doctest above, which checks ceil() against math.ceil.
    doctest.testmod()
diff --git a/maths/chudnovsky_algorithm.py b/maths/chudnovsky_algorithm.py
new file mode 100644
index 000000000000..aaee7462822e
--- /dev/null
+++ b/maths/chudnovsky_algorithm.py
@@ -0,0 +1,60 @@
+from decimal import Decimal, getcontext
+from math import ceil, factorial
+
+
def pi(precision: int) -> str:
    """
    The Chudnovsky algorithm is a fast method for calculating the digits of PI,
    based on Ramanujan's PI formulae.

    https://en.wikipedia.org/wiki/Chudnovsky_algorithm

    PI = constant_term / ((multinomial_term * linear_term) / exponential_term)
        where constant_term = 426880 * sqrt(10005)

    The linear_term and the exponential_term can be defined iteratively as follows:
        L_k+1 = L_k + 545140134            where L_0 = 13591409
        X_k+1 = X_k * -262537412640768000  where X_0 = 1

    The multinomial_term is defined as follows:
        6k! / ((3k)! * (k!) ^ 3)
            where k is the k_th iteration.

    This algorithm correctly calculates around 14 digits of PI per iteration

    >>> pi(10)
    '3.14159265'
    >>> pi(100)
    '3.14159265358979323846264338327950288419716939937510582097494459230781640628620899862803482534211706'
    >>> pi('hello')
    Traceback (most recent call last):
    ...
    TypeError: Undefined for non-integers
    >>> pi(-1)
    Traceback (most recent call last):
    ...
    ValueError: Undefined for non-natural numbers
    """
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    if precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    # All Decimal work below happens at the requested precision.
    getcontext().prec = precision
    iterations = ceil(precision / 14)  # ~14 correct digits per series term
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    # Drop the final (guard) digit before returning.
    return str(constant_term / partial_sum)[:-1]
+
+
if __name__ == "__main__":
    # Print pi to 50 significant digits using the Chudnovsky series above.
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
diff --git a/maths/collatz_sequence.py b/maths/collatz_sequence.py
new file mode 100644
index 000000000000..7b3636de69f4
--- /dev/null
+++ b/maths/collatz_sequence.py
@@ -0,0 +1,44 @@
+from __future__ import annotations
+
+
def collatz_sequence(n: int) -> list[int]:
    """
    Collatz conjecture: start with any positive integer n. The next term is
    obtained as follows:
        If n term is even, the next term is: n / 2 .
        If n is odd, the next term is: 3 * n + 1.

    The conjecture states the sequence will always reach 1 for any starting value n.
    Example:
    >>> collatz_sequence(2.1)
    Traceback (most recent call last):
        ...
    Exception: Sequence only defined for natural numbers
    >>> collatz_sequence(0)
    Traceback (most recent call last):
        ...
    Exception: Sequence only defined for natural numbers
    >>> collatz_sequence(43)  # doctest: +NORMALIZE_WHITESPACE
    [43, 130, 65, 196, 98, 49, 148, 74, 37, 112, 56, 28, 14, 7,
    22, 11, 34, 17, 52, 26, 13, 40, 20, 10, 5, 16, 8, 4, 2, 1]
    """
    if not isinstance(n, int) or n < 1:
        raise Exception("Sequence only defined for natural numbers")

    sequence = [n]
    while n != 1:
        # Halve even terms; apply 3n + 1 to odd terms.
        if n % 2 == 0:
            n //= 2
        else:
            n = 3 * n + 1
        sequence.append(n)
    return sequence
+
+
def main():
    # Demonstrate the sequence for a sample starting value.
    n = 43
    sequence = collatz_sequence(n)
    print(sequence)
    print(f"collatz sequence from {n} took {len(sequence)} steps.")


if __name__ == "__main__":
    main()
diff --git a/maths/combinations.py b/maths/combinations.py
new file mode 100644
index 000000000000..40f4f7a9f850
--- /dev/null
+++ b/maths/combinations.py
@@ -0,0 +1,58 @@
+"""
+https://en.wikipedia.org/wiki/Combination
+"""
+from math import factorial
+
+
def combinations(n: int, k: int) -> int:
    """
    Returns the number of different combinations of k length which can
    be made from n values, where n >= k.

    Examples:
    >>> combinations(10,5)
    252

    >>> combinations(6,3)
    20

    >>> combinations(20,5)
    15504

    >>> combinations(52, 5)
    2598960

    >>> combinations(0, 0)
    1

    >>> combinations(-4, -5)
    Traceback (most recent call last):
    ...
    ValueError: Please enter positive integers for n and k where n >= k
    """
    # If either condition holds, we would be asked for the factorial of a
    # negative number, which does not exist.
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    # Exact integer arithmetic: the old `int(factorial(n) / ...)` converted a
    # huge integer to a float, overflowing/losing precision once n is ~170+.
    return factorial(n) // (factorial(k) * factorial(n - k))
+
+
if __name__ == "__main__":

    # Worked examples: poker hands, project groups, and podium orderings.
    print(
        "\nThe number of five-card hands possible from a standard",
        f"fifty-two card deck is: {combinations(52, 5)}",
    )

    print(
        "\nIf a class of 40 students must be arranged into groups of",
        f"4 for group projects, there are {combinations(40, 4)} ways",
        "to arrange them.\n",
    )

    print(
        "If 10 teams are competing in a Formula One race, there",
        f"are {combinations(10, 3)} ways that first, second and",
        "third place can be awarded.\n",
    )
diff --git a/maths/decimal_isolate.py b/maths/decimal_isolate.py
new file mode 100644
index 000000000000..0e3967a4671d
--- /dev/null
+++ b/maths/decimal_isolate.py
@@ -0,0 +1,45 @@
+"""
+Isolate the Decimal part of a Number
+https://stackoverflow.com/questions/3886402/how-to-get-numbers-after-decimal-point
+"""
+
+
def decimal_isolate(number, digitAmount):

    """
    Isolates the decimal part of a number.
    If digitAmount > 0 round to that decimal place, else print the entire decimal.
    >>> decimal_isolate(1.53, 0)
    0.53
    >>> decimal_isolate(35.345, 1)
    0.3
    >>> decimal_isolate(35.345, 2)
    0.34
    >>> decimal_isolate(35.345, 3)
    0.345
    >>> decimal_isolate(-14.789, 3)
    -0.789
    >>> decimal_isolate(0, 2)
    0
    >>> decimal_isolate(-14.123, 1)
    -0.1
    >>> decimal_isolate(-14.123, 2)
    -0.12
    >>> decimal_isolate(-14.123, 3)
    -0.123
    """
    # int() truncates toward zero, so the sign of the fraction follows `number`.
    fractional = number - int(number)
    if digitAmount > 0:
        return round(fractional, digitAmount)
    return fractional
+
+
if __name__ == "__main__":
    # Exercise each doctest example once when run as a script.
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
diff --git a/maths/entropy.py b/maths/entropy.py
new file mode 100644
index 000000000000..43bb3860fc12
--- /dev/null
+++ b/maths/entropy.py
@@ -0,0 +1,131 @@
+#!/usr/bin/env python3
+
+"""
+Implementation of entropy of information
+https://en.wikipedia.org/wiki/Entropy_(information_theory)
+"""
+from __future__ import annotations
+
+import math
+from collections import Counter
+from string import ascii_lowercase
+
+
def calculate_prob(text: str) -> None:
    """
    Print three entropy figures for `text`, computed over the alphabet of a
    space plus the lowercase ASCII letters:
      1) entropy of the single-character distribution
      2) entropy of the two-character (bigram) distribution
      3) their difference, an estimate of the conditional entropy H(Xn | Xn-1)

    :param text: the text to analyze
    :return: None (results are printed, one per line, rounded to whole bits)

    Text from random books. Also, random quotes.
    >>> text = ("Behind Winston’s back the voice "
    ...         "from the telescreen was still "
    ...         "babbling and the overfulfilment")
    >>> calculate_prob(text)
    4.0
    6.0
    2.0

    >>> text = ("The Ministry of Truth—Minitrue, in Newspeak [Newspeak was the official"
    ...         "face in elegant lettering, the three")
    >>> calculate_prob(text)
    4.0
    5.0
    1.0
    >>> text = ("Had repulsive dashwoods suspicion sincerity but advantage now him. "
    ...         "Remark easily garret nor nay. Civil those mrs enjoy shy fat merry. "
    ...         "You greatest jointure saw horrible. He private he on be imagine "
    ...         "suppose. Fertile beloved evident through no service elderly is. Blind "
    ...         "there if every no so at. Own neglected you preferred way sincerity "
    ...         "delivered his attempted. To of message cottage windows do besides "
    ...         "against uncivil. Delightful unreserved impossible few estimating "
    ...         "men favourable see entreaties. She propriety immediate was improving. "
    ...         "He or entrance humoured likewise moderate. Much nor game son say "
    ...         "feel. Fat make met can must form into gate. Me we offending prevailed "
    ...         "discovery.")
    >>> calculate_prob(text)
    4.0
    7.0
    3.0
    """
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # Total count over all single characters (normalizer for probabilities).
    all_sum = sum(single_char_strings.values())

    # Entropy of the one-character distribution.
    my_fir_sum = 0
    # Characters outside the space+lowercase alphabet are skipped.
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # Shannon entropy term.

    # print entropy (sign flipped: entropy is -sum(p log p))
    print("{:.1f}".format(round(-1 * my_fir_sum)))

    # Entropy of the two-character (bigram) distribution.
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha pair (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print("{:.1f}".format(round(-1 * my_sec_sum)))

    # print the difference between them (conditional-entropy estimate)
    print("{:.1f}".format(round((-1 * my_sec_sum) - (-1 * my_fir_sum))))
+
+
def analyze_text(text: str) -> tuple[dict, dict]:
    """
    Convert text input into two dicts of counts.
    The first dictionary stores the frequency of single character strings.
    The second dictionary stores the frequency of two character strings.
    """
    singles: Counter = Counter()
    pairs: Counter = Counter()
    # The loop below never visits the final character, so count it here.
    singles[text[-1]] += 1

    # Seed the pair counts with an implicit leading space before the text.
    pairs[" " + text[0]] += 1
    for i in range(len(text) - 1):
        singles[text[i]] += 1
        pairs[text[i : i + 2]] += 1
    return singles, pairs
+
+
def main():
    """Run the doctests embedded in calculate_prob."""
    # The large block of commented-out sample text that used to live here was
    # dead code duplicating the third doctest; it has been removed.
    import doctest

    doctest.testmod()


if __name__ == "__main__":
    main()
diff --git a/maths/euclidean_distance.py b/maths/euclidean_distance.py
new file mode 100644
index 000000000000..6e0da6370219
--- /dev/null
+++ b/maths/euclidean_distance.py
@@ -0,0 +1,62 @@
+from typing import Iterable, Union
+
+import numpy as np
+
# A vector may be any iterable of numbers or a numpy 1-D array.
Vector = Union[Iterable[float], Iterable[int], np.ndarray]
# Scalar result type of a distance computation.
VectorOut = Union[np.float64, int, float]
+
+
def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """
    Calculate the distance between the two endpoints of two vectors.
    A vector is defined as a list, tuple, or numpy 1D array.
    >>> euclidean_distance((0, 0), (2, 2))
    2.8284271247461903
    >>> euclidean_distance(np.array([0, 0, 0]), np.array([2, 2, 2]))
    3.4641016151377544
    >>> euclidean_distance(np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8]))
    8.0
    >>> euclidean_distance([1, 2, 3, 4], [5, 6, 7, 8])
    8.0
    """
    # Element-wise difference, then sqrt of the sum of squares.
    difference = np.asarray(vector_1) - np.asarray(vector_2)
    return np.sqrt((difference ** 2).sum())
+
+
def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """
    Calculate the distance between the two endpoints of two vectors without numpy.
    A vector is defined as a list, tuple, or numpy 1D array.
    >>> euclidean_distance_no_np((0, 0), (2, 2))
    2.8284271247461903
    >>> euclidean_distance_no_np([1, 2, 3, 4], [5, 6, 7, 8])
    8.0
    """
    # Square each coordinate gap, sum, then take the square root.
    squared_gaps = ((a - b) ** 2 for a, b in zip(vector_1, vector_2))
    return sum(squared_gaps) ** 0.5
+
+
if __name__ == "__main__":

    def benchmark() -> None:
        """
        Benchmarks
        """
        from timeit import timeit

        # Each variant is timed on the same small 3-D input for 10k calls;
        # globals=globals() lets timeit resolve the functions by name.
        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])",
                number=10000,
                globals=globals(),
            )
        )
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])",
                number=10000,
                globals=globals(),
            )
        )

    benchmark()
diff --git a/maths/eulers_totient.py b/maths/eulers_totient.py
new file mode 100644
index 000000000000..6a35e69bde0b
--- /dev/null
+++ b/maths/eulers_totient.py
@@ -0,0 +1,45 @@
+# Eulers Totient function finds the number of relative primes of a number n from 1 to n
def totient(n: int) -> list:
    """Compute Euler's totient for every integer up to n with a linear sieve.

    Returns a list `totients` where totients[i] == phi(i) for i >= 2
    (indices 0 and 1 keep the sieve's seed values of -1 and 0).

    >>> totient(10)[1:]
    [0, 1, 2, 2, 4, 2, 6, 4, 6, 4]
    """
    is_prime = [True for i in range(n + 1)]
    totients = [i - 1 for i in range(n + 1)]
    primes = []
    for i in range(2, n + 1):
        if is_prime[i]:
            primes.append(i)
        for j in range(len(primes)):
            # Fix: the old test `i * primes[j] >= n` skipped the composite
            # equal to n itself, so totients[n] was left at its seed value
            # (e.g. totient(10)[10] was 9 instead of 4). The arrays have
            # n + 1 slots, so index n is valid.
            if i * primes[j] > n:
                break
            is_prime[i * primes[j]] = False

            if i % primes[j] == 0:
                # primes[j] divides i: phi(i * p) = phi(i) * p
                totients[i * primes[j]] = totients[i] * primes[j]
                break

            # primes[j] is a new prime factor: phi(i * p) = phi(i) * (p - 1)
            totients[i * primes[j]] = totients[i] * (primes[j] - 1)

    return totients
+
+
def test_totient() -> None:
    """
    >>> n = 10
    >>> totient_calculation = totient(n)
    >>> for i in range(1, n):
    ...     print(f"{i} has {totient_calculation[i]} relative primes.")
    1 has 0 relative primes.
    2 has 1 relative primes.
    3 has 2 relative primes.
    4 has 2 relative primes.
    5 has 4 relative primes.
    6 has 2 relative primes.
    7 has 6 relative primes.
    8 has 4 relative primes.
    9 has 6 relative primes.
    """
    # Doctest container: the examples above are what testmod() executes.
    pass
+
+
if __name__ == "__main__":
    import doctest

    # Run the sieve's doctests (see test_totient above).
    doctest.testmod()
diff --git a/maths/explicit_euler.py b/maths/explicit_euler.py
new file mode 100644
index 000000000000..7c780198602b
--- /dev/null
+++ b/maths/explicit_euler.py
@@ -0,0 +1,40 @@
+import numpy as np
+
+
def explicit_euler(ode_func, y0, x0, step_size, x_end):
    """
    Calculate numeric solution at each step to an ODE using Euler's Method

    https://en.wikipedia.org/wiki/Euler_method

    Arguments:
    ode_func -- The ode as a function of x and y
    y0 -- the initial value for y
    x0 -- the initial value for x
    step_size -- the increment value for x
    x_end -- the end value for x

    >>> # the exact solution is math.exp(x)
    >>> def f(x, y):
    ...     return y
    >>> y0 = 1
    >>> y = explicit_euler(f, y0, 0.0, 0.01, 5)
    >>> y[-1]
    144.77277243257308
    """
    # Number of forward steps needed to reach x_end from x0.
    num_steps = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((num_steps + 1,))
    y[0] = y0
    x = x0

    # Forward Euler update: y_{k+1} = y_k + h * f(x_k, y_k).
    for k in range(num_steps):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y
+
+
if __name__ == "__main__":
    import doctest

    # Run the doctest above (exponential-growth ODE example).
    doctest.testmod()
diff --git a/maths/extended_euclidean_algorithm.py b/maths/extended_euclidean_algorithm.py
index f5a3cc88e474..e7087636ce09 100644
--- a/maths/extended_euclidean_algorithm.py
+++ b/maths/extended_euclidean_algorithm.py
@@ -1,51 +1,85 @@
+"""
+Extended Euclidean Algorithm.
+
+Finds 2 numbers a and b such that it satisfies
+the equation am + bn = gcd(m, n) (a.k.a Bezout's Identity)
+
+https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm
+"""
+
# @Author: S. Sharma
# @Date: 2019-02-25T12:08:53-06:00
# @Email: silentcat@protonmail.com
-# @Last modified by: silentcat
-# @Last modified time: 2019-02-26T07:07:38-06:00
+# @Last modified by: pikulet
+# @Last modified time: 2020-10-02
import sys
+from typing import Tuple
+
+
def extended_euclidean_algorithm(a: int, b: int) -> Tuple[int, int]:
    """
    Extended Euclidean Algorithm.

    Finds 2 numbers a and b such that it satisfies
    the equation am + bn = gcd(m, n) (a.k.a Bezout's Identity)

    >>> extended_euclidean_algorithm(1, 24)
    (1, 0)

    >>> extended_euclidean_algorithm(8, 14)
    (2, -1)

    >>> extended_euclidean_algorithm(240, 46)
    (-9, 47)

    >>> extended_euclidean_algorithm(1, -4)
    (1, 0)

    >>> extended_euclidean_algorithm(-2, -4)
    (-1, 0)

    >>> extended_euclidean_algorithm(0, -4)
    (0, -1)

    >>> extended_euclidean_algorithm(2, 0)
    (1, 0)

    """
    # Trivial cases: a unit divides everything.
    if abs(a) == 1:
        return a, 0
    if abs(b) == 1:
        return 0, b

    # Iterative Bezout bookkeeping: each "row" tracks a remainder together
    # with the coefficients expressing it as a combination of a and b.
    prev_r, r = a, b
    prev_x, x = 1, 0
    prev_y, y = 0, 1

    while r != 0:
        q = prev_r // r
        prev_r, r = r, prev_r - q * r
        prev_x, x = x, prev_x - q * x
        prev_y, y = y, prev_y - q * y

    # Flip coefficient signs so they refer to the original (possibly
    # negative) inputs.
    if a < 0:
        prev_x = -prev_x
    if b < 0:
        prev_y = -prev_y

    return prev_x, prev_y
-# Finds 2 numbers a and b such that it satisfies
-# the equation am + bn = gcd(m, n) (a.k.a Bezout's Identity)
-def extended_euclidean_algorithm(m, n):
- a = 0; aprime = 1; b = 1; bprime = 0
- q = 0; r = 0
- if m > n:
- c = m; d = n
- else:
- c = n; d = m
-
- while True:
- q = int(c / d)
- r = c % d
- if r == 0:
- break
- c = d
- d = r
-
- t = aprime
- aprime = a
- a = t - q*a
-
- t = bprime
- bprime = b
- b = t - q*b
-
- pair = None
- if m > n:
- pair = (a,b)
- else:
- pair = (b,a)
- return pair
def main():
    """Call Extended Euclidean Algorithm."""
    if len(sys.argv) < 3:
        print("2 integer arguments required")
        # sys.exit instead of the bare exit(): the latter is only injected
        # by the site module and is absent under `python -S` / frozen apps.
        sys.exit(1)
    a = int(sys.argv[1])
    b = int(sys.argv[2])
    print(extended_euclidean_algorithm(a, b))


if __name__ == "__main__":
    main()
diff --git a/maths/factorial_iterative.py b/maths/factorial_iterative.py
new file mode 100644
index 000000000000..64314790c11c
--- /dev/null
+++ b/maths/factorial_iterative.py
@@ -0,0 +1,30 @@
+# factorial of a positive integer -- https://en.wikipedia.org/wiki/Factorial
+
+
def factorial(n: int) -> int:
    """
    >>> import math
    >>> all(factorial(i) == math.factorial(i) for i in range(20))
    True
    >>> factorial(0.1)
    Traceback (most recent call last):
    ...
    ValueError: factorial() only accepts integral values
    >>> factorial(-1)
    Traceback (most recent call last):
    ...
    ValueError: factorial() not defined for negative values
    """
    if n != int(n):
        raise ValueError("factorial() only accepts integral values")
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    # Multiply 2..n together (the empty product is 1, covering 0! and 1!).
    result = 1
    for factor in range(2, n + 1):
        result *= factor
    return result
+
+
if __name__ == "__main__":
    # A blank input line defaults to 0 so the demo never crashes on Enter.
    n = int(input("Enter a positive integer: ").strip() or 0)
    print(f"factorial{n} is {factorial(n)}")
diff --git a/maths/factorial_python.py b/maths/factorial_python.py
index 376983e08dab..46688261af56 100644
--- a/maths/factorial_python.py
+++ b/maths/factorial_python.py
@@ -1,19 +1,34 @@
-# Python program to find the factorial of a number provided by the user.
def factorial(input_number: int) -> int:
    """
    Calculate the factorial of specified number

    >>> factorial(1)
    1
    >>> factorial(6)
    720
    >>> factorial(0)
    1
    >>> factorial(-1)
    Traceback (most recent call last):
    ...
    ValueError: factorial() not defined for negative values
    >>> factorial(0.1)
    Traceback (most recent call last):
    ...
    ValueError: factorial() only accepts integral values
    """
    if input_number < 0:
        raise ValueError("factorial() not defined for negative values")
    if not isinstance(input_number, int):
        raise ValueError("factorial() only accepts integral values")
    # Multiply 2..input_number (the empty product is 1, covering 0! and 1!).
    product = 1
    for multiplier in range(2, input_number + 1):
        product *= multiplier
    return product
-factorial = 1
-# check if the number is negative, positive or zero
-if num < 0:
- print("Sorry, factorial does not exist for negative numbers")
-elif num == 0:
- print("The factorial of 0 is 1")
-else:
- for i in range(1,num + 1):
- factorial = factorial*i
- print("The factorial of",num,"is",factorial)
if __name__ == "__main__":
    import doctest

    # Run the examples in factorial's docstring.
    doctest.testmod()
diff --git a/maths/factorial_recursive.py b/maths/factorial_recursive.py
index 41391a2718f6..137112738905 100644
--- a/maths/factorial_recursive.py
+++ b/maths/factorial_recursive.py
@@ -1,13 +1,28 @@
-def fact(n):
- """
- Return 1, if n is 1 or below,
- otherwise, return n * fact(n-1).
- """
- return 1 if n <= 1 else n * fact(n-1)
-
-"""
-Shown factorial for i,
-where i ranges from 1 to 20.
-"""
-for i in range(1,21):
- print(i, ": ", fact(i), sep='')
def factorial(n: int) -> int:
    """
    Calculate the factorial of a positive integer recursively.
    https://en.wikipedia.org/wiki/Factorial

    >>> import math
    >>> all(factorial(i) == math.factorial(i) for i in range(20))
    True
    >>> factorial(0.1)
    Traceback (most recent call last):
    ...
    ValueError: factorial() only accepts integral values
    >>> factorial(-1)
    Traceback (most recent call last):
    ...
    ValueError: factorial() not defined for negative values
    """
    if not isinstance(n, int):
        raise ValueError("factorial() only accepts integral values")
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    # Base cases 0! == 1! == 1; otherwise recurse on n - 1.
    if n in (0, 1):
        return 1
    return n * factorial(n - 1)
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/maths/factors.py b/maths/factors.py
new file mode 100644
index 000000000000..e2fdc4063a13
--- /dev/null
+++ b/maths/factors.py
@@ -0,0 +1,18 @@
def factors_of_a_number(num: int) -> list:
    """
    Return every positive divisor of ``num`` in ascending order
    (an empty list for num < 1).

    >>> factors_of_a_number(1)
    [1]
    >>> factors_of_a_number(5)
    [1, 5]
    >>> factors_of_a_number(24)
    [1, 2, 3, 4, 6, 8, 12, 24]
    >>> factors_of_a_number(-24)
    []
    """
    divisors = []
    for candidate in range(1, num + 1):
        if num % candidate == 0:
            divisors.append(candidate)
    return divisors
+
+
+if __name__ == "__main__":
+ num = int(input("Enter a number to find its factors: "))
+ factors = factors_of_a_number(num)
+ print(f"{num} has {len(factors)} factors: {', '.join(str(f) for f in factors)}")
diff --git a/maths/fermat_little_theorem.py b/maths/fermat_little_theorem.py
new file mode 100644
index 000000000000..73af3e28c618
--- /dev/null
+++ b/maths/fermat_little_theorem.py
@@ -0,0 +1,31 @@
+# Python program to show the usage of Fermat's little theorem in a division
+# According to Fermat's little theorem, (a / b) mod p always equals
+# a * (b ^ (p - 2)) mod p
+# Here we assume that p is a prime number, b divides a, and p doesn't divide b
+# Wikipedia reference: https://en.wikipedia.org/wiki/Fermat%27s_little_theorem
+
+
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    """
    Compute (a ** n) % mod with O(log n) multiplications.

    :param a: base
    :param n: non-negative exponent
    :param mod: modulus
    :return: a**n modulo mod

    >>> binary_exponentiation(10, 699, 701) == pow(10, 699, 701)
    True
    """
    if n == 0:
        return 1
    if n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    # Bug fix: was `n / 2`, which produces a float and loses precision for
    # large exponents; floor division keeps everything in exact int math.
    b = binary_exponentiation(a, n // 2, mod)
    return (b * b) % mod
+
+
# a prime number
p = 701

a = 1000000000
b = 10

# Bug fix: use integer division (b divides a by the stated assumption);
# `a / b` yields a float, which breaks exactness for larger values.
# using binary exponentiation function, O(log(p)):
print((a // b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)

# using Python operators:
print((a // b) % p == (a * b ** (p - 2)) % p)
diff --git a/maths/fibonacci.py b/maths/fibonacci.py
new file mode 100644
index 000000000000..e6519035401e
--- /dev/null
+++ b/maths/fibonacci.py
@@ -0,0 +1,130 @@
+# fibonacci.py
+"""
+1. Calculates the iterative fibonacci sequence
+
+2. Calculates the fibonacci sequence with a formula
+   a_n = [ Phi^n - (phi)^n ] / Sqrt[5]
+ reference-->Su, Francis E., et al. "Fibonacci Number Formula." Math Fun Facts.
+
+"""
+import functools
+import math
+import time
+from decimal import Decimal, getcontext
+
+getcontext().prec = 100
+
+
def timer_decorator(func):
    """
    Decorator that prints the wall-clock run time of ``func`` and returns
    its result.

    Bug fix: the original timed one call and then called ``func`` a second
    time for the return value, duplicating all work and side effects.
    """

    @functools.wraps(func)
    def timer_wrapper(*args, **kwargs):
        start = time.time()
        result = func(*args, **kwargs)  # call exactly once, keep the result
        end = time.time()
        if int(end - start) > 0:
            print(f"Run time for {func.__name__}: {(end - start):0.2f}s")
        else:
            print(f"Run time for {func.__name__}: {(end - start)*1000:0.2f}ms")
        return result

    return timer_wrapper
+
+
# define Python user-defined exceptions
# Internal validation errors: raised and immediately caught inside this
# module's input-checking helper, never propagated to callers.
class Error(Exception):
    """Base class for other exceptions"""


class ValueTooLargeError(Error):
    """Raised when the input value is too large"""


class ValueTooSmallError(Error):
    """Raised when the input value is not greater than one"""


class ValueLessThanZero(Error):
    """Raised when the input value is less than zero"""
+
+
def _check_number_input(n, min_thresh, max_thresh=None):
    """
    Validate that ``n`` is inside the allowed range; print a diagnostic and
    return False when it is not.

    :param n: single integer
    :type n: int
    :param min_thresh: min threshold, single integer
    :type min_thresh: int
    :param max_thresh: max threshold, single integer (None = no upper bound)
    :type max_thresh: int
    :return: boolean
    """
    try:
        # Accept immediately when no upper bound was requested.
        if n >= min_thresh and max_thresh is None:
            return True
        elif min_thresh <= n <= max_thresh:
            return True
        # The branches below raise, the handlers print, and control falls
        # through to the final ``return False`` — errors never escape here.
        elif n < 0:
            raise ValueLessThanZero
        elif n < min_thresh:
            raise ValueTooSmallError
        elif n > max_thresh:
            raise ValueTooLargeError
    except ValueLessThanZero:
        print("Incorrect Input: number must not be less than 0")
    except ValueTooSmallError:
        print(
            f"Incorrect Input: input number must be > {min_thresh} for the recursive "
            "calculation"
        )
    except ValueTooLargeError:
        print(
            f"Incorrect Input: input number must be < {max_thresh} for the recursive "
            "calculation"
        )
    return False
+
+
@timer_decorator
def fib_iterative(n):
    """
    Build the Fibonacci sequence [0, 1, 1, 2, ...] of length ``n``
    iteratively.

    :param n: calculate Fibonacci to the nth integer
    :type n:int
    :return: Fibonacci sequence as a list (None when validation fails)
    """
    n = int(n)
    if _check_number_input(n, 2):
        seq_out = [0, 1]
        a, b = 0, 1
        # Extend pairwise until the sequence holds n terms.
        for _ in range(n - len(seq_out)):
            a, b = b, a + b
            seq_out.append(b)
        return seq_out
+
+
@timer_decorator
def fib_formula(n):
    """
    Build the Fibonacci sequence of length ``n`` with Binet's closed-form
    formula, evaluated under the module-level 100-digit Decimal context.

    :param n: calculate Fibonacci to the nth integer
    :type n:int
    :return: Fibonacci sequence as a list (None when validation fails)
    """
    seq_out = [0, 1]
    n = int(n)
    if _check_number_input(n, 2, 1000000):
        # Golden ratio and its conjugate: the two roots of x^2 = x + 1.
        # NOTE(review): math.sqrt(5) is only double precision; Decimal(5).sqrt()
        # would use the full context — accuracy likely degrades for large i.
        sqrt = Decimal(math.sqrt(5))
        phi_1 = Decimal(1 + sqrt) / Decimal(2)
        phi_2 = Decimal(1 - sqrt) / Decimal(2)
        for i in range(2, n):
            # Binet: F(i) = (phi_1**i - phi_2**i) / sqrt(5); truncate to int.
            temp_out = ((phi_1 ** Decimal(i)) - (phi_2 ** Decimal(i))) * (
                Decimal(sqrt) ** Decimal(-1)
            )
            seq_out.append(int(temp_out))
        return seq_out
+
+
+if __name__ == "__main__":
+ num = 20
+ # print(f'{fib_recursive(num)}\n')
+ # print(f'{fib_iterative(num)}\n')
+ # print(f'{fib_formula(num)}\n')
+ fib_iterative(num)
+ fib_formula(num)
diff --git a/maths/fibonacci_sequence_recursion.py b/maths/fibonacci_sequence_recursion.py
index 9190e7fc7a40..794b9fc0bd3a 100644
--- a/maths/fibonacci_sequence_recursion.py
+++ b/maths/fibonacci_sequence_recursion.py
@@ -1,21 +1,22 @@
# Fibonacci Sequence Using Recursion
-def recur_fibo(n):
- if n <= 1:
- return n
- else:
- (recur_fibo(n-1) + recur_fibo(n-2))
-def isPositiveInteger(limit):
- return limit >= 0
def recur_fibo(n: int) -> int:
    """
    Return the n-th Fibonacci number via naive recursion.

    >>> [recur_fibo(i) for i in range(12)]
    [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]
    """
    if n <= 1:
        return n
    return recur_fibo(n - 1) + recur_fibo(n - 2)
+
-def main():
def main() -> None:
    """Prompt for a term count and print that many Fibonacci numbers."""
    limit = int(input("How many terms to include in fibonacci series: "))
    if limit <= 0:
        print("Please enter a positive integer: ")
        return
    print(f"The first {limit} terms of the fibonacci series are as follows:")
    print([recur_fibo(n) for n in range(limit)])
-if __name__ == '__main__':
+
+if __name__ == "__main__":
main()
diff --git a/maths/find_lcm.py b/maths/find_lcm.py
deleted file mode 100644
index 126242699ab7..000000000000
--- a/maths/find_lcm.py
+++ /dev/null
@@ -1,18 +0,0 @@
-def find_lcm(num_1, num_2):
- max = num_1 if num_1 > num_2 else num_2
- lcm = max
- while (True):
- if ((lcm % num_1 == 0) and (lcm % num_2 == 0)):
- break
- lcm += max
- return lcm
-
-
-def main():
- num_1 = 12
- num_2 = 76
- print(find_lcm(num_1, num_2))
-
-
-if __name__ == '__main__':
- main()
diff --git a/maths/find_max.py b/maths/find_max.py
new file mode 100644
index 000000000000..4d92e37eb2e1
--- /dev/null
+++ b/maths/find_max.py
@@ -0,0 +1,25 @@
+# NguyenU
+
+
def find_max(nums):
    """
    Return the largest value in ``nums`` (assumed non-empty).

    >>> for nums in ([3, 2, 1], [-3, -2, -1], [3, -3, 0], [3.0, 3.1, 2.9]):
    ...     find_max(nums) == max(nums)
    True
    True
    True
    True
    """
    maximum = nums[0]
    # Comparing the first element with itself is redundant, so start at 1.
    for value in nums[1:]:
        if value > maximum:
            maximum = value
    return maximum
+
+
+def main():
+ print(find_max([2, 4, 9, 7, 19, 94, 5])) # 94
+
+
+if __name__ == "__main__":
+ main()
diff --git a/maths/find_max_recursion.py b/maths/find_max_recursion.py
new file mode 100644
index 000000000000..03fb81950dcb
--- /dev/null
+++ b/maths/find_max_recursion.py
@@ -0,0 +1,25 @@
+# Divide and Conquer algorithm
def find_max(nums, left, right):
    """
    Divide-and-conquer maximum of nums[left:right + 1].
    :param nums: contains elements
    :param left: index of first element
    :param right: index of last element
    :return: max in nums

    >>> nums = [1, 3, 5, 7, 9, 2, 4, 6, 8, 10]
    >>> find_max(nums, 0, len(nums) - 1) == max(nums)
    True
    """
    if left == right:
        return nums[left]
    # Split at the midpoint, solve each half, keep the larger answer.
    mid = (left + right) // 2
    return max(find_max(nums, left, mid), find_max(nums, mid + 1, right))
+
+
+if __name__ == "__main__":
+ nums = [1, 3, 5, 7, 9, 2, 4, 6, 8, 10]
+ assert find_max(nums, 0, len(nums) - 1) == 10
diff --git a/maths/find_min.py b/maths/find_min.py
new file mode 100644
index 000000000000..2af2e44ba353
--- /dev/null
+++ b/maths/find_min.py
@@ -0,0 +1,26 @@
def find_min(nums):
    """
    Find Minimum Number in a List
    :param nums: contains elements (assumed non-empty)
    :return: min number in list

    >>> for nums in ([3, 2, 1], [-3, -2, -1], [3, -3, 0], [3.0, 3.1, 2.9]):
    ...     find_min(nums) == min(nums)
    True
    True
    True
    True
    """
    minimum = nums[0]
    # Comparing the first element with itself is redundant, so start at 1.
    for candidate in nums[1:]:
        if candidate < minimum:
            minimum = candidate
    return minimum
+
+
+def main():
+ assert find_min([0, 1, 2, 3, 4, 5, -3, 24, -56]) == -56
+
+
+if __name__ == "__main__":
+ main()
diff --git a/maths/find_min_recursion.py b/maths/find_min_recursion.py
new file mode 100644
index 000000000000..4488967cc57a
--- /dev/null
+++ b/maths/find_min_recursion.py
@@ -0,0 +1,25 @@
+# Divide and Conquer algorithm
def find_min(nums, left, right):
    """
    Divide-and-conquer minimum of nums[left:right + 1].
    :param nums: contains elements
    :param left: index of first element
    :param right: index of last element
    :return: min in nums

    >>> nums = [1, 3, 5, 7, 9, 2, 4, 6, 8, 10]
    >>> find_min(nums, 0, len(nums) - 1) == min(nums)
    True
    """
    if left == right:
        return nums[left]
    # Split at the midpoint, solve each half, keep the smaller answer.
    mid = (left + right) // 2
    return min(find_min(nums, left, mid), find_min(nums, mid + 1, right))
+
+
+if __name__ == "__main__":
+ nums = [1, 3, 5, 7, 9, 2, 4, 6, 8, 10]
+ assert find_min(nums, 0, len(nums) - 1) == 1
diff --git a/maths/floor.py b/maths/floor.py
new file mode 100644
index 000000000000..482250f5e59e
--- /dev/null
+++ b/maths/floor.py
@@ -0,0 +1,22 @@
+"""
+https://en.wikipedia.org/wiki/Floor_and_ceiling_functions
+"""
+
+
def floor(x) -> int:
    """
    Return the floor of x as an Integral.
    :param x: the number
    :return: the largest integer <= x.
    >>> import math
    >>> all(floor(n) == math.floor(n) for n
    ...     in (1, -1, 0, -0, 1.1, -1.1, 1.0, -1.0, 1_000_000_000))
    True
    """
    truncated = int(x)
    # int() truncates toward zero, so a negative non-integer must step down.
    return truncated if x - truncated >= 0 else truncated - 1
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/maths/gamma.py b/maths/gamma.py
new file mode 100644
index 000000000000..69cd819ef186
--- /dev/null
+++ b/maths/gamma.py
@@ -0,0 +1,63 @@
+import math
+
+from numpy import inf
+from scipy.integrate import quad
+
+
def gamma(num: float) -> float:
    """
    https://en.wikipedia.org/wiki/Gamma_function
    Evaluate the gamma function by numerically integrating its defining
    improper integral.  Only positive arguments are supported here.

    >>> gamma(-1)
    Traceback (most recent call last):
    ...
    ValueError: math domain error
    >>> gamma(0)
    Traceback (most recent call last):
    ...
    ValueError: math domain error
    >>> gamma(9)
    40320.0
    >>> from math import gamma as math_gamma
    >>> all(.99999999 < gamma(i) / math_gamma(i) <= 1.000000001
    ...     for i in range(1, 50))
    True
    >>> from math import gamma as math_gamma
    >>> gamma(3.3) - math_gamma(3.3) <= 0.00000001
    True
    """
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num))[0]


def integrand(x: float, z: float) -> float:
    # Integrand of the Euler integral: x**(z - 1) * e**(-x).
    return math.pow(x, z - 1) * math.exp(-x)
+
+
+if __name__ == "__main__":
+ from doctest import testmod
+
+ testmod()
diff --git a/maths/gaussian.py b/maths/gaussian.py
new file mode 100644
index 000000000000..a5dba50a927d
--- /dev/null
+++ b/maths/gaussian.py
@@ -0,0 +1,61 @@
+"""
+Reference: https://en.wikipedia.org/wiki/Gaussian_function
+"""
+from numpy import exp, pi, sqrt
+
+
def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    """
    Evaluate the Gaussian (normal) probability density function at ``x``.

    Fix: the return annotation was ``-> int``; the expression returns a float
    scalar (or a NumPy array when ``x`` is an array).

    :param x: point (or NumPy array of points) to evaluate at
    :param mu: mean of the distribution
    :param sigma: standard deviation of the distribution

    >>> gaussian(1)
    0.24197072451914337

    >>> gaussian(24)
    3.342714441794458e-126

    >>> gaussian(1, 4, 2)
    0.06475879783294587

    >>> gaussian(1, 5, 3)
    0.05467002489199788

    Supports NumPy Arrays
    Use numpy.meshgrid with this to generate gaussian blur on images.
    >>> import numpy as np
    >>> x = np.arange(15)
    >>> gaussian(x)
    array([3.98942280e-01, 2.41970725e-01, 5.39909665e-02, 4.43184841e-03,
           1.33830226e-04, 1.48671951e-06, 6.07588285e-09, 9.13472041e-12,
           5.05227108e-15, 1.02797736e-18, 7.69459863e-23, 2.11881925e-27,
           2.14638374e-32, 7.99882776e-38, 1.09660656e-43])

    >>> gaussian(15)
    5.530709549844416e-50

    >>> gaussian(2523, mu=234234, sigma=3425)
    0.0
    """
    return 1 / sqrt(2 * pi * sigma ** 2) * exp(-((x - mu) ** 2) / (2 * sigma ** 2))
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/maths/greater_common_divisor.py b/maths/greater_common_divisor.py
deleted file mode 100644
index 15adaca1fb8d..000000000000
--- a/maths/greater_common_divisor.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Greater Common Divisor - https://en.wikipedia.org/wiki/Greatest_common_divisor
-def gcd(a, b):
- return b if a == 0 else gcd(b % a, a)
-
-def main():
- try:
- nums = input("Enter two Integers separated by comma (,): ").split(',')
- num1 = int(nums[0]); num2 = int(nums[1])
- except (IndexError, UnboundLocalError, ValueError):
- print("Wrong Input")
- print(f"gcd({num1}, {num2}) = {gcd(num1, num2)}")
-
-if __name__ == '__main__':
- main()
-
diff --git a/maths/greatest_common_divisor.py b/maths/greatest_common_divisor.py
new file mode 100644
index 000000000000..a2174a8eb74a
--- /dev/null
+++ b/maths/greatest_common_divisor.py
@@ -0,0 +1,77 @@
+"""
+Greatest Common Divisor.
+
+Wikipedia reference: https://en.wikipedia.org/wiki/Greatest_common_divisor
+
+gcd(a, b) = gcd(a, -b) = gcd(-a, b) = gcd(-a, -b) by definition of divisibility
+"""
+
+
def greatest_common_divisor(a: int, b: int) -> int:
    """
    Calculate Greatest Common Divisor (GCD) with Euclid's recursion;
    the result is always non-negative.
    >>> greatest_common_divisor(24, 40)
    8
    >>> greatest_common_divisor(1, 1)
    1
    >>> greatest_common_divisor(1, 800)
    1
    >>> greatest_common_divisor(11, 37)
    1
    >>> greatest_common_divisor(3, 5)
    1
    >>> greatest_common_divisor(16, 4)
    4
    >>> greatest_common_divisor(-3, 9)
    3
    >>> greatest_common_divisor(9, -3)
    3
    >>> greatest_common_divisor(3, -9)
    3
    >>> greatest_common_divisor(-3, -9)
    3
    """
    if a == 0:
        return abs(b)
    return greatest_common_divisor(b % a, a)
+
+
def gcd_by_iterative(x: int, y: int) -> int:
    """
    Iterative Euclidean algorithm — more memory efficient than the recursive
    version because it creates no extra stack frames.
    >>> gcd_by_iterative(24, 40)
    8
    >>> greatest_common_divisor(24, 40) == gcd_by_iterative(24, 40)
    True
    >>> gcd_by_iterative(-3, -9)
    3
    >>> gcd_by_iterative(3, -9)
    3
    >>> gcd_by_iterative(1, -800)
    1
    >>> gcd_by_iterative(11, 37)
    1
    """
    # When y reaches 0 the loop stops and x holds the GCD.
    while y != 0:
        x, y = y, x % y
    return abs(x)
+
+
+def main():
+ """
+ Call Greatest Common Divisor function.
+ """
+ try:
+ nums = input("Enter two integers separated by comma (,): ").split(",")
+ num_1 = int(nums[0])
+ num_2 = int(nums[1])
+ print(
+ f"greatest_common_divisor({num_1}, {num_2}) = "
+ f"{greatest_common_divisor(num_1, num_2)}"
+ )
+ print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
+ except (IndexError, UnboundLocalError, ValueError):
+ print("Wrong input")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/maths/hardy_ramanujanalgo.py b/maths/hardy_ramanujanalgo.py
new file mode 100644
index 000000000000..90e4913c70a7
--- /dev/null
+++ b/maths/hardy_ramanujanalgo.py
@@ -0,0 +1,45 @@
+# This theorem states that the number of prime factors of n
+# will be approximately log(log(n)) for most natural numbers n
+
+import math
+
+
def exactPrimeFactorCount(n):
    """
    Count the distinct prime factors of ``n`` by trial division.

    :param n: integer greater than 1
    :return: number of distinct primes dividing n

    >>> exactPrimeFactorCount(51242183)
    3
    """
    count = 0
    if n % 2 == 0:
        count += 1
        while n % 2 == 0:
            # Bug fix: was int(n / 2) — true division goes through float and
            # silently corrupts values above 2**53; use floor division.
            n //= 2
    # n is now odd, so candidate factors can step by 2.
    i = 3
    # i * i <= n replaces int(math.sqrt(n)): pure-int, no float rounding.
    while i * i <= n:
        if n % i == 0:
            count += 1
            while n % i == 0:
                n //= i
        i += 2
    # Whatever remains greater than 2 is itself a prime factor.
    if n > 2:
        count += 1
    return count
+
+
+if __name__ == "__main__":
+ n = 51242183
+ print(f"The number of distinct prime factors is/are {exactPrimeFactorCount(n)}")
+ print("The value of log(log(n)) is {:.4f}".format(math.log(math.log(n))))
+
+ """
+ The number of distinct prime factors is/are 3
+ The value of log(log(n)) is 2.8765
+ """
diff --git a/maths/images/__init__.py b/maths/images/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/maths/images/gaussian.png b/maths/images/gaussian.png
new file mode 100644
index 000000000000..eb007c7e21b2
Binary files /dev/null and b/maths/images/gaussian.png differ
diff --git a/maths/is_square_free.py b/maths/is_square_free.py
new file mode 100644
index 000000000000..8d83d95ffb67
--- /dev/null
+++ b/maths/is_square_free.py
@@ -0,0 +1,39 @@
+"""
+References: wikipedia:square free number
+python/black : True
+flake8 : True
+"""
+from __future__ import annotations
+
+
def is_square_free(factors: list[int]) -> bool:
    """
    This function takes a list of prime factors as input and
    returns True if the factors are square free, i.e. no factor repeats.

    >>> is_square_free([1, 1, 2, 3, 4])
    False

    These are wrong but should return some value:
    it simply checks for repetition in the numbers.
    >>> is_square_free([1, 3, 4, 'sd', 0.0])
    True

    >>> is_square_free([1, 0.5, 2, 0.0])
    True
    >>> is_square_free([1, 2, 2, 5])
    False
    >>> is_square_free('asd')
    True
    >>> is_square_free(24)
    Traceback (most recent call last):
    ...
    TypeError: 'int' object is not iterable
    """
    seen = set()
    for factor in factors:
        if factor in seen:
            return False
        seen.add(factor)
    return True
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/maths/jaccard_similarity.py b/maths/jaccard_similarity.py
new file mode 100644
index 000000000000..4f24d308f340
--- /dev/null
+++ b/maths/jaccard_similarity.py
@@ -0,0 +1,80 @@
+"""
+The Jaccard similarity coefficient is a commonly used indicator of the
+similarity between two sets. Let U be a set and A and B be subsets of U,
+then the Jaccard index/similarity is defined to be the ratio of the number
+of elements of their intersection and the number of elements of their union.
+
+Inspired from Wikipedia and
+the book Mining of Massive Datasets [MMDS 2nd Edition, Chapter 3]
+
+https://en.wikipedia.org/wiki/Jaccard_index
+https://mmds.org
+
+Jaccard similarity is widely used with MinHashing.
+"""
+
+
def jaccard_similariy(setA, setB, alternativeUnion=False):
    """
    Finds the jaccard similarity between two sets.
    Essentially, its intersection over union.

    The alternative way to calculate this is to take union as sum of the
    number of items in the two sets. This will lead to jaccard similarity
    of a set with itself be 1/2 instead of 1. [MMDS 2nd Edition, Page 77]

    Parameters:
        :setA (set,list,tuple): A non-empty set/list
        :setB (set,list,tuple): A non-empty set/list
        :alternativeUnion (boolean): If True, use sum of number of
        items as union

    Output:
        (float) The jaccard similarity between the two sets.

    Examples:
    >>> setA = {'a', 'b', 'c', 'd', 'e'}
    >>> setB = {'c', 'd', 'e', 'f', 'h', 'i'}
    >>> jaccard_similariy(setA,setB)
    0.375

    >>> jaccard_similariy(setA,setA)
    1.0

    >>> jaccard_similariy(setA,setA,True)
    0.5

    >>> setA = ['a', 'b', 'c', 'd', 'e']
    >>> setB = ('c', 'd', 'e', 'f', 'h', 'i')
    >>> jaccard_similariy(setA,setB)
    0.375

    >>> jaccard_similariy(setA, setB, True)
    0.2727272727272727
    """
    if isinstance(setA, set) and isinstance(setB, set):
        intersection = len(setA.intersection(setB))
        if alternativeUnion:
            union = len(setA) + len(setB)
        else:
            union = len(setA.union(setB))
        return intersection / union

    if isinstance(setA, (list, tuple)) and isinstance(setB, (list, tuple)):
        intersection = [element for element in setA if element in setB]
        if alternativeUnion:
            # Bug fix: the original stored an int in ``union`` here and then
            # called len() on it below, raising TypeError.
            return len(intersection) / (len(setA) + len(setB))
        union = setA + [element for element in setB if element not in setA]
        return len(intersection) / len(union)
+
+
+if __name__ == "__main__":
+
+ setA = {"a", "b", "c", "d", "e"}
+ setB = {"c", "d", "e", "f", "h", "i"}
+ print(jaccard_similariy(setA, setB))
diff --git a/maths/kadanes.py b/maths/kadanes.py
new file mode 100644
index 000000000000..d239d4a2589b
--- /dev/null
+++ b/maths/kadanes.py
@@ -0,0 +1,65 @@
+"""
+Kadane's algorithm to get maximum subarray sum
+https://medium.com/@rsinghal757/kadanes-algorithm-dynamic-programming-how-and-why-does-it-work-3fd8849ed73d
+https://en.wikipedia.org/wiki/Maximum_subarray_problem
+"""
+test_data: tuple = ([-2, -8, -9], [2, 8, 9], [-1, 0, 1], [0, 0], [])
+
+
def negative_exist(arr: list) -> int:
    """
    Return 0 if ``arr`` contains any non-negative value, otherwise the
    largest (closest to zero) of its all-negative elements.

    >>> negative_exist([-2,-8,-9])
    -2
    >>> [negative_exist(arr) for arr in test_data]
    [-2, 0, 0, 0, 0]
    """
    arr = arr or [0]
    largest = arr[0]
    for element in arr:
        if element >= 0:
            return 0
        largest = max(largest, element)
    return largest


def kadanes(arr: list) -> int:
    """
    Kadane's algorithm: maximum subarray sum of ``arr``.

    When every element is negative the answer is the single largest element
    (delegated to negative_exist). Otherwise scan once, keeping the best
    running sum seen and resetting the running sum whenever it drops
    below zero.

    >>> kadanes([2, 3, -9, 8, -2])
    8
    >>> [kadanes(arr) for arr in test_data]
    [-2, 19, 1, 0, 0]
    """
    best = negative_exist(arr)
    if best < 0:
        return best

    best = 0
    running = 0
    for element in arr:
        running += element
        best = max(best, running)
        if running < 0:
            running = 0
    return best
+
+
if __name__ == "__main__":
    try:
        # Typo fix in the user-facing prompt: "sepatated" -> "separated".
        print("Enter integer values separated by spaces")
        arr = [int(x) for x in input().split()]
        print(f"Maximum subarray sum of {arr} is {kadanes(arr)}")
    except ValueError:
        print("Please enter integer values.")
diff --git a/maths/karatsuba.py b/maths/karatsuba.py
new file mode 100644
index 000000000000..df29c77a5cf2
--- /dev/null
+++ b/maths/karatsuba.py
@@ -0,0 +1,32 @@
+""" Multiply two numbers using Karatsuba algorithm """
+
+
def karatsuba(a, b):
    """
    Multiply two integers with the Karatsuba divide-and-conquer scheme
    (three recursive multiplications instead of four).

    >>> karatsuba(15463, 23489) == 15463 * 23489
    True
    >>> karatsuba(3, 9) == 3 * 9
    True
    """
    # Base case: single-digit operands multiply directly.
    if len(str(a)) == 1 or len(str(b)) == 1:
        return a * b

    half = max(len(str(a)), len(str(b))) // 2
    a_high, a_low = divmod(a, 10 ** half)
    b_high, b_low = divmod(b, 10 ** half)

    low = karatsuba(a_low, b_low)
    cross = karatsuba(a_high + a_low, b_high + b_low)
    high = karatsuba(a_high, b_high)

    return high * 10 ** (2 * half) + (cross - high - low) * 10 ** half + low
+
+
+def main():
+ print(karatsuba(15463, 23489))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/maths/krishnamurthy_number.py b/maths/krishnamurthy_number.py
new file mode 100644
index 000000000000..c88f68a07f27
--- /dev/null
+++ b/maths/krishnamurthy_number.py
@@ -0,0 +1,49 @@
+"""
+ == Krishnamurthy Number ==
+It is also known as Peterson Number
+A Krishnamurthy Number is a number whose sum of the
+factorial of the digits equals to the original
+number itself.
+
+For example: 145 = 1! + 4! + 5!
+ So, 145 is a Krishnamurthy Number
+"""
+
+
def factorial(digit: int) -> int:
    """
    Recursive factorial of a single digit.

    >>> factorial(3)
    6
    >>> factorial(0)
    1
    >>> factorial(5)
    120
    """
    # 0! and 1! are both 1; otherwise recurse.
    if digit in (0, 1):
        return 1
    return digit * factorial(digit - 1)


def krishnamurthy(number: int) -> bool:
    """
    A Krishnamurthy number equals the sum of the factorials of its digits.

    >>> krishnamurthy(145)
    True
    >>> krishnamurthy(240)
    False
    >>> krishnamurthy(1)
    True
    """
    digit_factorial_sum = 0
    remaining = number
    while remaining > 0:
        remaining, digit = divmod(remaining, 10)
        digit_factorial_sum += factorial(digit)
    return digit_factorial_sum == number
+
+
+if __name__ == "__main__":
+ print("Program to check whether a number is a Krisnamurthy Number or not.")
+ number = int(input("Enter number: ").strip())
+ print(
+ f"{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number."
+ )
diff --git a/maths/kth_lexicographic_permutation.py b/maths/kth_lexicographic_permutation.py
new file mode 100644
index 000000000000..23eab626fbf8
--- /dev/null
+++ b/maths/kth_lexicographic_permutation.py
@@ -0,0 +1,40 @@
def kthPermutation(k, n):
    """
    Finds k'th lexicographic permutation (in increasing order) of
    0,1,2,...n-1 in O(n^2) time.

    Examples:
    First permutation is always 0,1,2,...n
    >>> kthPermutation(0,5)
    [0, 1, 2, 3, 4]

    The order of permutation of 0,1,2,3 is [0,1,2,3], [0,1,3,2], [0,2,1,3],
    [0,2,3,1], [0,3,1,2], [0,3,2,1], [1,0,2,3], [1,0,3,2], [1,2,0,3],
    [1,2,3,0], [1,3,0,2]
    >>> kthPermutation(10,4)
    [1, 3, 0, 2]
    """
    # Factorials from 1! to (n-1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation: factoradic decomposition of k — each quotient digit
    # selects the index of the next element among the remaining candidates.
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])

    return permutation
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/maths/largest_of_very_large_numbers.py b/maths/largest_of_very_large_numbers.py
new file mode 100644
index 000000000000..d2dc0af18126
--- /dev/null
+++ b/maths/largest_of_very_large_numbers.py
@@ -0,0 +1,35 @@
+# Author: Abhijeeth S
+
+import math
+
+
def res(x, y):
    """Return log10(x ** y), used to compare magnitudes of huge powers."""
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            # NOTE(review): on the log scale 0 means x^y == 1, and log10(0)
            # is really -inf — confirm this sentinel is what comparisons want.
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
            # NOTE(review): x^0 == 1, whose log10 is 0, not 1; returning 1 can
            # mis-rank against results whose log lies in (0, 1). Confirm intent.
+
+
+if __name__ == "__main__": # Main function
+ # Read two numbers from input and typecast them to int using map function.
+ # Here x is the base and y is the power.
+ prompt = "Enter the base and the power separated by a comma: "
+ x1, y1 = map(int, input(prompt).split(","))
+ x2, y2 = map(int, input(prompt).split(","))
+
+ # We find the log of each number, using the function res(), which takes two
+ # arguments.
+ res1 = res(x1, y1)
+ res2 = res(x2, y2)
+
+ # We check for the largest number
+ if res1 > res2:
+ print("Largest number is", x1, "^", y1)
+ elif res2 > res1:
+ print("Largest number is", x2, "^", y2)
+ else:
+ print("Both are equal")
diff --git a/maths/least_common_multiple.py b/maths/least_common_multiple.py
new file mode 100644
index 000000000000..0d087643e869
--- /dev/null
+++ b/maths/least_common_multiple.py
@@ -0,0 +1,95 @@
+import unittest
+from timeit import timeit
+
+
def least_common_multiple_slow(first_num: int, second_num: int) -> int:
    """
    Find the least common multiple of two numbers.

    Learn more: https://en.wikipedia.org/wiki/Least_common_multiple

    >>> least_common_multiple_slow(5, 2)
    10
    >>> least_common_multiple_slow(12, 76)
    228
    """
    # Walk successive multiples of the larger operand until one is also
    # divisible by the other operand.
    max_num = max(first_num, second_num)
    common_mult = max_num
    while common_mult % first_num != 0 or common_mult % second_num != 0:
        common_mult += max_num
    return common_mult
+
+
def greatest_common_divisor(a: int, b: int) -> int:
    """
    Calculate Greatest Common Divisor (GCD).
    see greatest_common_divisor.py
    >>> greatest_common_divisor(24, 40)
    8
    >>> greatest_common_divisor(1, 1)
    1
    >>> greatest_common_divisor(1, 800)
    1
    >>> greatest_common_divisor(11, 37)
    1
    >>> greatest_common_divisor(3, 5)
    1
    >>> greatest_common_divisor(16, 4)
    4
    >>> greatest_common_divisor(-3, 9)
    3
    """
    # Consistency fix: abs() keeps the result non-negative for negative
    # inputs, matching maths/greatest_common_divisor.py.
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)
+
+
def least_common_multiple_fast(first_num: int, second_num: int) -> int:
    """
    Find the least common multiple of two numbers.
    https://en.wikipedia.org/wiki/Least_common_multiple#Using_the_greatest_common_divisor
    >>> least_common_multiple_fast(5,2)
    10
    >>> least_common_multiple_fast(12,76)
    228
    """
    # lcm(a, b) = a * b / gcd(a, b); divide first to keep intermediates small.
    gcd = greatest_common_divisor(first_num, second_num)
    return first_num // gcd * second_num
+
+
+def benchmark():
+ setup = (
+ "from __main__ import least_common_multiple_slow, least_common_multiple_fast"
+ )
+ print(
+ "least_common_multiple_slow():",
+ timeit("least_common_multiple_slow(1000, 999)", setup=setup),
+ )
+ print(
+ "least_common_multiple_fast():",
+ timeit("least_common_multiple_fast(1000, 999)", setup=setup),
+ )
+
+
+class TestLeastCommonMultiple(unittest.TestCase):
+
+ test_inputs = [
+ (10, 20),
+ (13, 15),
+ (4, 31),
+ (10, 42),
+ (43, 34),
+ (5, 12),
+ (12, 25),
+ (10, 25),
+ (6, 9),
+ ]
+ expected_results = [20, 195, 124, 210, 1462, 60, 300, 50, 18]
+
+ def test_lcm_function(self):
+ for i, (first_num, second_num) in enumerate(self.test_inputs):
+ slow_result = least_common_multiple_slow(first_num, second_num)
+ fast_result = least_common_multiple_fast(first_num, second_num)
+ with self.subTest(i=i):
+ self.assertEqual(slow_result, self.expected_results[i])
+ self.assertEqual(fast_result, self.expected_results[i])
+
+
+if __name__ == "__main__":
+ benchmark()
+ unittest.main()
diff --git a/maths/line_length.py b/maths/line_length.py
new file mode 100644
index 000000000000..1d386b44b50d
--- /dev/null
+++ b/maths/line_length.py
@@ -0,0 +1,65 @@
+import math
+from typing import Callable, Union
+
+
def line_length(
    fnc: Callable[[Union[int, float]], Union[int, float]],
    x_start: Union[int, float],
    x_end: Union[int, float],
    steps: int = 100,
) -> float:
    """
    Approximates the arc length of a line segment by treating the curve as a
    sequence of linear lines and summing their lengths
    :param fnc: a function which defines a curve
    :param x_start: left end point to indicate the start of line segment
    :param x_end: right end point to indicate end of line segment
    :param steps: an accuracy gauge; more steps increases accuracy
    :return: a float representing the length of the curve

    >>> def f(x):
    ...    return x
    >>> f"{line_length(f, 0, 1, 10):.6f}"
    '1.414214'

    >>> def f(x):
    ...    return 1
    >>> f"{line_length(f, -5.5, 4.5):.6f}"
    '10.000000'

    >>> def f(x):
    ...    return math.sin(5 * x) + math.cos(10 * x) + x * x/10
    >>> f"{line_length(f, 0.0, 10.0, 10000):.6f}"
    '69.534930'
    """
    length = 0.0
    prev_x = x_start
    prev_y = fnc(x_start)

    for _ in range(steps):
        # Advance one step and add the chord length between the two samples.
        next_x = (x_end - x_start) / steps + prev_x
        next_y = fnc(next_x)
        length += math.hypot(next_x - prev_x, next_y - prev_y)

        prev_x = next_x
        prev_y = next_y

    return length
+
+
+if __name__ == "__main__":
+
+ def f(x):
+ return math.sin(10 * x)
+
+ print("f(x) = sin(10 * x)")
+ print("The length of the curve from x = -10 to x = 10 is:")
+ i = 10
+ while i <= 100000:
+ print(f"With {i} steps: {line_length(f, -10, 10, i)}")
+ i *= 10
diff --git a/maths/lucas_lehmer_primality_test.py b/maths/lucas_lehmer_primality_test.py
new file mode 100644
index 000000000000..15e25cbfe996
--- /dev/null
+++ b/maths/lucas_lehmer_primality_test.py
@@ -0,0 +1,41 @@
+"""
+ In mathematics, the Lucas–Lehmer test (LLT) is a primality test for Mersenne
+ numbers. https://en.wikipedia.org/wiki/Lucas%E2%80%93Lehmer_primality_test
+
+ A Mersenne number is a number that is one less than a power of two.
+ That is M_p = 2^p - 1
+ https://en.wikipedia.org/wiki/Mersenne_prime
+
+ The Lucas–Lehmer test is the primality test used by the
+ Great Internet Mersenne Prime Search (GIMPS) to locate large primes.
+"""
+
+
+# Primality test 2^p - 1
+# Return true if 2^p - 1 is prime
def lucas_lehmer_test(p: int) -> bool:
    """
    Lucas-Lehmer primality check for the Mersenne number M_p = 2^p - 1.

    >>> lucas_lehmer_test(p=7)
    True

    >>> lucas_lehmer_test(p=11)
    False

    # M_11 = 2^11 - 1 = 2047 = 23 * 89
    """

    if p < 2:
        raise ValueError("p should not be less than 2!")
    if p == 2:
        # M_2 = 3 is prime but the s-sequence below starts at p = 3.
        return True

    mersenne = (1 << p) - 1  # 2**p - 1 via bit shift
    term = 4
    for _ in range(p - 2):
        term = (term * term - 2) % mersenne
    # M_p is prime exactly when the (p-2)th term vanishes mod M_p.
    return term == 0
+
+
+if __name__ == "__main__":
+ print(lucas_lehmer_test(7))
+ print(lucas_lehmer_test(11))
diff --git a/maths/lucas_series.py b/maths/lucas_series.py
new file mode 100644
index 000000000000..6b32c2022e13
--- /dev/null
+++ b/maths/lucas_series.py
@@ -0,0 +1,66 @@
+"""
+https://en.wikipedia.org/wiki/Lucas_number
+"""
+
+
def recursive_lucas_number(n_th_number: int) -> int:
    """
    Returns the nth lucas number
    >>> recursive_lucas_number(1)
    1
    >>> recursive_lucas_number(20)
    15127
    >>> recursive_lucas_number(0)
    2
    >>> recursive_lucas_number(25)
    167761
    >>> recursive_lucas_number(-1.5)
    Traceback (most recent call last):
        ...
    TypeError: recursive_lucas_number accepts only integer arguments.
    """
    if not isinstance(n_th_number, int):
        raise TypeError("recursive_lucas_number accepts only integer arguments.")
    if n_th_number in (0, 1):
        # Base cases: L(0) = 2, L(1) = 1.
        return 2 - n_th_number
    # L(n) = L(n-2) + L(n-1)
    return recursive_lucas_number(n_th_number - 2) + recursive_lucas_number(
        n_th_number - 1
    )
+
+
def dynamic_lucas_number(n_th_number: int) -> int:
    """
    Returns the nth lucas number
    >>> dynamic_lucas_number(1)
    1
    >>> dynamic_lucas_number(20)
    15127
    >>> dynamic_lucas_number(0)
    2
    >>> dynamic_lucas_number(25)
    167761
    >>> dynamic_lucas_number(-1.5)
    Traceback (most recent call last):
        ...
    TypeError: dynamic_lucas_number accepts only integer arguments.
    """
    if not isinstance(n_th_number, int):
        raise TypeError("dynamic_lucas_number accepts only integer arguments.")
    # Iterate the recurrence; `previous` holds L(i) after i steps.
    previous, current = 2, 1
    for _ in range(n_th_number):
        previous, current = current, previous + current
    return previous
+
+
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # Print the first n Lucas numbers with both implementations.
    n = int(input("Enter the number of terms in lucas series:\n").strip())
    print("Using recursive function to calculate lucas series:")
    print(" ".join(str(recursive_lucas_number(i)) for i in range(n)))
    print("\nUsing dynamic function to calculate lucas series:")
    print(" ".join(str(dynamic_lucas_number(i)) for i in range(n)))
diff --git a/maths/matrix_exponentiation.py b/maths/matrix_exponentiation.py
new file mode 100644
index 000000000000..033ceb3f28a0
--- /dev/null
+++ b/maths/matrix_exponentiation.py
@@ -0,0 +1,100 @@
+"""Matrix Exponentiation"""
+
+import timeit
+
+"""
+Matrix Exponentiation is a technique to solve linear recurrences in logarithmic time.
+You read more about it here:
+http://zobayer.blogspot.com/2010/11/matrix-exponentiation.html
+https://www.hackerearth.com/practice/notes/matrix-exponentiation-1/
+"""
+
+
class Matrix:
    """A minimal square matrix supporting multiplication (for exponentiation)."""

    def __init__(self, arg):
        if isinstance(arg, list):
            # Initializes a matrix identical to the one provided.
            self.t = arg
            self.n = len(arg)
        else:
            # Initializes an all-zero square matrix of size `arg`.
            self.n = arg
            self.t = [[0] * arg for _ in range(arg)]

    def __mul__(self, b):
        # Standard O(n^3) row-by-column matrix product.
        product = Matrix(self.n)
        for row in range(self.n):
            for col in range(self.n):
                product.t[row][col] = sum(
                    self.t[row][k] * b.t[k][col] for k in range(self.n)
                )
        return product
+
+
def modular_exponentiation(a, b):
    # NOTE(review): despite the name, no modulus is applied here — this is
    # plain binary (square-and-multiply) matrix exponentiation a**b.
    result = Matrix([[1, 0], [0, 1]])  # 2x2 identity
    while b > 0:
        if b & 1:
            result *= a
        a *= a
        b >>= 1
    return result
+
+
def fibonacci_with_matrix_exponentiation(n, f1, f2):
    """Return the nth term of the Fibonacci-like sequence seeded with
    f1, f2, using O(log n) matrix multiplications.
    """
    # Trivial cases need no matrix work.
    if n == 1:
        return f1
    if n == 2:
        return f2
    # Powers of [[1, 1], [1, 0]] generate consecutive Fibonacci numbers.
    base_matrix = Matrix([[1, 1], [1, 0]])
    powered = modular_exponentiation(base_matrix, n - 2)
    return f2 * powered.t[0][0] + f1 * powered.t[0][1]
+
+
def simple_fibonacci(n, f1, f2):
    """Return the nth term of the Fibonacci-like sequence seeded with
    f1, f2, by straightforward O(n) iteration.

    Bug fix: the original update step added the *two-steps-back* seed
    (`fn_1 + fn_2` with fn_2 lagging wrongly), which is only correct when
    f1 == f2; e.g. seeds (1, 2) previously gave term 4 as 4 instead of 5,
    disagreeing with fibonacci_with_matrix_exponentiation.
    """
    # Trivial cases
    if n == 1:
        return f1
    elif n == 2:
        return f2

    # Invariant: after k iterations, current == term (2 + k).
    previous, current = f1, f2
    for _ in range(n - 2):
        previous, current = current, previous + current

    return current
+
+
def matrix_exponentiation_time():
    """Time fibonacci_with_matrix_exponentiation over 100 random inputs.

    Prints the average per-call time and returns the total elapsed time.
    """
    # timeit runs the stmt in a fresh namespace, so the setup re-imports
    # the function (and randint) from this module via __main__.
    setup = """
from random import randint
from __main__ import fibonacci_with_matrix_exponentiation
"""
    code = "fibonacci_with_matrix_exponentiation(randint(1,70000), 1, 1)"
    exec_time = timeit.timeit(setup=setup, stmt=code, number=100)
    print("With matrix exponentiation the average execution time is ", exec_time / 100)
    return exec_time
+
+
def simple_fibonacci_time():
    """Time simple_fibonacci over 100 random inputs.

    Prints the average per-call time and returns the total elapsed time.
    """
    # Mirrors matrix_exponentiation_time() so the two benchmarks compare
    # the same random input distribution.
    setup = """
from random import randint
from __main__ import simple_fibonacci
"""
    code = "simple_fibonacci(randint(1,70000), 1, 1)"
    exec_time = timeit.timeit(setup=setup, stmt=code, number=100)
    print(
        "Without matrix exponentiation the average execution time is ", exec_time / 100
    )
    return exec_time
+
+
def main():
    # Run both benchmarks so their averages can be compared side by side.
    matrix_exponentiation_time()
    simple_fibonacci_time()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/maths/miller_rabin.py b/maths/miller_rabin.py
new file mode 100644
index 000000000000..fe992027190b
--- /dev/null
+++ b/maths/miller_rabin.py
@@ -0,0 +1,50 @@
+import random
+
+from .binary_exp_mod import bin_exp_mod
+
+
+# This is a probabilistic check to test primality, useful for big numbers!
+# if it's a prime, it will return true
+# if it's not a prime, the chance of it returning true is at most 1/4**prec
# This is a probabilistic check to test primality, useful for big numbers!
# if it's a prime, it will return true
# if it's not a prime, the chance of it returning true is at most 1/4**prec
def is_prime(n, prec=1000):
    """
    >>> from .prime_check import prime_check
    >>> all(is_prime(i) == prime_check(i) for i in range(1000))
    True
    """
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # n is odd: write n - 1 = d * 2**exp with d odd.
    d = n - 1
    exp = 0
    while d % 2 == 0:
        # Bug fix: `d /= 2` turned d into a float, so bin_exp_mod received a
        # non-integer exponent; floor division keeps it an exact int.
        d //= 2
        exp += 1

    # Run `prec` independent Miller-Rabin witness rounds.
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    # a is not a witness for this round; n may be prime.
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                # a proves n composite.
                return False
        count += 1
    return True
+
+
+if __name__ == "__main__":
+ n = abs(int(input("Enter bound : ").strip()))
+ print("Here's the list of primes:")
+ print(", ".join(str(i) for i in range(n + 1) if is_prime(i)))
diff --git a/maths/mobius_function.py b/maths/mobius_function.py
new file mode 100644
index 000000000000..4fcf35f21813
--- /dev/null
+++ b/maths/mobius_function.py
@@ -0,0 +1,43 @@
+"""
+References: https://en.wikipedia.org/wiki/M%C3%B6bius_function
+References: wikipedia:square free number
+python/black : True
+flake8 : True
+"""
+
+from maths.is_square_free import is_square_free
+from maths.prime_factors import prime_factors
+
+
def mobius(n: int) -> int:
    """
    Mobius function
    >>> mobius(24)
    0
    >>> mobius(-1)
    1
    >>> mobius('asd')
    Traceback (most recent call last):
        ...
    TypeError: '<=' not supported between instances of 'int' and 'str'
    >>> mobius(10**400)
    0
    >>> mobius(10**-400)
    1
    >>> mobius(-1424)
    1
    >>> mobius([1, '2', 2.0])
    Traceback (most recent call last):
        ...
    TypeError: '<=' not supported between instances of 'int' and 'list'
    """
    factorization = prime_factors(n)
    if not is_square_free(factorization):
        # A repeated prime factor forces mu(n) = 0.
        return 0
    # Square-free: sign is decided by the parity of the factor count.
    return 1 if len(factorization) % 2 == 0 else -1
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/maths/modular_exponential.py b/maths/modular_exponential.py
index b3f4c00bd5d8..42987dbf3a24 100644
--- a/maths/modular_exponential.py
+++ b/maths/modular_exponential.py
@@ -1,20 +1,45 @@
-def modularExponential(base, power, mod):
- if power < 0:
- return -1
- base %= mod
- result = 1
+"""
+ Modular Exponential.
+ Modular exponentiation is a type of exponentiation performed over a modulus.
+ For more explanation, please check
+ https://en.wikipedia.org/wiki/Modular_exponentiation
+"""
- while power > 0:
- if power & 1:
- result = (result * base) % mod
- power = power >> 1
- base = (base * base) % mod
- return result
+"""Calculate Modular Exponential."""
+
+
def modular_exponential(base: int, power: int, mod: int):
    """
    Compute (base ** power) % mod by binary square-and-multiply.
    Negative powers are not supported and yield -1.

    >>> modular_exponential(5, 0, 10)
    1
    >>> modular_exponential(2, 8, 7)
    4
    >>> modular_exponential(3, -2, 9)
    -1
    """

    if power < 0:
        return -1

    result = 1
    base %= mod
    while power:
        if power % 2:
            # Fold the current square into the answer for each set bit.
            result = result * base % mod
        base = base * base % mod
        power //= 2

    return result
def main():
- print(modularExponential(3, 200, 13))
+ """Call Modular Exponential Function."""
+ print(modular_exponential(3, 200, 13))
+
+
+if __name__ == "__main__":
+ import doctest
+ doctest.testmod()
-if __name__ == '__main__':
- main()
+ main()
diff --git a/maths/monte_carlo.py b/maths/monte_carlo.py
new file mode 100644
index 000000000000..28027cbe4178
--- /dev/null
+++ b/maths/monte_carlo.py
@@ -0,0 +1,129 @@
+"""
+@author: MatteoRaso
+"""
+from math import pi, sqrt
+from random import uniform
+from statistics import mean
+from typing import Callable
+
+
def pi_estimator(iterations: int):
    """
    Monte Carlo estimate of pi: inscribe a unit circle in a 2x2 square
    centred at the origin, drop `iterations` random dots in the square,
    and use 4 * (dots inside circle) / (total dots) as the estimate.
    Prints the estimate, the reference value and the absolute error.
    """

    def lands_in_circle(x: float, y: float) -> bool:
        # Inside (or on) the circle when the distance from the origin
        # does not exceed the radius of 1.
        return sqrt(x ** 2 + y ** 2) <= 1

    hits = (
        int(lands_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # Circle area / square area = pi / 4, so scale the hit rate by 4.
    pi_estimate = mean(hits) * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")
+
+
def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """
    Monte Carlo estimate of the integral of a non-negative real-valued
    continuous function over the bounded interval [min_value, max_value].

    The average of f at uniformly drawn sample points approximates
    E[f(x)], and the integral equals E[f(x)] * (max_value - min_value).
    """

    samples = (
        function_to_integrate(uniform(min_value, max_value))
        for _ in range(iterations)
    )
    interval_width = max_value - min_value
    return mean(samples) * interval_width
+
+
def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    """
    Checks estimation error for area_under_curve_estimator function
    for f(x) = x where x lies within min_value to max_value
    1. Calls "area_under_curve_estimator" function
    2. Compares with the expected value
    3. Prints estimated, expected and error value
    """

    def identity_function(x: float) -> float:
        """
        Represents identity function
        >>> [identity_function(x) for x in [-2.0, -1.0, 0.0, 1.0, 2.0]]
        [-2.0, -1.0, 0.0, 1.0, 2.0]
        """
        # Doc fix: the doctest previously called the undefined name
        # `function_to_integrate` instead of `identity_function`.
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    # Exact integral of y = x over [a, b] is (b^2 - a^2) / 2.
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")
+
+
def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """
    Area under curve y = sqrt(4 - x^2) where x lies in 0 to 2 is equal to pi
    """

    def function_to_integrate(x: float) -> float:
        """
        Represents semi-circle with radius 2
        >>> [function_to_integrate(x) for x in [-2.0, 0.0, 2.0]]
        [0.0, 2.0, 0.0]
        """
        return sqrt(4.0 - x * x)

    # A quarter of a radius-2 circle has area (1/4) * pi * 2^2 = pi.
    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/maths/monte_carlo_dice.py b/maths/monte_carlo_dice.py
new file mode 100644
index 000000000000..e8e3abe83a99
--- /dev/null
+++ b/maths/monte_carlo_dice.py
@@ -0,0 +1,47 @@
+from __future__ import annotations
+
+import random
+
+
class Dice:
    """A fair six-sided dice."""

    NUM_SIDES = 6

    def __init__(self):
        """Initialize a six sided dice."""
        self.sides = list(range(1, Dice.NUM_SIDES + 1))

    def roll(self):
        """Return one uniformly random side."""
        return random.choice(self.sides)

    def __str__(self):
        # Bug fix: the original defined `_str_`, a misspelled dunder that
        # Python never calls; `__str__` hooks into str()/print().
        return "Fair Dice"

    _str_ = __str__  # backward-compatible alias for the old misspelled name
+
+
def throw_dice(num_throws: int, num_dice: int = 2) -> list[float]:
    """
    Return probability list of all possible sums when throwing dice.

    >>> random.seed(0)
    >>> throw_dice(10, 1)
    [10.0, 0.0, 30.0, 50.0, 10.0, 0.0]
    >>> throw_dice(100, 1)
    [19.0, 17.0, 17.0, 11.0, 23.0, 13.0]
    >>> throw_dice(1000, 1)
    [18.8, 15.5, 16.3, 17.6, 14.2, 17.6]
    >>> throw_dice(10000, 1)
    [16.35, 16.89, 16.93, 16.6, 16.52, 16.71]
    >>> throw_dice(10000, 2)
    [2.74, 5.6, 7.99, 11.26, 13.92, 16.7, 14.44, 10.63, 8.05, 5.92, 2.75]
    """
    thrown = [Dice() for _ in range(num_dice)]
    counts = [0] * (len(thrown) * Dice.NUM_SIDES + 1)
    for _ in range(num_throws):
        counts[sum(die.roll() for die in thrown)] += 1
    percentages = [round(count * 100 / num_throws, 2) for count in counts]
    # Sums below num_dice (e.g. 0 and 1 with two dice) can never occur.
    return percentages[num_dice:]
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/maths/newton_raphson.py b/maths/newton_raphson.py
index c08bcedc9a4d..f2b7cb9766d2 100644
--- a/maths/newton_raphson.py
+++ b/maths/newton_raphson.py
@@ -1,4 +1,4 @@
-'''
+"""
Author: P Shreyas Shetty
Implementation of Newton-Raphson method for solving equations of kind
f(x) = 0. It is an iterative method where solution is found by the expression
@@ -6,45 +6,49 @@
If no solution exists, then either the solution will not be found when iteration
limit is reached or the gradient f'(x[n]) approaches zero. In both cases, exception
is raised. If iteration limit is reached, try increasing maxiter.
- '''
-
+ """
import math as m
+
def calc_derivative(f, a, h=0.001):
- '''
- Calculates derivative at point a for function f using finite difference
- method
- '''
- return (f(a+h)-f(a-h))/(2*h)
-
-def newton_raphson(f, x0=0, maxiter=100, step=0.0001, maxerror=1e-6,logsteps=False):
-
- a = x0 #set the initial guess
+ """
+ Calculates derivative at point a for function f using finite difference
+ method
+ """
+ return (f(a + h) - f(a - h)) / (2 * h)
+
+
+def newton_raphson(f, x0=0, maxiter=100, step=0.0001, maxerror=1e-6, logsteps=False):
+
+ a = x0 # set the initial guess
steps = [a]
error = abs(f(a))
- f1 = lambda x:calc_derivative(f, x, h=step) #Derivative of f(x)
+ f1 = lambda x: calc_derivative(f, x, h=step) # noqa: E731 Derivative of f(x)
for _ in range(maxiter):
if f1(a) == 0:
raise ValueError("No converging solution found")
- a = a - f(a)/f1(a) #Calculate the next estimate
+ a = a - f(a) / f1(a) # Calculate the next estimate
if logsteps:
steps.append(a)
- error = abs(f(a))
if error < maxerror:
break
else:
- raise ValueError("Itheration limit reached, no converging solution found")
+ raise ValueError("Iteration limit reached, no converging solution found")
if logsteps:
- #If logstep is true, then log intermediate steps
+ # If logstep is true, then log intermediate steps
return a, error, steps
return a, error
-
-if __name__ == '__main__':
- import matplotlib.pyplot as plt
- f = lambda x:m.tanh(x)**2-m.exp(3*x)
- solution, error, steps = newton_raphson(f, x0=10, maxiter=1000, step=1e-6, logsteps=True)
+
+
+if __name__ == "__main__":
+ from matplotlib import pyplot as plt
+
+ f = lambda x: m.tanh(x) ** 2 - m.exp(3 * x) # noqa: E731
+ solution, error, steps = newton_raphson(
+ f, x0=10, maxiter=1000, step=1e-6, logsteps=True
+ )
plt.plot([abs(f(x)) for x in steps])
plt.xlabel("step")
plt.ylabel("error")
plt.show()
- print("solution = {%f}, error = {%f}" % (solution, error))
\ No newline at end of file
+ print(f"solution = {{{solution:f}}}, error = {{{error:f}}}")
diff --git a/maths/number_of_digits.py b/maths/number_of_digits.py
new file mode 100644
index 000000000000..3c0eb7b3863f
--- /dev/null
+++ b/maths/number_of_digits.py
@@ -0,0 +1,159 @@
+import math
+from timeit import timeit
+
+
def num_digits(n: int) -> int:
    """
    Find the number of digits in a number by repeated integer division.

    >>> num_digits(12345)
    5
    >>> num_digits(123)
    3
    >>> num_digits(0)
    1
    >>> num_digits(-1)
    1
    >>> num_digits(-123456)
    6
    """
    # abs() so the sign of negative numbers is not counted.
    remaining = abs(n) // 10
    count = 1  # every integer, including 0, has at least one digit
    while remaining:
        count += 1
        remaining //= 10
    return count
+
+
def num_digits_fast(n: int) -> int:
    """
    Find the number of digits in a number via the base-10 logarithm.
    abs() is used as logarithm for negative numbers is not defined.

    >>> num_digits_fast(12345)
    5
    >>> num_digits_fast(123)
    3
    >>> num_digits_fast(0)
    1
    >>> num_digits_fast(-1)
    1
    >>> num_digits_fast(-123456)
    6
    """
    if n == 0:
        # log(0) is undefined, but 0 still has one digit.
        return 1
    return math.floor(math.log(abs(n), 10) + 1)
+
+
def num_digits_faster(n: int) -> int:
    """
    Find the number of digits in a number via its decimal string form.
    abs() is used for negative numbers (drops the sign character).

    >>> num_digits_faster(12345)
    5
    >>> num_digits_faster(123)
    3
    >>> num_digits_faster(0)
    1
    >>> num_digits_faster(-1)
    1
    >>> num_digits_faster(-123456)
    6
    """
    decimal_form = str(abs(n))
    return len(decimal_form)
+
+
def benchmark() -> None:
    """
    Benchmark code for comparing the 3 digit-counting functions,
    with 3 different length int values (small_num, medium_num and
    large_num are module-level globals defined under __main__).

    Refactored from three copy-pasted print stanzas into data-driven
    loops; the printed output format is unchanged.
    """

    def timing(func_name: str, value_name: str) -> float:
        # timeit re-imports this module as `z`, so both the function and
        # the value are resolved as module-level attributes of __main__.
        return timeit(f"z.{func_name}(z.{value_name})", setup="import __main__ as z")

    for value_name, value in (
        ("small_num", small_num),
        ("medium_num", medium_num),
        ("large_num", large_num),
    ):
        print(f"\nFor {value_name} = ", value, ":")
        # num_digits needs two tabs to align its column; the longer
        # function names need only one.
        for func, tabs in (
            (num_digits, "\t\t"),
            (num_digits_fast, "\t"),
            (num_digits_faster, "\t"),
        ):
            print(
                f"> {func.__name__}()",
                f"{tabs}ans =",
                func(value),
                "\ttime =",
                timing(func.__name__, value_name),
                "seconds",
            )
+
+
if __name__ == "__main__":
    # Module-level values read by benchmark() through __main__.
    small_num = 262144
    medium_num = 1125899906842624
    large_num = 1267650600228229401496703205376
    benchmark()
    import doctest

    doctest.testmod()
diff --git a/maths/numerical_integration.py b/maths/numerical_integration.py
new file mode 100644
index 000000000000..87184a76b740
--- /dev/null
+++ b/maths/numerical_integration.py
@@ -0,0 +1,66 @@
+"""
+Approximates the area under the curve using the trapezoidal rule
+"""
+
+from typing import Callable, Union
+
+
def trapezoidal_area(
    fnc: Callable[[Union[int, float]], Union[int, float]],
    x_start: Union[int, float],
    x_end: Union[int, float],
    steps: int = 100,
) -> float:

    """
    Treats curve as a collection of linear lines and sums the area of the
    trapezium shape they form
    :param fnc: a function which defines a curve
    :param x_start: left end point to indicate the start of line segment
    :param x_end: right end point to indicate end of line segment
    :param steps: an accuracy gauge; more steps increases the accuracy
    :return: a float representing the length of the curve

    >>> def f(x):
    ...    return 5
    >>> '%.3f' % trapezoidal_area(f, 12.0, 14.0, 1000)
    '10.000'

    >>> def f(x):
    ...    return 9*x**2
    >>> '%.4f' % trapezoidal_area(f, -4.0, 0, 10000)
    '192.0000'

    >>> '%.4f' % trapezoidal_area(f, -4.0, 4.0, 10000)
    '384.0000'
    """
    # Width of each trapezium the interval is chopped into.
    step_size = (x_end - x_start) / steps
    prev_x = x_start
    prev_y = fnc(x_start)
    total_area = 0.0

    for _ in range(steps):
        next_x = step_size + prev_x
        next_y = fnc(next_x)
        # Trapezium area: mean of the two parallel sides times the width.
        total_area += abs(next_y + prev_y) * (next_x - prev_x) / 2

        # Slide the window one segment to the right.
        prev_x, prev_y = next_x, next_y

    return total_area
+
+
if __name__ == "__main__":

    def f(x):
        # Example curve: a simple cubic.
        return x ** 3

    print("f(x) = x^3")
    print("The area between the curve, x = -10, x = 10 and the x axis is:")
    i = 10
    # Demonstrate convergence by repeating with 10x more steps each round.
    while i <= 100000:
        area = trapezoidal_area(f, -5, 5, i)
        print(f"with {i} steps: {area}")
        i *= 10
diff --git a/maths/perfect_cube.py b/maths/perfect_cube.py
new file mode 100644
index 000000000000..9ad287e41e75
--- /dev/null
+++ b/maths/perfect_cube.py
@@ -0,0 +1,16 @@
def perfect_cube(n: int) -> bool:
    """
    Check if an integer is a perfect cube (the cube of some integer).

    Fixes the naive ``n ** (1 / 3)`` round-trip check, which fails for
    negative numbers and for cubes large enough that the float cube root
    is inexact (e.g. 10**18).

    >>> perfect_cube(27)
    True
    >>> perfect_cube(4)
    False
    >>> perfect_cube(-27)
    True
    >>> perfect_cube(10 ** 18)
    True
    >>> perfect_cube(10 ** 18 + 1)
    False
    """
    # Round the float cube root to the nearest integer candidate, then
    # verify exactly with integer arithmetic.
    root = round(abs(n) ** (1 / 3))
    return root ** 3 == abs(n)
+
+
+if __name__ == "__main__":
+ print(perfect_cube(27))
+ print(perfect_cube(4))
diff --git a/maths/perfect_number.py b/maths/perfect_number.py
new file mode 100644
index 000000000000..148e988fb4c5
--- /dev/null
+++ b/maths/perfect_number.py
@@ -0,0 +1,34 @@
+"""
+== Perfect Number ==
+In number theory, a perfect number is a positive integer that is equal to the sum of
+its positive divisors, excluding the number itself.
+For example: 6 ==> divisors[1, 2, 3, 6]
+ Excluding 6, the sum(divisors) is 1 + 2 + 3 = 6
+ So, 6 is a Perfect Number
+
+Other examples of Perfect Numbers: 28, 486, ...
+
+https://en.wikipedia.org/wiki/Perfect_number
+"""
+
+
def perfect(number: int) -> bool:
    """
    Return True when `number` is a perfect number (a positive integer
    equal to the sum of its proper divisors).

    >>> perfect(27)
    False
    >>> perfect(28)
    True
    >>> perfect(29)
    False
    >>> perfect(6)
    True
    >>> perfect(0)
    False
    >>> perfect(-28)
    False

    Start from 1 because dividing by 0 will raise ZeroDivisionError.
    A number at most can be divisible by the half of the number except the number
    itself. For example, 6 is at most can be divisible by 3 except by 6 itself.
    """
    if number <= 0:
        # Perfect numbers are positive by definition; without this guard
        # the empty divisor sum (0) would wrongly report 0 as perfect.
        return False
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
+
+
+if __name__ == "__main__":
+ print("Program to check whether a number is a Perfect number or not...")
+ number = int(input("Enter number: ").strip())
+ print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
diff --git a/maths/perfect_square.py b/maths/perfect_square.py
new file mode 100644
index 000000000000..4393dcfbc774
--- /dev/null
+++ b/maths/perfect_square.py
@@ -0,0 +1,73 @@
+import math
+
+
def perfect_square(num: int) -> bool:
    """
    Check if a number is perfect square number or not
    :param num: the number to be checked
    :return: True if number is square number, otherwise False

    Fixes the float-based ``sqrt(num) * sqrt(num) == num`` check, which
    loses precision for large squares and raises ValueError for
    negative input.

    >>> perfect_square(9)
    True
    >>> perfect_square(16)
    True
    >>> perfect_square(1)
    True
    >>> perfect_square(0)
    True
    >>> perfect_square(10)
    False
    >>> perfect_square(-9)
    False
    >>> perfect_square((10 ** 10 + 1) ** 2)
    True
    """
    if num < 0:
        # Negative numbers are never squares of an integer.
        return False
    # math.isqrt keeps the whole check in exact integer arithmetic.
    root = math.isqrt(num)
    return root * root == num
+
+
def perfect_square_binary_search(n: int) -> bool:
    """
    Check if a number is perfect square using binary search.
    Time complexity : O(Log(n))
    Space complexity: O(1)

    >>> perfect_square_binary_search(9)
    True
    >>> perfect_square_binary_search(16)
    True
    >>> perfect_square_binary_search(1)
    True
    >>> perfect_square_binary_search(0)
    True
    >>> perfect_square_binary_search(10)
    False
    >>> perfect_square_binary_search(-1)
    False
    >>> perfect_square_binary_search(1.1)
    False
    >>> perfect_square_binary_search("a")
    Traceback (most recent call last):
        ...
    TypeError: '<=' not supported between instances of 'int' and 'str'
    >>> perfect_square_binary_search(None)
    Traceback (most recent call last):
        ...
    TypeError: '<=' not supported between instances of 'int' and 'NoneType'
    >>> perfect_square_binary_search([])
    Traceback (most recent call last):
        ...
    TypeError: '<=' not supported between instances of 'int' and 'list'
    """
    low, high = 0, n
    while low <= high:
        candidate = (low + high) // 2
        square = candidate ** 2
        if square == n:
            return True
        # Shrink the half of the range that cannot contain the root.
        if square > n:
            high = candidate - 1
        else:
            low = candidate + 1
    return False
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/maths/pi_monte_carlo_estimation.py b/maths/pi_monte_carlo_estimation.py
new file mode 100644
index 000000000000..20b46dddc6e5
--- /dev/null
+++ b/maths/pi_monte_carlo_estimation.py
@@ -0,0 +1,67 @@
+import random
+
+
class Point:
    """A 2D point used for Monte Carlo sampling."""

    def __init__(self, x: float, y: float) -> None:
        self.x = x
        self.y = y

    def is_in_unit_circle(self) -> bool:
        """
        True, if the point lies in the unit circle
        False, otherwise
        """
        # Compare squared distance against radius 1 (no sqrt needed).
        return self.x ** 2 + self.y ** 2 <= 1

    @classmethod
    def random_unit_square(cls):
        """
        Generates a point randomly drawn from the unit square [0, 1) x [0, 1).
        """
        return cls(x=random.random(), y=random.random())
+
+
def estimate_pi(number_of_simulations: int) -> float:
    """
    Generates an estimate of the mathematical constant PI.
    See https://en.wikipedia.org/wiki/Monte_Carlo_method#Overview

    Let U be drawn uniformly from the unit square [0, 1) x [0, 1).
    Then P[U in unit circle] = PI / 4, so PI = 4 * P[U in unit circle],
    and the empirical hit rate over many draws estimates that
    probability (https://en.wikipedia.org/wiki/Empirical_probability).
    """
    if number_of_simulations < 1:
        raise ValueError("At least one simulation is necessary to estimate PI.")

    hits = 0
    for _ in range(number_of_simulations):
        if Point.random_unit_square().is_in_unit_circle():
            hits += 1

    # PI = 4 * (empirical probability of landing in the circle)
    return 4 * hits / number_of_simulations
+
+
if __name__ == "__main__":
    # import doctest

    # doctest.testmod()
    from math import pi

    prompt = "Please enter the desired number of Monte Carlo simulations: "
    my_pi = estimate_pi(int(input(prompt).strip()))
    # Compare the estimate against math.pi to show the simulation error.
    print(f"An estimate of PI is {my_pi} with an error of {abs(my_pi - pi)}")
diff --git a/maths/polynomial_evaluation.py b/maths/polynomial_evaluation.py
new file mode 100644
index 000000000000..e929a2d02972
--- /dev/null
+++ b/maths/polynomial_evaluation.py
@@ -0,0 +1,54 @@
+from typing import Sequence
+
+
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial f(x) at specified point x and return the value.

    Arguments:
    poly -- the coeffiecients of a polynomial as an iterable in order of
            ascending degree (poly[i] multiplies x**i)
    x -- the point at which to evaluate the polynomial

    >>> evaluate_poly((0.0, 0.0, 5.0, 9.3, 7.0), 10.0)
    79800.0
    """
    total = 0
    for degree, coefficient in enumerate(poly):
        total += coefficient * x ** degree
    return total
+
+
def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial at specified point using Horner's method.

    Horner's method avoids explicit exponentiation: folding from the
    highest coefficient down, each step multiplies the running total by x
    and adds the next coefficient, giving O(n) multiplications.

    https://en.wikipedia.org/wiki/Horner's_method

    Arguments:
    poly -- the coeffiecients of a polynomial as an iterable in order of
            ascending degree
    x -- the point at which to evaluate the polynomial

    >>> horner((0.0, 0.0, 5.0, 9.3, 7.0), 10.0)
    79800.0
    """
    total = 0.0
    for coefficient in poly[::-1]:
        total = total * x + coefficient
    return total
+
+
+if __name__ == "__main__":
+ """
+ Example:
+ >>> poly = (0.0, 0.0, 5.0, 9.3, 7.0) # f(x) = 7.0x^4 + 9.3x^3 + 5.0x^2
+ >>> x = -13.0
+ >>> # f(-13) = 7.0(-13)^4 + 9.3(-13)^3 + 5.0(-13)^2 = 180339.9
+ >>> print(evaluate_poly(poly, x))
+ 180339.9
+ """
+ poly = (0.0, 0.0, 5.0, 9.3, 7.0)
+ x = 10.0
+ print(evaluate_poly(poly, x))
+ print(horner(poly, x))
diff --git a/maths/power_using_recursion.py b/maths/power_using_recursion.py
new file mode 100644
index 000000000000..f82097f6d8ec
--- /dev/null
+++ b/maths/power_using_recursion.py
@@ -0,0 +1,36 @@
+"""
+== Raise base to the power of exponent using recursion ==
+ Input -->
+ Enter the base: 3
+ Enter the exponent: 4
+ Output -->
+ 3 to the power of 4 is 81
+ Input -->
+ Enter the base: 2
+ Enter the exponent: 0
+ Output -->
+ 2 to the power of 0 is 1
+"""
+
+
def power(base: int, exponent: int) -> float:
    """
    Return ``base`` raised to the non-negative ``exponent``, recursively.

    >>> power(3, 4)
    81
    >>> power(2, 0)
    1
    >>> all(power(base, exponent) == pow(base, exponent)
    ...     for base in range(-10, 10) for exponent in range(10))
    True

    NOTE(review): a negative exponent never reaches the base case and
    recurses until RecursionError; callers must pass exponent >= 0 (the
    __main__ block below uses abs() for that reason).
    """
    # exponent == 0 is the base case: anything to the power 0 is 1.
    return base * power(base, (exponent - 1)) if exponent else 1
+
+
+if __name__ == "__main__":
+ print("Raise base to the power of exponent using recursion...")
+ base = int(input("Enter the base: ").strip())
+ exponent = int(input("Enter the exponent: ").strip())
+ result = power(base, abs(exponent))
+ if exponent < 0: # power() does not properly deal w/ negative exponents
+ result = 1 / result
+ print(f"{base} to the power of {exponent} is {result}")
diff --git a/maths/prime_check.py b/maths/prime_check.py
new file mode 100644
index 000000000000..e2bcb7b8f151
--- /dev/null
+++ b/maths/prime_check.py
@@ -0,0 +1,58 @@
+"""Prime Check."""
+
+import math
+import unittest
+
+
def prime_check(number: int) -> bool:
    """Check whether ``number`` is prime.

    A prime number has exactly two positive factors: 1 and itself.
    """
    # 2 and 3 are the only primes below 4; handle them up front.
    if number in (2, 3):
        return True
    # Negatives, 0, 1 and every other even number are composite or non-prime.
    if number < 2 or number % 2 == 0:
        return False
    # Only odd divisors up to floor(sqrt(number)) need to be tested.
    return all(number % divisor for divisor in range(3, int(math.sqrt(number) + 1), 2))
+
+
class Test(unittest.TestCase):
    """Unit tests for prime_check covering primes and non-primes."""

    def test_primes(self):
        # The first ten primes must all be recognized.
        for prime in (2, 3, 5, 7, 11, 13, 17, 19, 23, 29):
            self.assertTrue(prime_check(prime))

    def test_not_primes(self):
        self.assertFalse(
            prime_check(-19),
            "Negative numbers are excluded by definition of prime numbers.",
        )
        self.assertFalse(
            prime_check(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            prime_check(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        # Assorted composites: squares, semiprimes and a three-factor product.
        for composite in (2 * 2, 2 * 3, 3 * 3, 3 * 5, 3 * 5 * 7):
            self.assertFalse(prime_check(composite))
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/maths/prime_factors.py b/maths/prime_factors.py
new file mode 100644
index 000000000000..e520ae3a6d04
--- /dev/null
+++ b/maths/prime_factors.py
@@ -0,0 +1,52 @@
+"""
+python/black : True
+"""
+from __future__ import annotations
+
+
def prime_factors(n: int) -> list[int]:
    """
    Returns prime factors of n as a list.

    >>> prime_factors(0)
    []
    >>> prime_factors(100)
    [2, 2, 5, 5]
    >>> prime_factors(2560)
    [2, 2, 2, 2, 2, 2, 2, 2, 2, 5]
    >>> prime_factors(0.02)
    []
    >>> x = prime_factors(10**241)  # doctest: +NORMALIZE_WHITESPACE
    >>> x == [2]*241 + [5]*241
    True
    >>> prime_factors('hello')
    Traceback (most recent call last):
        ...
    TypeError: '<=' not supported between instances of 'int' and 'str'
    """
    factors = []
    divisor = 2
    # Trial division: once divisor * divisor exceeds the remaining n, whatever
    # is left (if > 1) is itself prime.
    while divisor * divisor <= n:
        quotient, remainder = divmod(n, divisor)
        if remainder:
            divisor += 1
        else:
            factors.append(divisor)
            n = quotient
    if n > 1:
        factors.append(n)
    return factors
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/maths/prime_numbers.py b/maths/prime_numbers.py
new file mode 100644
index 000000000000..38bebddeee41
--- /dev/null
+++ b/maths/prime_numbers.py
@@ -0,0 +1,70 @@
+import math
+from typing import Generator
+
+
def slow_primes(max: int) -> Generator[int, None, None]:
    """
    Yield every prime up to ``max`` (inclusive) by trial-dividing each
    candidate by all smaller numbers.

    >>> list(slow_primes(0))
    []
    >>> list(slow_primes(-10))
    []
    >>> list(slow_primes(25))
    [2, 3, 5, 7, 11, 13, 17, 19, 23]
    >>> list(slow_primes(10000))[-1]
    9973
    """
    for candidate in range(2, max + 1):
        # A candidate is prime when no smaller number >= 2 divides it.
        if all(candidate % divisor for divisor in range(2, candidate)):
            yield candidate
+
+
def primes(max: int) -> Generator[int, None, None]:
    """
    Yield every prime up to ``max`` (inclusive), testing divisors only up to
    the square root of each candidate.

    >>> list(primes(0))
    []
    >>> list(primes(-10))
    []
    >>> list(primes(25))
    [2, 3, 5, 7, 11, 13, 17, 19, 23]
    >>> list(primes(10000))[-1]
    9973
    """
    for candidate in range(2, max + 1):
        # A composite number always has a factor <= its square root.
        bound = int(math.sqrt(candidate)) + 1
        if all(candidate % divisor for divisor in range(2, bound)):
            yield candidate
+
+
if __name__ == "__main__":
    number = int(input("Calculate primes up to:\n>> ").strip())
    for ret in primes(number):
        print(ret)

    # Let's benchmark them side-by-side...
    from timeit import timeit

    # NOTE(review): both functions are generators, and the timed statements
    # never consume them -- this measures generator *construction* only, not
    # the sieving work.  Confirm whether list(...) was intended.
    print(timeit("slow_primes(1_000_000)", setup="from __main__ import slow_primes"))
    print(timeit("primes(1_000_000)", setup="from __main__ import primes"))
diff --git a/maths/prime_sieve_eratosthenes.py b/maths/prime_sieve_eratosthenes.py
new file mode 100644
index 000000000000..8d60e48c2140
--- /dev/null
+++ b/maths/prime_sieve_eratosthenes.py
@@ -0,0 +1,47 @@
+# flake8: noqa
+
+"""
+Sieve of Eratosthenes
+
+Input : n =10
+Output: 2 3 5 7
+
+Input : n = 20
+Output: 2 3 5 7 11 13 17 19
+
+you can read in detail about this at
+https://en.wikipedia.org/wiki/Sieve_of_Eratosthenes
+"""
+
+
def prime_sieve_eratosthenes(num):
    """
    print the prime numbers up to n

    >>> prime_sieve_eratosthenes(10)
    2,3,5,7,
    >>> prime_sieve_eratosthenes(20)
    2,3,5,7,11,13,17,19,
    """
    # One flag per number; index i tells whether i is still considered prime.
    is_prime = [True] * (num + 1)

    candidate = 2
    while candidate * candidate <= num:
        if is_prime[candidate]:
            # Cross off every multiple, starting at candidate squared --
            # smaller multiples were crossed off by smaller primes.
            for multiple in range(candidate * candidate, num + 1, candidate):
                is_prime[multiple] = False
        candidate += 1

    for number in range(2, num + 1):
        if is_prime[number]:
            print(number, end=",")
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
+ num = int(input())
+
+ prime_sieve_eratosthenes(num)
diff --git a/maths/pythagoras.py b/maths/pythagoras.py
new file mode 100644
index 000000000000..69a17731a0fd
--- /dev/null
+++ b/maths/pythagoras.py
@@ -0,0 +1,33 @@
+"""Uses Pythagoras theorem to calculate the distance between two points in space."""
+
+import math
+
+
class Point:
    """A point in 3-dimensional Euclidean space."""

    def __init__(self, x: float, y: float, z: float) -> None:
        self.x = x  # x-coordinate
        self.y = y  # y-coordinate
        self.z = z  # z-coordinate

    def __repr__(self) -> str:
        # Mirrors the constructor call, e.g. "Point(2, -1, 7)".
        return f"Point({self.x}, {self.y}, {self.z})"
+
+
def distance(a: Point, b: Point) -> float:
    """
    Return the Euclidean distance between points ``a`` and ``b``.

    >>> distance(Point(2, -1, 7), Point(1, -3, 5))
    3.0
    """
    # The sum of squares of real numbers is never negative, so the abs() the
    # original wrapped around it was redundant and has been removed.
    return math.sqrt((b.x - a.x) ** 2 + (b.y - a.y) ** 2 + (b.z - a.z) ** 2)
+
+
def test_distance() -> None:
    """
    Doctest-only wrapper exercising distance(); the body itself does nothing.

    >>> point1 = Point(2, -1, 7)
    >>> point2 = Point(1, -3, 5)
    >>> print(f"Distance from {point1} to {point2} is {distance(point1, point2)}")
    Distance from Point(2, -1, 7) to Point(1, -3, 5) is 3.0
    """
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/maths/qr_decomposition.py b/maths/qr_decomposition.py
new file mode 100644
index 000000000000..5e15fede4f2a
--- /dev/null
+++ b/maths/qr_decomposition.py
@@ -0,0 +1,71 @@
+import numpy as np
+
+
def qr_householder(A):
    """Return a QR-decomposition of the matrix A using Householder reflection.

    The QR-decomposition decomposes the matrix A of shape (m, n) into an
    orthogonal matrix Q of shape (m, m) and an upper triangular matrix R of
    shape (m, n). Note that the matrix A does not have to be square. This
    method of decomposing A uses the Householder reflection, which is
    numerically stable and of complexity O(n^3).

    https://en.wikipedia.org/wiki/QR_decomposition#Using_Householder_reflections

    Arguments:
    A -- a numpy.ndarray of shape (m, n)

    Note: several optimizations can be made for numeric efficiency, but this is
    intended to demonstrate how it would be represented in a mathematics
    textbook. In cases where efficiency is particularly important, an optimized
    version from BLAS should be used.

    >>> A = np.array([[12, -51, 4], [6, 167, -68], [-4, 24, -41]], dtype=float)
    >>> Q, R = qr_householder(A)

    >>> # check that the decomposition is correct
    >>> np.allclose(Q@R, A)
    True

    >>> # check that Q is orthogonal
    >>> np.allclose(Q@Q.T, np.eye(A.shape[0]))
    True
    >>> np.allclose(Q.T@Q, np.eye(A.shape[0]))
    True

    >>> # check that R is upper triangular
    >>> np.allclose(np.triu(R), R)
    True
    """
    m, n = A.shape
    # Number of Householder steps; one per column up to the smaller dimension.
    t = min(m, n)
    Q = np.eye(m)
    R = A.copy()

    for k in range(t - 1):
        # select a column of modified matrix A':
        x = R[k:, [k]]
        # construct first basis vector
        e1 = np.zeros_like(x)
        e1[0] = 1.0
        # determine scaling factor
        alpha = np.linalg.norm(x)
        # construct vector v for Householder reflection
        # (the sign of x[0] is used to avoid cancellation in x + alpha*e1)
        v = x + np.sign(x[0]) * alpha * e1
        v /= np.linalg.norm(v)

        # construct the Householder matrix
        Q_k = np.eye(m - k) - 2.0 * v @ v.T
        # pad with ones and zeros as necessary
        Q_k = np.block([[np.eye(k), np.zeros((k, m - k))], [np.zeros((m - k, k)), Q_k]])

        # accumulate the orthogonal factor and zero out column k of R
        Q = Q @ Q_k.T
        R = Q_k @ R

    return Q, R
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/maths/quadratic_equations_complex_numbers.py b/maths/quadratic_equations_complex_numbers.py
new file mode 100644
index 000000000000..01a411bc560d
--- /dev/null
+++ b/maths/quadratic_equations_complex_numbers.py
@@ -0,0 +1,38 @@
+from __future__ import annotations
+
+from cmath import sqrt
+
+
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """
    Given the numerical coefficients a, b and c,
    calculates the roots for any quadratic equation of the form ax^2 + bx + c

    >>> quadratic_roots(a=1, b=3, c=-4)
    (1.0, -4.0)
    >>> quadratic_roots(5, 6, 1)
    (-0.2, -1.0)
    >>> quadratic_roots(1, -6, 25)
    ((3+4j), (3-4j))
    """
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")

    # Quadratic formula: (-b +/- sqrt(b^2 - 4ac)) / (2a), in the complex plane.
    discriminant_sqrt = sqrt(b * b - 4 * a * c)
    denominator = 2 * a

    def _simplify(root: complex) -> complex:
        # Collapse purely-real roots down to plain floats.
        return root if root.imag else root.real

    return (
        _simplify((-b + discriminant_sqrt) / denominator),
        _simplify((-b - discriminant_sqrt) / denominator),
    )
+
+
def main():
    """Demonstrate quadratic_roots() on the equation 5x^2 + 6x + 1 = 0."""
    solutions = quadratic_roots(a=5, b=6, c=1)
    print("The solutions are: {} and {}".format(*solutions))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/maths/radians.py b/maths/radians.py
new file mode 100644
index 000000000000..465467a3ba08
--- /dev/null
+++ b/maths/radians.py
@@ -0,0 +1,29 @@
+from math import pi
+
+
def radians(degree: float) -> float:
    """
    Coverts the given angle from degrees to radians
    https://en.wikipedia.org/wiki/Radian

    >>> radians(180)
    3.141592653589793
    >>> radians(92)
    1.6057029118347832

    >>> from math import radians as math_radians
    >>> all(abs(radians(i)-math_radians(i)) <= 0.00000001 for i in range(-2, 361))
    True
    """
    # There are 180/pi degrees per radian; divide to convert.
    degrees_per_radian = 180 / pi
    return degree / degrees_per_radian
+
+
+if __name__ == "__main__":
+ from doctest import testmod
+
+ testmod()
diff --git a/maths/radix2_fft.py b/maths/radix2_fft.py
new file mode 100644
index 000000000000..de87071e5440
--- /dev/null
+++ b/maths/radix2_fft.py
@@ -0,0 +1,180 @@
+"""
+Fast Polynomial Multiplication using radix-2 fast Fourier Transform.
+"""
+
+import mpmath # for roots of unity
+import numpy as np
+
+
class FFT:
    """
    Fast Polynomial Multiplication using radix-2 fast Fourier Transform.

    Reference:
    https://en.wikipedia.org/wiki/Cooley%E2%80%93Tukey_FFT_algorithm#The_radix-2_DIT_case

    For polynomials of degree m and n the algorithms has complexity
    O(n*logn + m*logm)

    The main part of the algorithm is split in two parts:
        1) __DFT: We compute the discrete fourier transform (DFT) of A and B using a
        bottom-up dynamic approach -
        2) __multiply: Once we obtain the DFT of A*B, we can similarly
        invert it to obtain A*B

    The class FFT takes two polynomials A and B with complex coefficients as arguments;
    The two polynomials should be represented as a sequence of coefficients starting
    from the free term. Thus, for instance x + 2*x^3 could be represented as
    [0,1,0,2] or (0,1,0,2). The constructor adds some zeros at the end so that the
    polynomials have the same length which is a power of 2 at least the length of
    their product.

    Example:

    Create two polynomials as sequences
    >>> A = [0, 1, 0, 2]  # x+2x^3
    >>> B = (2, 3, 4, 0)  # 2+3x+4x^2

    Create an FFT object with them
    >>> x = FFT(A, B)

    Print product
    >>> print(x.product)  # 2x + 3x^2 + 8x^3 + 6x^4 + 8x^5
    [(-0+0j), (2+0j), (3+0j), (8+0j), (6+0j), (8+0j)]

    __str__ test
    >>> print(x)
    A = 0*x^0 + 1*x^1 + 0*x^2 + 2*x^3
    B = 2*x^0 + 3*x^1 + 4*x^2
    A*B = (-0+0j)*x^0 + (2+0j)*x^1 + (3+0j)*x^2 + (8+0j)*x^3 + (6+0j)*x^4 + (8+0j)*x^5
    """

    # Immutable defaults instead of mutable list defaults; both are copied
    # into fresh lists immediately below, so callers' sequences never alias.
    def __init__(self, polyA=(0,), polyB=(0,)):
        # Input as list
        self.polyA = list(polyA)[:]
        self.polyB = list(polyB)[:]

        # Remove trailing (highest-degree) zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.C_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1))
        )

        while len(self.polyA) < self.C_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.C_max_length:
            self.polyB.append(0)
        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.C_max_length, k=1))

        # The product
        self.product = self.__multiply()

    # Discrete fourier transform of A and B
    def __DFT(self, which):
        if which == "A":
            dft = [[x] for x in self.polyA]
        else:
            dft = [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        # Bottom-up butterfly passes, halving the number of columns each time.
        next_ncol = self.C_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root ** next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.C_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.C_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    # multiply the DFTs of A and B and find A*B
    def __multiply(self):
        dftA = self.__DFT("A")
        dftB = self.__DFT("B")
        # Pointwise product in the frequency domain.
        inverseC = [[dftA[i] * dftB[i] for i in range(self.C_max_length)]]
        del dftA
        del dftB

        # Corner Case
        if len(inverseC[0]) <= 1:
            return inverseC[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.C_max_length:
            new_inverseC = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.C_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverseC[i].append(
                        (
                            inverseC[i][j]
                            + inverseC[i][j + self.C_max_length // next_ncol]
                        )
                        / 2
                    )
                    # Odd positions
                    new_inverseC[i + next_ncol // 2].append(
                        (
                            inverseC[i][j]
                            - inverseC[i][j + self.C_max_length // next_ncol]
                        )
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverseC = new_inverseC
            next_ncol *= 2
        # Unpack and round away floating-point noise in the coefficients.
        inverseC = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverseC]

        # Remove leading 0's
        while inverseC[-1] == 0:
            inverseC.pop()
        return inverseC

    # Overwrite __str__ for print(); Shows A, B and A*B
    def __str__(self):
        # BUG FIX: enumerate() yields (index, value); the original unpacked
        # each pair as (coef, i), printing positions as coefficients and
        # coefficients as exponents.
        A = "A = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A])
        )
        B = "B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B])
        )
        C = "A*B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.product)
        )

        return "\n".join((A, B, C))
+
+
+# Unit tests
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/maths/relu.py b/maths/relu.py
new file mode 100644
index 000000000000..458c6bd5c391
--- /dev/null
+++ b/maths/relu.py
@@ -0,0 +1,40 @@
+"""
+This script demonstrates the implementation of the ReLU function.
+
+It's a kind of activation function defined as the positive part of its argument in the
+context of neural network.
+The function takes a vector of K real numbers as input and then argmax(x, 0).
+After through ReLU, the element of the vector always 0 or real number.
+
+Script inspired from its corresponding Wikipedia article
+https://en.wikipedia.org/wiki/Rectifier_(neural_networks)
+"""
+from __future__ import annotations
+
+import numpy as np
+
+
def relu(vector: list[float]):
    """
    Implements the relu function

    Parameters:
        vector (np.array,list,tuple): A numpy array of shape (1,n)
        consisting of real values or a similar list,tuple

    Returns:
        relu_vec (np.array): The input numpy array, after applying
        relu.

    >>> vec = np.array([-1, 0, 5])
    >>> relu(vec)
    array([0, 0, 5])
    """
    # Element-wise maximum against zero clamps all negatives to 0.
    return np.maximum(vector, 0)
+
+
+if __name__ == "__main__":
+ print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
diff --git a/maths/runge_kutta.py b/maths/runge_kutta.py
new file mode 100644
index 000000000000..383797daa5ac
--- /dev/null
+++ b/maths/runge_kutta.py
@@ -0,0 +1,44 @@
+import numpy as np
+
+
def runge_kutta(f, y0, x0, h, x_end):
    """
    Calculate the numeric solution at each step to the ODE f(x, y) using RK4

    https://en.wikipedia.org/wiki/Runge-Kutta_methods

    Arguments:
    f -- The ode as a function of x and y
    y0 -- the initial value for y
    x0 -- the initial value for x
    h -- the stepsize
    x_end -- the end value for x

    >>> # the exact solution is math.exp(x)
    >>> def f(x, y):
    ...     return y
    >>> y = runge_kutta(f, 1, 0.0, 0.01, 5)
    >>> y[-1]
    148.41315904125113
    """
    num_steps = int(np.ceil((x_end - x0) / h))
    y = np.zeros((num_steps + 1,))
    y[0] = y0
    x = x0

    for step in range(num_steps):
        # Four slope samples: at the start, twice at the midpoint, at the end.
        slope1 = f(x, y[step])
        slope2 = f(x + 0.5 * h, y[step] + 0.5 * h * slope1)
        slope3 = f(x + 0.5 * h, y[step] + 0.5 * h * slope2)
        slope4 = f(x + h, y[step] + h * slope3)
        # Weighted average advances the solution one step.
        y[step + 1] = y[step] + (1 / 6) * h * (slope1 + 2 * slope2 + 2 * slope3 + slope4)
        x += h

    return y
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/maths/segmented_sieve.py b/maths/segmented_sieve.py
index 52ca6fbe601d..c1cc497ad33e 100644
--- a/maths/segmented_sieve.py
+++ b/maths/segmented_sieve.py
@@ -1,46 +1,51 @@
+"""Segmented Sieve."""
+
import math
+
def sieve(n):
    """Segmented Sieve: return a list of all primes <= n.

    First runs a classic sieve up to sqrt(n), then uses those base primes to
    sieve successive segments of width ~sqrt(n), keeping memory bounded.
    """
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    prime = []

    # Classic sieve of Eratosthenes over the first segment [2, sqrt(n)].
    while start <= end:
        if temp[start]:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(low + end - 1, n)

    # Sieve each remaining segment [low, high] with the base primes.
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            # Smallest multiple of `each` that is >= low.
            t = math.floor(low / each) * each
            if t < low:
                t += each

            for j in range(t, high + 1, each):
                temp[j - low] = False

        for j in range(len(temp)):
            if temp[j]:
                prime.append(j + low)

        low = high + 1
        high = min(low + end - 1, n)

    return prime


if __name__ == "__main__":
    # Guarded so that importing this module no longer sieves a million
    # numbers (and prints them) as an import-time side effect.
    print(sieve(10 ** 6))
diff --git a/maths/series/__init__.py b/maths/series/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/maths/series/geometric_series.py b/maths/series/geometric_series.py
new file mode 100644
index 000000000000..d12382e6d8c4
--- /dev/null
+++ b/maths/series/geometric_series.py
@@ -0,0 +1,63 @@
+"""
+This is a pure Python implementation of the Geometric Series algorithm
+https://en.wikipedia.org/wiki/Geometric_series
+
+Run the doctests with the following command:
+python3 -m doctest -v geometric_series.py
+or
+python -m doctest -v geometric_series.py
+For manual testing run:
+python3 geometric_series.py
+"""
+
+
def geometric_series(nth_term: int, start_term_a: int, common_ratio_r: int) -> list:
    """Pure Python implementation of Geometric Series algorithm
    :param nth_term: The last term (nth term of Geometric Series)
    :param start_term_a : The first term of Geometric Series
    :param common_ratio_r : The common ratio between all the terms
    :return: The Geometric Series starting from first term a and multiple of common
        ration with first term with increase in power till last term (nth term)
    Examples:
    >>> geometric_series(4, 2, 2)
    [2, '4.0', '8.0', '16.0']
    >>> geometric_series(4.0, 2.0, 2.0)
    [2.0, '4.0', '8.0', '16.0']
    >>> geometric_series(4.1, 2.1, 2.1)
    [2.1, '4.41', '9.261000000000001', '19.448100000000004']
    >>> geometric_series(4, 2, -2)
    [2, '-4.0', '8.0', '-16.0']
    >>> geometric_series(4, -2, 2)
    [-2, '-4.0', '-8.0', '-16.0']
    >>> geometric_series(-4, 2, 2)
    []
    >>> geometric_series(0, 100, 500)
    []
    >>> geometric_series(1, 1, 1)
    [1]
    >>> geometric_series(0, 0, 0)
    []
    """
    # NOTE(review): the "" guard exists because __main__ feeds raw input()
    # strings straight in; for an empty field the function returns "" (a str),
    # not a list -- confirm callers tolerate the mixed return type.
    if "" in (nth_term, start_term_a, common_ratio_r):
        return ""
    series = []
    power = 1
    multiple = common_ratio_r
    for _ in range(int(nth_term)):
        if series == []:
            # First term is appended unchanged (hence the un-stringified 2 in
            # the doctests); every subsequent term is stringified.
            series.append(start_term_a)
        else:
            power += 1
            series.append(str(float(start_term_a) * float(multiple)))
            multiple = pow(float(common_ratio_r), power)
    return series
+
+
+if __name__ == "__main__":
+ nth_term = input("Enter the last number (n term) of the Geometric Series")
+ start_term_a = input("Enter the starting term (a) of the Geometric Series")
+ common_ratio_r = input(
+ "Enter the common ratio between two terms (r) of the Geometric Series"
+ )
+ print("Formula of Geometric Series => a + ar + ar^2 ... +ar^n")
+ print(geometric_series(nth_term, start_term_a, common_ratio_r))
diff --git a/maths/series/harmonic_series.py b/maths/series/harmonic_series.py
new file mode 100644
index 000000000000..91b5944583e4
--- /dev/null
+++ b/maths/series/harmonic_series.py
@@ -0,0 +1,46 @@
+"""
+This is a pure Python implementation of the Harmonic Series algorithm
+https://en.wikipedia.org/wiki/Harmonic_series_(mathematics)
+
+For doctests run following command:
+python -m doctest -v harmonic_series.py
+or
+python3 -m doctest -v harmonic_series.py
+
+For manual testing run:
+python3 harmonic_series.py
+"""
+
+
def harmonic_series(n_term: str) -> list:
    """Pure Python implementation of Harmonic Series algorithm

    :param n_term: The last (nth) term of Harmonic Series
    :return: The Harmonic Series starting from 1 to last (nth) term

    Examples:
    >>> harmonic_series(5)
    ['1', '1/2', '1/3', '1/4', '1/5']
    >>> harmonic_series(5.1)
    ['1', '1/2', '1/3', '1/4', '1/5']
    >>> harmonic_series(-5)
    []
    >>> harmonic_series(0)
    []
    >>> harmonic_series(1)
    ['1']
    """
    # Empty console input is passed straight back (matches the __main__ usage).
    if n_term == "":
        return n_term
    # First term is "1"; every later term 1/k is rendered as a fraction string.
    return [
        f"1/{denominator}" if denominator > 1 else "1"
        for denominator in range(1, int(n_term) + 1)
    ]
+
+
+if __name__ == "__main__":
+ nth_term = input("Enter the last number (nth term) of the Harmonic Series")
+ print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
+ print(harmonic_series(nth_term))
diff --git a/maths/series/p_series.py b/maths/series/p_series.py
new file mode 100644
index 000000000000..04019aed5a85
--- /dev/null
+++ b/maths/series/p_series.py
@@ -0,0 +1,48 @@
+"""
+This is a pure Python implementation of the P-Series algorithm
+https://en.wikipedia.org/wiki/Harmonic_series_(mathematics)#P-series
+
+For doctests run following command:
+python -m doctest -v p_series.py
+or
+python3 -m doctest -v p_series.py
+
+For manual testing run:
+python3 p_series.py
+"""
+
+
def p_series(nth_term: int, power: int) -> list:
    """Pure Python implementation of P-Series algorithm

    :return: The P-Series starting from 1 to last (nth) term

    Examples:
    >>> p_series(5, 2)
    [1, '1/4', '1/9', '1/16', '1/25']
    >>> p_series(-5, 2)
    []
    >>> p_series("", 1000)
    ''
    >>> p_series(0, 0)
    []
    >>> p_series(1, 1)
    [1]
    """
    # Empty console input is passed straight back (matches the __main__ usage).
    if nth_term == "":
        return nth_term
    last_term = int(nth_term)
    exponent = int(power)
    # First element is the int 1; later terms are "1/<base^p>" strings.
    return [
        1 if base == 1 else f"1/{pow(base, exponent)}"
        for base in range(1, last_term + 1)
    ]
+
+
+if __name__ == "__main__":
+ nth_term = input("Enter the last number (nth term) of the P-Series")
+ power = input("Enter the power for P-Series")
+ print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
+ print(p_series(nth_term, power))
diff --git a/maths/sieve_of_eratosthenes.py b/maths/sieve_of_eratosthenes.py
index 26c17fa6ffec..47a086546900 100644
--- a/maths/sieve_of_eratosthenes.py
+++ b/maths/sieve_of_eratosthenes.py
@@ -1,24 +1,65 @@
+"""
+Sieve of Eratosthones
+
+The sieve of Eratosthenes is an algorithm used to find prime numbers, less than or
+equal to a given value.
+Illustration:
+https://upload.wikimedia.org/wikipedia/commons/b/b9/Sieve_of_Eratosthenes_animation.gif
+Reference: https://en.wikipedia.org/wiki/Sieve_of_Eratosthenes
+
+doctest provider: Bruno Simas Hadlich (https://github.com/brunohadlich)
+Also thanks to Dmitry (https://github.com/LizardWizzard) for finding the problem
+"""
+
+
import math
-n = int(input("Enter n: "))
+from typing import List
+
+
def prime_sieve(num: int) -> List[int]:
    """
    Returns a list with all prime numbers up to n.

    >>> prime_sieve(50)
    [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]
    >>> prime_sieve(10)
    [2, 3, 5, 7]
    >>> prime_sieve(2)
    [2]
    >>> prime_sieve(1)
    []
    """
    if num <= 0:
        raise ValueError(f"{num}: Invalid input, please enter a positive integer.")

    sieve = [True] * (num + 1)
    prime = []
    end = int(math.sqrt(num))

    for start in range(2, end + 1):
        if not sieve[start]:
            continue
        # start is prime: record it and cross off all of its multiples.
        prime.append(start)
        for multiple in range(start * start, num + 1, start):
            sieve[multiple] = False

    # Every number above sqrt(num) that survived the sieve is also prime.
    prime.extend(j for j in range(end + 1, num + 1) if sieve[j])

    return prime
-print(sieve(n))
-
+
+if __name__ == "__main__":
+ print(prime_sieve(int(input("Enter a positive integer: ").strip())))
diff --git a/maths/sigmoid.py b/maths/sigmoid.py
new file mode 100644
index 000000000000..147588e8871f
--- /dev/null
+++ b/maths/sigmoid.py
@@ -0,0 +1,39 @@
+"""
+This script demonstrates the implementation of the Sigmoid function.
+
+The function takes a vector of K real numbers as input and then 1 / (1 + exp(-x)).
+After through Sigmoid, the element of the vector mostly 0 between 1. or 1 between -1.
+
+Script inspired from its corresponding Wikipedia article
+https://en.wikipedia.org/wiki/Sigmoid_function
+"""
+
+import numpy as np
+
+
def sigmoid(vector: np.array) -> np.array:
    """
    Implements the sigmoid function

    Parameters:
        vector (np.array): A numpy array of shape (1,n)
        consisting of real values

    Returns:
        sigmoid_vec (np.array): The input numpy array, after applying
        sigmoid.

    Examples:
    >>> sigmoid(np.array([-1.0, 1.0, 2.0]))
    array([0.26894142, 0.73105858, 0.88079708])

    >>> sigmoid(np.array([0.0]))
    array([0.5])
    """
    # sigmoid(x) = 1 / (1 + e^(-x)), applied element-wise.
    exponentials = np.exp(-vector)
    return 1 / (1 + exponentials)
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/maths/simpson_rule.py b/maths/simpson_rule.py
index 091c86c17f1b..d66dc39a7171 100644
--- a/maths/simpson_rule.py
+++ b/maths/simpson_rule.py
@@ -1,49 +1,51 @@
-
-'''
+"""
Numerical integration or quadrature for a smooth function f with known values at x_i
-This method is the classical approch of suming 'Equally Spaced Abscissas'
+This method is the classical approach of suming 'Equally Spaced Abscissas'
-method 2:
+method 2:
"Simpson Rule"
-'''
-from __future__ import print_function
+"""
def method_2(boundary, steps):
    """Approximate the integral of f over [boundary[0], boundary[1]] using
    Simpson's rule:  int(f) ~= h/3 * (f1 + 4*f2 + 2*f3 + ... + fn).

    Arguments:
    boundary -- two-element sequence [a, b] giving the integration limits
    steps -- number of sub-intervals (resolution)
    """
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 3.0) * f(a)
    cnt = 2
    for i in x_i:
        # Interior abscissas alternate Simpson weights 4, 2, 4, 2, ...
        y += (h / 3) * (4 - 2 * (cnt % 2)) * f(i)
        cnt += 1
    y += (h / 3.0) * f(b)
    return y


def make_points(a, b, h):
    """Yield the interior abscissas a+h, a+2h, ... strictly below b - h.

    NOTE(review): the `x < (b - h)` bound stops one point short of b, so the
    abscissa nearest b is never generated -- confirm this is intended.
    """
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    """Integrand; defaults to f(x) = x^2 -- edit to integrate another function."""
    y = (x - 0) * (x - 0)
    return y


def main():
    """Integrate f over [0, 1] with 10 steps and print the result."""
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_2(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
diff --git a/maths/softmax.py b/maths/softmax.py
new file mode 100644
index 000000000000..e021a7f8a6fe
--- /dev/null
+++ b/maths/softmax.py
@@ -0,0 +1,56 @@
+"""
+This script demonstrates the implementation of the Softmax function.
+
+Its a function that takes as input a vector of K real numbers, and normalizes
+it into a probability distribution consisting of K probabilities proportional
+to the exponentials of the input numbers. After softmax, the elements of the
+vector always sum up to 1.
+
+Script inspired from its corresponding Wikipedia article
+https://en.wikipedia.org/wiki/Softmax_function
+"""
+
+import numpy as np
+
+
def softmax(vector):
    """
    Implements the softmax function

    Parameters:
        vector (np.array,list,tuple): A numpy array of shape (1,n)
        consisting of real values or a similar list,tuple

    Returns:
        softmax_vec (np.array): The input numpy array after applying
        softmax.

    The softmax vector adds up to one. We need to ceil to mitigate for
    precision
    >>> np.ceil(np.sum(softmax([1,2,3,4])))
    1.0

    >>> vec = np.array([5,5])
    >>> softmax(vec)
    array([0.5, 0.5])

    >>> softmax([0])
    array([1.])
    """
    # Exponentiate every entry (e ~ 2.718), then normalize by the total so
    # the result is a probability distribution.
    exponentials = np.exp(vector)
    return exponentials / np.sum(exponentials)
+
+
+if __name__ == "__main__":
+ print(softmax((0,)))
diff --git a/maths/square_root.py b/maths/square_root.py
new file mode 100644
index 000000000000..b324c723037c
--- /dev/null
+++ b/maths/square_root.py
@@ -0,0 +1,64 @@
+import math
+
+
def fx(x: float, a: float) -> float:
    """Evaluate f(x) = x^2 - a, whose positive root is sqrt(a)."""
    return math.pow(x, 2) - a
+
+
def fx_derivative(x: float) -> float:
    """Evaluate f'(x) = 2x, the derivative of f(x) = x^2 - a."""
    return 2 * x
+
+
def get_initial_point(a: float) -> float:
    """Return a starting guess >= sqrt(a) by repeatedly squaring 2."""
    start = 2.0

    while start <= a:
        start = math.pow(start, 2)

    return start
+
+
def square_root_iterative(
    a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001
) -> float:
    """
    Square root is aproximated using Newtons method.
    https://en.wikipedia.org/wiki/Newton%27s_method

    >>> all(abs(square_root_iterative(i)-math.sqrt(i)) <= .00000000000001
    ...     for i in range(500))
    True

    >>> square_root_iterative(-1)
    Traceback (most recent call last):
        ...
    ValueError: math domain error

    >>> square_root_iterative(4)
    2.0

    >>> square_root_iterative(140)
    11.832159566199232
    """
    if a < 0:
        raise ValueError("math domain error")

    # Starting guess >= sqrt(a): square 2 until it passes a (inlined from the
    # module's get_initial_point helper -- identical arithmetic).
    value = 2.0
    while value <= a:
        value = math.pow(value, 2)

    # Newton iteration: x_{n+1} = x_n - f(x_n)/f'(x_n) with f(x) = x^2 - a.
    for _ in range(max_iter):
        previous = value
        value = value - (math.pow(value, 2) - a) / (2 * value)
        if abs(previous - value) < tolerance:
            break

    return value
+
+
+if __name__ == "__main__":
+ from doctest import testmod
+
+ testmod()
diff --git a/maths/sum_of_arithmetic_series.py b/maths/sum_of_arithmetic_series.py
new file mode 100644
index 000000000000..74eef0f18a12
--- /dev/null
+++ b/maths/sum_of_arithmetic_series.py
@@ -0,0 +1,23 @@
+# DarkCoder
def sum_of_series(first_term: float, common_diff: float, num_of_terms: int) -> float:
    """
    Find the sum of n terms in an arithmetic progression.

    :param first_term: first term of the progression
    :param common_diff: difference between consecutive terms
    :param num_of_terms: number of terms to sum
    :return: the series sum as a float

    >>> sum_of_series(1, 1, 10)
    55.0
    >>> sum_of_series(1, 10, 100)
    49600.0
    """
    # S_n = n/2 * (2a + (n - 1)d); named `total` to avoid shadowing builtin sum()
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    return total
+
+
def main():
    """Demonstrate sum_of_series on a small example."""
    print(sum_of_series(1, 1, 10))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
diff --git a/maths/sum_of_digits.py b/maths/sum_of_digits.py
new file mode 100644
index 000000000000..64da00d4634c
--- /dev/null
+++ b/maths/sum_of_digits.py
@@ -0,0 +1,149 @@
+from timeit import timeit
+
+
def sum_of_digits(n: int) -> int:
    """
    Find the sum of digits of a number.

    >>> sum_of_digits(12345)
    15
    >>> sum_of_digits(123)
    6
    >>> sum_of_digits(-123)
    6
    >>> sum_of_digits(0)
    0
    """
    n = abs(n)  # the sign does not contribute to the digit sum
    total = 0
    while n > 0:
        n, digit = divmod(n, 10)
        total += digit
    return total
+
+
def sum_of_digits_recursion(n: int) -> int:
    """
    Find the sum of digits of a number using recursion.

    >>> sum_of_digits_recursion(12345)
    15
    >>> sum_of_digits_recursion(123)
    6
    >>> sum_of_digits_recursion(-123)
    6
    >>> sum_of_digits_recursion(0)
    0
    """
    n = abs(n)
    # Base case: a single digit is its own digit sum; otherwise peel off the
    # last digit and recurse on the rest.  (The original delegated to the
    # iterative sum_of_digits here, so it was not actually recursive.)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)
+
+
def sum_of_digits_compact(n: int) -> int:
    """
    Find the sum of digits of a number via its decimal string form.

    >>> sum_of_digits_compact(12345)
    15
    >>> sum_of_digits_compact(123)
    6
    >>> sum_of_digits_compact(-123)
    6
    >>> sum_of_digits_compact(0)
    0
    """
    return sum(map(int, str(abs(n))))
+
+
def benchmark() -> None:
    """
    Benchmark code comparing the 3 digit-sum functions on 3 int sizes.

    Relies on the module-level globals small_num, medium_num and large_num
    being defined before it is called (see the __main__ block).
    The original body was the same nine print statements copy-pasted; this
    data-driven version produces byte-identical output.
    """

    def benchmark_a_function(func, value: int, name: str) -> None:
        # The shortest function name gets an extra tab so the columns line up.
        tabs = "\t\t" if func is sum_of_digits else "\t"
        print(
            f"> {func.__name__}()",
            f"{tabs}ans =",
            func(value),
            "\ttime =",
            timeit(f"z.{func.__name__}(z.{name})", setup="import __main__ as z"),
            "seconds",
        )

    for name, value in (
        ("small_num", small_num),
        ("medium_num", medium_num),
        ("large_num", large_num),
    ):
        print(f"\nFor {name} = ", value, ":")
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value, name)
+
+
if __name__ == "__main__":
    small_num = 262144  # 2**18
    medium_num = 1125899906842624  # 2**50
    large_num = 1267650600228229401496703205376  # 2**100
    # benchmark() uses timeit with its default iteration count, so this
    # takes a while when run as a script.
    benchmark()
    import doctest

    doctest.testmod()
diff --git a/maths/sum_of_geometric_progression.py b/maths/sum_of_geometric_progression.py
new file mode 100644
index 000000000000..f29dd8005cff
--- /dev/null
+++ b/maths/sum_of_geometric_progression.py
@@ -0,0 +1,28 @@
def sum_of_geometric_progression(
    first_term: int, common_ratio: int, num_of_terms: int
) -> float:
    """
    Return the sum of n terms in a geometric progression.

    >>> sum_of_geometric_progression(1, 2, 10)
    1023.0
    >>> sum_of_geometric_progression(1, 10, 5)
    11111.0
    >>> sum_of_geometric_progression(0, 2, 10)
    0.0
    >>> sum_of_geometric_progression(1, 0, 10)
    1.0
    >>> sum_of_geometric_progression(1, 2, 0)
    -0.0
    >>> sum_of_geometric_progression(-1, 2, 10)
    -1023.0
    >>> sum_of_geometric_progression(1, -2, 10)
    -341.0
    >>> sum_of_geometric_progression(1, 2, -10)
    -0.9990234375
    """
    if common_ratio == 1:
        # Every term equals first_term; cast so this branch also honours the
        # declared float return type (the original returned an int here).
        return float(num_of_terms * first_term)

    # S_n = a * (1 - r**n) / (1 - r)
    return (first_term / (1 - common_ratio)) * (1 - common_ratio ** num_of_terms)
diff --git a/maths/test_prime_check.py b/maths/test_prime_check.py
new file mode 100644
index 000000000000..b6389684af9e
--- /dev/null
+++ b/maths/test_prime_check.py
@@ -0,0 +1,8 @@
+"""
+Minimalist file that allows pytest to find and run the Test unittest. For details, see:
+http://doc.pytest.org/en/latest/goodpractices.html#conventions-for-python-test-discovery
+"""
+
# NOTE(review): importing Test makes pytest collect the unittest class defined
# in prime_check; the bare Test() call below only constructs the TestCase —
# presumably a smoke check that the import works. Verify it is intentional.
from .prime_check import Test

Test()
diff --git a/maths/trapezoidal_rule.py b/maths/trapezoidal_rule.py
index 52310c1ed3b0..9a4ddc8af66b 100644
--- a/maths/trapezoidal_rule.py
+++ b/maths/trapezoidal_rule.py
@@ -1,46 +1,50 @@
-'''
+"""
Numerical integration or quadrature for a smooth function f with known values at x_i
-This method is the classical approch of suming 'Equally Spaced Abscissas'
+This method is the classical approach of summing 'Equally Spaced Abscissas'
-method 1:
+method 1:
"extended trapezoidal rule"
-'''
-from __future__ import print_function
+"""
+
def method_1(boundary, steps):
    """Extended trapezoidal rule: int(f) = dx/2 * (f1 + 2*f2 + ... + fn)."""
    h = (boundary[1] - boundary[0]) / steps
    a, b = boundary[0], boundary[1]
    total = (h / 2.0) * f(a)
    for point in make_points(a, b, h):
        total += h * f(point)
    total += (h / 2.0) * f(b)
    return total
+
+
def make_points(a, b, h):
    """Yield interior abscissas a+h, a+2h, ... strictly below b - h."""
    current = a + h
    while current < (b - h):
        yield current
        current += h
+
+
def f(x):  # enter your function here
    """Integrand; defaults to f(x) = x^2."""
    value = x - 0
    return value * value
+
def main():
    """Integrate f over [0, 1] with 10 trapezoids and print the result."""
    lower = 0.0  # Lower bound of integration
    upper = 1.0  # Upper bound of integration
    steps = 10.0  # number of steps / resolution
    result = method_1([lower, upper], steps)
    print(f"y = {result}")


if __name__ == "__main__":
    main()
diff --git a/maths/ugly_numbers.py b/maths/ugly_numbers.py
new file mode 100644
index 000000000000..4451a68cdaad
--- /dev/null
+++ b/maths/ugly_numbers.py
@@ -0,0 +1,54 @@
+"""
+Ugly numbers are numbers whose only prime factors are 2, 3 or 5. The sequence
+1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, … shows the first 11 ugly numbers. By convention,
+1 is included.
+Given an integer n, we have to find the nth ugly number.
+
+For more details, refer this article
+https://www.geeksforgeeks.org/ugly-numbers/
+"""
+
+
def ugly_numbers(n: int) -> int:
    """
    Returns the nth ugly number (only prime factors 2, 3 and 5; 1 included).

    For n < 1 the first ugly number, 1, is returned.

    >>> ugly_numbers(100)
    1536
    >>> ugly_numbers(0)
    1
    >>> ugly_numbers(20)
    36
    >>> ugly_numbers(-5)
    1
    >>> ugly_numbers(-5.5)
    Traceback (most recent call last):
        ...
    TypeError: 'float' object cannot be interpreted as an integer
    """
    ugly_nums = [1]

    # i2/i3/i5 index the smallest ugly number not yet multiplied by 2/3/5;
    # next_* are the candidate multiples.
    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):  # the loop index itself is never used
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        # Advance every pointer that produced next_num (deduplicates e.g. 6).
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
+
+
if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    # The f-string "=" specifier (Python 3.8+) prints expression and value.
    print(f"{ugly_numbers(200) = }")
diff --git a/maths/volume.py b/maths/volume.py
new file mode 100644
index 000000000000..41d2331db3cb
--- /dev/null
+++ b/maths/volume.py
@@ -0,0 +1,132 @@
+"""
+Find Volumes of Various Shapes.
+
+Wikipedia reference: https://en.wikipedia.org/wiki/Volume
+"""
+from math import pi, pow
+from typing import Union
+
+
def vol_cube(side_length: Union[int, float]) -> float:
    """
    Return the volume of a cube with the given side length.

    >>> vol_cube(1)
    1.0
    >>> vol_cube(3)
    27.0
    """
    return float(side_length) ** 3
+
+
def vol_cuboid(width: float, height: float, length: float) -> float:
    """
    Return the volume of a cuboid: width * height * length.

    >>> vol_cuboid(1, 1, 1)
    1.0
    >>> vol_cuboid(1, 2, 3)
    6.0
    """
    volume = width * height * length
    return float(volume)
+
+
def vol_cone(area_of_base: float, height: float) -> float:
    """
    Return the volume of a cone: (1/3) * base area * height.

    Wikipedia reference: https://en.wikipedia.org/wiki/Cone

    >>> vol_cone(10, 3)
    10.0
    >>> vol_cone(1, 1)
    0.3333333333333333
    """
    base_times_height = area_of_base * height
    return base_times_height / 3.0
+
+
def vol_right_circ_cone(radius: float, height: float) -> float:
    """
    Return the volume of a right circular cone: (1/3) * pi * r^2 * h.

    Wikipedia reference: https://en.wikipedia.org/wiki/Cone

    >>> vol_right_circ_cone(2, 3)
    12.566370614359172
    """
    base_area = pi * pow(radius, 2)
    return base_area * height / 3.0
+
+
def vol_prism(area_of_base: float, height: float) -> float:
    """
    Return the volume of a prism: V = B * h.

    Wikipedia reference: https://en.wikipedia.org/wiki/Prism_(geometry)

    >>> vol_prism(10, 2)
    20.0
    >>> vol_prism(11, 1)
    11.0
    """
    return float(height * area_of_base)
+
+
def vol_pyramid(area_of_base: float, height: float) -> float:
    """
    Return the volume of a pyramid: (1/3) * B * h.

    Wikipedia reference: https://en.wikipedia.org/wiki/Pyramid_(geometry)

    >>> vol_pyramid(10, 3)
    10.0
    >>> vol_pyramid(1.5, 3)
    1.5
    """
    base_times_height = area_of_base * height
    return base_times_height / 3.0
+
+
def vol_sphere(radius: float) -> float:
    """
    Return the volume of a sphere: (4/3) * pi * r^3.

    Wikipedia reference: https://en.wikipedia.org/wiki/Sphere

    >>> vol_sphere(5)
    523.5987755982989
    >>> vol_sphere(1)
    4.1887902047863905
    """
    cubed_radius = pow(radius, 3)
    return 4 / 3 * pi * cubed_radius
+
+
def vol_circular_cylinder(radius: float, height: float) -> float:
    """
    Return the volume of a circular cylinder: pi * r^2 * h.

    Wikipedia reference: https://en.wikipedia.org/wiki/Cylinder

    >>> vol_circular_cylinder(1, 1)
    3.141592653589793
    >>> vol_circular_cylinder(4, 3)
    150.79644737231007
    """
    circle_area = pi * pow(radius, 2)
    return circle_area * height
+
+
def main():
    """Print the results of various volume calculations."""
    demonstrations = (
        ("Cube", vol_cube(2)),  # = 8
        ("Cuboid", vol_cuboid(2, 2, 2)),  # = 8
        ("Cone", vol_cone(2, 2)),  # ~= 1.33
        ("Right Circular Cone", vol_right_circ_cone(2, 2)),  # ~= 8.38
        ("Prism", vol_prism(2, 2)),  # = 4
        ("Pyramid", vol_pyramid(2, 2)),  # ~= 1.33
        ("Sphere", vol_sphere(2)),  # ~= 33.5
        ("Circular Cylinder", vol_circular_cylinder(2, 2)),  # ~= 25.1
    )
    print("Volumes:")
    for shape, volume in demonstrations:
        print(shape + ": " + str(volume))


if __name__ == "__main__":
    main()
diff --git a/maths/zellers_congruence.py b/maths/zellers_congruence.py
new file mode 100644
index 000000000000..2d4a22a0a5ba
--- /dev/null
+++ b/maths/zellers_congruence.py
@@ -0,0 +1,158 @@
+import argparse
+import datetime
+
+
def zeller(date_input: str) -> str:

    """
    Zellers Congruence Algorithm
    Find the day of the week for nearly any Gregorian or Julian calendar date

    >>> zeller('01-31-2010')
    'Your date 01-31-2010, is a Sunday!'

    Validate out of range month
    >>> zeller('13-31-2010')
    Traceback (most recent call last):
        ...
    ValueError: Month must be between 1 - 12
    >>> zeller('.2-31-2010')
    Traceback (most recent call last):
        ...
    ValueError: invalid literal for int() with base 10: '.2'

    Validate out of range date:
    >>> zeller('01-33-2010')
    Traceback (most recent call last):
        ...
    ValueError: Date must be between 1 - 31
    >>> zeller('01-.4-2010')
    Traceback (most recent call last):
        ...
    ValueError: invalid literal for int() with base 10: '.4'

    Validate second separator:
    >>> zeller('01-31*2010')
    Traceback (most recent call last):
        ...
    ValueError: Date separator must be '-' or '/'

    Validate first separator:
    >>> zeller('01^31-2010')
    Traceback (most recent call last):
        ...
    ValueError: Date separator must be '-' or '/'

    Validate out of range year:
    >>> zeller('01-31-8999')
    Traceback (most recent call last):
        ...
    ValueError: Year out of range. There has to be some sort of limit...right?

    Test null input:
    >>> zeller()
    Traceback (most recent call last):
        ...
    TypeError: zeller() missing 1 required positional argument: 'date_input'

    Test length of date_input:
    >>> zeller('')
    Traceback (most recent call last):
        ...
    ValueError: Must be 10 characters long
    >>> zeller('01-31-19082939')
    Traceback (most recent call last):
        ...
    ValueError: Must be 10 characters long"""

    # Days of the week for response; Zeller's result f uses Sunday=0 .. Saturday=6
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }

    # Maps datetime.weekday() (Monday=0 .. Sunday=6) onto the same Sunday=0
    # indexing used by `days`, for the cross-check below.
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m: int = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1: str = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d: int = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2: str = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y: int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?"
        )

    # Get datetime obj for validation (raises ValueError for impossible
    # calendar dates such as Feb 30)
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    # Zeller treats January and February as months 13 and 14 of the previous year
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c: int = int(str(y)[:2])  # century part of the year
    k: int = int(str(y)[2:])  # year within the century
    # t approximates the month-dependent term of Zeller's congruence; the
    # constants differ from the textbook floor(13(m+1)/5) form, but the result
    # is cross-checked against datetime below — TODO confirm equivalence.
    t: int = int(2.6 * m - 5.39)
    u: int = int(c / 4)
    v: int = int(k / 4)
    x: int = int(d + k)
    z: int = int(t + u + v + x)
    w: int = int(z - (2 * c))
    f: int = round(w % 7)  # w is an int, so round() is a no-op kept for safety
    # End math

    # Validate math against Python's own calendar implementation
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response: str = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
+
+
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Command-line interface, e.g.: python zellers_congruence.py 01-31-2010
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument(
        "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
    )
    args = parser.parse_args()
    # NOTE(review): the return value is discarded; printing it would make the
    # CLI actually show the answer — verify this is intentional.
    zeller(args.date_input)
diff --git a/matrix/__init__.py b/matrix/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/matrix/count_islands_in_matrix.py b/matrix/count_islands_in_matrix.py
new file mode 100644
index 000000000000..ad9c67fb8c1b
--- /dev/null
+++ b/matrix/count_islands_in_matrix.py
@@ -0,0 +1,36 @@
+# An island in matrix is a group of linked areas, all having the same value.
+# This code counts number of islands in a given matrix, with including diagonal
+# connections.
+
+
class matrix:  # Public class to implement a graph
    """Count islands (8-connected groups of 1-cells) in a 2D grid."""

    def __init__(self, row: int, col: int, graph: list):
        self.ROW = row  # number of rows in the grid
        self.COL = col  # number of columns in the grid
        self.graph = graph  # 2D grid of 0/1 cell values

    def is_safe(self, i: int, j: int, visited: list) -> bool:
        """Return True if (i, j) is inside the grid, unvisited and set."""
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list) -> None:
        """Depth-first flood fill over the 8 neighbours of (i, j).

        NOTE: recursion depth grows with island size; a very large island can
        exceed Python's recursion limit.
        """
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # row offsets of the 8 neighbours
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]  # column offsets
        visited[i][j] = True  # Make this cell visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:
        """Return the number of 8-connected islands of 1-cells."""
        visited = [[False for _ in range(self.COL)] for _ in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                # `not visited[...]` replaces the fragile `is False` identity test
                if not visited[i][j] and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
diff --git a/matrix/inverse_of_matrix.py b/matrix/inverse_of_matrix.py
new file mode 100644
index 000000000000..9deca6c3c08e
--- /dev/null
+++ b/matrix/inverse_of_matrix.py
@@ -0,0 +1,40 @@
+from __future__ import annotations
+
+from decimal import Decimal
+
+
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    """
    A matrix multiplied with its inverse gives the identity matrix.
    This function finds the inverse of a 2x2 matrix.
    If the determinant of a matrix is 0, its inverse does not exist.

    Sources for fixing inaccurate float arithmetic:
    https://stackoverflow.com/questions/6563058/how-do-i-use-accurate-float-arithmetic-in-python
    https://docs.python.org/3/library/decimal.html

    :raises ValueError: if the matrix is not 2x2 or is singular

    >>> inverse_of_matrix([[2, 5], [2, 0]])
    [[0.0, 0.5], [0.2, -0.2]]
    >>> inverse_of_matrix([[2.5, 5], [1, 2]])
    Traceback (most recent call last):
        ...
    ValueError: This matrix has no inverse.
    >>> inverse_of_matrix([[12, -16], [-9, 0]])
    [[0.0, -0.1111111111111111], [-0.0625, -0.08333333333333333]]
    >>> inverse_of_matrix([[12, 3], [16, 8]])
    [[0.16666666666666666, -0.0625], [-0.3333333333333333, 0.25]]
    >>> inverse_of_matrix([[10, 5], [3, 2.5]])
    [[0.25, -0.5], [-0.3, 1.0]]
    """
    # The formula below only applies to 2x2 matrices; fail loudly instead of
    # raising an obscure IndexError on other shapes.
    if len(matrix) != 2 or any(len(row) != 2 for row in matrix):
        raise ValueError("Please provide a matrix of size 2x2.")

    d = Decimal  # an abbreviation for conciseness
    # Determinant of a 2x2 matrix: ad - bc, computed exactly with Decimal.
    determinant = d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
    if determinant == 0:
        raise ValueError("This matrix has no inverse.")

    # Swap the main-diagonal elements and negate the off-diagonal ones.
    swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
    swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
    swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

    # Divide the adjugate by the determinant; `or 0.0` normalizes -0.0 to 0.0.
    return [[float(d(n) / determinant) or 0.0 for n in row] for row in swapped_matrix]
diff --git a/matrix/matrix_class.py b/matrix/matrix_class.py
new file mode 100644
index 000000000000..57a2fc45ffd1
--- /dev/null
+++ b/matrix/matrix_class.py
@@ -0,0 +1,358 @@
+# An OOP approach to representing and manipulating matrices
+
+
class Matrix:
    """
    Matrix object generated from a 2D array where each element is an array representing
    a row.
    Rows can contain type int or float.
    Common operations and information available.
    >>> rows = [
    ...     [1, 2, 3],
    ...     [4, 5, 6],
    ...     [7, 8, 9]
    ... ]
    >>> matrix = Matrix(rows)
    >>> print(matrix)
    [[1. 2. 3.]
     [4. 5. 6.]
     [7. 8. 9.]]

    Matrix rows and columns are available as 2D arrays
    >>> print(matrix.rows)
    [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    >>> print(matrix.columns())
    [[1, 4, 7], [2, 5, 8], [3, 6, 9]]

    Order is returned as a tuple
    >>> matrix.order
    (3, 3)

    Squareness and invertability are represented as bool
    >>> matrix.is_square
    True
    >>> matrix.is_invertable()
    False

    Identity, Minors, Cofactors and Adjugate are returned as Matrices. Inverse can be
    a Matrix or Nonetype
    >>> print(matrix.identity())
    [[1. 0. 0.]
     [0. 1. 0.]
     [0. 0. 1.]]
    >>> print(matrix.minors())
    [[-3. -6. -3.]
     [-6. -12. -6.]
     [-3. -6. -3.]]
    >>> print(matrix.cofactors())
    [[-3. 6. -3.]
     [6. -12. 6.]
     [-3. 6. -3.]]
    >>> # won't be apparent due to the nature of the cofactor matrix
    >>> print(matrix.adjugate())
    [[-3. 6. -3.]
     [6. -12. 6.]
     [-3. 6. -3.]]
    >>> print(matrix.inverse())
    None

    Determinant is an int, float, or Nonetype
    >>> matrix.determinant()
    0

    Negation, scalar multiplication, addition, subtraction, multiplication and
    exponentiation are available and all return a Matrix
    >>> print(-matrix)
    [[-1. -2. -3.]
     [-4. -5. -6.]
     [-7. -8. -9.]]
    >>> matrix2 = matrix * 3
    >>> print(matrix2)
    [[3. 6. 9.]
     [12. 15. 18.]
     [21. 24. 27.]]
    >>> print(matrix + matrix2)
    [[4. 8. 12.]
     [16. 20. 24.]
     [28. 32. 36.]]
    >>> print(matrix - matrix2)
    [[-2. -4. -6.]
     [-8. -10. -12.]
     [-14. -16. -18.]]
    >>> print(matrix ** 3)
    [[468. 576. 684.]
     [1062. 1305. 1548.]
     [1656. 2034. 2412.]]

    Matrices can also be modified
    >>> matrix.add_row([10, 11, 12])
    >>> print(matrix)
    [[1. 2. 3.]
     [4. 5. 6.]
     [7. 8. 9.]
     [10. 11. 12.]]
    >>> matrix2.add_column([8, 16, 32])
    >>> print(matrix2)
    [[3. 6. 9. 8.]
     [12. 15. 18. 16.]
     [21. 24. 27. 32.]]
    >>> print(matrix * matrix2)
    [[90. 108. 126. 136.]
     [198. 243. 288. 304.]
     [306. 378. 450. 472.]
     [414. 513. 612. 640.]]

    """

    def __init__(self, rows):
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            # Every row must have the same width and contain only numbers.
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    # MATRIX INFORMATION
    def columns(self):
        """Return the matrix columns as a 2D list (transpose of rows)."""
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self):
        return len(self.rows)

    @property
    def num_columns(self):
        return len(self.rows[0])

    @property
    def order(self):
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self):
        return self.order[0] == self.order[1]

    def identity(self):
        """Return the identity matrix of the same order."""
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self):
        """Return the determinant, or None for a non-square matrix."""
        if not self.is_square:
            return None
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return self.rows[0][0]
        if self.order == (2, 2):
            return (self.rows[0][0] * self.rows[1][1]) - (
                self.rows[0][1] * self.rows[1][0]
            )
        else:
            # Laplace expansion along the first row; compute the cofactor
            # matrix once instead of once per column.
            cofactors = self.cofactors()
            return sum(
                self.rows[0][column] * cofactors.rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self):
        return bool(self.determinant())

    def get_minor(self, row, column):
        """Determinant of the submatrix with `row` and `column` removed."""
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row, column):
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self):
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self):
        # Compute the minors matrix once instead of once per element.
        minors = self.minors()
        return Matrix(
            [
                [
                    minors.rows[row][column]
                    if (row + column) % 2 == 0
                    else minors.rows[row][column] * -1
                    for column in range(minors.num_columns)
                ]
                for row in range(minors.num_rows)
            ]
        )

    def adjugate(self):
        # Transpose of the cofactor matrix; compute cofactors once.
        cofactors = self.cofactors()
        values = [
            [cofactors.rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self):
        """Return the inverse Matrix, or None when the determinant is 0."""
        determinant = self.determinant()
        return None if not determinant else self.adjugate() * (1 / determinant)

    def __repr__(self):
        return str(self.rows)

    def __str__(self):
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            # BUGFIX: values must be converted to str before joining (the
            # original passed ints straight to str.join and raised TypeError).
            return "[[" + ". ".join(str(value) for value in self.rows[0]) + ".]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )

    # MATRIX MANIPULATION
    def add_row(self, row, position=None):
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix"
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column, position=None):
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats"
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix"
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    # MATRIX OPERATIONS
    def __eq__(self, other):
        if not isinstance(other, Matrix):
            raise TypeError("A Matrix can only be compared with another Matrix")
        return self.rows == other.rows

    def __ne__(self, other):
        return not self == other

    def __neg__(self):
        return self * -1

    def __add__(self, other):
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other):
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other):
        if isinstance(other, (int, float)):
            return Matrix([[element * other for element in row] for row in self.rows])
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix"
            )

    def __pow__(self, other):
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            # BUGFIX: is_invertable is a method; the original truth-tested the
            # bound method object itself, which is always truthy, so singular
            # matrices slipped through to inverse() and crashed later.
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power"
            )
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row, column):
        return sum(row[i] * column[i] for i in range(len(row)))
+
+
if __name__ == "__main__":
    import doctest

    # Run the extensive doctests on the Matrix class above.
    doctest.testmod()
diff --git a/matrix/matrix_multiplication_addition.py b/matrix/matrix_multiplication_addition.py
deleted file mode 100644
index dd50db729e43..000000000000
--- a/matrix/matrix_multiplication_addition.py
+++ /dev/null
@@ -1,75 +0,0 @@
-def add(matrix_a, matrix_b):
- rows = len(matrix_a)
- columns = len(matrix_a[0])
- matrix_c = []
- for i in range(rows):
- list_1 = []
- for j in range(columns):
- val = matrix_a[i][j] + matrix_b[i][j]
- list_1.append(val)
- matrix_c.append(list_1)
- return matrix_c
-
-def scalarMultiply(matrix , n):
- return [[x * n for x in row] for row in matrix]
-
-def multiply(matrix_a, matrix_b):
- matrix_c = []
- n = len(matrix_a)
- for i in range(n):
- list_1 = []
- for j in range(n):
- val = 0
- for k in range(n):
- val = val + matrix_a[i][k] * matrix_b[k][j]
- list_1.append(val)
- matrix_c.append(list_1)
- return matrix_c
-
-def identity(n):
- return [[int(row == column) for column in range(n)] for row in range(n)]
-
-def transpose(matrix):
- return map(list , zip(*matrix))
-
-def minor(matrix, row, column):
- minor = matrix[:row] + matrix[row + 1:]
- minor = [row[:column] + row[column + 1:] for row in minor]
- return minor
-
-def determinant(matrix):
- if len(matrix) == 1: return matrix[0][0]
-
- res = 0
- for x in range(len(matrix)):
- res += matrix[0][x] * determinant(minor(matrix , 0 , x)) * (-1) ** x
- return res
-
-def inverse(matrix):
- det = determinant(matrix)
- if det == 0: return None
-
- matrixMinor = [[] for _ in range(len(matrix))]
- for i in range(len(matrix)):
- for j in range(len(matrix)):
- matrixMinor[i].append(determinant(minor(matrix , i , j)))
-
- cofactors = [[x * (-1) ** (row + col) for col, x in enumerate(matrixMinor[row])] for row in range(len(matrix))]
- adjugate = transpose(cofactors)
- return scalarMultiply(adjugate , 1/det)
-
-def main():
- matrix_a = [[12, 10], [3, 9]]
- matrix_b = [[3, 4], [7, 4]]
- matrix_c = [[11, 12, 13, 14], [21, 22, 23, 24], [31, 32, 33, 34], [41, 42, 43, 44]]
- matrix_d = [[3, 0, 2], [2, 0, -2], [0, 1, 1]]
-
- print(add(matrix_a, matrix_b))
- print(multiply(matrix_a, matrix_b))
- print(identity(5))
- print(minor(matrix_c , 1 , 2))
- print(determinant(matrix_b))
- print(inverse(matrix_d))
-
-if __name__ == '__main__':
- main()
diff --git a/matrix/matrix_operation.py b/matrix/matrix_operation.py
new file mode 100644
index 000000000000..dca01f9c3183
--- /dev/null
+++ b/matrix/matrix_operation.py
@@ -0,0 +1,184 @@
+"""
+Functions for 2D matrix operations
+"""
+
+from __future__ import annotations
+
+
def add(*matrix_s: list[list]) -> list[list]:
    """
    Element-wise sum of any number of equally sized matrices.

    >>> add([[1,2],[3,4]],[[2,3],[4,5]])
    [[3, 5], [7, 9]]
    >>> add([[1.2,2.4],[3,4]],[[2,3],[4,5]])
    [[3.2, 5.4], [7, 9]]
    >>> add([[1, 2], [4, 5]], [[3, 7], [3, 4]], [[3, 5], [5, 7]])
    [[7, 14], [12, 16]]
    """
    if all(_check_not_integer(m) for m in matrix_s):
        # Every operand must match the first matrix's dimensions.
        for other in matrix_s[1:]:
            _verify_matrix_sizes(matrix_s[0], other)
        # zip(*matrix_s) pairs up corresponding rows; the inner zip pairs
        # corresponding entries, which are then summed.
        return [[sum(entries) for entries in zip(*rows)] for rows in zip(*matrix_s)]
+
+
def subtract(matrix_a: list[list], matrix_b: list[list]) -> list[list]:
    """
    Element-wise difference ``matrix_a - matrix_b`` of two equally sized matrices.

    >>> subtract([[1,2],[3,4]],[[2,3],[4,5]])
    [[-1, -1], [-1, -1]]
    >>> subtract([[1,2.5],[3,4]],[[2,3],[4,5.5]])
    [[-1, -0.5], [-1, -1.5]]
    """
    if (
        _check_not_integer(matrix_a)
        and _check_not_integer(matrix_b)
        and _verify_matrix_sizes(matrix_a, matrix_b)
    ):
        return [
            [a - b for a, b in zip(row_a, row_b)]
            for row_a, row_b in zip(matrix_a, matrix_b)
        ]
+
+
def scalar_multiply(matrix: list[list], n: int) -> list[list]:
    """
    Multiply every entry of ``matrix`` by the scalar ``n``.

    >>> scalar_multiply([[1,2],[3,4]],5)
    [[5, 10], [15, 20]]
    >>> scalar_multiply([[1.4,2.3],[3,4]],5)
    [[7.0, 11.5], [15, 20]]
    """
    scaled = []
    for row in matrix:
        scaled.append([element * n for element in row])
    return scaled
+
+
def multiply(matrix_a: list[list], matrix_b: list[list]) -> list[list]:
    """
    Matrix product of two conformable matrices.

    >>> multiply([[1,2],[3,4]],[[5,5],[7,5]])
    [[19, 15], [43, 35]]
    >>> multiply([[1,2.5],[3,4.5]],[[5,5],[7,5]])
    [[22.5, 17.5], [46.5, 37.5]]
    >>> multiply([[1, 2, 3]], [[2], [3], [4]])
    [[20]]
    """
    if _check_not_integer(matrix_a) and _check_not_integer(matrix_b):
        rows, cols = _verify_matrix_sizes(matrix_a, matrix_b)

        # Inner dimensions must agree: cols(a) == rows(b).
        if cols[0] != rows[1]:
            raise ValueError(
                f"Cannot multiply matrix of dimensions ({rows[0]},{cols[0]}) "
                f"and ({rows[1]},{cols[1]})"
            )
        # Each entry is the dot product of a row of matrix_a with a
        # column of matrix_b (zip(*matrix_b) iterates columns).
        return [
            [sum(a * b for a, b in zip(row, col)) for col in zip(*matrix_b)]
            for row in matrix_a
        ]
+
+
def identity(n: int) -> list[list]:
    """
    :param n: dimension for nxn matrix
    :type n: int
    :return: Identity matrix of shape [n, n]
    >>> identity(3)
    [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
    """
    # Coerce to int so e.g. a float 3.0 still builds a 3x3 matrix.
    size = int(n)
    return [[1 if r == c else 0 for c in range(size)] for r in range(size)]
+
+
+def transpose(matrix: list[list], return_map: bool = True) -> list[list]:
+ """
+ >>> transpose([[1,2],[3,4]]) # doctest: +ELLIPSIS
+