From 8ac8850295a650eed83c95ae957d34c6c4bf47b2 Mon Sep 17 00:00:00 2001 From: czgdp1807 Date: Sat, 26 Jul 2025 17:59:50 +0530 Subject: [PATCH 01/47] Add Meson build system --- meson.build | 7 ++++ pydatastructs/meson.build | 68 +++++++++++++++++++++++++++++++++++++++ pyproject.toml | 3 ++ 3 files changed, 78 insertions(+) create mode 100644 meson.build create mode 100644 pydatastructs/meson.build create mode 100644 pyproject.toml diff --git a/meson.build b/meson.build new file mode 100644 index 000000000..46b3a7b1d --- /dev/null +++ b/meson.build @@ -0,0 +1,7 @@ +project('pydatastructs', 'cpp', + version : '1.0.1-dev', + default_options : ['cpp_std=c++17']) + +python = import('python').find_installation(pure: false) + +subdir('pydatastructs') diff --git a/pydatastructs/meson.build b/pydatastructs/meson.build new file mode 100644 index 000000000..446c92435 --- /dev/null +++ b/pydatastructs/meson.build @@ -0,0 +1,68 @@ +python = import('python').find_installation(pure: false) + +# Install pure python sources +install_subdir( + '.', + install_dir: python.get_install_dir(subdir: 'pydatastructs'), +) + +# utils extension modules +python.extension_module( + 'pydatastructs.utils._backend.cpp._nodes', + 'utils/_backend/cpp/nodes.cpp', + install: true, + subdir: 'pydatastructs/utils' +) +python.extension_module( + 'pydatastructs.utils._backend.cpp._graph_utils', + 'utils/_backend/cpp/graph_utils.cpp', + install: true, + subdir: 'pydatastructs/utils' +) + +# linear_data_structures extension modules +python.extension_module( + 'pydatastructs.linear_data_structures._backend.cpp._arrays', + 'linear_data_structures/_backend/cpp/arrays/arrays.cpp', + install: true, + subdir: 'pydatastructs/linear_data_structures' +) +python.extension_module( + 'pydatastructs.linear_data_structures._backend.cpp._algorithms', + 'linear_data_structures/_backend/cpp/algorithms/algorithms.cpp', + install: true, + subdir: 'pydatastructs/linear_data_structures' +) + +# miscellaneous_data_structures 
extension module +python.extension_module( + 'pydatastructs.miscellaneous_data_structures._backend.cpp._stack', + 'miscellaneous_data_structures/_backend/cpp/stack/stack.cpp', + install: true, + subdir: 'pydatastructs/miscellaneous_data_structures' +) + +# trees extension module +python.extension_module( + 'pydatastructs.trees._backend.cpp._trees', + 'trees/_backend/cpp/trees.cpp', + install: true, + subdir: 'pydatastructs/trees' +) + +# graphs extension modules +py_include = include_directories('utils/_backend/cpp') +python.extension_module( + 'pydatastructs.graphs._backend.cpp._graph', + 'graphs/_backend/cpp/graph.cpp', + include_directories: py_include, + install: true, + subdir: 'pydatastructs/graphs' +) +python.extension_module( + 'pydatastructs.graphs._backend.cpp._algorithms', + 'graphs/_backend/cpp/algorithms.cpp', + include_directories: py_include, + install: true, + subdir: 'pydatastructs/graphs' +) diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 000000000..abbc7e154 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,3 @@ +[build-system] +requires = ["meson-python"] +build-backend = "mesonpy" From 583d35db13af9191f25801b7fa35371977ce687e Mon Sep 17 00:00:00 2001 From: czgdp1807 Date: Sat, 26 Jul 2025 21:07:08 +0530 Subject: [PATCH 02/47] Add spin config and install Python sources --- pydatastructs/graphs/meson.build | 38 ++++++++++ pydatastructs/graphs/tests/meson.build | 12 +++ .../linear_data_structures/meson.build | 33 +++++++++ .../linear_data_structures/tests/meson.build | 15 ++++ pydatastructs/meson.build | 74 ++----------------- .../miscellaneous_data_structures/meson.build | 31 ++++++++ .../tests/meson.build | 16 ++++ pydatastructs/strings/meson.build | 12 +++ pydatastructs/strings/tests/meson.build | 11 +++ pydatastructs/trees/meson.build | 22 ++++++ pydatastructs/trees/tests/meson.build | 14 ++++ pydatastructs/utils/meson.build | 28 +++++++ pydatastructs/utils/tests/meson.build | 11 +++ pyproject.toml | 7 ++ 14 files changed, 
258 insertions(+), 66 deletions(-) create mode 100644 pydatastructs/graphs/meson.build create mode 100644 pydatastructs/graphs/tests/meson.build create mode 100644 pydatastructs/linear_data_structures/meson.build create mode 100644 pydatastructs/linear_data_structures/tests/meson.build create mode 100644 pydatastructs/miscellaneous_data_structures/meson.build create mode 100644 pydatastructs/miscellaneous_data_structures/tests/meson.build create mode 100644 pydatastructs/strings/meson.build create mode 100644 pydatastructs/strings/tests/meson.build create mode 100644 pydatastructs/trees/meson.build create mode 100644 pydatastructs/trees/tests/meson.build create mode 100644 pydatastructs/utils/meson.build create mode 100644 pydatastructs/utils/tests/meson.build diff --git a/pydatastructs/graphs/meson.build b/pydatastructs/graphs/meson.build new file mode 100644 index 000000000..ecd832908 --- /dev/null +++ b/pydatastructs/graphs/meson.build @@ -0,0 +1,38 @@ +python = import('python').find_installation(pure: false) + +python.install_sources( + [ + '__init__.py', + '_extensions.py', + 'adjacency_list.py', + 'adjacency_matrix.py', + 'algorithms.py', + 'graph.py' + ], + subdir: 'pydatastructs/graphs' +) + +python.install_sources( + ['_backend/__init__.py', '_backend/cpp/__init__.py'], + subdir: 'pydatastructs/graphs/_backend' +) + +py_include = include_directories('../utils/_backend/cpp') + +python.extension_module( + 'pydatastructs.graphs._backend.cpp._graph', + '_backend/cpp/graph.cpp', + include_directories: py_include, + install: true, + subdir: 'pydatastructs/graphs' +) + +python.extension_module( + 'pydatastructs.graphs._backend.cpp._algorithms', + '_backend/cpp/algorithms.cpp', + include_directories: py_include, + install: true, + subdir: 'pydatastructs/graphs' +) + +subdir('tests') diff --git a/pydatastructs/graphs/tests/meson.build b/pydatastructs/graphs/tests/meson.build new file mode 100644 index 000000000..e887b63a9 --- /dev/null +++ 
b/pydatastructs/graphs/tests/meson.build @@ -0,0 +1,12 @@ +python = import('python').find_installation(pure: false) + +python.install_sources( + [ + '__init__.py', + 'test_adjacency_list.py', + 'test_adjacency_matrix.py', + 'test_algorithms.py' + ], + subdir: 'pydatastructs/graphs/tests', + install_tag: 'tests' +) diff --git a/pydatastructs/linear_data_structures/meson.build b/pydatastructs/linear_data_structures/meson.build new file mode 100644 index 000000000..bf30078d5 --- /dev/null +++ b/pydatastructs/linear_data_structures/meson.build @@ -0,0 +1,33 @@ +python = import('python').find_installation(pure: false) + +python.install_sources( + [ + '__init__.py', + '_extensions.py', + 'algorithms.py', + 'arrays.py', + 'linked_lists.py' + ], + subdir: 'pydatastructs/linear_data_structures' +) + +python.install_sources( + ['_backend/__init__.py', '_backend/cpp/__init__.py'], + subdir: 'pydatastructs/linear_data_structures/_backend' +) + +python.extension_module( + 'pydatastructs.linear_data_structures._backend.cpp._arrays', + '_backend/cpp/arrays/arrays.cpp', + install: true, + subdir: 'pydatastructs/linear_data_structures' +) + +python.extension_module( + 'pydatastructs.linear_data_structures._backend.cpp._algorithms', + '_backend/cpp/algorithms/algorithms.cpp', + install: true, + subdir: 'pydatastructs/linear_data_structures' +) + +subdir('tests') diff --git a/pydatastructs/linear_data_structures/tests/meson.build b/pydatastructs/linear_data_structures/tests/meson.build new file mode 100644 index 000000000..343f068f4 --- /dev/null +++ b/pydatastructs/linear_data_structures/tests/meson.build @@ -0,0 +1,15 @@ +python = import('python').find_installation(pure: false) + +python.install_sources( + [ + '__init__.py', + 'benchmarks/__init__.py', + 'benchmarks/test_algorithms.py', + 'benchmarks/test_arrays.py', + 'test_algorithms.py', + 'test_arrays.py', + 'test_linked_lists.py' + ], + subdir: 'pydatastructs/linear_data_structures/tests', + install_tag: 'tests' +) diff --git 
a/pydatastructs/meson.build b/pydatastructs/meson.build index 446c92435..f7e116460 100644 --- a/pydatastructs/meson.build +++ b/pydatastructs/meson.build @@ -1,68 +1,10 @@ python = import('python').find_installation(pure: false) -# Install pure python sources -install_subdir( - '.', - install_dir: python.get_install_dir(subdir: 'pydatastructs'), -) - -# utils extension modules -python.extension_module( - 'pydatastructs.utils._backend.cpp._nodes', - 'utils/_backend/cpp/nodes.cpp', - install: true, - subdir: 'pydatastructs/utils' -) -python.extension_module( - 'pydatastructs.utils._backend.cpp._graph_utils', - 'utils/_backend/cpp/graph_utils.cpp', - install: true, - subdir: 'pydatastructs/utils' -) - -# linear_data_structures extension modules -python.extension_module( - 'pydatastructs.linear_data_structures._backend.cpp._arrays', - 'linear_data_structures/_backend/cpp/arrays/arrays.cpp', - install: true, - subdir: 'pydatastructs/linear_data_structures' -) -python.extension_module( - 'pydatastructs.linear_data_structures._backend.cpp._algorithms', - 'linear_data_structures/_backend/cpp/algorithms/algorithms.cpp', - install: true, - subdir: 'pydatastructs/linear_data_structures' -) - -# miscellaneous_data_structures extension module -python.extension_module( - 'pydatastructs.miscellaneous_data_structures._backend.cpp._stack', - 'miscellaneous_data_structures/_backend/cpp/stack/stack.cpp', - install: true, - subdir: 'pydatastructs/miscellaneous_data_structures' -) - -# trees extension module -python.extension_module( - 'pydatastructs.trees._backend.cpp._trees', - 'trees/_backend/cpp/trees.cpp', - install: true, - subdir: 'pydatastructs/trees' -) - -# graphs extension modules -py_include = include_directories('utils/_backend/cpp') -python.extension_module( - 'pydatastructs.graphs._backend.cpp._graph', - 'graphs/_backend/cpp/graph.cpp', - include_directories: py_include, - install: true, - subdir: 'pydatastructs/graphs' -) -python.extension_module( - 
'pydatastructs.graphs._backend.cpp._algorithms', - 'graphs/_backend/cpp/algorithms.cpp', - include_directories: py_include, - install: true, - subdir: 'pydatastructs/graphs' -) +python.install_sources(['__init__.py'], subdir: 'pydatastructs') + +subdir('utils') +subdir('linear_data_structures') +subdir('miscellaneous_data_structures') +subdir('trees') +subdir('graphs') +subdir('strings') diff --git a/pydatastructs/miscellaneous_data_structures/meson.build b/pydatastructs/miscellaneous_data_structures/meson.build new file mode 100644 index 000000000..0cbe06950 --- /dev/null +++ b/pydatastructs/miscellaneous_data_structures/meson.build @@ -0,0 +1,31 @@ +python = import('python').find_installation(pure: false) + +python.install_sources( + [ + '__init__.py', + '_extensions.py', + 'algorithms.py', + 'multiset.py', + 'sparse_table.py', + 'disjoint_set.py', + 'queue.py', + 'binomial_trees.py', + 'segment_tree.py', + 'stack.py' + ], + subdir: 'pydatastructs/miscellaneous_data_structures' +) + +python.install_sources( + ['_backend/__init__.py', '_backend/cpp/__init__.py'], + subdir: 'pydatastructs/miscellaneous_data_structures/_backend' +) + +python.extension_module( + 'pydatastructs.miscellaneous_data_structures._backend.cpp._stack', + '_backend/cpp/stack/stack.cpp', + install: true, + subdir: 'pydatastructs/miscellaneous_data_structures' +) + +subdir('tests') diff --git a/pydatastructs/miscellaneous_data_structures/tests/meson.build b/pydatastructs/miscellaneous_data_structures/tests/meson.build new file mode 100644 index 000000000..9841338be --- /dev/null +++ b/pydatastructs/miscellaneous_data_structures/tests/meson.build @@ -0,0 +1,16 @@ +python = import('python').find_installation(pure: false) + +python.install_sources( + [ + '__init__.py', + 'test_binomial_trees.py', + 'test_disjoint_set.py', + 'test_multiset.py', + 'test_queue.py', + 'test_range_query_dynamic.py', + 'test_range_query_static.py', + 'test_stack.py' + ], + subdir: 
'pydatastructs/miscellaneous_data_structures/tests', + install_tag: 'tests' +) diff --git a/pydatastructs/strings/meson.build b/pydatastructs/strings/meson.build new file mode 100644 index 000000000..5a588232d --- /dev/null +++ b/pydatastructs/strings/meson.build @@ -0,0 +1,12 @@ +python = import('python').find_installation(pure: false) + +python.install_sources( + [ + '__init__.py', + 'algorithms.py', + 'trie.py' + ], + subdir: 'pydatastructs/strings' +) + +subdir('tests') diff --git a/pydatastructs/strings/tests/meson.build b/pydatastructs/strings/tests/meson.build new file mode 100644 index 000000000..30f1da937 --- /dev/null +++ b/pydatastructs/strings/tests/meson.build @@ -0,0 +1,11 @@ +python = import('python').find_installation(pure: false) + +python.install_sources( + [ + '__init__.py', + 'test_algorithms.py', + 'test_trie.py' + ], + subdir: 'pydatastructs/strings/tests', + install_tag: 'tests' +) diff --git a/pydatastructs/trees/meson.build b/pydatastructs/trees/meson.build new file mode 100644 index 000000000..d30cf494a --- /dev/null +++ b/pydatastructs/trees/meson.build @@ -0,0 +1,22 @@ +python = import('python').find_installation(pure: false) + +python.install_sources( + [ + '__init__.py', + '_extensions.py', + 'binary_trees.py', + 'heaps.py', + 'm_ary_trees.py', + 'space_partitioning_trees.py' + ], + subdir: 'pydatastructs/trees' +) + +python.extension_module( + 'pydatastructs.trees._backend.cpp._trees', + '_backend/cpp/trees.cpp', + install: true, + subdir: 'pydatastructs/trees' +) + +subdir('tests') diff --git a/pydatastructs/trees/tests/meson.build b/pydatastructs/trees/tests/meson.build new file mode 100644 index 000000000..fcafdff37 --- /dev/null +++ b/pydatastructs/trees/tests/meson.build @@ -0,0 +1,14 @@ +python = import('python').find_installation(pure: false) + +python.install_sources( + [ + '__init__.py', + 'benchmarks/test_binary_trees.py', + 'test_binary_trees.py', + 'test_heaps.py', + 'test_m_ary_trees.py', + 
'test_space_partitioning_tree.py' + ], + subdir: 'pydatastructs/trees/tests', + install_tag: 'tests' +) diff --git a/pydatastructs/utils/meson.build b/pydatastructs/utils/meson.build new file mode 100644 index 000000000..17a285eee --- /dev/null +++ b/pydatastructs/utils/meson.build @@ -0,0 +1,28 @@ +python = import('python').find_installation(pure: false) + +python.install_sources( + [ + '__init__.py', + '_extensions.py', + 'misc_util.py', + 'raises_util.py', + 'testing_util.py' + ], + subdir: 'pydatastructs/utils' +) + +python.extension_module( + 'pydatastructs.utils._backend.cpp._nodes', + '_backend/cpp/nodes.cpp', + install: true, + subdir: 'pydatastructs/utils' +) + +python.extension_module( + 'pydatastructs.utils._backend.cpp._graph_utils', + '_backend/cpp/graph_utils.cpp', + install: true, + subdir: 'pydatastructs/utils' +) + +subdir('tests') diff --git a/pydatastructs/utils/tests/meson.build b/pydatastructs/utils/tests/meson.build new file mode 100644 index 000000000..880f40987 --- /dev/null +++ b/pydatastructs/utils/tests/meson.build @@ -0,0 +1,11 @@ +python = import('python').find_installation(pure: false) + +python.install_sources( + [ + '__init__.py', + 'test_misc_util.py', + 'test_code_quality.py' + ], + subdir: 'pydatastructs/utils/tests', + install_tag: 'tests' +) diff --git a/pyproject.toml b/pyproject.toml index abbc7e154..e0a56a44f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,3 +1,10 @@ [build-system] requires = ["meson-python"] build-backend = "mesonpy" + +[tool.spin] +package = "pydatastructs" + +[tool.spin.commands] +Build = ["spin.cmds.meson.build"] +Test = ["spin.cmds.meson.test"] From 30359b3b4227960f37a24a2bdaf4c2f6e04a5d07 Mon Sep 17 00:00:00 2001 From: czgdp1807 Date: Sat, 26 Jul 2025 21:52:18 +0530 Subject: [PATCH 03/47] Use spin build system and update CI --- .github/workflows/ci.yml | 104 +++++++++--------- environment.yml | 2 + pydatastructs/graphs/meson.build | 8 +- .../linear_data_structures/meson.build | 8 +- 
.../miscellaneous_data_structures/meson.build | 4 +- pydatastructs/trees/_backend/__init__.py | 0 pydatastructs/trees/_backend/cpp/__init__.py | 0 pydatastructs/trees/meson.build | 9 +- pydatastructs/utils/_backend/__init__.py | 0 pydatastructs/utils/_backend/cpp/__init__.py | 0 pydatastructs/utils/meson.build | 13 ++- pyproject.toml | 7 ++ requirements.txt | 4 +- 13 files changed, 93 insertions(+), 66 deletions(-) create mode 100644 pydatastructs/trees/_backend/__init__.py create mode 100644 pydatastructs/trees/_backend/cpp/__init__.py create mode 100644 pydatastructs/utils/_backend/__init__.py create mode 100644 pydatastructs/utils/_backend/cpp/__init__.py diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8acfbc292..0ec5d9fdd 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -40,12 +40,15 @@ jobs: sudo apt-get install -y lcov - name: Build package + env: + CXXFLAGS: "-std=c++17 --coverage" + CFLAGS: "--coverage" run: | - CXXFLAGS="-std=c++17 --coverage" CFLAGS="--coverage" python scripts/build/install.py + spin build -v # coverage tests - name: Run tests run: | - python -m pytest --doctest-modules --cov=./ --cov-report=xml -s + spin test -v - name: Capture Coverage Data with lcov run: | @@ -101,12 +104,14 @@ jobs: python -m pip install -r docs/requirements.txt - name: Build package + env: + CXXFLAGS: "-std=c++17" run: | - CXXFLAGS="-std=c++17" python scripts/build/install.py + spin build -v - name: Run tests run: | - python -c "import pydatastructs; pydatastructs.test(only_benchmarks=True)" + spin test -v - name: Build Documentation run: | @@ -144,61 +149,62 @@ jobs: - name: Build package env: MACOSX_DEPLOYMENT_TARGET: 11.0 + CXXFLAGS: "-std=c++17" run: | - CXXFLAGS="-std=c++17" python scripts/build/install.py + spin build -v - name: Run tests run: | - python -c "import pydatastructs; pydatastructs.test()" + spin test -v - name: Build Documentation run: | sphinx-build -b html docs/source/ docs/build/html - # 
test-windows: - # runs-on: ${{matrix.os}} - # timeout-minutes: 20 - # strategy: - # fail-fast: false - # matrix: - # os: [windows-latest] - # python-version: - # - "3.8" - - # steps: - # - uses: actions/checkout@v3 - - # - name: Set up Python ${{ matrix.python-version }} - # uses: actions/setup-python@v4 - # with: - # python-version: ${{ matrix.python-version }} - - # - name: Setup conda - # uses: s-weigand/setup-conda@v1 - # with: - # update-conda: true - # python-version: ${{ matrix.python-version }} - # conda-channels: anaconda, conda-forge - # # - run: conda --version # This fails due to unknown reasons - # - run: which python - - # - name: Upgrade pip version - # run: | - # python -m pip install --upgrade pip - - # - name: Install requirements - # run: | - # python -m pip install -r requirements.txt - # python -m pip install -r docs/requirements.txt + # test-windows: + # runs-on: ${{matrix.os}} + # timeout-minutes: 20 + # strategy: + # fail-fast: false + # matrix: + # os: [windows-latest] + # python-version: + # - "3.8" + + # steps: + # - uses: actions/checkout@v3 + + # - name: Set up Python ${{ matrix.python-version }} + # uses: actions/setup-python@v4 + # with: + # python-version: ${{ matrix.python-version }} + + # - name: Setup conda + # uses: s-weigand/setup-conda@v1 + # with: + # update-conda: true + # python-version: ${{ matrix.python-version }} + # conda-channels: anaconda, conda-forge + # # - run: conda --version # This fails due to unknown reasons + # - run: which python + + # - name: Upgrade pip version + # run: | + # python -m pip install --upgrade pip + + # - name: Install requirements + # run: | + # python -m pip install -r requirements.txt + # python -m pip install -r docs/requirements.txt - # - name: Build package - # env: - # CL: "/std:c++17" - # run: | - # python scripts/build/install.py + - name: Build package + env: + CL: "/std:c++17" + run: | + spin build -v - # - name: Run tests - # run: | - # python -c "import pydatastructs; 
pydatastructs.test()" + - name: Run tests + run: | + spin test -v # - name: Build Documentation # run: | diff --git a/environment.yml b/environment.yml index 2d2ce160d..f08245fbc 100644 --- a/environment.yml +++ b/environment.yml @@ -9,6 +9,8 @@ dependencies: - pip: - codecov - pytest-cov + - spin + - meson - sphinx==5.0 - sphinx-readable-theme==1.3.0 - myst_nb==0.17.2 diff --git a/pydatastructs/graphs/meson.build b/pydatastructs/graphs/meson.build index ecd832908..50c3a3fe2 100644 --- a/pydatastructs/graphs/meson.build +++ b/pydatastructs/graphs/meson.build @@ -20,19 +20,19 @@ python.install_sources( py_include = include_directories('../utils/_backend/cpp') python.extension_module( - 'pydatastructs.graphs._backend.cpp._graph', + '_graph', '_backend/cpp/graph.cpp', include_directories: py_include, install: true, - subdir: 'pydatastructs/graphs' + subdir: 'pydatastructs/graphs/_backend/cpp' ) python.extension_module( - 'pydatastructs.graphs._backend.cpp._algorithms', + '_algorithms', '_backend/cpp/algorithms.cpp', include_directories: py_include, install: true, - subdir: 'pydatastructs/graphs' + subdir: 'pydatastructs/graphs/_backend/cpp' ) subdir('tests') diff --git a/pydatastructs/linear_data_structures/meson.build b/pydatastructs/linear_data_structures/meson.build index bf30078d5..50bf3b775 100644 --- a/pydatastructs/linear_data_structures/meson.build +++ b/pydatastructs/linear_data_structures/meson.build @@ -17,17 +17,17 @@ python.install_sources( ) python.extension_module( - 'pydatastructs.linear_data_structures._backend.cpp._arrays', + '_arrays', '_backend/cpp/arrays/arrays.cpp', install: true, - subdir: 'pydatastructs/linear_data_structures' + subdir: 'pydatastructs/linear_data_structures/_backend/cpp' ) python.extension_module( - 'pydatastructs.linear_data_structures._backend.cpp._algorithms', + '_algorithms', '_backend/cpp/algorithms/algorithms.cpp', install: true, - subdir: 'pydatastructs/linear_data_structures' + subdir: 
'pydatastructs/linear_data_structures/_backend/cpp' ) subdir('tests') diff --git a/pydatastructs/miscellaneous_data_structures/meson.build b/pydatastructs/miscellaneous_data_structures/meson.build index 0cbe06950..d6872ff25 100644 --- a/pydatastructs/miscellaneous_data_structures/meson.build +++ b/pydatastructs/miscellaneous_data_structures/meson.build @@ -22,10 +22,10 @@ python.install_sources( ) python.extension_module( - 'pydatastructs.miscellaneous_data_structures._backend.cpp._stack', + '_stack', '_backend/cpp/stack/stack.cpp', install: true, - subdir: 'pydatastructs/miscellaneous_data_structures' + subdir: 'pydatastructs/miscellaneous_data_structures/_backend/cpp' ) subdir('tests') diff --git a/pydatastructs/trees/_backend/__init__.py b/pydatastructs/trees/_backend/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/pydatastructs/trees/_backend/cpp/__init__.py b/pydatastructs/trees/_backend/cpp/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/pydatastructs/trees/meson.build b/pydatastructs/trees/meson.build index d30cf494a..fac490cda 100644 --- a/pydatastructs/trees/meson.build +++ b/pydatastructs/trees/meson.build @@ -12,11 +12,16 @@ python.install_sources( subdir: 'pydatastructs/trees' ) +python.install_sources( + ['_backend/__init__.py', '_backend/cpp/__init__.py'], + subdir: 'pydatastructs/trees/_backend' +) + python.extension_module( - 'pydatastructs.trees._backend.cpp._trees', + '_trees', '_backend/cpp/trees.cpp', install: true, - subdir: 'pydatastructs/trees' + subdir: 'pydatastructs/trees/_backend/cpp' ) subdir('tests') diff --git a/pydatastructs/utils/_backend/__init__.py b/pydatastructs/utils/_backend/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/pydatastructs/utils/_backend/cpp/__init__.py b/pydatastructs/utils/_backend/cpp/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/pydatastructs/utils/meson.build b/pydatastructs/utils/meson.build index 
17a285eee..a2f9be4c3 100644 --- a/pydatastructs/utils/meson.build +++ b/pydatastructs/utils/meson.build @@ -11,18 +11,23 @@ python.install_sources( subdir: 'pydatastructs/utils' ) +python.install_sources( + ['_backend/__init__.py', '_backend/cpp/__init__.py'], + subdir: 'pydatastructs/utils/_backend' +) + python.extension_module( - 'pydatastructs.utils._backend.cpp._nodes', + '_nodes', '_backend/cpp/nodes.cpp', install: true, - subdir: 'pydatastructs/utils' + subdir: 'pydatastructs/utils/_backend/cpp' ) python.extension_module( - 'pydatastructs.utils._backend.cpp._graph_utils', + '_graph_utils', '_backend/cpp/graph_utils.cpp', install: true, - subdir: 'pydatastructs/utils' + subdir: 'pydatastructs/utils/_backend/cpp' ) subdir('tests') diff --git a/pyproject.toml b/pyproject.toml index e0a56a44f..c2ee41443 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,6 +2,13 @@ requires = ["meson-python"] build-backend = "mesonpy" +[project] +name = "pydatastructs" +version = "1.0.1.dev0" +description = "Data structures and algorithms implemented using Python and C++" +readme = "README.md" +requires-python = ">=3.8" + [tool.spin] package = "pydatastructs" diff --git a/requirements.txt b/requirements.txt index ebb62275e..0b5f457a3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,6 @@ codecov pytest pytest-cov -llvmlite \ No newline at end of file +llvmlite +spin +meson From 8eabb3da3fafa0382a892f58e936922ccd6b25be Mon Sep 17 00:00:00 2001 From: Gagandeep Singh Date: Sat, 26 Jul 2025 23:32:14 +0530 Subject: [PATCH 04/47] Remove _extensions.py dummy installation --- pydatastructs/graphs/__init__.py | 1 - pydatastructs/graphs/meson.build | 1 - pydatastructs/linear_data_structures/__init__.py | 1 - pydatastructs/linear_data_structures/meson.build | 1 - pydatastructs/miscellaneous_data_structures/__init__.py | 1 - pydatastructs/miscellaneous_data_structures/meson.build | 1 - pydatastructs/trees/__init__.py | 1 - pydatastructs/trees/meson.build | 1 - 
pydatastructs/utils/__init__.py | 1 - pydatastructs/utils/meson.build | 1 - 10 files changed, 10 deletions(-) diff --git a/pydatastructs/graphs/__init__.py b/pydatastructs/graphs/__init__.py index c1a70574a..21e0a5f35 100644 --- a/pydatastructs/graphs/__init__.py +++ b/pydatastructs/graphs/__init__.py @@ -9,7 +9,6 @@ from . import algorithms from . import adjacency_list from . import adjacency_matrix -from . import _extensions from .algorithms import ( breadth_first_search, diff --git a/pydatastructs/graphs/meson.build b/pydatastructs/graphs/meson.build index 50c3a3fe2..2878bc186 100644 --- a/pydatastructs/graphs/meson.build +++ b/pydatastructs/graphs/meson.build @@ -3,7 +3,6 @@ python = import('python').find_installation(pure: false) python.install_sources( [ '__init__.py', - '_extensions.py', 'adjacency_list.py', 'adjacency_matrix.py', 'algorithms.py', diff --git a/pydatastructs/linear_data_structures/__init__.py b/pydatastructs/linear_data_structures/__init__.py index de247b88e..c6b3341d2 100644 --- a/pydatastructs/linear_data_structures/__init__.py +++ b/pydatastructs/linear_data_structures/__init__.py @@ -4,7 +4,6 @@ arrays, linked_lists, algorithms, - _extensions ) from .arrays import ( diff --git a/pydatastructs/linear_data_structures/meson.build b/pydatastructs/linear_data_structures/meson.build index 50bf3b775..fca4004cc 100644 --- a/pydatastructs/linear_data_structures/meson.build +++ b/pydatastructs/linear_data_structures/meson.build @@ -3,7 +3,6 @@ python = import('python').find_installation(pure: false) python.install_sources( [ '__init__.py', - '_extensions.py', 'algorithms.py', 'arrays.py', 'linked_lists.py' diff --git a/pydatastructs/miscellaneous_data_structures/__init__.py b/pydatastructs/miscellaneous_data_structures/__init__.py index 60754c413..6ed099769 100644 --- a/pydatastructs/miscellaneous_data_structures/__init__.py +++ b/pydatastructs/miscellaneous_data_structures/__init__.py @@ -6,7 +6,6 @@ queue, disjoint_set, sparse_table, - 
_extensions, ) from .binomial_trees import ( diff --git a/pydatastructs/miscellaneous_data_structures/meson.build b/pydatastructs/miscellaneous_data_structures/meson.build index d6872ff25..644ec7e3b 100644 --- a/pydatastructs/miscellaneous_data_structures/meson.build +++ b/pydatastructs/miscellaneous_data_structures/meson.build @@ -3,7 +3,6 @@ python = import('python').find_installation(pure: false) python.install_sources( [ '__init__.py', - '_extensions.py', 'algorithms.py', 'multiset.py', 'sparse_table.py', diff --git a/pydatastructs/trees/__init__.py b/pydatastructs/trees/__init__.py index 1c99cca25..892730122 100644 --- a/pydatastructs/trees/__init__.py +++ b/pydatastructs/trees/__init__.py @@ -5,7 +5,6 @@ m_ary_trees, space_partitioning_trees, heaps, - _extensions ) from .binary_trees import ( diff --git a/pydatastructs/trees/meson.build b/pydatastructs/trees/meson.build index fac490cda..bcaae16ae 100644 --- a/pydatastructs/trees/meson.build +++ b/pydatastructs/trees/meson.build @@ -3,7 +3,6 @@ python = import('python').find_installation(pure: false) python.install_sources( [ '__init__.py', - '_extensions.py', 'binary_trees.py', 'heaps.py', 'm_ary_trees.py', diff --git a/pydatastructs/utils/__init__.py b/pydatastructs/utils/__init__.py index 20a8c750c..c4971be32 100644 --- a/pydatastructs/utils/__init__.py +++ b/pydatastructs/utils/__init__.py @@ -3,7 +3,6 @@ from . 
import ( misc_util, testing_util, - _extensions ) from .misc_util import ( diff --git a/pydatastructs/utils/meson.build b/pydatastructs/utils/meson.build index a2f9be4c3..cdc466028 100644 --- a/pydatastructs/utils/meson.build +++ b/pydatastructs/utils/meson.build @@ -3,7 +3,6 @@ python = import('python').find_installation(pure: false) python.install_sources( [ '__init__.py', - '_extensions.py', 'misc_util.py', 'raises_util.py', 'testing_util.py' From 0bdc7108528c1d1ffe7dad6c8ea0f359969328b9 Mon Sep 17 00:00:00 2001 From: Prerak Singh Date: Wed, 1 Oct 2025 20:56:54 +0530 Subject: [PATCH 05/47] disabled py3.8 checks and fixed nodes issue --- .github/workflows/ci.yml | 229 ++++++++++++++++---------------- pydatastructs/utils/meson.build | 6 +- 2 files changed, 117 insertions(+), 118 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0ec5d9fdd..d5626756a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -7,73 +7,73 @@ on: branches: [main] jobs: - test-ubuntu-py38: - runs-on: ${{matrix.os}} - timeout-minutes: 20 - strategy: - fail-fast: false - matrix: - os: [ubuntu-latest] - python-version: - - "3.8" - - steps: - - uses: actions/checkout@v3 - - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python-version }} - - - name: Upgrade pip version - run: | - python -m pip install --upgrade pip - - - name: Install requirements - run: | - python -m pip install -r requirements.txt - python -m pip install -r docs/requirements.txt - - - name: Install lcov - run: | - sudo apt-get update - sudo apt-get install -y lcov - - - name: Build package - env: - CXXFLAGS: "-std=c++17 --coverage" - CFLAGS: "--coverage" - run: | - spin build -v - # coverage tests - - name: Run tests - run: | - spin test -v - - - name: Capture Coverage Data with lcov - run: | - lcov --capture --directory . 
--output-file coverage.info --no-external - - - name: Generate HTML Coverage Report with genhtml - run: | - genhtml coverage.info --output-directory coverage_report - - - name: Upload Coverage - uses: codecov/codecov-action@v3 - with: - token: ${{ secrets.CODECOV_TOKEN }} - directory: ./coverage/reports/ - env_vars: OS,PYTHON - fail_ci_if_error: false - files: ./coverage.xml - flags: unittests - name: codecov-umbrella - path_to_write_report: ./coverage/codecov_report.txt - verbose: true - - - name: Build Documentation - run: | - sphinx-build -b html docs/source/ docs/build/html + # test-ubuntu-py38: + # runs-on: ${{matrix.os}} + # timeout-minutes: 20 + # strategy: + # fail-fast: false + # matrix: + # os: [ubuntu-latest] + # python-version: + # - "3.8" + # + # steps: + # - uses: actions/checkout@v3 + # + # - name: Set up Python ${{ matrix.python-version }} + # uses: actions/setup-python@v4 + # with: + # python-version: ${{ matrix.python-version }} + # + # - name: Upgrade pip version + # run: | + # python -m pip install --upgrade pip + # + # - name: Install requirements + # run: | + # python -m pip install -r requirements.txt + # python -m pip install -r docs/requirements.txt + # + # - name: Install lcov + # run: | + # sudo apt-get update + # sudo apt-get install -y lcov + # + # - name: Build package + # env: + # CXXFLAGS: "-std=c++17 --coverage" + # CFLAGS: "--coverage" + # run: | + # spin build -v + # # coverage tests + # - name: Run tests + # run: | + # spin test -v + # + # - name: Capture Coverage Data with lcov + # run: | + # lcov --capture --directory . 
--output-file coverage.info --no-external + # + # - name: Generate HTML Coverage Report with genhtml + # run: | + # genhtml coverage.info --output-directory coverage_report + # + # - name: Upload Coverage + # uses: codecov/codecov-action@v3 + # with: + # token: ${{ secrets.CODECOV_TOKEN }} + # directory: ./coverage/reports/ + # env_vars: OS,PYTHON + # fail_ci_if_error: false + # files: ./coverage.xml + # flags: unittests + # name: codecov-umbrella + # path_to_write_report: ./coverage/codecov_report.txt + # verbose: true + # + # - name: Build Documentation + # run: | + # sphinx-build -b html docs/source/ docs/build/html test-ubuntu-py39-py310: runs-on: ${{matrix.os}} @@ -125,7 +125,7 @@ jobs: matrix: os: [macos-latest] python-version: - - "3.8" + # - "3.8" - "3.9" - "3.10" @@ -160,52 +160,53 @@ jobs: run: | sphinx-build -b html docs/source/ docs/build/html - # test-windows: - # runs-on: ${{matrix.os}} - # timeout-minutes: 20 - # strategy: - # fail-fast: false - # matrix: - # os: [windows-latest] - # python-version: - # - "3.8" - - # steps: - # - uses: actions/checkout@v3 - - # - name: Set up Python ${{ matrix.python-version }} - # uses: actions/setup-python@v4 - # with: - # python-version: ${{ matrix.python-version }} - - # - name: Setup conda - # uses: s-weigand/setup-conda@v1 - # with: - # update-conda: true - # python-version: ${{ matrix.python-version }} - # conda-channels: anaconda, conda-forge - # # - run: conda --version # This fails due to unknown reasons - # - run: which python - - # - name: Upgrade pip version - # run: | - # python -m pip install --upgrade pip - - # - name: Install requirements - # run: | - # python -m pip install -r requirements.txt - # python -m pip install -r docs/requirements.txt - - - name: Build package - env: - CL: "/std:c++17" - run: | - spin build -v - - - name: Run tests - run: | - spin test -v - + # test-windows: + # runs-on: ${{matrix.os}} + # timeout-minutes: 20 + # strategy: + # fail-fast: false + # matrix: + # os: 
[windows-latest] + # python-version: + # # - "3.8" + # - "3.9" + # + # steps: + # - uses: actions/checkout@v3 + # + # - name: Set up Python ${{ matrix.python-version }} + # uses: actions/setup-python@v4 + # with: + # python-version: ${{ matrix.python-version }} + # + # - name: Setup conda + # uses: s-weigand/setup-conda@v1 + # with: + # update-conda: true + # python-version: ${{ matrix.python-version }} + # conda-channels: anaconda, conda-forge + # # - run: conda --version # This fails due to unknown reasons + # - run: which python + # + # - name: Upgrade pip version + # run: | + # python -m pip install --upgrade pip + # + # - name: Install requirements + # run: | + # python -m pip install -r requirements.txt + # python -m pip install -r docs/requirements.txt + # + # - name: Build package + # env: + # CL: "/std:c++17" + # run: | + # spin build -v + # + # - name: Run tests + # run: | + # spin test -v + # # - name: Build Documentation # run: | # sphinx-build -b html docs/source/ docs/build/html diff --git a/pydatastructs/utils/meson.build b/pydatastructs/utils/meson.build index cdc466028..eb9968eb8 100644 --- a/pydatastructs/utils/meson.build +++ b/pydatastructs/utils/meson.build @@ -16,17 +16,15 @@ python.install_sources( ) python.extension_module( - '_nodes', + 'pydatastructs.utils._backend.cpp._nodes', '_backend/cpp/nodes.cpp', install: true, - subdir: 'pydatastructs/utils/_backend/cpp' ) python.extension_module( - '_graph_utils', + 'pydatastructs.utils._backend.cpp._graph_utils', '_backend/cpp/graph_utils.cpp', install: true, - subdir: 'pydatastructs/utils/_backend/cpp' ) subdir('tests') From 4036995aa9cb07579cfe1abc19c352ab53d89ed6 Mon Sep 17 00:00:00 2001 From: Prerak Singh Date: Wed, 1 Oct 2025 21:23:21 +0530 Subject: [PATCH 06/47] bug fix --- pydatastructs/utils/meson.build | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pydatastructs/utils/meson.build b/pydatastructs/utils/meson.build index eb9968eb8..6ecbb5e58 100644 --- 
a/pydatastructs/utils/meson.build +++ b/pydatastructs/utils/meson.build @@ -19,12 +19,14 @@ python.extension_module( 'pydatastructs.utils._backend.cpp._nodes', '_backend/cpp/nodes.cpp', install: true, + subdir: 'pydatastructs/utils/_backend/cpp' ) python.extension_module( 'pydatastructs.utils._backend.cpp._graph_utils', '_backend/cpp/graph_utils.cpp', install: true, + subdir: 'pydatastructs/utils/_backend/cpp' ) subdir('tests') From 24b1394b27ed3f3e9775d3c0df30fa2b2e128604 Mon Sep 17 00:00:00 2001 From: Prerak Singh Date: Wed, 1 Oct 2025 21:39:16 +0530 Subject: [PATCH 07/47] bug fix --- pydatastructs/graphs/meson.build | 14 ++++++-------- pydatastructs/utils/__init__.py | 16 +++++++++++++--- 2 files changed, 19 insertions(+), 11 deletions(-) diff --git a/pydatastructs/graphs/meson.build b/pydatastructs/graphs/meson.build index 2878bc186..0310ae387 100644 --- a/pydatastructs/graphs/meson.build +++ b/pydatastructs/graphs/meson.build @@ -19,19 +19,17 @@ python.install_sources( py_include = include_directories('../utils/_backend/cpp') python.extension_module( - '_graph', - '_backend/cpp/graph.cpp', - include_directories: py_include, + '_nodes', + '_backend/cpp/nodes.cpp', install: true, - subdir: 'pydatastructs/graphs/_backend/cpp' + subdir: 'pydatastructs/utils/_backend/cpp' ) python.extension_module( - '_algorithms', - '_backend/cpp/algorithms.cpp', - include_directories: py_include, + '_graph_utils', + '_backend/cpp/graph_utils.cpp', install: true, - subdir: 'pydatastructs/graphs/_backend/cpp' + subdir: 'pydatastructs/utils/_backend/cpp' ) subdir('tests') diff --git a/pydatastructs/utils/__init__.py b/pydatastructs/utils/__init__.py index c4971be32..24250ea7b 100644 --- a/pydatastructs/utils/__init__.py +++ b/pydatastructs/utils/__init__.py @@ -5,14 +5,18 @@ testing_util, ) +from ._backend.cpp import _graph_utils + +AdjacencyListGraphNode = _graph_utils.AdjacencyListGraphNode +AdjacencyMatrixGraphNode = _graph_utils.AdjacencyMatrixGraphNode +GraphNode = 
_graph_utils.GraphNode +GraphEdge = _graph_utils.GraphEdge + from .misc_util import ( TreeNode, MAryTreeNode, LinkedListNode, BinomialTreeNode, - AdjacencyListGraphNode, - AdjacencyMatrixGraphNode, - GraphEdge, Set, CartesianTreeNode, RedBlackTreeNode, @@ -25,5 +29,11 @@ ) from .testing_util import test +__all__.extend([ + 'AdjacencyListGraphNode', + 'AdjacencyMatrixGraphNode', + 'GraphNode', + 'GraphEdge', +]) __all__.extend(misc_util.__all__) __all__.extend(testing_util.__all__) From 2bcc3fd4f0d239126b1730ca30116968862c7909 Mon Sep 17 00:00:00 2001 From: Prerak Singh Date: Wed, 1 Oct 2025 21:45:54 +0530 Subject: [PATCH 08/47] bug fix --- pydatastructs/graphs/meson.build | 17 ++++++++++------- pydatastructs/utils/meson.build | 4 ++-- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/pydatastructs/graphs/meson.build b/pydatastructs/graphs/meson.build index 0310ae387..89810c257 100644 --- a/pydatastructs/graphs/meson.build +++ b/pydatastructs/graphs/meson.build @@ -3,6 +3,7 @@ python = import('python').find_installation(pure: false) python.install_sources( [ '__init__.py', + '_extensions.py', 'adjacency_list.py', 'adjacency_matrix.py', 'algorithms.py', @@ -19,17 +20,19 @@ python.install_sources( py_include = include_directories('../utils/_backend/cpp') python.extension_module( - '_nodes', - '_backend/cpp/nodes.cpp', + 'pydatastructs.graphs._backend.cpp._graph', + '_backend/cpp/graph.cpp', + include_directories: py_include, install: true, - subdir: 'pydatastructs/utils/_backend/cpp' + subdir: 'pydatastructs/graphs' ) python.extension_module( - '_graph_utils', - '_backend/cpp/graph_utils.cpp', + 'pydatastructs.graphs._backend.cpp._algorithms', + '_backend/cpp/algorithms.cpp', + include_directories: py_include, install: true, - subdir: 'pydatastructs/utils/_backend/cpp' + subdir: 'pydatastructs/graphs' ) -subdir('tests') +subdir('tests') \ No newline at end of file diff --git a/pydatastructs/utils/meson.build b/pydatastructs/utils/meson.build index 
6ecbb5e58..6509f2247 100644 --- a/pydatastructs/utils/meson.build +++ b/pydatastructs/utils/meson.build @@ -19,14 +19,14 @@ python.extension_module( 'pydatastructs.utils._backend.cpp._nodes', '_backend/cpp/nodes.cpp', install: true, - subdir: 'pydatastructs/utils/_backend/cpp' + subdir: 'pydatastructs/utils' ) python.extension_module( 'pydatastructs.utils._backend.cpp._graph_utils', '_backend/cpp/graph_utils.cpp', install: true, - subdir: 'pydatastructs/utils/_backend/cpp' + subdir: 'pydatastructs/utils' ) subdir('tests') From 6f354724c597207b83992e35a09d59568c7475b7 Mon Sep 17 00:00:00 2001 From: Prerak Singh Date: Wed, 1 Oct 2025 21:54:35 +0530 Subject: [PATCH 09/47] bug fix --- pydatastructs/utils/meson.build | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/pydatastructs/utils/meson.build b/pydatastructs/utils/meson.build index 6509f2247..de2f70f1d 100644 --- a/pydatastructs/utils/meson.build +++ b/pydatastructs/utils/meson.build @@ -3,6 +3,7 @@ python = import('python').find_installation(pure: false) python.install_sources( [ '__init__.py', + '_extensions.py', 'misc_util.py', 'raises_util.py', 'testing_util.py' @@ -16,17 +17,17 @@ python.install_sources( ) python.extension_module( - 'pydatastructs.utils._backend.cpp._nodes', + '_nodes', '_backend/cpp/nodes.cpp', install: true, - subdir: 'pydatastructs/utils' + subdir: 'pydatastructs/utils/_backend/cpp' ) python.extension_module( - 'pydatastructs.utils._backend.cpp._graph_utils', + '_graph_utils', '_backend/cpp/graph_utils.cpp', install: true, - subdir: 'pydatastructs/utils' + subdir: 'pydatastructs/utils/_backend/cpp' ) -subdir('tests') +subdir('tests') \ No newline at end of file From ee430b94516c1ebfae251dd61a92b4bfdd9a1d16 Mon Sep 17 00:00:00 2001 From: Prerak Singh Date: Wed, 1 Oct 2025 22:01:19 +0530 Subject: [PATCH 10/47] bug fix --- pydatastructs/graphs/meson.build | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pydatastructs/graphs/meson.build 
b/pydatastructs/graphs/meson.build index 89810c257..f6e084142 100644 --- a/pydatastructs/graphs/meson.build +++ b/pydatastructs/graphs/meson.build @@ -20,19 +20,19 @@ python.install_sources( py_include = include_directories('../utils/_backend/cpp') python.extension_module( - 'pydatastructs.graphs._backend.cpp._graph', + '_graph', '_backend/cpp/graph.cpp', include_directories: py_include, install: true, - subdir: 'pydatastructs/graphs' + subdir: 'pydatastructs/graphs/_backend/cpp' ) python.extension_module( - 'pydatastructs.graphs._backend.cpp._algorithms', + '_algorithms', '_backend/cpp/algorithms.cpp', include_directories: py_include, install: true, - subdir: 'pydatastructs/graphs' + subdir: 'pydatastructs/graphs/_backend/cpp' ) subdir('tests') \ No newline at end of file From 3da3f9c2ccb7a7f8c9441889b795e8b62a50152d Mon Sep 17 00:00:00 2001 From: Prerak Singh Date: Thu, 2 Oct 2025 01:36:14 +0530 Subject: [PATCH 11/47] bug fix --- .github/workflows/ci.yml | 25 +++++++---- .../cpp/algorithms/llvm_algorithms.py | 41 +++++-------------- pydatastructs/utils/__init__.py | 17 ++------ 3 files changed, 31 insertions(+), 52 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d5626756a..599512cc8 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -76,7 +76,7 @@ jobs: # sphinx-build -b html docs/source/ docs/build/html test-ubuntu-py39-py310: - runs-on: ${{matrix.os}} + runs-on: ${{ matrix.os }} timeout-minutes: 20 strategy: fail-fast: false @@ -102,30 +102,33 @@ jobs: run: | python -m pip install -r requirements.txt python -m pip install -r docs/requirements.txt + python -m pip install meson ninja pytest - - name: Build package + - name: Build package (Meson) env: CXXFLAGS: "-std=c++17" run: | - spin build -v + meson setup build + meson compile -C build + meson install -C build - name: Run tests run: | - spin test -v + pytest --import-mode=importlib - name: Build Documentation run: | sphinx-build -b html docs/source/ 
docs/build/html test-macos: - runs-on: ${{matrix.os}} + runs-on: ${{ matrix.os }} timeout-minutes: 20 strategy: fail-fast: false matrix: os: [macos-latest] python-version: - # - "3.8" + # - "3.8" - "3.9" - "3.10" @@ -145,16 +148,20 @@ jobs: run: | python -m pip install -r requirements.txt python -m pip install -r docs/requirements.txt + python -m pip install meson ninja pytest - - name: Build package + - name: Build package (Meson) env: MACOSX_DEPLOYMENT_TARGET: 11.0 CXXFLAGS: "-std=c++17" run: | - spin build -v + meson setup build + meson compile -C build + meson install -C build + - name: Run tests run: | - spin test -v + pytest --import-mode=importlib - name: Build Documentation run: | diff --git a/pydatastructs/linear_data_structures/_backend/cpp/algorithms/llvm_algorithms.py b/pydatastructs/linear_data_structures/_backend/cpp/algorithms/llvm_algorithms.py index 20c4c4ce6..397fec152 100644 --- a/pydatastructs/linear_data_structures/_backend/cpp/algorithms/llvm_algorithms.py +++ b/pydatastructs/linear_data_structures/_backend/cpp/algorithms/llvm_algorithms.py @@ -13,7 +13,6 @@ _fn_ptr_cache = {} def _cleanup(): - """Clean up LLVM resources on exit.""" global _engines, _target_machine, _fn_ptr_cache _engines.clear() _target_machine = None @@ -27,9 +26,8 @@ def _ensure_target_machine(): return try: - binding.initialize() - binding.initialize_native_target() - binding.initialize_native_asmprinter() + binding.initialize_all_targets() + binding.initialize_all_asmprinters() target = binding.Target.from_default_triple() _target_machine = target.create_target_machine( @@ -40,7 +38,6 @@ def _ensure_target_machine(): raise RuntimeError(f"Failed to initialize LLVM target machine: {e}") def get_bubble_sort_ptr(dtype: str) -> int: - """Get function pointer for bubble sort with specified dtype.""" dtype = dtype.lower().strip() if dtype not in _SUPPORTED: raise ValueError(f"Unsupported dtype '{dtype}'. 
Supported: {list(_SUPPORTED)}") @@ -148,31 +145,15 @@ def _materialize(dtype: str) -> int: mod = binding.parse_assembly(llvm_ir) mod.verify() - pmb = binding.PassManagerBuilder() - pmb.opt_level = 3 - pmb.loop_vectorize = True - pmb.slp_vectorize = True - - fpm = binding.create_function_pass_manager(mod) - pm = binding.create_module_pass_manager() - - pm.add_basic_alias_analysis_pass() - pm.add_type_based_alias_analysis_pass() - pm.add_instruction_combining_pass() - pm.add_gvn_pass() - pm.add_cfg_simplification_pass() - pm.add_loop_unroll_pass() - pm.add_loop_unswitch_pass() - - pmb.populate(fpm) - pmb.populate(pm) - - fpm.initialize() - for func in mod.functions: - fpm.run(func) - fpm.finalize() - - pm.run(mod) + try: + pm = binding.ModulePassManager() + pm.add_instruction_combining_pass() + pm.add_reassociate_pass() + pm.add_gvn_pass() + pm.add_cfg_simplification_pass() + pm.run(mod) + except AttributeError: + pass engine = binding.create_mcjit_compiler(mod, _target_machine) engine.finalize_object() diff --git a/pydatastructs/utils/__init__.py b/pydatastructs/utils/__init__.py index 24250ea7b..20a8c750c 100644 --- a/pydatastructs/utils/__init__.py +++ b/pydatastructs/utils/__init__.py @@ -3,20 +3,17 @@ from . 
import ( misc_util, testing_util, + _extensions ) -from ._backend.cpp import _graph_utils - -AdjacencyListGraphNode = _graph_utils.AdjacencyListGraphNode -AdjacencyMatrixGraphNode = _graph_utils.AdjacencyMatrixGraphNode -GraphNode = _graph_utils.GraphNode -GraphEdge = _graph_utils.GraphEdge - from .misc_util import ( TreeNode, MAryTreeNode, LinkedListNode, BinomialTreeNode, + AdjacencyListGraphNode, + AdjacencyMatrixGraphNode, + GraphEdge, Set, CartesianTreeNode, RedBlackTreeNode, @@ -29,11 +26,5 @@ ) from .testing_util import test -__all__.extend([ - 'AdjacencyListGraphNode', - 'AdjacencyMatrixGraphNode', - 'GraphNode', - 'GraphEdge', -]) __all__.extend(misc_util.__all__) __all__.extend(testing_util.__all__) From 7d6733c1505d5ffdee5def253f8bcc9aeb9db178 Mon Sep 17 00:00:00 2001 From: Prerak Singh Date: Thu, 2 Oct 2025 01:39:51 +0530 Subject: [PATCH 12/47] bug fix --- .github/workflows/ci.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 599512cc8..bd8b09067 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -108,9 +108,10 @@ jobs: env: CXXFLAGS: "-std=c++17" run: | - meson setup build + meson setup build --prefix=$PWD/install meson compile -C build meson install -C build + export PYTHONPATH=$PWD/install/lib/python${{ matrix.python-version }}/site-packages:$PYTHONPATH - name: Run tests run: | @@ -155,9 +156,10 @@ jobs: MACOSX_DEPLOYMENT_TARGET: 11.0 CXXFLAGS: "-std=c++17" run: | - meson setup build + meson setup build --prefix=$PWD/install meson compile -C build meson install -C build + export PYTHONPATH=$PWD/install/lib/python${{ matrix.python-version }}/site-packages:$PYTHONPATH - name: Run tests run: | From 8e4fb88c6b36e5c395d2976c62d6f5a5d7d3c814 Mon Sep 17 00:00:00 2001 From: Prerak Singh Date: Thu, 2 Oct 2025 02:07:47 +0530 Subject: [PATCH 13/47] bug fix --- .github/workflows/ci.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff 
--git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index bd8b09067..de8d7336b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -108,14 +108,14 @@ jobs: env: CXXFLAGS: "-std=c++17" run: | - meson setup build --prefix=$PWD/install + meson setup build --prefix=$PWD/install --wipe meson compile -C build meson install -C build - export PYTHONPATH=$PWD/install/lib/python${{ matrix.python-version }}/site-packages:$PYTHONPATH - name: Run tests run: | - pytest --import-mode=importlib + cd /tmp + PYTHONPATH=$GITHUB_WORKSPACE/install/lib/python${{ matrix.python-version }}/site-packages pytest --import-mode=importlib pydatastructs - name: Build Documentation run: | @@ -156,14 +156,14 @@ jobs: MACOSX_DEPLOYMENT_TARGET: 11.0 CXXFLAGS: "-std=c++17" run: | - meson setup build --prefix=$PWD/install + meson setup build --prefix=$PWD/install --wipe meson compile -C build meson install -C build - export PYTHONPATH=$PWD/install/lib/python${{ matrix.python-version }}/site-packages:$PYTHONPATH - name: Run tests run: | - pytest --import-mode=importlib + cd /tmp + PYTHONPATH=$GITHUB_WORKSPACE/install/lib/python${{ matrix.python-version }}/site-packages pytest --import-mode=importlib pydatastructs - name: Build Documentation run: | From 3e7278eeb0742dc89584c4088421b88d522a5174 Mon Sep 17 00:00:00 2001 From: Prerak Singh Date: Thu, 2 Oct 2025 02:10:44 +0530 Subject: [PATCH 14/47] bug fix --- .github/workflows/ci.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index de8d7336b..66e75ec44 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -114,8 +114,8 @@ jobs: - name: Run tests run: | - cd /tmp - PYTHONPATH=$GITHUB_WORKSPACE/install/lib/python${{ matrix.python-version }}/site-packages pytest --import-mode=importlib pydatastructs + export PYTHONPATH=$PWD/install/lib/python${{ matrix.python-version }}/site-packages:$PWD:$PYTHONPATH + pytest 
--import-mode=importlib - name: Build Documentation run: | @@ -162,8 +162,8 @@ jobs: - name: Run tests run: | - cd /tmp - PYTHONPATH=$GITHUB_WORKSPACE/install/lib/python${{ matrix.python-version }}/site-packages pytest --import-mode=importlib pydatastructs + export PYTHONPATH=$PWD/install/lib/python${{ matrix.python-version }}/site-packages:$PWD:$PYTHONPATH + pytest --import-mode=importlib - name: Build Documentation run: | From 25e3f608bb5d8b3d31be60aa6fefb8e0ae4b63b2 Mon Sep 17 00:00:00 2001 From: Prerak Singh Date: Thu, 2 Oct 2025 02:14:50 +0530 Subject: [PATCH 15/47] bug fix --- .github/workflows/ci.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 66e75ec44..bb1c75fd2 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -108,14 +108,14 @@ jobs: env: CXXFLAGS: "-std=c++17" run: | - meson setup build --prefix=$PWD/install --wipe + PREFIX=$(python -c "import sysconfig; print(sysconfig.get_paths()['purelib'])") + meson setup build --prefix=$PREFIX --wipe meson compile -C build meson install -C build - name: Run tests run: | - export PYTHONPATH=$PWD/install/lib/python${{ matrix.python-version }}/site-packages:$PWD:$PYTHONPATH - pytest --import-mode=importlib + python -m pytest --import-mode=importlib pydatastructs - name: Build Documentation run: | @@ -156,14 +156,14 @@ jobs: MACOSX_DEPLOYMENT_TARGET: 11.0 CXXFLAGS: "-std=c++17" run: | - meson setup build --prefix=$PWD/install --wipe + PREFIX=$(python -c "import sysconfig; print(sysconfig.get_paths()['purelib'])") + meson setup build --prefix=$PREFIX --wipe meson compile -C build meson install -C build - name: Run tests run: | - export PYTHONPATH=$PWD/install/lib/python${{ matrix.python-version }}/site-packages:$PWD:$PYTHONPATH - pytest --import-mode=importlib + python -m pytest --import-mode=importlib pydatastructs - name: Build Documentation run: | From fd28eb54f8a706415f45eb2b94c5b850f4d1adb1 Mon Sep 17 
00:00:00 2001 From: Prerak Singh Date: Thu, 2 Oct 2025 02:25:59 +0530 Subject: [PATCH 16/47] bug fix --- .github/workflows/ci.yml | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index bb1c75fd2..cb57bcbea 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -108,14 +108,16 @@ jobs: env: CXXFLAGS: "-std=c++17" run: | - PREFIX=$(python -c "import sysconfig; print(sysconfig.get_paths()['purelib'])") - meson setup build --prefix=$PREFIX --wipe + meson setup build --wipe meson compile -C build - meson install -C build + meson install -C build --destdir=$PWD/install - name: Run tests run: | - python -m pytest --import-mode=importlib pydatastructs + export PYTHONPATH=$PWD/install/usr/local/lib/python${{ matrix.python-version }}/site-packages:$PYTHONPATH + cd /tmp + python -c "import pydatastructs; print(pydatastructs.__file__)" + python -m pytest --pyargs pydatastructs -v - name: Build Documentation run: | @@ -156,14 +158,16 @@ jobs: MACOSX_DEPLOYMENT_TARGET: 11.0 CXXFLAGS: "-std=c++17" run: | - PREFIX=$(python -c "import sysconfig; print(sysconfig.get_paths()['purelib'])") - meson setup build --prefix=$PREFIX --wipe + meson setup build --wipe meson compile -C build - meson install -C build + meson install -C build --destdir=$PWD/install - name: Run tests run: | - python -m pytest --import-mode=importlib pydatastructs + export PYTHONPATH=$PWD/install/usr/local/lib/python${{ matrix.python-version }}/site-packages:$PYTHONPATH + cd /tmp + python -c "import pydatastructs; print(pydatastructs.__file__)" + python -m pytest --pyargs pydatastructs -v - name: Build Documentation run: | From 2b26b7551cf36929a29d278d97fb2251e8b78dfb Mon Sep 17 00:00:00 2001 From: Prerak Singh Date: Thu, 2 Oct 2025 02:32:28 +0530 Subject: [PATCH 17/47] bug fix --- .github/workflows/ci.yml | 20 ++++++-------------- 1 file changed, 6 insertions(+), 14 deletions(-) diff --git 
a/.github/workflows/ci.yml b/.github/workflows/ci.yml index cb57bcbea..957190074 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -102,21 +102,17 @@ jobs: run: | python -m pip install -r requirements.txt python -m pip install -r docs/requirements.txt - python -m pip install meson ninja pytest + python -m pip install meson-python meson ninja pytest - name: Build package (Meson) env: CXXFLAGS: "-std=c++17" run: | - meson setup build --wipe - meson compile -C build - meson install -C build --destdir=$PWD/install + python -m pip install . -v --no-build-isolation - name: Run tests run: | - export PYTHONPATH=$PWD/install/usr/local/lib/python${{ matrix.python-version }}/site-packages:$PYTHONPATH - cd /tmp - python -c "import pydatastructs; print(pydatastructs.__file__)" + cd $HOME python -m pytest --pyargs pydatastructs -v - name: Build Documentation @@ -151,22 +147,18 @@ jobs: run: | python -m pip install -r requirements.txt python -m pip install -r docs/requirements.txt - python -m pip install meson ninja pytest + python -m pip install meson-python meson ninja pytest - name: Build package (Meson) env: MACOSX_DEPLOYMENT_TARGET: 11.0 CXXFLAGS: "-std=c++17" run: | - meson setup build --wipe - meson compile -C build - meson install -C build --destdir=$PWD/install + python -m pip install . 
-v --no-build-isolation - name: Run tests run: | - export PYTHONPATH=$PWD/install/usr/local/lib/python${{ matrix.python-version }}/site-packages:$PYTHONPATH - cd /tmp - python -c "import pydatastructs; print(pydatastructs.__file__)" + cd $HOME python -m pytest --pyargs pydatastructs -v - name: Build Documentation From a4f1572daf705d1942ce1e6b614ae79e2eb83d65 Mon Sep 17 00:00:00 2001 From: Prerak Singh Date: Thu, 2 Oct 2025 02:42:39 +0530 Subject: [PATCH 18/47] bug fix --- .github/workflows/ci.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 957190074..85fc7b14e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -108,7 +108,7 @@ jobs: env: CXXFLAGS: "-std=c++17" run: | - python -m pip install . -v --no-build-isolation + python -m pip install . -v - name: Run tests run: | @@ -154,7 +154,7 @@ jobs: MACOSX_DEPLOYMENT_TARGET: 11.0 CXXFLAGS: "-std=c++17" run: | - python -m pip install . -v --no-build-isolation + python -m pip install . -v - name: Run tests run: | From 2db7fe3eb76337ac196e1a23cdf409af73bf63b7 Mon Sep 17 00:00:00 2001 From: Prerak Singh Date: Thu, 2 Oct 2025 02:46:49 +0530 Subject: [PATCH 19/47] bug fix --- .github/workflows/ci.yml | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 85fc7b14e..27d8f34c6 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -108,12 +108,11 @@ jobs: env: CXXFLAGS: "-std=c++17" run: | - python -m pip install . -v + python -m pip install --no-build-isolation -e . -v - name: Run tests run: | - cd $HOME - python -m pytest --pyargs pydatastructs -v + python -m pytest pydatastructs/ -v - name: Build Documentation run: | @@ -154,12 +153,11 @@ jobs: MACOSX_DEPLOYMENT_TARGET: 11.0 CXXFLAGS: "-std=c++17" run: | - python -m pip install . -v + python -m pip install --no-build-isolation -e . 
-v - name: Run tests run: | - cd $HOME - python -m pytest --pyargs pydatastructs -v + python -m pytest pydatastructs/ -v - name: Build Documentation run: | From 251c1d89484bfa272497913f1b306909991aaaed Mon Sep 17 00:00:00 2001 From: Prerak Singh Date: Thu, 2 Oct 2025 03:00:11 +0530 Subject: [PATCH 20/47] bug fix --- .github/workflows/ci.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 27d8f34c6..c17e341d5 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -112,7 +112,7 @@ jobs: - name: Run tests run: | - python -m pytest pydatastructs/ -v + python -m pytest pydatastructs/ -v --import-mode=importlib --ignore-glob="**/benchmarks/**" --ignore=pydatastructs/linear_data_structures/tests/benchmarks/ - name: Build Documentation run: | @@ -157,7 +157,7 @@ jobs: - name: Run tests run: | - python -m pytest pydatastructs/ -v + python -m pytest pydatastructs/ -v --import-mode=importlib --ignore-glob="**/benchmarks/**" --ignore=pydatastructs/linear_data_structures/tests/benchmarks/ - name: Build Documentation run: | From 2c2cf649e70a7fcabcfaabb2d8a2a8393d8bd358 Mon Sep 17 00:00:00 2001 From: Prerak Singh Date: Thu, 2 Oct 2025 03:04:49 +0530 Subject: [PATCH 21/47] bug fix --- .github/workflows/ci.yml | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c17e341d5..4c6ab2ce9 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -108,12 +108,16 @@ jobs: env: CXXFLAGS: "-std=c++17" run: | - python -m pip install --no-build-isolation -e . 
-v + python -m mesonbuild.mesonmain setup builddir + python -m mesonbuild.mesonmain compile -C builddir - name: Run tests run: | - python -m pytest pydatastructs/ -v --import-mode=importlib --ignore-glob="**/benchmarks/**" --ignore=pydatastructs/linear_data_structures/tests/benchmarks/ - + BUILDDIR=$(find builddir -type d -name "*.cpython-*" | head -1) + export PYTHONPATH="${BUILDDIR}:${PWD}:${PYTHONPATH}" + echo "PYTHONPATH: $PYTHONPATH" + python -c "import pydatastructs; print('Loaded from:', pydatastructs.__file__)" + python -m pytest pydatastructs/ -v --ignore-glob="**/benchmarks/**" - name: Build Documentation run: | sphinx-build -b html docs/source/ docs/build/html @@ -153,11 +157,15 @@ jobs: MACOSX_DEPLOYMENT_TARGET: 11.0 CXXFLAGS: "-std=c++17" run: | - python -m pip install --no-build-isolation -e . -v - + python -m mesonbuild.mesonmain setup builddir + python -m mesonbuild.mesonmain compile -C builddir - name: Run tests run: | - python -m pytest pydatastructs/ -v --import-mode=importlib --ignore-glob="**/benchmarks/**" --ignore=pydatastructs/linear_data_structures/tests/benchmarks/ + BUILDDIR=$(find builddir -type d -name "*.cpython-*" | head -1) + export PYTHONPATH="${BUILDDIR}:${PWD}:${PYTHONPATH}" + echo "PYTHONPATH: $PYTHONPATH" + python -c "import pydatastructs; print('Loaded from:', pydatastructs.__file__)" + python -m pytest pydatastructs/ -v --ignore-glob="**/benchmarks/**" - name: Build Documentation run: | From cb285b96d98941f721d394feb519acec5f2c11aa Mon Sep 17 00:00:00 2001 From: Prerak Singh Date: Thu, 2 Oct 2025 03:10:09 +0530 Subject: [PATCH 22/47] bug fix --- .github/workflows/ci.yml | 22 ++++++++-------------- 1 file changed, 8 insertions(+), 14 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4c6ab2ce9..3e79b69ad 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -108,16 +108,13 @@ jobs: env: CXXFLAGS: "-std=c++17" run: | - python -m mesonbuild.mesonmain setup builddir - python -m 
mesonbuild.mesonmain compile -C builddir + spin build -v + python -m pip install . --no-deps --no-build-isolation -v - name: Run tests run: | - BUILDDIR=$(find builddir -type d -name "*.cpython-*" | head -1) - export PYTHONPATH="${BUILDDIR}:${PWD}:${PYTHONPATH}" - echo "PYTHONPATH: $PYTHONPATH" - python -c "import pydatastructs; print('Loaded from:', pydatastructs.__file__)" - python -m pytest pydatastructs/ -v --ignore-glob="**/benchmarks/**" + cd $HOME + python -m pytest --pyargs pydatastructs -v --ignore-glob="**/benchmarks/**" - name: Build Documentation run: | sphinx-build -b html docs/source/ docs/build/html @@ -157,15 +154,12 @@ jobs: MACOSX_DEPLOYMENT_TARGET: 11.0 CXXFLAGS: "-std=c++17" run: | - python -m mesonbuild.mesonmain setup builddir - python -m mesonbuild.mesonmain compile -C builddir + spin build -v + python -m pip install . --no-deps --no-build-isolation -v - name: Run tests run: | - BUILDDIR=$(find builddir -type d -name "*.cpython-*" | head -1) - export PYTHONPATH="${BUILDDIR}:${PWD}:${PYTHONPATH}" - echo "PYTHONPATH: $PYTHONPATH" - python -c "import pydatastructs; print('Loaded from:', pydatastructs.__file__)" - python -m pytest pydatastructs/ -v --ignore-glob="**/benchmarks/**" + cd $HOME + python -m pytest --pyargs pydatastructs -v --ignore-glob="**/benchmarks/**" - name: Build Documentation run: | From 01cfaff42071faf4c546e72f628ee2ac5abb294c Mon Sep 17 00:00:00 2001 From: Prerak Singh Date: Thu, 2 Oct 2025 03:12:26 +0530 Subject: [PATCH 23/47] bug fix --- .github/workflows/ci.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3e79b69ad..bc62e6b2f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -109,7 +109,6 @@ jobs: CXXFLAGS: "-std=c++17" run: | spin build -v - python -m pip install . --no-deps --no-build-isolation -v - name: Run tests run: | @@ -155,7 +154,6 @@ jobs: CXXFLAGS: "-std=c++17" run: | spin build -v - python -m pip install . 
--no-deps --no-build-isolation -v - name: Run tests run: | cd $HOME From 531b7ed3d35529dad8a577557c9a5d96cfd33fcd Mon Sep 17 00:00:00 2001 From: Prerak Singh Date: Thu, 2 Oct 2025 03:14:14 +0530 Subject: [PATCH 24/47] bug fix --- .github/workflows/ci.yml | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index bc62e6b2f..d6fb6c97a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -112,8 +112,7 @@ jobs: - name: Run tests run: | - cd $HOME - python -m pytest --pyargs pydatastructs -v --ignore-glob="**/benchmarks/**" + spin test -v - name: Build Documentation run: | sphinx-build -b html docs/source/ docs/build/html @@ -156,8 +155,7 @@ jobs: spin build -v - name: Run tests run: | - cd $HOME - python -m pytest --pyargs pydatastructs -v --ignore-glob="**/benchmarks/**" + spin test -v - name: Build Documentation run: | From b02653ffea0adf36a7b2340010ac472effe9f75e Mon Sep 17 00:00:00 2001 From: Prerak Singh Date: Thu, 2 Oct 2025 12:35:19 +0530 Subject: [PATCH 25/47] bug fix --- .github/workflows/ci.yml | 246 ++++++++++++++++---------------- pydatastructs/utils/__init__.py | 1 - 2 files changed, 122 insertions(+), 125 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d6fb6c97a..c1b995530 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -7,76 +7,76 @@ on: branches: [main] jobs: - # test-ubuntu-py38: - # runs-on: ${{matrix.os}} - # timeout-minutes: 20 - # strategy: - # fail-fast: false - # matrix: - # os: [ubuntu-latest] - # python-version: - # - "3.8" - # - # steps: - # - uses: actions/checkout@v3 - # - # - name: Set up Python ${{ matrix.python-version }} - # uses: actions/setup-python@v4 - # with: - # python-version: ${{ matrix.python-version }} - # - # - name: Upgrade pip version - # run: | - # python -m pip install --upgrade pip - # - # - name: Install requirements - # run: | - # python -m pip install -r requirements.txt 
- # python -m pip install -r docs/requirements.txt - # - # - name: Install lcov - # run: | - # sudo apt-get update - # sudo apt-get install -y lcov - # - # - name: Build package - # env: - # CXXFLAGS: "-std=c++17 --coverage" - # CFLAGS: "--coverage" - # run: | - # spin build -v - # # coverage tests - # - name: Run tests - # run: | - # spin test -v - # - # - name: Capture Coverage Data with lcov - # run: | - # lcov --capture --directory . --output-file coverage.info --no-external - # - # - name: Generate HTML Coverage Report with genhtml - # run: | - # genhtml coverage.info --output-directory coverage_report - # - # - name: Upload Coverage - # uses: codecov/codecov-action@v3 - # with: - # token: ${{ secrets.CODECOV_TOKEN }} - # directory: ./coverage/reports/ - # env_vars: OS,PYTHON - # fail_ci_if_error: false - # files: ./coverage.xml - # flags: unittests - # name: codecov-umbrella - # path_to_write_report: ./coverage/codecov_report.txt - # verbose: true - # - # - name: Build Documentation - # run: | - # sphinx-build -b html docs/source/ docs/build/html + test-ubuntu-py38: + runs-on: ${{matrix.os}} + timeout-minutes: 20 + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest] + python-version: + - "3.8" + + steps: + - uses: actions/checkout@v3 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + + - name: Upgrade pip version + run: | + python -m pip install --upgrade pip + + - name: Install requirements + run: | + python -m pip install -r requirements.txt + python -m pip install -r docs/requirements.txt + + - name: Install lcov + run: | + sudo apt-get update + sudo apt-get install -y lcov + + - name: Build package + env: + CXXFLAGS: "-std=c++17 --coverage" + CFLAGS: "--coverage" + run: | + spin build -v + # coverage tests + - name: Run tests + run: | + spin test -v + + - name: Capture Coverage Data with lcov + run: | + lcov --capture --directory . 
--output-file coverage.info --no-external + + - name: Generate HTML Coverage Report with genhtml + run: | + genhtml coverage.info --output-directory coverage_report + + - name: Upload Coverage + uses: codecov/codecov-action@v3 + with: + token: ${{ secrets.CODECOV_TOKEN }} + directory: ./coverage/reports/ + env_vars: OS,PYTHON + fail_ci_if_error: false + files: ./coverage.xml + flags: unittests + name: codecov-umbrella + path_to_write_report: ./coverage/codecov_report.txt + verbose: true + + - name: Build Documentation + run: | + sphinx-build -b html docs/source/ docs/build/html test-ubuntu-py39-py310: - runs-on: ${{ matrix.os }} + runs-on: ${{matrix.os}} timeout-minutes: 20 strategy: fail-fast: false @@ -102,9 +102,8 @@ jobs: run: | python -m pip install -r requirements.txt python -m pip install -r docs/requirements.txt - python -m pip install meson-python meson ninja pytest - - name: Build package (Meson) + - name: Build package env: CXXFLAGS: "-std=c++17" run: | @@ -113,19 +112,20 @@ jobs: - name: Run tests run: | spin test -v + - name: Build Documentation run: | sphinx-build -b html docs/source/ docs/build/html test-macos: - runs-on: ${{ matrix.os }} + runs-on: ${{matrix.os}} timeout-minutes: 20 strategy: fail-fast: false matrix: os: [macos-latest] python-version: - # - "3.8" + - "3.8" - "3.9" - "3.10" @@ -145,9 +145,8 @@ jobs: run: | python -m pip install -r requirements.txt python -m pip install -r docs/requirements.txt - python -m pip install meson-python meson ninja pytest - - name: Build package (Meson) + - name: Build package env: MACOSX_DEPLOYMENT_TARGET: 11.0 CXXFLAGS: "-std=c++17" @@ -161,53 +160,52 @@ jobs: run: | sphinx-build -b html docs/source/ docs/build/html - # test-windows: - # runs-on: ${{matrix.os}} - # timeout-minutes: 20 - # strategy: - # fail-fast: false - # matrix: - # os: [windows-latest] - # python-version: - # # - "3.8" - # - "3.9" - # - # steps: - # - uses: actions/checkout@v3 - # - # - name: Set up Python ${{ matrix.python-version }} 
- # uses: actions/setup-python@v4 - # with: - # python-version: ${{ matrix.python-version }} - # - # - name: Setup conda - # uses: s-weigand/setup-conda@v1 - # with: - # update-conda: true - # python-version: ${{ matrix.python-version }} - # conda-channels: anaconda, conda-forge - # # - run: conda --version # This fails due to unknown reasons - # - run: which python - # - # - name: Upgrade pip version - # run: | - # python -m pip install --upgrade pip - # - # - name: Install requirements - # run: | - # python -m pip install -r requirements.txt - # python -m pip install -r docs/requirements.txt - # - # - name: Build package - # env: - # CL: "/std:c++17" - # run: | - # spin build -v - # - # - name: Run tests - # run: | - # spin test -v - # - # - name: Build Documentation - # run: | - # sphinx-build -b html docs/source/ docs/build/html + test-windows: + runs-on: ${{matrix.os}} + timeout-minutes: 20 + strategy: + fail-fast: false + matrix: + os: [windows-latest] + python-version: + - "3.8" + + steps: + - uses: actions/checkout@v3 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + + - name: Setup conda + uses: s-weigand/setup-conda@v1 + with: + update-conda: true + python-version: ${{ matrix.python-version }} + conda-channels: anaconda, conda-forge + # - run: conda --version # This fails due to unknown reasons + - run: which python + + - name: Upgrade pip version + run: | + python -m pip install --upgrade pip + + - name: Install requirements + run: | + python -m pip install -r requirements.txt + python -m pip install -r docs/requirements.txt + + - name: Build package + env: + CL: "/std:c++17" + run: | + spin build -v + + - name: Run tests + run: | + spin test -v + + - name: Build Documentation + run: | + sphinx-build -b html docs/source/ docs/build/html diff --git a/pydatastructs/utils/__init__.py b/pydatastructs/utils/__init__.py index 20a8c750c..c4971be32 100644 --- 
a/pydatastructs/utils/__init__.py +++ b/pydatastructs/utils/__init__.py @@ -3,7 +3,6 @@ from . import ( misc_util, testing_util, - _extensions ) from .misc_util import ( From 4d47e186d306247042c5096b37aee33a6532f51e Mon Sep 17 00:00:00 2001 From: Prerak Singh Date: Thu, 2 Oct 2025 13:05:32 +0530 Subject: [PATCH 26/47] bug fix --- .github/workflows/ci.yml | 19 +- .../site-packages/pydatastructs/__init__.py | 8 + .../pydatastructs/graphs/__init__.py | 28 + .../pydatastructs/graphs/_backend/__init__.py | 0 .../pydatastructs/graphs/adjacency_list.py | 101 + .../pydatastructs/graphs/adjacency_matrix.py | 100 + .../pydatastructs/graphs/algorithms.py | 1386 ++++++++++++ .../pydatastructs/graphs/graph.py | 163 ++ .../pydatastructs/graphs/tests/__init__.py | 0 .../graphs/tests/test_adjacency_list.py | 83 + .../graphs/tests/test_adjacency_matrix.py | 53 + .../graphs/tests/test_algorithms.py | 596 +++++ .../linear_data_structures/__init__.py | 53 + .../_backend/__init__.py | 0 .../linear_data_structures/algorithms.py | 2010 +++++++++++++++++ .../linear_data_structures/arrays.py | 473 ++++ .../linear_data_structures/linked_lists.py | 819 +++++++ .../linear_data_structures/tests/__init__.py | 0 .../tests/test_algorithms.py | 423 ++++ .../tests/test_arrays.py | 157 ++ .../tests/test_linked_lists.py | 193 ++ .../miscellaneous_data_structures/__init__.py | 51 + .../_backend/__init__.py | 0 .../algorithms.py | 335 +++ .../binomial_trees.py | 91 + .../disjoint_set.py | 143 ++ .../miscellaneous_data_structures/multiset.py | 42 + .../miscellaneous_data_structures/queue.py | 498 ++++ .../segment_tree.py | 225 ++ .../sparse_table.py | 108 + .../miscellaneous_data_structures/stack.py | 200 ++ .../tests/__init__.py | 0 .../tests/test_binomial_trees.py | 17 + .../tests/test_disjoint_set.py | 70 + .../tests/test_multiset.py | 39 + .../tests/test_queue.py | 116 + .../tests/test_range_query_dynamic.py | 71 + .../tests/test_range_query_static.py | 63 + .../tests/test_stack.py | 77 + 
.../pydatastructs/strings/__init__.py | 18 + .../pydatastructs/strings/algorithms.py | 247 ++ .../pydatastructs/strings/tests/__init__.py | 0 .../strings/tests/test_algorithms.py | 76 + .../pydatastructs/strings/tests/test_trie.py | 49 + .../pydatastructs/strings/trie.py | 201 ++ .../pydatastructs/trees/__init__.py | 40 + .../pydatastructs/trees/_backend/__init__.py | 0 .../pydatastructs/trees/binary_trees.py | 1888 ++++++++++++++++ .../pydatastructs/trees/heaps.py | 582 +++++ .../pydatastructs/trees/m_ary_trees.py | 172 ++ .../trees/space_partitioning_trees.py | 242 ++ .../pydatastructs/trees/tests/__init__.py | 0 .../trees/tests/test_binary_trees.py | 820 +++++++ .../pydatastructs/trees/tests/test_heaps.py | 236 ++ .../trees/tests/test_m_ary_trees.py | 5 + .../tests/test_space_partitioning_tree.py | 20 + .../pydatastructs/utils/__init__.py | 29 + .../pydatastructs/utils/_backend/__init__.py | 0 .../pydatastructs/utils/misc_util.py | 632 ++++++ .../pydatastructs/utils/raises_util.py | 17 + .../pydatastructs/utils/testing_util.py | 83 + .../pydatastructs/utils/tests/__init__.py | 0 .../utils/tests/test_code_quality.py | 239 ++ .../utils/tests/test_misc_util.py | 84 + .../site-packages/pydatastructs/__init__.py | 8 + .../pydatastructs/graphs/__init__.py | 28 + .../pydatastructs/graphs/_backend/__init__.py | 0 .../pydatastructs/graphs/adjacency_list.py | 101 + .../pydatastructs/graphs/adjacency_matrix.py | 100 + .../pydatastructs/graphs/algorithms.py | 1386 ++++++++++++ .../pydatastructs/graphs/graph.py | 163 ++ .../pydatastructs/graphs/tests/__init__.py | 0 .../graphs/tests/test_adjacency_list.py | 83 + .../graphs/tests/test_adjacency_matrix.py | 53 + .../graphs/tests/test_algorithms.py | 596 +++++ .../linear_data_structures/__init__.py | 53 + .../_backend/__init__.py | 0 .../linear_data_structures/algorithms.py | 2010 +++++++++++++++++ .../linear_data_structures/arrays.py | 473 ++++ .../linear_data_structures/linked_lists.py | 819 +++++++ 
.../linear_data_structures/tests/__init__.py | 0 .../tests/test_algorithms.py | 423 ++++ .../tests/test_arrays.py | 157 ++ .../tests/test_linked_lists.py | 193 ++ .../miscellaneous_data_structures/__init__.py | 51 + .../_backend/__init__.py | 0 .../algorithms.py | 335 +++ .../binomial_trees.py | 91 + .../disjoint_set.py | 143 ++ .../miscellaneous_data_structures/multiset.py | 42 + .../miscellaneous_data_structures/queue.py | 498 ++++ .../segment_tree.py | 225 ++ .../sparse_table.py | 108 + .../miscellaneous_data_structures/stack.py | 200 ++ .../tests/__init__.py | 0 .../tests/test_binomial_trees.py | 17 + .../tests/test_disjoint_set.py | 70 + .../tests/test_multiset.py | 39 + .../tests/test_queue.py | 116 + .../tests/test_range_query_dynamic.py | 71 + .../tests/test_range_query_static.py | 63 + .../tests/test_stack.py | 77 + .../pydatastructs/strings/__init__.py | 18 + .../pydatastructs/strings/algorithms.py | 247 ++ .../pydatastructs/strings/tests/__init__.py | 0 .../strings/tests/test_algorithms.py | 76 + .../pydatastructs/strings/tests/test_trie.py | 49 + .../pydatastructs/strings/trie.py | 201 ++ .../pydatastructs/trees/__init__.py | 40 + .../pydatastructs/trees/_backend/__init__.py | 0 .../pydatastructs/trees/binary_trees.py | 1888 ++++++++++++++++ .../pydatastructs/trees/heaps.py | 582 +++++ .../pydatastructs/trees/m_ary_trees.py | 172 ++ .../trees/space_partitioning_trees.py | 242 ++ .../pydatastructs/trees/tests/__init__.py | 0 .../trees/tests/test_binary_trees.py | 820 +++++++ .../pydatastructs/trees/tests/test_heaps.py | 236 ++ .../trees/tests/test_m_ary_trees.py | 5 + .../tests/test_space_partitioning_tree.py | 20 + .../pydatastructs/utils/__init__.py | 29 + .../pydatastructs/utils/_backend/__init__.py | 0 .../pydatastructs/utils/misc_util.py | 632 ++++++ .../pydatastructs/utils/raises_util.py | 17 + .../pydatastructs/utils/testing_util.py | 83 + .../pydatastructs/utils/tests/__init__.py | 0 .../utils/tests/test_code_quality.py | 239 ++ 
.../utils/tests/test_misc_util.py | 84 + pydatastructs/graphs/meson.build | 1 - pydatastructs/utils/meson.build | 1 - 129 files changed, 28957 insertions(+), 8 deletions(-) create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/__init__.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/__init__.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/_backend/__init__.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/adjacency_list.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/adjacency_matrix.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/algorithms.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/graph.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/tests/__init__.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_adjacency_list.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_adjacency_matrix.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_algorithms.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/__init__.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/_backend/__init__.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/algorithms.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/arrays.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/linked_lists.py create mode 100644 
build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/__init__.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_algorithms.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_arrays.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_linked_lists.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/__init__.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/_backend/__init__.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/algorithms.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/binomial_trees.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/disjoint_set.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/multiset.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/queue.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/segment_tree.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/sparse_table.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/stack.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/__init__.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_binomial_trees.py 
create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_disjoint_set.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_multiset.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_queue.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_range_query_dynamic.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_range_query_static.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_stack.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/strings/__init__.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/strings/algorithms.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/strings/tests/__init__.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/strings/tests/test_algorithms.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/strings/tests/test_trie.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/strings/trie.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/__init__.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/_backend/__init__.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/binary_trees.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/heaps.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/m_ary_trees.py create mode 100644 
build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/space_partitioning_trees.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/tests/__init__.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/tests/test_binary_trees.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/tests/test_heaps.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/tests/test_m_ary_trees.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/tests/test_space_partitioning_tree.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/__init__.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/_backend/__init__.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/misc_util.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/raises_util.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/testing_util.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/tests/__init__.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/tests/test_code_quality.py create mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/tests/test_misc_util.py create mode 100644 lib/python3.12/site-packages/pydatastructs/__init__.py create mode 100644 lib/python3.12/site-packages/pydatastructs/graphs/__init__.py create mode 100644 lib/python3.12/site-packages/pydatastructs/graphs/_backend/__init__.py create mode 100644 lib/python3.12/site-packages/pydatastructs/graphs/adjacency_list.py create mode 100644 lib/python3.12/site-packages/pydatastructs/graphs/adjacency_matrix.py create mode 100644 lib/python3.12/site-packages/pydatastructs/graphs/algorithms.py create 
mode 100644 lib/python3.12/site-packages/pydatastructs/graphs/graph.py create mode 100644 lib/python3.12/site-packages/pydatastructs/graphs/tests/__init__.py create mode 100644 lib/python3.12/site-packages/pydatastructs/graphs/tests/test_adjacency_list.py create mode 100644 lib/python3.12/site-packages/pydatastructs/graphs/tests/test_adjacency_matrix.py create mode 100644 lib/python3.12/site-packages/pydatastructs/graphs/tests/test_algorithms.py create mode 100644 lib/python3.12/site-packages/pydatastructs/linear_data_structures/__init__.py create mode 100644 lib/python3.12/site-packages/pydatastructs/linear_data_structures/_backend/__init__.py create mode 100644 lib/python3.12/site-packages/pydatastructs/linear_data_structures/algorithms.py create mode 100644 lib/python3.12/site-packages/pydatastructs/linear_data_structures/arrays.py create mode 100644 lib/python3.12/site-packages/pydatastructs/linear_data_structures/linked_lists.py create mode 100644 lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/__init__.py create mode 100644 lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_algorithms.py create mode 100644 lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_arrays.py create mode 100644 lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_linked_lists.py create mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/__init__.py create mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/_backend/__init__.py create mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/algorithms.py create mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/binomial_trees.py create mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/disjoint_set.py create mode 100644 
lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/multiset.py create mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/queue.py create mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/segment_tree.py create mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/sparse_table.py create mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/stack.py create mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/__init__.py create mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_binomial_trees.py create mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_disjoint_set.py create mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_multiset.py create mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_queue.py create mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_range_query_dynamic.py create mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_range_query_static.py create mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_stack.py create mode 100644 lib/python3.12/site-packages/pydatastructs/strings/__init__.py create mode 100644 lib/python3.12/site-packages/pydatastructs/strings/algorithms.py create mode 100644 lib/python3.12/site-packages/pydatastructs/strings/tests/__init__.py create mode 100644 lib/python3.12/site-packages/pydatastructs/strings/tests/test_algorithms.py create mode 100644 lib/python3.12/site-packages/pydatastructs/strings/tests/test_trie.py create mode 100644 lib/python3.12/site-packages/pydatastructs/strings/trie.py create mode 100644 
lib/python3.12/site-packages/pydatastructs/trees/__init__.py create mode 100644 lib/python3.12/site-packages/pydatastructs/trees/_backend/__init__.py create mode 100644 lib/python3.12/site-packages/pydatastructs/trees/binary_trees.py create mode 100644 lib/python3.12/site-packages/pydatastructs/trees/heaps.py create mode 100644 lib/python3.12/site-packages/pydatastructs/trees/m_ary_trees.py create mode 100644 lib/python3.12/site-packages/pydatastructs/trees/space_partitioning_trees.py create mode 100644 lib/python3.12/site-packages/pydatastructs/trees/tests/__init__.py create mode 100644 lib/python3.12/site-packages/pydatastructs/trees/tests/test_binary_trees.py create mode 100644 lib/python3.12/site-packages/pydatastructs/trees/tests/test_heaps.py create mode 100644 lib/python3.12/site-packages/pydatastructs/trees/tests/test_m_ary_trees.py create mode 100644 lib/python3.12/site-packages/pydatastructs/trees/tests/test_space_partitioning_tree.py create mode 100644 lib/python3.12/site-packages/pydatastructs/utils/__init__.py create mode 100644 lib/python3.12/site-packages/pydatastructs/utils/_backend/__init__.py create mode 100644 lib/python3.12/site-packages/pydatastructs/utils/misc_util.py create mode 100644 lib/python3.12/site-packages/pydatastructs/utils/raises_util.py create mode 100644 lib/python3.12/site-packages/pydatastructs/utils/testing_util.py create mode 100644 lib/python3.12/site-packages/pydatastructs/utils/tests/__init__.py create mode 100644 lib/python3.12/site-packages/pydatastructs/utils/tests/test_code_quality.py create mode 100644 lib/python3.12/site-packages/pydatastructs/utils/tests/test_misc_util.py diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c1b995530..acf6e6105 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -44,11 +44,14 @@ jobs: CXXFLAGS: "-std=c++17 --coverage" CFLAGS: "--coverage" run: | - spin build -v + meson setup build --prefix=$PWD + meson compile -C build + meson install -C build 
--skip-subprojects + # coverage tests - name: Run tests run: | - spin test -v + python -m pytest pydatastructs/ -v --ignore-glob="**/benchmarks/**" - name: Capture Coverage Data with lcov run: | @@ -107,11 +110,13 @@ jobs: env: CXXFLAGS: "-std=c++17" run: | - spin build -v + meson setup build --prefix=$PWD + meson compile -C build + meson install -C build --skip-subprojects - name: Run tests run: | - spin test -v + python -m pytest pydatastructs/ -v --ignore-glob="**/benchmarks/**" - name: Build Documentation run: | @@ -151,10 +156,12 @@ jobs: MACOSX_DEPLOYMENT_TARGET: 11.0 CXXFLAGS: "-std=c++17" run: | - spin build -v + meson setup build --prefix=$PWD + meson compile -C build + meson install -C build --skip-subprojects - name: Run tests run: | - spin test -v + python -m pytest pydatastructs/ -v --ignore-glob="**/benchmarks/**" - name: Build Documentation run: | diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/__init__.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/__init__.py new file mode 100644 index 000000000..27cc5a202 --- /dev/null +++ b/build-install/usr/lib/python3.12/site-packages/pydatastructs/__init__.py @@ -0,0 +1,8 @@ +from .utils import * +from .linear_data_structures import * +from .trees import * +from .miscellaneous_data_structures import * +from .graphs import * +from .strings import * + +__version__ = "1.0.1-dev" diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/__init__.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/__init__.py new file mode 100644 index 000000000..21e0a5f35 --- /dev/null +++ b/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/__init__.py @@ -0,0 +1,28 @@ +__all__ = [] + +from . import graph +from .graph import ( + Graph +) +__all__.extend(graph.__all__) + +from . import algorithms +from . import adjacency_list +from . 
import adjacency_matrix + +from .algorithms import ( + breadth_first_search, + breadth_first_search_parallel, + minimum_spanning_tree, + minimum_spanning_tree_parallel, + strongly_connected_components, + depth_first_search, + shortest_paths, + all_pair_shortest_paths, + topological_sort, + topological_sort_parallel, + max_flow, + find_bridges +) + +__all__.extend(algorithms.__all__) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/_backend/__init__.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/_backend/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/adjacency_list.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/adjacency_list.py new file mode 100644 index 000000000..bd901b380 --- /dev/null +++ b/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/adjacency_list.py @@ -0,0 +1,101 @@ +from pydatastructs.graphs.graph import Graph +from pydatastructs.graphs._backend.cpp import _graph +from pydatastructs.utils.misc_util import ( + GraphEdge, Backend, raise_if_backend_is_not_python) + +__all__ = [ + 'AdjacencyList' +] + +class AdjacencyList(Graph): + """ + Adjacency list implementation of graphs. 
+ + See also + ======== + + pydatastructs.graphs.graph.Graph + """ + def __new__(cls, *vertices, **kwargs): + + backend = kwargs.get('backend', Backend.PYTHON) + if backend == Backend.PYTHON: + obj = object.__new__(cls) + for vertex in vertices: + obj.__setattr__(vertex.name, vertex) + obj.vertices = [vertex.name for vertex in vertices] + obj.edge_weights = {} + obj._impl = 'adjacency_list' + return obj + else: + graph = _graph.AdjacencyListGraph() + for vertice in vertices: + graph.add_vertex(vertice) + return graph + + @classmethod + def methods(self): + return ['is_adjacent', 'neighbors', + 'add_vertex', 'remove_vertex', 'add_edge', + 'get_edge', 'remove_edge', '__new__'] + + def is_adjacent(self, node1, node2): + node1 = self.__getattribute__(node1) + return hasattr(node1, node2) + + def num_vertices(self): + return len(self.vertices) + + def num_edges(self): + return sum(len(self.neighbors(v)) for v in self.vertices) + + def neighbors(self, node): + node = self.__getattribute__(node) + return [self.__getattribute__(name) for name in node.adjacent] + + def add_vertex(self, node): + if not hasattr(self, node.name): + self.vertices.append(node.name) + self.__setattr__(node.name, node) + + def remove_vertex(self, name): + delattr(self, name) + self.vertices.remove(name) + for node in self.vertices: + node_obj = self.__getattribute__(node) + if hasattr(node_obj, name): + delattr(node_obj, name) + node_obj.adjacent.remove(name) + + def add_edge(self, source, target, cost=None): + source, target = str(source), str(target) + error_msg = ("Vertex %s is not present in the graph." + "Call Graph.add_vertex to add a new" + "vertex. Graph.add_edge is only responsible" + "for adding edges and it will not add new" + "vertices on its own. 
This is done to maintain" + "clear separation between the functionality of" + "these two methods.") + if not hasattr(self, source): + raise ValueError(error_msg % (source)) + if not hasattr(self, target): + raise ValueError(error_msg % (target)) + + source, target = self.__getattribute__(source), \ + self.__getattribute__(target) + source.add_adjacent_node(target.name) + if cost is not None: + self.edge_weights[source.name + "_" + target.name] = \ + GraphEdge(source, target, cost) + + def get_edge(self, source, target): + return self.edge_weights.get( + source + "_" + target, + None) + + def remove_edge(self, source, target): + source, target = self.__getattribute__(source), \ + self.__getattribute__(target) + source.remove_adjacent_node(target.name) + self.edge_weights.pop(source.name + "_" + target.name, + None) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/adjacency_matrix.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/adjacency_matrix.py new file mode 100644 index 000000000..9c2326b86 --- /dev/null +++ b/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/adjacency_matrix.py @@ -0,0 +1,100 @@ +from pydatastructs.graphs.graph import Graph +from pydatastructs.graphs._backend.cpp import _graph +from pydatastructs.utils.misc_util import ( + GraphEdge, raise_if_backend_is_not_python, + Backend) + +__all__ = [ + 'AdjacencyMatrix' +] + +class AdjacencyMatrix(Graph): + """ + Adjacency matrix implementation of graphs. 
+ + See also + ======== + + pydatastructs.graphs.graph.Graph + """ + def __new__(cls, *vertices, **kwargs): + backend = kwargs.get('backend', Backend.PYTHON) + if backend == Backend.PYTHON: + obj = object.__new__(cls) + obj.vertices = [vertex.name for vertex in vertices] + for vertex in vertices: + obj.__setattr__(vertex.name, vertex) + obj.matrix = {} + for vertex in vertices: + obj.matrix[vertex.name] = {} + obj.edge_weights = {} + obj._impl = 'adjacency_matrix' + return obj + else: + return _graph.AdjacencyMatrixGraph(vertices) + + @classmethod + def methods(self): + return ['is_adjacent', 'neighbors', + 'add_edge', 'get_edge', 'remove_edge', + '__new__'] + + def is_adjacent(self, node1, node2): + node1, node2 = str(node1), str(node2) + row = self.matrix.get(node1, {}) + return row.get(node2, False) is not False + + def num_vertices(self): + return len(self.vertices) + + def num_edges(self): + return sum(len(v) for v in self.matrix.values()) + + def neighbors(self, node): + node = str(node) + neighbors = [] + row = self.matrix.get(node, {}) + for node, presence in row.items(): + if presence: + neighbors.append(self.__getattribute__( + str(node))) + return neighbors + + def add_vertex(self, node): + raise NotImplementedError("Currently we allow " + "adjacency matrix for static graphs only") + + def remove_vertex(self, node): + raise NotImplementedError("Currently we allow " + "adjacency matrix for static graphs only.") + + def add_edge(self, source, target, cost=None): + source, target = str(source), str(target) + error_msg = ("Vertex %s is not present in the graph." + "Call Graph.add_vertex to add a new" + "vertex. Graph.add_edge is only responsible" + "for adding edges and it will not add new" + "vertices on its own. 
This is done to maintain" + "clear separation between the functionality of" + "these two methods.") + if source not in self.matrix: + raise ValueError(error_msg % (source)) + if target not in self.matrix: + raise ValueError(error_msg % (target)) + + self.matrix[source][target] = True + if cost is not None: + self.edge_weights[source + "_" + target] = \ + GraphEdge(self.__getattribute__(source), + self.__getattribute__(target), + cost) + + def get_edge(self, source, target): + return self.edge_weights.get( + str(source) + "_" + str(target), + None) + + def remove_edge(self, source, target): + source, target = str(source), str(target) + self.matrix[source][target] = False + self.edge_weights.pop(str(source) + "_" + str(target), None) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/algorithms.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/algorithms.py new file mode 100644 index 000000000..9324b7278 --- /dev/null +++ b/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/algorithms.py @@ -0,0 +1,1386 @@ +""" +Contains algorithms associated with graph +data structure. 
+""" +from collections import deque +from concurrent.futures import ThreadPoolExecutor +from pydatastructs.utils.misc_util import ( + _comp, raise_if_backend_is_not_python, Backend, AdjacencyListGraphNode) +from pydatastructs.miscellaneous_data_structures import ( + DisjointSetForest, PriorityQueue) +from pydatastructs.graphs.graph import Graph +from pydatastructs.linear_data_structures.algorithms import merge_sort_parallel +from pydatastructs import PriorityQueue + +__all__ = [ + 'breadth_first_search', + 'breadth_first_search_parallel', + 'minimum_spanning_tree', + 'minimum_spanning_tree_parallel', + 'strongly_connected_components', + 'depth_first_search', + 'shortest_paths', + 'all_pair_shortest_paths', + 'topological_sort', + 'topological_sort_parallel', + 'max_flow', + 'find_bridges' +] + +Stack = Queue = deque + +def breadth_first_search( + graph, source_node, operation, *args, **kwargs): + """ + Implementation of serial breadth first search(BFS) + algorithm. + + Parameters + ========== + + graph: Graph + The graph on which BFS is to be performed. + source_node: str + The name of the source node from where the BFS is + to be initiated. + operation: function + The function which is to be applied + on every node when it is visited. + The prototype which is to be followed is, + `function_name(curr_node, next_node, + arg_1, arg_2, . . ., arg_n)`. + Here, the first two arguments denote, the + current node and the node next to current node. + The rest of the arguments are optional and you can + provide your own stuff there. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Note + ==== + + You should pass all the arguments which you are going + to use in the prototype of your `operation` after + passing the operation function. 
+ + Examples + ======== + + >>> from pydatastructs import Graph, AdjacencyListGraphNode + >>> V1 = AdjacencyListGraphNode("V1") + >>> V2 = AdjacencyListGraphNode("V2") + >>> V3 = AdjacencyListGraphNode("V3") + >>> G = Graph(V1, V2, V3) + >>> from pydatastructs import breadth_first_search + >>> def f(curr_node, next_node, dest_node): + ... return curr_node != dest_node + ... + >>> G.add_edge(V1.name, V2.name) + >>> G.add_edge(V2.name, V3.name) + >>> breadth_first_search(G, V1.name, f, V3.name) + """ + backend = kwargs.get('backend', Backend.PYTHON) + if backend == Backend.PYTHON: + import pydatastructs.graphs.algorithms as algorithms + func = "_breadth_first_search_" + graph._impl + if not hasattr(algorithms, func): + raise NotImplementedError( + "Currently breadth first search isn't implemented for " + "%s graphs."%(graph._impl)) + return getattr(algorithms, func)( + graph, source_node, operation, *args, **kwargs) + else: + from pydatastructs.graphs._backend.cpp._algorithms import bfs_adjacency_list, bfs_adjacency_matrix + if (graph._impl == "adjacency_list"): + extra_args = args if args else () + return bfs_adjacency_list(graph, source_node, operation, extra_args) + if (graph._impl == "adjacency_matrix"): + extra_args = args if args else () + return bfs_adjacency_matrix(graph, source_node, operation, extra_args) + +def _breadth_first_search_adjacency_list( + graph, source_node, operation, *args, **kwargs): + bfs_queue = Queue() + visited = {} + bfs_queue.append(source_node) + visited[source_node] = True + while len(bfs_queue) != 0: + curr_node = bfs_queue.popleft() + next_nodes = graph.neighbors(curr_node) + if len(next_nodes) != 0: + for next_node in next_nodes: + if visited.get(next_node.name, False) is False: + status = operation(curr_node, next_node.name, *args, **kwargs) + if not status: + return None + bfs_queue.append(next_node.name) + visited[next_node.name] = True + else: + status = operation(curr_node, "", *args, **kwargs) + if not status: + return None 
+ +_breadth_first_search_adjacency_matrix = _breadth_first_search_adjacency_list + +def breadth_first_search_parallel( + graph, source_node, num_threads, operation, *args, **kwargs): + """ + Parallel implementation of breadth first search on graphs. + + Parameters + ========== + + graph: Graph + The graph on which BFS is to be performed. + source_node: str + The name of the source node from where the BFS is + to be initiated. + num_threads: int + Number of threads to be used for computation. + operation: function + The function which is to be applied + on every node when it is visited. + The prototype which is to be followed is, + `function_name(curr_node, next_node, + arg_1, arg_2, . . ., arg_n)`. + Here, the first two arguments denote, the + current node and the node next to current node. + The rest of the arguments are optional and you can + provide your own stuff there. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Note + ==== + + You should pass all the arguments which you are going + to use in the prototype of your `operation` after + passing the operation function. + + Examples + ======== + + >>> from pydatastructs import Graph, AdjacencyListGraphNode + >>> V1 = AdjacencyListGraphNode("V1") + >>> V2 = AdjacencyListGraphNode("V2") + >>> V3 = AdjacencyListGraphNode("V3") + >>> G = Graph(V1, V2, V3) + >>> from pydatastructs import breadth_first_search_parallel + >>> def f(curr_node, next_node, dest_node): + ... return curr_node != dest_node + ... 
+ >>> G.add_edge(V1.name, V2.name) + >>> G.add_edge(V2.name, V3.name) + >>> breadth_first_search_parallel(G, V1.name, 3, f, V3.name) + """ + raise_if_backend_is_not_python( + breadth_first_search_parallel, kwargs.get('backend', Backend.PYTHON)) + import pydatastructs.graphs.algorithms as algorithms + func = "_breadth_first_search_parallel_" + graph._impl + if not hasattr(algorithms, func): + raise NotImplementedError( + "Currently breadth first search isn't implemented for " + "%s graphs."%(graph._impl)) + return getattr(algorithms, func)( + graph, source_node, num_threads, operation, *args, **kwargs) + +def _generate_layer(**kwargs): + _args, _kwargs = kwargs.get('args'), kwargs.get('kwargs') + (graph, curr_node, next_layer, visited, operation) = _args[0:5] + op_args, op_kwargs = _args[5:], _kwargs + next_nodes = graph.neighbors(curr_node) + status = True + if len(next_nodes) != 0: + for next_node in next_nodes: + if visited.get(next_node, False) is False: + status = status and operation(curr_node, next_node.name, *op_args, **op_kwargs) + next_layer.add(next_node.name) + visited[next_node.name] = True + else: + status = status and operation(curr_node, "", *op_args, **op_kwargs) + return status + +def _breadth_first_search_parallel_adjacency_list( + graph, source_node, num_threads, operation, *args, **kwargs): + visited, layers = {}, {} + layers[0] = set() + layers[0].add(source_node) + visited[source_node] = True + layer = 0 + while len(layers[layer]) != 0: + layers[layer+1] = set() + with ThreadPoolExecutor(max_workers=num_threads) as Executor: + for node in layers[layer]: + status = Executor.submit( + _generate_layer, args= + (graph, node, layers[layer+1], visited, + operation, *args), kwargs=kwargs).result() + layer += 1 + if not status: + return None + +_breadth_first_search_parallel_adjacency_matrix = _breadth_first_search_parallel_adjacency_list + +def _generate_mst_object(graph): + mst = Graph(*[getattr(graph, str(v)) for v in graph.vertices]) + return mst 
+ +def _sort_edges(graph, num_threads=None): + edges = list(graph.edge_weights.items()) + if num_threads is None: + sort_key = lambda item: item[1].value + return sorted(edges, key=sort_key) + + merge_sort_parallel(edges, num_threads, + comp=lambda u,v: u[1].value <= v[1].value) + return edges + +def _minimum_spanning_tree_kruskal_adjacency_list(graph): + mst = _generate_mst_object(graph) + dsf = DisjointSetForest() + for v in graph.vertices: + dsf.make_set(v) + for _, edge in _sort_edges(graph): + u, v = edge.source.name, edge.target.name + if dsf.find_root(u) is not dsf.find_root(v): + mst.add_edge(u, v, edge.value) + mst.add_edge(v, u, edge.value) + dsf.union(u, v) + return mst + +_minimum_spanning_tree_kruskal_adjacency_matrix = \ + _minimum_spanning_tree_kruskal_adjacency_list + +def _minimum_spanning_tree_prim_adjacency_list(graph): + q = PriorityQueue(implementation='binomial_heap') + e = {} + mst = Graph(implementation='adjacency_list') + q.push(next(iter(graph.vertices)), 0) + while not q.is_empty: + v = q.pop() + if not hasattr(mst, v): + mst.add_vertex(graph.__getattribute__(v)) + if e.get(v, None) is not None: + edge = e[v] + mst.add_vertex(edge.target) + mst.add_edge(edge.source.name, edge.target.name, edge.value) + mst.add_edge(edge.target.name, edge.source.name, edge.value) + for w_node in graph.neighbors(v): + w = w_node.name + vw = graph.edge_weights[v + '_' + w] + q.push(w, vw.value) + if e.get(w, None) is None or \ + e[w].value > vw.value: + e[w] = vw + return mst + +def minimum_spanning_tree(graph, algorithm, **kwargs): + """ + Computes a minimum spanning tree for the given + graph and algorithm. + + Parameters + ========== + + graph: Graph + The graph whose minimum spanning tree + has to be computed. + algorithm: str + The algorithm which should be used for + computing a minimum spanning tree. + Currently the following algorithms are + supported, + + 'kruskal' -> Kruskal's algorithm as given in [1]. 
+ + 'prim' -> Prim's algorithm as given in [2]. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Returns + ======= + + mst: Graph + A minimum spanning tree using the implementation + same as the graph provided in the input. + + Examples + ======== + + >>> from pydatastructs import Graph, AdjacencyListGraphNode + >>> from pydatastructs import minimum_spanning_tree + >>> u = AdjacencyListGraphNode('u') + >>> v = AdjacencyListGraphNode('v') + >>> G = Graph(u, v) + >>> G.add_edge(u.name, v.name, 3) + >>> mst = minimum_spanning_tree(G, 'kruskal') + >>> u_n = mst.neighbors(u.name) + >>> mst.get_edge(u.name, u_n[0].name).value + 3 + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Kruskal%27s_algorithm + .. [2] https://en.wikipedia.org/wiki/Prim%27s_algorithm + + Note + ==== + + The concept of minimum spanning tree is valid only for + connected and undirected graphs. So, this function + should be used only for such graphs. Using with other + types of graphs may lead to unwanted results. + """ + backend = kwargs.get('backend', Backend.PYTHON) + if backend == Backend.PYTHON: + import pydatastructs.graphs.algorithms as algorithms + func = "_minimum_spanning_tree_" + algorithm + "_" + graph._impl + if not hasattr(algorithms, func): + raise NotImplementedError( + "Currently %s algoithm for %s implementation of graphs " + "isn't implemented for finding minimum spanning trees." 
+ %(algorithm, graph._impl)) + return getattr(algorithms, func)(graph) + else: + from pydatastructs.graphs._backend.cpp._algorithms import minimum_spanning_tree_prim_adjacency_list + if graph._impl == "adjacency_list" and algorithm == 'prim': + return minimum_spanning_tree_prim_adjacency_list(graph) + +def _minimum_spanning_tree_parallel_kruskal_adjacency_list(graph, num_threads): + mst = _generate_mst_object(graph) + dsf = DisjointSetForest() + for v in graph.vertices: + dsf.make_set(v) + edges = _sort_edges(graph, num_threads) + for _, edge in edges: + u, v = edge.source.name, edge.target.name + if dsf.find_root(u) is not dsf.find_root(v): + mst.add_edge(u, v, edge.value) + mst.add_edge(v, u, edge.value) + dsf.union(u, v) + return mst + +_minimum_spanning_tree_parallel_kruskal_adjacency_matrix = \ + _minimum_spanning_tree_parallel_kruskal_adjacency_list + +def _find_min(q, v, i): + if not q.is_empty: + v[i] = q.peek + else: + v[i] = None + +def _minimum_spanning_tree_parallel_prim_adjacency_list(graph, num_threads): + q = [PriorityQueue(implementation='binomial_heap') for _ in range(num_threads)] + e = [{} for _ in range(num_threads)] + v2q = {} + mst = Graph(implementation='adjacency_list') + + itr = iter(graph.vertices) + for i in range(len(graph.vertices)): + v2q[next(itr)] = i%len(q) + q[0].push(next(iter(graph.vertices)), 0) + + while True: + + _vs = [None for _ in range(num_threads)] + with ThreadPoolExecutor(max_workers=num_threads) as Executor: + for i in range(num_threads): + Executor.submit(_find_min, q[i], _vs, i).result() + v = None + + for i in range(num_threads): + if _comp(_vs[i], v, lambda u, v: u.key < v.key): + v = _vs[i] + if v is None: + break + v = v.data + idx = v2q[v] + q[idx].pop() + + if not hasattr(mst, v): + mst.add_vertex(graph.__getattribute__(v)) + if e[idx].get(v, None) is not None: + edge = e[idx][v] + mst.add_vertex(edge.target) + mst.add_edge(edge.source.name, edge.target.name, edge.value) + mst.add_edge(edge.target.name, 
edge.source.name, edge.value) + for w_node in graph.neighbors(v): + w = w_node.name + vw = graph.edge_weights[v + '_' + w] + j = v2q[w] + q[j].push(w, vw.value) + if e[j].get(w, None) is None or \ + e[j][w].value > vw.value: + e[j][w] = vw + + return mst + +def minimum_spanning_tree_parallel(graph, algorithm, num_threads, **kwargs): + """ + Computes a minimum spanning tree for the given + graph and algorithm using the given number of threads. + + Parameters + ========== + + graph: Graph + The graph whose minimum spanning tree + has to be computed. + algorithm: str + The algorithm which should be used for + computing a minimum spanning tree. + Currently the following algorithms are + supported, + + 'kruskal' -> Kruskal's algorithm as given in [1]. + + 'prim' -> Prim's algorithm as given in [2]. + num_threads: int + The number of threads to be used. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Returns + ======= + + mst: Graph + A minimum spanning tree using the implementation + same as the graph provided in the input. + + Examples + ======== + + >>> from pydatastructs import Graph, AdjacencyListGraphNode + >>> from pydatastructs import minimum_spanning_tree_parallel + >>> u = AdjacencyListGraphNode('u') + >>> v = AdjacencyListGraphNode('v') + >>> G = Graph(u, v) + >>> G.add_edge(u.name, v.name, 3) + >>> mst = minimum_spanning_tree_parallel(G, 'kruskal', 3) + >>> u_n = mst.neighbors(u.name) + >>> mst.get_edge(u.name, u_n[0].name).value + 3 + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Kruskal%27s_algorithm#Parallel_algorithm + .. [2] https://en.wikipedia.org/wiki/Prim%27s_algorithm#Parallel_algorithm + + Note + ==== + + The concept of minimum spanning tree is valid only for + connected and undirected graphs. So, this function + should be used only for such graphs. Using with other + types of graphs will lead to unwanted results. 
+ """ + raise_if_backend_is_not_python( + minimum_spanning_tree_parallel, kwargs.get('backend', Backend.PYTHON)) + import pydatastructs.graphs.algorithms as algorithms + func = "_minimum_spanning_tree_parallel_" + algorithm + "_" + graph._impl + if not hasattr(algorithms, func): + raise NotImplementedError( + "Currently %s algoithm for %s implementation of graphs " + "isn't implemented for finding minimum spanning trees." + %(algorithm, graph._impl)) + return getattr(algorithms, func)(graph, num_threads) + +def _visit(graph, vertex, visited, incoming, L): + stack = [vertex] + while stack: + top = stack[-1] + if not visited.get(top, False): + visited[top] = True + for node in graph.neighbors(top): + if incoming.get(node.name, None) is None: + incoming[node.name] = [] + incoming[node.name].append(top) + if not visited.get(node.name, False): + stack.append(node.name) + if top is stack[-1]: + L.append(stack.pop()) + +def _assign(graph, u, incoming, assigned, component): + stack = [u] + while stack: + top = stack[-1] + if not assigned.get(top, False): + assigned[top] = True + component.add(top) + for u in incoming[top]: + if not assigned.get(u, False): + stack.append(u) + if top is stack[-1]: + stack.pop() + +def _strongly_connected_components_kosaraju_adjacency_list(graph): + visited, incoming, L = {}, {}, [] + for u in graph.vertices: + if not visited.get(u, False): + _visit(graph, u, visited, incoming, L) + + assigned = {} + components = [] + for i in range(-1, -len(L) - 1, -1): + comp = set() + if not assigned.get(L[i], False): + _assign(graph, L[i], incoming, assigned, comp) + if comp: + components.append(comp) + + return components + +_strongly_connected_components_kosaraju_adjacency_matrix = \ + _strongly_connected_components_kosaraju_adjacency_list + +def _tarjan_dfs(u, graph, index, stack, indices, low_links, on_stacks, components): + indices[u] = index[0] + low_links[u] = index[0] + index[0] += 1 + stack.append(u) + on_stacks[u] = True + + for node in 
graph.neighbors(u): + v = node.name + if indices[v] == -1: + _tarjan_dfs(v, graph, index, stack, indices, low_links, on_stacks, components) + low_links[u] = min(low_links[u], low_links[v]) + elif on_stacks[v]: + low_links[u] = min(low_links[u], low_links[v]) + + if low_links[u] == indices[u]: + component = set() + while stack: + w = stack.pop() + on_stacks[w] = False + component.add(w) + if w == u: + break + components.append(component) + +def _strongly_connected_components_tarjan_adjacency_list(graph): + index = [0] # mutable object + stack = Stack([]) + indices, low_links, on_stacks = {}, {}, {} + + for u in graph.vertices: + indices[u] = -1 + low_links[u] = -1 + on_stacks[u] = False + + components = [] + + for u in graph.vertices: + if indices[u] == -1: + _tarjan_dfs(u, graph, index, stack, indices, low_links, on_stacks, components) + + return components + +_strongly_connected_components_tarjan_adjacency_matrix = \ + _strongly_connected_components_tarjan_adjacency_list + +def strongly_connected_components(graph, algorithm, **kwargs): + """ + Computes strongly connected components for the given + graph and algorithm. + + Parameters + ========== + + graph: Graph + The graph whose minimum spanning tree + has to be computed. + algorithm: str + The algorithm which should be used for + computing strongly connected components. + Currently the following algorithms are + supported, + + 'kosaraju' -> Kosaraju's algorithm as given in [1]. + 'tarjan' -> Tarjan's algorithm as given in [2]. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Returns + ======= + + components: list + Python list with each element as set of vertices. 
+ + Examples + ======== + + >>> from pydatastructs import Graph, AdjacencyListGraphNode + >>> from pydatastructs import strongly_connected_components + >>> v1, v2, v3 = [AdjacencyListGraphNode(i) for i in range(3)] + >>> g = Graph(v1, v2, v3) + >>> g.add_edge(v1.name, v2.name) + >>> g.add_edge(v2.name, v3.name) + >>> g.add_edge(v3.name, v1.name) + >>> scc = strongly_connected_components(g, 'kosaraju') + >>> scc == [{'2', '0', '1'}] + True + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Kosaraju%27s_algorithm + .. [2] https://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm + + """ + raise_if_backend_is_not_python( + strongly_connected_components, kwargs.get('backend', Backend.PYTHON)) + import pydatastructs.graphs.algorithms as algorithms + func = "_strongly_connected_components_" + algorithm + "_" + graph._impl + if not hasattr(algorithms, func): + raise NotImplementedError( + "Currently %s algoithm for %s implementation of graphs " + "isn't implemented for finding strongly connected components." + %(algorithm, graph._impl)) + return getattr(algorithms, func)(graph) + +def depth_first_search( + graph, source_node, operation, *args, **kwargs): + """ + Implementation of depth first search (DFS) + algorithm. + + Parameters + ========== + + graph: Graph + The graph on which DFS is to be performed. + source_node: str + The name of the source node from where the DFS is + to be initiated. + operation: function + The function which is to be applied + on every node when it is visited. + The prototype which is to be followed is, + `function_name(curr_node, next_node, + arg_1, arg_2, . . ., arg_n)`. + Here, the first two arguments denote, the + current node and the node next to current node. + The rest of the arguments are optional and you can + provide your own stuff there. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. 
+ + Note + ==== + + You should pass all the arguments which you are going + to use in the prototype of your `operation` after + passing the operation function. + + Examples + ======== + + >>> from pydatastructs import Graph, AdjacencyListGraphNode + >>> V1 = AdjacencyListGraphNode("V1") + >>> V2 = AdjacencyListGraphNode("V2") + >>> V3 = AdjacencyListGraphNode("V3") + >>> G = Graph(V1, V2, V3) + >>> from pydatastructs import depth_first_search + >>> def f(curr_node, next_node, dest_node): + ... return curr_node != dest_node + ... + >>> G.add_edge(V1.name, V2.name) + >>> G.add_edge(V2.name, V3.name) + >>> depth_first_search(G, V1.name, f, V3.name) + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Depth-first_search + """ + raise_if_backend_is_not_python( + depth_first_search, kwargs.get('backend', Backend.PYTHON)) + import pydatastructs.graphs.algorithms as algorithms + func = "_depth_first_search_" + graph._impl + if not hasattr(algorithms, func): + raise NotImplementedError( + "Currently depth first search isn't implemented for " + "%s graphs."%(graph._impl)) + return getattr(algorithms, func)( + graph, source_node, operation, *args, **kwargs) + +def _depth_first_search_adjacency_list( + graph, source_node, operation, *args, **kwargs): + dfs_stack = Stack() + visited = {} + dfs_stack.append(source_node) + visited[source_node] = True + while len(dfs_stack) != 0: + curr_node = dfs_stack.pop() + next_nodes = graph.neighbors(curr_node) + if len(next_nodes) != 0: + for next_node in next_nodes: + if next_node.name not in visited: + status = operation(curr_node, next_node.name, *args, **kwargs) + if not status: + return None + dfs_stack.append(next_node.name) + visited[next_node.name] = True + else: + status = operation(curr_node, "", *args, **kwargs) + if not status: + return None + +_depth_first_search_adjacency_matrix = _depth_first_search_adjacency_list + +def shortest_paths(graph: Graph, algorithm: str, + source: str, target: str="", + **kwargs) -> 
tuple: + """ + Finds shortest paths in the given graph from a given source. + + Parameters + ========== + + graph: Graph + The graph under consideration. + algorithm: str + The algorithm to be used. Currently, the following algorithms + are implemented, + + 'bellman_ford' -> Bellman-Ford algorithm as given in [1] + + 'dijkstra' -> Dijkstra algorithm as given in [2]. + source: str + The name of the source the node. + target: str + The name of the target node. + Optional, by default, all pair shortest paths + are returned. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Returns + ======= + + (distances, predecessors): (dict, dict) + If target is not provided and algorithm used + is 'bellman_ford'/'dijkstra'. + (distances[target], predecessors): (float, dict) + If target is provided and algorithm used is + 'bellman_ford'/'dijkstra'. + + Examples + ======== + + >>> from pydatastructs import Graph, AdjacencyListGraphNode + >>> from pydatastructs import shortest_paths + >>> V1 = AdjacencyListGraphNode("V1") + >>> V2 = AdjacencyListGraphNode("V2") + >>> V3 = AdjacencyListGraphNode("V3") + >>> G = Graph(V1, V2, V3) + >>> G.add_edge('V2', 'V3', 10) + >>> G.add_edge('V1', 'V2', 11) + >>> shortest_paths(G, 'bellman_ford', 'V1') + ({'V1': 0, 'V2': 11, 'V3': 21}, {'V1': None, 'V2': 'V1', 'V3': 'V2'}) + >>> shortest_paths(G, 'dijkstra', 'V1') + ({'V2': 11, 'V3': 21, 'V1': 0}, {'V1': None, 'V2': 'V1', 'V3': 'V2'}) + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Bellman%E2%80%93Ford_algorithm + .. 
[2] https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm + """ + backend = kwargs.get('backend', Backend.PYTHON) + if (backend == Backend.PYTHON): + import pydatastructs.graphs.algorithms as algorithms + func = "_" + algorithm + "_" + graph._impl + if not hasattr(algorithms, func): + raise NotImplementedError( + "Currently %s algorithm isn't implemented for " + "finding shortest paths in graphs."%(algorithm)) + return getattr(algorithms, func)(graph, source, target) + else: + from pydatastructs.graphs._backend.cpp._algorithms import shortest_paths_dijkstra_adjacency_list + if graph._impl == "adjacency_list" and algorithm == 'dijkstra': + return shortest_paths_dijkstra_adjacency_list(graph, source, target) + +def _bellman_ford_adjacency_list(graph: Graph, source: str, target: str) -> tuple: + distances, predecessor, visited, cnts = {}, {}, {}, {} + + for v in graph.vertices: + distances[v] = float('inf') + predecessor[v] = None + visited[v] = False + cnts[v] = 0 + distances[source] = 0 + verticy_num = len(graph.vertices) + + que = Queue([source]) + + while que: + u = que.popleft() + visited[u] = False + neighbors = graph.neighbors(u) + for neighbor in neighbors: + v = neighbor.name + edge_str = u + '_' + v + if distances[u] != float('inf') and distances[u] + graph.edge_weights[edge_str].value < distances[v]: + distances[v] = distances[u] + graph.edge_weights[edge_str].value + predecessor[v] = u + cnts[v] = cnts[u] + 1 + if cnts[v] >= verticy_num: + raise ValueError("Graph contains a negative weight cycle.") + if not visited[v]: + que.append(v) + visited[v] = True + + if target != "": + return (distances[target], predecessor) + return (distances, predecessor) + +_bellman_ford_adjacency_matrix = _bellman_ford_adjacency_list + +def _dijkstra_adjacency_list(graph: Graph, start: str, target: str): + V = len(graph.vertices) + visited, dist, pred = {}, {}, {} + for v in graph.vertices: + visited[v] = False + pred[v] = None + if v != start: + dist[v] = float('inf') + 
dist[start] = 0 + pq = PriorityQueue(implementation='binomial_heap') + for vertex in dist: + pq.push(vertex, dist[vertex]) + for _ in range(V): + u = pq.pop() + visited[u] = True + for v in graph.vertices: + edge_str = u + '_' + v + if (edge_str in graph.edge_weights and graph.edge_weights[edge_str].value >= 0 and + visited[v] is False and dist[v] > dist[u] + graph.edge_weights[edge_str].value): + dist[v] = dist[u] + graph.edge_weights[edge_str].value + pred[v] = u + pq.push(v, dist[v]) + + if target != "": + return (dist[target], pred) + return dist, pred + +_dijkstra_adjacency_matrix = _dijkstra_adjacency_list + +def all_pair_shortest_paths(graph: Graph, algorithm: str, + **kwargs) -> tuple: + """ + Finds shortest paths between all pairs of vertices in the given graph. + + Parameters + ========== + + graph: Graph + The graph under consideration. + algorithm: str + The algorithm to be used. Currently, the following algorithms + are implemented, + + 'floyd_warshall' -> Floyd Warshall algorithm as given in [1]. + 'johnson' -> Johnson's Algorithm as given in [2] + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Returns + ======= + + (distances, predecessors): (dict, dict) + + Examples + ======== + + >>> from pydatastructs import Graph, AdjacencyListGraphNode + >>> from pydatastructs import all_pair_shortest_paths + >>> V1 = AdjacencyListGraphNode("V1") + >>> V2 = AdjacencyListGraphNode("V2") + >>> V3 = AdjacencyListGraphNode("V3") + >>> G = Graph(V1, V2, V3) + >>> G.add_edge('V2', 'V3', 10) + >>> G.add_edge('V1', 'V2', 11) + >>> G.add_edge('V3', 'V1', 5) + >>> dist, _ = all_pair_shortest_paths(G, 'floyd_warshall') + >>> dist['V1']['V3'] + 21 + >>> dist['V3']['V1'] + 5 + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Floyd%E2%80%93Warshall_algorithm + .. 
[2] https://en.wikipedia.org/wiki/Johnson's_algorithm + """ + raise_if_backend_is_not_python( + all_pair_shortest_paths, kwargs.get('backend', Backend.PYTHON)) + import pydatastructs.graphs.algorithms as algorithms + func = "_" + algorithm + "_" + graph._impl + if not hasattr(algorithms, func): + raise NotImplementedError( + "Currently %s algorithm isn't implemented for " + "finding shortest paths in graphs."%(algorithm)) + return getattr(algorithms, func)(graph) + +def _floyd_warshall_adjacency_list(graph: Graph): + dist, next_vertex = {}, {} + V, E = graph.vertices, graph.edge_weights + + for v in V: + dist[v] = {} + next_vertex[v] = {} + + for name, edge in E.items(): + dist[edge.source.name][edge.target.name] = edge.value + next_vertex[edge.source.name][edge.target.name] = edge.source.name + + for v in V: + dist[v][v] = 0 + next_vertex[v][v] = v + + for k in V: + for i in V: + for j in V: + dist_i_j = dist.get(i, {}).get(j, float('inf')) + dist_i_k = dist.get(i, {}).get(k, float('inf')) + dist_k_j = dist.get(k, {}).get(j, float('inf')) + next_i_k = next_vertex.get(i + '_' + k, None) + if dist_i_j > dist_i_k + dist_k_j: + dist[i][j] = dist_i_k + dist_k_j + next_vertex[i][j] = next_i_k + + return (dist, next_vertex) + +_floyd_warshall_adjacency_matrix = _floyd_warshall_adjacency_list + +def _johnson_adjacency_list(graph: Graph): + new_vertex = AdjacencyListGraphNode('__q__') + graph.add_vertex(new_vertex) + + for vertex in graph.vertices: + if vertex != '__q__': + graph.add_edge('__q__', vertex, 0) + + distances, predecessors = shortest_paths(graph, 'bellman_ford', '__q__') + + edges_to_remove = [] + for edge in graph.edge_weights: + edge_node = graph.edge_weights[edge] + if edge_node.source.name == '__q__': + edges_to_remove.append((edge_node.source.name, edge_node.target.name)) + + for u, v in edges_to_remove: + graph.remove_edge(u, v) + graph.remove_vertex('__q__') + + for edge in graph.edge_weights: + edge_node = graph.edge_weights[edge] + u, v = 
edge_node.source.name, edge_node.target.name + graph.edge_weights[edge].value += (distances[u] - distances[v]) + + all_distances = {} + all_next_vertex = {} + + for vertex in graph.vertices: + u = vertex + dijkstra_dist, dijkstra_pred = shortest_paths(graph, 'dijkstra', u) + all_distances[u] = {} + all_next_vertex[u] = {} + for v in graph.vertices: + if dijkstra_pred[v] is None or dijkstra_pred[v] == u : + all_next_vertex[u][v] = u + else: + all_next_vertex[u][v] = None + if v in dijkstra_dist: + all_distances[u][v] = dijkstra_dist[v] - distances[u] + distances[v] + else: + all_distances[u][v] = float('inf') + + return (all_distances, all_next_vertex) + +def topological_sort(graph: Graph, algorithm: str, + **kwargs) -> list: + """ + Performs topological sort on the given graph using given algorithm. + + Parameters + ========== + + graph: Graph + The graph under consideration. + algorithm: str + The algorithm to be used. + Currently, following are supported, + + 'kahn' -> Kahn's algorithm as given in [1]. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Returns + ======= + + list + The list of topologically sorted vertices. + + Examples + ======== + + >>> from pydatastructs import Graph, AdjacencyListGraphNode, topological_sort + >>> v_1 = AdjacencyListGraphNode('v_1') + >>> v_2 = AdjacencyListGraphNode('v_2') + >>> graph = Graph(v_1, v_2) + >>> graph.add_edge('v_1', 'v_2') + >>> topological_sort(graph, 'kahn') + ['v_1', 'v_2'] + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/Topological_sorting#Kahn's_algorithm + """ + raise_if_backend_is_not_python( + topological_sort, kwargs.get('backend', Backend.PYTHON)) + import pydatastructs.graphs.algorithms as algorithms + func = "_" + algorithm + "_" + graph._impl + if not hasattr(algorithms, func): + raise NotImplementedError( + "Currently %s algorithm isn't implemented for " + "performing topological sort on %s graphs."%(algorithm, graph._impl)) + return getattr(algorithms, func)(graph) + +def _kahn_adjacency_list(graph: Graph) -> list: + S = Queue() + in_degree = {u: 0 for u in graph.vertices} + for u in graph.vertices: + for v in graph.neighbors(u): + in_degree[v.name] += 1 + for u in graph.vertices: + if in_degree[u] == 0: + S.append(u) + in_degree.pop(u) + + L = [] + while S: + n = S.popleft() + L.append(n) + for m in graph.neighbors(n): + graph.remove_edge(n, m.name) + in_degree[m.name] -= 1 + if in_degree[m.name] == 0: + S.append(m.name) + in_degree.pop(m.name) + + if in_degree: + raise ValueError("Graph is not acyclic.") + return L + +def topological_sort_parallel(graph: Graph, algorithm: str, num_threads: int, + **kwargs) -> list: + """ + Performs topological sort on the given graph using given algorithm using + given number of threads. + + Parameters + ========== + + graph: Graph + The graph under consideration. + algorithm: str + The algorithm to be used. + Currently, following are supported, + + 'kahn' -> Kahn's algorithm as given in [1]. + num_threads: int + The maximum number of threads to be used. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Returns + ======= + + list + The list of topologically sorted vertices. 
+ + Examples + ======== + + >>> from pydatastructs import Graph, AdjacencyListGraphNode, topological_sort_parallel + >>> v_1 = AdjacencyListGraphNode('v_1') + >>> v_2 = AdjacencyListGraphNode('v_2') + >>> graph = Graph(v_1, v_2) + >>> graph.add_edge('v_1', 'v_2') + >>> topological_sort_parallel(graph, 'kahn', 1) + ['v_1', 'v_2'] + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Topological_sorting#Kahn's_algorithm + """ + raise_if_backend_is_not_python( + topological_sort_parallel, kwargs.get('backend', Backend.PYTHON)) + import pydatastructs.graphs.algorithms as algorithms + func = "_" + algorithm + "_" + graph._impl + '_parallel' + if not hasattr(algorithms, func): + raise NotImplementedError( + "Currently %s algorithm isn't implemented for " + "performing topological sort on %s graphs."%(algorithm, graph._impl)) + return getattr(algorithms, func)(graph, num_threads) + +def _kahn_adjacency_list_parallel(graph: Graph, num_threads: int) -> list: + num_vertices = len(graph.vertices) + + def _collect_source_nodes(graph: Graph) -> list: + S = [] + in_degree = {u: 0 for u in graph.vertices} + for u in graph.vertices: + for v in graph.neighbors(u): + in_degree[v.name] += 1 + for u in in_degree: + if in_degree[u] == 0: + S.append(u) + return list(S) + + def _job(graph: Graph, u: str): + for v in graph.neighbors(u): + graph.remove_edge(u, v.name) + + L = [] + source_nodes = _collect_source_nodes(graph) + while source_nodes: + with ThreadPoolExecutor(max_workers=num_threads) as Executor: + for node in source_nodes: + L.append(node) + Executor.submit(_job, graph, node) + for node in source_nodes: + graph.remove_vertex(node) + source_nodes = _collect_source_nodes(graph) + + if len(L) != num_vertices: + raise ValueError("Graph is not acyclic.") + return L + + +def _breadth_first_search_max_flow(graph: Graph, source_node, sink_node, flow_passed, for_dinic=False): + bfs_queue = Queue() + parent, currentPathC = {}, {} + currentPathC[source_node] = float('inf') 
+ bfs_queue.append(source_node) + while len(bfs_queue) != 0: + curr_node = bfs_queue.popleft() + next_nodes = graph.neighbors(curr_node) + if len(next_nodes) != 0: + for next_node in next_nodes: + capacity = graph.get_edge(curr_node, next_node.name).value + fp = flow_passed.get((curr_node, next_node.name), 0) + if capacity and parent.get(next_node.name, False) is False and capacity - fp > 0: + parent[next_node.name] = curr_node + next_flow = min(currentPathC[curr_node], capacity - fp) + currentPathC[next_node.name] = next_flow + if next_node.name == sink_node and not for_dinic: + return (next_flow, parent) + bfs_queue.append(next_node.name) + return (0, parent) + + +def _max_flow_edmonds_karp_(graph: Graph, source, sink): + m_flow = 0 + flow_passed = {} + new_flow, parent = _breadth_first_search_max_flow(graph, source, sink, flow_passed) + while new_flow != 0: + m_flow += new_flow + current = sink + while current != source: + prev = parent[current] + fp = flow_passed.get((prev, current), 0) + flow_passed[(prev, current)] = fp + new_flow + fp = flow_passed.get((current, prev), 0) + flow_passed[(current, prev)] = fp - new_flow + current = prev + new_flow, parent = _breadth_first_search_max_flow(graph, source, sink, flow_passed) + return m_flow + + +def _depth_first_search_max_flow_dinic(graph: Graph, u, parent, sink_node, flow, flow_passed): + if u == sink_node: + return flow + + next_nodes = graph.neighbors(u) + if len(next_nodes) != 0: + for next_node in next_nodes: + capacity = graph.get_edge(u, next_node.name).value + fp = flow_passed.get((u, next_node.name), 0) + parent_cond = parent.get(next_node.name, None) + if parent_cond and parent_cond == u and capacity - fp > 0: + path_flow = _depth_first_search_max_flow_dinic(graph, + next_node.name, + parent, sink_node, + min(flow, capacity - fp), flow_passed) + if path_flow > 0: + fp = flow_passed.get((u, next_node.name), 0) + flow_passed[(u, next_node.name)] = fp + path_flow + fp = flow_passed.get((next_node.name, u), 
0) + flow_passed[(next_node.name, u)] = fp - path_flow + return path_flow + return 0 + + +def _max_flow_dinic_(graph: Graph, source, sink): + max_flow = 0 + flow_passed = {} + while True: + next_flow, parent = _breadth_first_search_max_flow(graph, source, sink, flow_passed, True) + if parent.get(sink, False) is False: + break + + while True: + path_flow = _depth_first_search_max_flow_dinic(graph, source, + parent, sink, + float('inf'), + flow_passed) + if path_flow <= 0: + break + max_flow += path_flow + + return max_flow + + +def max_flow(graph, source, sink, algorithm='edmonds_karp', **kwargs): + raise_if_backend_is_not_python( + max_flow, kwargs.get('backend', Backend.PYTHON)) + + import pydatastructs.graphs.algorithms as algorithms + func = "_max_flow_" + algorithm + "_" + if not hasattr(algorithms, func): + raise NotImplementedError( + f"Currently {algorithm} algorithm isn't implemented for " + "performing max flow on graphs.") + return getattr(algorithms, func)(graph, source, sink) + + +def find_bridges(graph): + """ + Finds all bridges in an undirected graph using Tarjan's Algorithm. + + Parameters + ========== + graph : Graph + An undirected graph instance. + + Returns + ========== + List[tuple] + A list of bridges, where each bridge is represented as a tuple (u, v) + with u <= v. + + Example + ======== + >>> from pydatastructs import Graph, AdjacencyListGraphNode, find_bridges + >>> v0 = AdjacencyListGraphNode(0) + >>> v1 = AdjacencyListGraphNode(1) + >>> v2 = AdjacencyListGraphNode(2) + >>> v3 = AdjacencyListGraphNode(3) + >>> v4 = AdjacencyListGraphNode(4) + >>> graph = Graph(v0, v1, v2, v3, v4, implementation='adjacency_list') + >>> graph.add_edge(v0.name, v1.name) + >>> graph.add_edge(v1.name, v2.name) + >>> graph.add_edge(v2.name, v3.name) + >>> graph.add_edge(v3.name, v4.name) + >>> find_bridges(graph) + [('0', '1'), ('1', '2'), ('2', '3'), ('3', '4')] + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/Bridge_(graph_theory) + """ + + vertices = list(graph.vertices) + processed_vertices = [] + for v in vertices: + if hasattr(v, "name"): + processed_vertices.append(v.name) + else: + processed_vertices.append(v) + + n = len(processed_vertices) + adj = {v: [] for v in processed_vertices} + for v in processed_vertices: + for neighbor in graph.neighbors(v): + if hasattr(neighbor, "name"): + nbr = neighbor.name + else: + nbr = neighbor + adj[v].append(nbr) + + mapping = {v: idx for idx, v in enumerate(processed_vertices)} + inv_mapping = {idx: v for v, idx in mapping.items()} + + n_adj = [[] for _ in range(n)] + for v in processed_vertices: + idx_v = mapping[v] + for u in adj[v]: + idx_u = mapping[u] + n_adj[idx_v].append(idx_u) + + visited = [False] * n + disc = [0] * n + low = [0] * n + parent = [-1] * n + bridges_idx = [] + time = 0 + + def dfs(u): + nonlocal time + visited[u] = True + disc[u] = low[u] = time + time += 1 + for v in n_adj[u]: + if not visited[v]: + parent[v] = u + dfs(v) + low[u] = min(low[u], low[v]) + if low[v] > disc[u]: + bridges_idx.append((u, v)) + elif v != parent[u]: + low[u] = min(low[u], disc[v]) + + for i in range(n): + if not visited[i]: + dfs(i) + + bridges = [] + for u, v in bridges_idx: + a = inv_mapping[u] + b = inv_mapping[v] + if a <= b: + bridges.append((a, b)) + else: + bridges.append((b, a)) + bridges.sort() + return bridges diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/graph.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/graph.py new file mode 100644 index 000000000..39c2692e3 --- /dev/null +++ b/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/graph.py @@ -0,0 +1,163 @@ + +from pydatastructs.utils.misc_util import Backend, raise_if_backend_is_not_python + +__all__ = [ + 'Graph' +] + +class Graph(object): + """ + Represents generic concept of graphs. 
+ + Parameters + ========== + + implementation: str + The implementation to be used for storing + graph in memory. It can be figured out + from type of the vertices(if passed at construction). + Currently the following implementations are supported, + + 'adjacency_list' -> Adjacency list implementation. + + 'adjacency_matrix' -> Adjacency matrix implementation. + + By default, 'adjacency_list'. + vertices: GraphNode(s) + For AdjacencyList implementation vertices + can be passed for initializing the graph. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Examples + ======== + + >>> from pydatastructs.graphs import Graph + >>> from pydatastructs.utils import AdjacencyListGraphNode + >>> v_1 = AdjacencyListGraphNode('v_1', 1) + >>> v_2 = AdjacencyListGraphNode('v_2', 2) + >>> g = Graph(v_1, v_2) + >>> g.add_edge('v_1', 'v_2') + >>> g.add_edge('v_2', 'v_1') + >>> g.is_adjacent('v_1', 'v_2') + True + >>> g.is_adjacent('v_2', 'v_1') + True + >>> g.remove_edge('v_1', 'v_2') + >>> g.is_adjacent('v_1', 'v_2') + False + >>> g.is_adjacent('v_2', 'v_1') + True + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Graph_(abstract_data_type) + + Note + ==== + + Make sure to create nodes (AdjacencyListGraphNode or AdjacencyMatrixGraphNode) + and them in your graph using Graph.add_vertex before adding edges whose + end points require either of the nodes that you added. In other words, + Graph.add_edge doesn't add new nodes on its own if the input + nodes are not already present in the Graph. 
+ + """ + + __slots__ = ['_impl'] + + def __new__(cls, *args, **kwargs): + backend = kwargs.get('backend', Backend.PYTHON) + try: + default_impl = args[0]._impl if args else 'adjacency_list' + except: + default_impl = 'adjacency_list' + implementation = kwargs.get('implementation', default_impl) + if implementation == 'adjacency_list': + from pydatastructs.graphs.adjacency_list import AdjacencyList + obj = AdjacencyList(*args, **kwargs) + return obj + elif implementation == 'adjacency_matrix': + from pydatastructs.graphs.adjacency_matrix import AdjacencyMatrix + obj = AdjacencyMatrix(*args, **kwargs) + return obj + else: + raise NotImplementedError("%s implementation is not a part " + "of the library currently."%(implementation)) + + def is_adjacent(self, node1, node2): + """ + Checks if the nodes with the given + with the given names are adjacent + to each other. + """ + raise NotImplementedError( + "This is an abstract method.") + + def neighbors(self, node): + """ + Lists the neighbors of the node + with given name. + """ + raise NotImplementedError( + "This is an abstract method.") + + def add_vertex(self, node): + """ + Adds the input vertex to the node, or does nothing + if the input vertex is already in the graph. + """ + raise NotImplementedError( + "This is an abstract method.") + + def remove_vertex(self, node): + """ + Removes the input vertex along with all the edges + pointing towards it. + """ + raise NotImplementedError( + "This is an abstract method.") + + def add_edge(self, source, target, cost=None): + """ + Adds the edge starting at first parameter + i.e., source and ending at the second + parameter i.e., target. + """ + raise NotImplementedError( + "This is an abstract method.") + + def get_edge(self, source, target): + """ + Returns GraphEdge object if there + is an edge between source and target + otherwise None. 
+ """ + raise NotImplementedError( + "This is an abstract method.") + + def remove_edge(self, source, target): + """ + Removes the edge starting at first parameter + i.e., source and ending at the second + parameter i.e., target. + """ + raise NotImplementedError( + "This is an abstract method.") + + def num_vertices(self): + """ + Number of vertices + """ + raise NotImplementedError( + "This is an abstract method.") + + def num_edges(self): + """ + Number of edges + """ + raise NotImplementedError( + "This is an abstract method.") diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/tests/__init__.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_adjacency_list.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_adjacency_list.py new file mode 100644 index 000000000..3a9cdb14f --- /dev/null +++ b/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_adjacency_list.py @@ -0,0 +1,83 @@ +from pydatastructs.graphs import Graph +from pydatastructs.utils import AdjacencyListGraphNode +from pydatastructs.utils.raises_util import raises +from pydatastructs.utils.misc_util import Backend + +def test_adjacency_list(): + v_1 = AdjacencyListGraphNode('v_1', 1) + v_2 = AdjacencyListGraphNode('v_2', 2) + g = Graph(v_1, v_2, implementation='adjacency_list') + v_3 = AdjacencyListGraphNode('v_3', 3) + g.add_vertex(v_2) + g.add_vertex(v_3) + g.add_edge('v_1', 'v_2') + g.add_edge('v_2', 'v_3') + g.add_edge('v_3', 'v_1') + assert g.is_adjacent('v_1', 'v_2') is True + assert g.is_adjacent('v_2', 'v_3') is True + assert g.is_adjacent('v_3', 'v_1') is True + assert g.is_adjacent('v_2', 'v_1') is False + assert g.is_adjacent('v_3', 'v_2') is False + assert g.is_adjacent('v_1', 'v_3') is False + neighbors = g.neighbors('v_1') + 
assert neighbors == [v_2] + v = AdjacencyListGraphNode('v', 4) + g.add_vertex(v) + g.add_edge('v_1', 'v', 0) + g.add_edge('v_2', 'v', 0) + g.add_edge('v_3', 'v', 0) + assert g.is_adjacent('v_1', 'v') is True + assert g.is_adjacent('v_2', 'v') is True + assert g.is_adjacent('v_3', 'v') is True + e1 = g.get_edge('v_1', 'v') + e2 = g.get_edge('v_2', 'v') + e3 = g.get_edge('v_3', 'v') + assert (e1.source.name, e1.target.name) == ('v_1', 'v') + assert (e2.source.name, e2.target.name) == ('v_2', 'v') + assert (e3.source.name, e3.target.name) == ('v_3', 'v') + g.remove_edge('v_1', 'v') + assert g.is_adjacent('v_1', 'v') is False + g.remove_vertex('v') + assert g.is_adjacent('v_2', 'v') is False + assert g.is_adjacent('v_3', 'v') is False + + assert raises(ValueError, lambda: g.add_edge('u', 'v')) + assert raises(ValueError, lambda: g.add_edge('v', 'x')) + + v_4 = AdjacencyListGraphNode('v_4', 4, backend = Backend.CPP) + v_5 = AdjacencyListGraphNode('v_5', 5, backend = Backend.CPP) + g2 = Graph(v_4,v_5,implementation = 'adjacency_list', backend = Backend.CPP) + v_6 = AdjacencyListGraphNode('v_6', 6, backend = Backend.CPP) + assert raises(ValueError, lambda: g2.add_vertex(v_5)) + g2.add_vertex(v_6) + g2.add_edge('v_4', 'v_5') + g2.add_edge('v_5', 'v_6') + g2.add_edge('v_4', 'v_6') + assert g2.is_adjacent('v_4', 'v_5') is True + assert g2.is_adjacent('v_5', 'v_6') is True + assert g2.is_adjacent('v_4', 'v_6') is True + assert g2.is_adjacent('v_5', 'v_4') is False + assert g2.is_adjacent('v_6', 'v_5') is False + assert g2.is_adjacent('v_6', 'v_4') is False + assert g2.num_edges() == 3 + assert g2.num_vertices() == 3 + neighbors = g2.neighbors('v_4') + assert neighbors == [v_6, v_5] + v = AdjacencyListGraphNode('v', 4, backend = Backend.CPP) + g2.add_vertex(v) + g2.add_edge('v_4', 'v', 0) + g2.add_edge('v_5', 'v', 0) + g2.add_edge('v_6', 'v', "h") + assert g2.is_adjacent('v_4', 'v') is True + assert g2.is_adjacent('v_5', 'v') is True + assert g2.is_adjacent('v_6', 'v') is True 
+ e1 = g2.get_edge('v_4', 'v') + e2 = g2.get_edge('v_5', 'v') + e3 = g2.get_edge('v_6', 'v') + assert (str(e1)) == "('v_4', 'v', 0)" + assert (str(e2)) == "('v_5', 'v', 0)" + assert (str(e3)) == "('v_6', 'v', h)" + g2.remove_edge('v_4', 'v') + assert g2.is_adjacent('v_4', 'v') is False + g2.remove_vertex('v') + assert raises(ValueError, lambda: g2.add_edge('v_4', 'v')) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_adjacency_matrix.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_adjacency_matrix.py new file mode 100644 index 000000000..27dc81790 --- /dev/null +++ b/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_adjacency_matrix.py @@ -0,0 +1,53 @@ +from pydatastructs.graphs import Graph +from pydatastructs.utils import AdjacencyMatrixGraphNode +from pydatastructs.utils.raises_util import raises +from pydatastructs.utils.misc_util import Backend + +def test_AdjacencyMatrix(): + v_0 = AdjacencyMatrixGraphNode(0, 0) + v_1 = AdjacencyMatrixGraphNode(1, 1) + v_2 = AdjacencyMatrixGraphNode(2, 2) + g = Graph(v_0, v_1, v_2) + g.add_edge(0, 1, 0) + g.add_edge(1, 2, 0) + g.add_edge(2, 0, 0) + e1 = g.get_edge(0, 1) + e2 = g.get_edge(1, 2) + e3 = g.get_edge(2, 0) + assert (e1.source.name, e1.target.name) == ('0', '1') + assert (e2.source.name, e2.target.name) == ('1', '2') + assert (e3.source.name, e3.target.name) == ('2', '0') + assert g.is_adjacent(0, 1) is True + assert g.is_adjacent(1, 2) is True + assert g.is_adjacent(2, 0) is True + assert g.is_adjacent(1, 0) is False + assert g.is_adjacent(2, 1) is False + assert g.is_adjacent(0, 2) is False + neighbors = g.neighbors(0) + assert neighbors == [v_1] + g.remove_edge(0, 1) + assert g.is_adjacent(0, 1) is False + assert raises(ValueError, lambda: g.add_edge('u', 'v')) + assert raises(ValueError, lambda: g.add_edge('v', 'x')) + assert raises(ValueError, lambda: g.add_edge(2, 3)) + assert raises(ValueError, lambda: 
g.add_edge(3, 2)) + + v_3 = AdjacencyMatrixGraphNode('0', 0, backend = Backend.CPP) + v_4 = AdjacencyMatrixGraphNode('1', 1, backend = Backend.CPP) + v_5 = AdjacencyMatrixGraphNode('2', 2, backend = Backend.CPP) + g2 = Graph(v_3, v_4, v_5, implementation = 'adjacency_matrix', backend = Backend.CPP) + g2.add_edge('0', '1', 0) + g2.add_edge('1', '2', 0) + g2.add_edge('2', '0', 0) + assert g2.is_adjacent('0', '1') is True + assert g2.is_adjacent('1', '2') is True + assert g2.is_adjacent('2', '0') is True + assert g2.is_adjacent('1', '0') is False + assert g2.is_adjacent('2', '1') is False + assert g2.is_adjacent('0', '2') is False + neighbors = g2.neighbors('0') + assert neighbors == [v_4] + g2.remove_edge('0', '1') + assert g2.is_adjacent('0', '1') is False + assert raises(ValueError, lambda: g2.add_edge('u', 'v')) + assert raises(ValueError, lambda: g2.add_edge('v', 'x')) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_algorithms.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_algorithms.py new file mode 100644 index 000000000..04ebcccda --- /dev/null +++ b/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_algorithms.py @@ -0,0 +1,596 @@ +from pydatastructs import (breadth_first_search, Graph, +breadth_first_search_parallel, minimum_spanning_tree, +minimum_spanning_tree_parallel, strongly_connected_components, +depth_first_search, shortest_paths,all_pair_shortest_paths, topological_sort, +topological_sort_parallel, max_flow, find_bridges) +from pydatastructs.utils.raises_util import raises +from pydatastructs.utils.misc_util import AdjacencyListGraphNode, AdjacencyMatrixGraphNode +from pydatastructs.graphs._backend.cpp import _graph +from pydatastructs.graphs._backend.cpp import _algorithms +from pydatastructs.utils.misc_util import Backend + +def test_breadth_first_search(): + + def _test_breadth_first_search(ds): + import pydatastructs.utils.misc_util as 
utils + GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") + + V1 = GraphNode(0) + V2 = GraphNode(1) + V3 = GraphNode(2) + + G1 = Graph(V1, V2, V3) + + assert G1.num_vertices() == 3 + + edges = [ + (V1.name, V2.name), + (V2.name, V3.name), + (V1.name, V3.name) + ] + + for edge in edges: + G1.add_edge(*edge) + + assert G1.num_edges() == len(edges) + + parent = {} + def bfs_tree(curr_node, next_node, parent): + if next_node != "": + parent[next_node] = curr_node + return True + + breadth_first_search(G1, V1.name, bfs_tree, parent) + assert (parent[V3.name] == V1.name and parent[V2.name] == V1.name) or \ + (parent[V3.name] == V2.name and parent[V2.name] == V1.name) + + if (ds=='List'): + parent = {} + V9 = AdjacencyListGraphNode("9",0,backend = Backend.CPP) + V10 = AdjacencyListGraphNode("10",0,backend = Backend.CPP) + V11 = AdjacencyListGraphNode("11",0,backend = Backend.CPP) + G2 = Graph(V9, V10, V11,implementation = 'adjacency_list', backend = Backend.CPP) + assert G2.num_vertices()==3 + G2.add_edge("9", "10") + G2.add_edge("10", "11") + breadth_first_search(G2, "9", bfs_tree, parent, backend = Backend.CPP) + assert parent[V10] == V9 + assert parent[V11] == V10 + + if (ds == 'Matrix'): + parent3 = {} + V12 = AdjacencyMatrixGraphNode("12", 0, backend = Backend.CPP) + V13 = AdjacencyMatrixGraphNode("13", 0, backend = Backend.CPP) + V14 = AdjacencyMatrixGraphNode("14", 0, backend = Backend.CPP) + G3 = Graph(V12, V13, V14, implementation = 'adjacency_matrix', backend = Backend.CPP) + assert G3.num_vertices() == 3 + G3.add_edge("12", "13") + G3.add_edge("13", "14") + breadth_first_search(G3, "12", bfs_tree, parent3, backend = Backend.CPP) + assert parent3[V13] == V12 + assert parent3[V14] == V13 + + V4 = GraphNode(0) + V5 = GraphNode(1) + V6 = GraphNode(2) + V7 = GraphNode(3) + V8 = GraphNode(4) + + edges = [ + (V4.name, V5.name), + (V5.name, V6.name), + (V6.name, V7.name), + (V6.name, V4.name), + (V7.name, V8.name) + ] + + G2 = Graph(V4, V5, V6, V7, V8) + + for 
edge in edges: + G2.add_edge(*edge) + + assert G2.num_edges() == len(edges) + + path = [] + def path_finder(curr_node, next_node, dest_node, parent, path): + if next_node != "": + parent[next_node] = curr_node + if curr_node == dest_node: + node = curr_node + path.append(node) + while node is not None: + if parent.get(node, None) is not None: + path.append(parent[node]) + node = parent.get(node, None) + path.reverse() + return False + return True + + parent.clear() + breadth_first_search(G2, V4.name, path_finder, V7.name, parent, path) + assert path == [V4.name, V5.name, V6.name, V7.name] + + _test_breadth_first_search("List") + _test_breadth_first_search("Matrix") + +def test_breadth_first_search_parallel(): + + def _test_breadth_first_search_parallel(ds): + import pydatastructs.utils.misc_util as utils + GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") + + V1 = GraphNode(0) + V2 = GraphNode(1) + V3 = GraphNode(2) + V4 = GraphNode(3) + V5 = GraphNode(4) + V6 = GraphNode(5) + V7 = GraphNode(6) + V8 = GraphNode(7) + + + G1 = Graph(V1, V2, V3, V4, V5, V6, V7, V8) + + edges = [ + (V1.name, V2.name), + (V1.name, V3.name), + (V1.name, V4.name), + (V2.name, V5.name), + (V2.name, V6.name), + (V3.name, V6.name), + (V3.name, V7.name), + (V4.name, V7.name), + (V4.name, V8.name) + ] + + for edge in edges: + G1.add_edge(*edge) + + parent = {} + def bfs_tree(curr_node, next_node, parent): + if next_node != "": + parent[next_node] = curr_node + return True + + breadth_first_search_parallel(G1, V1.name, 5, bfs_tree, parent) + assert (parent[V2.name] == V1.name and parent[V3.name] == V1.name and + parent[V4.name] == V1.name and parent[V5.name] == V2.name and + (parent[V6.name] in (V2.name, V3.name)) and + (parent[V7.name] in (V3.name, V4.name)) and (parent[V8.name] == V4.name)) + + _test_breadth_first_search_parallel("List") + _test_breadth_first_search_parallel("Matrix") + +def test_minimum_spanning_tree(): + + def _test_minimum_spanning_tree(func, ds, algorithm, 
*args): + import pydatastructs.utils.misc_util as utils + GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") + a, b, c, d, e = [GraphNode(x) for x in [0, 1, 2, 3, 4]] + graph = Graph(a, b, c, d, e) + graph.add_edge(a.name, c.name, 10) + graph.add_edge(c.name, a.name, 10) + graph.add_edge(a.name, d.name, 7) + graph.add_edge(d.name, a.name, 7) + graph.add_edge(c.name, d.name, 9) + graph.add_edge(d.name, c.name, 9) + graph.add_edge(d.name, b.name, 32) + graph.add_edge(b.name, d.name, 32) + graph.add_edge(d.name, e.name, 23) + graph.add_edge(e.name, d.name, 23) + mst = func(graph, algorithm, *args) + expected_mst = [('0_3', 7), ('2_3', 9), ('3_4', 23), ('3_1', 32), + ('3_0', 7), ('3_2', 9), ('4_3', 23), ('1_3', 32)] + assert len(expected_mst) == len(mst.edge_weights.items()) + for k, v in mst.edge_weights.items(): + assert (k, v.value) in expected_mst + + def _test_minimum_spanning_tree_cpp(ds, algorithm, *args): + if (ds == 'List' and algorithm == "prim"): + a1 = AdjacencyListGraphNode('a', 0, backend = Backend.CPP) + b1 = AdjacencyListGraphNode('b', 0, backend = Backend.CPP) + c1 = AdjacencyListGraphNode('c', 0, backend = Backend.CPP) + d1 = AdjacencyListGraphNode('d', 0, backend = Backend.CPP) + e1 = AdjacencyListGraphNode('e', 0, backend = Backend.CPP) + g = Graph(a1, b1, c1, d1, e1, backend = Backend.CPP) + g.add_edge(a1.name, c1.name, 10) + g.add_edge(c1.name, a1.name, 10) + g.add_edge(a1.name, d1.name, 7) + g.add_edge(d1.name, a1.name, 7) + g.add_edge(c1.name, d1.name, 9) + g.add_edge(d1.name, c1.name, 9) + g.add_edge(d1.name, b1.name, 32) + g.add_edge(b1.name, d1.name, 32) + g.add_edge(d1.name, e1.name, 23) + g.add_edge(e1.name, d1.name, 23) + mst = minimum_spanning_tree(g, "prim", backend = Backend.CPP) + expected_mst = ["('a', 'd', 7)", "('d', 'c', 9)", "('e', 'd', 23)", "('b', 'd', 32)", + "('d', 'a', 7)", "('c', 'd', 9)", "('d', 'e', 23)", "('d', 'b', 32)"] + assert str(mst.get_edge('a', 'd')) in expected_mst + assert str(mst.get_edge('e', 'd')) in 
def test_strongly_connected_components():
    """Check Kosaraju's and Tarjan's SCC algorithms on both graph backends."""

    def _test_strongly_connected_components(func, ds, algorithm, *args):
        # Build the classic 8-vertex SCC example: components are
        # {a, b, e}, {c, d, h} and {f, g}.
        import pydatastructs.utils.misc_util as utils
        GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode")
        a, b, c, d, e, f, g, h = \
            [GraphNode(chr(x)) for x in range(ord('a'), ord('h') + 1)]
        graph = Graph(a, b, c, d, e, f, g, h)
        graph.add_edge(a.name, b.name)
        graph.add_edge(b.name, c.name)
        graph.add_edge(b.name, f.name)
        graph.add_edge(b.name, e.name)
        graph.add_edge(c.name, d.name)
        graph.add_edge(c.name, g.name)
        graph.add_edge(d.name, h.name)
        graph.add_edge(d.name, c.name)
        graph.add_edge(e.name, f.name)
        graph.add_edge(e.name, a.name)
        graph.add_edge(f.name, g.name)
        graph.add_edge(g.name, f.name)
        graph.add_edge(h.name, d.name)
        graph.add_edge(h.name, g.name)
        comps = func(graph, algorithm)
        expected_comps = [{'e', 'a', 'b'}, {'d', 'c', 'h'}, {'g', 'f'}]
        # BUG FIX: the old assertion was ``comps.sort() == expected_comps.sort()``.
        # list.sort() returns None, so it compared None == None and always
        # passed regardless of the result.  Compare the component sets
        # order-independently instead.
        assert sorted(map(sorted, comps)) == sorted(map(sorted, expected_comps))

    scc = strongly_connected_components
    _test_strongly_connected_components(scc, "List", "kosaraju")
    _test_strongly_connected_components(scc, "Matrix", "kosaraju")
    _test_strongly_connected_components(scc, "List", "tarjan")
    _test_strongly_connected_components(scc, "Matrix", "tarjan")
def test_depth_first_search():
    """Exercise depth_first_search with two visitor callbacks on both backends."""

    def _test_depth_first_search(ds):
        import pydatastructs.utils.misc_util as utils
        GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode")

        # Triangle graph: DFS from V1 must reach V2 and V3; the exact
        # parent of V3 depends on traversal order, so both trees are accepted.
        V1 = GraphNode(0)
        V2 = GraphNode(1)
        V3 = GraphNode(2)

        G1 = Graph(V1, V2, V3)

        edges = [
            (V1.name, V2.name),
            (V2.name, V3.name),
            (V1.name, V3.name)
        ]

        for edge in edges:
            G1.add_edge(*edge)

        parent = {}
        # Visitor contract (per usage here): called with (curr_node, next_node,
        # *extra args); next_node == "" marks the source call; returning True
        # continues the traversal.
        def dfs_tree(curr_node, next_node, parent):
            if next_node != "":
                parent[next_node] = curr_node
            return True

        depth_first_search(G1, V1.name, dfs_tree, parent)
        assert (parent[V3.name] == V1.name and parent[V2.name] == V1.name) or \
            (parent[V3.name] == V2.name and parent[V2.name] == V1.name)

        # Second graph: a 4-cycle with a tail; path_finder reconstructs the
        # DFS path from the source to a destination and aborts by returning
        # False once the destination is reached.
        V4 = GraphNode(0)
        V5 = GraphNode(1)
        V6 = GraphNode(2)
        V7 = GraphNode(3)
        V8 = GraphNode(4)

        edges = [
            (V4.name, V5.name),
            (V5.name, V6.name),
            (V6.name, V7.name),
            (V6.name, V4.name),
            (V7.name, V8.name)
        ]

        G2 = Graph(V4, V5, V6, V7, V8)

        for edge in edges:
            G2.add_edge(*edge)

        path = []
        def path_finder(curr_node, next_node, dest_node, parent, path):
            if next_node != "":
                parent[next_node] = curr_node
            if curr_node == dest_node:
                # Walk the parent chain back to the source, then reverse.
                node = curr_node
                path.append(node)
                while node is not None:
                    if parent.get(node, None) is not None:
                        path.append(parent[node])
                    node = parent.get(node, None)
                path.reverse()
                return False
            return True

        parent.clear()
        depth_first_search(G2, V4.name, path_finder, V7.name, parent, path)
        assert path == [V4.name, V5.name, V6.name, V7.name]

    _test_depth_first_search("List")
    _test_depth_first_search("Matrix")
def test_shortest_paths():
    """Check bellman_ford and dijkstra on positive- and negative-weight graphs."""

    def _test_shortest_paths_positive_edges(ds, algorithm):
        # Positive-weight digraph of city abbreviations; verify full
        # single-source output and the single-target variant from 'SLC'.
        import pydatastructs.utils.misc_util as utils
        GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode")
        vertices = [GraphNode('S'), GraphNode('C'),
                    GraphNode('SLC'), GraphNode('SF'),
                    GraphNode('D')]

        graph = Graph(*vertices)
        graph.add_edge('S', 'SLC', 2)
        graph.add_edge('C', 'S', 4)
        graph.add_edge('C', 'D', 2)
        graph.add_edge('SLC', 'C', 2)
        graph.add_edge('SLC', 'D', 3)
        graph.add_edge('SF', 'SLC', 2)
        graph.add_edge('SF', 'S', 2)
        graph.add_edge('D', 'SF', 3)
        dist, pred = shortest_paths(graph, algorithm, 'SLC')
        assert dist == {'S': 6, 'C': 2, 'SLC': 0, 'SF': 6, 'D': 3}
        assert pred == {'S': 'C', 'C': 'SLC', 'SLC': None, 'SF': 'D', 'D': 'SLC'}
        dist, pred = shortest_paths(graph, algorithm, 'SLC', 'SF')
        assert dist == 6
        assert pred == {'S': 'C', 'C': 'SLC', 'SLC': None, 'SF': 'D', 'D': 'SLC'}
        # Introducing a negative cycle must make bellman_ford raise.
        graph.remove_edge('SLC', 'D')
        graph.add_edge('D', 'SLC', -10)
        assert raises(ValueError, lambda: shortest_paths(graph, 'bellman_ford', 'SLC'))

        # Mirror the dijkstra/List case on the C++ backend and expect
        # identical results.
        if (ds == 'List' and algorithm == 'dijkstra'):
            vertices2 = [AdjacencyListGraphNode('S', 0, backend = Backend.CPP), AdjacencyListGraphNode('C', 0, backend = Backend.CPP),
                         AdjacencyListGraphNode('SLC', 0, backend = Backend.CPP), AdjacencyListGraphNode('SF', 0, backend = Backend.CPP),
                         AdjacencyListGraphNode('D', 0, backend = Backend.CPP)]
            graph2 = Graph(*vertices2, backend = Backend.CPP)
            graph2.add_edge('S', 'SLC', 2)
            graph2.add_edge('C', 'S', 4)
            graph2.add_edge('C', 'D', 2)
            graph2.add_edge('SLC', 'C', 2)
            graph2.add_edge('SLC', 'D', 3)
            graph2.add_edge('SF', 'SLC', 2)
            graph2.add_edge('SF', 'S', 2)
            graph2.add_edge('D', 'SF', 3)
            (dist2, pred2) = shortest_paths(graph2, algorithm, 'SLC', backend = Backend.CPP)
            assert dist2 == {'S': 6, 'C': 2, 'SLC': 0, 'SF': 6, 'D': 3}
            assert pred2 == {'S': 'C', 'C': 'SLC', 'SLC': None, 'SF': 'D', 'D': 'SLC'}

    def _test_shortest_paths_negative_edges(ds, algorithm):
        # Negative edges but no negative cycle: bellman_ford must still
        # produce correct distances/predecessors from 's'.
        import pydatastructs.utils.misc_util as utils
        GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode")
        vertices = [GraphNode('s'), GraphNode('a'),
                    GraphNode('b'), GraphNode('c'),
                    GraphNode('d')]

        graph = Graph(*vertices)
        graph.add_edge('s', 'a', 3)
        graph.add_edge('s', 'b', 2)
        graph.add_edge('a', 'c', 1)
        graph.add_edge('b', 'd', 1)
        graph.add_edge('b', 'a', -2)
        graph.add_edge('c', 'd', 1)
        dist, pred = shortest_paths(graph, algorithm, 's')
        assert dist == {'s': 0, 'a': 0, 'b': 2, 'c': 1, 'd': 2}
        assert pred == {'s': None, 'a': 'b', 'b': 's', 'c': 'a', 'd': 'c'}
        dist, pred = shortest_paths(graph, algorithm, 's', 'd')
        assert dist == 2
        assert pred == {'s': None, 'a': 'b', 'b': 's', 'c': 'a', 'd': 'c'}

    _test_shortest_paths_positive_edges("List", 'bellman_ford')
    _test_shortest_paths_positive_edges("Matrix", 'bellman_ford')
    _test_shortest_paths_negative_edges("List", 'bellman_ford')
    _test_shortest_paths_negative_edges("Matrix", 'bellman_ford')
    _test_shortest_paths_positive_edges("List", 'dijkstra')
    _test_shortest_paths_positive_edges("Matrix", 'dijkstra')
def test_topological_sort():
    """Check Kahn's algorithm (serial and parallel) on the classic
    8-vertex dependency DAG from the Wikipedia topological-sorting article."""

    def _test_topological_sort(func, ds, algorithm, threads=None):
        import pydatastructs.utils.misc_util as utils
        GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode")
        vertices = [GraphNode('2'), GraphNode('3'), GraphNode('5'),
                    GraphNode('7'), GraphNode('8'), GraphNode('10'),
                    GraphNode('11'), GraphNode('9')]

        graph = Graph(*vertices)
        graph.add_edge('5', '11')
        graph.add_edge('7', '11')
        graph.add_edge('7', '8')
        graph.add_edge('3', '8')
        graph.add_edge('3', '10')
        graph.add_edge('11', '2')
        graph.add_edge('11', '9')
        graph.add_edge('11', '10')
        graph.add_edge('8', '9')
        if threads is not None:
            l = func(graph, algorithm, threads)
        else:
            l = func(graph, algorithm)
        # Many valid orders exist; only the level structure is fixed:
        # sources {3, 5, 7} first, then {8, 11}, then {10, 9, 2}.
        assert all([(l1 in l[0:3]) for l1 in ('3', '5', '7')] +
                   [(l2 in l[3:5]) for l2 in ('8', '11')] +
                   [(l3 in l[5:]) for l3 in ('10', '9', '2')])

    _test_topological_sort(topological_sort, "List", "kahn")
    _test_topological_sort(topological_sort_parallel, "List", "kahn", 3)
def test_max_flow():
    """Check max_flow (edmonds_karp and dinic) on three small flow networks."""

    def _test_max_flow(ds, algorithm):
        import pydatastructs.utils.misc_util as utils
        GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode")

        # Network 1: 5 vertices, bottleneck of 4 from a to e.
        a = GraphNode('a')
        b = GraphNode('b')
        c = GraphNode('c')
        d = GraphNode('d')
        e = GraphNode('e')

        G = Graph(a, b, c, d, e)

        G.add_edge('a', 'b', 3)
        G.add_edge('a', 'c', 4)
        G.add_edge('b', 'c', 2)
        G.add_edge('b', 'd', 3)
        G.add_edge('c', 'd', 1)
        G.add_edge('d', 'e', 6)

        assert max_flow(G, 'a', 'e', algorithm) == 4
        assert max_flow(G, 'a', 'c', algorithm) == 6

        # Network 2: the standard CLRS 6-vertex example (max flow 23).
        a = GraphNode('a')
        b = GraphNode('b')
        c = GraphNode('c')
        d = GraphNode('d')
        e = GraphNode('e')
        f = GraphNode('f')

        G2 = Graph(a, b, c, d, e, f)

        G2.add_edge('a', 'b', 16)
        G2.add_edge('a', 'c', 13)
        G2.add_edge('b', 'c', 10)
        G2.add_edge('b', 'd', 12)
        G2.add_edge('c', 'b', 4)
        G2.add_edge('c', 'e', 14)
        G2.add_edge('d', 'c', 9)
        G2.add_edge('d', 'f', 20)
        G2.add_edge('e', 'd', 7)
        G2.add_edge('e', 'f', 4)

        assert max_flow(G2, 'a', 'f', algorithm) == 23
        assert max_flow(G2, 'a', 'd', algorithm) == 19

        # Network 3: 4 vertices with two disjoint-ish routes (max flow 5).
        a = GraphNode('a')
        b = GraphNode('b')
        c = GraphNode('c')
        d = GraphNode('d')

        G3 = Graph(a, b, c, d)

        G3.add_edge('a', 'b', 3)
        G3.add_edge('a', 'c', 2)
        G3.add_edge('b', 'c', 2)
        G3.add_edge('b', 'd', 3)
        G3.add_edge('c', 'd', 2)

        assert max_flow(G3, 'a', 'd', algorithm) == 5
        assert max_flow(G3, 'a', 'b', algorithm) == 3

    _test_max_flow("List", "edmonds_karp")
    _test_max_flow("Matrix", "edmonds_karp")
    _test_max_flow("List", "dinic")
    _test_max_flow("Matrix", "dinic")
def test_find_bridges():
    """Check find_bridges on a path graph (all edges are bridges), a cycle
    (no bridges) and a disconnected graph, for both implementations."""

    def _test_find_bridges(ds):
        import pydatastructs.utils.misc_util as utils
        GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode")

        impl = 'adjacency_list' if ds == "List" else 'adjacency_matrix'

        # Path 0-1-2-3-4: every edge is a bridge.
        v0 = GraphNode(0)
        v1 = GraphNode(1)
        v2 = GraphNode(2)
        v3 = GraphNode(3)
        v4 = GraphNode(4)

        G1 = Graph(v0, v1, v2, v3, v4, implementation=impl)
        G1.add_edge(v0.name, v1.name)
        G1.add_edge(v1.name, v2.name)
        G1.add_edge(v2.name, v3.name)
        G1.add_edge(v3.name, v4.name)

        bridges = find_bridges(G1)
        expected_bridges = [('0', '1'), ('1', '2'), ('2', '3'), ('3', '4')]
        assert sorted(bridges) == sorted(expected_bridges)

        # Triangle 0-1-2-0: a cycle has no bridges.
        u0 = GraphNode(0)
        u1 = GraphNode(1)
        u2 = GraphNode(2)

        G2 = Graph(u0, u1, u2, implementation=impl)
        G2.add_edge(u0.name, u1.name)
        G2.add_edge(u1.name, u2.name)
        G2.add_edge(u2.name, u0.name)

        bridges = find_bridges(G2)
        assert bridges == []

        # Disconnected: path 0-1-2 plus edge 3-4; all three edges bridge.
        w0 = GraphNode(0)
        w1 = GraphNode(1)
        w2 = GraphNode(2)
        w3 = GraphNode(3)
        w4 = GraphNode(4)

        G3 = Graph(w0, w1, w2, w3, w4, implementation=impl)
        G3.add_edge(w0.name, w1.name)
        G3.add_edge(w1.name, w2.name)
        G3.add_edge(w3.name, w4.name)

        bridges = find_bridges(G3)
        expected_bridges = [('0', '1'), ('1', '2'), ('3', '4')]
        assert sorted(bridges) == sorted(expected_bridges)

    _test_find_bridges("List")
    _test_find_bridges("Matrix")
def _merge(array, sl, el, sr, er, end, comp):
    """Merge two adjacent sorted runs of ``array`` in place.

    The left run is array[sl:el+1] and the right run array[sr:er+1];
    positions beyond ``end`` are ignored so the final, possibly short,
    run of a merge-sort pass is handled transparently.  ``comp`` is the
    user comparator forwarded to ``_comp``.
    """
    # Copy both runs out, clearing the copied slots to None (they are
    # rewritten below with merged values).
    left = [array[pos] for pos in range(sl, el + 1) if pos <= end]
    right = [array[pos] for pos in range(sr, er + 1) if pos <= end]
    for pos in range(sl, min(el, end) + 1):
        array[pos] = None
    for pos in range(sr, min(er, end) + 1):
        array[pos] = None

    write = sl
    li = ri = 0
    # Standard two-finger merge; ties go to the left run, keeping the
    # merge stable with respect to ``comp``.
    while li < len(left) and ri < len(right):
        if _comp(left[li], right[ri], comp):
            array[write] = left[li]
            li += 1
        else:
            array[write] = right[ri]
            ri += 1
        write += 1

    # Drain whichever run still has elements.
    for leftover in left[li:]:
        array[write] = leftover
        write += 1
    for leftover in right[ri:]:
        array[write] = leftover
        write += 1
def merge_sort_parallel(array, num_threads, **kwargs):
    """
    Implements parallel merge sort.

    Parameters
    ==========

    array: Array
        The array which is to be sorted.
    num_threads: int
        The maximum number of threads to be used for sorting.
    start: int
        The starting index of the portion which is to be sorted.
        Optional, by default 0
    end: int
        The ending index of the portion which is to be sorted.
        Optional, by default the index of the last position filled.
    comp: lambda/function
        The comparator which is to be used for sorting. If the function
        returns False then only swapping is performed.
        Optional, by default, less than or equal to is used for
        comparing two values.
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available backend is used.

    Examples
    ========

    >>> from pydatastructs import OneDimensionalArray, merge_sort_parallel
    >>> arr = OneDimensionalArray(int,[3, 2, 1])
    >>> merge_sort_parallel(arr, 3)
    >>> [arr[0], arr[1], arr[2]]
    [1, 2, 3]
    >>> merge_sort_parallel(arr, 3, comp=lambda u, v: u > v)
    >>> [arr[0], arr[1], arr[2]]
    [3, 2, 1]

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Merge_sort
    """
    raise_if_backend_is_not_python(
        merge_sort_parallel, kwargs.get('backend', Backend.PYTHON))
    start = kwargs.get('start', 0)
    end = kwargs.get('end', len(array) - 1)
    comp = kwargs.get("comp", lambda u, v: u <= v)
    # Bottom-up merge sort: pass ``size`` merges runs of length 2**size.
    for size in range(floor(log(end - start + 1, 2)) + 1):
        pow_2 = 2**size
        with ThreadPoolExecutor(max_workers=num_threads) as Executor:
            i = start
            while i <= end:
                # NOTE(review): calling .result() immediately after submit
                # blocks until that merge finishes, so merges within a pass
                # run sequentially despite the pool — confirm intent.
                Executor.submit(
                    _merge,
                    array,
                    i, i + pow_2 - 1,
                    i + pow_2, i + 2*pow_2 - 1,
                    end, comp).result()
                i = i + 2*pow_2

    # Dynamic arrays track their filled size; signal that contents changed.
    if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)):
        array._modify(True)
def brick_sort(array, **kwargs):
    """
    Implements Brick Sort / Odd Even sorting algorithm

    Parameters
    ==========

    array: Array
        The array which is to be sorted.
    start: int
        The starting index of the portion which is to be sorted.
        Optional, by default 0
    end: int
        The ending index of the portion which is to be sorted.
        Optional, by default the index of the last position filled.
    comp: lambda/function
        The comparator which is to be used for sorting. If the function
        returns False then only swapping is performed.
        Optional, by default, less than or equal to is used for
        comparing two values.
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available backend is used.

    Examples
    ========
    >>> from pydatastructs import OneDimensionalArray, brick_sort
    >>> arr = OneDimensionalArray(int,[3, 2, 1])
    >>> brick_sort(arr)
    >>> [arr[0], arr[1], arr[2]]
    [1, 2, 3]
    >>> brick_sort(arr, comp=lambda u, v: u > v)
    >>> [arr[0], arr[1], arr[2]]
    [3, 2, 1]

    References
    ==========
    .. [1] https://www.geeksforgeeks.org/odd-even-sort-brick-sort/
    """
    raise_if_backend_is_not_python(
        brick_sort, kwargs.get('backend', Backend.PYTHON))
    first = kwargs.get('start', 0)
    last = kwargs.get('end', len(array) - 1)
    comp = kwargs.get("comp", lambda u, v: u <= v)

    # Alternate odd-indexed and even-indexed passes until a full sweep
    # performs no swap.
    done = False
    while not done:
        done = True
        for phase_start in (first + 1, first):
            for idx in range(phase_start, last, 2):
                if _comp(array[idx + 1], array[idx], comp):
                    array[idx], array[idx + 1] = array[idx + 1], array[idx]
                    done = False

    if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)):
        array._modify(True)
def _brick_sort_swap(array, i, j, comp, is_sorted):
    # Swap array[i] and array[j] if out of order; is_sorted is a one-element
    # list used as a mutable flag shared with brick_sort_parallel.
    if _comp(array[j], array[i], comp):
        array[i], array[j] = array[j], array[i]
        is_sorted[0] = False

def brick_sort_parallel(array, num_threads, **kwargs):
    """
    Implements Concurrent Brick Sort / Odd Even sorting algorithm

    Parameters
    ==========

    array: Array/list
        The array which is to be sorted.
    num_threads: int
        The maximum number of threads to be used for sorting.
    start: int
        The starting index of the portion which is to be sorted.
        Optional, by default 0
    end: int
        The ending index of the portion which is to be sorted.
        Optional, by default the index of the last position filled.
    comp: lambda/function
        The comparator which is to be used for sorting. If the function
        returns False then only swapping is performed.
        Optional, by default, less than or equal to is used for
        comparing two values.
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available backend is used.

    Examples
    ========

    >>> from pydatastructs import OneDimensionalArray, brick_sort_parallel
    >>> arr = OneDimensionalArray(int,[3, 2, 1])
    >>> brick_sort_parallel(arr, num_threads=5)
    >>> [arr[0], arr[1], arr[2]]
    [1, 2, 3]
    >>> brick_sort_parallel(arr, num_threads=5, comp=lambda u, v: u > v)
    >>> [arr[0], arr[1], arr[2]]
    [3, 2, 1]

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Odd%E2%80%93even_sort
    """
    raise_if_backend_is_not_python(
        brick_sort_parallel, kwargs.get('backend', Backend.PYTHON))
    start = kwargs.get('start', 0)
    end = kwargs.get('end', len(array) - 1)
    comp = kwargs.get("comp", lambda u, v: u <= v)

    is_sorted = [False]
    with ThreadPoolExecutor(max_workers=num_threads) as Executor:
        while is_sorted[0] is False:
            is_sorted[0] = True
            # Odd-indexed pairs, then even-indexed pairs.
            # NOTE(review): .result() right after submit serializes the
            # comparisons; the pool adds no parallelism here — confirm intent.
            for i in range(start + 1, end, 2):
                Executor.submit(_brick_sort_swap, array, i, i + 1, comp, is_sorted).result()

            for i in range(start, end, 2):
                Executor.submit(_brick_sort_swap, array, i, i + 1, comp, is_sorted).result()

    if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)):
        array._modify(True)
def heapsort(array, **kwargs):
    """
    Implements Heapsort algorithm.

    Parameters
    ==========

    array: Array
        The array which is to be sorted.
    start: int
        The starting index of the portion which is to be sorted.
        Optional, by default 0
    end: int
        The ending index of the portion which is to be sorted.
        Optional, by default the index of the last position filled.
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available backend is used.

    Examples
    ========

    >>> from pydatastructs import OneDimensionalArray, heapsort
    >>> arr = OneDimensionalArray(int,[3, 2, 1])
    >>> heapsort(arr)
    >>> [arr[0], arr[1], arr[2]]
    [1, 2, 3]

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Heapsort

    Note
    ====

    This function does not support custom comparators as is the case with
    other sorting functions in this file.
    """
    raise_if_backend_is_not_python(
        heapsort, kwargs.get('backend', Backend.PYTHON))
    from pydatastructs.trees.heaps import BinaryHeap

    start = kwargs.get('start', 0)
    end = kwargs.get('end', len(array) - 1)

    # Push every non-None element into a min-heap, clearing the slots;
    # None entries are dropped (they collapse toward the tail).
    h = BinaryHeap(heap_property="min")
    for i in range(start, end+1):
        if array[i] is not None:
            h.insert(array[i])
            array[i] = None

    # Pop in ascending order back into the array from ``start``.
    i = start
    while not h.is_empty:
        array[i] = h.extract().key
        i += 1

    if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)):
        array._modify(True)
def counting_sort(array: Array, **kwargs) -> Array:
    """
    Performs counting sort on the given array.

    Parameters
    ==========

    array: Array
        The array which is to be sorted.
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available backend is used.

    Returns
    =======

    output: Array
        The sorted array.

    Examples
    ========

    >>> from pydatastructs import DynamicOneDimensionalArray as DODA, counting_sort
    >>> arr = DODA(int, [5, 78, 1, 0])
    >>> out = counting_sort(arr)
    >>> str(out)
    "['0', '1', '5', '78']"
    >>> arr.delete(2)
    >>> out = counting_sort(arr)
    >>> str(out)
    "['0', '5', '78']"

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Counting_sort

    Note
    ====

    Since, counting sort is a non-comparison sorting algorithm,
    custom comparators aren't allowed.
    The ouput array doesn't contain any `None` value.
    """
    raise_if_backend_is_not_python(
        counting_sort, kwargs.get('backend', Backend.PYTHON))
    # Single scan for the value range; None entries are skipped and counted.
    max_val, min_val = array[0], array[0]
    none_count = 0
    for i in range(len(array)):
        if array[i] is not None:
            if max_val is None or max_val < array[i]:
                max_val = array[i]
            if min_val is None or array[i] < min_val:
                min_val = array[i]
        else:
            none_count += 1
    # All entries were None: nothing to sort.
    if min_val is None or max_val is None:
        return array

    # Histogram of values, offset by min_val so negatives work too.
    count = [0 for _ in range(max_val - min_val + 1)]
    for i in range(len(array)):
        if array[i] is not None:
            count[array[i] - min_val] += 1

    # Exclusive prefix sum: count[v] becomes the first output index for
    # value v + min_val.
    total = 0
    for i in range(max_val - min_val + 1):
        count[i], total = total, count[i] + total

    # Fresh array of the same concrete type, sized to the non-None elements.
    output = type(array)(array._dtype,
                        [array[i] for i in range(len(array))
                        if array[i] is not None])
    if _check_type(output, DynamicArray):
        output._modify(force=True)

    # Scatter each element to its slot, bumping the slot counter.
    for i in range(len(array)):
        x = array[i]
        if x is not None:
            output[count[x-min_val]] = x
            count[x-min_val] += 1

    return output
def _matrix_multiply_helper(m1, m2, row, col):
    """Return the dot product of row ``row`` of ``m1`` with column ``col``
    of ``m2`` — one cell of the product matrix.

    BUG FIX: the inner dimension is the number of rows of ``m2`` (== number
    of columns of ``m1``); the old code iterated ``range(len(m1))``, which
    only matched for square ``m1``.
    """
    s = 0
    for i in range(len(m2)):
        s += m1[row][i] * m2[i][col]
    return s

def matrix_multiply_parallel(matrix_1, matrix_2, num_threads):
    """
    Implements concurrent Matrix multiplication

    Parameters
    ==========

    matrix_1: Any matrix representation
        Left matrix
    matrix_2: Any matrix representation
        Right matrix
    num_threads: int
        The maximum number of threads to be used for multiplication.

    Raises
    ======

    ValueError
        When the columns in matrix_1 are not equal to the rows in matrix_2

    Returns
    =======

    C: list
        The result of matrix multiplication.

    Examples
    ========

    >>> from pydatastructs import matrix_multiply_parallel
    >>> I = [[1, 1, 0], [0, 1, 0], [0, 0, 1]]
    >>> J = [[2, 1, 2], [1, 2, 1], [2, 2, 2]]
    >>> matrix_multiply_parallel(I, J, num_threads=5)
    [[3, 3, 3], [1, 2, 1], [2, 2, 2]]

    References
    ==========
    .. [1] https://www3.nd.edu/~zxu2/acms60212-40212/Lec-07-3.pdf
    """
    row_matrix_1, col_matrix_1 = len(matrix_1), len(matrix_1[0])
    row_matrix_2, col_matrix_2 = len(matrix_2), len(matrix_2[0])

    if col_matrix_1 != row_matrix_2:
        raise ValueError("Matrix size mismatch: %s * %s"%(
            (row_matrix_1, col_matrix_1), (row_matrix_2, col_matrix_2)))

    # BUG FIX: the product of a (p x q) and a (q x r) matrix is (p x r).
    # The old code allocated a (q x q) grid, which raised IndexError or
    # left None padding whenever the operands were not square.
    C = [[None for _ in range(col_matrix_2)] for _ in range(row_matrix_1)]

    with ThreadPoolExecutor(max_workers=num_threads) as Executor:
        for i in range(row_matrix_1):
            for j in range(col_matrix_2):
                # .result() blocks per cell; kept for behavioral parity with
                # the rest of this module's executor usage.
                C[i][j] = Executor.submit(_matrix_multiply_helper,
                                          matrix_1,
                                          matrix_2,
                                          i, j).result()

    return C
def _bucket_sort_helper(bucket: Array) -> Array:
    # Insertion sort of a single bucket, in place.
    for i in range(1, len(bucket)):
        key = bucket[i]
        j = i - 1
        while j >= 0 and bucket[j] > key:
            bucket[j+1] = bucket[j]
            j -= 1
        bucket[j+1] = key
    return bucket

def bucket_sort(array: Array, **kwargs) -> Array:
    """
    Performs bucket sort on the given array.

    Parameters
    ==========

    array: Array
        The array which is to be sorted.
    start: int
        The starting index of the portion which is to be sorted.
        Optional, by default 0
    end: int
        The ending index of the portion which is to be sorted.
        Optional, by default the index of the last position filled.
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available backend is used.

    Returns
    =======

    output: Array
        The sorted array (sorted in place; None entries are compacted
        to the tail of the range).

    Examples
    ========

    >>> from pydatastructs import DynamicOneDimensionalArray as DODA, bucket_sort
    >>> arr = DODA(int, [5, 78, 1, 0])
    >>> out = bucket_sort(arr)
    >>> str(out)
    "['0', '1', '5', '78']"

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Bucket_sort

    Note
    ====

    This function does not support custom comparators as is the case with
    other sorting functions in this file.
    """
    raise_if_backend_is_not_python(
        bucket_sort, kwargs.get('backend', Backend.PYTHON))
    start = kwargs.get('start', 0)
    end = kwargs.get('end', len(array) - 1)

    # Find the maximum value and the number of non-None entries; together
    # they determine the bucket width.  (NOTE(review): negative values are
    # not handled by this index scheme — same as before; confirm inputs
    # are non-negative.)
    max_value = None
    count = 0
    for i in range(start, end + 1):
        if array[i] is not None:
            count += 1
            if max_value is None or array[i] > max_value:
                max_value = array[i]

    # ROBUSTNESS FIX: an empty/all-None range used to crash on the
    # divisions below; there is nothing to sort, so return as-is.
    if count == 0:
        return array

    number_of_null_values = end - start + 1 - count
    # BUG FIX: ``max_value // count`` can be 0 (max_value < count), which
    # made ``array[i] // size`` raise ZeroDivisionError; clamp to 1.
    size = max(1, max_value // count)

    # Create n empty buckets where n is the number of elements.
    buckets_list = [[] for _ in range(count)]

    # Distribute elements into buckets by value.
    for i in range(start, end + 1):
        if array[i] is not None:
            # BUG FIX: the old code tested ``j is not count`` — an identity
            # comparison on ints that is unreliable outside CPython's small
            # int cache, and it never caught j > count, which indexed out of
            # range.  Clamp the bucket index instead.
            j = min(array[i] // size, count - 1)
            buckets_list[j].append(array[i])

    # Sort elements within the buckets using insertion sort.
    for z in range(count):
        _bucket_sort_helper(buckets_list[z])

    # Concatenate buckets back into the array; None slots go to the tail.
    sorted_list = []
    for x in range(count):
        sorted_list.extend(buckets_list[x])
    for i in range(end, end - number_of_null_values, -1):
        array[i] = None
    for i in range(start, end - number_of_null_values + 1):
        array[i] = sorted_list[i-start]
    if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)):
        array._modify(True)
    return array
def cocktail_shaker_sort(array: Array, **kwargs) -> Array:
    """
    Performs cocktail sort on the given array.

    Parameters
    ==========

    array: Array
        The array which is to be sorted.
    start: int
        The starting index of the portion which is to be sorted.
        Optional, by default 0
    end: int
        The ending index of the portion which is to be sorted.
        Optional, by default the index of the last position filled.
    comp: lambda/function
        The comparator which is to be used for sorting. If the function
        returns False then only swapping is performed.
        Optional, by default, less than or equal to is used for
        comparing two values.
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available backend is used.

    Returns
    =======

    output: Array
        The sorted array.

    Examples
    ========

    >>> from pydatastructs import OneDimensionalArray as ODA, cocktail_shaker_sort
    >>> arr = ODA(int, [5, 78, 1, 0])
    >>> out = cocktail_shaker_sort(arr)
    >>> str(out)
    '[0, 1, 5, 78]'
    >>> arr = ODA(int, [21, 37, 5])
    >>> out = cocktail_shaker_sort(arr)
    >>> str(out)
    '[5, 21, 37]'

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Cocktail_shaker_sort
    """
    raise_if_backend_is_not_python(
        cocktail_shaker_sort, kwargs.get('backend', Backend.PYTHON))

    left = kwargs.get('start', 0)
    right = kwargs.get('end', len(array) - 1)
    comp = kwargs.get("comp", lambda u, v: u <= v)

    # Alternate forward and backward bubble passes, shrinking the window
    # from both ends, until a full sweep performs no swap.
    finished = False
    while not finished and right - left >= 1:
        finished = True

        # Forward pass: carry the largest element toward the right end.
        for pos in range(left, right):
            if _comp(array[pos], array[pos + 1], comp) is False:
                array[pos], array[pos + 1] = array[pos + 1], array[pos]
                finished = False

        right = right - 1
        # Backward pass: carry the smallest element toward the left end.
        for pos in range(right, left, -1):
            if _comp(array[pos - 1], array[pos], comp) is False:
                array[pos - 1], array[pos] = array[pos], array[pos - 1]
                finished = False
        left = left + 1

    if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)):
        array._modify(True)

    return array
+ Optional, by default, the best available + backend is used. + + Returns + ======= + + output: Array + The sorted array. + + Examples + ======== + + >>> from pydatastructs import OneDimensionalArray as ODA, quick_sort + >>> arr = ODA(int, [5, 78, 1, 0]) + >>> out = quick_sort(arr) + >>> str(out) + '[0, 1, 5, 78]' + >>> arr = ODA(int, [21, 37, 5]) + >>> out = quick_sort(arr) + >>> str(out) + '[5, 21, 37]' + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Quicksort + """ + backend = kwargs.pop("backend", Backend.PYTHON) + if backend == Backend.CPP: + return _algorithms.quick_sort(array, **kwargs) + from pydatastructs import Stack + comp = kwargs.get("comp", lambda u, v: u <= v) + pick_pivot_element = kwargs.get("pick_pivot_element", + lambda low, high, array: array[high]) + + def partition(low, high, pick_pivot_element): + i = (low - 1) + x = pick_pivot_element(low, high, array) + for j in range(low , high): + if _comp(array[j], x, comp) is True: + i = i + 1 + array[i], array[j] = array[j], array[i] + array[i + 1], array[high] = array[high], array[i + 1] + return (i + 1) + + lower = kwargs.get('start', 0) + upper = kwargs.get('end', len(array) - 1) + stack = Stack() + + stack.push(lower) + stack.push(upper) + + while stack.is_empty is False: + high = stack.pop() + low = stack.pop() + p = partition(low, high, pick_pivot_element) + if p - 1 > low: + stack.push(low) + stack.push(p - 1) + if p + 1 < high: + stack.push(p + 1) + stack.push(high) + + if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)): + array._modify(True) + + return array + +def longest_common_subsequence(seq1: OneDimensionalArray, seq2: OneDimensionalArray, + **kwargs) -> OneDimensionalArray: + """ + Finds the longest common subsequence between the + two given sequences. + + Parameters + ======== + + seq1: OneDimensionalArray + The first sequence. + seq2: OneDimensionalArray + The second sequence. + backend: pydatastructs.Backend + The backend to be used. 
+ Optional, by default, the best available + backend is used. + + Returns + ======= + + output: OneDimensionalArray + The longest common subsequence. + + Examples + ======== + + >>> from pydatastructs import longest_common_subsequence as LCS, OneDimensionalArray as ODA + >>> arr1 = ODA(str, ['A', 'B', 'C', 'D', 'E']) + >>> arr2 = ODA(str, ['A', 'B', 'C', 'G' ,'D', 'E', 'F']) + >>> lcs = LCS(arr1, arr2) + >>> str(lcs) + "['A', 'B', 'C', 'D', 'E']" + >>> arr1 = ODA(str, ['A', 'P', 'P']) + >>> arr2 = ODA(str, ['A', 'p', 'P', 'S', 'P']) + >>> lcs = LCS(arr1, arr2) + >>> str(lcs) + "['A', 'P', 'P']" + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Longest_common_subsequence_problem + + Note + ==== + + The data types of elements across both the sequences + should be same and should be comparable. + """ + raise_if_backend_is_not_python( + longest_common_subsequence, kwargs.get('backend', Backend.PYTHON)) + row = len(seq1) + col = len(seq2) + check_mat = {0: [(0, []) for _ in range(col + 1)]} + + for i in range(1, row + 1): + check_mat[i] = [(0, []) for _ in range(col + 1)] + for j in range(1, col + 1): + if seq1[i-1] == seq2[j-1]: + temp = check_mat[i-1][j-1][1][:] + temp.append(seq1[i-1]) + check_mat[i][j] = (check_mat[i-1][j-1][0] + 1, temp) + else: + if check_mat[i-1][j][0] > check_mat[i][j-1][0]: + check_mat[i][j] = check_mat[i-1][j] + else: + check_mat[i][j] = check_mat[i][j-1] + + return OneDimensionalArray(seq1._dtype, check_mat[row][col][-1]) + +def is_ordered(array, **kwargs): + """ + Checks whether the given array is ordered or not. + + Parameters + ========== + + array: OneDimensionalArray + The array which is to be checked for having + specified ordering among its elements. + start: int + The starting index of the portion of the array + under consideration. + Optional, by default 0 + end: int + The ending index of the portion of the array + under consideration. + Optional, by default the index + of the last position filled. 
+ comp: lambda/function + The comparator which is to be used + for specifying the desired ordering. + Optional, by default, less than or + equal to is used for comparing two + values. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Returns + ======= + + True if the specified ordering is present + from start to end (inclusive) otherwise False. + + Examples + ======== + + >>> from pydatastructs import OneDimensionalArray, is_ordered + >>> arr = OneDimensionalArray(int, [1, 2, 3, 4]) + >>> is_ordered(arr) + True + >>> arr1 = OneDimensionalArray(int, [1, 2, 3]) + >>> is_ordered(arr1, start=0, end=1, comp=lambda u, v: u > v) + False + """ + backend = kwargs.pop("backend", Backend.PYTHON) + if backend == Backend.CPP: + return _algorithms.is_ordered(array, **kwargs) + lower = kwargs.get('start', 0) + upper = kwargs.get('end', len(array) - 1) + comp = kwargs.get("comp", lambda u, v: u <= v) + + for i in range(lower + 1, upper + 1): + if array[i] is None or array[i - 1] is None: + continue + if comp(array[i], array[i - 1]): + return False + return True + +def upper_bound(array, value, **kwargs): + """ + Finds the index of the first occurence of an element greater than the given + value according to specified order, in the given OneDimensionalArray using a variation of binary search method. + + Parameters + ========== + + array: OneDimensionalArray + The array in which the upper bound has to be found. + start: int + The staring index of the portion of the array in which the upper bound + of a given value has to be looked for. + Optional, by default 0 + end: int, optional + The ending index of the portion of the array in which the upper bound + of a given value has to be looked for. + Optional, by default the index + of the last position filled. + comp: lambda/function + The comparator which is to be used + for specifying the desired ordering. 
+
+        Optional, by default, less than or
+        equal to is used for comparing two
+        values.
+    backend: pydatastructs.Backend
+        The backend to be used.
+        Optional, by default, the best available
+        backend is used.
+
+    Returns
+    =======
+
+    index: int
+        Index of the upper bound of the given value in the given OneDimensionalArray.
+
+    Examples
+    ========
+
+    >>> from pydatastructs import upper_bound, OneDimensionalArray as ODA
+    >>> arr1 = ODA(int, [4, 5, 5, 6, 7])
+    >>> ub = upper_bound(arr1, 5, start=0, end=4)
+    >>> ub
+    3
+    >>> arr2 = ODA(int, [7, 6, 5, 5, 4])
+    >>> ub = upper_bound(arr2, 5, comp=lambda x, y: x > y)
+    >>> ub
+    4
+
+    Note
+    ====
+
+    DynamicOneDimensionalArray objects may not work as expected.
+    """
+    raise_if_backend_is_not_python(
+        upper_bound, kwargs.get('backend', Backend.PYTHON))
+    start = kwargs.get('start', 0)
+    end = kwargs.get('end', len(array))
+    comp = kwargs.get('comp', lambda x, y: x < y)
+    index = end
+    inclusive_end = end - 1
+    if comp(value, array[start]):
+        index = start
+    while start <= inclusive_end:
+        mid = (start + inclusive_end)//2
+        if not comp(value, array[mid]):
+            start = mid + 1
+        else:
+            index = mid
+            inclusive_end = mid - 1
+    return index
+
+def lower_bound(array, value, **kwargs):
+    """
+    Finds the index of the first occurrence of an element which is not
+    less than the given value according to specified order,
+    in the given OneDimensionalArray using a variation of binary search method.
+
+    Parameters
+    ==========
+
+    array: OneDimensionalArray
+        The array in which the lower bound has to be found.
+    start: int
+        The starting index of the portion of the array in which the lower bound
+        of a given value has to be looked for.
+        Optional, by default 0
+    end: int, optional
+        The ending index of the portion of the array in which the lower bound
+        of a given value has to be looked for.
+        Optional, by default the index
+        of the last position filled.
+ comp: lambda/function + The comparator which is to be used + for specifying the desired ordering. + Optional, by default, less than or + equal to is used for comparing two + values. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Returns + ======= + + index: int + Index of the lower bound of the given value in the given OneDimensionalArray + + Examples + ======== + + >>> from pydatastructs import lower_bound, OneDimensionalArray as ODA + >>> arr1 = ODA(int, [4, 5, 5, 6, 7]) + >>> lb = lower_bound(arr1, 5, end=4, comp=lambda x, y : x < y) + >>> lb + 1 + >>> arr = ODA(int, [7, 6, 5, 5, 4]) + >>> lb = lower_bound(arr, 5, start=0, comp=lambda x, y : x > y) + >>> lb + 2 + + Note + ==== + + DynamicOneDimensionalArray objects may not work as expected. + """ + raise_if_backend_is_not_python( + lower_bound, kwargs.get('backend', Backend.PYTHON)) + start = kwargs.get('start', 0) + end = kwargs.get('end', len(array)) + comp = kwargs.get('comp', lambda x, y: x < y) + index = end + inclusive_end = end - 1 + if not comp(array[start], value): + index = start + while start <= inclusive_end: + mid = (start + inclusive_end)//2 + if comp(array[mid], value): + start = mid + 1 + else: + index = mid + inclusive_end = mid - 1 + return index + +def longest_increasing_subsequence(array, **kwargs): + """ + Returns the longest increasing subsequence (as a OneDimensionalArray) that + can be obtained from a given OneDimensionalArray. A subsequence + of an array is an ordered subset of the array's elements having the same + sequential ordering as the original array. Here, an increasing + sequence stands for a strictly increasing sequence of numbers. + + Parameters + ========== + + array: OneDimensionalArray + The given array in the form of a OneDimensionalArray + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. 
+ + Returns + ======= + + output: OneDimensionalArray + Returns the longest increasing subsequence that can be obtained + from the given array + + Examples + ======== + + >>> from pydatastructs import lower_bound, OneDimensionalArray as ODA + >>> from pydatastructs import longest_increasing_subsequence as LIS + >>> array = ODA(int, [2, 5, 3, 7, 11, 8, 10, 13, 6]) + >>> longest_inc_subsequence = LIS(array) + >>> str(longest_inc_subsequence) + '[2, 3, 7, 8, 10, 13]' + >>> array2 = ODA(int, [3, 4, -1, 5, 8, 2, 2 ,2, 3, 12, 7, 9, 10]) + >>> longest_inc_subsequence = LIS(array2) + >>> str(longest_inc_subsequence) + '[-1, 2, 3, 7, 9, 10]' + """ + raise_if_backend_is_not_python( + longest_increasing_subsequence, + kwargs.get('backend', Backend.PYTHON)) + n = len(array) + dp = OneDimensionalArray(int, n) + dp.fill(0) + parent = OneDimensionalArray(int, n) + parent.fill(-1) + length = 0 + for i in range(1, n): + if array[i] <= array[dp[0]]: + dp[0] = i + elif array[dp[length]] < array[i]: + length += 1 + dp[length] = i + parent[i] = dp[length - 1] + else: + curr_array = [array[dp[i]] for i in range(length)] + ceil = lower_bound(curr_array, array[i]) + dp[ceil] = i + parent[i] = dp[ceil - 1] + ans = DynamicOneDimensionalArray(int, 0) + last_index = dp[length] + while last_index != -1: + ans.append(array[last_index]) + last_index = parent[last_index] + n = ans._last_pos_filled + 1 + ans_ODA = OneDimensionalArray(int, n) + for i in range(n): + ans_ODA[n-1-i] = ans[i] + return ans_ODA + +def _permutation_util(array, start, end, comp, perm_comp): + size = end - start + 1 + permute = OneDimensionalArray(int, size) + for i, j in zip(range(start, end + 1), range(size)): + permute[j] = array[i] + i = size - 1 + while i > 0 and perm_comp(permute[i - 1], permute[i], comp): + i -= 1 + if i > 0: + left, right = i, size - 1 + while left <= right: + mid = left + (right - left) // 2 + if not perm_comp(permute[i - 1], permute[mid], comp): + left = mid + 1 + else: + right = mid - 1 + 
permute[i - 1], permute[left - 1] = \ + permute[left - 1], permute[i - 1] + left, right = i, size - 1 + while left < right: + permute[left], permute[right] = permute[right], permute[left] + left += 1 + right -= 1 + result = True if i > 0 else False + return result, permute + +def next_permutation(array, **kwargs): + """ + If the function can determine the next higher permutation, it + returns `True` and the permutation in a new array. + If that is not possible, because it is already at the largest possible + permutation, it returns the elements according to the first permutation + and returns `False` and the permutation in a new array. + + Parameters + ========== + + array: OneDimensionalArray + The array which is to be used for finding next permutation. + start: int + The staring index of the considered portion of the array. + Optional, by default 0 + end: int, optional + The ending index of the considered portion of the array. + Optional, by default the index of the last position filled. + comp: lambda/function + The comparator which is to be used for specifying the + desired lexicographical ordering. + Optional, by default, less than is + used for comparing two values. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + + Returns + ======= + + output: bool, OneDimensionalArray + First element is `True` if the function can rearrange + the given portion of the input array as a lexicographically + greater permutation, otherwise returns `False`. + Second element is an array having the next permutation. 
+ + + Examples + ======== + + >>> from pydatastructs import next_permutation, OneDimensionalArray as ODA + >>> array = ODA(int, [1, 2, 3, 4]) + >>> is_greater, next_permute = next_permutation(array) + >>> is_greater, str(next_permute) + (True, '[1, 2, 4, 3]') + >>> array = ODA(int, [3, 2, 1]) + >>> is_greater, next_permute = next_permutation(array) + >>> is_greater, str(next_permute) + (False, '[1, 2, 3]') + + References + ========== + + .. [1] http://www.cplusplus.com/reference/algorithm/next_permutation/ + """ + raise_if_backend_is_not_python( + next_permutation, kwargs.get('backend', Backend.PYTHON)) + start = kwargs.get('start', 0) + end = kwargs.get('end', len(array) - 1) + comp = kwargs.get('comp', lambda x, y: x < y) + + def _next_permutation_comp(x, y, _comp): + if _comp(x, y): + return False + else: + return True + + return _permutation_util(array, start, end, comp, + _next_permutation_comp) + +def prev_permutation(array, **kwargs): + """ + If the function can determine the next lower permutation, it + returns `True` and the permutation in a new array. + If that is not possible, because it is already at the lowest possible + permutation, it returns the elements according to the last permutation + and returns `False` and the permutation in a new array. + + Parameters + ========== + + array: OneDimensionalArray + The array which is to be used for finding next permutation. + start: int + The staring index of the considered portion of the array. + Optional, by default 0 + end: int, optional + The ending index of the considered portion of the array. + Optional, by default the index of the last position filled. + comp: lambda/function + The comparator which is to be used for specifying the + desired lexicographical ordering. + Optional, by default, less than is + used for comparing two values. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. 
+ + + Returns + ======= + + output: bool, OneDimensionalArray + First element is `True` if the function can rearrange + the given portion of the input array as a lexicographically + smaller permutation, otherwise returns `False`. + Second element is an array having the previous permutation. + + + Examples + ======== + + >>> from pydatastructs import prev_permutation, OneDimensionalArray as ODA + >>> array = ODA(int, [1, 2, 4, 3]) + >>> is_lower, prev_permute = prev_permutation(array) + >>> is_lower, str(prev_permute) + (True, '[1, 2, 3, 4]') + >>> array = ODA(int, [1, 2, 3, 4]) + >>> is_lower, prev_permute = prev_permutation(array) + >>> is_lower, str(prev_permute) + (False, '[4, 3, 2, 1]') + + References + ========== + + .. [1] http://www.cplusplus.com/reference/algorithm/prev_permutation/ + """ + raise_if_backend_is_not_python( + prev_permutation, kwargs.get('backend', Backend.PYTHON)) + start = kwargs.get('start', 0) + end = kwargs.get('end', len(array) - 1) + comp = kwargs.get('comp', lambda x, y: x < y) + + def _prev_permutation_comp(x, y, _comp): + if _comp(x, y): + return True + else: + return False + + return _permutation_util(array, start, end, comp, + _prev_permutation_comp) + +def bubble_sort(array, **kwargs): + """ + Implements bubble sort algorithm. + + Parameters + ========== + + array: Array + The array which is to be sorted. + start: int + The starting index of the portion + which is to be sorted. + Optional, by default 0 + end: int + The ending index of the portion which + is to be sorted. + Optional, by default the index + of the last position filled. + comp: lambda/function + The comparator which is to be used + for sorting. If the function returns + False then only swapping is performed. + Optional, by default, less than or + equal to is used for comparing two + values. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. 
+ + Returns + ======= + + output: Array + The sorted array. + + Examples + ======== + + >>> from pydatastructs import OneDimensionalArray, bubble_sort + >>> arr = OneDimensionalArray(int,[3, 2, 1]) + >>> out = bubble_sort(arr) + >>> str(out) + '[1, 2, 3]' + >>> out = bubble_sort(arr, comp=lambda u, v: u > v) + >>> str(out) + '[3, 2, 1]' + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Bubble_sort + """ + backend = kwargs.pop("backend", Backend.PYTHON) + if backend == Backend.CPP: + return _algorithms.bubble_sort(array, **kwargs) + if backend == Backend.LLVM: + return _algorithms.bubble_sort_llvm(array, **kwargs) + start = kwargs.get('start', 0) + end = kwargs.get('end', len(array) - 1) + comp = kwargs.get("comp", lambda u, v: u <= v) + arr_len = len(array) + for i in range(arr_len - 1): + for j in range(start , end): + if not _comp(array[j], array[j + 1], comp): + array[j], array[j + 1] = array[j + 1], array[j] + + if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)): + array._modify(True) + + return array + +def selection_sort(array, **kwargs): + """ + Implements selection sort algorithm. + + Parameters + ========== + + array: Array + The array which is to be sorted. + start: int + The starting index of the portion + which is to be sorted. + Optional, by default 0 + end: int + The ending index of the portion which + is to be sorted. + Optional, by default the index + of the last position filled. + comp: lambda/function + The comparator which is to be used + for sorting. If the function returns + False then only swapping is performed. + Optional, by default, less than or + equal to is used for comparing two + values. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Returns + ======= + + output: Array + The sorted array. 
+
+    Examples
+    ========
+
+    >>> from pydatastructs import OneDimensionalArray, selection_sort
+    >>> arr = OneDimensionalArray(int,[3, 2, 1])
+    >>> out = selection_sort(arr)
+    >>> str(out)
+    '[1, 2, 3]'
+    >>> out = selection_sort(arr, comp=lambda u, v: u > v)
+    >>> str(out)
+    '[3, 2, 1]'
+
+    References
+    ==========
+
+    .. [1] https://en.wikipedia.org/wiki/Selection_sort
+    """
+    backend = kwargs.pop("backend", Backend.PYTHON)
+    if backend == Backend.CPP:
+        return _algorithms.selection_sort(array, **kwargs)
+    start = kwargs.get('start', 0)
+    end = kwargs.get('end', len(array) - 1)
+    comp = kwargs.get('comp', lambda u, v: u <= v)
+
+    for i in range(start, end + 1):
+        jMin = i
+        for j in range(i + 1, end + 1):
+            if not _comp(array[jMin], array[j], comp):
+                jMin = j
+        if jMin != i:
+            array[i], array[jMin] = array[jMin], array[i]
+
+    if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)):
+        array._modify(True)
+
+    return array
+
+def insertion_sort(array, **kwargs):
+    """
+    Implements insertion sort algorithm.
+
+    Parameters
+    ==========
+
+    array: Array
+        The array which is to be sorted.
+    start: int
+        The starting index of the portion
+        which is to be sorted.
+        Optional, by default 0
+    end: int
+        The ending index of the portion which
+        is to be sorted.
+        Optional, by default the index
+        of the last position filled.
+    comp: lambda/function
+        The comparator which is to be used
+        for sorting. If the function returns
+        False then only swapping is performed.
+        Optional, by default, less than or
+        equal to is used for comparing two
+        values.
+    backend: pydatastructs.Backend
+        The backend to be used.
+        Optional, by default, the best available
+        backend is used.
+
+    Returns
+    =======
+
+    output: Array
+        The sorted array.
+ + Examples + ======== + + >>> from pydatastructs import OneDimensionalArray, insertion_sort + >>> arr = OneDimensionalArray(int,[3, 2, 1]) + >>> out = insertion_sort(arr) + >>> str(out) + '[1, 2, 3]' + >>> out = insertion_sort(arr, comp=lambda u, v: u > v) + >>> str(out) + '[3, 2, 1]' + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Insertion_sort + """ + backend = kwargs.pop("backend", Backend.PYTHON) + if backend == Backend.CPP: + return _algorithms.insertion_sort(array, **kwargs) + start = kwargs.get('start', 0) + end = kwargs.get('end', len(array) - 1) + comp = kwargs.get('comp', lambda u, v: u <= v) + + for i in range(start + 1, end + 1): + temp = array[i] + j = i + while j > start and not _comp(array[j - 1], temp, comp): + array[j] = array[j - 1] + j -= 1 + array[j] = temp + + if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)): + array._modify(True) + + return array + +def linear_search(array, value, **kwargs): + """ + Implements linear search algorithm. + + Parameters + ========== + + array: OneDimensionalArray + The array which is to be searched. + value: + The value which is to be searched + inside the array. + start: int + The starting index of the portion + which is to be searched. + Optional, by default 0 + end: int + The ending index of the portion which + is to be searched. + Optional, by default the index + of the last position filled. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Returns + ======= + + output: int + The index of value if found. + If not found, returns None. + + Examples + ======== + + >>> from pydatastructs import OneDimensionalArray, linear_search + >>> arr = OneDimensionalArray(int,[3, 2, 1]) + >>> linear_search(arr, 2) + 1 + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/Linear_search + """ + backend = kwargs.pop("backend", Backend.PYTHON) + if backend == Backend.CPP: + return _algorithms.linear_search(array, value, **kwargs) + start = kwargs.get('start', 0) + end = kwargs.get('end', len(array) - 1) + + for i in range(start, end + 1): + if array[i] == value: + return i + + return None + +def binary_search(array, value, **kwargs): + """ + Implements binary search algorithm. + + Parameters + ========== + + array: OneDimensionalArray + The array which is to be searched. + value: + The value which is to be searched + inside the array. + start: int + The starting index of the portion + which is to be searched. + Optional, by default 0 + end: int + The ending index of the portion which + is to be searched. + Optional, by default the index + of the last position filled. + comp: lambda/function + The comparator which is to be used + for performing comparisons. + Optional, by default, less than or + equal to is used for comparing two + values. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Returns + ======= + + output: int + The index of elem if found. + If not found, returns None. + + Examples + ======== + + >>> from pydatastructs import OneDimensionalArray, binary_search + >>> arr = OneDimensionalArray(int,[1, 2, 3, 5, 10, 12]) + >>> binary_search(arr, 5) + 3 + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Binary_search_algorithm + + Note + ==== + + This algorithm assumes that the portion of the array + to be searched is already sorted. 
+ """ + backend = kwargs.pop("backend", Backend.PYTHON) + if backend == Backend.CPP: + return _algorithms.binary_search(array, value, **kwargs) + start = kwargs.get('start', 0) + end = kwargs.get('end', len(array) - 1) + comp = kwargs.get("comp", lambda u, v: u <= v) + + left = start + right = end + while left <= right: + middle = left//2 + right//2 + left % 2 * right % 2 + if array[middle] == value: + return middle + if comp(array[middle], value): + left = middle + 1 + else: + right = middle - 1 + + return None + +def jump_search(array, value, **kwargs): + """ + Implements jump search algorithm. + + Parameters + ========== + + array: OneDimensionalArray + The array which is to be searched. + value: + The value which is to be searched + inside the array. + start: int + The starting index of the portion + which is to be searched. + Optional, by default 0 + end: int + The ending index of the portion which + is to be searched. + Optional, by default the index + of the last position filled. + comp: lambda/function + The comparator which is to be used + for performing comparisons. + Optional, by default, less than or + equal to is used for comparing two + values. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Returns + ======= + + output: int + The index of elem if found. + If not found, returns None. + + Examples + ======== + + >>> from pydatastructs import OneDimensionalArray, jump_search + >>> arr = OneDimensionalArray(int,[1, 2, 3, 5, 10, 12]) + >>> linear_search(arr, 5) + 3 + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Jump_search + + Note + ==== + + This algorithm assumes that the portion of the array + to be searched is already sorted. 
+ """ + backend = kwargs.pop("backend", Backend.PYTHON) + if backend == Backend.CPP: + return _algorithms.jump_search(array, value, **kwargs) + start = kwargs.get('start', 0) + end = kwargs.get('end', len(array) - 1) + comp = kwargs.get("comp", lambda u, v: u < v) + + step = int(sqrt(end - start + 1)) + current_position = step + prev = start + while comp(array[min(current_position, end)], value): + prev = current_position + current_position += step + if prev > end: + return None + while prev <= min(current_position, end): + if array[prev] == value: + return prev + prev += 1 + + return None + +def intro_sort(array, **kwargs) -> Array: + """ + Performs intro sort on the given array. + + Parameters + ========== + + array: Array + The array which is to be sorted. + start: int + The starting index of the portion + which is to be sorted. + Optional, by default 0 + end: int + The ending index of the portion which + is to be sorted. + Optional, by default the index + of the last position filled. + maxdepth: Enables the user to define the maximum + recursion depth, takes value 2*log(length(A)) + by default (ref: Wikipedia[1]). + ins_threshold: Threshold under which insertion + sort has to be performed, default value is + 16 (ref: Wikipedia[1]). + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Returns + ======= + + output: Array + The sorted array. + + Examples + ======== + + >>> from pydatastructs import OneDimensionalArray as ODA, intro_sort + >>> arr = ODA(int, [5, 78, 1, 0]) + >>> out = intro_sort(arr) + >>> str(out) + '[0, 1, 5, 78]' + >>> arr = ODA(int, [21, 37, 5]) + >>> out = intro_sort(arr) + >>> str(out) + '[5, 21, 37]' + + Note + ==== + + This function does not support custom comparators as + is the case with other sorting functions in this file. + This is because of heapsort's limitation. + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/Introsort + """ + raise_if_backend_is_not_python( + intro_sort, kwargs.get('backend', Backend.PYTHON)) + + # Always sorts in increasing order, this is because of + # heapsort's limitation + comp = lambda u, v: u <= v + lower = kwargs.get('start', 0) + upper = kwargs.get('end', len(array) - 1) + n = upper - lower + 1 + if n <= 0: + maxdepth = 0 + else: + maxdepth = kwargs.get("maxdepth", int(2 * (log(n)/log(2)))) + + ins_threshold = kwargs.get("ins_threshold", 16) + + def partition(array, lower, upper): + pivot = array[lower] + left = lower + 1 + right = upper + done = False + while not done: + while left <= right and _comp(array[left], pivot, comp): + left += 1 + while _comp(pivot, array[right], comp) and right >= left: + right -= 1 + if right < left: + done = True + else: + array[left], array[right] = array[right], array[left] + left+=1 + right-=1 + + array[lower], array[right] = array[right], array[lower] + return right + + if n < ins_threshold: + return insertion_sort(array, start=lower, end=upper) + elif maxdepth == 0: + heapsort(array, start=lower, end=upper) + return array + else: + p = partition(array, lower, upper) + + intro_sort(array, start=lower, end=p-1, maxdepth=maxdepth-1, ins_threshold=ins_threshold) + intro_sort(array, start=p+1, end=upper, maxdepth=maxdepth-1, ins_threshold=ins_threshold) + + return array + +def shell_sort(array, *args, **kwargs): + """ + Implements shell sort algorithm. + + Parameters + ========== + + array: Array + The array which is to be sorted. + start: int + The starting index of the portion + which is to be sorted. + Optional, by default 0 + end: int + The ending index of the portion which + is to be sorted. + Optional, by default the index + of the last position filled. + comp: lambda/function + The comparator which is to be used + for sorting. If the function returns + False then only swapping is performed. + Optional, by default, less than or + equal to is used for comparing two + values. 
+ backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Returns + ======= + + output: Array + The sorted array. + + Examples + ======== + + >>> from pydatastructs.linear_data_structures.algorithms import OneDimensionalArray, shell_sort + >>> arr = OneDimensionalArray(int, [3, 2, 1]) + >>> out = shell_sort(arr) + >>> str(out) + '[1, 2, 3]' + >>> out = shell_sort(arr, comp=lambda u, v: u > v) + >>> str(out) + '[3, 2, 1]' + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Shellsort + """ + start = int(kwargs.get('start', 0)) + end = int(kwargs.get('end', len(array) - 1)) + comp = kwargs.get('comp', lambda u, v: u <= v) + + n = end - start + 1 + gap = n // 2 + while gap > 0: + for i in range(start + gap, end + 1): + temp = array[i] + j = i + while j >= start + gap and not _comp(array[j - gap], temp, comp): + array[j] = array[j - gap] + j -= gap + array[j] = temp + gap //= 2 + + if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)): + array._modify(True) + + return array + +def radix_sort(array, *args, **kwargs): + """ + Implements radix sort algorithm for non-negative integers. + + Parameters + ========== + + array: Array + The array which is to be sorted. Must contain non-negative integers. + start: int + The starting index of the portion + which is to be sorted. + Optional, by default 0 + end: int + The ending index of the portion which + is to be sorted. + Optional, by default the index + of the last position filled. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Returns + ======= + + output: Array + The sorted array. 
+ + Examples + ======== + + >>> from pydatastructs.linear_data_structures.algorithms import OneDimensionalArray, radix_sort + >>> arr = OneDimensionalArray(int, [170, 45, 75, 90, 802, 24, 2, 66]) + >>> out = radix_sort(arr) + >>> str(out) + '[2, 24, 45, 66, 75, 90, 170, 802]' + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Radix_sort + """ + start = int(kwargs.get('start', 0)) + end = int(kwargs.get('end', len(array) - 1)) + + n = end - start + 1 + max_val = array[start] + for i in range(start + 1, end + 1): + if array[i] is not None and array[i] > max_val: + max_val = array[i] + exp = 1 + while max_val // exp > 0: + count = [0] * 10 + output = [None] * n + + for i in range(start, end + 1): + if array[i] is not None: + digit = (array[i] // exp) % 10 + count[digit] += 1 + + for i in range(1, 10): + count[i] += count[i - 1] + + for i in range(end, start - 1, -1): + if array[i] is not None: + digit = (array[i] // exp) % 10 + count[digit] -= 1 + output[count[digit]] = array[i] + + for i in range(n): + array[start + i] = output[i] + + exp *= 10 + + if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)): + array._modify(True) + + return array diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/arrays.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/arrays.py new file mode 100644 index 000000000..2e0c3fd97 --- /dev/null +++ b/build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/arrays.py @@ -0,0 +1,473 @@ +from pydatastructs.utils.misc_util import ( + _check_type, NoneType, Backend, + raise_if_backend_is_not_python) +from pydatastructs.linear_data_structures._backend.cpp import _arrays + +__all__ = [ + 'OneDimensionalArray', + 'MultiDimensionalArray', + 'DynamicOneDimensionalArray' +] + +class Array(object): + """ + Abstract class for arrays in pydatastructs. 
+ """ + def __str__(self) -> str: + return str(self._data) + +class OneDimensionalArray(Array): + """ + Represents one dimensional static arrays of + fixed size. + + Parameters + ========== + + dtype: type + A valid object type. + size: int + The number of elements in the array. + elements: list + The elements in the array, all should + be of same type. + init: a python type + The initial value with which the element has + to be initialized. By default none, used only + when the data is not given. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Raises + ====== + + ValueError + When the number of elements in the list do not + match with the size. + More than three parameters are passed as arguments. + Types of arguments is not as mentioned in the docstring. + + Note + ==== + + At least one parameter should be passed as an argument along + with the dtype. + + Examples + ======== + + >>> from pydatastructs import OneDimensionalArray + >>> arr = OneDimensionalArray(int, 5) + >>> arr.fill(6) + >>> arr[0] + 6 + >>> arr[0] = 7.2 + >>> arr[0] + 7 + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/Array_data_structure#One-dimensional_arrays + """ + + __slots__ = ['_size', '_data', '_dtype'] + + def __new__(cls, dtype=NoneType, *args, **kwargs): + backend = kwargs.get('backend', Backend.PYTHON) + if backend == Backend.CPP: + return _arrays.OneDimensionalArray(dtype, *args, **kwargs) + if dtype is NoneType: + raise ValueError("Data type is not defined.") + if len(args) not in (1, 2): + raise ValueError("Too few arguments to create a 1D array," + " pass either size of the array" + " or list of elements or both.") + obj = Array.__new__(cls) + obj._dtype = dtype + if len(args) == 2: + if _check_type(args[0], list) and \ + _check_type(args[1], int): + for i in range(len(args[0])): + if _check_type(args[0][i], dtype) is False: + args[0][i] = dtype(args[0][i]) + size, data = args[1], list(args[0]) + elif _check_type(args[1], list) and \ + _check_type(args[0], int): + for i in range(len(args[1])): + if _check_type(args[1][i], dtype) is False: + args[1][i] = dtype(args[1][i]) + size, data = args[0], list(args[1]) + else: + raise TypeError("Expected type of size is int and " + "expected type of data is list/tuple.") + if size != len(data): + raise ValueError("Conflict in the size, %s and length of data, %s" + %(size, len(data))) + obj._size, obj._data = size, data + + elif len(args) == 1: + if _check_type(args[0], int): + obj._size = args[0] + init = kwargs.get('init', None) + obj._data = [init for i in range(args[0])] + elif _check_type(args[0], (list, tuple)): + for i in range(len(args[0])): + if _check_type(args[0][i], dtype) is False: + args[0][i] = dtype(args[0][i]) + obj._size, obj._data = len(args[0]), \ + list(args[0]) + else: + raise TypeError("Expected type of size is int and " + "expected type of data is list/tuple.") + + return obj + + @classmethod + def methods(cls): + return ['__new__', '__getitem__', + '__setitem__', 'fill', '__len__'] + + def __getitem__(self, i): + if i >= self._size or i < 0: + raise 
IndexError(("Index, {} out of range, " + "[{}, {}).".format(i, 0, self._size))) + return self._data.__getitem__(i) + + def __setitem__(self, idx, elem): + if elem is None: + self._data[idx] = None + else: + if _check_type(elem, self._dtype) is False: + elem = self._dtype(elem) + self._data[idx] = elem + + def fill(self, elem): + elem = self._dtype(elem) + for i in range(self._size): + self._data[i] = elem + + def __len__(self): + return self._size + +class MultiDimensionalArray(Array): + """ + Represents a multi-dimensional array. + + Parameters + ========== + + dtype: type + A valid object type. + *args: int + The dimensions of the array. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Raises + ====== + + IndexError + Index goes out of boundaries, or + the number of index given is not + the same as the number of dimensions. + ValueError + When there's no dimensions or the + dimension size is 0. + + Examples + ======== + + >>> from pydatastructs import MultiDimensionalArray as MDA + >>> arr = MDA(int, 5, 6, 9) + >>> arr.fill(32) + >>> arr[3, 0, 0] + 32 + >>> arr[3, 0, 0] = 7 + >>> arr[3, 0, 0] + 7 + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/Array_data_structure#Multidimensional_arrays + + """ + __slots__ = ['_sizes', '_data', '_dtype'] + + def __new__(cls, dtype: type = NoneType, *args, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + if dtype is NoneType: + raise ValueError("Data type is not defined.") + elif not args: + raise ValueError("Too few arguments to create a " + "multi dimensional array, pass dimensions.") + if len(args) == 1: + obj = Array.__new__(cls) + obj._dtype = dtype + obj._sizes = (args[0], 1) + obj._data = [None] * args[0] + return obj + + dimensions = args + for dimension in dimensions: + if dimension < 1: + raise ValueError("Size of dimension cannot be less than 1") + n_dimensions = len(dimensions) + d_sizes = [] + index = 0 + while n_dimensions > 1: + size = dimensions[index] + for i in range(index+1, len(dimensions)): + size = size * dimensions[i] + d_sizes.append(size) + n_dimensions -= 1 + index += 1 + d_sizes.append(dimensions[index]) + d_sizes.append(1) + obj = Array.__new__(cls) + obj._dtype = dtype + obj._sizes = tuple(d_sizes) + obj._data = [None] * obj._sizes[1] * dimensions[0] + return obj + + @classmethod + def methods(cls) -> list: + return ['__new__', '__getitem__', '__setitem__', 'fill', 'shape'] + + def __getitem__(self, indices): + self._compare_shape(indices) + if isinstance(indices, int): + return self._data[indices] + position = 0 + for i in range(0, len(indices)): + position += self._sizes[i + 1] * indices[i] + return self._data[position] + + def __setitem__(self, indices, element) -> None: + self._compare_shape(indices) + if isinstance(indices, int): + self._data[indices] = element + else: + position = 0 + for i in range(0, len(indices)): + position += self._sizes[i + 1] * indices[i] + self._data[position] = element + + def _compare_shape(self, indices) -> None: + indices = [indices] if isinstance(indices, int) else indices + if len(indices) != len(self._sizes) - 1: + raise 
IndexError("Shape mismatch, current shape is %s" % str(self.shape))
+        if any(indices[i] >= self._sizes[i] for i in range(len(indices))):
+            raise IndexError("Index out of range.")
+
+    def fill(self, element) -> None:
+        element = self._dtype(element)
+        for i in range(len(self._data)):
+            self._data[i] = element
+
+    @property
+    def shape(self) -> tuple:
+        shape = []
+        size = len(self._sizes)
+        for i in range(1, size):
+            shape.append(self._sizes[i-1]//self._sizes[i])
+        return tuple(shape)
+
+class DynamicArray(Array):
+    """
+    Abstract class for dynamic arrays.
+    """
+    pass
+
+class DynamicOneDimensionalArray(DynamicArray, OneDimensionalArray):
+    """
+    Represents resizable and dynamic one
+    dimensional arrays.
+
+    Parameters
+    ==========
+
+    dtype: type
+        A valid object type.
+    size: int
+        The number of elements in the array.
+    elements: list/tuple
+        The elements in the array, all should
+        be of same type.
+    init: a python type
+        The initial value with which the element has
+        to be initialized. By default none, used only
+        when the data is not given.
+    load_factor: float, by default 0.25
+        The number below which if the ratio, Num(T)/Size(T)
+        falls then the array is contracted such that at
+        most only half the positions are filled.
+    backend: pydatastructs.Backend
+        The backend to be used.
+        Optional, by default, the best available
+        backend is used.
+
+    Raises
+    ======
+
+    ValueError
+        When the number of elements in the list do not
+        match with the size.
+        More than three parameters are passed as arguments.
+        Types of arguments is not as mentioned in the docstring.
+        The load factor is not of floating point type.
+
+    Note
+    ====
+
+    At least one parameter should be passed as an argument along
+    with the dtype.
+    Num(T) means the number of positions which are not None in the
+    array.
+    Size(T) means the maximum number of elements that the array can hold.
+
+    Examples
+    ========
+
+    >>> from pydatastructs import DynamicOneDimensionalArray as DODA
+    >>> arr = DODA(int, 0)
+    >>> arr.append(1)
+    >>> arr.append(2)
+    >>> arr[0]
+    1
+    >>> arr.delete(0)
+    >>> arr[0]
+    >>> arr[1]
+    2
+    >>> arr.append(3)
+    >>> arr.append(4)
+    >>> [arr[i] for i in range(arr.size)]
+    [None, 2, 3, 4, None, None, None]
+
+    References
+    ==========
+
+    .. [1] http://www.cs.nthu.edu.tw/~wkhon/algo09/lectures/lecture16.pdf
+    """
+
+    __slots__ = ['_load_factor', '_num', '_last_pos_filled', '_size']
+
+    def __new__(cls, dtype=NoneType, *args, **kwargs):
+        backend = kwargs.get("backend", Backend.PYTHON)
+        if backend == Backend.CPP:
+            return _arrays.DynamicOneDimensionalArray(dtype, *args, **kwargs)
+        obj = super().__new__(cls, dtype, *args, **kwargs)
+        obj._load_factor = float(kwargs.get('load_factor', 0.25))
+        obj._num = 0 if obj._size == 0 or obj[0] is None else obj._size
+        obj._last_pos_filled = obj._num - 1
+        return obj
+
+    @classmethod
+    def methods(cls):
+        return ['__new__', '_modify',
+                'append', 'delete', 'size',
+                '__str__', '__reversed__']
+
+    def _modify(self, force=False):
+        """
+        Contracts the array if Num(T)/Size(T) falls
+        below load factor.
+        """
+        if force:
+            i = -1
+            while self._data[i] is None:
+                i -= 1
+            self._last_pos_filled = i%self._size
+        if (self._num/self._size < self._load_factor):
+            arr_new = OneDimensionalArray(self._dtype, 2*self._num + 1)
+            j = 0
+            for i in range(self._last_pos_filled + 1):
+                if self._data[i] is not None:
+                    arr_new[j] = self[i]
+                    j += 1
+            self._last_pos_filled = j - 1
+            self._data = arr_new._data
+            self._size = arr_new._size
+
+    def append(self, el):
+        if self._last_pos_filled + 1 == self._size:
+            arr_new = OneDimensionalArray(self._dtype, 2*self._size + 1)
+            for i in range(self._last_pos_filled + 1):
+                arr_new[i] = self[i]
+            arr_new[self._last_pos_filled + 1] = el
+            self._size = arr_new._size
+            self._data = arr_new._data
+        else:
+            self[self._last_pos_filled + 1] = el
+        self._last_pos_filled += 1
+        self._num += 1
+        self._modify()
+
+    def delete(self, idx):
+        if idx <= self._last_pos_filled and idx >= 0 and \
+            self[idx] is not None:
+            self[idx] = None
+            self._num -= 1
+            if self._last_pos_filled == idx:
+                self._last_pos_filled -= 1
+            return self._modify()
+
+    @property
+    def size(self):
+        return self._size
+
+    def __str__(self):
+        to_be_printed = ['' for _ in range(self._last_pos_filled + 1)]
+        for i in range(self._last_pos_filled + 1):
+            if self._data[i] is not None:
+                to_be_printed[i] = str(self._data[i])
+        return str(to_be_printed)
+
+    def __reversed__(self):
+        for i in range(self._last_pos_filled, -1, -1):
+            yield self._data[i]
+
+class ArrayForTrees(DynamicOneDimensionalArray):
+    """
+    Utility dynamic array for storing nodes of a tree.
+
+    Parameters
+    ==========
+
+    backend: pydatastructs.Backend
+        The backend to be used. Available backends: Python and C++
+        Optional, by default, the Python backend is used. For faster execution, use the C++ backend.
+ + See Also + ======== + + pydatastructs.linear_data_structures.arrays.DynamicOneDimensionalArray + """ + def _modify(self): + if self._num/self._size < self._load_factor: + new_indices = {} + arr_new = OneDimensionalArray(self._dtype, 2*self._num + 1) + j = 0 + for i in range(self._last_pos_filled + 1): + if self[i] is not None: + arr_new[j] = self[i] + new_indices[self[i].key] = j + j += 1 + for i in range(j): + if arr_new[i].left is not None: + arr_new[i].left = new_indices[self[arr_new[i].left].key] + if arr_new[i].right is not None: + arr_new[i].right = new_indices[self[arr_new[i].right].key] + if arr_new[i].parent is not None: + arr_new[i].parent = new_indices[self[arr_new[i].parent].key] + self._last_pos_filled = j - 1 + self._data = arr_new._data + self._size = arr_new._size + return new_indices + return None diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/linked_lists.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/linked_lists.py new file mode 100644 index 000000000..09178daf1 --- /dev/null +++ b/build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/linked_lists.py @@ -0,0 +1,819 @@ +import math, random +from pydatastructs.utils.misc_util import _check_type, LinkedListNode, SkipNode +from pydatastructs.utils.misc_util import ( + Backend, raise_if_backend_is_not_python) + +__all__ = [ + 'SinglyLinkedList', + 'DoublyLinkedList', + 'SinglyCircularLinkedList', + 'DoublyCircularLinkedList', + 'SkipList' +] + +class LinkedList(object): + """ + Abstract class for Linked List. 
+ """ + __slots__ = ['head', 'size'] + + def __len__(self): + return self.size + + @property + def is_empty(self): + return self.size == 0 + + def search(self, key): + curr_node = self.head + while curr_node is not None: + if curr_node.key == key: + return curr_node + curr_node = curr_node.next + if curr_node is self.head: + return None + return None + + def __str__(self): + """ + For printing the linked list. + """ + elements = [] + current_node = self.head + while current_node is not None: + elements.append(str(current_node)) + current_node = current_node.next + if current_node == self.head: + break + return str(elements) + + def insert_after(self, prev_node, key, data=None): + """ + Inserts a new node after the prev_node. + + Parameters + ========== + + prev_node: LinkedListNode + The node after which the + new node is to be inserted. + + key + Any valid identifier to uniquely + identify the node in the linked list. + + data + Any valid data to be stored in the node. + """ + raise NotImplementedError('This is an abstract method') + + def insert_at(self, index, key, data=None): + """ + Inserts a new node at the input index. + + Parameters + ========== + + index: int + An integer satisfying python indexing properties. + + key + Any valid identifier to uniquely + identify the node in the linked list. + + data + Any valid data to be stored in the node. + """ + raise NotImplementedError('This is an abstract method') + + def extract(self, index): + """ + Extracts the node at the index of the list. + + Parameters + ========== + + index: int + An integer satisfying python indexing properties. + + Returns + ======= + + current_node: LinkedListNode + The node at index i. + """ + raise NotImplementedError('This is an abstract method') + + def __getitem__(self, index): + """ + Returns + ======= + + current_node: LinkedListNode + The node at given index. 
+ """ + if index < 0: + index = self.size + index + + if index >= self.size: + raise IndexError('%d index is out of range.'%(index)) + + counter = 0 + current_node = self.head + while counter != index: + current_node = current_node.next + counter += 1 + return current_node + + def appendleft(self, key, data=None): + """ + Pushes a new node at the start i.e., + the left of the list. + + Parameters + ========== + + key + Any valid identifier to uniquely + identify the node in the linked list. + + data + Any valid data to be stored in the node. + """ + self.insert_at(0, key, data) + + def append(self, key, data=None): + """ + Appends a new node at the end of the list. + + Parameters + ========== + + key + Any valid identifier to uniquely + identify the node in the linked list. + + data + Any valid data to be stored in the node. + """ + self.insert_at(self.size, key, data) + + def insert_before(self, next_node, key, data=None): + """ + Inserts a new node before the next_node. + + Parameters + ========== + + next_node: LinkedListNode + The node before which the + new node is to be inserted. + + key + Any valid identifier to uniquely + identify the node in the linked list. + + data + Any valid data to be stored in the node. + """ + raise NotImplementedError('This is an abstract method') + + def popleft(self): + """ + Extracts the Node from the left + i.e. start of the list. + + Returns + ======= + + old_head: LinkedListNode + The leftmost element of linked + list. + """ + return self.extract(0) + + def popright(self): + """ + Extracts the node from the right + of the linked list. + + Returns + ======= + + old_tail: LinkedListNode + The leftmost element of linked + list. + """ + return self.extract(-1) + +class DoublyLinkedList(LinkedList): + """ + Represents Doubly Linked List + + Parameters + ========== + + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. 
+ + Examples + ======== + + >>> from pydatastructs import DoublyLinkedList + >>> dll = DoublyLinkedList() + >>> dll.append(6) + >>> dll[0].key + 6 + >>> dll.head.key + 6 + >>> dll.append(5) + >>> dll.appendleft(2) + >>> str(dll) + "['(2, None)', '(6, None)', '(5, None)']" + >>> dll[0].key = 7.2 + >>> dll.extract(1).key + 6 + >>> str(dll) + "['(7.2, None)', '(5, None)']" + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Doubly_linked_list + + """ + __slots__ = ['head', 'tail', 'size'] + + def __new__(cls, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = LinkedList.__new__(cls) + obj.head = None + obj.tail = None + obj.size = 0 + return obj + + @classmethod + def methods(cls): + return ['__new__', 'insert_after', + 'insert_before', 'insert_at', 'extract'] + + def insert_after(self, prev_node, key, data=None): + self.size += 1 + new_node = LinkedListNode(key, data, + links=['next', 'prev'], + addrs=[None, None]) + new_node.next = prev_node.next + if new_node.next is not None: + new_node.next.prev = new_node + prev_node.next = new_node + new_node.prev = prev_node + + if new_node.next is None: + self.tail = new_node + + def insert_before(self, next_node, key, data=None): + self.size += 1 + new_node = LinkedListNode(key, data, + links=['next', 'prev'], + addrs=[None, None]) + new_node.prev = next_node.prev + next_node.prev = new_node + new_node.next = next_node + if new_node.prev is not None: + new_node.prev.next = new_node + else: + self.head = new_node + + def insert_at(self, index, key, data=None): + if self.size == 0 and (index in (0, -1)): + index = 0 + + if index < 0: + index = self.size + index + + if index > self.size: + raise IndexError('%d index is out of range.'%(index)) + + self.size += 1 + new_node = LinkedListNode(key, data, + links=['next', 'prev'], + addrs=[None, None]) + if self.size == 1: + self.head, self.tail = \ + new_node, new_node + elif index == self.size - 1: + new_node.prev = 
self.tail + new_node.next = self.tail.next + self.tail.next = new_node + self.tail = new_node + else: + counter = 0 + current_node = self.head + prev_node = None + while counter != index: + prev_node = current_node + current_node = current_node.next + counter += 1 + new_node.prev = prev_node + new_node.next = current_node + if prev_node is not None: + prev_node.next = new_node + if current_node is not None: + current_node.prev = new_node + if new_node.next is None: + self.tail = new_node + if new_node.prev is None: + self.head = new_node + + def extract(self, index): + if self.is_empty: + raise ValueError("The list is empty.") + + if index < 0: + index = self.size + index + + if index >= self.size: + raise IndexError('%d is out of range.'%(index)) + + self.size -= 1 + counter = 0 + current_node = self.head + prev_node = None + while counter != index: + prev_node = current_node + current_node = current_node.next + counter += 1 + if prev_node is not None: + prev_node.next = current_node.next + if current_node.next is not None: + current_node.next.prev = prev_node + if index == 0: + self.head = current_node.next + if index == self.size: + self.tail = current_node.prev + return current_node + +class SinglyLinkedList(LinkedList): + """ + Represents Singly Linked List + + Parameters + ========== + + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Examples + ======== + + >>> from pydatastructs import SinglyLinkedList + >>> sll = SinglyLinkedList() + >>> sll.append(6) + >>> sll[0].key + 6 + >>> sll.head.key + 6 + >>> sll.append(5) + >>> sll.appendleft(2) + >>> str(sll) + "['(2, None)', '(6, None)', '(5, None)']" + >>> sll[0].key = 7.2 + >>> sll.extract(1).key + 6 + >>> str(sll) + "['(7.2, None)', '(5, None)']" + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/Singly_linked_list + + """ + __slots__ = ['head', 'tail', 'size'] + + def __new__(cls, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = LinkedList.__new__(cls) + obj.head = None + obj.tail = None + obj.size = 0 + return obj + + @classmethod + def methods(cls): + return ['insert_after', 'insert_at', + 'extract'] + + def insert_after(self, prev_node, key, data=None): + self.size += 1 + new_node = LinkedListNode(key, data, + links=['next'], + addrs=[None]) + new_node.next = prev_node.next + prev_node.next = new_node + + if new_node.next is None: + self.tail = new_node + + def insert_at(self, index, key, data=None): + if self.size == 0 and (index in (0, -1)): + index = 0 + + if index < 0: + index = self.size + index + + if index > self.size: + raise IndexError('%d index is out of range.'%(index)) + + self.size += 1 + new_node = LinkedListNode(key, data, + links=['next'], + addrs=[None]) + if self.size == 1: + self.head, self.tail = \ + new_node, new_node + elif index == self.size - 1: + new_node.next = self.tail.next + self.tail.next = new_node + self.tail = new_node + else: + counter = 0 + current_node = self.head + prev_node = None + while counter != index: + prev_node = current_node + current_node = current_node.next + counter += 1 + new_node.next = current_node + if prev_node is not None: + prev_node.next = new_node + if new_node.next is None: + self.tail = new_node + if index == 0: + self.head = new_node + + def extract(self, index): + if self.is_empty: + raise ValueError("The list is empty.") + + if index < 0: + index = self.size + index + + if index >= self.size: + raise IndexError('%d is out of range.'%(index)) + + self.size -= 1 + counter = 0 + current_node = self.head + prev_node = None + while counter != index: + prev_node = current_node + current_node = current_node.next + counter += 1 + if prev_node is not None: + prev_node.next = current_node.next + if index == 0: + 
self.head = current_node.next
+        if index == self.size:
+            self.tail = prev_node
+        return current_node
+
+class SinglyCircularLinkedList(SinglyLinkedList):
+    """
+    Represents Singly Circular Linked List.
+
+    Parameters
+    ==========
+
+    backend: pydatastructs.Backend
+        The backend to be used.
+        Optional, by default, the best available
+        backend is used.
+
+
+    Examples
+    ========
+
+    >>> from pydatastructs import SinglyCircularLinkedList
+    >>> scll = SinglyCircularLinkedList()
+    >>> scll.append(6)
+    >>> scll[0].key
+    6
+    >>> scll.head.key
+    6
+    >>> scll.append(5)
+    >>> scll.appendleft(2)
+    >>> str(scll)
+    "['(2, None)', '(6, None)', '(5, None)']"
+    >>> scll[0].key = 7.2
+    >>> scll.extract(1).key
+    6
+    >>> str(scll)
+    "['(7.2, None)', '(5, None)']"
+
+    References
+    ==========
+
+    .. [1] https://en.wikipedia.org/wiki/Linked_list#Circular_linked_list
+
+    """
+
+    @classmethod
+    def methods(cls):
+        return ['insert_after', 'insert_at', 'extract']
+
+    def insert_after(self, prev_node, key, data=None):
+        super(SinglyCircularLinkedList, self).\
+            insert_after(prev_node, key, data)
+        if prev_node.next.next == self.head:
+            self.tail = prev_node.next
+
+    def insert_at(self, index, key, data=None):
+        super(SinglyCircularLinkedList, self).insert_at(index, key, data)
+        if self.size == 1:
+            self.head.next = self.head
+        new_node = self.__getitem__(index)
+        if index == 0:
+            self.tail.next = new_node
+        if new_node.next == self.head:
+            self.tail = new_node
+
+    def extract(self, index):
+        node = super(SinglyCircularLinkedList, self).extract(index)
+        if self.tail is None:
+            self.head = None
+        elif index == 0:
+            self.tail.next = self.head
+        return node
+
+class DoublyCircularLinkedList(DoublyLinkedList):
+    """
+    Represents Doubly Circular Linked List
+
+    Parameters
+    ==========
+
+    backend: pydatastructs.Backend
+        The backend to be used.
+        Optional, by default, the best available
+        backend is used.
+ + Examples + ======== + + >>> from pydatastructs import DoublyCircularLinkedList + >>> dcll = DoublyCircularLinkedList() + >>> dcll.append(6) + >>> dcll[0].key + 6 + >>> dcll.head.key + 6 + >>> dcll.append(5) + >>> dcll.appendleft(2) + >>> str(dcll) + "['(2, None)', '(6, None)', '(5, None)']" + >>> dcll[0].key = 7.2 + >>> dcll.extract(1).key + 6 + >>> str(dcll) + "['(7.2, None)', '(5, None)']" + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Doubly_linked_list#Circular_doubly_linked_lists + + """ + + @classmethod + def methods(cls): + return ['insert_after', 'insert_before', + 'insert_at', 'extract'] + + def insert_after(self, prev_node, key, data=None): + super(DoublyCircularLinkedList, self)\ + .insert_after(prev_node, key, data) + if prev_node.next.next == self.head: + self.tail = prev_node.next + + def insert_before(self, next_node, key, data=None): + super(DoublyCircularLinkedList, self).\ + insert_before(next_node, key, data) + if next_node == self.head: + self.head = next_node.prev + + def insert_at(self, index, key, data=None): + super(DoublyCircularLinkedList, self).\ + insert_at(index, key, data) + if self.size == 1: + self.head.next = self.head + self.head.prev = self.head + new_node = self.__getitem__(index) + if index == 0: + self.tail.next = new_node + new_node.prev = self.tail + if new_node.next == self.head: + self.tail = new_node + new_node.next = self.head + self.head.prev = new_node + + def extract(self, index): + node = super(DoublyCircularLinkedList, self).extract(index) + if self.tail is None: + self.head = None + elif index == 0: + self.tail.next = self.head + return node + +class SkipList(object): + """ + Represents Skip List + + Parameters + ========== + + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. 
+ + Examples + ======== + + >>> from pydatastructs import SkipList + >>> sl = SkipList() + >>> sl.insert(6) + >>> sl.insert(1) + >>> sl.insert(3) + >>> node = sl.extract(1) + >>> str(node) + '(1, None)' + >>> sl.insert(4) + >>> sl.insert(2) + >>> sl.search(4) + True + >>> sl.search(10) + False + + """ + + __slots__ = ['head', 'tail', '_levels', '_num_nodes', 'seed'] + + def __new__(cls, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = object.__new__(cls) + obj.head, obj.tail = None, None + obj._num_nodes = 0 + obj._levels = 0 + obj._add_level() + return obj + + @classmethod + def methods(cls): + return ['__new__', 'levels', 'search', + 'extract', '__str__', 'size'] + + def _add_level(self): + self.tail = SkipNode(math.inf, next=None, down=self.tail) + self.head = SkipNode(-math.inf, next=self.tail, down=self.head) + self._levels += 1 + + @property + def levels(self): + """ + Returns the number of levels in the + current skip list. + """ + return self._levels + + def _search(self, key) -> list: + path = [] + node = self.head + while node: + if node.next.key >= key: + path.append(node) + node = node.down + else: + node = node.next + return path + + def search(self, key) -> bool: + return self._search(key)[-1].next.key == key + + def insert(self, key, data=None): + """ + Inserts a new node to the skip list. + + Parameters + ========== + + key + Any valid identifier to uniquely + identify the node in the linked list. + + data + Any valid data to be stored in the node. 
+ """ + path = self._search(key) + tip = path[-1] + below = SkipNode(key=key, data=data, next=tip.next) + tip.next = below + total_level = self._levels + level = 1 + while random.getrandbits(1) % 2 == 0 and level <= total_level: + if level == total_level: + self._add_level() + prev = self.head + else: + prev = path[total_level - 1 - level] + below = SkipNode(key=key, data=None, next=prev.next, down=below) + prev.next = below + level += 1 + self._num_nodes += 1 + + @property + def size(self): + return self._num_nodes + + def extract(self, key): + """ + Extracts the node with the given key in the skip list. + + Parameters + ========== + + key + The key of the node under consideration. + + Returns + ======= + + return_node: SkipNode + The node with given key. + """ + path = self._search(key) + tip = path[-1] + if tip.next.key != key: + raise KeyError('Node with key %s is not there in %s'%(key, self)) + return_node = SkipNode(tip.next.key, tip.next.data) + total_level = self._levels + level = total_level - 1 + while level >= 0 and path[level].next.key == key: + path[level].next = path[level].next.next + level -= 1 + walk = self.head + while walk is not None: + if walk.next is self.tail: + self._levels -= 1 + self.head = walk.down + self.tail = self.tail.down + walk = walk.down + else: + break + self._num_nodes -= 1 + if self._levels == 0: + self._add_level() + return return_node + + def __str__(self): + node2row = {} + node2col = {} + walk = self.head + curr_level = self._levels - 1 + while walk is not None: + curr_node = walk + col = 0 + while curr_node is not None: + if curr_node.key != math.inf and curr_node.key != -math.inf: + node2row[curr_node] = curr_level + if walk.down is None: + node2col[curr_node.key] = col + col += 1 + curr_node = curr_node.next + walk = walk.down + curr_level -= 1 + sl_mat = [[str(None) for _ in range(self._num_nodes)] for _ in range(self._levels)] + walk = self.head + while walk is not None: + curr_node = walk + while curr_node is not 
None: + if curr_node in node2row: + row = node2row[curr_node] + col = node2col[curr_node.key] + sl_mat[row][col] = str(curr_node) + curr_node = curr_node.next + walk = walk.down + sl_str = "" + for level_list in sl_mat[::-1]: + for node_str in level_list: + sl_str += node_str + " " + if len(sl_str) > 0: + sl_str += "\n" + return sl_str diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/__init__.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_algorithms.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_algorithms.py new file mode 100644 index 000000000..3e287bb74 --- /dev/null +++ b/build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_algorithms.py @@ -0,0 +1,423 @@ +from pydatastructs import ( + merge_sort_parallel, DynamicOneDimensionalArray, + OneDimensionalArray, brick_sort, brick_sort_parallel, + heapsort, matrix_multiply_parallel, counting_sort, bucket_sort, + cocktail_shaker_sort, quick_sort, longest_common_subsequence, is_ordered, + upper_bound, lower_bound, longest_increasing_subsequence, next_permutation, + prev_permutation, bubble_sort, linear_search, binary_search, jump_search, + selection_sort, insertion_sort, intro_sort, shell_sort, radix_sort, Backend) + +from pydatastructs.utils.raises_util import raises +import random + +def _test_common_sort(sort, *args, **kwargs): + random.seed(1000) + + n = random.randint(10, 20) + arr = DynamicOneDimensionalArray(int, 0) + generated_ints = [] + for _ in range(n): + integer = random.randint(1, 1000) + generated_ints.append(integer) + arr.append(integer) + for _ in range(n//3): + integer = random.randint(0, n//2) + generated_ints.append(integer) + 
arr.delete(integer) + expected_arr_1 = [686, 779, 102, 134, 362, 448, + 480, 548, None, None, None, + 228, 688, 247, 373, 696, None, + None, None, None, None, None, + None, None, None, None, None, + None, None, None, None] + sort(arr, *args, **kwargs, start=2, end=10) + assert arr._data == expected_arr_1 + sort(arr, *args, **kwargs) + expected_arr_2 = [102, 134, 228, 247, 362, 373, 448, + 480, 548, 686, 688, 696, 779, + None, None, None, None, None, None, + None, None, None, None, None, + None, None, None, None, None, None, None] + assert arr._data == expected_arr_2 + assert (arr._last_pos_filled, arr._num, arr._size) == (12, 13, 31) + + arr = DynamicOneDimensionalArray(int, 0, backend=Backend.CPP) + int_idx = 0 + for _ in range(n): + arr.append(generated_ints[int_idx]) + int_idx += 1 + for _ in range(n//3): + arr.delete(generated_ints[int_idx]) + int_idx += 1 + sort(arr, *args, **kwargs, start=2, end=10) + for i in range(len(expected_arr_1)): + assert arr[i] == expected_arr_1[i] + sort(arr, *args, **kwargs) + for i in range(len(expected_arr_2)): + assert arr[i] == expected_arr_2[i] + assert (arr._last_pos_filled, arr._num, arr.size) == (12, 13, 31) + + n = random.randint(10, 20) + arr = OneDimensionalArray(int, n) + generated_ints.clear() + for i in range(n): + integer = random.randint(1, 1000) + arr[i] = integer + generated_ints.append(integer) + expected_arr_3 = [42, 695, 147, 500, 768, + 998, 473, 732, 728, 426, + 709, 910] + sort(arr, *args, **kwargs, start=2, end=5) + assert arr._data == expected_arr_3 + + arr = OneDimensionalArray(int, n, backend=Backend.CPP) + int_idx = 0 + for i in range(n): + arr[i] = generated_ints[int_idx] + int_idx += 1 + sort(arr, *args, **kwargs, start=2, end=5) + for i in range(len(expected_arr_3)): + assert arr[i] == expected_arr_3[i] + +def test_merge_sort_parallel(): + _test_common_sort(merge_sort_parallel, num_threads=5) + +def test_brick_sort(): + _test_common_sort(brick_sort) + +def test_brick_sort_parallel(): + 
_test_common_sort(brick_sort_parallel, num_threads=3) + +def test_heapsort(): + _test_common_sort(heapsort) + +def test_bucket_sort(): + _test_common_sort(bucket_sort) + +def test_counting_sort(): + random.seed(1000) + + n = random.randint(10, 20) + arr = DynamicOneDimensionalArray(int, 0) + for _ in range(n): + arr.append(random.randint(1, 1000)) + for _ in range(n//3): + arr.delete(random.randint(0, n//2)) + + expected_arr = [102, 134, 228, 247, 362, 373, 448, + 480, 548, 686, 688, 696, 779] + assert counting_sort(arr)._data == expected_arr + +def test_cocktail_shaker_sort(): + _test_common_sort(cocktail_shaker_sort) + +def test_quick_sort(): + _test_common_sort(quick_sort) + _test_common_sort(quick_sort, backend=Backend.CPP) + +def test_intro_sort(): + _test_common_sort(intro_sort) + +def test_bubble_sort(): + _test_common_sort(bubble_sort) + _test_common_sort(bubble_sort, backend=Backend.CPP) + _test_common_sort(bubble_sort, backend=Backend.LLVM) + +def test_selection_sort(): + _test_common_sort(selection_sort) + _test_common_sort(selection_sort, backend=Backend.CPP) + +def test_insertion_sort(): + _test_common_sort(insertion_sort) + _test_common_sort(insertion_sort, backend=Backend.CPP) + +def test_matrix_multiply_parallel(): + ODA = OneDimensionalArray + + expected_result = [[3, 3, 3], [1, 2, 1], [2, 2, 2]] + + I = ODA(ODA, [ODA(int, [1, 1, 0]), ODA(int, [0, 1, 0]), ODA(int, [0, 0, 1])]) + J = ODA(ODA, [ODA(int, [2, 1, 2]), ODA(int, [1, 2, 1]), ODA(int, [2, 2, 2])]) + output = matrix_multiply_parallel(I, J, num_threads=5) + assert expected_result == output + + I = [[1, 1, 0], [0, 1, 0], [0, 0, 1]] + J = [[2, 1, 2], [1, 2, 1], [2, 2, 2]] + output = matrix_multiply_parallel(I, J, num_threads=5) + assert expected_result == output + + I = [[1, 1, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]] + J = [[2, 1, 2], [1, 2, 1], [2, 2, 2]] + assert raises(ValueError, lambda: matrix_multiply_parallel(I, J, num_threads=5)) + + I = [[1, 1, 0], [0, 1, 0], [0, 0, 1]] + J = [[2, 1, 2], 
[1, 2, 1], [2, 2, 2]] + output = matrix_multiply_parallel(I, J, num_threads=1) + assert expected_result == output + +def test_longest_common_sequence(): + ODA = OneDimensionalArray + expected_result = "['A', 'S', 'C', 'I', 'I']" + + str1 = ODA(str, ['A', 'A', 'S', 'C', 'C', 'I', 'I']) + str2 = ODA(str, ['A', 'S', 'S', 'C', 'I', 'I', 'I', 'I']) + output = longest_common_subsequence(str1, str2) + assert str(output) == expected_result + + expected_result = "['O', 'V', 'A']" + + I = ODA(str, ['O', 'V', 'A', 'L']) + J = ODA(str, ['F', 'O', 'R', 'V', 'A', 'E', 'W']) + output = longest_common_subsequence(I, J) + assert str(output) == expected_result + + X = ODA(int, [1, 2, 3, 4, 5, 6, 6, 5, 4, 3, 2, 1]) + Y = ODA(int, [1, 2, 3, 4, 4, 3, 2, 1]) + output = longest_common_subsequence(X, Y) + assert str(output) == '[1, 2, 3, 4, 4, 3, 2, 1]' + + Z = ODA(int, []) + output = longest_common_subsequence(Y, Z) + assert str(output) == '[]' + +def test_is_ordered(): + def _test_inner_ordered(*args, **kwargs): + ODA = OneDimensionalArray + DODA = DynamicOneDimensionalArray + + expected_result = True + arr = ODA(int, [1, 2, 5, 6]) + output = is_ordered(arr, **kwargs) + assert output == expected_result + + expected_result = False + arr1 = ODA(int, [4, 3, 2, 1]) + output = is_ordered(arr1, **kwargs) + assert output == expected_result + + expected_result = True + arr2 = ODA(int, [6, 1, 2, 3, 4, 5]) + output = is_ordered(arr2, start=1, end=5, **kwargs) + assert output == expected_result + + expected_result = True + arr3 = ODA(int, [0, -1, -2, -3, -4, 4]) + output = is_ordered(arr3, start=1, end=4, + comp=lambda u, v: u > v, **kwargs) + assert output == expected_result + + expected_result = True + arr4 = DODA(int, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + arr4.delete(0) + output = is_ordered(arr4, **kwargs) + assert output == expected_result + + _test_inner_ordered() + _test_inner_ordered(backend=Backend.CPP) + + +def test_upper_bound(): + ODA = OneDimensionalArray + arr1 = ODA(int, [3, 3, 3]) + 
output = upper_bound(arr1, 3) + expected_result = 3 + assert expected_result == output + + arr2 = ODA(int, [4, 4, 5, 6]) + output = upper_bound(arr2, 4, end=3) + expected_result = 2 + assert expected_result == output + + arr3 = ODA(int, [6, 6, 7, 8, 9]) + output = upper_bound(arr3, 5, start=2, end=4) + expected_result = 2 + assert expected_result == output + + arr4 = ODA(int, [3, 4, 4, 6]) + output = upper_bound(arr4, 5, start=1, end=3) + expected_result = 3 + assert expected_result == output + + arr5 = ODA(int, [7, 6, 6, 6, 6, 5, 4, 3]) + output = upper_bound(arr5, 6, comp=lambda x, y: x > y) + expected_result = 5 + assert expected_result == output + + arr6 = ODA(int, [7, 6, 6, 6, 6, 5, 4, 3]) + output = upper_bound(arr6, 2, start=2, comp=lambda x, y: x > y) + expected_result = 8 + assert expected_result == output + + arr7 = ODA(int, [7, 6, 6, 6, 6, 5, 4, 3]) + output = upper_bound(arr7, 9, start=3, end=7, comp=lambda x, y: x > y) + expected_result = 3 + assert expected_result == output + + arr8 = ODA(int, [7, 6, 6, 6, 6, 5, 4, 3]) + output = upper_bound(arr8, 6, end=3, comp=lambda x, y: x > y) + expected_result = 3 + assert expected_result == output + + +def test_lower_bound(): + ODA = OneDimensionalArray + arr1 = ODA(int, [3, 3, 3]) + output = lower_bound(arr1, 3, start=1) + expected_result = 1 + assert expected_result == output + + arr2 = ODA(int, [4, 4, 4, 4, 5, 6]) + output = lower_bound(arr2, 5, end=3) + expected_result = 3 + assert expected_result == output + + arr3 = ODA(int, [6, 6, 7, 8, 9]) + output = lower_bound(arr3, 5, end=3) + expected_result = 0 + assert expected_result == output + + arr4 = ODA(int, [3, 4, 4, 4]) + output = lower_bound(arr4, 5) + expected_result = 4 + assert expected_result == output + + arr5 = ODA(int, [7, 6, 6, 6, 6, 5, 4, 3]) + output = lower_bound(arr5, 5, comp=lambda x, y: x > y) + expected_result = 5 + assert expected_result == output + + arr6 = ODA(int, [7, 6, 6, 6, 6, 5, 4, 3]) + output = lower_bound(arr6, 2, start=4, 
comp=lambda x, y: x > y) + expected_result = 8 + assert expected_result == output + + arr7 = ODA(int, [7, 6, 6, 6, 6, 5, 4, 3]) + output = lower_bound(arr7, 9, end=5, comp=lambda x, y: x > y) + expected_result = 0 + assert expected_result == output + + arr8 = ODA(int, [7, 6, 6, 6, 6, 5, 4, 3]) + output = lower_bound(arr8, 6, end=3, comp=lambda x, y: x > y) + expected_result = 1 + assert expected_result == output + +def test_longest_increasing_subsequence(): + ODA = OneDimensionalArray + + arr1 = ODA(int, [2, 5, 3, 7, 11, 8, 10, 13, 6]) + output = longest_increasing_subsequence(arr1) + expected_result = [2, 3, 7, 8, 10, 13] + assert str(expected_result) == str(output) + + arr2 = ODA(int, [3, 4, -1, 5, 8, 2, 2, 2, 3, 12, 7, 9, 10]) + output = longest_increasing_subsequence(arr2) + expected_result = [-1, 2, 3, 7, 9, 10] + assert str(expected_result) == str(output) + + arr3 = ODA(int, [6, 6, 6, 19, 9]) + output = longest_increasing_subsequence(arr3) + expected_result = [6, 9] + assert str(expected_result) == str(output) + + arr4 = ODA(int, [5, 4, 4, 3, 3, 6, 6, 8]) + output = longest_increasing_subsequence(arr4) + expected_result = [3, 6, 8] + assert str(expected_result) == str(output) + + arr5 = ODA(int, [7, 6, 6, 6, 5, 4, 3]) + output = longest_increasing_subsequence(arr5) + expected_result = [3] + assert str(expected_result) == str(output) + +def _test_permutation_common(array, expected_perms, func): + num_perms = len(expected_perms) + + output = [] + for _ in range(num_perms): + signal, array = func(array) + output.append(array) + if not signal: + break + + assert len(output) == len(expected_perms) + for perm1, perm2 in zip(output, expected_perms): + assert str(perm1) == str(perm2) + +def test_next_permutation(): + ODA = OneDimensionalArray + + array = ODA(int, [1, 2, 3]) + expected_perms = [[1, 3, 2], [2, 1, 3], + [2, 3, 1], [3, 1, 2], + [3, 2, 1], [1, 2, 3]] + _test_permutation_common(array, expected_perms, next_permutation) + +def test_prev_permutation(): + ODA 
= OneDimensionalArray + + array = ODA(int, [3, 2, 1]) + expected_perms = [[3, 1, 2], [2, 3, 1], + [2, 1, 3], [1, 3, 2], + [1, 2, 3], [3, 2, 1]] + _test_permutation_common(array, expected_perms, prev_permutation) + +def test_next_prev_permutation(): + ODA = OneDimensionalArray + random.seed(1000) + + for i in range(100): + data = set(random.sample(range(1, 10000), 10)) + array = ODA(int, list(data)) + + _, next_array = next_permutation(array) + _, orig_array = prev_permutation(next_array) + assert str(orig_array) == str(array) + + _, prev_array = prev_permutation(array) + _, orig_array = next_permutation(prev_array) + assert str(orig_array) == str(array) + +def _test_common_search(search_func, sort_array=True, **kwargs): + ODA = OneDimensionalArray + + array = ODA(int, [1, 2, 5, 7, 10, 29, 40]) + for i in range(len(array)): + assert i == search_func(array, array[i], **kwargs) + + checker_array = [None, None, 2, 3, 4, 5, None] + for i in range(len(array)): + assert checker_array[i] == search_func(array, array[i], start=2, end=5, **kwargs) + + random.seed(1000) + + for i in range(25): + data = list(set(random.sample(range(1, 10000), 100))) + + if sort_array: + data.sort() + + array = ODA(int, list(data)) + + for i in range(len(array)): + assert search_func(array, array[i], **kwargs) == i + + for _ in range(50): + assert search_func(array, random.randint(10001, 50000), **kwargs) is None + +def test_linear_search(): + _test_common_search(linear_search, sort_array=False) + _test_common_search(linear_search, sort_array=False, backend=Backend.CPP) + +def test_binary_search(): + _test_common_search(binary_search) + _test_common_search(binary_search, backend=Backend.CPP) + +def test_jump_search(): + _test_common_search(jump_search) + _test_common_search(jump_search, backend=Backend.CPP) + +def test_shell_sort(): + _test_common_sort(shell_sort) + +def test_radix_sort(): + _test_common_sort(radix_sort) diff --git 
a/build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_arrays.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_arrays.py new file mode 100644 index 000000000..886510113 --- /dev/null +++ b/build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_arrays.py @@ -0,0 +1,157 @@ +from pydatastructs.linear_data_structures import ( + OneDimensionalArray, DynamicOneDimensionalArray, + MultiDimensionalArray, ArrayForTrees) +from pydatastructs.utils.misc_util import Backend +from pydatastructs.utils.raises_util import raises +from pydatastructs.utils import TreeNode +from pydatastructs.utils._backend.cpp import _nodes + +def test_OneDimensionalArray(): + ODA = OneDimensionalArray + A = ODA(int, 5, [1.0, 2, 3, 4, 5], init=6) + A[1] = 2.0 + assert str(A) == '[1, 2, 3, 4, 5]' + assert A + assert ODA(int, [1.0, 2, 3, 4, 5], 5) + assert ODA(int, 5) + assert ODA(int, [1.0, 2, 3]) + assert raises(IndexError, lambda: A[7]) + assert raises(IndexError, lambda: A[-1]) + assert raises(ValueError, lambda: ODA()) + assert raises(ValueError, lambda: ODA(int, 1, 2, 3)) + assert raises(TypeError, lambda: ODA(int, 5.0, set([1, 2, 3]))) + assert raises(TypeError, lambda: ODA(int, 5.0)) + assert raises(TypeError, lambda: ODA(int, set([1, 2, 3]))) + assert raises(ValueError, lambda: ODA(int, 3, [1])) + + A = ODA(int, 5, [1, 2, 3, 4, 5], init=6, backend=Backend.CPP) + A[1] = 2 + assert str(A) == "['1', '2', '3', '4', '5']" + assert A + assert ODA(int, [1, 2, 3, 4, 5], 5, backend=Backend.CPP) + assert ODA(int, 5, backend=Backend.CPP) + assert ODA(int, [1, 2, 3], backend=Backend.CPP) + assert raises(TypeError, lambda: ODA(int, [1.0, 2, 3, 4, 5], 5, backend=Backend.CPP)) + assert raises(TypeError, lambda: ODA(int, [1.0, 2, 3], backend=Backend.CPP)) + assert raises(IndexError, lambda: A[7]) + assert raises(IndexError, lambda: A[-1]) + assert raises(ValueError, lambda: 
ODA(backend=Backend.CPP)) + assert raises(ValueError, lambda: ODA(int, 1, 2, 3, backend=Backend.CPP)) + assert raises(TypeError, lambda: ODA(int, 5.0, set([1, 2, 3]), backend=Backend.CPP)) + assert raises(TypeError, lambda: ODA(int, 5.0, backend=Backend.CPP)) + assert raises(TypeError, lambda: ODA(int, set([1, 2, 3]), backend=Backend.CPP)) + assert raises(ValueError, lambda: ODA(int, 3, [1], backend=Backend.CPP)) + assert raises(ValueError, lambda: ODA(int, 3, [1], backend=Backend.CPP)) + assert raises(TypeError, lambda: A.fill(2.0)) + + +def test_MultiDimensionalArray(): + assert raises(ValueError, lambda: MultiDimensionalArray(int, 2, -1, 3)) + assert MultiDimensionalArray(int, 10).shape == (10,) + array = MultiDimensionalArray(int, 5, 9, 3, 8) + assert array.shape == (5, 9, 3, 8) + array.fill(5) + array[1, 3, 2, 5] = 2.0 + assert array + assert array[1, 3, 2, 5] == 2.0 + assert array[1, 3, 0, 5] == 5 + assert array[1, 2, 2, 5] == 5 + assert array[2, 3, 2, 5] == 5 + assert raises(IndexError, lambda: array[5]) + assert raises(IndexError, lambda: array[4, 10]) + assert raises(IndexError, lambda: array[-1]) + assert raises(IndexError, lambda: array[2, 3, 2, 8]) + assert raises(ValueError, lambda: MultiDimensionalArray()) + assert raises(ValueError, lambda: MultiDimensionalArray(int)) + assert raises(TypeError, lambda: MultiDimensionalArray(int, 5, 6, "")) + array = MultiDimensionalArray(int, 3, 2, 2) + array.fill(1) + array[0, 0, 0] = 0 + array[0, 0, 1] = 0 + array[1, 0, 0] = 0 + array[2, 1, 1] = 0 + assert str(array) == '[0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0]' + array = MultiDimensionalArray(int, 4) + assert array.shape == (4,) + array.fill(5) + array[3] = 3 + assert array[3] == 3 + +def test_DynamicOneDimensionalArray(): + DODA = DynamicOneDimensionalArray + A = DODA(int, 0) + A.append(1) + A.append(2) + A.append(3) + A.append(4) + assert str(A) == "['1', '2', '3', '4']" + A.delete(0) + A.delete(0) + A.delete(15) + A.delete(-1) + A.delete(1) + A.delete(2) + assert 
A._data == [4, None, None] + assert str(A) == "['4']" + assert A.size == 3 + A.fill(4) + assert A._data == [4, 4, 4] + b = DynamicOneDimensionalArray(int, 0) + b.append(1) + b.append(2) + b.append(3) + b.append(4) + b.append(5) + assert b._data == [1, 2, 3, 4, 5, None, None] + assert list(reversed(b)) == [5, 4, 3, 2, 1] + + A = DODA(int, 0, backend=Backend.CPP) + A.append(1) + A.append(2) + A.append(3) + A.append(4) + assert str(A) == "['1', '2', '3', '4']" + A.delete(0) + A.delete(0) + A.delete(15) + A.delete(-1) + A.delete(1) + A.delete(2) + assert [A[i] for i in range(A.size)] == [4, None, None] + assert A.size == 3 + A.fill(4) + assert [A[0], A[1], A[2]] == [4, 4, 4] + b = DODA(int, 0, backend=Backend.CPP) + b.append(1) + b.append(2) + b.append(3) + b.append(4) + b.append(5) + assert [b[i] for i in range(b.size)] == [1, 2, 3, 4, 5, None, None] + +def test_DynamicOneDimensionalArray2(): + DODA = DynamicOneDimensionalArray + root = TreeNode(1, 100) + A = DODA(TreeNode, [root]) + assert str(A[0]) == "(None, 1, 100, None)" + +def _test_ArrayForTrees(backend): + AFT = ArrayForTrees + root = TreeNode(1, 100,backend=backend) + if backend==Backend.PYTHON: + A = AFT(TreeNode, [root], backend=backend) + B = AFT(TreeNode, 0, backend=backend) + else: + A = AFT(_nodes.TreeNode, [root], backend=backend) + B = AFT(_nodes.TreeNode, 0, backend=backend) + assert str(A) == "['(None, 1, 100, None)']" + node = TreeNode(2, 200, backend=backend) + A.append(node) + assert str(A) == "['(None, 1, 100, None)', '(None, 2, 200, None)']" + assert str(B) == "[]" + +def test_ArrayForTrees(): + _test_ArrayForTrees(Backend.PYTHON) + +def test_cpp_ArrayForTrees(): + _test_ArrayForTrees(Backend.CPP) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_linked_lists.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_linked_lists.py new file mode 100644 index 000000000..b7f172ddc --- /dev/null +++ 
b/build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_linked_lists.py @@ -0,0 +1,193 @@ +from pydatastructs.linear_data_structures import DoublyLinkedList, SinglyLinkedList, SinglyCircularLinkedList, DoublyCircularLinkedList, SkipList +from pydatastructs.utils.raises_util import raises +import copy, random + +def test_DoublyLinkedList(): + random.seed(1000) + dll = DoublyLinkedList() + assert raises(IndexError, lambda: dll[2]) + dll.appendleft(5) + dll.append(1) + dll.appendleft(2) + dll.append(3) + dll.insert_after(dll[-1], 4) + dll.insert_after(dll[2], 6) + dll.insert_before(dll[4], 1.1) + dll.insert_before(dll[0], 7) + dll.insert_at(0, 2) + dll.insert_at(-1, 9) + dll.extract(2) + assert dll.popleft().key == 2 + assert dll.popright().key == 4 + assert dll.search(3) == dll[-2] + assert dll.search(-1) is None + dll[-2].key = 0 + assert str(dll) == ("['(7, None)', '(5, None)', '(1, None)', " + "'(6, None)', '(1.1, None)', '(0, None)', " + "'(9, None)']") + assert len(dll) == 7 + assert raises(IndexError, lambda: dll.insert_at(8, None)) + assert raises(IndexError, lambda: dll.extract(20)) + dll_copy = DoublyCircularLinkedList() + for i in range(dll.size): + dll_copy.append(dll[i]) + for i in range(len(dll)): + if i%2 == 0: + dll.popleft() + else: + dll.popright() + assert str(dll) == "[]" + for _ in range(len(dll_copy)): + index = random.randint(0, len(dll_copy) - 1) + dll_copy.extract(index) + assert str(dll_copy) == "[]" + assert raises(ValueError, lambda: dll_copy.extract(1)) + +def test_SinglyLinkedList(): + random.seed(1000) + sll = SinglyLinkedList() + assert raises(IndexError, lambda: sll[2]) + sll.appendleft(5) + sll.append(1) + sll.appendleft(2) + sll.append(3) + sll.insert_after(sll[1], 4) + sll.insert_after(sll[-1], 6) + sll.insert_at(0, 2) + sll.insert_at(-1, 9) + sll.extract(2) + assert sll.popleft().key == 2 + assert sll.popright().key == 6 + sll[-2].key = 0 + assert str(sll) == ("['(2, None)', '(4, None)', 
'(1, None)', " + "'(0, None)', '(9, None)']") + assert len(sll) == 5 + assert raises(IndexError, lambda: sll.insert_at(6, None)) + assert raises(IndexError, lambda: sll.extract(20)) + sll_copy = DoublyCircularLinkedList() + for i in range(sll.size): + sll_copy.append(sll[i]) + for i in range(len(sll)): + if i%2 == 0: + sll.popleft() + else: + sll.popright() + assert str(sll) == "[]" + for _ in range(len(sll_copy)): + index = random.randint(0, len(sll_copy) - 1) + sll_copy.extract(index) + assert str(sll_copy) == "[]" + assert raises(ValueError, lambda: sll_copy.extract(1)) + +def test_SinglyCircularLinkedList(): + random.seed(1000) + scll = SinglyCircularLinkedList() + assert raises(IndexError, lambda: scll[2]) + scll.appendleft(5) + scll.append(1) + scll.appendleft(2) + scll.append(3) + scll.insert_after(scll[1], 4) + scll.insert_after(scll[-1], 6) + scll.insert_at(0, 2) + scll.insert_at(-1, 9) + scll.extract(2) + assert scll.popleft().key == 2 + assert scll.popright().key == 6 + assert scll.search(-1) is None + scll[-2].key = 0 + assert str(scll) == ("['(2, None)', '(4, None)', '(1, None)', " + "'(0, None)', '(9, None)']") + assert len(scll) == 5 + assert raises(IndexError, lambda: scll.insert_at(6, None)) + assert raises(IndexError, lambda: scll.extract(20)) + scll_copy = DoublyCircularLinkedList() + for i in range(scll.size): + scll_copy.append(scll[i]) + for i in range(len(scll)): + if i%2 == 0: + scll.popleft() + else: + scll.popright() + assert str(scll) == "[]" + for _ in range(len(scll_copy)): + index = random.randint(0, len(scll_copy) - 1) + scll_copy.extract(index) + assert str(scll_copy) == "[]" + assert raises(ValueError, lambda: scll_copy.extract(1)) + +def test_DoublyCircularLinkedList(): + random.seed(1000) + dcll = DoublyCircularLinkedList() + assert raises(IndexError, lambda: dcll[2]) + dcll.appendleft(5) + dcll.append(1) + dcll.appendleft(2) + dcll.append(3) + dcll.insert_after(dcll[-1], 4) + dcll.insert_after(dcll[2], 6) + 
dcll.insert_before(dcll[4], 1) + dcll.insert_before(dcll[0], 7) + dcll.insert_at(0, 2) + dcll.insert_at(-1, 9) + dcll.extract(2) + assert dcll.popleft().key == 2 + assert dcll.popright().key == 4 + dcll[-2].key = 0 + assert str(dcll) == ("['(7, None)', '(5, None)', '(1, None)', " + "'(6, None)', '(1, None)', '(0, None)', " + "'(9, None)']") + assert len(dcll) == 7 + assert raises(IndexError, lambda: dcll.insert_at(8, None)) + assert raises(IndexError, lambda: dcll.extract(20)) + dcll_copy = DoublyCircularLinkedList() + for i in range(dcll.size): + dcll_copy.append(dcll[i]) + for i in range(len(dcll)): + if i%2 == 0: + dcll.popleft() + else: + dcll.popright() + assert str(dcll) == "[]" + for _ in range(len(dcll_copy)): + index = random.randint(0, len(dcll_copy) - 1) + dcll_copy.extract(index) + assert str(dcll_copy) == "[]" + assert raises(ValueError, lambda: dcll_copy.extract(1)) + +def test_SkipList(): + random.seed(0) + sl = SkipList() + sl.insert(2) + sl.insert(10) + sl.insert(92) + sl.insert(1) + sl.insert(4) + sl.insert(27) + sl.extract(10) + assert str(sl) == ("(1, None) None None None None \n" + "(1, None) None None None None \n" + "(1, None) (2, None) (4, None) (27, None) (92, None) \n") + assert raises(KeyError, lambda: sl.extract(15)) + assert sl.search(1) is True + assert sl.search(47) is False + + sl = SkipList() + + for a in range(0, 20, 2): + sl.insert(a) + assert sl.search(16) is True + for a in range(4, 20, 4): + sl.extract(a) + assert sl.search(10) is True + for a in range(4, 20, 4): + sl.insert(a) + for a in range(0, 20, 2): + sl.extract(a) + assert sl.search(3) is False + + li = SkipList() + li.insert(1) + li.insert(2) + assert li.levels == 1 + assert li.size == 2 diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/__init__.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/__init__.py new file mode 100644 index 000000000..6ed099769 --- /dev/null +++ 
b/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/__init__.py @@ -0,0 +1,51 @@ +__all__ = [] + +from . import ( + stack, + binomial_trees, + queue, + disjoint_set, + sparse_table, +) + +from .binomial_trees import ( + BinomialTree +) +__all__.extend(binomial_trees.__all__) + +from .stack import ( + Stack, +) +__all__.extend(stack.__all__) + +from .queue import ( + Queue, + PriorityQueue +) +__all__.extend(queue.__all__) + +from .disjoint_set import ( + DisjointSetForest, +) +__all__.extend(disjoint_set.__all__) + +from .sparse_table import ( + SparseTable, +) +__all__.extend(sparse_table.__all__) + +from .segment_tree import ( + ArraySegmentTree, +) +__all__.extend(segment_tree.__all__) + +from .algorithms import ( + RangeQueryStatic, + RangeQueryDynamic +) +__all__.extend(algorithms.__all__) + +from .multiset import ( + Multiset +) +__all__.extend(multiset.__all__) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/_backend/__init__.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/_backend/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/algorithms.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/algorithms.py new file mode 100644 index 000000000..3c2f86516 --- /dev/null +++ b/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/algorithms.py @@ -0,0 +1,335 @@ +from pydatastructs.miscellaneous_data_structures.sparse_table import SparseTable +from pydatastructs.miscellaneous_data_structures.segment_tree import ArraySegmentTree +from pydatastructs.utils.misc_util import ( + _check_range_query_inputs, Backend, + raise_if_backend_is_not_python) + +__all__ = [ + 'RangeQueryStatic', + 'RangeQueryDynamic' +] + + +class 
RangeQueryStatic: + """ + Produces results for range queries of different kinds + by using specified data structure. + + Parameters + ========== + + array: OneDimensionalArray + The array for which we need to answer queries. + All the elements should be of type `int`. + func: callable + The function to be used for generating results + of a query. It should accept only one tuple as an + argument. The size of the tuple will be either 1 or 2 + and any one of the elements can be `None`. You can treat + `None` in whatever way you want according to the query + you are performing. For example, in case of range minimum + queries, `None` can be treated as infinity. We provide + the following which can be used as an argument value for this + parameter, + + `minimum` - For range minimum queries. + + `greatest_common_divisor` - For queries finding greatest + common divisor of a range. + + `summation` - For range sum queries. + data_structure: str + The data structure to be used for performing + range queries. + Currently the following data structures are supported, + + 'array' -> Array data structure. + Each query takes O(end - start) time asymptotically. + + 'sparse_table' -> Sparse table data structure. + Each query takes O(log(end - start)) time + asymptotically. + + By default, 'sparse_table'. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Examples + ======== + + >>> from pydatastructs import OneDimensionalArray, RangeQueryStatic + >>> from pydatastructs import minimum + >>> arr = OneDimensionalArray(int, [4, 6, 1, 5, 7, 3]) + >>> RMQ = RangeQueryStatic(arr, minimum) + >>> RMQ.query(3, 4) + 5 + >>> RMQ.query(0, 4) + 1 + >>> RMQ.query(0, 2) + 1 + + Note + ==== + + The array once passed as an input should not be modified + once the `RangeQueryStatic` constructor is called. If you + have updated the array, then you need to create a new + `RangeQueryStatic` object with this updated array. 
+ """ + + def __new__(cls, array, func, data_structure='sparse_table', **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + if len(array) == 0: + raise ValueError("Input %s array is empty."%(array)) + + if data_structure == 'array': + return RangeQueryStaticArray(array, func) + elif data_structure == 'sparse_table': + return RangeQueryStaticSparseTable(array, func) + else: + raise NotImplementedError( + "Currently %s data structure for range " + "query without updates isn't implemented yet." + % (data_structure)) + + @classmethod + def methods(cls): + return ['query'] + + def query(start, end): + """ + Method to perform a query in [start, end) range. + + Parameters + ========== + + start: int + The starting index of the range. + end: int + The ending index of the range. + """ + raise NotImplementedError( + "This is an abstract method.") + + +class RangeQueryStaticSparseTable(RangeQueryStatic): + + __slots__ = ["sparse_table", "bounds"] + + def __new__(cls, array, func, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = object.__new__(cls) + sparse_table = SparseTable(array, func) + obj.bounds = (0, len(array)) + obj.sparse_table = sparse_table + return obj + + @classmethod + def methods(cls): + return ['query'] + + def query(self, start, end): + _check_range_query_inputs((start, end + 1), self.bounds) + return self.sparse_table.query(start, end) + + +class RangeQueryStaticArray(RangeQueryStatic): + + __slots__ = ["array", "func"] + + def __new__(cls, array, func): + obj = object.__new__(cls) + obj.array = array + obj.func = func + return obj + + @classmethod + def methods(cls): + return ['query'] + + def query(self, start, end): + _check_range_query_inputs((start, end + 1), (0, len(self.array))) + + rsize = end - start + 1 + + if rsize == 1: + return self.func((self.array[start],)) + + query_ans = self.func((self.array[start], self.array[start + 1])) + for i in range(start + 2, 
end + 1): + query_ans = self.func((query_ans, self.array[i])) + return query_ans + +class RangeQueryDynamic: + """ + Produces results for range queries of different kinds + while allowing point updates by using specified + data structure. + + Parameters + ========== + + array: OneDimensionalArray + The array for which we need to answer queries. + All the elements should be of type `int`. + func: callable + The function to be used for generating results + of a query. It should accept only one tuple as an + argument. The size of the tuple will be either 1 or 2 + and any one of the elements can be `None`. You can treat + `None` in whatever way you want according to the query + you are performing. For example, in case of range minimum + queries, `None` can be treated as infinity. We provide + the following which can be used as an argument value for this + parameter, + + `minimum` - For range minimum queries. + + `greatest_common_divisor` - For queries finding greatest + common divisor of a range. + + `summation` - For range sum queries. + data_structure: str + The data structure to be used for performing + range queries. + Currently the following data structures are supported, + + 'array' -> Array data structure. + Each query takes O(end - start) time asymptotically. + Each point update takes O(1) time asymptotically. + + 'segment_tree' -> Segment tree data structure. + Each query takes O(log(end - start)) time + asymptotically. + Each point update takes O(log(len(array))) time + asymptotically. + + By default, 'segment_tree'. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. 
+ + Examples + ======== + + >>> from pydatastructs import OneDimensionalArray, RangeQueryDynamic + >>> from pydatastructs import minimum + >>> arr = OneDimensionalArray(int, [4, 6, 1, 5, 7, 3]) + >>> RMQ = RangeQueryDynamic(arr, minimum) + >>> RMQ.query(3, 4) + 5 + >>> RMQ.query(0, 4) + 1 + >>> RMQ.query(0, 2) + 1 + >>> RMQ.update(2, 0) + >>> RMQ.query(0, 2) + 0 + + Note + ==== + + The array once passed as an input should be modified + only with `RangeQueryDynamic.update` method. + """ + + def __new__(cls, array, func, data_structure='segment_tree', **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + + if len(array) == 0: + raise ValueError("Input %s array is empty."%(array)) + + if data_structure == 'array': + return RangeQueryDynamicArray(array, func, **kwargs) + elif data_structure == 'segment_tree': + return RangeQueryDynamicSegmentTree(array, func, **kwargs) + else: + raise NotImplementedError( + "Currently %s data structure for range " + "query with point updates isn't implemented yet." + % (data_structure)) + + @classmethod + def methods(cls): + return ['query', 'update'] + + def query(start, end): + """ + Method to perform a query in [start, end) range. + + Parameters + ========== + + start: int + The starting index of the range. + end: int + The ending index of the range. + """ + raise NotImplementedError( + "This is an abstract method.") + + def update(self, index, value): + """ + Method to update index with a new value. + + Parameters + ========== + + index: int + The index to be update. + value: int + The new value. 
+ """ + raise NotImplementedError( + "This is an abstract method.") + +class RangeQueryDynamicArray(RangeQueryDynamic): + + __slots__ = ["range_query_static"] + + def __new__(cls, array, func, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = object.__new__(cls) + obj.range_query_static = RangeQueryStaticArray(array, func) + return obj + + @classmethod + def methods(cls): + return ['query', 'update'] + + def query(self, start, end): + return self.range_query_static.query(start, end) + + def update(self, index, value): + self.range_query_static.array[index] = value + +class RangeQueryDynamicSegmentTree(RangeQueryDynamic): + + __slots__ = ["segment_tree", "bounds"] + + def __new__(cls, array, func, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.pop('backend', Backend.PYTHON)) + obj = object.__new__(cls) + obj.segment_tree = ArraySegmentTree(array, func, dimensions=1) + obj.segment_tree.build() + obj.bounds = (0, len(array)) + return obj + + @classmethod + def methods(cls): + return ['query', 'update'] + + def query(self, start, end): + _check_range_query_inputs((start, end + 1), self.bounds) + return self.segment_tree.query(start, end) + + def update(self, index, value): + self.segment_tree.update(index, value) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/binomial_trees.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/binomial_trees.py new file mode 100644 index 000000000..9ea91d828 --- /dev/null +++ b/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/binomial_trees.py @@ -0,0 +1,91 @@ +from pydatastructs.utils.misc_util import ( + BinomialTreeNode, _check_type, Backend, + raise_if_backend_is_not_python) + +__all__ = [ + 'BinomialTree' +] + +class BinomialTree(object): + """ + Represents binomial trees + + Parameters + ========== + + root: BinomialTreeNode + The 
root of the binomial tree. + By default, None + order: int + The order of the binomial tree. + By default, None + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Examples + ======== + + >>> from pydatastructs import BinomialTree, BinomialTreeNode + >>> root = BinomialTreeNode(1, 1) + >>> tree = BinomialTree(root, 0) + >>> tree.is_empty + False + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Binomial_heap + """ + __slots__ = ['root', 'order'] + + def __new__(cls, root=None, order=None, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + if root is not None and \ + not _check_type(root, BinomialTreeNode): + raise TypeError("%s i.e., root should be of " + "type BinomialTreeNode."%(root)) + if order is not None and not _check_type(order, int): + raise TypeError("%s i.e., order should be of " + "type int."%(order)) + obj = object.__new__(cls) + if root is not None: + root.is_root = True + obj.root = root + obj.order = order + return obj + + @classmethod + def methods(cls): + return ['add_sub_tree', '__new__', 'is_empty'] + + def add_sub_tree(self, other_tree): + """ + Adds a sub tree to current tree. + + Parameters + ========== + + other_tree: BinomialTree + + Raises + ====== + + ValueError: If order of the two trees + are different. 
+ """ + if not _check_type(other_tree, BinomialTree): + raise TypeError("%s i.e., other_tree should be of " + "type BinomialTree"%(other_tree)) + if self.order != other_tree.order: + raise ValueError("Orders of both the trees should be same.") + self.root.children.append(other_tree.root) + other_tree.root.parent = self.root + other_tree.root.is_root = False + self.order += 1 + + @property + def is_empty(self): + return self.root is None diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/disjoint_set.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/disjoint_set.py new file mode 100644 index 000000000..9a5caef5b --- /dev/null +++ b/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/disjoint_set.py @@ -0,0 +1,143 @@ +from pydatastructs.utils import Set +from pydatastructs.utils.misc_util import ( + Backend, raise_if_backend_is_not_python) + +__all__ = ['DisjointSetForest'] + +class DisjointSetForest(object): + """ + Represents a forest of disjoint set trees. + + Parameters + ========== + + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Examples + ======== + + >>> from pydatastructs import DisjointSetForest + >>> dst = DisjointSetForest() + >>> dst.make_set(1) + >>> dst.make_set(2) + >>> dst.union(1, 2) + >>> dst.find_root(2).key + 1 + >>> dst.make_root(2) + >>> dst.find_root(2).key + 2 + + References + ========== + + .. 
# NOTE(review): relies on module-level imports from the original file:
#   from pydatastructs.utils import Set
#   from pydatastructs.utils.misc_util import Backend, raise_if_backend_is_not_python

class DisjointSetForest(object):
    """
    Represents a forest of disjoint set trees.

    Parameters
    ==========

    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available
        backend is used.

    Examples
    ========

    >>> from pydatastructs import DisjointSetForest
    >>> dst = DisjointSetForest()
    >>> dst.make_set(1)
    >>> dst.make_set(2)
    >>> dst.union(1, 2)
    >>> dst.find_root(2).key
    1
    >>> dst.make_root(2)
    >>> dst.find_root(2).key
    2

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Disjoint-set_data_structure
    """

    __slots__ = ['tree']

    def __new__(cls, **kwargs):
        raise_if_backend_is_not_python(
            cls, kwargs.get('backend', Backend.PYTHON))
        obj = object.__new__(cls)
        # key -> Set node; every node carries .parent and .size.
        obj.tree = dict()
        return obj

    @classmethod
    def methods(cls):
        # FIX: previously omitted make_root, find_size and disjoint_sets
        # even though they are public methods of this class.
        return ['make_set', '__new__', 'find_root', 'union',
                'make_root', 'find_size', 'disjoint_sets']

    def make_set(self, key, data=None):
        """
        Adds a singleton set to the tree
        of disjoint sets with given key
        and optionally data.  No-op if the key already exists.
        """
        if self.tree.get(key, None) is None:
            new_set = Set(key, data)
            self.tree[key] = new_set
            new_set.parent = new_set
            new_set.size = 1

    def find_root(self, key):
        """
        Finds the root of the set
        with the given key by the path
        splitting algorithm.

        Raises
        ======

        KeyError: If the key is not present in the forest.
        """
        if self.tree.get(key, None) is None:
            raise KeyError("Invalid key, %s"%(key))
        _set = self.tree[key]
        # Path splitting: every visited node is re-pointed to its
        # grandparent, halving the path length for future lookups.
        while _set.parent is not _set:
            _set, _set.parent = _set.parent, _set.parent.parent
        return _set

    def union(self, key1, key2):
        """
        Takes the union of the two
        disjoint set trees with given
        keys. The union is done by size.
        """
        x_root = self.find_root(key1)
        y_root = self.find_root(key2)

        if x_root is not y_root:
            # Attach the smaller tree under the larger one.
            if x_root.size < y_root.size:
                x_root, y_root = y_root, x_root

            y_root.parent = x_root
            x_root.size += y_root.size

    def make_root(self, key):
        """
        Finds the set to which the key belongs
        and makes it the root of that set,
        preserving all size bookkeeping.
        """
        if self.tree.get(key, None) is None:
            raise KeyError("Invalid key, %s"%(key))

        key_set = self.tree[key]
        if key_set.parent is not key_set:
            current_parent = key_set.parent
            # Remove this key's subtree size from all its ancestors.
            while current_parent.parent is not current_parent:
                current_parent.size -= key_set.size
                current_parent = current_parent.parent

            all_set_size = current_parent.size  # This is the root node
            current_parent.size -= key_set.size

            # Make parent of current root the key's node.
            current_parent.parent = key_set
            # Size of new root equals the previous root's size.
            key_set.size = all_set_size
            # Make the key's node its own parent (the new root).
            key_set.parent = key_set

    def find_size(self, key):
        """
        Finds the size of the set to which the key belongs.
        """
        if self.tree.get(key, None) is None:
            raise KeyError("Invalid key, %s"%(key))

        return self.find_root(key).size

    def disjoint_sets(self):
        """
        Returns a sorted list of the disjoint sets (each itself a
        sorted list of member keys).
        """
        result = dict()
        for key in self.tree.keys():
            parent = self.find_root(key).key
            members = result.get(parent, [])
            members.append(key)
            result[parent] = members
        sorted_groups = []
        for v in result.values():
            sorted_groups.append(v)
            sorted_groups[-1].sort()
        sorted_groups.sort()
        return sorted_groups


class Multiset:
    """
    Represents a multiset (bag): a set that keeps per-element counts.

    Element lookup/count is backed by a dict; ordered queries
    (lower_bound/upper_bound) are backed by a RedBlackTree holding the
    distinct elements.
    """

    def __init__(self, *args):
        # TODO: Implement dict in pydatastructs
        self.counter = dict()
        from pydatastructs.trees import RedBlackTree
        self.tree = RedBlackTree()
        # _n is the total number of stored occurrences (with multiplicity).
        self._n = 0
        for arg in args:
            self.add(arg)

    def add(self, element):
        """Adds one occurrence of element to the multiset."""
        self.counter[element] = self.counter.get(element, 0) + 1
        self._n += 1
        # First occurrence: register the element in the ordered tree.
        if self.counter[element] == 1:
            self.tree.insert(element)

    def remove(self, element):
        """
        Removes one occurrence of element.

        Raises
        ======

        KeyError: If the element is not present.

        FIX: the previous implementation decremented the counter even
        when the element's count was already 0, driving the count
        negative and desynchronizing counter, tree and length.
        """
        if self.counter.get(element, 0) == 0:
            raise KeyError(element)
        # Last occurrence: drop the element from the ordered tree.
        if self.counter[element] == 1:
            self.tree.delete(element)
        self.counter[element] -= 1
        self._n -= 1

    def lower_bound(self, element):
        return self.tree.lower_bound(element)

    def upper_bound(self, element):
        return self.tree.upper_bound(element)

    def __contains__(self, element):
        return self.counter.get(element, 0) > 0

    def __len__(self):
        return self._n

    def count(self, element):
        """Returns the number of stored occurrences of element."""
        return self.counter.get(element, 0)
+ backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Examples + ======== + + >>> from pydatastructs import Queue + >>> q = Queue() + >>> q.append(1) + >>> q.append(2) + >>> q.append(3) + >>> q.popleft() + 1 + >>> len(q) + 2 + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Queue_(abstract_data_type) + """ + + def __new__(cls, implementation='array', **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + if implementation == 'array': + return ArrayQueue( + kwargs.get('items', None), + kwargs.get('dtype', int), + kwargs.get('double_ended', False)) + elif implementation == 'linked_list': + return LinkedListQueue( + kwargs.get('items', None), + kwargs.get('double_ended', False) + ) + else: + raise NotImplementedError( + "%s hasn't been implemented yet."%(implementation)) + + @classmethod + def methods(cls): + return ['__new__'] + + def _double_ended_check(self): + if not self._double_ended: + raise NotImplementedError( + "This method is only supported for " + "double ended queues.") + + def append(self, *args, **kwargs): + raise NotImplementedError( + "This is an abstract method.") + + def appendleft(self, *args, **kwargs): + raise NotImplementedError( + "This is an abstract method.") + + def pop(self, *args, **kwargs): + raise NotImplementedError( + "This is an abstract method.") + + def popleft(self, *args, **kwargs): + raise NotImplementedError( + "This is an abstract method.") + + @property + def is_empty(self): + raise NotImplementedError( + "This is an abstract method.") + + +class ArrayQueue(Queue): + + __slots__ = ['_front', '_rear', '_double_ended'] + + def __new__(cls, items=None, dtype=NoneType, double_ended=False, + **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + if items is None: + items = DynamicOneDimensionalArray(dtype, 0) + else: + dtype = type(items[0]) + items = 
DynamicOneDimensionalArray(dtype, items) + obj = object.__new__(cls) + obj.items, obj._front = items, -1 + if items.size == 0: + obj._front = -1 + obj._rear = -1 + else: + obj._front = 0 + obj._rear = items._num - 1 + obj._double_ended = double_ended + return obj + + @classmethod + def methods(cls): + return ['__new__', 'append', 'appendleft', 'popleft', + 'pop', 'is_empty', '__len__', '__str__', 'front', + 'rear'] + + def append(self, x): + if self.is_empty: + self._front = 0 + self.items._dtype = type(x) + self.items.append(x) + self._rear += 1 + + def appendleft(self, x): + self._double_ended_check() + temp = [] + if self.is_empty: + self._front = 0 + self._rear = -1 + self.items._dtype = type(x) + temp.append(x) + for i in range(self._front, self._rear + 1): + temp.append(self.items._data[i]) + self.items = DynamicOneDimensionalArray(type(temp[0]), temp) + self._rear += 1 + + def popleft(self): + if self.is_empty: + raise IndexError("Queue is empty.") + return_value = dc(self.items[self._front]) + front_temp = self._front + if self._front == self._rear: + self._front = -1 + self._rear = -1 + else: + if (self.items._num - 1)/self.items._size < \ + self.items._load_factor: + self._front = 0 + else: + self._front += 1 + self.items.delete(front_temp) + return return_value + + def pop(self): + self._double_ended_check() + if self.is_empty: + raise IndexError("Queue is empty.") + + return_value = dc(self.items[self._rear]) + rear_temp = self._rear + if self._front == self._rear: + self._front = -1 + self._rear = -1 + else: + if (self.items._num - 1)/self.items._size < \ + self.items._load_factor: + self._front = 0 + else: + self._rear -= 1 + self.items.delete(rear_temp) + return return_value + + @property + def front(self): + return self._front + + @property + def rear(self): + return self._rear + + @property + def is_empty(self): + return self.__len__() == 0 + + def __len__(self): + return self.items._num + + def __str__(self): + _data = [] + for i in 
range(self._front, self._rear + 1): + _data.append(self.items._data[i]) + return str(_data) + +class LinkedListQueue(Queue): + + __slots__ = ['queue', '_double_ended'] + + def __new__(cls, items=None, double_ended=False, + **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = object.__new__(cls) + obj.queue = SinglyLinkedList() + if items is None: + pass + elif type(items) in (list, tuple): + for x in items: + obj.append(x) + else: + raise TypeError("Expected type: list/tuple") + obj._double_ended = double_ended + return obj + + @classmethod + def methods(cls): + return ['__new__', 'append', 'appendleft', 'pop', 'popleft', + 'is_empty', '__len__', '__str__', 'front', 'rear'] + + def append(self, x): + self.queue.append(x) + + def appendleft(self, x): + self._double_ended_check() + if self._double_ended: + self.queue.appendleft(x) + + def pop(self): + self._double_ended_check() + if self.is_empty: + raise IndexError("Queue is empty.") + return_value = self.queue.popright() + return return_value + + def popleft(self): + if self.is_empty: + raise IndexError("Queue is empty.") + return_value = self.queue.popleft() + return return_value + + @property + def is_empty(self): + return self.__len__() == 0 + + @property + def front(self): + return self.queue.head + + @property + def rear(self): + return self.queue.tail + + def __len__(self): + return self.queue.size + + def __str__(self): + return str(self.queue) + +class PriorityQueue(object): + """ + Represents the concept of priority queue. + + Parameters + ========== + + implementation: str + The implementation which is to be + used for supporting operations + of priority queue. + The following implementations are supported, + + 'linked_list' -> Linked list implementation. + + 'binary_heap' -> Binary heap implementation. + + 'binomial_heap' -> Binomial heap implementation. + Doesn't support custom comparators, minimum + key data is extracted in every pop. 
# NOTE(review): relies on module-level imports from the original file:
#   SinglyLinkedList, BinaryHeap, BinomialHeap,
#   Backend, raise_if_backend_is_not_python

class PriorityQueue(object):
    """
    Represents the concept of priority queue.

    Parameters
    ==========

    implementation: str
        The implementation which is to be
        used for supporting operations
        of priority queue.
        The following implementations are supported,

        'linked_list' -> Linked list implementation.

        'binary_heap' -> Binary heap implementation.

        'binomial_heap' -> Binomial heap implementation.
            Doesn't support custom comparators, minimum
            key data is extracted in every pop.

        Optional, by default, 'binary_heap' implementation
        is used.
    comp: function
        The comparator to be used while comparing priorities.
        Must return a bool object.
        By default, `lambda u, v: u < v` is used to compare
        priorities i.e., minimum priority elements are extracted
        by pop operation.
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available
        backend is used.

    Examples
    ========

    >>> from pydatastructs import PriorityQueue
    >>> pq = PriorityQueue()
    >>> pq.push(1, 2)
    >>> pq.push(2, 3)
    >>> pq.pop()
    1
    >>> pq2 = PriorityQueue(comp=lambda u, v: u > v)
    >>> pq2.push(1, 2)
    >>> pq2.push(2, 3)
    >>> pq2.pop()
    2

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Priority_queue
    """

    def __new__(cls, implementation='binary_heap', **kwargs):
        raise_if_backend_is_not_python(
            cls, kwargs.get('backend', Backend.PYTHON))
        comp = kwargs.get("comp", lambda u, v: u < v)
        if implementation == 'linked_list':
            return LinkedListPriorityQueue(comp)
        elif implementation == 'binary_heap':
            return BinaryHeapPriorityQueue(comp)
        elif implementation == 'binomial_heap':
            return BinomialHeapPriorityQueue()
        else:
            # FIX: the message previously lacked the % interpolation, so
            # the literal "%s" was shown instead of the implementation name.
            raise NotImplementedError(
                "%s implementation is not currently supported "
                "by priority queue." % (implementation))

    @classmethod
    def methods(cls):
        return ['__new__']

    def push(self, value, priority):
        """
        Pushes the value to the priority queue
        according to the given priority.

        value
            Value to be pushed.
        priority
            Priority to be given to the value.
        """
        raise NotImplementedError(
            "This is an abstract method.")

    def pop(self):
        """
        Pops out the value from the priority queue.
        """
        raise NotImplementedError(
            "This is an abstract method.")

    @property
    def peek(self):
        """
        Returns the pointer to the value which will be
        popped out by `pop` method.
        """
        raise NotImplementedError(
            "This is an abstract method.")

    @property
    def is_empty(self):
        """
        Checks if the priority queue is empty.
        """
        raise NotImplementedError(
            "This is an abstract method.")


class LinkedListPriorityQueue(PriorityQueue):
    """Priority queue over a singly linked list; O(n) pop/peek."""

    __slots__ = ['items', 'comp']

    @classmethod
    def methods(cls):
        return ['__new__', 'push', 'pop', 'peek', 'is_empty']

    def __new__(cls, comp, **kwargs):
        raise_if_backend_is_not_python(
            cls, kwargs.get('backend', Backend.PYTHON))
        obj = object.__new__(cls)
        obj.items = SinglyLinkedList()
        obj.comp = comp
        return obj

    def push(self, value, priority):
        # Nodes are stored as (key=priority, data=value).
        self.items.append(priority, value)

    def pop(self):
        _, max_i = self._find_peek(return_index=True)
        pop_val = self.items.extract(max_i)
        return pop_val.data

    def _find_peek(self, return_index=False):
        # Linear scan for the best-priority node per self.comp.
        if self.is_empty:
            raise IndexError("Priority queue is empty.")

        walk = self.items.head
        i, max_i, max_p = 0, 0, walk
        while walk is not None:
            if self.comp(walk.key, max_p.key):
                max_i = i
                max_p = walk
            i += 1
            walk = walk.next
        if return_index:
            return max_p, max_i
        return max_p

    @property
    def peek(self):
        return self._find_peek()

    @property
    def is_empty(self):
        return self.items.size == 0


class BinaryHeapPriorityQueue(PriorityQueue):
    """Priority queue over a binary heap; O(log n) push/pop."""

    __slots__ = ['items']

    @classmethod
    def methods(cls):
        return ['__new__', 'push', 'pop', 'peek', 'is_empty']

    def __new__(cls, comp, **kwargs):
        raise_if_backend_is_not_python(
            cls, kwargs.get('backend', Backend.PYTHON))
        obj = object.__new__(cls)
        obj.items = BinaryHeap()
        # Inject the custom comparator into the heap.
        obj.items._comp = comp
        return obj

    def push(self, value, priority):
        self.items.insert(priority, value)

    def pop(self):
        node = self.items.extract()
        return node.data

    @property
    def peek(self):
        if self.items.is_empty:
            raise IndexError("Priority queue is empty.")
        return self.items.heap[0]

    @property
    def is_empty(self):
        return self.items.is_empty


class BinomialHeapPriorityQueue(PriorityQueue):
    """Min-priority queue over a binomial heap (no custom comparator)."""

    __slots__ = ['items']

    @classmethod
    def methods(cls):
        return ['__new__', 'push', 'pop', 'peek', 'is_empty']

    def __new__(cls, **kwargs):
        raise_if_backend_is_not_python(
            cls, kwargs.get('backend', Backend.PYTHON))
        obj = object.__new__(cls)
        obj.items = BinomialHeap()
        return obj

    def push(self, value, priority):
        self.items.insert(priority, value)

    def pop(self):
        node = self.items.find_minimum()
        self.items.delete_minimum()
        return node.data

    @property
    def peek(self):
        # NOTE(review): unlike BinaryHeapPriorityQueue.peek, no empty
        # check is performed here -- behavior on an empty heap depends on
        # BinomialHeap.find_minimum; TODO confirm.
        return self.items.find_minimum()

    @property
    def is_empty(self):
        return self.items.is_empty
# NOTE(review): relies on module-level imports from the original file:
#   from .stack import Stack
#   from pydatastructs.utils.misc_util import TreeNode, Backend, raise_if_backend_is_not_python

class ArraySegmentTree(object):
    """
    Represents the segment tree data structure,
    defined on arrays.

    Parameters
    ==========

    array: Array
        The array to be used for filling the segment tree.
    func: callable
        The function to be used for filling the segment tree.
        It should accept only one tuple as an argument. The
        size of the tuple will be either 1 or 2 and any one
        of the elements can be `None`. You can treat `None` in
        whatever way you want. For example, in case of minimum
        values, `None` can be treated as infinity. We provide
        the following which can be used as an argument value for this
        parameter,

        `minimum` - For range minimum queries.

        `greatest_common_divisor` - For queries finding greatest
        common divisor of a range.

        `summation` - For range sum queries.
    dimensions: int
        The number of dimensions of the array to be used
        for the segment tree.
        Optional, by default 1.
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available
        backend is used.

    Examples
    ========

    >>> from pydatastructs import ArraySegmentTree, minimum
    >>> from pydatastructs import OneDimensionalArray
    >>> arr = OneDimensionalArray(int, [1, 2, 3, 4, 5])
    >>> s_t = ArraySegmentTree(arr, minimum)
    >>> s_t.build()
    >>> s_t.query(0, 1)
    1
    >>> s_t.update(2, -1)
    >>> s_t.query(1, 3)
    -1

    References
    ==========

    .. [1] https://cp-algorithms.com/data_structures/segment_tree.html
    """
    def __new__(cls, array, func, **kwargs):
        # Factory: dispatches to a concrete implementation per dimensions.
        dimensions = kwargs.pop("dimensions", 1)
        if dimensions == 1:
            return OneDimensionalArraySegmentTree(array, func, **kwargs)
        else:
            # FIX: grammar of the original message ("do not support").
            raise NotImplementedError("ArraySegmentTree does not support "
                "{}-dimensional arrays as of now.".format(dimensions))

    def build(self):
        """
        Generates segment tree nodes when called.
        Nothing happens if nodes are already generated.
        """
        raise NotImplementedError(
            "This is an abstract method.")

    def update(self, index, value):
        """
        Updates the value at given index.
        """
        raise NotImplementedError(
            "This is an abstract method.")

    def query(self, start, end):
        """
        Queries the [start, end] range according
        to the function provided while constructing
        the `ArraySegmentTree` object.
        """
        raise NotImplementedError(
            "This is an abstract method.")

    def __str__(self):
        # Pre-order traversal using an explicit stack; empty children
        # are rendered as ''.
        recursion_stack = Stack(implementation='linked_list')
        recursion_stack.push(self._root)
        to_be_printed = []
        while not recursion_stack.is_empty:
            node = recursion_stack.pop().key
            if node is not None:
                to_be_printed.append(str((node.key, node.data)))
            else:
                to_be_printed.append('')
            if node is not None:
                recursion_stack.push(node.right)
                recursion_stack.push(node.left)
        return str(to_be_printed)


class OneDimensionalArraySegmentTree(ArraySegmentTree):
    """Segment tree over a one-dimensional array.

    Nodes are TreeNodes keyed by the inclusive (start, end) range they
    cover; leaves hold single array elements.
    """

    __slots__ = ["_func", "_array", "_root", "_backend"]

    def __new__(cls, array, func, **kwargs):
        backend = kwargs.get('backend', Backend.PYTHON)
        raise_if_backend_is_not_python(cls, backend)

        obj = object.__new__(cls)
        obj._func = func
        obj._array = array
        obj._root = None  # built lazily by .build()
        obj._backend = backend
        return obj

    @classmethod
    def methods(cls):
        # FIX: parameter renamed self -> cls for @classmethod consistency.
        return ['__new__', 'build', 'update',
                'query']

    @property
    def is_ready(self):
        # True once build() has created the tree.
        return self._root is not None

    def build(self):
        if self.is_ready:
            return

        # Iterative post-order construction with an explicit stack:
        # a node's data is filled only after both children exist.
        recursion_stack = Stack(implementation='linked_list')
        node = TreeNode((0, len(self._array) - 1), None, backend=self._backend)
        node.is_root = True
        self._root = node
        recursion_stack.push(node)

        while not recursion_stack.is_empty:
            node = recursion_stack.peek.key
            start, end = node.key
            if start == end:
                # Leaf: takes its value straight from the array.
                node.data = self._array[start]
                recursion_stack.pop()
                continue

            if (node.left is not None and
                node.right is not None):
                # Both children done: combine and finish this node.
                recursion_stack.pop()
                node.data = self._func((node.left.data, node.right.data))
            else:
                mid = (start + end) // 2
                # NOTE(review): children are created without the backend
                # kwarg, unlike the root -- TODO confirm intended.
                if node.left is None:
                    left_node = TreeNode((start, mid), None)
                    node.left = left_node
                    recursion_stack.push(left_node)
                if node.right is None:
                    right_node = TreeNode((mid + 1, end), None)
                    node.right = right_node
                    recursion_stack.push(right_node)

    def update(self, index, value):
        if not self.is_ready:
            raise ValueError("{} tree is not built yet. ".format(self) +
                             "Call .build method to prepare the segment tree.")

        # Iterative root-to-leaf descent; on the way back up each
        # ancestor recombines its children. Stack entries are
        # (node, child) pairs where child marks a completed subtree.
        recursion_stack = Stack(implementation='linked_list')
        recursion_stack.push((self._root, None))

        while not recursion_stack.is_empty:
            node, child = recursion_stack.peek.key
            start, end = node.key
            if start == end:
                # Leaf covering `index`: write through to the array.
                self._array[index] = value
                node.data = value
                recursion_stack.pop()
                if not recursion_stack.is_empty:
                    parent_node = recursion_stack.pop()
                    recursion_stack.push((parent_node.key[0], node))
                continue

            if child is not None:
                # A child subtree finished: recompute this node's value.
                node.data = self._func((node.left.data, node.right.data))
                recursion_stack.pop()
                if not recursion_stack.is_empty:
                    parent_node = recursion_stack.pop()
                    recursion_stack.push((parent_node.key[0], node))
            else:
                mid = (start + end) // 2
                if start <= index and index <= mid:
                    recursion_stack.push((node.left, None))
                else:
                    recursion_stack.push((node.right, None))

    def _query(self, node, start, end, l, r):
        # Classic recursive range query; disjoint ranges contribute None,
        # which self._func is documented to tolerate.
        if r < start or end < l:
            return None

        if l <= start and end <= r:
            return node.data

        mid = (start + end) // 2
        left_result = self._query(node.left, start, mid, l, r)
        right_result = self._query(node.right, mid + 1, end, l, r)
        return self._func((left_result, right_result))

    def query(self, start, end):
        if not self.is_ready:
            raise ValueError("{} tree is not built yet. ".format(self) +
                             "Call .build method to prepare the segment tree.")

        return self._query(self._root, 0, len(self._array) - 1,
                           start, end)
# NOTE(review): relies on module-level imports from the original file:
#   from pydatastructs.linear_data_structures.arrays import OneDimensionalArray
#   from pydatastructs.utils.misc_util import Backend, raise_if_backend_is_not_python
#   import math

class SparseTable(object):
    """
    Represents the sparse table data structure.

    Parameters
    ==========

    array: OneDimensionalArray
        The array to be used for filling the sparse table.
    func: callable
        The function to be used for filling the sparse table.
        It should accept only one tuple as an argument. The
        size of the tuple will be either 1 or 2 and any one
        of the elements can be `None`. You can treat `None` in
        whatever way you want. For example, in case of minimum
        values, `None` can be treated as infinity. We provide
        the following which can be used as an argument value for this
        parameter,

        `minimum` - For range minimum queries.

        `greatest_common_divisor` - For queries finding greatest
        common divisor of a range.

        `summation` - For range sum queries.
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available
        backend is used.

    Examples
    ========

    >>> from pydatastructs import SparseTable, minimum
    >>> from pydatastructs import OneDimensionalArray
    >>> arr = OneDimensionalArray(int, [1, 2, 3, 4, 5])
    >>> s_t = SparseTable(arr, minimum)
    >>> str(s_t)
    "['[1, 1, 1]', '[2, 2, 2]', '[3, 3, None]', '[4, 4, None]', '[5, None, None]']"

    References
    ==========

    .. [1] https://cp-algorithms.com/data_structures/sparse-table.html
    """

    __slots__ = ['_table', 'func']

    def __new__(cls, array, func, **kwargs):
        raise_if_backend_is_not_python(
            cls, kwargs.get('backend', Backend.PYTHON))

        # TODO: If possible remove the following check.
        if len(array) == 0:
            raise ValueError("Input %s array is empty."%(array))

        obj = object.__new__(cls)
        size = len(array)
        log_size = int(math.log2(size)) + 1
        # _table[i][j] holds func over the range [i, i + 2**j - 1].
        # NOTE(review): rows are created with dtype int even though func
        # may produce other types -- TODO confirm OneDimensionalArray
        # tolerates this.
        obj._table = [OneDimensionalArray(int, log_size) for _ in range(size)]
        obj.func = func

        for i in range(size):
            obj._table[i][0] = func((array[i],))

        # Doubling step: each level-j entry combines two level-(j-1)
        # entries; ranges overflowing the array are simply skipped.
        for j in range(1, log_size + 1):
            for i in range(size - (1 << j) + 1):
                obj._table[i][j] = func((obj._table[i][j - 1],
                                         obj._table[i + (1 << (j - 1))][j - 1]))

        return obj

    @classmethod
    def methods(cls):
        return ['query', '__str__']

    def query(self, start, end):
        """
        Method to perform a query on the sparse table over the
        inclusive [start, end] range.

        FIX: the docstring previously claimed a half-open
        [start, end) range, but the implementation treats `end`
        as inclusive (see the `start + (1 << j) - 1 <= end` guard).

        Parameters
        ==========

        start: int
            The starting index of the range.
        end: int
            The ending index of the range (inclusive).
        """
        # Greedily cover [start, end] with the largest power-of-two
        # blocks; partial results are folded together via self.func.
        j = int(math.log2(end - start + 1)) + 1
        answer = None
        while j >= 0:
            if start + (1 << j) - 1 <= end:
                answer = self.func((answer, self._table[start][j]))
                start += 1 << j
            j -= 1
        return answer

    def __str__(self):
        return str([str(array) for array in self._table])
[1] https://en.wikipedia.org/wiki/Stack_(abstract_data_type) + """ + + def __new__(cls, implementation='array', **kwargs): + backend = kwargs.get('backend', Backend.PYTHON) + if implementation == 'array': + items = kwargs.get('items', None) + dtype = kwargs.get('dtype', int) + if backend == Backend.CPP: + return _stack.ArrayStack(items, dtype) + + return ArrayStack(items, dtype) + if implementation == 'linked_list': + raise_if_backend_is_not_python(cls, backend) + + return LinkedListStack( + kwargs.get('items', None) + ) + raise NotImplementedError( + "%s hasn't been implemented yet."%(implementation)) + + @classmethod + def methods(cls): + return ['__new__'] + + def push(self, *args, **kwargs): + raise NotImplementedError( + "This is an abstract method.") + + def pop(self, *args, **kwargs): + raise NotImplementedError( + "This is an abstract method.") + + @property + def is_empty(self): + raise NotImplementedError( + "This is an abstract method.") + + @property + def peek(self): + raise NotImplementedError( + "This is an abstract method.") + +class ArrayStack(Stack): + + __slots__ = ['items'] + + def __new__(cls, items=None, dtype=NoneType, + **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + if items is None: + items = DynamicOneDimensionalArray(dtype, 0) + else: + items = DynamicOneDimensionalArray(dtype, items) + obj = object.__new__(cls) + obj.items = items + return obj + + @classmethod + def methods(cls): + return ['__new__', 'push', 'pop', 'is_emtpy', + 'peek', '__len__', '__str__'] + + def push(self, x): + if self.is_empty: + self.items._dtype = type(x) + self.items.append(x) + + def pop(self): + if self.is_empty: + raise IndexError("Stack is empty") + + top_element = dc(self.items[self.items._last_pos_filled]) + self.items.delete(self.items._last_pos_filled) + return top_element + + @property + def is_empty(self): + return self.items._last_pos_filled == -1 + + @property + def peek(self): + return 
self.items[self.items._last_pos_filled] + + def __len__(self): + return self.items._num + + def __str__(self): + """ + Used for printing. + """ + return str(self.items._data) + + +class LinkedListStack(Stack): + + __slots__ = ['stack'] + + def __new__(cls, items=None, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = object.__new__(cls) + obj.stack = SinglyLinkedList() + if items is None: + pass + elif type(items) in (list, tuple): + for x in items: + obj.push(x) + else: + raise TypeError("Expected type: list/tuple") + return obj + + @classmethod + def methods(cls): + return ['__new__', 'push', 'pop', 'is_emtpy', + 'peek', '__len__', '__str__'] + + def push(self, x): + self.stack.appendleft(x) + + def pop(self): + if self.is_empty: + raise IndexError("Stack is empty") + return self.stack.popleft() + + @property + def is_empty(self): + return self.__len__() == 0 + + @property + def peek(self): + return self.stack.head + + @property + def size(self): + return self.stack.size + + def __len__(self): + return self.stack.size + + def __str__(self): + elements = [] + current_node = self.peek + while current_node is not None: + elements.append(str(current_node)) + current_node = current_node.next + return str(elements[::-1]) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/__init__.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_binomial_trees.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_binomial_trees.py new file mode 100644 index 000000000..1275e9aec --- /dev/null +++ 
b/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_binomial_trees.py @@ -0,0 +1,17 @@ +from pydatastructs.miscellaneous_data_structures.binomial_trees import BinomialTree +from pydatastructs.utils.raises_util import raises +from pydatastructs.utils.misc_util import BinomialTreeNode + +# only tests the corner cases +def test_BinomialTree(): + assert raises(TypeError, lambda: BinomialTree(1, 1)) + assert raises(TypeError, lambda: BinomialTree(None, 1.5)) + + bt = BinomialTree() + assert raises(TypeError, lambda: bt.add_sub_tree(None)) + bt1 = BinomialTree(BinomialTreeNode(1, 1), 0) + node = BinomialTreeNode(2, 2) + node.add_children(BinomialTreeNode(3, 3)) + bt2 = BinomialTree(node, 1) + assert raises(ValueError, lambda: bt1.add_sub_tree(bt2)) + assert bt1.is_empty is False diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_disjoint_set.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_disjoint_set.py new file mode 100644 index 000000000..fcabd3112 --- /dev/null +++ b/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_disjoint_set.py @@ -0,0 +1,70 @@ +from pydatastructs import DisjointSetForest +from pydatastructs.utils.raises_util import raises + +def test_DisjointSetForest(): + + dst = DisjointSetForest() + for i in range(8): + dst.make_set(i+1) + + dst.union(1, 2) + dst.union(1, 5) + assert dst.find_size(2) == 3 + dst.union(1, 6) + dst.union(1, 8) + dst.union(3, 4) + assert dst.find_size(3) == 2 + + assert (dst.find_root(1) == dst.find_root(2) == + dst.find_root(5) == dst.find_root(6) == dst.find_root(8)) + assert dst.disjoint_sets() == [[1, 2, 5, 6, 8], [3, 4], [7]] + assert dst.find_root(3) == dst.find_root(4) + assert dst.find_root(7).key == 7 + + assert raises(KeyError, lambda: dst.find_root(9)) + assert raises(KeyError, lambda: 
dst.find_size(9)) + dst.union(3, 1) + assert dst.find_root(3).key == 1 + assert dst.find_root(5).key == 1 + dst.make_root(6) + assert dst.disjoint_sets() == [[1, 2, 3, 4, 5, 6, 8], [7]] + assert dst.find_root(3).key == 6 + assert dst.find_root(5).key == 6 + dst.make_root(5) + assert dst.find_root(1).key == 5 + assert dst.find_root(5).key == 5 + assert raises(KeyError, lambda: dst.make_root(9)) + + dst = DisjointSetForest() + for i in range(6): + dst.make_set(i) + assert dst.tree[2].size == 1 + dst.union(2, 3) + assert dst.tree[2].size == 2 + assert dst.tree[3].size == 1 + dst.union(1, 4) + dst.union(2, 4) + assert dst.disjoint_sets() == [[0], [1, 2, 3, 4], [5]] + # current tree + ############### + # 2 + # / \ + # 1 3 + # / + # 4 + ############### + assert dst.tree[2].size == 4 + assert dst.tree[1].size == 2 + assert dst.tree[3].size == dst.tree[4].size == 1 + dst.make_root(4) + # New tree + ############### + # 4 + # | + # 2 + # / \ + # 1 3 + ############### + assert dst.tree[4].size == 4 + assert dst.tree[2].size == 3 + assert dst.tree[1].size == dst.tree[3].size == 1 diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_multiset.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_multiset.py new file mode 100644 index 000000000..fb412704a --- /dev/null +++ b/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_multiset.py @@ -0,0 +1,39 @@ +from pydatastructs.miscellaneous_data_structures import Multiset + +def test_Multiset(): + + ms = Multiset() + ms.add(5) + ms.add(5) + ms.add(3) + ms.add(7) + assert len(ms) == 4 + assert 5 in ms + assert ms.count(5) == 2 + assert ms.count(3) == 1 + assert ms.count(-3) == 0 + assert not 4 in ms + ms.remove(5) + assert 5 in ms + assert ms.lower_bound(5) == 5 + assert ms.upper_bound(5) == 7 + + ms = Multiset(5, 3, 7, 2) + + assert len(ms) == 4 + assert 5 in ms + assert 
ms.count(7) == 1 + assert not 4 in ms + assert ms.lower_bound(3) == 3 + assert ms.upper_bound(3) == 5 + assert ms.upper_bound(7) is None + + ms.remove(5) + + assert len(ms) == 3 + assert not 5 in ms + + ms.add(4) + + assert 4 in ms + assert len(ms) == 4 diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_queue.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_queue.py new file mode 100644 index 000000000..81e1e996e --- /dev/null +++ b/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_queue.py @@ -0,0 +1,116 @@ +from pydatastructs.miscellaneous_data_structures import Queue +from pydatastructs.miscellaneous_data_structures.queue import ( + ArrayQueue, LinkedListQueue, PriorityQueue, + LinkedListPriorityQueue) +from pydatastructs.utils.raises_util import raises +from pydatastructs.utils.misc_util import _check_type + +def test_Queue(): + q = Queue(implementation='array') + q1 = Queue() + assert _check_type(q, ArrayQueue) is True + assert _check_type(q1, ArrayQueue) is True + q2 = Queue(implementation='linked_list') + assert _check_type(q2, LinkedListQueue) is True + assert raises(NotImplementedError, lambda: Queue(implementation='')) + +def test_ArrayQueue(): + q1 = Queue() + raises(IndexError, lambda: q1.popleft()) + q1 = Queue(implementation='array', items=[0]) + q1.append(1) + q1.append(2) + q1.append(3) + assert str(q1) == '[0, 1, 2, 3]' + assert len(q1) == 4 + assert q1.popleft() == 0 + assert q1.popleft() == 1 + assert len(q1) == 2 + assert q1.popleft() == 2 + assert q1.popleft() == 3 + assert len(q1) == 0 + + q2 = Queue(implementation='array', items=[0], double_ended=True) + q2.append(1) + q2.append(2) + q2.appendleft(3) + assert str(q2) == '[3, 0, 1, 2]' + assert len(q2) == 4 + assert q2.popleft() == 3 + assert q2.pop() == 2 + assert len(q2) == 2 + assert q2.popleft() == 0 + assert q2.pop() 
== 1 + assert len(q2) == 0 + + q1 = Queue(implementation='array', items=[0]) + assert raises(NotImplementedError, lambda: q1.appendleft(2)) + + +def test_LinkedListQueue(): + q1 = Queue(implementation='linked_list') + q1.append(1) + assert raises(TypeError, lambda: Queue(implementation='linked_list', items={0, 1})) + q1 = Queue(implementation='linked_list', items = [0, 1]) + q1.append(2) + q1.append(3) + assert str(q1) == ("['(0, None)', '(1, None)', " + "'(2, None)', '(3, None)']") + assert len(q1) == 4 + assert q1.popleft().key == 0 + assert q1.popleft().key == 1 + assert len(q1) == 2 + assert q1.popleft().key == 2 + assert q1.popleft().key == 3 + assert len(q1) == 0 + raises(IndexError, lambda: q1.popleft()) + + q1 = Queue(implementation='linked_list',items=['a',None,type,{}]) + assert len(q1) == 4 + + front = q1.front + assert front.key == q1.popleft().key + + rear = q1.rear + for _ in range(len(q1)-1): + q1.popleft() + + assert rear.key == q1.popleft().key + + q1 = Queue(implementation='linked_list', double_ended=True) + q1.appendleft(1) + q2 = Queue(implementation='linked_list', items=[0, 1]) + assert raises(NotImplementedError, lambda: q2.appendleft(1)) + q1 = Queue(implementation='linked_list', items = [0, 1], double_ended=True) + q1.appendleft(2) + q1.append(3) + assert str(q1) == "['(2, None)', '(0, None)', '(1, None)', '(3, None)']" + assert len(q1) == 4 + assert q1.popleft().key == 2 + assert q1.pop().key == 3 + assert len(q1) == 2 + assert q1.pop().key == 1 + assert q1.popleft().key == 0 + assert len(q1) == 0 + assert raises(IndexError, lambda: q1.popleft()) + +def test_PriorityQueue(): + pq1 = PriorityQueue(implementation='linked_list') + assert _check_type(pq1, LinkedListPriorityQueue) is True + assert raises(NotImplementedError, lambda: Queue(implementation='')) + +def test_ImplementationPriorityQueue(): + impls = ['linked_list', 'binomial_heap', 'binary_heap'] + for impl in impls: + pq1 = PriorityQueue(implementation=impl) + pq1.push(1, 4) + 
pq1.push(2, 3) + pq1.push(3, 2) + assert pq1.peek.data == 3 + assert pq1.pop() == 3 + assert pq1.peek.data == 2 + assert pq1.pop() == 2 + assert pq1.peek.data == 1 + assert pq1.pop() == 1 + assert pq1.is_empty is True + assert raises(IndexError, lambda: pq1.peek) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_range_query_dynamic.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_range_query_dynamic.py new file mode 100644 index 000000000..f655c546d --- /dev/null +++ b/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_range_query_dynamic.py @@ -0,0 +1,71 @@ +from pydatastructs import ( + RangeQueryDynamic, minimum, + greatest_common_divisor, summation, + OneDimensionalArray) +from pydatastructs.utils.raises_util import raises +import random, math +from copy import deepcopy + +def _test_RangeQueryDynamic_common(func, gen_expected): + + array = OneDimensionalArray(int, []) + raises(ValueError, lambda: RangeQueryDynamic(array, func)) + + array = OneDimensionalArray(int, [1]) + rq = RangeQueryDynamic(array, func) + assert rq.query(0, 0) == 1 + raises(ValueError, lambda: rq.query(0, -1)) + raises(IndexError, lambda: rq.query(0, 1)) + + array_sizes = [3, 6, 12, 24, 48, 96] + random.seed(0) + for array_size in array_sizes: + inputs = [] + for i in range(array_size): + for j in range(i + 1, array_size): + inputs.append((i, j)) + + data_structures = ["array", "segment_tree"] + for ds in data_structures: + data = random.sample(range(-2*array_size, 2*array_size), array_size) + array = OneDimensionalArray(int, data) + rmq = RangeQueryDynamic(array, func, data_structure=ds) + for input in inputs: + assert rmq.query(input[0], input[1]) == gen_expected(data, input[0], input[1]) + + data_copy = deepcopy(data) + for _ in range(array_size//2): + index = random.randint(0, array_size - 1) + value = 
random.randint(0, 4 * array_size) + data_copy[index] = value + rmq.update(index, value) + + for input in inputs: + assert rmq.query(input[0], input[1]) == gen_expected(data_copy, input[0], input[1]) + +def test_RangeQueryDynamic_minimum(): + + def _gen_minimum_expected(data, i, j): + return min(data[i:j + 1]) + + _test_RangeQueryDynamic_common(minimum, _gen_minimum_expected) + +def test_RangeQueryDynamic_greatest_common_divisor(): + + def _gen_gcd_expected(data, i, j): + if j == i: + return data[i] + else: + expected_gcd = math.gcd(data[i], data[i + 1]) + for idx in range(i + 2, j + 1): + expected_gcd = math.gcd(expected_gcd, data[idx]) + return expected_gcd + + _test_RangeQueryDynamic_common(greatest_common_divisor, _gen_gcd_expected) + +def test_RangeQueryDynamic_summation(): + + def _gen_summation_expected(data, i, j): + return sum(data[i:j + 1]) + + return _test_RangeQueryDynamic_common(summation, _gen_summation_expected) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_range_query_static.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_range_query_static.py new file mode 100644 index 000000000..e898653c9 --- /dev/null +++ b/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_range_query_static.py @@ -0,0 +1,63 @@ +from pydatastructs import ( + RangeQueryStatic, minimum, + greatest_common_divisor, summation, + OneDimensionalArray) +from pydatastructs.utils.raises_util import raises +import random, math + +def _test_RangeQueryStatic_common(func, gen_expected): + + array = OneDimensionalArray(int, []) + raises(ValueError, lambda: RangeQueryStatic(array, func)) + + array = OneDimensionalArray(int, [1]) + rq = RangeQueryStatic(array, func) + assert rq.query(0, 0) == 1 + raises(ValueError, lambda: rq.query(0, -1)) + raises(IndexError, lambda: rq.query(0, 1)) + + array_sizes = [3, 6, 12, 24, 48, 
96] + random.seed(0) + for array_size in array_sizes: + data = random.sample(range(-2*array_size, 2*array_size), array_size) + array = OneDimensionalArray(int, data) + + expected = [] + inputs = [] + for i in range(array_size): + for j in range(i + 1, array_size): + inputs.append((i, j)) + expected.append(gen_expected(data, i, j)) + + data_structures = ["array", "sparse_table"] + for ds in data_structures: + rmq = RangeQueryStatic(array, func, data_structure=ds) + for input, correct in zip(inputs, expected): + assert rmq.query(input[0], input[1]) == correct + +def test_RangeQueryStatic_minimum(): + + def _gen_minimum_expected(data, i, j): + return min(data[i:j + 1]) + + _test_RangeQueryStatic_common(minimum, _gen_minimum_expected) + +def test_RangeQueryStatic_greatest_common_divisor(): + + def _gen_gcd_expected(data, i, j): + if j == i: + return data[i] + else: + expected_gcd = math.gcd(data[i], data[i + 1]) + for idx in range(i + 2, j + 1): + expected_gcd = math.gcd(expected_gcd, data[idx]) + return expected_gcd + + _test_RangeQueryStatic_common(greatest_common_divisor, _gen_gcd_expected) + +def test_RangeQueryStatic_summation(): + + def _gen_summation_expected(data, i, j): + return sum(data[i:j + 1]) + + return _test_RangeQueryStatic_common(summation, _gen_summation_expected) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_stack.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_stack.py new file mode 100644 index 000000000..2d9d08b82 --- /dev/null +++ b/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_stack.py @@ -0,0 +1,77 @@ +from pydatastructs.miscellaneous_data_structures import Stack +from pydatastructs.miscellaneous_data_structures.stack import ArrayStack, LinkedListStack +from pydatastructs.miscellaneous_data_structures._backend.cpp import _stack +from 
pydatastructs.utils.raises_util import raises +from pydatastructs.utils.misc_util import _check_type, Backend + + +def test_Stack(): + s = Stack(implementation='array') + s1 = Stack() + assert _check_type(s, ArrayStack) is True + assert _check_type(s1, ArrayStack) is True + s2 = Stack(implementation='linked_list') + assert _check_type(s2, LinkedListStack) is True + assert raises(NotImplementedError, lambda: Stack(implementation='')) + + s3 = Stack(backend=Backend.CPP) + assert _check_type(s3, _stack.ArrayStack) is True + s4 = Stack(implementation="array", backend=Backend.CPP) + assert _check_type(s4, _stack.ArrayStack) is True + +def test_ArrayStack(): + s = Stack(implementation='array') + s.push(1) + s.push(2) + s.push(3) + assert s.peek == 3 + assert str(s) == '[1, 2, 3]' + assert s.pop() == 3 + assert s.pop() == 2 + assert s.pop() == 1 + assert s.is_empty is True + assert raises(IndexError, lambda : s.pop()) + _s = Stack(items=[1, 2, 3]) + assert str(_s) == '[1, 2, 3]' + assert len(_s) == 3 + + # Cpp test + s1 = Stack(implementation="array", backend=Backend.CPP) + s1.push(1) + s1.push(2) + s1.push(3) + assert s1.peek == 3 + assert str(s1) == "['1', '2', '3']" + assert s1.pop() == 3 + assert s1.pop() == 2 + assert s1.pop() == 1 + assert s1.is_empty is True + assert raises(IndexError, lambda : s1.pop()) + _s1 = Stack(items=[1, 2, 3], backend=Backend.CPP) + assert str(_s1) == "['1', '2', '3']" + assert len(_s1) == 3 + +def test_LinkedListStack(): + s = Stack(implementation='linked_list') + s.push(1) + s.push(2) + s.push(3) + assert s.peek.key == 3 + assert str(s) == ("['(1, None)', '(2, None)', '(3, None)']") + assert s.pop().key == 3 + assert s.pop().key == 2 + assert s.pop().key == 1 + assert s.is_empty is True + assert raises(IndexError, lambda : s.pop()) + assert str(s) == '[]' + _s = Stack(implementation='linked_list',items=[1, 2, 3]) + assert str(_s) == "['(1, None)', '(2, None)', '(3, None)']" + assert len(_s) == 3 + + s = 
Stack(implementation='linked_list',items=['a',None,type,{}]) + assert len(s) == 4 + assert s.size == 4 + + peek = s.peek + assert peek.key == s.pop().key + assert raises(TypeError, lambda: Stack(implementation='linked_list', items={0, 1})) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/strings/__init__.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/strings/__init__.py new file mode 100644 index 000000000..33930b426 --- /dev/null +++ b/build-install/usr/lib/python3.12/site-packages/pydatastructs/strings/__init__.py @@ -0,0 +1,18 @@ +__all__ = [] + +from . import ( + trie, + algorithms +) + +from .trie import ( + Trie +) + +__all__.extend(trie.__all__) + +from .algorithms import ( + find +) + +__all__.extend(algorithms.__all__) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/strings/algorithms.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/strings/algorithms.py new file mode 100644 index 000000000..1e26b9411 --- /dev/null +++ b/build-install/usr/lib/python3.12/site-packages/pydatastructs/strings/algorithms.py @@ -0,0 +1,247 @@ +from pydatastructs.linear_data_structures.arrays import ( + DynamicOneDimensionalArray, OneDimensionalArray) +from pydatastructs.utils.misc_util import ( + Backend, raise_if_backend_is_not_python) + +__all__ = [ + 'find' +] + +PRIME_NUMBER, MOD = 257, 1000000007 + +def find(text, query, algorithm, **kwargs): + """ + Finds occurrence of a query string within the text string. + + Parameters + ========== + + text: str + The string on which query is to be performed. + query: str + The string which is to be searched in the text. + algorithm: str + The algorithm which should be used for + searching. + Currently the following algorithms are + supported, + + 'kmp' -> Knuth-Morris-Pratt as given in [1]. + + 'rabin_karp' -> Rabin–Karp algorithm as given in [2]. + + 'boyer_moore' -> Boyer-Moore algorithm as given in [3]. 
+ + 'z_function' -> Z-function algorithm as given in [4]. + + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Returns + ======= + + DynamicOneDimensionalArray + An array of starting positions of the portions + in the text which match with the given query. + + Examples + ======== + + >>> from pydatastructs.strings.algorithms import find + >>> text = "abcdefabcabe" + >>> pos = find(text, "ab", algorithm="kmp") + >>> str(pos) + "['0', '6', '9']" + >>> pos = find(text, "abc", algorithm="kmp") + >>> str(pos) + "['0', '6']" + >>> pos = find(text, "abe", algorithm="kmp") + >>> str(pos) + "['9']" + >>> pos = find(text, "abed", algorithm="kmp") + >>> str(pos) + '[]' + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Knuth%E2%80%93Morris%E2%80%93Pratt_algorithm + .. [2] https://en.wikipedia.org/wiki/Rabin%E2%80%93Karp_algorithm + .. [3] https://en.wikipedia.org/wiki/Boyer%E2%80%93Moore_string-search_algorithm + .. [4] https://usaco.guide/CPH.pdf#page=257 + """ + raise_if_backend_is_not_python( + find, kwargs.get('backend', Backend.PYTHON)) + import pydatastructs.strings.algorithms as algorithms + func = "_" + algorithm + if not hasattr(algorithms, func): + raise NotImplementedError( + "Currently %s algoithm for searching strings " + "inside a text isn't implemented yet." 
+ %(algorithm)) + return getattr(algorithms, func)(text, query) + + +def _knuth_morris_pratt(text, query): + if len(text) == 0 or len(query) == 0: + return DynamicOneDimensionalArray(int, 0) + kmp_table = _build_kmp_table(query) + return _do_match(text, query, kmp_table) + +_kmp = _knuth_morris_pratt + +def _build_kmp_table(query): + pos, cnd = 1, 0 + kmp_table = OneDimensionalArray(int, len(query) + 1) + + kmp_table[0] = -1 + + while pos < len(query): + if query[pos] == query[cnd]: + kmp_table[pos] = kmp_table[cnd] + else: + kmp_table[pos] = cnd + while cnd >= 0 and query[pos] != query[cnd]: + cnd = kmp_table[cnd] + pos, cnd = pos + 1, cnd + 1 + kmp_table[pos] = cnd + + return kmp_table + + + +def _do_match(string, query, kmp_table): + j, k = 0, 0 + positions = DynamicOneDimensionalArray(int, 0) + + while j < len(string): + if query[k] == string[j]: + j = j + 1 + k = k + 1 + if k == len(query): + positions.append(j - k) + k = kmp_table[k] + else: + k = kmp_table[k] + if k < 0: + j = j + 1 + k = k + 1 + + return positions + +def _p_pow(length, p=PRIME_NUMBER, m=MOD): + p_pow = OneDimensionalArray(int, length) + p_pow[0] = 1 + for i in range(1, length): + p_pow[i] = (p_pow[i-1] * p) % m + return p_pow + +def _hash_str(string, p=PRIME_NUMBER, m=MOD): + hash_value = 0 + p_pow = _p_pow(len(string), p, m) + for i in range(len(string)): + hash_value = (hash_value + ord(string[i]) * p_pow[i]) % m + return hash_value + +def _rabin_karp(text, query): + t = len(text) + q = len(query) + positions = DynamicOneDimensionalArray(int, 0) + if q == 0 or t == 0: + return positions + + query_hash = _hash_str(query) + text_hash = OneDimensionalArray(int, t + 1) + text_hash.fill(0) + p_pow = _p_pow(t) + + for i in range(t): + text_hash[i+1] = (text_hash[i] + ord(text[i]) * p_pow[i]) % MOD + for i in range(t - q + 1): + curr_hash = (text_hash[i + q] + MOD - text_hash[i]) % MOD + if curr_hash == (query_hash * p_pow[i]) % MOD: + positions.append(i) + + return positions + +def 
_boyer_moore(text, query): + positions = DynamicOneDimensionalArray(int, 0) + text_length, query_length = len(text), len(query) + + if text_length == 0 or query_length == 0: + return positions + + # Preprocessing Step + bad_match_table = dict() + for i in range(query_length): + bad_match_table[query[i]] = i + + shift = 0 + # Matching procedure + while shift <= text_length-query_length: + j = query_length - 1 + while j >= 0 and query[j] == text[shift + j]: + j -= 1 + if j < 0: + positions.append(shift) + if shift + query_length < text_length: + if text[shift + query_length] in bad_match_table: + shift += query_length - bad_match_table[text[shift + query_length]] + else: + shift += query_length + 1 + else: + shift += 1 + else: + letter_pos = text[shift + j] + if letter_pos in bad_match_table: + shift += max(1, j - bad_match_table[letter_pos]) + else: + shift += max(1, j + 1) + return positions + +def _z_vector(text, query): + string = text + if query != "": + string = query + str("$") + text + + z_fct = OneDimensionalArray(int, len(string)) + z_fct.fill(0) + + curr_pos = 1 + seg_left = 0 + seg_right = 0 + + for curr_pos in range(1,len(string)): + if curr_pos <= seg_right: + z_fct[curr_pos] = min(seg_right - curr_pos + 1, z_fct[curr_pos - seg_left]) + + while curr_pos + z_fct[curr_pos] < len(string) and \ + string[z_fct[curr_pos]] == string[curr_pos + z_fct[curr_pos]]: + z_fct[curr_pos] += 1 + + if curr_pos + z_fct[curr_pos] - 1 > seg_right: + seg_left = curr_pos + seg_right = curr_pos + z_fct[curr_pos] - 1 + + final_z_fct = DynamicOneDimensionalArray(int, 0) + start_index = 0 + if query != "": + start_index = len(query) + 1 + for pos in range(start_index, len(string)): + final_z_fct.append(z_fct[pos]) + + return final_z_fct + +def _z_function(text, query): + positions = DynamicOneDimensionalArray(int, 0) + if len(text) == 0 or len(query) == 0: + return positions + + fct = _z_vector(text, query) + for pos in range(len(fct)): + if fct[pos] == len(query): + 
positions.append(pos) + + return positions diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/strings/tests/__init__.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/strings/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/strings/tests/test_algorithms.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/strings/tests/test_algorithms.py new file mode 100644 index 000000000..37622cf80 --- /dev/null +++ b/build-install/usr/lib/python3.12/site-packages/pydatastructs/strings/tests/test_algorithms.py @@ -0,0 +1,76 @@ +from pydatastructs.strings import find + +import random, string + +def test_kmp(): + _test_common_string_matching('kmp') + +def test_rka(): + _test_common_string_matching('rabin_karp') + +def test_bm(): + _test_common_string_matching('boyer_moore') + +def test_zf(): + _test_common_string_matching('z_function') + +def _test_common_string_matching(algorithm): + true_text_pattern_dictionary = { + "Knuth-Morris-Pratt": "-Morris-", + "abcabcabcabdabcabdabcabca": "abcabdabcabca", + "aefcdfaecdaefaefcdaefeaefcdcdeae": "aefcdaefeaefcd", + "aaaaaaaa": "aaa", + "fullstringmatch": "fullstringmatch", + "z-function": "z-fun" + } + for test_case_key in true_text_pattern_dictionary: + text = test_case_key + query = true_text_pattern_dictionary[test_case_key] + positions = find(text, query, algorithm) + for i in range(positions._last_pos_filled): + p = positions[i] + assert text[p:p + len(query)] == query + + false_text_pattern_dictionary = { + "Knuth-Morris-Pratt": "-Pratt-", + "abcabcabcabdabcabdabcabca": "qwertyuiopzxcvbnm", + "aefcdfaecdaefaefcdaefeaefcdcdeae": "cdaefaefe", + "fullstringmatch": "fullstrinmatch", + "z-function": "function-", + "abc": "", + "": "abc" + } + + for test_case_key in false_text_pattern_dictionary: + text = test_case_key + query = false_text_pattern_dictionary[test_case_key] + positions = find(text, 
query, algorithm) + assert positions.size == 0 + + random.seed(1000) + + def gen_random_string(length): + ascii = string.ascii_uppercase + digits = string.digits + return ''.join(random.choices(ascii + digits, k=length)) + + for _ in range(100): + query = gen_random_string(random.randint(3, 10)) + num_times = random.randint(1, 10) + freq = 0 + text = "" + while freq < num_times: + rand_str = gen_random_string(random.randint(5, 10)) + if rand_str != query: + freq += 1 + text += query + rand_str + query + positions = find(text, query, algorithm) + assert positions._num == num_times * 2 + for i in range(positions._last_pos_filled): + p = positions[i] + assert text[p:p + len(query)] == query + + text = gen_random_string(len(query)) + if text != query: + positions = find(text, query, algorithm) + assert positions.size == 0 diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/strings/tests/test_trie.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/strings/tests/test_trie.py new file mode 100644 index 000000000..059104708 --- /dev/null +++ b/build-install/usr/lib/python3.12/site-packages/pydatastructs/strings/tests/test_trie.py @@ -0,0 +1,49 @@ +from pydatastructs import Trie + +def test_Trie(): + + strings = ["A", "to", "tea", "ted", "ten", "i", + "in", "inn", "Amfn", "snbr"] + trie = Trie() + for string in strings: + trie.insert(string) + + prefix_strings = ["te", "t", "Am", "snb"] + + for string in strings: + assert trie.is_inserted(string) + + for string in strings[::-1]: + assert trie.is_inserted(string) + + for string in prefix_strings: + assert trie.is_present(string) + assert not trie.is_inserted(string) + + assert sorted(trie.strings_with_prefix("t")) == ['tea', 'ted', 'ten', 'to'] + assert sorted(trie.strings_with_prefix("te")) == ["tea", "ted", "ten"] + assert trie.strings_with_prefix("i") == ["i", "in", "inn"] + assert trie.strings_with_prefix("a") == [] + + remove_order = ["to", "tea", "ted", "ten", "inn", "in", "A"] + + 
assert trie.delete("z") is None + + for string in remove_order: + trie.delete(string) + for present in strings: + if present == string: + assert not trie.is_inserted(present) + else: + assert trie.is_present(present) + assert trie.is_inserted(present) + strings.remove(string) + + prefix_strings_1 = ["dict", "dicts", "dicts_lists_tuples"] + trie_1 = Trie() + + for i in range(len(prefix_strings_1)): + trie_1.insert(prefix_strings_1[i]) + for j in range(i + 1): + assert trie_1.is_inserted(prefix_strings_1[j]) + assert trie_1.is_present(prefix_strings_1[j]) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/strings/trie.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/strings/trie.py new file mode 100644 index 000000000..cdf6666cf --- /dev/null +++ b/build-install/usr/lib/python3.12/site-packages/pydatastructs/strings/trie.py @@ -0,0 +1,201 @@ +from pydatastructs.utils.misc_util import ( + TrieNode, Backend, + raise_if_backend_is_not_python) +from collections import deque +import copy + +__all__ = [ + 'Trie' +] + +Stack = Queue = deque + +class Trie(object): + """ + Represents the trie data structure for storing strings. + + Parameters + ========== + + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Examples + ======== + + >>> from pydatastructs import Trie + >>> trie = Trie() + >>> trie.insert("a") + >>> trie.insert("aa") + >>> trie.strings_with_prefix("a") + ['a', 'aa'] + >>> trie.is_present("aa") + True + >>> trie.delete("aa") + True + >>> trie.is_present("aa") + False + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/Trie + """ + + __slots__ = ['root'] + + @classmethod + def methods(cls): + return ['__new__', 'insert', 'is_present', 'delete', + 'strings_with_prefix'] + + def __new__(cls, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = object.__new__(cls) + obj.root = TrieNode() + return obj + + def insert(self, string: str) -> None: + """ + Inserts the given string into the trie. + + Parameters + ========== + + string: str + + Returns + ======= + + None + """ + walk = self.root + for char in string: + if walk.get_child(char) is None: + newNode = TrieNode(char) + walk.add_child(newNode) + walk = newNode + else: + walk = walk.get_child(char) + walk.is_terminal = True + + def is_present(self, string: str) -> bool: + """ + Checks if the given string is present as a prefix in the trie. + + Parameters + ========== + + string: str + + Returns + ======= + + True if the given string is present as a prefix; + False in all other cases. + """ + walk = self.root + for char in string: + if walk.get_child(char) is None: + return False + walk = walk.get_child(char) + return True + + def is_inserted(self, string: str) -> bool: + """ + Checks if the given string was inserted in the trie. + + Parameters + ========== + + string: str + + Returns + ======= + + True if the given string was inserted in trie; + False in all other cases. + """ + walk = self.root + for char in string: + if walk.get_child(char) is None: + return False + walk = walk.get_child(char) + return walk.is_terminal + + def delete(self, string: str) -> bool: + """ + Deletes the given string from the trie. + + Parameters + ========== + + string: str + + Returns + ======= + + True if successfully deleted; + None if the string is not present in the trie. 
+ """ + path = [] + walk = self.root + size = len(string) + for i in range(size): + char = string[i] + path.append(walk) + if walk.get_child(char) is None: + return None + walk = walk.get_child(char) + path.append(walk) + i = len(path) - 1 + path[i].is_terminal = False + while not path[i]._children and i >= 1: + path[i-1].remove_child(path[i].char) + i -= 1 + if path[i].is_terminal: + return True + return True + + def strings_with_prefix(self, string: str) -> list: + """ + Generates a list of all strings with the given prefix. + + Parameters + ========== + + string: str + + Returns + ======= + + strings: list + The list of strings with the given prefix. + """ + + def _collect(prefix: str, node: TrieNode, strings: list) -> str: + TrieNode_stack = Stack() + TrieNode_stack.append((node, prefix)) + while TrieNode_stack: + walk, curr_prefix = TrieNode_stack.pop() + if walk.is_terminal: + strings.append(curr_prefix + walk.char) + for child in walk._children: + TrieNode_stack.append((walk.get_child(child), curr_prefix + walk.char)) + + strings = [] + prefix = "" + walk = self.root + for char in string: + walk = walk.get_child(char) + if walk is None: + return strings + prefix += char + if walk.is_terminal: + strings.append(walk.char) + for child in walk._children: + _collect(prefix, walk.get_child(child), strings) + return strings diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/__init__.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/__init__.py new file mode 100644 index 000000000..892730122 --- /dev/null +++ b/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/__init__.py @@ -0,0 +1,40 @@ +__all__ = [] + +from . 
import ( + binary_trees, + m_ary_trees, + space_partitioning_trees, + heaps, +) + +from .binary_trees import ( + BinaryTree, + BinarySearchTree, + BinaryTreeTraversal, + AVLTree, + BinaryIndexedTree, + CartesianTree, + Treap, + SplayTree, + RedBlackTree +) +__all__.extend(binary_trees.__all__) + +from .m_ary_trees import ( + MAryTreeNode, MAryTree +) + +__all__.extend(m_ary_trees.__all__) + +from .space_partitioning_trees import ( + OneDimensionalSegmentTree +) +__all__.extend(space_partitioning_trees.__all__) + +from .heaps import ( + BinaryHeap, + TernaryHeap, + DHeap, + BinomialHeap +) +__all__.extend(heaps.__all__) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/_backend/__init__.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/_backend/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/binary_trees.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/binary_trees.py new file mode 100644 index 000000000..48446d1d4 --- /dev/null +++ b/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/binary_trees.py @@ -0,0 +1,1888 @@ +import random +from collections import deque as Queue +from pydatastructs.utils import TreeNode, CartesianTreeNode, RedBlackTreeNode +from pydatastructs.miscellaneous_data_structures import Stack +from pydatastructs.linear_data_structures import OneDimensionalArray +from pydatastructs.linear_data_structures.arrays import ArrayForTrees +from pydatastructs.utils.misc_util import Backend +from pydatastructs.trees._backend.cpp import _trees + +__all__ = [ + 'AVLTree', + 'BinaryTree', + 'BinarySearchTree', + 'BinaryTreeTraversal', + 'BinaryIndexedTree', + 'CartesianTree', + 'Treap', + 'SplayTree', + 'RedBlackTree' +] + +class BinaryTree(object): + """ + Abstract binary tree. 
class BinaryTree(object):
    """
    Abstract binary tree.

    Parameters
    ==========

    key
        Required if tree is to be instantiated with
        root otherwise not needed.
    root_data
        Optional, the root node of the binary tree.
        If not of type TreeNode, it will consider
        root as data and a new root node will
        be created.
    comp: lambda/function
        Optional, a lambda function which will be used
        for comparison of keys. Should return a
        bool value. By default it implements the less
        than operator.
    is_order_statistic: bool
        Set it to True, if you want to use the
        order statistic features of the tree.
    backend: pydatastructs.Backend
        The backend to be used. Available backends: Python and C++
        Optional, by default, the Python backend is used. For faster
        execution, use the C++ backend.

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Binary_tree
    """

    __slots__ = ['root_idx', 'comparator', 'tree', 'size',
                 'is_order_statistic']

    def __new__(cls, key=None, root_data=None, comp=None,
                is_order_statistic=False, **kwargs):
        backend = kwargs.get('backend', Backend.PYTHON)
        if backend == Backend.CPP:
            if comp is None:
                comp = lambda key1, key2: key1 < key2
            # Arguments the caller did not supply are forwarded as None,
            # except comp which received its default above.
            return _trees.BinaryTree(key, root_data, comp,
                                     is_order_statistic, **kwargs)
        obj = object.__new__(cls)
        if key is None and root_data is not None:
            raise ValueError('Key required.')
        key = None if root_data is None else key
        root = TreeNode(key, root_data)
        root.is_root = True
        obj.root_idx = 0
        obj.tree, obj.size = ArrayForTrees(TreeNode, [root]), 1
        # Fix: the original wrote
        #   obj.comparator = lambda key1, key2: key1 < key2 \
        #       if comp is None else comp
        # By Python precedence the conditional expression is part of the
        # *lambda body*, so with a user-supplied comp every comparison
        # returned the function object comp (always truthy) instead of
        # calling it. Parenthesize so comp itself is used when given.
        obj.comparator = comp if comp is not None else \
            (lambda key1, key2: key1 < key2)
        obj.is_order_statistic = is_order_statistic
        return obj

    @classmethod
    def methods(cls):
        return ['__new__', '__str__']

    def insert(self, key, data=None):
        """
        Inserts data by the passed key using iterative
        algorithm.

        Parameters
        ==========

        key
            The key for comparison.

        data
            The data to be inserted.

        Returns
        =======

        None
        """
        raise NotImplementedError("This is an abstract method.")

    def delete(self, key, **kwargs):
        """
        Deletes the data with the passed key
        using iterative algorithm.

        Parameters
        ==========

        key
            The key of the node which is
            to be deleted.
        balancing_info: bool
            Optional, by default, False
            The information needed for updating
            the tree is returned if this parameter
            is set to True. It is not meant for
            user facing APIs.

        Returns
        =======

        True
            If the node is deleted successfully.
        None
            If the node to be deleted doesn't exist.

        Note
        ====

        The node being deleted means that the connections to that
        node are removed, but it is still in the tree. This
        is done to keep the complexity of deletion, O(logn).
        """
        raise NotImplementedError("This is an abstract method.")

    def search(self, key, **kwargs):
        """
        Searches for the data in the binary search tree
        using iterative algorithm.

        Parameters
        ==========

        key
            The key for searching.
        parent: bool
            If true then returns index of the
            parent of the node with the passed
            key.
            By default, False

        Returns
        =======

        int
            If the node with the passed key is
            in the tree.
        tuple
            The index of the searched node and
            the index of the parent of that node.
        None
            In all other cases.
        """
        raise NotImplementedError("This is an abstract method.")

    def __str__(self):
        # Render every occupied slot of the backing array as
        # (left, key, data, right); freed slots render as ''.
        to_be_printed = ['' for i in range(self.tree._last_pos_filled + 1)]
        for i in range(self.tree._last_pos_filled + 1):
            if self.tree[i] is not None:
                node = self.tree[i]
                to_be_printed[i] = (node.left, node.key,
                                    node.data, node.right)
        return str(to_be_printed)
class BinarySearchTree(BinaryTree):
    """
    Represents binary search trees.

    Examples
    ========

    >>> from pydatastructs.trees import BinarySearchTree as BST
    >>> b = BST()
    >>> b.insert(1, 1)
    >>> b.insert(2, 2)
    >>> child = b.tree[b.root_idx].right
    >>> b.tree[child].data
    2
    >>> b.search(1)
    0
    >>> b.search(-1) is None
    True
    >>> b.delete(1) is True
    True
    >>> b.search(1) is None
    True
    >>> b.delete(2) is True
    True
    >>> b.search(2) is None
    True

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Binary_search_tree

    See Also
    ========

    pydatastructs.trees.binary_tree.BinaryTree
    """

    @classmethod
    def methods(cls):
        return ['insert', 'search', 'delete', 'select',
                'rank', 'lowest_common_ancestor']

    # Subtree sizes of the children, used by the order-statistic
    # operations; an absent child contributes 0.
    left_size = lambda self, node: self.tree[node.left].size \
        if node.left is not None else 0
    right_size = lambda self, node: self.tree[node.right].size \
        if node.right is not None else 0

    def __new__(cls, key=None, root_data=None, comp=None,
                is_order_statistic=False, **kwargs):
        backend = kwargs.get('backend', Backend.PYTHON)
        if backend == Backend.CPP:
            if comp is None:
                comp = lambda key1, key2: key1 < key2
            # Unsupplied arguments are forwarded as None, except comp.
            return _trees.BinarySearchTree(key, root_data, comp,
                                           is_order_statistic, **kwargs)
        return super().__new__(cls, key, root_data, comp,
                               is_order_statistic, **kwargs)

    def _update_size(self, start_idx):
        """Recompute subtree sizes from start_idx up to the root."""
        if self.is_order_statistic:
            walk = start_idx
            while walk is not None:
                self.tree[walk].size = (
                    self.left_size(self.tree[walk]) +
                    self.right_size(self.tree[walk]) + 1)
                walk = self.tree[walk].parent

    def insert(self, key, data=None):
        """Insert (key, data); an existing key has its data replaced."""
        res = self.search(key)
        if res is not None:
            self.tree[res].data = data
            return None
        walk = self.root_idx
        if self.tree[walk].key is None:
            # Empty tree: reuse the placeholder root node.
            self.tree[walk].key = key
            self.tree[walk].data = data
            return None
        new_node, prev_node, flag = TreeNode(key, data), self.root_idx, True
        while flag:
            if not self.comparator(key, self.tree[walk].key):
                if self.tree[walk].right is None:
                    new_node.parent = prev_node
                    self.tree.append(new_node)
                    self.tree[walk].right = self.size
                    self.size += 1
                    flag = False
                prev_node = walk = self.tree[walk].right
            else:
                if self.tree[walk].left is None:
                    new_node.parent = prev_node
                    self.tree.append(new_node)
                    self.tree[walk].left = self.size
                    self.size += 1
                    flag = False
                prev_node = walk = self.tree[walk].left
        self._update_size(walk)

    def search(self, key, **kwargs):
        """Iterative lookup; returns index, or (index, parent) if asked."""
        ret_parent = kwargs.get('parent', False)
        parent = None
        walk = self.root_idx
        if self.tree[walk].key is None:
            return None
        while walk is not None:
            if self.tree[walk].key == key:
                break
            parent = walk
            if self.comparator(key, self.tree[walk].key):
                walk = self.tree[walk].left
            else:
                walk = self.tree[walk].right
        return (walk, parent) if ret_parent else walk

    def _bound_helper(self, node_idx, bound_key, is_upper=False):
        # Recursive helper shared by lower_bound and upper_bound; for
        # the upper bound an exact match must keep looking to the right.
        if node_idx is None:
            return None
        if self.tree[node_idx].key is None:
            return None
        if self.tree[node_idx].key == bound_key:
            if not is_upper:
                return self.tree[node_idx].key
            else:
                return self._bound_helper(self.tree[node_idx].right,
                                          bound_key, is_upper)
        if self.comparator(self.tree[node_idx].key, bound_key):
            return self._bound_helper(self.tree[node_idx].right,
                                      bound_key, is_upper)
        else:
            res_bound = self._bound_helper(self.tree[node_idx].left,
                                           bound_key, is_upper)
            return res_bound if res_bound is not None \
                else self.tree[node_idx].key

    def lower_bound(self, key, **kwargs):
        """
        Finds the lower bound of the given key in the tree.

        Parameters
        ==========

        key
            The key for comparison

        Examples
        ========

        >>> from pydatastructs.trees import BinarySearchTree as BST
        >>> b = BST()
        >>> b.insert(10, 10)
        >>> b.insert(18, 18)
        >>> b.insert(7, 7)
        >>> b.lower_bound(9)
        10
        >>> b.lower_bound(7)
        7
        >>> b.lower_bound(20) is None
        True

        Returns
        =======

        value
            The lower bound of the given key.
            Returns None if the value doesn't exist
        """
        return self._bound_helper(self.root_idx, key)

    def upper_bound(self, key, **kwargs):
        """
        Finds the upper bound of the given key in the tree.

        Parameters
        ==========

        key
            The key for comparison

        Examples
        ========

        >>> from pydatastructs.trees import BinarySearchTree as BST
        >>> b = BST()
        >>> b.insert(10, 10)
        >>> b.insert(18, 18)
        >>> b.insert(7, 7)
        >>> b.upper_bound(9)
        10
        >>> b.upper_bound(7)
        10
        >>> b.upper_bound(20) is None
        True

        Returns
        =======

        value
            The upper bound of the given key.
            Returns None if the value doesn't exist
        """
        return self._bound_helper(self.root_idx, key, True)

    def delete(self, key, **kwargs):
        """Delete the node with `key`; three cases by child count."""
        (walk, parent) = self.search(key, parent=True)
        a = None
        if walk is None:
            return None
        if self.tree[walk].left is None and \
            self.tree[walk].right is None:
            # Case 1: leaf node.
            if parent is None:
                self.tree[self.root_idx].data = None
                self.tree[self.root_idx].key = None
            else:
                if self.tree[parent].left == walk:
                    self.tree[parent].left = None
                else:
                    self.tree[parent].right = None
                a = parent
                par_key, root_key = (self.tree[parent].key,
                                     self.tree[self.root_idx].key)
                # tree.delete may compact the backing array; it then
                # returns a key -> new index mapping.
                new_indices = self.tree.delete(walk)
                if new_indices is not None:
                    a = new_indices[par_key]
                    self.root_idx = new_indices[root_key]
            self._update_size(a)
        elif self.tree[walk].left is not None and \
            self.tree[walk].right is not None:
            # Case 2: two children — replace with in-order successor.
            twalk = self.tree[walk].right
            par = walk
            flag = False
            while self.tree[twalk].left is not None:
                flag = True
                par = twalk
                twalk = self.tree[twalk].left
            self.tree[walk].data = self.tree[twalk].data
            self.tree[walk].key = self.tree[twalk].key
            if flag:
                self.tree[par].left = self.tree[twalk].right
            else:
                self.tree[par].right = self.tree[twalk].right
            if self.tree[twalk].right is not None:
                self.tree[self.tree[twalk].right].parent = par
            if twalk is not None:
                a = par
                par_key, root_key = (self.tree[par].key,
                                     self.tree[self.root_idx].key)
                new_indices = self.tree.delete(twalk)
                if new_indices is not None:
                    a = new_indices[par_key]
                    self.root_idx = new_indices[root_key]
            self._update_size(a)
        else:
            # Case 3: exactly one child — splice it in.
            if self.tree[walk].left is not None:
                child = self.tree[walk].left
            else:
                child = self.tree[walk].right
            if parent is None:
                self.tree[self.root_idx].left = self.tree[child].left
                self.tree[self.root_idx].right = self.tree[child].right
                self.tree[self.root_idx].data = self.tree[child].data
                self.tree[self.root_idx].key = self.tree[child].key
                self.tree[self.root_idx].parent = None
                root_key = self.tree[self.root_idx].key
                new_indices = self.tree.delete(child)
                if new_indices is not None:
                    self.root_idx = new_indices[root_key]
            else:
                if self.tree[parent].left == walk:
                    self.tree[parent].left = child
                else:
                    self.tree[parent].right = child
                self.tree[child].parent = parent
                a = parent
                par_key, root_key = (self.tree[parent].key,
                                     self.tree[self.root_idx].key)
                new_indices = self.tree.delete(walk)
                if new_indices is not None:
                    parent = new_indices[par_key]
                    self.tree[child].parent = new_indices[par_key]
                    a = new_indices[par_key]
                    self.root_idx = new_indices[root_key]
            self._update_size(a)

        if kwargs.get("balancing_info", False) is not False:
            return a
        return True

    def select(self, i):
        """
        Finds the i-th smallest node in the tree.

        Parameters
        ==========

        i: int
            A positive integer

        Returns
        =======

        n: TreeNode
            The node with the i-th smallest key

        References
        ==========

        .. [1] https://en.wikipedia.org/wiki/Order_statistic_tree
        """
        i -= 1  # The algorithm is based on zero indexing
        if i < 0:
            raise ValueError("Expected a positive integer, got %d"%(i + 1))
        if i >= self.tree._num:
            raise ValueError("%d is greater than the size of the "
                "tree which is, %d"%(i + 1, self.tree._num))
        walk = self.root_idx
        while walk is not None:
            l = self.left_size(self.tree[walk])
            if i == l:
                return self.tree[walk]
            left_walk = self.tree[walk].left
            right_walk = self.tree[walk].right
            if left_walk is None and right_walk is None:
                raise IndexError("The traversal is terminated "
                                 "due to no child nodes ahead.")
            if i < l:
                if left_walk is not None and \
                    self.comparator(self.tree[left_walk].key,
                                    self.tree[walk].key):
                    walk = left_walk
                else:
                    walk = right_walk
            else:
                if right_walk is not None and \
                    not self.comparator(self.tree[right_walk].key,
                                        self.tree[walk].key):
                    walk = right_walk
                else:
                    walk = left_walk
                i -= (l + 1)

    def rank(self, x):
        """
        Finds the rank of the given node, i.e.
        its index in the sorted list of nodes
        of the tree.

        Parameters
        ==========

        x: key
            The key of the node whose rank is to be found out.
        """
        walk = self.search(x)
        if walk is None:
            return None
        r = self.left_size(self.tree[walk]) + 1
        while self.tree[walk].key != self.tree[self.root_idx].key:
            p = self.tree[walk].parent
            if walk == self.tree[p].right:
                r += self.left_size(self.tree[p]) + 1
            walk = p
        return r

    def _simple_path(self, key, root):
        """
        Utility function to find the simple path between root and node.

        Parameters
        ==========

        key: Node.key
            Key of the node to be searched

        Returns
        =======

        path: list
        """
        stack = Stack()
        stack.push(root)
        path = []
        node_idx = -1

        # DFS for the node carrying `key`.
        while not stack.is_empty:
            node = stack.pop()
            if self.tree[node].key == key:
                node_idx = node
                break
            if self.tree[node].left:
                stack.push(self.tree[node].left)
            if self.tree[node].right:
                stack.push(self.tree[node].right)

        if node_idx == -1:
            return path

        # Walk parent links back to the root, then reverse.
        while node_idx != 0:
            path.append(node_idx)
            node_idx = self.tree[node_idx].parent
        path.append(0)
        path.reverse()

        return path

    def _lca_1(self, j, k):
        # LCA by comparing root-to-node paths; the last common entry
        # before they diverge is the answer.
        root = self.root_idx
        path1 = self._simple_path(j, root)
        path2 = self._simple_path(k, root)
        if not path1 or not path2:
            raise ValueError("One of two path doesn't exists. See %s, %s"
                             %(path1, path2))

        n, m = len(path1), len(path2)
        i = j = 0
        while i < n and j < m:
            if path1[i] != path2[j]:
                return self.tree[path1[i - 1]].key
            i += 1
            j += 1
        # One path is a prefix of the other; the shorter one ends at
        # the ancestor.
        if path1 < path2:
            return self.tree[path1[-1]].key
        return self.tree[path2[-1]].key

    def _lca_2(self, j, k):
        # LCA by descending from the root while both keys lie on the
        # same side of the current node.
        curr_root = self.root_idx
        u, v = self.search(j), self.search(k)
        if (u is None) or (v is None):
            raise ValueError("One of the nodes with key %s "
                             "or %s doesn't exits"%(j, k))
        u_left = self.comparator(self.tree[u].key,
                                 self.tree[curr_root].key)
        v_left = self.comparator(self.tree[v].key,
                                 self.tree[curr_root].key)

        while not (u_left ^ v_left):
            if u_left and v_left:
                curr_root = self.tree[curr_root].left
            else:
                curr_root = self.tree[curr_root].right

            if curr_root == u or curr_root == v:
                if curr_root is None:
                    return None
                return self.tree[curr_root].key
            u_left = self.comparator(self.tree[u].key,
                                     self.tree[curr_root].key)
            v_left = self.comparator(self.tree[v].key,
                                     self.tree[curr_root].key)

        if curr_root is None:
            return curr_root
        return self.tree[curr_root].key

    def lowest_common_ancestor(self, j, k, algorithm=1):
        """
        Computes the lowest common ancestor of two nodes.

        Parameters
        ==========

        j: Node.key
            Key of first node

        k: Node.key
            Key of second node

        algorithm: int
            The algorithm to be used for computing the
            lowest common ancestor.
            Optional, by default uses algorithm 1.

            1 -> Determines the lowest common ancestor by finding
            the first intersection of the paths from v and w
            to the root.

            2 -> Modified version of the algorithm given in the
            following publication,
            D. Harel. A linear time algorithm for the
            lowest common ancestors problem. In 21s
            Annual Symposium On Foundations of
            Computer Science, pages 308-319, 1980.

        Returns
        =======

        Node.key
            The key of the lowest common ancestor in the tree,
            if both the nodes are present in the tree.

        References
        ==========

        .. [1] https://en.wikipedia.org/wiki/Lowest_common_ancestor

        .. [2] https://pdfs.semanticscholar.org/e75b/386cc554214aa0ebd6bd6dbdd0e490da3739.pdf
        """
        return getattr(self, "_lca_"+str(algorithm))(j, k)
+ """ + def __new__(cls, key=None, root_data=None, comp=None, + is_order_statistic=False, **kwargs): + backend = kwargs.get('backend', Backend.PYTHON) + if backend == Backend.CPP: + if comp is None: + comp = lambda key1, key2: key1 < key2 + return _trees.SelfBalancingBinaryTree(key, root_data, comp, is_order_statistic, **kwargs) # If any argument is not given, then it is passed as None, except for comp + return super().__new__(cls, key, root_data, comp, is_order_statistic, **kwargs) + + def _right_rotate(self, j, k): + y = self.tree[k].right + if y is not None: + self.tree[y].parent = j + self.tree[j].left = y + self.tree[k].parent = self.tree[j].parent + if self.tree[k].parent is not None: + if self.tree[self.tree[k].parent].left == j: + self.tree[self.tree[k].parent].left = k + else: + self.tree[self.tree[k].parent].right = k + self.tree[j].parent = k + self.tree[k].right = j + kp = self.tree[k].parent + if kp is None: + self.root_idx = k + + def _left_right_rotate(self, j, k): + i = self.tree[k].right + v, w = self.tree[i].left, self.tree[i].right + self.tree[k].right, self.tree[j].left = v, w + if v is not None: + self.tree[v].parent = k + if w is not None: + self.tree[w].parent = j + self.tree[i].left, self.tree[i].right, self.tree[i].parent = \ + k, j, self.tree[j].parent + self.tree[k].parent, self.tree[j].parent = i, i + ip = self.tree[i].parent + if ip is not None: + if self.tree[ip].left == j: + self.tree[ip].left = i + else: + self.tree[ip].right = i + else: + self.root_idx = i + + def _right_left_rotate(self, j, k): + i = self.tree[k].left + v, w = self.tree[i].left, self.tree[i].right + self.tree[k].left, self.tree[j].right = w, v + if v is not None: + self.tree[v].parent = j + if w is not None: + self.tree[w].parent = k + self.tree[i].right, self.tree[i].left, self.tree[i].parent = \ + k, j, self.tree[j].parent + self.tree[k].parent, self.tree[j].parent = i, i + ip = self.tree[i].parent + if ip is not None: + if self.tree[ip].left == j: + 
self.tree[ip].left = i + else: + self.tree[ip].right = i + else: + self.root_idx = i + + def _left_rotate(self, j, k): + y = self.tree[k].left + if y is not None: + self.tree[y].parent = j + self.tree[j].right = y + self.tree[k].parent = self.tree[j].parent + if self.tree[k].parent is not None: + if self.tree[self.tree[k].parent].left == j: + self.tree[self.tree[k].parent].left = k + else: + self.tree[self.tree[k].parent].right = k + self.tree[j].parent = k + self.tree[k].left = j + kp = self.tree[k].parent + if kp is None: + self.root_idx = k + +class CartesianTree(SelfBalancingBinaryTree): + """ + Represents cartesian trees. + + Examples + ======== + + >>> from pydatastructs.trees import CartesianTree as CT + >>> c = CT() + >>> c.insert(1, 4, 1) + >>> c.insert(2, 3, 2) + >>> child = c.tree[c.root_idx].left + >>> c.tree[child].data + 1 + >>> c.search(1) + 0 + >>> c.search(-1) is None + True + >>> c.delete(1) is True + True + >>> c.search(1) is None + True + >>> c.delete(2) is True + True + >>> c.search(2) is None + True + + References + ========== + + .. 
class CartesianTree(SelfBalancingBinaryTree):
    """
    Represents cartesian trees.

    Examples
    ========

    >>> from pydatastructs.trees import CartesianTree as CT
    >>> c = CT()
    >>> c.insert(1, 4, 1)
    >>> c.insert(2, 3, 2)
    >>> child = c.tree[c.root_idx].left
    >>> c.tree[child].data
    1
    >>> c.search(1)
    0
    >>> c.search(-1) is None
    True
    >>> c.delete(1) is True
    True
    >>> c.search(1) is None
    True
    >>> c.delete(2) is True
    True
    >>> c.search(2) is None
    True

    References
    ==========

    .. [1] https://www.cs.princeton.edu/courses/archive/spr09/cos423/Lectures/geo-st.pdf

    See Also
    ========

    pydatastructs.trees.binary_trees.SelfBalancingBinaryTree
    """
    def __new__(cls, key=None, root_data=None, comp=None,
                is_order_statistic=False, **kwargs):
        backend = kwargs.get('backend', Backend.PYTHON)
        if backend == Backend.CPP:
            if comp is None:
                comp = lambda key1, key2: key1 < key2
            # Unsupplied arguments are forwarded as None, except comp.
            return _trees.CartesianTree(key, root_data, comp,
                                        is_order_statistic, **kwargs)
        return super().__new__(cls, key, root_data, comp,
                               is_order_statistic, **kwargs)

    @classmethod
    def methods(cls):
        return ['__new__', '__str__', 'insert', 'delete']

    def _bubble_up(self, node_idx):
        # Rotate the node towards the root while it violates the
        # min-heap property on priorities.
        node = self.tree[node_idx]
        parent_idx = self.tree[node_idx].parent
        parent = self.tree[parent_idx]
        while (node.parent is not None) and (parent.priority > node.priority):
            if parent.right == node_idx:
                self._left_rotate(parent_idx, node_idx)
            else:
                self._right_rotate(parent_idx, node_idx)
            node = self.tree[node_idx]
            parent_idx = self.tree[node_idx].parent
            if parent_idx is not None:
                parent = self.tree[parent_idx]
        if node.parent is None:
            self.tree[node_idx].is_root = True

    def _trickle_down(self, node_idx):
        # Rotate the node down towards a leaf, always lifting the
        # child with the smaller priority.
        node = self.tree[node_idx]
        while node.left is not None or node.right is not None:
            if node.left is None:
                self._left_rotate(node_idx, self.tree[node_idx].right)
            elif node.right is None:
                self._right_rotate(node_idx, self.tree[node_idx].left)
            elif self.tree[node.left].priority < self.tree[node.right].priority:
                self._right_rotate(node_idx, self.tree[node_idx].left)
            else:
                self._left_rotate(node_idx, self.tree[node_idx].right)
            node = self.tree[node_idx]

    def insert(self, key, priority, data=None):
        """Insert by key, then restore the heap order on priorities."""
        super(CartesianTree, self).insert(key, data)
        node_idx = super(CartesianTree, self).search(key)
        node = self.tree[node_idx]
        # Replace the plain TreeNode created by the BST insert with a
        # priority-carrying CartesianTreeNode.
        new_node = CartesianTreeNode(key, priority, data)
        new_node.parent = node.parent
        new_node.left = node.left
        new_node.right = node.right
        self.tree[node_idx] = new_node
        if node.is_root:
            self.tree[node_idx].is_root = True
        else:
            self._bubble_up(node_idx)

    def delete(self, key, **kwargs):
        """Sink the node to a leaf, then delete it as a BST node."""
        balancing_info = kwargs.get('balancing_info', False)
        node_idx = super(CartesianTree, self).search(key)
        if node_idx is not None:
            self._trickle_down(node_idx)
            return super(CartesianTree, self).delete(key,
                balancing_info = balancing_info)

    def __str__(self):
        # Same layout as BinaryTree.__str__ but with the priority
        # included: (left, key, priority, data, right).
        to_be_printed = ['' for i in range(self.tree._last_pos_filled + 1)]
        for i in range(self.tree._last_pos_filled + 1):
            if self.tree[i] is not None:
                node = self.tree[i]
                to_be_printed[i] = (node.left, node.key, node.priority,
                                    node.data, node.right)
        return str(to_be_printed)

class Treap(CartesianTree):
    """
    Represents treaps.

    Examples
    ========

    >>> from pydatastructs.trees import Treap as T
    >>> t = T()
    >>> t.insert(1, 1)
    >>> t.insert(2, 2)
    >>> t.search(1)
    0
    >>> t.search(-1) is None
    True
    >>> t.delete(1) is True
    True
    >>> t.search(1) is None
    True
    >>> t.delete(2) is True
    True
    >>> t.search(2) is None
    True

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Treap
    """
    def __new__(cls, key=None, root_data=None, comp=None,
                is_order_statistic=False, **kwargs):
        backend = kwargs.get('backend', Backend.PYTHON)
        if backend == Backend.CPP:
            if comp is None:
                comp = lambda key1, key2: key1 < key2
            # Unsupplied arguments are forwarded as None, except comp.
            return _trees.Treap(key, root_data, comp,
                                is_order_statistic, **kwargs)
        return super().__new__(cls, key, root_data, comp,
                               is_order_statistic, **kwargs)

    @classmethod
    def methods(cls):
        return ['__new__', 'insert']

    def insert(self, key, data=None):
        """Insert with a uniformly random priority."""
        priority = random.random()
        super(Treap, self).insert(key, priority, data)
class AVLTree(SelfBalancingBinaryTree):
    """
    Represents AVL trees.

    References
    ==========

    .. [1] https://courses.cs.washington.edu/courses/cse373/06sp/handouts/lecture12.pdf
    .. [2] https://en.wikipedia.org/wiki/AVL_tree
    .. [3] http://faculty.cs.niu.edu/~freedman/340/340notes/340avl2.htm

    See Also
    ========

    pydatastructs.trees.binary_trees.BinaryTree
    """

    def __new__(cls, key=None, root_data=None, comp=None,
                is_order_statistic=False, **kwargs):
        backend = kwargs.get('backend', Backend.PYTHON)
        if backend == Backend.CPP:
            if comp is None:
                comp = lambda key1, key2: key1 < key2
            # Unsupplied arguments are forwarded as None, except comp.
            return _trees.AVLTree(key, root_data, comp,
                                  is_order_statistic, **kwargs)
        return super().__new__(cls, key, root_data, comp,
                               is_order_statistic, **kwargs)

    @classmethod
    def methods(cls):
        return ['__new__', 'set_tree', 'insert', 'delete']

    # Heights of the children; an absent child has height -1.
    left_height = lambda self, node: self.tree[node.left].height \
        if node.left is not None else -1
    right_height = lambda self, node: self.tree[node.right].height \
        if node.right is not None else -1
    balance_factor = lambda self, node: self.right_height(node) - \
        self.left_height(node)

    def set_tree(self, arr):
        self.tree = arr

    def _right_rotate(self, j, k):
        super(AVLTree, self)._right_rotate(j, k)
        self.tree[j].height = max(self.left_height(self.tree[j]),
                                  self.right_height(self.tree[j])) + 1
        if self.is_order_statistic:
            self.tree[j].size = (self.left_size(self.tree[j]) +
                                 self.right_size(self.tree[j]) + 1)

    def _left_right_rotate(self, j, k):
        super(AVLTree, self)._left_right_rotate(j, k)
        self.tree[j].height = max(self.left_height(self.tree[j]),
                                  self.right_height(self.tree[j])) + 1
        self.tree[k].height = max(self.left_height(self.tree[k]),
                                  self.right_height(self.tree[k])) + 1
        if self.is_order_statistic:
            self.tree[j].size = (self.left_size(self.tree[j]) +
                                 self.right_size(self.tree[j]) + 1)
            self.tree[k].size = (self.left_size(self.tree[k]) +
                                 self.right_size(self.tree[k]) + 1)

    def _right_left_rotate(self, j, k):
        super(AVLTree, self)._right_left_rotate(j, k)
        self.tree[j].height = max(self.left_height(self.tree[j]),
                                  self.right_height(self.tree[j])) + 1
        self.tree[k].height = max(self.left_height(self.tree[k]),
                                  self.right_height(self.tree[k])) + 1
        if self.is_order_statistic:
            self.tree[j].size = (self.left_size(self.tree[j]) +
                                 self.right_size(self.tree[j]) + 1)
            self.tree[k].size = (self.left_size(self.tree[k]) +
                                 self.right_size(self.tree[k]) + 1)

    def _left_rotate(self, j, k):
        super(AVLTree, self)._left_rotate(j, k)
        self.tree[j].height = max(self.left_height(self.tree[j]),
                                  self.right_height(self.tree[j])) + 1
        self.tree[k].height = max(self.left_height(self.tree[k]),
                                  self.right_height(self.tree[k])) + 1
        if self.is_order_statistic:
            self.tree[j].size = (self.left_size(self.tree[j]) +
                                 self.right_size(self.tree[j]) + 1)

    def _balance_insertion(self, curr, last):
        # Walk from the insertion point to the root, refreshing heights
        # (and sizes) and applying the rotation matching the shape of
        # the two most recently visited nodes.
        walk = last
        path = Queue()
        path.append(curr), path.append(last)
        while walk is not None:
            self.tree[walk].height = max(self.left_height(self.tree[walk]),
                self.right_height(self.tree[walk])) + 1
            if self.is_order_statistic:
                self.tree[walk].size = (self.left_size(self.tree[walk]) +
                    self.right_size(self.tree[walk]) + 1)
            last = path.popleft()
            last2last = path.popleft()
            if self.balance_factor(self.tree[walk]) not in (1, 0, -1):
                l = self.tree[walk].left
                if l is not None and l == last and self.tree[l].left == last2last:
                    self._right_rotate(walk, last)
                r = self.tree[walk].right
                if r is not None and r == last and self.tree[r].right == last2last:
                    self._left_rotate(walk, last)
                if l is not None and l == last and self.tree[l].right == last2last:
                    self._left_right_rotate(walk, last)
                if r is not None and r == last and self.tree[r].left == last2last:
                    self._right_left_rotate(walk, last)
            path.append(walk), path.append(last)
            walk = self.tree[walk].parent

    def insert(self, key, data=None):
        super(AVLTree, self).insert(key, data)
        self._balance_insertion(self.size - 1, self.tree[self.size-1].parent)

    def _balance_deletion(self, start_idx, key):
        # Rebalance upwards from the parent of the physically removed
        # node, choosing single or double rotations by balance factors.
        walk = start_idx
        while walk is not None:
            self.tree[walk].height = max(self.left_height(self.tree[walk]),
                self.right_height(self.tree[walk])) + 1
            if self.is_order_statistic:
                self.tree[walk].size = (self.left_size(self.tree[walk]) +
                    self.right_size(self.tree[walk]) + 1)
            if self.balance_factor(self.tree[walk]) not in (1, 0, -1):
                if self.balance_factor(self.tree[walk]) < 0:
                    b = self.tree[walk].left
                    if self.balance_factor(self.tree[b]) <= 0:
                        self._right_rotate(walk, b)
                    else:
                        self._left_right_rotate(walk, b)
                else:
                    b = self.tree[walk].right
                    if self.balance_factor(self.tree[b]) >= 0:
                        self._left_rotate(walk, b)
                    else:
                        self._right_left_rotate(walk, b)
            walk = self.tree[walk].parent

    def delete(self, key, **kwargs):
        a = super(AVLTree, self).delete(key, balancing_info=True)
        self._balance_deletion(a, key)
        return True
[1] https://en.wikipedia.org/wiki/Splay_tree + + """ + + def __new__(cls, key=None, root_data=None, comp=None, + is_order_statistic=False, **kwargs): + backend = kwargs.get('backend', Backend.PYTHON) + if backend == Backend.CPP: + if comp is None: + comp = lambda key1, key2: key1 < key2 + return _trees.SplayTree(key, root_data, comp, is_order_statistic, **kwargs) # If any argument is not given, then it is passed as None, except for comp + return super().__new__(cls, key, root_data, comp, is_order_statistic, **kwargs) + + @classmethod + def methods(cls): + return ['__new__', 'insert', 'delete', 'join', 'split'] + + def _zig(self, x, p): + if self.tree[p].left == x: + super(SplayTree, self)._right_rotate(p, x) + else: + super(SplayTree, self)._left_rotate(p, x) + + def _zig_zig(self, x, p): + super(SplayTree, self)._right_rotate(self.tree[p].parent, p) + super(SplayTree, self)._right_rotate(p, x) + + def _zig_zag(self, p): + super(SplayTree, self)._left_right_rotate(self.tree[p].parent, p) + + def _zag_zag(self, x, p): + super(SplayTree, self)._left_rotate(self.tree[p].parent, p) + super(SplayTree, self)._left_rotate(p, x) + + def _zag_zig(self, p): + super(SplayTree, self)._right_left_rotate(self.tree[p].parent, p) + + def splay(self, x, p): + while self.tree[x].parent is not None: + if self.tree[p].parent is None: + self._zig(x, p) + elif self.tree[p].left == x and \ + self.tree[self.tree[p].parent].left == p: + self._zig_zig(x, p) + elif self.tree[p].right == x and \ + self.tree[self.tree[p].parent].right == p: + self._zag_zag(x, p) + elif self.tree[p].left == x and \ + self.tree[self.tree[p].parent].right == p: + self._zag_zig(p) + else: + self._zig_zag(p) + p = self.tree[x].parent + + def insert(self, key, x): + super(SelfBalancingBinaryTree, self).insert(key, x) + e, p = super(SelfBalancingBinaryTree, self).search(key, parent=True) + self.tree[self.size-1].parent = p + self.splay(e, p) + + def delete(self, x): + e, p = super(SelfBalancingBinaryTree, 
self).search(x, parent=True) + if e is None: + return + self.splay(e, p) + status = super(SelfBalancingBinaryTree, self).delete(x) + return status + + def join(self, other): + """ + Joins two trees current and other such that all elements of + the current splay tree are smaller than the elements of the other tree. + + Parameters + ========== + + other: SplayTree + SplayTree which needs to be joined with the self tree. + + """ + maxm = self.root_idx + while self.tree[maxm].right is not None: + maxm = self.tree[maxm].right + minm = other.root_idx + while other.tree[minm].left is not None: + minm = other.tree[minm].left + if not self.comparator(self.tree[maxm].key, + other.tree[minm].key): + raise ValueError("Elements of %s aren't less " + "than that of %s"%(self, other)) + self.splay(maxm, self.tree[maxm].parent) + idx_update = self.tree._size + for node in other.tree: + if node is not None: + node_copy = TreeNode(node.key, node.data) + if node.left is not None: + node_copy.left = node.left + idx_update + if node.right is not None: + node_copy.right = node.right + idx_update + self.tree.append(node_copy) + else: + self.tree.append(node) + self.tree[self.root_idx].right = \ + other.root_idx + idx_update + + def split(self, x): + """ + Splits current splay tree into two trees such that one tree contains nodes + with key less than or equal to x and the other tree containing + nodes with key greater than x. + + Parameters + ========== + + x: key + Key of the element on the basis of which split is performed. + + Returns + ======= + + other: SplayTree + SplayTree containing elements with key greater than x. 
+ + """ + e, p = super(SelfBalancingBinaryTree, self).search(x, parent=True) + if e is None: + return + self.splay(e, p) + other = SplayTree(None, None) + if self.tree[self.root_idx].right is not None: + traverse = BinaryTreeTraversal(self) + elements = traverse.depth_first_search(order='pre_order', node=self.tree[self.root_idx].right) + for i in range(len(elements)): + super(SelfBalancingBinaryTree, other).insert(elements[i].key, elements[i].data) + for j in range(len(elements) - 1, -1, -1): + e, p = super(SelfBalancingBinaryTree, self).search(elements[j].key, parent=True) + self.tree[e] = None + self.tree[self.root_idx].right = None + return other + +class RedBlackTree(SelfBalancingBinaryTree): + """ + Represents Red Black trees. + + Examples + ======== + + >>> from pydatastructs.trees import RedBlackTree as RB + >>> b = RB() + >>> b.insert(1, 1) + >>> b.insert(2, 2) + >>> child = b.tree[b.root_idx].right + >>> b.tree[child].data + 2 + >>> b.search(1) + 0 + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/Red%E2%80%93black_tree + + See Also + ======== + + pydatastructs.trees.binary_trees.SelfBalancingBinaryTree + """ + + def __new__(cls, key=None, root_data=None, comp=None, + is_order_statistic=False, **kwargs): + backend = kwargs.get('backend', Backend.PYTHON) + if backend == Backend.CPP: + if comp is None: + comp = lambda key1, key2: key1 < key2 + return _trees.RedBlackTree(key, root_data, comp, is_order_statistic, **kwargs) # If any argument is not given, then it is passed as None, except for comp + return super().__new__(cls, key, root_data, comp, is_order_statistic, **kwargs) + + @classmethod + def methods(cls): + return ['__new__', 'insert', 'delete'] + + def _get_parent(self, node_idx): + return self.tree[node_idx].parent + + def _get_grand_parent(self, node_idx): + parent_idx=self._get_parent(node_idx) + return self.tree[parent_idx].parent + + def _get_sibling(self, node_idx): + parent_idx=self._get_parent(node_idx) + if parent_idx is None: + return None + node = self.tree[parent_idx] + if node_idx==node.left: + sibling_idx=node.right + return sibling_idx + else: + sibling_idx=node.left + return sibling_idx + + def _get_uncle(self, node_idx): + parent_idx=self._get_parent(node_idx) + return self._get_sibling(parent_idx) + + def _is_onleft(self, node_idx): + parent = self._get_parent(node_idx) + if self.tree[parent].left == node_idx: + return True + return False + + def _is_onright(self, node_idx): + if self._is_onleft(node_idx) is False: + return True + return False + + def __fix_insert(self, node_idx): + while self._get_parent(node_idx) is not None and \ + self.tree[self._get_parent(node_idx)].color == 1 and self.tree[node_idx].color==1: + parent_idx=self._get_parent(node_idx) + grand_parent_idx=self._get_grand_parent(node_idx) + uncle_idx = self._get_uncle(node_idx) + if uncle_idx is not None and self.tree[uncle_idx].color == 1: + self.tree[uncle_idx].color = 0 + self.tree[parent_idx].color = 0 + 
self.tree[grand_parent_idx].color = 1 + node_idx= grand_parent_idx + else: + self.tree[self.root_idx].is_root=False + if self._is_onright(parent_idx): + if self._is_onleft(node_idx): + self._right_rotate(parent_idx, node_idx) + node_idx=parent_idx + parent_idx=self._get_parent(node_idx) + node_idx=parent_idx + parent_idx=self._get_parent(node_idx) + self._left_rotate(parent_idx, node_idx) + elif self._is_onleft(parent_idx): + if self._is_onright(node_idx): + self._left_rotate(parent_idx, node_idx) + node_idx=parent_idx + parent_idx=self._get_parent(node_idx) + node_idx=parent_idx + parent_idx=self._get_parent(node_idx) + self._right_rotate(parent_idx, node_idx) + self.tree[node_idx].color = 0 + self.tree[parent_idx].color = 1 + self.tree[self.root_idx].is_root=True + if self.tree[node_idx].is_root: + break + self.tree[self.root_idx].color=0 + + def insert(self, key, data=None): + super(RedBlackTree, self).insert(key, data) + node_idx = super(RedBlackTree, self).search(key) + node = self.tree[node_idx] + new_node = RedBlackTreeNode(key, data) + new_node.parent = node.parent + new_node.left = node.left + new_node.right = node.right + self.tree[node_idx] = new_node + if node.is_root: + self.tree[node_idx].is_root = True + self.tree[node_idx].color=0 + elif self.tree[self.tree[node_idx].parent].color==1: + self.__fix_insert(node_idx) + + def _find_predecessor(self, node_idx): + while self.tree[node_idx].right is not None: + node_idx = self.tree[node_idx].right + return node_idx + + def _transplant_values(self, node_idx1, node_idx2): + parent = self.tree[node_idx1].parent + if self.tree[node_idx1].is_root and self._has_one_child(node_idx1): + self.tree[self.root_idx].key = self.tree[node_idx2].key + self.tree[self.root_idx].data = self.tree[node_idx2].data + self.tree[self.root_idx].left = self.tree[node_idx2].left + self.tree[self.root_idx].right = self.tree[node_idx2].right + self.tree[node_idx1].parent = None + return self.tree[self.root_idx].key + else: + 
self.tree[node_idx1].key = self.tree[node_idx2].key + self.tree[node_idx1].data = self.tree[node_idx2].data + + def _has_one_child(self, node_idx): + if self._is_leaf(node_idx) is False and self._has_two_child(node_idx) is False: + return True + return False + + def _is_leaf(self, node_idx): + if self.tree[node_idx].left is None and self.tree[node_idx].right is None: + return True + return False + + def _has_two_child(self, node_idx): + if self.tree[node_idx].left is not None and self.tree[node_idx].right is not None: + return True + return False + + def __has_red_child(self, node_idx): + left_idx = self.tree[node_idx].left + right_idx = self.tree[node_idx].right + if (left_idx is not None and self.tree[left_idx].color == 1) or \ + (right_idx is not None and self.tree[right_idx].color == 1): + return True + return False + + def _replace_node(self, node_idx): + if self._is_leaf(node_idx): + return None + elif self._has_one_child(node_idx): + if self.tree[node_idx].left is not None: + child = self.tree[node_idx].left + else: + child = self.tree[node_idx].right + return child + else: + return self._find_predecessor(self.tree[node_idx].left) + + def __walk1_walk_isblack(self, color, node_idx1): + if (node_idx1 is None or self.tree[node_idx1].color == 0) and (color == 0): + return True + return False + + def __left_left_siblingcase(self, node_idx): + left_idx = self.tree[node_idx].left + parent = self._get_parent(node_idx) + parent_color = self.tree[parent].color + self.tree[left_idx].color = self.tree[node_idx].color + self.tree[node_idx].color = parent_color + self._right_rotate(parent, node_idx) + + def __right_left_siblingcase(self, node_idx): + left_idx = self.tree[node_idx].left + parent = self._get_parent(node_idx) + parent_color = self.tree[parent].color + self.tree[left_idx].color = parent_color + self._right_rotate(node_idx, left_idx) + child = self._get_parent(node_idx) + self._left_rotate(parent, child) + + def __left_right_siblingcase(self, node_idx): + 
right_idx = self.tree[node_idx].right + parent = self._get_parent(node_idx) + parent_color = self.tree[parent].color + self.tree[right_idx].color = parent_color + self._left_rotate(node_idx, right_idx) + child = self._get_parent(node_idx) + self._right_rotate(parent, child) + + def __right_right_siblingcase(self, node_idx): + right_idx = self.tree[node_idx].right + parent = self._get_parent(node_idx) + parent_color = self.tree[parent].color + self.tree[right_idx].color = self.tree[node_idx].color + self.tree[node_idx].color = parent_color + self._left_rotate(parent, node_idx) + + def __fix_deletion(self, node_idx): + node = self.tree[node_idx] + color = node.color + while node_idx!= self.root_idx and color == 0: + sibling_idx = self._get_sibling(node_idx) + parent_idx = self._get_parent(node_idx) + if sibling_idx is None: + node_idx = parent_idx + continue + else: + if self.tree[sibling_idx].color == 1: + self.tree[self.root_idx].is_root = False + self.tree[parent_idx].color = 1 + self.tree[sibling_idx].color = 0 + if self._is_onleft(sibling_idx): + self._right_rotate(parent_idx, sibling_idx) + else: + self._left_rotate(parent_idx, sibling_idx) + self.tree[self.root_idx].is_root = True + continue + else: + if self.__has_red_child(sibling_idx): + self.tree[self.root_idx].is_root = False + left_idx = self.tree[sibling_idx].left + if self.tree[sibling_idx].left is not None and \ + self.tree[left_idx].color == 1: + if self._is_onleft(sibling_idx): + self.__left_left_siblingcase(sibling_idx) + else: + self.__right_left_siblingcase(sibling_idx) + else: + if self._is_onleft(sibling_idx): + self.__left_right_siblingcase(sibling_idx) + else: + self.__right_right_siblingcase(sibling_idx) + self.tree[self.root_idx].is_root = True + self.tree[parent_idx].color = 0 + else: + self.tree[sibling_idx].color = 1 + if self.tree[parent_idx].color == 0: + node_idx = parent_idx + continue + else: + self.tree[parent_idx].color = 0 + color = 1 + + def _remove_node(self, node_idx): + 
parent = self._get_parent(node_idx) + a = parent + if self._is_leaf(node_idx): + par_key, root_key = (self.tree[parent].key, self.tree[self.root_idx].key) + new_indices = self.tree.delete(node_idx) + if new_indices is not None: + a = new_indices[par_key] + self.root_idx = new_indices[root_key] + elif self._has_one_child(node_idx): + child = self._replace_node(node_idx) + parent = self._get_parent(node_idx) + par_key, root_key = (self.tree[parent].key, self.tree[self.root_idx].key) + new_indices = self.tree.delete(node_idx) + self._update_size(a) + + def _delete_root(self, node_idx, node_idx1): + if self._is_leaf(node_idx): + self.tree[self.root_idx].data = None + self.tree[self.root_idx].key = None + elif self._has_one_child(node_idx): + root_key = self._transplant_values(node_idx, node_idx1) + new_indices = self.tree.delete(node_idx1) + if new_indices is not None: + self.root_idx = new_indices[root_key] + + def __leaf_case(self, node_idx, node_idx1): + walk = node_idx + walk1 = node_idx1 + parent = self._get_parent(node_idx) + color = self.tree[walk].color + if parent is None: + self._delete_root(walk, walk1) + else: + if self.__walk1_walk_isblack(color, walk1): + self.__fix_deletion(walk) + else: + sibling_idx = self._get_sibling(walk) + if sibling_idx is not None: + self.tree[sibling_idx].color = 1 + if self._is_onleft(walk): + self.tree[parent].left = None + else: + self.tree[parent].right = None + self._remove_node(walk) + + def __one_child_case(self, node_idx, node_idx1): + walk = node_idx + walk1 = node_idx1 + walk_original_color = self.tree[walk].color + parent = self._get_parent(node_idx) + if parent is None: + self._delete_root(walk, walk1) + else: + if self._is_onleft(walk): + self.tree[parent].left = walk1 + else: + self.tree[parent].right = walk1 + self.tree[walk1].parent = parent + a = self._remove_node(walk) + if self.__walk1_walk_isblack(walk_original_color, walk1): + self.__fix_deletion(walk1) + else: + self.tree[walk1].color = 0 + + def 
__two_child_case(self, node_idx): + walk = node_idx + successor = self._replace_node(walk) + self._transplant_values(walk, successor) + walk = successor + walk1 = self._replace_node(walk) + return walk, walk1 + + def delete(self, key, **kwargs): + walk = super(RedBlackTree, self).search(key) + if walk is not None: + walk1 = self._replace_node(walk) + if self._has_two_child(walk): + walk, walk1 = self.__two_child_case(walk) + if self._is_leaf(walk): + self.__leaf_case(walk, walk1) + elif self._has_one_child(walk): + self.__one_child_case(walk, walk1) + return True + else: + return None + +class BinaryTreeTraversal(object): + """ + Represents the traversals possible in + a binary tree. + + Parameters + ========== + + tree: BinaryTree + The binary tree for whose traversal + is to be done. + backend: pydatastructs.Backend + The backend to be used. Available backends: Python and C++ + Optional, by default, the Python backend is used. For faster execution, use the C++ backend. + + Traversals + ========== + + - Depth First Search + In Order, Post Order, Pre Order Out Order + + - Breadth First Search + + Examples + ======== + + >>> from pydatastructs import BinarySearchTree as BST + >>> from pydatastructs import BinaryTreeTraversal as BTT + >>> b = BST(2, 2) + >>> b.insert(1, 1) + >>> b.insert(3, 3) + >>> trav = BTT(b) + >>> dfs = trav.depth_first_search() + >>> [str(n) for n in dfs] + ['(None, 1, 1, None)', '(1, 2, 2, 2)', '(None, 3, 3, None)'] + >>> bfs = trav.breadth_first_search() + >>> [str(n) for n in bfs] + ['(1, 2, 2, 2)', '(None, 1, 1, None)', '(None, 3, 3, None)'] + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/Tree_traversal + """ + + @classmethod + def methods(cls): + return ['__new__', 'depth_first_search', + 'breadth_first_search'] + + __slots__ = ['tree'] + + def __new__(cls, tree, **kwargs): + backend = kwargs.get('backend', Backend.PYTHON) + if backend == Backend.CPP: + return _trees.BinaryTreeTraversal(tree, **kwargs) + if not isinstance(tree, BinaryTree): + raise TypeError("%s is not a binary tree"%(tree)) + obj = object.__new__(cls) + obj.tree = tree + return obj + + def _pre_order(self, node): + """ + Utility method for computing pre-order + of a binary tree using iterative algorithm. + """ + visit = [] + tree, size = self.tree.tree, self.tree.size + s = Stack() + s.push(node) + while not s.is_empty: + node = s.pop() + visit.append(tree[node]) + if tree[node].right is not None: + s.push(tree[node].right) + if tree[node].left is not None: + s.push(tree[node].left) + return visit + + def _in_order(self, node): + """ + Utility method for computing in-order + of a binary tree using iterative algorithm. + """ + visit = [] + tree, size = self.tree.tree, self.tree.size + s = Stack() + while not s.is_empty or node is not None: + if node is not None: + s.push(node) + node = tree[node].left + else: + node = s.pop() + visit.append(tree[node]) + node = tree[node].right + return visit + + def _post_order(self, node): + """ + Utility method for computing post-order + of a binary tree using iterative algorithm. 
+ """ + visit = [] + tree, size = self.tree.tree, self.tree.size + s = Stack() + s.push(node) + last = OneDimensionalArray(int, size) + last.fill(False) + while not s.is_empty: + node = s.peek + l, r = tree[node].left, tree[node].right + cl, cr = l is None or last[l], r is None or last[r] + if cl and cr: + s.pop() + visit.append(tree[node]) + last[node] = True + continue + if not cr: + s.push(r) + if not cl: + s.push(l) + return visit + + def _out_order(self, node): + """ + Utility method for computing out-order + of a binary tree using iterative algorithm. + """ + return reversed(self._in_order(node)) + + def depth_first_search(self, order='in_order', node=None): + """ + Computes the depth first search traversal of the binary + trees. + + Parameters + ========== + + order : str + One of the strings, 'in_order', 'post_order', + 'pre_order', 'out_order'. + By default, it is set to, 'in_order'. + node : int + The index of the node from where the traversal + is to be instantiated. + + Returns + ======= + + list + Each element is of type 'TreeNode'. + """ + if node is None: + node = self.tree.root_idx + if order not in ('in_order', 'post_order', 'pre_order', 'out_order'): + raise NotImplementedError( + "%s order is not implemented yet." + "We only support `in_order`, `post_order`, " + "`pre_order` and `out_order` traversals.") + return getattr(self, '_' + order)(node) + + def breadth_first_search(self, node=None, strategy='queue'): + """ + Computes the breadth first search traversal of a binary tree. + + Parameters + ========== + + node : int + The index of the node from where the traversal has to be instantiated. + By default, set to, root index. + + strategy : str + The strategy using which the computation has to happen. + By default, it is set 'queue'. + + Returns + ======= + + list + Each element of the list is of type `TreeNode`. 
+ """ + # TODO: IMPLEMENT ITERATIVE DEEPENING-DEPTH FIRST SEARCH STRATEGY + strategies = ('queue',) + if strategy not in strategies: + raise NotImplementedError( + "%s startegy is not implemented yet"%(strategy)) + if node is None: + node = self.tree.root_idx + q, visit, tree = Queue(), [], self.tree.tree + q.append(node) + while len(q) > 0: + node = q.popleft() + visit.append(tree[node]) + if tree[node].left is not None: + q.append(tree[node].left) + if tree[node].right is not None: + q.append(tree[node].right) + return visit + +class BinaryIndexedTree(object): + """ + Represents binary indexed trees + a.k.a fenwick trees. + + Parameters + ========== + + array: list/tuple + The array whose elements are to be + considered for the queries. + backend: pydatastructs.Backend + The backend to be used. Available backends: Python and C++ + Optional, by default, the Python backend is used. For faster execution, use the C++ backend. + + Examples + ======== + + >>> from pydatastructs import BinaryIndexedTree + >>> bit = BinaryIndexedTree([1, 2, 3]) + >>> bit.get_sum(0, 2) + 6 + >>> bit.update(0, 100) + >>> bit.get_sum(0, 2) + 105 + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Fenwick_tree + """ + + __slots__ = ['tree', 'array', 'flag'] + + def __new__(cls, array, **kwargs): + backend = kwargs.get('backend', Backend.PYTHON) + if backend == Backend.CPP: + return _trees.BinaryIndexedTree(type(array[0]), array, **kwargs) + obj = object.__new__(cls) + obj.array = OneDimensionalArray(type(array[0]), array) + obj.tree = [0] * (obj.array._size + 2) + obj.flag = [0] * (obj.array._size) + for index in range(obj.array._size): + obj.update(index, array[index]) + return obj + + @classmethod + def methods(cls): + return ['update', 'get_prefix_sum', + 'get_sum'] + + def update(self, index, value): + """ + Updates value at the given index. + + Parameters + ========== + + index: int + Index of element to be updated. + + value + The value to be inserted. 
+ """ + _index, _value = index, value + if self.flag[index] == 0: + self.flag[index] = 1 + index += 1 + while index < self.array._size + 1: + self.tree[index] += value + index = index + (index & (-index)) + else: + value = value - self.array[index] + index += 1 + while index < self.array._size + 1: + self.tree[index] += value + index = index + (index & (-index)) + self.array[_index] = _value + + def get_prefix_sum(self, index): + """ + Computes sum of elements from index 0 to given index. + + Parameters + ========== + + index: int + Index till which sum has to be calculated. + + Returns + ======= + + sum: int + The required sum. + """ + index += 1 + sum = 0 + while index > 0: + sum += self.tree[index] + index = index - (index & (-index)) + return sum + + def get_sum(self, left_index, right_index): + """ + Get sum of elements from left index to right index. + + Parameters + ========== + + left_index: int + Starting index from where sum has to be computed. + + right_index: int + Ending index till where sum has to be computed. 
+ + Returns + ======= + + sum: int + The required sum + """ + if left_index >= 1: + return self.get_prefix_sum(right_index) - \ + self.get_prefix_sum(left_index - 1) + else: + return self.get_prefix_sum(right_index) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/heaps.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/heaps.py new file mode 100644 index 000000000..12133a6f1 --- /dev/null +++ b/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/heaps.py @@ -0,0 +1,582 @@ +from pydatastructs.utils.misc_util import ( + _check_type, TreeNode, BinomialTreeNode, + Backend, raise_if_backend_is_not_python) +from pydatastructs.linear_data_structures.arrays import ( + DynamicOneDimensionalArray, Array) +from pydatastructs.miscellaneous_data_structures.binomial_trees import BinomialTree + +__all__ = [ + 'BinaryHeap', + 'TernaryHeap', + 'DHeap', + 'BinomialHeap' +] + +class Heap(object): + """ + Abstract class for representing heaps. + """ + pass + + +class DHeap(Heap): + """ + Represents D-ary Heap. + + Parameters + ========== + + elements: list, tuple, Array + Optional, by default 'None'. + list/tuple/Array of initial TreeNode in Heap. + heap_property: str + If the key stored in each node is + either greater than or equal to + the keys in the node's children + then pass 'max'. + If the key stored in each node is + either less than or equal to + the keys in the node's children + then pass 'min'. + By default, the heap property is + set to 'min'. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. 
+ + Examples + ======== + + >>> from pydatastructs.trees.heaps import DHeap + >>> min_heap = DHeap(heap_property="min", d=3) + >>> min_heap.insert(1, 1) + >>> min_heap.insert(5, 5) + >>> min_heap.insert(7, 7) + >>> min_heap.extract().key + 1 + >>> min_heap.insert(4, 4) + >>> min_heap.extract().key + 4 + + >>> max_heap = DHeap(heap_property='max', d=2) + >>> max_heap.insert(1, 1) + >>> max_heap.insert(5, 5) + >>> max_heap.insert(7, 7) + >>> max_heap.extract().key + 7 + >>> max_heap.insert(6, 6) + >>> max_heap.extract().key + 6 + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/D-ary_heap + """ + __slots__ = ['_comp', 'heap', 'd', 'heap_property', '_last_pos_filled'] + + def __new__(cls, elements=None, heap_property="min", d=4, + **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = Heap.__new__(cls) + obj.heap_property = heap_property + obj.d = d + if heap_property == "min": + obj._comp = lambda key_parent, key_child: key_parent <= key_child + elif heap_property == "max": + obj._comp = lambda key_parent, key_child: key_parent >= key_child + else: + raise ValueError("%s is invalid heap property"%(heap_property)) + if elements is None: + elements = DynamicOneDimensionalArray(TreeNode, 0) + elif _check_type(elements, (list,tuple)): + elements = DynamicOneDimensionalArray(TreeNode, len(elements), elements) + elif _check_type(elements, Array): + elements = DynamicOneDimensionalArray(TreeNode, len(elements), elements._data) + else: + raise ValueError(f'Expected a list/tuple/Array of TreeNode got {type(elements)}') + obj.heap = elements + obj._last_pos_filled = obj.heap._last_pos_filled + obj._build() + return obj + + @classmethod + def methods(cls): + return ['__new__', 'insert', 'extract', '__str__', 'is_empty'] + + def _build(self): + for i in range(self._last_pos_filled + 1): + self.heap[i]._leftmost, self.heap[i]._rightmost = \ + self.d*i + 1, self.d*i + self.d + for i in range((self._last_pos_filled + 
1)//self.d, -1, -1): + self._heapify(i) + + def _swap(self, idx1, idx2): + idx1_key, idx1_data = \ + self.heap[idx1].key, self.heap[idx1].data + self.heap[idx1].key, self.heap[idx1].data = \ + self.heap[idx2].key, self.heap[idx2].data + self.heap[idx2].key, self.heap[idx2].data = \ + idx1_key, idx1_data + + def _heapify(self, i): + while True: + target = i + l = self.d*i + 1 + r = self.d*i + self.d + + for j in range(l, r+1): + if j <= self._last_pos_filled: + target = j if self._comp(self.heap[j].key, self.heap[target].key) \ + else target + else: + break + + if target != i: + self._swap(target, i) + i = target + else: + break + + def insert(self, key, data=None): + """ + Insert a new element to the heap according to heap property. + + Parameters + ========== + + key + The key for comparison. + data + The data to be inserted. + + Returns + ======= + + None + """ + new_node = TreeNode(key, data) + self.heap.append(new_node) + self._last_pos_filled += 1 + i = self._last_pos_filled + self.heap[i]._leftmost, self.heap[i]._rightmost = self.d*i + 1, self.d*i + self.d + + while True: + parent = (i - 1)//self.d + if i == 0 or self._comp(self.heap[parent].key, self.heap[i].key): + break + else: + self._swap(i, parent) + i = parent + + def extract(self): + """ + Extract root element of the Heap. + + Returns + ======= + + root_element: TreeNode + The TreeNode at the root of the heap, + if the heap is not empty. + + None + If the heap is empty. 
+ """ + if self._last_pos_filled == -1: + raise IndexError("Heap is empty.") + else: + element_to_be_extracted = TreeNode(self.heap[0].key, self.heap[0].data) + self._swap(0, self._last_pos_filled) + self.heap.delete(self._last_pos_filled) + self._last_pos_filled -= 1 + self._heapify(0) + return element_to_be_extracted + + def __str__(self): + to_be_printed = ['' for i in range(self._last_pos_filled + 1)] + for i in range(self._last_pos_filled + 1): + node = self.heap[i] + if node._leftmost <= self._last_pos_filled: + if node._rightmost <= self._last_pos_filled: + children = list(range(node._leftmost, node._rightmost + 1)) + else: + children = list(range(node._leftmost, self._last_pos_filled + 1)) + else: + children = [] + to_be_printed[i] = (node.key, node.data, children) + return str(to_be_printed) + + @property + def is_empty(self): + """ + Checks if the heap is empty. + """ + return self.heap._last_pos_filled == -1 + + +class BinaryHeap(DHeap): + """ + Represents Binary Heap. + + Parameters + ========== + + elements: list, tuple + Optional, by default 'None'. + List/tuple of initial elements in Heap. + heap_property: str + If the key stored in each node is + either greater than or equal to + the keys in the node's children + then pass 'max'. + If the key stored in each node is + either less than or equal to + the keys in the node's children + then pass 'min'. + By default, the heap property is + set to 'min'. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. 
+ + Examples + ======== + + >>> from pydatastructs.trees.heaps import BinaryHeap + >>> min_heap = BinaryHeap(heap_property="min") + >>> min_heap.insert(1, 1) + >>> min_heap.insert(5, 5) + >>> min_heap.insert(7, 7) + >>> min_heap.extract().key + 1 + >>> min_heap.insert(4, 4) + >>> min_heap.extract().key + 4 + + >>> max_heap = BinaryHeap(heap_property='max') + >>> max_heap.insert(1, 1) + >>> max_heap.insert(5, 5) + >>> max_heap.insert(7, 7) + >>> max_heap.extract().key + 7 + >>> max_heap.insert(6, 6) + >>> max_heap.extract().key + 6 + + References + ========== + + .. [1] https://en.m.wikipedia.org/wiki/Binary_heap + """ + def __new__(cls, elements=None, heap_property="min", + **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = DHeap.__new__(cls, elements, heap_property, 2) + return obj + + @classmethod + def methods(cls): + return ['__new__'] + + +class TernaryHeap(DHeap): + """ + Represents Ternary Heap. + + Parameters + ========== + + elements: list, tuple + Optional, by default 'None'. + List/tuple of initial elements in Heap. + heap_property: str + If the key stored in each node is + either greater than or equal to + the keys in the node's children + then pass 'max'. + If the key stored in each node is + either less than or equal to + the keys in the node's children + then pass 'min'. + By default, the heap property is + set to 'min'. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. 
+ + Examples + ======== + + >>> from pydatastructs.trees.heaps import TernaryHeap + >>> min_heap = TernaryHeap(heap_property="min") + >>> min_heap.insert(1, 1) + >>> min_heap.insert(5, 5) + >>> min_heap.insert(7, 7) + >>> min_heap.insert(3, 3) + >>> min_heap.extract().key + 1 + >>> min_heap.insert(4, 4) + >>> min_heap.extract().key + 3 + + >>> max_heap = TernaryHeap(heap_property='max') + >>> max_heap.insert(1, 1) + >>> max_heap.insert(5, 5) + >>> max_heap.insert(7, 7) + >>> min_heap.insert(3, 3) + >>> max_heap.extract().key + 7 + >>> max_heap.insert(6, 6) + >>> max_heap.extract().key + 6 + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/D-ary_heap + .. [2] https://ece.uwaterloo.ca/~dwharder/aads/Algorithms/d-ary_heaps/Ternary_heaps/ + """ + def __new__(cls, elements=None, heap_property="min", + **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = DHeap.__new__(cls, elements, heap_property, 3) + return obj + + @classmethod + def methods(cls): + return ['__new__'] + + +class BinomialHeap(Heap): + """ + Represents binomial heap. + + Parameters + ========== + + root_list: list/tuple/Array + By default, [] + The list of BinomialTree object references + in sorted order. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Examples + ======== + + >>> from pydatastructs import BinomialHeap + >>> b = BinomialHeap() + >>> b.insert(1, 1) + >>> b.insert(2, 2) + >>> b.find_minimum().key + 1 + >>> b.find_minimum().children[0].key + 2 + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/Binomial_heap + """ + __slots__ = ['root_list'] + + def __new__(cls, root_list=None, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + if root_list is None: + root_list = [] + if not all((_check_type(root, BinomialTree)) + for root in root_list): + raise TypeError("The root_list should contain " + "references to objects of BinomialTree.") + obj = Heap.__new__(cls) + obj.root_list = root_list + return obj + + @classmethod + def methods(cls): + return ['__new__', 'merge_tree', 'merge', 'insert', + 'find_minimum', 'is_emtpy', 'decrease_key', 'delete', + 'delete_minimum'] + + def merge_tree(self, tree1, tree2): + """ + Merges two BinomialTree objects. + + Parameters + ========== + + tree1: BinomialTree + + tree2: BinomialTree + """ + if (not _check_type(tree1, BinomialTree)) or \ + (not _check_type(tree2, BinomialTree)): + raise TypeError("Both the trees should be of type " + "BinomalTree.") + ret_value = None + if tree1.root.key <= tree2.root.key: + tree1.add_sub_tree(tree2) + ret_value = tree1 + else: + tree2.add_sub_tree(tree1) + ret_value = tree2 + return ret_value + + def _merge_heap_last_new_tree(self, new_root_list, new_tree): + """ + Merges last tree node in root list with the incoming tree. + """ + pos = -1 + if len(new_root_list) > 0 and new_root_list[pos].order == new_tree.order: + new_root_list[pos] = self.merge_tree(new_root_list[pos], new_tree) + else: + new_root_list.append(new_tree) + + def merge(self, other_heap): + """ + Merges current binomial heap with the given binomial heap. 
+ + Parameters + ========== + + other_heap: BinomialHeap + """ + if not _check_type(other_heap, BinomialHeap): + raise TypeError("Other heap is not of type BinomialHeap.") + new_root_list = [] + i, j = 0, 0 + while (i < len(self.root_list)) and \ + (j < len(other_heap.root_list)): + new_tree = None + while self.root_list[i] is None: + i += 1 + while other_heap.root_list[j] is None: + j += 1 + if self.root_list[i].order == other_heap.root_list[j].order: + new_tree = self.merge_tree(self.root_list[i], + other_heap.root_list[j]) + i += 1 + j += 1 + else: + if self.root_list[i].order < other_heap.root_list[j].order: + new_tree = self.root_list[i] + i += 1 + else: + new_tree = other_heap.root_list[j] + j += 1 + self._merge_heap_last_new_tree(new_root_list, new_tree) + + while i < len(self.root_list): + new_tree = self.root_list[i] + self._merge_heap_last_new_tree(new_root_list, new_tree) + i += 1 + while j < len(other_heap.root_list): + new_tree = other_heap.root_list[j] + self._merge_heap_last_new_tree(new_root_list, new_tree) + j += 1 + self.root_list = new_root_list + + def insert(self, key, data=None): + """ + Inserts new node with the given key and data. + + key + The key of the node which can be operated + upon by relational operators. + + data + The data to be stored in the new node. + """ + new_node = BinomialTreeNode(key, data) + new_tree = BinomialTree(root=new_node, order=0) + new_heap = BinomialHeap(root_list=[new_tree]) + self.merge(new_heap) + + def find_minimum(self, **kwargs): + """ + Finds the node with the minimum key. 
+ + Returns + ======= + + min_node: BinomialTreeNode + """ + if self.is_empty: + raise IndexError("Binomial heap is empty.") + min_node = None + idx, min_idx = 0, None + for tree in self.root_list: + if ((min_node is None) or + (tree is not None and tree.root is not None and + min_node.key > tree.root.key)): + min_node = tree.root + min_idx = idx + idx += 1 + if kwargs.get('get_index', None) is not None: + return min_node, min_idx + return min_node + + def delete_minimum(self): + """ + Deletes the node with minimum key. + """ + min_node, min_idx = self.find_minimum(get_index=True) + child_root_list = [] + for k, child in enumerate(min_node.children): + if child is not None: + child_root_list.append(BinomialTree(root=child, order=k)) + self.root_list.remove(self.root_list[min_idx]) + child_heap = BinomialHeap(root_list=child_root_list) + self.merge(child_heap) + + @property + def is_empty(self): + return not self.root_list + + def decrease_key(self, node, new_key): + """ + Decreases the key of the given node. + + Parameters + ========== + + node: BinomialTreeNode + The node whose key is to be reduced. + new_key + The new key of the given node, + should be less than the current key. + """ + if node.key <= new_key: + raise ValueError("The new key " + "should be less than current node's key.") + node.key = new_key + while ((not node.is_root) and + (node.parent.key > node.key)): + node.parent.key, node.key = \ + node.key, node.parent.key + node.parent.data, node.data = \ + node.data, node.parent.data + node = node.parent + + def delete(self, node): + """ + Deletes the given node. + + Parameters + ========== + + node: BinomialTreeNode + The node which is to be deleted. 
+ """ + self.decrease_key(node, self.find_minimum().key - 1) + self.delete_minimum() diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/m_ary_trees.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/m_ary_trees.py new file mode 100644 index 000000000..a06fda9ee --- /dev/null +++ b/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/m_ary_trees.py @@ -0,0 +1,172 @@ +from pydatastructs.utils import MAryTreeNode +from pydatastructs.linear_data_structures.arrays import ArrayForTrees +from pydatastructs.utils.misc_util import ( + Backend, raise_if_backend_is_not_python) + +__all__ = [ + 'MAryTree' +] + +class MAryTree(object): + """ + Abstract m-ary tree. + + Parameters + ========== + + key + Required if tree is to be instantiated with + root otherwise not needed. + root_data + Optional, the root node of the binary tree. + If not of type MAryTreeNode, it will consider + root as data and a new root node will + be created. + comp: lambda + Optional, A lambda function which will be used + for comparison of keys. Should return a + bool value. By default it implements less + than operator. + is_order_statistic: bool + Set it to True, if you want to use the + order statistic features of the tree. + max_children + Optional, specifies the maximum number of children + a node can have. Defaults to 2 in case nothing is + specified. + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/M-ary_tree + """ + + __slots__ = ['root_idx', 'max_children', 'comparator', 'tree', 'size', + 'is_order_statistic'] + + + def __new__(cls, key=None, root_data=None, comp=None, + is_order_statistic=False, max_children=2, + **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = object.__new__(cls) + if key is None and root_data is not None: + raise ValueError('Key required.') + key = None if root_data is None else key + root = MAryTreeNode(key, root_data) + root.is_root = True + obj.root_idx = 0 + obj.max_children = max_children + obj.tree, obj.size = ArrayForTrees(MAryTreeNode, [root]), 1 + obj.comparator = lambda key1, key2: key1 < key2 \ + if comp is None else comp + obj.is_order_statistic = is_order_statistic + return obj + + @classmethod + def methods(cls): + return ['__new__', '__str__'] + + def insert(self, key, data=None): + """ + Inserts data by the passed key using iterative + algorithm. + + Parameters + ========== + + key + The key for comparison. + data + The data to be inserted. + + Returns + ======= + + None + """ + raise NotImplementedError("This is an abstract method.") + + def delete(self, key, **kwargs): + """ + Deletes the data with the passed key + using iterative algorithm. + + Parameters + ========== + + key + The key of the node which is + to be deleted. + + Returns + ======= + + True + If the node is deleted successfully. + + None + If the node to be deleted doesn't exists. + + Note + ==== + + The node is deleted means that the connection to that + node are removed but the it is still in tree. + """ + raise NotImplementedError("This is an abstract method.") + + def search(self, key, **kwargs): + """ + Searches for the data in the binary search tree + using iterative algorithm. + + Parameters + ========== + + key + The key for searching. + parent: bool + If true then returns index of the + parent of the node with the passed + key. 
+ By default, False + + Returns + ======= + + int + If the node with the passed key is + in the tree. + tuple + The index of the searched node and + the index of the parent of that node. + None + In all other cases. + """ + raise NotImplementedError("This is an abstract method.") + + def to_binary_tree(self): + """ + Converts an m-ary tree to a binary tree. + + Returns + ======= + + TreeNode + The root of the newly created binary tree. + """ + raise NotImplementedError("This is an abstract method.") + + + def __str__(self): + to_be_printed = ['' for i in range(self.tree._last_pos_filled + 1)] + for i in range(self.tree._last_pos_filled + 1): + if self.tree[i] is not None: + node = self.tree[i] + to_be_printed[i] = (node.key, node.data) + for j in node.children: + if j is not None: + to_be_printed[i].append(j) + return str(to_be_printed) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/space_partitioning_trees.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/space_partitioning_trees.py new file mode 100644 index 000000000..f13c1f280 --- /dev/null +++ b/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/space_partitioning_trees.py @@ -0,0 +1,242 @@ +from pydatastructs.utils import TreeNode +from collections import deque as Queue +from pydatastructs.utils.misc_util import ( + _check_type, Backend, + raise_if_backend_is_not_python) + +__all__ = [ + 'OneDimensionalSegmentTree' +] + +class OneDimensionalSegmentTree(object): + """ + Represents one dimensional segment trees. + + Parameters + ========== + + segs: list/tuple/set + The segs should contains tuples/list/set of size 2 + denoting the start and end points of the intervals. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. 
+ + Examples + ======== + + >>> from pydatastructs import OneDimensionalSegmentTree as ODST + >>> segt = ODST([(3, 8), (9, 20)]) + >>> segt.build() + >>> segt.tree[0].key + [False, 2, 3, False] + >>> len(segt.query(4)) + 1 + + Note + ==== + + All the segments are assumed to be closed intervals, + i.e., the ends are points of segments are also included in + computation. + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Segment_tree + + """ + + __slots__ = ['segments', 'tree', 'root_idx', 'cache'] + + def __new__(cls, segs, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = object.__new__(cls) + if any((not isinstance(seg, (tuple, list, set)) or len(seg) != 2) + for seg in segs): + raise ValueError('%s is invalid set of intervals'%(segs)) + for i in range(len(segs)): + segs[i] = list(segs[i]) + segs[i].sort() + obj.segments = list(segs) + obj.tree, obj.root_idx, obj.cache = [], None, False + return obj + + @classmethod + def methods(cls): + return ['build', 'query', '__str__'] + + def _union(self, i1, i2): + """ + Helper function for taking union of two + intervals. + """ + return TreeNode([i1.key[0], i1.key[1], i2.key[2], i2.key[3]], None) + + def _intersect(self, i1, i2): + """ + Helper function for finding intersection of two + intervals. + """ + if i1 is None or i2 is None: + return False + if i1.key[2] < i2.key[1] or i2.key[2] < i1.key[1]: + return False + c1, c2 = None, None + if i1.key[2] == i2.key[1]: + c1 = (i1.key[3] and i2.key[0]) + if i2.key[2] == i1.key[1]: + c2 = (i2.key[3] and i1.key[0]) + if c1 is False and c2 is False: + return False + return True + + def _contains(self, i1, i2): + """ + Helper function for checking if the first interval + is contained in second interval. 
+ """ + if i1 is None or i2 is None: + return False + if i1.key[1] < i2.key[1] and i1.key[2] > i2.key[2]: + return True + if i1.key[1] == i2.key[1] and i1.key[2] > i2.key[2]: + return (i1.key[0] or not i2.key[0]) + if i1.key[1] < i2.key[1] and i1.key[2] == i2.key[2]: + return i1.key[3] or not i2.key[3] + if i1.key[1] == i2.key[1] and i1.key[2] == i2.key[2]: + return not ((not i1.key[3] and i2.key[3]) or (not i1.key[0] and i2.key[0])) + return False + + def _iterate(self, calls, I, idx): + """ + Helper function for filling the calls + stack. Used for imitating the stack based + approach used in recursion. + """ + if self.tree[idx].right is None: + rc = None + else: + rc = self.tree[self.tree[idx].right] + if self.tree[idx].left is None: + lc = None + else: + lc = self.tree[self.tree[idx].left] + if self._intersect(I, rc): + calls.append(self.tree[idx].right) + if self._intersect(I, lc): + calls.append(self.tree[idx].left) + return calls + + def build(self): + """ + Builds the segment tree from the segments, + using iterative algorithm based on queues. 
+ """ + if self.cache: + return None + endpoints = [] + for segment in self.segments: + endpoints.extend(segment) + endpoints.sort() + + elem_int = Queue() + elem_int.append(TreeNode([False, endpoints[0] - 1, endpoints[0], False], None)) + i = 0 + while i < len(endpoints) - 1: + elem_int.append(TreeNode([True, endpoints[i], endpoints[i], True], None)) + elem_int.append(TreeNode([False, endpoints[i], endpoints[i+1], False], None)) + i += 1 + elem_int.append(TreeNode([True, endpoints[i], endpoints[i], True], None)) + elem_int.append(TreeNode([False, endpoints[i], endpoints[i] + 1, False], None)) + + self.tree = [] + while len(elem_int) > 1: + m = len(elem_int) + while m >= 2: + I1 = elem_int.popleft() + I2 = elem_int.popleft() + I = self._union(I1, I2) + I.left = len(self.tree) + I.right = len(self.tree) + 1 + self.tree.append(I1), self.tree.append(I2) + elem_int.append(I) + m -= 2 + if m & 1 == 1: + Il = elem_int.popleft() + elem_int.append(Il) + + Ir = elem_int.popleft() + Ir.left, Ir.right = -3, -2 + self.tree.append(Ir) + self.root_idx = -1 + + for segment in self.segments: + I = TreeNode([True, segment[0], segment[1], True], None) + calls = [self.root_idx] + while calls: + idx = calls.pop() + if self._contains(I, self.tree[idx]): + if self.tree[idx].data is None: + self.tree[idx].data = [] + self.tree[idx].data.append(I) + continue + calls = self._iterate(calls, I, idx) + self.cache = True + + def query(self, qx, init_node=None): + """ + Queries the segment tree. + + Parameters + ========== + + qx: int/float + The query point + + init_node: int + The index of the node from which the query process + is to be started. + + Returns + ======= + + intervals: set + The set of the intervals which contain the query + point. + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/Segment_tree + """ + if not self.cache: + self.build() + if init_node is None: + init_node = self.root_idx + qn = TreeNode([True, qx, qx, True], None) + intervals = [] + calls = [init_node] + while calls: + idx = calls.pop() + if _check_type(self.tree[idx].data, list): + intervals.extend(self.tree[idx].data) + calls = self._iterate(calls, qn, idx) + return set(intervals) + + def __str__(self): + """ + Used for printing. + """ + if not self.cache: + self.build() + str_tree = [] + for seg in self.tree: + if seg.data is None: + data = None + else: + data = [str(sd) for sd in seg.data] + str_tree.append((seg.left, seg.key, data, seg.right)) + return str(str_tree) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/tests/__init__.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/tests/test_binary_trees.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/tests/test_binary_trees.py new file mode 100644 index 000000000..826100b78 --- /dev/null +++ b/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/tests/test_binary_trees.py @@ -0,0 +1,820 @@ +from pydatastructs.trees.binary_trees import ( + BinaryTree, BinarySearchTree, BinaryTreeTraversal, AVLTree, + ArrayForTrees, BinaryIndexedTree, SelfBalancingBinaryTree, SplayTree, CartesianTree, Treap, RedBlackTree) +from pydatastructs.utils.raises_util import raises +from pydatastructs.utils.misc_util import TreeNode +from copy import deepcopy +from pydatastructs.utils.misc_util import Backend +import random +from pydatastructs.utils._backend.cpp import _nodes + +def _test_BinarySearchTree(backend): + BST = BinarySearchTree + b = BST(8, 8, backend=backend) + b.delete(8) + b.insert(8, 8) + b.insert(3, 3) + b.insert(10, 10) + b.insert(1, 1) + b.insert(6, 6) + b.insert(4, 
4) + b.insert(7, 7) + b.insert(14, 14) + b.insert(13, 13) + # Explicit check for the __str__ method of Binary Trees Class + assert str(b) == \ + ("[(1, 8, 8, 2), (3, 3, 3, 4), (None, 10, 10, 7), (None, 1, 1, None), " + "(5, 6, 6, 6), (None, 4, 4, None), (None, 7, 7, None), (8, 14, 14, None), " + "(None, 13, 13, None)]") + assert b.root_idx == 0 + + assert b.tree[0].left == 1 + assert b.tree[0].key == 8 + assert b.tree[0].data == 8 + assert b.tree[0].right == 2 + + trav = BinaryTreeTraversal(b, backend=backend) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [1, 3, 4, 6, 7, 8, 10, 13, 14] + assert [node.key for node in pre_order] == [8, 3, 1, 6, 4, 7, 10, 14, 13] + + assert b.search(10) == 2 + assert b.search(-1) is None + assert b.delete(13) is True + assert b.search(13) is None + assert b.delete(10) is True + assert b.search(10) is None + assert b.delete(3) is True + assert b.search(3) is None + assert b.delete(13) is None + + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [1, 4, 6, 7, 8, 14] + assert [node.key for node in pre_order] == [8, 4, 1, 6, 7, 14] + + b.delete(7) + b.delete(6) + b.delete(1) + b.delete(4) + + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [8, 14] + assert [node.key for node in pre_order] == [8, 14] + + bc = BST(1, 1, backend=backend) + assert bc.insert(1, 2) is None + + b = BST(-8, 8, backend=backend) + b.insert(-3, 3) + b.insert(-10, 10) + b.insert(-1, 1) + b.insert(-6, 6) + b.insert(-4, 4) + b.insert(-7, 7) + b.insert(-14, 14) + b.insert(-13, 13) + + b.delete(-13) + b.delete(-10) + b.delete(-3) + b.delete(-13) + assert str(b) == "[(7, -8, 8, 1), (4, -1, 1, None), '', '', (6, -6, 6, 5), (None, -4, 4, None), (None, -7, 7, 
None), (None, -14, 14, None)]" + + bl = BST(backend=backend) + nodes = [50, 30, 90, 70, 100, 60, 80, 55, 20, 40, 15, 10, 16, 17, 18] + for node in nodes: + bl.insert(node, node) + + assert bl.lowest_common_ancestor(80, 55, 2) == 70 + assert bl.lowest_common_ancestor(60, 70, 2) == 70 + assert bl.lowest_common_ancestor(18, 18, 2) == 18 + assert bl.lowest_common_ancestor(40, 90, 2) == 50 + + assert bl.lowest_common_ancestor(18, 10, 2) == 15 + assert bl.lowest_common_ancestor(55, 100, 2) == 90 + assert bl.lowest_common_ancestor(16, 80, 2) == 50 + assert bl.lowest_common_ancestor(30, 55, 2) == 50 + + assert raises(ValueError, lambda: bl.lowest_common_ancestor(60, 200, 2)) + assert raises(ValueError, lambda: bl.lowest_common_ancestor(200, 60, 2)) + assert raises(ValueError, lambda: bl.lowest_common_ancestor(-3, 4, 2)) + + assert bl.lowest_common_ancestor(80, 55, 1) == 70 + assert bl.lowest_common_ancestor(60, 70, 1) == 70 + assert bl.lowest_common_ancestor(18, 18, 1) == 18 + assert bl.lowest_common_ancestor(40, 90, 1) == 50 + + assert bl.lowest_common_ancestor(18, 10, 1) == 15 + assert bl.lowest_common_ancestor(55, 100, 1) == 90 + assert bl.lowest_common_ancestor(16, 80, 1) == 50 + assert bl.lowest_common_ancestor(30, 55, 1) == 50 + + assert raises(ValueError, lambda: bl.lowest_common_ancestor(60, 200, 1)) + assert raises(ValueError, lambda: bl.lowest_common_ancestor(200, 60, 1)) + assert raises(ValueError, lambda: bl.lowest_common_ancestor(-3, 4, 1)) + +def test_BinarySearchTree(): + _test_BinarySearchTree(Backend.PYTHON) + +def test_cpp_BinarySearchTree(): + _test_BinarySearchTree(Backend.CPP) + +def _test_BinaryTreeTraversal(backend): + BST = BinarySearchTree + BTT = BinaryTreeTraversal + b = BST('F', 'F', backend=backend) + b.insert('B', 'B') + b.insert('A', 'A') + b.insert('G', 'G') + b.insert('D', 'D') + b.insert('C', 'C') + b.insert('E', 'E') + b.insert('I', 'I') + b.insert('H', 'H') + + trav = BTT(b, backend=backend) + pre = 
trav.depth_first_search(order='pre_order') + assert [node.key for node in pre] == ['F', 'B', 'A', 'D', 'C', 'E', 'G', 'I', 'H'] + + ino = trav.depth_first_search() + assert [node.key for node in ino] == ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I'] + + out = trav.depth_first_search(order='out_order') + assert [node.key for node in out] == ['I', 'H', 'G', 'F', 'E', 'D', 'C', 'B', 'A'] + + post = trav.depth_first_search(order='post_order') + assert [node.key for node in post] == ['A', 'C', 'E', 'D', 'B', 'H', 'I', 'G', 'F'] + + bfs = trav.breadth_first_search() + assert [node.key for node in bfs] == ['F', 'B', 'G', 'A', 'D', 'I', 'C', 'E', 'H'] + + assert raises(NotImplementedError, lambda: trav.breadth_first_search(strategy='iddfs')) + assert raises(NotImplementedError, lambda: trav.depth_first_search(order='in_out_order')) + assert raises(TypeError, lambda: BTT(1)) + +def test_BinaryTreeTraversal(): + _test_BinaryTreeTraversal(Backend.PYTHON) + +def test_cpp_BinaryTreeTraversal(): + _test_BinaryTreeTraversal(Backend.CPP) + +def _test_AVLTree(backend): + a = AVLTree('M', 'M', backend=backend) + a.insert('N', 'N') + a.insert('O', 'O') + a.insert('L', 'L') + a.insert('K', 'K') + a.insert('Q', 'Q') + a.insert('P', 'P') + a.insert('H', 'H') + a.insert('I', 'I') + a.insert('A', 'A') + assert a.root_idx == 1 + + trav = BinaryTreeTraversal(a, backend=backend) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == ['A', 'H', 'I', 'K', 'L', 'M', 'N', 'O', 'P', 'Q'] + assert [node.key for node in pre_order] == ['N', 'I', 'H', 'A', 'L', 'K', 'M', 'P', 'O', 'Q'] + + assert [a.balance_factor(a.tree[i]) for i in range(a.tree.size) if a.tree[i] is not None] == \ + [0, -1, 0, 0, 0, 0, 0, -1, 0, 0] + a1 = AVLTree(1, 1, backend=backend) + a1.insert(2, 2) + a1.insert(3, 3) + a1.insert(4, 4) + a1.insert(5, 5) + + trav = BinaryTreeTraversal(a1, backend=backend) + in_order = 
trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [1, 2, 3, 4, 5] + assert [node.key for node in pre_order] == [2, 1, 4, 3, 5] + + a3 = AVLTree(-1, 1, backend=backend) + a3.insert(-2, 2) + a3.insert(-3, 3) + a3.insert(-4, 4) + a3.insert(-5, 5) + + trav = BinaryTreeTraversal(a3, backend=backend) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [-5, -4, -3, -2, -1] + assert [node.key for node in pre_order] == [-2, -4, -5, -3, -1] + + a2 = AVLTree(backend=backend) + a2.insert(1, 1) + a2.insert(1, 1) + + trav = BinaryTreeTraversal(a2, backend=backend) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [1] + assert [node.key for node in pre_order] == [1] + + a3 = AVLTree(backend=backend) + a3.set_tree( ArrayForTrees(TreeNode, 0, backend=backend) ) + for i in range(0,7): + a3.tree.append(TreeNode(i, i, backend=backend)) + a3.tree[0].left = 1 + a3.tree[0].right = 6 + a3.tree[1].left = 5 + a3.tree[1].right = 2 + a3.tree[2].left = 3 + a3.tree[2].right = 4 + a3._left_right_rotate(0, 1) + assert str(a3) == "[(4, 0, 0, 6), (5, 1, 1, 3), (1, 2, 2, 0), (None, 3, 3, None), (None, 4, 4, None), (None, 5, 5, None), (None, 6, 6, None)]" + + trav = BinaryTreeTraversal(a3, backend=backend) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [5, 1, 3, 2, 4, 0, 6] + assert [node.key for node in pre_order] == [2, 1, 5, 3, 0, 4, 6] + + a4 = AVLTree(backend=backend) + a4.set_tree( ArrayForTrees(TreeNode, 0, backend=backend) ) + for i in range(0,7): + a4.tree.append(TreeNode(i, i,backend=backend)) + a4.tree[0].left = 1 + a4.tree[0].right = 2 + a4.tree[2].left = 3 + a4.tree[2].right = 4 + 
a4.tree[3].left = 5 + a4.tree[3].right = 6 + a4._right_left_rotate(0, 2) + + trav = BinaryTreeTraversal(a4, backend=backend) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [1, 0, 5, 3, 6, 2, 4] + assert [node.key for node in pre_order] == [3,0,1,5,2,6,4] + + a5 = AVLTree(is_order_statistic=True,backend=backend) + if backend==Backend.PYTHON: + a5.set_tree( ArrayForTrees(TreeNode, [ + TreeNode(10, 10), + TreeNode(5, 5), + TreeNode(17, 17), + TreeNode(2, 2), + TreeNode(9, 9), + TreeNode(12, 12), + TreeNode(20, 20), + TreeNode(3, 3), + TreeNode(11, 11), + TreeNode(15, 15), + TreeNode(18, 18), + TreeNode(30, 30), + TreeNode(13, 13), + TreeNode(33, 33) + ]) ) + else: + a5.set_tree( ArrayForTrees(_nodes.TreeNode, [ + TreeNode(10, 10,backend=backend), + TreeNode(5, 5,backend=backend), + TreeNode(17, 17,backend=backend), + TreeNode(2, 2,backend=backend), + TreeNode(9, 9,backend=backend), + TreeNode(12, 12,backend=backend), + TreeNode(20, 20,backend=backend), + TreeNode(3, 3,backend=backend), + TreeNode(11, 11,backend=backend), + TreeNode(15, 15,backend=backend), + TreeNode(18, 18,backend=backend), + TreeNode(30, 30,backend=backend), + TreeNode(13, 13,backend=backend), + TreeNode(33, 33,backend=backend) + ],backend=backend) ) + + a5.tree[0].left, a5.tree[0].right, a5.tree[0].parent, a5.tree[0].height = \ + 1, 2, None, 4 + a5.tree[1].left, a5.tree[1].right, a5.tree[1].parent, a5.tree[1].height = \ + 3, 4, 0, 2 + a5.tree[2].left, a5.tree[2].right, a5.tree[2].parent, a5.tree[2].height = \ + 5, 6, 0, 3 + a5.tree[3].left, a5.tree[3].right, a5.tree[3].parent, a5.tree[3].height = \ + None, 7, 1, 1 + a5.tree[4].left, a5.tree[4].right, a5.tree[4].parent, a5.tree[4].height = \ + None, None, 1, 0 + a5.tree[5].left, a5.tree[5].right, a5.tree[5].parent, a5.tree[5].height = \ + 8, 9, 2, 2 + a5.tree[6].left, a5.tree[6].right, a5.tree[6].parent, a5.tree[6].height = \ + 10, 11, 2, 2 + 
a5.tree[7].left, a5.tree[7].right, a5.tree[7].parent, a5.tree[7].height = \ + None, None, 3, 0 + a5.tree[8].left, a5.tree[8].right, a5.tree[8].parent, a5.tree[8].height = \ + None, None, 5, 0 + a5.tree[9].left, a5.tree[9].right, a5.tree[9].parent, a5.tree[9].height = \ + 12, None, 5, 1 + a5.tree[10].left, a5.tree[10].right, a5.tree[10].parent, a5.tree[10].height = \ + None, None, 6, 0 + a5.tree[11].left, a5.tree[11].right, a5.tree[11].parent, a5.tree[11].height = \ + None, 13, 6, 1 + a5.tree[12].left, a5.tree[12].right, a5.tree[12].parent, a5.tree[12].height = \ + None, None, 9, 0 + a5.tree[13].left, a5.tree[13].right, a5.tree[13].parent, a5.tree[13].height = \ + None, None, 11, 0 + + # testing order statistics + a5.tree[0].size = 14 + a5.tree[1].size = 4 + a5.tree[2].size = 9 + a5.tree[3].size = 2 + a5.tree[4].size = 1 + a5.tree[5].size = 4 + a5.tree[6].size = 4 + a5.tree[7].size = 1 + a5.tree[8].size = 1 + a5.tree[9].size = 2 + a5.tree[10].size = 1 + a5.tree[11].size = 2 + a5.tree[12].size = 1 + a5.tree[13].size = 1 + assert str(a5) == "[(1, 10, 10, 2), (3, 5, 5, 4), (5, 17, 17, 6), (None, 2, 2, 7), (None, 9, 9, None), (8, 12, 12, 9), (10, 20, 20, 11), (None, 3, 3, None), (None, 11, 11, None), (12, 15, 15, None), (None, 18, 18, None), (None, 30, 30, 13), (None, 13, 13, None), (None, 33, 33, None)]" + + assert raises(ValueError, lambda: a5.select(0)) + assert raises(ValueError, lambda: a5.select(15)) + + assert a5.rank(-1) is None + def test_select_rank(expected_output): + if backend==Backend.PYTHON: + output = [] + for i in range(len(expected_output)): + output.append(a5.select(i + 1).key) + assert output == expected_output + output = [] + expected_ranks = [i + 1 for i in range(len(expected_output))] + for i in range(len(expected_output)): + output.append(a5.rank(expected_output[i])) + assert output == expected_ranks + + test_select_rank([2, 3, 5, 9, 10, 11, 12, 13, 15, 17, 18, 20, 30, 33]) + a5.delete(9) + a5.delete(13) + a5.delete(20) + assert str(a5) == "[(7, 
10, 10, 5), (None, 5, 5, None), (0, 17, 17, 6), (None, 2, 2, None), '', (8, 12, 12, 9), (10, 30, 30, 13), (3, 3, 3, 1), (None, 11, 11, None), (None, 15, 15, None), (None, 18, 18, None), '', '', (None, 33, 33, None)]" + + trav = BinaryTreeTraversal(a5, backend=backend) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [2, 3, 5, 10, 11, 12, 15, 17, 18, 30, 33] + assert [node.key for node in pre_order] == [17, 10, 3, 2, 5, 12, 11, 15, 30, 18, 33] + + test_select_rank([2, 3, 5, 10, 11, 12, 15, 17, 18, 30, 33]) + a5.delete(10) + a5.delete(17) + assert str(a5) == "[(7, 11, 11, 5), (None, 5, 5, None), (0, 18, 18, 6), (None, 2, 2, None), '', (None, 12, 12, 9), (None, 30, 30, 13), (3, 3, 3, 1), '', (None, 15, 15, None), '', '', '', (None, 33, 33, None)]" + test_select_rank([2, 3, 5, 11, 12, 15, 18, 30, 33]) + a5.delete(11) + a5.delete(30) + test_select_rank([2, 3, 5, 12, 15, 18, 33]) + a5.delete(12) + test_select_rank([2, 3, 5, 15, 18, 33]) + a5.delete(15) + test_select_rank([2, 3, 5, 18, 33]) + a5.delete(18) + test_select_rank([2, 3, 5, 33]) + a5.delete(33) + test_select_rank([2, 3, 5]) + a5.delete(5) + test_select_rank([2, 3]) + a5.delete(3) + test_select_rank([2]) + a5.delete(2) + test_select_rank([]) + assert str(a5) == "[(None, None, None, None)]" + +def test_AVLTree(): + _test_AVLTree(backend=Backend.PYTHON) +def test_cpp_AVLTree(): + _test_AVLTree(backend=Backend.CPP) + +def _test_BinaryIndexedTree(backend): + + FT = BinaryIndexedTree + + t = FT([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], backend=backend) + + assert t.get_sum(0, 2) == 6 + assert t.get_sum(0, 4) == 15 + assert t.get_sum(0, 9) == 55 + t.update(0, 100) + assert t.get_sum(0, 2) == 105 + assert t.get_sum(0, 4) == 114 + assert t.get_sum(1, 9) == 54 + +def test_BinaryIndexedTree(): + _test_BinaryIndexedTree(Backend.PYTHON) + +def test_cpp_BinaryIndexedTree(): + _test_BinaryIndexedTree(Backend.CPP) + +def 
_test_CartesianTree(backend): + tree = CartesianTree(backend=backend) + tree.insert(3, 1, 3) + tree.insert(1, 6, 1) + tree.insert(0, 9, 0) + tree.insert(5, 11, 5) + tree.insert(4, 14, 4) + tree.insert(9, 17, 9) + tree.insert(7, 22, 7) + tree.insert(6, 42, 6) + tree.insert(8, 49, 8) + tree.insert(2, 99, 2) + # Explicit check for the redefined __str__ method of Cartesian Trees Class + + trav = BinaryTreeTraversal(tree, backend=backend) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + assert [node.key for node in pre_order] == [3, 1, 0, 2, 5, 4, 9, 7, 6, 8] + + tree.insert(1.5, 4, 1.5) + + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [0, 1, 1.5, 2, 3, 4, 5, 6, 7, 8, 9] + assert [node.key for node in pre_order] == [3, 1.5, 1, 0, 2, 5, 4, 9, 7, 6, 8] + + k = tree.search(1.5) + assert tree.tree[tree.tree[k].parent].key == 3 + tree.delete(1.5) + assert tree.root_idx == 0 + tree.tree[tree.tree[tree.root_idx].left].key == 1 + tree.delete(8) + assert tree.search(8) is None + tree.delete(7) + assert tree.search(7) is None + tree.delete(3) + assert tree.search(3) is None + assert tree.delete(18) is None + +def test_CartesianTree(): + _test_CartesianTree(backend=Backend.PYTHON) + +def test_cpp_CartesianTree(): + _test_CartesianTree(backend=Backend.CPP) + +def _test_Treap(backend): + + random.seed(0) + tree = Treap(backend=backend) + tree.insert(7, 7) + tree.insert(2, 2) + tree.insert(3, 3) + tree.insert(4, 4) + tree.insert(5, 5) + + assert isinstance(tree.tree[0].priority, float) + tree.delete(1) + assert tree.search(1) is None + assert tree.search(2) == 1 + assert tree.delete(1) is None + +def test_Treap(): + _test_Treap(Backend.PYTHON) + +def test_cpp_Treap(): + _test_Treap(Backend.CPP) + +def 
_test_SelfBalancingBinaryTree(backend): + """ + https://github.com/codezonediitj/pydatastructs/issues/234 + """ + tree = SelfBalancingBinaryTree(backend=backend) + tree.insert(5, 5) + tree.insert(5.5, 5.5) + tree.insert(4.5, 4.5) + tree.insert(4.6, 4.6) + tree.insert(4.4, 4.4) + tree.insert(4.55, 4.55) + tree.insert(4.65, 4.65) + original_tree = str(tree) + tree._right_rotate(3, 5) + + assert str(tree) == "[(2, 5, 5, 1), (None, 5.5, 5.5, None), (4, 4.5, 4.5, 5), (None, 4.6, 4.6, 6), (None, 4.4, 4.4, None), (None, 4.55, 4.55, 3), (None, 4.65, 4.65, None)]" + assert tree.tree[3].parent == 5 + assert tree.tree[2].right != 3 + assert tree.tree[tree.tree[5].parent].right == 5 + assert tree.root_idx == 0 + + trav = BinaryTreeTraversal(tree, backend=backend) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [4.4, 4.5, 4.55, 4.6, 4.65, 5, 5.5] + assert [node.key for node in pre_order] == [5, 4.5, 4.4, 4.55, 4.6, 4.65, 5.5] + + assert tree.tree[tree.tree[3].parent].right == 3 + tree._left_rotate(5, 3) + assert str(tree) == original_tree + tree.insert(4.54, 4.54) + tree.insert(4.56, 4.56) + tree._left_rotate(5, 8) + assert tree.tree[tree.tree[8].parent].left == 8 + assert str(tree) == "[(2, 5, 5, 1), (None, 5.5, 5.5, None), (4, 4.5, 4.5, 3), (8, 4.6, 4.6, 6), (None, 4.4, 4.4, None), (7, 4.55, 4.55, None), (None, 4.65, 4.65, None), (None, 4.54, 4.54, None), (5, 4.56, 4.56, None)]" + + tree._left_right_rotate(0, 2) + assert str(tree) == "[(6, 5, 5, 1), (None, 5.5, 5.5, None), (4, 4.5, 4.5, 8), (2, 4.6, 4.6, 0), (None, 4.4, 4.4, None), (7, 4.55, 4.55, None), (None, 4.65, 4.65, None), (None, 4.54, 4.54, None), (5, 4.56, 4.56, None)]" + + tree._right_left_rotate(0, 2) + assert str(tree) == "[(6, 5, 5, None), (None, 5.5, 5.5, None), (None, 4.5, 4.5, 8), (2, 4.6, 4.6, 4), (0, 4.4, 4.4, 2), (7, 4.55, 4.55, None), (None, 4.65, 4.65, None), (None, 4.54, 4.54, None), (5, 4.56, 
4.56, None)]" + +def test_SelfBalancingBinaryTree(): + _test_SelfBalancingBinaryTree(Backend.PYTHON) +def test_cpp_SelfBalancingBinaryTree(): + _test_SelfBalancingBinaryTree(Backend.CPP) + +def _test_SplayTree(backend): + t = SplayTree(100, 100, backend=backend) + t.insert(50, 50) + t.insert(200, 200) + t.insert(40, 40) + t.insert(30, 30) + t.insert(20, 20) + t.insert(55, 55) + assert str(t) == "[(None, 100, 100, None), (None, 50, 50, None), (0, 200, 200, None), (None, 40, 40, 1), (5, 30, 30, 3), (None, 20, 20, None), (4, 55, 55, 2)]" + assert t.root_idx == 6 + + trav = BinaryTreeTraversal(t, backend=backend) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [20, 30, 40, 50, 55, 100, 200] + assert [node.key for node in pre_order] == [55, 30, 20, 40, 50, 200, 100] + + t.delete(40) + + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [20, 30, 50, 55, 100, 200] + assert [node.key for node in pre_order] == [50, 30, 20, 55, 200, 100] + + t.delete(150) + + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [20, 30, 50, 55, 100, 200] + assert [node.key for node in pre_order] == [50, 30, 20, 55, 200, 100] + + t1 = SplayTree(1000, 1000, backend=backend) + t1.insert(2000, 2000) + + trav2 = BinaryTreeTraversal(t1, backend=backend) + in_order = trav2.depth_first_search(order='in_order') + pre_order = trav2.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [1000, 2000] + assert [node.key for node in pre_order] == [2000, 1000] + + t.join(t1) + assert str(t) == "[(None, 100, 100, None), '', (6, 200, 200, 8), (4, 50, 50, None), (5, 30, 30, None), (None, 20, 20, None), (3, 55, 55, 0), (None, 1000, 1000, None), (7, 2000, 2000, None), '']" + + 
if backend == Backend.PYTHON: + trav3 = BinaryTreeTraversal(t, backend=backend) + in_order = trav3.depth_first_search(order='in_order') + pre_order = trav3.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [20, 30, 50, 55, 100, 200, 1000, 2000] + assert [node.key for node in pre_order] == [200, 55, 50, 30, 20, 100, 2000, 1000] + + s = t.split(200) + assert str(s) == "[(1, 2000, 2000, None), (None, 1000, 1000, None)]" + + trav4 = BinaryTreeTraversal(s, backend=backend) + in_order = trav4.depth_first_search(order='in_order') + pre_order = trav4.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [1000, 2000] + assert [node.key for node in pre_order] == [2000, 1000] + + if backend == Backend.PYTHON: + trav5 = BinaryTreeTraversal(t, backend=backend) + in_order = trav5.depth_first_search(order='in_order') + pre_order = trav5.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [20, 30, 50, 55, 100, 200] + assert [node.key for node in pre_order] == [200, 55, 50, 30, 20, 100] + +def test_SplayTree(): + _test_SplayTree(Backend.PYTHON) + +def test_cpp_SplayTree(): + _test_SplayTree(Backend.CPP) + +def _test_RedBlackTree(backend): + tree = RedBlackTree(backend=backend) + tree.insert(10, 10) + tree.insert(18, 18) + tree.insert(7, 7) + tree.insert(15, 15) + tree.insert(16, 16) + tree.insert(30, 30) + tree.insert(25, 25) + tree.insert(40, 40) + tree.insert(60, 60) + tree.insert(2, 2) + tree.insert(17, 17) + tree.insert(6, 6) + assert str(tree) == "[(11, 10, 10, 3), (10, 18, 18, None), (None, 7, 7, None), (None, 15, 15, None), (0, 16, 16, 6), (None, 30, 30, None), (1, 25, 25, 7), (5, 40, 40, 8), (None, 60, 60, None), (None, 2, 2, None), (None, 17, 17, None), (9, 6, 6, 2)]" + assert tree.root_idx == 4 + + trav = BinaryTreeTraversal(tree, backend=backend) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for 
node in in_order] == [2, 6, 7, 10, 15, 16, 17, 18, 25, 30, 40, 60] + assert [node.key for node in pre_order] == [16, 10, 6, 2, 7, 15, 25, 18, 17, 40, 30, 60] + + assert tree.lower_bound(0) == 2 + assert tree.lower_bound(2) == 2 + assert tree.lower_bound(3) == 6 + assert tree.lower_bound(7) == 7 + assert tree.lower_bound(25) == 25 + assert tree.lower_bound(32) == 40 + assert tree.lower_bound(41) == 60 + assert tree.lower_bound(60) == 60 + assert tree.lower_bound(61) is None + + assert tree.upper_bound(0) == 2 + assert tree.upper_bound(2) == 6 + assert tree.upper_bound(3) == 6 + assert tree.upper_bound(7) == 10 + assert tree.upper_bound(25) == 30 + assert tree.upper_bound(32) == 40 + assert tree.upper_bound(41) == 60 + assert tree.upper_bound(60) is None + assert tree.upper_bound(61) is None + + tree = RedBlackTree(backend=backend) + + assert tree.lower_bound(1) is None + assert tree.upper_bound(0) is None + + tree.insert(10) + tree.insert(20) + tree.insert(30) + tree.insert(40) + tree.insert(50) + tree.insert(60) + tree.insert(70) + tree.insert(80) + tree.insert(90) + tree.insert(100) + tree.insert(110) + tree.insert(120) + tree.insert(130) + tree.insert(140) + tree.insert(150) + tree.insert(160) + tree.insert(170) + tree.insert(180) + assert str(tree) == "[(None, 10, None, None), (0, 20, None, 2), (None, 30, None, None), (1, 40, None, 5), (None, 50, None, None), (4, 60, None, 6), (None, 70, None, None), (3, 80, None, 11), (None, 90, None, None), (8, 100, None, 10), (None, 110, None, None), (9, 120, None, 13), (None, 130, None, None), (12, 140, None, 15), (None, 150, None, None), (14, 160, None, 16), (None, 170, None, 17), (None, 180, None, None)]" + + assert tree._get_sibling(7) is None + + trav = BinaryTreeTraversal(tree, backend=backend) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [10, 20, 30, 40, 50, 60, 70, 80, 90, + 100, 110, 120, 130, 140, 150, 160, 
170, 180] + assert [node.key for node in pre_order] == [80, 40, 20, 10, 30, 60, 50, 70, 120, 100, + 90, 110, 140, 130, 160, 150, 170, 180] + + tree.delete(180) + tree.delete(130) + tree.delete(110) + tree.delete(190) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, + 120, 140, 150, 160, 170] + assert [node.key for node in pre_order] == [80, 40, 20, 10, 30, 60, 50, 70, 120, 100, + 90, 160, 140, 150, 170] + + tree.delete(170) + tree.delete(100) + tree.delete(60) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [10, 20, 30, 40, 50, 70, 80, 90, 120, 140, 150, 160] + assert [node.key for node in pre_order] == [80, 40, 20, 10, 30, 50, 70, 120, 90, 150, 140, 160] + + tree.delete(70) + tree.delete(140) + + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [10, 20, 30, 40, 50, 80, 90, 120, 150, 160] + assert [node.key for node in pre_order] == [80, 40, 20, 10, 30, 50, 120, 90, 150, 160] + + tree.delete(150) + tree.delete(120) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [10, 20, 30, 40, 50, 80, 90, 160] + assert [node.key for node in pre_order] == [40, 20, 10, 30, 80, 50, 90, 160] + + tree.delete(50) + tree.delete(80) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [10, 20, 30, 40, 90, 160] + assert [node.key for node in pre_order] == [40, 20, 10, 30, 90, 160] + + tree.delete(30) + tree.delete(20) + in_order = trav.depth_first_search(order='in_order') + pre_order = 
trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [10, 40, 90, 160] + assert [node.key for node in pre_order] == [40, 10, 90, 160] + + tree.delete(10) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [40, 90, 160] + assert [node.key for node in pre_order] == [90, 40, 160] + + tree.delete(40) + tree.delete(90) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [160] + assert [node.key for node in pre_order] == [160] + + tree.delete(160) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order if node.key is not None] == [] + assert [node.key for node in pre_order if node.key is not None] == [] + + tree = RedBlackTree(backend=backend) + tree.insert(50) + tree.insert(40) + tree.insert(30) + tree.insert(20) + tree.insert(10) + tree.insert(5) + + trav = BinaryTreeTraversal(tree, backend=backend) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [5, 10, 20, 30, 40, 50] + assert [node.key for node in pre_order] == [40, 20, 10, 5, 30, 50] + + assert tree.search(50) == 0 + assert tree.search(20) == 3 + assert tree.search(30) == 2 + tree.delete(50) + tree.delete(20) + tree.delete(30) + assert tree.search(50) is None + assert tree.search(20) is None + assert tree.search(30) is None + + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [5, 10, 40] + assert [node.key for node in pre_order] == [10, 5, 40] + + tree = RedBlackTree(backend=backend) + tree.insert(10) + tree.insert(5) + tree.insert(20) + tree.insert(15) + + trav = 
BinaryTreeTraversal(tree, backend=backend) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [5, 10, 15, 20] + assert [node.key for node in pre_order] == [10, 5, 20, 15] + + tree.delete(5) + + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [10, 15, 20] + assert [node.key for node in pre_order] == [15, 10, 20] + + tree = RedBlackTree(backend=backend) + tree.insert(10) + tree.insert(5) + tree.insert(20) + tree.insert(15) + tree.insert(2) + tree.insert(6) + + trav = BinaryTreeTraversal(tree,backend=backend) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [2, 5, 6, 10, 15, 20] + assert [node.key for node in pre_order] == [10, 5, 2, 6, 20, 15] + + tree.delete(10) + + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [2, 5, 6, 15, 20] + assert [node.key for node in pre_order] == [6, 5, 2, 20, 15] + +def test_RedBlackTree(): + _test_RedBlackTree(Backend.PYTHON) + +def test_cpp_RedBlackTree(): + _test_RedBlackTree(Backend.CPP) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/tests/test_heaps.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/tests/test_heaps.py new file mode 100644 index 000000000..dece2f132 --- /dev/null +++ b/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/tests/test_heaps.py @@ -0,0 +1,236 @@ +from pydatastructs.trees.heaps import BinaryHeap, TernaryHeap, BinomialHeap, DHeap +from pydatastructs.linear_data_structures.arrays import DynamicOneDimensionalArray +from pydatastructs.miscellaneous_data_structures.binomial_trees import BinomialTree +from 
pydatastructs.utils.misc_util import TreeNode, BinomialTreeNode +from pydatastructs.utils.raises_util import raises +from collections import deque as Queue + +def test_BinaryHeap(): + + max_heap = BinaryHeap(heap_property="max") + + assert raises(IndexError, lambda: max_heap.extract()) + + max_heap.insert(100, 100) + max_heap.insert(19, 19) + max_heap.insert(36, 36) + max_heap.insert(17, 17) + max_heap.insert(3, 3) + max_heap.insert(25, 25) + max_heap.insert(1, 1) + max_heap.insert(2, 2) + max_heap.insert(7, 7) + assert str(max_heap) == \ + ("[(100, 100, [1, 2]), (19, 19, [3, 4]), " + "(36, 36, [5, 6]), (17, 17, [7, 8]), " + "(3, 3, []), (25, 25, []), (1, 1, []), " + "(2, 2, []), (7, 7, [])]") + + assert max_heap.extract().key == 100 + + expected_sorted_elements = [36, 25, 19, 17, 7, 3, 2, 1] + l = max_heap.heap[0].left + l = max_heap.heap[0].right + sorted_elements = [] + for _ in range(8): + sorted_elements.append(max_heap.extract().key) + assert expected_sorted_elements == sorted_elements + + elements = [ + TreeNode(7, 7), TreeNode(25, 25), TreeNode(100, 100), + TreeNode(1, 1), TreeNode(2, 2), TreeNode(3, 3), + TreeNode(17, 17), TreeNode(19, 19), TreeNode(36, 36) + ] + min_heap = BinaryHeap(elements=elements, heap_property="min") + assert min_heap.extract().key == 1 + + expected_sorted_elements = [2, 3, 7, 17, 19, 25, 36, 100] + sorted_elements = [min_heap.extract().key for _ in range(8)] + assert expected_sorted_elements == sorted_elements + + non_TreeNode_elements = [ + (7, 7), TreeNode(25, 25), TreeNode(100, 100), + TreeNode(1, 1), (2, 2), TreeNode(3, 3), + TreeNode(17, 17), TreeNode(19, 19), TreeNode(36, 36) + ] + assert raises(TypeError, lambda: + BinaryHeap(elements = non_TreeNode_elements, heap_property='min')) + + non_TreeNode_elements = DynamicOneDimensionalArray(int, 0) + non_TreeNode_elements.append(1) + non_TreeNode_elements.append(2) + assert raises(TypeError, lambda: + BinaryHeap(elements = non_TreeNode_elements, heap_property='min')) + + 
non_heapable = "[1, 2, 3]" + assert raises(ValueError, lambda: + BinaryHeap(elements = non_heapable, heap_property='min')) + +def test_TernaryHeap(): + max_heap = TernaryHeap(heap_property="max") + assert raises(IndexError, lambda: max_heap.extract()) + max_heap.insert(100, 100) + max_heap.insert(19, 19) + max_heap.insert(36, 36) + max_heap.insert(17, 17) + max_heap.insert(3, 3) + max_heap.insert(25, 25) + max_heap.insert(1, 1) + max_heap.insert(2, 2) + max_heap.insert(7, 7) + assert str(max_heap) == \ + ('[(100, 100, [1, 2, 3]), (25, 25, [4, 5, 6]), ' + '(36, 36, [7, 8]), (17, 17, []), ' + '(3, 3, []), (19, 19, []), (1, 1, []), ' + '(2, 2, []), (7, 7, [])]') + + assert max_heap.extract().key == 100 + + expected_sorted_elements = [36, 25, 19, 17, 7, 3, 2, 1] + sorted_elements = [] + for _ in range(8): + sorted_elements.append(max_heap.extract().key) + assert expected_sorted_elements == sorted_elements + + elements = [ + TreeNode(7, 7), TreeNode(25, 25), TreeNode(100, 100), + TreeNode(1, 1), TreeNode(2, 2), TreeNode(3, 3), + TreeNode(17, 17), TreeNode(19, 19), TreeNode(36, 36) + ] + min_heap = TernaryHeap(elements=elements, heap_property="min") + expected_extracted_element = min_heap.heap[0].key + assert min_heap.extract().key == expected_extracted_element + + expected_sorted_elements = [2, 3, 7, 17, 19, 25, 36, 100] + sorted_elements = [min_heap.extract().key for _ in range(8)] + assert expected_sorted_elements == sorted_elements + +def test_DHeap(): + assert raises(ValueError, lambda: DHeap(heap_property="none", d=4)) + max_heap = DHeap(heap_property="max", d=5) + assert raises(IndexError, lambda: max_heap.extract()) + max_heap.insert(100, 100) + max_heap.insert(19, 19) + max_heap.insert(36, 36) + max_heap.insert(17, 17) + max_heap.insert(3, 3) + max_heap.insert(25, 25) + max_heap.insert(1, 1) + max_heap = DHeap(max_heap.heap, heap_property="max", d=4) + max_heap.insert(2, 2) + max_heap.insert(7, 7) + assert str(max_heap) == \ + ('[(100, 100, [1, 2, 3, 4]), (25, 
25, [5, 6, 7, 8]), ' + '(36, 36, []), (17, 17, []), (3, 3, []), (19, 19, []), ' + '(1, 1, []), (2, 2, []), (7, 7, [])]') + + assert max_heap.extract().key == 100 + + expected_sorted_elements = [36, 25, 19, 17, 7, 3, 2, 1] + sorted_elements = [] + for _ in range(8): + sorted_elements.append(max_heap.extract().key) + assert expected_sorted_elements == sorted_elements + + elements = [ + TreeNode(7, 7), TreeNode(25, 25), TreeNode(100, 100), + TreeNode(1, 1), TreeNode(2, 2), TreeNode(3, 3), + TreeNode(17, 17), TreeNode(19, 19), TreeNode(36, 36) + ] + min_heap = DHeap(elements=DynamicOneDimensionalArray(TreeNode, 9, elements), heap_property="min") + assert min_heap.extract().key == 1 + + expected_sorted_elements = [2, 3, 7, 17, 19, 25, 36, 100] + sorted_elements = [min_heap.extract().key for _ in range(8)] + assert expected_sorted_elements == sorted_elements + +def test_BinomialHeap(): + + # Corner cases + assert raises(TypeError, lambda: + BinomialHeap( + root_list=[BinomialTreeNode(1, 1), None]) + ) is True + tree1 = BinomialTree(BinomialTreeNode(1, 1), 0) + tree2 = BinomialTree(BinomialTreeNode(2, 2), 0) + bh = BinomialHeap(root_list=[tree1, tree2]) + assert raises(TypeError, lambda: + bh.merge_tree(BinomialTreeNode(2, 2), None)) + assert raises(TypeError, lambda: + bh.merge(None)) + + # Testing BinomialHeap.merge + nodes = [BinomialTreeNode(1, 1), # 0 + BinomialTreeNode(3, 3), # 1 + BinomialTreeNode(9, 9), # 2 + BinomialTreeNode(11, 11), # 3 + BinomialTreeNode(6, 6), # 4 + BinomialTreeNode(14, 14), # 5 + BinomialTreeNode(2, 2), # 6 + BinomialTreeNode(7, 7), # 7 + BinomialTreeNode(4, 4), # 8 + BinomialTreeNode(8, 8), # 9 + BinomialTreeNode(12, 12), # 10 + BinomialTreeNode(10, 10), # 11 + BinomialTreeNode(5, 5), # 12 + BinomialTreeNode(21, 21)] # 13 + + nodes[2].add_children(nodes[3]) + nodes[4].add_children(nodes[5]) + nodes[6].add_children(nodes[9], nodes[8], nodes[7]) + nodes[7].add_children(nodes[11], nodes[10]) + nodes[8].add_children(nodes[12]) + 
nodes[10].add_children(nodes[13]) + + tree11 = BinomialTree(nodes[0], 0) + tree12 = BinomialTree(nodes[2], 1) + tree13 = BinomialTree(nodes[6], 3) + tree21 = BinomialTree(nodes[1], 0) + + heap1 = BinomialHeap(root_list=[tree11, tree12, tree13]) + heap2 = BinomialHeap(root_list=[tree21]) + + def bfs(heap): + bfs_trav = [] + for i in range(len(heap.root_list)): + layer = [] + bfs_q = Queue() + bfs_q.append(heap.root_list[i].root) + while len(bfs_q) != 0: + curr_node = bfs_q.popleft() + if curr_node is not None: + layer.append(curr_node.key) + for _i in range(curr_node.children._last_pos_filled + 1): + bfs_q.append(curr_node.children[_i]) + if layer != []: + bfs_trav.append(layer) + return bfs_trav + + heap1.merge(heap2) + expected_bfs_trav = [[1, 3, 9, 11], [2, 8, 4, 7, 5, 10, 12, 21]] + assert bfs(heap1) == expected_bfs_trav + + # Testing Binomial.find_minimum + assert heap1.find_minimum().key == 1 + + # Testing Binomial.delete_minimum + heap1.delete_minimum() + assert bfs(heap1) == [[3], [9, 11], [2, 8, 4, 7, 5, 10, 12, 21]] + assert raises(ValueError, lambda: heap1.decrease_key(nodes[3], 15)) + heap1.decrease_key(nodes[3], 0) + assert bfs(heap1) == [[3], [0, 9], [2, 8, 4, 7, 5, 10, 12, 21]] + heap1.delete(nodes[12]) + assert bfs(heap1) == [[3, 8], [0, 9, 2, 7, 4, 10, 12, 21]] + + # Testing BinomialHeap.insert + heap = BinomialHeap() + assert raises(IndexError, lambda: heap.find_minimum()) + heap.insert(1, 1) + heap.insert(3, 3) + heap.insert(6, 6) + heap.insert(9, 9) + heap.insert(14, 14) + heap.insert(11, 11) + heap.insert(2, 2) + heap.insert(7, 7) + assert bfs(heap) == [[1, 3, 6, 2, 9, 7, 11, 14]] diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/tests/test_m_ary_trees.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/tests/test_m_ary_trees.py new file mode 100644 index 000000000..6cbc84ace --- /dev/null +++ b/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/tests/test_m_ary_trees.py @@ -0,0 
+1,5 @@ +from pydatastructs import MAryTree + +def test_MAryTree(): + m = MAryTree(1, 1) + assert str(m) == '[(1, 1)]' diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/tests/test_space_partitioning_tree.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/tests/test_space_partitioning_tree.py new file mode 100644 index 000000000..99f0e84cc --- /dev/null +++ b/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/tests/test_space_partitioning_tree.py @@ -0,0 +1,20 @@ +from pydatastructs import OneDimensionalSegmentTree +from pydatastructs.utils.raises_util import raises + +def test_OneDimensionalSegmentTree(): + ODST = OneDimensionalSegmentTree + segt = ODST([(0, 5), (1, 6), (9, 13), (1, 2), (3, 8), (9, 20)]) + assert segt.cache is False + segt2 = ODST([(1, 4)]) + assert str(segt2) == ("[(None, [False, 0, 1, False], None, None), " + "(None, [True, 1, 1, True], ['(None, [True, 1, 4, True], None, None)'], " + "None), (None, [False, 1, 4, False], None, None), (None, [True, 4, 4, True], " + "None, None), (0, [False, 0, 1, True], None, 1), (2, [False, 1, 4, True], " + "['(None, [True, 1, 4, True], None, None)'], 3), (4, [False, 0, 4, True], " + "None, 5), (None, [False, 4, 5, False], None, None), (-3, [False, 0, 5, " + "False], None, -2)]") + assert len(segt.query(1.5)) == 3 + assert segt.cache is True + assert len(segt.query(-1)) == 0 + assert len(segt.query(2.8)) == 2 + assert raises(ValueError, lambda: ODST([(1, 2, 3)])) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/__init__.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/__init__.py new file mode 100644 index 000000000..c4971be32 --- /dev/null +++ b/build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/__init__.py @@ -0,0 +1,29 @@ +__all__ = [] + +from . 
import ( + misc_util, + testing_util, +) + +from .misc_util import ( + TreeNode, + MAryTreeNode, + LinkedListNode, + BinomialTreeNode, + AdjacencyListGraphNode, + AdjacencyMatrixGraphNode, + GraphEdge, + Set, + CartesianTreeNode, + RedBlackTreeNode, + TrieNode, + SkipNode, + summation, + greatest_common_divisor, + minimum, + Backend +) +from .testing_util import test + +__all__.extend(misc_util.__all__) +__all__.extend(testing_util.__all__) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/_backend/__init__.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/_backend/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/misc_util.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/misc_util.py new file mode 100644 index 000000000..3672c58b9 --- /dev/null +++ b/build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/misc_util.py @@ -0,0 +1,632 @@ +import math, pydatastructs +from enum import Enum +from pydatastructs.utils._backend.cpp import _nodes, _graph_utils + +__all__ = [ + 'TreeNode', + 'MAryTreeNode', + 'LinkedListNode', + 'BinomialTreeNode', + 'AdjacencyListGraphNode', + 'AdjacencyMatrixGraphNode', + 'GraphEdge', + 'Set', + 'CartesianTreeNode', + 'RedBlackTreeNode', + 'TrieNode', + 'SkipNode', + 'minimum', + 'summation', + 'greatest_common_divisor', + 'Backend' +] + + +class Backend(Enum): + + PYTHON = 'Python' + CPP = 'Cpp' + LLVM = 'Llvm' + + def __str__(self): + return self.value + +def raise_if_backend_is_not_python(api, backend): + if backend != Backend.PYTHON: + raise ValueError("As of {} version, only {} backend is supported for {} API".format( + pydatastructs.__version__, str(Backend.PYTHON), api)) + +_check_type = lambda a, t: isinstance(a, t) +NoneType = type(None) + +class Node(object): + """ + Abstract class representing a node. 
+ """ + pass + +class TreeNode(Node): + """ + Represents node in trees. + + Parameters + ========== + + key + Required for comparison operations. + data + Any valid data to be stored in the node. + left: int + Optional, index of the left child node. + right: int + Optional, index of the right child node. + backend: pydatastructs.Backend + The backend to be used. Available backends: Python and C++ + Optional, by default, the Python backend is used. For faster execution, use the C++ backend. + """ + + __slots__ = ['key', 'data', 'left', 'right', 'is_root', + 'height', 'parent', 'size'] + + @classmethod + def methods(cls): + return ['__new__', '__str__'] + + def __new__(cls, key, data=None, **kwargs): + backend = kwargs.get('backend', Backend.PYTHON) + if backend == Backend.CPP: + return _nodes.TreeNode(key, data, **kwargs) + obj = Node.__new__(cls) + obj.data, obj.key = data, key + obj.left, obj.right, obj.parent, obj.height, obj.size = \ + None, None, None, 0, 1 + obj.is_root = False + return obj + + def __str__(self): + """ + Used for printing. + """ + return str((self.left, self.key, self.data, self.right)) + +class CartesianTreeNode(TreeNode): + """ + Represents node in cartesian trees. + + Parameters + ========== + + key + Required for comparison operations. + data + Any valid data to be stored in the node. + priority: int + An integer value for heap property. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + """ + __slots__ = ['key', 'data', 'priority'] + + def __new__(cls, key, priority, data=None, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = TreeNode.__new__(cls, key, data) + obj.priority = priority + return obj + + def __str__(self): + """ + Used for printing. + """ + return str((self.left, self.key, self.priority, self.data, self.right)) + +class RedBlackTreeNode(TreeNode): + """ + Represents node in red-black trees. 
+ + Parameters + ========== + + key + Required for comparison operations. + data + Any valid data to be stored in the node. + color + 0 for black and 1 for red. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + """ + __slots__ = ['key', 'data', 'color'] + + @classmethod + def methods(cls): + return ['__new__'] + + def __new__(cls, key, data=None, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = TreeNode.__new__(cls, key, data) + obj.color = 1 + return obj + +class BinomialTreeNode(TreeNode): + """ + Represents node in binomial trees. + + Parameters + ========== + + key + Required for comparison operations. + data + Any valid data to be stored in the node. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Note + ==== + + The following are the data members of the class: + + parent: BinomialTreeNode + A reference to the BinomialTreeNode object + which is a prent of this. + children: DynamicOneDimensionalArray + An array of references to BinomialTreeNode objects + which are children this node. + is_root: bool, by default, False + If the current node is a root of the tree then + set it to True otherwise False. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. 
+ """ + __slots__ = ['parent', 'key', 'children', 'data', 'is_root'] + + @classmethod + def methods(cls): + return ['__new__', 'add_children', '__str__'] + + def __new__(cls, key, data=None, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + from pydatastructs.linear_data_structures.arrays import DynamicOneDimensionalArray + obj = Node.__new__(cls) + obj.data, obj.key = data, key + obj.children, obj.parent, obj.is_root = ( + DynamicOneDimensionalArray(BinomialTreeNode, 0), + None, + False + ) + return obj + + def add_children(self, *children): + """ + Adds children of current node. + """ + for child in children: + self.children.append(child) + child.parent = self + + def __str__(self): + """ + For printing the key and data. + """ + return str((self.key, self.data)) + +class MAryTreeNode(TreeNode): + """ + Represents node in an M-ary trees. + + Parameters + ========== + + key + Required for comparison operations. + data + Any valid data to be stored in the node. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Note + ==== + + The following are the data members of the class: + + children: DynamicOneDimensionalArray + An array of indices which stores the children of + this node in the M-ary tree array + is_root: bool, by default, False + If the current node is a root of the tree then + set it to True otherwise False. 
+ """ + __slots__ = ['key', 'children', 'data', 'is_root'] + + @classmethod + def methods(cls): + return ['__new__', 'add_children', '__str__'] + + def __new__(cls, key, data=None, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + from pydatastructs.linear_data_structures.arrays import DynamicOneDimensionalArray + obj = Node.__new__(cls) + obj.data = data + obj.key = key + obj.is_root = False + obj.children = DynamicOneDimensionalArray(int, 0) + return obj + + def add_children(self, *children): + """ + Adds children of current node. + """ + for child in children: + self.children.append(child) + + def __str__(self): + return str((self.key, self.data)) + + +class LinkedListNode(Node): + """ + Represents node in linked lists. + + Parameters + ========== + + key + Any valid identifier to uniquely + identify the node in the linked list. + data + Any valid data to be stored in the node. + links + List of names of attributes which should + be used as links to other nodes. + addrs + List of address of nodes to be assigned to + each of the attributes in links. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + """ + @classmethod + def methods(cls): + return ['__new__', '__str__'] + + def __new__(cls, key, data=None, links=None, addrs=None, + **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + if links is None: + links = ['next'] + if addrs is None: + addrs = [None] + obj = Node.__new__(cls) + obj.key = key + obj.data = data + for link, addr in zip(links, addrs): + obj.__setattr__(link, addr) + obj.__slots__ = ['key', 'data'] + links + return obj + + def __str__(self): + return str((self.key, self.data)) + +class SkipNode(Node): + """ + Represents node in linked lists. + + Parameters + ========== + + key + Any valid identifier to uniquely + identify the node in the skip list. 
+ data + Any valid data to be stored in the node. + next + Reference to the node lying just forward + to the current node. + Optional, by default, None. + down + Reference to the node lying just below the + current node. + Optional, by default, None. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + """ + + __slots__ = ['key', 'data', 'next', 'down'] + + def __new__(cls, key, data=None, next=None, down=None, + **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = Node.__new__(cls) + obj.key, obj.data = key, data + obj.next, obj.down = next, down + return obj + + def __str__(self): + return str((self.key, self.data)) + +class GraphNode(Node): + """ + Abastract class for graph nodes/vertices. + """ + def __str__(self): + return str((self.name, self.data)) + +class AdjacencyListGraphNode(GraphNode): + """ + Represents nodes for adjacency list implementation + of graphs. + + Parameters + ========== + + name: str + The name of the node by which it is identified + in the graph. Must be unique. + data + The data to be stored at each graph node. + adjacency_list: list + Any valid iterator to initialize the adjacent + nodes of the current node. + Optional, by default, None + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. 
+ """ + @classmethod + def methods(cls): + return ['__new__', 'add_adjacent_node', + 'remove_adjacent_node'] + + def __new__(cls, name, data=None, adjacency_list=[], + **kwargs): + backend = kwargs.get('backend', Backend.PYTHON) + if backend == Backend.PYTHON: + obj = GraphNode.__new__(cls) + obj.name, obj.data = str(name), data + obj._impl = 'adjacency_list' + if len(adjacency_list) > 0: + for node in adjacency_list: + obj.__setattr__(node.name, node) + obj.adjacent = adjacency_list if len(adjacency_list) > 0 \ + else [] + return obj + else: + return _graph_utils.AdjacencyListGraphNode(name, data, adjacency_list) + + def add_adjacent_node(self, name, data=None): + """ + Adds adjacent node to the current node's + adjacency list with given name and data. + """ + if hasattr(self, name): + getattr(self, name).data = data + else: + new_node = AdjacencyListGraphNode(name, data) + self.__setattr__(new_node.name, new_node) + self.adjacent.append(new_node.name) + + def remove_adjacent_node(self, name): + """ + Removes node with given name from + adjacency list. + """ + if not hasattr(self, name): + raise ValueError("%s is not adjacent to %s"%(name, self.name)) + self.adjacent.remove(name) + delattr(self, name) + +class AdjacencyMatrixGraphNode(GraphNode): + """ + Represents nodes for adjacency matrix implementation + of graphs. + + Parameters + ========== + + name: str + The index of the node in the AdjacencyMatrix. + data + The data to be stored at each graph node. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. 
+ """ + __slots__ = ['name', 'data'] + + @classmethod + def methods(cls): + return ['__new__'] + + def __new__(cls, name, data=None, + **kwargs): + backend = kwargs.get('backend', Backend.PYTHON) + if backend == Backend.PYTHON: + obj = GraphNode.__new__(cls) + obj.name, obj.data, obj.is_connected = \ + str(name), data, None + obj._impl = 'adjacency_matrix' + return obj + else: + return _graph_utils.AdjacencyMatrixGraphNode(str(name), data) + +class GraphEdge(object): + """ + Represents the concept of edges in graphs. + + Parameters + ========== + + node1: GraphNode or it's child classes + The source node of the edge. + node2: GraphNode or it's child classes + The target node of the edge. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + """ + @classmethod + def methods(cls): + return ['__new__', '__str__'] + + def __new__(cls, node1, node2, value=None, + **kwargs): + backend = kwargs.get('backend', Backend.PYTHON) + if backend == Backend.PYTHON: + obj = object.__new__(cls) + obj.source, obj.target = node1, node2 + obj.value = value + return obj + else: + return _graph_utils.GraphEdge(node1, node2, value) + + def __str__(self): + return str((self.source.name, self.target.name)) + +class Set(object): + """ + Represents a set in a forest of disjoint sets. + + Parameters + ========== + + key: Hashable python object + The key which uniquely identifies + the set. + data: Python object + The data to be stored in the set. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. 
+ """ + + __slots__ = ['parent', 'size', 'key', 'data'] + + @classmethod + def methods(cls): + return ['__new__'] + + def __new__(cls, key, data=None, + **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = object.__new__(cls) + obj.key = key + obj.data = data + obj.parent, obj.size = [None]*2 + return obj + +class TrieNode(Node): + """ + Represents nodes in the trie data structure. + + Parameters + ========== + + char: The character stored in the current node. + Optional, by default None. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + """ + + __slots__ = ['char', '_children', 'is_terminal'] + + @classmethod + def methods(cls): + return ['__new__', 'add_child', 'get_child', 'remove_child'] + + def __new__(cls, char=None, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = Node.__new__(cls) + obj.char = char + obj._children = {} + obj.is_terminal = False + return obj + + def add_child(self, trie_node) -> None: + self._children[trie_node.char] = trie_node + + def get_child(self, char: str): + return self._children.get(char, None) + + def remove_child(self, char: str) -> None: + self._children.pop(char) + +def _comp(u, v, tcomp): + """ + Overloaded comparator for comparing + two values where any one of them can be + `None`. + """ + if u is None and v is not None: + return False + elif u is not None and v is None: + return True + elif u is None and v is None: + return False + else: + return tcomp(u, v) + +def _check_range_query_inputs(input, bounds): + start, end = input + if start >= end: + raise ValueError("Input (%d, %d) range is empty."%(start, end)) + if start < bounds[0] or end > bounds[1]: + raise IndexError("Input (%d, %d) range is out of " + "bounds of array indices (%d, %d)." 
+ %(start, end, bounds[0], bounds[1])) + +def minimum(x_y): + if len(x_y) == 1: + return x_y[0] + + x, y = x_y + if x is None or y is None: + return x if y is None else y + + return min(x, y) + +def greatest_common_divisor(x_y): + if len(x_y) == 1: + return x_y[0] + + x, y = x_y + if x is None or y is None: + return x if y is None else y + + return math.gcd(x, y) + +def summation(x_y): + if len(x_y) == 1: + return x_y[0] + + x, y = x_y + if x is None or y is None: + return x if y is None else y + + return x + y diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/raises_util.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/raises_util.py new file mode 100644 index 000000000..3a324d38d --- /dev/null +++ b/build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/raises_util.py @@ -0,0 +1,17 @@ +import pytest + +def raises(exception, code): + """ + Utility for testing exceptions. + + Parameters + ========== + + exception + A valid python exception + code: lambda + Code that causes exception + """ + with pytest.raises(exception): + code() + return True diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/testing_util.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/testing_util.py new file mode 100644 index 000000000..e5c0627b5 --- /dev/null +++ b/build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/testing_util.py @@ -0,0 +1,83 @@ +import os +import pathlib +import glob +import types + +__all__ = ['test'] + + +# Root pydatastructs directory +ROOT_DIR = pathlib.Path(os.path.abspath(__file__)).parents[1] + + +SKIP_FILES = ['testing_util.py'] + +def test(submodules=None, only_benchmarks=False, + benchmarks_size=1000, **kwargs): + """ + Runs the library tests using pytest + + Parameters + ========== + + submodules: Optional, list[str] + List of submodules test to run. 
By default runs + all the tests + """ + try: + import pytest + except ImportError: + raise Exception("pytest must be installed. Use `pip install pytest` " + "to install it.") + + # set benchmarks size + os.environ["PYDATASTRUCTS_BENCHMARK_SIZE"] = str(benchmarks_size) + test_files = [] + if submodules: + if not isinstance(submodules, (list, tuple)): + submodules = [submodules] + for path in glob.glob(f'{ROOT_DIR}/**/test_*.py', recursive=True): + skip_test = False + for skip in SKIP_FILES: + if skip in path: + skip_test = True + break + if skip_test: + continue + for sub_var in submodules: + if isinstance(sub_var, types.ModuleType): + sub = sub_var.__name__.split('.')[-1] + elif isinstance(sub_var, str): + sub = sub_var + else: + raise Exception("Submodule should be of type: str or module") + if sub in path: + if not only_benchmarks: + if 'benchmarks' not in path: + test_files.append(path) + else: + if 'benchmarks' in path: + test_files.append(path) + break + else: + for path in glob.glob(f'{ROOT_DIR}/**/test_*.py', recursive=True): + skip_test = False + for skip in SKIP_FILES: + if skip in path: + skip_test = True + break + if skip_test: + continue + if not only_benchmarks: + if 'benchmarks' not in path: + test_files.append(path) + else: + if 'benchmarks' in path: + test_files.append(path) + + extra_args = [] + if kwargs.get("n", False) is not False: + extra_args.append("-n") + extra_args.append(str(kwargs["n"])) + + pytest.main(extra_args + test_files) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/tests/__init__.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/tests/test_code_quality.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/tests/test_code_quality.py new file mode 100644 index 000000000..67afe49e8 --- /dev/null +++ 
b/build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/tests/test_code_quality.py @@ -0,0 +1,239 @@ +import os, re, sys, pydatastructs, inspect +from typing import Type +import pytest + +def _list_files(checker): + root_path = os.path.abspath( + os.path.join( + os.path.split(__file__)[0], + os.pardir, os.pardir)) + code_files = [] + for (dirpath, _, filenames) in os.walk(root_path): + for _file in filenames: + if checker(_file): + code_files.append(os.path.join(dirpath, _file)) + return code_files + +checker = lambda _file: (re.match(r".*\.py$", _file) or + re.match(r".*\.cpp$", _file) or + re.match(r".*\.hpp$", _file)) +code_files = _list_files(checker) + +def test_trailing_white_spaces(): + messages = [("The following places in your code " + "end with white spaces.")] + msg = "{}:{}" + for file_path in code_files: + file = open(file_path, "r") + line = file.readline() + line_number = 1 + while line != "": + if line.endswith(" \n") or line.endswith("\t\n") \ + or line.endswith(" ") or line.endswith("\t"): + messages.append(msg.format(file_path, line_number)) + line = file.readline() + line_number += 1 + file.close() + + if len(messages) > 1: + assert False, '\n'.join(messages) + +def test_final_new_lines(): + messages = [("The following files in your code " + "do not end with a single new line.")] + msg1 = "No new line in {}:{}" + msg2 = "More than one new line in {}:{}" + for file_path in code_files: + file = open(file_path, "r") + lines = [] + line = file.readline() + while line != "": + lines.append(line) + line = file.readline() + if lines: + if lines[-1][-1] != "\n": + messages.append(msg1.format(file_path, len(lines))) + if lines[-1] == "\n" and lines[-2][-1] == "\n": + messages.append(msg2.format(file_path, len(lines))) + file.close() + + if len(messages) > 1: + assert False, '\n'.join(messages) + +def test_comparison_True_False_None(): + messages = [("The following places in your code " + "use `!=` or `==` for comparing True/False/None." 
+ "Please use `is` instead.")] + msg = "{}:{}" + checker = lambda _file: re.match(r".*\.py$", _file) + py_files = _list_files(checker) + for file_path in py_files: + if file_path.find("test_code_quality.py") == -1: + file = open(file_path, "r") + line = file.readline() + line_number = 1 + while line != "": + if ((line.find("== True") != -1) or + (line.find("== False") != -1) or + (line.find("== None") != -1) or + (line.find("!= True") != -1) or + (line.find("!= False") != -1) or + (line.find("!= None") != -1)): + messages.append(msg.format(file_path, line_number)) + line = file.readline() + line_number += 1 + file.close() + + if len(messages) > 1: + assert False, '\n'.join(messages) + +@pytest.mark.xfail +def test_reinterpret_cast(): + + def is_variable(str): + for ch in str: + if not (ch == '_' or ch.isalnum()): + return False + return True + + checker = lambda _file: (re.match(r".*\.cpp$", _file) or + re.match(r".*\.hpp$", _file)) + cpp_files = _list_files(checker) + messages = [("The following lines should use reinterpret_cast" + " to cast pointers from one type to another")] + msg = "Casting to {} at {}:{}" + for file_path in cpp_files: + file = open(file_path, "r") + line = file.readline() + line_number = 1 + while line != "": + found_open = False + between_open_close = "" + for char in line: + if char == '(': + found_open = True + elif char == ')': + if (between_open_close and + between_open_close[-1] == '*' and + is_variable(between_open_close[:-1])): + messages.append(msg.format(between_open_close[:-1], + file_path, line_number)) + between_open_close = "" + found_open = False + elif char != ' ' and found_open: + between_open_close += char + line = file.readline() + line_number += 1 + file.close() + + if len(messages) > 1: + assert False, '\n'.join(messages) + +def test_presence_of_tabs(): + messages = [("The following places in your code " + "use tabs instead of spaces.")] + msg = "{}:{}" + for file_path in code_files: + file = open(file_path, "r") + 
line_number = 1 + line = file.readline() + while line != "": + if (line.find('\t') != -1): + messages.append(msg.format(file_path, line_number)) + line = file.readline() + line_number += 1 + file.close() + + if len(messages) > 1: + assert False, '\n'.join(messages) + +def _apis(): + import pydatastructs as pyds + return [ + pyds.graphs.adjacency_list.AdjacencyList, + pyds.graphs.adjacency_matrix.AdjacencyMatrix, + pyds.DoublyLinkedList, pyds.SinglyLinkedList, + pyds.SinglyCircularLinkedList, + pyds.DoublyCircularLinkedList, + pyds.OneDimensionalArray, pyds.MultiDimensionalArray, + pyds.DynamicOneDimensionalArray, + pyds.trees.BinaryTree, pyds.BinarySearchTree, + pyds.AVLTree, pyds.SplayTree, pyds.BinaryTreeTraversal, + pyds.DHeap, pyds.BinaryHeap, pyds.TernaryHeap, pyds.BinomialHeap, + pyds.MAryTree, pyds.OneDimensionalSegmentTree, + pyds.Queue, pyds.miscellaneous_data_structures.queue.ArrayQueue, + pyds.miscellaneous_data_structures.queue.LinkedListQueue, + pyds.PriorityQueue, + pyds.miscellaneous_data_structures.queue.LinkedListPriorityQueue, + pyds.miscellaneous_data_structures.queue.BinaryHeapPriorityQueue, + pyds.miscellaneous_data_structures.queue.BinomialHeapPriorityQueue, + pyds.Stack, pyds.miscellaneous_data_structures.stack.ArrayStack, + pyds.miscellaneous_data_structures.stack.LinkedListStack, + pyds.DisjointSetForest, pyds.BinomialTree, pyds.TreeNode, pyds.MAryTreeNode, + pyds.LinkedListNode, pyds.BinomialTreeNode, pyds.AdjacencyListGraphNode, + pyds.AdjacencyMatrixGraphNode, pyds.GraphEdge, pyds.Set, pyds.BinaryIndexedTree, + pyds.CartesianTree, pyds.CartesianTreeNode, pyds.Treap, pyds.RedBlackTreeNode, pyds.RedBlackTree, + pyds.Trie, pyds.TrieNode, pyds.SkipList, pyds.RangeQueryStatic, pyds.RangeQueryDynamic, pyds.SparseTable, + pyds.miscellaneous_data_structures.segment_tree.OneDimensionalArraySegmentTree, + pyds.bubble_sort, pyds.linear_search, pyds.binary_search, pyds.jump_search, + pyds.selection_sort, pyds.insertion_sort, pyds.quick_sort, 
pyds.intro_sort] + +def test_public_api(): + pyds = pydatastructs + apis = _apis() + print("\n\nAPI Report") + print("==========") + for name in apis: + if inspect.isclass(name): + _class = name + mro = _class.__mro__ + must_methods = _class.methods() + print("\n" + str(name)) + print("Methods Implemented") + print(must_methods) + print("Parent Classes") + print(mro[1:]) + for supercls in mro: + if supercls != _class: + for method in must_methods: + if hasattr(supercls, method) and \ + getattr(supercls, method) == \ + getattr(_class, method): + assert False, ("%s class doesn't " + "have %s method implemented."%( + _class, method + )) + +def test_backend_argument_message(): + + import pydatastructs as pyds + backend_implemented = [ + pyds.OneDimensionalArray, + pyds.DynamicOneDimensionalArray, + pyds.quick_sort, + pyds.AdjacencyListGraphNode, + pyds.AdjacencyMatrixGraphNode, + pyds.GraphEdge + ] + + def call_and_raise(api, pos_args_count=0): + try: + if pos_args_count == 0: + api(backend=None) + elif pos_args_count == 1: + api(None, backend=None) + elif pos_args_count == 2: + api(None, None, backend=None) + except ValueError as value_error: + assert str(api) in value_error.args[0] + except TypeError as type_error: + max_pos_args_count = 2 + if pos_args_count <= max_pos_args_count: + call_and_raise(api, pos_args_count + 1) + else: + raise type_error + + apis = _apis() + for api in apis: + if api not in backend_implemented: + call_and_raise(api, 0) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/tests/test_misc_util.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/tests/test_misc_util.py new file mode 100644 index 000000000..13ba2ec8e --- /dev/null +++ b/build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/tests/test_misc_util.py @@ -0,0 +1,84 @@ +from pydatastructs.utils import (TreeNode, AdjacencyListGraphNode, AdjacencyMatrixGraphNode, + GraphEdge, BinomialTreeNode, MAryTreeNode, 
CartesianTreeNode, RedBlackTreeNode, SkipNode) +from pydatastructs.utils.raises_util import raises +from pydatastructs.utils.misc_util import Backend + +def test_cpp_TreeNode(): + n = TreeNode(1,100,backend=Backend.CPP) + assert str(n) == "(None, 1, 100, None)" + +def test_AdjacencyListGraphNode(): + g_1 = AdjacencyListGraphNode('g_1', 1) + g_2 = AdjacencyListGraphNode('g_2', 2) + g = AdjacencyListGraphNode('g', 0, adjacency_list=[g_1, g_2]) + g.add_adjacent_node('g_3', 3) + assert g.g_1.name == 'g_1' + assert g.g_2.name == 'g_2' + assert g.g_3.name == 'g_3' + g.remove_adjacent_node('g_3') + assert hasattr(g, 'g_3') is False + assert raises(ValueError, lambda: g.remove_adjacent_node('g_3')) + g.add_adjacent_node('g_1', 4) + assert g.g_1.data == 4 + assert str(g) == "('g', 0)" + + h_1 = AdjacencyListGraphNode('h_1', 1, backend = Backend.CPP) + h_2 = AdjacencyListGraphNode('h_2', 2, backend = Backend.CPP) + assert str(h_1) == "('h_1', 1)" + h = AdjacencyListGraphNode('h', 0, adjacency_list = [h_1, h_2], backend = Backend.CPP) + h.add_adjacent_node('h_3', 3) + assert h.adjacent['h_1'].name == 'h_1' + assert h.adjacent['h_2'].name == 'h_2' + assert h.adjacent['h_3'].name == 'h_3' + h.remove_adjacent_node('h_3') + assert 'h_3' not in h.adjacent + assert raises(ValueError, lambda: h.remove_adjacent_node('h_3')) + h.add_adjacent_node('h_1', 4) + assert h.adjacent['h_1'] == 4 + assert str(h) == "('h', 0)" + h_5 = AdjacencyListGraphNode('h_5', h_1, backend = Backend.CPP) + assert h_5.data == h_1 + +def test_AdjacencyMatrixGraphNode(): + g = AdjacencyMatrixGraphNode("1", 3) + g2 = AdjacencyMatrixGraphNode("1", 3, backend = Backend.CPP) + assert str(g) == "('1', 3)" + assert str(g2) == "('1', 3)" + g3 = AdjacencyListGraphNode("3", g2, backend = Backend.CPP) + assert g3.data == g2 + + +def test_GraphEdge(): + g_1 = AdjacencyListGraphNode('g_1', 1) + g_2 = AdjacencyListGraphNode('g_2', 2) + e = GraphEdge(g_1, g_2, value=2) + assert str(e) == "('g_1', 'g_2')" + + h_1 = 
AdjacencyListGraphNode('h_1', 1, backend = Backend.CPP) + h_2 = AdjacencyListGraphNode('h_2', 2, backend = Backend.CPP) + e2 = GraphEdge(h_1, h_2, value = 2, backend = Backend.CPP) + assert str(e2) == "('h_1', 'h_2', 2)" + +def test_BinomialTreeNode(): + b = BinomialTreeNode(1,1) + b.add_children(*[BinomialTreeNode(i,i) for i in range(2,10)]) + assert str(b) == '(1, 1)' + assert str(b.children) == "['(2, 2)', '(3, 3)', '(4, 4)', '(5, 5)', '(6, 6)', '(7, 7)', '(8, 8)', '(9, 9)']" + +def test_MAryTreeNode(): + m = MAryTreeNode(1, 1) + m.add_children(*list(range(2, 10))) + assert str(m) == "(1, 1)" + assert str(m.children) == "['2', '3', '4', '5', '6', '7', '8', '9']" + +def test_CartesianTreeNode(): + c = CartesianTreeNode(1, 1, 1) + assert str(c) == "(None, 1, 1, 1, None)" + +def test_RedBlackTreeNode(): + c = RedBlackTreeNode(1, 1) + assert str(c) == "(None, 1, 1, None)" + +def test_SkipNode(): + c = SkipNode(1) + assert str(c) == '(1, None)' diff --git a/lib/python3.12/site-packages/pydatastructs/__init__.py b/lib/python3.12/site-packages/pydatastructs/__init__.py new file mode 100644 index 000000000..27cc5a202 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/__init__.py @@ -0,0 +1,8 @@ +from .utils import * +from .linear_data_structures import * +from .trees import * +from .miscellaneous_data_structures import * +from .graphs import * +from .strings import * + +__version__ = "1.0.1-dev" diff --git a/lib/python3.12/site-packages/pydatastructs/graphs/__init__.py b/lib/python3.12/site-packages/pydatastructs/graphs/__init__.py new file mode 100644 index 000000000..21e0a5f35 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/graphs/__init__.py @@ -0,0 +1,28 @@ +__all__ = [] + +from . import graph +from .graph import ( + Graph +) +__all__.extend(graph.__all__) + +from . import algorithms +from . import adjacency_list +from . 
import adjacency_matrix + +from .algorithms import ( + breadth_first_search, + breadth_first_search_parallel, + minimum_spanning_tree, + minimum_spanning_tree_parallel, + strongly_connected_components, + depth_first_search, + shortest_paths, + all_pair_shortest_paths, + topological_sort, + topological_sort_parallel, + max_flow, + find_bridges +) + +__all__.extend(algorithms.__all__) diff --git a/lib/python3.12/site-packages/pydatastructs/graphs/_backend/__init__.py b/lib/python3.12/site-packages/pydatastructs/graphs/_backend/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/lib/python3.12/site-packages/pydatastructs/graphs/adjacency_list.py b/lib/python3.12/site-packages/pydatastructs/graphs/adjacency_list.py new file mode 100644 index 000000000..bd901b380 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/graphs/adjacency_list.py @@ -0,0 +1,101 @@ +from pydatastructs.graphs.graph import Graph +from pydatastructs.graphs._backend.cpp import _graph +from pydatastructs.utils.misc_util import ( + GraphEdge, Backend, raise_if_backend_is_not_python) + +__all__ = [ + 'AdjacencyList' +] + +class AdjacencyList(Graph): + """ + Adjacency list implementation of graphs. 
+ + See also + ======== + + pydatastructs.graphs.graph.Graph + """ + def __new__(cls, *vertices, **kwargs): + + backend = kwargs.get('backend', Backend.PYTHON) + if backend == Backend.PYTHON: + obj = object.__new__(cls) + for vertex in vertices: + obj.__setattr__(vertex.name, vertex) + obj.vertices = [vertex.name for vertex in vertices] + obj.edge_weights = {} + obj._impl = 'adjacency_list' + return obj + else: + graph = _graph.AdjacencyListGraph() + for vertice in vertices: + graph.add_vertex(vertice) + return graph + + @classmethod + def methods(self): + return ['is_adjacent', 'neighbors', + 'add_vertex', 'remove_vertex', 'add_edge', + 'get_edge', 'remove_edge', '__new__'] + + def is_adjacent(self, node1, node2): + node1 = self.__getattribute__(node1) + return hasattr(node1, node2) + + def num_vertices(self): + return len(self.vertices) + + def num_edges(self): + return sum(len(self.neighbors(v)) for v in self.vertices) + + def neighbors(self, node): + node = self.__getattribute__(node) + return [self.__getattribute__(name) for name in node.adjacent] + + def add_vertex(self, node): + if not hasattr(self, node.name): + self.vertices.append(node.name) + self.__setattr__(node.name, node) + + def remove_vertex(self, name): + delattr(self, name) + self.vertices.remove(name) + for node in self.vertices: + node_obj = self.__getattribute__(node) + if hasattr(node_obj, name): + delattr(node_obj, name) + node_obj.adjacent.remove(name) + + def add_edge(self, source, target, cost=None): + source, target = str(source), str(target) + error_msg = ("Vertex %s is not present in the graph." + "Call Graph.add_vertex to add a new" + "vertex. Graph.add_edge is only responsible" + "for adding edges and it will not add new" + "vertices on its own. 
This is done to maintain" + "clear separation between the functionality of" + "these two methods.") + if not hasattr(self, source): + raise ValueError(error_msg % (source)) + if not hasattr(self, target): + raise ValueError(error_msg % (target)) + + source, target = self.__getattribute__(source), \ + self.__getattribute__(target) + source.add_adjacent_node(target.name) + if cost is not None: + self.edge_weights[source.name + "_" + target.name] = \ + GraphEdge(source, target, cost) + + def get_edge(self, source, target): + return self.edge_weights.get( + source + "_" + target, + None) + + def remove_edge(self, source, target): + source, target = self.__getattribute__(source), \ + self.__getattribute__(target) + source.remove_adjacent_node(target.name) + self.edge_weights.pop(source.name + "_" + target.name, + None) diff --git a/lib/python3.12/site-packages/pydatastructs/graphs/adjacency_matrix.py b/lib/python3.12/site-packages/pydatastructs/graphs/adjacency_matrix.py new file mode 100644 index 000000000..9c2326b86 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/graphs/adjacency_matrix.py @@ -0,0 +1,100 @@ +from pydatastructs.graphs.graph import Graph +from pydatastructs.graphs._backend.cpp import _graph +from pydatastructs.utils.misc_util import ( + GraphEdge, raise_if_backend_is_not_python, + Backend) + +__all__ = [ + 'AdjacencyMatrix' +] + +class AdjacencyMatrix(Graph): + """ + Adjacency matrix implementation of graphs. 
+ + See also + ======== + + pydatastructs.graphs.graph.Graph + """ + def __new__(cls, *vertices, **kwargs): + backend = kwargs.get('backend', Backend.PYTHON) + if backend == Backend.PYTHON: + obj = object.__new__(cls) + obj.vertices = [vertex.name for vertex in vertices] + for vertex in vertices: + obj.__setattr__(vertex.name, vertex) + obj.matrix = {} + for vertex in vertices: + obj.matrix[vertex.name] = {} + obj.edge_weights = {} + obj._impl = 'adjacency_matrix' + return obj + else: + return _graph.AdjacencyMatrixGraph(vertices) + + @classmethod + def methods(self): + return ['is_adjacent', 'neighbors', + 'add_edge', 'get_edge', 'remove_edge', + '__new__'] + + def is_adjacent(self, node1, node2): + node1, node2 = str(node1), str(node2) + row = self.matrix.get(node1, {}) + return row.get(node2, False) is not False + + def num_vertices(self): + return len(self.vertices) + + def num_edges(self): + return sum(len(v) for v in self.matrix.values()) + + def neighbors(self, node): + node = str(node) + neighbors = [] + row = self.matrix.get(node, {}) + for node, presence in row.items(): + if presence: + neighbors.append(self.__getattribute__( + str(node))) + return neighbors + + def add_vertex(self, node): + raise NotImplementedError("Currently we allow " + "adjacency matrix for static graphs only") + + def remove_vertex(self, node): + raise NotImplementedError("Currently we allow " + "adjacency matrix for static graphs only.") + + def add_edge(self, source, target, cost=None): + source, target = str(source), str(target) + error_msg = ("Vertex %s is not present in the graph." + "Call Graph.add_vertex to add a new" + "vertex. Graph.add_edge is only responsible" + "for adding edges and it will not add new" + "vertices on its own. 
This is done to maintain" + "clear separation between the functionality of" + "these two methods.") + if source not in self.matrix: + raise ValueError(error_msg % (source)) + if target not in self.matrix: + raise ValueError(error_msg % (target)) + + self.matrix[source][target] = True + if cost is not None: + self.edge_weights[source + "_" + target] = \ + GraphEdge(self.__getattribute__(source), + self.__getattribute__(target), + cost) + + def get_edge(self, source, target): + return self.edge_weights.get( + str(source) + "_" + str(target), + None) + + def remove_edge(self, source, target): + source, target = str(source), str(target) + self.matrix[source][target] = False + self.edge_weights.pop(str(source) + "_" + str(target), None) diff --git a/lib/python3.12/site-packages/pydatastructs/graphs/algorithms.py b/lib/python3.12/site-packages/pydatastructs/graphs/algorithms.py new file mode 100644 index 000000000..9324b7278 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/graphs/algorithms.py @@ -0,0 +1,1386 @@ +""" +Contains algorithms associated with graph +data structure. 
+""" +from collections import deque +from concurrent.futures import ThreadPoolExecutor +from pydatastructs.utils.misc_util import ( + _comp, raise_if_backend_is_not_python, Backend, AdjacencyListGraphNode) +from pydatastructs.miscellaneous_data_structures import ( + DisjointSetForest, PriorityQueue) +from pydatastructs.graphs.graph import Graph +from pydatastructs.linear_data_structures.algorithms import merge_sort_parallel +from pydatastructs import PriorityQueue + +__all__ = [ + 'breadth_first_search', + 'breadth_first_search_parallel', + 'minimum_spanning_tree', + 'minimum_spanning_tree_parallel', + 'strongly_connected_components', + 'depth_first_search', + 'shortest_paths', + 'all_pair_shortest_paths', + 'topological_sort', + 'topological_sort_parallel', + 'max_flow', + 'find_bridges' +] + +Stack = Queue = deque + +def breadth_first_search( + graph, source_node, operation, *args, **kwargs): + """ + Implementation of serial breadth first search(BFS) + algorithm. + + Parameters + ========== + + graph: Graph + The graph on which BFS is to be performed. + source_node: str + The name of the source node from where the BFS is + to be initiated. + operation: function + The function which is to be applied + on every node when it is visited. + The prototype which is to be followed is, + `function_name(curr_node, next_node, + arg_1, arg_2, . . ., arg_n)`. + Here, the first two arguments denote, the + current node and the node next to current node. + The rest of the arguments are optional and you can + provide your own stuff there. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Note + ==== + + You should pass all the arguments which you are going + to use in the prototype of your `operation` after + passing the operation function. 
+ + Examples + ======== + + >>> from pydatastructs import Graph, AdjacencyListGraphNode + >>> V1 = AdjacencyListGraphNode("V1") + >>> V2 = AdjacencyListGraphNode("V2") + >>> V3 = AdjacencyListGraphNode("V3") + >>> G = Graph(V1, V2, V3) + >>> from pydatastructs import breadth_first_search + >>> def f(curr_node, next_node, dest_node): + ... return curr_node != dest_node + ... + >>> G.add_edge(V1.name, V2.name) + >>> G.add_edge(V2.name, V3.name) + >>> breadth_first_search(G, V1.name, f, V3.name) + """ + backend = kwargs.get('backend', Backend.PYTHON) + if backend == Backend.PYTHON: + import pydatastructs.graphs.algorithms as algorithms + func = "_breadth_first_search_" + graph._impl + if not hasattr(algorithms, func): + raise NotImplementedError( + "Currently breadth first search isn't implemented for " + "%s graphs."%(graph._impl)) + return getattr(algorithms, func)( + graph, source_node, operation, *args, **kwargs) + else: + from pydatastructs.graphs._backend.cpp._algorithms import bfs_adjacency_list, bfs_adjacency_matrix + if (graph._impl == "adjacency_list"): + extra_args = args if args else () + return bfs_adjacency_list(graph, source_node, operation, extra_args) + if (graph._impl == "adjacency_matrix"): + extra_args = args if args else () + return bfs_adjacency_matrix(graph, source_node, operation, extra_args) + +def _breadth_first_search_adjacency_list( + graph, source_node, operation, *args, **kwargs): + bfs_queue = Queue() + visited = {} + bfs_queue.append(source_node) + visited[source_node] = True + while len(bfs_queue) != 0: + curr_node = bfs_queue.popleft() + next_nodes = graph.neighbors(curr_node) + if len(next_nodes) != 0: + for next_node in next_nodes: + if visited.get(next_node.name, False) is False: + status = operation(curr_node, next_node.name, *args, **kwargs) + if not status: + return None + bfs_queue.append(next_node.name) + visited[next_node.name] = True + else: + status = operation(curr_node, "", *args, **kwargs) + if not status: + return None 
+ +_breadth_first_search_adjacency_matrix = _breadth_first_search_adjacency_list + +def breadth_first_search_parallel( + graph, source_node, num_threads, operation, *args, **kwargs): + """ + Parallel implementation of breadth first search on graphs. + + Parameters + ========== + + graph: Graph + The graph on which BFS is to be performed. + source_node: str + The name of the source node from where the BFS is + to be initiated. + num_threads: int + Number of threads to be used for computation. + operation: function + The function which is to be applied + on every node when it is visited. + The prototype which is to be followed is, + `function_name(curr_node, next_node, + arg_1, arg_2, . . ., arg_n)`. + Here, the first two arguments denote, the + current node and the node next to current node. + The rest of the arguments are optional and you can + provide your own stuff there. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Note + ==== + + You should pass all the arguments which you are going + to use in the prototype of your `operation` after + passing the operation function. + + Examples + ======== + + >>> from pydatastructs import Graph, AdjacencyListGraphNode + >>> V1 = AdjacencyListGraphNode("V1") + >>> V2 = AdjacencyListGraphNode("V2") + >>> V3 = AdjacencyListGraphNode("V3") + >>> G = Graph(V1, V2, V3) + >>> from pydatastructs import breadth_first_search_parallel + >>> def f(curr_node, next_node, dest_node): + ... return curr_node != dest_node + ... 
+ >>> G.add_edge(V1.name, V2.name) + >>> G.add_edge(V2.name, V3.name) + >>> breadth_first_search_parallel(G, V1.name, 3, f, V3.name) + """ + raise_if_backend_is_not_python( + breadth_first_search_parallel, kwargs.get('backend', Backend.PYTHON)) + import pydatastructs.graphs.algorithms as algorithms + func = "_breadth_first_search_parallel_" + graph._impl + if not hasattr(algorithms, func): + raise NotImplementedError( + "Currently breadth first search isn't implemented for " + "%s graphs."%(graph._impl)) + return getattr(algorithms, func)( + graph, source_node, num_threads, operation, *args, **kwargs) + +def _generate_layer(**kwargs): + _args, _kwargs = kwargs.get('args'), kwargs.get('kwargs') + (graph, curr_node, next_layer, visited, operation) = _args[0:5] + op_args, op_kwargs = _args[5:], _kwargs + next_nodes = graph.neighbors(curr_node) + status = True + if len(next_nodes) != 0: + for next_node in next_nodes: + if visited.get(next_node, False) is False: + status = status and operation(curr_node, next_node.name, *op_args, **op_kwargs) + next_layer.add(next_node.name) + visited[next_node.name] = True + else: + status = status and operation(curr_node, "", *op_args, **op_kwargs) + return status + +def _breadth_first_search_parallel_adjacency_list( + graph, source_node, num_threads, operation, *args, **kwargs): + visited, layers = {}, {} + layers[0] = set() + layers[0].add(source_node) + visited[source_node] = True + layer = 0 + while len(layers[layer]) != 0: + layers[layer+1] = set() + with ThreadPoolExecutor(max_workers=num_threads) as Executor: + for node in layers[layer]: + status = Executor.submit( + _generate_layer, args= + (graph, node, layers[layer+1], visited, + operation, *args), kwargs=kwargs).result() + layer += 1 + if not status: + return None + +_breadth_first_search_parallel_adjacency_matrix = _breadth_first_search_parallel_adjacency_list + +def _generate_mst_object(graph): + mst = Graph(*[getattr(graph, str(v)) for v in graph.vertices]) + return mst 
+ +def _sort_edges(graph, num_threads=None): + edges = list(graph.edge_weights.items()) + if num_threads is None: + sort_key = lambda item: item[1].value + return sorted(edges, key=sort_key) + + merge_sort_parallel(edges, num_threads, + comp=lambda u,v: u[1].value <= v[1].value) + return edges + +def _minimum_spanning_tree_kruskal_adjacency_list(graph): + mst = _generate_mst_object(graph) + dsf = DisjointSetForest() + for v in graph.vertices: + dsf.make_set(v) + for _, edge in _sort_edges(graph): + u, v = edge.source.name, edge.target.name + if dsf.find_root(u) is not dsf.find_root(v): + mst.add_edge(u, v, edge.value) + mst.add_edge(v, u, edge.value) + dsf.union(u, v) + return mst + +_minimum_spanning_tree_kruskal_adjacency_matrix = \ + _minimum_spanning_tree_kruskal_adjacency_list + +def _minimum_spanning_tree_prim_adjacency_list(graph): + q = PriorityQueue(implementation='binomial_heap') + e = {} + mst = Graph(implementation='adjacency_list') + q.push(next(iter(graph.vertices)), 0) + while not q.is_empty: + v = q.pop() + if not hasattr(mst, v): + mst.add_vertex(graph.__getattribute__(v)) + if e.get(v, None) is not None: + edge = e[v] + mst.add_vertex(edge.target) + mst.add_edge(edge.source.name, edge.target.name, edge.value) + mst.add_edge(edge.target.name, edge.source.name, edge.value) + for w_node in graph.neighbors(v): + w = w_node.name + vw = graph.edge_weights[v + '_' + w] + q.push(w, vw.value) + if e.get(w, None) is None or \ + e[w].value > vw.value: + e[w] = vw + return mst + +def minimum_spanning_tree(graph, algorithm, **kwargs): + """ + Computes a minimum spanning tree for the given + graph and algorithm. + + Parameters + ========== + + graph: Graph + The graph whose minimum spanning tree + has to be computed. + algorithm: str + The algorithm which should be used for + computing a minimum spanning tree. + Currently the following algorithms are + supported, + + 'kruskal' -> Kruskal's algorithm as given in [1]. 
+ + 'prim' -> Prim's algorithm as given in [2]. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Returns + ======= + + mst: Graph + A minimum spanning tree using the implementation + same as the graph provided in the input. + + Examples + ======== + + >>> from pydatastructs import Graph, AdjacencyListGraphNode + >>> from pydatastructs import minimum_spanning_tree + >>> u = AdjacencyListGraphNode('u') + >>> v = AdjacencyListGraphNode('v') + >>> G = Graph(u, v) + >>> G.add_edge(u.name, v.name, 3) + >>> mst = minimum_spanning_tree(G, 'kruskal') + >>> u_n = mst.neighbors(u.name) + >>> mst.get_edge(u.name, u_n[0].name).value + 3 + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Kruskal%27s_algorithm + .. [2] https://en.wikipedia.org/wiki/Prim%27s_algorithm + + Note + ==== + + The concept of minimum spanning tree is valid only for + connected and undirected graphs. So, this function + should be used only for such graphs. Using with other + types of graphs may lead to unwanted results. + """ + backend = kwargs.get('backend', Backend.PYTHON) + if backend == Backend.PYTHON: + import pydatastructs.graphs.algorithms as algorithms + func = "_minimum_spanning_tree_" + algorithm + "_" + graph._impl + if not hasattr(algorithms, func): + raise NotImplementedError( + "Currently %s algoithm for %s implementation of graphs " + "isn't implemented for finding minimum spanning trees." 
+ %(algorithm, graph._impl)) + return getattr(algorithms, func)(graph) + else: + from pydatastructs.graphs._backend.cpp._algorithms import minimum_spanning_tree_prim_adjacency_list + if graph._impl == "adjacency_list" and algorithm == 'prim': + return minimum_spanning_tree_prim_adjacency_list(graph) + +def _minimum_spanning_tree_parallel_kruskal_adjacency_list(graph, num_threads): + mst = _generate_mst_object(graph) + dsf = DisjointSetForest() + for v in graph.vertices: + dsf.make_set(v) + edges = _sort_edges(graph, num_threads) + for _, edge in edges: + u, v = edge.source.name, edge.target.name + if dsf.find_root(u) is not dsf.find_root(v): + mst.add_edge(u, v, edge.value) + mst.add_edge(v, u, edge.value) + dsf.union(u, v) + return mst + +_minimum_spanning_tree_parallel_kruskal_adjacency_matrix = \ + _minimum_spanning_tree_parallel_kruskal_adjacency_list + +def _find_min(q, v, i): + if not q.is_empty: + v[i] = q.peek + else: + v[i] = None + +def _minimum_spanning_tree_parallel_prim_adjacency_list(graph, num_threads): + q = [PriorityQueue(implementation='binomial_heap') for _ in range(num_threads)] + e = [{} for _ in range(num_threads)] + v2q = {} + mst = Graph(implementation='adjacency_list') + + itr = iter(graph.vertices) + for i in range(len(graph.vertices)): + v2q[next(itr)] = i%len(q) + q[0].push(next(iter(graph.vertices)), 0) + + while True: + + _vs = [None for _ in range(num_threads)] + with ThreadPoolExecutor(max_workers=num_threads) as Executor: + for i in range(num_threads): + Executor.submit(_find_min, q[i], _vs, i).result() + v = None + + for i in range(num_threads): + if _comp(_vs[i], v, lambda u, v: u.key < v.key): + v = _vs[i] + if v is None: + break + v = v.data + idx = v2q[v] + q[idx].pop() + + if not hasattr(mst, v): + mst.add_vertex(graph.__getattribute__(v)) + if e[idx].get(v, None) is not None: + edge = e[idx][v] + mst.add_vertex(edge.target) + mst.add_edge(edge.source.name, edge.target.name, edge.value) + mst.add_edge(edge.target.name, 
edge.source.name, edge.value) + for w_node in graph.neighbors(v): + w = w_node.name + vw = graph.edge_weights[v + '_' + w] + j = v2q[w] + q[j].push(w, vw.value) + if e[j].get(w, None) is None or \ + e[j][w].value > vw.value: + e[j][w] = vw + + return mst + +def minimum_spanning_tree_parallel(graph, algorithm, num_threads, **kwargs): + """ + Computes a minimum spanning tree for the given + graph and algorithm using the given number of threads. + + Parameters + ========== + + graph: Graph + The graph whose minimum spanning tree + has to be computed. + algorithm: str + The algorithm which should be used for + computing a minimum spanning tree. + Currently the following algorithms are + supported, + + 'kruskal' -> Kruskal's algorithm as given in [1]. + + 'prim' -> Prim's algorithm as given in [2]. + num_threads: int + The number of threads to be used. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Returns + ======= + + mst: Graph + A minimum spanning tree using the implementation + same as the graph provided in the input. + + Examples + ======== + + >>> from pydatastructs import Graph, AdjacencyListGraphNode + >>> from pydatastructs import minimum_spanning_tree_parallel + >>> u = AdjacencyListGraphNode('u') + >>> v = AdjacencyListGraphNode('v') + >>> G = Graph(u, v) + >>> G.add_edge(u.name, v.name, 3) + >>> mst = minimum_spanning_tree_parallel(G, 'kruskal', 3) + >>> u_n = mst.neighbors(u.name) + >>> mst.get_edge(u.name, u_n[0].name).value + 3 + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Kruskal%27s_algorithm#Parallel_algorithm + .. [2] https://en.wikipedia.org/wiki/Prim%27s_algorithm#Parallel_algorithm + + Note + ==== + + The concept of minimum spanning tree is valid only for + connected and undirected graphs. So, this function + should be used only for such graphs. Using with other + types of graphs will lead to unwanted results. 
+ """ + raise_if_backend_is_not_python( + minimum_spanning_tree_parallel, kwargs.get('backend', Backend.PYTHON)) + import pydatastructs.graphs.algorithms as algorithms + func = "_minimum_spanning_tree_parallel_" + algorithm + "_" + graph._impl + if not hasattr(algorithms, func): + raise NotImplementedError( + "Currently %s algoithm for %s implementation of graphs " + "isn't implemented for finding minimum spanning trees." + %(algorithm, graph._impl)) + return getattr(algorithms, func)(graph, num_threads) + +def _visit(graph, vertex, visited, incoming, L): + stack = [vertex] + while stack: + top = stack[-1] + if not visited.get(top, False): + visited[top] = True + for node in graph.neighbors(top): + if incoming.get(node.name, None) is None: + incoming[node.name] = [] + incoming[node.name].append(top) + if not visited.get(node.name, False): + stack.append(node.name) + if top is stack[-1]: + L.append(stack.pop()) + +def _assign(graph, u, incoming, assigned, component): + stack = [u] + while stack: + top = stack[-1] + if not assigned.get(top, False): + assigned[top] = True + component.add(top) + for u in incoming[top]: + if not assigned.get(u, False): + stack.append(u) + if top is stack[-1]: + stack.pop() + +def _strongly_connected_components_kosaraju_adjacency_list(graph): + visited, incoming, L = {}, {}, [] + for u in graph.vertices: + if not visited.get(u, False): + _visit(graph, u, visited, incoming, L) + + assigned = {} + components = [] + for i in range(-1, -len(L) - 1, -1): + comp = set() + if not assigned.get(L[i], False): + _assign(graph, L[i], incoming, assigned, comp) + if comp: + components.append(comp) + + return components + +_strongly_connected_components_kosaraju_adjacency_matrix = \ + _strongly_connected_components_kosaraju_adjacency_list + +def _tarjan_dfs(u, graph, index, stack, indices, low_links, on_stacks, components): + indices[u] = index[0] + low_links[u] = index[0] + index[0] += 1 + stack.append(u) + on_stacks[u] = True + + for node in 
def _strongly_connected_components_tarjan_adjacency_list(graph):
    """
    Compute the strongly connected components of ``graph`` using
    Tarjan's algorithm.

    Returns a list of sets, each set holding the vertex names of one
    strongly connected component.
    """
    index = [0] # mutable object: shared DFS counter advanced inside _tarjan_dfs
    stack = Stack([])
    indices, low_links, on_stacks = {}, {}, {}

    # -1 marks a vertex that has not been discovered yet.
    for u in graph.vertices:
        indices[u] = -1
        low_links[u] = -1
        on_stacks[u] = False

    components = []

    # Start a DFS from every undiscovered vertex; _tarjan_dfs appends each
    # completed component to `components` as it closes.
    # NOTE(review): _tarjan_dfs is recursive, so very deep graphs may hit
    # Python's recursion limit — confirm acceptable for expected inputs.
    for u in graph.vertices:
        if indices[u] == -1:
            _tarjan_dfs(u, graph, index, stack, indices, low_links, on_stacks, components)

    return components
+ + Examples + ======== + + >>> from pydatastructs import Graph, AdjacencyListGraphNode + >>> from pydatastructs import strongly_connected_components + >>> v1, v2, v3 = [AdjacencyListGraphNode(i) for i in range(3)] + >>> g = Graph(v1, v2, v3) + >>> g.add_edge(v1.name, v2.name) + >>> g.add_edge(v2.name, v3.name) + >>> g.add_edge(v3.name, v1.name) + >>> scc = strongly_connected_components(g, 'kosaraju') + >>> scc == [{'2', '0', '1'}] + True + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Kosaraju%27s_algorithm + .. [2] https://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm + + """ + raise_if_backend_is_not_python( + strongly_connected_components, kwargs.get('backend', Backend.PYTHON)) + import pydatastructs.graphs.algorithms as algorithms + func = "_strongly_connected_components_" + algorithm + "_" + graph._impl + if not hasattr(algorithms, func): + raise NotImplementedError( + "Currently %s algoithm for %s implementation of graphs " + "isn't implemented for finding strongly connected components." + %(algorithm, graph._impl)) + return getattr(algorithms, func)(graph) + +def depth_first_search( + graph, source_node, operation, *args, **kwargs): + """ + Implementation of depth first search (DFS) + algorithm. + + Parameters + ========== + + graph: Graph + The graph on which DFS is to be performed. + source_node: str + The name of the source node from where the DFS is + to be initiated. + operation: function + The function which is to be applied + on every node when it is visited. + The prototype which is to be followed is, + `function_name(curr_node, next_node, + arg_1, arg_2, . . ., arg_n)`. + Here, the first two arguments denote, the + current node and the node next to current node. + The rest of the arguments are optional and you can + provide your own stuff there. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. 
def _depth_first_search_adjacency_list(
    graph, source_node, operation, *args, **kwargs):
    """
    Iterative DFS over an adjacency-list graph.

    ``operation(curr_node, next_node, *args, **kwargs)`` is invoked once
    per tree edge; a dead-end vertex triggers a single call with "" as
    the second argument. A falsy return value from ``operation`` aborts
    the traversal immediately. Always returns None.
    """
    dfs_stack = Stack()
    visited = {}
    dfs_stack.append(source_node)
    visited[source_node] = True
    while len(dfs_stack) != 0:
        curr_node = dfs_stack.pop()
        next_nodes = graph.neighbors(curr_node)
        if len(next_nodes) != 0:
            for next_node in next_nodes:
                if next_node.name not in visited:
                    status = operation(curr_node, next_node.name, *args, **kwargs)
                    if not status:
                        return None
                    dfs_stack.append(next_node.name)
                    # Mark on push (not on pop) so each vertex is expanded at most once.
                    visited[next_node.name] = True
        else:
            # Dead end: report it with an empty-string placeholder neighbour.
            status = operation(curr_node, "", *args, **kwargs)
            if not status:
                return None
tuple: + """ + Finds shortest paths in the given graph from a given source. + + Parameters + ========== + + graph: Graph + The graph under consideration. + algorithm: str + The algorithm to be used. Currently, the following algorithms + are implemented, + + 'bellman_ford' -> Bellman-Ford algorithm as given in [1] + + 'dijkstra' -> Dijkstra algorithm as given in [2]. + source: str + The name of the source the node. + target: str + The name of the target node. + Optional, by default, all pair shortest paths + are returned. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Returns + ======= + + (distances, predecessors): (dict, dict) + If target is not provided and algorithm used + is 'bellman_ford'/'dijkstra'. + (distances[target], predecessors): (float, dict) + If target is provided and algorithm used is + 'bellman_ford'/'dijkstra'. + + Examples + ======== + + >>> from pydatastructs import Graph, AdjacencyListGraphNode + >>> from pydatastructs import shortest_paths + >>> V1 = AdjacencyListGraphNode("V1") + >>> V2 = AdjacencyListGraphNode("V2") + >>> V3 = AdjacencyListGraphNode("V3") + >>> G = Graph(V1, V2, V3) + >>> G.add_edge('V2', 'V3', 10) + >>> G.add_edge('V1', 'V2', 11) + >>> shortest_paths(G, 'bellman_ford', 'V1') + ({'V1': 0, 'V2': 11, 'V3': 21}, {'V1': None, 'V2': 'V1', 'V3': 'V2'}) + >>> shortest_paths(G, 'dijkstra', 'V1') + ({'V2': 11, 'V3': 21, 'V1': 0}, {'V1': None, 'V2': 'V1', 'V3': 'V2'}) + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Bellman%E2%80%93Ford_algorithm + .. 
def _bellman_ford_adjacency_list(graph: Graph, source: str, target: str) -> tuple:
    """
    Single-source shortest paths via the queue-based Bellman-Ford
    variant (SPFA).

    Returns ``(distances, predecessor)`` when ``target`` is "", else
    ``(distances[target], predecessor)``. Raises ValueError when a
    negative-weight cycle is reachable from ``source``.
    """
    # visited[v] here means "v is currently enqueued", not "v is processed".
    distances, predecessor, visited, cnts = {}, {}, {}, {}

    for v in graph.vertices:
        distances[v] = float('inf')
        predecessor[v] = None
        visited[v] = False
        # cnts[v]: length (in edges) of the current best path to v; a value
        # reaching |V| proves a negative-weight cycle.
        cnts[v] = 0
    distances[source] = 0
    verticy_num = len(graph.vertices)

    que = Queue([source])

    while que:
        u = que.popleft()
        visited[u] = False
        neighbors = graph.neighbors(u)
        for neighbor in neighbors:
            v = neighbor.name
            # Edge weights are stored under "source_target" keys.
            edge_str = u + '_' + v
            if distances[u] != float('inf') and distances[u] + graph.edge_weights[edge_str].value < distances[v]:
                distances[v] = distances[u] + graph.edge_weights[edge_str].value
                predecessor[v] = u
                cnts[v] = cnts[u] + 1
                if cnts[v] >= verticy_num:
                    raise ValueError("Graph contains a negative weight cycle.")
                # Re-enqueue v only if it is not already waiting.
                if not visited[v]:
                    que.append(v)
                    visited[v] = True

    if target != "":
        return (distances[target], predecessor)
    return (distances, predecessor)
dist[start] = 0 + pq = PriorityQueue(implementation='binomial_heap') + for vertex in dist: + pq.push(vertex, dist[vertex]) + for _ in range(V): + u = pq.pop() + visited[u] = True + for v in graph.vertices: + edge_str = u + '_' + v + if (edge_str in graph.edge_weights and graph.edge_weights[edge_str].value >= 0 and + visited[v] is False and dist[v] > dist[u] + graph.edge_weights[edge_str].value): + dist[v] = dist[u] + graph.edge_weights[edge_str].value + pred[v] = u + pq.push(v, dist[v]) + + if target != "": + return (dist[target], pred) + return dist, pred + +_dijkstra_adjacency_matrix = _dijkstra_adjacency_list + +def all_pair_shortest_paths(graph: Graph, algorithm: str, + **kwargs) -> tuple: + """ + Finds shortest paths between all pairs of vertices in the given graph. + + Parameters + ========== + + graph: Graph + The graph under consideration. + algorithm: str + The algorithm to be used. Currently, the following algorithms + are implemented, + + 'floyd_warshall' -> Floyd Warshall algorithm as given in [1]. + 'johnson' -> Johnson's Algorithm as given in [2] + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Returns + ======= + + (distances, predecessors): (dict, dict) + + Examples + ======== + + >>> from pydatastructs import Graph, AdjacencyListGraphNode + >>> from pydatastructs import all_pair_shortest_paths + >>> V1 = AdjacencyListGraphNode("V1") + >>> V2 = AdjacencyListGraphNode("V2") + >>> V3 = AdjacencyListGraphNode("V3") + >>> G = Graph(V1, V2, V3) + >>> G.add_edge('V2', 'V3', 10) + >>> G.add_edge('V1', 'V2', 11) + >>> G.add_edge('V3', 'V1', 5) + >>> dist, _ = all_pair_shortest_paths(G, 'floyd_warshall') + >>> dist['V1']['V3'] + 21 + >>> dist['V3']['V1'] + 5 + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Floyd%E2%80%93Warshall_algorithm + .. 
[2] https://en.wikipedia.org/wiki/Johnson's_algorithm + """ + raise_if_backend_is_not_python( + all_pair_shortest_paths, kwargs.get('backend', Backend.PYTHON)) + import pydatastructs.graphs.algorithms as algorithms + func = "_" + algorithm + "_" + graph._impl + if not hasattr(algorithms, func): + raise NotImplementedError( + "Currently %s algorithm isn't implemented for " + "finding shortest paths in graphs."%(algorithm)) + return getattr(algorithms, func)(graph) + +def _floyd_warshall_adjacency_list(graph: Graph): + dist, next_vertex = {}, {} + V, E = graph.vertices, graph.edge_weights + + for v in V: + dist[v] = {} + next_vertex[v] = {} + + for name, edge in E.items(): + dist[edge.source.name][edge.target.name] = edge.value + next_vertex[edge.source.name][edge.target.name] = edge.source.name + + for v in V: + dist[v][v] = 0 + next_vertex[v][v] = v + + for k in V: + for i in V: + for j in V: + dist_i_j = dist.get(i, {}).get(j, float('inf')) + dist_i_k = dist.get(i, {}).get(k, float('inf')) + dist_k_j = dist.get(k, {}).get(j, float('inf')) + next_i_k = next_vertex.get(i + '_' + k, None) + if dist_i_j > dist_i_k + dist_k_j: + dist[i][j] = dist_i_k + dist_k_j + next_vertex[i][j] = next_i_k + + return (dist, next_vertex) + +_floyd_warshall_adjacency_matrix = _floyd_warshall_adjacency_list + +def _johnson_adjacency_list(graph: Graph): + new_vertex = AdjacencyListGraphNode('__q__') + graph.add_vertex(new_vertex) + + for vertex in graph.vertices: + if vertex != '__q__': + graph.add_edge('__q__', vertex, 0) + + distances, predecessors = shortest_paths(graph, 'bellman_ford', '__q__') + + edges_to_remove = [] + for edge in graph.edge_weights: + edge_node = graph.edge_weights[edge] + if edge_node.source.name == '__q__': + edges_to_remove.append((edge_node.source.name, edge_node.target.name)) + + for u, v in edges_to_remove: + graph.remove_edge(u, v) + graph.remove_vertex('__q__') + + for edge in graph.edge_weights: + edge_node = graph.edge_weights[edge] + u, v = 
edge_node.source.name, edge_node.target.name + graph.edge_weights[edge].value += (distances[u] - distances[v]) + + all_distances = {} + all_next_vertex = {} + + for vertex in graph.vertices: + u = vertex + dijkstra_dist, dijkstra_pred = shortest_paths(graph, 'dijkstra', u) + all_distances[u] = {} + all_next_vertex[u] = {} + for v in graph.vertices: + if dijkstra_pred[v] is None or dijkstra_pred[v] == u : + all_next_vertex[u][v] = u + else: + all_next_vertex[u][v] = None + if v in dijkstra_dist: + all_distances[u][v] = dijkstra_dist[v] - distances[u] + distances[v] + else: + all_distances[u][v] = float('inf') + + return (all_distances, all_next_vertex) + +def topological_sort(graph: Graph, algorithm: str, + **kwargs) -> list: + """ + Performs topological sort on the given graph using given algorithm. + + Parameters + ========== + + graph: Graph + The graph under consideration. + algorithm: str + The algorithm to be used. + Currently, following are supported, + + 'kahn' -> Kahn's algorithm as given in [1]. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Returns + ======= + + list + The list of topologically sorted vertices. + + Examples + ======== + + >>> from pydatastructs import Graph, AdjacencyListGraphNode, topological_sort + >>> v_1 = AdjacencyListGraphNode('v_1') + >>> v_2 = AdjacencyListGraphNode('v_2') + >>> graph = Graph(v_1, v_2) + >>> graph.add_edge('v_1', 'v_2') + >>> topological_sort(graph, 'kahn') + ['v_1', 'v_2'] + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/Topological_sorting#Kahn's_algorithm + """ + raise_if_backend_is_not_python( + topological_sort, kwargs.get('backend', Backend.PYTHON)) + import pydatastructs.graphs.algorithms as algorithms + func = "_" + algorithm + "_" + graph._impl + if not hasattr(algorithms, func): + raise NotImplementedError( + "Currently %s algorithm isn't implemented for " + "performing topological sort on %s graphs."%(algorithm, graph._impl)) + return getattr(algorithms, func)(graph) + +def _kahn_adjacency_list(graph: Graph) -> list: + S = Queue() + in_degree = {u: 0 for u in graph.vertices} + for u in graph.vertices: + for v in graph.neighbors(u): + in_degree[v.name] += 1 + for u in graph.vertices: + if in_degree[u] == 0: + S.append(u) + in_degree.pop(u) + + L = [] + while S: + n = S.popleft() + L.append(n) + for m in graph.neighbors(n): + graph.remove_edge(n, m.name) + in_degree[m.name] -= 1 + if in_degree[m.name] == 0: + S.append(m.name) + in_degree.pop(m.name) + + if in_degree: + raise ValueError("Graph is not acyclic.") + return L + +def topological_sort_parallel(graph: Graph, algorithm: str, num_threads: int, + **kwargs) -> list: + """ + Performs topological sort on the given graph using given algorithm using + given number of threads. + + Parameters + ========== + + graph: Graph + The graph under consideration. + algorithm: str + The algorithm to be used. + Currently, following are supported, + + 'kahn' -> Kahn's algorithm as given in [1]. + num_threads: int + The maximum number of threads to be used. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Returns + ======= + + list + The list of topologically sorted vertices. 
def _kahn_adjacency_list_parallel(graph: Graph, num_threads: int) -> list:
    """
    Topological sort via Kahn's algorithm, removing the out-edges of each
    source layer with up to ``num_threads`` worker threads.

    WARNING: this routine is destructive — it removes every edge and
    vertex from the input graph as it runs; the graph is empty on a
    successful return. Raises ValueError when the graph has a cycle.
    """
    num_vertices = len(graph.vertices)

    def _collect_source_nodes(graph: Graph) -> list:
        # Vertices with zero in-degree in the (already partially
        # dismantled) graph form the next output layer.
        S = []
        in_degree = {u: 0 for u in graph.vertices}
        for u in graph.vertices:
            for v in graph.neighbors(u):
                in_degree[v.name] += 1
        for u in in_degree:
            if in_degree[u] == 0:
                S.append(u)
        return list(S)

    def _job(graph: Graph, u: str):
        # Worker: strip all out-edges of u so its successors can become sources.
        for v in graph.neighbors(u):
            graph.remove_edge(u, v.name)

    L = []
    source_nodes = _collect_source_nodes(graph)
    while source_nodes:
        with ThreadPoolExecutor(max_workers=num_threads) as Executor:
            for node in source_nodes:
                L.append(node)
                Executor.submit(_job, graph, node)
        # The `with` block above joins all workers before vertices are removed.
        for node in source_nodes:
            graph.remove_vertex(node)
        source_nodes = _collect_source_nodes(graph)

    # Vertices stuck on a cycle never reach in-degree 0 and are never emitted.
    if len(L) != num_vertices:
        raise ValueError("Graph is not acyclic.")
    return L
def _max_flow_edmonds_karp_(graph: Graph, source, sink):
    """
    Maximum flow from ``source`` to ``sink`` via Edmonds-Karp: repeatedly
    find a shortest augmenting path with BFS and push its bottleneck
    flow, until no augmenting path remains.
    """
    total_flow = 0
    flow_passed = {}
    while True:
        aug, parent = _breadth_first_search_max_flow(
            graph, source, sink, flow_passed)
        if aug == 0:
            break
        total_flow += aug
        # Walk the augmenting path sink -> source, updating the forward
        # flow and the residual (reverse) flow on every edge.
        node = sink
        while node != source:
            prev_node = parent[node]
            forward = flow_passed.get((prev_node, node), 0)
            flow_passed[(prev_node, node)] = forward + aug
            backward = flow_passed.get((node, prev_node), 0)
            flow_passed[(node, prev_node)] = backward - aug
            node = prev_node
    return total_flow
def find_bridges(graph):
    """
    Finds all bridges in an undirected graph using Tarjan's Algorithm.

    Parameters
    ==========
    graph : Graph
        An undirected graph instance.

    Returns
    ========== 
    List[tuple]
        A list of bridges, where each bridge is represented as a tuple (u, v)
        with u <= v, sorted in ascending order.

    Example
    ========
    >>> from pydatastructs import Graph, AdjacencyListGraphNode, find_bridges
    >>> v0 = AdjacencyListGraphNode(0)
    >>> v1 = AdjacencyListGraphNode(1)
    >>> v2 = AdjacencyListGraphNode(2)
    >>> v3 = AdjacencyListGraphNode(3)
    >>> v4 = AdjacencyListGraphNode(4)
    >>> graph = Graph(v0, v1, v2, v3, v4, implementation='adjacency_list')
    >>> graph.add_edge(v0.name, v1.name)
    >>> graph.add_edge(v1.name, v2.name)
    >>> graph.add_edge(v2.name, v3.name)
    >>> graph.add_edge(v3.name, v4.name)
    >>> find_bridges(graph)
    [('0', '1'), ('1', '2'), ('2', '3'), ('3', '4')]

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Bridge_(graph_theory)
    """
    # Normalise vertices to plain labels (GraphNode objects -> .name).
    labels = []
    for v in list(graph.vertices):
        labels.append(v.name if hasattr(v, "name") else v)

    index_of = {label: idx for idx, label in enumerate(labels)}
    label_of = {idx: label for label, idx in index_of.items()}
    n = len(labels)

    adjacency = [[] for _ in range(n)]
    for label in labels:
        for neighbor in graph.neighbors(label):
            nbr = neighbor.name if hasattr(neighbor, "name") else neighbor
            adjacency[index_of[label]].append(index_of[nbr])

    visited = [False] * n
    disc = [0] * n
    low = [0] * n
    parent = [-1] * n
    bridges_idx = []
    timer = 0

    # Iterative Tarjan DFS. The previous recursive version hit Python's
    # recursion limit on long paths (a path graph of ~1000 vertices was
    # enough to raise RecursionError); the explicit stack holds
    # (vertex, next-neighbour-index) pairs instead.
    for root in range(n):
        if visited[root]:
            continue
        visited[root] = True
        disc[root] = low[root] = timer
        timer += 1
        stack = [(root, 0)]
        while stack:
            u, i = stack[-1]
            if i < len(adjacency[u]):
                stack[-1] = (u, i + 1)
                v = adjacency[u][i]
                if not visited[v]:
                    parent[v] = u
                    visited[v] = True
                    disc[v] = low[v] = timer
                    timer += 1
                    stack.append((v, 0))
                elif v != parent[u]:
                    # Back edge: u can reach an ancestor directly.
                    low[u] = min(low[u], disc[v])
            else:
                # u is fully explored: propagate its low-link to its parent
                # and test the tree edge (parent, u) for being a bridge.
                stack.pop()
                if stack:
                    p = stack[-1][0]
                    low[p] = min(low[p], low[u])
                    if low[u] > disc[p]:
                        bridges_idx.append((p, u))

    bridges = []
    for u, v in bridges_idx:
        a, b = label_of[u], label_of[v]
        bridges.append((a, b) if a <= b else (b, a))
    bridges.sort()
    return bridges
It can be figured out + from the type of the vertices (if passed at construction). + Currently the following implementations are supported, + + 'adjacency_list' -> Adjacency list implementation. + + 'adjacency_matrix' -> Adjacency matrix implementation. + + By default, 'adjacency_list'. + vertices: GraphNode(s) + For AdjacencyList implementation vertices + can be passed for initializing the graph. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Examples + ======== + + >>> from pydatastructs.graphs import Graph + >>> from pydatastructs.utils import AdjacencyListGraphNode + >>> v_1 = AdjacencyListGraphNode('v_1', 1) + >>> v_2 = AdjacencyListGraphNode('v_2', 2) + >>> g = Graph(v_1, v_2) + >>> g.add_edge('v_1', 'v_2') + >>> g.add_edge('v_2', 'v_1') + >>> g.is_adjacent('v_1', 'v_2') + True + >>> g.is_adjacent('v_2', 'v_1') + True + >>> g.remove_edge('v_1', 'v_2') + >>> g.is_adjacent('v_1', 'v_2') + False + >>> g.is_adjacent('v_2', 'v_1') + True + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Graph_(abstract_data_type) + + Note + ==== + + Make sure to create nodes (AdjacencyListGraphNode or AdjacencyMatrixGraphNode) + and add them to your graph using Graph.add_vertex before adding edges whose + end points require either of the nodes that you added. In other words, + Graph.add_edge doesn't add new nodes on its own if the input + nodes are not already present in the Graph.
+ + """ + + __slots__ = ['_impl'] + + def __new__(cls, *args, **kwargs): + backend = kwargs.get('backend', Backend.PYTHON) + try: + default_impl = args[0]._impl if args else 'adjacency_list' + except: + default_impl = 'adjacency_list' + implementation = kwargs.get('implementation', default_impl) + if implementation == 'adjacency_list': + from pydatastructs.graphs.adjacency_list import AdjacencyList + obj = AdjacencyList(*args, **kwargs) + return obj + elif implementation == 'adjacency_matrix': + from pydatastructs.graphs.adjacency_matrix import AdjacencyMatrix + obj = AdjacencyMatrix(*args, **kwargs) + return obj + else: + raise NotImplementedError("%s implementation is not a part " + "of the library currently."%(implementation)) + + def is_adjacent(self, node1, node2): + """ + Checks if the nodes + with the given names are adjacent + to each other. + """ + raise NotImplementedError( + "This is an abstract method.") + + def neighbors(self, node): + """ + Lists the neighbors of the node + with the given name. + """ + raise NotImplementedError( + "This is an abstract method.") + + def add_vertex(self, node): + """ + Adds the input vertex to the graph, or does nothing + if the input vertex is already in the graph. + """ + raise NotImplementedError( + "This is an abstract method.") + + def remove_vertex(self, node): + """ + Removes the input vertex along with all the edges + pointing towards it. + """ + raise NotImplementedError( + "This is an abstract method.") + + def add_edge(self, source, target, cost=None): + """ + Adds the edge starting at first parameter + i.e., source and ending at the second + parameter i.e., target. + """ + raise NotImplementedError( + "This is an abstract method.") + + def get_edge(self, source, target): + """ + Returns GraphEdge object if there + is an edge between source and target + otherwise None.
+ """ + raise NotImplementedError( + "This is an abstract method.") + + def remove_edge(self, source, target): + """ + Removes the edge starting at first parameter + i.e., source and ending at the second + parameter i.e., target. + """ + raise NotImplementedError( + "This is an abstract method.") + + def num_vertices(self): + """ + Number of vertices + """ + raise NotImplementedError( + "This is an abstract method.") + + def num_edges(self): + """ + Number of edges + """ + raise NotImplementedError( + "This is an abstract method.") diff --git a/lib/python3.12/site-packages/pydatastructs/graphs/tests/__init__.py b/lib/python3.12/site-packages/pydatastructs/graphs/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_adjacency_list.py b/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_adjacency_list.py new file mode 100644 index 000000000..3a9cdb14f --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_adjacency_list.py @@ -0,0 +1,83 @@ +from pydatastructs.graphs import Graph +from pydatastructs.utils import AdjacencyListGraphNode +from pydatastructs.utils.raises_util import raises +from pydatastructs.utils.misc_util import Backend + +def test_adjacency_list(): + v_1 = AdjacencyListGraphNode('v_1', 1) + v_2 = AdjacencyListGraphNode('v_2', 2) + g = Graph(v_1, v_2, implementation='adjacency_list') + v_3 = AdjacencyListGraphNode('v_3', 3) + g.add_vertex(v_2) + g.add_vertex(v_3) + g.add_edge('v_1', 'v_2') + g.add_edge('v_2', 'v_3') + g.add_edge('v_3', 'v_1') + assert g.is_adjacent('v_1', 'v_2') is True + assert g.is_adjacent('v_2', 'v_3') is True + assert g.is_adjacent('v_3', 'v_1') is True + assert g.is_adjacent('v_2', 'v_1') is False + assert g.is_adjacent('v_3', 'v_2') is False + assert g.is_adjacent('v_1', 'v_3') is False + neighbors = g.neighbors('v_1') + assert neighbors == [v_2] + v = AdjacencyListGraphNode('v', 4) + g.add_vertex(v) + 
g.add_edge('v_1', 'v', 0) + g.add_edge('v_2', 'v', 0) + g.add_edge('v_3', 'v', 0) + assert g.is_adjacent('v_1', 'v') is True + assert g.is_adjacent('v_2', 'v') is True + assert g.is_adjacent('v_3', 'v') is True + e1 = g.get_edge('v_1', 'v') + e2 = g.get_edge('v_2', 'v') + e3 = g.get_edge('v_3', 'v') + assert (e1.source.name, e1.target.name) == ('v_1', 'v') + assert (e2.source.name, e2.target.name) == ('v_2', 'v') + assert (e3.source.name, e3.target.name) == ('v_3', 'v') + g.remove_edge('v_1', 'v') + assert g.is_adjacent('v_1', 'v') is False + g.remove_vertex('v') + assert g.is_adjacent('v_2', 'v') is False + assert g.is_adjacent('v_3', 'v') is False + + assert raises(ValueError, lambda: g.add_edge('u', 'v')) + assert raises(ValueError, lambda: g.add_edge('v', 'x')) + + v_4 = AdjacencyListGraphNode('v_4', 4, backend = Backend.CPP) + v_5 = AdjacencyListGraphNode('v_5', 5, backend = Backend.CPP) + g2 = Graph(v_4,v_5,implementation = 'adjacency_list', backend = Backend.CPP) + v_6 = AdjacencyListGraphNode('v_6', 6, backend = Backend.CPP) + assert raises(ValueError, lambda: g2.add_vertex(v_5)) + g2.add_vertex(v_6) + g2.add_edge('v_4', 'v_5') + g2.add_edge('v_5', 'v_6') + g2.add_edge('v_4', 'v_6') + assert g2.is_adjacent('v_4', 'v_5') is True + assert g2.is_adjacent('v_5', 'v_6') is True + assert g2.is_adjacent('v_4', 'v_6') is True + assert g2.is_adjacent('v_5', 'v_4') is False + assert g2.is_adjacent('v_6', 'v_5') is False + assert g2.is_adjacent('v_6', 'v_4') is False + assert g2.num_edges() == 3 + assert g2.num_vertices() == 3 + neighbors = g2.neighbors('v_4') + assert neighbors == [v_6, v_5] + v = AdjacencyListGraphNode('v', 4, backend = Backend.CPP) + g2.add_vertex(v) + g2.add_edge('v_4', 'v', 0) + g2.add_edge('v_5', 'v', 0) + g2.add_edge('v_6', 'v', "h") + assert g2.is_adjacent('v_4', 'v') is True + assert g2.is_adjacent('v_5', 'v') is True + assert g2.is_adjacent('v_6', 'v') is True + e1 = g2.get_edge('v_4', 'v') + e2 = g2.get_edge('v_5', 'v') + e3 = 
g2.get_edge('v_6', 'v') + assert (str(e1)) == "('v_4', 'v', 0)" + assert (str(e2)) == "('v_5', 'v', 0)" + assert (str(e3)) == "('v_6', 'v', h)" + g2.remove_edge('v_4', 'v') + assert g2.is_adjacent('v_4', 'v') is False + g2.remove_vertex('v') + assert raises(ValueError, lambda: g2.add_edge('v_4', 'v')) diff --git a/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_adjacency_matrix.py b/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_adjacency_matrix.py new file mode 100644 index 000000000..27dc81790 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_adjacency_matrix.py @@ -0,0 +1,53 @@ +from pydatastructs.graphs import Graph +from pydatastructs.utils import AdjacencyMatrixGraphNode +from pydatastructs.utils.raises_util import raises +from pydatastructs.utils.misc_util import Backend + +def test_AdjacencyMatrix(): + v_0 = AdjacencyMatrixGraphNode(0, 0) + v_1 = AdjacencyMatrixGraphNode(1, 1) + v_2 = AdjacencyMatrixGraphNode(2, 2) + g = Graph(v_0, v_1, v_2) + g.add_edge(0, 1, 0) + g.add_edge(1, 2, 0) + g.add_edge(2, 0, 0) + e1 = g.get_edge(0, 1) + e2 = g.get_edge(1, 2) + e3 = g.get_edge(2, 0) + assert (e1.source.name, e1.target.name) == ('0', '1') + assert (e2.source.name, e2.target.name) == ('1', '2') + assert (e3.source.name, e3.target.name) == ('2', '0') + assert g.is_adjacent(0, 1) is True + assert g.is_adjacent(1, 2) is True + assert g.is_adjacent(2, 0) is True + assert g.is_adjacent(1, 0) is False + assert g.is_adjacent(2, 1) is False + assert g.is_adjacent(0, 2) is False + neighbors = g.neighbors(0) + assert neighbors == [v_1] + g.remove_edge(0, 1) + assert g.is_adjacent(0, 1) is False + assert raises(ValueError, lambda: g.add_edge('u', 'v')) + assert raises(ValueError, lambda: g.add_edge('v', 'x')) + assert raises(ValueError, lambda: g.add_edge(2, 3)) + assert raises(ValueError, lambda: g.add_edge(3, 2)) + + v_3 = AdjacencyMatrixGraphNode('0', 0, backend = Backend.CPP) + v_4 = AdjacencyMatrixGraphNode('1', 
1, backend = Backend.CPP) + v_5 = AdjacencyMatrixGraphNode('2', 2, backend = Backend.CPP) + g2 = Graph(v_3, v_4, v_5, implementation = 'adjacency_matrix', backend = Backend.CPP) + g2.add_edge('0', '1', 0) + g2.add_edge('1', '2', 0) + g2.add_edge('2', '0', 0) + assert g2.is_adjacent('0', '1') is True + assert g2.is_adjacent('1', '2') is True + assert g2.is_adjacent('2', '0') is True + assert g2.is_adjacent('1', '0') is False + assert g2.is_adjacent('2', '1') is False + assert g2.is_adjacent('0', '2') is False + neighbors = g2.neighbors('0') + assert neighbors == [v_4] + g2.remove_edge('0', '1') + assert g2.is_adjacent('0', '1') is False + assert raises(ValueError, lambda: g2.add_edge('u', 'v')) + assert raises(ValueError, lambda: g2.add_edge('v', 'x')) diff --git a/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_algorithms.py b/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_algorithms.py new file mode 100644 index 000000000..04ebcccda --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_algorithms.py @@ -0,0 +1,596 @@ +from pydatastructs import (breadth_first_search, Graph, +breadth_first_search_parallel, minimum_spanning_tree, +minimum_spanning_tree_parallel, strongly_connected_components, +depth_first_search, shortest_paths,all_pair_shortest_paths, topological_sort, +topological_sort_parallel, max_flow, find_bridges) +from pydatastructs.utils.raises_util import raises +from pydatastructs.utils.misc_util import AdjacencyListGraphNode, AdjacencyMatrixGraphNode +from pydatastructs.graphs._backend.cpp import _graph +from pydatastructs.graphs._backend.cpp import _algorithms +from pydatastructs.utils.misc_util import Backend + +def test_breadth_first_search(): + + def _test_breadth_first_search(ds): + import pydatastructs.utils.misc_util as utils + GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") + + V1 = GraphNode(0) + V2 = GraphNode(1) + V3 = GraphNode(2) + + G1 = Graph(V1, V2, V3) + + assert 
G1.num_vertices() == 3 + + edges = [ + (V1.name, V2.name), + (V2.name, V3.name), + (V1.name, V3.name) + ] + + for edge in edges: + G1.add_edge(*edge) + + assert G1.num_edges() == len(edges) + + parent = {} + def bfs_tree(curr_node, next_node, parent): + if next_node != "": + parent[next_node] = curr_node + return True + + breadth_first_search(G1, V1.name, bfs_tree, parent) + assert (parent[V3.name] == V1.name and parent[V2.name] == V1.name) or \ + (parent[V3.name] == V2.name and parent[V2.name] == V1.name) + + if (ds=='List'): + parent = {} + V9 = AdjacencyListGraphNode("9",0,backend = Backend.CPP) + V10 = AdjacencyListGraphNode("10",0,backend = Backend.CPP) + V11 = AdjacencyListGraphNode("11",0,backend = Backend.CPP) + G2 = Graph(V9, V10, V11,implementation = 'adjacency_list', backend = Backend.CPP) + assert G2.num_vertices()==3 + G2.add_edge("9", "10") + G2.add_edge("10", "11") + breadth_first_search(G2, "9", bfs_tree, parent, backend = Backend.CPP) + assert parent[V10] == V9 + assert parent[V11] == V10 + + if (ds == 'Matrix'): + parent3 = {} + V12 = AdjacencyMatrixGraphNode("12", 0, backend = Backend.CPP) + V13 = AdjacencyMatrixGraphNode("13", 0, backend = Backend.CPP) + V14 = AdjacencyMatrixGraphNode("14", 0, backend = Backend.CPP) + G3 = Graph(V12, V13, V14, implementation = 'adjacency_matrix', backend = Backend.CPP) + assert G3.num_vertices() == 3 + G3.add_edge("12", "13") + G3.add_edge("13", "14") + breadth_first_search(G3, "12", bfs_tree, parent3, backend = Backend.CPP) + assert parent3[V13] == V12 + assert parent3[V14] == V13 + + V4 = GraphNode(0) + V5 = GraphNode(1) + V6 = GraphNode(2) + V7 = GraphNode(3) + V8 = GraphNode(4) + + edges = [ + (V4.name, V5.name), + (V5.name, V6.name), + (V6.name, V7.name), + (V6.name, V4.name), + (V7.name, V8.name) + ] + + G2 = Graph(V4, V5, V6, V7, V8) + + for edge in edges: + G2.add_edge(*edge) + + assert G2.num_edges() == len(edges) + + path = [] + def path_finder(curr_node, next_node, dest_node, parent, path): + if 
next_node != "": + parent[next_node] = curr_node + if curr_node == dest_node: + node = curr_node + path.append(node) + while node is not None: + if parent.get(node, None) is not None: + path.append(parent[node]) + node = parent.get(node, None) + path.reverse() + return False + return True + + parent.clear() + breadth_first_search(G2, V4.name, path_finder, V7.name, parent, path) + assert path == [V4.name, V5.name, V6.name, V7.name] + + _test_breadth_first_search("List") + _test_breadth_first_search("Matrix") + +def test_breadth_first_search_parallel(): + + def _test_breadth_first_search_parallel(ds): + import pydatastructs.utils.misc_util as utils + GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") + + V1 = GraphNode(0) + V2 = GraphNode(1) + V3 = GraphNode(2) + V4 = GraphNode(3) + V5 = GraphNode(4) + V6 = GraphNode(5) + V7 = GraphNode(6) + V8 = GraphNode(7) + + + G1 = Graph(V1, V2, V3, V4, V5, V6, V7, V8) + + edges = [ + (V1.name, V2.name), + (V1.name, V3.name), + (V1.name, V4.name), + (V2.name, V5.name), + (V2.name, V6.name), + (V3.name, V6.name), + (V3.name, V7.name), + (V4.name, V7.name), + (V4.name, V8.name) + ] + + for edge in edges: + G1.add_edge(*edge) + + parent = {} + def bfs_tree(curr_node, next_node, parent): + if next_node != "": + parent[next_node] = curr_node + return True + + breadth_first_search_parallel(G1, V1.name, 5, bfs_tree, parent) + assert (parent[V2.name] == V1.name and parent[V3.name] == V1.name and + parent[V4.name] == V1.name and parent[V5.name] == V2.name and + (parent[V6.name] in (V2.name, V3.name)) and + (parent[V7.name] in (V3.name, V4.name)) and (parent[V8.name] == V4.name)) + + _test_breadth_first_search_parallel("List") + _test_breadth_first_search_parallel("Matrix") + +def test_minimum_spanning_tree(): + + def _test_minimum_spanning_tree(func, ds, algorithm, *args): + import pydatastructs.utils.misc_util as utils + GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") + a, b, c, d, e = [GraphNode(x) for x in [0, 1, 2, 
3, 4]] + graph = Graph(a, b, c, d, e) + graph.add_edge(a.name, c.name, 10) + graph.add_edge(c.name, a.name, 10) + graph.add_edge(a.name, d.name, 7) + graph.add_edge(d.name, a.name, 7) + graph.add_edge(c.name, d.name, 9) + graph.add_edge(d.name, c.name, 9) + graph.add_edge(d.name, b.name, 32) + graph.add_edge(b.name, d.name, 32) + graph.add_edge(d.name, e.name, 23) + graph.add_edge(e.name, d.name, 23) + mst = func(graph, algorithm, *args) + expected_mst = [('0_3', 7), ('2_3', 9), ('3_4', 23), ('3_1', 32), + ('3_0', 7), ('3_2', 9), ('4_3', 23), ('1_3', 32)] + assert len(expected_mst) == len(mst.edge_weights.items()) + for k, v in mst.edge_weights.items(): + assert (k, v.value) in expected_mst + + def _test_minimum_spanning_tree_cpp(ds, algorithm, *args): + if (ds == 'List' and algorithm == "prim"): + a1 = AdjacencyListGraphNode('a', 0, backend = Backend.CPP) + b1 = AdjacencyListGraphNode('b', 0, backend = Backend.CPP) + c1 = AdjacencyListGraphNode('c', 0, backend = Backend.CPP) + d1 = AdjacencyListGraphNode('d', 0, backend = Backend.CPP) + e1 = AdjacencyListGraphNode('e', 0, backend = Backend.CPP) + g = Graph(a1, b1, c1, d1, e1, backend = Backend.CPP) + g.add_edge(a1.name, c1.name, 10) + g.add_edge(c1.name, a1.name, 10) + g.add_edge(a1.name, d1.name, 7) + g.add_edge(d1.name, a1.name, 7) + g.add_edge(c1.name, d1.name, 9) + g.add_edge(d1.name, c1.name, 9) + g.add_edge(d1.name, b1.name, 32) + g.add_edge(b1.name, d1.name, 32) + g.add_edge(d1.name, e1.name, 23) + g.add_edge(e1.name, d1.name, 23) + mst = minimum_spanning_tree(g, "prim", backend = Backend.CPP) + expected_mst = ["('a', 'd', 7)", "('d', 'c', 9)", "('e', 'd', 23)", "('b', 'd', 32)", + "('d', 'a', 7)", "('c', 'd', 9)", "('d', 'e', 23)", "('d', 'b', 32)"] + assert str(mst.get_edge('a', 'd')) in expected_mst + assert str(mst.get_edge('e', 'd')) in expected_mst + assert str(mst.get_edge('d', 'c')) in expected_mst + assert str(mst.get_edge('b', 'd')) in expected_mst + assert mst.num_edges() == 8 + 
a=AdjacencyListGraphNode('0', 0, backend = Backend.CPP) + b=AdjacencyListGraphNode('1', 0, backend = Backend.CPP) + c=AdjacencyListGraphNode('2', 0, backend = Backend.CPP) + d=AdjacencyListGraphNode('3', 0, backend = Backend.CPP) + g2 = Graph(a,b,c,d,backend = Backend.CPP) + g2.add_edge('0', '1', 74) + g2.add_edge('1', '0', 74) + g2.add_edge('0', '3', 55) + g2.add_edge('3', '0', 55) + g2.add_edge('1', '2', 74) + g2.add_edge('2', '1', 74) + mst2=minimum_spanning_tree(g2, "prim", backend = Backend.CPP) + assert mst2.num_edges() == 6 + + fmst = minimum_spanning_tree + fmstp = minimum_spanning_tree_parallel + _test_minimum_spanning_tree(fmst, "List", "kruskal") + _test_minimum_spanning_tree(fmst, "Matrix", "kruskal") + _test_minimum_spanning_tree(fmst, "List", "prim") + _test_minimum_spanning_tree(fmstp, "List", "kruskal", 3) + _test_minimum_spanning_tree(fmstp, "Matrix", "kruskal", 3) + _test_minimum_spanning_tree(fmstp, "List", "prim", 3) + _test_minimum_spanning_tree_cpp("List", "prim") + +def test_strongly_connected_components(): + + def _test_strongly_connected_components(func, ds, algorithm, *args): + import pydatastructs.utils.misc_util as utils + GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") + a, b, c, d, e, f, g, h = \ + [GraphNode(chr(x)) for x in range(ord('a'), ord('h') + 1)] + graph = Graph(a, b, c, d, e, f, g, h) + graph.add_edge(a.name, b.name) + graph.add_edge(b.name, c.name) + graph.add_edge(b.name, f.name) + graph.add_edge(b.name, e.name) + graph.add_edge(c.name, d.name) + graph.add_edge(c.name, g.name) + graph.add_edge(d.name, h.name) + graph.add_edge(d.name, c.name) + graph.add_edge(e.name, f.name) + graph.add_edge(e.name, a.name) + graph.add_edge(f.name, g.name) + graph.add_edge(g.name, f.name) + graph.add_edge(h.name, d.name) + graph.add_edge(h.name, g.name) + comps = func(graph, algorithm) + expected_comps = [{'e', 'a', 'b'}, {'d', 'c', 'h'}, {'g', 'f'}] + assert comps.sort() == expected_comps.sort() + + scc = 
strongly_connected_components + _test_strongly_connected_components(scc, "List", "kosaraju") + _test_strongly_connected_components(scc, "Matrix", "kosaraju") + _test_strongly_connected_components(scc, "List", "tarjan") + _test_strongly_connected_components(scc, "Matrix", "tarjan") + +def test_depth_first_search(): + + def _test_depth_first_search(ds): + import pydatastructs.utils.misc_util as utils + GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") + + V1 = GraphNode(0) + V2 = GraphNode(1) + V3 = GraphNode(2) + + G1 = Graph(V1, V2, V3) + + edges = [ + (V1.name, V2.name), + (V2.name, V3.name), + (V1.name, V3.name) + ] + + for edge in edges: + G1.add_edge(*edge) + + parent = {} + def dfs_tree(curr_node, next_node, parent): + if next_node != "": + parent[next_node] = curr_node + return True + + depth_first_search(G1, V1.name, dfs_tree, parent) + assert (parent[V3.name] == V1.name and parent[V2.name] == V1.name) or \ + (parent[V3.name] == V2.name and parent[V2.name] == V1.name) + + V4 = GraphNode(0) + V5 = GraphNode(1) + V6 = GraphNode(2) + V7 = GraphNode(3) + V8 = GraphNode(4) + + edges = [ + (V4.name, V5.name), + (V5.name, V6.name), + (V6.name, V7.name), + (V6.name, V4.name), + (V7.name, V8.name) + ] + + G2 = Graph(V4, V5, V6, V7, V8) + + for edge in edges: + G2.add_edge(*edge) + + path = [] + def path_finder(curr_node, next_node, dest_node, parent, path): + if next_node != "": + parent[next_node] = curr_node + if curr_node == dest_node: + node = curr_node + path.append(node) + while node is not None: + if parent.get(node, None) is not None: + path.append(parent[node]) + node = parent.get(node, None) + path.reverse() + return False + return True + + parent.clear() + depth_first_search(G2, V4.name, path_finder, V7.name, parent, path) + assert path == [V4.name, V5.name, V6.name, V7.name] + + _test_depth_first_search("List") + _test_depth_first_search("Matrix") + +def test_shortest_paths(): + + def _test_shortest_paths_positive_edges(ds, algorithm): + import 
pydatastructs.utils.misc_util as utils + GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") + vertices = [GraphNode('S'), GraphNode('C'), + GraphNode('SLC'), GraphNode('SF'), + GraphNode('D')] + + graph = Graph(*vertices) + graph.add_edge('S', 'SLC', 2) + graph.add_edge('C', 'S', 4) + graph.add_edge('C', 'D', 2) + graph.add_edge('SLC', 'C', 2) + graph.add_edge('SLC', 'D', 3) + graph.add_edge('SF', 'SLC', 2) + graph.add_edge('SF', 'S', 2) + graph.add_edge('D', 'SF', 3) + dist, pred = shortest_paths(graph, algorithm, 'SLC') + assert dist == {'S': 6, 'C': 2, 'SLC': 0, 'SF': 6, 'D': 3} + assert pred == {'S': 'C', 'C': 'SLC', 'SLC': None, 'SF': 'D', 'D': 'SLC'} + dist, pred = shortest_paths(graph, algorithm, 'SLC', 'SF') + assert dist == 6 + assert pred == {'S': 'C', 'C': 'SLC', 'SLC': None, 'SF': 'D', 'D': 'SLC'} + graph.remove_edge('SLC', 'D') + graph.add_edge('D', 'SLC', -10) + assert raises(ValueError, lambda: shortest_paths(graph, 'bellman_ford', 'SLC')) + + if (ds == 'List' and algorithm == 'dijkstra'): + vertices2 = [AdjacencyListGraphNode('S', 0, backend = Backend.CPP), AdjacencyListGraphNode('C', 0, backend = Backend.CPP), + AdjacencyListGraphNode('SLC', 0, backend = Backend.CPP), AdjacencyListGraphNode('SF', 0, backend = Backend.CPP), + AdjacencyListGraphNode('D', 0, backend = Backend.CPP)] + graph2 = Graph(*vertices2, backend = Backend.CPP) + graph2.add_edge('S', 'SLC', 2) + graph2.add_edge('C', 'S', 4) + graph2.add_edge('C', 'D', 2) + graph2.add_edge('SLC', 'C', 2) + graph2.add_edge('SLC', 'D', 3) + graph2.add_edge('SF', 'SLC', 2) + graph2.add_edge('SF', 'S', 2) + graph2.add_edge('D', 'SF', 3) + (dist2, pred2) = shortest_paths(graph2, algorithm, 'SLC', backend = Backend.CPP) + assert dist2 == {'S': 6, 'C': 2, 'SLC': 0, 'SF': 6, 'D': 3} + assert pred2 == {'S': 'C', 'C': 'SLC', 'SLC': None, 'SF': 'D', 'D': 'SLC'} + + + + def _test_shortest_paths_negative_edges(ds, algorithm): + import pydatastructs.utils.misc_util as utils + GraphNode = getattr(utils, 
"Adjacency" + ds + "GraphNode") + vertices = [GraphNode('s'), GraphNode('a'), + GraphNode('b'), GraphNode('c'), + GraphNode('d')] + + graph = Graph(*vertices) + graph.add_edge('s', 'a', 3) + graph.add_edge('s', 'b', 2) + graph.add_edge('a', 'c', 1) + graph.add_edge('b', 'd', 1) + graph.add_edge('b', 'a', -2) + graph.add_edge('c', 'd', 1) + dist, pred = shortest_paths(graph, algorithm, 's') + assert dist == {'s': 0, 'a': 0, 'b': 2, 'c': 1, 'd': 2} + assert pred == {'s': None, 'a': 'b', 'b': 's', 'c': 'a', 'd': 'c'} + dist, pred = shortest_paths(graph, algorithm, 's', 'd') + assert dist == 2 + assert pred == {'s': None, 'a': 'b', 'b': 's', 'c': 'a', 'd': 'c'} + + _test_shortest_paths_positive_edges("List", 'bellman_ford') + _test_shortest_paths_positive_edges("Matrix", 'bellman_ford') + _test_shortest_paths_negative_edges("List", 'bellman_ford') + _test_shortest_paths_negative_edges("Matrix", 'bellman_ford') + _test_shortest_paths_positive_edges("List", 'dijkstra') + _test_shortest_paths_positive_edges("Matrix", 'dijkstra') + +def test_all_pair_shortest_paths(): + + def _test_shortest_paths_negative_edges(ds, algorithm): + import pydatastructs.utils.misc_util as utils + GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") + vertices = [GraphNode('1'), GraphNode('2'), + GraphNode('3'), GraphNode('4')] + + graph = Graph(*vertices) + graph.add_edge('1', '3', -2) + graph.add_edge('2', '1', 4) + graph.add_edge('2', '3', 3) + graph.add_edge('3', '4', 2) + graph.add_edge('4', '2', -1) + dist, next_v = all_pair_shortest_paths(graph, algorithm) + assert dist == {'1': {'3': -2, '1': 0, '4': 0, '2': -1}, + '2': {'1': 4, '3': 2, '2': 0, '4': 4}, + '3': {'4': 2, '3': 0, '1': 5, '2': 1}, + '4': {'2': -1, '4': 0, '1': 3, '3': 1}} + assert next_v == {'1': {'3': '1', '1': '1', '4': None, '2': None}, + '2': {'1': '2', '3': None, '2': '2', '4': None}, + '3': {'4': '3', '3': '3', '1': None, '2': None}, + '4': {'2': '4', '4': '4', '1': None, '3': None}} + + 
_test_shortest_paths_negative_edges("List", 'floyd_warshall') + _test_shortest_paths_negative_edges("Matrix", 'floyd_warshall') + _test_shortest_paths_negative_edges("List", 'johnson') + +def test_topological_sort(): + + def _test_topological_sort(func, ds, algorithm, threads=None): + import pydatastructs.utils.misc_util as utils + GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") + vertices = [GraphNode('2'), GraphNode('3'), GraphNode('5'), + GraphNode('7'), GraphNode('8'), GraphNode('10'), + GraphNode('11'), GraphNode('9')] + + graph = Graph(*vertices) + graph.add_edge('5', '11') + graph.add_edge('7', '11') + graph.add_edge('7', '8') + graph.add_edge('3', '8') + graph.add_edge('3', '10') + graph.add_edge('11', '2') + graph.add_edge('11', '9') + graph.add_edge('11', '10') + graph.add_edge('8', '9') + if threads is not None: + l = func(graph, algorithm, threads) + else: + l = func(graph, algorithm) + assert all([(l1 in l[0:3]) for l1 in ('3', '5', '7')] + + [(l2 in l[3:5]) for l2 in ('8', '11')] + + [(l3 in l[5:]) for l3 in ('10', '9', '2')]) + + _test_topological_sort(topological_sort, "List", "kahn") + _test_topological_sort(topological_sort_parallel, "List", "kahn", 3) + + +def test_max_flow(): + def _test_max_flow(ds, algorithm): + import pydatastructs.utils.misc_util as utils + GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") + + a = GraphNode('a') + b = GraphNode('b') + c = GraphNode('c') + d = GraphNode('d') + e = GraphNode('e') + + G = Graph(a, b, c, d, e) + + G.add_edge('a', 'b', 3) + G.add_edge('a', 'c', 4) + G.add_edge('b', 'c', 2) + G.add_edge('b', 'd', 3) + G.add_edge('c', 'd', 1) + G.add_edge('d', 'e', 6) + + assert max_flow(G, 'a', 'e', algorithm) == 4 + assert max_flow(G, 'a', 'c', algorithm) == 6 + + a = GraphNode('a') + b = GraphNode('b') + c = GraphNode('c') + d = GraphNode('d') + e = GraphNode('e') + f = GraphNode('f') + + G2 = Graph(a, b, c, d, e, f) + + G2.add_edge('a', 'b', 16) + G2.add_edge('a', 'c', 13) + G2.add_edge('b', 
'c', 10) + G2.add_edge('b', 'd', 12) + G2.add_edge('c', 'b', 4) + G2.add_edge('c', 'e', 14) + G2.add_edge('d', 'c', 9) + G2.add_edge('d', 'f', 20) + G2.add_edge('e', 'd', 7) + G2.add_edge('e', 'f', 4) + + assert max_flow(G2, 'a', 'f', algorithm) == 23 + assert max_flow(G2, 'a', 'd', algorithm) == 19 + + a = GraphNode('a') + b = GraphNode('b') + c = GraphNode('c') + d = GraphNode('d') + + G3 = Graph(a, b, c, d) + + G3.add_edge('a', 'b', 3) + G3.add_edge('a', 'c', 2) + G3.add_edge('b', 'c', 2) + G3.add_edge('b', 'd', 3) + G3.add_edge('c', 'd', 2) + + assert max_flow(G3, 'a', 'd', algorithm) == 5 + assert max_flow(G3, 'a', 'b', algorithm) == 3 + + + _test_max_flow("List", "edmonds_karp") + _test_max_flow("Matrix", "edmonds_karp") + _test_max_flow("List", "dinic") + _test_max_flow("Matrix", "dinic") + + +def test_find_bridges(): + def _test_find_bridges(ds): + import pydatastructs.utils.misc_util as utils + GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") + + impl = 'adjacency_list' if ds == "List" else 'adjacency_matrix' + + v0 = GraphNode(0) + v1 = GraphNode(1) + v2 = GraphNode(2) + v3 = GraphNode(3) + v4 = GraphNode(4) + + G1 = Graph(v0, v1, v2, v3, v4, implementation=impl) + G1.add_edge(v0.name, v1.name) + G1.add_edge(v1.name, v2.name) + G1.add_edge(v2.name, v3.name) + G1.add_edge(v3.name, v4.name) + + bridges = find_bridges(G1) + expected_bridges = [('0', '1'), ('1', '2'), ('2', '3'), ('3', '4')] + assert sorted(bridges) == sorted(expected_bridges) + + u0 = GraphNode(0) + u1 = GraphNode(1) + u2 = GraphNode(2) + + G2 = Graph(u0, u1, u2, implementation=impl) + G2.add_edge(u0.name, u1.name) + G2.add_edge(u1.name, u2.name) + G2.add_edge(u2.name, u0.name) + + bridges = find_bridges(G2) + assert bridges == [] + + w0 = GraphNode(0) + w1 = GraphNode(1) + w2 = GraphNode(2) + w3 = GraphNode(3) + w4 = GraphNode(4) + + G3 = Graph(w0, w1, w2, w3, w4, implementation=impl) + G3.add_edge(w0.name, w1.name) + G3.add_edge(w1.name, w2.name) + G3.add_edge(w3.name, w4.name) + 
+ bridges = find_bridges(G3) + expected_bridges = [('0', '1'), ('1', '2'), ('3', '4')] + assert sorted(bridges) == sorted(expected_bridges) + + _test_find_bridges("List") + _test_find_bridges("Matrix") diff --git a/lib/python3.12/site-packages/pydatastructs/linear_data_structures/__init__.py b/lib/python3.12/site-packages/pydatastructs/linear_data_structures/__init__.py new file mode 100644 index 000000000..c6b3341d2 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/linear_data_structures/__init__.py @@ -0,0 +1,53 @@ +__all__ = [] + +from . import ( + arrays, + linked_lists, + algorithms, +) + +from .arrays import ( + OneDimensionalArray, + DynamicOneDimensionalArray, + MultiDimensionalArray, + ArrayForTrees +) +__all__.extend(arrays.__all__) + +from .linked_lists import ( + SinglyLinkedList, + DoublyLinkedList, + SinglyCircularLinkedList, + DoublyCircularLinkedList, + SkipList +) +__all__.extend(linked_lists.__all__) + +from .algorithms import ( + merge_sort_parallel, + brick_sort, + brick_sort_parallel, + heapsort, + matrix_multiply_parallel, + counting_sort, + bucket_sort, + cocktail_shaker_sort, + quick_sort, + longest_common_subsequence, + is_ordered, + upper_bound, + lower_bound, + longest_increasing_subsequence, + next_permutation, + prev_permutation, + bubble_sort, + linear_search, + binary_search, + jump_search, + selection_sort, + insertion_sort, + intro_sort, + shell_sort, + radix_sort +) +__all__.extend(algorithms.__all__) diff --git a/lib/python3.12/site-packages/pydatastructs/linear_data_structures/_backend/__init__.py b/lib/python3.12/site-packages/pydatastructs/linear_data_structures/_backend/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/lib/python3.12/site-packages/pydatastructs/linear_data_structures/algorithms.py b/lib/python3.12/site-packages/pydatastructs/linear_data_structures/algorithms.py new file mode 100644 index 000000000..6d383fdca --- /dev/null +++ 
b/lib/python3.12/site-packages/pydatastructs/linear_data_structures/algorithms.py @@ -0,0 +1,2010 @@ +from pydatastructs.linear_data_structures.arrays import ( + OneDimensionalArray, DynamicArray, DynamicOneDimensionalArray, Array) +from pydatastructs.linear_data_structures._backend.cpp import _algorithms, _arrays +from pydatastructs.utils.misc_util import ( + _check_type, _comp, Backend, + raise_if_backend_is_not_python) +from concurrent.futures import ThreadPoolExecutor +from math import log, floor, sqrt + +__all__ = [ + 'merge_sort_parallel', + 'brick_sort', + 'brick_sort_parallel', + 'heapsort', + 'matrix_multiply_parallel', + 'counting_sort', + 'bucket_sort', + 'cocktail_shaker_sort', + 'quick_sort', + 'longest_common_subsequence', + 'is_ordered', + 'upper_bound', + 'lower_bound', + 'longest_increasing_subsequence', + 'next_permutation', + 'prev_permutation', + 'bubble_sort', + 'linear_search', + 'binary_search', + 'jump_search', + 'selection_sort', + 'insertion_sort', + 'intro_sort', + 'shell_sort', + 'radix_sort' +] + +def _merge(array, sl, el, sr, er, end, comp): + l, r = [], [] + for i in range(sl, el + 1): + if i <= end: + l.append(array[i]) + array[i] = None + for i in range(sr, er + 1): + if i <= end: + r.append(array[i]) + array[i] = None + i, j, k = 0, 0, sl + while i < len(l) and j < len(r): + if _comp(l[i], r[j], comp): + array[k] = l[i] + i += 1 + else: + array[k] = r[j] + j += 1 + k += 1 + + while i < len(l): + array[k] = l[i] + i += 1 + k += 1 + + while j < len(r): + array[k] = r[j] + j += 1 + k += 1 + +def merge_sort_parallel(array, num_threads, **kwargs): + """ + Implements parallel merge sort. + + Parameters + ========== + + array: Array + The array which is to be sorted. + num_threads: int + The maximum number of threads + to be used for sorting. + start: int + The starting index of the portion + which is to be sorted. + Optional, by default 0 + end: int + The ending index of the portion which + is to be sorted. 
def brick_sort(array, **kwargs):
    """
    Implements Brick Sort / Odd Even sorting algorithm.

    Parameters
    ==========

    array: Array
        The array which is to be sorted.
    start: int
        Starting index of the portion to sort. Optional, by default 0.
    end: int
        Ending index (inclusive) of the portion to sort. Optional,
        by default the index of the last position filled.
    comp: lambda/function
        The comparator used for sorting. If it returns False only
        swapping is performed. Optional, by default less than or
        equal to is used for comparing two values.
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available backend is used.

    Examples
    ========

    >>> from pydatastructs import OneDimensionalArray, brick_sort
    >>> arr = OneDimensionalArray(int,[3, 2, 1])
    >>> brick_sort(arr)
    >>> [arr[0], arr[1], arr[2]]
    [1, 2, 3]

    References
    ==========

    .. [1] https://www.geeksforgeeks.org/odd-even-sort-brick-sort/
    """
    raise_if_backend_is_not_python(
        brick_sort, kwargs.get('backend', Backend.PYTHON))
    start = kwargs.get('start', 0)
    end = kwargs.get('end', len(array) - 1)
    comp = kwargs.get("comp", lambda u, v: u <= v)

    # Alternate odd-indexed and even-indexed neighbour passes until a
    # full sweep performs no swap.
    any_swaps = True
    while any_swaps:
        any_swaps = False
        for j in range(start + 1, end, 2):
            if _comp(array[j + 1], array[j], comp):
                array[j], array[j + 1] = array[j + 1], array[j]
                any_swaps = True
        for j in range(start, end, 2):
            if _comp(array[j + 1], array[j], comp):
                array[j], array[j + 1] = array[j + 1], array[j]
                any_swaps = True

    if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)):
        array._modify(True)
def _brick_sort_swap(array, i, j, comp, is_sorted):
    # Swap an out-of-order neighbour pair and flag (via the shared
    # one-element list ``is_sorted``) that another pass is required.
    if _comp(array[j], array[i], comp):
        array[i], array[j] = array[j], array[i]
        is_sorted[0] = False

def brick_sort_parallel(array, num_threads, **kwargs):
    """
    Implements Concurrent Brick Sort / Odd Even sorting algorithm.

    Parameters
    ==========

    array: Array/list
        The array which is to be sorted.
    num_threads: int
        The maximum number of threads to be used for sorting.
    start: int
        Starting index of the portion to sort. Optional, by default 0.
    end: int
        Ending index (inclusive) of the portion to sort. Optional,
        by default the index of the last position filled.
    comp: lambda/function
        The comparator used for sorting. If it returns False only
        swapping is performed. Optional, by default less than or
        equal to is used for comparing two values.
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available backend is used.

    Examples
    ========

    >>> from pydatastructs import OneDimensionalArray, brick_sort_parallel
    >>> arr = OneDimensionalArray(int,[3, 2, 1])
    >>> brick_sort_parallel(arr, num_threads=5)
    >>> [arr[0], arr[1], arr[2]]
    [1, 2, 3]

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Odd%E2%80%93even_sort
    """
    raise_if_backend_is_not_python(
        brick_sort_parallel, kwargs.get('backend', Backend.PYTHON))
    start = kwargs.get('start', 0)
    end = kwargs.get('end', len(array) - 1)
    comp = kwargs.get("comp", lambda u, v: u <= v)

    done = [False]
    with ThreadPoolExecutor(max_workers=num_threads) as executor:
        while not done[0]:
            done[0] = True
            # Odd-indexed pairs, then even-indexed pairs; each compare
            # is submitted to the pool and awaited immediately.
            for j in range(start + 1, end, 2):
                executor.submit(_brick_sort_swap, array, j, j + 1,
                                comp, done).result()
            for j in range(start, end, 2):
                executor.submit(_brick_sort_swap, array, j, j + 1,
                                comp, done).result()

    if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)):
        array._modify(True)
def heapsort(array, **kwargs):
    """
    Implements Heapsort algorithm via a binary min-heap.

    Parameters
    ==========

    array: Array
        The array which is to be sorted.
    start: int
        Starting index of the portion to sort. Optional, by default 0.
    end: int
        Ending index (inclusive) of the portion to sort. Optional,
        by default the index of the last position filled.
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available backend is used.

    Examples
    ========

    >>> from pydatastructs import OneDimensionalArray, heapsort
    >>> arr = OneDimensionalArray(int,[3, 2, 1])
    >>> heapsort(arr)
    >>> [arr[0], arr[1], arr[2]]
    [1, 2, 3]

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Heapsort

    Note
    ====

    This function does not support custom comparators as is the case
    with other sorting functions in this file.
    """
    raise_if_backend_is_not_python(
        heapsort, kwargs.get('backend', Backend.PYTHON))
    from pydatastructs.trees.heaps import BinaryHeap

    start = kwargs.get('start', 0)
    end = kwargs.get('end', len(array) - 1)

    # Push every non-empty slot onto a min-heap, blanking the array.
    heap = BinaryHeap(heap_property="min")
    for idx in range(start, end + 1):
        if array[idx] is not None:
            heap.insert(array[idx])
            array[idx] = None

    # Pop back in non-decreasing order, packing values to the front.
    write = start
    while not heap.is_empty:
        array[write] = heap.extract().key
        write += 1

    if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)):
        array._modify(True)
def counting_sort(array: Array, **kwargs) -> Array:
    """
    Performs counting sort on the given array.

    Parameters
    ==========

    array: Array
        The array which is to be sorted.
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available backend is used.

    Returns
    =======

    output: Array
        The sorted array. The output array doesn't contain any
        ``None`` value.

    Examples
    ========

    >>> from pydatastructs import DynamicOneDimensionalArray as DODA, counting_sort
    >>> arr = DODA(int, [5, 78, 1, 0])
    >>> out = counting_sort(arr)
    >>> str(out)
    "['0', '1', '5', '78']"

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Counting_sort

    Note
    ====

    Since counting sort is a non-comparison sorting algorithm,
    custom comparators aren't allowed.
    """
    raise_if_backend_is_not_python(
        counting_sort, kwargs.get('backend', Backend.PYTHON))
    # Find the range of the non-None values in a single pass.
    # (The previous revision also tracked a ``none_count`` that was
    # never used; it has been removed.)
    max_val, min_val = array[0], array[0]
    for i in range(len(array)):
        item = array[i]
        if item is not None:
            if max_val is None or max_val < item:
                max_val = item
            if min_val is None or item < min_val:
                min_val = item
    if min_val is None or max_val is None:
        # The array holds no values at all; nothing to sort.
        return array

    # Histogram of values, shifted by min_val.
    count = [0] * (max_val - min_val + 1)
    for i in range(len(array)):
        if array[i] is not None:
            count[array[i] - min_val] += 1

    # Exclusive prefix sums: count[v] becomes the first output slot
    # for value v + min_val.
    total = 0
    for i in range(max_val - min_val + 1):
        count[i], total = total, count[i] + total

    output = type(array)(array._dtype,
                         [array[i] for i in range(len(array))
                          if array[i] is not None])
    if _check_type(output, DynamicArray):
        output._modify(force=True)

    for i in range(len(array)):
        x = array[i]
        if x is not None:
            output[count[x - min_val]] = x
            count[x - min_val] += 1

    return output
+ """ + raise_if_backend_is_not_python( + counting_sort, kwargs.get('backend', Backend.PYTHON)) + max_val, min_val = array[0], array[0] + none_count = 0 + for i in range(len(array)): + if array[i] is not None: + if max_val is None or max_val < array[i]: + max_val = array[i] + if min_val is None or array[i] < min_val: + min_val = array[i] + else: + none_count += 1 + if min_val is None or max_val is None: + return array + + count = [0 for _ in range(max_val - min_val + 1)] + for i in range(len(array)): + if array[i] is not None: + count[array[i] - min_val] += 1 + + total = 0 + for i in range(max_val - min_val + 1): + count[i], total = total, count[i] + total + + output = type(array)(array._dtype, + [array[i] for i in range(len(array)) + if array[i] is not None]) + if _check_type(output, DynamicArray): + output._modify(force=True) + + for i in range(len(array)): + x = array[i] + if x is not None: + output[count[x-min_val]] = x + count[x-min_val] += 1 + + return output + +def _matrix_multiply_helper(m1, m2, row, col): + s = 0 + for i in range(len(m1)): + s += m1[row][i] * m2[i][col] + return s + +def matrix_multiply_parallel(matrix_1, matrix_2, num_threads): + """ + Implements concurrent Matrix multiplication + + Parameters + ========== + + matrix_1: Any matrix representation + Left matrix + matrix_2: Any matrix representation + Right matrix + num_threads: int + The maximum number of threads + to be used for multiplication. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Raises + ====== + + ValueError + When the columns in matrix_1 are not equal to the rows in matrix_2 + + Returns + ======= + + C: list + The result of matrix multiplication. 
def _bucket_sort_helper(bucket: Array) -> Array:
    # In-place insertion sort used on the individual buckets.
    for i in range(1, len(bucket)):
        key = bucket[i]
        j = i - 1
        while j >= 0 and bucket[j] > key:
            bucket[j + 1] = bucket[j]
            j -= 1
        bucket[j + 1] = key
    return bucket

def bucket_sort(array: Array, **kwargs) -> Array:
    """
    Performs bucket sort on the given array.

    Parameters
    ==========

    array: Array
        The array which is to be sorted.
    start: int
        Starting index of the portion to sort. Optional, by default 0.
    end: int
        Ending index (inclusive) of the portion to sort. Optional,
        by default the index of the last position filled.
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available backend is used.

    Returns
    =======

    output: Array
        The sorted array (sorted in place and returned).

    Examples
    ========

    >>> from pydatastructs import DynamicOneDimensionalArray as DODA, bucket_sort
    >>> arr = DODA(int, [5, 78, 1, 0])
    >>> out = bucket_sort(arr)
    >>> str(out)
    "['0', '1', '5', '78']"

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Bucket_sort

    Note
    ====

    This function does not support custom comparators as is the case
    with other sorting functions in this file.
    """
    raise_if_backend_is_not_python(
        bucket_sort, kwargs.get('backend', Backend.PYTHON))
    start = kwargs.get('start', 0)
    end = kwargs.get('end', len(array) - 1)

    # One pass to count the filled slots and find the maximum value.
    max_value, count = None, 0
    for i in range(start, end + 1):
        if array[i] is not None:
            count += 1
            if max_value is None or array[i] > max_value:
                max_value = array[i]
    if count == 0:
        # Only empty slots in the requested range; nothing to sort.
        # (The previous revision raised on this input.)
        return array

    number_of_null_values = end - start + 1 - count
    # Bucket width must be at least 1, otherwise array[i] // size
    # divides by zero whenever max_value < count.
    size = max(1, max_value // count)

    # Create one bucket per filled slot.
    buckets_list = [[] for _ in range(count)]

    # Distribute values by magnitude; clamp overflowing indices into
    # the last bucket. (The previous revision compared ``j is not
    # count`` — an integer identity test — and indexed out of range
    # whenever array[i] // size exceeded count.)
    for i in range(start, end + 1):
        if array[i] is not None:
            j = min(array[i] // size, count - 1)
            buckets_list[j].append(array[i])

    # Sort elements within the buckets using insertion sort.
    for z in range(count):
        _bucket_sort_helper(buckets_list[z])

    # Concatenate the buckets into a single sorted sequence.
    sorted_list = []
    for z in range(count):
        sorted_list.extend(buckets_list[z])

    # Pack the sorted values to the front; trailing slots become None.
    for i in range(end, end - number_of_null_values, -1):
        array[i] = None
    for i in range(start, end - number_of_null_values + 1):
        array[i] = sorted_list[i - start]
    if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)):
        array._modify(True)
    return array
def cocktail_shaker_sort(array: Array, **kwargs) -> Array:
    """
    Performs cocktail (bidirectional bubble) sort on the given array.

    Parameters
    ==========

    array: Array
        The array which is to be sorted.
    start: int
        Starting index of the portion to sort. Optional, by default 0.
    end: int
        Ending index (inclusive) of the portion to sort. Optional,
        by default the index of the last position filled.
    comp: lambda/function
        The comparator used for sorting. If it returns False only
        swapping is performed. Optional, by default less than or
        equal to is used for comparing two values.
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available backend is used.

    Returns
    =======

    output: Array
        The sorted array (sorted in place and returned).

    Examples
    ========

    >>> from pydatastructs import OneDimensionalArray as ODA, cocktail_shaker_sort
    >>> arr = ODA(int, [5, 78, 1, 0])
    >>> out = cocktail_shaker_sort(arr)
    >>> str(out)
    '[0, 1, 5, 78]'

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Cocktail_shaker_sort
    """
    raise_if_backend_is_not_python(
        cocktail_shaker_sort, kwargs.get('backend', Backend.PYTHON))
    lower = kwargs.get('start', 0)
    upper = kwargs.get('end', len(array) - 1)
    comp = kwargs.get("comp", lambda u, v: u <= v)

    swapped = True
    while swapped and upper - lower >= 1:
        swapped = False
        # Forward pass bubbles the largest element towards ``upper``.
        for j in range(lower, upper):
            if _comp(array[j], array[j + 1], comp) is False:
                array[j + 1], array[j] = array[j], array[j + 1]
                swapped = True
        upper -= 1
        # Backward pass bubbles the smallest element towards ``lower``.
        for j in range(upper, lower, -1):
            if _comp(array[j - 1], array[j], comp) is False:
                array[j], array[j - 1] = array[j - 1], array[j]
                swapped = True
        lower += 1

    if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)):
        array._modify(True)

    return array
def quick_sort(array: Array, **kwargs) -> Array:
    """
    Performs quick sort on the given array.

    Parameters
    ==========

    array: Array
        The array which is to be sorted.
    start: int
        Starting index of the portion to sort. Optional, by default 0.
    end: int
        Ending index (inclusive) of the portion to sort. Optional,
        by default the index of the last position filled.
    comp: lambda/function
        The comparator used for sorting. If it returns False only
        swapping is performed. Optional, by default less than or
        equal to is used for comparing two values.
    pick_pivot_element: lambda/function
        The function implementing the pivot picking logic. Should
        accept ``low``, ``high`` and ``array`` in this order, where
        ``low``/``high`` delimit the current partition. Optional, by
        default picks the element at the ``high`` index.
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available backend is used.

    Returns
    =======

    output: Array
        The sorted array (sorted in place and returned).

    Examples
    ========

    >>> from pydatastructs import OneDimensionalArray as ODA, quick_sort
    >>> arr = ODA(int, [5, 78, 1, 0])
    >>> out = quick_sort(arr)
    >>> str(out)
    '[0, 1, 5, 78]'

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Quicksort
    """
    backend = kwargs.pop("backend", Backend.PYTHON)
    if backend == Backend.CPP:
        return _algorithms.quick_sort(array, **kwargs)
    from pydatastructs import Stack
    comp = kwargs.get("comp", lambda u, v: u <= v)
    pick_pivot_element = kwargs.get("pick_pivot_element",
                                    lambda low, high, array: array[high])

    def _partition(low, high):
        # Lomuto partition around the chosen pivot value; returns the
        # pivot's final resting index.
        boundary = low - 1
        pivot = pick_pivot_element(low, high, array)
        for k in range(low, high):
            if _comp(array[k], pivot, comp) is True:
                boundary += 1
                array[boundary], array[k] = array[k], array[boundary]
        array[boundary + 1], array[high] = array[high], array[boundary + 1]
        return boundary + 1

    # Iterative quick sort driven by an explicit stack of (low, high)
    # sub-range bounds.
    lower = kwargs.get('start', 0)
    upper = kwargs.get('end', len(array) - 1)
    stack = Stack()
    stack.push(lower)
    stack.push(upper)

    while stack.is_empty is False:
        high = stack.pop()
        low = stack.pop()
        p = _partition(low, high)
        if p - 1 > low:
            stack.push(low)
            stack.push(p - 1)
        if p + 1 < high:
            stack.push(p + 1)
            stack.push(high)

    if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)):
        array._modify(True)

    return array
def longest_common_subsequence(seq1: OneDimensionalArray, seq2: OneDimensionalArray,
                               **kwargs) -> OneDimensionalArray:
    """
    Finds the longest common subsequence between the two given
    sequences.

    Parameters
    ==========

    seq1: OneDimensionalArray
        The first sequence.
    seq2: OneDimensionalArray
        The second sequence.
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available backend is used.

    Returns
    =======

    output: OneDimensionalArray
        The longest common subsequence.

    Examples
    ========

    >>> from pydatastructs import longest_common_subsequence as LCS, OneDimensionalArray as ODA
    >>> arr1 = ODA(str, ['A', 'B', 'C', 'D', 'E'])
    >>> arr2 = ODA(str, ['A', 'B', 'C', 'G' ,'D', 'E', 'F'])
    >>> lcs = LCS(arr1, arr2)
    >>> str(lcs)
    "['A', 'B', 'C', 'D', 'E']"

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Longest_common_subsequence_problem

    Note
    ====

    The data types of elements across both the sequences should be
    same and should be comparable.
    """
    raise_if_backend_is_not_python(
        longest_common_subsequence, kwargs.get('backend', Backend.PYTHON))
    row = len(seq1)
    col = len(seq2)

    # Row-by-row DP; each cell holds (LCS length, LCS elements).
    # Only the previous row is retained.
    prev_row = [(0, []) for _ in range(col + 1)]
    for i in range(1, row + 1):
        curr_row = [(0, [])]
        for j in range(1, col + 1):
            if seq1[i - 1] == seq2[j - 1]:
                length, subseq = prev_row[j - 1]
                curr_row.append((length + 1, subseq + [seq1[i - 1]]))
            elif prev_row[j][0] > curr_row[j - 1][0]:
                curr_row.append(prev_row[j])
            else:
                curr_row.append(curr_row[j - 1])
        prev_row = curr_row

    return OneDimensionalArray(seq1._dtype, prev_row[col][1])
def is_ordered(array, **kwargs):
    """
    Checks whether the given array is ordered or not.

    Parameters
    ==========

    array: OneDimensionalArray
        The array which is to be checked for the specified ordering.
    start: int
        Starting index of the portion under consideration. Optional,
        by default 0.
    end: int
        Ending index (inclusive) of the portion under consideration.
        Optional, by default the index of the last position filled.
    comp: lambda/function
        The comparator specifying the desired ordering. Optional, by
        default less than or equal to is used.
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available backend is used.

    Returns
    =======

    True if the specified ordering is present from start to end
    (inclusive) otherwise False. Empty (None) slots are skipped.

    Examples
    ========

    >>> from pydatastructs import OneDimensionalArray, is_ordered
    >>> arr = OneDimensionalArray(int, [1, 2, 3, 4])
    >>> is_ordered(arr)
    True
    """
    backend = kwargs.pop("backend", Backend.PYTHON)
    if backend == Backend.CPP:
        return _algorithms.is_ordered(array, **kwargs)
    lower = kwargs.get('start', 0)
    upper = kwargs.get('end', len(array) - 1)
    comp = kwargs.get("comp", lambda u, v: u <= v)

    # Ordered iff no adjacent (non-empty) pair violates the comparator.
    return all(
        not comp(array[idx], array[idx - 1])
        for idx in range(lower + 1, upper + 1)
        if array[idx] is not None and array[idx - 1] is not None
    )
def upper_bound(array, value, **kwargs):
    """
    Finds the index of the first element greater than the given value
    according to the specified order, using binary search.

    Parameters
    ==========

    array: OneDimensionalArray
        The array in which the upper bound has to be found. Must be
        ordered according to ``comp``.
    value
        The value whose upper bound is required.
    start: int
        Starting index of the searched portion. Optional, by default 0.
    end: int
        Ending index (exclusive) of the searched portion. Optional,
        by default ``len(array)``.
    comp: lambda/function
        The comparator specifying the ordering. Optional, by default
        strict less-than.
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available backend is used.

    Returns
    =======

    index: int
        Index of the upper bound of the given value; ``end`` when no
        element is greater.

    Examples
    ========

    >>> from pydatastructs import upper_bound, OneDimensionalArray as ODA
    >>> arr1 = ODA(int, [4, 5, 5, 6, 7])
    >>> upper_bound(arr1, 5, start=0, end=4)
    3

    Note
    ====

    DynamicOneDimensionalArray objects may not work as expected.
    """
    raise_if_backend_is_not_python(
        upper_bound, kwargs.get('backend', Backend.PYTHON))
    lo = kwargs.get('start', 0)
    hi = kwargs.get('end', len(array))
    comp = kwargs.get('comp', lambda x, y: x < y)

    result = hi
    if comp(value, array[lo]):
        result = lo
    last = hi - 1
    # Standard binary search keeping ``result`` as the best
    # (left-most) index seen so far with value < array[mid].
    while lo <= last:
        mid = (lo + last)//2
        if comp(value, array[mid]):
            result = mid
            last = mid - 1
        else:
            lo = mid + 1
    return result
def lower_bound(array, value, **kwargs):
    """
    Finds the index of the first element which is not less than the
    given value according to the specified order, using binary search.

    Parameters
    ==========

    array: OneDimensionalArray
        The array in which the lower bound has to be found. Must be
        ordered according to ``comp``.
    value
        The value whose lower bound is required.
    start: int
        Starting index of the searched portion. Optional, by default 0.
    end: int
        Ending index (exclusive) of the searched portion. Optional,
        by default ``len(array)``.
    comp: lambda/function
        The comparator specifying the ordering. Optional, by default
        strict less-than.
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available backend is used.

    Returns
    =======

    index: int
        Index of the lower bound of the given value; ``end`` when
        every element is less.

    Examples
    ========

    >>> from pydatastructs import lower_bound, OneDimensionalArray as ODA
    >>> arr1 = ODA(int, [4, 5, 5, 6, 7])
    >>> lower_bound(arr1, 5, end=4, comp=lambda x, y : x < y)
    1

    Note
    ====

    DynamicOneDimensionalArray objects may not work as expected.
    """
    raise_if_backend_is_not_python(
        lower_bound, kwargs.get('backend', Backend.PYTHON))
    lo = kwargs.get('start', 0)
    hi = kwargs.get('end', len(array))
    comp = kwargs.get('comp', lambda x, y: x < y)

    result = hi
    if not comp(array[lo], value):
        result = lo
    last = hi - 1
    # Standard binary search keeping ``result`` as the best
    # (left-most) index seen so far with array[mid] not < value.
    while lo <= last:
        mid = (lo + last)//2
        if comp(array[mid], value):
            lo = mid + 1
        else:
            result = mid
            last = mid - 1
    return result
def longest_increasing_subsequence(array, **kwargs):
    """
    Returns the longest strictly increasing subsequence (as a
    OneDimensionalArray) obtainable from the given OneDimensionalArray.

    Parameters
    ==========

    array: OneDimensionalArray
        The given array.
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available backend is used.

    Returns
    =======

    output: OneDimensionalArray
        The longest increasing subsequence.

    Examples
    ========

    >>> from pydatastructs import OneDimensionalArray as ODA
    >>> from pydatastructs import longest_increasing_subsequence as LIS
    >>> array = ODA(int, [2, 5, 3, 7, 11, 8, 10, 13, 6])
    >>> str(LIS(array))
    '[2, 3, 7, 8, 10, 13]'
    """
    raise_if_backend_is_not_python(
        longest_increasing_subsequence,
        kwargs.get('backend', Backend.PYTHON))
    n = len(array)
    # tails[k] is the index of the smallest known tail value of an
    # increasing subsequence of length k + 1; parent links recover
    # the chain afterwards.
    tails = OneDimensionalArray(int, n)
    tails.fill(0)
    parent = OneDimensionalArray(int, n)
    parent.fill(-1)
    length = 0
    for i in range(1, n):
        if array[i] <= array[tails[0]]:
            # New smallest starting element.
            tails[0] = i
        elif array[tails[length]] < array[i]:
            # Extends the longest chain found so far.
            length += 1
            tails[length] = i
            parent[i] = tails[length - 1]
        else:
            # Replace the first tail value that is >= array[i].
            candidates = [array[tails[k]] for k in range(length)]
            pos = lower_bound(candidates, array[i])
            tails[pos] = i
            parent[i] = tails[pos - 1]
    # Walk parent links back from the end of the best chain, then
    # reverse into the result array.
    chain = DynamicOneDimensionalArray(int, 0)
    at = tails[length]
    while at != -1:
        chain.append(array[at])
        at = parent[at]
    m = chain._last_pos_filled + 1
    result = OneDimensionalArray(int, m)
    for k in range(m):
        result[m - 1 - k] = chain[k]
    return result
def _permutation_util(array, start, end, comp, perm_comp):
    # Shared engine for next_permutation/prev_permutation.
    # ``perm_comp(x, y, comp)`` tells whether the adjacent pair (x, y)
    # is *already* in the scanned-past order; the first violation from
    # the right is the pivot. Returns (changed, permuted_copy).
    size = end - start + 1
    buf = OneDimensionalArray(int, size)
    for src, dst in zip(range(start, end + 1), range(size)):
        buf[dst] = array[src]
    # Scan right-to-left for the pivot position.
    pivot = size - 1
    while pivot > 0 and perm_comp(buf[pivot - 1], buf[pivot], comp):
        pivot -= 1
    if pivot > 0:
        # Binary search in the suffix for the swap partner of the
        # element just left of the pivot, then swap them.
        lo, hi = pivot, size - 1
        while lo <= hi:
            mid = lo + (hi - lo) // 2
            if not perm_comp(buf[pivot - 1], buf[mid], comp):
                lo = mid + 1
            else:
                hi = mid - 1
        buf[pivot - 1], buf[lo - 1] = \
            buf[lo - 1], buf[pivot - 1]
    # Reverse the suffix (the whole array when no pivot was found,
    # yielding the first/last permutation).
    lo, hi = pivot, size - 1
    while lo < hi:
        buf[lo], buf[hi] = buf[hi], buf[lo]
        lo += 1
        hi -= 1
    return pivot > 0, buf
def next_permutation(array, **kwargs):
    """
    If the next lexicographically higher permutation exists, returns
    ``True`` and that permutation in a new array; otherwise returns
    ``False`` and the first (lowest) permutation.

    Parameters
    ==========

    array: OneDimensionalArray
        The array whose next permutation is required.
    start: int
        Starting index of the considered portion. Optional, by
        default 0.
    end: int
        Ending index (inclusive) of the considered portion. Optional,
        by default the index of the last position filled.
    comp: lambda/function
        The comparator specifying the lexicographical ordering.
        Optional, by default strict less-than.
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available backend is used.

    Returns
    =======

    output: bool, OneDimensionalArray
        Whether a greater permutation exists, and the permutation.

    Examples
    ========

    >>> from pydatastructs import next_permutation, OneDimensionalArray as ODA
    >>> array = ODA(int, [1, 2, 3, 4])
    >>> is_greater, next_permute = next_permutation(array)
    >>> is_greater, str(next_permute)
    (True, '[1, 2, 4, 3]')

    References
    ==========

    .. [1] http://www.cplusplus.com/reference/algorithm/next_permutation/
    """
    raise_if_backend_is_not_python(
        next_permutation, kwargs.get('backend', Backend.PYTHON))
    start = kwargs.get('start', 0)
    end = kwargs.get('end', len(array) - 1)
    comp = kwargs.get('comp', lambda x, y: x < y)

    def _non_ascending(x, y, _comp):
        # Pairs not in ascending order keep the pivot scan moving.
        return not _comp(x, y)

    return _permutation_util(array, start, end, comp, _non_ascending)
def prev_permutation(array, **kwargs):
    """
    If the next lexicographically lower permutation exists, returns
    ``True`` and that permutation in a new array; otherwise returns
    ``False`` and the last (highest) permutation.

    Parameters
    ==========

    array: OneDimensionalArray
        The array whose previous permutation is required.
    start: int
        Starting index of the considered portion. Optional, by
        default 0.
    end: int
        Ending index (inclusive) of the considered portion. Optional,
        by default the index of the last position filled.
    comp: lambda/function
        The comparator specifying the lexicographical ordering.
        Optional, by default strict less-than.
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available backend is used.

    Returns
    =======

    output: bool, OneDimensionalArray
        Whether a smaller permutation exists, and the permutation.

    Examples
    ========

    >>> from pydatastructs import prev_permutation, OneDimensionalArray as ODA
    >>> array = ODA(int, [1, 2, 4, 3])
    >>> is_lower, prev_permute = prev_permutation(array)
    >>> is_lower, str(prev_permute)
    (True, '[1, 2, 3, 4]')

    References
    ==========

    .. [1] http://www.cplusplus.com/reference/algorithm/prev_permutation/
    """
    raise_if_backend_is_not_python(
        prev_permutation, kwargs.get('backend', Backend.PYTHON))
    start = kwargs.get('start', 0)
    end = kwargs.get('end', len(array) - 1)
    comp = kwargs.get('comp', lambda x, y: x < y)

    def _ascending(x, y, _comp):
        # Pairs in ascending order keep the pivot scan moving.
        return bool(_comp(x, y))

    return _permutation_util(array, start, end, comp, _ascending)
def bubble_sort(array, **kwargs):
    """
    Implements bubble sort algorithm.

    Parameters
    ==========

    array: Array
        The array which is to be sorted.
    start: int
        Starting index of the portion to sort. Optional, by default 0.
    end: int
        Ending index (inclusive) of the portion to sort. Optional,
        by default the index of the last position filled.
    comp: lambda/function
        The comparator used for sorting. If it returns False only
        swapping is performed. Optional, by default less than or
        equal to is used for comparing two values.
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available backend is used.

    Returns
    =======

    output: Array
        The sorted array (sorted in place and returned).

    Examples
    ========

    >>> from pydatastructs import OneDimensionalArray, bubble_sort
    >>> arr = OneDimensionalArray(int,[3, 2, 1])
    >>> out = bubble_sort(arr)
    >>> str(out)
    '[1, 2, 3]'

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Bubble_sort
    """
    backend = kwargs.pop("backend", Backend.PYTHON)
    if backend == Backend.CPP:
        return _algorithms.bubble_sort(array, **kwargs)
    if backend == Backend.LLVM:
        return _algorithms.bubble_sort_llvm(array, **kwargs)
    start = kwargs.get('start', 0)
    end = kwargs.get('end', len(array) - 1)
    comp = kwargs.get("comp", lambda u, v: u <= v)
    # end - start passes suffice for the [start, end] sub-range (the
    # previous revision always ran len(array) - 1 passes), and a pass
    # with no swaps means the range is already sorted.
    for _ in range(end - start):
        swapped = False
        for j in range(start, end):
            if not _comp(array[j], array[j + 1], comp):
                array[j], array[j + 1] = array[j + 1], array[j]
                swapped = True
        if not swapped:
            break

    if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)):
        array._modify(True)

    return array
def selection_sort(array, **kwargs):
    """
    Implements selection sort algorithm.

    Parameters
    ==========

    array: Array
        The array which is to be sorted.
    start: int
        The starting index of the portion which is to be sorted.
        Optional, by default 0
    end: int
        The ending index of the portion which is to be sorted.
        Optional, by default the index of the last position filled.
    comp: lambda/function
        The comparator which is to be used for sorting. If the
        function returns False then only swapping is performed.
        Optional, by default, less than or equal to is used for
        comparing two values.
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available backend is used.

    Returns
    =======

    output: Array
        The sorted array.

    Examples
    ========

    >>> from pydatastructs import OneDimensionalArray, selection_sort
    >>> arr = OneDimensionalArray(int,[3, 2, 1])
    >>> out = selection_sort(arr)
    >>> str(out)
    '[1, 2, 3]'
    >>> out = selection_sort(arr, comp=lambda u, v: u > v)
    >>> str(out)
    '[3, 2, 1]'

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Selection_sort
    """
    backend = kwargs.pop("backend", Backend.PYTHON)
    if backend == Backend.CPP:
        # Bug fix: this previously dispatched to the C++ *bubble_sort*,
        # silently running a different algorithm on the CPP backend.
        return _algorithms.selection_sort(array, **kwargs)
    start = kwargs.get('start', 0)
    end = kwargs.get('end', len(array) - 1)
    comp = kwargs.get('comp', lambda u, v: u <= v)

    for i in range(start, end + 1):
        # Find the extremum (per `comp`) of the unsorted suffix.
        jMin = i
        for j in range(i + 1, end + 1):
            if not _comp(array[jMin], array[j], comp):
                jMin = j
        if jMin != i:
            array[i], array[jMin] = array[jMin], array[i]

    if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)):
        # Dynamic arrays must recompute their last-filled position.
        array._modify(True)

    return array
def linear_search(array, value, **kwargs):
    """
    Implements linear search algorithm.

    Parameters
    ==========

    array: OneDimensionalArray
        The array which is to be searched.
    value:
        The value which is to be searched inside the array.
    start: int
        The starting index of the portion which is to be searched.
        Optional, by default 0
    end: int
        The ending index of the portion which is to be searched.
        Optional, by default the index of the last position filled.
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available backend is used.

    Returns
    =======

    output: int
        The index of value if found.
        If not found, returns None.

    Examples
    ========

    >>> from pydatastructs import OneDimensionalArray, linear_search
    >>> arr = OneDimensionalArray(int,[3, 2, 1])
    >>> linear_search(arr, 2)
    1

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Linear_search
    """
    backend = kwargs.pop("backend", Backend.PYTHON)
    if backend == Backend.CPP:
        return _algorithms.linear_search(array, value, **kwargs)
    lo = kwargs.get('start', 0)
    hi = kwargs.get('end', len(array) - 1)

    # Sequential scan over the requested window.
    position = lo
    while position <= hi:
        if array[position] == value:
            return position
        position += 1

    return None
+ """ + backend = kwargs.pop("backend", Backend.PYTHON) + if backend == Backend.CPP: + return _algorithms.binary_search(array, value, **kwargs) + start = kwargs.get('start', 0) + end = kwargs.get('end', len(array) - 1) + comp = kwargs.get("comp", lambda u, v: u <= v) + + left = start + right = end + while left <= right: + middle = left//2 + right//2 + left % 2 * right % 2 + if array[middle] == value: + return middle + if comp(array[middle], value): + left = middle + 1 + else: + right = middle - 1 + + return None + +def jump_search(array, value, **kwargs): + """ + Implements jump search algorithm. + + Parameters + ========== + + array: OneDimensionalArray + The array which is to be searched. + value: + The value which is to be searched + inside the array. + start: int + The starting index of the portion + which is to be searched. + Optional, by default 0 + end: int + The ending index of the portion which + is to be searched. + Optional, by default the index + of the last position filled. + comp: lambda/function + The comparator which is to be used + for performing comparisons. + Optional, by default, less than or + equal to is used for comparing two + values. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Returns + ======= + + output: int + The index of elem if found. + If not found, returns None. + + Examples + ======== + + >>> from pydatastructs import OneDimensionalArray, jump_search + >>> arr = OneDimensionalArray(int,[1, 2, 3, 5, 10, 12]) + >>> linear_search(arr, 5) + 3 + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Jump_search + + Note + ==== + + This algorithm assumes that the portion of the array + to be searched is already sorted. 
+ """ + backend = kwargs.pop("backend", Backend.PYTHON) + if backend == Backend.CPP: + return _algorithms.jump_search(array, value, **kwargs) + start = kwargs.get('start', 0) + end = kwargs.get('end', len(array) - 1) + comp = kwargs.get("comp", lambda u, v: u < v) + + step = int(sqrt(end - start + 1)) + current_position = step + prev = start + while comp(array[min(current_position, end)], value): + prev = current_position + current_position += step + if prev > end: + return None + while prev <= min(current_position, end): + if array[prev] == value: + return prev + prev += 1 + + return None + +def intro_sort(array, **kwargs) -> Array: + """ + Performs intro sort on the given array. + + Parameters + ========== + + array: Array + The array which is to be sorted. + start: int + The starting index of the portion + which is to be sorted. + Optional, by default 0 + end: int + The ending index of the portion which + is to be sorted. + Optional, by default the index + of the last position filled. + maxdepth: Enables the user to define the maximum + recursion depth, takes value 2*log(length(A)) + by default (ref: Wikipedia[1]). + ins_threshold: Threshold under which insertion + sort has to be performed, default value is + 16 (ref: Wikipedia[1]). + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Returns + ======= + + output: Array + The sorted array. + + Examples + ======== + + >>> from pydatastructs import OneDimensionalArray as ODA, intro_sort + >>> arr = ODA(int, [5, 78, 1, 0]) + >>> out = intro_sort(arr) + >>> str(out) + '[0, 1, 5, 78]' + >>> arr = ODA(int, [21, 37, 5]) + >>> out = intro_sort(arr) + >>> str(out) + '[5, 21, 37]' + + Note + ==== + + This function does not support custom comparators as + is the case with other sorting functions in this file. + This is because of heapsort's limitation. + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/Introsort + """ + raise_if_backend_is_not_python( + intro_sort, kwargs.get('backend', Backend.PYTHON)) + + # Always sorts in increasing order, this is because of + # heapsort's limitation + comp = lambda u, v: u <= v + lower = kwargs.get('start', 0) + upper = kwargs.get('end', len(array) - 1) + n = upper - lower + 1 + if n <= 0: + maxdepth = 0 + else: + maxdepth = kwargs.get("maxdepth", int(2 * (log(n)/log(2)))) + + ins_threshold = kwargs.get("ins_threshold", 16) + + def partition(array, lower, upper): + pivot = array[lower] + left = lower + 1 + right = upper + done = False + while not done: + while left <= right and _comp(array[left], pivot, comp): + left += 1 + while _comp(pivot, array[right], comp) and right >= left: + right -= 1 + if right < left: + done = True + else: + array[left], array[right] = array[right], array[left] + left+=1 + right-=1 + + array[lower], array[right] = array[right], array[lower] + return right + + if n < ins_threshold: + return insertion_sort(array, start=lower, end=upper) + elif maxdepth == 0: + heapsort(array, start=lower, end=upper) + return array + else: + p = partition(array, lower, upper) + + intro_sort(array, start=lower, end=p-1, maxdepth=maxdepth-1, ins_threshold=ins_threshold) + intro_sort(array, start=p+1, end=upper, maxdepth=maxdepth-1, ins_threshold=ins_threshold) + + return array + +def shell_sort(array, *args, **kwargs): + """ + Implements shell sort algorithm. + + Parameters + ========== + + array: Array + The array which is to be sorted. + start: int + The starting index of the portion + which is to be sorted. + Optional, by default 0 + end: int + The ending index of the portion which + is to be sorted. + Optional, by default the index + of the last position filled. + comp: lambda/function + The comparator which is to be used + for sorting. If the function returns + False then only swapping is performed. + Optional, by default, less than or + equal to is used for comparing two + values. 
def radix_sort(array, *args, **kwargs):
    """
    Implements radix sort algorithm for non-negative integers.

    Parameters
    ==========

    array: Array
        The array which is to be sorted. Must contain non-negative
        integers (None holes from dynamic arrays are tolerated and
        kept out of the sorted prefix).
    start: int
        The starting index of the portion which is to be sorted.
        Optional, by default 0
    end: int
        The ending index of the portion which is to be sorted.
        Optional, by default the index of the last position filled.
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available backend is used.

    Returns
    =======

    output: Array
        The sorted array.

    Examples
    ========

    >>> from pydatastructs.linear_data_structures.algorithms import OneDimensionalArray, radix_sort
    >>> arr = OneDimensionalArray(int, [170, 45, 75, 90, 802, 24, 2, 66])
    >>> out = radix_sort(arr)
    >>> str(out)
    '[2, 24, 45, 66, 75, 90, 170, 802]'

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Radix_sort
    """
    start = int(kwargs.get('start', 0))
    end = int(kwargs.get('end', len(array) - 1))

    n = end - start + 1
    # Bug fix: seeding max_val with array[start] crashed with a
    # TypeError when that slot was None (dynamic arrays legitimately
    # contain None holes, and every other loop here guards for them).
    # Scan for the maximum over non-None entries only.
    max_val = None
    for i in range(start, end + 1):
        if array[i] is not None and (max_val is None or array[i] > max_val):
            max_val = array[i]

    if max_val is not None:
        exp = 1
        while max_val // exp > 0:
            count = [0] * 10
            output = [None] * n

            # Counting sort keyed on the digit at place value `exp`.
            for i in range(start, end + 1):
                if array[i] is not None:
                    digit = (array[i] // exp) % 10
                    count[digit] += 1

            # Prefix sums turn counts into final positions.
            for i in range(1, 10):
                count[i] += count[i - 1]

            # Right-to-left placement keeps the sort stable, which is
            # required for digit-by-digit correctness.
            for i in range(end, start - 1, -1):
                if array[i] is not None:
                    digit = (array[i] // exp) % 10
                    count[digit] -= 1
                    output[count[digit]] = array[i]

            for i in range(n):
                array[start + i] = output[i]

            exp *= 10

    if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)):
        array._modify(True)

    return array
+ """ + def __str__(self) -> str: + return str(self._data) + +class OneDimensionalArray(Array): + """ + Represents one dimensional static arrays of + fixed size. + + Parameters + ========== + + dtype: type + A valid object type. + size: int + The number of elements in the array. + elements: list + The elements in the array, all should + be of same type. + init: a python type + The initial value with which the element has + to be initialized. By default none, used only + when the data is not given. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Raises + ====== + + ValueError + When the number of elements in the list do not + match with the size. + More than three parameters are passed as arguments. + Types of arguments is not as mentioned in the docstring. + + Note + ==== + + At least one parameter should be passed as an argument along + with the dtype. + + Examples + ======== + + >>> from pydatastructs import OneDimensionalArray + >>> arr = OneDimensionalArray(int, 5) + >>> arr.fill(6) + >>> arr[0] + 6 + >>> arr[0] = 7.2 + >>> arr[0] + 7 + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/Array_data_structure#One-dimensional_arrays + """ + + __slots__ = ['_size', '_data', '_dtype'] + + def __new__(cls, dtype=NoneType, *args, **kwargs): + backend = kwargs.get('backend', Backend.PYTHON) + if backend == Backend.CPP: + return _arrays.OneDimensionalArray(dtype, *args, **kwargs) + if dtype is NoneType: + raise ValueError("Data type is not defined.") + if len(args) not in (1, 2): + raise ValueError("Too few arguments to create a 1D array," + " pass either size of the array" + " or list of elements or both.") + obj = Array.__new__(cls) + obj._dtype = dtype + if len(args) == 2: + if _check_type(args[0], list) and \ + _check_type(args[1], int): + for i in range(len(args[0])): + if _check_type(args[0][i], dtype) is False: + args[0][i] = dtype(args[0][i]) + size, data = args[1], list(args[0]) + elif _check_type(args[1], list) and \ + _check_type(args[0], int): + for i in range(len(args[1])): + if _check_type(args[1][i], dtype) is False: + args[1][i] = dtype(args[1][i]) + size, data = args[0], list(args[1]) + else: + raise TypeError("Expected type of size is int and " + "expected type of data is list/tuple.") + if size != len(data): + raise ValueError("Conflict in the size, %s and length of data, %s" + %(size, len(data))) + obj._size, obj._data = size, data + + elif len(args) == 1: + if _check_type(args[0], int): + obj._size = args[0] + init = kwargs.get('init', None) + obj._data = [init for i in range(args[0])] + elif _check_type(args[0], (list, tuple)): + for i in range(len(args[0])): + if _check_type(args[0][i], dtype) is False: + args[0][i] = dtype(args[0][i]) + obj._size, obj._data = len(args[0]), \ + list(args[0]) + else: + raise TypeError("Expected type of size is int and " + "expected type of data is list/tuple.") + + return obj + + @classmethod + def methods(cls): + return ['__new__', '__getitem__', + '__setitem__', 'fill', '__len__'] + + def __getitem__(self, i): + if i >= self._size or i < 0: + raise 
IndexError(("Index, {} out of range, " + "[{}, {}).".format(i, 0, self._size))) + return self._data.__getitem__(i) + + def __setitem__(self, idx, elem): + if elem is None: + self._data[idx] = None + else: + if _check_type(elem, self._dtype) is False: + elem = self._dtype(elem) + self._data[idx] = elem + + def fill(self, elem): + elem = self._dtype(elem) + for i in range(self._size): + self._data[i] = elem + + def __len__(self): + return self._size + +class MultiDimensionalArray(Array): + """ + Represents a multi-dimensional array. + + Parameters + ========== + + dtype: type + A valid object type. + *args: int + The dimensions of the array. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Raises + ====== + + IndexError + Index goes out of boundaries, or + the number of index given is not + the same as the number of dimensions. + ValueError + When there's no dimensions or the + dimension size is 0. + + Examples + ======== + + >>> from pydatastructs import MultiDimensionalArray as MDA + >>> arr = MDA(int, 5, 6, 9) + >>> arr.fill(32) + >>> arr[3, 0, 0] + 32 + >>> arr[3, 0, 0] = 7 + >>> arr[3, 0, 0] + 7 + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/Array_data_structure#Multidimensional_arrays + + """ + __slots__ = ['_sizes', '_data', '_dtype'] + + def __new__(cls, dtype: type = NoneType, *args, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + if dtype is NoneType: + raise ValueError("Data type is not defined.") + elif not args: + raise ValueError("Too few arguments to create a " + "multi dimensional array, pass dimensions.") + if len(args) == 1: + obj = Array.__new__(cls) + obj._dtype = dtype + obj._sizes = (args[0], 1) + obj._data = [None] * args[0] + return obj + + dimensions = args + for dimension in dimensions: + if dimension < 1: + raise ValueError("Size of dimension cannot be less than 1") + n_dimensions = len(dimensions) + d_sizes = [] + index = 0 + while n_dimensions > 1: + size = dimensions[index] + for i in range(index+1, len(dimensions)): + size = size * dimensions[i] + d_sizes.append(size) + n_dimensions -= 1 + index += 1 + d_sizes.append(dimensions[index]) + d_sizes.append(1) + obj = Array.__new__(cls) + obj._dtype = dtype + obj._sizes = tuple(d_sizes) + obj._data = [None] * obj._sizes[1] * dimensions[0] + return obj + + @classmethod + def methods(cls) -> list: + return ['__new__', '__getitem__', '__setitem__', 'fill', 'shape'] + + def __getitem__(self, indices): + self._compare_shape(indices) + if isinstance(indices, int): + return self._data[indices] + position = 0 + for i in range(0, len(indices)): + position += self._sizes[i + 1] * indices[i] + return self._data[position] + + def __setitem__(self, indices, element) -> None: + self._compare_shape(indices) + if isinstance(indices, int): + self._data[indices] = element + else: + position = 0 + for i in range(0, len(indices)): + position += self._sizes[i + 1] * indices[i] + self._data[position] = element + + def _compare_shape(self, indices) -> None: + indices = [indices] if isinstance(indices, int) else indices + if len(indices) != len(self._sizes) - 1: + raise 
IndexError("Shape mismatch, current shape is %s" % str(self.shape)) + if any(indices[i] >= self._sizes[i] for i in range(len(indices))): + raise IndexError("Index out of range.") + + def fill(self, element) -> None: + element = self._dtype(element) + for i in range(len(self._data)): + self._data[i] = element + + @property + def shape(self) -> tuple: + shape = [] + size = len(self._sizes) + for i in range(1, size): + shape.append(self._sizes[i-1]//self._sizes[i]) + return tuple(shape) + +class DynamicArray(Array): + """ + Abstract class for dynamic arrays. + """ + pass + +class DynamicOneDimensionalArray(DynamicArray, OneDimensionalArray): + """ + Represents resizable and dynamic one + dimensional arrays. + + Parameters + ========== + + dtype: type + A valid object type. + size: int + The number of elements in the array. + elements: list/tuple + The elements in the array, all should + be of same type. + init: a python type + The inital value with which the element has + to be initialized. By default none, used only + when the data is not given. + load_factor: float, by default 0.25 + The number below which if the ratio, Num(T)/Size(T) + falls then the array is contracted such that at + most only half the positions are filled. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Raises + ====== + + ValueError + When the number of elements in the list do not + match with the size. + More than three parameters are passed as arguments. + Types of arguments is not as mentioned in the docstring. + The load factor is not of floating point type. + + Note + ==== + + At least one parameter should be passed as an argument along + with the dtype. + Num(T) means the number of positions which are not None in the + array. + Size(T) means the maximum number of elements that the array can hold. 
+ + Examples + ======== + + >>> from pydatastructs import DynamicOneDimensionalArray as DODA + >>> arr = DODA(int, 0) + >>> arr.append(1) + >>> arr.append(2) + >>> arr[0] + 1 + >>> arr.delete(0) + >>> arr[0] + >>> arr[1] + 2 + >>> arr.append(3) + >>> arr.append(4) + >>> [arr[i] for i in range(arr.size)] + [None, 2, 3, 4, None, None, None] + + References + ========== + + .. [1] http://www.cs.nthu.edu.tw/~wkhon/algo09/lectures/lecture16.pdf + """ + + __slots__ = ['_load_factor', '_num', '_last_pos_filled', '_size'] + + def __new__(cls, dtype=NoneType, *args, **kwargs): + backend = kwargs.get("backend", Backend.PYTHON) + if backend == Backend.CPP: + return _arrays.DynamicOneDimensionalArray(dtype, *args, **kwargs) + obj = super().__new__(cls, dtype, *args, **kwargs) + obj._load_factor = float(kwargs.get('load_factor', 0.25)) + obj._num = 0 if obj._size == 0 or obj[0] is None else obj._size + obj._last_pos_filled = obj._num - 1 + return obj + + @classmethod + def methods(cls): + return ['__new__', '_modify', + 'append', 'delete', 'size', + '__str__', '__reversed__'] + + def _modify(self, force=False): + """ + Contracts the array if Num(T)/Size(T) falls + below load factor. 
+ """ + if force: + i = -1 + while self._data[i] is None: + i -= 1 + self._last_pos_filled = i%self._size + if (self._num/self._size < self._load_factor): + arr_new = OneDimensionalArray(self._dtype, 2*self._num + 1) + j = 0 + for i in range(self._last_pos_filled + 1): + if self._data[i] is not None: + arr_new[j] = self[i] + j += 1 + self._last_pos_filled = j - 1 + self._data = arr_new._data + self._size = arr_new._size + + def append(self, el): + if self._last_pos_filled + 1 == self._size: + arr_new = OneDimensionalArray(self._dtype, 2*self._size + 1) + for i in range(self._last_pos_filled + 1): + arr_new[i] = self[i] + arr_new[self._last_pos_filled + 1] = el + self._size = arr_new._size + self._data = arr_new._data + else: + self[self._last_pos_filled + 1] = el + self._last_pos_filled += 1 + self._num += 1 + self._modify() + + def delete(self, idx): + if idx <= self._last_pos_filled and idx >= 0 and \ + self[idx] is not None: + self[idx] = None + self._num -= 1 + if self._last_pos_filled == idx: + self._last_pos_filled -= 1 + return self._modify() + + @property + def size(self): + return self._size + + def __str__(self): + to_be_printed = ['' for _ in range(self._last_pos_filled + 1)] + for i in range(self._last_pos_filled + 1): + if self._data[i] is not None: + to_be_printed[i] = str(self._data[i]) + return str(to_be_printed) + + def __reversed__(self): + for i in range(self._last_pos_filled, -1, -1): + yield self._data[i] + +class ArrayForTrees(DynamicOneDimensionalArray): + """ + Utility dynamic array for storing nodes of a tree. + + Parameters + ========== + + backend: pydatastructs.Backend + The backend to be used. Available backends: Python and C++ + Optional, by default, the Python backend is used. For faster execution, use the C++ backend. 
class LinkedList(object):
    """
    Abstract base class for every linked list variant.

    Concrete subclasses must implement ``insert_after``,
    ``insert_before``, ``insert_at`` and ``extract``; the traversal
    helpers defined here work for both linear and circular lists.
    """
    __slots__ = ['head', 'size']

    def __len__(self):
        return self.size

    @property
    def is_empty(self):
        # A list with zero nodes is empty.
        return self.size == 0

    def search(self, key):
        """
        Returns the first node whose key equals `key`, or None.
        On circular lists, traversal stops after one full cycle.
        """
        node = self.head
        while node is not None:
            if node.key == key:
                return node
            node = node.next
            if node is self.head:
                # Completed one lap of a circular list: key absent.
                return None
        return None

    def __str__(self):
        """
        For printing the linked list.
        """
        parts = []
        node = self.head
        while node is not None:
            parts.append(str(node))
            node = node.next
            if node == self.head:
                break
        return str(parts)

    def insert_after(self, prev_node, key, data=None):
        """
        Inserts a new node after prev_node.

        Parameters
        ==========

        prev_node: LinkedListNode
            The node after which the new node is to be inserted.

        key
            Any valid identifier to uniquely
            identify the node in the linked list.

        data
            Any valid data to be stored in the node.
        """
        raise NotImplementedError('This is an abstract method')

    def insert_at(self, index, key, data=None):
        """
        Inserts a new node at the input index.

        Parameters
        ==========

        index: int
            An integer satisfying python indexing properties.

        key
            Any valid identifier to uniquely
            identify the node in the linked list.

        data
            Any valid data to be stored in the node.
        """
        raise NotImplementedError('This is an abstract method')

    def extract(self, index):
        """
        Extracts the node at the index of the list.

        Parameters
        ==========

        index: int
            An integer satisfying python indexing properties.

        Returns
        =======

        current_node: LinkedListNode
            The node at index i.
        """
        raise NotImplementedError('This is an abstract method')

    def __getitem__(self, index):
        """
        Returns
        =======

        current_node: LinkedListNode
            The node at given index.
        """
        if index < 0:
            index = self.size + index

        if index >= self.size:
            raise IndexError('%d index is out of range.'%(index))

        # Walk node by node until the requested position is reached.
        steps = 0
        node = self.head
        while steps != index:
            node = node.next
            steps += 1
        return node

    def appendleft(self, key, data=None):
        """
        Pushes a new node at the start, i.e.,
        the left end of the list.
        """
        self.insert_at(0, key, data)

    def append(self, key, data=None):
        """
        Appends a new node at the end of the list.
        """
        self.insert_at(self.size, key, data)

    def insert_before(self, next_node, key, data=None):
        """
        Inserts a new node before next_node.

        Parameters
        ==========

        next_node: LinkedListNode
            The node before which the new node is to be inserted.

        key
            Any valid identifier to uniquely
            identify the node in the linked list.

        data
            Any valid data to be stored in the node.
        """
        raise NotImplementedError('This is an abstract method')

    def popleft(self):
        """
        Extracts and returns the node at the start of the list.
        """
        return self.extract(0)

    def popright(self):
        """
        Extracts and returns the node at the end of the list.
        """
        return self.extract(-1)
[1] https://en.wikipedia.org/wiki/Doubly_linked_list + + """ + __slots__ = ['head', 'tail', 'size'] + + def __new__(cls, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = LinkedList.__new__(cls) + obj.head = None + obj.tail = None + obj.size = 0 + return obj + + @classmethod + def methods(cls): + return ['__new__', 'insert_after', + 'insert_before', 'insert_at', 'extract'] + + def insert_after(self, prev_node, key, data=None): + self.size += 1 + new_node = LinkedListNode(key, data, + links=['next', 'prev'], + addrs=[None, None]) + new_node.next = prev_node.next + if new_node.next is not None: + new_node.next.prev = new_node + prev_node.next = new_node + new_node.prev = prev_node + + if new_node.next is None: + self.tail = new_node + + def insert_before(self, next_node, key, data=None): + self.size += 1 + new_node = LinkedListNode(key, data, + links=['next', 'prev'], + addrs=[None, None]) + new_node.prev = next_node.prev + next_node.prev = new_node + new_node.next = next_node + if new_node.prev is not None: + new_node.prev.next = new_node + else: + self.head = new_node + + def insert_at(self, index, key, data=None): + if self.size == 0 and (index in (0, -1)): + index = 0 + + if index < 0: + index = self.size + index + + if index > self.size: + raise IndexError('%d index is out of range.'%(index)) + + self.size += 1 + new_node = LinkedListNode(key, data, + links=['next', 'prev'], + addrs=[None, None]) + if self.size == 1: + self.head, self.tail = \ + new_node, new_node + elif index == self.size - 1: + new_node.prev = self.tail + new_node.next = self.tail.next + self.tail.next = new_node + self.tail = new_node + else: + counter = 0 + current_node = self.head + prev_node = None + while counter != index: + prev_node = current_node + current_node = current_node.next + counter += 1 + new_node.prev = prev_node + new_node.next = current_node + if prev_node is not None: + prev_node.next = new_node + if current_node is not 
None: + current_node.prev = new_node + if new_node.next is None: + self.tail = new_node + if new_node.prev is None: + self.head = new_node + + def extract(self, index): + if self.is_empty: + raise ValueError("The list is empty.") + + if index < 0: + index = self.size + index + + if index >= self.size: + raise IndexError('%d is out of range.'%(index)) + + self.size -= 1 + counter = 0 + current_node = self.head + prev_node = None + while counter != index: + prev_node = current_node + current_node = current_node.next + counter += 1 + if prev_node is not None: + prev_node.next = current_node.next + if current_node.next is not None: + current_node.next.prev = prev_node + if index == 0: + self.head = current_node.next + if index == self.size: + self.tail = current_node.prev + return current_node + +class SinglyLinkedList(LinkedList): + """ + Represents Singly Linked List + + Parameters + ========== + + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Examples + ======== + + >>> from pydatastructs import SinglyLinkedList + >>> sll = SinglyLinkedList() + >>> sll.append(6) + >>> sll[0].key + 6 + >>> sll.head.key + 6 + >>> sll.append(5) + >>> sll.appendleft(2) + >>> str(sll) + "['(2, None)', '(6, None)', '(5, None)']" + >>> sll[0].key = 7.2 + >>> sll.extract(1).key + 6 + >>> str(sll) + "['(7.2, None)', '(5, None)']" + + References + ========== + + .. 

    """
    # head/tail are LinkedListNode or None; size is the node count.
    __slots__ = ['head', 'tail', 'size']

    def __new__(cls, **kwargs):
        raise_if_backend_is_not_python(
            cls, kwargs.get('backend', Backend.PYTHON))
        obj = LinkedList.__new__(cls)
        obj.head = None
        obj.tail = None
        obj.size = 0
        return obj

    @classmethod
    def methods(cls):
        return ['insert_after', 'insert_at',
        'extract']

    def insert_after(self, prev_node, key, data=None):
        # Single-link splice: the new node takes over prev_node's
        # successor, then becomes that successor itself.
        self.size += 1
        new_node = LinkedListNode(key, data,
                                  links=['next'],
                                  addrs=[None])
        new_node.next = prev_node.next
        prev_node.next = new_node

        # Inserting after the old tail makes the new node the tail.
        if new_node.next is None:
            self.tail = new_node

    def insert_at(self, index, key, data=None):
        # Allow both 0 and -1 on an empty list so that append()/
        # appendleft() work before the first element exists.
        if self.size == 0 and (index in (0, -1)):
            index = 0

        if index < 0:
            index = self.size + index

        if index > self.size:
            raise IndexError('%d index is out of range.'%(index))

        self.size += 1
        new_node = LinkedListNode(key, data,
                                  links=['next'],
                                  addrs=[None])
        if self.size == 1:
            # First node ever: it is both head and tail.
            self.head, self.tail = \
                new_node, new_node
        elif index == self.size - 1:
            # Fast path for appending at the end via the tail pointer.
            new_node.next = self.tail.next
            self.tail.next = new_node
            self.tail = new_node
        else:
            # General case: walk to the target position, then splice.
            counter = 0
            current_node = self.head
            prev_node = None
            while counter != index:
                prev_node = current_node
                current_node = current_node.next
                counter += 1
            new_node.next = current_node
            if prev_node is not None:
                prev_node.next = new_node
            if new_node.next is None:
                self.tail = new_node
            if index == 0:
                self.head = new_node

    def extract(self, index):
        if self.is_empty:
            raise ValueError("The list is empty.")

        if index < 0:
            index = self.size + index

        if index >= self.size:
            raise IndexError('%d is out of range.'%(index))

        # size is decremented up front; the `index == self.size` check
        # below therefore detects "removed the last node".
        self.size -= 1
        counter = 0
        current_node = self.head
        prev_node = None
        while counter != index:
            prev_node = current_node
            current_node = current_node.next
            counter += 1
        if prev_node is not None:
            prev_node.next = current_node.next
        if index == 0:
            self.head = current_node.next
        if index == self.size:
            self.tail = prev_node
        return current_node
class SinglyCircularLinkedList(SinglyLinkedList):
    """
    Represents Singly Circular Linked List.

    A singly linked list whose tail's ``next`` pointer is kept
    wired back to the head after every mutation.

    Parameters
    ==========

    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available
        backend is used.


    Examples
    ========

    >>> from pydatastructs import SinglyCircularLinkedList
    >>> scll = SinglyCircularLinkedList()
    >>> scll.append(6)
    >>> scll[0].key
    6
    >>> scll.head.key
    6
    >>> scll.append(5)
    >>> scll.appendleft(2)
    >>> str(scll)
    "['(2, None)', '(6, None)', '(5, None)']"
    >>> scll[0].key = 7.2
    >>> scll.extract(1).key
    6
    >>> str(scll)
    "['(7.2, None)', '(5, None)']"

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Linked_list#Circular_linked_list

    """

    @classmethod
    def methods(cls):
        return ['insert_after', 'insert_at', 'extract']

    def insert_after(self, prev_node, key, data=None):
        # Delegate the splice to the linear list, then repair the
        # tail pointer if the new node landed just before the head.
        super().insert_after(prev_node, key, data)
        inserted = prev_node.next
        if inserted.next == self.head:
            self.tail = inserted

    def insert_at(self, index, key, data=None):
        super().insert_at(index, key, data)
        # A single-element list must point at itself to be circular.
        if self.size == 1:
            self.head.next = self.head
        inserted = self[index]
        # A new head becomes the tail's successor; a new pre-head
        # node becomes the tail.
        if index == 0:
            self.tail.next = inserted
        if inserted.next == self.head:
            self.tail = inserted

    def extract(self, index):
        removed = super().extract(index)
        if self.tail is None:
            # The list became empty; drop the stale circular head.
            self.head = None
        elif index == 0:
            # Re-close the ring onto the new head.
            self.tail.next = self.head
        return removed
class DoublyCircularLinkedList(DoublyLinkedList):
    """
    Represents Doubly Circular Linked List

    Parameters
    ==========

    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available
        backend is used.

    Examples
    ========

    >>> from pydatastructs import DoublyCircularLinkedList
    >>> dcll = DoublyCircularLinkedList()
    >>> dcll.append(6)
    >>> dcll[0].key
    6
    >>> dcll.head.key
    6
    >>> dcll.append(5)
    >>> dcll.appendleft(2)
    >>> str(dcll)
    "['(2, None)', '(6, None)', '(5, None)']"
    >>> dcll[0].key = 7.2
    >>> dcll.extract(1).key
    6
    >>> str(dcll)
    "['(7.2, None)', '(5, None)']"

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Doubly_linked_list#Circular_doubly_linked_lists

    """

    @classmethod
    def methods(cls):
        return ['insert_after', 'insert_before',
        'insert_at', 'extract']

    def insert_after(self, prev_node, key, data=None):
        # Linear insert first, then fix the tail if the new node sits
        # just before the head (i.e. it is the new last node).
        super(DoublyCircularLinkedList, self)\
            .insert_after(prev_node, key, data)
        if prev_node.next.next == self.head:
            self.tail = prev_node.next

    def insert_before(self, next_node, key, data=None):
        super(DoublyCircularLinkedList, self).\
            insert_before(next_node, key, data)
        # Inserting before the old head means the new node is the head.
        if next_node == self.head:
            self.head = next_node.prev

    def insert_at(self, index, key, data=None):
        super(DoublyCircularLinkedList, self).\
            insert_at(index, key, data)
        # A single-element list must point at itself in both directions.
        if self.size == 1:
            self.head.next = self.head
            self.head.prev = self.head
        new_node = self.__getitem__(index)
        if index == 0:
            # New head: link the tail forward to it.
            self.tail.next = new_node
            new_node.prev = self.tail
        if new_node.next == self.head:
            # New tail: close the ring in both directions.
            self.tail = new_node
            new_node.next = self.head
            self.head.prev = new_node

    def extract(self, index):
        node = super(DoublyCircularLinkedList, self).extract(index)
        if self.tail is None:
            # The list became empty; drop the stale circular head.
            self.head = None
        elif index == 0:
            # Re-close the ring onto the new head.
            self.tail.next = self.head
        return node

class SkipList(object):
    """
    Represents Skip List

    Parameters
    ==========

    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available
        backend is used.

    Examples
    ========

    >>> from pydatastructs import SkipList
    >>> sl = SkipList()
    >>> sl.insert(6)
    >>> sl.insert(1)
    >>> sl.insert(3)
    >>> node = sl.extract(1)
    >>> str(node)
    '(1, None)'
    >>> sl.insert(4)
    >>> sl.insert(2)
    >>> sl.search(4)
    True
    >>> sl.search(10)
    False

    """
    # head/tail are the top-level -inf/+inf sentinel SkipNodes;
    # _levels counts express lanes, _num_nodes counts stored keys.
    __slots__ = ['head', 'tail', '_levels', '_num_nodes', 'seed']

    def __new__(cls, **kwargs):
        raise_if_backend_is_not_python(
            cls, kwargs.get('backend', Backend.PYTHON))
        obj = object.__new__(cls)
        obj.head, obj.tail = None, None
        obj._num_nodes = 0
        obj._levels = 0
        # Always keep at least one level of -inf -> +inf sentinels.
        obj._add_level()
        return obj

    @classmethod
    def methods(cls):
        return ['__new__', 'levels', 'search',
            'extract', '__str__', 'size']

    def _add_level(self):
        # Stack a fresh sentinel pair on top; `down` links the new
        # level to the previous topmost one.
        self.tail = SkipNode(math.inf, next=None, down=self.tail)
        self.head = SkipNode(-math.inf, next=self.tail, down=self.head)
        self._levels += 1

    @property
    def levels(self):
        """
        Returns the number of levels in the
        current skip list.
        """
        return self._levels

    def _search(self, key) -> list:
        # Standard skip-list descent: move right while the successor's
        # key is smaller, drop a level otherwise.  Returns one
        # predecessor node per level, topmost first; path[-1] is the
        # bottom-level predecessor of `key`.
        path = []
        node = self.head
        while node:
            if node.next.key >= key:
                path.append(node)
                node = node.down
            else:
                node = node.next
        return path

    def search(self, key) -> bool:
        # The +inf sentinel guarantees path[-1].next always exists.
        return self._search(key)[-1].next.key == key

    def insert(self, key, data=None):
        """
        Inserts a new node to the skip list.

        Parameters
        ==========

        key
            Any valid identifier to uniquely
            identify the node in the linked list.

        data
            Any valid data to be stored in the node.
        """
        path = self._search(key)
        tip = path[-1]
        # Always insert at the bottom level first.
        below = SkipNode(key=key, data=data, next=tip.next)
        tip.next = below
        total_level = self._levels
        level = 1
        # Coin-flip promotion: each extra level is added with
        # probability 1/2, growing the level count by at most one.
        while random.getrandbits(1) % 2 == 0 and level <= total_level:
            if level == total_level:
                self._add_level()
                prev = self.head
            else:
                # path is topmost-first, so level `level` (0 = bottom)
                # lives at index total_level - 1 - level.
                prev = path[total_level - 1 - level]
            # Only the bottom node carries the payload; upper nodes
            # are index entries linked by `down`.
            below = SkipNode(key=key, data=None, next=prev.next, down=below)
            prev.next = below
            level += 1
        self._num_nodes += 1
    @property
    def size(self):
        # Number of distinct keys stored (sentinels excluded).
        return self._num_nodes

    def extract(self, key):
        """
        Extracts the node with the given key in the skip list.

        Parameters
        ==========

        key
            The key of the node under consideration.

        Returns
        =======

        return_node: SkipNode
            The node with given key.
        """
        path = self._search(key)
        tip = path[-1]
        if tip.next.key != key:
            raise KeyError('Node with key %s is not there in %s'%(key, self))
        # Return a detached copy so internal tower links don't leak out.
        return_node = SkipNode(tip.next.key, tip.next.data)
        total_level = self._levels
        level = total_level - 1
        # Unlink the key's tower from the bottom upwards; path is
        # topmost-first, so walk the indices backwards.
        while level >= 0 and path[level].next.key == key:
            path[level].next = path[level].next.next
            level -= 1
        # Peel off any levels that became empty (head -> tail only).
        walk = self.head
        while walk is not None:
            if walk.next is self.tail:
                self._levels -= 1
                self.head = walk.down
                self.tail = self.tail.down
                walk = walk.down
            else:
                break
        self._num_nodes -= 1
        # Never drop below one level of sentinels.
        if self._levels == 0:
            self._add_level()
        return return_node

    def __str__(self):
        # Render the list as a grid: one row per level (top row =
        # highest level), one column per key at the bottom level.
        node2row = {}
        node2col = {}
        walk = self.head
        curr_level = self._levels - 1
        while walk is not None:
            curr_node = walk
            col = 0
            while curr_node is not None:
                if curr_node.key != math.inf and curr_node.key != -math.inf:
                    node2row[curr_node] = curr_level
                    # Columns are assigned by bottom-level position.
                    if walk.down is None:
                        node2col[curr_node.key] = col
                        col += 1
                curr_node = curr_node.next
            walk = walk.down
            curr_level -= 1
        sl_mat = [[str(None) for _ in range(self._num_nodes)] for _ in range(self._levels)]
        walk = self.head
        while walk is not None:
            curr_node = walk
            while curr_node is not None:
                if curr_node in node2row:
                    row = node2row[curr_node]
                    col = node2col[curr_node.key]
                    sl_mat[row][col] = str(curr_node)
                curr_node = curr_node.next
            walk = walk.down
        sl_str = ""
        for level_list in sl_mat[::-1]:
            for node_str in level_list:
                sl_str += node_str + " "
            if len(sl_str) > 0:
                sl_str += "\n"
        return sl_str
diff --git a/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/__init__.py b/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_algorithms.py b/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_algorithms.py
new file mode 100644
index 000000000..3e287bb74
--- /dev/null
+++ b/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_algorithms.py
@@ -0,0 +1,423 @@
from pydatastructs import (
    merge_sort_parallel, DynamicOneDimensionalArray,
    OneDimensionalArray, brick_sort, brick_sort_parallel,
    heapsort, matrix_multiply_parallel, counting_sort, bucket_sort,
    cocktail_shaker_sort, quick_sort, longest_common_subsequence, is_ordered,
    upper_bound, lower_bound, longest_increasing_subsequence, next_permutation,
    prev_permutation, bubble_sort, linear_search, binary_search, jump_search,
    selection_sort, insertion_sort, intro_sort, shell_sort, radix_sort, Backend)

from pydatastructs.utils.raises_util import raises
import random

# Shared driver: runs `sort` over seeded random data on both the Python
# and C++ array backends and checks the results against fixed snapshots.
# The expected arrays are tied to random.seed(1000) — do not reorder the
# random calls below.
def _test_common_sort(sort, *args, **kwargs):
    random.seed(1000)

    n = random.randint(10, 20)
    arr = DynamicOneDimensionalArray(int, 0)
    generated_ints = []
    for _ in range(n):
        integer = random.randint(1, 1000)
        generated_ints.append(integer)
        arr.append(integer)
    for _ in range(n//3):
        integer = random.randint(0, n//2)
        generated_ints.append(integer)
        arr.delete(integer)
    expected_arr_1 = [686, 779, 102, 134, 362, 448,
                      480, 548, None, None, None,
                      228, 688, 247, 373, 696, None,
                      None, None, None, None, None,
                      None, None, None, None, None,
                      None, None, None, None]
    # Partial-range sort first, then a full sort over the same array.
    sort(arr, *args, **kwargs, start=2, end=10)
    assert arr._data == expected_arr_1
    sort(arr, *args, **kwargs)
    expected_arr_2 = [102, 134, 228, 247, 362, 373, 448,
                      480, 548, 686, 688, 696, 779,
                      None, None, None, None, None, None,
                      None, None, None, None, None,
                      None, None, None, None, None, None, None]
    assert arr._data == expected_arr_2
    assert (arr._last_pos_filled, arr._num, arr._size) == (12, 13, 31)

    # Repeat the same sequence of operations on the C++ backend and
    # compare element-wise against the same snapshots.
    arr = DynamicOneDimensionalArray(int, 0, backend=Backend.CPP)
    int_idx = 0
    for _ in range(n):
        arr.append(generated_ints[int_idx])
        int_idx += 1
    for _ in range(n//3):
        arr.delete(generated_ints[int_idx])
        int_idx += 1
    sort(arr, *args, **kwargs, start=2, end=10)
    for i in range(len(expected_arr_1)):
        assert arr[i] == expected_arr_1[i]
    sort(arr, *args, **kwargs)
    for i in range(len(expected_arr_2)):
        assert arr[i] == expected_arr_2[i]
    assert (arr._last_pos_filled, arr._num, arr.size) == (12, 13, 31)

    # Fixed-size array variant, partial-range sort only.
    n = random.randint(10, 20)
    arr = OneDimensionalArray(int, n)
    generated_ints.clear()
    for i in range(n):
        integer = random.randint(1, 1000)
        arr[i] = integer
        generated_ints.append(integer)
    expected_arr_3 = [42, 695, 147, 500, 768,
                      998, 473, 732, 728, 426,
                      709, 910]
    sort(arr, *args, **kwargs, start=2, end=5)
    assert arr._data == expected_arr_3

    arr = OneDimensionalArray(int, n, backend=Backend.CPP)
    int_idx = 0
    for i in range(n):
        arr[i] = generated_ints[int_idx]
        int_idx += 1
    sort(arr, *args, **kwargs, start=2, end=5)
    for i in range(len(expected_arr_3)):
        assert arr[i] == expected_arr_3[i]

def test_merge_sort_parallel():
    _test_common_sort(merge_sort_parallel, num_threads=5)

def test_brick_sort():
    _test_common_sort(brick_sort)

def test_brick_sort_parallel():
    _test_common_sort(brick_sort_parallel, num_threads=3)

def test_heapsort():
    _test_common_sort(heapsort)

def test_bucket_sort():
    _test_common_sort(bucket_sort)

# counting_sort returns a new array rather than sorting in place, so it
# cannot reuse _test_common_sort.
def test_counting_sort():
    random.seed(1000)

    n = random.randint(10, 20)
    arr = DynamicOneDimensionalArray(int, 0)
    for _ in range(n):
        arr.append(random.randint(1, 1000))
    for _ in range(n//3):
        arr.delete(random.randint(0, n//2))

    expected_arr = [102, 134, 228, 247, 362, 373, 448,
                    480, 548, 686, 688, 696, 779]
    assert counting_sort(arr)._data == expected_arr

def test_cocktail_shaker_sort():
    _test_common_sort(cocktail_shaker_sort)

def test_quick_sort():
    _test_common_sort(quick_sort)
    _test_common_sort(quick_sort, backend=Backend.CPP)

def test_intro_sort():
    _test_common_sort(intro_sort)

def test_bubble_sort():
    _test_common_sort(bubble_sort)
    _test_common_sort(bubble_sort, backend=Backend.CPP)
    _test_common_sort(bubble_sort, backend=Backend.LLVM)

def test_selection_sort():
    _test_common_sort(selection_sort)
    _test_common_sort(selection_sort, backend=Backend.CPP)

def test_insertion_sort():
    _test_common_sort(insertion_sort)
    _test_common_sort(insertion_sort, backend=Backend.CPP)

def test_matrix_multiply_parallel():
    ODA = OneDimensionalArray

    expected_result = [[3, 3, 3], [1, 2, 1], [2, 2, 2]]

    # Array-of-arrays input.
    I = ODA(ODA, [ODA(int, [1, 1, 0]), ODA(int, [0, 1, 0]), ODA(int, [0, 0, 1])])
    J = ODA(ODA, [ODA(int, [2, 1, 2]), ODA(int, [1, 2, 1]), ODA(int, [2, 2, 2])])
    output = matrix_multiply_parallel(I, J, num_threads=5)
    assert expected_result == output

    # Plain nested-list input.
    I = [[1, 1, 0], [0, 1, 0], [0, 0, 1]]
    J = [[2, 1, 2], [1, 2, 1], [2, 2, 2]]
    output = matrix_multiply_parallel(I, J, num_threads=5)
    assert expected_result == output

    # Mismatched dimensions must raise.
    I = [[1, 1, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]]
    J = [[2, 1, 2], [1, 2, 1], [2, 2, 2]]
    assert raises(ValueError, lambda: matrix_multiply_parallel(I, J, num_threads=5))

    I = [[1, 1, 0], [0, 1, 0], [0, 0, 1]]
    J = [[2, 1, 2], [1, 2, 1], [2, 2, 2]]
    output = matrix_multiply_parallel(I, J, num_threads=1)
    assert expected_result == output
def test_longest_common_sequence():
    ODA = OneDimensionalArray
    expected_result = "['A', 'S', 'C', 'I', 'I']"

    str1 = ODA(str, ['A', 'A', 'S', 'C', 'C', 'I', 'I'])
    str2 = ODA(str, ['A', 'S', 'S', 'C', 'I', 'I', 'I', 'I'])
    output = longest_common_subsequence(str1, str2)
    assert str(output) == expected_result

    expected_result = "['O', 'V', 'A']"

    I = ODA(str, ['O', 'V', 'A', 'L'])
    J = ODA(str, ['F', 'O', 'R', 'V', 'A', 'E', 'W'])
    output = longest_common_subsequence(I, J)
    assert str(output) == expected_result

    # One sequence fully contained in the other.
    X = ODA(int, [1, 2, 3, 4, 5, 6, 6, 5, 4, 3, 2, 1])
    Y = ODA(int, [1, 2, 3, 4, 4, 3, 2, 1])
    output = longest_common_subsequence(X, Y)
    assert str(output) == '[1, 2, 3, 4, 4, 3, 2, 1]'

    # Empty input yields an empty subsequence.
    Z = ODA(int, [])
    output = longest_common_subsequence(Y, Z)
    assert str(output) == '[]'

def test_is_ordered():
    # Inner helper so the same cases run on both backends.
    def _test_inner_ordered(*args, **kwargs):
        ODA = OneDimensionalArray
        DODA = DynamicOneDimensionalArray

        expected_result = True
        arr = ODA(int, [1, 2, 5, 6])
        output = is_ordered(arr, **kwargs)
        assert output == expected_result

        expected_result = False
        arr1 = ODA(int, [4, 3, 2, 1])
        output = is_ordered(arr1, **kwargs)
        assert output == expected_result

        # Sub-range check ignores the out-of-order leading element.
        expected_result = True
        arr2 = ODA(int, [6, 1, 2, 3, 4, 5])
        output = is_ordered(arr2, start=1, end=5, **kwargs)
        assert output == expected_result

        # Custom comparator: descending order.
        expected_result = True
        arr3 = ODA(int, [0, -1, -2, -3, -4, 4])
        output = is_ordered(arr3, start=1, end=4,
                            comp=lambda u, v: u > v, **kwargs)
        assert output == expected_result

        # Dynamic array with a hole left by delete().
        expected_result = True
        arr4 = DODA(int, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
        arr4.delete(0)
        output = is_ordered(arr4, **kwargs)
        assert output == expected_result

    _test_inner_ordered()
    _test_inner_ordered(backend=Backend.CPP)


def test_upper_bound():
    ODA = OneDimensionalArray
    arr1 = ODA(int, [3, 3, 3])
    output = upper_bound(arr1, 3)
    expected_result = 3
    assert expected_result == output

    arr2 = ODA(int, [4, 4, 5, 6])
    output = upper_bound(arr2, 4, end=3)
    expected_result = 2
    assert expected_result == output

    arr3 = ODA(int, [6, 6, 7, 8, 9])
    output = upper_bound(arr3, 5, start=2, end=4)
    expected_result = 2
    assert expected_result == output

    arr4 = ODA(int, [3, 4, 4, 6])
    output = upper_bound(arr4, 5, start=1, end=3)
    expected_result = 3
    assert expected_result == output

    # Descending arrays exercised via a reversed comparator.
    arr5 = ODA(int, [7, 6, 6, 6, 6, 5, 4, 3])
    output = upper_bound(arr5, 6, comp=lambda x, y: x > y)
    expected_result = 5
    assert expected_result == output

    arr6 = ODA(int, [7, 6, 6, 6, 6, 5, 4, 3])
    output = upper_bound(arr6, 2, start=2, comp=lambda x, y: x > y)
    expected_result = 8
    assert expected_result == output

    arr7 = ODA(int, [7, 6, 6, 6, 6, 5, 4, 3])
    output = upper_bound(arr7, 9, start=3, end=7, comp=lambda x, y: x > y)
    expected_result = 3
    assert expected_result == output

    arr8 = ODA(int, [7, 6, 6, 6, 6, 5, 4, 3])
    output = upper_bound(arr8, 6, end=3, comp=lambda x, y: x > y)
    expected_result = 3
    assert expected_result == output


def test_lower_bound():
    ODA = OneDimensionalArray
    arr1 = ODA(int, [3, 3, 3])
    output = lower_bound(arr1, 3, start=1)
    expected_result = 1
    assert expected_result == output

    arr2 = ODA(int, [4, 4, 4, 4, 5, 6])
    output = lower_bound(arr2, 5, end=3)
    expected_result = 3
    assert expected_result == output

    arr3 = ODA(int, [6, 6, 7, 8, 9])
    output = lower_bound(arr3, 5, end=3)
    expected_result = 0
    assert expected_result == output

    arr4 = ODA(int, [3, 4, 4, 4])
    output = lower_bound(arr4, 5)
    expected_result = 4
    assert expected_result == output

    # Descending arrays exercised via a reversed comparator.
    arr5 = ODA(int, [7, 6, 6, 6, 6, 5, 4, 3])
    output = lower_bound(arr5, 5, comp=lambda x, y: x > y)
    expected_result = 5
    assert expected_result == output

    arr6 = ODA(int, [7, 6, 6, 6, 6, 5, 4, 3])
    output = lower_bound(arr6, 2, start=4, comp=lambda x, y: x > y)
    expected_result = 8
    assert expected_result == output

    arr7 = ODA(int, [7, 6, 6, 6, 6, 5, 4, 3])
    output = lower_bound(arr7, 9, end=5, comp=lambda x, y: x > y)
    expected_result = 0
    assert expected_result == output

    arr8 = ODA(int, [7, 6, 6, 6, 6, 5, 4, 3])
    output = lower_bound(arr8, 6, end=3, comp=lambda x, y: x > y)
    expected_result = 1
    assert expected_result == output

def test_longest_increasing_subsequence():
    ODA = OneDimensionalArray

    arr1 = ODA(int, [2, 5, 3, 7, 11, 8, 10, 13, 6])
    output = longest_increasing_subsequence(arr1)
    expected_result = [2, 3, 7, 8, 10, 13]
    assert str(expected_result) == str(output)

    arr2 = ODA(int, [3, 4, -1, 5, 8, 2, 2, 2, 3, 12, 7, 9, 10])
    output = longest_increasing_subsequence(arr2)
    expected_result = [-1, 2, 3, 7, 9, 10]
    assert str(expected_result) == str(output)

    # Duplicates are not considered "increasing".
    arr3 = ODA(int, [6, 6, 6, 19, 9])
    output = longest_increasing_subsequence(arr3)
    expected_result = [6, 9]
    assert str(expected_result) == str(output)

    arr4 = ODA(int, [5, 4, 4, 3, 3, 6, 6, 8])
    output = longest_increasing_subsequence(arr4)
    expected_result = [3, 6, 8]
    assert str(expected_result) == str(output)

    # Strictly decreasing input: any single element is the answer.
    arr5 = ODA(int, [7, 6, 6, 6, 5, 4, 3])
    output = longest_increasing_subsequence(arr5)
    expected_result = [3]
    assert str(expected_result) == str(output)

# Applies `func` repeatedly and checks the emitted permutations against
# the expected cycle; `signal` goes False once the cycle wraps around.
def _test_permutation_common(array, expected_perms, func):
    num_perms = len(expected_perms)

    output = []
    for _ in range(num_perms):
        signal, array = func(array)
        output.append(array)
        if not signal:
            break

    assert len(output) == len(expected_perms)
    for perm1, perm2 in zip(output, expected_perms):
        assert str(perm1) == str(perm2)

def test_next_permutation():
    ODA = OneDimensionalArray

    array = ODA(int, [1, 2, 3])
    expected_perms = [[1, 3, 2], [2, 1, 3],
                      [2, 3, 1], [3, 1, 2],
                      [3, 2, 1], [1, 2, 3]]
    _test_permutation_common(array, expected_perms, next_permutation)

def test_prev_permutation():
    ODA = OneDimensionalArray

    array = ODA(int, [3, 2, 1])
    expected_perms = [[3, 1, 2], [2, 3, 1],
                      [2, 1, 3], [1, 3, 2],
                      [1, 2, 3], [3, 2, 1]]
    _test_permutation_common(array, expected_perms, prev_permutation)

# next_permutation and prev_permutation must be inverses of each other.
def test_next_prev_permutation():
    ODA = OneDimensionalArray
    random.seed(1000)

    for i in range(100):
        data = set(random.sample(range(1, 10000), 10))
        array = ODA(int, list(data))

        _, next_array = next_permutation(array)
        _, orig_array = prev_permutation(next_array)
        assert str(orig_array) == str(array)

        _, prev_array = prev_permutation(array)
        _, orig_array = next_permutation(prev_array)
        assert str(orig_array) == str(array)

# Shared driver for the search algorithms; binary/jump search require
# sorted input, linear search does not (sort_array=False).
def _test_common_search(search_func, sort_array=True, **kwargs):
    ODA = OneDimensionalArray

    array = ODA(int, [1, 2, 5, 7, 10, 29, 40])
    for i in range(len(array)):
        assert i == search_func(array, array[i], **kwargs)

    # Restricting [start, end] makes out-of-range hits return None.
    checker_array = [None, None, 2, 3, 4, 5, None]
    for i in range(len(array)):
        assert checker_array[i] == search_func(array, array[i], start=2, end=5, **kwargs)

    random.seed(1000)

    for i in range(25):
        data = list(set(random.sample(range(1, 10000), 100)))

        if sort_array:
            data.sort()

        array = ODA(int, list(data))

        for i in range(len(array)):
            assert search_func(array, array[i], **kwargs) == i

        # Values outside the sampled range must never be found.
        for _ in range(50):
            assert search_func(array, random.randint(10001, 50000), **kwargs) is None

def test_linear_search():
    _test_common_search(linear_search, sort_array=False)
    _test_common_search(linear_search, sort_array=False, backend=Backend.CPP)

def test_binary_search():
    _test_common_search(binary_search)
    _test_common_search(binary_search, backend=Backend.CPP)

def test_jump_search():
    _test_common_search(jump_search)
    _test_common_search(jump_search, backend=Backend.CPP)

def test_shell_sort():
    _test_common_sort(shell_sort)

def test_radix_sort():
    _test_common_sort(radix_sort)
diff --git a/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_arrays.py b/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_arrays.py
new file mode 100644
index 000000000..886510113
--- /dev/null
+++ b/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_arrays.py
@@ -0,0 +1,157 @@
from pydatastructs.linear_data_structures import (
    OneDimensionalArray, DynamicOneDimensionalArray,
    MultiDimensionalArray, ArrayForTrees)
from pydatastructs.utils.misc_util import Backend
from pydatastructs.utils.raises_util import raises
from pydatastructs.utils import TreeNode
from pydatastructs.utils._backend.cpp import _nodes

def test_OneDimensionalArray():
    ODA = OneDimensionalArray
    # Python backend: floats are coerced to the declared int dtype.
    A = ODA(int, 5, [1.0, 2, 3, 4, 5], init=6)
    A[1] = 2.0
    assert str(A) == '[1, 2, 3, 4, 5]'
    assert A
    assert ODA(int, [1.0, 2, 3, 4, 5], 5)
    assert ODA(int, 5)
    assert ODA(int, [1.0, 2, 3])
    assert raises(IndexError, lambda: A[7])
    assert raises(IndexError, lambda: A[-1])
    assert raises(ValueError, lambda: ODA())
    assert raises(ValueError, lambda: ODA(int, 1, 2, 3))
    assert raises(TypeError, lambda: ODA(int, 5.0, set([1, 2, 3])))
    assert raises(TypeError, lambda: ODA(int, 5.0))
    assert raises(TypeError, lambda: ODA(int, set([1, 2, 3])))
    assert raises(ValueError, lambda: ODA(int, 3, [1]))

    # C++ backend: stricter typing (floats rejected), string repr.
    A = ODA(int, 5, [1, 2, 3, 4, 5], init=6, backend=Backend.CPP)
    A[1] = 2
    assert str(A) == "['1', '2', '3', '4', '5']"
    assert A
    assert ODA(int, [1, 2, 3, 4, 5], 5, backend=Backend.CPP)
    assert ODA(int, 5, backend=Backend.CPP)
    assert ODA(int, [1, 2, 3], backend=Backend.CPP)
    assert raises(TypeError, lambda: ODA(int, [1.0, 2, 3, 4, 5], 5, backend=Backend.CPP))
    assert raises(TypeError, lambda: ODA(int, [1.0, 2, 3], backend=Backend.CPP))
    assert raises(IndexError, lambda: A[7])
    assert raises(IndexError, lambda: A[-1])
    assert raises(ValueError, lambda: ODA(backend=Backend.CPP))
    assert raises(ValueError, lambda: ODA(int, 1, 2, 3, backend=Backend.CPP))
    assert raises(TypeError, lambda: ODA(int, 5.0, set([1, 2, 3]), backend=Backend.CPP))
    assert raises(TypeError, lambda: ODA(int, 5.0, backend=Backend.CPP))
    assert raises(TypeError, lambda: ODA(int, set([1, 2, 3]), backend=Backend.CPP))
    assert raises(ValueError, lambda: ODA(int, 3, [1], backend=Backend.CPP))
    assert raises(ValueError, lambda: ODA(int, 3, [1], backend=Backend.CPP))
    assert raises(TypeError, lambda: A.fill(2.0))


def test_MultiDimensionalArray():
    assert raises(ValueError, lambda: MultiDimensionalArray(int, 2, -1, 3))
    assert MultiDimensionalArray(int, 10).shape == (10,)
    array = MultiDimensionalArray(int, 5, 9, 3, 8)
    assert array.shape == (5, 9, 3, 8)
    array.fill(5)
    array[1, 3, 2, 5] = 2.0
    assert array
    assert array[1, 3, 2, 5] == 2.0
    # Neighbouring cells are untouched by the single-cell write.
    assert array[1, 3, 0, 5] == 5
    assert array[1, 2, 2, 5] == 5
    assert array[2, 3, 2, 5] == 5
    assert raises(IndexError, lambda: array[5])
    assert raises(IndexError, lambda: array[4, 10])
    assert raises(IndexError, lambda: array[-1])
    assert raises(IndexError, lambda: array[2, 3, 2, 8])
    assert raises(ValueError, lambda: MultiDimensionalArray())
    assert raises(ValueError, lambda: MultiDimensionalArray(int))
    assert raises(TypeError, lambda: MultiDimensionalArray(int, 5, 6, ""))
    # Row-major flattening of the 3x2x2 array.
    array = MultiDimensionalArray(int, 3, 2, 2)
    array.fill(1)
    array[0, 0, 0] = 0
    array[0, 0, 1] = 0
    array[1, 0, 0] = 0
    array[2, 1, 1] = 0
    assert str(array) == '[0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0]'
    array = MultiDimensionalArray(int, 4)
    assert array.shape == (4,)
    array.fill(5)
    array[3] = 3
    assert array[3] == 3

def test_DynamicOneDimensionalArray():
    DODA = DynamicOneDimensionalArray
    A = DODA(int, 0)
    A.append(1)
    A.append(2)
    A.append(3)
    A.append(4)
    assert str(A) == "['1', '2', '3', '4']"
    # Deletes shrink the backing store once enough holes accumulate;
    # out-of-range deletes (15, -1) are silently ignored.
    A.delete(0)
    A.delete(0)
    A.delete(15)
    A.delete(-1)
    A.delete(1)
    A.delete(2)
    assert A._data == [4, None, None]
    assert str(A) == "['4']"
    assert A.size == 3
    A.fill(4)
    assert A._data == [4, 4, 4]
    b = DynamicOneDimensionalArray(int, 0)
    b.append(1)
    b.append(2)
    b.append(3)
    b.append(4)
    b.append(5)
    assert b._data == [1, 2, 3, 4, 5, None, None]
    assert list(reversed(b)) == [5, 4, 3, 2, 1]

    # Same scenario on the C++ backend; compare element-wise since the
    # backing list is not exposed as _data.
    A = DODA(int, 0, backend=Backend.CPP)
    A.append(1)
    A.append(2)
    A.append(3)
    A.append(4)
    assert str(A) == "['1', '2', '3', '4']"
    A.delete(0)
    A.delete(0)
    A.delete(15)
    A.delete(-1)
    A.delete(1)
    A.delete(2)
    assert [A[i] for i in range(A.size)] == [4, None, None]
    assert A.size == 3
    A.fill(4)
    assert [A[0], A[1], A[2]] == [4, 4, 4]
    b = DODA(int, 0, backend=Backend.CPP)
    b.append(1)
    b.append(2)
    b.append(3)
    b.append(4)
    b.append(5)
    assert [b[i] for i in range(b.size)] == [1, 2, 3, 4, 5, None, None]

def test_DynamicOneDimensionalArray2():
    DODA = DynamicOneDimensionalArray
    root = TreeNode(1, 100)
    A = DODA(TreeNode, [root])
    assert str(A[0]) == "(None, 1, 100, None)"

# Node class differs per backend: Python TreeNode vs _nodes.TreeNode.
def _test_ArrayForTrees(backend):
    AFT = ArrayForTrees
    root = TreeNode(1, 100,backend=backend)
    if backend==Backend.PYTHON:
        A = AFT(TreeNode, [root], backend=backend)
        B = AFT(TreeNode, 0, backend=backend)
    else:
        A = AFT(_nodes.TreeNode, [root], backend=backend)
        B = AFT(_nodes.TreeNode, 0, backend=backend)
    assert str(A) == "['(None, 1, 100, None)']"
    node = TreeNode(2, 200, backend=backend)
    A.append(node)
    assert str(A) == "['(None, 1, 100, None)', '(None, 2, 200, None)']"
    assert str(B) == "[]"

def test_ArrayForTrees():
    _test_ArrayForTrees(Backend.PYTHON)

def test_cpp_ArrayForTrees():
    _test_ArrayForTrees(Backend.CPP)
diff --git a/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_linked_lists.py b/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_linked_lists.py
new file mode 100644
index 000000000..b7f172ddc
--- /dev/null
+++ b/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_linked_lists.py
@@ -0,0 +1,193 @@
from pydatastructs.linear_data_structures import DoublyLinkedList, SinglyLinkedList, SinglyCircularLinkedList, DoublyCircularLinkedList, SkipList
from pydatastructs.utils.raises_util import raises
import copy, random

def test_DoublyLinkedList():
    random.seed(1000)
    dll = DoublyLinkedList()
    assert raises(IndexError, lambda: dll[2])
    # Build a list by mixing every insertion entry point.
    dll.appendleft(5)
    dll.append(1)
    dll.appendleft(2)
    dll.append(3)
    dll.insert_after(dll[-1], 4)
    dll.insert_after(dll[2], 6)
    dll.insert_before(dll[4], 1.1)
    dll.insert_before(dll[0], 7)
    dll.insert_at(0, 2)
    dll.insert_at(-1, 9)
    dll.extract(2)
    assert dll.popleft().key == 2
    assert dll.popright().key == 4
    assert dll.search(3) == dll[-2]
    assert dll.search(-1) is None
    dll[-2].key = 0
    assert str(dll) == ("['(7, None)', '(5, None)', '(1, None)', "
                        "'(6, None)', '(1.1, None)', '(0, None)', "
                        "'(9, None)']")
    assert len(dll) == 7
    assert raises(IndexError, lambda: dll.insert_at(8, None))
    assert raises(IndexError, lambda: dll.extract(20))
    dll_copy = DoublyCircularLinkedList()
    for i in range(dll.size):
        dll_copy.append(dll[i])
    # Drain alternately from both ends until empty.
    for i in range(len(dll)):
        if i%2 == 0:
            dll.popleft()
        else:
            dll.popright()
    assert str(dll) == "[]"
    # Drain the circular copy by random-index extraction.
    for _ in range(len(dll_copy)):
        index = random.randint(0, len(dll_copy) - 1)
        dll_copy.extract(index)
    assert str(dll_copy) == "[]"
    assert raises(ValueError, lambda: dll_copy.extract(1))

def test_SinglyLinkedList():
    random.seed(1000)
    sll = SinglyLinkedList()
    assert raises(IndexError, lambda: sll[2])
    sll.appendleft(5)
    sll.append(1)
    sll.appendleft(2)
    sll.append(3)
    sll.insert_after(sll[1], 4)
    sll.insert_after(sll[-1], 6)
    sll.insert_at(0, 2)
    sll.insert_at(-1, 9)
    sll.extract(2)
    assert sll.popleft().key == 2
    assert sll.popright().key == 6
    sll[-2].key = 0
    assert str(sll) == ("['(2, None)', '(4, None)', '(1, None)', "
                        "'(0, None)', '(9, None)']")
    assert len(sll) == 5
    assert raises(IndexError, lambda: sll.insert_at(6, None))
    assert raises(IndexError, lambda: sll.extract(20))
    sll_copy = DoublyCircularLinkedList()
    for i in range(sll.size):
        sll_copy.append(sll[i])
range(len(sll)): + if i%2 == 0: + sll.popleft() + else: + sll.popright() + assert str(sll) == "[]" + for _ in range(len(sll_copy)): + index = random.randint(0, len(sll_copy) - 1) + sll_copy.extract(index) + assert str(sll_copy) == "[]" + assert raises(ValueError, lambda: sll_copy.extract(1)) + +def test_SinglyCircularLinkedList(): + random.seed(1000) + scll = SinglyCircularLinkedList() + assert raises(IndexError, lambda: scll[2]) + scll.appendleft(5) + scll.append(1) + scll.appendleft(2) + scll.append(3) + scll.insert_after(scll[1], 4) + scll.insert_after(scll[-1], 6) + scll.insert_at(0, 2) + scll.insert_at(-1, 9) + scll.extract(2) + assert scll.popleft().key == 2 + assert scll.popright().key == 6 + assert scll.search(-1) is None + scll[-2].key = 0 + assert str(scll) == ("['(2, None)', '(4, None)', '(1, None)', " + "'(0, None)', '(9, None)']") + assert len(scll) == 5 + assert raises(IndexError, lambda: scll.insert_at(6, None)) + assert raises(IndexError, lambda: scll.extract(20)) + scll_copy = DoublyCircularLinkedList() + for i in range(scll.size): + scll_copy.append(scll[i]) + for i in range(len(scll)): + if i%2 == 0: + scll.popleft() + else: + scll.popright() + assert str(scll) == "[]" + for _ in range(len(scll_copy)): + index = random.randint(0, len(scll_copy) - 1) + scll_copy.extract(index) + assert str(scll_copy) == "[]" + assert raises(ValueError, lambda: scll_copy.extract(1)) + +def test_DoublyCircularLinkedList(): + random.seed(1000) + dcll = DoublyCircularLinkedList() + assert raises(IndexError, lambda: dcll[2]) + dcll.appendleft(5) + dcll.append(1) + dcll.appendleft(2) + dcll.append(3) + dcll.insert_after(dcll[-1], 4) + dcll.insert_after(dcll[2], 6) + dcll.insert_before(dcll[4], 1) + dcll.insert_before(dcll[0], 7) + dcll.insert_at(0, 2) + dcll.insert_at(-1, 9) + dcll.extract(2) + assert dcll.popleft().key == 2 + assert dcll.popright().key == 4 + dcll[-2].key = 0 + assert str(dcll) == ("['(7, None)', '(5, None)', '(1, None)', " + "'(6, None)', '(1, None)', 
'(0, None)', " + "'(9, None)']") + assert len(dcll) == 7 + assert raises(IndexError, lambda: dcll.insert_at(8, None)) + assert raises(IndexError, lambda: dcll.extract(20)) + dcll_copy = DoublyCircularLinkedList() + for i in range(dcll.size): + dcll_copy.append(dcll[i]) + for i in range(len(dcll)): + if i%2 == 0: + dcll.popleft() + else: + dcll.popright() + assert str(dcll) == "[]" + for _ in range(len(dcll_copy)): + index = random.randint(0, len(dcll_copy) - 1) + dcll_copy.extract(index) + assert str(dcll_copy) == "[]" + assert raises(ValueError, lambda: dcll_copy.extract(1)) + +def test_SkipList(): + random.seed(0) + sl = SkipList() + sl.insert(2) + sl.insert(10) + sl.insert(92) + sl.insert(1) + sl.insert(4) + sl.insert(27) + sl.extract(10) + assert str(sl) == ("(1, None) None None None None \n" + "(1, None) None None None None \n" + "(1, None) (2, None) (4, None) (27, None) (92, None) \n") + assert raises(KeyError, lambda: sl.extract(15)) + assert sl.search(1) is True + assert sl.search(47) is False + + sl = SkipList() + + for a in range(0, 20, 2): + sl.insert(a) + assert sl.search(16) is True + for a in range(4, 20, 4): + sl.extract(a) + assert sl.search(10) is True + for a in range(4, 20, 4): + sl.insert(a) + for a in range(0, 20, 2): + sl.extract(a) + assert sl.search(3) is False + + li = SkipList() + li.insert(1) + li.insert(2) + assert li.levels == 1 + assert li.size == 2 diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/__init__.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/__init__.py new file mode 100644 index 000000000..6ed099769 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/__init__.py @@ -0,0 +1,51 @@ +__all__ = [] + +from . 
import ( + stack, + binomial_trees, + queue, + disjoint_set, + sparse_table, +) + +from .binomial_trees import ( + BinomialTree +) +__all__.extend(binomial_trees.__all__) + +from .stack import ( + Stack, +) +__all__.extend(stack.__all__) + +from .queue import ( + Queue, + PriorityQueue +) +__all__.extend(queue.__all__) + +from .disjoint_set import ( + DisjointSetForest, +) +__all__.extend(disjoint_set.__all__) + +from .sparse_table import ( + SparseTable, +) +__all__.extend(sparse_table.__all__) + +from .segment_tree import ( + ArraySegmentTree, +) +__all__.extend(segment_tree.__all__) + +from .algorithms import ( + RangeQueryStatic, + RangeQueryDynamic +) +__all__.extend(algorithms.__all__) + +from .multiset import ( + Multiset +) +__all__.extend(multiset.__all__) diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/_backend/__init__.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/_backend/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/algorithms.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/algorithms.py new file mode 100644 index 000000000..3c2f86516 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/algorithms.py @@ -0,0 +1,335 @@ +from pydatastructs.miscellaneous_data_structures.sparse_table import SparseTable +from pydatastructs.miscellaneous_data_structures.segment_tree import ArraySegmentTree +from pydatastructs.utils.misc_util import ( + _check_range_query_inputs, Backend, + raise_if_backend_is_not_python) + +__all__ = [ + 'RangeQueryStatic', + 'RangeQueryDynamic' +] + + +class RangeQueryStatic: + """ + Produces results for range queries of different kinds + by using specified data structure. + + Parameters + ========== + + array: OneDimensionalArray + The array for which we need to answer queries. 
+ All the elements should be of type `int`. + func: callable + The function to be used for generating results + of a query. It should accept only one tuple as an + argument. The size of the tuple will be either 1 or 2 + and any one of the elements can be `None`. You can treat + `None` in whatever way you want according to the query + you are performing. For example, in case of range minimum + queries, `None` can be treated as infinity. We provide + the following which can be used as an argument value for this + parameter, + + `minimum` - For range minimum queries. + + `greatest_common_divisor` - For queries finding greatest + common divisor of a range. + + `summation` - For range sum queries. + data_structure: str + The data structure to be used for performing + range queries. + Currently the following data structures are supported, + + 'array' -> Array data structure. + Each query takes O(end - start) time asymptotically. + + 'sparse_table' -> Sparse table data structure. + Each query takes O(log(end - start)) time + asymptotically. + + By default, 'sparse_table'. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Examples + ======== + + >>> from pydatastructs import OneDimensionalArray, RangeQueryStatic + >>> from pydatastructs import minimum + >>> arr = OneDimensionalArray(int, [4, 6, 1, 5, 7, 3]) + >>> RMQ = RangeQueryStatic(arr, minimum) + >>> RMQ.query(3, 4) + 5 + >>> RMQ.query(0, 4) + 1 + >>> RMQ.query(0, 2) + 1 + + Note + ==== + + The array once passed as an input should not be modified + once the `RangeQueryStatic` constructor is called. If you + have updated the array, then you need to create a new + `RangeQueryStatic` object with this updated array. 
+ """ + + def __new__(cls, array, func, data_structure='sparse_table', **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + if len(array) == 0: + raise ValueError("Input %s array is empty."%(array)) + + if data_structure == 'array': + return RangeQueryStaticArray(array, func) + elif data_structure == 'sparse_table': + return RangeQueryStaticSparseTable(array, func) + else: + raise NotImplementedError( + "Currently %s data structure for range " + "query without updates isn't implemented yet." + % (data_structure)) + + @classmethod + def methods(cls): + return ['query'] + + def query(start, end): + """ + Method to perform a query in [start, end) range. + + Parameters + ========== + + start: int + The starting index of the range. + end: int + The ending index of the range. + """ + raise NotImplementedError( + "This is an abstract method.") + + +class RangeQueryStaticSparseTable(RangeQueryStatic): + + __slots__ = ["sparse_table", "bounds"] + + def __new__(cls, array, func, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = object.__new__(cls) + sparse_table = SparseTable(array, func) + obj.bounds = (0, len(array)) + obj.sparse_table = sparse_table + return obj + + @classmethod + def methods(cls): + return ['query'] + + def query(self, start, end): + _check_range_query_inputs((start, end + 1), self.bounds) + return self.sparse_table.query(start, end) + + +class RangeQueryStaticArray(RangeQueryStatic): + + __slots__ = ["array", "func"] + + def __new__(cls, array, func): + obj = object.__new__(cls) + obj.array = array + obj.func = func + return obj + + @classmethod + def methods(cls): + return ['query'] + + def query(self, start, end): + _check_range_query_inputs((start, end + 1), (0, len(self.array))) + + rsize = end - start + 1 + + if rsize == 1: + return self.func((self.array[start],)) + + query_ans = self.func((self.array[start], self.array[start + 1])) + for i in range(start + 2, 
end + 1): + query_ans = self.func((query_ans, self.array[i])) + return query_ans + +class RangeQueryDynamic: + """ + Produces results for range queries of different kinds + while allowing point updates by using specified + data structure. + + Parameters + ========== + + array: OneDimensionalArray + The array for which we need to answer queries. + All the elements should be of type `int`. + func: callable + The function to be used for generating results + of a query. It should accept only one tuple as an + argument. The size of the tuple will be either 1 or 2 + and any one of the elements can be `None`. You can treat + `None` in whatever way you want according to the query + you are performing. For example, in case of range minimum + queries, `None` can be treated as infinity. We provide + the following which can be used as an argument value for this + parameter, + + `minimum` - For range minimum queries. + + `greatest_common_divisor` - For queries finding greatest + common divisor of a range. + + `summation` - For range sum queries. + data_structure: str + The data structure to be used for performing + range queries. + Currently the following data structures are supported, + + 'array' -> Array data structure. + Each query takes O(end - start) time asymptotically. + Each point update takes O(1) time asymptotically. + + 'segment_tree' -> Segment tree data structure. + Each query takes O(log(end - start)) time + asymptotically. + Each point update takes O(log(len(array))) time + asymptotically. + + By default, 'segment_tree'. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. 
+ + Examples + ======== + + >>> from pydatastructs import OneDimensionalArray, RangeQueryDynamic + >>> from pydatastructs import minimum + >>> arr = OneDimensionalArray(int, [4, 6, 1, 5, 7, 3]) + >>> RMQ = RangeQueryDynamic(arr, minimum) + >>> RMQ.query(3, 4) + 5 + >>> RMQ.query(0, 4) + 1 + >>> RMQ.query(0, 2) + 1 + >>> RMQ.update(2, 0) + >>> RMQ.query(0, 2) + 0 + + Note + ==== + + The array once passed as an input should be modified + only with `RangeQueryDynamic.update` method. + """ + + def __new__(cls, array, func, data_structure='segment_tree', **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + + if len(array) == 0: + raise ValueError("Input %s array is empty."%(array)) + + if data_structure == 'array': + return RangeQueryDynamicArray(array, func, **kwargs) + elif data_structure == 'segment_tree': + return RangeQueryDynamicSegmentTree(array, func, **kwargs) + else: + raise NotImplementedError( + "Currently %s data structure for range " + "query with point updates isn't implemented yet." + % (data_structure)) + + @classmethod + def methods(cls): + return ['query', 'update'] + + def query(start, end): + """ + Method to perform a query in [start, end) range. + + Parameters + ========== + + start: int + The starting index of the range. + end: int + The ending index of the range. + """ + raise NotImplementedError( + "This is an abstract method.") + + def update(self, index, value): + """ + Method to update index with a new value. + + Parameters + ========== + + index: int + The index to be update. + value: int + The new value. 
+ """ + raise NotImplementedError( + "This is an abstract method.") + +class RangeQueryDynamicArray(RangeQueryDynamic): + + __slots__ = ["range_query_static"] + + def __new__(cls, array, func, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = object.__new__(cls) + obj.range_query_static = RangeQueryStaticArray(array, func) + return obj + + @classmethod + def methods(cls): + return ['query', 'update'] + + def query(self, start, end): + return self.range_query_static.query(start, end) + + def update(self, index, value): + self.range_query_static.array[index] = value + +class RangeQueryDynamicSegmentTree(RangeQueryDynamic): + + __slots__ = ["segment_tree", "bounds"] + + def __new__(cls, array, func, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.pop('backend', Backend.PYTHON)) + obj = object.__new__(cls) + obj.segment_tree = ArraySegmentTree(array, func, dimensions=1) + obj.segment_tree.build() + obj.bounds = (0, len(array)) + return obj + + @classmethod + def methods(cls): + return ['query', 'update'] + + def query(self, start, end): + _check_range_query_inputs((start, end + 1), self.bounds) + return self.segment_tree.query(start, end) + + def update(self, index, value): + self.segment_tree.update(index, value) diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/binomial_trees.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/binomial_trees.py new file mode 100644 index 000000000..9ea91d828 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/binomial_trees.py @@ -0,0 +1,91 @@ +from pydatastructs.utils.misc_util import ( + BinomialTreeNode, _check_type, Backend, + raise_if_backend_is_not_python) + +__all__ = [ + 'BinomialTree' +] + +class BinomialTree(object): + """ + Represents binomial trees + + Parameters + ========== + + root: BinomialTreeNode + The root of the binomial tree. 
+ By default, None + order: int + The order of the binomial tree. + By default, None + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Examples + ======== + + >>> from pydatastructs import BinomialTree, BinomialTreeNode + >>> root = BinomialTreeNode(1, 1) + >>> tree = BinomialTree(root, 0) + >>> tree.is_empty + False + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Binomial_heap + """ + __slots__ = ['root', 'order'] + + def __new__(cls, root=None, order=None, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + if root is not None and \ + not _check_type(root, BinomialTreeNode): + raise TypeError("%s i.e., root should be of " + "type BinomialTreeNode."%(root)) + if order is not None and not _check_type(order, int): + raise TypeError("%s i.e., order should be of " + "type int."%(order)) + obj = object.__new__(cls) + if root is not None: + root.is_root = True + obj.root = root + obj.order = order + return obj + + @classmethod + def methods(cls): + return ['add_sub_tree', '__new__', 'is_empty'] + + def add_sub_tree(self, other_tree): + """ + Adds a sub tree to current tree. + + Parameters + ========== + + other_tree: BinomialTree + + Raises + ====== + + ValueError: If order of the two trees + are different. 
+ """ + if not _check_type(other_tree, BinomialTree): + raise TypeError("%s i.e., other_tree should be of " + "type BinomialTree"%(other_tree)) + if self.order != other_tree.order: + raise ValueError("Orders of both the trees should be same.") + self.root.children.append(other_tree.root) + other_tree.root.parent = self.root + other_tree.root.is_root = False + self.order += 1 + + @property + def is_empty(self): + return self.root is None diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/disjoint_set.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/disjoint_set.py new file mode 100644 index 000000000..9a5caef5b --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/disjoint_set.py @@ -0,0 +1,143 @@ +from pydatastructs.utils import Set +from pydatastructs.utils.misc_util import ( + Backend, raise_if_backend_is_not_python) + +__all__ = ['DisjointSetForest'] + +class DisjointSetForest(object): + """ + Represents a forest of disjoint set trees. + + Parameters + ========== + + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Examples + ======== + + >>> from pydatastructs import DisjointSetForest + >>> dst = DisjointSetForest() + >>> dst.make_set(1) + >>> dst.make_set(2) + >>> dst.union(1, 2) + >>> dst.find_root(2).key + 1 + >>> dst.make_root(2) + >>> dst.find_root(2).key + 2 + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Disjoint-set_data_structure + """ + + __slots__ = ['tree'] + + def __new__(cls, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = object.__new__(cls) + obj.tree = dict() + return obj + + @classmethod + def methods(cls): + return ['make_set', '__new__', 'find_root', 'union'] + + def make_set(self, key, data=None): + """ + Adds a singleton set to the tree + of disjoint sets with given key + and optionally data. 
+ """ + if self.tree.get(key, None) is None: + new_set = Set(key, data) + self.tree[key] = new_set + new_set.parent = new_set + new_set.size = 1 + + def find_root(self, key): + """ + Finds the root of the set + with the given key by path + splitting algorithm. + """ + if self.tree.get(key, None) is None: + raise KeyError("Invalid key, %s"%(key)) + _set = self.tree[key] + while _set.parent is not _set: + _set, _set.parent = _set.parent, _set.parent.parent + return _set + + def union(self, key1, key2): + """ + Takes the union of the two + disjoint set trees with given + keys. The union is done by size. + """ + x_root = self.find_root(key1) + y_root = self.find_root(key2) + + if x_root is not y_root: + if x_root.size < y_root.size: + x_root, y_root = y_root, x_root + + y_root.parent = x_root + x_root.size += y_root.size + + def make_root(self, key): + """ + Finds the set to which the key belongs + and makes it as the root of the set. + """ + if self.tree.get(key, None) is None: + raise KeyError("Invalid key, %s"%(key)) + + key_set = self.tree[key] + if key_set.parent is not key_set: + current_parent = key_set.parent + # Remove this key subtree size from all its ancestors + while current_parent.parent is not current_parent: + current_parent.size -= key_set.size + current_parent = current_parent.parent + + all_set_size = current_parent.size # This is the root node + current_parent.size -= key_set.size + + # Make parent of current root as key + current_parent.parent = key_set + # size of new root will be same as previous root's size + key_set.size = all_set_size + # Make parent of key as itself + key_set.parent = key_set + + def find_size(self, key): + """ + Finds the size of set to which the key belongs. + """ + if self.tree.get(key, None) is None: + raise KeyError("Invalid key, %s"%(key)) + + return self.find_root(key).size + + def disjoint_sets(self): + """ + Returns a list of disjoint sets in the data structure. 
+ """ + result = dict() + for key in self.tree.keys(): + parent = self.find_root(key).key + members = result.get(parent, []) + members.append(key) + result[parent] = members + sorted_groups = [] + for v in result.values(): + sorted_groups.append(v) + sorted_groups[-1].sort() + sorted_groups.sort() + return sorted_groups diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/multiset.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/multiset.py new file mode 100644 index 000000000..397978224 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/multiset.py @@ -0,0 +1,42 @@ +__all__ = [ + 'Multiset' +] + + +class Multiset: + def __init__(self, *args): + # TODO: Implement dict in pydatastructs + self.counter = dict() + from pydatastructs.trees import RedBlackTree + self.tree = RedBlackTree() + self._n = 0 + for arg in args: + self.add(arg) + + def add(self, element): + self.counter[element] = self.counter.get(element, 0) + 1 + self._n += 1 + if self.counter[element] == 1: + self.tree.insert(element) + + def remove(self, element): + if self.counter[element] == 1: + self.tree.delete(element) + if self.counter.get(element, 0) > 0: + self._n -= 1 + self.counter[element] -= 1 + + def lower_bound(self, element): + return self.tree.lower_bound(element) + + def upper_bound(self, element): + return self.tree.upper_bound(element) + + def __contains__(self, element): + return self.counter.get(element, 0) > 0 + + def __len__(self): + return self._n + + def count(self, element): + return self.counter.get(element, 0) diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/queue.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/queue.py new file mode 100644 index 000000000..033ef9af3 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/queue.py @@ -0,0 +1,498 @@ +from 
pydatastructs.linear_data_structures import DynamicOneDimensionalArray, SinglyLinkedList +from pydatastructs.utils.misc_util import ( + NoneType, Backend, raise_if_backend_is_not_python) +from pydatastructs.trees.heaps import BinaryHeap, BinomialHeap +from copy import deepcopy as dc + +__all__ = [ + 'Queue', + 'PriorityQueue' +] + +class Queue(object): + """Representation of queue data structure. + + Parameters + ========== + + implementation : str + Implementation to be used for queue. + By default, 'array' + items : list/tuple + Optional, by default, None + The inital items in the queue. + dtype : A valid python type + Optional, by default NoneType if item + is None. + Required only for 'array' implementation. + double_ended : bool + Optional, by default, False. + Set to True if the queue should support + additional, appendleft and pop operations + from left and right sides respectively. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Examples + ======== + + >>> from pydatastructs import Queue + >>> q = Queue() + >>> q.append(1) + >>> q.append(2) + >>> q.append(3) + >>> q.popleft() + 1 + >>> len(q) + 2 + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/Queue_(abstract_data_type) + """ + + def __new__(cls, implementation='array', **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + if implementation == 'array': + return ArrayQueue( + kwargs.get('items', None), + kwargs.get('dtype', int), + kwargs.get('double_ended', False)) + elif implementation == 'linked_list': + return LinkedListQueue( + kwargs.get('items', None), + kwargs.get('double_ended', False) + ) + else: + raise NotImplementedError( + "%s hasn't been implemented yet."%(implementation)) + + @classmethod + def methods(cls): + return ['__new__'] + + def _double_ended_check(self): + if not self._double_ended: + raise NotImplementedError( + "This method is only supported for " + "double ended queues.") + + def append(self, *args, **kwargs): + raise NotImplementedError( + "This is an abstract method.") + + def appendleft(self, *args, **kwargs): + raise NotImplementedError( + "This is an abstract method.") + + def pop(self, *args, **kwargs): + raise NotImplementedError( + "This is an abstract method.") + + def popleft(self, *args, **kwargs): + raise NotImplementedError( + "This is an abstract method.") + + @property + def is_empty(self): + raise NotImplementedError( + "This is an abstract method.") + + +class ArrayQueue(Queue): + + __slots__ = ['_front', '_rear', '_double_ended'] + + def __new__(cls, items=None, dtype=NoneType, double_ended=False, + **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + if items is None: + items = DynamicOneDimensionalArray(dtype, 0) + else: + dtype = type(items[0]) + items = DynamicOneDimensionalArray(dtype, items) + obj = object.__new__(cls) + obj.items, obj._front = items, -1 + if items.size == 0: + obj._front = -1 + obj._rear = -1 + else: + obj._front = 0 + obj._rear = items._num - 1 + obj._double_ended = double_ended + return obj + + @classmethod + def methods(cls): + return ['__new__', 'append', 'appendleft', 
'popleft', + 'pop', 'is_empty', '__len__', '__str__', 'front', + 'rear'] + + def append(self, x): + if self.is_empty: + self._front = 0 + self.items._dtype = type(x) + self.items.append(x) + self._rear += 1 + + def appendleft(self, x): + self._double_ended_check() + temp = [] + if self.is_empty: + self._front = 0 + self._rear = -1 + self.items._dtype = type(x) + temp.append(x) + for i in range(self._front, self._rear + 1): + temp.append(self.items._data[i]) + self.items = DynamicOneDimensionalArray(type(temp[0]), temp) + self._rear += 1 + + def popleft(self): + if self.is_empty: + raise IndexError("Queue is empty.") + return_value = dc(self.items[self._front]) + front_temp = self._front + if self._front == self._rear: + self._front = -1 + self._rear = -1 + else: + if (self.items._num - 1)/self.items._size < \ + self.items._load_factor: + self._front = 0 + else: + self._front += 1 + self.items.delete(front_temp) + return return_value + + def pop(self): + self._double_ended_check() + if self.is_empty: + raise IndexError("Queue is empty.") + + return_value = dc(self.items[self._rear]) + rear_temp = self._rear + if self._front == self._rear: + self._front = -1 + self._rear = -1 + else: + if (self.items._num - 1)/self.items._size < \ + self.items._load_factor: + self._front = 0 + else: + self._rear -= 1 + self.items.delete(rear_temp) + return return_value + + @property + def front(self): + return self._front + + @property + def rear(self): + return self._rear + + @property + def is_empty(self): + return self.__len__() == 0 + + def __len__(self): + return self.items._num + + def __str__(self): + _data = [] + for i in range(self._front, self._rear + 1): + _data.append(self.items._data[i]) + return str(_data) + +class LinkedListQueue(Queue): + + __slots__ = ['queue', '_double_ended'] + + def __new__(cls, items=None, double_ended=False, + **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = object.__new__(cls) + obj.queue = 
SinglyLinkedList() + if items is None: + pass + elif type(items) in (list, tuple): + for x in items: + obj.append(x) + else: + raise TypeError("Expected type: list/tuple") + obj._double_ended = double_ended + return obj + + @classmethod + def methods(cls): + return ['__new__', 'append', 'appendleft', 'pop', 'popleft', + 'is_empty', '__len__', '__str__', 'front', 'rear'] + + def append(self, x): + self.queue.append(x) + + def appendleft(self, x): + self._double_ended_check() + if self._double_ended: + self.queue.appendleft(x) + + def pop(self): + self._double_ended_check() + if self.is_empty: + raise IndexError("Queue is empty.") + return_value = self.queue.popright() + return return_value + + def popleft(self): + if self.is_empty: + raise IndexError("Queue is empty.") + return_value = self.queue.popleft() + return return_value + + @property + def is_empty(self): + return self.__len__() == 0 + + @property + def front(self): + return self.queue.head + + @property + def rear(self): + return self.queue.tail + + def __len__(self): + return self.queue.size + + def __str__(self): + return str(self.queue) + +class PriorityQueue(object): + """ + Represents the concept of priority queue. + + Parameters + ========== + + implementation: str + The implementation which is to be + used for supporting operations + of priority queue. + The following implementations are supported, + + 'linked_list' -> Linked list implementation. + + 'binary_heap' -> Binary heap implementation. + + 'binomial_heap' -> Binomial heap implementation. + Doesn't support custom comparators, minimum + key data is extracted in every pop. + + Optional, by default, 'binary_heap' implementation + is used. + comp: function + The comparator to be used while comparing priorities. + Must return a bool object. + By default, `lambda u, v: u < v` is used to compare + priorities i.e., minimum priority elements are extracted + by pop operation. + backend: pydatastructs.Backend + The backend to be used. 
+ Optional, by default, the best available + backend is used. + + Examples + ======== + + >>> from pydatastructs import PriorityQueue + >>> pq = PriorityQueue() + >>> pq.push(1, 2) + >>> pq.push(2, 3) + >>> pq.pop() + 1 + >>> pq2 = PriorityQueue(comp=lambda u, v: u > v) + >>> pq2.push(1, 2) + >>> pq2.push(2, 3) + >>> pq2.pop() + 2 + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Priority_queue + """ + + def __new__(cls, implementation='binary_heap', **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + comp = kwargs.get("comp", lambda u, v: u < v) + if implementation == 'linked_list': + return LinkedListPriorityQueue(comp) + elif implementation == 'binary_heap': + return BinaryHeapPriorityQueue(comp) + elif implementation == 'binomial_heap': + return BinomialHeapPriorityQueue() + else: + raise NotImplementedError( + "%s implementation is not currently supported " + "by priority queue.") + + @classmethod + def methods(cls): + return ['__new__'] + + def push(self, value, priority): + """ + Pushes the value to the priority queue + according to the given priority. + + value + Value to be pushed. + priority + Priority to be given to the value. + """ + raise NotImplementedError( + "This is an abstract method.") + + def pop(self): + """ + Pops out the value from the priority queue. + """ + raise NotImplementedError( + "This is an abstract method.") + + @property + def peek(self): + """ + Returns the pointer to the value which will be + popped out by `pop` method. + """ + raise NotImplementedError( + "This is an abstract method.") + + @property + def is_empty(self): + """ + Checks if the priority queue is empty. 
+ """ + raise NotImplementedError( + "This is an abstract method.") + +class LinkedListPriorityQueue(PriorityQueue): + + __slots__ = ['items', 'comp'] + + @classmethod + def methods(cls): + return ['__new__', 'push', 'pop', 'peek', 'is_empty'] + + def __new__(cls, comp, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = object.__new__(cls) + obj.items = SinglyLinkedList() + obj.comp = comp + return obj + + def push(self, value, priority): + self.items.append(priority, value) + + def pop(self): + _, max_i = self._find_peek(return_index=True) + pop_val = self.items.extract(max_i) + return pop_val.data + + def _find_peek(self, return_index=False): + if self.is_empty: + raise IndexError("Priority queue is empty.") + + walk = self.items.head + i, max_i, max_p = 0, 0, walk + while walk is not None: + if self.comp(walk.key, max_p.key): + max_i = i + max_p = walk + i += 1 + walk = walk.next + if return_index: + return max_p, max_i + return max_p + + @property + def peek(self): + return self._find_peek() + + @property + def is_empty(self): + return self.items.size == 0 + +class BinaryHeapPriorityQueue(PriorityQueue): + + __slots__ = ['items'] + + @classmethod + def methods(cls): + return ['__new__', 'push', 'pop', 'peek', 'is_empty'] + + def __new__(cls, comp, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = object.__new__(cls) + obj.items = BinaryHeap() + obj.items._comp = comp + return obj + + def push(self, value, priority): + self.items.insert(priority, value) + + def pop(self): + node = self.items.extract() + return node.data + + @property + def peek(self): + if self.items.is_empty: + raise IndexError("Priority queue is empty.") + return self.items.heap[0] + + @property + def is_empty(self): + return self.items.is_empty + +class BinomialHeapPriorityQueue(PriorityQueue): + + __slots__ = ['items'] + + @classmethod + def methods(cls): + return ['__new__', 'push', 'pop', 
'peek', 'is_empty'] + + def __new__(cls, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = object.__new__(cls) + obj.items = BinomialHeap() + return obj + + def push(self, value, priority): + self.items.insert(priority, value) + + def pop(self): + node = self.items.find_minimum() + self.items.delete_minimum() + return node.data + + @property + def peek(self): + return self.items.find_minimum() + + @property + def is_empty(self): + return self.items.is_empty diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/segment_tree.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/segment_tree.py new file mode 100644 index 000000000..0895ba6da --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/segment_tree.py @@ -0,0 +1,225 @@ +from .stack import Stack +from pydatastructs.utils.misc_util import (TreeNode, + Backend, raise_if_backend_is_not_python) + +__all__ = ['ArraySegmentTree'] + +class ArraySegmentTree(object): + """ + Represents the segment tree data structure, + defined on arrays. + + Parameters + ========== + + array: Array + The array to be used for filling the segment tree. + func: callable + The function to be used for filling the segment tree. + It should accept only one tuple as an argument. The + size of the tuple will be either 1 or 2 and any one + of the elements can be `None`. You can treat `None` in + whatever way you want. For example, in case of minimum + values, `None` can be treated as infinity. We provide + the following which can be used as an argument value for this + parameter, + + `minimum` - For range minimum queries. + + `greatest_common_divisor` - For queries finding greatest + common divisor of a range. + + `summation` - For range sum queries. + dimensions: int + The number of dimensions of the array to be used + for the segment tree. + Optional, by default 1. 
+ backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Examples + ======== + + >>> from pydatastructs import ArraySegmentTree, minimum + >>> from pydatastructs import OneDimensionalArray + >>> arr = OneDimensionalArray(int, [1, 2, 3, 4, 5]) + >>> s_t = ArraySegmentTree(arr, minimum) + >>> s_t.build() + >>> s_t.query(0, 1) + 1 + >>> s_t.query(1, 3) + 2 + >>> s_t.update(2, -1) + >>> s_t.query(1, 3) + -1 + >>> arr = OneDimensionalArray(int, [1, 2]) + >>> s_t = ArraySegmentTree(arr, minimum) + >>> s_t.build() + >>> str(s_t) + "['((0, 1), 1)', '((0, 0), 1)', '', '', '((1, 1), 2)', '', '']" + + References + ========== + + .. [1] https://cp-algorithms.com/data_structures/segment_tree.html + """ + def __new__(cls, array, func, **kwargs): + + dimensions = kwargs.pop("dimensions", 1) + if dimensions == 1: + return OneDimensionalArraySegmentTree(array, func, **kwargs) + else: + raise NotImplementedError("ArraySegmentTree do not support " + "{}-dimensional arrays as of now.".format(dimensions)) + + def build(self): + """ + Generates segment tree nodes when called. + Nothing happens if nodes are already generated. + """ + raise NotImplementedError( + "This is an abstract method.") + + def update(self, index, value): + """ + Updates the value at given index. + """ + raise NotImplementedError( + "This is an abstract method.") + + def query(self, start, end): + """ + Queries [start, end] range according + to the function provided while constructing + `ArraySegmentTree` object. 
+ """ + raise NotImplementedError( + "This is an abstract method.") + + def __str__(self): + recursion_stack = Stack(implementation='linked_list') + recursion_stack.push(self._root) + to_be_printed = [] + while not recursion_stack.is_empty: + node = recursion_stack.pop().key + if node is not None: + to_be_printed.append(str((node.key, node.data))) + else: + to_be_printed.append('') + if node is not None: + recursion_stack.push(node.right) + recursion_stack.push(node.left) + return str(to_be_printed) + + +class OneDimensionalArraySegmentTree(ArraySegmentTree): + + __slots__ = ["_func", "_array", "_root", "_backend"] + + def __new__(cls, array, func, **kwargs): + backend = kwargs.get('backend', Backend.PYTHON) + raise_if_backend_is_not_python(cls, backend) + + obj = object.__new__(cls) + obj._func = func + obj._array = array + obj._root = None + obj._backend = backend + return obj + + @classmethod + def methods(self): + return ['__new__', 'build', 'update', + 'query'] + + @property + def is_ready(self): + return self._root is not None + + def build(self): + if self.is_ready: + return + + recursion_stack = Stack(implementation='linked_list') + node = TreeNode((0, len(self._array) - 1), None, backend=self._backend) + node.is_root = True + self._root = node + recursion_stack.push(node) + + while not recursion_stack.is_empty: + node = recursion_stack.peek.key + start, end = node.key + if start == end: + node.data = self._array[start] + recursion_stack.pop() + continue + + if (node.left is not None and + node.right is not None): + recursion_stack.pop() + node.data = self._func((node.left.data, node.right.data)) + else: + mid = (start + end) // 2 + if node.left is None: + left_node = TreeNode((start, mid), None) + node.left = left_node + recursion_stack.push(left_node) + if node.right is None: + right_node = TreeNode((mid + 1, end), None) + node.right = right_node + recursion_stack.push(right_node) + + def update(self, index, value): + if not self.is_ready: + raise 
ValueError("{} tree is not built yet. ".format(self) + + "Call .build method to prepare the segment tree.") + + recursion_stack = Stack(implementation='linked_list') + recursion_stack.push((self._root, None)) + + while not recursion_stack.is_empty: + node, child = recursion_stack.peek.key + start, end = node.key + if start == end: + self._array[index] = value + node.data = value + recursion_stack.pop() + if not recursion_stack.is_empty: + parent_node = recursion_stack.pop() + recursion_stack.push((parent_node.key[0], node)) + continue + + if child is not None: + node.data = self._func((node.left.data, node.right.data)) + recursion_stack.pop() + if not recursion_stack.is_empty: + parent_node = recursion_stack.pop() + recursion_stack.push((parent_node.key[0], node)) + else: + mid = (start + end) // 2 + if start <= index and index <= mid: + recursion_stack.push((node.left, None)) + else: + recursion_stack.push((node.right, None)) + + def _query(self, node, start, end, l, r): + if r < start or end < l: + return None + + if l <= start and end <= r: + return node.data + + mid = (start + end) // 2 + left_result = self._query(node.left, start, mid, l, r) + right_result = self._query(node.right, mid + 1, end, l, r) + return self._func((left_result, right_result)) + + def query(self, start, end): + if not self.is_ready: + raise ValueError("{} tree is not built yet. 
".format(self) + + "Call .build method to prepare the segment tree.") + + return self._query(self._root, 0, len(self._array) - 1, + start, end) diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/sparse_table.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/sparse_table.py new file mode 100644 index 000000000..55ec4e9b3 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/sparse_table.py @@ -0,0 +1,108 @@ +from pydatastructs.linear_data_structures.arrays import OneDimensionalArray +from pydatastructs.utils.misc_util import ( + Backend, raise_if_backend_is_not_python) +import math + +__all__ = ['SparseTable'] + + +class SparseTable(object): + """ + Represents the sparse table data structure. + + Parameters + ========== + + array: OneDimensionalArray + The array to be used for filling the sparse table. + func: callable + The function to be used for filling the sparse table. + It should accept only one tuple as an argument. The + size of the tuple will be either 1 or 2 and any one + of the elements can be `None`. You can treat `None` in + whatever way you want. For example, in case of minimum + values, `None` can be treated as infinity. We provide + the following which can be used as an argument value for this + parameter, + + `minimum` - For range minimum queries. + + `greatest_common_divisor` - For queries finding greatest + common divisor of a range. + + `summation` - For range sum queries. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Examples + ======== + + >>> from pydatastructs import SparseTable, minimum + >>> from pydatastructs import OneDimensionalArray + >>> arr = OneDimensionalArray(int, [1, 2, 3, 4, 5]) + >>> s_t = SparseTable(arr, minimum) + >>> str(s_t) + "['[1, 1, 1]', '[2, 2, 2]', '[3, 3, None]', '[4, 4, None]', '[5, None, None]']" + + References + ========== + + .. 
[1] https://cp-algorithms.com/data_structures/sparse-table.html + """ + + __slots__ = ['_table', 'func'] + + def __new__(cls, array, func, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + + # TODO: If possible remove the following check. + if len(array) == 0: + raise ValueError("Input %s array is empty."%(array)) + + obj = object.__new__(cls) + size = len(array) + log_size = int(math.log2(size)) + 1 + obj._table = [OneDimensionalArray(int, log_size) for _ in range(size)] + obj.func = func + + for i in range(size): + obj._table[i][0] = func((array[i],)) + + for j in range(1, log_size + 1): + for i in range(size - (1 << j) + 1): + obj._table[i][j] = func((obj._table[i][j - 1], + obj._table[i + (1 << (j - 1))][j - 1])) + + return obj + + @classmethod + def methods(cls): + return ['query', '__str__'] + + def query(self, start, end): + """ + Method to perform a query on sparse table in [start, end) + range. + + Parameters + ========== + + start: int + The starting index of the range. + end: int + The ending index of the range. 
+ """ + j = int(math.log2(end - start + 1)) + 1 + answer = None + while j >= 0: + if start + (1 << j) - 1 <= end: + answer = self.func((answer, self._table[start][j])) + start += 1 << j + j -= 1 + return answer + + def __str__(self): + return str([str(array) for array in self._table]) diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/stack.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/stack.py new file mode 100644 index 000000000..38f72b43f --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/stack.py @@ -0,0 +1,200 @@ +from pydatastructs.linear_data_structures import DynamicOneDimensionalArray, SinglyLinkedList +from pydatastructs.miscellaneous_data_structures._backend.cpp import _stack +from pydatastructs.utils.misc_util import ( + _check_type, NoneType, Backend, + raise_if_backend_is_not_python) +from copy import deepcopy as dc + +__all__ = [ + 'Stack' +] + +class Stack(object): + """Representation of stack data structure + + Parameters + ========== + + implementation : str + Implementation to be used for stack. + By default, 'array' + Currently only supports 'array' + implementation. + items : list/tuple + Optional, by default, None + The inital items in the stack. + For array implementation. + dtype : A valid python type + Optional, by default NoneType if item + is None, otherwise takes the data + type of DynamicOneDimensionalArray + For array implementation. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Examples + ======== + + >>> from pydatastructs import Stack + >>> s = Stack() + >>> s.push(1) + >>> s.push(2) + >>> s.push(3) + >>> str(s) + '[1, 2, 3]' + >>> s.pop() + 3 + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/Stack_(abstract_data_type) + """ + + def __new__(cls, implementation='array', **kwargs): + backend = kwargs.get('backend', Backend.PYTHON) + if implementation == 'array': + items = kwargs.get('items', None) + dtype = kwargs.get('dtype', int) + if backend == Backend.CPP: + return _stack.ArrayStack(items, dtype) + + return ArrayStack(items, dtype) + if implementation == 'linked_list': + raise_if_backend_is_not_python(cls, backend) + + return LinkedListStack( + kwargs.get('items', None) + ) + raise NotImplementedError( + "%s hasn't been implemented yet."%(implementation)) + + @classmethod + def methods(cls): + return ['__new__'] + + def push(self, *args, **kwargs): + raise NotImplementedError( + "This is an abstract method.") + + def pop(self, *args, **kwargs): + raise NotImplementedError( + "This is an abstract method.") + + @property + def is_empty(self): + raise NotImplementedError( + "This is an abstract method.") + + @property + def peek(self): + raise NotImplementedError( + "This is an abstract method.") + +class ArrayStack(Stack): + + __slots__ = ['items'] + + def __new__(cls, items=None, dtype=NoneType, + **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + if items is None: + items = DynamicOneDimensionalArray(dtype, 0) + else: + items = DynamicOneDimensionalArray(dtype, items) + obj = object.__new__(cls) + obj.items = items + return obj + + @classmethod + def methods(cls): + return ['__new__', 'push', 'pop', 'is_emtpy', + 'peek', '__len__', '__str__'] + + def push(self, x): + if self.is_empty: + self.items._dtype = type(x) + self.items.append(x) + + def pop(self): + if self.is_empty: + raise IndexError("Stack is empty") + + top_element = dc(self.items[self.items._last_pos_filled]) + self.items.delete(self.items._last_pos_filled) + return top_element + + @property + def is_empty(self): + return self.items._last_pos_filled == -1 + + @property + def peek(self): + return 
self.items[self.items._last_pos_filled] + + def __len__(self): + return self.items._num + + def __str__(self): + """ + Used for printing. + """ + return str(self.items._data) + + +class LinkedListStack(Stack): + + __slots__ = ['stack'] + + def __new__(cls, items=None, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = object.__new__(cls) + obj.stack = SinglyLinkedList() + if items is None: + pass + elif type(items) in (list, tuple): + for x in items: + obj.push(x) + else: + raise TypeError("Expected type: list/tuple") + return obj + + @classmethod + def methods(cls): + return ['__new__', 'push', 'pop', 'is_emtpy', + 'peek', '__len__', '__str__'] + + def push(self, x): + self.stack.appendleft(x) + + def pop(self): + if self.is_empty: + raise IndexError("Stack is empty") + return self.stack.popleft() + + @property + def is_empty(self): + return self.__len__() == 0 + + @property + def peek(self): + return self.stack.head + + @property + def size(self): + return self.stack.size + + def __len__(self): + return self.stack.size + + def __str__(self): + elements = [] + current_node = self.peek + while current_node is not None: + elements.append(str(current_node)) + current_node = current_node.next + return str(elements[::-1]) diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/__init__.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_binomial_trees.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_binomial_trees.py new file mode 100644 index 000000000..1275e9aec --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_binomial_trees.py @@ -0,0 +1,17 @@ +from 
pydatastructs.miscellaneous_data_structures.binomial_trees import BinomialTree +from pydatastructs.utils.raises_util import raises +from pydatastructs.utils.misc_util import BinomialTreeNode + +# only tests the corner cases +def test_BinomialTree(): + assert raises(TypeError, lambda: BinomialTree(1, 1)) + assert raises(TypeError, lambda: BinomialTree(None, 1.5)) + + bt = BinomialTree() + assert raises(TypeError, lambda: bt.add_sub_tree(None)) + bt1 = BinomialTree(BinomialTreeNode(1, 1), 0) + node = BinomialTreeNode(2, 2) + node.add_children(BinomialTreeNode(3, 3)) + bt2 = BinomialTree(node, 1) + assert raises(ValueError, lambda: bt1.add_sub_tree(bt2)) + assert bt1.is_empty is False diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_disjoint_set.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_disjoint_set.py new file mode 100644 index 000000000..fcabd3112 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_disjoint_set.py @@ -0,0 +1,70 @@ +from pydatastructs import DisjointSetForest +from pydatastructs.utils.raises_util import raises + +def test_DisjointSetForest(): + + dst = DisjointSetForest() + for i in range(8): + dst.make_set(i+1) + + dst.union(1, 2) + dst.union(1, 5) + assert dst.find_size(2) == 3 + dst.union(1, 6) + dst.union(1, 8) + dst.union(3, 4) + assert dst.find_size(3) == 2 + + assert (dst.find_root(1) == dst.find_root(2) == + dst.find_root(5) == dst.find_root(6) == dst.find_root(8)) + assert dst.disjoint_sets() == [[1, 2, 5, 6, 8], [3, 4], [7]] + assert dst.find_root(3) == dst.find_root(4) + assert dst.find_root(7).key == 7 + + assert raises(KeyError, lambda: dst.find_root(9)) + assert raises(KeyError, lambda: dst.find_size(9)) + dst.union(3, 1) + assert dst.find_root(3).key == 1 + assert dst.find_root(5).key == 1 + dst.make_root(6) + assert dst.disjoint_sets() == [[1, 2, 3, 4, 5, 6, 8], [7]] + assert 
dst.find_root(3).key == 6 + assert dst.find_root(5).key == 6 + dst.make_root(5) + assert dst.find_root(1).key == 5 + assert dst.find_root(5).key == 5 + assert raises(KeyError, lambda: dst.make_root(9)) + + dst = DisjointSetForest() + for i in range(6): + dst.make_set(i) + assert dst.tree[2].size == 1 + dst.union(2, 3) + assert dst.tree[2].size == 2 + assert dst.tree[3].size == 1 + dst.union(1, 4) + dst.union(2, 4) + assert dst.disjoint_sets() == [[0], [1, 2, 3, 4], [5]] + # current tree + ############### + # 2 + # / \ + # 1 3 + # / + # 4 + ############### + assert dst.tree[2].size == 4 + assert dst.tree[1].size == 2 + assert dst.tree[3].size == dst.tree[4].size == 1 + dst.make_root(4) + # New tree + ############### + # 4 + # | + # 2 + # / \ + # 1 3 + ############### + assert dst.tree[4].size == 4 + assert dst.tree[2].size == 3 + assert dst.tree[1].size == dst.tree[3].size == 1 diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_multiset.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_multiset.py new file mode 100644 index 000000000..fb412704a --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_multiset.py @@ -0,0 +1,39 @@ +from pydatastructs.miscellaneous_data_structures import Multiset + +def test_Multiset(): + + ms = Multiset() + ms.add(5) + ms.add(5) + ms.add(3) + ms.add(7) + assert len(ms) == 4 + assert 5 in ms + assert ms.count(5) == 2 + assert ms.count(3) == 1 + assert ms.count(-3) == 0 + assert not 4 in ms + ms.remove(5) + assert 5 in ms + assert ms.lower_bound(5) == 5 + assert ms.upper_bound(5) == 7 + + ms = Multiset(5, 3, 7, 2) + + assert len(ms) == 4 + assert 5 in ms + assert ms.count(7) == 1 + assert not 4 in ms + assert ms.lower_bound(3) == 3 + assert ms.upper_bound(3) == 5 + assert ms.upper_bound(7) is None + + ms.remove(5) + + assert len(ms) == 3 + assert not 5 in ms + + ms.add(4) + + assert 4 in ms + assert len(ms) 
== 4 diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_queue.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_queue.py new file mode 100644 index 000000000..81e1e996e --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_queue.py @@ -0,0 +1,116 @@ +from pydatastructs.miscellaneous_data_structures import Queue +from pydatastructs.miscellaneous_data_structures.queue import ( + ArrayQueue, LinkedListQueue, PriorityQueue, + LinkedListPriorityQueue) +from pydatastructs.utils.raises_util import raises +from pydatastructs.utils.misc_util import _check_type + +def test_Queue(): + q = Queue(implementation='array') + q1 = Queue() + assert _check_type(q, ArrayQueue) is True + assert _check_type(q1, ArrayQueue) is True + q2 = Queue(implementation='linked_list') + assert _check_type(q2, LinkedListQueue) is True + assert raises(NotImplementedError, lambda: Queue(implementation='')) + +def test_ArrayQueue(): + q1 = Queue() + raises(IndexError, lambda: q1.popleft()) + q1 = Queue(implementation='array', items=[0]) + q1.append(1) + q1.append(2) + q1.append(3) + assert str(q1) == '[0, 1, 2, 3]' + assert len(q1) == 4 + assert q1.popleft() == 0 + assert q1.popleft() == 1 + assert len(q1) == 2 + assert q1.popleft() == 2 + assert q1.popleft() == 3 + assert len(q1) == 0 + + q2 = Queue(implementation='array', items=[0], double_ended=True) + q2.append(1) + q2.append(2) + q2.appendleft(3) + assert str(q2) == '[3, 0, 1, 2]' + assert len(q2) == 4 + assert q2.popleft() == 3 + assert q2.pop() == 2 + assert len(q2) == 2 + assert q2.popleft() == 0 + assert q2.pop() == 1 + assert len(q2) == 0 + + q1 = Queue(implementation='array', items=[0]) + assert raises(NotImplementedError, lambda: q1.appendleft(2)) + + +def test_LinkedListQueue(): + q1 = Queue(implementation='linked_list') + q1.append(1) + assert raises(TypeError, lambda: Queue(implementation='linked_list', 
items={0, 1})) + q1 = Queue(implementation='linked_list', items = [0, 1]) + q1.append(2) + q1.append(3) + assert str(q1) == ("['(0, None)', '(1, None)', " + "'(2, None)', '(3, None)']") + assert len(q1) == 4 + assert q1.popleft().key == 0 + assert q1.popleft().key == 1 + assert len(q1) == 2 + assert q1.popleft().key == 2 + assert q1.popleft().key == 3 + assert len(q1) == 0 + raises(IndexError, lambda: q1.popleft()) + + q1 = Queue(implementation='linked_list',items=['a',None,type,{}]) + assert len(q1) == 4 + + front = q1.front + assert front.key == q1.popleft().key + + rear = q1.rear + for _ in range(len(q1)-1): + q1.popleft() + + assert rear.key == q1.popleft().key + + q1 = Queue(implementation='linked_list', double_ended=True) + q1.appendleft(1) + q2 = Queue(implementation='linked_list', items=[0, 1]) + assert raises(NotImplementedError, lambda: q2.appendleft(1)) + q1 = Queue(implementation='linked_list', items = [0, 1], double_ended=True) + q1.appendleft(2) + q1.append(3) + assert str(q1) == "['(2, None)', '(0, None)', '(1, None)', '(3, None)']" + assert len(q1) == 4 + assert q1.popleft().key == 2 + assert q1.pop().key == 3 + assert len(q1) == 2 + assert q1.pop().key == 1 + assert q1.popleft().key == 0 + assert len(q1) == 0 + assert raises(IndexError, lambda: q1.popleft()) + +def test_PriorityQueue(): + pq1 = PriorityQueue(implementation='linked_list') + assert _check_type(pq1, LinkedListPriorityQueue) is True + assert raises(NotImplementedError, lambda: Queue(implementation='')) + +def test_ImplementationPriorityQueue(): + impls = ['linked_list', 'binomial_heap', 'binary_heap'] + for impl in impls: + pq1 = PriorityQueue(implementation=impl) + pq1.push(1, 4) + pq1.push(2, 3) + pq1.push(3, 2) + assert pq1.peek.data == 3 + assert pq1.pop() == 3 + assert pq1.peek.data == 2 + assert pq1.pop() == 2 + assert pq1.peek.data == 1 + assert pq1.pop() == 1 + assert pq1.is_empty is True + assert raises(IndexError, lambda: pq1.peek) diff --git 
a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_range_query_dynamic.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_range_query_dynamic.py new file mode 100644 index 000000000..f655c546d --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_range_query_dynamic.py @@ -0,0 +1,71 @@ +from pydatastructs import ( + RangeQueryDynamic, minimum, + greatest_common_divisor, summation, + OneDimensionalArray) +from pydatastructs.utils.raises_util import raises +import random, math +from copy import deepcopy + +def _test_RangeQueryDynamic_common(func, gen_expected): + + array = OneDimensionalArray(int, []) + raises(ValueError, lambda: RangeQueryDynamic(array, func)) + + array = OneDimensionalArray(int, [1]) + rq = RangeQueryDynamic(array, func) + assert rq.query(0, 0) == 1 + raises(ValueError, lambda: rq.query(0, -1)) + raises(IndexError, lambda: rq.query(0, 1)) + + array_sizes = [3, 6, 12, 24, 48, 96] + random.seed(0) + for array_size in array_sizes: + inputs = [] + for i in range(array_size): + for j in range(i + 1, array_size): + inputs.append((i, j)) + + data_structures = ["array", "segment_tree"] + for ds in data_structures: + data = random.sample(range(-2*array_size, 2*array_size), array_size) + array = OneDimensionalArray(int, data) + rmq = RangeQueryDynamic(array, func, data_structure=ds) + for input in inputs: + assert rmq.query(input[0], input[1]) == gen_expected(data, input[0], input[1]) + + data_copy = deepcopy(data) + for _ in range(array_size//2): + index = random.randint(0, array_size - 1) + value = random.randint(0, 4 * array_size) + data_copy[index] = value + rmq.update(index, value) + + for input in inputs: + assert rmq.query(input[0], input[1]) == gen_expected(data_copy, input[0], input[1]) + +def test_RangeQueryDynamic_minimum(): + + def _gen_minimum_expected(data, i, j): + return min(data[i:j + 1]) + + 
_test_RangeQueryDynamic_common(minimum, _gen_minimum_expected) + +def test_RangeQueryDynamic_greatest_common_divisor(): + + def _gen_gcd_expected(data, i, j): + if j == i: + return data[i] + else: + expected_gcd = math.gcd(data[i], data[i + 1]) + for idx in range(i + 2, j + 1): + expected_gcd = math.gcd(expected_gcd, data[idx]) + return expected_gcd + + _test_RangeQueryDynamic_common(greatest_common_divisor, _gen_gcd_expected) + +def test_RangeQueryDynamic_summation(): + + def _gen_summation_expected(data, i, j): + return sum(data[i:j + 1]) + + return _test_RangeQueryDynamic_common(summation, _gen_summation_expected) diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_range_query_static.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_range_query_static.py new file mode 100644 index 000000000..e898653c9 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_range_query_static.py @@ -0,0 +1,63 @@ +from pydatastructs import ( + RangeQueryStatic, minimum, + greatest_common_divisor, summation, + OneDimensionalArray) +from pydatastructs.utils.raises_util import raises +import random, math + +def _test_RangeQueryStatic_common(func, gen_expected): + + array = OneDimensionalArray(int, []) + raises(ValueError, lambda: RangeQueryStatic(array, func)) + + array = OneDimensionalArray(int, [1]) + rq = RangeQueryStatic(array, func) + assert rq.query(0, 0) == 1 + raises(ValueError, lambda: rq.query(0, -1)) + raises(IndexError, lambda: rq.query(0, 1)) + + array_sizes = [3, 6, 12, 24, 48, 96] + random.seed(0) + for array_size in array_sizes: + data = random.sample(range(-2*array_size, 2*array_size), array_size) + array = OneDimensionalArray(int, data) + + expected = [] + inputs = [] + for i in range(array_size): + for j in range(i + 1, array_size): + inputs.append((i, j)) + expected.append(gen_expected(data, i, j)) + + data_structures = ["array", 
"sparse_table"] + for ds in data_structures: + rmq = RangeQueryStatic(array, func, data_structure=ds) + for input, correct in zip(inputs, expected): + assert rmq.query(input[0], input[1]) == correct + +def test_RangeQueryStatic_minimum(): + + def _gen_minimum_expected(data, i, j): + return min(data[i:j + 1]) + + _test_RangeQueryStatic_common(minimum, _gen_minimum_expected) + +def test_RangeQueryStatic_greatest_common_divisor(): + + def _gen_gcd_expected(data, i, j): + if j == i: + return data[i] + else: + expected_gcd = math.gcd(data[i], data[i + 1]) + for idx in range(i + 2, j + 1): + expected_gcd = math.gcd(expected_gcd, data[idx]) + return expected_gcd + + _test_RangeQueryStatic_common(greatest_common_divisor, _gen_gcd_expected) + +def test_RangeQueryStatic_summation(): + + def _gen_summation_expected(data, i, j): + return sum(data[i:j + 1]) + + return _test_RangeQueryStatic_common(summation, _gen_summation_expected) diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_stack.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_stack.py new file mode 100644 index 000000000..2d9d08b82 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_stack.py @@ -0,0 +1,77 @@ +from pydatastructs.miscellaneous_data_structures import Stack +from pydatastructs.miscellaneous_data_structures.stack import ArrayStack, LinkedListStack +from pydatastructs.miscellaneous_data_structures._backend.cpp import _stack +from pydatastructs.utils.raises_util import raises +from pydatastructs.utils.misc_util import _check_type, Backend + + +def test_Stack(): + s = Stack(implementation='array') + s1 = Stack() + assert _check_type(s, ArrayStack) is True + assert _check_type(s1, ArrayStack) is True + s2 = Stack(implementation='linked_list') + assert _check_type(s2, LinkedListStack) is True + assert raises(NotImplementedError, lambda: Stack(implementation='')) + + s3 = 
Stack(backend=Backend.CPP) + assert _check_type(s3, _stack.ArrayStack) is True + s4 = Stack(implementation="array", backend=Backend.CPP) + assert _check_type(s4, _stack.ArrayStack) is True + +def test_ArrayStack(): + s = Stack(implementation='array') + s.push(1) + s.push(2) + s.push(3) + assert s.peek == 3 + assert str(s) == '[1, 2, 3]' + assert s.pop() == 3 + assert s.pop() == 2 + assert s.pop() == 1 + assert s.is_empty is True + assert raises(IndexError, lambda : s.pop()) + _s = Stack(items=[1, 2, 3]) + assert str(_s) == '[1, 2, 3]' + assert len(_s) == 3 + + # Cpp test + s1 = Stack(implementation="array", backend=Backend.CPP) + s1.push(1) + s1.push(2) + s1.push(3) + assert s1.peek == 3 + assert str(s1) == "['1', '2', '3']" + assert s1.pop() == 3 + assert s1.pop() == 2 + assert s1.pop() == 1 + assert s1.is_empty is True + assert raises(IndexError, lambda : s1.pop()) + _s1 = Stack(items=[1, 2, 3], backend=Backend.CPP) + assert str(_s1) == "['1', '2', '3']" + assert len(_s1) == 3 + +def test_LinkedListStack(): + s = Stack(implementation='linked_list') + s.push(1) + s.push(2) + s.push(3) + assert s.peek.key == 3 + assert str(s) == ("['(1, None)', '(2, None)', '(3, None)']") + assert s.pop().key == 3 + assert s.pop().key == 2 + assert s.pop().key == 1 + assert s.is_empty is True + assert raises(IndexError, lambda : s.pop()) + assert str(s) == '[]' + _s = Stack(implementation='linked_list',items=[1, 2, 3]) + assert str(_s) == "['(1, None)', '(2, None)', '(3, None)']" + assert len(_s) == 3 + + s = Stack(implementation='linked_list',items=['a',None,type,{}]) + assert len(s) == 4 + assert s.size == 4 + + peek = s.peek + assert peek.key == s.pop().key + assert raises(TypeError, lambda: Stack(implementation='linked_list', items={0, 1})) diff --git a/lib/python3.12/site-packages/pydatastructs/strings/__init__.py b/lib/python3.12/site-packages/pydatastructs/strings/__init__.py new file mode 100644 index 000000000..33930b426 --- /dev/null +++ 
b/lib/python3.12/site-packages/pydatastructs/strings/__init__.py @@ -0,0 +1,18 @@ +__all__ = [] + +from . import ( + trie, + algorithms +) + +from .trie import ( + Trie +) + +__all__.extend(trie.__all__) + +from .algorithms import ( + find +) + +__all__.extend(algorithms.__all__) diff --git a/lib/python3.12/site-packages/pydatastructs/strings/algorithms.py b/lib/python3.12/site-packages/pydatastructs/strings/algorithms.py new file mode 100644 index 000000000..1e26b9411 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/strings/algorithms.py @@ -0,0 +1,247 @@ +from pydatastructs.linear_data_structures.arrays import ( + DynamicOneDimensionalArray, OneDimensionalArray) +from pydatastructs.utils.misc_util import ( + Backend, raise_if_backend_is_not_python) + +__all__ = [ + 'find' +] + +PRIME_NUMBER, MOD = 257, 1000000007 + +def find(text, query, algorithm, **kwargs): + """ + Finds occurrence of a query string within the text string. + + Parameters + ========== + + text: str + The string on which query is to be performed. + query: str + The string which is to be searched in the text. + algorithm: str + The algorithm which should be used for + searching. + Currently the following algorithms are + supported, + + 'kmp' -> Knuth-Morris-Pratt as given in [1]. + + 'rabin_karp' -> Rabin–Karp algorithm as given in [2]. + + 'boyer_moore' -> Boyer-Moore algorithm as given in [3]. + + 'z_function' -> Z-function algorithm as given in [4]. + + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Returns + ======= + + DynamicOneDimensionalArray + An array of starting positions of the portions + in the text which match with the given query. 
 + + Examples + ======== + + >>> from pydatastructs.strings.algorithms import find + >>> text = "abcdefabcabe" + >>> pos = find(text, "ab", algorithm="kmp") + >>> str(pos) + "['0', '6', '9']" + >>> pos = find(text, "abc", algorithm="kmp") + >>> str(pos) + "['0', '6']" + >>> pos = find(text, "abe", algorithm="kmp") + >>> str(pos) + "['9']" + >>> pos = find(text, "abed", algorithm="kmp") + >>> str(pos) + '[]' + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Knuth%E2%80%93Morris%E2%80%93Pratt_algorithm + .. [2] https://en.wikipedia.org/wiki/Rabin%E2%80%93Karp_algorithm + .. [3] https://en.wikipedia.org/wiki/Boyer%E2%80%93Moore_string-search_algorithm + .. [4] https://usaco.guide/CPH.pdf#page=257 + """ + raise_if_backend_is_not_python( + find, kwargs.get('backend', Backend.PYTHON)) + import pydatastructs.strings.algorithms as algorithms + func = "_" + algorithm + if not hasattr(algorithms, func): + raise NotImplementedError( + "Currently %s algorithm for searching strings " + "inside a text isn't implemented yet." 
+ %(algorithm)) + return getattr(algorithms, func)(text, query) + + +def _knuth_morris_pratt(text, query): + if len(text) == 0 or len(query) == 0: + return DynamicOneDimensionalArray(int, 0) + kmp_table = _build_kmp_table(query) + return _do_match(text, query, kmp_table) + +_kmp = _knuth_morris_pratt + +def _build_kmp_table(query): + pos, cnd = 1, 0 + kmp_table = OneDimensionalArray(int, len(query) + 1) + + kmp_table[0] = -1 + + while pos < len(query): + if query[pos] == query[cnd]: + kmp_table[pos] = kmp_table[cnd] + else: + kmp_table[pos] = cnd + while cnd >= 0 and query[pos] != query[cnd]: + cnd = kmp_table[cnd] + pos, cnd = pos + 1, cnd + 1 + kmp_table[pos] = cnd + + return kmp_table + + + +def _do_match(string, query, kmp_table): + j, k = 0, 0 + positions = DynamicOneDimensionalArray(int, 0) + + while j < len(string): + if query[k] == string[j]: + j = j + 1 + k = k + 1 + if k == len(query): + positions.append(j - k) + k = kmp_table[k] + else: + k = kmp_table[k] + if k < 0: + j = j + 1 + k = k + 1 + + return positions + +def _p_pow(length, p=PRIME_NUMBER, m=MOD): + p_pow = OneDimensionalArray(int, length) + p_pow[0] = 1 + for i in range(1, length): + p_pow[i] = (p_pow[i-1] * p) % m + return p_pow + +def _hash_str(string, p=PRIME_NUMBER, m=MOD): + hash_value = 0 + p_pow = _p_pow(len(string), p, m) + for i in range(len(string)): + hash_value = (hash_value + ord(string[i]) * p_pow[i]) % m + return hash_value + +def _rabin_karp(text, query): + t = len(text) + q = len(query) + positions = DynamicOneDimensionalArray(int, 0) + if q == 0 or t == 0: + return positions + + query_hash = _hash_str(query) + text_hash = OneDimensionalArray(int, t + 1) + text_hash.fill(0) + p_pow = _p_pow(t) + + for i in range(t): + text_hash[i+1] = (text_hash[i] + ord(text[i]) * p_pow[i]) % MOD + for i in range(t - q + 1): + curr_hash = (text_hash[i + q] + MOD - text_hash[i]) % MOD + if curr_hash == (query_hash * p_pow[i]) % MOD: + positions.append(i) + + return positions + +def 
_boyer_moore(text, query): + positions = DynamicOneDimensionalArray(int, 0) + text_length, query_length = len(text), len(query) + + if text_length == 0 or query_length == 0: + return positions + + # Preprocessing Step + bad_match_table = dict() + for i in range(query_length): + bad_match_table[query[i]] = i + + shift = 0 + # Matching procedure + while shift <= text_length-query_length: + j = query_length - 1 + while j >= 0 and query[j] == text[shift + j]: + j -= 1 + if j < 0: + positions.append(shift) + if shift + query_length < text_length: + if text[shift + query_length] in bad_match_table: + shift += query_length - bad_match_table[text[shift + query_length]] + else: + shift += query_length + 1 + else: + shift += 1 + else: + letter_pos = text[shift + j] + if letter_pos in bad_match_table: + shift += max(1, j - bad_match_table[letter_pos]) + else: + shift += max(1, j + 1) + return positions + +def _z_vector(text, query): + string = text + if query != "": + string = query + str("$") + text + + z_fct = OneDimensionalArray(int, len(string)) + z_fct.fill(0) + + curr_pos = 1 + seg_left = 0 + seg_right = 0 + + for curr_pos in range(1,len(string)): + if curr_pos <= seg_right: + z_fct[curr_pos] = min(seg_right - curr_pos + 1, z_fct[curr_pos - seg_left]) + + while curr_pos + z_fct[curr_pos] < len(string) and \ + string[z_fct[curr_pos]] == string[curr_pos + z_fct[curr_pos]]: + z_fct[curr_pos] += 1 + + if curr_pos + z_fct[curr_pos] - 1 > seg_right: + seg_left = curr_pos + seg_right = curr_pos + z_fct[curr_pos] - 1 + + final_z_fct = DynamicOneDimensionalArray(int, 0) + start_index = 0 + if query != "": + start_index = len(query) + 1 + for pos in range(start_index, len(string)): + final_z_fct.append(z_fct[pos]) + + return final_z_fct + +def _z_function(text, query): + positions = DynamicOneDimensionalArray(int, 0) + if len(text) == 0 or len(query) == 0: + return positions + + fct = _z_vector(text, query) + for pos in range(len(fct)): + if fct[pos] == len(query): + 
positions.append(pos) + + return positions diff --git a/lib/python3.12/site-packages/pydatastructs/strings/tests/__init__.py b/lib/python3.12/site-packages/pydatastructs/strings/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/lib/python3.12/site-packages/pydatastructs/strings/tests/test_algorithms.py b/lib/python3.12/site-packages/pydatastructs/strings/tests/test_algorithms.py new file mode 100644 index 000000000..37622cf80 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/strings/tests/test_algorithms.py @@ -0,0 +1,76 @@ +from pydatastructs.strings import find + +import random, string + +def test_kmp(): + _test_common_string_matching('kmp') + +def test_rka(): + _test_common_string_matching('rabin_karp') + +def test_bm(): + _test_common_string_matching('boyer_moore') + +def test_zf(): + _test_common_string_matching('z_function') + +def _test_common_string_matching(algorithm): + true_text_pattern_dictionary = { + "Knuth-Morris-Pratt": "-Morris-", + "abcabcabcabdabcabdabcabca": "abcabdabcabca", + "aefcdfaecdaefaefcdaefeaefcdcdeae": "aefcdaefeaefcd", + "aaaaaaaa": "aaa", + "fullstringmatch": "fullstringmatch", + "z-function": "z-fun" + } + for test_case_key in true_text_pattern_dictionary: + text = test_case_key + query = true_text_pattern_dictionary[test_case_key] + positions = find(text, query, algorithm) + for i in range(positions._last_pos_filled): + p = positions[i] + assert text[p:p + len(query)] == query + + false_text_pattern_dictionary = { + "Knuth-Morris-Pratt": "-Pratt-", + "abcabcabcabdabcabdabcabca": "qwertyuiopzxcvbnm", + "aefcdfaecdaefaefcdaefeaefcdcdeae": "cdaefaefe", + "fullstringmatch": "fullstrinmatch", + "z-function": "function-", + "abc": "", + "": "abc" + } + + for test_case_key in false_text_pattern_dictionary: + text = test_case_key + query = false_text_pattern_dictionary[test_case_key] + positions = find(text, query, algorithm) + assert positions.size == 0 + + random.seed(1000) + + def 
gen_random_string(length): + ascii = string.ascii_uppercase + digits = string.digits + return ''.join(random.choices(ascii + digits, k=length)) + + for _ in range(100): + query = gen_random_string(random.randint(3, 10)) + num_times = random.randint(1, 10) + freq = 0 + text = "" + while freq < num_times: + rand_str = gen_random_string(random.randint(5, 10)) + if rand_str != query: + freq += 1 + text += query + rand_str + query + positions = find(text, query, algorithm) + assert positions._num == num_times * 2 + for i in range(positions._last_pos_filled): + p = positions[i] + assert text[p:p + len(query)] == query + + text = gen_random_string(len(query)) + if text != query: + positions = find(text, query, algorithm) + assert positions.size == 0 diff --git a/lib/python3.12/site-packages/pydatastructs/strings/tests/test_trie.py b/lib/python3.12/site-packages/pydatastructs/strings/tests/test_trie.py new file mode 100644 index 000000000..059104708 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/strings/tests/test_trie.py @@ -0,0 +1,49 @@ +from pydatastructs import Trie + +def test_Trie(): + + strings = ["A", "to", "tea", "ted", "ten", "i", + "in", "inn", "Amfn", "snbr"] + trie = Trie() + for string in strings: + trie.insert(string) + + prefix_strings = ["te", "t", "Am", "snb"] + + for string in strings: + assert trie.is_inserted(string) + + for string in strings[::-1]: + assert trie.is_inserted(string) + + for string in prefix_strings: + assert trie.is_present(string) + assert not trie.is_inserted(string) + + assert sorted(trie.strings_with_prefix("t")) == ['tea', 'ted', 'ten', 'to'] + assert sorted(trie.strings_with_prefix("te")) == ["tea", "ted", "ten"] + assert trie.strings_with_prefix("i") == ["i", "in", "inn"] + assert trie.strings_with_prefix("a") == [] + + remove_order = ["to", "tea", "ted", "ten", "inn", "in", "A"] + + assert trie.delete("z") is None + + for string in remove_order: + trie.delete(string) + for present in strings: + if present == 
string: + assert not trie.is_inserted(present) + else: + assert trie.is_present(present) + assert trie.is_inserted(present) + strings.remove(string) + + prefix_strings_1 = ["dict", "dicts", "dicts_lists_tuples"] + trie_1 = Trie() + + for i in range(len(prefix_strings_1)): + trie_1.insert(prefix_strings_1[i]) + for j in range(i + 1): + assert trie_1.is_inserted(prefix_strings_1[j]) + assert trie_1.is_present(prefix_strings_1[j]) diff --git a/lib/python3.12/site-packages/pydatastructs/strings/trie.py b/lib/python3.12/site-packages/pydatastructs/strings/trie.py new file mode 100644 index 000000000..cdf6666cf --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/strings/trie.py @@ -0,0 +1,201 @@ +from pydatastructs.utils.misc_util import ( + TrieNode, Backend, + raise_if_backend_is_not_python) +from collections import deque +import copy + +__all__ = [ + 'Trie' +] + +Stack = Queue = deque + +class Trie(object): + """ + Represents the trie data structure for storing strings. + + Parameters + ========== + + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Examples + ======== + + >>> from pydatastructs import Trie + >>> trie = Trie() + >>> trie.insert("a") + >>> trie.insert("aa") + >>> trie.strings_with_prefix("a") + ['a', 'aa'] + >>> trie.is_present("aa") + True + >>> trie.delete("aa") + True + >>> trie.is_present("aa") + False + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Trie + """ + + __slots__ = ['root'] + + @classmethod + def methods(cls): + return ['__new__', 'insert', 'is_present', 'delete', + 'strings_with_prefix'] + + def __new__(cls, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = object.__new__(cls) + obj.root = TrieNode() + return obj + + def insert(self, string: str) -> None: + """ + Inserts the given string into the trie. 
+ + Parameters + ========== + + string: str + + Returns + ======= + + None + """ + walk = self.root + for char in string: + if walk.get_child(char) is None: + newNode = TrieNode(char) + walk.add_child(newNode) + walk = newNode + else: + walk = walk.get_child(char) + walk.is_terminal = True + + def is_present(self, string: str) -> bool: + """ + Checks if the given string is present as a prefix in the trie. + + Parameters + ========== + + string: str + + Returns + ======= + + True if the given string is present as a prefix; + False in all other cases. + """ + walk = self.root + for char in string: + if walk.get_child(char) is None: + return False + walk = walk.get_child(char) + return True + + def is_inserted(self, string: str) -> bool: + """ + Checks if the given string was inserted in the trie. + + Parameters + ========== + + string: str + + Returns + ======= + + True if the given string was inserted in trie; + False in all other cases. + """ + walk = self.root + for char in string: + if walk.get_child(char) is None: + return False + walk = walk.get_child(char) + return walk.is_terminal + + def delete(self, string: str) -> bool: + """ + Deletes the given string from the trie. + + Parameters + ========== + + string: str + + Returns + ======= + + True if successfully deleted; + None if the string is not present in the trie. + """ + path = [] + walk = self.root + size = len(string) + for i in range(size): + char = string[i] + path.append(walk) + if walk.get_child(char) is None: + return None + walk = walk.get_child(char) + path.append(walk) + i = len(path) - 1 + path[i].is_terminal = False + while not path[i]._children and i >= 1: + path[i-1].remove_child(path[i].char) + i -= 1 + if path[i].is_terminal: + return True + return True + + def strings_with_prefix(self, string: str) -> list: + """ + Generates a list of all strings with the given prefix. 
+ + Parameters + ========== + + string: str + + Returns + ======= + + strings: list + The list of strings with the given prefix. + """ + + def _collect(prefix: str, node: TrieNode, strings: list) -> str: + TrieNode_stack = Stack() + TrieNode_stack.append((node, prefix)) + while TrieNode_stack: + walk, curr_prefix = TrieNode_stack.pop() + if walk.is_terminal: + strings.append(curr_prefix + walk.char) + for child in walk._children: + TrieNode_stack.append((walk.get_child(child), curr_prefix + walk.char)) + + strings = [] + prefix = "" + walk = self.root + for char in string: + walk = walk.get_child(char) + if walk is None: + return strings + prefix += char + if walk.is_terminal: + strings.append(walk.char) + for child in walk._children: + _collect(prefix, walk.get_child(child), strings) + return strings diff --git a/lib/python3.12/site-packages/pydatastructs/trees/__init__.py b/lib/python3.12/site-packages/pydatastructs/trees/__init__.py new file mode 100644 index 000000000..892730122 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/trees/__init__.py @@ -0,0 +1,40 @@ +__all__ = [] + +from . 
import ( + binary_trees, + m_ary_trees, + space_partitioning_trees, + heaps, +) + +from .binary_trees import ( + BinaryTree, + BinarySearchTree, + BinaryTreeTraversal, + AVLTree, + BinaryIndexedTree, + CartesianTree, + Treap, + SplayTree, + RedBlackTree +) +__all__.extend(binary_trees.__all__) + +from .m_ary_trees import ( + MAryTreeNode, MAryTree +) + +__all__.extend(m_ary_trees.__all__) + +from .space_partitioning_trees import ( + OneDimensionalSegmentTree +) +__all__.extend(space_partitioning_trees.__all__) + +from .heaps import ( + BinaryHeap, + TernaryHeap, + DHeap, + BinomialHeap +) +__all__.extend(heaps.__all__) diff --git a/lib/python3.12/site-packages/pydatastructs/trees/_backend/__init__.py b/lib/python3.12/site-packages/pydatastructs/trees/_backend/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/lib/python3.12/site-packages/pydatastructs/trees/binary_trees.py b/lib/python3.12/site-packages/pydatastructs/trees/binary_trees.py new file mode 100644 index 000000000..48446d1d4 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/trees/binary_trees.py @@ -0,0 +1,1888 @@ +import random +from collections import deque as Queue +from pydatastructs.utils import TreeNode, CartesianTreeNode, RedBlackTreeNode +from pydatastructs.miscellaneous_data_structures import Stack +from pydatastructs.linear_data_structures import OneDimensionalArray +from pydatastructs.linear_data_structures.arrays import ArrayForTrees +from pydatastructs.utils.misc_util import Backend +from pydatastructs.trees._backend.cpp import _trees + +__all__ = [ + 'AVLTree', + 'BinaryTree', + 'BinarySearchTree', + 'BinaryTreeTraversal', + 'BinaryIndexedTree', + 'CartesianTree', + 'Treap', + 'SplayTree', + 'RedBlackTree' +] + +class BinaryTree(object): + """ + Abstract binary tree. + + Parameters + ========== + + key + Required if tree is to be instantiated with + root otherwise not needed. + root_data + Optional, the root node of the binary tree. 
+ If not of type TreeNode, it will consider + root as data and a new root node will + be created. + comp: lambda/function + Optional, A lambda function which will be used + for comparison of keys. Should return a + bool value. By default it implements less + than operator. + is_order_statistic: bool + Set it to True, if you want to use the + order statistic features of the tree. + backend: pydatastructs.Backend + The backend to be used. Available backends: Python and C++ + Optional, by default, the Python backend is used. For faster execution, use the C++ backend. + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Binary_tree + """ + + __slots__ = ['root_idx', 'comparator', 'tree', 'size', + 'is_order_statistic'] + + def __new__(cls, key=None, root_data=None, comp=None, + is_order_statistic=False, **kwargs): + backend = kwargs.get('backend', Backend.PYTHON) + if backend == Backend.CPP: + if comp is None: + comp = lambda key1, key2: key1 < key2 + return _trees.BinaryTree(key, root_data, comp, is_order_statistic, **kwargs) # If any argument is not given, then it is passed as None, except for comp + obj = object.__new__(cls) + if key is None and root_data is not None: + raise ValueError('Key required.') + key = None if root_data is None else key + root = TreeNode(key, root_data) + root.is_root = True + obj.root_idx = 0 + obj.tree, obj.size = ArrayForTrees(TreeNode, [root]), 1 + obj.comparator = lambda key1, key2: key1 < key2 \ + if comp is None else comp + obj.is_order_statistic = is_order_statistic + return obj + + @classmethod + def methods(cls): + return ['__new__', '__str__'] + + def insert(self, key, data=None): + """ + Inserts data by the passed key using iterative + algorithm. + + Parameters + ========== + + key + The key for comparison. + + data + The data to be inserted. 
+ + Returns + ======= + + None + """ + raise NotImplementedError("This is an abstract method.") + + def delete(self, key, **kwargs): + """ + Deletes the data with the passed key + using iterative algorithm. + + Parameters + ========== + + key + The key of the node which is + to be deleted. + balancing_info: bool + Optional, by default, False + The information needed for updating + the tree is returned if this parameter + is set to True. It is not meant for + user facing APIs. + + Returns + ======= + + True + If the node is deleted successfully. + None + If the node to be deleted doesn't exists. + + Note + ==== + + The node is deleted means that the connection to that + node are removed but the it is still in three. This + is being done to keep the complexity of deletion, O(logn). + """ + raise NotImplementedError("This is an abstract method.") + + def search(self, key, **kwargs): + """ + Searches for the data in the binary search tree + using iterative algorithm. + + Parameters + ========== + + key + The key for searching. + parent: bool + If true then returns index of the + parent of the node with the passed + key. + By default, False + + Returns + ======= + + int + If the node with the passed key is + in the tree. + tuple + The index of the searched node and + the index of the parent of that node. + None + In all other cases. + """ + raise NotImplementedError("This is an abstract method.") + + + def __str__(self): + to_be_printed = ['' for i in range(self.tree._last_pos_filled + 1)] + for i in range(self.tree._last_pos_filled + 1): + if self.tree[i] is not None: + node = self.tree[i] + to_be_printed[i] = (node.left, node.key, node.data, node.right) + return str(to_be_printed) + +class BinarySearchTree(BinaryTree): + """ + Represents binary search trees. 
+ + Examples + ======== + + >>> from pydatastructs.trees import BinarySearchTree as BST + >>> b = BST() + >>> b.insert(1, 1) + >>> b.insert(2, 2) + >>> child = b.tree[b.root_idx].right + >>> b.tree[child].data + 2 + >>> b.search(1) + 0 + >>> b.search(-1) is None + True + >>> b.delete(1) is True + True + >>> b.search(1) is None + True + >>> b.delete(2) is True + True + >>> b.search(2) is None + True + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Binary_search_tree + + See Also + ======== + + pydatastructs.trees.binary_tree.BinaryTree + """ + + @classmethod + def methods(cls): + return ['insert', 'search', 'delete', 'select', + 'rank', 'lowest_common_ancestor'] + + left_size = lambda self, node: self.tree[node.left].size \ + if node.left is not None else 0 + right_size = lambda self, node: self.tree[node.right].size \ + if node.right is not None else 0 + def __new__(cls, key=None, root_data=None, comp=None, + is_order_statistic=False, **kwargs): + backend = kwargs.get('backend', Backend.PYTHON) + if backend == Backend.CPP: + if comp is None: + comp = lambda key1, key2: key1 < key2 + return _trees.BinarySearchTree(key, root_data, comp, is_order_statistic, **kwargs) # If any argument is not given, then it is passed as None, except for comp + return super().__new__(cls, key, root_data, comp, is_order_statistic, **kwargs) + + def _update_size(self, start_idx): + if self.is_order_statistic: + walk = start_idx + while walk is not None: + self.tree[walk].size = ( + self.left_size(self.tree[walk]) + + self.right_size(self.tree[walk]) + 1) + walk = self.tree[walk].parent + + def insert(self, key, data=None): + res = self.search(key) + if res is not None: + self.tree[res].data = data + return None + walk = self.root_idx + if self.tree[walk].key is None: + self.tree[walk].key = key + self.tree[walk].data = data + return None + new_node, prev_node, flag = TreeNode(key, data), self.root_idx, True + while flag: + if not self.comparator(key, 
self.tree[walk].key): + if self.tree[walk].right is None: + new_node.parent = prev_node + self.tree.append(new_node) + self.tree[walk].right = self.size + self.size += 1 + flag = False + prev_node = walk = self.tree[walk].right + else: + if self.tree[walk].left is None: + new_node.parent = prev_node + self.tree.append(new_node) + self.tree[walk].left = self.size + self.size += 1 + flag = False + prev_node = walk = self.tree[walk].left + self._update_size(walk) + + def search(self, key, **kwargs): + ret_parent = kwargs.get('parent', False) + parent = None + walk = self.root_idx + if self.tree[walk].key is None: + return None + while walk is not None: + if self.tree[walk].key == key: + break + parent = walk + if self.comparator(key, self.tree[walk].key): + walk = self.tree[walk].left + else: + walk = self.tree[walk].right + return (walk, parent) if ret_parent else walk + + def _bound_helper(self, node_idx, bound_key, is_upper=False): + if node_idx is None: + return None + if self.tree[node_idx].key is None: + return None + + if self.tree[node_idx].key == bound_key: + if not is_upper: + return self.tree[node_idx].key + else: + return self._bound_helper(self.tree[node_idx].right, + bound_key, is_upper) + + if self.comparator(self.tree[node_idx].key, bound_key): + return self._bound_helper(self.tree[node_idx].right, + bound_key, is_upper) + else: + res_bound = self._bound_helper(self.tree[node_idx].left, + bound_key, is_upper) + return res_bound if res_bound is not None else self.tree[node_idx].key + + + def lower_bound(self, key, **kwargs): + """ + Finds the lower bound of the given key in the tree + + Parameters + ========== + + key + The key for comparison + + Examples + ======== + + >>> from pydatastructs.trees import BinarySearchTree as BST + >>> b = BST() + >>> b.insert(10, 10) + >>> b.insert(18, 18) + >>> b.insert(7, 7) + >>> b.lower_bound(9) + 10 + >>> b.lower_bound(7) + 7 + >>> b.lower_bound(20) is None + True + + Returns + ======= + + value + The lower bound 
of the given key. + Returns None if the value doesn't exist + """ + return self._bound_helper(self.root_idx, key) + + + def upper_bound(self, key, **kwargs): + """ + Finds the upper bound of the given key in the tree + + Parameters + ========== + + key + The key for comparison + + Examples + ======== + + >>> from pydatastructs.trees import BinarySearchTree as BST + >>> b = BST() + >>> b.insert(10, 10) + >>> b.insert(18, 18) + >>> b.insert(7, 7) + >>> b.upper_bound(9) + 10 + >>> b.upper_bound(7) + 10 + >>> b.upper_bound(20) is None + True + + Returns + ======= + + value + The upper bound of the given key. + Returns None if the value doesn't exist + """ + return self._bound_helper(self.root_idx, key, True) + + + def delete(self, key, **kwargs): + (walk, parent) = self.search(key, parent=True) + a = None + if walk is None: + return None + if self.tree[walk].left is None and \ + self.tree[walk].right is None: + if parent is None: + self.tree[self.root_idx].data = None + self.tree[self.root_idx].key = None + else: + if self.tree[parent].left == walk: + self.tree[parent].left = None + else: + self.tree[parent].right = None + a = parent + par_key, root_key = (self.tree[parent].key, + self.tree[self.root_idx].key) + new_indices = self.tree.delete(walk) + if new_indices is not None: + a = new_indices[par_key] + self.root_idx = new_indices[root_key] + self._update_size(a) + + elif self.tree[walk].left is not None and \ + self.tree[walk].right is not None: + twalk = self.tree[walk].right + par = walk + flag = False + while self.tree[twalk].left is not None: + flag = True + par = twalk + twalk = self.tree[twalk].left + self.tree[walk].data = self.tree[twalk].data + self.tree[walk].key = self.tree[twalk].key + if flag: + self.tree[par].left = self.tree[twalk].right + else: + self.tree[par].right = self.tree[twalk].right + if self.tree[twalk].right is not None: + self.tree[self.tree[twalk].right].parent = par + if twalk is not None: + a = par + par_key, root_key = 
(self.tree[par].key, + self.tree[self.root_idx].key) + new_indices = self.tree.delete(twalk) + if new_indices is not None: + a = new_indices[par_key] + self.root_idx = new_indices[root_key] + self._update_size(a) + + else: + if self.tree[walk].left is not None: + child = self.tree[walk].left + else: + child = self.tree[walk].right + if parent is None: + self.tree[self.root_idx].left = self.tree[child].left + self.tree[self.root_idx].right = self.tree[child].right + self.tree[self.root_idx].data = self.tree[child].data + self.tree[self.root_idx].key = self.tree[child].key + self.tree[self.root_idx].parent = None + root_key = self.tree[self.root_idx].key + new_indices = self.tree.delete(child) + if new_indices is not None: + self.root_idx = new_indices[root_key] + else: + if self.tree[parent].left == walk: + self.tree[parent].left = child + else: + self.tree[parent].right = child + self.tree[child].parent = parent + a = parent + par_key, root_key = (self.tree[parent].key, + self.tree[self.root_idx].key) + new_indices = self.tree.delete(walk) + if new_indices is not None: + parent = new_indices[par_key] + self.tree[child].parent = new_indices[par_key] + a = new_indices[par_key] + self.root_idx = new_indices[root_key] + self._update_size(a) + + if kwargs.get("balancing_info", False) is not False: + return a + return True + + def select(self, i): + """ + Finds the i-th smallest node in the tree. + + Parameters + ========== + + i: int + A positive integer + + Returns + ======= + + n: TreeNode + The node with the i-th smallest key + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/Order_statistic_tree + """ + i -= 1 # The algorithm is based on zero indexing + if i < 0: + raise ValueError("Expected a positive integer, got %d"%(i + 1)) + if i >= self.tree._num: + raise ValueError("%d is greater than the size of the " + "tree which is, %d"%(i + 1, self.tree._num)) + walk = self.root_idx + while walk is not None: + l = self.left_size(self.tree[walk]) + if i == l: + return self.tree[walk] + left_walk = self.tree[walk].left + right_walk = self.tree[walk].right + if left_walk is None and right_walk is None: + raise IndexError("The traversal is terminated " + "due to no child nodes ahead.") + if i < l: + if left_walk is not None and \ + self.comparator(self.tree[left_walk].key, + self.tree[walk].key): + walk = left_walk + else: + walk = right_walk + else: + if right_walk is not None and \ + not self.comparator(self.tree[right_walk].key, + self.tree[walk].key): + walk = right_walk + else: + walk = left_walk + i -= (l + 1) + + def rank(self, x): + """ + Finds the rank of the given node, i.e. + its index in the sorted list of nodes + of the tree. + + Parameters + ========== + + x: key + The key of the node whose rank is to be found out. + """ + walk = self.search(x) + if walk is None: + return None + r = self.left_size(self.tree[walk]) + 1 + while self.tree[walk].key != self.tree[self.root_idx].key: + p = self.tree[walk].parent + if walk == self.tree[p].right: + r += self.left_size(self.tree[p]) + 1 + walk = p + return r + + def _simple_path(self, key, root): + """ + Utility funtion to find the simple path between root and node. 
+ + Parameters + ========== + + key: Node.key + Key of the node to be searched + + Returns + ======= + + path: list + """ + + stack = Stack() + stack.push(root) + path = [] + node_idx = -1 + + while not stack.is_empty: + node = stack.pop() + if self.tree[node].key == key: + node_idx = node + break + if self.tree[node].left: + stack.push(self.tree[node].left) + if self.tree[node].right: + stack.push(self.tree[node].right) + + if node_idx == -1: + return path + + while node_idx != 0: + path.append(node_idx) + node_idx = self.tree[node_idx].parent + path.append(0) + path.reverse() + + return path + + def _lca_1(self, j, k): + root = self.root_idx + path1 = self._simple_path(j, root) + path2 = self._simple_path(k, root) + if not path1 or not path2: + raise ValueError("One of two path doesn't exists. See %s, %s" + %(path1, path2)) + + n, m = len(path1), len(path2) + i = j = 0 + while i < n and j < m: + if path1[i] != path2[j]: + return self.tree[path1[i - 1]].key + i += 1 + j += 1 + if path1 < path2: + return self.tree[path1[-1]].key + return self.tree[path2[-1]].key + + def _lca_2(self, j, k): + curr_root = self.root_idx + u, v = self.search(j), self.search(k) + if (u is None) or (v is None): + raise ValueError("One of the nodes with key %s " + "or %s doesn't exits"%(j, k)) + u_left = self.comparator(self.tree[u].key, \ + self.tree[curr_root].key) + v_left = self.comparator(self.tree[v].key, \ + self.tree[curr_root].key) + + while not (u_left ^ v_left): + if u_left and v_left: + curr_root = self.tree[curr_root].left + else: + curr_root = self.tree[curr_root].right + + if curr_root == u or curr_root == v: + if curr_root is None: + return None + return self.tree[curr_root].key + u_left = self.comparator(self.tree[u].key, \ + self.tree[curr_root].key) + v_left = self.comparator(self.tree[v].key, \ + self.tree[curr_root].key) + + if curr_root is None: + return curr_root + return self.tree[curr_root].key + + def lowest_common_ancestor(self, j, k, algorithm=1): + + """ + 
Computes the lowest common ancestor of two nodes. + + Parameters + ========== + + j: Node.key + Key of first node + + k: Node.key + Key of second node + + algorithm: int + The algorithm to be used for computing the + lowest common ancestor. + Optional, by default uses algorithm 1. + + 1 -> Determines the lowest common ancestor by finding + the first intersection of the paths from v and w + to the root. + + 2 -> Modifed version of the algorithm given in the + following publication, + D. Harel. A linear time algorithm for the + lowest common ancestors problem. In 21s + Annual Symposium On Foundations of + Computer Science, pages 308-319, 1980. + + Returns + ======= + + Node.key + The key of the lowest common ancestor in the tree. + if both the nodes are present in the tree. + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Lowest_common_ancestor + + .. [2] https://pdfs.semanticscholar.org/e75b/386cc554214aa0ebd6bd6dbdd0e490da3739.pdf + + """ + return getattr(self, "_lca_"+str(algorithm))(j, k) + +class SelfBalancingBinaryTree(BinarySearchTree): + """ + Represents Base class for all rotation based balancing trees like AVL tree, Red Black tree, Splay Tree. 
+ """ + def __new__(cls, key=None, root_data=None, comp=None, + is_order_statistic=False, **kwargs): + backend = kwargs.get('backend', Backend.PYTHON) + if backend == Backend.CPP: + if comp is None: + comp = lambda key1, key2: key1 < key2 + return _trees.SelfBalancingBinaryTree(key, root_data, comp, is_order_statistic, **kwargs) # If any argument is not given, then it is passed as None, except for comp + return super().__new__(cls, key, root_data, comp, is_order_statistic, **kwargs) + + def _right_rotate(self, j, k): + y = self.tree[k].right + if y is not None: + self.tree[y].parent = j + self.tree[j].left = y + self.tree[k].parent = self.tree[j].parent + if self.tree[k].parent is not None: + if self.tree[self.tree[k].parent].left == j: + self.tree[self.tree[k].parent].left = k + else: + self.tree[self.tree[k].parent].right = k + self.tree[j].parent = k + self.tree[k].right = j + kp = self.tree[k].parent + if kp is None: + self.root_idx = k + + def _left_right_rotate(self, j, k): + i = self.tree[k].right + v, w = self.tree[i].left, self.tree[i].right + self.tree[k].right, self.tree[j].left = v, w + if v is not None: + self.tree[v].parent = k + if w is not None: + self.tree[w].parent = j + self.tree[i].left, self.tree[i].right, self.tree[i].parent = \ + k, j, self.tree[j].parent + self.tree[k].parent, self.tree[j].parent = i, i + ip = self.tree[i].parent + if ip is not None: + if self.tree[ip].left == j: + self.tree[ip].left = i + else: + self.tree[ip].right = i + else: + self.root_idx = i + + def _right_left_rotate(self, j, k): + i = self.tree[k].left + v, w = self.tree[i].left, self.tree[i].right + self.tree[k].left, self.tree[j].right = w, v + if v is not None: + self.tree[v].parent = j + if w is not None: + self.tree[w].parent = k + self.tree[i].right, self.tree[i].left, self.tree[i].parent = \ + k, j, self.tree[j].parent + self.tree[k].parent, self.tree[j].parent = i, i + ip = self.tree[i].parent + if ip is not None: + if self.tree[ip].left == j: + 
self.tree[ip].left = i + else: + self.tree[ip].right = i + else: + self.root_idx = i + + def _left_rotate(self, j, k): + y = self.tree[k].left + if y is not None: + self.tree[y].parent = j + self.tree[j].right = y + self.tree[k].parent = self.tree[j].parent + if self.tree[k].parent is not None: + if self.tree[self.tree[k].parent].left == j: + self.tree[self.tree[k].parent].left = k + else: + self.tree[self.tree[k].parent].right = k + self.tree[j].parent = k + self.tree[k].left = j + kp = self.tree[k].parent + if kp is None: + self.root_idx = k + +class CartesianTree(SelfBalancingBinaryTree): + """ + Represents cartesian trees. + + Examples + ======== + + >>> from pydatastructs.trees import CartesianTree as CT + >>> c = CT() + >>> c.insert(1, 4, 1) + >>> c.insert(2, 3, 2) + >>> child = c.tree[c.root_idx].left + >>> c.tree[child].data + 1 + >>> c.search(1) + 0 + >>> c.search(-1) is None + True + >>> c.delete(1) is True + True + >>> c.search(1) is None + True + >>> c.delete(2) is True + True + >>> c.search(2) is None + True + + References + ========== + + .. 
class CartesianTree(SelfBalancingBinaryTree):
    """
    Represents cartesian trees: a BST on keys that is simultaneously a
    min-heap on node priorities.

    Examples
    ========

    >>> from pydatastructs.trees import CartesianTree as CT
    >>> c = CT()
    >>> c.insert(1, 4, 1)
    >>> c.insert(2, 3, 2)
    >>> child = c.tree[c.root_idx].left
    >>> c.tree[child].data
    1
    >>> c.search(1)
    0
    >>> c.search(-1) is None
    True
    >>> c.delete(1) is True
    True
    >>> c.search(1) is None
    True
    >>> c.delete(2) is True
    True
    >>> c.search(2) is None
    True

    References
    ==========

    .. [1] https://www.cs.princeton.edu/courses/archive/spr09/cos423/Lectures/geo-st.pdf

    See Also
    ========

    pydatastructs.trees.binary_trees.SelfBalancingBinaryTree
    """
    def __new__(cls, key=None, root_data=None, comp=None,
                is_order_statistic=False, **kwargs):
        backend = kwargs.get('backend', Backend.PYTHON)
        if backend == Backend.CPP:
            if comp is None:
                comp = lambda key1, key2: key1 < key2
            return _trees.CartesianTree(key, root_data, comp, is_order_statistic, **kwargs) # If any argument is not given, then it is passed as None, except for comp
        return super().__new__(cls, key, root_data, comp, is_order_statistic, **kwargs)

    @classmethod
    def methods(cls):
        return ['__new__', '__str__', 'insert', 'delete']

    def _bubble_up(self, node_idx):
        """Rotate a freshly inserted node upward until the min-heap
        property on priorities is restored."""
        node = self.tree[node_idx]
        parent_idx = self.tree[node_idx].parent
        parent = self.tree[parent_idx]
        while (node.parent is not None) and (parent.priority > node.priority):
            # Rotate node above its parent; direction depends on which
            # child slot it occupies.
            if parent.right == node_idx:
                self._left_rotate(parent_idx, node_idx)
            else:
                self._right_rotate(parent_idx, node_idx)
            node = self.tree[node_idx]
            parent_idx = self.tree[node_idx].parent
            if parent_idx is not None:
                parent = self.tree[parent_idx]
        if node.parent is None:
            self.tree[node_idx].is_root = True

    def _trickle_down(self, node_idx):
        """Rotate a node downward (towards a leaf) so it can be deleted
        without violating the heap property."""
        node = self.tree[node_idx]
        while node.left is not None or node.right is not None:
            # Always rotate the lower-priority child up.
            if node.left is None:
                self._left_rotate(node_idx, self.tree[node_idx].right)
            elif node.right is None:
                self._right_rotate(node_idx, self.tree[node_idx].left)
            elif self.tree[node.left].priority < self.tree[node.right].priority:
                self._right_rotate(node_idx, self.tree[node_idx].left)
            else:
                self._left_rotate(node_idx, self.tree[node_idx].right)
            node = self.tree[node_idx]

    def insert(self, key, priority, data=None):
        """Insert ``key`` with the given heap ``priority`` and payload."""
        super(CartesianTree, self).insert(key, data)
        node_idx = super(CartesianTree, self).search(key)
        node = self.tree[node_idx]
        # Replace the plain TreeNode created by the BST insert with a
        # priority-carrying CartesianTreeNode wired into the same slot.
        new_node = CartesianTreeNode(key, priority, data)
        new_node.parent = node.parent
        new_node.left = node.left
        new_node.right = node.right
        self.tree[node_idx] = new_node
        if node.is_root:
            self.tree[node_idx].is_root = True
        else:
            self._bubble_up(node_idx)

    def delete(self, key, **kwargs):
        """Delete ``key`` after trickling it down to a removable position."""
        balancing_info = kwargs.get('balancing_info', False)
        node_idx = super(CartesianTree, self).search(key)
        if node_idx is not None:
            self._trickle_down(node_idx)
            return super(CartesianTree, self).delete(key, balancing_info = balancing_info)

    def __str__(self):
        # One (left, key, priority, data, right) tuple per occupied slot.
        to_be_printed = ['' for i in range(self.tree._last_pos_filled + 1)]
        for i in range(self.tree._last_pos_filled + 1):
            if self.tree[i] is not None:
                node = self.tree[i]
                to_be_printed[i] = (node.left, node.key, node.priority, node.data, node.right)
        return str(to_be_printed)

class Treap(CartesianTree):
    """
    Represents treaps: cartesian trees with randomly chosen priorities.

    Examples
    ========

    >>> from pydatastructs.trees import Treap as T
    >>> t = T()
    >>> t.insert(1, 1)
    >>> t.insert(2, 2)
    >>> t.search(1)
    0
    >>> t.search(-1) is None
    True
    >>> t.delete(1) is True
    True
    >>> t.search(1) is None
    True
    >>> t.delete(2) is True
    True
    >>> t.search(2) is None
    True

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Treap

    """
    def __new__(cls, key=None, root_data=None, comp=None,
                is_order_statistic=False, **kwargs):
        backend = kwargs.get('backend', Backend.PYTHON)
        if backend == Backend.CPP:
            if comp is None:
                comp = lambda key1, key2: key1 < key2
            return _trees.Treap(key, root_data, comp, is_order_statistic, **kwargs) # If any argument is not given, then it is passed as None, except for comp
        return super().__new__(cls, key, root_data, comp, is_order_statistic, **kwargs)

    @classmethod
    def methods(cls):
        return ['__new__', 'insert']

    def insert(self, key, data=None):
        """Insert ``key`` with a uniformly random priority in [0, 1)."""
        priority = random.random()
        super(Treap, self).insert(key, priority, data)

class AVLTree(SelfBalancingBinaryTree):
    """
    Represents AVL trees.

    References
    ==========

    .. [1] https://courses.cs.washington.edu/courses/cse373/06sp/handouts/lecture12.pdf
    .. [2] https://en.wikipedia.org/wiki/AVL_tree
    .. [3] http://faculty.cs.niu.edu/~freedman/340/340notes/340avl2.htm

    See Also
    ========

    pydatastructs.trees.binary_trees.BinaryTree
    """
class AVLTree(SelfBalancingBinaryTree):
    """
    Represents AVL trees.

    References
    ==========

    .. [1] https://courses.cs.washington.edu/courses/cse373/06sp/handouts/lecture12.pdf
    .. [2] https://en.wikipedia.org/wiki/AVL_tree
    .. [3] http://faculty.cs.niu.edu/~freedman/340/340notes/340avl2.htm

    See Also
    ========

    pydatastructs.trees.binary_trees.BinaryTree
    """

    def __new__(cls, key=None, root_data=None, comp=None,
                is_order_statistic=False, **kwargs):
        backend = kwargs.get('backend', Backend.PYTHON)
        if backend == Backend.CPP:
            if comp is None:
                comp = lambda key1, key2: key1 < key2
            return _trees.AVLTree(key, root_data, comp, is_order_statistic, **kwargs) # If any argument is not given, then it is passed as None, except for comp
        return super().__new__(cls, key, root_data, comp, is_order_statistic, **kwargs)

    @classmethod
    def methods(cls):
        return ['__new__', 'set_tree', 'insert', 'delete']

    # Cached height of a child subtree; -1 for a missing child.
    left_height = lambda self, node: self.tree[node.left].height \
        if node.left is not None else -1
    right_height = lambda self, node: self.tree[node.right].height \
        if node.right is not None else -1
    # Positive => right-heavy, negative => left-heavy; |bf| > 1 triggers rotations.
    balance_factor = lambda self, node: self.right_height(node) - \
        self.left_height(node)

    def set_tree(self, arr):
        """Replace the underlying array storing the nodes."""
        self.tree = arr

    def _right_rotate(self, j, k):
        """Rotate, then refresh the cached height/size of ``j``."""
        super(AVLTree, self)._right_rotate(j, k)
        self.tree[j].height = max(self.left_height(self.tree[j]),
                                  self.right_height(self.tree[j])) + 1
        # NOTE(review): unlike _left_rotate, k's height is not refreshed
        # here -- presumably recomputed by the balancing loops; confirm.
        if self.is_order_statistic:
            self.tree[j].size = (self.left_size(self.tree[j]) +
                                 self.right_size(self.tree[j]) + 1)

    def _left_right_rotate(self, j, k):
        """Double rotation with height/size refresh for both pivots."""
        super(AVLTree, self)._left_right_rotate(j, k)
        self.tree[j].height = max(self.left_height(self.tree[j]),
                                  self.right_height(self.tree[j])) + 1
        self.tree[k].height = max(self.left_height(self.tree[k]),
                                  self.right_height(self.tree[k])) + 1
        if self.is_order_statistic:
            self.tree[j].size = (self.left_size(self.tree[j]) +
                                 self.right_size(self.tree[j]) + 1)
            self.tree[k].size = (self.left_size(self.tree[k]) +
                                 self.right_size(self.tree[k]) + 1)

    def _right_left_rotate(self, j, k):
        """Double rotation with height/size refresh for both pivots."""
        super(AVLTree, self)._right_left_rotate(j, k)
        self.tree[j].height = max(self.left_height(self.tree[j]),
                                  self.right_height(self.tree[j])) + 1
        self.tree[k].height = max(self.left_height(self.tree[k]),
                                  self.right_height(self.tree[k])) + 1
        if self.is_order_statistic:
            self.tree[j].size = (self.left_size(self.tree[j]) +
                                 self.right_size(self.tree[j]) + 1)
            self.tree[k].size = (self.left_size(self.tree[k]) +
                                 self.right_size(self.tree[k]) + 1)

    def _left_rotate(self, j, k):
        """Rotate, then refresh cached heights (both) and j's size."""
        super(AVLTree, self)._left_rotate(j, k)
        self.tree[j].height = max(self.left_height(self.tree[j]),
                                  self.right_height(self.tree[j])) + 1
        self.tree[k].height = max(self.left_height(self.tree[k]),
                                  self.right_height(self.tree[k])) + 1
        if self.is_order_statistic:
            self.tree[j].size = (self.left_size(self.tree[j]) +
                                 self.right_size(self.tree[j]) + 1)

    def _balance_insertion(self, curr, last):
        """Walk from the inserted node to the root, refreshing heights and
        rotating at the first unbalanced ancestor.

        ``path`` remembers the last two nodes visited (child and
        grandchild of ``walk``) to pick the correct rotation case."""
        walk = last
        path = Queue()
        path.append(curr), path.append(last)
        while walk is not None:
            self.tree[walk].height = max(self.left_height(self.tree[walk]),
                                         self.right_height(self.tree[walk])) + 1
            if self.is_order_statistic:
                self.tree[walk].size = (self.left_size(self.tree[walk]) +
                                        self.right_size(self.tree[walk]) + 1)
            last = path.popleft()
            last2last = path.popleft()
            if self.balance_factor(self.tree[walk]) not in (1, 0, -1):
                # Four classic AVL cases: LL, RR, LR, RL.
                l = self.tree[walk].left
                if l is not None and l == last and self.tree[l].left == last2last:
                    self._right_rotate(walk, last)
                r = self.tree[walk].right
                if r is not None and r == last and self.tree[r].right == last2last:
                    self._left_rotate(walk, last)
                if l is not None and l == last and self.tree[l].right == last2last:
                    self._left_right_rotate(walk, last)
                if r is not None and r == last and self.tree[r].left == last2last:
                    self._right_left_rotate(walk, last)
            path.append(walk), path.append(last)
            walk = self.tree[walk].parent

    def insert(self, key, data=None):
        """BST insert followed by rebalancing from the new node upward."""
        super(AVLTree, self).insert(key, data)
        self._balance_insertion(self.size - 1, self.tree[self.size-1].parent)

    def _balance_deletion(self, start_idx, key):
        """Walk from ``start_idx`` to the root, refreshing heights and
        rotating wherever the balance factor leaves {-1, 0, 1}."""
        walk = start_idx
        while walk is not None:
            self.tree[walk].height = max(self.left_height(self.tree[walk]),
                                         self.right_height(self.tree[walk])) + 1
            if self.is_order_statistic:
                self.tree[walk].size = (self.left_size(self.tree[walk]) +
                                        self.right_size(self.tree[walk]) + 1)
            if self.balance_factor(self.tree[walk]) not in (1, 0, -1):
                if self.balance_factor(self.tree[walk]) < 0:
                    # Left-heavy: single or double right rotation.
                    b = self.tree[walk].left
                    if self.balance_factor(self.tree[b]) <= 0:
                        self._right_rotate(walk, b)
                    else:
                        self._left_right_rotate(walk, b)
                else:
                    # Right-heavy: single or double left rotation.
                    b = self.tree[walk].right
                    if self.balance_factor(self.tree[b]) >= 0:
                        self._left_rotate(walk, b)
                    else:
                        self._right_left_rotate(walk, b)
            walk = self.tree[walk].parent


    def delete(self, key, **kwargs):
        """Delete ``key`` and rebalance along the ancestor chain.

        NOTE(review): this returns True even when ``key`` was absent
        (the base delete returns None in that case) -- confirm whether
        callers rely on the unconditional True.
        """
        a = super(AVLTree, self).delete(key, balancing_info=True)
        self._balance_deletion(a, key)
        return True

class SplayTree(SelfBalancingBinaryTree):
    """
    Represents Splay Trees.

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Splay_tree

    """
class SplayTree(SelfBalancingBinaryTree):
    """
    Represents Splay Trees.

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Splay_tree

    """

    def __new__(cls, key=None, root_data=None, comp=None,
                is_order_statistic=False, **kwargs):
        backend = kwargs.get('backend', Backend.PYTHON)
        if backend == Backend.CPP:
            if comp is None:
                comp = lambda key1, key2: key1 < key2
            return _trees.SplayTree(key, root_data, comp, is_order_statistic, **kwargs) # If any argument is not given, then it is passed as None, except for comp
        return super().__new__(cls, key, root_data, comp, is_order_statistic, **kwargs)

    @classmethod
    def methods(cls):
        return ['__new__', 'insert', 'delete', 'join', 'split']

    def _zig(self, x, p):
        # Single rotation when p is the root.
        if self.tree[p].left == x:
            super(SplayTree, self)._right_rotate(p, x)
        else:
            super(SplayTree, self)._left_rotate(p, x)

    def _zig_zig(self, x, p):
        # x and p are both left children: two right rotations.
        super(SplayTree, self)._right_rotate(self.tree[p].parent, p)
        super(SplayTree, self)._right_rotate(p, x)

    def _zig_zag(self, p):
        # x is a right child of a left child: left-right double rotation.
        super(SplayTree, self)._left_right_rotate(self.tree[p].parent, p)

    def _zag_zag(self, x, p):
        # x and p are both right children: two left rotations.
        super(SplayTree, self)._left_rotate(self.tree[p].parent, p)
        super(SplayTree, self)._left_rotate(p, x)

    def _zag_zig(self, p):
        # x is a left child of a right child: right-left double rotation.
        super(SplayTree, self)._right_left_rotate(self.tree[p].parent, p)

    def splay(self, x, p):
        """Rotate node ``x`` (with parent ``p``) up to the root."""
        while self.tree[x].parent is not None:
            if self.tree[p].parent is None:
                self._zig(x, p)
            elif self.tree[p].left == x and \
                self.tree[self.tree[p].parent].left == p:
                self._zig_zig(x, p)
            elif self.tree[p].right == x and \
                self.tree[self.tree[p].parent].right == p:
                self._zag_zag(x, p)
            elif self.tree[p].left == x and \
                self.tree[self.tree[p].parent].right == p:
                self._zag_zig(p)
            else:
                self._zig_zag(p)
            p = self.tree[x].parent

    def insert(self, key, x):
        """Plain BST insert, then splay the new node to the root."""
        super(SelfBalancingBinaryTree, self).insert(key, x)
        e, p = super(SelfBalancingBinaryTree, self).search(key, parent=True)
        self.tree[self.size-1].parent = p
        self.splay(e, p)

    def delete(self, x):
        """Splay the target to the root, then delete it via the BST delete."""
        e, p = super(SelfBalancingBinaryTree, self).search(x, parent=True)
        if e is None:
            return
        self.splay(e, p)
        status = super(SelfBalancingBinaryTree, self).delete(x)
        return status

    def join(self, other):
        """
        Joins two trees current and other such that all elements of
        the current splay tree are smaller than the elements of the other tree.

        Parameters
        ==========

        other: SplayTree
            SplayTree which needs to be joined with the self tree.

        """
        maxm = self.root_idx
        while self.tree[maxm].right is not None:
            maxm = self.tree[maxm].right
        minm = other.root_idx
        while other.tree[minm].left is not None:
            minm = other.tree[minm].left
        if not self.comparator(self.tree[maxm].key,
                               other.tree[minm].key):
            raise ValueError("Elements of %s aren't less "
                             "than that of %s"%(self, other))
        # Splay our maximum to the root so it has no right child ...
        self.splay(maxm, self.tree[maxm].parent)
        idx_update = self.tree._size
        # ... then copy other's nodes over, offsetting their child indices.
        for node in other.tree:
            if node is not None:
                node_copy = TreeNode(node.key, node.data)
                if node.left is not None:
                    node_copy.left = node.left + idx_update
                if node.right is not None:
                    node_copy.right = node.right + idx_update
                self.tree.append(node_copy)
            else:
                self.tree.append(node)
        # NOTE(review): the attached subtree root's parent pointer is not
        # updated here (copies keep their old parent indices) -- confirm
        # downstream operations tolerate this.
        self.tree[self.root_idx].right = \
            other.root_idx + idx_update

    def split(self, x):
        """
        Splits current splay tree into two trees such that one tree contains nodes
        with key less than or equal to x and the other tree containing
        nodes with key greater than x.

        Parameters
        ==========

        x: key
            Key of the element on the basis of which split is performed.

        Returns
        =======

        other: SplayTree
            SplayTree containing elements with key greater than x.

        """
        e, p = super(SelfBalancingBinaryTree, self).search(x, parent=True)
        if e is None:
            return
        self.splay(e, p)
        other = SplayTree(None, None)
        if self.tree[self.root_idx].right is not None:
            # Move the whole right subtree (keys > x) into `other`.
            traverse = BinaryTreeTraversal(self)
            elements = traverse.depth_first_search(order='pre_order', node=self.tree[self.root_idx].right)
            for i in range(len(elements)):
                super(SelfBalancingBinaryTree, other).insert(elements[i].key, elements[i].data)
            # Remove moved nodes from self, deepest-first.
            for j in range(len(elements) - 1, -1, -1):
                e, p = super(SelfBalancingBinaryTree, self).search(elements[j].key, parent=True)
                self.tree[e] = None
            self.tree[self.root_idx].right = None
        return other

class RedBlackTree(SelfBalancingBinaryTree):
    """
    Represents Red Black trees.

    Examples
    ========

    >>> from pydatastructs.trees import RedBlackTree as RB
    >>> b = RB()
    >>> b.insert(1, 1)
    >>> b.insert(2, 2)
    >>> child = b.tree[b.root_idx].right
    >>> b.tree[child].data
    2
    >>> b.search(1)
    0

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Red%E2%80%93black_tree

    See Also
    ========

    pydatastructs.trees.binary_trees.SelfBalancingBinaryTree
    """
class RedBlackTree(SelfBalancingBinaryTree):
    """
    Represents Red Black trees.  Node colors are encoded as integers:
    1 for red, 0 for black.

    Examples
    ========

    >>> from pydatastructs.trees import RedBlackTree as RB
    >>> b = RB()
    >>> b.insert(1, 1)
    >>> b.insert(2, 2)
    >>> child = b.tree[b.root_idx].right
    >>> b.tree[child].data
    2
    >>> b.search(1)
    0

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Red%E2%80%93black_tree

    See Also
    ========

    pydatastructs.trees.binary_trees.SelfBalancingBinaryTree
    """

    def __new__(cls, key=None, root_data=None, comp=None,
                is_order_statistic=False, **kwargs):
        backend = kwargs.get('backend', Backend.PYTHON)
        if backend == Backend.CPP:
            if comp is None:
                comp = lambda key1, key2: key1 < key2
            return _trees.RedBlackTree(key, root_data, comp, is_order_statistic, **kwargs) # If any argument is not given, then it is passed as None, except for comp
        return super().__new__(cls, key, root_data, comp, is_order_statistic, **kwargs)

    @classmethod
    def methods(cls):
        return ['__new__', 'insert', 'delete']

    def _get_parent(self, node_idx):
        """Index of the parent, or None for the root."""
        return self.tree[node_idx].parent

    def _get_grand_parent(self, node_idx):
        """Index of the grandparent; assumes a parent exists."""
        parent_idx=self._get_parent(node_idx)
        return self.tree[parent_idx].parent

    def _get_sibling(self, node_idx):
        """Index of the sibling, or None if the node is the root."""
        parent_idx=self._get_parent(node_idx)
        if parent_idx is None:
            return None
        node = self.tree[parent_idx]
        if node_idx==node.left:
            sibling_idx=node.right
            return sibling_idx
        else:
            sibling_idx=node.left
            return sibling_idx

    def _get_uncle(self, node_idx):
        """Index of the parent's sibling, or None."""
        parent_idx=self._get_parent(node_idx)
        return self._get_sibling(parent_idx)

    def _is_onleft(self, node_idx):
        """True if the node is its parent's left child."""
        parent = self._get_parent(node_idx)
        if self.tree[parent].left == node_idx:
            return True
        return False

    def _is_onright(self, node_idx):
        """True if the node is its parent's right child."""
        if self._is_onleft(node_idx) is False:
            return True
        return False

    def __fix_insert(self, node_idx):
        """Restore red-black invariants after inserting a red node.

        Loops while a red-red violation exists between the node and its
        parent, either recoloring (red uncle) or rotating (black uncle).
        """
        while self._get_parent(node_idx) is not None and \
            self.tree[self._get_parent(node_idx)].color == 1 and self.tree[node_idx].color==1:
            parent_idx=self._get_parent(node_idx)
            grand_parent_idx=self._get_grand_parent(node_idx)
            uncle_idx = self._get_uncle(node_idx)
            if uncle_idx is not None and self.tree[uncle_idx].color == 1:
                # Case 1: red uncle -- recolor and continue from grandparent.
                self.tree[uncle_idx].color = 0
                self.tree[parent_idx].color = 0
                self.tree[grand_parent_idx].color = 1
                node_idx= grand_parent_idx
            else:
                # Case 2/3: black uncle -- rotate (with an inner-child
                # pre-rotation when needed), then recolor.
                self.tree[self.root_idx].is_root=False
                if self._is_onright(parent_idx):
                    if self._is_onleft(node_idx):
                        self._right_rotate(parent_idx, node_idx)
                        node_idx=parent_idx
                        parent_idx=self._get_parent(node_idx)
                    node_idx=parent_idx
                    parent_idx=self._get_parent(node_idx)
                    self._left_rotate(parent_idx, node_idx)
                elif self._is_onleft(parent_idx):
                    if self._is_onright(node_idx):
                        self._left_rotate(parent_idx, node_idx)
                        node_idx=parent_idx
                        parent_idx=self._get_parent(node_idx)
                    node_idx=parent_idx
                    parent_idx=self._get_parent(node_idx)
                    self._right_rotate(parent_idx, node_idx)
                self.tree[node_idx].color = 0
                self.tree[parent_idx].color = 1
                self.tree[self.root_idx].is_root=True
                if self.tree[node_idx].is_root:
                    break
        # The root is always black.
        self.tree[self.root_idx].color=0

    def insert(self, key, data=None):
        """BST insert, wrap the slot in a RedBlackTreeNode (red by
        default), then fix any red-red violation."""
        super(RedBlackTree, self).insert(key, data)
        node_idx = super(RedBlackTree, self).search(key)
        node = self.tree[node_idx]
        new_node = RedBlackTreeNode(key, data)
        new_node.parent = node.parent
        new_node.left = node.left
        new_node.right = node.right
        self.tree[node_idx] = new_node
        if node.is_root:
            self.tree[node_idx].is_root = True
            self.tree[node_idx].color=0
        elif self.tree[self.tree[node_idx].parent].color==1:
            self.__fix_insert(node_idx)

    def _find_predecessor(self, node_idx):
        """Rightmost descendant of the given subtree."""
        while self.tree[node_idx].right is not None:
            node_idx = self.tree[node_idx].right
        return node_idx

    def _transplant_values(self, node_idx1, node_idx2):
        """Copy node2's key/data into node1's slot; for a one-child root
        the children are moved as well and the root key is returned."""
        # NOTE(review): `parent` is assigned but never used here.
        parent = self.tree[node_idx1].parent
        if self.tree[node_idx1].is_root and self._has_one_child(node_idx1):
            self.tree[self.root_idx].key = self.tree[node_idx2].key
            self.tree[self.root_idx].data = self.tree[node_idx2].data
            self.tree[self.root_idx].left = self.tree[node_idx2].left
            self.tree[self.root_idx].right = self.tree[node_idx2].right
            self.tree[node_idx1].parent = None
            return self.tree[self.root_idx].key
        else:
            self.tree[node_idx1].key = self.tree[node_idx2].key
            self.tree[node_idx1].data = self.tree[node_idx2].data

    def _has_one_child(self, node_idx):
        """True when exactly one child is present."""
        if self._is_leaf(node_idx) is False and self._has_two_child(node_idx) is False:
            return True
        return False

    def _is_leaf(self, node_idx):
        """True when both children are absent."""
        if self.tree[node_idx].left is None and self.tree[node_idx].right is None:
            return True
        return False

    def _has_two_child(self, node_idx):
        """True when both children are present."""
        if self.tree[node_idx].left is not None and self.tree[node_idx].right is not None:
            return True
        return False

    def __has_red_child(self, node_idx):
        """True when at least one child exists and is red."""
        left_idx = self.tree[node_idx].left
        right_idx = self.tree[node_idx].right
        if (left_idx is not None and self.tree[left_idx].color == 1) or \
            (right_idx is not None and self.tree[right_idx].color == 1):
            return True
        return False

    def _replace_node(self, node_idx):
        """Node that takes the deleted node's place: None for a leaf, the
        single child, or the in-order predecessor for two children."""
        if self._is_leaf(node_idx):
            return None
        elif self._has_one_child(node_idx):
            if self.tree[node_idx].left is not None:
                child = self.tree[node_idx].left
            else:
                child = self.tree[node_idx].right
            return child
        else:
            return self._find_predecessor(self.tree[node_idx].left)

    def __walk1_walk_isblack(self, color, node_idx1):
        """True when both the removed node and its replacement are black
        (the "double black" deletion case)."""
        if (node_idx1 is None or self.tree[node_idx1].color == 0) and (color == 0):
            return True
        return False

    def __left_left_siblingcase(self, node_idx):
        # Sibling is a left child with a red left child.
        left_idx = self.tree[node_idx].left
        parent = self._get_parent(node_idx)
        parent_color = self.tree[parent].color
        self.tree[left_idx].color = self.tree[node_idx].color
        self.tree[node_idx].color = parent_color
        self._right_rotate(parent, node_idx)

    def __right_left_siblingcase(self, node_idx):
        # Sibling is a right child with a red left child.
        left_idx = self.tree[node_idx].left
        parent = self._get_parent(node_idx)
        parent_color = self.tree[parent].color
        self.tree[left_idx].color = parent_color
        self._right_rotate(node_idx, left_idx)
        child = self._get_parent(node_idx)
        self._left_rotate(parent, child)

    def __left_right_siblingcase(self, node_idx):
        # Sibling is a left child with a red right child.
        right_idx = self.tree[node_idx].right
        parent = self._get_parent(node_idx)
        parent_color = self.tree[parent].color
        self.tree[right_idx].color = parent_color
        self._left_rotate(node_idx, right_idx)
        child = self._get_parent(node_idx)
        self._right_rotate(parent, child)

    def __right_right_siblingcase(self, node_idx):
        # Sibling is a right child with a red right child.
        right_idx = self.tree[node_idx].right
        parent = self._get_parent(node_idx)
        parent_color = self.tree[parent].color
        self.tree[right_idx].color = self.tree[node_idx].color
        self.tree[node_idx].color = parent_color
        self._left_rotate(parent, node_idx)

    def __fix_deletion(self, node_idx):
        """Resolve a "double black" node after deletion by recoloring
        and/or rotating, walking towards the root as needed."""
        node = self.tree[node_idx]
        color = node.color
        while node_idx!= self.root_idx and color == 0:
            sibling_idx = self._get_sibling(node_idx)
            parent_idx = self._get_parent(node_idx)
            if sibling_idx is None:
                # No sibling: push the double black up to the parent.
                node_idx = parent_idx
                continue
            else:
                if self.tree[sibling_idx].color == 1:
                    # Red sibling: rotate to obtain a black sibling, retry.
                    self.tree[self.root_idx].is_root = False
                    self.tree[parent_idx].color = 1
                    self.tree[sibling_idx].color = 0
                    if self._is_onleft(sibling_idx):
                        self._right_rotate(parent_idx, sibling_idx)
                    else:
                        self._left_rotate(parent_idx, sibling_idx)
                    self.tree[self.root_idx].is_root = True
                    continue
                else:
                    if self.__has_red_child(sibling_idx):
                        # Black sibling with a red child: one of the four
                        # rotation cases resolves the double black.
                        self.tree[self.root_idx].is_root = False
                        left_idx = self.tree[sibling_idx].left
                        if self.tree[sibling_idx].left is not None and \
                            self.tree[left_idx].color == 1:
                            if self._is_onleft(sibling_idx):
                                self.__left_left_siblingcase(sibling_idx)
                            else:
                                self.__right_left_siblingcase(sibling_idx)
                        else:
                            if self._is_onleft(sibling_idx):
                                self.__left_right_siblingcase(sibling_idx)
                            else:
                                self.__right_right_siblingcase(sibling_idx)
                        self.tree[self.root_idx].is_root = True
                        self.tree[parent_idx].color = 0
                    else:
                        # Black sibling, black children: recolor and
                        # possibly propagate the double black upward.
                        self.tree[sibling_idx].color = 1
                        if self.tree[parent_idx].color == 0:
                            node_idx = parent_idx
                            continue
                        else:
                            self.tree[parent_idx].color = 0
                    color = 1

    def _remove_node(self, node_idx):
        """Physically remove a (leaf or one-child) node from the array,
        remapping indices the underlying array may have compacted."""
        parent = self._get_parent(node_idx)
        a = parent
        if self._is_leaf(node_idx):
            par_key, root_key = (self.tree[parent].key, self.tree[self.root_idx].key)
            new_indices = self.tree.delete(node_idx)
            if new_indices is not None:
                a = new_indices[par_key]
                self.root_idx = new_indices[root_key]
        elif self._has_one_child(node_idx):
            child = self._replace_node(node_idx)
            parent = self._get_parent(node_idx)
            par_key, root_key = (self.tree[parent].key, self.tree[self.root_idx].key)
            # NOTE(review): `new_indices` is unused in this branch, unlike
            # the leaf branch above -- confirm whether remapping is needed.
            new_indices = self.tree.delete(node_idx)
        self._update_size(a)

    def _delete_root(self, node_idx, node_idx1):
        """Delete when the target is the root (leaf or one child)."""
        if self._is_leaf(node_idx):
            self.tree[self.root_idx].data = None
            self.tree[self.root_idx].key = None
        elif self._has_one_child(node_idx):
            root_key = self._transplant_values(node_idx, node_idx1)
            new_indices = self.tree.delete(node_idx1)
            if new_indices is not None:
                self.root_idx = new_indices[root_key]

    def __leaf_case(self, node_idx, node_idx1):
        """Deletion when the (possibly transplanted) target is a leaf."""
        walk = node_idx
        walk1 = node_idx1
        parent = self._get_parent(node_idx)
        color = self.tree[walk].color
        if parent is None:
            self._delete_root(walk, walk1)
        else:
            if self.__walk1_walk_isblack(color, walk1):
                # Removing a black leaf creates a double black.
                self.__fix_deletion(walk)
            else:
                sibling_idx = self._get_sibling(walk)
                if sibling_idx is not None:
                    self.tree[sibling_idx].color = 1
            if self._is_onleft(walk):
                self.tree[parent].left = None
            else:
                self.tree[parent].right = None
            self._remove_node(walk)

    def __one_child_case(self, node_idx, node_idx1):
        """Deletion when the target has exactly one child ``node_idx1``."""
        walk = node_idx
        walk1 = node_idx1
        walk_original_color = self.tree[walk].color
        parent = self._get_parent(node_idx)
        if parent is None:
            self._delete_root(walk, walk1)
        else:
            # Splice the child into the parent's slot.
            if self._is_onleft(walk):
                self.tree[parent].left = walk1
            else:
                self.tree[parent].right = walk1
            self.tree[walk1].parent = parent
            # NOTE(review): _remove_node returns None, so `a` is always None.
            a = self._remove_node(walk)
            if self.__walk1_walk_isblack(walk_original_color, walk1):
                self.__fix_deletion(walk1)
            else:
                self.tree[walk1].color = 0

    def __two_child_case(self, node_idx):
        """Reduce a two-child deletion to a leaf/one-child deletion by
        transplanting the in-order predecessor's key/data."""
        walk = node_idx
        successor = self._replace_node(walk)
        self._transplant_values(walk, successor)
        walk = successor
        walk1 = self._replace_node(walk)
        return walk, walk1

    def delete(self, key, **kwargs):
        """Delete ``key``; returns True on success, None if absent."""
        walk = super(RedBlackTree, self).search(key)
        if walk is not None:
            walk1 = self._replace_node(walk)
            if self._has_two_child(walk):
                walk, walk1 = self.__two_child_case(walk)
            if self._is_leaf(walk):
                self.__leaf_case(walk, walk1)
            elif self._has_one_child(walk):
                self.__one_child_case(walk, walk1)
            return True
        else:
            return None

class BinaryTreeTraversal(object):
    """
    Represents the traversals possible in
    a binary tree.

    Parameters
    ==========

    tree: BinaryTree
        The binary tree for whose traversal
        is to be done.
    backend: pydatastructs.Backend
        The backend to be used. Available backends: Python and C++
        Optional, by default, the Python backend is used. For faster execution, use the C++ backend.
    """
class BinaryTreeTraversal(object):
    """
    Represents the traversals possible in a binary tree.

    Parameters
    ==========

    tree: BinaryTree
        The binary tree for whose traversal is to be done.
    backend: pydatastructs.Backend
        The backend to be used. Available backends: Python and C++
        Optional, by default, the Python backend is used. For faster execution, use the C++ backend.

    Traversals
    ==========

    - Depth First Search
        In Order, Post Order, Pre Order Out Order

    - Breadth First Search

    Examples
    ========

    >>> from pydatastructs import BinarySearchTree as BST
    >>> from pydatastructs import BinaryTreeTraversal as BTT
    >>> b = BST(2, 2)
    >>> b.insert(1, 1)
    >>> b.insert(3, 3)
    >>> trav = BTT(b)
    >>> dfs = trav.depth_first_search()
    >>> [str(n) for n in dfs]
    ['(None, 1, 1, None)', '(1, 2, 2, 2)', '(None, 3, 3, None)']
    >>> bfs = trav.breadth_first_search()
    >>> [str(n) for n in bfs]
    ['(1, 2, 2, 2)', '(None, 1, 1, None)', '(None, 3, 3, None)']

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Tree_traversal
    """

    @classmethod
    def methods(cls):
        return ['__new__', 'depth_first_search',
            'breadth_first_search']

    __slots__ = ['tree']

    def __new__(cls, tree, **kwargs):
        backend = kwargs.get('backend', Backend.PYTHON)
        if backend == Backend.CPP:
            return _trees.BinaryTreeTraversal(tree, **kwargs)
        if not isinstance(tree, BinaryTree):
            raise TypeError("%s is not a binary tree"%(tree))
        obj = object.__new__(cls)
        obj.tree = tree
        return obj

    def _pre_order(self, node):
        """
        Utility method for computing pre-order
        of a binary tree using iterative algorithm.
        """
        visit = []
        tree, size = self.tree.tree, self.tree.size
        s = Stack()
        s.push(node)
        while not s.is_empty:
            node = s.pop()
            visit.append(tree[node])
            # Push right first so the left subtree is visited first.
            if tree[node].right is not None:
                s.push(tree[node].right)
            if tree[node].left is not None:
                s.push(tree[node].left)
        return visit

    def _in_order(self, node):
        """
        Utility method for computing in-order
        of a binary tree using iterative algorithm.
        """
        visit = []
        tree, size = self.tree.tree, self.tree.size
        s = Stack()
        while not s.is_empty or node is not None:
            if node is not None:
                # Descend to the leftmost node, stacking ancestors.
                s.push(node)
                node = tree[node].left
            else:
                node = s.pop()
                visit.append(tree[node])
                node = tree[node].right
        return visit

    def _post_order(self, node):
        """
        Utility method for computing post-order
        of a binary tree using iterative algorithm.
        """
        visit = []
        tree, size = self.tree.tree, self.tree.size
        s = Stack()
        s.push(node)
        # `last[i]` records whether node i has already been emitted.
        last = OneDimensionalArray(int, size)
        last.fill(False)
        while not s.is_empty:
            node = s.peek
            l, r = tree[node].left, tree[node].right
            cl, cr = l is None or last[l], r is None or last[r]
            if cl and cr:
                # Both subtrees done: emit this node.
                s.pop()
                visit.append(tree[node])
                last[node] = True
                continue
            if not cr:
                s.push(r)
            if not cl:
                s.push(l)
        return visit

    def _out_order(self, node):
        """
        Utility method for computing out-order
        (reverse in-order) of a binary tree.  Returns a reversed iterator
        over the in-order traversal.
        """
        return reversed(self._in_order(node))

    def depth_first_search(self, order='in_order', node=None):
        """
        Computes the depth first search traversal of the binary
        trees.

        Parameters
        ==========

        order : str
            One of the strings, 'in_order', 'post_order',
            'pre_order', 'out_order'.
            By default, it is set to, 'in_order'.
        node : int
            The index of the node from where the traversal
            is to be instantiated.

        Returns
        =======

        list
            Each element is of type 'TreeNode'.

        Raises
        ======

        NotImplementedError
            When an unsupported ``order`` is requested.
        """
        if node is None:
            node = self.tree.root_idx
        if order not in ('in_order', 'post_order', 'pre_order', 'out_order'):
            # BUG FIX: the "%s" placeholder was never formatted with the
            # requested order, so users saw a literal "%s" in the message.
            raise NotImplementedError(
                ("%s order is not implemented yet."
                 "We only support `in_order`, `post_order`, "
                 "`pre_order` and `out_order` traversals.") % (order,))
        return getattr(self, '_' + order)(node)

    def breadth_first_search(self, node=None, strategy='queue'):
        """
        Computes the breadth first search traversal of a binary tree.

        Parameters
        ==========

        node : int
            The index of the node from where the traversal has to be instantiated.
            By default, set to, root index.

        strategy : str
            The strategy using which the computation has to happen.
            By default, it is set 'queue'.

        Returns
        =======

        list
            Each element of the list is of type `TreeNode`.
        """
        # TODO: IMPLEMENT ITERATIVE DEEPENING-DEPTH FIRST SEARCH STRATEGY
        strategies = ('queue',)
        if strategy not in strategies:
            # Typo fix: "startegy" -> "strategy" in the error message.
            raise NotImplementedError(
                "%s strategy is not implemented yet"%(strategy))
        if node is None:
            node = self.tree.root_idx
        q, visit, tree = Queue(), [], self.tree.tree
        q.append(node)
        while len(q) > 0:
            node = q.popleft()
            visit.append(tree[node])
            if tree[node].left is not None:
                q.append(tree[node].left)
            if tree[node].right is not None:
                q.append(tree[node].right)
        return visit
+ """ + # TODO: IMPLEMENT ITERATIVE DEEPENING-DEPTH FIRST SEARCH STRATEGY + strategies = ('queue',) + if strategy not in strategies: + raise NotImplementedError( + "%s startegy is not implemented yet"%(strategy)) + if node is None: + node = self.tree.root_idx + q, visit, tree = Queue(), [], self.tree.tree + q.append(node) + while len(q) > 0: + node = q.popleft() + visit.append(tree[node]) + if tree[node].left is not None: + q.append(tree[node].left) + if tree[node].right is not None: + q.append(tree[node].right) + return visit + +class BinaryIndexedTree(object): + """ + Represents binary indexed trees + a.k.a fenwick trees. + + Parameters + ========== + + array: list/tuple + The array whose elements are to be + considered for the queries. + backend: pydatastructs.Backend + The backend to be used. Available backends: Python and C++ + Optional, by default, the Python backend is used. For faster execution, use the C++ backend. + + Examples + ======== + + >>> from pydatastructs import BinaryIndexedTree + >>> bit = BinaryIndexedTree([1, 2, 3]) + >>> bit.get_sum(0, 2) + 6 + >>> bit.update(0, 100) + >>> bit.get_sum(0, 2) + 105 + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Fenwick_tree + """ + + __slots__ = ['tree', 'array', 'flag'] + + def __new__(cls, array, **kwargs): + backend = kwargs.get('backend', Backend.PYTHON) + if backend == Backend.CPP: + return _trees.BinaryIndexedTree(type(array[0]), array, **kwargs) + obj = object.__new__(cls) + obj.array = OneDimensionalArray(type(array[0]), array) + obj.tree = [0] * (obj.array._size + 2) + obj.flag = [0] * (obj.array._size) + for index in range(obj.array._size): + obj.update(index, array[index]) + return obj + + @classmethod + def methods(cls): + return ['update', 'get_prefix_sum', + 'get_sum'] + + def update(self, index, value): + """ + Updates value at the given index. + + Parameters + ========== + + index: int + Index of element to be updated. + + value + The value to be inserted. 
+ """ + _index, _value = index, value + if self.flag[index] == 0: + self.flag[index] = 1 + index += 1 + while index < self.array._size + 1: + self.tree[index] += value + index = index + (index & (-index)) + else: + value = value - self.array[index] + index += 1 + while index < self.array._size + 1: + self.tree[index] += value + index = index + (index & (-index)) + self.array[_index] = _value + + def get_prefix_sum(self, index): + """ + Computes sum of elements from index 0 to given index. + + Parameters + ========== + + index: int + Index till which sum has to be calculated. + + Returns + ======= + + sum: int + The required sum. + """ + index += 1 + sum = 0 + while index > 0: + sum += self.tree[index] + index = index - (index & (-index)) + return sum + + def get_sum(self, left_index, right_index): + """ + Get sum of elements from left index to right index. + + Parameters + ========== + + left_index: int + Starting index from where sum has to be computed. + + right_index: int + Ending index till where sum has to be computed. + + Returns + ======= + + sum: int + The required sum + """ + if left_index >= 1: + return self.get_prefix_sum(right_index) - \ + self.get_prefix_sum(left_index - 1) + else: + return self.get_prefix_sum(right_index) diff --git a/lib/python3.12/site-packages/pydatastructs/trees/heaps.py b/lib/python3.12/site-packages/pydatastructs/trees/heaps.py new file mode 100644 index 000000000..12133a6f1 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/trees/heaps.py @@ -0,0 +1,582 @@ +from pydatastructs.utils.misc_util import ( + _check_type, TreeNode, BinomialTreeNode, + Backend, raise_if_backend_is_not_python) +from pydatastructs.linear_data_structures.arrays import ( + DynamicOneDimensionalArray, Array) +from pydatastructs.miscellaneous_data_structures.binomial_trees import BinomialTree + +__all__ = [ + 'BinaryHeap', + 'TernaryHeap', + 'DHeap', + 'BinomialHeap' +] + +class Heap(object): + """ + Abstract class for representing heaps. 
+ """ + pass + + +class DHeap(Heap): + """ + Represents D-ary Heap. + + Parameters + ========== + + elements: list, tuple, Array + Optional, by default 'None'. + list/tuple/Array of initial TreeNode in Heap. + heap_property: str + If the key stored in each node is + either greater than or equal to + the keys in the node's children + then pass 'max'. + If the key stored in each node is + either less than or equal to + the keys in the node's children + then pass 'min'. + By default, the heap property is + set to 'min'. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Examples + ======== + + >>> from pydatastructs.trees.heaps import DHeap + >>> min_heap = DHeap(heap_property="min", d=3) + >>> min_heap.insert(1, 1) + >>> min_heap.insert(5, 5) + >>> min_heap.insert(7, 7) + >>> min_heap.extract().key + 1 + >>> min_heap.insert(4, 4) + >>> min_heap.extract().key + 4 + + >>> max_heap = DHeap(heap_property='max', d=2) + >>> max_heap.insert(1, 1) + >>> max_heap.insert(5, 5) + >>> max_heap.insert(7, 7) + >>> max_heap.extract().key + 7 + >>> max_heap.insert(6, 6) + >>> max_heap.extract().key + 6 + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/D-ary_heap + """ + __slots__ = ['_comp', 'heap', 'd', 'heap_property', '_last_pos_filled'] + + def __new__(cls, elements=None, heap_property="min", d=4, + **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = Heap.__new__(cls) + obj.heap_property = heap_property + obj.d = d + if heap_property == "min": + obj._comp = lambda key_parent, key_child: key_parent <= key_child + elif heap_property == "max": + obj._comp = lambda key_parent, key_child: key_parent >= key_child + else: + raise ValueError("%s is invalid heap property"%(heap_property)) + if elements is None: + elements = DynamicOneDimensionalArray(TreeNode, 0) + elif _check_type(elements, (list,tuple)): + elements = DynamicOneDimensionalArray(TreeNode, len(elements), elements) + elif _check_type(elements, Array): + elements = DynamicOneDimensionalArray(TreeNode, len(elements), elements._data) + else: + raise ValueError(f'Expected a list/tuple/Array of TreeNode got {type(elements)}') + obj.heap = elements + obj._last_pos_filled = obj.heap._last_pos_filled + obj._build() + return obj + + @classmethod + def methods(cls): + return ['__new__', 'insert', 'extract', '__str__', 'is_empty'] + + def _build(self): + for i in range(self._last_pos_filled + 1): + self.heap[i]._leftmost, self.heap[i]._rightmost = \ + self.d*i + 1, self.d*i + self.d + for i in range((self._last_pos_filled + 1)//self.d, -1, -1): + self._heapify(i) + + def _swap(self, idx1, idx2): + idx1_key, idx1_data = \ + self.heap[idx1].key, self.heap[idx1].data + self.heap[idx1].key, self.heap[idx1].data = \ + self.heap[idx2].key, self.heap[idx2].data + self.heap[idx2].key, self.heap[idx2].data = \ + idx1_key, idx1_data + + def _heapify(self, i): + while True: + target = i + l = self.d*i + 1 + r = self.d*i + self.d + + for j in range(l, r+1): + if j <= self._last_pos_filled: + target = j if self._comp(self.heap[j].key, self.heap[target].key) \ + else target + else: + break + + if 
target != i: + self._swap(target, i) + i = target + else: + break + + def insert(self, key, data=None): + """ + Insert a new element to the heap according to heap property. + + Parameters + ========== + + key + The key for comparison. + data + The data to be inserted. + + Returns + ======= + + None + """ + new_node = TreeNode(key, data) + self.heap.append(new_node) + self._last_pos_filled += 1 + i = self._last_pos_filled + self.heap[i]._leftmost, self.heap[i]._rightmost = self.d*i + 1, self.d*i + self.d + + while True: + parent = (i - 1)//self.d + if i == 0 or self._comp(self.heap[parent].key, self.heap[i].key): + break + else: + self._swap(i, parent) + i = parent + + def extract(self): + """ + Extract root element of the Heap. + + Returns + ======= + + root_element: TreeNode + The TreeNode at the root of the heap, + if the heap is not empty. + + None + If the heap is empty. + """ + if self._last_pos_filled == -1: + raise IndexError("Heap is empty.") + else: + element_to_be_extracted = TreeNode(self.heap[0].key, self.heap[0].data) + self._swap(0, self._last_pos_filled) + self.heap.delete(self._last_pos_filled) + self._last_pos_filled -= 1 + self._heapify(0) + return element_to_be_extracted + + def __str__(self): + to_be_printed = ['' for i in range(self._last_pos_filled + 1)] + for i in range(self._last_pos_filled + 1): + node = self.heap[i] + if node._leftmost <= self._last_pos_filled: + if node._rightmost <= self._last_pos_filled: + children = list(range(node._leftmost, node._rightmost + 1)) + else: + children = list(range(node._leftmost, self._last_pos_filled + 1)) + else: + children = [] + to_be_printed[i] = (node.key, node.data, children) + return str(to_be_printed) + + @property + def is_empty(self): + """ + Checks if the heap is empty. + """ + return self.heap._last_pos_filled == -1 + + +class BinaryHeap(DHeap): + """ + Represents Binary Heap. + + Parameters + ========== + + elements: list, tuple + Optional, by default 'None'. 
+ List/tuple of initial elements in Heap. + heap_property: str + If the key stored in each node is + either greater than or equal to + the keys in the node's children + then pass 'max'. + If the key stored in each node is + either less than or equal to + the keys in the node's children + then pass 'min'. + By default, the heap property is + set to 'min'. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Examples + ======== + + >>> from pydatastructs.trees.heaps import BinaryHeap + >>> min_heap = BinaryHeap(heap_property="min") + >>> min_heap.insert(1, 1) + >>> min_heap.insert(5, 5) + >>> min_heap.insert(7, 7) + >>> min_heap.extract().key + 1 + >>> min_heap.insert(4, 4) + >>> min_heap.extract().key + 4 + + >>> max_heap = BinaryHeap(heap_property='max') + >>> max_heap.insert(1, 1) + >>> max_heap.insert(5, 5) + >>> max_heap.insert(7, 7) + >>> max_heap.extract().key + 7 + >>> max_heap.insert(6, 6) + >>> max_heap.extract().key + 6 + + References + ========== + + .. [1] https://en.m.wikipedia.org/wiki/Binary_heap + """ + def __new__(cls, elements=None, heap_property="min", + **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = DHeap.__new__(cls, elements, heap_property, 2) + return obj + + @classmethod + def methods(cls): + return ['__new__'] + + +class TernaryHeap(DHeap): + """ + Represents Ternary Heap. + + Parameters + ========== + + elements: list, tuple + Optional, by default 'None'. + List/tuple of initial elements in Heap. + heap_property: str + If the key stored in each node is + either greater than or equal to + the keys in the node's children + then pass 'max'. + If the key stored in each node is + either less than or equal to + the keys in the node's children + then pass 'min'. + By default, the heap property is + set to 'min'. + backend: pydatastructs.Backend + The backend to be used. 
+ Optional, by default, the best available + backend is used. + + Examples + ======== + + >>> from pydatastructs.trees.heaps import TernaryHeap + >>> min_heap = TernaryHeap(heap_property="min") + >>> min_heap.insert(1, 1) + >>> min_heap.insert(5, 5) + >>> min_heap.insert(7, 7) + >>> min_heap.insert(3, 3) + >>> min_heap.extract().key + 1 + >>> min_heap.insert(4, 4) + >>> min_heap.extract().key + 3 + + >>> max_heap = TernaryHeap(heap_property='max') + >>> max_heap.insert(1, 1) + >>> max_heap.insert(5, 5) + >>> max_heap.insert(7, 7) + >>> min_heap.insert(3, 3) + >>> max_heap.extract().key + 7 + >>> max_heap.insert(6, 6) + >>> max_heap.extract().key + 6 + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/D-ary_heap + .. [2] https://ece.uwaterloo.ca/~dwharder/aads/Algorithms/d-ary_heaps/Ternary_heaps/ + """ + def __new__(cls, elements=None, heap_property="min", + **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = DHeap.__new__(cls, elements, heap_property, 3) + return obj + + @classmethod + def methods(cls): + return ['__new__'] + + +class BinomialHeap(Heap): + """ + Represents binomial heap. + + Parameters + ========== + + root_list: list/tuple/Array + By default, [] + The list of BinomialTree object references + in sorted order. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Examples + ======== + + >>> from pydatastructs import BinomialHeap + >>> b = BinomialHeap() + >>> b.insert(1, 1) + >>> b.insert(2, 2) + >>> b.find_minimum().key + 1 + >>> b.find_minimum().children[0].key + 2 + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/Binomial_heap + """ + __slots__ = ['root_list'] + + def __new__(cls, root_list=None, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + if root_list is None: + root_list = [] + if not all((_check_type(root, BinomialTree)) + for root in root_list): + raise TypeError("The root_list should contain " + "references to objects of BinomialTree.") + obj = Heap.__new__(cls) + obj.root_list = root_list + return obj + + @classmethod + def methods(cls): + return ['__new__', 'merge_tree', 'merge', 'insert', + 'find_minimum', 'is_emtpy', 'decrease_key', 'delete', + 'delete_minimum'] + + def merge_tree(self, tree1, tree2): + """ + Merges two BinomialTree objects. + + Parameters + ========== + + tree1: BinomialTree + + tree2: BinomialTree + """ + if (not _check_type(tree1, BinomialTree)) or \ + (not _check_type(tree2, BinomialTree)): + raise TypeError("Both the trees should be of type " + "BinomalTree.") + ret_value = None + if tree1.root.key <= tree2.root.key: + tree1.add_sub_tree(tree2) + ret_value = tree1 + else: + tree2.add_sub_tree(tree1) + ret_value = tree2 + return ret_value + + def _merge_heap_last_new_tree(self, new_root_list, new_tree): + """ + Merges last tree node in root list with the incoming tree. + """ + pos = -1 + if len(new_root_list) > 0 and new_root_list[pos].order == new_tree.order: + new_root_list[pos] = self.merge_tree(new_root_list[pos], new_tree) + else: + new_root_list.append(new_tree) + + def merge(self, other_heap): + """ + Merges current binomial heap with the given binomial heap. 
+ + Parameters + ========== + + other_heap: BinomialHeap + """ + if not _check_type(other_heap, BinomialHeap): + raise TypeError("Other heap is not of type BinomialHeap.") + new_root_list = [] + i, j = 0, 0 + while (i < len(self.root_list)) and \ + (j < len(other_heap.root_list)): + new_tree = None + while self.root_list[i] is None: + i += 1 + while other_heap.root_list[j] is None: + j += 1 + if self.root_list[i].order == other_heap.root_list[j].order: + new_tree = self.merge_tree(self.root_list[i], + other_heap.root_list[j]) + i += 1 + j += 1 + else: + if self.root_list[i].order < other_heap.root_list[j].order: + new_tree = self.root_list[i] + i += 1 + else: + new_tree = other_heap.root_list[j] + j += 1 + self._merge_heap_last_new_tree(new_root_list, new_tree) + + while i < len(self.root_list): + new_tree = self.root_list[i] + self._merge_heap_last_new_tree(new_root_list, new_tree) + i += 1 + while j < len(other_heap.root_list): + new_tree = other_heap.root_list[j] + self._merge_heap_last_new_tree(new_root_list, new_tree) + j += 1 + self.root_list = new_root_list + + def insert(self, key, data=None): + """ + Inserts new node with the given key and data. + + key + The key of the node which can be operated + upon by relational operators. + + data + The data to be stored in the new node. + """ + new_node = BinomialTreeNode(key, data) + new_tree = BinomialTree(root=new_node, order=0) + new_heap = BinomialHeap(root_list=[new_tree]) + self.merge(new_heap) + + def find_minimum(self, **kwargs): + """ + Finds the node with the minimum key. 
+ + Returns + ======= + + min_node: BinomialTreeNode + """ + if self.is_empty: + raise IndexError("Binomial heap is empty.") + min_node = None + idx, min_idx = 0, None + for tree in self.root_list: + if ((min_node is None) or + (tree is not None and tree.root is not None and + min_node.key > tree.root.key)): + min_node = tree.root + min_idx = idx + idx += 1 + if kwargs.get('get_index', None) is not None: + return min_node, min_idx + return min_node + + def delete_minimum(self): + """ + Deletes the node with minimum key. + """ + min_node, min_idx = self.find_minimum(get_index=True) + child_root_list = [] + for k, child in enumerate(min_node.children): + if child is not None: + child_root_list.append(BinomialTree(root=child, order=k)) + self.root_list.remove(self.root_list[min_idx]) + child_heap = BinomialHeap(root_list=child_root_list) + self.merge(child_heap) + + @property + def is_empty(self): + return not self.root_list + + def decrease_key(self, node, new_key): + """ + Decreases the key of the given node. + + Parameters + ========== + + node: BinomialTreeNode + The node whose key is to be reduced. + new_key + The new key of the given node, + should be less than the current key. + """ + if node.key <= new_key: + raise ValueError("The new key " + "should be less than current node's key.") + node.key = new_key + while ((not node.is_root) and + (node.parent.key > node.key)): + node.parent.key, node.key = \ + node.key, node.parent.key + node.parent.data, node.data = \ + node.data, node.parent.data + node = node.parent + + def delete(self, node): + """ + Deletes the given node. + + Parameters + ========== + + node: BinomialTreeNode + The node which is to be deleted. 
+ """ + self.decrease_key(node, self.find_minimum().key - 1) + self.delete_minimum() diff --git a/lib/python3.12/site-packages/pydatastructs/trees/m_ary_trees.py b/lib/python3.12/site-packages/pydatastructs/trees/m_ary_trees.py new file mode 100644 index 000000000..a06fda9ee --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/trees/m_ary_trees.py @@ -0,0 +1,172 @@ +from pydatastructs.utils import MAryTreeNode +from pydatastructs.linear_data_structures.arrays import ArrayForTrees +from pydatastructs.utils.misc_util import ( + Backend, raise_if_backend_is_not_python) + +__all__ = [ + 'MAryTree' +] + +class MAryTree(object): + """ + Abstract m-ary tree. + + Parameters + ========== + + key + Required if tree is to be instantiated with + root otherwise not needed. + root_data + Optional, the root node of the binary tree. + If not of type MAryTreeNode, it will consider + root as data and a new root node will + be created. + comp: lambda + Optional, A lambda function which will be used + for comparison of keys. Should return a + bool value. By default it implements less + than operator. + is_order_statistic: bool + Set it to True, if you want to use the + order statistic features of the tree. + max_children + Optional, specifies the maximum number of children + a node can have. Defaults to 2 in case nothing is + specified. + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/M-ary_tree + """ + + __slots__ = ['root_idx', 'max_children', 'comparator', 'tree', 'size', + 'is_order_statistic'] + + + def __new__(cls, key=None, root_data=None, comp=None, + is_order_statistic=False, max_children=2, + **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = object.__new__(cls) + if key is None and root_data is not None: + raise ValueError('Key required.') + key = None if root_data is None else key + root = MAryTreeNode(key, root_data) + root.is_root = True + obj.root_idx = 0 + obj.max_children = max_children + obj.tree, obj.size = ArrayForTrees(MAryTreeNode, [root]), 1 + obj.comparator = lambda key1, key2: key1 < key2 \ + if comp is None else comp + obj.is_order_statistic = is_order_statistic + return obj + + @classmethod + def methods(cls): + return ['__new__', '__str__'] + + def insert(self, key, data=None): + """ + Inserts data by the passed key using iterative + algorithm. + + Parameters + ========== + + key + The key for comparison. + data + The data to be inserted. + + Returns + ======= + + None + """ + raise NotImplementedError("This is an abstract method.") + + def delete(self, key, **kwargs): + """ + Deletes the data with the passed key + using iterative algorithm. + + Parameters + ========== + + key + The key of the node which is + to be deleted. + + Returns + ======= + + True + If the node is deleted successfully. + + None + If the node to be deleted doesn't exists. + + Note + ==== + + The node is deleted means that the connection to that + node are removed but the it is still in tree. + """ + raise NotImplementedError("This is an abstract method.") + + def search(self, key, **kwargs): + """ + Searches for the data in the binary search tree + using iterative algorithm. + + Parameters + ========== + + key + The key for searching. + parent: bool + If true then returns index of the + parent of the node with the passed + key. 
+ By default, False + + Returns + ======= + + int + If the node with the passed key is + in the tree. + tuple + The index of the searched node and + the index of the parent of that node. + None + In all other cases. + """ + raise NotImplementedError("This is an abstract method.") + + def to_binary_tree(self): + """ + Converts an m-ary tree to a binary tree. + + Returns + ======= + + TreeNode + The root of the newly created binary tree. + """ + raise NotImplementedError("This is an abstract method.") + + + def __str__(self): + to_be_printed = ['' for i in range(self.tree._last_pos_filled + 1)] + for i in range(self.tree._last_pos_filled + 1): + if self.tree[i] is not None: + node = self.tree[i] + to_be_printed[i] = (node.key, node.data) + for j in node.children: + if j is not None: + to_be_printed[i].append(j) + return str(to_be_printed) diff --git a/lib/python3.12/site-packages/pydatastructs/trees/space_partitioning_trees.py b/lib/python3.12/site-packages/pydatastructs/trees/space_partitioning_trees.py new file mode 100644 index 000000000..f13c1f280 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/trees/space_partitioning_trees.py @@ -0,0 +1,242 @@ +from pydatastructs.utils import TreeNode +from collections import deque as Queue +from pydatastructs.utils.misc_util import ( + _check_type, Backend, + raise_if_backend_is_not_python) + +__all__ = [ + 'OneDimensionalSegmentTree' +] + +class OneDimensionalSegmentTree(object): + """ + Represents one dimensional segment trees. + + Parameters + ========== + + segs: list/tuple/set + The segs should contains tuples/list/set of size 2 + denoting the start and end points of the intervals. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. 
+ + Examples + ======== + + >>> from pydatastructs import OneDimensionalSegmentTree as ODST + >>> segt = ODST([(3, 8), (9, 20)]) + >>> segt.build() + >>> segt.tree[0].key + [False, 2, 3, False] + >>> len(segt.query(4)) + 1 + + Note + ==== + + All the segments are assumed to be closed intervals, + i.e., the ends are points of segments are also included in + computation. + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Segment_tree + + """ + + __slots__ = ['segments', 'tree', 'root_idx', 'cache'] + + def __new__(cls, segs, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = object.__new__(cls) + if any((not isinstance(seg, (tuple, list, set)) or len(seg) != 2) + for seg in segs): + raise ValueError('%s is invalid set of intervals'%(segs)) + for i in range(len(segs)): + segs[i] = list(segs[i]) + segs[i].sort() + obj.segments = list(segs) + obj.tree, obj.root_idx, obj.cache = [], None, False + return obj + + @classmethod + def methods(cls): + return ['build', 'query', '__str__'] + + def _union(self, i1, i2): + """ + Helper function for taking union of two + intervals. + """ + return TreeNode([i1.key[0], i1.key[1], i2.key[2], i2.key[3]], None) + + def _intersect(self, i1, i2): + """ + Helper function for finding intersection of two + intervals. + """ + if i1 is None or i2 is None: + return False + if i1.key[2] < i2.key[1] or i2.key[2] < i1.key[1]: + return False + c1, c2 = None, None + if i1.key[2] == i2.key[1]: + c1 = (i1.key[3] and i2.key[0]) + if i2.key[2] == i1.key[1]: + c2 = (i2.key[3] and i1.key[0]) + if c1 is False and c2 is False: + return False + return True + + def _contains(self, i1, i2): + """ + Helper function for checking if the first interval + is contained in second interval. 
+ """ + if i1 is None or i2 is None: + return False + if i1.key[1] < i2.key[1] and i1.key[2] > i2.key[2]: + return True + if i1.key[1] == i2.key[1] and i1.key[2] > i2.key[2]: + return (i1.key[0] or not i2.key[0]) + if i1.key[1] < i2.key[1] and i1.key[2] == i2.key[2]: + return i1.key[3] or not i2.key[3] + if i1.key[1] == i2.key[1] and i1.key[2] == i2.key[2]: + return not ((not i1.key[3] and i2.key[3]) or (not i1.key[0] and i2.key[0])) + return False + + def _iterate(self, calls, I, idx): + """ + Helper function for filling the calls + stack. Used for imitating the stack based + approach used in recursion. + """ + if self.tree[idx].right is None: + rc = None + else: + rc = self.tree[self.tree[idx].right] + if self.tree[idx].left is None: + lc = None + else: + lc = self.tree[self.tree[idx].left] + if self._intersect(I, rc): + calls.append(self.tree[idx].right) + if self._intersect(I, lc): + calls.append(self.tree[idx].left) + return calls + + def build(self): + """ + Builds the segment tree from the segments, + using iterative algorithm based on queues. 
+ """ + if self.cache: + return None + endpoints = [] + for segment in self.segments: + endpoints.extend(segment) + endpoints.sort() + + elem_int = Queue() + elem_int.append(TreeNode([False, endpoints[0] - 1, endpoints[0], False], None)) + i = 0 + while i < len(endpoints) - 1: + elem_int.append(TreeNode([True, endpoints[i], endpoints[i], True], None)) + elem_int.append(TreeNode([False, endpoints[i], endpoints[i+1], False], None)) + i += 1 + elem_int.append(TreeNode([True, endpoints[i], endpoints[i], True], None)) + elem_int.append(TreeNode([False, endpoints[i], endpoints[i] + 1, False], None)) + + self.tree = [] + while len(elem_int) > 1: + m = len(elem_int) + while m >= 2: + I1 = elem_int.popleft() + I2 = elem_int.popleft() + I = self._union(I1, I2) + I.left = len(self.tree) + I.right = len(self.tree) + 1 + self.tree.append(I1), self.tree.append(I2) + elem_int.append(I) + m -= 2 + if m & 1 == 1: + Il = elem_int.popleft() + elem_int.append(Il) + + Ir = elem_int.popleft() + Ir.left, Ir.right = -3, -2 + self.tree.append(Ir) + self.root_idx = -1 + + for segment in self.segments: + I = TreeNode([True, segment[0], segment[1], True], None) + calls = [self.root_idx] + while calls: + idx = calls.pop() + if self._contains(I, self.tree[idx]): + if self.tree[idx].data is None: + self.tree[idx].data = [] + self.tree[idx].data.append(I) + continue + calls = self._iterate(calls, I, idx) + self.cache = True + + def query(self, qx, init_node=None): + """ + Queries the segment tree. + + Parameters + ========== + + qx: int/float + The query point + + init_node: int + The index of the node from which the query process + is to be started. + + Returns + ======= + + intervals: set + The set of the intervals which contain the query + point. + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/Segment_tree + """ + if not self.cache: + self.build() + if init_node is None: + init_node = self.root_idx + qn = TreeNode([True, qx, qx, True], None) + intervals = [] + calls = [init_node] + while calls: + idx = calls.pop() + if _check_type(self.tree[idx].data, list): + intervals.extend(self.tree[idx].data) + calls = self._iterate(calls, qn, idx) + return set(intervals) + + def __str__(self): + """ + Used for printing. + """ + if not self.cache: + self.build() + str_tree = [] + for seg in self.tree: + if seg.data is None: + data = None + else: + data = [str(sd) for sd in seg.data] + str_tree.append((seg.left, seg.key, data, seg.right)) + return str(str_tree) diff --git a/lib/python3.12/site-packages/pydatastructs/trees/tests/__init__.py b/lib/python3.12/site-packages/pydatastructs/trees/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/lib/python3.12/site-packages/pydatastructs/trees/tests/test_binary_trees.py b/lib/python3.12/site-packages/pydatastructs/trees/tests/test_binary_trees.py new file mode 100644 index 000000000..826100b78 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/trees/tests/test_binary_trees.py @@ -0,0 +1,820 @@ +from pydatastructs.trees.binary_trees import ( + BinaryTree, BinarySearchTree, BinaryTreeTraversal, AVLTree, + ArrayForTrees, BinaryIndexedTree, SelfBalancingBinaryTree, SplayTree, CartesianTree, Treap, RedBlackTree) +from pydatastructs.utils.raises_util import raises +from pydatastructs.utils.misc_util import TreeNode +from copy import deepcopy +from pydatastructs.utils.misc_util import Backend +import random +from pydatastructs.utils._backend.cpp import _nodes + +def _test_BinarySearchTree(backend): + BST = BinarySearchTree + b = BST(8, 8, backend=backend) + b.delete(8) + b.insert(8, 8) + b.insert(3, 3) + b.insert(10, 10) + b.insert(1, 1) + b.insert(6, 6) + b.insert(4, 4) + b.insert(7, 7) + b.insert(14, 14) + b.insert(13, 13) + # Explicit check for the 
__str__ method of Binary Trees Class + assert str(b) == \ + ("[(1, 8, 8, 2), (3, 3, 3, 4), (None, 10, 10, 7), (None, 1, 1, None), " + "(5, 6, 6, 6), (None, 4, 4, None), (None, 7, 7, None), (8, 14, 14, None), " + "(None, 13, 13, None)]") + assert b.root_idx == 0 + + assert b.tree[0].left == 1 + assert b.tree[0].key == 8 + assert b.tree[0].data == 8 + assert b.tree[0].right == 2 + + trav = BinaryTreeTraversal(b, backend=backend) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [1, 3, 4, 6, 7, 8, 10, 13, 14] + assert [node.key for node in pre_order] == [8, 3, 1, 6, 4, 7, 10, 14, 13] + + assert b.search(10) == 2 + assert b.search(-1) is None + assert b.delete(13) is True + assert b.search(13) is None + assert b.delete(10) is True + assert b.search(10) is None + assert b.delete(3) is True + assert b.search(3) is None + assert b.delete(13) is None + + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [1, 4, 6, 7, 8, 14] + assert [node.key for node in pre_order] == [8, 4, 1, 6, 7, 14] + + b.delete(7) + b.delete(6) + b.delete(1) + b.delete(4) + + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [8, 14] + assert [node.key for node in pre_order] == [8, 14] + + bc = BST(1, 1, backend=backend) + assert bc.insert(1, 2) is None + + b = BST(-8, 8, backend=backend) + b.insert(-3, 3) + b.insert(-10, 10) + b.insert(-1, 1) + b.insert(-6, 6) + b.insert(-4, 4) + b.insert(-7, 7) + b.insert(-14, 14) + b.insert(-13, 13) + + b.delete(-13) + b.delete(-10) + b.delete(-3) + b.delete(-13) + assert str(b) == "[(7, -8, 8, 1), (4, -1, 1, None), '', '', (6, -6, 6, 5), (None, -4, 4, None), (None, -7, 7, None), (None, -14, 14, None)]" + + bl = BST(backend=backend) + nodes = [50, 30, 90, 
70, 100, 60, 80, 55, 20, 40, 15, 10, 16, 17, 18] + for node in nodes: + bl.insert(node, node) + + assert bl.lowest_common_ancestor(80, 55, 2) == 70 + assert bl.lowest_common_ancestor(60, 70, 2) == 70 + assert bl.lowest_common_ancestor(18, 18, 2) == 18 + assert bl.lowest_common_ancestor(40, 90, 2) == 50 + + assert bl.lowest_common_ancestor(18, 10, 2) == 15 + assert bl.lowest_common_ancestor(55, 100, 2) == 90 + assert bl.lowest_common_ancestor(16, 80, 2) == 50 + assert bl.lowest_common_ancestor(30, 55, 2) == 50 + + assert raises(ValueError, lambda: bl.lowest_common_ancestor(60, 200, 2)) + assert raises(ValueError, lambda: bl.lowest_common_ancestor(200, 60, 2)) + assert raises(ValueError, lambda: bl.lowest_common_ancestor(-3, 4, 2)) + + assert bl.lowest_common_ancestor(80, 55, 1) == 70 + assert bl.lowest_common_ancestor(60, 70, 1) == 70 + assert bl.lowest_common_ancestor(18, 18, 1) == 18 + assert bl.lowest_common_ancestor(40, 90, 1) == 50 + + assert bl.lowest_common_ancestor(18, 10, 1) == 15 + assert bl.lowest_common_ancestor(55, 100, 1) == 90 + assert bl.lowest_common_ancestor(16, 80, 1) == 50 + assert bl.lowest_common_ancestor(30, 55, 1) == 50 + + assert raises(ValueError, lambda: bl.lowest_common_ancestor(60, 200, 1)) + assert raises(ValueError, lambda: bl.lowest_common_ancestor(200, 60, 1)) + assert raises(ValueError, lambda: bl.lowest_common_ancestor(-3, 4, 1)) + +def test_BinarySearchTree(): + _test_BinarySearchTree(Backend.PYTHON) + +def test_cpp_BinarySearchTree(): + _test_BinarySearchTree(Backend.CPP) + +def _test_BinaryTreeTraversal(backend): + BST = BinarySearchTree + BTT = BinaryTreeTraversal + b = BST('F', 'F', backend=backend) + b.insert('B', 'B') + b.insert('A', 'A') + b.insert('G', 'G') + b.insert('D', 'D') + b.insert('C', 'C') + b.insert('E', 'E') + b.insert('I', 'I') + b.insert('H', 'H') + + trav = BTT(b, backend=backend) + pre = trav.depth_first_search(order='pre_order') + assert [node.key for node in pre] == ['F', 'B', 'A', 'D', 'C', 'E', 'G', 'I', 
'H'] + + ino = trav.depth_first_search() + assert [node.key for node in ino] == ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I'] + + out = trav.depth_first_search(order='out_order') + assert [node.key for node in out] == ['I', 'H', 'G', 'F', 'E', 'D', 'C', 'B', 'A'] + + post = trav.depth_first_search(order='post_order') + assert [node.key for node in post] == ['A', 'C', 'E', 'D', 'B', 'H', 'I', 'G', 'F'] + + bfs = trav.breadth_first_search() + assert [node.key for node in bfs] == ['F', 'B', 'G', 'A', 'D', 'I', 'C', 'E', 'H'] + + assert raises(NotImplementedError, lambda: trav.breadth_first_search(strategy='iddfs')) + assert raises(NotImplementedError, lambda: trav.depth_first_search(order='in_out_order')) + assert raises(TypeError, lambda: BTT(1)) + +def test_BinaryTreeTraversal(): + _test_BinaryTreeTraversal(Backend.PYTHON) + +def test_cpp_BinaryTreeTraversal(): + _test_BinaryTreeTraversal(Backend.CPP) + +def _test_AVLTree(backend): + a = AVLTree('M', 'M', backend=backend) + a.insert('N', 'N') + a.insert('O', 'O') + a.insert('L', 'L') + a.insert('K', 'K') + a.insert('Q', 'Q') + a.insert('P', 'P') + a.insert('H', 'H') + a.insert('I', 'I') + a.insert('A', 'A') + assert a.root_idx == 1 + + trav = BinaryTreeTraversal(a, backend=backend) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == ['A', 'H', 'I', 'K', 'L', 'M', 'N', 'O', 'P', 'Q'] + assert [node.key for node in pre_order] == ['N', 'I', 'H', 'A', 'L', 'K', 'M', 'P', 'O', 'Q'] + + assert [a.balance_factor(a.tree[i]) for i in range(a.tree.size) if a.tree[i] is not None] == \ + [0, -1, 0, 0, 0, 0, 0, -1, 0, 0] + a1 = AVLTree(1, 1, backend=backend) + a1.insert(2, 2) + a1.insert(3, 3) + a1.insert(4, 4) + a1.insert(5, 5) + + trav = BinaryTreeTraversal(a1, backend=backend) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in 
in_order] == [1, 2, 3, 4, 5] + assert [node.key for node in pre_order] == [2, 1, 4, 3, 5] + + a3 = AVLTree(-1, 1, backend=backend) + a3.insert(-2, 2) + a3.insert(-3, 3) + a3.insert(-4, 4) + a3.insert(-5, 5) + + trav = BinaryTreeTraversal(a3, backend=backend) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [-5, -4, -3, -2, -1] + assert [node.key for node in pre_order] == [-2, -4, -5, -3, -1] + + a2 = AVLTree(backend=backend) + a2.insert(1, 1) + a2.insert(1, 1) + + trav = BinaryTreeTraversal(a2, backend=backend) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [1] + assert [node.key for node in pre_order] == [1] + + a3 = AVLTree(backend=backend) + a3.set_tree( ArrayForTrees(TreeNode, 0, backend=backend) ) + for i in range(0,7): + a3.tree.append(TreeNode(i, i, backend=backend)) + a3.tree[0].left = 1 + a3.tree[0].right = 6 + a3.tree[1].left = 5 + a3.tree[1].right = 2 + a3.tree[2].left = 3 + a3.tree[2].right = 4 + a3._left_right_rotate(0, 1) + assert str(a3) == "[(4, 0, 0, 6), (5, 1, 1, 3), (1, 2, 2, 0), (None, 3, 3, None), (None, 4, 4, None), (None, 5, 5, None), (None, 6, 6, None)]" + + trav = BinaryTreeTraversal(a3, backend=backend) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [5, 1, 3, 2, 4, 0, 6] + assert [node.key for node in pre_order] == [2, 1, 5, 3, 0, 4, 6] + + a4 = AVLTree(backend=backend) + a4.set_tree( ArrayForTrees(TreeNode, 0, backend=backend) ) + for i in range(0,7): + a4.tree.append(TreeNode(i, i,backend=backend)) + a4.tree[0].left = 1 + a4.tree[0].right = 2 + a4.tree[2].left = 3 + a4.tree[2].right = 4 + a4.tree[3].left = 5 + a4.tree[3].right = 6 + a4._right_left_rotate(0, 2) + + trav = BinaryTreeTraversal(a4, backend=backend) + 
in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [1, 0, 5, 3, 6, 2, 4] + assert [node.key for node in pre_order] == [3,0,1,5,2,6,4] + + a5 = AVLTree(is_order_statistic=True,backend=backend) + if backend==Backend.PYTHON: + a5.set_tree( ArrayForTrees(TreeNode, [ + TreeNode(10, 10), + TreeNode(5, 5), + TreeNode(17, 17), + TreeNode(2, 2), + TreeNode(9, 9), + TreeNode(12, 12), + TreeNode(20, 20), + TreeNode(3, 3), + TreeNode(11, 11), + TreeNode(15, 15), + TreeNode(18, 18), + TreeNode(30, 30), + TreeNode(13, 13), + TreeNode(33, 33) + ]) ) + else: + a5.set_tree( ArrayForTrees(_nodes.TreeNode, [ + TreeNode(10, 10,backend=backend), + TreeNode(5, 5,backend=backend), + TreeNode(17, 17,backend=backend), + TreeNode(2, 2,backend=backend), + TreeNode(9, 9,backend=backend), + TreeNode(12, 12,backend=backend), + TreeNode(20, 20,backend=backend), + TreeNode(3, 3,backend=backend), + TreeNode(11, 11,backend=backend), + TreeNode(15, 15,backend=backend), + TreeNode(18, 18,backend=backend), + TreeNode(30, 30,backend=backend), + TreeNode(13, 13,backend=backend), + TreeNode(33, 33,backend=backend) + ],backend=backend) ) + + a5.tree[0].left, a5.tree[0].right, a5.tree[0].parent, a5.tree[0].height = \ + 1, 2, None, 4 + a5.tree[1].left, a5.tree[1].right, a5.tree[1].parent, a5.tree[1].height = \ + 3, 4, 0, 2 + a5.tree[2].left, a5.tree[2].right, a5.tree[2].parent, a5.tree[2].height = \ + 5, 6, 0, 3 + a5.tree[3].left, a5.tree[3].right, a5.tree[3].parent, a5.tree[3].height = \ + None, 7, 1, 1 + a5.tree[4].left, a5.tree[4].right, a5.tree[4].parent, a5.tree[4].height = \ + None, None, 1, 0 + a5.tree[5].left, a5.tree[5].right, a5.tree[5].parent, a5.tree[5].height = \ + 8, 9, 2, 2 + a5.tree[6].left, a5.tree[6].right, a5.tree[6].parent, a5.tree[6].height = \ + 10, 11, 2, 2 + a5.tree[7].left, a5.tree[7].right, a5.tree[7].parent, a5.tree[7].height = \ + None, None, 3, 0 + a5.tree[8].left, 
a5.tree[8].right, a5.tree[8].parent, a5.tree[8].height = \ + None, None, 5, 0 + a5.tree[9].left, a5.tree[9].right, a5.tree[9].parent, a5.tree[9].height = \ + 12, None, 5, 1 + a5.tree[10].left, a5.tree[10].right, a5.tree[10].parent, a5.tree[10].height = \ + None, None, 6, 0 + a5.tree[11].left, a5.tree[11].right, a5.tree[11].parent, a5.tree[11].height = \ + None, 13, 6, 1 + a5.tree[12].left, a5.tree[12].right, a5.tree[12].parent, a5.tree[12].height = \ + None, None, 9, 0 + a5.tree[13].left, a5.tree[13].right, a5.tree[13].parent, a5.tree[13].height = \ + None, None, 11, 0 + + # testing order statistics + a5.tree[0].size = 14 + a5.tree[1].size = 4 + a5.tree[2].size = 9 + a5.tree[3].size = 2 + a5.tree[4].size = 1 + a5.tree[5].size = 4 + a5.tree[6].size = 4 + a5.tree[7].size = 1 + a5.tree[8].size = 1 + a5.tree[9].size = 2 + a5.tree[10].size = 1 + a5.tree[11].size = 2 + a5.tree[12].size = 1 + a5.tree[13].size = 1 + assert str(a5) == "[(1, 10, 10, 2), (3, 5, 5, 4), (5, 17, 17, 6), (None, 2, 2, 7), (None, 9, 9, None), (8, 12, 12, 9), (10, 20, 20, 11), (None, 3, 3, None), (None, 11, 11, None), (12, 15, 15, None), (None, 18, 18, None), (None, 30, 30, 13), (None, 13, 13, None), (None, 33, 33, None)]" + + assert raises(ValueError, lambda: a5.select(0)) + assert raises(ValueError, lambda: a5.select(15)) + + assert a5.rank(-1) is None + def test_select_rank(expected_output): + if backend==Backend.PYTHON: + output = [] + for i in range(len(expected_output)): + output.append(a5.select(i + 1).key) + assert output == expected_output + output = [] + expected_ranks = [i + 1 for i in range(len(expected_output))] + for i in range(len(expected_output)): + output.append(a5.rank(expected_output[i])) + assert output == expected_ranks + + test_select_rank([2, 3, 5, 9, 10, 11, 12, 13, 15, 17, 18, 20, 30, 33]) + a5.delete(9) + a5.delete(13) + a5.delete(20) + assert str(a5) == "[(7, 10, 10, 5), (None, 5, 5, None), (0, 17, 17, 6), (None, 2, 2, None), '', (8, 12, 12, 9), (10, 30, 30, 13), (3, 3, 
3, 1), (None, 11, 11, None), (None, 15, 15, None), (None, 18, 18, None), '', '', (None, 33, 33, None)]" + + trav = BinaryTreeTraversal(a5, backend=backend) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [2, 3, 5, 10, 11, 12, 15, 17, 18, 30, 33] + assert [node.key for node in pre_order] == [17, 10, 3, 2, 5, 12, 11, 15, 30, 18, 33] + + test_select_rank([2, 3, 5, 10, 11, 12, 15, 17, 18, 30, 33]) + a5.delete(10) + a5.delete(17) + assert str(a5) == "[(7, 11, 11, 5), (None, 5, 5, None), (0, 18, 18, 6), (None, 2, 2, None), '', (None, 12, 12, 9), (None, 30, 30, 13), (3, 3, 3, 1), '', (None, 15, 15, None), '', '', '', (None, 33, 33, None)]" + test_select_rank([2, 3, 5, 11, 12, 15, 18, 30, 33]) + a5.delete(11) + a5.delete(30) + test_select_rank([2, 3, 5, 12, 15, 18, 33]) + a5.delete(12) + test_select_rank([2, 3, 5, 15, 18, 33]) + a5.delete(15) + test_select_rank([2, 3, 5, 18, 33]) + a5.delete(18) + test_select_rank([2, 3, 5, 33]) + a5.delete(33) + test_select_rank([2, 3, 5]) + a5.delete(5) + test_select_rank([2, 3]) + a5.delete(3) + test_select_rank([2]) + a5.delete(2) + test_select_rank([]) + assert str(a5) == "[(None, None, None, None)]" + +def test_AVLTree(): + _test_AVLTree(backend=Backend.PYTHON) +def test_cpp_AVLTree(): + _test_AVLTree(backend=Backend.CPP) + +def _test_BinaryIndexedTree(backend): + + FT = BinaryIndexedTree + + t = FT([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], backend=backend) + + assert t.get_sum(0, 2) == 6 + assert t.get_sum(0, 4) == 15 + assert t.get_sum(0, 9) == 55 + t.update(0, 100) + assert t.get_sum(0, 2) == 105 + assert t.get_sum(0, 4) == 114 + assert t.get_sum(1, 9) == 54 + +def test_BinaryIndexedTree(): + _test_BinaryIndexedTree(Backend.PYTHON) + +def test_cpp_BinaryIndexedTree(): + _test_BinaryIndexedTree(Backend.CPP) + +def _test_CartesianTree(backend): + tree = CartesianTree(backend=backend) + tree.insert(3, 1, 3) + tree.insert(1, 6, 1) + 
tree.insert(0, 9, 0) + tree.insert(5, 11, 5) + tree.insert(4, 14, 4) + tree.insert(9, 17, 9) + tree.insert(7, 22, 7) + tree.insert(6, 42, 6) + tree.insert(8, 49, 8) + tree.insert(2, 99, 2) + # Explicit check for the redefined __str__ method of Cartesian Trees Class + + trav = BinaryTreeTraversal(tree, backend=backend) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + assert [node.key for node in pre_order] == [3, 1, 0, 2, 5, 4, 9, 7, 6, 8] + + tree.insert(1.5, 4, 1.5) + + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [0, 1, 1.5, 2, 3, 4, 5, 6, 7, 8, 9] + assert [node.key for node in pre_order] == [3, 1.5, 1, 0, 2, 5, 4, 9, 7, 6, 8] + + k = tree.search(1.5) + assert tree.tree[tree.tree[k].parent].key == 3 + tree.delete(1.5) + assert tree.root_idx == 0 + tree.tree[tree.tree[tree.root_idx].left].key == 1 + tree.delete(8) + assert tree.search(8) is None + tree.delete(7) + assert tree.search(7) is None + tree.delete(3) + assert tree.search(3) is None + assert tree.delete(18) is None + +def test_CartesianTree(): + _test_CartesianTree(backend=Backend.PYTHON) + +def test_cpp_CartesianTree(): + _test_CartesianTree(backend=Backend.CPP) + +def _test_Treap(backend): + + random.seed(0) + tree = Treap(backend=backend) + tree.insert(7, 7) + tree.insert(2, 2) + tree.insert(3, 3) + tree.insert(4, 4) + tree.insert(5, 5) + + assert isinstance(tree.tree[0].priority, float) + tree.delete(1) + assert tree.search(1) is None + assert tree.search(2) == 1 + assert tree.delete(1) is None + +def test_Treap(): + _test_Treap(Backend.PYTHON) + +def test_cpp_Treap(): + _test_Treap(Backend.CPP) + +def _test_SelfBalancingBinaryTree(backend): + """ + https://github.com/codezonediitj/pydatastructs/issues/234 + """ + tree = 
SelfBalancingBinaryTree(backend=backend) + tree.insert(5, 5) + tree.insert(5.5, 5.5) + tree.insert(4.5, 4.5) + tree.insert(4.6, 4.6) + tree.insert(4.4, 4.4) + tree.insert(4.55, 4.55) + tree.insert(4.65, 4.65) + original_tree = str(tree) + tree._right_rotate(3, 5) + + assert str(tree) == "[(2, 5, 5, 1), (None, 5.5, 5.5, None), (4, 4.5, 4.5, 5), (None, 4.6, 4.6, 6), (None, 4.4, 4.4, None), (None, 4.55, 4.55, 3), (None, 4.65, 4.65, None)]" + assert tree.tree[3].parent == 5 + assert tree.tree[2].right != 3 + assert tree.tree[tree.tree[5].parent].right == 5 + assert tree.root_idx == 0 + + trav = BinaryTreeTraversal(tree, backend=backend) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [4.4, 4.5, 4.55, 4.6, 4.65, 5, 5.5] + assert [node.key for node in pre_order] == [5, 4.5, 4.4, 4.55, 4.6, 4.65, 5.5] + + assert tree.tree[tree.tree[3].parent].right == 3 + tree._left_rotate(5, 3) + assert str(tree) == original_tree + tree.insert(4.54, 4.54) + tree.insert(4.56, 4.56) + tree._left_rotate(5, 8) + assert tree.tree[tree.tree[8].parent].left == 8 + assert str(tree) == "[(2, 5, 5, 1), (None, 5.5, 5.5, None), (4, 4.5, 4.5, 3), (8, 4.6, 4.6, 6), (None, 4.4, 4.4, None), (7, 4.55, 4.55, None), (None, 4.65, 4.65, None), (None, 4.54, 4.54, None), (5, 4.56, 4.56, None)]" + + tree._left_right_rotate(0, 2) + assert str(tree) == "[(6, 5, 5, 1), (None, 5.5, 5.5, None), (4, 4.5, 4.5, 8), (2, 4.6, 4.6, 0), (None, 4.4, 4.4, None), (7, 4.55, 4.55, None), (None, 4.65, 4.65, None), (None, 4.54, 4.54, None), (5, 4.56, 4.56, None)]" + + tree._right_left_rotate(0, 2) + assert str(tree) == "[(6, 5, 5, None), (None, 5.5, 5.5, None), (None, 4.5, 4.5, 8), (2, 4.6, 4.6, 4), (0, 4.4, 4.4, 2), (7, 4.55, 4.55, None), (None, 4.65, 4.65, None), (None, 4.54, 4.54, None), (5, 4.56, 4.56, None)]" + +def test_SelfBalancingBinaryTree(): + _test_SelfBalancingBinaryTree(Backend.PYTHON) +def 
test_cpp_SelfBalancingBinaryTree(): + _test_SelfBalancingBinaryTree(Backend.CPP) + +def _test_SplayTree(backend): + t = SplayTree(100, 100, backend=backend) + t.insert(50, 50) + t.insert(200, 200) + t.insert(40, 40) + t.insert(30, 30) + t.insert(20, 20) + t.insert(55, 55) + assert str(t) == "[(None, 100, 100, None), (None, 50, 50, None), (0, 200, 200, None), (None, 40, 40, 1), (5, 30, 30, 3), (None, 20, 20, None), (4, 55, 55, 2)]" + assert t.root_idx == 6 + + trav = BinaryTreeTraversal(t, backend=backend) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [20, 30, 40, 50, 55, 100, 200] + assert [node.key for node in pre_order] == [55, 30, 20, 40, 50, 200, 100] + + t.delete(40) + + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [20, 30, 50, 55, 100, 200] + assert [node.key for node in pre_order] == [50, 30, 20, 55, 200, 100] + + t.delete(150) + + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [20, 30, 50, 55, 100, 200] + assert [node.key for node in pre_order] == [50, 30, 20, 55, 200, 100] + + t1 = SplayTree(1000, 1000, backend=backend) + t1.insert(2000, 2000) + + trav2 = BinaryTreeTraversal(t1, backend=backend) + in_order = trav2.depth_first_search(order='in_order') + pre_order = trav2.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [1000, 2000] + assert [node.key for node in pre_order] == [2000, 1000] + + t.join(t1) + assert str(t) == "[(None, 100, 100, None), '', (6, 200, 200, 8), (4, 50, 50, None), (5, 30, 30, None), (None, 20, 20, None), (3, 55, 55, 0), (None, 1000, 1000, None), (7, 2000, 2000, None), '']" + + if backend == Backend.PYTHON: + trav3 = BinaryTreeTraversal(t, backend=backend) + in_order = 
trav3.depth_first_search(order='in_order') + pre_order = trav3.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [20, 30, 50, 55, 100, 200, 1000, 2000] + assert [node.key for node in pre_order] == [200, 55, 50, 30, 20, 100, 2000, 1000] + + s = t.split(200) + assert str(s) == "[(1, 2000, 2000, None), (None, 1000, 1000, None)]" + + trav4 = BinaryTreeTraversal(s, backend=backend) + in_order = trav4.depth_first_search(order='in_order') + pre_order = trav4.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [1000, 2000] + assert [node.key for node in pre_order] == [2000, 1000] + + if backend == Backend.PYTHON: + trav5 = BinaryTreeTraversal(t, backend=backend) + in_order = trav5.depth_first_search(order='in_order') + pre_order = trav5.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [20, 30, 50, 55, 100, 200] + assert [node.key for node in pre_order] == [200, 55, 50, 30, 20, 100] + +def test_SplayTree(): + _test_SplayTree(Backend.PYTHON) + +def test_cpp_SplayTree(): + _test_SplayTree(Backend.CPP) + +def _test_RedBlackTree(backend): + tree = RedBlackTree(backend=backend) + tree.insert(10, 10) + tree.insert(18, 18) + tree.insert(7, 7) + tree.insert(15, 15) + tree.insert(16, 16) + tree.insert(30, 30) + tree.insert(25, 25) + tree.insert(40, 40) + tree.insert(60, 60) + tree.insert(2, 2) + tree.insert(17, 17) + tree.insert(6, 6) + assert str(tree) == "[(11, 10, 10, 3), (10, 18, 18, None), (None, 7, 7, None), (None, 15, 15, None), (0, 16, 16, 6), (None, 30, 30, None), (1, 25, 25, 7), (5, 40, 40, 8), (None, 60, 60, None), (None, 2, 2, None), (None, 17, 17, None), (9, 6, 6, 2)]" + assert tree.root_idx == 4 + + trav = BinaryTreeTraversal(tree, backend=backend) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [2, 6, 7, 10, 15, 16, 17, 18, 25, 30, 40, 60] + assert [node.key for node 
in pre_order] == [16, 10, 6, 2, 7, 15, 25, 18, 17, 40, 30, 60] + + assert tree.lower_bound(0) == 2 + assert tree.lower_bound(2) == 2 + assert tree.lower_bound(3) == 6 + assert tree.lower_bound(7) == 7 + assert tree.lower_bound(25) == 25 + assert tree.lower_bound(32) == 40 + assert tree.lower_bound(41) == 60 + assert tree.lower_bound(60) == 60 + assert tree.lower_bound(61) is None + + assert tree.upper_bound(0) == 2 + assert tree.upper_bound(2) == 6 + assert tree.upper_bound(3) == 6 + assert tree.upper_bound(7) == 10 + assert tree.upper_bound(25) == 30 + assert tree.upper_bound(32) == 40 + assert tree.upper_bound(41) == 60 + assert tree.upper_bound(60) is None + assert tree.upper_bound(61) is None + + tree = RedBlackTree(backend=backend) + + assert tree.lower_bound(1) is None + assert tree.upper_bound(0) is None + + tree.insert(10) + tree.insert(20) + tree.insert(30) + tree.insert(40) + tree.insert(50) + tree.insert(60) + tree.insert(70) + tree.insert(80) + tree.insert(90) + tree.insert(100) + tree.insert(110) + tree.insert(120) + tree.insert(130) + tree.insert(140) + tree.insert(150) + tree.insert(160) + tree.insert(170) + tree.insert(180) + assert str(tree) == "[(None, 10, None, None), (0, 20, None, 2), (None, 30, None, None), (1, 40, None, 5), (None, 50, None, None), (4, 60, None, 6), (None, 70, None, None), (3, 80, None, 11), (None, 90, None, None), (8, 100, None, 10), (None, 110, None, None), (9, 120, None, 13), (None, 130, None, None), (12, 140, None, 15), (None, 150, None, None), (14, 160, None, 16), (None, 170, None, 17), (None, 180, None, None)]" + + assert tree._get_sibling(7) is None + + trav = BinaryTreeTraversal(tree, backend=backend) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [10, 20, 30, 40, 50, 60, 70, 80, 90, + 100, 110, 120, 130, 140, 150, 160, 170, 180] + assert [node.key for node in pre_order] == [80, 40, 20, 10, 30, 60, 50, 70, 120, 
100, + 90, 110, 140, 130, 160, 150, 170, 180] + + tree.delete(180) + tree.delete(130) + tree.delete(110) + tree.delete(190) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, + 120, 140, 150, 160, 170] + assert [node.key for node in pre_order] == [80, 40, 20, 10, 30, 60, 50, 70, 120, 100, + 90, 160, 140, 150, 170] + + tree.delete(170) + tree.delete(100) + tree.delete(60) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [10, 20, 30, 40, 50, 70, 80, 90, 120, 140, 150, 160] + assert [node.key for node in pre_order] == [80, 40, 20, 10, 30, 50, 70, 120, 90, 150, 140, 160] + + tree.delete(70) + tree.delete(140) + + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [10, 20, 30, 40, 50, 80, 90, 120, 150, 160] + assert [node.key for node in pre_order] == [80, 40, 20, 10, 30, 50, 120, 90, 150, 160] + + tree.delete(150) + tree.delete(120) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [10, 20, 30, 40, 50, 80, 90, 160] + assert [node.key for node in pre_order] == [40, 20, 10, 30, 80, 50, 90, 160] + + tree.delete(50) + tree.delete(80) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [10, 20, 30, 40, 90, 160] + assert [node.key for node in pre_order] == [40, 20, 10, 30, 90, 160] + + tree.delete(30) + tree.delete(20) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [10, 40, 90, 160] + assert [node.key for node 
in pre_order] == [40, 10, 90, 160] + + tree.delete(10) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [40, 90, 160] + assert [node.key for node in pre_order] == [90, 40, 160] + + tree.delete(40) + tree.delete(90) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [160] + assert [node.key for node in pre_order] == [160] + + tree.delete(160) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order if node.key is not None] == [] + assert [node.key for node in pre_order if node.key is not None] == [] + + tree = RedBlackTree(backend=backend) + tree.insert(50) + tree.insert(40) + tree.insert(30) + tree.insert(20) + tree.insert(10) + tree.insert(5) + + trav = BinaryTreeTraversal(tree, backend=backend) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [5, 10, 20, 30, 40, 50] + assert [node.key for node in pre_order] == [40, 20, 10, 5, 30, 50] + + assert tree.search(50) == 0 + assert tree.search(20) == 3 + assert tree.search(30) == 2 + tree.delete(50) + tree.delete(20) + tree.delete(30) + assert tree.search(50) is None + assert tree.search(20) is None + assert tree.search(30) is None + + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [5, 10, 40] + assert [node.key for node in pre_order] == [10, 5, 40] + + tree = RedBlackTree(backend=backend) + tree.insert(10) + tree.insert(5) + tree.insert(20) + tree.insert(15) + + trav = BinaryTreeTraversal(tree, backend=backend) + in_order = trav.depth_first_search(order='in_order') + pre_order = 
trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [5, 10, 15, 20] + assert [node.key for node in pre_order] == [10, 5, 20, 15] + + tree.delete(5) + + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [10, 15, 20] + assert [node.key for node in pre_order] == [15, 10, 20] + + tree = RedBlackTree(backend=backend) + tree.insert(10) + tree.insert(5) + tree.insert(20) + tree.insert(15) + tree.insert(2) + tree.insert(6) + + trav = BinaryTreeTraversal(tree,backend=backend) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [2, 5, 6, 10, 15, 20] + assert [node.key for node in pre_order] == [10, 5, 2, 6, 20, 15] + + tree.delete(10) + + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [2, 5, 6, 15, 20] + assert [node.key for node in pre_order] == [6, 5, 2, 20, 15] + +def test_RedBlackTree(): + _test_RedBlackTree(Backend.PYTHON) + +def test_cpp_RedBlackTree(): + _test_RedBlackTree(Backend.CPP) diff --git a/lib/python3.12/site-packages/pydatastructs/trees/tests/test_heaps.py b/lib/python3.12/site-packages/pydatastructs/trees/tests/test_heaps.py new file mode 100644 index 000000000..dece2f132 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/trees/tests/test_heaps.py @@ -0,0 +1,236 @@ +from pydatastructs.trees.heaps import BinaryHeap, TernaryHeap, BinomialHeap, DHeap +from pydatastructs.linear_data_structures.arrays import DynamicOneDimensionalArray +from pydatastructs.miscellaneous_data_structures.binomial_trees import BinomialTree +from pydatastructs.utils.misc_util import TreeNode, BinomialTreeNode +from pydatastructs.utils.raises_util import raises +from collections import deque as Queue + +def test_BinaryHeap(): + + 
max_heap = BinaryHeap(heap_property="max") + + assert raises(IndexError, lambda: max_heap.extract()) + + max_heap.insert(100, 100) + max_heap.insert(19, 19) + max_heap.insert(36, 36) + max_heap.insert(17, 17) + max_heap.insert(3, 3) + max_heap.insert(25, 25) + max_heap.insert(1, 1) + max_heap.insert(2, 2) + max_heap.insert(7, 7) + assert str(max_heap) == \ + ("[(100, 100, [1, 2]), (19, 19, [3, 4]), " + "(36, 36, [5, 6]), (17, 17, [7, 8]), " + "(3, 3, []), (25, 25, []), (1, 1, []), " + "(2, 2, []), (7, 7, [])]") + + assert max_heap.extract().key == 100 + + expected_sorted_elements = [36, 25, 19, 17, 7, 3, 2, 1] + l = max_heap.heap[0].left + l = max_heap.heap[0].right + sorted_elements = [] + for _ in range(8): + sorted_elements.append(max_heap.extract().key) + assert expected_sorted_elements == sorted_elements + + elements = [ + TreeNode(7, 7), TreeNode(25, 25), TreeNode(100, 100), + TreeNode(1, 1), TreeNode(2, 2), TreeNode(3, 3), + TreeNode(17, 17), TreeNode(19, 19), TreeNode(36, 36) + ] + min_heap = BinaryHeap(elements=elements, heap_property="min") + assert min_heap.extract().key == 1 + + expected_sorted_elements = [2, 3, 7, 17, 19, 25, 36, 100] + sorted_elements = [min_heap.extract().key for _ in range(8)] + assert expected_sorted_elements == sorted_elements + + non_TreeNode_elements = [ + (7, 7), TreeNode(25, 25), TreeNode(100, 100), + TreeNode(1, 1), (2, 2), TreeNode(3, 3), + TreeNode(17, 17), TreeNode(19, 19), TreeNode(36, 36) + ] + assert raises(TypeError, lambda: + BinaryHeap(elements = non_TreeNode_elements, heap_property='min')) + + non_TreeNode_elements = DynamicOneDimensionalArray(int, 0) + non_TreeNode_elements.append(1) + non_TreeNode_elements.append(2) + assert raises(TypeError, lambda: + BinaryHeap(elements = non_TreeNode_elements, heap_property='min')) + + non_heapable = "[1, 2, 3]" + assert raises(ValueError, lambda: + BinaryHeap(elements = non_heapable, heap_property='min')) + +def test_TernaryHeap(): + max_heap = TernaryHeap(heap_property="max") 
+ assert raises(IndexError, lambda: max_heap.extract()) + max_heap.insert(100, 100) + max_heap.insert(19, 19) + max_heap.insert(36, 36) + max_heap.insert(17, 17) + max_heap.insert(3, 3) + max_heap.insert(25, 25) + max_heap.insert(1, 1) + max_heap.insert(2, 2) + max_heap.insert(7, 7) + assert str(max_heap) == \ + ('[(100, 100, [1, 2, 3]), (25, 25, [4, 5, 6]), ' + '(36, 36, [7, 8]), (17, 17, []), ' + '(3, 3, []), (19, 19, []), (1, 1, []), ' + '(2, 2, []), (7, 7, [])]') + + assert max_heap.extract().key == 100 + + expected_sorted_elements = [36, 25, 19, 17, 7, 3, 2, 1] + sorted_elements = [] + for _ in range(8): + sorted_elements.append(max_heap.extract().key) + assert expected_sorted_elements == sorted_elements + + elements = [ + TreeNode(7, 7), TreeNode(25, 25), TreeNode(100, 100), + TreeNode(1, 1), TreeNode(2, 2), TreeNode(3, 3), + TreeNode(17, 17), TreeNode(19, 19), TreeNode(36, 36) + ] + min_heap = TernaryHeap(elements=elements, heap_property="min") + expected_extracted_element = min_heap.heap[0].key + assert min_heap.extract().key == expected_extracted_element + + expected_sorted_elements = [2, 3, 7, 17, 19, 25, 36, 100] + sorted_elements = [min_heap.extract().key for _ in range(8)] + assert expected_sorted_elements == sorted_elements + +def test_DHeap(): + assert raises(ValueError, lambda: DHeap(heap_property="none", d=4)) + max_heap = DHeap(heap_property="max", d=5) + assert raises(IndexError, lambda: max_heap.extract()) + max_heap.insert(100, 100) + max_heap.insert(19, 19) + max_heap.insert(36, 36) + max_heap.insert(17, 17) + max_heap.insert(3, 3) + max_heap.insert(25, 25) + max_heap.insert(1, 1) + max_heap = DHeap(max_heap.heap, heap_property="max", d=4) + max_heap.insert(2, 2) + max_heap.insert(7, 7) + assert str(max_heap) == \ + ('[(100, 100, [1, 2, 3, 4]), (25, 25, [5, 6, 7, 8]), ' + '(36, 36, []), (17, 17, []), (3, 3, []), (19, 19, []), ' + '(1, 1, []), (2, 2, []), (7, 7, [])]') + + assert max_heap.extract().key == 100 + + expected_sorted_elements = [36, 
25, 19, 17, 7, 3, 2, 1] + sorted_elements = [] + for _ in range(8): + sorted_elements.append(max_heap.extract().key) + assert expected_sorted_elements == sorted_elements + + elements = [ + TreeNode(7, 7), TreeNode(25, 25), TreeNode(100, 100), + TreeNode(1, 1), TreeNode(2, 2), TreeNode(3, 3), + TreeNode(17, 17), TreeNode(19, 19), TreeNode(36, 36) + ] + min_heap = DHeap(elements=DynamicOneDimensionalArray(TreeNode, 9, elements), heap_property="min") + assert min_heap.extract().key == 1 + + expected_sorted_elements = [2, 3, 7, 17, 19, 25, 36, 100] + sorted_elements = [min_heap.extract().key for _ in range(8)] + assert expected_sorted_elements == sorted_elements + +def test_BinomialHeap(): + + # Corner cases + assert raises(TypeError, lambda: + BinomialHeap( + root_list=[BinomialTreeNode(1, 1), None]) + ) is True + tree1 = BinomialTree(BinomialTreeNode(1, 1), 0) + tree2 = BinomialTree(BinomialTreeNode(2, 2), 0) + bh = BinomialHeap(root_list=[tree1, tree2]) + assert raises(TypeError, lambda: + bh.merge_tree(BinomialTreeNode(2, 2), None)) + assert raises(TypeError, lambda: + bh.merge(None)) + + # Testing BinomialHeap.merge + nodes = [BinomialTreeNode(1, 1), # 0 + BinomialTreeNode(3, 3), # 1 + BinomialTreeNode(9, 9), # 2 + BinomialTreeNode(11, 11), # 3 + BinomialTreeNode(6, 6), # 4 + BinomialTreeNode(14, 14), # 5 + BinomialTreeNode(2, 2), # 6 + BinomialTreeNode(7, 7), # 7 + BinomialTreeNode(4, 4), # 8 + BinomialTreeNode(8, 8), # 9 + BinomialTreeNode(12, 12), # 10 + BinomialTreeNode(10, 10), # 11 + BinomialTreeNode(5, 5), # 12 + BinomialTreeNode(21, 21)] # 13 + + nodes[2].add_children(nodes[3]) + nodes[4].add_children(nodes[5]) + nodes[6].add_children(nodes[9], nodes[8], nodes[7]) + nodes[7].add_children(nodes[11], nodes[10]) + nodes[8].add_children(nodes[12]) + nodes[10].add_children(nodes[13]) + + tree11 = BinomialTree(nodes[0], 0) + tree12 = BinomialTree(nodes[2], 1) + tree13 = BinomialTree(nodes[6], 3) + tree21 = BinomialTree(nodes[1], 0) + + heap1 = 
BinomialHeap(root_list=[tree11, tree12, tree13]) + heap2 = BinomialHeap(root_list=[tree21]) + + def bfs(heap): + bfs_trav = [] + for i in range(len(heap.root_list)): + layer = [] + bfs_q = Queue() + bfs_q.append(heap.root_list[i].root) + while len(bfs_q) != 0: + curr_node = bfs_q.popleft() + if curr_node is not None: + layer.append(curr_node.key) + for _i in range(curr_node.children._last_pos_filled + 1): + bfs_q.append(curr_node.children[_i]) + if layer != []: + bfs_trav.append(layer) + return bfs_trav + + heap1.merge(heap2) + expected_bfs_trav = [[1, 3, 9, 11], [2, 8, 4, 7, 5, 10, 12, 21]] + assert bfs(heap1) == expected_bfs_trav + + # Testing Binomial.find_minimum + assert heap1.find_minimum().key == 1 + + # Testing Binomial.delete_minimum + heap1.delete_minimum() + assert bfs(heap1) == [[3], [9, 11], [2, 8, 4, 7, 5, 10, 12, 21]] + assert raises(ValueError, lambda: heap1.decrease_key(nodes[3], 15)) + heap1.decrease_key(nodes[3], 0) + assert bfs(heap1) == [[3], [0, 9], [2, 8, 4, 7, 5, 10, 12, 21]] + heap1.delete(nodes[12]) + assert bfs(heap1) == [[3, 8], [0, 9, 2, 7, 4, 10, 12, 21]] + + # Testing BinomialHeap.insert + heap = BinomialHeap() + assert raises(IndexError, lambda: heap.find_minimum()) + heap.insert(1, 1) + heap.insert(3, 3) + heap.insert(6, 6) + heap.insert(9, 9) + heap.insert(14, 14) + heap.insert(11, 11) + heap.insert(2, 2) + heap.insert(7, 7) + assert bfs(heap) == [[1, 3, 6, 2, 9, 7, 11, 14]] diff --git a/lib/python3.12/site-packages/pydatastructs/trees/tests/test_m_ary_trees.py b/lib/python3.12/site-packages/pydatastructs/trees/tests/test_m_ary_trees.py new file mode 100644 index 000000000..6cbc84ace --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/trees/tests/test_m_ary_trees.py @@ -0,0 +1,5 @@ +from pydatastructs import MAryTree + +def test_MAryTree(): + m = MAryTree(1, 1) + assert str(m) == '[(1, 1)]' diff --git a/lib/python3.12/site-packages/pydatastructs/trees/tests/test_space_partitioning_tree.py 
b/lib/python3.12/site-packages/pydatastructs/trees/tests/test_space_partitioning_tree.py new file mode 100644 index 000000000..99f0e84cc --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/trees/tests/test_space_partitioning_tree.py @@ -0,0 +1,20 @@ +from pydatastructs import OneDimensionalSegmentTree +from pydatastructs.utils.raises_util import raises + +def test_OneDimensionalSegmentTree(): + ODST = OneDimensionalSegmentTree + segt = ODST([(0, 5), (1, 6), (9, 13), (1, 2), (3, 8), (9, 20)]) + assert segt.cache is False + segt2 = ODST([(1, 4)]) + assert str(segt2) == ("[(None, [False, 0, 1, False], None, None), " + "(None, [True, 1, 1, True], ['(None, [True, 1, 4, True], None, None)'], " + "None), (None, [False, 1, 4, False], None, None), (None, [True, 4, 4, True], " + "None, None), (0, [False, 0, 1, True], None, 1), (2, [False, 1, 4, True], " + "['(None, [True, 1, 4, True], None, None)'], 3), (4, [False, 0, 4, True], " + "None, 5), (None, [False, 4, 5, False], None, None), (-3, [False, 0, 5, " + "False], None, -2)]") + assert len(segt.query(1.5)) == 3 + assert segt.cache is True + assert len(segt.query(-1)) == 0 + assert len(segt.query(2.8)) == 2 + assert raises(ValueError, lambda: ODST([(1, 2, 3)])) diff --git a/lib/python3.12/site-packages/pydatastructs/utils/__init__.py b/lib/python3.12/site-packages/pydatastructs/utils/__init__.py new file mode 100644 index 000000000..c4971be32 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/utils/__init__.py @@ -0,0 +1,29 @@ +__all__ = [] + +from . 
import ( + misc_util, + testing_util, +) + +from .misc_util import ( + TreeNode, + MAryTreeNode, + LinkedListNode, + BinomialTreeNode, + AdjacencyListGraphNode, + AdjacencyMatrixGraphNode, + GraphEdge, + Set, + CartesianTreeNode, + RedBlackTreeNode, + TrieNode, + SkipNode, + summation, + greatest_common_divisor, + minimum, + Backend +) +from .testing_util import test + +__all__.extend(misc_util.__all__) +__all__.extend(testing_util.__all__) diff --git a/lib/python3.12/site-packages/pydatastructs/utils/_backend/__init__.py b/lib/python3.12/site-packages/pydatastructs/utils/_backend/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/lib/python3.12/site-packages/pydatastructs/utils/misc_util.py b/lib/python3.12/site-packages/pydatastructs/utils/misc_util.py new file mode 100644 index 000000000..3672c58b9 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/utils/misc_util.py @@ -0,0 +1,632 @@ +import math, pydatastructs +from enum import Enum +from pydatastructs.utils._backend.cpp import _nodes, _graph_utils + +__all__ = [ + 'TreeNode', + 'MAryTreeNode', + 'LinkedListNode', + 'BinomialTreeNode', + 'AdjacencyListGraphNode', + 'AdjacencyMatrixGraphNode', + 'GraphEdge', + 'Set', + 'CartesianTreeNode', + 'RedBlackTreeNode', + 'TrieNode', + 'SkipNode', + 'minimum', + 'summation', + 'greatest_common_divisor', + 'Backend' +] + + +class Backend(Enum): + + PYTHON = 'Python' + CPP = 'Cpp' + LLVM = 'Llvm' + + def __str__(self): + return self.value + +def raise_if_backend_is_not_python(api, backend): + if backend != Backend.PYTHON: + raise ValueError("As of {} version, only {} backend is supported for {} API".format( + pydatastructs.__version__, str(Backend.PYTHON), api)) + +_check_type = lambda a, t: isinstance(a, t) +NoneType = type(None) + +class Node(object): + """ + Abstract class representing a node. + """ + pass + +class TreeNode(Node): + """ + Represents node in trees. 
+ + Parameters + ========== + + key + Required for comparison operations. + data + Any valid data to be stored in the node. + left: int + Optional, index of the left child node. + right: int + Optional, index of the right child node. + backend: pydatastructs.Backend + The backend to be used. Available backends: Python and C++ + Optional, by default, the Python backend is used. For faster execution, use the C++ backend. + """ + + __slots__ = ['key', 'data', 'left', 'right', 'is_root', + 'height', 'parent', 'size'] + + @classmethod + def methods(cls): + return ['__new__', '__str__'] + + def __new__(cls, key, data=None, **kwargs): + backend = kwargs.get('backend', Backend.PYTHON) + if backend == Backend.CPP: + return _nodes.TreeNode(key, data, **kwargs) + obj = Node.__new__(cls) + obj.data, obj.key = data, key + obj.left, obj.right, obj.parent, obj.height, obj.size = \ + None, None, None, 0, 1 + obj.is_root = False + return obj + + def __str__(self): + """ + Used for printing. + """ + return str((self.left, self.key, self.data, self.right)) + +class CartesianTreeNode(TreeNode): + """ + Represents node in cartesian trees. + + Parameters + ========== + + key + Required for comparison operations. + data + Any valid data to be stored in the node. + priority: int + An integer value for heap property. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + """ + __slots__ = ['key', 'data', 'priority'] + + def __new__(cls, key, priority, data=None, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = TreeNode.__new__(cls, key, data) + obj.priority = priority + return obj + + def __str__(self): + """ + Used for printing. + """ + return str((self.left, self.key, self.priority, self.data, self.right)) + +class RedBlackTreeNode(TreeNode): + """ + Represents node in red-black trees. + + Parameters + ========== + + key + Required for comparison operations. 
+ data + Any valid data to be stored in the node. + color + 0 for black and 1 for red. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + """ + __slots__ = ['key', 'data', 'color'] + + @classmethod + def methods(cls): + return ['__new__'] + + def __new__(cls, key, data=None, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = TreeNode.__new__(cls, key, data) + obj.color = 1 + return obj + +class BinomialTreeNode(TreeNode): + """ + Represents node in binomial trees. + + Parameters + ========== + + key + Required for comparison operations. + data + Any valid data to be stored in the node. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Note + ==== + + The following are the data members of the class: + + parent: BinomialTreeNode + A reference to the BinomialTreeNode object + which is a prent of this. + children: DynamicOneDimensionalArray + An array of references to BinomialTreeNode objects + which are children this node. + is_root: bool, by default, False + If the current node is a root of the tree then + set it to True otherwise False. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + """ + __slots__ = ['parent', 'key', 'children', 'data', 'is_root'] + + @classmethod + def methods(cls): + return ['__new__', 'add_children', '__str__'] + + def __new__(cls, key, data=None, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + from pydatastructs.linear_data_structures.arrays import DynamicOneDimensionalArray + obj = Node.__new__(cls) + obj.data, obj.key = data, key + obj.children, obj.parent, obj.is_root = ( + DynamicOneDimensionalArray(BinomialTreeNode, 0), + None, + False + ) + return obj + + def add_children(self, *children): + """ + Adds children of current node. 
+ """ + for child in children: + self.children.append(child) + child.parent = self + + def __str__(self): + """ + For printing the key and data. + """ + return str((self.key, self.data)) + +class MAryTreeNode(TreeNode): + """ + Represents node in an M-ary trees. + + Parameters + ========== + + key + Required for comparison operations. + data + Any valid data to be stored in the node. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Note + ==== + + The following are the data members of the class: + + children: DynamicOneDimensionalArray + An array of indices which stores the children of + this node in the M-ary tree array + is_root: bool, by default, False + If the current node is a root of the tree then + set it to True otherwise False. + """ + __slots__ = ['key', 'children', 'data', 'is_root'] + + @classmethod + def methods(cls): + return ['__new__', 'add_children', '__str__'] + + def __new__(cls, key, data=None, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + from pydatastructs.linear_data_structures.arrays import DynamicOneDimensionalArray + obj = Node.__new__(cls) + obj.data = data + obj.key = key + obj.is_root = False + obj.children = DynamicOneDimensionalArray(int, 0) + return obj + + def add_children(self, *children): + """ + Adds children of current node. + """ + for child in children: + self.children.append(child) + + def __str__(self): + return str((self.key, self.data)) + + +class LinkedListNode(Node): + """ + Represents node in linked lists. + + Parameters + ========== + + key + Any valid identifier to uniquely + identify the node in the linked list. + data + Any valid data to be stored in the node. + links + List of names of attributes which should + be used as links to other nodes. + addrs + List of address of nodes to be assigned to + each of the attributes in links. + backend: pydatastructs.Backend + The backend to be used. 
+ Optional, by default, the best available + backend is used. + """ + @classmethod + def methods(cls): + return ['__new__', '__str__'] + + def __new__(cls, key, data=None, links=None, addrs=None, + **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + if links is None: + links = ['next'] + if addrs is None: + addrs = [None] + obj = Node.__new__(cls) + obj.key = key + obj.data = data + for link, addr in zip(links, addrs): + obj.__setattr__(link, addr) + obj.__slots__ = ['key', 'data'] + links + return obj + + def __str__(self): + return str((self.key, self.data)) + +class SkipNode(Node): + """ + Represents node in linked lists. + + Parameters + ========== + + key + Any valid identifier to uniquely + identify the node in the skip list. + data + Any valid data to be stored in the node. + next + Reference to the node lying just forward + to the current node. + Optional, by default, None. + down + Reference to the node lying just below the + current node. + Optional, by default, None. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + """ + + __slots__ = ['key', 'data', 'next', 'down'] + + def __new__(cls, key, data=None, next=None, down=None, + **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = Node.__new__(cls) + obj.key, obj.data = key, data + obj.next, obj.down = next, down + return obj + + def __str__(self): + return str((self.key, self.data)) + +class GraphNode(Node): + """ + Abastract class for graph nodes/vertices. + """ + def __str__(self): + return str((self.name, self.data)) + +class AdjacencyListGraphNode(GraphNode): + """ + Represents nodes for adjacency list implementation + of graphs. + + Parameters + ========== + + name: str + The name of the node by which it is identified + in the graph. Must be unique. + data + The data to be stored at each graph node. 
+ adjacency_list: list + Any valid iterator to initialize the adjacent + nodes of the current node. + Optional, by default, None + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + """ + @classmethod + def methods(cls): + return ['__new__', 'add_adjacent_node', + 'remove_adjacent_node'] + + def __new__(cls, name, data=None, adjacency_list=[], + **kwargs): + backend = kwargs.get('backend', Backend.PYTHON) + if backend == Backend.PYTHON: + obj = GraphNode.__new__(cls) + obj.name, obj.data = str(name), data + obj._impl = 'adjacency_list' + if len(adjacency_list) > 0: + for node in adjacency_list: + obj.__setattr__(node.name, node) + obj.adjacent = adjacency_list if len(adjacency_list) > 0 \ + else [] + return obj + else: + return _graph_utils.AdjacencyListGraphNode(name, data, adjacency_list) + + def add_adjacent_node(self, name, data=None): + """ + Adds adjacent node to the current node's + adjacency list with given name and data. + """ + if hasattr(self, name): + getattr(self, name).data = data + else: + new_node = AdjacencyListGraphNode(name, data) + self.__setattr__(new_node.name, new_node) + self.adjacent.append(new_node.name) + + def remove_adjacent_node(self, name): + """ + Removes node with given name from + adjacency list. + """ + if not hasattr(self, name): + raise ValueError("%s is not adjacent to %s"%(name, self.name)) + self.adjacent.remove(name) + delattr(self, name) + +class AdjacencyMatrixGraphNode(GraphNode): + """ + Represents nodes for adjacency matrix implementation + of graphs. + + Parameters + ========== + + name: str + The index of the node in the AdjacencyMatrix. + data + The data to be stored at each graph node. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. 
+ """ + __slots__ = ['name', 'data'] + + @classmethod + def methods(cls): + return ['__new__'] + + def __new__(cls, name, data=None, + **kwargs): + backend = kwargs.get('backend', Backend.PYTHON) + if backend == Backend.PYTHON: + obj = GraphNode.__new__(cls) + obj.name, obj.data, obj.is_connected = \ + str(name), data, None + obj._impl = 'adjacency_matrix' + return obj + else: + return _graph_utils.AdjacencyMatrixGraphNode(str(name), data) + +class GraphEdge(object): + """ + Represents the concept of edges in graphs. + + Parameters + ========== + + node1: GraphNode or it's child classes + The source node of the edge. + node2: GraphNode or it's child classes + The target node of the edge. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + """ + @classmethod + def methods(cls): + return ['__new__', '__str__'] + + def __new__(cls, node1, node2, value=None, + **kwargs): + backend = kwargs.get('backend', Backend.PYTHON) + if backend == Backend.PYTHON: + obj = object.__new__(cls) + obj.source, obj.target = node1, node2 + obj.value = value + return obj + else: + return _graph_utils.GraphEdge(node1, node2, value) + + def __str__(self): + return str((self.source.name, self.target.name)) + +class Set(object): + """ + Represents a set in a forest of disjoint sets. + + Parameters + ========== + + key: Hashable python object + The key which uniquely identifies + the set. + data: Python object + The data to be stored in the set. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. 
+ """ + + __slots__ = ['parent', 'size', 'key', 'data'] + + @classmethod + def methods(cls): + return ['__new__'] + + def __new__(cls, key, data=None, + **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = object.__new__(cls) + obj.key = key + obj.data = data + obj.parent, obj.size = [None]*2 + return obj + +class TrieNode(Node): + """ + Represents nodes in the trie data structure. + + Parameters + ========== + + char: The character stored in the current node. + Optional, by default None. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + """ + + __slots__ = ['char', '_children', 'is_terminal'] + + @classmethod + def methods(cls): + return ['__new__', 'add_child', 'get_child', 'remove_child'] + + def __new__(cls, char=None, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = Node.__new__(cls) + obj.char = char + obj._children = {} + obj.is_terminal = False + return obj + + def add_child(self, trie_node) -> None: + self._children[trie_node.char] = trie_node + + def get_child(self, char: str): + return self._children.get(char, None) + + def remove_child(self, char: str) -> None: + self._children.pop(char) + +def _comp(u, v, tcomp): + """ + Overloaded comparator for comparing + two values where any one of them can be + `None`. + """ + if u is None and v is not None: + return False + elif u is not None and v is None: + return True + elif u is None and v is None: + return False + else: + return tcomp(u, v) + +def _check_range_query_inputs(input, bounds): + start, end = input + if start >= end: + raise ValueError("Input (%d, %d) range is empty."%(start, end)) + if start < bounds[0] or end > bounds[1]: + raise IndexError("Input (%d, %d) range is out of " + "bounds of array indices (%d, %d)." 
+ %(start, end, bounds[0], bounds[1])) + +def minimum(x_y): + if len(x_y) == 1: + return x_y[0] + + x, y = x_y + if x is None or y is None: + return x if y is None else y + + return min(x, y) + +def greatest_common_divisor(x_y): + if len(x_y) == 1: + return x_y[0] + + x, y = x_y + if x is None or y is None: + return x if y is None else y + + return math.gcd(x, y) + +def summation(x_y): + if len(x_y) == 1: + return x_y[0] + + x, y = x_y + if x is None or y is None: + return x if y is None else y + + return x + y diff --git a/lib/python3.12/site-packages/pydatastructs/utils/raises_util.py b/lib/python3.12/site-packages/pydatastructs/utils/raises_util.py new file mode 100644 index 000000000..3a324d38d --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/utils/raises_util.py @@ -0,0 +1,17 @@ +import pytest + +def raises(exception, code): + """ + Utility for testing exceptions. + + Parameters + ========== + + exception + A valid python exception + code: lambda + Code that causes exception + """ + with pytest.raises(exception): + code() + return True diff --git a/lib/python3.12/site-packages/pydatastructs/utils/testing_util.py b/lib/python3.12/site-packages/pydatastructs/utils/testing_util.py new file mode 100644 index 000000000..e5c0627b5 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/utils/testing_util.py @@ -0,0 +1,83 @@ +import os +import pathlib +import glob +import types + +__all__ = ['test'] + + +# Root pydatastructs directory +ROOT_DIR = pathlib.Path(os.path.abspath(__file__)).parents[1] + + +SKIP_FILES = ['testing_util.py'] + +def test(submodules=None, only_benchmarks=False, + benchmarks_size=1000, **kwargs): + """ + Runs the library tests using pytest + + Parameters + ========== + + submodules: Optional, list[str] + List of submodules test to run. By default runs + all the tests + """ + try: + import pytest + except ImportError: + raise Exception("pytest must be installed. 
Use `pip install pytest` " + "to install it.") + + # set benchmarks size + os.environ["PYDATASTRUCTS_BENCHMARK_SIZE"] = str(benchmarks_size) + test_files = [] + if submodules: + if not isinstance(submodules, (list, tuple)): + submodules = [submodules] + for path in glob.glob(f'{ROOT_DIR}/**/test_*.py', recursive=True): + skip_test = False + for skip in SKIP_FILES: + if skip in path: + skip_test = True + break + if skip_test: + continue + for sub_var in submodules: + if isinstance(sub_var, types.ModuleType): + sub = sub_var.__name__.split('.')[-1] + elif isinstance(sub_var, str): + sub = sub_var + else: + raise Exception("Submodule should be of type: str or module") + if sub in path: + if not only_benchmarks: + if 'benchmarks' not in path: + test_files.append(path) + else: + if 'benchmarks' in path: + test_files.append(path) + break + else: + for path in glob.glob(f'{ROOT_DIR}/**/test_*.py', recursive=True): + skip_test = False + for skip in SKIP_FILES: + if skip in path: + skip_test = True + break + if skip_test: + continue + if not only_benchmarks: + if 'benchmarks' not in path: + test_files.append(path) + else: + if 'benchmarks' in path: + test_files.append(path) + + extra_args = [] + if kwargs.get("n", False) is not False: + extra_args.append("-n") + extra_args.append(str(kwargs["n"])) + + pytest.main(extra_args + test_files) diff --git a/lib/python3.12/site-packages/pydatastructs/utils/tests/__init__.py b/lib/python3.12/site-packages/pydatastructs/utils/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/lib/python3.12/site-packages/pydatastructs/utils/tests/test_code_quality.py b/lib/python3.12/site-packages/pydatastructs/utils/tests/test_code_quality.py new file mode 100644 index 000000000..67afe49e8 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/utils/tests/test_code_quality.py @@ -0,0 +1,239 @@ +import os, re, sys, pydatastructs, inspect +from typing import Type +import pytest + +def _list_files(checker): + 
root_path = os.path.abspath( + os.path.join( + os.path.split(__file__)[0], + os.pardir, os.pardir)) + code_files = [] + for (dirpath, _, filenames) in os.walk(root_path): + for _file in filenames: + if checker(_file): + code_files.append(os.path.join(dirpath, _file)) + return code_files + +checker = lambda _file: (re.match(r".*\.py$", _file) or + re.match(r".*\.cpp$", _file) or + re.match(r".*\.hpp$", _file)) +code_files = _list_files(checker) + +def test_trailing_white_spaces(): + messages = [("The following places in your code " + "end with white spaces.")] + msg = "{}:{}" + for file_path in code_files: + file = open(file_path, "r") + line = file.readline() + line_number = 1 + while line != "": + if line.endswith(" \n") or line.endswith("\t\n") \ + or line.endswith(" ") or line.endswith("\t"): + messages.append(msg.format(file_path, line_number)) + line = file.readline() + line_number += 1 + file.close() + + if len(messages) > 1: + assert False, '\n'.join(messages) + +def test_final_new_lines(): + messages = [("The following files in your code " + "do not end with a single new line.")] + msg1 = "No new line in {}:{}" + msg2 = "More than one new line in {}:{}" + for file_path in code_files: + file = open(file_path, "r") + lines = [] + line = file.readline() + while line != "": + lines.append(line) + line = file.readline() + if lines: + if lines[-1][-1] != "\n": + messages.append(msg1.format(file_path, len(lines))) + if lines[-1] == "\n" and lines[-2][-1] == "\n": + messages.append(msg2.format(file_path, len(lines))) + file.close() + + if len(messages) > 1: + assert False, '\n'.join(messages) + +def test_comparison_True_False_None(): + messages = [("The following places in your code " + "use `!=` or `==` for comparing True/False/None." 
+ "Please use `is` instead.")] + msg = "{}:{}" + checker = lambda _file: re.match(r".*\.py$", _file) + py_files = _list_files(checker) + for file_path in py_files: + if file_path.find("test_code_quality.py") == -1: + file = open(file_path, "r") + line = file.readline() + line_number = 1 + while line != "": + if ((line.find("== True") != -1) or + (line.find("== False") != -1) or + (line.find("== None") != -1) or + (line.find("!= True") != -1) or + (line.find("!= False") != -1) or + (line.find("!= None") != -1)): + messages.append(msg.format(file_path, line_number)) + line = file.readline() + line_number += 1 + file.close() + + if len(messages) > 1: + assert False, '\n'.join(messages) + +@pytest.mark.xfail +def test_reinterpret_cast(): + + def is_variable(str): + for ch in str: + if not (ch == '_' or ch.isalnum()): + return False + return True + + checker = lambda _file: (re.match(r".*\.cpp$", _file) or + re.match(r".*\.hpp$", _file)) + cpp_files = _list_files(checker) + messages = [("The following lines should use reinterpret_cast" + " to cast pointers from one type to another")] + msg = "Casting to {} at {}:{}" + for file_path in cpp_files: + file = open(file_path, "r") + line = file.readline() + line_number = 1 + while line != "": + found_open = False + between_open_close = "" + for char in line: + if char == '(': + found_open = True + elif char == ')': + if (between_open_close and + between_open_close[-1] == '*' and + is_variable(between_open_close[:-1])): + messages.append(msg.format(between_open_close[:-1], + file_path, line_number)) + between_open_close = "" + found_open = False + elif char != ' ' and found_open: + between_open_close += char + line = file.readline() + line_number += 1 + file.close() + + if len(messages) > 1: + assert False, '\n'.join(messages) + +def test_presence_of_tabs(): + messages = [("The following places in your code " + "use tabs instead of spaces.")] + msg = "{}:{}" + for file_path in code_files: + file = open(file_path, "r") + 
line_number = 1 + line = file.readline() + while line != "": + if (line.find('\t') != -1): + messages.append(msg.format(file_path, line_number)) + line = file.readline() + line_number += 1 + file.close() + + if len(messages) > 1: + assert False, '\n'.join(messages) + +def _apis(): + import pydatastructs as pyds + return [ + pyds.graphs.adjacency_list.AdjacencyList, + pyds.graphs.adjacency_matrix.AdjacencyMatrix, + pyds.DoublyLinkedList, pyds.SinglyLinkedList, + pyds.SinglyCircularLinkedList, + pyds.DoublyCircularLinkedList, + pyds.OneDimensionalArray, pyds.MultiDimensionalArray, + pyds.DynamicOneDimensionalArray, + pyds.trees.BinaryTree, pyds.BinarySearchTree, + pyds.AVLTree, pyds.SplayTree, pyds.BinaryTreeTraversal, + pyds.DHeap, pyds.BinaryHeap, pyds.TernaryHeap, pyds.BinomialHeap, + pyds.MAryTree, pyds.OneDimensionalSegmentTree, + pyds.Queue, pyds.miscellaneous_data_structures.queue.ArrayQueue, + pyds.miscellaneous_data_structures.queue.LinkedListQueue, + pyds.PriorityQueue, + pyds.miscellaneous_data_structures.queue.LinkedListPriorityQueue, + pyds.miscellaneous_data_structures.queue.BinaryHeapPriorityQueue, + pyds.miscellaneous_data_structures.queue.BinomialHeapPriorityQueue, + pyds.Stack, pyds.miscellaneous_data_structures.stack.ArrayStack, + pyds.miscellaneous_data_structures.stack.LinkedListStack, + pyds.DisjointSetForest, pyds.BinomialTree, pyds.TreeNode, pyds.MAryTreeNode, + pyds.LinkedListNode, pyds.BinomialTreeNode, pyds.AdjacencyListGraphNode, + pyds.AdjacencyMatrixGraphNode, pyds.GraphEdge, pyds.Set, pyds.BinaryIndexedTree, + pyds.CartesianTree, pyds.CartesianTreeNode, pyds.Treap, pyds.RedBlackTreeNode, pyds.RedBlackTree, + pyds.Trie, pyds.TrieNode, pyds.SkipList, pyds.RangeQueryStatic, pyds.RangeQueryDynamic, pyds.SparseTable, + pyds.miscellaneous_data_structures.segment_tree.OneDimensionalArraySegmentTree, + pyds.bubble_sort, pyds.linear_search, pyds.binary_search, pyds.jump_search, + pyds.selection_sort, pyds.insertion_sort, pyds.quick_sort, 
pyds.intro_sort] + +def test_public_api(): + pyds = pydatastructs + apis = _apis() + print("\n\nAPI Report") + print("==========") + for name in apis: + if inspect.isclass(name): + _class = name + mro = _class.__mro__ + must_methods = _class.methods() + print("\n" + str(name)) + print("Methods Implemented") + print(must_methods) + print("Parent Classes") + print(mro[1:]) + for supercls in mro: + if supercls != _class: + for method in must_methods: + if hasattr(supercls, method) and \ + getattr(supercls, method) == \ + getattr(_class, method): + assert False, ("%s class doesn't " + "have %s method implemented."%( + _class, method + )) + +def test_backend_argument_message(): + + import pydatastructs as pyds + backend_implemented = [ + pyds.OneDimensionalArray, + pyds.DynamicOneDimensionalArray, + pyds.quick_sort, + pyds.AdjacencyListGraphNode, + pyds.AdjacencyMatrixGraphNode, + pyds.GraphEdge + ] + + def call_and_raise(api, pos_args_count=0): + try: + if pos_args_count == 0: + api(backend=None) + elif pos_args_count == 1: + api(None, backend=None) + elif pos_args_count == 2: + api(None, None, backend=None) + except ValueError as value_error: + assert str(api) in value_error.args[0] + except TypeError as type_error: + max_pos_args_count = 2 + if pos_args_count <= max_pos_args_count: + call_and_raise(api, pos_args_count + 1) + else: + raise type_error + + apis = _apis() + for api in apis: + if api not in backend_implemented: + call_and_raise(api, 0) diff --git a/lib/python3.12/site-packages/pydatastructs/utils/tests/test_misc_util.py b/lib/python3.12/site-packages/pydatastructs/utils/tests/test_misc_util.py new file mode 100644 index 000000000..13ba2ec8e --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/utils/tests/test_misc_util.py @@ -0,0 +1,84 @@ +from pydatastructs.utils import (TreeNode, AdjacencyListGraphNode, AdjacencyMatrixGraphNode, + GraphEdge, BinomialTreeNode, MAryTreeNode, CartesianTreeNode, RedBlackTreeNode, SkipNode) +from 
pydatastructs.utils.raises_util import raises +from pydatastructs.utils.misc_util import Backend + +def test_cpp_TreeNode(): + n = TreeNode(1,100,backend=Backend.CPP) + assert str(n) == "(None, 1, 100, None)" + +def test_AdjacencyListGraphNode(): + g_1 = AdjacencyListGraphNode('g_1', 1) + g_2 = AdjacencyListGraphNode('g_2', 2) + g = AdjacencyListGraphNode('g', 0, adjacency_list=[g_1, g_2]) + g.add_adjacent_node('g_3', 3) + assert g.g_1.name == 'g_1' + assert g.g_2.name == 'g_2' + assert g.g_3.name == 'g_3' + g.remove_adjacent_node('g_3') + assert hasattr(g, 'g_3') is False + assert raises(ValueError, lambda: g.remove_adjacent_node('g_3')) + g.add_adjacent_node('g_1', 4) + assert g.g_1.data == 4 + assert str(g) == "('g', 0)" + + h_1 = AdjacencyListGraphNode('h_1', 1, backend = Backend.CPP) + h_2 = AdjacencyListGraphNode('h_2', 2, backend = Backend.CPP) + assert str(h_1) == "('h_1', 1)" + h = AdjacencyListGraphNode('h', 0, adjacency_list = [h_1, h_2], backend = Backend.CPP) + h.add_adjacent_node('h_3', 3) + assert h.adjacent['h_1'].name == 'h_1' + assert h.adjacent['h_2'].name == 'h_2' + assert h.adjacent['h_3'].name == 'h_3' + h.remove_adjacent_node('h_3') + assert 'h_3' not in h.adjacent + assert raises(ValueError, lambda: h.remove_adjacent_node('h_3')) + h.add_adjacent_node('h_1', 4) + assert h.adjacent['h_1'] == 4 + assert str(h) == "('h', 0)" + h_5 = AdjacencyListGraphNode('h_5', h_1, backend = Backend.CPP) + assert h_5.data == h_1 + +def test_AdjacencyMatrixGraphNode(): + g = AdjacencyMatrixGraphNode("1", 3) + g2 = AdjacencyMatrixGraphNode("1", 3, backend = Backend.CPP) + assert str(g) == "('1', 3)" + assert str(g2) == "('1', 3)" + g3 = AdjacencyListGraphNode("3", g2, backend = Backend.CPP) + assert g3.data == g2 + + +def test_GraphEdge(): + g_1 = AdjacencyListGraphNode('g_1', 1) + g_2 = AdjacencyListGraphNode('g_2', 2) + e = GraphEdge(g_1, g_2, value=2) + assert str(e) == "('g_1', 'g_2')" + + h_1 = AdjacencyListGraphNode('h_1', 1, backend = Backend.CPP) + h_2 
= AdjacencyListGraphNode('h_2', 2, backend = Backend.CPP) + e2 = GraphEdge(h_1, h_2, value = 2, backend = Backend.CPP) + assert str(e2) == "('h_1', 'h_2', 2)" + +def test_BinomialTreeNode(): + b = BinomialTreeNode(1,1) + b.add_children(*[BinomialTreeNode(i,i) for i in range(2,10)]) + assert str(b) == '(1, 1)' + assert str(b.children) == "['(2, 2)', '(3, 3)', '(4, 4)', '(5, 5)', '(6, 6)', '(7, 7)', '(8, 8)', '(9, 9)']" + +def test_MAryTreeNode(): + m = MAryTreeNode(1, 1) + m.add_children(*list(range(2, 10))) + assert str(m) == "(1, 1)" + assert str(m.children) == "['2', '3', '4', '5', '6', '7', '8', '9']" + +def test_CartesianTreeNode(): + c = CartesianTreeNode(1, 1, 1) + assert str(c) == "(None, 1, 1, 1, None)" + +def test_RedBlackTreeNode(): + c = RedBlackTreeNode(1, 1) + assert str(c) == "(None, 1, 1, None)" + +def test_SkipNode(): + c = SkipNode(1) + assert str(c) == '(1, None)' diff --git a/pydatastructs/graphs/meson.build b/pydatastructs/graphs/meson.build index f6e084142..1b7e452b7 100644 --- a/pydatastructs/graphs/meson.build +++ b/pydatastructs/graphs/meson.build @@ -3,7 +3,6 @@ python = import('python').find_installation(pure: false) python.install_sources( [ '__init__.py', - '_extensions.py', 'adjacency_list.py', 'adjacency_matrix.py', 'algorithms.py', diff --git a/pydatastructs/utils/meson.build b/pydatastructs/utils/meson.build index de2f70f1d..3288115b1 100644 --- a/pydatastructs/utils/meson.build +++ b/pydatastructs/utils/meson.build @@ -3,7 +3,6 @@ python = import('python').find_installation(pure: false) python.install_sources( [ '__init__.py', - '_extensions.py', 'misc_util.py', 'raises_util.py', 'testing_util.py' From 9ae70495d55ca816489bc639fa8052147ab115a3 Mon Sep 17 00:00:00 2001 From: Prerak Singh Date: Thu, 2 Oct 2025 13:07:31 +0530 Subject: [PATCH 27/47] bug fix --- .../site-packages/pydatastructs/__init__.py | 8 - .../pydatastructs/graphs/__init__.py | 28 - .../pydatastructs/graphs/_backend/__init__.py | 0 
.../pydatastructs/graphs/adjacency_list.py | 101 - .../pydatastructs/graphs/adjacency_matrix.py | 100 - .../pydatastructs/graphs/algorithms.py | 1386 ------------ .../pydatastructs/graphs/graph.py | 163 -- .../pydatastructs/graphs/tests/__init__.py | 0 .../graphs/tests/test_adjacency_list.py | 83 - .../graphs/tests/test_adjacency_matrix.py | 53 - .../graphs/tests/test_algorithms.py | 596 ----- .../linear_data_structures/__init__.py | 53 - .../_backend/__init__.py | 0 .../linear_data_structures/algorithms.py | 2010 ----------------- .../linear_data_structures/arrays.py | 473 ---- .../linear_data_structures/linked_lists.py | 819 ------- .../linear_data_structures/tests/__init__.py | 0 .../tests/test_algorithms.py | 423 ---- .../tests/test_arrays.py | 157 -- .../tests/test_linked_lists.py | 193 -- .../miscellaneous_data_structures/__init__.py | 51 - .../_backend/__init__.py | 0 .../algorithms.py | 335 --- .../binomial_trees.py | 91 - .../disjoint_set.py | 143 -- .../miscellaneous_data_structures/multiset.py | 42 - .../miscellaneous_data_structures/queue.py | 498 ---- .../segment_tree.py | 225 -- .../sparse_table.py | 108 - .../miscellaneous_data_structures/stack.py | 200 -- .../tests/__init__.py | 0 .../tests/test_binomial_trees.py | 17 - .../tests/test_disjoint_set.py | 70 - .../tests/test_multiset.py | 39 - .../tests/test_queue.py | 116 - .../tests/test_range_query_dynamic.py | 71 - .../tests/test_range_query_static.py | 63 - .../tests/test_stack.py | 77 - .../pydatastructs/strings/__init__.py | 18 - .../pydatastructs/strings/algorithms.py | 247 -- .../pydatastructs/strings/tests/__init__.py | 0 .../strings/tests/test_algorithms.py | 76 - .../pydatastructs/strings/tests/test_trie.py | 49 - .../pydatastructs/strings/trie.py | 201 -- .../pydatastructs/trees/__init__.py | 40 - .../pydatastructs/trees/_backend/__init__.py | 0 .../pydatastructs/trees/binary_trees.py | 1888 ---------------- .../pydatastructs/trees/heaps.py | 582 ----- 
.../pydatastructs/trees/m_ary_trees.py | 172 -- .../trees/space_partitioning_trees.py | 242 -- .../pydatastructs/trees/tests/__init__.py | 0 .../trees/tests/test_binary_trees.py | 820 ------- .../pydatastructs/trees/tests/test_heaps.py | 236 -- .../trees/tests/test_m_ary_trees.py | 5 - .../tests/test_space_partitioning_tree.py | 20 - .../pydatastructs/utils/__init__.py | 29 - .../pydatastructs/utils/_backend/__init__.py | 0 .../pydatastructs/utils/misc_util.py | 632 ------ .../pydatastructs/utils/raises_util.py | 17 - .../pydatastructs/utils/testing_util.py | 83 - .../pydatastructs/utils/tests/__init__.py | 0 .../utils/tests/test_code_quality.py | 239 -- .../utils/tests/test_misc_util.py | 84 - 63 files changed, 14472 deletions(-) delete mode 100644 lib/python3.12/site-packages/pydatastructs/__init__.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/graphs/__init__.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/graphs/_backend/__init__.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/graphs/adjacency_list.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/graphs/adjacency_matrix.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/graphs/algorithms.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/graphs/graph.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/graphs/tests/__init__.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/graphs/tests/test_adjacency_list.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/graphs/tests/test_adjacency_matrix.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/graphs/tests/test_algorithms.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/linear_data_structures/__init__.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/linear_data_structures/_backend/__init__.py delete mode 100644 
lib/python3.12/site-packages/pydatastructs/linear_data_structures/algorithms.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/linear_data_structures/arrays.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/linear_data_structures/linked_lists.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/__init__.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_algorithms.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_arrays.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_linked_lists.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/__init__.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/_backend/__init__.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/algorithms.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/binomial_trees.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/disjoint_set.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/multiset.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/queue.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/segment_tree.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/sparse_table.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/stack.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/__init__.py delete mode 100644 
lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_binomial_trees.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_disjoint_set.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_multiset.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_queue.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_range_query_dynamic.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_range_query_static.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_stack.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/strings/__init__.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/strings/algorithms.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/strings/tests/__init__.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/strings/tests/test_algorithms.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/strings/tests/test_trie.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/strings/trie.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/trees/__init__.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/trees/_backend/__init__.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/trees/binary_trees.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/trees/heaps.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/trees/m_ary_trees.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/trees/space_partitioning_trees.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/trees/tests/__init__.py delete mode 100644 
lib/python3.12/site-packages/pydatastructs/trees/tests/test_binary_trees.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/trees/tests/test_heaps.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/trees/tests/test_m_ary_trees.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/trees/tests/test_space_partitioning_tree.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/utils/__init__.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/utils/_backend/__init__.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/utils/misc_util.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/utils/raises_util.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/utils/testing_util.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/utils/tests/__init__.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/utils/tests/test_code_quality.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/utils/tests/test_misc_util.py diff --git a/lib/python3.12/site-packages/pydatastructs/__init__.py b/lib/python3.12/site-packages/pydatastructs/__init__.py deleted file mode 100644 index 27cc5a202..000000000 --- a/lib/python3.12/site-packages/pydatastructs/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -from .utils import * -from .linear_data_structures import * -from .trees import * -from .miscellaneous_data_structures import * -from .graphs import * -from .strings import * - -__version__ = "1.0.1-dev" diff --git a/lib/python3.12/site-packages/pydatastructs/graphs/__init__.py b/lib/python3.12/site-packages/pydatastructs/graphs/__init__.py deleted file mode 100644 index 21e0a5f35..000000000 --- a/lib/python3.12/site-packages/pydatastructs/graphs/__init__.py +++ /dev/null @@ -1,28 +0,0 @@ -__all__ = [] - -from . import graph -from .graph import ( - Graph -) -__all__.extend(graph.__all__) - -from . import algorithms -from . import adjacency_list -from . 
import adjacency_matrix - -from .algorithms import ( - breadth_first_search, - breadth_first_search_parallel, - minimum_spanning_tree, - minimum_spanning_tree_parallel, - strongly_connected_components, - depth_first_search, - shortest_paths, - all_pair_shortest_paths, - topological_sort, - topological_sort_parallel, - max_flow, - find_bridges -) - -__all__.extend(algorithms.__all__) diff --git a/lib/python3.12/site-packages/pydatastructs/graphs/_backend/__init__.py b/lib/python3.12/site-packages/pydatastructs/graphs/_backend/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/lib/python3.12/site-packages/pydatastructs/graphs/adjacency_list.py b/lib/python3.12/site-packages/pydatastructs/graphs/adjacency_list.py deleted file mode 100644 index bd901b380..000000000 --- a/lib/python3.12/site-packages/pydatastructs/graphs/adjacency_list.py +++ /dev/null @@ -1,101 +0,0 @@ -from pydatastructs.graphs.graph import Graph -from pydatastructs.graphs._backend.cpp import _graph -from pydatastructs.utils.misc_util import ( - GraphEdge, Backend, raise_if_backend_is_not_python) - -__all__ = [ - 'AdjacencyList' -] - -class AdjacencyList(Graph): - """ - Adjacency list implementation of graphs. 
- - See also - ======== - - pydatastructs.graphs.graph.Graph - """ - def __new__(cls, *vertices, **kwargs): - - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.PYTHON: - obj = object.__new__(cls) - for vertex in vertices: - obj.__setattr__(vertex.name, vertex) - obj.vertices = [vertex.name for vertex in vertices] - obj.edge_weights = {} - obj._impl = 'adjacency_list' - return obj - else: - graph = _graph.AdjacencyListGraph() - for vertice in vertices: - graph.add_vertex(vertice) - return graph - - @classmethod - def methods(self): - return ['is_adjacent', 'neighbors', - 'add_vertex', 'remove_vertex', 'add_edge', - 'get_edge', 'remove_edge', '__new__'] - - def is_adjacent(self, node1, node2): - node1 = self.__getattribute__(node1) - return hasattr(node1, node2) - - def num_vertices(self): - return len(self.vertices) - - def num_edges(self): - return sum(len(self.neighbors(v)) for v in self.vertices) - - def neighbors(self, node): - node = self.__getattribute__(node) - return [self.__getattribute__(name) for name in node.adjacent] - - def add_vertex(self, node): - if not hasattr(self, node.name): - self.vertices.append(node.name) - self.__setattr__(node.name, node) - - def remove_vertex(self, name): - delattr(self, name) - self.vertices.remove(name) - for node in self.vertices: - node_obj = self.__getattribute__(node) - if hasattr(node_obj, name): - delattr(node_obj, name) - node_obj.adjacent.remove(name) - - def add_edge(self, source, target, cost=None): - source, target = str(source), str(target) - error_msg = ("Vertex %s is not present in the graph." - "Call Graph.add_vertex to add a new" - "vertex. Graph.add_edge is only responsible" - "for adding edges and it will not add new" - "vertices on its own. 
This is done to maintain" - "clear separation between the functionality of" - "these two methods.") - if not hasattr(self, source): - raise ValueError(error_msg % (source)) - if not hasattr(self, target): - raise ValueError(error_msg % (target)) - - source, target = self.__getattribute__(source), \ - self.__getattribute__(target) - source.add_adjacent_node(target.name) - if cost is not None: - self.edge_weights[source.name + "_" + target.name] = \ - GraphEdge(source, target, cost) - - def get_edge(self, source, target): - return self.edge_weights.get( - source + "_" + target, - None) - - def remove_edge(self, source, target): - source, target = self.__getattribute__(source), \ - self.__getattribute__(target) - source.remove_adjacent_node(target.name) - self.edge_weights.pop(source.name + "_" + target.name, - None) diff --git a/lib/python3.12/site-packages/pydatastructs/graphs/adjacency_matrix.py b/lib/python3.12/site-packages/pydatastructs/graphs/adjacency_matrix.py deleted file mode 100644 index 9c2326b86..000000000 --- a/lib/python3.12/site-packages/pydatastructs/graphs/adjacency_matrix.py +++ /dev/null @@ -1,100 +0,0 @@ -from pydatastructs.graphs.graph import Graph -from pydatastructs.graphs._backend.cpp import _graph -from pydatastructs.utils.misc_util import ( - GraphEdge, raise_if_backend_is_not_python, - Backend) - -__all__ = [ - 'AdjacencyMatrix' -] - -class AdjacencyMatrix(Graph): - """ - Adjacency matrix implementation of graphs. 
- - See also - ======== - - pydatastructs.graphs.graph.Graph - """ - def __new__(cls, *vertices, **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.PYTHON: - obj = object.__new__(cls) - obj.vertices = [vertex.name for vertex in vertices] - for vertex in vertices: - obj.__setattr__(vertex.name, vertex) - obj.matrix = {} - for vertex in vertices: - obj.matrix[vertex.name] = {} - obj.edge_weights = {} - obj._impl = 'adjacency_matrix' - return obj - else: - return _graph.AdjacencyMatrixGraph(vertices) - - @classmethod - def methods(self): - return ['is_adjacent', 'neighbors', - 'add_edge', 'get_edge', 'remove_edge', - '__new__'] - - def is_adjacent(self, node1, node2): - node1, node2 = str(node1), str(node2) - row = self.matrix.get(node1, {}) - return row.get(node2, False) is not False - - def num_vertices(self): - return len(self.vertices) - - def num_edges(self): - return sum(len(v) for v in self.matrix.values()) - - def neighbors(self, node): - node = str(node) - neighbors = [] - row = self.matrix.get(node, {}) - for node, presence in row.items(): - if presence: - neighbors.append(self.__getattribute__( - str(node))) - return neighbors - - def add_vertex(self, node): - raise NotImplementedError("Currently we allow " - "adjacency matrix for static graphs only") - - def remove_vertex(self, node): - raise NotImplementedError("Currently we allow " - "adjacency matrix for static graphs only.") - - def add_edge(self, source, target, cost=None): - source, target = str(source), str(target) - error_msg = ("Vertex %s is not present in the graph." - "Call Graph.add_vertex to add a new" - "vertex. Graph.add_edge is only responsible" - "for adding edges and it will not add new" - "vertices on its own. 
This is done to maintain" - "clear separation between the functionality of" - "these two methods.") - if source not in self.matrix: - raise ValueError(error_msg % (source)) - if target not in self.matrix: - raise ValueError(error_msg % (target)) - - self.matrix[source][target] = True - if cost is not None: - self.edge_weights[source + "_" + target] = \ - GraphEdge(self.__getattribute__(source), - self.__getattribute__(target), - cost) - - def get_edge(self, source, target): - return self.edge_weights.get( - str(source) + "_" + str(target), - None) - - def remove_edge(self, source, target): - source, target = str(source), str(target) - self.matrix[source][target] = False - self.edge_weights.pop(str(source) + "_" + str(target), None) diff --git a/lib/python3.12/site-packages/pydatastructs/graphs/algorithms.py b/lib/python3.12/site-packages/pydatastructs/graphs/algorithms.py deleted file mode 100644 index 9324b7278..000000000 --- a/lib/python3.12/site-packages/pydatastructs/graphs/algorithms.py +++ /dev/null @@ -1,1386 +0,0 @@ -""" -Contains algorithms associated with graph -data structure. 
-""" -from collections import deque -from concurrent.futures import ThreadPoolExecutor -from pydatastructs.utils.misc_util import ( - _comp, raise_if_backend_is_not_python, Backend, AdjacencyListGraphNode) -from pydatastructs.miscellaneous_data_structures import ( - DisjointSetForest, PriorityQueue) -from pydatastructs.graphs.graph import Graph -from pydatastructs.linear_data_structures.algorithms import merge_sort_parallel -from pydatastructs import PriorityQueue - -__all__ = [ - 'breadth_first_search', - 'breadth_first_search_parallel', - 'minimum_spanning_tree', - 'minimum_spanning_tree_parallel', - 'strongly_connected_components', - 'depth_first_search', - 'shortest_paths', - 'all_pair_shortest_paths', - 'topological_sort', - 'topological_sort_parallel', - 'max_flow', - 'find_bridges' -] - -Stack = Queue = deque - -def breadth_first_search( - graph, source_node, operation, *args, **kwargs): - """ - Implementation of serial breadth first search(BFS) - algorithm. - - Parameters - ========== - - graph: Graph - The graph on which BFS is to be performed. - source_node: str - The name of the source node from where the BFS is - to be initiated. - operation: function - The function which is to be applied - on every node when it is visited. - The prototype which is to be followed is, - `function_name(curr_node, next_node, - arg_1, arg_2, . . ., arg_n)`. - Here, the first two arguments denote, the - current node and the node next to current node. - The rest of the arguments are optional and you can - provide your own stuff there. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Note - ==== - - You should pass all the arguments which you are going - to use in the prototype of your `operation` after - passing the operation function. 
- - Examples - ======== - - >>> from pydatastructs import Graph, AdjacencyListGraphNode - >>> V1 = AdjacencyListGraphNode("V1") - >>> V2 = AdjacencyListGraphNode("V2") - >>> V3 = AdjacencyListGraphNode("V3") - >>> G = Graph(V1, V2, V3) - >>> from pydatastructs import breadth_first_search - >>> def f(curr_node, next_node, dest_node): - ... return curr_node != dest_node - ... - >>> G.add_edge(V1.name, V2.name) - >>> G.add_edge(V2.name, V3.name) - >>> breadth_first_search(G, V1.name, f, V3.name) - """ - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.PYTHON: - import pydatastructs.graphs.algorithms as algorithms - func = "_breadth_first_search_" + graph._impl - if not hasattr(algorithms, func): - raise NotImplementedError( - "Currently breadth first search isn't implemented for " - "%s graphs."%(graph._impl)) - return getattr(algorithms, func)( - graph, source_node, operation, *args, **kwargs) - else: - from pydatastructs.graphs._backend.cpp._algorithms import bfs_adjacency_list, bfs_adjacency_matrix - if (graph._impl == "adjacency_list"): - extra_args = args if args else () - return bfs_adjacency_list(graph, source_node, operation, extra_args) - if (graph._impl == "adjacency_matrix"): - extra_args = args if args else () - return bfs_adjacency_matrix(graph, source_node, operation, extra_args) - -def _breadth_first_search_adjacency_list( - graph, source_node, operation, *args, **kwargs): - bfs_queue = Queue() - visited = {} - bfs_queue.append(source_node) - visited[source_node] = True - while len(bfs_queue) != 0: - curr_node = bfs_queue.popleft() - next_nodes = graph.neighbors(curr_node) - if len(next_nodes) != 0: - for next_node in next_nodes: - if visited.get(next_node.name, False) is False: - status = operation(curr_node, next_node.name, *args, **kwargs) - if not status: - return None - bfs_queue.append(next_node.name) - visited[next_node.name] = True - else: - status = operation(curr_node, "", *args, **kwargs) - if not status: - return None 
- -_breadth_first_search_adjacency_matrix = _breadth_first_search_adjacency_list - -def breadth_first_search_parallel( - graph, source_node, num_threads, operation, *args, **kwargs): - """ - Parallel implementation of breadth first search on graphs. - - Parameters - ========== - - graph: Graph - The graph on which BFS is to be performed. - source_node: str - The name of the source node from where the BFS is - to be initiated. - num_threads: int - Number of threads to be used for computation. - operation: function - The function which is to be applied - on every node when it is visited. - The prototype which is to be followed is, - `function_name(curr_node, next_node, - arg_1, arg_2, . . ., arg_n)`. - Here, the first two arguments denote, the - current node and the node next to current node. - The rest of the arguments are optional and you can - provide your own stuff there. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Note - ==== - - You should pass all the arguments which you are going - to use in the prototype of your `operation` after - passing the operation function. - - Examples - ======== - - >>> from pydatastructs import Graph, AdjacencyListGraphNode - >>> V1 = AdjacencyListGraphNode("V1") - >>> V2 = AdjacencyListGraphNode("V2") - >>> V3 = AdjacencyListGraphNode("V3") - >>> G = Graph(V1, V2, V3) - >>> from pydatastructs import breadth_first_search_parallel - >>> def f(curr_node, next_node, dest_node): - ... return curr_node != dest_node - ... 
- >>> G.add_edge(V1.name, V2.name) - >>> G.add_edge(V2.name, V3.name) - >>> breadth_first_search_parallel(G, V1.name, 3, f, V3.name) - """ - raise_if_backend_is_not_python( - breadth_first_search_parallel, kwargs.get('backend', Backend.PYTHON)) - import pydatastructs.graphs.algorithms as algorithms - func = "_breadth_first_search_parallel_" + graph._impl - if not hasattr(algorithms, func): - raise NotImplementedError( - "Currently breadth first search isn't implemented for " - "%s graphs."%(graph._impl)) - return getattr(algorithms, func)( - graph, source_node, num_threads, operation, *args, **kwargs) - -def _generate_layer(**kwargs): - _args, _kwargs = kwargs.get('args'), kwargs.get('kwargs') - (graph, curr_node, next_layer, visited, operation) = _args[0:5] - op_args, op_kwargs = _args[5:], _kwargs - next_nodes = graph.neighbors(curr_node) - status = True - if len(next_nodes) != 0: - for next_node in next_nodes: - if visited.get(next_node, False) is False: - status = status and operation(curr_node, next_node.name, *op_args, **op_kwargs) - next_layer.add(next_node.name) - visited[next_node.name] = True - else: - status = status and operation(curr_node, "", *op_args, **op_kwargs) - return status - -def _breadth_first_search_parallel_adjacency_list( - graph, source_node, num_threads, operation, *args, **kwargs): - visited, layers = {}, {} - layers[0] = set() - layers[0].add(source_node) - visited[source_node] = True - layer = 0 - while len(layers[layer]) != 0: - layers[layer+1] = set() - with ThreadPoolExecutor(max_workers=num_threads) as Executor: - for node in layers[layer]: - status = Executor.submit( - _generate_layer, args= - (graph, node, layers[layer+1], visited, - operation, *args), kwargs=kwargs).result() - layer += 1 - if not status: - return None - -_breadth_first_search_parallel_adjacency_matrix = _breadth_first_search_parallel_adjacency_list - -def _generate_mst_object(graph): - mst = Graph(*[getattr(graph, str(v)) for v in graph.vertices]) - return mst 
- -def _sort_edges(graph, num_threads=None): - edges = list(graph.edge_weights.items()) - if num_threads is None: - sort_key = lambda item: item[1].value - return sorted(edges, key=sort_key) - - merge_sort_parallel(edges, num_threads, - comp=lambda u,v: u[1].value <= v[1].value) - return edges - -def _minimum_spanning_tree_kruskal_adjacency_list(graph): - mst = _generate_mst_object(graph) - dsf = DisjointSetForest() - for v in graph.vertices: - dsf.make_set(v) - for _, edge in _sort_edges(graph): - u, v = edge.source.name, edge.target.name - if dsf.find_root(u) is not dsf.find_root(v): - mst.add_edge(u, v, edge.value) - mst.add_edge(v, u, edge.value) - dsf.union(u, v) - return mst - -_minimum_spanning_tree_kruskal_adjacency_matrix = \ - _minimum_spanning_tree_kruskal_adjacency_list - -def _minimum_spanning_tree_prim_adjacency_list(graph): - q = PriorityQueue(implementation='binomial_heap') - e = {} - mst = Graph(implementation='adjacency_list') - q.push(next(iter(graph.vertices)), 0) - while not q.is_empty: - v = q.pop() - if not hasattr(mst, v): - mst.add_vertex(graph.__getattribute__(v)) - if e.get(v, None) is not None: - edge = e[v] - mst.add_vertex(edge.target) - mst.add_edge(edge.source.name, edge.target.name, edge.value) - mst.add_edge(edge.target.name, edge.source.name, edge.value) - for w_node in graph.neighbors(v): - w = w_node.name - vw = graph.edge_weights[v + '_' + w] - q.push(w, vw.value) - if e.get(w, None) is None or \ - e[w].value > vw.value: - e[w] = vw - return mst - -def minimum_spanning_tree(graph, algorithm, **kwargs): - """ - Computes a minimum spanning tree for the given - graph and algorithm. - - Parameters - ========== - - graph: Graph - The graph whose minimum spanning tree - has to be computed. - algorithm: str - The algorithm which should be used for - computing a minimum spanning tree. - Currently the following algorithms are - supported, - - 'kruskal' -> Kruskal's algorithm as given in [1]. 
- - 'prim' -> Prim's algorithm as given in [2]. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - mst: Graph - A minimum spanning tree using the implementation - same as the graph provided in the input. - - Examples - ======== - - >>> from pydatastructs import Graph, AdjacencyListGraphNode - >>> from pydatastructs import minimum_spanning_tree - >>> u = AdjacencyListGraphNode('u') - >>> v = AdjacencyListGraphNode('v') - >>> G = Graph(u, v) - >>> G.add_edge(u.name, v.name, 3) - >>> mst = minimum_spanning_tree(G, 'kruskal') - >>> u_n = mst.neighbors(u.name) - >>> mst.get_edge(u.name, u_n[0].name).value - 3 - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Kruskal%27s_algorithm - .. [2] https://en.wikipedia.org/wiki/Prim%27s_algorithm - - Note - ==== - - The concept of minimum spanning tree is valid only for - connected and undirected graphs. So, this function - should be used only for such graphs. Using with other - types of graphs may lead to unwanted results. - """ - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.PYTHON: - import pydatastructs.graphs.algorithms as algorithms - func = "_minimum_spanning_tree_" + algorithm + "_" + graph._impl - if not hasattr(algorithms, func): - raise NotImplementedError( - "Currently %s algoithm for %s implementation of graphs " - "isn't implemented for finding minimum spanning trees." 
- %(algorithm, graph._impl)) - return getattr(algorithms, func)(graph) - else: - from pydatastructs.graphs._backend.cpp._algorithms import minimum_spanning_tree_prim_adjacency_list - if graph._impl == "adjacency_list" and algorithm == 'prim': - return minimum_spanning_tree_prim_adjacency_list(graph) - -def _minimum_spanning_tree_parallel_kruskal_adjacency_list(graph, num_threads): - mst = _generate_mst_object(graph) - dsf = DisjointSetForest() - for v in graph.vertices: - dsf.make_set(v) - edges = _sort_edges(graph, num_threads) - for _, edge in edges: - u, v = edge.source.name, edge.target.name - if dsf.find_root(u) is not dsf.find_root(v): - mst.add_edge(u, v, edge.value) - mst.add_edge(v, u, edge.value) - dsf.union(u, v) - return mst - -_minimum_spanning_tree_parallel_kruskal_adjacency_matrix = \ - _minimum_spanning_tree_parallel_kruskal_adjacency_list - -def _find_min(q, v, i): - if not q.is_empty: - v[i] = q.peek - else: - v[i] = None - -def _minimum_spanning_tree_parallel_prim_adjacency_list(graph, num_threads): - q = [PriorityQueue(implementation='binomial_heap') for _ in range(num_threads)] - e = [{} for _ in range(num_threads)] - v2q = {} - mst = Graph(implementation='adjacency_list') - - itr = iter(graph.vertices) - for i in range(len(graph.vertices)): - v2q[next(itr)] = i%len(q) - q[0].push(next(iter(graph.vertices)), 0) - - while True: - - _vs = [None for _ in range(num_threads)] - with ThreadPoolExecutor(max_workers=num_threads) as Executor: - for i in range(num_threads): - Executor.submit(_find_min, q[i], _vs, i).result() - v = None - - for i in range(num_threads): - if _comp(_vs[i], v, lambda u, v: u.key < v.key): - v = _vs[i] - if v is None: - break - v = v.data - idx = v2q[v] - q[idx].pop() - - if not hasattr(mst, v): - mst.add_vertex(graph.__getattribute__(v)) - if e[idx].get(v, None) is not None: - edge = e[idx][v] - mst.add_vertex(edge.target) - mst.add_edge(edge.source.name, edge.target.name, edge.value) - mst.add_edge(edge.target.name, 
edge.source.name, edge.value) - for w_node in graph.neighbors(v): - w = w_node.name - vw = graph.edge_weights[v + '_' + w] - j = v2q[w] - q[j].push(w, vw.value) - if e[j].get(w, None) is None or \ - e[j][w].value > vw.value: - e[j][w] = vw - - return mst - -def minimum_spanning_tree_parallel(graph, algorithm, num_threads, **kwargs): - """ - Computes a minimum spanning tree for the given - graph and algorithm using the given number of threads. - - Parameters - ========== - - graph: Graph - The graph whose minimum spanning tree - has to be computed. - algorithm: str - The algorithm which should be used for - computing a minimum spanning tree. - Currently the following algorithms are - supported, - - 'kruskal' -> Kruskal's algorithm as given in [1]. - - 'prim' -> Prim's algorithm as given in [2]. - num_threads: int - The number of threads to be used. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - mst: Graph - A minimum spanning tree using the implementation - same as the graph provided in the input. - - Examples - ======== - - >>> from pydatastructs import Graph, AdjacencyListGraphNode - >>> from pydatastructs import minimum_spanning_tree_parallel - >>> u = AdjacencyListGraphNode('u') - >>> v = AdjacencyListGraphNode('v') - >>> G = Graph(u, v) - >>> G.add_edge(u.name, v.name, 3) - >>> mst = minimum_spanning_tree_parallel(G, 'kruskal', 3) - >>> u_n = mst.neighbors(u.name) - >>> mst.get_edge(u.name, u_n[0].name).value - 3 - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Kruskal%27s_algorithm#Parallel_algorithm - .. [2] https://en.wikipedia.org/wiki/Prim%27s_algorithm#Parallel_algorithm - - Note - ==== - - The concept of minimum spanning tree is valid only for - connected and undirected graphs. So, this function - should be used only for such graphs. Using with other - types of graphs will lead to unwanted results. 
- """ - raise_if_backend_is_not_python( - minimum_spanning_tree_parallel, kwargs.get('backend', Backend.PYTHON)) - import pydatastructs.graphs.algorithms as algorithms - func = "_minimum_spanning_tree_parallel_" + algorithm + "_" + graph._impl - if not hasattr(algorithms, func): - raise NotImplementedError( - "Currently %s algoithm for %s implementation of graphs " - "isn't implemented for finding minimum spanning trees." - %(algorithm, graph._impl)) - return getattr(algorithms, func)(graph, num_threads) - -def _visit(graph, vertex, visited, incoming, L): - stack = [vertex] - while stack: - top = stack[-1] - if not visited.get(top, False): - visited[top] = True - for node in graph.neighbors(top): - if incoming.get(node.name, None) is None: - incoming[node.name] = [] - incoming[node.name].append(top) - if not visited.get(node.name, False): - stack.append(node.name) - if top is stack[-1]: - L.append(stack.pop()) - -def _assign(graph, u, incoming, assigned, component): - stack = [u] - while stack: - top = stack[-1] - if not assigned.get(top, False): - assigned[top] = True - component.add(top) - for u in incoming[top]: - if not assigned.get(u, False): - stack.append(u) - if top is stack[-1]: - stack.pop() - -def _strongly_connected_components_kosaraju_adjacency_list(graph): - visited, incoming, L = {}, {}, [] - for u in graph.vertices: - if not visited.get(u, False): - _visit(graph, u, visited, incoming, L) - - assigned = {} - components = [] - for i in range(-1, -len(L) - 1, -1): - comp = set() - if not assigned.get(L[i], False): - _assign(graph, L[i], incoming, assigned, comp) - if comp: - components.append(comp) - - return components - -_strongly_connected_components_kosaraju_adjacency_matrix = \ - _strongly_connected_components_kosaraju_adjacency_list - -def _tarjan_dfs(u, graph, index, stack, indices, low_links, on_stacks, components): - indices[u] = index[0] - low_links[u] = index[0] - index[0] += 1 - stack.append(u) - on_stacks[u] = True - - for node in 
graph.neighbors(u): - v = node.name - if indices[v] == -1: - _tarjan_dfs(v, graph, index, stack, indices, low_links, on_stacks, components) - low_links[u] = min(low_links[u], low_links[v]) - elif on_stacks[v]: - low_links[u] = min(low_links[u], low_links[v]) - - if low_links[u] == indices[u]: - component = set() - while stack: - w = stack.pop() - on_stacks[w] = False - component.add(w) - if w == u: - break - components.append(component) - -def _strongly_connected_components_tarjan_adjacency_list(graph): - index = [0] # mutable object - stack = Stack([]) - indices, low_links, on_stacks = {}, {}, {} - - for u in graph.vertices: - indices[u] = -1 - low_links[u] = -1 - on_stacks[u] = False - - components = [] - - for u in graph.vertices: - if indices[u] == -1: - _tarjan_dfs(u, graph, index, stack, indices, low_links, on_stacks, components) - - return components - -_strongly_connected_components_tarjan_adjacency_matrix = \ - _strongly_connected_components_tarjan_adjacency_list - -def strongly_connected_components(graph, algorithm, **kwargs): - """ - Computes strongly connected components for the given - graph and algorithm. - - Parameters - ========== - - graph: Graph - The graph whose minimum spanning tree - has to be computed. - algorithm: str - The algorithm which should be used for - computing strongly connected components. - Currently the following algorithms are - supported, - - 'kosaraju' -> Kosaraju's algorithm as given in [1]. - 'tarjan' -> Tarjan's algorithm as given in [2]. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - components: list - Python list with each element as set of vertices. 
- - Examples - ======== - - >>> from pydatastructs import Graph, AdjacencyListGraphNode - >>> from pydatastructs import strongly_connected_components - >>> v1, v2, v3 = [AdjacencyListGraphNode(i) for i in range(3)] - >>> g = Graph(v1, v2, v3) - >>> g.add_edge(v1.name, v2.name) - >>> g.add_edge(v2.name, v3.name) - >>> g.add_edge(v3.name, v1.name) - >>> scc = strongly_connected_components(g, 'kosaraju') - >>> scc == [{'2', '0', '1'}] - True - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Kosaraju%27s_algorithm - .. [2] https://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm - - """ - raise_if_backend_is_not_python( - strongly_connected_components, kwargs.get('backend', Backend.PYTHON)) - import pydatastructs.graphs.algorithms as algorithms - func = "_strongly_connected_components_" + algorithm + "_" + graph._impl - if not hasattr(algorithms, func): - raise NotImplementedError( - "Currently %s algoithm for %s implementation of graphs " - "isn't implemented for finding strongly connected components." - %(algorithm, graph._impl)) - return getattr(algorithms, func)(graph) - -def depth_first_search( - graph, source_node, operation, *args, **kwargs): - """ - Implementation of depth first search (DFS) - algorithm. - - Parameters - ========== - - graph: Graph - The graph on which DFS is to be performed. - source_node: str - The name of the source node from where the DFS is - to be initiated. - operation: function - The function which is to be applied - on every node when it is visited. - The prototype which is to be followed is, - `function_name(curr_node, next_node, - arg_1, arg_2, . . ., arg_n)`. - Here, the first two arguments denote, the - current node and the node next to current node. - The rest of the arguments are optional and you can - provide your own stuff there. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. 
- - Note - ==== - - You should pass all the arguments which you are going - to use in the prototype of your `operation` after - passing the operation function. - - Examples - ======== - - >>> from pydatastructs import Graph, AdjacencyListGraphNode - >>> V1 = AdjacencyListGraphNode("V1") - >>> V2 = AdjacencyListGraphNode("V2") - >>> V3 = AdjacencyListGraphNode("V3") - >>> G = Graph(V1, V2, V3) - >>> from pydatastructs import depth_first_search - >>> def f(curr_node, next_node, dest_node): - ... return curr_node != dest_node - ... - >>> G.add_edge(V1.name, V2.name) - >>> G.add_edge(V2.name, V3.name) - >>> depth_first_search(G, V1.name, f, V3.name) - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Depth-first_search - """ - raise_if_backend_is_not_python( - depth_first_search, kwargs.get('backend', Backend.PYTHON)) - import pydatastructs.graphs.algorithms as algorithms - func = "_depth_first_search_" + graph._impl - if not hasattr(algorithms, func): - raise NotImplementedError( - "Currently depth first search isn't implemented for " - "%s graphs."%(graph._impl)) - return getattr(algorithms, func)( - graph, source_node, operation, *args, **kwargs) - -def _depth_first_search_adjacency_list( - graph, source_node, operation, *args, **kwargs): - dfs_stack = Stack() - visited = {} - dfs_stack.append(source_node) - visited[source_node] = True - while len(dfs_stack) != 0: - curr_node = dfs_stack.pop() - next_nodes = graph.neighbors(curr_node) - if len(next_nodes) != 0: - for next_node in next_nodes: - if next_node.name not in visited: - status = operation(curr_node, next_node.name, *args, **kwargs) - if not status: - return None - dfs_stack.append(next_node.name) - visited[next_node.name] = True - else: - status = operation(curr_node, "", *args, **kwargs) - if not status: - return None - -_depth_first_search_adjacency_matrix = _depth_first_search_adjacency_list - -def shortest_paths(graph: Graph, algorithm: str, - source: str, target: str="", - **kwargs) -> 
tuple: - """ - Finds shortest paths in the given graph from a given source. - - Parameters - ========== - - graph: Graph - The graph under consideration. - algorithm: str - The algorithm to be used. Currently, the following algorithms - are implemented, - - 'bellman_ford' -> Bellman-Ford algorithm as given in [1] - - 'dijkstra' -> Dijkstra algorithm as given in [2]. - source: str - The name of the source the node. - target: str - The name of the target node. - Optional, by default, all pair shortest paths - are returned. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - (distances, predecessors): (dict, dict) - If target is not provided and algorithm used - is 'bellman_ford'/'dijkstra'. - (distances[target], predecessors): (float, dict) - If target is provided and algorithm used is - 'bellman_ford'/'dijkstra'. - - Examples - ======== - - >>> from pydatastructs import Graph, AdjacencyListGraphNode - >>> from pydatastructs import shortest_paths - >>> V1 = AdjacencyListGraphNode("V1") - >>> V2 = AdjacencyListGraphNode("V2") - >>> V3 = AdjacencyListGraphNode("V3") - >>> G = Graph(V1, V2, V3) - >>> G.add_edge('V2', 'V3', 10) - >>> G.add_edge('V1', 'V2', 11) - >>> shortest_paths(G, 'bellman_ford', 'V1') - ({'V1': 0, 'V2': 11, 'V3': 21}, {'V1': None, 'V2': 'V1', 'V3': 'V2'}) - >>> shortest_paths(G, 'dijkstra', 'V1') - ({'V2': 11, 'V3': 21, 'V1': 0}, {'V1': None, 'V2': 'V1', 'V3': 'V2'}) - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Bellman%E2%80%93Ford_algorithm - .. 
[2] https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm - """ - backend = kwargs.get('backend', Backend.PYTHON) - if (backend == Backend.PYTHON): - import pydatastructs.graphs.algorithms as algorithms - func = "_" + algorithm + "_" + graph._impl - if not hasattr(algorithms, func): - raise NotImplementedError( - "Currently %s algorithm isn't implemented for " - "finding shortest paths in graphs."%(algorithm)) - return getattr(algorithms, func)(graph, source, target) - else: - from pydatastructs.graphs._backend.cpp._algorithms import shortest_paths_dijkstra_adjacency_list - if graph._impl == "adjacency_list" and algorithm == 'dijkstra': - return shortest_paths_dijkstra_adjacency_list(graph, source, target) - -def _bellman_ford_adjacency_list(graph: Graph, source: str, target: str) -> tuple: - distances, predecessor, visited, cnts = {}, {}, {}, {} - - for v in graph.vertices: - distances[v] = float('inf') - predecessor[v] = None - visited[v] = False - cnts[v] = 0 - distances[source] = 0 - verticy_num = len(graph.vertices) - - que = Queue([source]) - - while que: - u = que.popleft() - visited[u] = False - neighbors = graph.neighbors(u) - for neighbor in neighbors: - v = neighbor.name - edge_str = u + '_' + v - if distances[u] != float('inf') and distances[u] + graph.edge_weights[edge_str].value < distances[v]: - distances[v] = distances[u] + graph.edge_weights[edge_str].value - predecessor[v] = u - cnts[v] = cnts[u] + 1 - if cnts[v] >= verticy_num: - raise ValueError("Graph contains a negative weight cycle.") - if not visited[v]: - que.append(v) - visited[v] = True - - if target != "": - return (distances[target], predecessor) - return (distances, predecessor) - -_bellman_ford_adjacency_matrix = _bellman_ford_adjacency_list - -def _dijkstra_adjacency_list(graph: Graph, start: str, target: str): - V = len(graph.vertices) - visited, dist, pred = {}, {}, {} - for v in graph.vertices: - visited[v] = False - pred[v] = None - if v != start: - dist[v] = float('inf') - 
dist[start] = 0 - pq = PriorityQueue(implementation='binomial_heap') - for vertex in dist: - pq.push(vertex, dist[vertex]) - for _ in range(V): - u = pq.pop() - visited[u] = True - for v in graph.vertices: - edge_str = u + '_' + v - if (edge_str in graph.edge_weights and graph.edge_weights[edge_str].value >= 0 and - visited[v] is False and dist[v] > dist[u] + graph.edge_weights[edge_str].value): - dist[v] = dist[u] + graph.edge_weights[edge_str].value - pred[v] = u - pq.push(v, dist[v]) - - if target != "": - return (dist[target], pred) - return dist, pred - -_dijkstra_adjacency_matrix = _dijkstra_adjacency_list - -def all_pair_shortest_paths(graph: Graph, algorithm: str, - **kwargs) -> tuple: - """ - Finds shortest paths between all pairs of vertices in the given graph. - - Parameters - ========== - - graph: Graph - The graph under consideration. - algorithm: str - The algorithm to be used. Currently, the following algorithms - are implemented, - - 'floyd_warshall' -> Floyd Warshall algorithm as given in [1]. - 'johnson' -> Johnson's Algorithm as given in [2] - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - (distances, predecessors): (dict, dict) - - Examples - ======== - - >>> from pydatastructs import Graph, AdjacencyListGraphNode - >>> from pydatastructs import all_pair_shortest_paths - >>> V1 = AdjacencyListGraphNode("V1") - >>> V2 = AdjacencyListGraphNode("V2") - >>> V3 = AdjacencyListGraphNode("V3") - >>> G = Graph(V1, V2, V3) - >>> G.add_edge('V2', 'V3', 10) - >>> G.add_edge('V1', 'V2', 11) - >>> G.add_edge('V3', 'V1', 5) - >>> dist, _ = all_pair_shortest_paths(G, 'floyd_warshall') - >>> dist['V1']['V3'] - 21 - >>> dist['V3']['V1'] - 5 - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Floyd%E2%80%93Warshall_algorithm - .. 
[2] https://en.wikipedia.org/wiki/Johnson's_algorithm - """ - raise_if_backend_is_not_python( - all_pair_shortest_paths, kwargs.get('backend', Backend.PYTHON)) - import pydatastructs.graphs.algorithms as algorithms - func = "_" + algorithm + "_" + graph._impl - if not hasattr(algorithms, func): - raise NotImplementedError( - "Currently %s algorithm isn't implemented for " - "finding shortest paths in graphs."%(algorithm)) - return getattr(algorithms, func)(graph) - -def _floyd_warshall_adjacency_list(graph: Graph): - dist, next_vertex = {}, {} - V, E = graph.vertices, graph.edge_weights - - for v in V: - dist[v] = {} - next_vertex[v] = {} - - for name, edge in E.items(): - dist[edge.source.name][edge.target.name] = edge.value - next_vertex[edge.source.name][edge.target.name] = edge.source.name - - for v in V: - dist[v][v] = 0 - next_vertex[v][v] = v - - for k in V: - for i in V: - for j in V: - dist_i_j = dist.get(i, {}).get(j, float('inf')) - dist_i_k = dist.get(i, {}).get(k, float('inf')) - dist_k_j = dist.get(k, {}).get(j, float('inf')) - next_i_k = next_vertex.get(i + '_' + k, None) - if dist_i_j > dist_i_k + dist_k_j: - dist[i][j] = dist_i_k + dist_k_j - next_vertex[i][j] = next_i_k - - return (dist, next_vertex) - -_floyd_warshall_adjacency_matrix = _floyd_warshall_adjacency_list - -def _johnson_adjacency_list(graph: Graph): - new_vertex = AdjacencyListGraphNode('__q__') - graph.add_vertex(new_vertex) - - for vertex in graph.vertices: - if vertex != '__q__': - graph.add_edge('__q__', vertex, 0) - - distances, predecessors = shortest_paths(graph, 'bellman_ford', '__q__') - - edges_to_remove = [] - for edge in graph.edge_weights: - edge_node = graph.edge_weights[edge] - if edge_node.source.name == '__q__': - edges_to_remove.append((edge_node.source.name, edge_node.target.name)) - - for u, v in edges_to_remove: - graph.remove_edge(u, v) - graph.remove_vertex('__q__') - - for edge in graph.edge_weights: - edge_node = graph.edge_weights[edge] - u, v = 
edge_node.source.name, edge_node.target.name - graph.edge_weights[edge].value += (distances[u] - distances[v]) - - all_distances = {} - all_next_vertex = {} - - for vertex in graph.vertices: - u = vertex - dijkstra_dist, dijkstra_pred = shortest_paths(graph, 'dijkstra', u) - all_distances[u] = {} - all_next_vertex[u] = {} - for v in graph.vertices: - if dijkstra_pred[v] is None or dijkstra_pred[v] == u : - all_next_vertex[u][v] = u - else: - all_next_vertex[u][v] = None - if v in dijkstra_dist: - all_distances[u][v] = dijkstra_dist[v] - distances[u] + distances[v] - else: - all_distances[u][v] = float('inf') - - return (all_distances, all_next_vertex) - -def topological_sort(graph: Graph, algorithm: str, - **kwargs) -> list: - """ - Performs topological sort on the given graph using given algorithm. - - Parameters - ========== - - graph: Graph - The graph under consideration. - algorithm: str - The algorithm to be used. - Currently, following are supported, - - 'kahn' -> Kahn's algorithm as given in [1]. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - list - The list of topologically sorted vertices. - - Examples - ======== - - >>> from pydatastructs import Graph, AdjacencyListGraphNode, topological_sort - >>> v_1 = AdjacencyListGraphNode('v_1') - >>> v_2 = AdjacencyListGraphNode('v_2') - >>> graph = Graph(v_1, v_2) - >>> graph.add_edge('v_1', 'v_2') - >>> topological_sort(graph, 'kahn') - ['v_1', 'v_2'] - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Topological_sorting#Kahn's_algorithm - """ - raise_if_backend_is_not_python( - topological_sort, kwargs.get('backend', Backend.PYTHON)) - import pydatastructs.graphs.algorithms as algorithms - func = "_" + algorithm + "_" + graph._impl - if not hasattr(algorithms, func): - raise NotImplementedError( - "Currently %s algorithm isn't implemented for " - "performing topological sort on %s graphs."%(algorithm, graph._impl)) - return getattr(algorithms, func)(graph) - -def _kahn_adjacency_list(graph: Graph) -> list: - S = Queue() - in_degree = {u: 0 for u in graph.vertices} - for u in graph.vertices: - for v in graph.neighbors(u): - in_degree[v.name] += 1 - for u in graph.vertices: - if in_degree[u] == 0: - S.append(u) - in_degree.pop(u) - - L = [] - while S: - n = S.popleft() - L.append(n) - for m in graph.neighbors(n): - graph.remove_edge(n, m.name) - in_degree[m.name] -= 1 - if in_degree[m.name] == 0: - S.append(m.name) - in_degree.pop(m.name) - - if in_degree: - raise ValueError("Graph is not acyclic.") - return L - -def topological_sort_parallel(graph: Graph, algorithm: str, num_threads: int, - **kwargs) -> list: - """ - Performs topological sort on the given graph using given algorithm using - given number of threads. - - Parameters - ========== - - graph: Graph - The graph under consideration. - algorithm: str - The algorithm to be used. - Currently, following are supported, - - 'kahn' -> Kahn's algorithm as given in [1]. - num_threads: int - The maximum number of threads to be used. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - list - The list of topologically sorted vertices. 
- - Examples - ======== - - >>> from pydatastructs import Graph, AdjacencyListGraphNode, topological_sort_parallel - >>> v_1 = AdjacencyListGraphNode('v_1') - >>> v_2 = AdjacencyListGraphNode('v_2') - >>> graph = Graph(v_1, v_2) - >>> graph.add_edge('v_1', 'v_2') - >>> topological_sort_parallel(graph, 'kahn', 1) - ['v_1', 'v_2'] - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Topological_sorting#Kahn's_algorithm - """ - raise_if_backend_is_not_python( - topological_sort_parallel, kwargs.get('backend', Backend.PYTHON)) - import pydatastructs.graphs.algorithms as algorithms - func = "_" + algorithm + "_" + graph._impl + '_parallel' - if not hasattr(algorithms, func): - raise NotImplementedError( - "Currently %s algorithm isn't implemented for " - "performing topological sort on %s graphs."%(algorithm, graph._impl)) - return getattr(algorithms, func)(graph, num_threads) - -def _kahn_adjacency_list_parallel(graph: Graph, num_threads: int) -> list: - num_vertices = len(graph.vertices) - - def _collect_source_nodes(graph: Graph) -> list: - S = [] - in_degree = {u: 0 for u in graph.vertices} - for u in graph.vertices: - for v in graph.neighbors(u): - in_degree[v.name] += 1 - for u in in_degree: - if in_degree[u] == 0: - S.append(u) - return list(S) - - def _job(graph: Graph, u: str): - for v in graph.neighbors(u): - graph.remove_edge(u, v.name) - - L = [] - source_nodes = _collect_source_nodes(graph) - while source_nodes: - with ThreadPoolExecutor(max_workers=num_threads) as Executor: - for node in source_nodes: - L.append(node) - Executor.submit(_job, graph, node) - for node in source_nodes: - graph.remove_vertex(node) - source_nodes = _collect_source_nodes(graph) - - if len(L) != num_vertices: - raise ValueError("Graph is not acyclic.") - return L - - -def _breadth_first_search_max_flow(graph: Graph, source_node, sink_node, flow_passed, for_dinic=False): - bfs_queue = Queue() - parent, currentPathC = {}, {} - currentPathC[source_node] = float('inf') 
- bfs_queue.append(source_node) - while len(bfs_queue) != 0: - curr_node = bfs_queue.popleft() - next_nodes = graph.neighbors(curr_node) - if len(next_nodes) != 0: - for next_node in next_nodes: - capacity = graph.get_edge(curr_node, next_node.name).value - fp = flow_passed.get((curr_node, next_node.name), 0) - if capacity and parent.get(next_node.name, False) is False and capacity - fp > 0: - parent[next_node.name] = curr_node - next_flow = min(currentPathC[curr_node], capacity - fp) - currentPathC[next_node.name] = next_flow - if next_node.name == sink_node and not for_dinic: - return (next_flow, parent) - bfs_queue.append(next_node.name) - return (0, parent) - - -def _max_flow_edmonds_karp_(graph: Graph, source, sink): - m_flow = 0 - flow_passed = {} - new_flow, parent = _breadth_first_search_max_flow(graph, source, sink, flow_passed) - while new_flow != 0: - m_flow += new_flow - current = sink - while current != source: - prev = parent[current] - fp = flow_passed.get((prev, current), 0) - flow_passed[(prev, current)] = fp + new_flow - fp = flow_passed.get((current, prev), 0) - flow_passed[(current, prev)] = fp - new_flow - current = prev - new_flow, parent = _breadth_first_search_max_flow(graph, source, sink, flow_passed) - return m_flow - - -def _depth_first_search_max_flow_dinic(graph: Graph, u, parent, sink_node, flow, flow_passed): - if u == sink_node: - return flow - - next_nodes = graph.neighbors(u) - if len(next_nodes) != 0: - for next_node in next_nodes: - capacity = graph.get_edge(u, next_node.name).value - fp = flow_passed.get((u, next_node.name), 0) - parent_cond = parent.get(next_node.name, None) - if parent_cond and parent_cond == u and capacity - fp > 0: - path_flow = _depth_first_search_max_flow_dinic(graph, - next_node.name, - parent, sink_node, - min(flow, capacity - fp), flow_passed) - if path_flow > 0: - fp = flow_passed.get((u, next_node.name), 0) - flow_passed[(u, next_node.name)] = fp + path_flow - fp = flow_passed.get((next_node.name, u), 
0) - flow_passed[(next_node.name, u)] = fp - path_flow - return path_flow - return 0 - - -def _max_flow_dinic_(graph: Graph, source, sink): - max_flow = 0 - flow_passed = {} - while True: - next_flow, parent = _breadth_first_search_max_flow(graph, source, sink, flow_passed, True) - if parent.get(sink, False) is False: - break - - while True: - path_flow = _depth_first_search_max_flow_dinic(graph, source, - parent, sink, - float('inf'), - flow_passed) - if path_flow <= 0: - break - max_flow += path_flow - - return max_flow - - -def max_flow(graph, source, sink, algorithm='edmonds_karp', **kwargs): - raise_if_backend_is_not_python( - max_flow, kwargs.get('backend', Backend.PYTHON)) - - import pydatastructs.graphs.algorithms as algorithms - func = "_max_flow_" + algorithm + "_" - if not hasattr(algorithms, func): - raise NotImplementedError( - f"Currently {algorithm} algorithm isn't implemented for " - "performing max flow on graphs.") - return getattr(algorithms, func)(graph, source, sink) - - -def find_bridges(graph): - """ - Finds all bridges in an undirected graph using Tarjan's Algorithm. - - Parameters - ========== - graph : Graph - An undirected graph instance. - - Returns - ========== - List[tuple] - A list of bridges, where each bridge is represented as a tuple (u, v) - with u <= v. - - Example - ======== - >>> from pydatastructs import Graph, AdjacencyListGraphNode, find_bridges - >>> v0 = AdjacencyListGraphNode(0) - >>> v1 = AdjacencyListGraphNode(1) - >>> v2 = AdjacencyListGraphNode(2) - >>> v3 = AdjacencyListGraphNode(3) - >>> v4 = AdjacencyListGraphNode(4) - >>> graph = Graph(v0, v1, v2, v3, v4, implementation='adjacency_list') - >>> graph.add_edge(v0.name, v1.name) - >>> graph.add_edge(v1.name, v2.name) - >>> graph.add_edge(v2.name, v3.name) - >>> graph.add_edge(v3.name, v4.name) - >>> find_bridges(graph) - [('0', '1'), ('1', '2'), ('2', '3'), ('3', '4')] - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Bridge_(graph_theory) - """ - - vertices = list(graph.vertices) - processed_vertices = [] - for v in vertices: - if hasattr(v, "name"): - processed_vertices.append(v.name) - else: - processed_vertices.append(v) - - n = len(processed_vertices) - adj = {v: [] for v in processed_vertices} - for v in processed_vertices: - for neighbor in graph.neighbors(v): - if hasattr(neighbor, "name"): - nbr = neighbor.name - else: - nbr = neighbor - adj[v].append(nbr) - - mapping = {v: idx for idx, v in enumerate(processed_vertices)} - inv_mapping = {idx: v for v, idx in mapping.items()} - - n_adj = [[] for _ in range(n)] - for v in processed_vertices: - idx_v = mapping[v] - for u in adj[v]: - idx_u = mapping[u] - n_adj[idx_v].append(idx_u) - - visited = [False] * n - disc = [0] * n - low = [0] * n - parent = [-1] * n - bridges_idx = [] - time = 0 - - def dfs(u): - nonlocal time - visited[u] = True - disc[u] = low[u] = time - time += 1 - for v in n_adj[u]: - if not visited[v]: - parent[v] = u - dfs(v) - low[u] = min(low[u], low[v]) - if low[v] > disc[u]: - bridges_idx.append((u, v)) - elif v != parent[u]: - low[u] = min(low[u], disc[v]) - - for i in range(n): - if not visited[i]: - dfs(i) - - bridges = [] - for u, v in bridges_idx: - a = inv_mapping[u] - b = inv_mapping[v] - if a <= b: - bridges.append((a, b)) - else: - bridges.append((b, a)) - bridges.sort() - return bridges diff --git a/lib/python3.12/site-packages/pydatastructs/graphs/graph.py b/lib/python3.12/site-packages/pydatastructs/graphs/graph.py deleted file mode 100644 index 39c2692e3..000000000 --- a/lib/python3.12/site-packages/pydatastructs/graphs/graph.py +++ /dev/null @@ -1,163 +0,0 @@ - -from pydatastructs.utils.misc_util import Backend, raise_if_backend_is_not_python - -__all__ = [ - 'Graph' -] - -class Graph(object): - """ - Represents generic concept of graphs. - - Parameters - ========== - - implementation: str - The implementation to be used for storing - graph in memory. 
It can be figured out - from type of the vertices(if passed at construction). - Currently the following implementations are supported, - - 'adjacency_list' -> Adjacency list implementation. - - 'adjacency_matrix' -> Adjacency matrix implementation. - - By default, 'adjacency_list'. - vertices: GraphNode(s) - For AdjacencyList implementation vertices - can be passed for initializing the graph. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs.graphs import Graph - >>> from pydatastructs.utils import AdjacencyListGraphNode - >>> v_1 = AdjacencyListGraphNode('v_1', 1) - >>> v_2 = AdjacencyListGraphNode('v_2', 2) - >>> g = Graph(v_1, v_2) - >>> g.add_edge('v_1', 'v_2') - >>> g.add_edge('v_2', 'v_1') - >>> g.is_adjacent('v_1', 'v_2') - True - >>> g.is_adjacent('v_2', 'v_1') - True - >>> g.remove_edge('v_1', 'v_2') - >>> g.is_adjacent('v_1', 'v_2') - False - >>> g.is_adjacent('v_2', 'v_1') - True - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Graph_(abstract_data_type) - - Note - ==== - - Make sure to create nodes (AdjacencyListGraphNode or AdjacencyMatrixGraphNode) - and them in your graph using Graph.add_vertex before adding edges whose - end points require either of the nodes that you added. In other words, - Graph.add_edge doesn't add new nodes on its own if the input - nodes are not already present in the Graph. 
- - """ - - __slots__ = ['_impl'] - - def __new__(cls, *args, **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - try: - default_impl = args[0]._impl if args else 'adjacency_list' - except: - default_impl = 'adjacency_list' - implementation = kwargs.get('implementation', default_impl) - if implementation == 'adjacency_list': - from pydatastructs.graphs.adjacency_list import AdjacencyList - obj = AdjacencyList(*args, **kwargs) - return obj - elif implementation == 'adjacency_matrix': - from pydatastructs.graphs.adjacency_matrix import AdjacencyMatrix - obj = AdjacencyMatrix(*args, **kwargs) - return obj - else: - raise NotImplementedError("%s implementation is not a part " - "of the library currently."%(implementation)) - - def is_adjacent(self, node1, node2): - """ - Checks if the nodes with the given - with the given names are adjacent - to each other. - """ - raise NotImplementedError( - "This is an abstract method.") - - def neighbors(self, node): - """ - Lists the neighbors of the node - with given name. - """ - raise NotImplementedError( - "This is an abstract method.") - - def add_vertex(self, node): - """ - Adds the input vertex to the node, or does nothing - if the input vertex is already in the graph. - """ - raise NotImplementedError( - "This is an abstract method.") - - def remove_vertex(self, node): - """ - Removes the input vertex along with all the edges - pointing towards it. - """ - raise NotImplementedError( - "This is an abstract method.") - - def add_edge(self, source, target, cost=None): - """ - Adds the edge starting at first parameter - i.e., source and ending at the second - parameter i.e., target. - """ - raise NotImplementedError( - "This is an abstract method.") - - def get_edge(self, source, target): - """ - Returns GraphEdge object if there - is an edge between source and target - otherwise None. 
- """ - raise NotImplementedError( - "This is an abstract method.") - - def remove_edge(self, source, target): - """ - Removes the edge starting at first parameter - i.e., source and ending at the second - parameter i.e., target. - """ - raise NotImplementedError( - "This is an abstract method.") - - def num_vertices(self): - """ - Number of vertices - """ - raise NotImplementedError( - "This is an abstract method.") - - def num_edges(self): - """ - Number of edges - """ - raise NotImplementedError( - "This is an abstract method.") diff --git a/lib/python3.12/site-packages/pydatastructs/graphs/tests/__init__.py b/lib/python3.12/site-packages/pydatastructs/graphs/tests/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_adjacency_list.py b/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_adjacency_list.py deleted file mode 100644 index 3a9cdb14f..000000000 --- a/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_adjacency_list.py +++ /dev/null @@ -1,83 +0,0 @@ -from pydatastructs.graphs import Graph -from pydatastructs.utils import AdjacencyListGraphNode -from pydatastructs.utils.raises_util import raises -from pydatastructs.utils.misc_util import Backend - -def test_adjacency_list(): - v_1 = AdjacencyListGraphNode('v_1', 1) - v_2 = AdjacencyListGraphNode('v_2', 2) - g = Graph(v_1, v_2, implementation='adjacency_list') - v_3 = AdjacencyListGraphNode('v_3', 3) - g.add_vertex(v_2) - g.add_vertex(v_3) - g.add_edge('v_1', 'v_2') - g.add_edge('v_2', 'v_3') - g.add_edge('v_3', 'v_1') - assert g.is_adjacent('v_1', 'v_2') is True - assert g.is_adjacent('v_2', 'v_3') is True - assert g.is_adjacent('v_3', 'v_1') is True - assert g.is_adjacent('v_2', 'v_1') is False - assert g.is_adjacent('v_3', 'v_2') is False - assert g.is_adjacent('v_1', 'v_3') is False - neighbors = g.neighbors('v_1') - assert neighbors == [v_2] - v = AdjacencyListGraphNode('v', 4) - g.add_vertex(v) 
- g.add_edge('v_1', 'v', 0) - g.add_edge('v_2', 'v', 0) - g.add_edge('v_3', 'v', 0) - assert g.is_adjacent('v_1', 'v') is True - assert g.is_adjacent('v_2', 'v') is True - assert g.is_adjacent('v_3', 'v') is True - e1 = g.get_edge('v_1', 'v') - e2 = g.get_edge('v_2', 'v') - e3 = g.get_edge('v_3', 'v') - assert (e1.source.name, e1.target.name) == ('v_1', 'v') - assert (e2.source.name, e2.target.name) == ('v_2', 'v') - assert (e3.source.name, e3.target.name) == ('v_3', 'v') - g.remove_edge('v_1', 'v') - assert g.is_adjacent('v_1', 'v') is False - g.remove_vertex('v') - assert g.is_adjacent('v_2', 'v') is False - assert g.is_adjacent('v_3', 'v') is False - - assert raises(ValueError, lambda: g.add_edge('u', 'v')) - assert raises(ValueError, lambda: g.add_edge('v', 'x')) - - v_4 = AdjacencyListGraphNode('v_4', 4, backend = Backend.CPP) - v_5 = AdjacencyListGraphNode('v_5', 5, backend = Backend.CPP) - g2 = Graph(v_4,v_5,implementation = 'adjacency_list', backend = Backend.CPP) - v_6 = AdjacencyListGraphNode('v_6', 6, backend = Backend.CPP) - assert raises(ValueError, lambda: g2.add_vertex(v_5)) - g2.add_vertex(v_6) - g2.add_edge('v_4', 'v_5') - g2.add_edge('v_5', 'v_6') - g2.add_edge('v_4', 'v_6') - assert g2.is_adjacent('v_4', 'v_5') is True - assert g2.is_adjacent('v_5', 'v_6') is True - assert g2.is_adjacent('v_4', 'v_6') is True - assert g2.is_adjacent('v_5', 'v_4') is False - assert g2.is_adjacent('v_6', 'v_5') is False - assert g2.is_adjacent('v_6', 'v_4') is False - assert g2.num_edges() == 3 - assert g2.num_vertices() == 3 - neighbors = g2.neighbors('v_4') - assert neighbors == [v_6, v_5] - v = AdjacencyListGraphNode('v', 4, backend = Backend.CPP) - g2.add_vertex(v) - g2.add_edge('v_4', 'v', 0) - g2.add_edge('v_5', 'v', 0) - g2.add_edge('v_6', 'v', "h") - assert g2.is_adjacent('v_4', 'v') is True - assert g2.is_adjacent('v_5', 'v') is True - assert g2.is_adjacent('v_6', 'v') is True - e1 = g2.get_edge('v_4', 'v') - e2 = g2.get_edge('v_5', 'v') - e3 = 
g2.get_edge('v_6', 'v') - assert (str(e1)) == "('v_4', 'v', 0)" - assert (str(e2)) == "('v_5', 'v', 0)" - assert (str(e3)) == "('v_6', 'v', h)" - g2.remove_edge('v_4', 'v') - assert g2.is_adjacent('v_4', 'v') is False - g2.remove_vertex('v') - assert raises(ValueError, lambda: g2.add_edge('v_4', 'v')) diff --git a/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_adjacency_matrix.py b/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_adjacency_matrix.py deleted file mode 100644 index 27dc81790..000000000 --- a/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_adjacency_matrix.py +++ /dev/null @@ -1,53 +0,0 @@ -from pydatastructs.graphs import Graph -from pydatastructs.utils import AdjacencyMatrixGraphNode -from pydatastructs.utils.raises_util import raises -from pydatastructs.utils.misc_util import Backend - -def test_AdjacencyMatrix(): - v_0 = AdjacencyMatrixGraphNode(0, 0) - v_1 = AdjacencyMatrixGraphNode(1, 1) - v_2 = AdjacencyMatrixGraphNode(2, 2) - g = Graph(v_0, v_1, v_2) - g.add_edge(0, 1, 0) - g.add_edge(1, 2, 0) - g.add_edge(2, 0, 0) - e1 = g.get_edge(0, 1) - e2 = g.get_edge(1, 2) - e3 = g.get_edge(2, 0) - assert (e1.source.name, e1.target.name) == ('0', '1') - assert (e2.source.name, e2.target.name) == ('1', '2') - assert (e3.source.name, e3.target.name) == ('2', '0') - assert g.is_adjacent(0, 1) is True - assert g.is_adjacent(1, 2) is True - assert g.is_adjacent(2, 0) is True - assert g.is_adjacent(1, 0) is False - assert g.is_adjacent(2, 1) is False - assert g.is_adjacent(0, 2) is False - neighbors = g.neighbors(0) - assert neighbors == [v_1] - g.remove_edge(0, 1) - assert g.is_adjacent(0, 1) is False - assert raises(ValueError, lambda: g.add_edge('u', 'v')) - assert raises(ValueError, lambda: g.add_edge('v', 'x')) - assert raises(ValueError, lambda: g.add_edge(2, 3)) - assert raises(ValueError, lambda: g.add_edge(3, 2)) - - v_3 = AdjacencyMatrixGraphNode('0', 0, backend = Backend.CPP) - v_4 = 
AdjacencyMatrixGraphNode('1', 1, backend = Backend.CPP) - v_5 = AdjacencyMatrixGraphNode('2', 2, backend = Backend.CPP) - g2 = Graph(v_3, v_4, v_5, implementation = 'adjacency_matrix', backend = Backend.CPP) - g2.add_edge('0', '1', 0) - g2.add_edge('1', '2', 0) - g2.add_edge('2', '0', 0) - assert g2.is_adjacent('0', '1') is True - assert g2.is_adjacent('1', '2') is True - assert g2.is_adjacent('2', '0') is True - assert g2.is_adjacent('1', '0') is False - assert g2.is_adjacent('2', '1') is False - assert g2.is_adjacent('0', '2') is False - neighbors = g2.neighbors('0') - assert neighbors == [v_4] - g2.remove_edge('0', '1') - assert g2.is_adjacent('0', '1') is False - assert raises(ValueError, lambda: g2.add_edge('u', 'v')) - assert raises(ValueError, lambda: g2.add_edge('v', 'x')) diff --git a/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_algorithms.py b/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_algorithms.py deleted file mode 100644 index 04ebcccda..000000000 --- a/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_algorithms.py +++ /dev/null @@ -1,596 +0,0 @@ -from pydatastructs import (breadth_first_search, Graph, -breadth_first_search_parallel, minimum_spanning_tree, -minimum_spanning_tree_parallel, strongly_connected_components, -depth_first_search, shortest_paths,all_pair_shortest_paths, topological_sort, -topological_sort_parallel, max_flow, find_bridges) -from pydatastructs.utils.raises_util import raises -from pydatastructs.utils.misc_util import AdjacencyListGraphNode, AdjacencyMatrixGraphNode -from pydatastructs.graphs._backend.cpp import _graph -from pydatastructs.graphs._backend.cpp import _algorithms -from pydatastructs.utils.misc_util import Backend - -def test_breadth_first_search(): - - def _test_breadth_first_search(ds): - import pydatastructs.utils.misc_util as utils - GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") - - V1 = GraphNode(0) - V2 = GraphNode(1) - V3 = GraphNode(2) - - G1 = 
Graph(V1, V2, V3) - - assert G1.num_vertices() == 3 - - edges = [ - (V1.name, V2.name), - (V2.name, V3.name), - (V1.name, V3.name) - ] - - for edge in edges: - G1.add_edge(*edge) - - assert G1.num_edges() == len(edges) - - parent = {} - def bfs_tree(curr_node, next_node, parent): - if next_node != "": - parent[next_node] = curr_node - return True - - breadth_first_search(G1, V1.name, bfs_tree, parent) - assert (parent[V3.name] == V1.name and parent[V2.name] == V1.name) or \ - (parent[V3.name] == V2.name and parent[V2.name] == V1.name) - - if (ds=='List'): - parent = {} - V9 = AdjacencyListGraphNode("9",0,backend = Backend.CPP) - V10 = AdjacencyListGraphNode("10",0,backend = Backend.CPP) - V11 = AdjacencyListGraphNode("11",0,backend = Backend.CPP) - G2 = Graph(V9, V10, V11,implementation = 'adjacency_list', backend = Backend.CPP) - assert G2.num_vertices()==3 - G2.add_edge("9", "10") - G2.add_edge("10", "11") - breadth_first_search(G2, "9", bfs_tree, parent, backend = Backend.CPP) - assert parent[V10] == V9 - assert parent[V11] == V10 - - if (ds == 'Matrix'): - parent3 = {} - V12 = AdjacencyMatrixGraphNode("12", 0, backend = Backend.CPP) - V13 = AdjacencyMatrixGraphNode("13", 0, backend = Backend.CPP) - V14 = AdjacencyMatrixGraphNode("14", 0, backend = Backend.CPP) - G3 = Graph(V12, V13, V14, implementation = 'adjacency_matrix', backend = Backend.CPP) - assert G3.num_vertices() == 3 - G3.add_edge("12", "13") - G3.add_edge("13", "14") - breadth_first_search(G3, "12", bfs_tree, parent3, backend = Backend.CPP) - assert parent3[V13] == V12 - assert parent3[V14] == V13 - - V4 = GraphNode(0) - V5 = GraphNode(1) - V6 = GraphNode(2) - V7 = GraphNode(3) - V8 = GraphNode(4) - - edges = [ - (V4.name, V5.name), - (V5.name, V6.name), - (V6.name, V7.name), - (V6.name, V4.name), - (V7.name, V8.name) - ] - - G2 = Graph(V4, V5, V6, V7, V8) - - for edge in edges: - G2.add_edge(*edge) - - assert G2.num_edges() == len(edges) - - path = [] - def path_finder(curr_node, next_node, 
dest_node, parent, path): - if next_node != "": - parent[next_node] = curr_node - if curr_node == dest_node: - node = curr_node - path.append(node) - while node is not None: - if parent.get(node, None) is not None: - path.append(parent[node]) - node = parent.get(node, None) - path.reverse() - return False - return True - - parent.clear() - breadth_first_search(G2, V4.name, path_finder, V7.name, parent, path) - assert path == [V4.name, V5.name, V6.name, V7.name] - - _test_breadth_first_search("List") - _test_breadth_first_search("Matrix") - -def test_breadth_first_search_parallel(): - - def _test_breadth_first_search_parallel(ds): - import pydatastructs.utils.misc_util as utils - GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") - - V1 = GraphNode(0) - V2 = GraphNode(1) - V3 = GraphNode(2) - V4 = GraphNode(3) - V5 = GraphNode(4) - V6 = GraphNode(5) - V7 = GraphNode(6) - V8 = GraphNode(7) - - - G1 = Graph(V1, V2, V3, V4, V5, V6, V7, V8) - - edges = [ - (V1.name, V2.name), - (V1.name, V3.name), - (V1.name, V4.name), - (V2.name, V5.name), - (V2.name, V6.name), - (V3.name, V6.name), - (V3.name, V7.name), - (V4.name, V7.name), - (V4.name, V8.name) - ] - - for edge in edges: - G1.add_edge(*edge) - - parent = {} - def bfs_tree(curr_node, next_node, parent): - if next_node != "": - parent[next_node] = curr_node - return True - - breadth_first_search_parallel(G1, V1.name, 5, bfs_tree, parent) - assert (parent[V2.name] == V1.name and parent[V3.name] == V1.name and - parent[V4.name] == V1.name and parent[V5.name] == V2.name and - (parent[V6.name] in (V2.name, V3.name)) and - (parent[V7.name] in (V3.name, V4.name)) and (parent[V8.name] == V4.name)) - - _test_breadth_first_search_parallel("List") - _test_breadth_first_search_parallel("Matrix") - -def test_minimum_spanning_tree(): - - def _test_minimum_spanning_tree(func, ds, algorithm, *args): - import pydatastructs.utils.misc_util as utils - GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") - a, b, c, d, e = 
[GraphNode(x) for x in [0, 1, 2, 3, 4]] - graph = Graph(a, b, c, d, e) - graph.add_edge(a.name, c.name, 10) - graph.add_edge(c.name, a.name, 10) - graph.add_edge(a.name, d.name, 7) - graph.add_edge(d.name, a.name, 7) - graph.add_edge(c.name, d.name, 9) - graph.add_edge(d.name, c.name, 9) - graph.add_edge(d.name, b.name, 32) - graph.add_edge(b.name, d.name, 32) - graph.add_edge(d.name, e.name, 23) - graph.add_edge(e.name, d.name, 23) - mst = func(graph, algorithm, *args) - expected_mst = [('0_3', 7), ('2_3', 9), ('3_4', 23), ('3_1', 32), - ('3_0', 7), ('3_2', 9), ('4_3', 23), ('1_3', 32)] - assert len(expected_mst) == len(mst.edge_weights.items()) - for k, v in mst.edge_weights.items(): - assert (k, v.value) in expected_mst - - def _test_minimum_spanning_tree_cpp(ds, algorithm, *args): - if (ds == 'List' and algorithm == "prim"): - a1 = AdjacencyListGraphNode('a', 0, backend = Backend.CPP) - b1 = AdjacencyListGraphNode('b', 0, backend = Backend.CPP) - c1 = AdjacencyListGraphNode('c', 0, backend = Backend.CPP) - d1 = AdjacencyListGraphNode('d', 0, backend = Backend.CPP) - e1 = AdjacencyListGraphNode('e', 0, backend = Backend.CPP) - g = Graph(a1, b1, c1, d1, e1, backend = Backend.CPP) - g.add_edge(a1.name, c1.name, 10) - g.add_edge(c1.name, a1.name, 10) - g.add_edge(a1.name, d1.name, 7) - g.add_edge(d1.name, a1.name, 7) - g.add_edge(c1.name, d1.name, 9) - g.add_edge(d1.name, c1.name, 9) - g.add_edge(d1.name, b1.name, 32) - g.add_edge(b1.name, d1.name, 32) - g.add_edge(d1.name, e1.name, 23) - g.add_edge(e1.name, d1.name, 23) - mst = minimum_spanning_tree(g, "prim", backend = Backend.CPP) - expected_mst = ["('a', 'd', 7)", "('d', 'c', 9)", "('e', 'd', 23)", "('b', 'd', 32)", - "('d', 'a', 7)", "('c', 'd', 9)", "('d', 'e', 23)", "('d', 'b', 32)"] - assert str(mst.get_edge('a', 'd')) in expected_mst - assert str(mst.get_edge('e', 'd')) in expected_mst - assert str(mst.get_edge('d', 'c')) in expected_mst - assert str(mst.get_edge('b', 'd')) in expected_mst - assert 
mst.num_edges() == 8 - a=AdjacencyListGraphNode('0', 0, backend = Backend.CPP) - b=AdjacencyListGraphNode('1', 0, backend = Backend.CPP) - c=AdjacencyListGraphNode('2', 0, backend = Backend.CPP) - d=AdjacencyListGraphNode('3', 0, backend = Backend.CPP) - g2 = Graph(a,b,c,d,backend = Backend.CPP) - g2.add_edge('0', '1', 74) - g2.add_edge('1', '0', 74) - g2.add_edge('0', '3', 55) - g2.add_edge('3', '0', 55) - g2.add_edge('1', '2', 74) - g2.add_edge('2', '1', 74) - mst2=minimum_spanning_tree(g2, "prim", backend = Backend.CPP) - assert mst2.num_edges() == 6 - - fmst = minimum_spanning_tree - fmstp = minimum_spanning_tree_parallel - _test_minimum_spanning_tree(fmst, "List", "kruskal") - _test_minimum_spanning_tree(fmst, "Matrix", "kruskal") - _test_minimum_spanning_tree(fmst, "List", "prim") - _test_minimum_spanning_tree(fmstp, "List", "kruskal", 3) - _test_minimum_spanning_tree(fmstp, "Matrix", "kruskal", 3) - _test_minimum_spanning_tree(fmstp, "List", "prim", 3) - _test_minimum_spanning_tree_cpp("List", "prim") - -def test_strongly_connected_components(): - - def _test_strongly_connected_components(func, ds, algorithm, *args): - import pydatastructs.utils.misc_util as utils - GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") - a, b, c, d, e, f, g, h = \ - [GraphNode(chr(x)) for x in range(ord('a'), ord('h') + 1)] - graph = Graph(a, b, c, d, e, f, g, h) - graph.add_edge(a.name, b.name) - graph.add_edge(b.name, c.name) - graph.add_edge(b.name, f.name) - graph.add_edge(b.name, e.name) - graph.add_edge(c.name, d.name) - graph.add_edge(c.name, g.name) - graph.add_edge(d.name, h.name) - graph.add_edge(d.name, c.name) - graph.add_edge(e.name, f.name) - graph.add_edge(e.name, a.name) - graph.add_edge(f.name, g.name) - graph.add_edge(g.name, f.name) - graph.add_edge(h.name, d.name) - graph.add_edge(h.name, g.name) - comps = func(graph, algorithm) - expected_comps = [{'e', 'a', 'b'}, {'d', 'c', 'h'}, {'g', 'f'}] - assert comps.sort() == expected_comps.sort() - - scc = 
strongly_connected_components - _test_strongly_connected_components(scc, "List", "kosaraju") - _test_strongly_connected_components(scc, "Matrix", "kosaraju") - _test_strongly_connected_components(scc, "List", "tarjan") - _test_strongly_connected_components(scc, "Matrix", "tarjan") - -def test_depth_first_search(): - - def _test_depth_first_search(ds): - import pydatastructs.utils.misc_util as utils - GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") - - V1 = GraphNode(0) - V2 = GraphNode(1) - V3 = GraphNode(2) - - G1 = Graph(V1, V2, V3) - - edges = [ - (V1.name, V2.name), - (V2.name, V3.name), - (V1.name, V3.name) - ] - - for edge in edges: - G1.add_edge(*edge) - - parent = {} - def dfs_tree(curr_node, next_node, parent): - if next_node != "": - parent[next_node] = curr_node - return True - - depth_first_search(G1, V1.name, dfs_tree, parent) - assert (parent[V3.name] == V1.name and parent[V2.name] == V1.name) or \ - (parent[V3.name] == V2.name and parent[V2.name] == V1.name) - - V4 = GraphNode(0) - V5 = GraphNode(1) - V6 = GraphNode(2) - V7 = GraphNode(3) - V8 = GraphNode(4) - - edges = [ - (V4.name, V5.name), - (V5.name, V6.name), - (V6.name, V7.name), - (V6.name, V4.name), - (V7.name, V8.name) - ] - - G2 = Graph(V4, V5, V6, V7, V8) - - for edge in edges: - G2.add_edge(*edge) - - path = [] - def path_finder(curr_node, next_node, dest_node, parent, path): - if next_node != "": - parent[next_node] = curr_node - if curr_node == dest_node: - node = curr_node - path.append(node) - while node is not None: - if parent.get(node, None) is not None: - path.append(parent[node]) - node = parent.get(node, None) - path.reverse() - return False - return True - - parent.clear() - depth_first_search(G2, V4.name, path_finder, V7.name, parent, path) - assert path == [V4.name, V5.name, V6.name, V7.name] - - _test_depth_first_search("List") - _test_depth_first_search("Matrix") - -def test_shortest_paths(): - - def _test_shortest_paths_positive_edges(ds, algorithm): - import 
pydatastructs.utils.misc_util as utils - GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") - vertices = [GraphNode('S'), GraphNode('C'), - GraphNode('SLC'), GraphNode('SF'), - GraphNode('D')] - - graph = Graph(*vertices) - graph.add_edge('S', 'SLC', 2) - graph.add_edge('C', 'S', 4) - graph.add_edge('C', 'D', 2) - graph.add_edge('SLC', 'C', 2) - graph.add_edge('SLC', 'D', 3) - graph.add_edge('SF', 'SLC', 2) - graph.add_edge('SF', 'S', 2) - graph.add_edge('D', 'SF', 3) - dist, pred = shortest_paths(graph, algorithm, 'SLC') - assert dist == {'S': 6, 'C': 2, 'SLC': 0, 'SF': 6, 'D': 3} - assert pred == {'S': 'C', 'C': 'SLC', 'SLC': None, 'SF': 'D', 'D': 'SLC'} - dist, pred = shortest_paths(graph, algorithm, 'SLC', 'SF') - assert dist == 6 - assert pred == {'S': 'C', 'C': 'SLC', 'SLC': None, 'SF': 'D', 'D': 'SLC'} - graph.remove_edge('SLC', 'D') - graph.add_edge('D', 'SLC', -10) - assert raises(ValueError, lambda: shortest_paths(graph, 'bellman_ford', 'SLC')) - - if (ds == 'List' and algorithm == 'dijkstra'): - vertices2 = [AdjacencyListGraphNode('S', 0, backend = Backend.CPP), AdjacencyListGraphNode('C', 0, backend = Backend.CPP), - AdjacencyListGraphNode('SLC', 0, backend = Backend.CPP), AdjacencyListGraphNode('SF', 0, backend = Backend.CPP), - AdjacencyListGraphNode('D', 0, backend = Backend.CPP)] - graph2 = Graph(*vertices2, backend = Backend.CPP) - graph2.add_edge('S', 'SLC', 2) - graph2.add_edge('C', 'S', 4) - graph2.add_edge('C', 'D', 2) - graph2.add_edge('SLC', 'C', 2) - graph2.add_edge('SLC', 'D', 3) - graph2.add_edge('SF', 'SLC', 2) - graph2.add_edge('SF', 'S', 2) - graph2.add_edge('D', 'SF', 3) - (dist2, pred2) = shortest_paths(graph2, algorithm, 'SLC', backend = Backend.CPP) - assert dist2 == {'S': 6, 'C': 2, 'SLC': 0, 'SF': 6, 'D': 3} - assert pred2 == {'S': 'C', 'C': 'SLC', 'SLC': None, 'SF': 'D', 'D': 'SLC'} - - - - def _test_shortest_paths_negative_edges(ds, algorithm): - import pydatastructs.utils.misc_util as utils - GraphNode = getattr(utils, 
"Adjacency" + ds + "GraphNode") - vertices = [GraphNode('s'), GraphNode('a'), - GraphNode('b'), GraphNode('c'), - GraphNode('d')] - - graph = Graph(*vertices) - graph.add_edge('s', 'a', 3) - graph.add_edge('s', 'b', 2) - graph.add_edge('a', 'c', 1) - graph.add_edge('b', 'd', 1) - graph.add_edge('b', 'a', -2) - graph.add_edge('c', 'd', 1) - dist, pred = shortest_paths(graph, algorithm, 's') - assert dist == {'s': 0, 'a': 0, 'b': 2, 'c': 1, 'd': 2} - assert pred == {'s': None, 'a': 'b', 'b': 's', 'c': 'a', 'd': 'c'} - dist, pred = shortest_paths(graph, algorithm, 's', 'd') - assert dist == 2 - assert pred == {'s': None, 'a': 'b', 'b': 's', 'c': 'a', 'd': 'c'} - - _test_shortest_paths_positive_edges("List", 'bellman_ford') - _test_shortest_paths_positive_edges("Matrix", 'bellman_ford') - _test_shortest_paths_negative_edges("List", 'bellman_ford') - _test_shortest_paths_negative_edges("Matrix", 'bellman_ford') - _test_shortest_paths_positive_edges("List", 'dijkstra') - _test_shortest_paths_positive_edges("Matrix", 'dijkstra') - -def test_all_pair_shortest_paths(): - - def _test_shortest_paths_negative_edges(ds, algorithm): - import pydatastructs.utils.misc_util as utils - GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") - vertices = [GraphNode('1'), GraphNode('2'), - GraphNode('3'), GraphNode('4')] - - graph = Graph(*vertices) - graph.add_edge('1', '3', -2) - graph.add_edge('2', '1', 4) - graph.add_edge('2', '3', 3) - graph.add_edge('3', '4', 2) - graph.add_edge('4', '2', -1) - dist, next_v = all_pair_shortest_paths(graph, algorithm) - assert dist == {'1': {'3': -2, '1': 0, '4': 0, '2': -1}, - '2': {'1': 4, '3': 2, '2': 0, '4': 4}, - '3': {'4': 2, '3': 0, '1': 5, '2': 1}, - '4': {'2': -1, '4': 0, '1': 3, '3': 1}} - assert next_v == {'1': {'3': '1', '1': '1', '4': None, '2': None}, - '2': {'1': '2', '3': None, '2': '2', '4': None}, - '3': {'4': '3', '3': '3', '1': None, '2': None}, - '4': {'2': '4', '4': '4', '1': None, '3': None}} - - 
_test_shortest_paths_negative_edges("List", 'floyd_warshall') - _test_shortest_paths_negative_edges("Matrix", 'floyd_warshall') - _test_shortest_paths_negative_edges("List", 'johnson') - -def test_topological_sort(): - - def _test_topological_sort(func, ds, algorithm, threads=None): - import pydatastructs.utils.misc_util as utils - GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") - vertices = [GraphNode('2'), GraphNode('3'), GraphNode('5'), - GraphNode('7'), GraphNode('8'), GraphNode('10'), - GraphNode('11'), GraphNode('9')] - - graph = Graph(*vertices) - graph.add_edge('5', '11') - graph.add_edge('7', '11') - graph.add_edge('7', '8') - graph.add_edge('3', '8') - graph.add_edge('3', '10') - graph.add_edge('11', '2') - graph.add_edge('11', '9') - graph.add_edge('11', '10') - graph.add_edge('8', '9') - if threads is not None: - l = func(graph, algorithm, threads) - else: - l = func(graph, algorithm) - assert all([(l1 in l[0:3]) for l1 in ('3', '5', '7')] + - [(l2 in l[3:5]) for l2 in ('8', '11')] + - [(l3 in l[5:]) for l3 in ('10', '9', '2')]) - - _test_topological_sort(topological_sort, "List", "kahn") - _test_topological_sort(topological_sort_parallel, "List", "kahn", 3) - - -def test_max_flow(): - def _test_max_flow(ds, algorithm): - import pydatastructs.utils.misc_util as utils - GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") - - a = GraphNode('a') - b = GraphNode('b') - c = GraphNode('c') - d = GraphNode('d') - e = GraphNode('e') - - G = Graph(a, b, c, d, e) - - G.add_edge('a', 'b', 3) - G.add_edge('a', 'c', 4) - G.add_edge('b', 'c', 2) - G.add_edge('b', 'd', 3) - G.add_edge('c', 'd', 1) - G.add_edge('d', 'e', 6) - - assert max_flow(G, 'a', 'e', algorithm) == 4 - assert max_flow(G, 'a', 'c', algorithm) == 6 - - a = GraphNode('a') - b = GraphNode('b') - c = GraphNode('c') - d = GraphNode('d') - e = GraphNode('e') - f = GraphNode('f') - - G2 = Graph(a, b, c, d, e, f) - - G2.add_edge('a', 'b', 16) - G2.add_edge('a', 'c', 13) - G2.add_edge('b', 
'c', 10) - G2.add_edge('b', 'd', 12) - G2.add_edge('c', 'b', 4) - G2.add_edge('c', 'e', 14) - G2.add_edge('d', 'c', 9) - G2.add_edge('d', 'f', 20) - G2.add_edge('e', 'd', 7) - G2.add_edge('e', 'f', 4) - - assert max_flow(G2, 'a', 'f', algorithm) == 23 - assert max_flow(G2, 'a', 'd', algorithm) == 19 - - a = GraphNode('a') - b = GraphNode('b') - c = GraphNode('c') - d = GraphNode('d') - - G3 = Graph(a, b, c, d) - - G3.add_edge('a', 'b', 3) - G3.add_edge('a', 'c', 2) - G3.add_edge('b', 'c', 2) - G3.add_edge('b', 'd', 3) - G3.add_edge('c', 'd', 2) - - assert max_flow(G3, 'a', 'd', algorithm) == 5 - assert max_flow(G3, 'a', 'b', algorithm) == 3 - - - _test_max_flow("List", "edmonds_karp") - _test_max_flow("Matrix", "edmonds_karp") - _test_max_flow("List", "dinic") - _test_max_flow("Matrix", "dinic") - - -def test_find_bridges(): - def _test_find_bridges(ds): - import pydatastructs.utils.misc_util as utils - GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") - - impl = 'adjacency_list' if ds == "List" else 'adjacency_matrix' - - v0 = GraphNode(0) - v1 = GraphNode(1) - v2 = GraphNode(2) - v3 = GraphNode(3) - v4 = GraphNode(4) - - G1 = Graph(v0, v1, v2, v3, v4, implementation=impl) - G1.add_edge(v0.name, v1.name) - G1.add_edge(v1.name, v2.name) - G1.add_edge(v2.name, v3.name) - G1.add_edge(v3.name, v4.name) - - bridges = find_bridges(G1) - expected_bridges = [('0', '1'), ('1', '2'), ('2', '3'), ('3', '4')] - assert sorted(bridges) == sorted(expected_bridges) - - u0 = GraphNode(0) - u1 = GraphNode(1) - u2 = GraphNode(2) - - G2 = Graph(u0, u1, u2, implementation=impl) - G2.add_edge(u0.name, u1.name) - G2.add_edge(u1.name, u2.name) - G2.add_edge(u2.name, u0.name) - - bridges = find_bridges(G2) - assert bridges == [] - - w0 = GraphNode(0) - w1 = GraphNode(1) - w2 = GraphNode(2) - w3 = GraphNode(3) - w4 = GraphNode(4) - - G3 = Graph(w0, w1, w2, w3, w4, implementation=impl) - G3.add_edge(w0.name, w1.name) - G3.add_edge(w1.name, w2.name) - G3.add_edge(w3.name, w4.name) - 
- bridges = find_bridges(G3) - expected_bridges = [('0', '1'), ('1', '2'), ('3', '4')] - assert sorted(bridges) == sorted(expected_bridges) - - _test_find_bridges("List") - _test_find_bridges("Matrix") diff --git a/lib/python3.12/site-packages/pydatastructs/linear_data_structures/__init__.py b/lib/python3.12/site-packages/pydatastructs/linear_data_structures/__init__.py deleted file mode 100644 index c6b3341d2..000000000 --- a/lib/python3.12/site-packages/pydatastructs/linear_data_structures/__init__.py +++ /dev/null @@ -1,53 +0,0 @@ -__all__ = [] - -from . import ( - arrays, - linked_lists, - algorithms, -) - -from .arrays import ( - OneDimensionalArray, - DynamicOneDimensionalArray, - MultiDimensionalArray, - ArrayForTrees -) -__all__.extend(arrays.__all__) - -from .linked_lists import ( - SinglyLinkedList, - DoublyLinkedList, - SinglyCircularLinkedList, - DoublyCircularLinkedList, - SkipList -) -__all__.extend(linked_lists.__all__) - -from .algorithms import ( - merge_sort_parallel, - brick_sort, - brick_sort_parallel, - heapsort, - matrix_multiply_parallel, - counting_sort, - bucket_sort, - cocktail_shaker_sort, - quick_sort, - longest_common_subsequence, - is_ordered, - upper_bound, - lower_bound, - longest_increasing_subsequence, - next_permutation, - prev_permutation, - bubble_sort, - linear_search, - binary_search, - jump_search, - selection_sort, - insertion_sort, - intro_sort, - shell_sort, - radix_sort -) -__all__.extend(algorithms.__all__) diff --git a/lib/python3.12/site-packages/pydatastructs/linear_data_structures/_backend/__init__.py b/lib/python3.12/site-packages/pydatastructs/linear_data_structures/_backend/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/lib/python3.12/site-packages/pydatastructs/linear_data_structures/algorithms.py b/lib/python3.12/site-packages/pydatastructs/linear_data_structures/algorithms.py deleted file mode 100644 index 6d383fdca..000000000 --- 
a/lib/python3.12/site-packages/pydatastructs/linear_data_structures/algorithms.py +++ /dev/null @@ -1,2010 +0,0 @@ -from pydatastructs.linear_data_structures.arrays import ( - OneDimensionalArray, DynamicArray, DynamicOneDimensionalArray, Array) -from pydatastructs.linear_data_structures._backend.cpp import _algorithms, _arrays -from pydatastructs.utils.misc_util import ( - _check_type, _comp, Backend, - raise_if_backend_is_not_python) -from concurrent.futures import ThreadPoolExecutor -from math import log, floor, sqrt - -__all__ = [ - 'merge_sort_parallel', - 'brick_sort', - 'brick_sort_parallel', - 'heapsort', - 'matrix_multiply_parallel', - 'counting_sort', - 'bucket_sort', - 'cocktail_shaker_sort', - 'quick_sort', - 'longest_common_subsequence', - 'is_ordered', - 'upper_bound', - 'lower_bound', - 'longest_increasing_subsequence', - 'next_permutation', - 'prev_permutation', - 'bubble_sort', - 'linear_search', - 'binary_search', - 'jump_search', - 'selection_sort', - 'insertion_sort', - 'intro_sort', - 'shell_sort', - 'radix_sort' -] - -def _merge(array, sl, el, sr, er, end, comp): - l, r = [], [] - for i in range(sl, el + 1): - if i <= end: - l.append(array[i]) - array[i] = None - for i in range(sr, er + 1): - if i <= end: - r.append(array[i]) - array[i] = None - i, j, k = 0, 0, sl - while i < len(l) and j < len(r): - if _comp(l[i], r[j], comp): - array[k] = l[i] - i += 1 - else: - array[k] = r[j] - j += 1 - k += 1 - - while i < len(l): - array[k] = l[i] - i += 1 - k += 1 - - while j < len(r): - array[k] = r[j] - j += 1 - k += 1 - -def merge_sort_parallel(array, num_threads, **kwargs): - """ - Implements parallel merge sort. - - Parameters - ========== - - array: Array - The array which is to be sorted. - num_threads: int - The maximum number of threads - to be used for sorting. - start: int - The starting index of the portion - which is to be sorted. - Optional, by default 0 - end: int - The ending index of the portion which - is to be sorted. 
- Optional, by default the index - of the last position filled. - comp: lambda/function - The comparator which is to be used - for sorting. If the function returns - False then only swapping is performed. - Optional, by default, less than or - equal to is used for comparing two - values. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs import OneDimensionalArray, merge_sort_parallel - >>> arr = OneDimensionalArray(int,[3, 2, 1]) - >>> merge_sort_parallel(arr, 3) - >>> [arr[0], arr[1], arr[2]] - [1, 2, 3] - >>> merge_sort_parallel(arr, 3, comp=lambda u, v: u > v) - >>> [arr[0], arr[1], arr[2]] - [3, 2, 1] - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Merge_sort - """ - raise_if_backend_is_not_python( - merge_sort_parallel, kwargs.get('backend', Backend.PYTHON)) - start = kwargs.get('start', 0) - end = kwargs.get('end', len(array) - 1) - comp = kwargs.get("comp", lambda u, v: u <= v) - for size in range(floor(log(end - start + 1, 2)) + 1): - pow_2 = 2**size - with ThreadPoolExecutor(max_workers=num_threads) as Executor: - i = start - while i <= end: - Executor.submit( - _merge, - array, - i, i + pow_2 - 1, - i + pow_2, i + 2*pow_2 - 1, - end, comp).result() - i = i + 2*pow_2 - - if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)): - array._modify(True) - -def brick_sort(array, **kwargs): - """ - Implements Brick Sort / Odd Even sorting algorithm - - Parameters - ========== - - array: Array - The array which is to be sorted. - start: int - The starting index of the portion - which is to be sorted. - Optional, by default 0 - end: int - The ending index of the portion which - is to be sorted. - Optional, by default the index - of the last position filled. - comp: lambda/function - The comparator which is to be used - for sorting. If the function returns - False then only swapping is performed. 
- Optional, by default, less than or - equal to is used for comparing two - values. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - >>> from pydatastructs import OneDimensionalArray, brick_sort - >>> arr = OneDimensionalArray(int,[3, 2, 1]) - >>> brick_sort(arr) - >>> [arr[0], arr[1], arr[2]] - [1, 2, 3] - >>> brick_sort(arr, comp=lambda u, v: u > v) - >>> [arr[0], arr[1], arr[2]] - [3, 2, 1] - - References - ========== - .. [1] https://www.geeksforgeeks.org/odd-even-sort-brick-sort/ - """ - raise_if_backend_is_not_python( - brick_sort, kwargs.get('backend', Backend.PYTHON)) - start = kwargs.get('start', 0) - end = kwargs.get('end', len(array) - 1) - comp = kwargs.get("comp", lambda u, v: u <= v) - - is_sorted = False - while is_sorted is False: - is_sorted = True - for i in range(start+1, end, 2): - if _comp(array[i+1], array[i], comp): - array[i], array[i+1] = array[i+1], array[i] - is_sorted = False - for i in range(start, end, 2): - if _comp(array[i+1], array[i], comp): - array[i], array[i+1] = array[i+1], array[i] - is_sorted = False - - if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)): - array._modify(True) - -def _brick_sort_swap(array, i, j, comp, is_sorted): - if _comp(array[j], array[i], comp): - array[i], array[j] = array[j], array[i] - is_sorted[0] = False - -def brick_sort_parallel(array, num_threads, **kwargs): - """ - Implements Concurrent Brick Sort / Odd Even sorting algorithm - - Parameters - ========== - - array: Array/list - The array which is to be sorted. - num_threads: int - The maximum number of threads - to be used for sorting. - start: int - The starting index of the portion - which is to be sorted. - Optional, by default 0 - end: int - The ending index of the portion which - is to be sorted. - Optional, by default the index - of the last position filled. 
- comp: lambda/function - The comparator which is to be used - for sorting. If the function returns - False then only swapping is performed. - Optional, by default, less than or - equal to is used for comparing two - values. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs import OneDimensionalArray, brick_sort_parallel - >>> arr = OneDimensionalArray(int,[3, 2, 1]) - >>> brick_sort_parallel(arr, num_threads=5) - >>> [arr[0], arr[1], arr[2]] - [1, 2, 3] - >>> brick_sort_parallel(arr, num_threads=5, comp=lambda u, v: u > v) - >>> [arr[0], arr[1], arr[2]] - [3, 2, 1] - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Odd%E2%80%93even_sort - """ - raise_if_backend_is_not_python( - brick_sort_parallel, kwargs.get('backend', Backend.PYTHON)) - start = kwargs.get('start', 0) - end = kwargs.get('end', len(array) - 1) - comp = kwargs.get("comp", lambda u, v: u <= v) - - is_sorted = [False] - with ThreadPoolExecutor(max_workers=num_threads) as Executor: - while is_sorted[0] is False: - is_sorted[0] = True - for i in range(start + 1, end, 2): - Executor.submit(_brick_sort_swap, array, i, i + 1, comp, is_sorted).result() - - for i in range(start, end, 2): - Executor.submit(_brick_sort_swap, array, i, i + 1, comp, is_sorted).result() - - if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)): - array._modify(True) - -def heapsort(array, **kwargs): - """ - Implements Heapsort algorithm. - - Parameters - ========== - - array: Array - The array which is to be sorted. - start: int - The starting index of the portion - which is to be sorted. - Optional, by default 0 - end: int - The ending index of the portion which - is to be sorted. - Optional, by default the index - of the last position filled. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. 
- - Examples - ======== - - >>> from pydatastructs import OneDimensionalArray, heapsort - >>> arr = OneDimensionalArray(int,[3, 2, 1]) - >>> heapsort(arr) - >>> [arr[0], arr[1], arr[2]] - [1, 2, 3] - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Heapsort - - Note - ==== - - This function does not support custom comparators as is the case with - other sorting functions in this file. - """ - raise_if_backend_is_not_python( - heapsort, kwargs.get('backend', Backend.PYTHON)) - from pydatastructs.trees.heaps import BinaryHeap - - start = kwargs.get('start', 0) - end = kwargs.get('end', len(array) - 1) - - h = BinaryHeap(heap_property="min") - for i in range(start, end+1): - if array[i] is not None: - h.insert(array[i]) - array[i] = None - - i = start - while not h.is_empty: - array[i] = h.extract().key - i += 1 - - if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)): - array._modify(True) - -def counting_sort(array: Array, **kwargs) -> Array: - """ - Performs counting sort on the given array. - - Parameters - ========== - - array: Array - The array which is to be sorted. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - output: Array - The sorted array. - - Examples - ======== - - >>> from pydatastructs import DynamicOneDimensionalArray as DODA, counting_sort - >>> arr = DODA(int, [5, 78, 1, 0]) - >>> out = counting_sort(arr) - >>> str(out) - "['0', '1', '5', '78']" - >>> arr.delete(2) - >>> out = counting_sort(arr) - >>> str(out) - "['0', '5', '78']" - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Counting_sort - - Note - ==== - - Since, counting sort is a non-comparison sorting algorithm, - custom comparators aren't allowed. - The ouput array doesn't contain any `None` value. 
- """ - raise_if_backend_is_not_python( - counting_sort, kwargs.get('backend', Backend.PYTHON)) - max_val, min_val = array[0], array[0] - none_count = 0 - for i in range(len(array)): - if array[i] is not None: - if max_val is None or max_val < array[i]: - max_val = array[i] - if min_val is None or array[i] < min_val: - min_val = array[i] - else: - none_count += 1 - if min_val is None or max_val is None: - return array - - count = [0 for _ in range(max_val - min_val + 1)] - for i in range(len(array)): - if array[i] is not None: - count[array[i] - min_val] += 1 - - total = 0 - for i in range(max_val - min_val + 1): - count[i], total = total, count[i] + total - - output = type(array)(array._dtype, - [array[i] for i in range(len(array)) - if array[i] is not None]) - if _check_type(output, DynamicArray): - output._modify(force=True) - - for i in range(len(array)): - x = array[i] - if x is not None: - output[count[x-min_val]] = x - count[x-min_val] += 1 - - return output - -def _matrix_multiply_helper(m1, m2, row, col): - s = 0 - for i in range(len(m1)): - s += m1[row][i] * m2[i][col] - return s - -def matrix_multiply_parallel(matrix_1, matrix_2, num_threads): - """ - Implements concurrent Matrix multiplication - - Parameters - ========== - - matrix_1: Any matrix representation - Left matrix - matrix_2: Any matrix representation - Right matrix - num_threads: int - The maximum number of threads - to be used for multiplication. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Raises - ====== - - ValueError - When the columns in matrix_1 are not equal to the rows in matrix_2 - - Returns - ======= - - C: list - The result of matrix multiplication. 
- - Examples - ======== - - >>> from pydatastructs import matrix_multiply_parallel - >>> I = [[1, 1, 0], [0, 1, 0], [0, 0, 1]] - >>> J = [[2, 1, 2], [1, 2, 1], [2, 2, 2]] - >>> matrix_multiply_parallel(I, J, num_threads=5) - [[3, 3, 3], [1, 2, 1], [2, 2, 2]] - - References - ========== - .. [1] https://www3.nd.edu/~zxu2/acms60212-40212/Lec-07-3.pdf - """ - row_matrix_1, col_matrix_1 = len(matrix_1), len(matrix_1[0]) - row_matrix_2, col_matrix_2 = len(matrix_2), len(matrix_2[0]) - - if col_matrix_1 != row_matrix_2: - raise ValueError("Matrix size mismatch: %s * %s"%( - (row_matrix_1, col_matrix_1), (row_matrix_2, col_matrix_2))) - - C = [[None for i in range(col_matrix_1)] for j in range(row_matrix_2)] - - with ThreadPoolExecutor(max_workers=num_threads) as Executor: - for i in range(row_matrix_1): - for j in range(col_matrix_2): - C[i][j] = Executor.submit(_matrix_multiply_helper, - matrix_1, - matrix_2, - i, j).result() - - return C - -def _bucket_sort_helper(bucket: Array) -> Array: - for i in range(1, len(bucket)): - key = bucket[i] - j = i - 1 - while j >= 0 and bucket[j] > key: - bucket[j+1] = bucket[j] - j -= 1 - bucket[j+1] = key - return bucket - -def bucket_sort(array: Array, **kwargs) -> Array: - """ - Performs bucket sort on the given array. - - Parameters - ========== - - array: Array - The array which is to be sorted. - start: int - The starting index of the portion - which is to be sorted. - Optional, by default 0 - end: int - The ending index of the portion which - is to be sorted. - Optional, by default the index - of the last position filled. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - output: Array - The sorted array. 
- - Examples - ======== - - >>> from pydatastructs import DynamicOneDimensionalArray as DODA, bucket_sort - >>> arr = DODA(int, [5, 78, 1, 0]) - >>> out = bucket_sort(arr) - >>> str(out) - "['0', '1', '5', '78']" - >>> arr.delete(2) - >>> out = bucket_sort(arr) - >>> str(out) - "['0', '1', '78']" - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Bucket_sort - - Note - ==== - - This function does not support custom comparators as is the case with - other sorting functions in this file. - """ - raise_if_backend_is_not_python( - bucket_sort, kwargs.get('backend', Backend.PYTHON)) - start = kwargs.get('start', 0) - end = kwargs.get('end', len(array) - 1) - - #Find maximum value in the list and use length of the list to determine which value in the list goes into which bucket - max_value = None - for i in range(start, end+1): - if array[i] is not None: - max_value = array[i] - - count = 0 - for i in range(start, end+1): - if array[i] is not None: - count += 1 - if array[i] > max_value: - max_value = array[i] - - number_of_null_values = end - start + 1 - count - size = max_value // count - - # Create n empty buckets where n is equal to the length of the input list - buckets_list = [[] for _ in range(count)] - - # Put list elements into different buckets based on the size - for i in range(start, end + 1): - if array[i] is not None: - j = array[i] // size - if j is not count: - buckets_list[j].append(array[i]) - else: - buckets_list[count-1].append(array[i]) - - # Sort elements within the buckets using Insertion Sort - for z in range(count): - _bucket_sort_helper(buckets_list[z]) - - # Concatenate buckets with sorted elements into a single array - sorted_list = [] - for x in range(count): - sorted_list.extend(buckets_list[x]) - for i in range(end, end - number_of_null_values, -1): - array[i] = None - for i in range(start, end - number_of_null_values + 1): - array[i] = sorted_list[i-start] - if _check_type(array, (DynamicArray, 
_arrays.DynamicOneDimensionalArray)): - array._modify(True) - return array - -def cocktail_shaker_sort(array: Array, **kwargs) -> Array: - """ - Performs cocktail sort on the given array. - - Parameters - ========== - - array: Array - The array which is to be sorted. - start: int - The starting index of the portion - which is to be sorted. - Optional, by default 0 - end: int - The ending index of the portion which - is to be sorted. - Optional, by default the index - of the last position filled. - comp: lambda/function - The comparator which is to be used - for sorting. If the function returns - False then only swapping is performed. - Optional, by default, less than or - equal to is used for comparing two - values. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - output: Array - The sorted array. - - Examples - ======== - - >>> from pydatastructs import OneDimensionalArray as ODA, cocktail_shaker_sort - >>> arr = ODA(int, [5, 78, 1, 0]) - >>> out = cocktail_shaker_sort(arr) - >>> str(out) - '[0, 1, 5, 78]' - >>> arr = ODA(int, [21, 37, 5]) - >>> out = cocktail_shaker_sort(arr) - >>> str(out) - '[5, 21, 37]' - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Cocktail_shaker_sort - """ - raise_if_backend_is_not_python( - cocktail_shaker_sort, kwargs.get('backend', Backend.PYTHON)) - def swap(i, j): - array[i], array[j] = array[j], array[i] - - lower = kwargs.get('start', 0) - upper = kwargs.get('end', len(array) - 1) - comp = kwargs.get("comp", lambda u, v: u <= v) - - swapping = False - while (not swapping and upper - lower >= 1): - - swapping = True - for j in range(lower, upper): - if _comp(array[j], array[j+1], comp) is False: - swap(j + 1, j) - swapping = False - - upper = upper - 1 - for j in range(upper, lower, -1): - if _comp(array[j-1], array[j], comp) is False: - swap(j, j - 1) - swapping = False - lower = lower + 1 - - if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)): - array._modify(True) - - return array - -def quick_sort(array: Array, **kwargs) -> Array: - """ - Performs quick sort on the given array. - - Parameters - ========== - - array: Array - The array which is to be sorted. - start: int - The starting index of the portion - which is to be sorted. - Optional, by default 0 - end: int - The ending index of the portion which - is to be sorted. - Optional, by default the index - of the last position filled. - comp: lambda/function - The comparator which is to be used - for sorting. If the function returns - False then only swapping is performed. - Optional, by default, less than or - equal to is used for comparing two - values. - pick_pivot_element: lambda/function - The function implementing the pivot picking - logic for quick sort. Should accept, `low`, - `high`, and `array` in this order, where `low` - represents the left end of the current partition, - `high` represents the right end, and `array` is - the original input array to `quick_sort` function. - Optional, by default, picks the element at `high` - index of the current partition as pivot. - backend: pydatastructs.Backend - The backend to be used. 
- Optional, by default, the best available - backend is used. - - Returns - ======= - - output: Array - The sorted array. - - Examples - ======== - - >>> from pydatastructs import OneDimensionalArray as ODA, quick_sort - >>> arr = ODA(int, [5, 78, 1, 0]) - >>> out = quick_sort(arr) - >>> str(out) - '[0, 1, 5, 78]' - >>> arr = ODA(int, [21, 37, 5]) - >>> out = quick_sort(arr) - >>> str(out) - '[5, 21, 37]' - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Quicksort - """ - backend = kwargs.pop("backend", Backend.PYTHON) - if backend == Backend.CPP: - return _algorithms.quick_sort(array, **kwargs) - from pydatastructs import Stack - comp = kwargs.get("comp", lambda u, v: u <= v) - pick_pivot_element = kwargs.get("pick_pivot_element", - lambda low, high, array: array[high]) - - def partition(low, high, pick_pivot_element): - i = (low - 1) - x = pick_pivot_element(low, high, array) - for j in range(low , high): - if _comp(array[j], x, comp) is True: - i = i + 1 - array[i], array[j] = array[j], array[i] - array[i + 1], array[high] = array[high], array[i + 1] - return (i + 1) - - lower = kwargs.get('start', 0) - upper = kwargs.get('end', len(array) - 1) - stack = Stack() - - stack.push(lower) - stack.push(upper) - - while stack.is_empty is False: - high = stack.pop() - low = stack.pop() - p = partition(low, high, pick_pivot_element) - if p - 1 > low: - stack.push(low) - stack.push(p - 1) - if p + 1 < high: - stack.push(p + 1) - stack.push(high) - - if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)): - array._modify(True) - - return array - -def longest_common_subsequence(seq1: OneDimensionalArray, seq2: OneDimensionalArray, - **kwargs) -> OneDimensionalArray: - """ - Finds the longest common subsequence between the - two given sequences. - - Parameters - ======== - - seq1: OneDimensionalArray - The first sequence. - seq2: OneDimensionalArray - The second sequence. - backend: pydatastructs.Backend - The backend to be used. 
- Optional, by default, the best available - backend is used. - - Returns - ======= - - output: OneDimensionalArray - The longest common subsequence. - - Examples - ======== - - >>> from pydatastructs import longest_common_subsequence as LCS, OneDimensionalArray as ODA - >>> arr1 = ODA(str, ['A', 'B', 'C', 'D', 'E']) - >>> arr2 = ODA(str, ['A', 'B', 'C', 'G' ,'D', 'E', 'F']) - >>> lcs = LCS(arr1, arr2) - >>> str(lcs) - "['A', 'B', 'C', 'D', 'E']" - >>> arr1 = ODA(str, ['A', 'P', 'P']) - >>> arr2 = ODA(str, ['A', 'p', 'P', 'S', 'P']) - >>> lcs = LCS(arr1, arr2) - >>> str(lcs) - "['A', 'P', 'P']" - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Longest_common_subsequence_problem - - Note - ==== - - The data types of elements across both the sequences - should be same and should be comparable. - """ - raise_if_backend_is_not_python( - longest_common_subsequence, kwargs.get('backend', Backend.PYTHON)) - row = len(seq1) - col = len(seq2) - check_mat = {0: [(0, []) for _ in range(col + 1)]} - - for i in range(1, row + 1): - check_mat[i] = [(0, []) for _ in range(col + 1)] - for j in range(1, col + 1): - if seq1[i-1] == seq2[j-1]: - temp = check_mat[i-1][j-1][1][:] - temp.append(seq1[i-1]) - check_mat[i][j] = (check_mat[i-1][j-1][0] + 1, temp) - else: - if check_mat[i-1][j][0] > check_mat[i][j-1][0]: - check_mat[i][j] = check_mat[i-1][j] - else: - check_mat[i][j] = check_mat[i][j-1] - - return OneDimensionalArray(seq1._dtype, check_mat[row][col][-1]) - -def is_ordered(array, **kwargs): - """ - Checks whether the given array is ordered or not. - - Parameters - ========== - - array: OneDimensionalArray - The array which is to be checked for having - specified ordering among its elements. - start: int - The starting index of the portion of the array - under consideration. - Optional, by default 0 - end: int - The ending index of the portion of the array - under consideration. - Optional, by default the index - of the last position filled. 
- comp: lambda/function - The comparator which is to be used - for specifying the desired ordering. - Optional, by default, less than or - equal to is used for comparing two - values. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - True if the specified ordering is present - from start to end (inclusive) otherwise False. - - Examples - ======== - - >>> from pydatastructs import OneDimensionalArray, is_ordered - >>> arr = OneDimensionalArray(int, [1, 2, 3, 4]) - >>> is_ordered(arr) - True - >>> arr1 = OneDimensionalArray(int, [1, 2, 3]) - >>> is_ordered(arr1, start=0, end=1, comp=lambda u, v: u > v) - False - """ - backend = kwargs.pop("backend", Backend.PYTHON) - if backend == Backend.CPP: - return _algorithms.is_ordered(array, **kwargs) - lower = kwargs.get('start', 0) - upper = kwargs.get('end', len(array) - 1) - comp = kwargs.get("comp", lambda u, v: u <= v) - - for i in range(lower + 1, upper + 1): - if array[i] is None or array[i - 1] is None: - continue - if comp(array[i], array[i - 1]): - return False - return True - -def upper_bound(array, value, **kwargs): - """ - Finds the index of the first occurence of an element greater than the given - value according to specified order, in the given OneDimensionalArray using a variation of binary search method. - - Parameters - ========== - - array: OneDimensionalArray - The array in which the upper bound has to be found. - start: int - The staring index of the portion of the array in which the upper bound - of a given value has to be looked for. - Optional, by default 0 - end: int, optional - The ending index of the portion of the array in which the upper bound - of a given value has to be looked for. - Optional, by default the index - of the last position filled. - comp: lambda/function - The comparator which is to be used - for specifying the desired ordering. 
- Optional, by default, less than or - equal to is used for comparing two - values. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - index: int - Index of the upper bound of the given value in the given OneDimensionalArray. - - Examples - ======== - - >>> from pydatastructs import upper_bound, OneDimensionalArray as ODA - >>> arr1 = ODA(int, [4, 5, 5, 6, 7]) - >>> ub = upper_bound(arr1, 5, start=0, end=4) - >>> ub - 3 - >>> arr2 = ODA(int, [7, 6, 5, 5, 4]) - >>> ub = upper_bound(arr2, 5, comp=lambda x, y: x > y) - >>> ub - 4 - - Note - ==== - - DynamicOneDimensionalArray objects may not work as expected. - """ - raise_if_backend_is_not_python( - upper_bound, kwargs.get('backend', Backend.PYTHON)) - start = kwargs.get('start', 0) - end = kwargs.get('end', len(array)) - comp = kwargs.get('comp', lambda x, y: x < y) - index = end - inclusive_end = end - 1 - if comp(value, array[start]): - index = start - while start <= inclusive_end: - mid = (start + inclusive_end)//2 - if not comp(value, array[mid]): - start = mid + 1 - else: - index = mid - inclusive_end = mid - 1 - return index - -def lower_bound(array, value, **kwargs): - """ - Finds the the index of the first occurence of an element which is not - less than the given value according to specified order, - in the given OneDimensionalArray using a variation of binary search method. - - Parameters - ========== - - array: OneDimensionalArray - The array in which the lower bound has to be found. - start: int - The staring index of the portion of the array in which the upper bound - of a given value has to be looked for. - Optional, by default 0 - end: int, optional - The ending index of the portion of the array in which the upper bound - of a given value has to be looked for. - Optional, by default the index - of the last position filled. 
- comp: lambda/function - The comparator which is to be used - for specifying the desired ordering. - Optional, by default, less than or - equal to is used for comparing two - values. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - index: int - Index of the lower bound of the given value in the given OneDimensionalArray - - Examples - ======== - - >>> from pydatastructs import lower_bound, OneDimensionalArray as ODA - >>> arr1 = ODA(int, [4, 5, 5, 6, 7]) - >>> lb = lower_bound(arr1, 5, end=4, comp=lambda x, y : x < y) - >>> lb - 1 - >>> arr = ODA(int, [7, 6, 5, 5, 4]) - >>> lb = lower_bound(arr, 5, start=0, comp=lambda x, y : x > y) - >>> lb - 2 - - Note - ==== - - DynamicOneDimensionalArray objects may not work as expected. - """ - raise_if_backend_is_not_python( - lower_bound, kwargs.get('backend', Backend.PYTHON)) - start = kwargs.get('start', 0) - end = kwargs.get('end', len(array)) - comp = kwargs.get('comp', lambda x, y: x < y) - index = end - inclusive_end = end - 1 - if not comp(array[start], value): - index = start - while start <= inclusive_end: - mid = (start + inclusive_end)//2 - if comp(array[mid], value): - start = mid + 1 - else: - index = mid - inclusive_end = mid - 1 - return index - -def longest_increasing_subsequence(array, **kwargs): - """ - Returns the longest increasing subsequence (as a OneDimensionalArray) that - can be obtained from a given OneDimensionalArray. A subsequence - of an array is an ordered subset of the array's elements having the same - sequential ordering as the original array. Here, an increasing - sequence stands for a strictly increasing sequence of numbers. - - Parameters - ========== - - array: OneDimensionalArray - The given array in the form of a OneDimensionalArray - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. 
- - Returns - ======= - - output: OneDimensionalArray - Returns the longest increasing subsequence that can be obtained - from the given array - - Examples - ======== - - >>> from pydatastructs import lower_bound, OneDimensionalArray as ODA - >>> from pydatastructs import longest_increasing_subsequence as LIS - >>> array = ODA(int, [2, 5, 3, 7, 11, 8, 10, 13, 6]) - >>> longest_inc_subsequence = LIS(array) - >>> str(longest_inc_subsequence) - '[2, 3, 7, 8, 10, 13]' - >>> array2 = ODA(int, [3, 4, -1, 5, 8, 2, 2 ,2, 3, 12, 7, 9, 10]) - >>> longest_inc_subsequence = LIS(array2) - >>> str(longest_inc_subsequence) - '[-1, 2, 3, 7, 9, 10]' - """ - raise_if_backend_is_not_python( - longest_increasing_subsequence, - kwargs.get('backend', Backend.PYTHON)) - n = len(array) - dp = OneDimensionalArray(int, n) - dp.fill(0) - parent = OneDimensionalArray(int, n) - parent.fill(-1) - length = 0 - for i in range(1, n): - if array[i] <= array[dp[0]]: - dp[0] = i - elif array[dp[length]] < array[i]: - length += 1 - dp[length] = i - parent[i] = dp[length - 1] - else: - curr_array = [array[dp[i]] for i in range(length)] - ceil = lower_bound(curr_array, array[i]) - dp[ceil] = i - parent[i] = dp[ceil - 1] - ans = DynamicOneDimensionalArray(int, 0) - last_index = dp[length] - while last_index != -1: - ans.append(array[last_index]) - last_index = parent[last_index] - n = ans._last_pos_filled + 1 - ans_ODA = OneDimensionalArray(int, n) - for i in range(n): - ans_ODA[n-1-i] = ans[i] - return ans_ODA - -def _permutation_util(array, start, end, comp, perm_comp): - size = end - start + 1 - permute = OneDimensionalArray(int, size) - for i, j in zip(range(start, end + 1), range(size)): - permute[j] = array[i] - i = size - 1 - while i > 0 and perm_comp(permute[i - 1], permute[i], comp): - i -= 1 - if i > 0: - left, right = i, size - 1 - while left <= right: - mid = left + (right - left) // 2 - if not perm_comp(permute[i - 1], permute[mid], comp): - left = mid + 1 - else: - right = mid - 1 - 
permute[i - 1], permute[left - 1] = \ - permute[left - 1], permute[i - 1] - left, right = i, size - 1 - while left < right: - permute[left], permute[right] = permute[right], permute[left] - left += 1 - right -= 1 - result = True if i > 0 else False - return result, permute - -def next_permutation(array, **kwargs): - """ - If the function can determine the next higher permutation, it - returns `True` and the permutation in a new array. - If that is not possible, because it is already at the largest possible - permutation, it returns the elements according to the first permutation - and returns `False` and the permutation in a new array. - - Parameters - ========== - - array: OneDimensionalArray - The array which is to be used for finding next permutation. - start: int - The staring index of the considered portion of the array. - Optional, by default 0 - end: int, optional - The ending index of the considered portion of the array. - Optional, by default the index of the last position filled. - comp: lambda/function - The comparator which is to be used for specifying the - desired lexicographical ordering. - Optional, by default, less than is - used for comparing two values. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - - Returns - ======= - - output: bool, OneDimensionalArray - First element is `True` if the function can rearrange - the given portion of the input array as a lexicographically - greater permutation, otherwise returns `False`. - Second element is an array having the next permutation. 
- - - Examples - ======== - - >>> from pydatastructs import next_permutation, OneDimensionalArray as ODA - >>> array = ODA(int, [1, 2, 3, 4]) - >>> is_greater, next_permute = next_permutation(array) - >>> is_greater, str(next_permute) - (True, '[1, 2, 4, 3]') - >>> array = ODA(int, [3, 2, 1]) - >>> is_greater, next_permute = next_permutation(array) - >>> is_greater, str(next_permute) - (False, '[1, 2, 3]') - - References - ========== - - .. [1] http://www.cplusplus.com/reference/algorithm/next_permutation/ - """ - raise_if_backend_is_not_python( - next_permutation, kwargs.get('backend', Backend.PYTHON)) - start = kwargs.get('start', 0) - end = kwargs.get('end', len(array) - 1) - comp = kwargs.get('comp', lambda x, y: x < y) - - def _next_permutation_comp(x, y, _comp): - if _comp(x, y): - return False - else: - return True - - return _permutation_util(array, start, end, comp, - _next_permutation_comp) - -def prev_permutation(array, **kwargs): - """ - If the function can determine the next lower permutation, it - returns `True` and the permutation in a new array. - If that is not possible, because it is already at the lowest possible - permutation, it returns the elements according to the last permutation - and returns `False` and the permutation in a new array. - - Parameters - ========== - - array: OneDimensionalArray - The array which is to be used for finding next permutation. - start: int - The staring index of the considered portion of the array. - Optional, by default 0 - end: int, optional - The ending index of the considered portion of the array. - Optional, by default the index of the last position filled. - comp: lambda/function - The comparator which is to be used for specifying the - desired lexicographical ordering. - Optional, by default, less than is - used for comparing two values. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. 
- - - Returns - ======= - - output: bool, OneDimensionalArray - First element is `True` if the function can rearrange - the given portion of the input array as a lexicographically - smaller permutation, otherwise returns `False`. - Second element is an array having the previous permutation. - - - Examples - ======== - - >>> from pydatastructs import prev_permutation, OneDimensionalArray as ODA - >>> array = ODA(int, [1, 2, 4, 3]) - >>> is_lower, prev_permute = prev_permutation(array) - >>> is_lower, str(prev_permute) - (True, '[1, 2, 3, 4]') - >>> array = ODA(int, [1, 2, 3, 4]) - >>> is_lower, prev_permute = prev_permutation(array) - >>> is_lower, str(prev_permute) - (False, '[4, 3, 2, 1]') - - References - ========== - - .. [1] http://www.cplusplus.com/reference/algorithm/prev_permutation/ - """ - raise_if_backend_is_not_python( - prev_permutation, kwargs.get('backend', Backend.PYTHON)) - start = kwargs.get('start', 0) - end = kwargs.get('end', len(array) - 1) - comp = kwargs.get('comp', lambda x, y: x < y) - - def _prev_permutation_comp(x, y, _comp): - if _comp(x, y): - return True - else: - return False - - return _permutation_util(array, start, end, comp, - _prev_permutation_comp) - -def bubble_sort(array, **kwargs): - """ - Implements bubble sort algorithm. - - Parameters - ========== - - array: Array - The array which is to be sorted. - start: int - The starting index of the portion - which is to be sorted. - Optional, by default 0 - end: int - The ending index of the portion which - is to be sorted. - Optional, by default the index - of the last position filled. - comp: lambda/function - The comparator which is to be used - for sorting. If the function returns - False then only swapping is performed. - Optional, by default, less than or - equal to is used for comparing two - values. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. 
- - Returns - ======= - - output: Array - The sorted array. - - Examples - ======== - - >>> from pydatastructs import OneDimensionalArray, bubble_sort - >>> arr = OneDimensionalArray(int,[3, 2, 1]) - >>> out = bubble_sort(arr) - >>> str(out) - '[1, 2, 3]' - >>> out = bubble_sort(arr, comp=lambda u, v: u > v) - >>> str(out) - '[3, 2, 1]' - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Bubble_sort - """ - backend = kwargs.pop("backend", Backend.PYTHON) - if backend == Backend.CPP: - return _algorithms.bubble_sort(array, **kwargs) - if backend == Backend.LLVM: - return _algorithms.bubble_sort_llvm(array, **kwargs) - start = kwargs.get('start', 0) - end = kwargs.get('end', len(array) - 1) - comp = kwargs.get("comp", lambda u, v: u <= v) - arr_len = len(array) - for i in range(arr_len - 1): - for j in range(start , end): - if not _comp(array[j], array[j + 1], comp): - array[j], array[j + 1] = array[j + 1], array[j] - - if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)): - array._modify(True) - - return array - -def selection_sort(array, **kwargs): - """ - Implements selection sort algorithm. - - Parameters - ========== - - array: Array - The array which is to be sorted. - start: int - The starting index of the portion - which is to be sorted. - Optional, by default 0 - end: int - The ending index of the portion which - is to be sorted. - Optional, by default the index - of the last position filled. - comp: lambda/function - The comparator which is to be used - for sorting. If the function returns - False then only swapping is performed. - Optional, by default, less than or - equal to is used for comparing two - values. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - output: Array - The sorted array. 
- - Examples - ======== - - >>> from pydatastructs import OneDimensionalArray, selection_sort - >>> arr = OneDimensionalArray(int,[3, 2, 1]) - >>> out = selection_sort(arr) - >>> str(out) - '[1, 2, 3]' - >>> out = selection_sort(arr, comp=lambda u, v: u > v) - >>> str(out) - '[3, 2, 1]' - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Selection_sort - """ - backend = kwargs.pop("backend", Backend.PYTHON) - if backend == Backend.CPP: - return _algorithms.bubble_sort(array, **kwargs) - start = kwargs.get('start', 0) - end = kwargs.get('end', len(array) - 1) - comp = kwargs.get('comp', lambda u, v: u <= v) - - for i in range(start, end + 1): - jMin = i - for j in range(i + 1, end + 1): - if not _comp(array[jMin], array[j], comp): - jMin = j - if jMin != i: - array[i], array[jMin] = array[jMin], array[i] - - if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)): - array._modify(True) - - return array - -def insertion_sort(array, **kwargs): - """ - Implements insertion sort algorithm. - - Parameters - ========== - - array: Array - The array which is to be sorted. - start: int - The starting index of the portion - which is to be sorted. - Optional, by default 0 - end: int - The ending index of the portion which - is to be sorted. - Optional, by default the index - of the last position filled. - comp: lambda/function - The comparator which is to be used - for sorting. If the function returns - False then only swapping is performed. - Optional, by default, less than or - equal to is used for comparing two - values. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - output: Array - The sorted array. 
- - Examples - ======== - - >>> from pydatastructs import OneDimensionalArray, insertion_sort - >>> arr = OneDimensionalArray(int,[3, 2, 1]) - >>> out = insertion_sort(arr) - >>> str(out) - '[1, 2, 3]' - >>> out = insertion_sort(arr, comp=lambda u, v: u > v) - >>> str(out) - '[3, 2, 1]' - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Insertion_sort - """ - backend = kwargs.pop("backend", Backend.PYTHON) - if backend == Backend.CPP: - return _algorithms.insertion_sort(array, **kwargs) - start = kwargs.get('start', 0) - end = kwargs.get('end', len(array) - 1) - comp = kwargs.get('comp', lambda u, v: u <= v) - - for i in range(start + 1, end + 1): - temp = array[i] - j = i - while j > start and not _comp(array[j - 1], temp, comp): - array[j] = array[j - 1] - j -= 1 - array[j] = temp - - if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)): - array._modify(True) - - return array - -def linear_search(array, value, **kwargs): - """ - Implements linear search algorithm. - - Parameters - ========== - - array: OneDimensionalArray - The array which is to be searched. - value: - The value which is to be searched - inside the array. - start: int - The starting index of the portion - which is to be searched. - Optional, by default 0 - end: int - The ending index of the portion which - is to be searched. - Optional, by default the index - of the last position filled. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - output: int - The index of value if found. - If not found, returns None. - - Examples - ======== - - >>> from pydatastructs import OneDimensionalArray, linear_search - >>> arr = OneDimensionalArray(int,[3, 2, 1]) - >>> linear_search(arr, 2) - 1 - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Linear_search - """ - backend = kwargs.pop("backend", Backend.PYTHON) - if backend == Backend.CPP: - return _algorithms.linear_search(array, value, **kwargs) - start = kwargs.get('start', 0) - end = kwargs.get('end', len(array) - 1) - - for i in range(start, end + 1): - if array[i] == value: - return i - - return None - -def binary_search(array, value, **kwargs): - """ - Implements binary search algorithm. - - Parameters - ========== - - array: OneDimensionalArray - The array which is to be searched. - value: - The value which is to be searched - inside the array. - start: int - The starting index of the portion - which is to be searched. - Optional, by default 0 - end: int - The ending index of the portion which - is to be searched. - Optional, by default the index - of the last position filled. - comp: lambda/function - The comparator which is to be used - for performing comparisons. - Optional, by default, less than or - equal to is used for comparing two - values. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - output: int - The index of elem if found. - If not found, returns None. - - Examples - ======== - - >>> from pydatastructs import OneDimensionalArray, binary_search - >>> arr = OneDimensionalArray(int,[1, 2, 3, 5, 10, 12]) - >>> binary_search(arr, 5) - 3 - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Binary_search_algorithm - - Note - ==== - - This algorithm assumes that the portion of the array - to be searched is already sorted. 
- """ - backend = kwargs.pop("backend", Backend.PYTHON) - if backend == Backend.CPP: - return _algorithms.binary_search(array, value, **kwargs) - start = kwargs.get('start', 0) - end = kwargs.get('end', len(array) - 1) - comp = kwargs.get("comp", lambda u, v: u <= v) - - left = start - right = end - while left <= right: - middle = left//2 + right//2 + left % 2 * right % 2 - if array[middle] == value: - return middle - if comp(array[middle], value): - left = middle + 1 - else: - right = middle - 1 - - return None - -def jump_search(array, value, **kwargs): - """ - Implements jump search algorithm. - - Parameters - ========== - - array: OneDimensionalArray - The array which is to be searched. - value: - The value which is to be searched - inside the array. - start: int - The starting index of the portion - which is to be searched. - Optional, by default 0 - end: int - The ending index of the portion which - is to be searched. - Optional, by default the index - of the last position filled. - comp: lambda/function - The comparator which is to be used - for performing comparisons. - Optional, by default, less than or - equal to is used for comparing two - values. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - output: int - The index of elem if found. - If not found, returns None. - - Examples - ======== - - >>> from pydatastructs import OneDimensionalArray, jump_search - >>> arr = OneDimensionalArray(int,[1, 2, 3, 5, 10, 12]) - >>> linear_search(arr, 5) - 3 - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Jump_search - - Note - ==== - - This algorithm assumes that the portion of the array - to be searched is already sorted. 
- """ - backend = kwargs.pop("backend", Backend.PYTHON) - if backend == Backend.CPP: - return _algorithms.jump_search(array, value, **kwargs) - start = kwargs.get('start', 0) - end = kwargs.get('end', len(array) - 1) - comp = kwargs.get("comp", lambda u, v: u < v) - - step = int(sqrt(end - start + 1)) - current_position = step - prev = start - while comp(array[min(current_position, end)], value): - prev = current_position - current_position += step - if prev > end: - return None - while prev <= min(current_position, end): - if array[prev] == value: - return prev - prev += 1 - - return None - -def intro_sort(array, **kwargs) -> Array: - """ - Performs intro sort on the given array. - - Parameters - ========== - - array: Array - The array which is to be sorted. - start: int - The starting index of the portion - which is to be sorted. - Optional, by default 0 - end: int - The ending index of the portion which - is to be sorted. - Optional, by default the index - of the last position filled. - maxdepth: Enables the user to define the maximum - recursion depth, takes value 2*log(length(A)) - by default (ref: Wikipedia[1]). - ins_threshold: Threshold under which insertion - sort has to be performed, default value is - 16 (ref: Wikipedia[1]). - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - output: Array - The sorted array. - - Examples - ======== - - >>> from pydatastructs import OneDimensionalArray as ODA, intro_sort - >>> arr = ODA(int, [5, 78, 1, 0]) - >>> out = intro_sort(arr) - >>> str(out) - '[0, 1, 5, 78]' - >>> arr = ODA(int, [21, 37, 5]) - >>> out = intro_sort(arr) - >>> str(out) - '[5, 21, 37]' - - Note - ==== - - This function does not support custom comparators as - is the case with other sorting functions in this file. - This is because of heapsort's limitation. - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Introsort - """ - raise_if_backend_is_not_python( - intro_sort, kwargs.get('backend', Backend.PYTHON)) - - # Always sorts in increasing order, this is because of - # heapsort's limitation - comp = lambda u, v: u <= v - lower = kwargs.get('start', 0) - upper = kwargs.get('end', len(array) - 1) - n = upper - lower + 1 - if n <= 0: - maxdepth = 0 - else: - maxdepth = kwargs.get("maxdepth", int(2 * (log(n)/log(2)))) - - ins_threshold = kwargs.get("ins_threshold", 16) - - def partition(array, lower, upper): - pivot = array[lower] - left = lower + 1 - right = upper - done = False - while not done: - while left <= right and _comp(array[left], pivot, comp): - left += 1 - while _comp(pivot, array[right], comp) and right >= left: - right -= 1 - if right < left: - done = True - else: - array[left], array[right] = array[right], array[left] - left+=1 - right-=1 - - array[lower], array[right] = array[right], array[lower] - return right - - if n < ins_threshold: - return insertion_sort(array, start=lower, end=upper) - elif maxdepth == 0: - heapsort(array, start=lower, end=upper) - return array - else: - p = partition(array, lower, upper) - - intro_sort(array, start=lower, end=p-1, maxdepth=maxdepth-1, ins_threshold=ins_threshold) - intro_sort(array, start=p+1, end=upper, maxdepth=maxdepth-1, ins_threshold=ins_threshold) - - return array - -def shell_sort(array, *args, **kwargs): - """ - Implements shell sort algorithm. - - Parameters - ========== - - array: Array - The array which is to be sorted. - start: int - The starting index of the portion - which is to be sorted. - Optional, by default 0 - end: int - The ending index of the portion which - is to be sorted. - Optional, by default the index - of the last position filled. - comp: lambda/function - The comparator which is to be used - for sorting. If the function returns - False then only swapping is performed. - Optional, by default, less than or - equal to is used for comparing two - values. 
- backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - output: Array - The sorted array. - - Examples - ======== - - >>> from pydatastructs.linear_data_structures.algorithms import OneDimensionalArray, shell_sort - >>> arr = OneDimensionalArray(int, [3, 2, 1]) - >>> out = shell_sort(arr) - >>> str(out) - '[1, 2, 3]' - >>> out = shell_sort(arr, comp=lambda u, v: u > v) - >>> str(out) - '[3, 2, 1]' - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Shellsort - """ - start = int(kwargs.get('start', 0)) - end = int(kwargs.get('end', len(array) - 1)) - comp = kwargs.get('comp', lambda u, v: u <= v) - - n = end - start + 1 - gap = n // 2 - while gap > 0: - for i in range(start + gap, end + 1): - temp = array[i] - j = i - while j >= start + gap and not _comp(array[j - gap], temp, comp): - array[j] = array[j - gap] - j -= gap - array[j] = temp - gap //= 2 - - if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)): - array._modify(True) - - return array - -def radix_sort(array, *args, **kwargs): - """ - Implements radix sort algorithm for non-negative integers. - - Parameters - ========== - - array: Array - The array which is to be sorted. Must contain non-negative integers. - start: int - The starting index of the portion - which is to be sorted. - Optional, by default 0 - end: int - The ending index of the portion which - is to be sorted. - Optional, by default the index - of the last position filled. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - output: Array - The sorted array. 
- - Examples - ======== - - >>> from pydatastructs.linear_data_structures.algorithms import OneDimensionalArray, radix_sort - >>> arr = OneDimensionalArray(int, [170, 45, 75, 90, 802, 24, 2, 66]) - >>> out = radix_sort(arr) - >>> str(out) - '[2, 24, 45, 66, 75, 90, 170, 802]' - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Radix_sort - """ - start = int(kwargs.get('start', 0)) - end = int(kwargs.get('end', len(array) - 1)) - - n = end - start + 1 - max_val = array[start] - for i in range(start + 1, end + 1): - if array[i] is not None and array[i] > max_val: - max_val = array[i] - exp = 1 - while max_val // exp > 0: - count = [0] * 10 - output = [None] * n - - for i in range(start, end + 1): - if array[i] is not None: - digit = (array[i] // exp) % 10 - count[digit] += 1 - - for i in range(1, 10): - count[i] += count[i - 1] - - for i in range(end, start - 1, -1): - if array[i] is not None: - digit = (array[i] // exp) % 10 - count[digit] -= 1 - output[count[digit]] = array[i] - - for i in range(n): - array[start + i] = output[i] - - exp *= 10 - - if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)): - array._modify(True) - - return array diff --git a/lib/python3.12/site-packages/pydatastructs/linear_data_structures/arrays.py b/lib/python3.12/site-packages/pydatastructs/linear_data_structures/arrays.py deleted file mode 100644 index 2e0c3fd97..000000000 --- a/lib/python3.12/site-packages/pydatastructs/linear_data_structures/arrays.py +++ /dev/null @@ -1,473 +0,0 @@ -from pydatastructs.utils.misc_util import ( - _check_type, NoneType, Backend, - raise_if_backend_is_not_python) -from pydatastructs.linear_data_structures._backend.cpp import _arrays - -__all__ = [ - 'OneDimensionalArray', - 'MultiDimensionalArray', - 'DynamicOneDimensionalArray' -] - -class Array(object): - """ - Abstract class for arrays in pydatastructs. 
- """ - def __str__(self) -> str: - return str(self._data) - -class OneDimensionalArray(Array): - """ - Represents one dimensional static arrays of - fixed size. - - Parameters - ========== - - dtype: type - A valid object type. - size: int - The number of elements in the array. - elements: list - The elements in the array, all should - be of same type. - init: a python type - The initial value with which the element has - to be initialized. By default none, used only - when the data is not given. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Raises - ====== - - ValueError - When the number of elements in the list do not - match with the size. - More than three parameters are passed as arguments. - Types of arguments is not as mentioned in the docstring. - - Note - ==== - - At least one parameter should be passed as an argument along - with the dtype. - - Examples - ======== - - >>> from pydatastructs import OneDimensionalArray - >>> arr = OneDimensionalArray(int, 5) - >>> arr.fill(6) - >>> arr[0] - 6 - >>> arr[0] = 7.2 - >>> arr[0] - 7 - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Array_data_structure#One-dimensional_arrays - """ - - __slots__ = ['_size', '_data', '_dtype'] - - def __new__(cls, dtype=NoneType, *args, **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.CPP: - return _arrays.OneDimensionalArray(dtype, *args, **kwargs) - if dtype is NoneType: - raise ValueError("Data type is not defined.") - if len(args) not in (1, 2): - raise ValueError("Too few arguments to create a 1D array," - " pass either size of the array" - " or list of elements or both.") - obj = Array.__new__(cls) - obj._dtype = dtype - if len(args) == 2: - if _check_type(args[0], list) and \ - _check_type(args[1], int): - for i in range(len(args[0])): - if _check_type(args[0][i], dtype) is False: - args[0][i] = dtype(args[0][i]) - size, data = args[1], list(args[0]) - elif _check_type(args[1], list) and \ - _check_type(args[0], int): - for i in range(len(args[1])): - if _check_type(args[1][i], dtype) is False: - args[1][i] = dtype(args[1][i]) - size, data = args[0], list(args[1]) - else: - raise TypeError("Expected type of size is int and " - "expected type of data is list/tuple.") - if size != len(data): - raise ValueError("Conflict in the size, %s and length of data, %s" - %(size, len(data))) - obj._size, obj._data = size, data - - elif len(args) == 1: - if _check_type(args[0], int): - obj._size = args[0] - init = kwargs.get('init', None) - obj._data = [init for i in range(args[0])] - elif _check_type(args[0], (list, tuple)): - for i in range(len(args[0])): - if _check_type(args[0][i], dtype) is False: - args[0][i] = dtype(args[0][i]) - obj._size, obj._data = len(args[0]), \ - list(args[0]) - else: - raise TypeError("Expected type of size is int and " - "expected type of data is list/tuple.") - - return obj - - @classmethod - def methods(cls): - return ['__new__', '__getitem__', - '__setitem__', 'fill', '__len__'] - - def __getitem__(self, i): - if i >= self._size or i < 0: - raise 
IndexError(("Index, {} out of range, " - "[{}, {}).".format(i, 0, self._size))) - return self._data.__getitem__(i) - - def __setitem__(self, idx, elem): - if elem is None: - self._data[idx] = None - else: - if _check_type(elem, self._dtype) is False: - elem = self._dtype(elem) - self._data[idx] = elem - - def fill(self, elem): - elem = self._dtype(elem) - for i in range(self._size): - self._data[i] = elem - - def __len__(self): - return self._size - -class MultiDimensionalArray(Array): - """ - Represents a multi-dimensional array. - - Parameters - ========== - - dtype: type - A valid object type. - *args: int - The dimensions of the array. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Raises - ====== - - IndexError - Index goes out of boundaries, or - the number of index given is not - the same as the number of dimensions. - ValueError - When there's no dimensions or the - dimension size is 0. - - Examples - ======== - - >>> from pydatastructs import MultiDimensionalArray as MDA - >>> arr = MDA(int, 5, 6, 9) - >>> arr.fill(32) - >>> arr[3, 0, 0] - 32 - >>> arr[3, 0, 0] = 7 - >>> arr[3, 0, 0] - 7 - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Array_data_structure#Multidimensional_arrays - - """ - __slots__ = ['_sizes', '_data', '_dtype'] - - def __new__(cls, dtype: type = NoneType, *args, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - if dtype is NoneType: - raise ValueError("Data type is not defined.") - elif not args: - raise ValueError("Too few arguments to create a " - "multi dimensional array, pass dimensions.") - if len(args) == 1: - obj = Array.__new__(cls) - obj._dtype = dtype - obj._sizes = (args[0], 1) - obj._data = [None] * args[0] - return obj - - dimensions = args - for dimension in dimensions: - if dimension < 1: - raise ValueError("Size of dimension cannot be less than 1") - n_dimensions = len(dimensions) - d_sizes = [] - index = 0 - while n_dimensions > 1: - size = dimensions[index] - for i in range(index+1, len(dimensions)): - size = size * dimensions[i] - d_sizes.append(size) - n_dimensions -= 1 - index += 1 - d_sizes.append(dimensions[index]) - d_sizes.append(1) - obj = Array.__new__(cls) - obj._dtype = dtype - obj._sizes = tuple(d_sizes) - obj._data = [None] * obj._sizes[1] * dimensions[0] - return obj - - @classmethod - def methods(cls) -> list: - return ['__new__', '__getitem__', '__setitem__', 'fill', 'shape'] - - def __getitem__(self, indices): - self._compare_shape(indices) - if isinstance(indices, int): - return self._data[indices] - position = 0 - for i in range(0, len(indices)): - position += self._sizes[i + 1] * indices[i] - return self._data[position] - - def __setitem__(self, indices, element) -> None: - self._compare_shape(indices) - if isinstance(indices, int): - self._data[indices] = element - else: - position = 0 - for i in range(0, len(indices)): - position += self._sizes[i + 1] * indices[i] - self._data[position] = element - - def _compare_shape(self, indices) -> None: - indices = [indices] if isinstance(indices, int) else indices - if len(indices) != len(self._sizes) - 1: - raise 
IndexError("Shape mismatch, current shape is %s" % str(self.shape)) - if any(indices[i] >= self._sizes[i] for i in range(len(indices))): - raise IndexError("Index out of range.") - - def fill(self, element) -> None: - element = self._dtype(element) - for i in range(len(self._data)): - self._data[i] = element - - @property - def shape(self) -> tuple: - shape = [] - size = len(self._sizes) - for i in range(1, size): - shape.append(self._sizes[i-1]//self._sizes[i]) - return tuple(shape) - -class DynamicArray(Array): - """ - Abstract class for dynamic arrays. - """ - pass - -class DynamicOneDimensionalArray(DynamicArray, OneDimensionalArray): - """ - Represents resizable and dynamic one - dimensional arrays. - - Parameters - ========== - - dtype: type - A valid object type. - size: int - The number of elements in the array. - elements: list/tuple - The elements in the array, all should - be of same type. - init: a python type - The inital value with which the element has - to be initialized. By default none, used only - when the data is not given. - load_factor: float, by default 0.25 - The number below which if the ratio, Num(T)/Size(T) - falls then the array is contracted such that at - most only half the positions are filled. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Raises - ====== - - ValueError - When the number of elements in the list do not - match with the size. - More than three parameters are passed as arguments. - Types of arguments is not as mentioned in the docstring. - The load factor is not of floating point type. - - Note - ==== - - At least one parameter should be passed as an argument along - with the dtype. - Num(T) means the number of positions which are not None in the - array. - Size(T) means the maximum number of elements that the array can hold. 
- - Examples - ======== - - >>> from pydatastructs import DynamicOneDimensionalArray as DODA - >>> arr = DODA(int, 0) - >>> arr.append(1) - >>> arr.append(2) - >>> arr[0] - 1 - >>> arr.delete(0) - >>> arr[0] - >>> arr[1] - 2 - >>> arr.append(3) - >>> arr.append(4) - >>> [arr[i] for i in range(arr.size)] - [None, 2, 3, 4, None, None, None] - - References - ========== - - .. [1] http://www.cs.nthu.edu.tw/~wkhon/algo09/lectures/lecture16.pdf - """ - - __slots__ = ['_load_factor', '_num', '_last_pos_filled', '_size'] - - def __new__(cls, dtype=NoneType, *args, **kwargs): - backend = kwargs.get("backend", Backend.PYTHON) - if backend == Backend.CPP: - return _arrays.DynamicOneDimensionalArray(dtype, *args, **kwargs) - obj = super().__new__(cls, dtype, *args, **kwargs) - obj._load_factor = float(kwargs.get('load_factor', 0.25)) - obj._num = 0 if obj._size == 0 or obj[0] is None else obj._size - obj._last_pos_filled = obj._num - 1 - return obj - - @classmethod - def methods(cls): - return ['__new__', '_modify', - 'append', 'delete', 'size', - '__str__', '__reversed__'] - - def _modify(self, force=False): - """ - Contracts the array if Num(T)/Size(T) falls - below load factor. 
- """ - if force: - i = -1 - while self._data[i] is None: - i -= 1 - self._last_pos_filled = i%self._size - if (self._num/self._size < self._load_factor): - arr_new = OneDimensionalArray(self._dtype, 2*self._num + 1) - j = 0 - for i in range(self._last_pos_filled + 1): - if self._data[i] is not None: - arr_new[j] = self[i] - j += 1 - self._last_pos_filled = j - 1 - self._data = arr_new._data - self._size = arr_new._size - - def append(self, el): - if self._last_pos_filled + 1 == self._size: - arr_new = OneDimensionalArray(self._dtype, 2*self._size + 1) - for i in range(self._last_pos_filled + 1): - arr_new[i] = self[i] - arr_new[self._last_pos_filled + 1] = el - self._size = arr_new._size - self._data = arr_new._data - else: - self[self._last_pos_filled + 1] = el - self._last_pos_filled += 1 - self._num += 1 - self._modify() - - def delete(self, idx): - if idx <= self._last_pos_filled and idx >= 0 and \ - self[idx] is not None: - self[idx] = None - self._num -= 1 - if self._last_pos_filled == idx: - self._last_pos_filled -= 1 - return self._modify() - - @property - def size(self): - return self._size - - def __str__(self): - to_be_printed = ['' for _ in range(self._last_pos_filled + 1)] - for i in range(self._last_pos_filled + 1): - if self._data[i] is not None: - to_be_printed[i] = str(self._data[i]) - return str(to_be_printed) - - def __reversed__(self): - for i in range(self._last_pos_filled, -1, -1): - yield self._data[i] - -class ArrayForTrees(DynamicOneDimensionalArray): - """ - Utility dynamic array for storing nodes of a tree. - - Parameters - ========== - - backend: pydatastructs.Backend - The backend to be used. Available backends: Python and C++ - Optional, by default, the Python backend is used. For faster execution, use the C++ backend. 
- - See Also - ======== - - pydatastructs.linear_data_structures.arrays.DynamicOneDimensionalArray - """ - def _modify(self): - if self._num/self._size < self._load_factor: - new_indices = {} - arr_new = OneDimensionalArray(self._dtype, 2*self._num + 1) - j = 0 - for i in range(self._last_pos_filled + 1): - if self[i] is not None: - arr_new[j] = self[i] - new_indices[self[i].key] = j - j += 1 - for i in range(j): - if arr_new[i].left is not None: - arr_new[i].left = new_indices[self[arr_new[i].left].key] - if arr_new[i].right is not None: - arr_new[i].right = new_indices[self[arr_new[i].right].key] - if arr_new[i].parent is not None: - arr_new[i].parent = new_indices[self[arr_new[i].parent].key] - self._last_pos_filled = j - 1 - self._data = arr_new._data - self._size = arr_new._size - return new_indices - return None diff --git a/lib/python3.12/site-packages/pydatastructs/linear_data_structures/linked_lists.py b/lib/python3.12/site-packages/pydatastructs/linear_data_structures/linked_lists.py deleted file mode 100644 index 09178daf1..000000000 --- a/lib/python3.12/site-packages/pydatastructs/linear_data_structures/linked_lists.py +++ /dev/null @@ -1,819 +0,0 @@ -import math, random -from pydatastructs.utils.misc_util import _check_type, LinkedListNode, SkipNode -from pydatastructs.utils.misc_util import ( - Backend, raise_if_backend_is_not_python) - -__all__ = [ - 'SinglyLinkedList', - 'DoublyLinkedList', - 'SinglyCircularLinkedList', - 'DoublyCircularLinkedList', - 'SkipList' -] - -class LinkedList(object): - """ - Abstract class for Linked List. - """ - __slots__ = ['head', 'size'] - - def __len__(self): - return self.size - - @property - def is_empty(self): - return self.size == 0 - - def search(self, key): - curr_node = self.head - while curr_node is not None: - if curr_node.key == key: - return curr_node - curr_node = curr_node.next - if curr_node is self.head: - return None - return None - - def __str__(self): - """ - For printing the linked list. 
- """ - elements = [] - current_node = self.head - while current_node is not None: - elements.append(str(current_node)) - current_node = current_node.next - if current_node == self.head: - break - return str(elements) - - def insert_after(self, prev_node, key, data=None): - """ - Inserts a new node after the prev_node. - - Parameters - ========== - - prev_node: LinkedListNode - The node after which the - new node is to be inserted. - - key - Any valid identifier to uniquely - identify the node in the linked list. - - data - Any valid data to be stored in the node. - """ - raise NotImplementedError('This is an abstract method') - - def insert_at(self, index, key, data=None): - """ - Inserts a new node at the input index. - - Parameters - ========== - - index: int - An integer satisfying python indexing properties. - - key - Any valid identifier to uniquely - identify the node in the linked list. - - data - Any valid data to be stored in the node. - """ - raise NotImplementedError('This is an abstract method') - - def extract(self, index): - """ - Extracts the node at the index of the list. - - Parameters - ========== - - index: int - An integer satisfying python indexing properties. - - Returns - ======= - - current_node: LinkedListNode - The node at index i. - """ - raise NotImplementedError('This is an abstract method') - - def __getitem__(self, index): - """ - Returns - ======= - - current_node: LinkedListNode - The node at given index. - """ - if index < 0: - index = self.size + index - - if index >= self.size: - raise IndexError('%d index is out of range.'%(index)) - - counter = 0 - current_node = self.head - while counter != index: - current_node = current_node.next - counter += 1 - return current_node - - def appendleft(self, key, data=None): - """ - Pushes a new node at the start i.e., - the left of the list. - - Parameters - ========== - - key - Any valid identifier to uniquely - identify the node in the linked list. 
- - data - Any valid data to be stored in the node. - """ - self.insert_at(0, key, data) - - def append(self, key, data=None): - """ - Appends a new node at the end of the list. - - Parameters - ========== - - key - Any valid identifier to uniquely - identify the node in the linked list. - - data - Any valid data to be stored in the node. - """ - self.insert_at(self.size, key, data) - - def insert_before(self, next_node, key, data=None): - """ - Inserts a new node before the next_node. - - Parameters - ========== - - next_node: LinkedListNode - The node before which the - new node is to be inserted. - - key - Any valid identifier to uniquely - identify the node in the linked list. - - data - Any valid data to be stored in the node. - """ - raise NotImplementedError('This is an abstract method') - - def popleft(self): - """ - Extracts the Node from the left - i.e. start of the list. - - Returns - ======= - - old_head: LinkedListNode - The leftmost element of linked - list. - """ - return self.extract(0) - - def popright(self): - """ - Extracts the node from the right - of the linked list. - - Returns - ======= - - old_tail: LinkedListNode - The leftmost element of linked - list. - """ - return self.extract(-1) - -class DoublyLinkedList(LinkedList): - """ - Represents Doubly Linked List - - Parameters - ========== - - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs import DoublyLinkedList - >>> dll = DoublyLinkedList() - >>> dll.append(6) - >>> dll[0].key - 6 - >>> dll.head.key - 6 - >>> dll.append(5) - >>> dll.appendleft(2) - >>> str(dll) - "['(2, None)', '(6, None)', '(5, None)']" - >>> dll[0].key = 7.2 - >>> dll.extract(1).key - 6 - >>> str(dll) - "['(7.2, None)', '(5, None)']" - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Doubly_linked_list - - """ - __slots__ = ['head', 'tail', 'size'] - - def __new__(cls, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = LinkedList.__new__(cls) - obj.head = None - obj.tail = None - obj.size = 0 - return obj - - @classmethod - def methods(cls): - return ['__new__', 'insert_after', - 'insert_before', 'insert_at', 'extract'] - - def insert_after(self, prev_node, key, data=None): - self.size += 1 - new_node = LinkedListNode(key, data, - links=['next', 'prev'], - addrs=[None, None]) - new_node.next = prev_node.next - if new_node.next is not None: - new_node.next.prev = new_node - prev_node.next = new_node - new_node.prev = prev_node - - if new_node.next is None: - self.tail = new_node - - def insert_before(self, next_node, key, data=None): - self.size += 1 - new_node = LinkedListNode(key, data, - links=['next', 'prev'], - addrs=[None, None]) - new_node.prev = next_node.prev - next_node.prev = new_node - new_node.next = next_node - if new_node.prev is not None: - new_node.prev.next = new_node - else: - self.head = new_node - - def insert_at(self, index, key, data=None): - if self.size == 0 and (index in (0, -1)): - index = 0 - - if index < 0: - index = self.size + index - - if index > self.size: - raise IndexError('%d index is out of range.'%(index)) - - self.size += 1 - new_node = LinkedListNode(key, data, - links=['next', 'prev'], - addrs=[None, None]) - if self.size == 1: - self.head, self.tail = \ - new_node, new_node - elif index == self.size - 1: - new_node.prev = self.tail - new_node.next = self.tail.next - self.tail.next = new_node - self.tail = new_node - else: - counter = 0 - current_node = self.head - prev_node = None - while counter != index: - prev_node = current_node - current_node = current_node.next - counter += 1 - new_node.prev = prev_node - new_node.next = current_node - if prev_node is not None: - prev_node.next = new_node - if current_node is not 
None: - current_node.prev = new_node - if new_node.next is None: - self.tail = new_node - if new_node.prev is None: - self.head = new_node - - def extract(self, index): - if self.is_empty: - raise ValueError("The list is empty.") - - if index < 0: - index = self.size + index - - if index >= self.size: - raise IndexError('%d is out of range.'%(index)) - - self.size -= 1 - counter = 0 - current_node = self.head - prev_node = None - while counter != index: - prev_node = current_node - current_node = current_node.next - counter += 1 - if prev_node is not None: - prev_node.next = current_node.next - if current_node.next is not None: - current_node.next.prev = prev_node - if index == 0: - self.head = current_node.next - if index == self.size: - self.tail = current_node.prev - return current_node - -class SinglyLinkedList(LinkedList): - """ - Represents Singly Linked List - - Parameters - ========== - - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs import SinglyLinkedList - >>> sll = SinglyLinkedList() - >>> sll.append(6) - >>> sll[0].key - 6 - >>> sll.head.key - 6 - >>> sll.append(5) - >>> sll.appendleft(2) - >>> str(sll) - "['(2, None)', '(6, None)', '(5, None)']" - >>> sll[0].key = 7.2 - >>> sll.extract(1).key - 6 - >>> str(sll) - "['(7.2, None)', '(5, None)']" - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Singly_linked_list - - """ - __slots__ = ['head', 'tail', 'size'] - - def __new__(cls, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = LinkedList.__new__(cls) - obj.head = None - obj.tail = None - obj.size = 0 - return obj - - @classmethod - def methods(cls): - return ['insert_after', 'insert_at', - 'extract'] - - def insert_after(self, prev_node, key, data=None): - self.size += 1 - new_node = LinkedListNode(key, data, - links=['next'], - addrs=[None]) - new_node.next = prev_node.next - prev_node.next = new_node - - if new_node.next is None: - self.tail = new_node - - def insert_at(self, index, key, data=None): - if self.size == 0 and (index in (0, -1)): - index = 0 - - if index < 0: - index = self.size + index - - if index > self.size: - raise IndexError('%d index is out of range.'%(index)) - - self.size += 1 - new_node = LinkedListNode(key, data, - links=['next'], - addrs=[None]) - if self.size == 1: - self.head, self.tail = \ - new_node, new_node - elif index == self.size - 1: - new_node.next = self.tail.next - self.tail.next = new_node - self.tail = new_node - else: - counter = 0 - current_node = self.head - prev_node = None - while counter != index: - prev_node = current_node - current_node = current_node.next - counter += 1 - new_node.next = current_node - if prev_node is not None: - prev_node.next = new_node - if new_node.next is None: - self.tail = new_node - if index == 0: - self.head = new_node - - def extract(self, index): - if self.is_empty: - raise ValueError("The list is empty.") - - if index < 0: - index = self.size + index - - if index >= self.size: - raise IndexError('%d is out of range.'%(index)) - - self.size -= 1 - counter = 0 - current_node = self.head - prev_node = None - while counter != index: - prev_node = current_node - current_node = current_node.next - counter += 1 - if prev_node is not None: - prev_node.next = current_node.next - if index == 0: - 
self.head = current_node.next - if index == self.size: - self.tail = prev_node - return current_node - -class SinglyCircularLinkedList(SinglyLinkedList): - """ - Represents Singly Circular Linked List. - - Parameters - ========== - - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - - Examples - ======== - - >>> from pydatastructs import SinglyCircularLinkedList - >>> scll = SinglyCircularLinkedList() - >>> scll.append(6) - >>> scll[0].key - 6 - >>> scll.head.key - 6 - >>> scll.append(5) - >>> scll.appendleft(2) - >>> str(scll) - "['(2, None)', '(6, None)', '(5, None)']" - >>> scll[0].key = 7.2 - >>> scll.extract(1).key - 6 - >>> str(scll) - "['(7.2, None)', '(5, None)']" - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Linked_list#Circular_linked_list - - """ - - @classmethod - def methods(cls): - return ['insert_after', 'insert_at', 'extract'] - - def insert_after(self, prev_node, key, data=None): - super(SinglyCircularLinkedList, self).\ - insert_after(prev_node, key, data) - if prev_node.next.next == self.head: - self.tail = prev_node.next - - def insert_at(self, index, key, data=None): - super(SinglyCircularLinkedList, self).insert_at(index, key, data) - if self.size == 1: - self.head.next = self.head - new_node = self.__getitem__(index) - if index == 0: - self.tail.next = new_node - if new_node.next == self.head: - self.tail = new_node - - def extract(self, index): - node = super(SinglyCircularLinkedList, self).extract(index) - if self.tail is None: - self.head = None - elif index == 0: - self.tail.next = self.head - return node - -class DoublyCircularLinkedList(DoublyLinkedList): - """ - Represents Doubly Circular Linked List - - Parameters - ========== - - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. 
- - Examples - ======== - - >>> from pydatastructs import DoublyCircularLinkedList - >>> dcll = DoublyCircularLinkedList() - >>> dcll.append(6) - >>> dcll[0].key - 6 - >>> dcll.head.key - 6 - >>> dcll.append(5) - >>> dcll.appendleft(2) - >>> str(dcll) - "['(2, None)', '(6, None)', '(5, None)']" - >>> dcll[0].key = 7.2 - >>> dcll.extract(1).key - 6 - >>> str(dcll) - "['(7.2, None)', '(5, None)']" - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Doubly_linked_list#Circular_doubly_linked_lists - - """ - - @classmethod - def methods(cls): - return ['insert_after', 'insert_before', - 'insert_at', 'extract'] - - def insert_after(self, prev_node, key, data=None): - super(DoublyCircularLinkedList, self)\ - .insert_after(prev_node, key, data) - if prev_node.next.next == self.head: - self.tail = prev_node.next - - def insert_before(self, next_node, key, data=None): - super(DoublyCircularLinkedList, self).\ - insert_before(next_node, key, data) - if next_node == self.head: - self.head = next_node.prev - - def insert_at(self, index, key, data=None): - super(DoublyCircularLinkedList, self).\ - insert_at(index, key, data) - if self.size == 1: - self.head.next = self.head - self.head.prev = self.head - new_node = self.__getitem__(index) - if index == 0: - self.tail.next = new_node - new_node.prev = self.tail - if new_node.next == self.head: - self.tail = new_node - new_node.next = self.head - self.head.prev = new_node - - def extract(self, index): - node = super(DoublyCircularLinkedList, self).extract(index) - if self.tail is None: - self.head = None - elif index == 0: - self.tail.next = self.head - return node - -class SkipList(object): - """ - Represents Skip List - - Parameters - ========== - - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. 
- - Examples - ======== - - >>> from pydatastructs import SkipList - >>> sl = SkipList() - >>> sl.insert(6) - >>> sl.insert(1) - >>> sl.insert(3) - >>> node = sl.extract(1) - >>> str(node) - '(1, None)' - >>> sl.insert(4) - >>> sl.insert(2) - >>> sl.search(4) - True - >>> sl.search(10) - False - - """ - - __slots__ = ['head', 'tail', '_levels', '_num_nodes', 'seed'] - - def __new__(cls, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = object.__new__(cls) - obj.head, obj.tail = None, None - obj._num_nodes = 0 - obj._levels = 0 - obj._add_level() - return obj - - @classmethod - def methods(cls): - return ['__new__', 'levels', 'search', - 'extract', '__str__', 'size'] - - def _add_level(self): - self.tail = SkipNode(math.inf, next=None, down=self.tail) - self.head = SkipNode(-math.inf, next=self.tail, down=self.head) - self._levels += 1 - - @property - def levels(self): - """ - Returns the number of levels in the - current skip list. - """ - return self._levels - - def _search(self, key) -> list: - path = [] - node = self.head - while node: - if node.next.key >= key: - path.append(node) - node = node.down - else: - node = node.next - return path - - def search(self, key) -> bool: - return self._search(key)[-1].next.key == key - - def insert(self, key, data=None): - """ - Inserts a new node to the skip list. - - Parameters - ========== - - key - Any valid identifier to uniquely - identify the node in the linked list. - - data - Any valid data to be stored in the node. 
- """ - path = self._search(key) - tip = path[-1] - below = SkipNode(key=key, data=data, next=tip.next) - tip.next = below - total_level = self._levels - level = 1 - while random.getrandbits(1) % 2 == 0 and level <= total_level: - if level == total_level: - self._add_level() - prev = self.head - else: - prev = path[total_level - 1 - level] - below = SkipNode(key=key, data=None, next=prev.next, down=below) - prev.next = below - level += 1 - self._num_nodes += 1 - - @property - def size(self): - return self._num_nodes - - def extract(self, key): - """ - Extracts the node with the given key in the skip list. - - Parameters - ========== - - key - The key of the node under consideration. - - Returns - ======= - - return_node: SkipNode - The node with given key. - """ - path = self._search(key) - tip = path[-1] - if tip.next.key != key: - raise KeyError('Node with key %s is not there in %s'%(key, self)) - return_node = SkipNode(tip.next.key, tip.next.data) - total_level = self._levels - level = total_level - 1 - while level >= 0 and path[level].next.key == key: - path[level].next = path[level].next.next - level -= 1 - walk = self.head - while walk is not None: - if walk.next is self.tail: - self._levels -= 1 - self.head = walk.down - self.tail = self.tail.down - walk = walk.down - else: - break - self._num_nodes -= 1 - if self._levels == 0: - self._add_level() - return return_node - - def __str__(self): - node2row = {} - node2col = {} - walk = self.head - curr_level = self._levels - 1 - while walk is not None: - curr_node = walk - col = 0 - while curr_node is not None: - if curr_node.key != math.inf and curr_node.key != -math.inf: - node2row[curr_node] = curr_level - if walk.down is None: - node2col[curr_node.key] = col - col += 1 - curr_node = curr_node.next - walk = walk.down - curr_level -= 1 - sl_mat = [[str(None) for _ in range(self._num_nodes)] for _ in range(self._levels)] - walk = self.head - while walk is not None: - curr_node = walk - while curr_node is not 
None: - if curr_node in node2row: - row = node2row[curr_node] - col = node2col[curr_node.key] - sl_mat[row][col] = str(curr_node) - curr_node = curr_node.next - walk = walk.down - sl_str = "" - for level_list in sl_mat[::-1]: - for node_str in level_list: - sl_str += node_str + " " - if len(sl_str) > 0: - sl_str += "\n" - return sl_str diff --git a/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/__init__.py b/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_algorithms.py b/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_algorithms.py deleted file mode 100644 index 3e287bb74..000000000 --- a/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_algorithms.py +++ /dev/null @@ -1,423 +0,0 @@ -from pydatastructs import ( - merge_sort_parallel, DynamicOneDimensionalArray, - OneDimensionalArray, brick_sort, brick_sort_parallel, - heapsort, matrix_multiply_parallel, counting_sort, bucket_sort, - cocktail_shaker_sort, quick_sort, longest_common_subsequence, is_ordered, - upper_bound, lower_bound, longest_increasing_subsequence, next_permutation, - prev_permutation, bubble_sort, linear_search, binary_search, jump_search, - selection_sort, insertion_sort, intro_sort, shell_sort, radix_sort, Backend) - -from pydatastructs.utils.raises_util import raises -import random - -def _test_common_sort(sort, *args, **kwargs): - random.seed(1000) - - n = random.randint(10, 20) - arr = DynamicOneDimensionalArray(int, 0) - generated_ints = [] - for _ in range(n): - integer = random.randint(1, 1000) - generated_ints.append(integer) - arr.append(integer) - for _ in range(n//3): - integer = random.randint(0, n//2) - generated_ints.append(integer) - arr.delete(integer) - expected_arr_1 = [686, 779, 102, 134, 362, 448, - 480, 548, None, None, 
None, - 228, 688, 247, 373, 696, None, - None, None, None, None, None, - None, None, None, None, None, - None, None, None, None] - sort(arr, *args, **kwargs, start=2, end=10) - assert arr._data == expected_arr_1 - sort(arr, *args, **kwargs) - expected_arr_2 = [102, 134, 228, 247, 362, 373, 448, - 480, 548, 686, 688, 696, 779, - None, None, None, None, None, None, - None, None, None, None, None, - None, None, None, None, None, None, None] - assert arr._data == expected_arr_2 - assert (arr._last_pos_filled, arr._num, arr._size) == (12, 13, 31) - - arr = DynamicOneDimensionalArray(int, 0, backend=Backend.CPP) - int_idx = 0 - for _ in range(n): - arr.append(generated_ints[int_idx]) - int_idx += 1 - for _ in range(n//3): - arr.delete(generated_ints[int_idx]) - int_idx += 1 - sort(arr, *args, **kwargs, start=2, end=10) - for i in range(len(expected_arr_1)): - assert arr[i] == expected_arr_1[i] - sort(arr, *args, **kwargs) - for i in range(len(expected_arr_2)): - assert arr[i] == expected_arr_2[i] - assert (arr._last_pos_filled, arr._num, arr.size) == (12, 13, 31) - - n = random.randint(10, 20) - arr = OneDimensionalArray(int, n) - generated_ints.clear() - for i in range(n): - integer = random.randint(1, 1000) - arr[i] = integer - generated_ints.append(integer) - expected_arr_3 = [42, 695, 147, 500, 768, - 998, 473, 732, 728, 426, - 709, 910] - sort(arr, *args, **kwargs, start=2, end=5) - assert arr._data == expected_arr_3 - - arr = OneDimensionalArray(int, n, backend=Backend.CPP) - int_idx = 0 - for i in range(n): - arr[i] = generated_ints[int_idx] - int_idx += 1 - sort(arr, *args, **kwargs, start=2, end=5) - for i in range(len(expected_arr_3)): - assert arr[i] == expected_arr_3[i] - -def test_merge_sort_parallel(): - _test_common_sort(merge_sort_parallel, num_threads=5) - -def test_brick_sort(): - _test_common_sort(brick_sort) - -def test_brick_sort_parallel(): - _test_common_sort(brick_sort_parallel, num_threads=3) - -def test_heapsort(): - _test_common_sort(heapsort) 
- -def test_bucket_sort(): - _test_common_sort(bucket_sort) - -def test_counting_sort(): - random.seed(1000) - - n = random.randint(10, 20) - arr = DynamicOneDimensionalArray(int, 0) - for _ in range(n): - arr.append(random.randint(1, 1000)) - for _ in range(n//3): - arr.delete(random.randint(0, n//2)) - - expected_arr = [102, 134, 228, 247, 362, 373, 448, - 480, 548, 686, 688, 696, 779] - assert counting_sort(arr)._data == expected_arr - -def test_cocktail_shaker_sort(): - _test_common_sort(cocktail_shaker_sort) - -def test_quick_sort(): - _test_common_sort(quick_sort) - _test_common_sort(quick_sort, backend=Backend.CPP) - -def test_intro_sort(): - _test_common_sort(intro_sort) - -def test_bubble_sort(): - _test_common_sort(bubble_sort) - _test_common_sort(bubble_sort, backend=Backend.CPP) - _test_common_sort(bubble_sort, backend=Backend.LLVM) - -def test_selection_sort(): - _test_common_sort(selection_sort) - _test_common_sort(selection_sort, backend=Backend.CPP) - -def test_insertion_sort(): - _test_common_sort(insertion_sort) - _test_common_sort(insertion_sort, backend=Backend.CPP) - -def test_matrix_multiply_parallel(): - ODA = OneDimensionalArray - - expected_result = [[3, 3, 3], [1, 2, 1], [2, 2, 2]] - - I = ODA(ODA, [ODA(int, [1, 1, 0]), ODA(int, [0, 1, 0]), ODA(int, [0, 0, 1])]) - J = ODA(ODA, [ODA(int, [2, 1, 2]), ODA(int, [1, 2, 1]), ODA(int, [2, 2, 2])]) - output = matrix_multiply_parallel(I, J, num_threads=5) - assert expected_result == output - - I = [[1, 1, 0], [0, 1, 0], [0, 0, 1]] - J = [[2, 1, 2], [1, 2, 1], [2, 2, 2]] - output = matrix_multiply_parallel(I, J, num_threads=5) - assert expected_result == output - - I = [[1, 1, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]] - J = [[2, 1, 2], [1, 2, 1], [2, 2, 2]] - assert raises(ValueError, lambda: matrix_multiply_parallel(I, J, num_threads=5)) - - I = [[1, 1, 0], [0, 1, 0], [0, 0, 1]] - J = [[2, 1, 2], [1, 2, 1], [2, 2, 2]] - output = matrix_multiply_parallel(I, J, num_threads=1) - assert expected_result == 
output - -def test_longest_common_sequence(): - ODA = OneDimensionalArray - expected_result = "['A', 'S', 'C', 'I', 'I']" - - str1 = ODA(str, ['A', 'A', 'S', 'C', 'C', 'I', 'I']) - str2 = ODA(str, ['A', 'S', 'S', 'C', 'I', 'I', 'I', 'I']) - output = longest_common_subsequence(str1, str2) - assert str(output) == expected_result - - expected_result = "['O', 'V', 'A']" - - I = ODA(str, ['O', 'V', 'A', 'L']) - J = ODA(str, ['F', 'O', 'R', 'V', 'A', 'E', 'W']) - output = longest_common_subsequence(I, J) - assert str(output) == expected_result - - X = ODA(int, [1, 2, 3, 4, 5, 6, 6, 5, 4, 3, 2, 1]) - Y = ODA(int, [1, 2, 3, 4, 4, 3, 2, 1]) - output = longest_common_subsequence(X, Y) - assert str(output) == '[1, 2, 3, 4, 4, 3, 2, 1]' - - Z = ODA(int, []) - output = longest_common_subsequence(Y, Z) - assert str(output) == '[]' - -def test_is_ordered(): - def _test_inner_ordered(*args, **kwargs): - ODA = OneDimensionalArray - DODA = DynamicOneDimensionalArray - - expected_result = True - arr = ODA(int, [1, 2, 5, 6]) - output = is_ordered(arr, **kwargs) - assert output == expected_result - - expected_result = False - arr1 = ODA(int, [4, 3, 2, 1]) - output = is_ordered(arr1, **kwargs) - assert output == expected_result - - expected_result = True - arr2 = ODA(int, [6, 1, 2, 3, 4, 5]) - output = is_ordered(arr2, start=1, end=5, **kwargs) - assert output == expected_result - - expected_result = True - arr3 = ODA(int, [0, -1, -2, -3, -4, 4]) - output = is_ordered(arr3, start=1, end=4, - comp=lambda u, v: u > v, **kwargs) - assert output == expected_result - - expected_result = True - arr4 = DODA(int, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) - arr4.delete(0) - output = is_ordered(arr4, **kwargs) - assert output == expected_result - - _test_inner_ordered() - _test_inner_ordered(backend=Backend.CPP) - - -def test_upper_bound(): - ODA = OneDimensionalArray - arr1 = ODA(int, [3, 3, 3]) - output = upper_bound(arr1, 3) - expected_result = 3 - assert expected_result == output - - arr2 = ODA(int, 
[4, 4, 5, 6]) - output = upper_bound(arr2, 4, end=3) - expected_result = 2 - assert expected_result == output - - arr3 = ODA(int, [6, 6, 7, 8, 9]) - output = upper_bound(arr3, 5, start=2, end=4) - expected_result = 2 - assert expected_result == output - - arr4 = ODA(int, [3, 4, 4, 6]) - output = upper_bound(arr4, 5, start=1, end=3) - expected_result = 3 - assert expected_result == output - - arr5 = ODA(int, [7, 6, 6, 6, 6, 5, 4, 3]) - output = upper_bound(arr5, 6, comp=lambda x, y: x > y) - expected_result = 5 - assert expected_result == output - - arr6 = ODA(int, [7, 6, 6, 6, 6, 5, 4, 3]) - output = upper_bound(arr6, 2, start=2, comp=lambda x, y: x > y) - expected_result = 8 - assert expected_result == output - - arr7 = ODA(int, [7, 6, 6, 6, 6, 5, 4, 3]) - output = upper_bound(arr7, 9, start=3, end=7, comp=lambda x, y: x > y) - expected_result = 3 - assert expected_result == output - - arr8 = ODA(int, [7, 6, 6, 6, 6, 5, 4, 3]) - output = upper_bound(arr8, 6, end=3, comp=lambda x, y: x > y) - expected_result = 3 - assert expected_result == output - - -def test_lower_bound(): - ODA = OneDimensionalArray - arr1 = ODA(int, [3, 3, 3]) - output = lower_bound(arr1, 3, start=1) - expected_result = 1 - assert expected_result == output - - arr2 = ODA(int, [4, 4, 4, 4, 5, 6]) - output = lower_bound(arr2, 5, end=3) - expected_result = 3 - assert expected_result == output - - arr3 = ODA(int, [6, 6, 7, 8, 9]) - output = lower_bound(arr3, 5, end=3) - expected_result = 0 - assert expected_result == output - - arr4 = ODA(int, [3, 4, 4, 4]) - output = lower_bound(arr4, 5) - expected_result = 4 - assert expected_result == output - - arr5 = ODA(int, [7, 6, 6, 6, 6, 5, 4, 3]) - output = lower_bound(arr5, 5, comp=lambda x, y: x > y) - expected_result = 5 - assert expected_result == output - - arr6 = ODA(int, [7, 6, 6, 6, 6, 5, 4, 3]) - output = lower_bound(arr6, 2, start=4, comp=lambda x, y: x > y) - expected_result = 8 - assert expected_result == output - - arr7 = ODA(int, [7, 6, 6, 
6, 6, 5, 4, 3]) - output = lower_bound(arr7, 9, end=5, comp=lambda x, y: x > y) - expected_result = 0 - assert expected_result == output - - arr8 = ODA(int, [7, 6, 6, 6, 6, 5, 4, 3]) - output = lower_bound(arr8, 6, end=3, comp=lambda x, y: x > y) - expected_result = 1 - assert expected_result == output - -def test_longest_increasing_subsequence(): - ODA = OneDimensionalArray - - arr1 = ODA(int, [2, 5, 3, 7, 11, 8, 10, 13, 6]) - output = longest_increasing_subsequence(arr1) - expected_result = [2, 3, 7, 8, 10, 13] - assert str(expected_result) == str(output) - - arr2 = ODA(int, [3, 4, -1, 5, 8, 2, 2, 2, 3, 12, 7, 9, 10]) - output = longest_increasing_subsequence(arr2) - expected_result = [-1, 2, 3, 7, 9, 10] - assert str(expected_result) == str(output) - - arr3 = ODA(int, [6, 6, 6, 19, 9]) - output = longest_increasing_subsequence(arr3) - expected_result = [6, 9] - assert str(expected_result) == str(output) - - arr4 = ODA(int, [5, 4, 4, 3, 3, 6, 6, 8]) - output = longest_increasing_subsequence(arr4) - expected_result = [3, 6, 8] - assert str(expected_result) == str(output) - - arr5 = ODA(int, [7, 6, 6, 6, 5, 4, 3]) - output = longest_increasing_subsequence(arr5) - expected_result = [3] - assert str(expected_result) == str(output) - -def _test_permutation_common(array, expected_perms, func): - num_perms = len(expected_perms) - - output = [] - for _ in range(num_perms): - signal, array = func(array) - output.append(array) - if not signal: - break - - assert len(output) == len(expected_perms) - for perm1, perm2 in zip(output, expected_perms): - assert str(perm1) == str(perm2) - -def test_next_permutation(): - ODA = OneDimensionalArray - - array = ODA(int, [1, 2, 3]) - expected_perms = [[1, 3, 2], [2, 1, 3], - [2, 3, 1], [3, 1, 2], - [3, 2, 1], [1, 2, 3]] - _test_permutation_common(array, expected_perms, next_permutation) - -def test_prev_permutation(): - ODA = OneDimensionalArray - - array = ODA(int, [3, 2, 1]) - expected_perms = [[3, 1, 2], [2, 3, 1], - [2, 1, 3], [1, 
3, 2], - [1, 2, 3], [3, 2, 1]] - _test_permutation_common(array, expected_perms, prev_permutation) - -def test_next_prev_permutation(): - ODA = OneDimensionalArray - random.seed(1000) - - for i in range(100): - data = set(random.sample(range(1, 10000), 10)) - array = ODA(int, list(data)) - - _, next_array = next_permutation(array) - _, orig_array = prev_permutation(next_array) - assert str(orig_array) == str(array) - - _, prev_array = prev_permutation(array) - _, orig_array = next_permutation(prev_array) - assert str(orig_array) == str(array) - -def _test_common_search(search_func, sort_array=True, **kwargs): - ODA = OneDimensionalArray - - array = ODA(int, [1, 2, 5, 7, 10, 29, 40]) - for i in range(len(array)): - assert i == search_func(array, array[i], **kwargs) - - checker_array = [None, None, 2, 3, 4, 5, None] - for i in range(len(array)): - assert checker_array[i] == search_func(array, array[i], start=2, end=5, **kwargs) - - random.seed(1000) - - for i in range(25): - data = list(set(random.sample(range(1, 10000), 100))) - - if sort_array: - data.sort() - - array = ODA(int, list(data)) - - for i in range(len(array)): - assert search_func(array, array[i], **kwargs) == i - - for _ in range(50): - assert search_func(array, random.randint(10001, 50000), **kwargs) is None - -def test_linear_search(): - _test_common_search(linear_search, sort_array=False) - _test_common_search(linear_search, sort_array=False, backend=Backend.CPP) - -def test_binary_search(): - _test_common_search(binary_search) - _test_common_search(binary_search, backend=Backend.CPP) - -def test_jump_search(): - _test_common_search(jump_search) - _test_common_search(jump_search, backend=Backend.CPP) - -def test_shell_sort(): - _test_common_sort(shell_sort) - -def test_radix_sort(): - _test_common_sort(radix_sort) diff --git a/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_arrays.py b/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_arrays.py 
deleted file mode 100644 index 886510113..000000000 --- a/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_arrays.py +++ /dev/null @@ -1,157 +0,0 @@ -from pydatastructs.linear_data_structures import ( - OneDimensionalArray, DynamicOneDimensionalArray, - MultiDimensionalArray, ArrayForTrees) -from pydatastructs.utils.misc_util import Backend -from pydatastructs.utils.raises_util import raises -from pydatastructs.utils import TreeNode -from pydatastructs.utils._backend.cpp import _nodes - -def test_OneDimensionalArray(): - ODA = OneDimensionalArray - A = ODA(int, 5, [1.0, 2, 3, 4, 5], init=6) - A[1] = 2.0 - assert str(A) == '[1, 2, 3, 4, 5]' - assert A - assert ODA(int, [1.0, 2, 3, 4, 5], 5) - assert ODA(int, 5) - assert ODA(int, [1.0, 2, 3]) - assert raises(IndexError, lambda: A[7]) - assert raises(IndexError, lambda: A[-1]) - assert raises(ValueError, lambda: ODA()) - assert raises(ValueError, lambda: ODA(int, 1, 2, 3)) - assert raises(TypeError, lambda: ODA(int, 5.0, set([1, 2, 3]))) - assert raises(TypeError, lambda: ODA(int, 5.0)) - assert raises(TypeError, lambda: ODA(int, set([1, 2, 3]))) - assert raises(ValueError, lambda: ODA(int, 3, [1])) - - A = ODA(int, 5, [1, 2, 3, 4, 5], init=6, backend=Backend.CPP) - A[1] = 2 - assert str(A) == "['1', '2', '3', '4', '5']" - assert A - assert ODA(int, [1, 2, 3, 4, 5], 5, backend=Backend.CPP) - assert ODA(int, 5, backend=Backend.CPP) - assert ODA(int, [1, 2, 3], backend=Backend.CPP) - assert raises(TypeError, lambda: ODA(int, [1.0, 2, 3, 4, 5], 5, backend=Backend.CPP)) - assert raises(TypeError, lambda: ODA(int, [1.0, 2, 3], backend=Backend.CPP)) - assert raises(IndexError, lambda: A[7]) - assert raises(IndexError, lambda: A[-1]) - assert raises(ValueError, lambda: ODA(backend=Backend.CPP)) - assert raises(ValueError, lambda: ODA(int, 1, 2, 3, backend=Backend.CPP)) - assert raises(TypeError, lambda: ODA(int, 5.0, set([1, 2, 3]), backend=Backend.CPP)) - assert raises(TypeError, lambda: ODA(int, 
5.0, backend=Backend.CPP)) - assert raises(TypeError, lambda: ODA(int, set([1, 2, 3]), backend=Backend.CPP)) - assert raises(ValueError, lambda: ODA(int, 3, [1], backend=Backend.CPP)) - assert raises(ValueError, lambda: ODA(int, 3, [1], backend=Backend.CPP)) - assert raises(TypeError, lambda: A.fill(2.0)) - - -def test_MultiDimensionalArray(): - assert raises(ValueError, lambda: MultiDimensionalArray(int, 2, -1, 3)) - assert MultiDimensionalArray(int, 10).shape == (10,) - array = MultiDimensionalArray(int, 5, 9, 3, 8) - assert array.shape == (5, 9, 3, 8) - array.fill(5) - array[1, 3, 2, 5] = 2.0 - assert array - assert array[1, 3, 2, 5] == 2.0 - assert array[1, 3, 0, 5] == 5 - assert array[1, 2, 2, 5] == 5 - assert array[2, 3, 2, 5] == 5 - assert raises(IndexError, lambda: array[5]) - assert raises(IndexError, lambda: array[4, 10]) - assert raises(IndexError, lambda: array[-1]) - assert raises(IndexError, lambda: array[2, 3, 2, 8]) - assert raises(ValueError, lambda: MultiDimensionalArray()) - assert raises(ValueError, lambda: MultiDimensionalArray(int)) - assert raises(TypeError, lambda: MultiDimensionalArray(int, 5, 6, "")) - array = MultiDimensionalArray(int, 3, 2, 2) - array.fill(1) - array[0, 0, 0] = 0 - array[0, 0, 1] = 0 - array[1, 0, 0] = 0 - array[2, 1, 1] = 0 - assert str(array) == '[0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0]' - array = MultiDimensionalArray(int, 4) - assert array.shape == (4,) - array.fill(5) - array[3] = 3 - assert array[3] == 3 - -def test_DynamicOneDimensionalArray(): - DODA = DynamicOneDimensionalArray - A = DODA(int, 0) - A.append(1) - A.append(2) - A.append(3) - A.append(4) - assert str(A) == "['1', '2', '3', '4']" - A.delete(0) - A.delete(0) - A.delete(15) - A.delete(-1) - A.delete(1) - A.delete(2) - assert A._data == [4, None, None] - assert str(A) == "['4']" - assert A.size == 3 - A.fill(4) - assert A._data == [4, 4, 4] - b = DynamicOneDimensionalArray(int, 0) - b.append(1) - b.append(2) - b.append(3) - b.append(4) - b.append(5) - 
assert b._data == [1, 2, 3, 4, 5, None, None] - assert list(reversed(b)) == [5, 4, 3, 2, 1] - - A = DODA(int, 0, backend=Backend.CPP) - A.append(1) - A.append(2) - A.append(3) - A.append(4) - assert str(A) == "['1', '2', '3', '4']" - A.delete(0) - A.delete(0) - A.delete(15) - A.delete(-1) - A.delete(1) - A.delete(2) - assert [A[i] for i in range(A.size)] == [4, None, None] - assert A.size == 3 - A.fill(4) - assert [A[0], A[1], A[2]] == [4, 4, 4] - b = DODA(int, 0, backend=Backend.CPP) - b.append(1) - b.append(2) - b.append(3) - b.append(4) - b.append(5) - assert [b[i] for i in range(b.size)] == [1, 2, 3, 4, 5, None, None] - -def test_DynamicOneDimensionalArray2(): - DODA = DynamicOneDimensionalArray - root = TreeNode(1, 100) - A = DODA(TreeNode, [root]) - assert str(A[0]) == "(None, 1, 100, None)" - -def _test_ArrayForTrees(backend): - AFT = ArrayForTrees - root = TreeNode(1, 100,backend=backend) - if backend==Backend.PYTHON: - A = AFT(TreeNode, [root], backend=backend) - B = AFT(TreeNode, 0, backend=backend) - else: - A = AFT(_nodes.TreeNode, [root], backend=backend) - B = AFT(_nodes.TreeNode, 0, backend=backend) - assert str(A) == "['(None, 1, 100, None)']" - node = TreeNode(2, 200, backend=backend) - A.append(node) - assert str(A) == "['(None, 1, 100, None)', '(None, 2, 200, None)']" - assert str(B) == "[]" - -def test_ArrayForTrees(): - _test_ArrayForTrees(Backend.PYTHON) - -def test_cpp_ArrayForTrees(): - _test_ArrayForTrees(Backend.CPP) diff --git a/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_linked_lists.py b/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_linked_lists.py deleted file mode 100644 index b7f172ddc..000000000 --- a/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_linked_lists.py +++ /dev/null @@ -1,193 +0,0 @@ -from pydatastructs.linear_data_structures import DoublyLinkedList, SinglyLinkedList, SinglyCircularLinkedList, DoublyCircularLinkedList, SkipList 
-from pydatastructs.utils.raises_util import raises -import copy, random - -def test_DoublyLinkedList(): - random.seed(1000) - dll = DoublyLinkedList() - assert raises(IndexError, lambda: dll[2]) - dll.appendleft(5) - dll.append(1) - dll.appendleft(2) - dll.append(3) - dll.insert_after(dll[-1], 4) - dll.insert_after(dll[2], 6) - dll.insert_before(dll[4], 1.1) - dll.insert_before(dll[0], 7) - dll.insert_at(0, 2) - dll.insert_at(-1, 9) - dll.extract(2) - assert dll.popleft().key == 2 - assert dll.popright().key == 4 - assert dll.search(3) == dll[-2] - assert dll.search(-1) is None - dll[-2].key = 0 - assert str(dll) == ("['(7, None)', '(5, None)', '(1, None)', " - "'(6, None)', '(1.1, None)', '(0, None)', " - "'(9, None)']") - assert len(dll) == 7 - assert raises(IndexError, lambda: dll.insert_at(8, None)) - assert raises(IndexError, lambda: dll.extract(20)) - dll_copy = DoublyCircularLinkedList() - for i in range(dll.size): - dll_copy.append(dll[i]) - for i in range(len(dll)): - if i%2 == 0: - dll.popleft() - else: - dll.popright() - assert str(dll) == "[]" - for _ in range(len(dll_copy)): - index = random.randint(0, len(dll_copy) - 1) - dll_copy.extract(index) - assert str(dll_copy) == "[]" - assert raises(ValueError, lambda: dll_copy.extract(1)) - -def test_SinglyLinkedList(): - random.seed(1000) - sll = SinglyLinkedList() - assert raises(IndexError, lambda: sll[2]) - sll.appendleft(5) - sll.append(1) - sll.appendleft(2) - sll.append(3) - sll.insert_after(sll[1], 4) - sll.insert_after(sll[-1], 6) - sll.insert_at(0, 2) - sll.insert_at(-1, 9) - sll.extract(2) - assert sll.popleft().key == 2 - assert sll.popright().key == 6 - sll[-2].key = 0 - assert str(sll) == ("['(2, None)', '(4, None)', '(1, None)', " - "'(0, None)', '(9, None)']") - assert len(sll) == 5 - assert raises(IndexError, lambda: sll.insert_at(6, None)) - assert raises(IndexError, lambda: sll.extract(20)) - sll_copy = DoublyCircularLinkedList() - for i in range(sll.size): - sll_copy.append(sll[i]) - for 
i in range(len(sll)): - if i%2 == 0: - sll.popleft() - else: - sll.popright() - assert str(sll) == "[]" - for _ in range(len(sll_copy)): - index = random.randint(0, len(sll_copy) - 1) - sll_copy.extract(index) - assert str(sll_copy) == "[]" - assert raises(ValueError, lambda: sll_copy.extract(1)) - -def test_SinglyCircularLinkedList(): - random.seed(1000) - scll = SinglyCircularLinkedList() - assert raises(IndexError, lambda: scll[2]) - scll.appendleft(5) - scll.append(1) - scll.appendleft(2) - scll.append(3) - scll.insert_after(scll[1], 4) - scll.insert_after(scll[-1], 6) - scll.insert_at(0, 2) - scll.insert_at(-1, 9) - scll.extract(2) - assert scll.popleft().key == 2 - assert scll.popright().key == 6 - assert scll.search(-1) is None - scll[-2].key = 0 - assert str(scll) == ("['(2, None)', '(4, None)', '(1, None)', " - "'(0, None)', '(9, None)']") - assert len(scll) == 5 - assert raises(IndexError, lambda: scll.insert_at(6, None)) - assert raises(IndexError, lambda: scll.extract(20)) - scll_copy = DoublyCircularLinkedList() - for i in range(scll.size): - scll_copy.append(scll[i]) - for i in range(len(scll)): - if i%2 == 0: - scll.popleft() - else: - scll.popright() - assert str(scll) == "[]" - for _ in range(len(scll_copy)): - index = random.randint(0, len(scll_copy) - 1) - scll_copy.extract(index) - assert str(scll_copy) == "[]" - assert raises(ValueError, lambda: scll_copy.extract(1)) - -def test_DoublyCircularLinkedList(): - random.seed(1000) - dcll = DoublyCircularLinkedList() - assert raises(IndexError, lambda: dcll[2]) - dcll.appendleft(5) - dcll.append(1) - dcll.appendleft(2) - dcll.append(3) - dcll.insert_after(dcll[-1], 4) - dcll.insert_after(dcll[2], 6) - dcll.insert_before(dcll[4], 1) - dcll.insert_before(dcll[0], 7) - dcll.insert_at(0, 2) - dcll.insert_at(-1, 9) - dcll.extract(2) - assert dcll.popleft().key == 2 - assert dcll.popright().key == 4 - dcll[-2].key = 0 - assert str(dcll) == ("['(7, None)', '(5, None)', '(1, None)', " - "'(6, None)', '(1, 
None)', '(0, None)', " - "'(9, None)']") - assert len(dcll) == 7 - assert raises(IndexError, lambda: dcll.insert_at(8, None)) - assert raises(IndexError, lambda: dcll.extract(20)) - dcll_copy = DoublyCircularLinkedList() - for i in range(dcll.size): - dcll_copy.append(dcll[i]) - for i in range(len(dcll)): - if i%2 == 0: - dcll.popleft() - else: - dcll.popright() - assert str(dcll) == "[]" - for _ in range(len(dcll_copy)): - index = random.randint(0, len(dcll_copy) - 1) - dcll_copy.extract(index) - assert str(dcll_copy) == "[]" - assert raises(ValueError, lambda: dcll_copy.extract(1)) - -def test_SkipList(): - random.seed(0) - sl = SkipList() - sl.insert(2) - sl.insert(10) - sl.insert(92) - sl.insert(1) - sl.insert(4) - sl.insert(27) - sl.extract(10) - assert str(sl) == ("(1, None) None None None None \n" - "(1, None) None None None None \n" - "(1, None) (2, None) (4, None) (27, None) (92, None) \n") - assert raises(KeyError, lambda: sl.extract(15)) - assert sl.search(1) is True - assert sl.search(47) is False - - sl = SkipList() - - for a in range(0, 20, 2): - sl.insert(a) - assert sl.search(16) is True - for a in range(4, 20, 4): - sl.extract(a) - assert sl.search(10) is True - for a in range(4, 20, 4): - sl.insert(a) - for a in range(0, 20, 2): - sl.extract(a) - assert sl.search(3) is False - - li = SkipList() - li.insert(1) - li.insert(2) - assert li.levels == 1 - assert li.size == 2 diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/__init__.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/__init__.py deleted file mode 100644 index 6ed099769..000000000 --- a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/__init__.py +++ /dev/null @@ -1,51 +0,0 @@ -__all__ = [] - -from . 
import ( - stack, - binomial_trees, - queue, - disjoint_set, - sparse_table, -) - -from .binomial_trees import ( - BinomialTree -) -__all__.extend(binomial_trees.__all__) - -from .stack import ( - Stack, -) -__all__.extend(stack.__all__) - -from .queue import ( - Queue, - PriorityQueue -) -__all__.extend(queue.__all__) - -from .disjoint_set import ( - DisjointSetForest, -) -__all__.extend(disjoint_set.__all__) - -from .sparse_table import ( - SparseTable, -) -__all__.extend(sparse_table.__all__) - -from .segment_tree import ( - ArraySegmentTree, -) -__all__.extend(segment_tree.__all__) - -from .algorithms import ( - RangeQueryStatic, - RangeQueryDynamic -) -__all__.extend(algorithms.__all__) - -from .multiset import ( - Multiset -) -__all__.extend(multiset.__all__) diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/_backend/__init__.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/_backend/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/algorithms.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/algorithms.py deleted file mode 100644 index 3c2f86516..000000000 --- a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/algorithms.py +++ /dev/null @@ -1,335 +0,0 @@ -from pydatastructs.miscellaneous_data_structures.sparse_table import SparseTable -from pydatastructs.miscellaneous_data_structures.segment_tree import ArraySegmentTree -from pydatastructs.utils.misc_util import ( - _check_range_query_inputs, Backend, - raise_if_backend_is_not_python) - -__all__ = [ - 'RangeQueryStatic', - 'RangeQueryDynamic' -] - - -class RangeQueryStatic: - """ - Produces results for range queries of different kinds - by using specified data structure. - - Parameters - ========== - - array: OneDimensionalArray - The array for which we need to answer queries. 
- All the elements should be of type `int`. - func: callable - The function to be used for generating results - of a query. It should accept only one tuple as an - argument. The size of the tuple will be either 1 or 2 - and any one of the elements can be `None`. You can treat - `None` in whatever way you want according to the query - you are performing. For example, in case of range minimum - queries, `None` can be treated as infinity. We provide - the following which can be used as an argument value for this - parameter, - - `minimum` - For range minimum queries. - - `greatest_common_divisor` - For queries finding greatest - common divisor of a range. - - `summation` - For range sum queries. - data_structure: str - The data structure to be used for performing - range queries. - Currently the following data structures are supported, - - 'array' -> Array data structure. - Each query takes O(end - start) time asymptotically. - - 'sparse_table' -> Sparse table data structure. - Each query takes O(log(end - start)) time - asymptotically. - - By default, 'sparse_table'. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs import OneDimensionalArray, RangeQueryStatic - >>> from pydatastructs import minimum - >>> arr = OneDimensionalArray(int, [4, 6, 1, 5, 7, 3]) - >>> RMQ = RangeQueryStatic(arr, minimum) - >>> RMQ.query(3, 4) - 5 - >>> RMQ.query(0, 4) - 1 - >>> RMQ.query(0, 2) - 1 - - Note - ==== - - The array once passed as an input should not be modified - once the `RangeQueryStatic` constructor is called. If you - have updated the array, then you need to create a new - `RangeQueryStatic` object with this updated array. 
- """ - - def __new__(cls, array, func, data_structure='sparse_table', **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - if len(array) == 0: - raise ValueError("Input %s array is empty."%(array)) - - if data_structure == 'array': - return RangeQueryStaticArray(array, func) - elif data_structure == 'sparse_table': - return RangeQueryStaticSparseTable(array, func) - else: - raise NotImplementedError( - "Currently %s data structure for range " - "query without updates isn't implemented yet." - % (data_structure)) - - @classmethod - def methods(cls): - return ['query'] - - def query(start, end): - """ - Method to perform a query in [start, end) range. - - Parameters - ========== - - start: int - The starting index of the range. - end: int - The ending index of the range. - """ - raise NotImplementedError( - "This is an abstract method.") - - -class RangeQueryStaticSparseTable(RangeQueryStatic): - - __slots__ = ["sparse_table", "bounds"] - - def __new__(cls, array, func, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = object.__new__(cls) - sparse_table = SparseTable(array, func) - obj.bounds = (0, len(array)) - obj.sparse_table = sparse_table - return obj - - @classmethod - def methods(cls): - return ['query'] - - def query(self, start, end): - _check_range_query_inputs((start, end + 1), self.bounds) - return self.sparse_table.query(start, end) - - -class RangeQueryStaticArray(RangeQueryStatic): - - __slots__ = ["array", "func"] - - def __new__(cls, array, func): - obj = object.__new__(cls) - obj.array = array - obj.func = func - return obj - - @classmethod - def methods(cls): - return ['query'] - - def query(self, start, end): - _check_range_query_inputs((start, end + 1), (0, len(self.array))) - - rsize = end - start + 1 - - if rsize == 1: - return self.func((self.array[start],)) - - query_ans = self.func((self.array[start], self.array[start + 1])) - for i in range(start + 2, 
end + 1): - query_ans = self.func((query_ans, self.array[i])) - return query_ans - -class RangeQueryDynamic: - """ - Produces results for range queries of different kinds - while allowing point updates by using specified - data structure. - - Parameters - ========== - - array: OneDimensionalArray - The array for which we need to answer queries. - All the elements should be of type `int`. - func: callable - The function to be used for generating results - of a query. It should accept only one tuple as an - argument. The size of the tuple will be either 1 or 2 - and any one of the elements can be `None`. You can treat - `None` in whatever way you want according to the query - you are performing. For example, in case of range minimum - queries, `None` can be treated as infinity. We provide - the following which can be used as an argument value for this - parameter, - - `minimum` - For range minimum queries. - - `greatest_common_divisor` - For queries finding greatest - common divisor of a range. - - `summation` - For range sum queries. - data_structure: str - The data structure to be used for performing - range queries. - Currently the following data structures are supported, - - 'array' -> Array data structure. - Each query takes O(end - start) time asymptotically. - Each point update takes O(1) time asymptotically. - - 'segment_tree' -> Segment tree data structure. - Each query takes O(log(end - start)) time - asymptotically. - Each point update takes O(log(len(array))) time - asymptotically. - - By default, 'segment_tree'. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. 
- - Examples - ======== - - >>> from pydatastructs import OneDimensionalArray, RangeQueryDynamic - >>> from pydatastructs import minimum - >>> arr = OneDimensionalArray(int, [4, 6, 1, 5, 7, 3]) - >>> RMQ = RangeQueryDynamic(arr, minimum) - >>> RMQ.query(3, 4) - 5 - >>> RMQ.query(0, 4) - 1 - >>> RMQ.query(0, 2) - 1 - >>> RMQ.update(2, 0) - >>> RMQ.query(0, 2) - 0 - - Note - ==== - - The array once passed as an input should be modified - only with `RangeQueryDynamic.update` method. - """ - - def __new__(cls, array, func, data_structure='segment_tree', **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - - if len(array) == 0: - raise ValueError("Input %s array is empty."%(array)) - - if data_structure == 'array': - return RangeQueryDynamicArray(array, func, **kwargs) - elif data_structure == 'segment_tree': - return RangeQueryDynamicSegmentTree(array, func, **kwargs) - else: - raise NotImplementedError( - "Currently %s data structure for range " - "query with point updates isn't implemented yet." - % (data_structure)) - - @classmethod - def methods(cls): - return ['query', 'update'] - - def query(start, end): - """ - Method to perform a query in [start, end) range. - - Parameters - ========== - - start: int - The starting index of the range. - end: int - The ending index of the range. - """ - raise NotImplementedError( - "This is an abstract method.") - - def update(self, index, value): - """ - Method to update index with a new value. - - Parameters - ========== - - index: int - The index to be update. - value: int - The new value. 
- """ - raise NotImplementedError( - "This is an abstract method.") - -class RangeQueryDynamicArray(RangeQueryDynamic): - - __slots__ = ["range_query_static"] - - def __new__(cls, array, func, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = object.__new__(cls) - obj.range_query_static = RangeQueryStaticArray(array, func) - return obj - - @classmethod - def methods(cls): - return ['query', 'update'] - - def query(self, start, end): - return self.range_query_static.query(start, end) - - def update(self, index, value): - self.range_query_static.array[index] = value - -class RangeQueryDynamicSegmentTree(RangeQueryDynamic): - - __slots__ = ["segment_tree", "bounds"] - - def __new__(cls, array, func, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.pop('backend', Backend.PYTHON)) - obj = object.__new__(cls) - obj.segment_tree = ArraySegmentTree(array, func, dimensions=1) - obj.segment_tree.build() - obj.bounds = (0, len(array)) - return obj - - @classmethod - def methods(cls): - return ['query', 'update'] - - def query(self, start, end): - _check_range_query_inputs((start, end + 1), self.bounds) - return self.segment_tree.query(start, end) - - def update(self, index, value): - self.segment_tree.update(index, value) diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/binomial_trees.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/binomial_trees.py deleted file mode 100644 index 9ea91d828..000000000 --- a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/binomial_trees.py +++ /dev/null @@ -1,91 +0,0 @@ -from pydatastructs.utils.misc_util import ( - BinomialTreeNode, _check_type, Backend, - raise_if_backend_is_not_python) - -__all__ = [ - 'BinomialTree' -] - -class BinomialTree(object): - """ - Represents binomial trees - - Parameters - ========== - - root: BinomialTreeNode - The root of the binomial tree. 
- By default, None - order: int - The order of the binomial tree. - By default, None - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs import BinomialTree, BinomialTreeNode - >>> root = BinomialTreeNode(1, 1) - >>> tree = BinomialTree(root, 0) - >>> tree.is_empty - False - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Binomial_heap - """ - __slots__ = ['root', 'order'] - - def __new__(cls, root=None, order=None, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - if root is not None and \ - not _check_type(root, BinomialTreeNode): - raise TypeError("%s i.e., root should be of " - "type BinomialTreeNode."%(root)) - if order is not None and not _check_type(order, int): - raise TypeError("%s i.e., order should be of " - "type int."%(order)) - obj = object.__new__(cls) - if root is not None: - root.is_root = True - obj.root = root - obj.order = order - return obj - - @classmethod - def methods(cls): - return ['add_sub_tree', '__new__', 'is_empty'] - - def add_sub_tree(self, other_tree): - """ - Adds a sub tree to current tree. - - Parameters - ========== - - other_tree: BinomialTree - - Raises - ====== - - ValueError: If order of the two trees - are different. 
- """ - if not _check_type(other_tree, BinomialTree): - raise TypeError("%s i.e., other_tree should be of " - "type BinomialTree"%(other_tree)) - if self.order != other_tree.order: - raise ValueError("Orders of both the trees should be same.") - self.root.children.append(other_tree.root) - other_tree.root.parent = self.root - other_tree.root.is_root = False - self.order += 1 - - @property - def is_empty(self): - return self.root is None diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/disjoint_set.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/disjoint_set.py deleted file mode 100644 index 9a5caef5b..000000000 --- a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/disjoint_set.py +++ /dev/null @@ -1,143 +0,0 @@ -from pydatastructs.utils import Set -from pydatastructs.utils.misc_util import ( - Backend, raise_if_backend_is_not_python) - -__all__ = ['DisjointSetForest'] - -class DisjointSetForest(object): - """ - Represents a forest of disjoint set trees. - - Parameters - ========== - - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs import DisjointSetForest - >>> dst = DisjointSetForest() - >>> dst.make_set(1) - >>> dst.make_set(2) - >>> dst.union(1, 2) - >>> dst.find_root(2).key - 1 - >>> dst.make_root(2) - >>> dst.find_root(2).key - 2 - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Disjoint-set_data_structure - """ - - __slots__ = ['tree'] - - def __new__(cls, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = object.__new__(cls) - obj.tree = dict() - return obj - - @classmethod - def methods(cls): - return ['make_set', '__new__', 'find_root', 'union'] - - def make_set(self, key, data=None): - """ - Adds a singleton set to the tree - of disjoint sets with given key - and optionally data. - """ - if self.tree.get(key, None) is None: - new_set = Set(key, data) - self.tree[key] = new_set - new_set.parent = new_set - new_set.size = 1 - - def find_root(self, key): - """ - Finds the root of the set - with the given key by path - splitting algorithm. - """ - if self.tree.get(key, None) is None: - raise KeyError("Invalid key, %s"%(key)) - _set = self.tree[key] - while _set.parent is not _set: - _set, _set.parent = _set.parent, _set.parent.parent - return _set - - def union(self, key1, key2): - """ - Takes the union of the two - disjoint set trees with given - keys. The union is done by size. - """ - x_root = self.find_root(key1) - y_root = self.find_root(key2) - - if x_root is not y_root: - if x_root.size < y_root.size: - x_root, y_root = y_root, x_root - - y_root.parent = x_root - x_root.size += y_root.size - - def make_root(self, key): - """ - Finds the set to which the key belongs - and makes it as the root of the set. 
- """ - if self.tree.get(key, None) is None: - raise KeyError("Invalid key, %s"%(key)) - - key_set = self.tree[key] - if key_set.parent is not key_set: - current_parent = key_set.parent - # Remove this key subtree size from all its ancestors - while current_parent.parent is not current_parent: - current_parent.size -= key_set.size - current_parent = current_parent.parent - - all_set_size = current_parent.size # This is the root node - current_parent.size -= key_set.size - - # Make parent of current root as key - current_parent.parent = key_set - # size of new root will be same as previous root's size - key_set.size = all_set_size - # Make parent of key as itself - key_set.parent = key_set - - def find_size(self, key): - """ - Finds the size of set to which the key belongs. - """ - if self.tree.get(key, None) is None: - raise KeyError("Invalid key, %s"%(key)) - - return self.find_root(key).size - - def disjoint_sets(self): - """ - Returns a list of disjoint sets in the data structure. - """ - result = dict() - for key in self.tree.keys(): - parent = self.find_root(key).key - members = result.get(parent, []) - members.append(key) - result[parent] = members - sorted_groups = [] - for v in result.values(): - sorted_groups.append(v) - sorted_groups[-1].sort() - sorted_groups.sort() - return sorted_groups diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/multiset.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/multiset.py deleted file mode 100644 index 397978224..000000000 --- a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/multiset.py +++ /dev/null @@ -1,42 +0,0 @@ -__all__ = [ - 'Multiset' -] - - -class Multiset: - def __init__(self, *args): - # TODO: Implement dict in pydatastructs - self.counter = dict() - from pydatastructs.trees import RedBlackTree - self.tree = RedBlackTree() - self._n = 0 - for arg in args: - self.add(arg) - - def add(self, element): - 
self.counter[element] = self.counter.get(element, 0) + 1 - self._n += 1 - if self.counter[element] == 1: - self.tree.insert(element) - - def remove(self, element): - if self.counter[element] == 1: - self.tree.delete(element) - if self.counter.get(element, 0) > 0: - self._n -= 1 - self.counter[element] -= 1 - - def lower_bound(self, element): - return self.tree.lower_bound(element) - - def upper_bound(self, element): - return self.tree.upper_bound(element) - - def __contains__(self, element): - return self.counter.get(element, 0) > 0 - - def __len__(self): - return self._n - - def count(self, element): - return self.counter.get(element, 0) diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/queue.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/queue.py deleted file mode 100644 index 033ef9af3..000000000 --- a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/queue.py +++ /dev/null @@ -1,498 +0,0 @@ -from pydatastructs.linear_data_structures import DynamicOneDimensionalArray, SinglyLinkedList -from pydatastructs.utils.misc_util import ( - NoneType, Backend, raise_if_backend_is_not_python) -from pydatastructs.trees.heaps import BinaryHeap, BinomialHeap -from copy import deepcopy as dc - -__all__ = [ - 'Queue', - 'PriorityQueue' -] - -class Queue(object): - """Representation of queue data structure. - - Parameters - ========== - - implementation : str - Implementation to be used for queue. - By default, 'array' - items : list/tuple - Optional, by default, None - The inital items in the queue. - dtype : A valid python type - Optional, by default NoneType if item - is None. - Required only for 'array' implementation. - double_ended : bool - Optional, by default, False. - Set to True if the queue should support - additional, appendleft and pop operations - from left and right sides respectively. - backend: pydatastructs.Backend - The backend to be used. 
- Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs import Queue - >>> q = Queue() - >>> q.append(1) - >>> q.append(2) - >>> q.append(3) - >>> q.popleft() - 1 - >>> len(q) - 2 - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Queue_(abstract_data_type) - """ - - def __new__(cls, implementation='array', **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - if implementation == 'array': - return ArrayQueue( - kwargs.get('items', None), - kwargs.get('dtype', int), - kwargs.get('double_ended', False)) - elif implementation == 'linked_list': - return LinkedListQueue( - kwargs.get('items', None), - kwargs.get('double_ended', False) - ) - else: - raise NotImplementedError( - "%s hasn't been implemented yet."%(implementation)) - - @classmethod - def methods(cls): - return ['__new__'] - - def _double_ended_check(self): - if not self._double_ended: - raise NotImplementedError( - "This method is only supported for " - "double ended queues.") - - def append(self, *args, **kwargs): - raise NotImplementedError( - "This is an abstract method.") - - def appendleft(self, *args, **kwargs): - raise NotImplementedError( - "This is an abstract method.") - - def pop(self, *args, **kwargs): - raise NotImplementedError( - "This is an abstract method.") - - def popleft(self, *args, **kwargs): - raise NotImplementedError( - "This is an abstract method.") - - @property - def is_empty(self): - raise NotImplementedError( - "This is an abstract method.") - - -class ArrayQueue(Queue): - - __slots__ = ['_front', '_rear', '_double_ended'] - - def __new__(cls, items=None, dtype=NoneType, double_ended=False, - **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - if items is None: - items = DynamicOneDimensionalArray(dtype, 0) - else: - dtype = type(items[0]) - items = DynamicOneDimensionalArray(dtype, items) - obj = object.__new__(cls) - 
obj.items, obj._front = items, -1 - if items.size == 0: - obj._front = -1 - obj._rear = -1 - else: - obj._front = 0 - obj._rear = items._num - 1 - obj._double_ended = double_ended - return obj - - @classmethod - def methods(cls): - return ['__new__', 'append', 'appendleft', 'popleft', - 'pop', 'is_empty', '__len__', '__str__', 'front', - 'rear'] - - def append(self, x): - if self.is_empty: - self._front = 0 - self.items._dtype = type(x) - self.items.append(x) - self._rear += 1 - - def appendleft(self, x): - self._double_ended_check() - temp = [] - if self.is_empty: - self._front = 0 - self._rear = -1 - self.items._dtype = type(x) - temp.append(x) - for i in range(self._front, self._rear + 1): - temp.append(self.items._data[i]) - self.items = DynamicOneDimensionalArray(type(temp[0]), temp) - self._rear += 1 - - def popleft(self): - if self.is_empty: - raise IndexError("Queue is empty.") - return_value = dc(self.items[self._front]) - front_temp = self._front - if self._front == self._rear: - self._front = -1 - self._rear = -1 - else: - if (self.items._num - 1)/self.items._size < \ - self.items._load_factor: - self._front = 0 - else: - self._front += 1 - self.items.delete(front_temp) - return return_value - - def pop(self): - self._double_ended_check() - if self.is_empty: - raise IndexError("Queue is empty.") - - return_value = dc(self.items[self._rear]) - rear_temp = self._rear - if self._front == self._rear: - self._front = -1 - self._rear = -1 - else: - if (self.items._num - 1)/self.items._size < \ - self.items._load_factor: - self._front = 0 - else: - self._rear -= 1 - self.items.delete(rear_temp) - return return_value - - @property - def front(self): - return self._front - - @property - def rear(self): - return self._rear - - @property - def is_empty(self): - return self.__len__() == 0 - - def __len__(self): - return self.items._num - - def __str__(self): - _data = [] - for i in range(self._front, self._rear + 1): - _data.append(self.items._data[i]) - return 
str(_data) - -class LinkedListQueue(Queue): - - __slots__ = ['queue', '_double_ended'] - - def __new__(cls, items=None, double_ended=False, - **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = object.__new__(cls) - obj.queue = SinglyLinkedList() - if items is None: - pass - elif type(items) in (list, tuple): - for x in items: - obj.append(x) - else: - raise TypeError("Expected type: list/tuple") - obj._double_ended = double_ended - return obj - - @classmethod - def methods(cls): - return ['__new__', 'append', 'appendleft', 'pop', 'popleft', - 'is_empty', '__len__', '__str__', 'front', 'rear'] - - def append(self, x): - self.queue.append(x) - - def appendleft(self, x): - self._double_ended_check() - if self._double_ended: - self.queue.appendleft(x) - - def pop(self): - self._double_ended_check() - if self.is_empty: - raise IndexError("Queue is empty.") - return_value = self.queue.popright() - return return_value - - def popleft(self): - if self.is_empty: - raise IndexError("Queue is empty.") - return_value = self.queue.popleft() - return return_value - - @property - def is_empty(self): - return self.__len__() == 0 - - @property - def front(self): - return self.queue.head - - @property - def rear(self): - return self.queue.tail - - def __len__(self): - return self.queue.size - - def __str__(self): - return str(self.queue) - -class PriorityQueue(object): - """ - Represents the concept of priority queue. - - Parameters - ========== - - implementation: str - The implementation which is to be - used for supporting operations - of priority queue. - The following implementations are supported, - - 'linked_list' -> Linked list implementation. - - 'binary_heap' -> Binary heap implementation. - - 'binomial_heap' -> Binomial heap implementation. - Doesn't support custom comparators, minimum - key data is extracted in every pop. - - Optional, by default, 'binary_heap' implementation - is used. 
- comp: function - The comparator to be used while comparing priorities. - Must return a bool object. - By default, `lambda u, v: u < v` is used to compare - priorities i.e., minimum priority elements are extracted - by pop operation. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs import PriorityQueue - >>> pq = PriorityQueue() - >>> pq.push(1, 2) - >>> pq.push(2, 3) - >>> pq.pop() - 1 - >>> pq2 = PriorityQueue(comp=lambda u, v: u > v) - >>> pq2.push(1, 2) - >>> pq2.push(2, 3) - >>> pq2.pop() - 2 - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Priority_queue - """ - - def __new__(cls, implementation='binary_heap', **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - comp = kwargs.get("comp", lambda u, v: u < v) - if implementation == 'linked_list': - return LinkedListPriorityQueue(comp) - elif implementation == 'binary_heap': - return BinaryHeapPriorityQueue(comp) - elif implementation == 'binomial_heap': - return BinomialHeapPriorityQueue() - else: - raise NotImplementedError( - "%s implementation is not currently supported " - "by priority queue.") - - @classmethod - def methods(cls): - return ['__new__'] - - def push(self, value, priority): - """ - Pushes the value to the priority queue - according to the given priority. - - value - Value to be pushed. - priority - Priority to be given to the value. - """ - raise NotImplementedError( - "This is an abstract method.") - - def pop(self): - """ - Pops out the value from the priority queue. - """ - raise NotImplementedError( - "This is an abstract method.") - - @property - def peek(self): - """ - Returns the pointer to the value which will be - popped out by `pop` method. - """ - raise NotImplementedError( - "This is an abstract method.") - - @property - def is_empty(self): - """ - Checks if the priority queue is empty. 
- """ - raise NotImplementedError( - "This is an abstract method.") - -class LinkedListPriorityQueue(PriorityQueue): - - __slots__ = ['items', 'comp'] - - @classmethod - def methods(cls): - return ['__new__', 'push', 'pop', 'peek', 'is_empty'] - - def __new__(cls, comp, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = object.__new__(cls) - obj.items = SinglyLinkedList() - obj.comp = comp - return obj - - def push(self, value, priority): - self.items.append(priority, value) - - def pop(self): - _, max_i = self._find_peek(return_index=True) - pop_val = self.items.extract(max_i) - return pop_val.data - - def _find_peek(self, return_index=False): - if self.is_empty: - raise IndexError("Priority queue is empty.") - - walk = self.items.head - i, max_i, max_p = 0, 0, walk - while walk is not None: - if self.comp(walk.key, max_p.key): - max_i = i - max_p = walk - i += 1 - walk = walk.next - if return_index: - return max_p, max_i - return max_p - - @property - def peek(self): - return self._find_peek() - - @property - def is_empty(self): - return self.items.size == 0 - -class BinaryHeapPriorityQueue(PriorityQueue): - - __slots__ = ['items'] - - @classmethod - def methods(cls): - return ['__new__', 'push', 'pop', 'peek', 'is_empty'] - - def __new__(cls, comp, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = object.__new__(cls) - obj.items = BinaryHeap() - obj.items._comp = comp - return obj - - def push(self, value, priority): - self.items.insert(priority, value) - - def pop(self): - node = self.items.extract() - return node.data - - @property - def peek(self): - if self.items.is_empty: - raise IndexError("Priority queue is empty.") - return self.items.heap[0] - - @property - def is_empty(self): - return self.items.is_empty - -class BinomialHeapPriorityQueue(PriorityQueue): - - __slots__ = ['items'] - - @classmethod - def methods(cls): - return ['__new__', 'push', 'pop', 
'peek', 'is_empty'] - - def __new__(cls, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = object.__new__(cls) - obj.items = BinomialHeap() - return obj - - def push(self, value, priority): - self.items.insert(priority, value) - - def pop(self): - node = self.items.find_minimum() - self.items.delete_minimum() - return node.data - - @property - def peek(self): - return self.items.find_minimum() - - @property - def is_empty(self): - return self.items.is_empty diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/segment_tree.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/segment_tree.py deleted file mode 100644 index 0895ba6da..000000000 --- a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/segment_tree.py +++ /dev/null @@ -1,225 +0,0 @@ -from .stack import Stack -from pydatastructs.utils.misc_util import (TreeNode, - Backend, raise_if_backend_is_not_python) - -__all__ = ['ArraySegmentTree'] - -class ArraySegmentTree(object): - """ - Represents the segment tree data structure, - defined on arrays. - - Parameters - ========== - - array: Array - The array to be used for filling the segment tree. - func: callable - The function to be used for filling the segment tree. - It should accept only one tuple as an argument. The - size of the tuple will be either 1 or 2 and any one - of the elements can be `None`. You can treat `None` in - whatever way you want. For example, in case of minimum - values, `None` can be treated as infinity. We provide - the following which can be used as an argument value for this - parameter, - - `minimum` - For range minimum queries. - - `greatest_common_divisor` - For queries finding greatest - common divisor of a range. - - `summation` - For range sum queries. - dimensions: int - The number of dimensions of the array to be used - for the segment tree. - Optional, by default 1. 
- backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs import ArraySegmentTree, minimum - >>> from pydatastructs import OneDimensionalArray - >>> arr = OneDimensionalArray(int, [1, 2, 3, 4, 5]) - >>> s_t = ArraySegmentTree(arr, minimum) - >>> s_t.build() - >>> s_t.query(0, 1) - 1 - >>> s_t.query(1, 3) - 2 - >>> s_t.update(2, -1) - >>> s_t.query(1, 3) - -1 - >>> arr = OneDimensionalArray(int, [1, 2]) - >>> s_t = ArraySegmentTree(arr, minimum) - >>> s_t.build() - >>> str(s_t) - "['((0, 1), 1)', '((0, 0), 1)', '', '', '((1, 1), 2)', '', '']" - - References - ========== - - .. [1] https://cp-algorithms.com/data_structures/segment_tree.html - """ - def __new__(cls, array, func, **kwargs): - - dimensions = kwargs.pop("dimensions", 1) - if dimensions == 1: - return OneDimensionalArraySegmentTree(array, func, **kwargs) - else: - raise NotImplementedError("ArraySegmentTree do not support " - "{}-dimensional arrays as of now.".format(dimensions)) - - def build(self): - """ - Generates segment tree nodes when called. - Nothing happens if nodes are already generated. - """ - raise NotImplementedError( - "This is an abstract method.") - - def update(self, index, value): - """ - Updates the value at given index. - """ - raise NotImplementedError( - "This is an abstract method.") - - def query(self, start, end): - """ - Queries [start, end] range according - to the function provided while constructing - `ArraySegmentTree` object. 
- """ - raise NotImplementedError( - "This is an abstract method.") - - def __str__(self): - recursion_stack = Stack(implementation='linked_list') - recursion_stack.push(self._root) - to_be_printed = [] - while not recursion_stack.is_empty: - node = recursion_stack.pop().key - if node is not None: - to_be_printed.append(str((node.key, node.data))) - else: - to_be_printed.append('') - if node is not None: - recursion_stack.push(node.right) - recursion_stack.push(node.left) - return str(to_be_printed) - - -class OneDimensionalArraySegmentTree(ArraySegmentTree): - - __slots__ = ["_func", "_array", "_root", "_backend"] - - def __new__(cls, array, func, **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - raise_if_backend_is_not_python(cls, backend) - - obj = object.__new__(cls) - obj._func = func - obj._array = array - obj._root = None - obj._backend = backend - return obj - - @classmethod - def methods(self): - return ['__new__', 'build', 'update', - 'query'] - - @property - def is_ready(self): - return self._root is not None - - def build(self): - if self.is_ready: - return - - recursion_stack = Stack(implementation='linked_list') - node = TreeNode((0, len(self._array) - 1), None, backend=self._backend) - node.is_root = True - self._root = node - recursion_stack.push(node) - - while not recursion_stack.is_empty: - node = recursion_stack.peek.key - start, end = node.key - if start == end: - node.data = self._array[start] - recursion_stack.pop() - continue - - if (node.left is not None and - node.right is not None): - recursion_stack.pop() - node.data = self._func((node.left.data, node.right.data)) - else: - mid = (start + end) // 2 - if node.left is None: - left_node = TreeNode((start, mid), None) - node.left = left_node - recursion_stack.push(left_node) - if node.right is None: - right_node = TreeNode((mid + 1, end), None) - node.right = right_node - recursion_stack.push(right_node) - - def update(self, index, value): - if not self.is_ready: - raise 
ValueError("{} tree is not built yet. ".format(self) + - "Call .build method to prepare the segment tree.") - - recursion_stack = Stack(implementation='linked_list') - recursion_stack.push((self._root, None)) - - while not recursion_stack.is_empty: - node, child = recursion_stack.peek.key - start, end = node.key - if start == end: - self._array[index] = value - node.data = value - recursion_stack.pop() - if not recursion_stack.is_empty: - parent_node = recursion_stack.pop() - recursion_stack.push((parent_node.key[0], node)) - continue - - if child is not None: - node.data = self._func((node.left.data, node.right.data)) - recursion_stack.pop() - if not recursion_stack.is_empty: - parent_node = recursion_stack.pop() - recursion_stack.push((parent_node.key[0], node)) - else: - mid = (start + end) // 2 - if start <= index and index <= mid: - recursion_stack.push((node.left, None)) - else: - recursion_stack.push((node.right, None)) - - def _query(self, node, start, end, l, r): - if r < start or end < l: - return None - - if l <= start and end <= r: - return node.data - - mid = (start + end) // 2 - left_result = self._query(node.left, start, mid, l, r) - right_result = self._query(node.right, mid + 1, end, l, r) - return self._func((left_result, right_result)) - - def query(self, start, end): - if not self.is_ready: - raise ValueError("{} tree is not built yet. 
".format(self) + - "Call .build method to prepare the segment tree.") - - return self._query(self._root, 0, len(self._array) - 1, - start, end) diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/sparse_table.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/sparse_table.py deleted file mode 100644 index 55ec4e9b3..000000000 --- a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/sparse_table.py +++ /dev/null @@ -1,108 +0,0 @@ -from pydatastructs.linear_data_structures.arrays import OneDimensionalArray -from pydatastructs.utils.misc_util import ( - Backend, raise_if_backend_is_not_python) -import math - -__all__ = ['SparseTable'] - - -class SparseTable(object): - """ - Represents the sparse table data structure. - - Parameters - ========== - - array: OneDimensionalArray - The array to be used for filling the sparse table. - func: callable - The function to be used for filling the sparse table. - It should accept only one tuple as an argument. The - size of the tuple will be either 1 or 2 and any one - of the elements can be `None`. You can treat `None` in - whatever way you want. For example, in case of minimum - values, `None` can be treated as infinity. We provide - the following which can be used as an argument value for this - parameter, - - `minimum` - For range minimum queries. - - `greatest_common_divisor` - For queries finding greatest - common divisor of a range. - - `summation` - For range sum queries. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs import SparseTable, minimum - >>> from pydatastructs import OneDimensionalArray - >>> arr = OneDimensionalArray(int, [1, 2, 3, 4, 5]) - >>> s_t = SparseTable(arr, minimum) - >>> str(s_t) - "['[1, 1, 1]', '[2, 2, 2]', '[3, 3, None]', '[4, 4, None]', '[5, None, None]']" - - References - ========== - - .. 
[1] https://cp-algorithms.com/data_structures/sparse-table.html - """ - - __slots__ = ['_table', 'func'] - - def __new__(cls, array, func, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - - # TODO: If possible remove the following check. - if len(array) == 0: - raise ValueError("Input %s array is empty."%(array)) - - obj = object.__new__(cls) - size = len(array) - log_size = int(math.log2(size)) + 1 - obj._table = [OneDimensionalArray(int, log_size) for _ in range(size)] - obj.func = func - - for i in range(size): - obj._table[i][0] = func((array[i],)) - - for j in range(1, log_size + 1): - for i in range(size - (1 << j) + 1): - obj._table[i][j] = func((obj._table[i][j - 1], - obj._table[i + (1 << (j - 1))][j - 1])) - - return obj - - @classmethod - def methods(cls): - return ['query', '__str__'] - - def query(self, start, end): - """ - Method to perform a query on sparse table in [start, end) - range. - - Parameters - ========== - - start: int - The starting index of the range. - end: int - The ending index of the range. 
- """ - j = int(math.log2(end - start + 1)) + 1 - answer = None - while j >= 0: - if start + (1 << j) - 1 <= end: - answer = self.func((answer, self._table[start][j])) - start += 1 << j - j -= 1 - return answer - - def __str__(self): - return str([str(array) for array in self._table]) diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/stack.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/stack.py deleted file mode 100644 index 38f72b43f..000000000 --- a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/stack.py +++ /dev/null @@ -1,200 +0,0 @@ -from pydatastructs.linear_data_structures import DynamicOneDimensionalArray, SinglyLinkedList -from pydatastructs.miscellaneous_data_structures._backend.cpp import _stack -from pydatastructs.utils.misc_util import ( - _check_type, NoneType, Backend, - raise_if_backend_is_not_python) -from copy import deepcopy as dc - -__all__ = [ - 'Stack' -] - -class Stack(object): - """Representation of stack data structure - - Parameters - ========== - - implementation : str - Implementation to be used for stack. - By default, 'array' - Currently only supports 'array' - implementation. - items : list/tuple - Optional, by default, None - The inital items in the stack. - For array implementation. - dtype : A valid python type - Optional, by default NoneType if item - is None, otherwise takes the data - type of DynamicOneDimensionalArray - For array implementation. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs import Stack - >>> s = Stack() - >>> s.push(1) - >>> s.push(2) - >>> s.push(3) - >>> str(s) - '[1, 2, 3]' - >>> s.pop() - 3 - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Stack_(abstract_data_type) - """ - - def __new__(cls, implementation='array', **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if implementation == 'array': - items = kwargs.get('items', None) - dtype = kwargs.get('dtype', int) - if backend == Backend.CPP: - return _stack.ArrayStack(items, dtype) - - return ArrayStack(items, dtype) - if implementation == 'linked_list': - raise_if_backend_is_not_python(cls, backend) - - return LinkedListStack( - kwargs.get('items', None) - ) - raise NotImplementedError( - "%s hasn't been implemented yet."%(implementation)) - - @classmethod - def methods(cls): - return ['__new__'] - - def push(self, *args, **kwargs): - raise NotImplementedError( - "This is an abstract method.") - - def pop(self, *args, **kwargs): - raise NotImplementedError( - "This is an abstract method.") - - @property - def is_empty(self): - raise NotImplementedError( - "This is an abstract method.") - - @property - def peek(self): - raise NotImplementedError( - "This is an abstract method.") - -class ArrayStack(Stack): - - __slots__ = ['items'] - - def __new__(cls, items=None, dtype=NoneType, - **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - if items is None: - items = DynamicOneDimensionalArray(dtype, 0) - else: - items = DynamicOneDimensionalArray(dtype, items) - obj = object.__new__(cls) - obj.items = items - return obj - - @classmethod - def methods(cls): - return ['__new__', 'push', 'pop', 'is_emtpy', - 'peek', '__len__', '__str__'] - - def push(self, x): - if self.is_empty: - self.items._dtype = type(x) - self.items.append(x) - - def pop(self): - if self.is_empty: - raise IndexError("Stack is empty") - - top_element = dc(self.items[self.items._last_pos_filled]) - self.items.delete(self.items._last_pos_filled) - return top_element - - @property - def is_empty(self): - return self.items._last_pos_filled == -1 - - @property - def peek(self): - return 
self.items[self.items._last_pos_filled] - - def __len__(self): - return self.items._num - - def __str__(self): - """ - Used for printing. - """ - return str(self.items._data) - - -class LinkedListStack(Stack): - - __slots__ = ['stack'] - - def __new__(cls, items=None, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = object.__new__(cls) - obj.stack = SinglyLinkedList() - if items is None: - pass - elif type(items) in (list, tuple): - for x in items: - obj.push(x) - else: - raise TypeError("Expected type: list/tuple") - return obj - - @classmethod - def methods(cls): - return ['__new__', 'push', 'pop', 'is_emtpy', - 'peek', '__len__', '__str__'] - - def push(self, x): - self.stack.appendleft(x) - - def pop(self): - if self.is_empty: - raise IndexError("Stack is empty") - return self.stack.popleft() - - @property - def is_empty(self): - return self.__len__() == 0 - - @property - def peek(self): - return self.stack.head - - @property - def size(self): - return self.stack.size - - def __len__(self): - return self.stack.size - - def __str__(self): - elements = [] - current_node = self.peek - while current_node is not None: - elements.append(str(current_node)) - current_node = current_node.next - return str(elements[::-1]) diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/__init__.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_binomial_trees.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_binomial_trees.py deleted file mode 100644 index 1275e9aec..000000000 --- a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_binomial_trees.py +++ /dev/null @@ -1,17 +0,0 @@ -from 
pydatastructs.miscellaneous_data_structures.binomial_trees import BinomialTree -from pydatastructs.utils.raises_util import raises -from pydatastructs.utils.misc_util import BinomialTreeNode - -# only tests the corner cases -def test_BinomialTree(): - assert raises(TypeError, lambda: BinomialTree(1, 1)) - assert raises(TypeError, lambda: BinomialTree(None, 1.5)) - - bt = BinomialTree() - assert raises(TypeError, lambda: bt.add_sub_tree(None)) - bt1 = BinomialTree(BinomialTreeNode(1, 1), 0) - node = BinomialTreeNode(2, 2) - node.add_children(BinomialTreeNode(3, 3)) - bt2 = BinomialTree(node, 1) - assert raises(ValueError, lambda: bt1.add_sub_tree(bt2)) - assert bt1.is_empty is False diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_disjoint_set.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_disjoint_set.py deleted file mode 100644 index fcabd3112..000000000 --- a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_disjoint_set.py +++ /dev/null @@ -1,70 +0,0 @@ -from pydatastructs import DisjointSetForest -from pydatastructs.utils.raises_util import raises - -def test_DisjointSetForest(): - - dst = DisjointSetForest() - for i in range(8): - dst.make_set(i+1) - - dst.union(1, 2) - dst.union(1, 5) - assert dst.find_size(2) == 3 - dst.union(1, 6) - dst.union(1, 8) - dst.union(3, 4) - assert dst.find_size(3) == 2 - - assert (dst.find_root(1) == dst.find_root(2) == - dst.find_root(5) == dst.find_root(6) == dst.find_root(8)) - assert dst.disjoint_sets() == [[1, 2, 5, 6, 8], [3, 4], [7]] - assert dst.find_root(3) == dst.find_root(4) - assert dst.find_root(7).key == 7 - - assert raises(KeyError, lambda: dst.find_root(9)) - assert raises(KeyError, lambda: dst.find_size(9)) - dst.union(3, 1) - assert dst.find_root(3).key == 1 - assert dst.find_root(5).key == 1 - dst.make_root(6) - assert dst.disjoint_sets() == [[1, 2, 3, 4, 5, 6, 8], [7]] - assert 
dst.find_root(3).key == 6 - assert dst.find_root(5).key == 6 - dst.make_root(5) - assert dst.find_root(1).key == 5 - assert dst.find_root(5).key == 5 - assert raises(KeyError, lambda: dst.make_root(9)) - - dst = DisjointSetForest() - for i in range(6): - dst.make_set(i) - assert dst.tree[2].size == 1 - dst.union(2, 3) - assert dst.tree[2].size == 2 - assert dst.tree[3].size == 1 - dst.union(1, 4) - dst.union(2, 4) - assert dst.disjoint_sets() == [[0], [1, 2, 3, 4], [5]] - # current tree - ############### - # 2 - # / \ - # 1 3 - # / - # 4 - ############### - assert dst.tree[2].size == 4 - assert dst.tree[1].size == 2 - assert dst.tree[3].size == dst.tree[4].size == 1 - dst.make_root(4) - # New tree - ############### - # 4 - # | - # 2 - # / \ - # 1 3 - ############### - assert dst.tree[4].size == 4 - assert dst.tree[2].size == 3 - assert dst.tree[1].size == dst.tree[3].size == 1 diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_multiset.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_multiset.py deleted file mode 100644 index fb412704a..000000000 --- a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_multiset.py +++ /dev/null @@ -1,39 +0,0 @@ -from pydatastructs.miscellaneous_data_structures import Multiset - -def test_Multiset(): - - ms = Multiset() - ms.add(5) - ms.add(5) - ms.add(3) - ms.add(7) - assert len(ms) == 4 - assert 5 in ms - assert ms.count(5) == 2 - assert ms.count(3) == 1 - assert ms.count(-3) == 0 - assert not 4 in ms - ms.remove(5) - assert 5 in ms - assert ms.lower_bound(5) == 5 - assert ms.upper_bound(5) == 7 - - ms = Multiset(5, 3, 7, 2) - - assert len(ms) == 4 - assert 5 in ms - assert ms.count(7) == 1 - assert not 4 in ms - assert ms.lower_bound(3) == 3 - assert ms.upper_bound(3) == 5 - assert ms.upper_bound(7) is None - - ms.remove(5) - - assert len(ms) == 3 - assert not 5 in ms - - ms.add(4) - - assert 4 in ms - assert 
len(ms) == 4 diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_queue.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_queue.py deleted file mode 100644 index 81e1e996e..000000000 --- a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_queue.py +++ /dev/null @@ -1,116 +0,0 @@ -from pydatastructs.miscellaneous_data_structures import Queue -from pydatastructs.miscellaneous_data_structures.queue import ( - ArrayQueue, LinkedListQueue, PriorityQueue, - LinkedListPriorityQueue) -from pydatastructs.utils.raises_util import raises -from pydatastructs.utils.misc_util import _check_type - -def test_Queue(): - q = Queue(implementation='array') - q1 = Queue() - assert _check_type(q, ArrayQueue) is True - assert _check_type(q1, ArrayQueue) is True - q2 = Queue(implementation='linked_list') - assert _check_type(q2, LinkedListQueue) is True - assert raises(NotImplementedError, lambda: Queue(implementation='')) - -def test_ArrayQueue(): - q1 = Queue() - raises(IndexError, lambda: q1.popleft()) - q1 = Queue(implementation='array', items=[0]) - q1.append(1) - q1.append(2) - q1.append(3) - assert str(q1) == '[0, 1, 2, 3]' - assert len(q1) == 4 - assert q1.popleft() == 0 - assert q1.popleft() == 1 - assert len(q1) == 2 - assert q1.popleft() == 2 - assert q1.popleft() == 3 - assert len(q1) == 0 - - q2 = Queue(implementation='array', items=[0], double_ended=True) - q2.append(1) - q2.append(2) - q2.appendleft(3) - assert str(q2) == '[3, 0, 1, 2]' - assert len(q2) == 4 - assert q2.popleft() == 3 - assert q2.pop() == 2 - assert len(q2) == 2 - assert q2.popleft() == 0 - assert q2.pop() == 1 - assert len(q2) == 0 - - q1 = Queue(implementation='array', items=[0]) - assert raises(NotImplementedError, lambda: q1.appendleft(2)) - - -def test_LinkedListQueue(): - q1 = Queue(implementation='linked_list') - q1.append(1) - assert raises(TypeError, lambda: 
Queue(implementation='linked_list', items={0, 1})) - q1 = Queue(implementation='linked_list', items = [0, 1]) - q1.append(2) - q1.append(3) - assert str(q1) == ("['(0, None)', '(1, None)', " - "'(2, None)', '(3, None)']") - assert len(q1) == 4 - assert q1.popleft().key == 0 - assert q1.popleft().key == 1 - assert len(q1) == 2 - assert q1.popleft().key == 2 - assert q1.popleft().key == 3 - assert len(q1) == 0 - raises(IndexError, lambda: q1.popleft()) - - q1 = Queue(implementation='linked_list',items=['a',None,type,{}]) - assert len(q1) == 4 - - front = q1.front - assert front.key == q1.popleft().key - - rear = q1.rear - for _ in range(len(q1)-1): - q1.popleft() - - assert rear.key == q1.popleft().key - - q1 = Queue(implementation='linked_list', double_ended=True) - q1.appendleft(1) - q2 = Queue(implementation='linked_list', items=[0, 1]) - assert raises(NotImplementedError, lambda: q2.appendleft(1)) - q1 = Queue(implementation='linked_list', items = [0, 1], double_ended=True) - q1.appendleft(2) - q1.append(3) - assert str(q1) == "['(2, None)', '(0, None)', '(1, None)', '(3, None)']" - assert len(q1) == 4 - assert q1.popleft().key == 2 - assert q1.pop().key == 3 - assert len(q1) == 2 - assert q1.pop().key == 1 - assert q1.popleft().key == 0 - assert len(q1) == 0 - assert raises(IndexError, lambda: q1.popleft()) - -def test_PriorityQueue(): - pq1 = PriorityQueue(implementation='linked_list') - assert _check_type(pq1, LinkedListPriorityQueue) is True - assert raises(NotImplementedError, lambda: Queue(implementation='')) - -def test_ImplementationPriorityQueue(): - impls = ['linked_list', 'binomial_heap', 'binary_heap'] - for impl in impls: - pq1 = PriorityQueue(implementation=impl) - pq1.push(1, 4) - pq1.push(2, 3) - pq1.push(3, 2) - assert pq1.peek.data == 3 - assert pq1.pop() == 3 - assert pq1.peek.data == 2 - assert pq1.pop() == 2 - assert pq1.peek.data == 1 - assert pq1.pop() == 1 - assert pq1.is_empty is True - assert raises(IndexError, lambda: pq1.peek) diff 
--git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_range_query_dynamic.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_range_query_dynamic.py deleted file mode 100644 index f655c546d..000000000 --- a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_range_query_dynamic.py +++ /dev/null @@ -1,71 +0,0 @@ -from pydatastructs import ( - RangeQueryDynamic, minimum, - greatest_common_divisor, summation, - OneDimensionalArray) -from pydatastructs.utils.raises_util import raises -import random, math -from copy import deepcopy - -def _test_RangeQueryDynamic_common(func, gen_expected): - - array = OneDimensionalArray(int, []) - raises(ValueError, lambda: RangeQueryDynamic(array, func)) - - array = OneDimensionalArray(int, [1]) - rq = RangeQueryDynamic(array, func) - assert rq.query(0, 0) == 1 - raises(ValueError, lambda: rq.query(0, -1)) - raises(IndexError, lambda: rq.query(0, 1)) - - array_sizes = [3, 6, 12, 24, 48, 96] - random.seed(0) - for array_size in array_sizes: - inputs = [] - for i in range(array_size): - for j in range(i + 1, array_size): - inputs.append((i, j)) - - data_structures = ["array", "segment_tree"] - for ds in data_structures: - data = random.sample(range(-2*array_size, 2*array_size), array_size) - array = OneDimensionalArray(int, data) - rmq = RangeQueryDynamic(array, func, data_structure=ds) - for input in inputs: - assert rmq.query(input[0], input[1]) == gen_expected(data, input[0], input[1]) - - data_copy = deepcopy(data) - for _ in range(array_size//2): - index = random.randint(0, array_size - 1) - value = random.randint(0, 4 * array_size) - data_copy[index] = value - rmq.update(index, value) - - for input in inputs: - assert rmq.query(input[0], input[1]) == gen_expected(data_copy, input[0], input[1]) - -def test_RangeQueryDynamic_minimum(): - - def _gen_minimum_expected(data, i, j): - return min(data[i:j + 1]) - - 
_test_RangeQueryDynamic_common(minimum, _gen_minimum_expected) - -def test_RangeQueryDynamic_greatest_common_divisor(): - - def _gen_gcd_expected(data, i, j): - if j == i: - return data[i] - else: - expected_gcd = math.gcd(data[i], data[i + 1]) - for idx in range(i + 2, j + 1): - expected_gcd = math.gcd(expected_gcd, data[idx]) - return expected_gcd - - _test_RangeQueryDynamic_common(greatest_common_divisor, _gen_gcd_expected) - -def test_RangeQueryDynamic_summation(): - - def _gen_summation_expected(data, i, j): - return sum(data[i:j + 1]) - - return _test_RangeQueryDynamic_common(summation, _gen_summation_expected) diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_range_query_static.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_range_query_static.py deleted file mode 100644 index e898653c9..000000000 --- a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_range_query_static.py +++ /dev/null @@ -1,63 +0,0 @@ -from pydatastructs import ( - RangeQueryStatic, minimum, - greatest_common_divisor, summation, - OneDimensionalArray) -from pydatastructs.utils.raises_util import raises -import random, math - -def _test_RangeQueryStatic_common(func, gen_expected): - - array = OneDimensionalArray(int, []) - raises(ValueError, lambda: RangeQueryStatic(array, func)) - - array = OneDimensionalArray(int, [1]) - rq = RangeQueryStatic(array, func) - assert rq.query(0, 0) == 1 - raises(ValueError, lambda: rq.query(0, -1)) - raises(IndexError, lambda: rq.query(0, 1)) - - array_sizes = [3, 6, 12, 24, 48, 96] - random.seed(0) - for array_size in array_sizes: - data = random.sample(range(-2*array_size, 2*array_size), array_size) - array = OneDimensionalArray(int, data) - - expected = [] - inputs = [] - for i in range(array_size): - for j in range(i + 1, array_size): - inputs.append((i, j)) - expected.append(gen_expected(data, i, j)) - - data_structures = ["array", 
"sparse_table"] - for ds in data_structures: - rmq = RangeQueryStatic(array, func, data_structure=ds) - for input, correct in zip(inputs, expected): - assert rmq.query(input[0], input[1]) == correct - -def test_RangeQueryStatic_minimum(): - - def _gen_minimum_expected(data, i, j): - return min(data[i:j + 1]) - - _test_RangeQueryStatic_common(minimum, _gen_minimum_expected) - -def test_RangeQueryStatic_greatest_common_divisor(): - - def _gen_gcd_expected(data, i, j): - if j == i: - return data[i] - else: - expected_gcd = math.gcd(data[i], data[i + 1]) - for idx in range(i + 2, j + 1): - expected_gcd = math.gcd(expected_gcd, data[idx]) - return expected_gcd - - _test_RangeQueryStatic_common(greatest_common_divisor, _gen_gcd_expected) - -def test_RangeQueryStatic_summation(): - - def _gen_summation_expected(data, i, j): - return sum(data[i:j + 1]) - - return _test_RangeQueryStatic_common(summation, _gen_summation_expected) diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_stack.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_stack.py deleted file mode 100644 index 2d9d08b82..000000000 --- a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_stack.py +++ /dev/null @@ -1,77 +0,0 @@ -from pydatastructs.miscellaneous_data_structures import Stack -from pydatastructs.miscellaneous_data_structures.stack import ArrayStack, LinkedListStack -from pydatastructs.miscellaneous_data_structures._backend.cpp import _stack -from pydatastructs.utils.raises_util import raises -from pydatastructs.utils.misc_util import _check_type, Backend - - -def test_Stack(): - s = Stack(implementation='array') - s1 = Stack() - assert _check_type(s, ArrayStack) is True - assert _check_type(s1, ArrayStack) is True - s2 = Stack(implementation='linked_list') - assert _check_type(s2, LinkedListStack) is True - assert raises(NotImplementedError, lambda: Stack(implementation='')) - - s3 = 
Stack(backend=Backend.CPP) - assert _check_type(s3, _stack.ArrayStack) is True - s4 = Stack(implementation="array", backend=Backend.CPP) - assert _check_type(s4, _stack.ArrayStack) is True - -def test_ArrayStack(): - s = Stack(implementation='array') - s.push(1) - s.push(2) - s.push(3) - assert s.peek == 3 - assert str(s) == '[1, 2, 3]' - assert s.pop() == 3 - assert s.pop() == 2 - assert s.pop() == 1 - assert s.is_empty is True - assert raises(IndexError, lambda : s.pop()) - _s = Stack(items=[1, 2, 3]) - assert str(_s) == '[1, 2, 3]' - assert len(_s) == 3 - - # Cpp test - s1 = Stack(implementation="array", backend=Backend.CPP) - s1.push(1) - s1.push(2) - s1.push(3) - assert s1.peek == 3 - assert str(s1) == "['1', '2', '3']" - assert s1.pop() == 3 - assert s1.pop() == 2 - assert s1.pop() == 1 - assert s1.is_empty is True - assert raises(IndexError, lambda : s1.pop()) - _s1 = Stack(items=[1, 2, 3], backend=Backend.CPP) - assert str(_s1) == "['1', '2', '3']" - assert len(_s1) == 3 - -def test_LinkedListStack(): - s = Stack(implementation='linked_list') - s.push(1) - s.push(2) - s.push(3) - assert s.peek.key == 3 - assert str(s) == ("['(1, None)', '(2, None)', '(3, None)']") - assert s.pop().key == 3 - assert s.pop().key == 2 - assert s.pop().key == 1 - assert s.is_empty is True - assert raises(IndexError, lambda : s.pop()) - assert str(s) == '[]' - _s = Stack(implementation='linked_list',items=[1, 2, 3]) - assert str(_s) == "['(1, None)', '(2, None)', '(3, None)']" - assert len(_s) == 3 - - s = Stack(implementation='linked_list',items=['a',None,type,{}]) - assert len(s) == 4 - assert s.size == 4 - - peek = s.peek - assert peek.key == s.pop().key - assert raises(TypeError, lambda: Stack(implementation='linked_list', items={0, 1})) diff --git a/lib/python3.12/site-packages/pydatastructs/strings/__init__.py b/lib/python3.12/site-packages/pydatastructs/strings/__init__.py deleted file mode 100644 index 33930b426..000000000 --- 
a/lib/python3.12/site-packages/pydatastructs/strings/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -__all__ = [] - -from . import ( - trie, - algorithms -) - -from .trie import ( - Trie -) - -__all__.extend(trie.__all__) - -from .algorithms import ( - find -) - -__all__.extend(algorithms.__all__) diff --git a/lib/python3.12/site-packages/pydatastructs/strings/algorithms.py b/lib/python3.12/site-packages/pydatastructs/strings/algorithms.py deleted file mode 100644 index 1e26b9411..000000000 --- a/lib/python3.12/site-packages/pydatastructs/strings/algorithms.py +++ /dev/null @@ -1,247 +0,0 @@ -from pydatastructs.linear_data_structures.arrays import ( - DynamicOneDimensionalArray, OneDimensionalArray) -from pydatastructs.utils.misc_util import ( - Backend, raise_if_backend_is_not_python) - -__all__ = [ - 'find' -] - -PRIME_NUMBER, MOD = 257, 1000000007 - -def find(text, query, algorithm, **kwargs): - """ - Finds occurrence of a query string within the text string. - - Parameters - ========== - - text: str - The string on which query is to be performed. - query: str - The string which is to be searched in the text. - algorithm: str - The algorithm which should be used for - searching. - Currently the following algorithms are - supported, - - 'kmp' -> Knuth-Morris-Pratt as given in [1]. - - 'rabin_karp' -> Rabin–Karp algorithm as given in [2]. - - 'boyer_moore' -> Boyer-Moore algorithm as given in [3]. - - 'z_function' -> Z-function algorithm as given in [4]. - - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - DynamicOneDimensionalArray - An array of starting positions of the portions - in the text which match with the given query. 
- - Examples - ======== - - >>> from pydatastructs.strings.algorithms import find - >>> text = "abcdefabcabe" - >>> pos = find(text, "ab", algorithm="kmp") - >>> str(pos) - "['0', '6', '9']" - >>> pos = find(text, "abc", algorithm="kmp") - >>> str(pos) - "['0', '6']" - >>> pos = find(text, "abe", algorithm="kmp") - >>> str(pos) - "['9']" - >>> pos = find(text, "abed", algorithm="kmp") - >>> str(pos) - '[]' - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Knuth%E2%80%93Morris%E2%80%93Pratt_algorithm - .. [2] https://en.wikipedia.org/wiki/Rabin%E2%80%93Karp_algorithm - .. [3] https://en.wikipedia.org/wiki/Boyer%E2%80%93Moore_string-search_algorithm - .. [4] https://usaco.guide/CPH.pdf#page=257 - """ - raise_if_backend_is_not_python( - find, kwargs.get('backend', Backend.PYTHON)) - import pydatastructs.strings.algorithms as algorithms - func = "_" + algorithm - if not hasattr(algorithms, func): - raise NotImplementedError( - "Currently %s algoithm for searching strings " - "inside a text isn't implemented yet." 
- %(algorithm)) - return getattr(algorithms, func)(text, query) - - -def _knuth_morris_pratt(text, query): - if len(text) == 0 or len(query) == 0: - return DynamicOneDimensionalArray(int, 0) - kmp_table = _build_kmp_table(query) - return _do_match(text, query, kmp_table) - -_kmp = _knuth_morris_pratt - -def _build_kmp_table(query): - pos, cnd = 1, 0 - kmp_table = OneDimensionalArray(int, len(query) + 1) - - kmp_table[0] = -1 - - while pos < len(query): - if query[pos] == query[cnd]: - kmp_table[pos] = kmp_table[cnd] - else: - kmp_table[pos] = cnd - while cnd >= 0 and query[pos] != query[cnd]: - cnd = kmp_table[cnd] - pos, cnd = pos + 1, cnd + 1 - kmp_table[pos] = cnd - - return kmp_table - - - -def _do_match(string, query, kmp_table): - j, k = 0, 0 - positions = DynamicOneDimensionalArray(int, 0) - - while j < len(string): - if query[k] == string[j]: - j = j + 1 - k = k + 1 - if k == len(query): - positions.append(j - k) - k = kmp_table[k] - else: - k = kmp_table[k] - if k < 0: - j = j + 1 - k = k + 1 - - return positions - -def _p_pow(length, p=PRIME_NUMBER, m=MOD): - p_pow = OneDimensionalArray(int, length) - p_pow[0] = 1 - for i in range(1, length): - p_pow[i] = (p_pow[i-1] * p) % m - return p_pow - -def _hash_str(string, p=PRIME_NUMBER, m=MOD): - hash_value = 0 - p_pow = _p_pow(len(string), p, m) - for i in range(len(string)): - hash_value = (hash_value + ord(string[i]) * p_pow[i]) % m - return hash_value - -def _rabin_karp(text, query): - t = len(text) - q = len(query) - positions = DynamicOneDimensionalArray(int, 0) - if q == 0 or t == 0: - return positions - - query_hash = _hash_str(query) - text_hash = OneDimensionalArray(int, t + 1) - text_hash.fill(0) - p_pow = _p_pow(t) - - for i in range(t): - text_hash[i+1] = (text_hash[i] + ord(text[i]) * p_pow[i]) % MOD - for i in range(t - q + 1): - curr_hash = (text_hash[i + q] + MOD - text_hash[i]) % MOD - if curr_hash == (query_hash * p_pow[i]) % MOD: - positions.append(i) - - return positions - -def 
_boyer_moore(text, query): - positions = DynamicOneDimensionalArray(int, 0) - text_length, query_length = len(text), len(query) - - if text_length == 0 or query_length == 0: - return positions - - # Preprocessing Step - bad_match_table = dict() - for i in range(query_length): - bad_match_table[query[i]] = i - - shift = 0 - # Matching procedure - while shift <= text_length-query_length: - j = query_length - 1 - while j >= 0 and query[j] == text[shift + j]: - j -= 1 - if j < 0: - positions.append(shift) - if shift + query_length < text_length: - if text[shift + query_length] in bad_match_table: - shift += query_length - bad_match_table[text[shift + query_length]] - else: - shift += query_length + 1 - else: - shift += 1 - else: - letter_pos = text[shift + j] - if letter_pos in bad_match_table: - shift += max(1, j - bad_match_table[letter_pos]) - else: - shift += max(1, j + 1) - return positions - -def _z_vector(text, query): - string = text - if query != "": - string = query + str("$") + text - - z_fct = OneDimensionalArray(int, len(string)) - z_fct.fill(0) - - curr_pos = 1 - seg_left = 0 - seg_right = 0 - - for curr_pos in range(1,len(string)): - if curr_pos <= seg_right: - z_fct[curr_pos] = min(seg_right - curr_pos + 1, z_fct[curr_pos - seg_left]) - - while curr_pos + z_fct[curr_pos] < len(string) and \ - string[z_fct[curr_pos]] == string[curr_pos + z_fct[curr_pos]]: - z_fct[curr_pos] += 1 - - if curr_pos + z_fct[curr_pos] - 1 > seg_right: - seg_left = curr_pos - seg_right = curr_pos + z_fct[curr_pos] - 1 - - final_z_fct = DynamicOneDimensionalArray(int, 0) - start_index = 0 - if query != "": - start_index = len(query) + 1 - for pos in range(start_index, len(string)): - final_z_fct.append(z_fct[pos]) - - return final_z_fct - -def _z_function(text, query): - positions = DynamicOneDimensionalArray(int, 0) - if len(text) == 0 or len(query) == 0: - return positions - - fct = _z_vector(text, query) - for pos in range(len(fct)): - if fct[pos] == len(query): - 
positions.append(pos) - - return positions diff --git a/lib/python3.12/site-packages/pydatastructs/strings/tests/__init__.py b/lib/python3.12/site-packages/pydatastructs/strings/tests/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/lib/python3.12/site-packages/pydatastructs/strings/tests/test_algorithms.py b/lib/python3.12/site-packages/pydatastructs/strings/tests/test_algorithms.py deleted file mode 100644 index 37622cf80..000000000 --- a/lib/python3.12/site-packages/pydatastructs/strings/tests/test_algorithms.py +++ /dev/null @@ -1,76 +0,0 @@ -from pydatastructs.strings import find - -import random, string - -def test_kmp(): - _test_common_string_matching('kmp') - -def test_rka(): - _test_common_string_matching('rabin_karp') - -def test_bm(): - _test_common_string_matching('boyer_moore') - -def test_zf(): - _test_common_string_matching('z_function') - -def _test_common_string_matching(algorithm): - true_text_pattern_dictionary = { - "Knuth-Morris-Pratt": "-Morris-", - "abcabcabcabdabcabdabcabca": "abcabdabcabca", - "aefcdfaecdaefaefcdaefeaefcdcdeae": "aefcdaefeaefcd", - "aaaaaaaa": "aaa", - "fullstringmatch": "fullstringmatch", - "z-function": "z-fun" - } - for test_case_key in true_text_pattern_dictionary: - text = test_case_key - query = true_text_pattern_dictionary[test_case_key] - positions = find(text, query, algorithm) - for i in range(positions._last_pos_filled): - p = positions[i] - assert text[p:p + len(query)] == query - - false_text_pattern_dictionary = { - "Knuth-Morris-Pratt": "-Pratt-", - "abcabcabcabdabcabdabcabca": "qwertyuiopzxcvbnm", - "aefcdfaecdaefaefcdaefeaefcdcdeae": "cdaefaefe", - "fullstringmatch": "fullstrinmatch", - "z-function": "function-", - "abc": "", - "": "abc" - } - - for test_case_key in false_text_pattern_dictionary: - text = test_case_key - query = false_text_pattern_dictionary[test_case_key] - positions = find(text, query, algorithm) - assert positions.size == 0 - - random.seed(1000) - - def 
gen_random_string(length): - ascii = string.ascii_uppercase - digits = string.digits - return ''.join(random.choices(ascii + digits, k=length)) - - for _ in range(100): - query = gen_random_string(random.randint(3, 10)) - num_times = random.randint(1, 10) - freq = 0 - text = "" - while freq < num_times: - rand_str = gen_random_string(random.randint(5, 10)) - if rand_str != query: - freq += 1 - text += query + rand_str + query - positions = find(text, query, algorithm) - assert positions._num == num_times * 2 - for i in range(positions._last_pos_filled): - p = positions[i] - assert text[p:p + len(query)] == query - - text = gen_random_string(len(query)) - if text != query: - positions = find(text, query, algorithm) - assert positions.size == 0 diff --git a/lib/python3.12/site-packages/pydatastructs/strings/tests/test_trie.py b/lib/python3.12/site-packages/pydatastructs/strings/tests/test_trie.py deleted file mode 100644 index 059104708..000000000 --- a/lib/python3.12/site-packages/pydatastructs/strings/tests/test_trie.py +++ /dev/null @@ -1,49 +0,0 @@ -from pydatastructs import Trie - -def test_Trie(): - - strings = ["A", "to", "tea", "ted", "ten", "i", - "in", "inn", "Amfn", "snbr"] - trie = Trie() - for string in strings: - trie.insert(string) - - prefix_strings = ["te", "t", "Am", "snb"] - - for string in strings: - assert trie.is_inserted(string) - - for string in strings[::-1]: - assert trie.is_inserted(string) - - for string in prefix_strings: - assert trie.is_present(string) - assert not trie.is_inserted(string) - - assert sorted(trie.strings_with_prefix("t")) == ['tea', 'ted', 'ten', 'to'] - assert sorted(trie.strings_with_prefix("te")) == ["tea", "ted", "ten"] - assert trie.strings_with_prefix("i") == ["i", "in", "inn"] - assert trie.strings_with_prefix("a") == [] - - remove_order = ["to", "tea", "ted", "ten", "inn", "in", "A"] - - assert trie.delete("z") is None - - for string in remove_order: - trie.delete(string) - for present in strings: - if present == 
string: - assert not trie.is_inserted(present) - else: - assert trie.is_present(present) - assert trie.is_inserted(present) - strings.remove(string) - - prefix_strings_1 = ["dict", "dicts", "dicts_lists_tuples"] - trie_1 = Trie() - - for i in range(len(prefix_strings_1)): - trie_1.insert(prefix_strings_1[i]) - for j in range(i + 1): - assert trie_1.is_inserted(prefix_strings_1[j]) - assert trie_1.is_present(prefix_strings_1[j]) diff --git a/lib/python3.12/site-packages/pydatastructs/strings/trie.py b/lib/python3.12/site-packages/pydatastructs/strings/trie.py deleted file mode 100644 index cdf6666cf..000000000 --- a/lib/python3.12/site-packages/pydatastructs/strings/trie.py +++ /dev/null @@ -1,201 +0,0 @@ -from pydatastructs.utils.misc_util import ( - TrieNode, Backend, - raise_if_backend_is_not_python) -from collections import deque -import copy - -__all__ = [ - 'Trie' -] - -Stack = Queue = deque - -class Trie(object): - """ - Represents the trie data structure for storing strings. - - Parameters - ========== - - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs import Trie - >>> trie = Trie() - >>> trie.insert("a") - >>> trie.insert("aa") - >>> trie.strings_with_prefix("a") - ['a', 'aa'] - >>> trie.is_present("aa") - True - >>> trie.delete("aa") - True - >>> trie.is_present("aa") - False - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Trie - """ - - __slots__ = ['root'] - - @classmethod - def methods(cls): - return ['__new__', 'insert', 'is_present', 'delete', - 'strings_with_prefix'] - - def __new__(cls, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = object.__new__(cls) - obj.root = TrieNode() - return obj - - def insert(self, string: str) -> None: - """ - Inserts the given string into the trie. 
- - Parameters - ========== - - string: str - - Returns - ======= - - None - """ - walk = self.root - for char in string: - if walk.get_child(char) is None: - newNode = TrieNode(char) - walk.add_child(newNode) - walk = newNode - else: - walk = walk.get_child(char) - walk.is_terminal = True - - def is_present(self, string: str) -> bool: - """ - Checks if the given string is present as a prefix in the trie. - - Parameters - ========== - - string: str - - Returns - ======= - - True if the given string is present as a prefix; - False in all other cases. - """ - walk = self.root - for char in string: - if walk.get_child(char) is None: - return False - walk = walk.get_child(char) - return True - - def is_inserted(self, string: str) -> bool: - """ - Checks if the given string was inserted in the trie. - - Parameters - ========== - - string: str - - Returns - ======= - - True if the given string was inserted in trie; - False in all other cases. - """ - walk = self.root - for char in string: - if walk.get_child(char) is None: - return False - walk = walk.get_child(char) - return walk.is_terminal - - def delete(self, string: str) -> bool: - """ - Deletes the given string from the trie. - - Parameters - ========== - - string: str - - Returns - ======= - - True if successfully deleted; - None if the string is not present in the trie. - """ - path = [] - walk = self.root - size = len(string) - for i in range(size): - char = string[i] - path.append(walk) - if walk.get_child(char) is None: - return None - walk = walk.get_child(char) - path.append(walk) - i = len(path) - 1 - path[i].is_terminal = False - while not path[i]._children and i >= 1: - path[i-1].remove_child(path[i].char) - i -= 1 - if path[i].is_terminal: - return True - return True - - def strings_with_prefix(self, string: str) -> list: - """ - Generates a list of all strings with the given prefix. 
- - Parameters - ========== - - string: str - - Returns - ======= - - strings: list - The list of strings with the given prefix. - """ - - def _collect(prefix: str, node: TrieNode, strings: list) -> str: - TrieNode_stack = Stack() - TrieNode_stack.append((node, prefix)) - while TrieNode_stack: - walk, curr_prefix = TrieNode_stack.pop() - if walk.is_terminal: - strings.append(curr_prefix + walk.char) - for child in walk._children: - TrieNode_stack.append((walk.get_child(child), curr_prefix + walk.char)) - - strings = [] - prefix = "" - walk = self.root - for char in string: - walk = walk.get_child(char) - if walk is None: - return strings - prefix += char - if walk.is_terminal: - strings.append(walk.char) - for child in walk._children: - _collect(prefix, walk.get_child(child), strings) - return strings diff --git a/lib/python3.12/site-packages/pydatastructs/trees/__init__.py b/lib/python3.12/site-packages/pydatastructs/trees/__init__.py deleted file mode 100644 index 892730122..000000000 --- a/lib/python3.12/site-packages/pydatastructs/trees/__init__.py +++ /dev/null @@ -1,40 +0,0 @@ -__all__ = [] - -from . 
import ( - binary_trees, - m_ary_trees, - space_partitioning_trees, - heaps, -) - -from .binary_trees import ( - BinaryTree, - BinarySearchTree, - BinaryTreeTraversal, - AVLTree, - BinaryIndexedTree, - CartesianTree, - Treap, - SplayTree, - RedBlackTree -) -__all__.extend(binary_trees.__all__) - -from .m_ary_trees import ( - MAryTreeNode, MAryTree -) - -__all__.extend(m_ary_trees.__all__) - -from .space_partitioning_trees import ( - OneDimensionalSegmentTree -) -__all__.extend(space_partitioning_trees.__all__) - -from .heaps import ( - BinaryHeap, - TernaryHeap, - DHeap, - BinomialHeap -) -__all__.extend(heaps.__all__) diff --git a/lib/python3.12/site-packages/pydatastructs/trees/_backend/__init__.py b/lib/python3.12/site-packages/pydatastructs/trees/_backend/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/lib/python3.12/site-packages/pydatastructs/trees/binary_trees.py b/lib/python3.12/site-packages/pydatastructs/trees/binary_trees.py deleted file mode 100644 index 48446d1d4..000000000 --- a/lib/python3.12/site-packages/pydatastructs/trees/binary_trees.py +++ /dev/null @@ -1,1888 +0,0 @@ -import random -from collections import deque as Queue -from pydatastructs.utils import TreeNode, CartesianTreeNode, RedBlackTreeNode -from pydatastructs.miscellaneous_data_structures import Stack -from pydatastructs.linear_data_structures import OneDimensionalArray -from pydatastructs.linear_data_structures.arrays import ArrayForTrees -from pydatastructs.utils.misc_util import Backend -from pydatastructs.trees._backend.cpp import _trees - -__all__ = [ - 'AVLTree', - 'BinaryTree', - 'BinarySearchTree', - 'BinaryTreeTraversal', - 'BinaryIndexedTree', - 'CartesianTree', - 'Treap', - 'SplayTree', - 'RedBlackTree' -] - -class BinaryTree(object): - """ - Abstract binary tree. - - Parameters - ========== - - key - Required if tree is to be instantiated with - root otherwise not needed. - root_data - Optional, the root node of the binary tree. 
- If not of type TreeNode, it will consider - root as data and a new root node will - be created. - comp: lambda/function - Optional, A lambda function which will be used - for comparison of keys. Should return a - bool value. By default it implements less - than operator. - is_order_statistic: bool - Set it to True, if you want to use the - order statistic features of the tree. - backend: pydatastructs.Backend - The backend to be used. Available backends: Python and C++ - Optional, by default, the Python backend is used. For faster execution, use the C++ backend. - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Binary_tree - """ - - __slots__ = ['root_idx', 'comparator', 'tree', 'size', - 'is_order_statistic'] - - def __new__(cls, key=None, root_data=None, comp=None, - is_order_statistic=False, **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.CPP: - if comp is None: - comp = lambda key1, key2: key1 < key2 - return _trees.BinaryTree(key, root_data, comp, is_order_statistic, **kwargs) # If any argument is not given, then it is passed as None, except for comp - obj = object.__new__(cls) - if key is None and root_data is not None: - raise ValueError('Key required.') - key = None if root_data is None else key - root = TreeNode(key, root_data) - root.is_root = True - obj.root_idx = 0 - obj.tree, obj.size = ArrayForTrees(TreeNode, [root]), 1 - obj.comparator = lambda key1, key2: key1 < key2 \ - if comp is None else comp - obj.is_order_statistic = is_order_statistic - return obj - - @classmethod - def methods(cls): - return ['__new__', '__str__'] - - def insert(self, key, data=None): - """ - Inserts data by the passed key using iterative - algorithm. - - Parameters - ========== - - key - The key for comparison. - - data - The data to be inserted. 
- - Returns - ======= - - None - """ - raise NotImplementedError("This is an abstract method.") - - def delete(self, key, **kwargs): - """ - Deletes the data with the passed key - using iterative algorithm. - - Parameters - ========== - - key - The key of the node which is - to be deleted. - balancing_info: bool - Optional, by default, False - The information needed for updating - the tree is returned if this parameter - is set to True. It is not meant for - user facing APIs. - - Returns - ======= - - True - If the node is deleted successfully. - None - If the node to be deleted doesn't exists. - - Note - ==== - - The node is deleted means that the connection to that - node are removed but the it is still in three. This - is being done to keep the complexity of deletion, O(logn). - """ - raise NotImplementedError("This is an abstract method.") - - def search(self, key, **kwargs): - """ - Searches for the data in the binary search tree - using iterative algorithm. - - Parameters - ========== - - key - The key for searching. - parent: bool - If true then returns index of the - parent of the node with the passed - key. - By default, False - - Returns - ======= - - int - If the node with the passed key is - in the tree. - tuple - The index of the searched node and - the index of the parent of that node. - None - In all other cases. - """ - raise NotImplementedError("This is an abstract method.") - - - def __str__(self): - to_be_printed = ['' for i in range(self.tree._last_pos_filled + 1)] - for i in range(self.tree._last_pos_filled + 1): - if self.tree[i] is not None: - node = self.tree[i] - to_be_printed[i] = (node.left, node.key, node.data, node.right) - return str(to_be_printed) - -class BinarySearchTree(BinaryTree): - """ - Represents binary search trees. 
- - Examples - ======== - - >>> from pydatastructs.trees import BinarySearchTree as BST - >>> b = BST() - >>> b.insert(1, 1) - >>> b.insert(2, 2) - >>> child = b.tree[b.root_idx].right - >>> b.tree[child].data - 2 - >>> b.search(1) - 0 - >>> b.search(-1) is None - True - >>> b.delete(1) is True - True - >>> b.search(1) is None - True - >>> b.delete(2) is True - True - >>> b.search(2) is None - True - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Binary_search_tree - - See Also - ======== - - pydatastructs.trees.binary_tree.BinaryTree - """ - - @classmethod - def methods(cls): - return ['insert', 'search', 'delete', 'select', - 'rank', 'lowest_common_ancestor'] - - left_size = lambda self, node: self.tree[node.left].size \ - if node.left is not None else 0 - right_size = lambda self, node: self.tree[node.right].size \ - if node.right is not None else 0 - def __new__(cls, key=None, root_data=None, comp=None, - is_order_statistic=False, **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.CPP: - if comp is None: - comp = lambda key1, key2: key1 < key2 - return _trees.BinarySearchTree(key, root_data, comp, is_order_statistic, **kwargs) # If any argument is not given, then it is passed as None, except for comp - return super().__new__(cls, key, root_data, comp, is_order_statistic, **kwargs) - - def _update_size(self, start_idx): - if self.is_order_statistic: - walk = start_idx - while walk is not None: - self.tree[walk].size = ( - self.left_size(self.tree[walk]) + - self.right_size(self.tree[walk]) + 1) - walk = self.tree[walk].parent - - def insert(self, key, data=None): - res = self.search(key) - if res is not None: - self.tree[res].data = data - return None - walk = self.root_idx - if self.tree[walk].key is None: - self.tree[walk].key = key - self.tree[walk].data = data - return None - new_node, prev_node, flag = TreeNode(key, data), self.root_idx, True - while flag: - if not self.comparator(key, 
self.tree[walk].key): - if self.tree[walk].right is None: - new_node.parent = prev_node - self.tree.append(new_node) - self.tree[walk].right = self.size - self.size += 1 - flag = False - prev_node = walk = self.tree[walk].right - else: - if self.tree[walk].left is None: - new_node.parent = prev_node - self.tree.append(new_node) - self.tree[walk].left = self.size - self.size += 1 - flag = False - prev_node = walk = self.tree[walk].left - self._update_size(walk) - - def search(self, key, **kwargs): - ret_parent = kwargs.get('parent', False) - parent = None - walk = self.root_idx - if self.tree[walk].key is None: - return None - while walk is not None: - if self.tree[walk].key == key: - break - parent = walk - if self.comparator(key, self.tree[walk].key): - walk = self.tree[walk].left - else: - walk = self.tree[walk].right - return (walk, parent) if ret_parent else walk - - def _bound_helper(self, node_idx, bound_key, is_upper=False): - if node_idx is None: - return None - if self.tree[node_idx].key is None: - return None - - if self.tree[node_idx].key == bound_key: - if not is_upper: - return self.tree[node_idx].key - else: - return self._bound_helper(self.tree[node_idx].right, - bound_key, is_upper) - - if self.comparator(self.tree[node_idx].key, bound_key): - return self._bound_helper(self.tree[node_idx].right, - bound_key, is_upper) - else: - res_bound = self._bound_helper(self.tree[node_idx].left, - bound_key, is_upper) - return res_bound if res_bound is not None else self.tree[node_idx].key - - - def lower_bound(self, key, **kwargs): - """ - Finds the lower bound of the given key in the tree - - Parameters - ========== - - key - The key for comparison - - Examples - ======== - - >>> from pydatastructs.trees import BinarySearchTree as BST - >>> b = BST() - >>> b.insert(10, 10) - >>> b.insert(18, 18) - >>> b.insert(7, 7) - >>> b.lower_bound(9) - 10 - >>> b.lower_bound(7) - 7 - >>> b.lower_bound(20) is None - True - - Returns - ======= - - value - The lower bound 
of the given key. - Returns None if the value doesn't exist - """ - return self._bound_helper(self.root_idx, key) - - - def upper_bound(self, key, **kwargs): - """ - Finds the upper bound of the given key in the tree - - Parameters - ========== - - key - The key for comparison - - Examples - ======== - - >>> from pydatastructs.trees import BinarySearchTree as BST - >>> b = BST() - >>> b.insert(10, 10) - >>> b.insert(18, 18) - >>> b.insert(7, 7) - >>> b.upper_bound(9) - 10 - >>> b.upper_bound(7) - 10 - >>> b.upper_bound(20) is None - True - - Returns - ======= - - value - The upper bound of the given key. - Returns None if the value doesn't exist - """ - return self._bound_helper(self.root_idx, key, True) - - - def delete(self, key, **kwargs): - (walk, parent) = self.search(key, parent=True) - a = None - if walk is None: - return None - if self.tree[walk].left is None and \ - self.tree[walk].right is None: - if parent is None: - self.tree[self.root_idx].data = None - self.tree[self.root_idx].key = None - else: - if self.tree[parent].left == walk: - self.tree[parent].left = None - else: - self.tree[parent].right = None - a = parent - par_key, root_key = (self.tree[parent].key, - self.tree[self.root_idx].key) - new_indices = self.tree.delete(walk) - if new_indices is not None: - a = new_indices[par_key] - self.root_idx = new_indices[root_key] - self._update_size(a) - - elif self.tree[walk].left is not None and \ - self.tree[walk].right is not None: - twalk = self.tree[walk].right - par = walk - flag = False - while self.tree[twalk].left is not None: - flag = True - par = twalk - twalk = self.tree[twalk].left - self.tree[walk].data = self.tree[twalk].data - self.tree[walk].key = self.tree[twalk].key - if flag: - self.tree[par].left = self.tree[twalk].right - else: - self.tree[par].right = self.tree[twalk].right - if self.tree[twalk].right is not None: - self.tree[self.tree[twalk].right].parent = par - if twalk is not None: - a = par - par_key, root_key = 
(self.tree[par].key, - self.tree[self.root_idx].key) - new_indices = self.tree.delete(twalk) - if new_indices is not None: - a = new_indices[par_key] - self.root_idx = new_indices[root_key] - self._update_size(a) - - else: - if self.tree[walk].left is not None: - child = self.tree[walk].left - else: - child = self.tree[walk].right - if parent is None: - self.tree[self.root_idx].left = self.tree[child].left - self.tree[self.root_idx].right = self.tree[child].right - self.tree[self.root_idx].data = self.tree[child].data - self.tree[self.root_idx].key = self.tree[child].key - self.tree[self.root_idx].parent = None - root_key = self.tree[self.root_idx].key - new_indices = self.tree.delete(child) - if new_indices is not None: - self.root_idx = new_indices[root_key] - else: - if self.tree[parent].left == walk: - self.tree[parent].left = child - else: - self.tree[parent].right = child - self.tree[child].parent = parent - a = parent - par_key, root_key = (self.tree[parent].key, - self.tree[self.root_idx].key) - new_indices = self.tree.delete(walk) - if new_indices is not None: - parent = new_indices[par_key] - self.tree[child].parent = new_indices[par_key] - a = new_indices[par_key] - self.root_idx = new_indices[root_key] - self._update_size(a) - - if kwargs.get("balancing_info", False) is not False: - return a - return True - - def select(self, i): - """ - Finds the i-th smallest node in the tree. - - Parameters - ========== - - i: int - A positive integer - - Returns - ======= - - n: TreeNode - The node with the i-th smallest key - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Order_statistic_tree - """ - i -= 1 # The algorithm is based on zero indexing - if i < 0: - raise ValueError("Expected a positive integer, got %d"%(i + 1)) - if i >= self.tree._num: - raise ValueError("%d is greater than the size of the " - "tree which is, %d"%(i + 1, self.tree._num)) - walk = self.root_idx - while walk is not None: - l = self.left_size(self.tree[walk]) - if i == l: - return self.tree[walk] - left_walk = self.tree[walk].left - right_walk = self.tree[walk].right - if left_walk is None and right_walk is None: - raise IndexError("The traversal is terminated " - "due to no child nodes ahead.") - if i < l: - if left_walk is not None and \ - self.comparator(self.tree[left_walk].key, - self.tree[walk].key): - walk = left_walk - else: - walk = right_walk - else: - if right_walk is not None and \ - not self.comparator(self.tree[right_walk].key, - self.tree[walk].key): - walk = right_walk - else: - walk = left_walk - i -= (l + 1) - - def rank(self, x): - """ - Finds the rank of the given node, i.e. - its index in the sorted list of nodes - of the tree. - - Parameters - ========== - - x: key - The key of the node whose rank is to be found out. - """ - walk = self.search(x) - if walk is None: - return None - r = self.left_size(self.tree[walk]) + 1 - while self.tree[walk].key != self.tree[self.root_idx].key: - p = self.tree[walk].parent - if walk == self.tree[p].right: - r += self.left_size(self.tree[p]) + 1 - walk = p - return r - - def _simple_path(self, key, root): - """ - Utility funtion to find the simple path between root and node. 
- - Parameters - ========== - - key: Node.key - Key of the node to be searched - - Returns - ======= - - path: list - """ - - stack = Stack() - stack.push(root) - path = [] - node_idx = -1 - - while not stack.is_empty: - node = stack.pop() - if self.tree[node].key == key: - node_idx = node - break - if self.tree[node].left: - stack.push(self.tree[node].left) - if self.tree[node].right: - stack.push(self.tree[node].right) - - if node_idx == -1: - return path - - while node_idx != 0: - path.append(node_idx) - node_idx = self.tree[node_idx].parent - path.append(0) - path.reverse() - - return path - - def _lca_1(self, j, k): - root = self.root_idx - path1 = self._simple_path(j, root) - path2 = self._simple_path(k, root) - if not path1 or not path2: - raise ValueError("One of two path doesn't exists. See %s, %s" - %(path1, path2)) - - n, m = len(path1), len(path2) - i = j = 0 - while i < n and j < m: - if path1[i] != path2[j]: - return self.tree[path1[i - 1]].key - i += 1 - j += 1 - if path1 < path2: - return self.tree[path1[-1]].key - return self.tree[path2[-1]].key - - def _lca_2(self, j, k): - curr_root = self.root_idx - u, v = self.search(j), self.search(k) - if (u is None) or (v is None): - raise ValueError("One of the nodes with key %s " - "or %s doesn't exits"%(j, k)) - u_left = self.comparator(self.tree[u].key, \ - self.tree[curr_root].key) - v_left = self.comparator(self.tree[v].key, \ - self.tree[curr_root].key) - - while not (u_left ^ v_left): - if u_left and v_left: - curr_root = self.tree[curr_root].left - else: - curr_root = self.tree[curr_root].right - - if curr_root == u or curr_root == v: - if curr_root is None: - return None - return self.tree[curr_root].key - u_left = self.comparator(self.tree[u].key, \ - self.tree[curr_root].key) - v_left = self.comparator(self.tree[v].key, \ - self.tree[curr_root].key) - - if curr_root is None: - return curr_root - return self.tree[curr_root].key - - def lowest_common_ancestor(self, j, k, algorithm=1): - - """ - 
Computes the lowest common ancestor of two nodes. - - Parameters - ========== - - j: Node.key - Key of first node - - k: Node.key - Key of second node - - algorithm: int - The algorithm to be used for computing the - lowest common ancestor. - Optional, by default uses algorithm 1. - - 1 -> Determines the lowest common ancestor by finding - the first intersection of the paths from v and w - to the root. - - 2 -> Modifed version of the algorithm given in the - following publication, - D. Harel. A linear time algorithm for the - lowest common ancestors problem. In 21s - Annual Symposium On Foundations of - Computer Science, pages 308-319, 1980. - - Returns - ======= - - Node.key - The key of the lowest common ancestor in the tree. - if both the nodes are present in the tree. - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Lowest_common_ancestor - - .. [2] https://pdfs.semanticscholar.org/e75b/386cc554214aa0ebd6bd6dbdd0e490da3739.pdf - - """ - return getattr(self, "_lca_"+str(algorithm))(j, k) - -class SelfBalancingBinaryTree(BinarySearchTree): - """ - Represents Base class for all rotation based balancing trees like AVL tree, Red Black tree, Splay Tree. 
- """ - def __new__(cls, key=None, root_data=None, comp=None, - is_order_statistic=False, **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.CPP: - if comp is None: - comp = lambda key1, key2: key1 < key2 - return _trees.SelfBalancingBinaryTree(key, root_data, comp, is_order_statistic, **kwargs) # If any argument is not given, then it is passed as None, except for comp - return super().__new__(cls, key, root_data, comp, is_order_statistic, **kwargs) - - def _right_rotate(self, j, k): - y = self.tree[k].right - if y is not None: - self.tree[y].parent = j - self.tree[j].left = y - self.tree[k].parent = self.tree[j].parent - if self.tree[k].parent is not None: - if self.tree[self.tree[k].parent].left == j: - self.tree[self.tree[k].parent].left = k - else: - self.tree[self.tree[k].parent].right = k - self.tree[j].parent = k - self.tree[k].right = j - kp = self.tree[k].parent - if kp is None: - self.root_idx = k - - def _left_right_rotate(self, j, k): - i = self.tree[k].right - v, w = self.tree[i].left, self.tree[i].right - self.tree[k].right, self.tree[j].left = v, w - if v is not None: - self.tree[v].parent = k - if w is not None: - self.tree[w].parent = j - self.tree[i].left, self.tree[i].right, self.tree[i].parent = \ - k, j, self.tree[j].parent - self.tree[k].parent, self.tree[j].parent = i, i - ip = self.tree[i].parent - if ip is not None: - if self.tree[ip].left == j: - self.tree[ip].left = i - else: - self.tree[ip].right = i - else: - self.root_idx = i - - def _right_left_rotate(self, j, k): - i = self.tree[k].left - v, w = self.tree[i].left, self.tree[i].right - self.tree[k].left, self.tree[j].right = w, v - if v is not None: - self.tree[v].parent = j - if w is not None: - self.tree[w].parent = k - self.tree[i].right, self.tree[i].left, self.tree[i].parent = \ - k, j, self.tree[j].parent - self.tree[k].parent, self.tree[j].parent = i, i - ip = self.tree[i].parent - if ip is not None: - if self.tree[ip].left == j: - 
self.tree[ip].left = i - else: - self.tree[ip].right = i - else: - self.root_idx = i - - def _left_rotate(self, j, k): - y = self.tree[k].left - if y is not None: - self.tree[y].parent = j - self.tree[j].right = y - self.tree[k].parent = self.tree[j].parent - if self.tree[k].parent is not None: - if self.tree[self.tree[k].parent].left == j: - self.tree[self.tree[k].parent].left = k - else: - self.tree[self.tree[k].parent].right = k - self.tree[j].parent = k - self.tree[k].left = j - kp = self.tree[k].parent - if kp is None: - self.root_idx = k - -class CartesianTree(SelfBalancingBinaryTree): - """ - Represents cartesian trees. - - Examples - ======== - - >>> from pydatastructs.trees import CartesianTree as CT - >>> c = CT() - >>> c.insert(1, 4, 1) - >>> c.insert(2, 3, 2) - >>> child = c.tree[c.root_idx].left - >>> c.tree[child].data - 1 - >>> c.search(1) - 0 - >>> c.search(-1) is None - True - >>> c.delete(1) is True - True - >>> c.search(1) is None - True - >>> c.delete(2) is True - True - >>> c.search(2) is None - True - - References - ========== - - .. 
[1] https://www.cs.princeton.edu/courses/archive/spr09/cos423/Lectures/geo-st.pdf - - See Also - ======== - - pydatastructs.trees.binary_trees.SelfBalancingBinaryTree - """ - def __new__(cls, key=None, root_data=None, comp=None, - is_order_statistic=False, **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.CPP: - if comp is None: - comp = lambda key1, key2: key1 < key2 - return _trees.CartesianTree(key, root_data, comp, is_order_statistic, **kwargs) # If any argument is not given, then it is passed as None, except for comp - return super().__new__(cls, key, root_data, comp, is_order_statistic, **kwargs) - - @classmethod - def methods(cls): - return ['__new__', '__str__', 'insert', 'delete'] - - def _bubble_up(self, node_idx): - node = self.tree[node_idx] - parent_idx = self.tree[node_idx].parent - parent = self.tree[parent_idx] - while (node.parent is not None) and (parent.priority > node.priority): - if parent.right == node_idx: - self._left_rotate(parent_idx, node_idx) - else: - self._right_rotate(parent_idx, node_idx) - node = self.tree[node_idx] - parent_idx = self.tree[node_idx].parent - if parent_idx is not None: - parent = self.tree[parent_idx] - if node.parent is None: - self.tree[node_idx].is_root = True - - def _trickle_down(self, node_idx): - node = self.tree[node_idx] - while node.left is not None or node.right is not None: - if node.left is None: - self._left_rotate(node_idx, self.tree[node_idx].right) - elif node.right is None: - self._right_rotate(node_idx, self.tree[node_idx].left) - elif self.tree[node.left].priority < self.tree[node.right].priority: - self._right_rotate(node_idx, self.tree[node_idx].left) - else: - self._left_rotate(node_idx, self.tree[node_idx].right) - node = self.tree[node_idx] - - def insert(self, key, priority, data=None): - super(CartesianTree, self).insert(key, data) - node_idx = super(CartesianTree, self).search(key) - node = self.tree[node_idx] - new_node = CartesianTreeNode(key, 
priority, data) - new_node.parent = node.parent - new_node.left = node.left - new_node.right = node.right - self.tree[node_idx] = new_node - if node.is_root: - self.tree[node_idx].is_root = True - else: - self._bubble_up(node_idx) - - def delete(self, key, **kwargs): - balancing_info = kwargs.get('balancing_info', False) - node_idx = super(CartesianTree, self).search(key) - if node_idx is not None: - self._trickle_down(node_idx) - return super(CartesianTree, self).delete(key, balancing_info = balancing_info) - - def __str__(self): - to_be_printed = ['' for i in range(self.tree._last_pos_filled + 1)] - for i in range(self.tree._last_pos_filled + 1): - if self.tree[i] is not None: - node = self.tree[i] - to_be_printed[i] = (node.left, node.key, node.priority, node.data, node.right) - return str(to_be_printed) - -class Treap(CartesianTree): - """ - Represents treaps. - - Examples - ======== - - >>> from pydatastructs.trees import Treap as T - >>> t = T() - >>> t.insert(1, 1) - >>> t.insert(2, 2) - >>> t.search(1) - 0 - >>> t.search(-1) is None - True - >>> t.delete(1) is True - True - >>> t.search(1) is None - True - >>> t.delete(2) is True - True - >>> t.search(2) is None - True - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Treap - - """ - def __new__(cls, key=None, root_data=None, comp=None, - is_order_statistic=False, **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.CPP: - if comp is None: - comp = lambda key1, key2: key1 < key2 - return _trees.Treap(key, root_data, comp, is_order_statistic, **kwargs) # If any argument is not given, then it is passed as None, except for comp - return super().__new__(cls, key, root_data, comp, is_order_statistic, **kwargs) - - @classmethod - def methods(cls): - return ['__new__', 'insert'] - - def insert(self, key, data=None): - priority = random.random() - super(Treap, self).insert(key, priority, data) - -class AVLTree(SelfBalancingBinaryTree): - """ - Represents AVL trees. 
- - References - ========== - - .. [1] https://courses.cs.washington.edu/courses/cse373/06sp/handouts/lecture12.pdf - .. [2] https://en.wikipedia.org/wiki/AVL_tree - .. [3] http://faculty.cs.niu.edu/~freedman/340/340notes/340avl2.htm - - See Also - ======== - - pydatastructs.trees.binary_trees.BinaryTree - """ - - def __new__(cls, key=None, root_data=None, comp=None, - is_order_statistic=False, **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.CPP: - if comp is None: - comp = lambda key1, key2: key1 < key2 - return _trees.AVLTree(key, root_data, comp, is_order_statistic, **kwargs) # If any argument is not given, then it is passed as None, except for comp - return super().__new__(cls, key, root_data, comp, is_order_statistic, **kwargs) - - @classmethod - def methods(cls): - return ['__new__', 'set_tree', 'insert', 'delete'] - - left_height = lambda self, node: self.tree[node.left].height \ - if node.left is not None else -1 - right_height = lambda self, node: self.tree[node.right].height \ - if node.right is not None else -1 - balance_factor = lambda self, node: self.right_height(node) - \ - self.left_height(node) - - def set_tree(self, arr): - self.tree = arr - - def _right_rotate(self, j, k): - super(AVLTree, self)._right_rotate(j, k) - self.tree[j].height = max(self.left_height(self.tree[j]), - self.right_height(self.tree[j])) + 1 - if self.is_order_statistic: - self.tree[j].size = (self.left_size(self.tree[j]) + - self.right_size(self.tree[j]) + 1) - - def _left_right_rotate(self, j, k): - super(AVLTree, self)._left_right_rotate(j, k) - self.tree[j].height = max(self.left_height(self.tree[j]), - self.right_height(self.tree[j])) + 1 - self.tree[k].height = max(self.left_height(self.tree[k]), - self.right_height(self.tree[k])) + 1 - if self.is_order_statistic: - self.tree[j].size = (self.left_size(self.tree[j]) + - self.right_size(self.tree[j]) + 1) - self.tree[k].size = (self.left_size(self.tree[k]) + - 
self.right_size(self.tree[k]) + 1) - - def _right_left_rotate(self, j, k): - super(AVLTree, self)._right_left_rotate(j, k) - self.tree[j].height = max(self.left_height(self.tree[j]), - self.right_height(self.tree[j])) + 1 - self.tree[k].height = max(self.left_height(self.tree[k]), - self.right_height(self.tree[k])) + 1 - if self.is_order_statistic: - self.tree[j].size = (self.left_size(self.tree[j]) + - self.right_size(self.tree[j]) + 1) - self.tree[k].size = (self.left_size(self.tree[k]) + - self.right_size(self.tree[k]) + 1) - - def _left_rotate(self, j, k): - super(AVLTree, self)._left_rotate(j, k) - self.tree[j].height = max(self.left_height(self.tree[j]), - self.right_height(self.tree[j])) + 1 - self.tree[k].height = max(self.left_height(self.tree[k]), - self.right_height(self.tree[k])) + 1 - if self.is_order_statistic: - self.tree[j].size = (self.left_size(self.tree[j]) + - self.right_size(self.tree[j]) + 1) - - def _balance_insertion(self, curr, last): - walk = last - path = Queue() - path.append(curr), path.append(last) - while walk is not None: - self.tree[walk].height = max(self.left_height(self.tree[walk]), - self.right_height(self.tree[walk])) + 1 - if self.is_order_statistic: - self.tree[walk].size = (self.left_size(self.tree[walk]) + - self.right_size(self.tree[walk]) + 1) - last = path.popleft() - last2last = path.popleft() - if self.balance_factor(self.tree[walk]) not in (1, 0, -1): - l = self.tree[walk].left - if l is not None and l == last and self.tree[l].left == last2last: - self._right_rotate(walk, last) - r = self.tree[walk].right - if r is not None and r == last and self.tree[r].right == last2last: - self._left_rotate(walk, last) - if l is not None and l == last and self.tree[l].right == last2last: - self._left_right_rotate(walk, last) - if r is not None and r == last and self.tree[r].left == last2last: - self._right_left_rotate(walk, last) - path.append(walk), path.append(last) - walk = self.tree[walk].parent - - def insert(self, key, 
data=None): - super(AVLTree, self).insert(key, data) - self._balance_insertion(self.size - 1, self.tree[self.size-1].parent) - - def _balance_deletion(self, start_idx, key): - walk = start_idx - while walk is not None: - self.tree[walk].height = max(self.left_height(self.tree[walk]), - self.right_height(self.tree[walk])) + 1 - if self.is_order_statistic: - self.tree[walk].size = (self.left_size(self.tree[walk]) + - self.right_size(self.tree[walk]) + 1) - if self.balance_factor(self.tree[walk]) not in (1, 0, -1): - if self.balance_factor(self.tree[walk]) < 0: - b = self.tree[walk].left - if self.balance_factor(self.tree[b]) <= 0: - self._right_rotate(walk, b) - else: - self._left_right_rotate(walk, b) - else: - b = self.tree[walk].right - if self.balance_factor(self.tree[b]) >= 0: - self._left_rotate(walk, b) - else: - self._right_left_rotate(walk, b) - walk = self.tree[walk].parent - - - def delete(self, key, **kwargs): - a = super(AVLTree, self).delete(key, balancing_info=True) - self._balance_deletion(a, key) - return True - -class SplayTree(SelfBalancingBinaryTree): - """ - Represents Splay Trees. - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Splay_tree - - """ - - def __new__(cls, key=None, root_data=None, comp=None, - is_order_statistic=False, **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.CPP: - if comp is None: - comp = lambda key1, key2: key1 < key2 - return _trees.SplayTree(key, root_data, comp, is_order_statistic, **kwargs) # If any argument is not given, then it is passed as None, except for comp - return super().__new__(cls, key, root_data, comp, is_order_statistic, **kwargs) - - @classmethod - def methods(cls): - return ['__new__', 'insert', 'delete', 'join', 'split'] - - def _zig(self, x, p): - if self.tree[p].left == x: - super(SplayTree, self)._right_rotate(p, x) - else: - super(SplayTree, self)._left_rotate(p, x) - - def _zig_zig(self, x, p): - super(SplayTree, self)._right_rotate(self.tree[p].parent, p) - super(SplayTree, self)._right_rotate(p, x) - - def _zig_zag(self, p): - super(SplayTree, self)._left_right_rotate(self.tree[p].parent, p) - - def _zag_zag(self, x, p): - super(SplayTree, self)._left_rotate(self.tree[p].parent, p) - super(SplayTree, self)._left_rotate(p, x) - - def _zag_zig(self, p): - super(SplayTree, self)._right_left_rotate(self.tree[p].parent, p) - - def splay(self, x, p): - while self.tree[x].parent is not None: - if self.tree[p].parent is None: - self._zig(x, p) - elif self.tree[p].left == x and \ - self.tree[self.tree[p].parent].left == p: - self._zig_zig(x, p) - elif self.tree[p].right == x and \ - self.tree[self.tree[p].parent].right == p: - self._zag_zag(x, p) - elif self.tree[p].left == x and \ - self.tree[self.tree[p].parent].right == p: - self._zag_zig(p) - else: - self._zig_zag(p) - p = self.tree[x].parent - - def insert(self, key, x): - super(SelfBalancingBinaryTree, self).insert(key, x) - e, p = super(SelfBalancingBinaryTree, self).search(key, parent=True) - self.tree[self.size-1].parent = p - self.splay(e, p) - - def delete(self, x): - e, p = super(SelfBalancingBinaryTree, 
self).search(x, parent=True) - if e is None: - return - self.splay(e, p) - status = super(SelfBalancingBinaryTree, self).delete(x) - return status - - def join(self, other): - """ - Joins two trees current and other such that all elements of - the current splay tree are smaller than the elements of the other tree. - - Parameters - ========== - - other: SplayTree - SplayTree which needs to be joined with the self tree. - - """ - maxm = self.root_idx - while self.tree[maxm].right is not None: - maxm = self.tree[maxm].right - minm = other.root_idx - while other.tree[minm].left is not None: - minm = other.tree[minm].left - if not self.comparator(self.tree[maxm].key, - other.tree[minm].key): - raise ValueError("Elements of %s aren't less " - "than that of %s"%(self, other)) - self.splay(maxm, self.tree[maxm].parent) - idx_update = self.tree._size - for node in other.tree: - if node is not None: - node_copy = TreeNode(node.key, node.data) - if node.left is not None: - node_copy.left = node.left + idx_update - if node.right is not None: - node_copy.right = node.right + idx_update - self.tree.append(node_copy) - else: - self.tree.append(node) - self.tree[self.root_idx].right = \ - other.root_idx + idx_update - - def split(self, x): - """ - Splits current splay tree into two trees such that one tree contains nodes - with key less than or equal to x and the other tree containing - nodes with key greater than x. - - Parameters - ========== - - x: key - Key of the element on the basis of which split is performed. - - Returns - ======= - - other: SplayTree - SplayTree containing elements with key greater than x. 
- - """ - e, p = super(SelfBalancingBinaryTree, self).search(x, parent=True) - if e is None: - return - self.splay(e, p) - other = SplayTree(None, None) - if self.tree[self.root_idx].right is not None: - traverse = BinaryTreeTraversal(self) - elements = traverse.depth_first_search(order='pre_order', node=self.tree[self.root_idx].right) - for i in range(len(elements)): - super(SelfBalancingBinaryTree, other).insert(elements[i].key, elements[i].data) - for j in range(len(elements) - 1, -1, -1): - e, p = super(SelfBalancingBinaryTree, self).search(elements[j].key, parent=True) - self.tree[e] = None - self.tree[self.root_idx].right = None - return other - -class RedBlackTree(SelfBalancingBinaryTree): - """ - Represents Red Black trees. - - Examples - ======== - - >>> from pydatastructs.trees import RedBlackTree as RB - >>> b = RB() - >>> b.insert(1, 1) - >>> b.insert(2, 2) - >>> child = b.tree[b.root_idx].right - >>> b.tree[child].data - 2 - >>> b.search(1) - 0 - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Red%E2%80%93black_tree - - See Also - ======== - - pydatastructs.trees.binary_trees.SelfBalancingBinaryTree - """ - - def __new__(cls, key=None, root_data=None, comp=None, - is_order_statistic=False, **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.CPP: - if comp is None: - comp = lambda key1, key2: key1 < key2 - return _trees.RedBlackTree(key, root_data, comp, is_order_statistic, **kwargs) # If any argument is not given, then it is passed as None, except for comp - return super().__new__(cls, key, root_data, comp, is_order_statistic, **kwargs) - - @classmethod - def methods(cls): - return ['__new__', 'insert', 'delete'] - - def _get_parent(self, node_idx): - return self.tree[node_idx].parent - - def _get_grand_parent(self, node_idx): - parent_idx=self._get_parent(node_idx) - return self.tree[parent_idx].parent - - def _get_sibling(self, node_idx): - parent_idx=self._get_parent(node_idx) - if parent_idx is None: - return None - node = self.tree[parent_idx] - if node_idx==node.left: - sibling_idx=node.right - return sibling_idx - else: - sibling_idx=node.left - return sibling_idx - - def _get_uncle(self, node_idx): - parent_idx=self._get_parent(node_idx) - return self._get_sibling(parent_idx) - - def _is_onleft(self, node_idx): - parent = self._get_parent(node_idx) - if self.tree[parent].left == node_idx: - return True - return False - - def _is_onright(self, node_idx): - if self._is_onleft(node_idx) is False: - return True - return False - - def __fix_insert(self, node_idx): - while self._get_parent(node_idx) is not None and \ - self.tree[self._get_parent(node_idx)].color == 1 and self.tree[node_idx].color==1: - parent_idx=self._get_parent(node_idx) - grand_parent_idx=self._get_grand_parent(node_idx) - uncle_idx = self._get_uncle(node_idx) - if uncle_idx is not None and self.tree[uncle_idx].color == 1: - self.tree[uncle_idx].color = 0 - self.tree[parent_idx].color = 0 - 
self.tree[grand_parent_idx].color = 1 - node_idx= grand_parent_idx - else: - self.tree[self.root_idx].is_root=False - if self._is_onright(parent_idx): - if self._is_onleft(node_idx): - self._right_rotate(parent_idx, node_idx) - node_idx=parent_idx - parent_idx=self._get_parent(node_idx) - node_idx=parent_idx - parent_idx=self._get_parent(node_idx) - self._left_rotate(parent_idx, node_idx) - elif self._is_onleft(parent_idx): - if self._is_onright(node_idx): - self._left_rotate(parent_idx, node_idx) - node_idx=parent_idx - parent_idx=self._get_parent(node_idx) - node_idx=parent_idx - parent_idx=self._get_parent(node_idx) - self._right_rotate(parent_idx, node_idx) - self.tree[node_idx].color = 0 - self.tree[parent_idx].color = 1 - self.tree[self.root_idx].is_root=True - if self.tree[node_idx].is_root: - break - self.tree[self.root_idx].color=0 - - def insert(self, key, data=None): - super(RedBlackTree, self).insert(key, data) - node_idx = super(RedBlackTree, self).search(key) - node = self.tree[node_idx] - new_node = RedBlackTreeNode(key, data) - new_node.parent = node.parent - new_node.left = node.left - new_node.right = node.right - self.tree[node_idx] = new_node - if node.is_root: - self.tree[node_idx].is_root = True - self.tree[node_idx].color=0 - elif self.tree[self.tree[node_idx].parent].color==1: - self.__fix_insert(node_idx) - - def _find_predecessor(self, node_idx): - while self.tree[node_idx].right is not None: - node_idx = self.tree[node_idx].right - return node_idx - - def _transplant_values(self, node_idx1, node_idx2): - parent = self.tree[node_idx1].parent - if self.tree[node_idx1].is_root and self._has_one_child(node_idx1): - self.tree[self.root_idx].key = self.tree[node_idx2].key - self.tree[self.root_idx].data = self.tree[node_idx2].data - self.tree[self.root_idx].left = self.tree[node_idx2].left - self.tree[self.root_idx].right = self.tree[node_idx2].right - self.tree[node_idx1].parent = None - return self.tree[self.root_idx].key - else: - 
self.tree[node_idx1].key = self.tree[node_idx2].key - self.tree[node_idx1].data = self.tree[node_idx2].data - - def _has_one_child(self, node_idx): - if self._is_leaf(node_idx) is False and self._has_two_child(node_idx) is False: - return True - return False - - def _is_leaf(self, node_idx): - if self.tree[node_idx].left is None and self.tree[node_idx].right is None: - return True - return False - - def _has_two_child(self, node_idx): - if self.tree[node_idx].left is not None and self.tree[node_idx].right is not None: - return True - return False - - def __has_red_child(self, node_idx): - left_idx = self.tree[node_idx].left - right_idx = self.tree[node_idx].right - if (left_idx is not None and self.tree[left_idx].color == 1) or \ - (right_idx is not None and self.tree[right_idx].color == 1): - return True - return False - - def _replace_node(self, node_idx): - if self._is_leaf(node_idx): - return None - elif self._has_one_child(node_idx): - if self.tree[node_idx].left is not None: - child = self.tree[node_idx].left - else: - child = self.tree[node_idx].right - return child - else: - return self._find_predecessor(self.tree[node_idx].left) - - def __walk1_walk_isblack(self, color, node_idx1): - if (node_idx1 is None or self.tree[node_idx1].color == 0) and (color == 0): - return True - return False - - def __left_left_siblingcase(self, node_idx): - left_idx = self.tree[node_idx].left - parent = self._get_parent(node_idx) - parent_color = self.tree[parent].color - self.tree[left_idx].color = self.tree[node_idx].color - self.tree[node_idx].color = parent_color - self._right_rotate(parent, node_idx) - - def __right_left_siblingcase(self, node_idx): - left_idx = self.tree[node_idx].left - parent = self._get_parent(node_idx) - parent_color = self.tree[parent].color - self.tree[left_idx].color = parent_color - self._right_rotate(node_idx, left_idx) - child = self._get_parent(node_idx) - self._left_rotate(parent, child) - - def __left_right_siblingcase(self, node_idx): - 
right_idx = self.tree[node_idx].right - parent = self._get_parent(node_idx) - parent_color = self.tree[parent].color - self.tree[right_idx].color = parent_color - self._left_rotate(node_idx, right_idx) - child = self._get_parent(node_idx) - self._right_rotate(parent, child) - - def __right_right_siblingcase(self, node_idx): - right_idx = self.tree[node_idx].right - parent = self._get_parent(node_idx) - parent_color = self.tree[parent].color - self.tree[right_idx].color = self.tree[node_idx].color - self.tree[node_idx].color = parent_color - self._left_rotate(parent, node_idx) - - def __fix_deletion(self, node_idx): - node = self.tree[node_idx] - color = node.color - while node_idx!= self.root_idx and color == 0: - sibling_idx = self._get_sibling(node_idx) - parent_idx = self._get_parent(node_idx) - if sibling_idx is None: - node_idx = parent_idx - continue - else: - if self.tree[sibling_idx].color == 1: - self.tree[self.root_idx].is_root = False - self.tree[parent_idx].color = 1 - self.tree[sibling_idx].color = 0 - if self._is_onleft(sibling_idx): - self._right_rotate(parent_idx, sibling_idx) - else: - self._left_rotate(parent_idx, sibling_idx) - self.tree[self.root_idx].is_root = True - continue - else: - if self.__has_red_child(sibling_idx): - self.tree[self.root_idx].is_root = False - left_idx = self.tree[sibling_idx].left - if self.tree[sibling_idx].left is not None and \ - self.tree[left_idx].color == 1: - if self._is_onleft(sibling_idx): - self.__left_left_siblingcase(sibling_idx) - else: - self.__right_left_siblingcase(sibling_idx) - else: - if self._is_onleft(sibling_idx): - self.__left_right_siblingcase(sibling_idx) - else: - self.__right_right_siblingcase(sibling_idx) - self.tree[self.root_idx].is_root = True - self.tree[parent_idx].color = 0 - else: - self.tree[sibling_idx].color = 1 - if self.tree[parent_idx].color == 0: - node_idx = parent_idx - continue - else: - self.tree[parent_idx].color = 0 - color = 1 - - def _remove_node(self, node_idx): - 
parent = self._get_parent(node_idx) - a = parent - if self._is_leaf(node_idx): - par_key, root_key = (self.tree[parent].key, self.tree[self.root_idx].key) - new_indices = self.tree.delete(node_idx) - if new_indices is not None: - a = new_indices[par_key] - self.root_idx = new_indices[root_key] - elif self._has_one_child(node_idx): - child = self._replace_node(node_idx) - parent = self._get_parent(node_idx) - par_key, root_key = (self.tree[parent].key, self.tree[self.root_idx].key) - new_indices = self.tree.delete(node_idx) - self._update_size(a) - - def _delete_root(self, node_idx, node_idx1): - if self._is_leaf(node_idx): - self.tree[self.root_idx].data = None - self.tree[self.root_idx].key = None - elif self._has_one_child(node_idx): - root_key = self._transplant_values(node_idx, node_idx1) - new_indices = self.tree.delete(node_idx1) - if new_indices is not None: - self.root_idx = new_indices[root_key] - - def __leaf_case(self, node_idx, node_idx1): - walk = node_idx - walk1 = node_idx1 - parent = self._get_parent(node_idx) - color = self.tree[walk].color - if parent is None: - self._delete_root(walk, walk1) - else: - if self.__walk1_walk_isblack(color, walk1): - self.__fix_deletion(walk) - else: - sibling_idx = self._get_sibling(walk) - if sibling_idx is not None: - self.tree[sibling_idx].color = 1 - if self._is_onleft(walk): - self.tree[parent].left = None - else: - self.tree[parent].right = None - self._remove_node(walk) - - def __one_child_case(self, node_idx, node_idx1): - walk = node_idx - walk1 = node_idx1 - walk_original_color = self.tree[walk].color - parent = self._get_parent(node_idx) - if parent is None: - self._delete_root(walk, walk1) - else: - if self._is_onleft(walk): - self.tree[parent].left = walk1 - else: - self.tree[parent].right = walk1 - self.tree[walk1].parent = parent - a = self._remove_node(walk) - if self.__walk1_walk_isblack(walk_original_color, walk1): - self.__fix_deletion(walk1) - else: - self.tree[walk1].color = 0 - - def 
__two_child_case(self, node_idx): - walk = node_idx - successor = self._replace_node(walk) - self._transplant_values(walk, successor) - walk = successor - walk1 = self._replace_node(walk) - return walk, walk1 - - def delete(self, key, **kwargs): - walk = super(RedBlackTree, self).search(key) - if walk is not None: - walk1 = self._replace_node(walk) - if self._has_two_child(walk): - walk, walk1 = self.__two_child_case(walk) - if self._is_leaf(walk): - self.__leaf_case(walk, walk1) - elif self._has_one_child(walk): - self.__one_child_case(walk, walk1) - return True - else: - return None - -class BinaryTreeTraversal(object): - """ - Represents the traversals possible in - a binary tree. - - Parameters - ========== - - tree: BinaryTree - The binary tree for whose traversal - is to be done. - backend: pydatastructs.Backend - The backend to be used. Available backends: Python and C++ - Optional, by default, the Python backend is used. For faster execution, use the C++ backend. - - Traversals - ========== - - - Depth First Search - In Order, Post Order, Pre Order Out Order - - - Breadth First Search - - Examples - ======== - - >>> from pydatastructs import BinarySearchTree as BST - >>> from pydatastructs import BinaryTreeTraversal as BTT - >>> b = BST(2, 2) - >>> b.insert(1, 1) - >>> b.insert(3, 3) - >>> trav = BTT(b) - >>> dfs = trav.depth_first_search() - >>> [str(n) for n in dfs] - ['(None, 1, 1, None)', '(1, 2, 2, 2)', '(None, 3, 3, None)'] - >>> bfs = trav.breadth_first_search() - >>> [str(n) for n in bfs] - ['(1, 2, 2, 2)', '(None, 1, 1, None)', '(None, 3, 3, None)'] - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Tree_traversal - """ - - @classmethod - def methods(cls): - return ['__new__', 'depth_first_search', - 'breadth_first_search'] - - __slots__ = ['tree'] - - def __new__(cls, tree, **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.CPP: - return _trees.BinaryTreeTraversal(tree, **kwargs) - if not isinstance(tree, BinaryTree): - raise TypeError("%s is not a binary tree"%(tree)) - obj = object.__new__(cls) - obj.tree = tree - return obj - - def _pre_order(self, node): - """ - Utility method for computing pre-order - of a binary tree using iterative algorithm. - """ - visit = [] - tree, size = self.tree.tree, self.tree.size - s = Stack() - s.push(node) - while not s.is_empty: - node = s.pop() - visit.append(tree[node]) - if tree[node].right is not None: - s.push(tree[node].right) - if tree[node].left is not None: - s.push(tree[node].left) - return visit - - def _in_order(self, node): - """ - Utility method for computing in-order - of a binary tree using iterative algorithm. - """ - visit = [] - tree, size = self.tree.tree, self.tree.size - s = Stack() - while not s.is_empty or node is not None: - if node is not None: - s.push(node) - node = tree[node].left - else: - node = s.pop() - visit.append(tree[node]) - node = tree[node].right - return visit - - def _post_order(self, node): - """ - Utility method for computing post-order - of a binary tree using iterative algorithm. 
- """ - visit = [] - tree, size = self.tree.tree, self.tree.size - s = Stack() - s.push(node) - last = OneDimensionalArray(int, size) - last.fill(False) - while not s.is_empty: - node = s.peek - l, r = tree[node].left, tree[node].right - cl, cr = l is None or last[l], r is None or last[r] - if cl and cr: - s.pop() - visit.append(tree[node]) - last[node] = True - continue - if not cr: - s.push(r) - if not cl: - s.push(l) - return visit - - def _out_order(self, node): - """ - Utility method for computing out-order - of a binary tree using iterative algorithm. - """ - return reversed(self._in_order(node)) - - def depth_first_search(self, order='in_order', node=None): - """ - Computes the depth first search traversal of the binary - trees. - - Parameters - ========== - - order : str - One of the strings, 'in_order', 'post_order', - 'pre_order', 'out_order'. - By default, it is set to, 'in_order'. - node : int - The index of the node from where the traversal - is to be instantiated. - - Returns - ======= - - list - Each element is of type 'TreeNode'. - """ - if node is None: - node = self.tree.root_idx - if order not in ('in_order', 'post_order', 'pre_order', 'out_order'): - raise NotImplementedError( - "%s order is not implemented yet." - "We only support `in_order`, `post_order`, " - "`pre_order` and `out_order` traversals.") - return getattr(self, '_' + order)(node) - - def breadth_first_search(self, node=None, strategy='queue'): - """ - Computes the breadth first search traversal of a binary tree. - - Parameters - ========== - - node : int - The index of the node from where the traversal has to be instantiated. - By default, set to, root index. - - strategy : str - The strategy using which the computation has to happen. - By default, it is set 'queue'. - - Returns - ======= - - list - Each element of the list is of type `TreeNode`. 
- """ - # TODO: IMPLEMENT ITERATIVE DEEPENING-DEPTH FIRST SEARCH STRATEGY - strategies = ('queue',) - if strategy not in strategies: - raise NotImplementedError( - "%s startegy is not implemented yet"%(strategy)) - if node is None: - node = self.tree.root_idx - q, visit, tree = Queue(), [], self.tree.tree - q.append(node) - while len(q) > 0: - node = q.popleft() - visit.append(tree[node]) - if tree[node].left is not None: - q.append(tree[node].left) - if tree[node].right is not None: - q.append(tree[node].right) - return visit - -class BinaryIndexedTree(object): - """ - Represents binary indexed trees - a.k.a fenwick trees. - - Parameters - ========== - - array: list/tuple - The array whose elements are to be - considered for the queries. - backend: pydatastructs.Backend - The backend to be used. Available backends: Python and C++ - Optional, by default, the Python backend is used. For faster execution, use the C++ backend. - - Examples - ======== - - >>> from pydatastructs import BinaryIndexedTree - >>> bit = BinaryIndexedTree([1, 2, 3]) - >>> bit.get_sum(0, 2) - 6 - >>> bit.update(0, 100) - >>> bit.get_sum(0, 2) - 105 - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Fenwick_tree - """ - - __slots__ = ['tree', 'array', 'flag'] - - def __new__(cls, array, **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.CPP: - return _trees.BinaryIndexedTree(type(array[0]), array, **kwargs) - obj = object.__new__(cls) - obj.array = OneDimensionalArray(type(array[0]), array) - obj.tree = [0] * (obj.array._size + 2) - obj.flag = [0] * (obj.array._size) - for index in range(obj.array._size): - obj.update(index, array[index]) - return obj - - @classmethod - def methods(cls): - return ['update', 'get_prefix_sum', - 'get_sum'] - - def update(self, index, value): - """ - Updates value at the given index. - - Parameters - ========== - - index: int - Index of element to be updated. - - value - The value to be inserted. 
- """ - _index, _value = index, value - if self.flag[index] == 0: - self.flag[index] = 1 - index += 1 - while index < self.array._size + 1: - self.tree[index] += value - index = index + (index & (-index)) - else: - value = value - self.array[index] - index += 1 - while index < self.array._size + 1: - self.tree[index] += value - index = index + (index & (-index)) - self.array[_index] = _value - - def get_prefix_sum(self, index): - """ - Computes sum of elements from index 0 to given index. - - Parameters - ========== - - index: int - Index till which sum has to be calculated. - - Returns - ======= - - sum: int - The required sum. - """ - index += 1 - sum = 0 - while index > 0: - sum += self.tree[index] - index = index - (index & (-index)) - return sum - - def get_sum(self, left_index, right_index): - """ - Get sum of elements from left index to right index. - - Parameters - ========== - - left_index: int - Starting index from where sum has to be computed. - - right_index: int - Ending index till where sum has to be computed. - - Returns - ======= - - sum: int - The required sum - """ - if left_index >= 1: - return self.get_prefix_sum(right_index) - \ - self.get_prefix_sum(left_index - 1) - else: - return self.get_prefix_sum(right_index) diff --git a/lib/python3.12/site-packages/pydatastructs/trees/heaps.py b/lib/python3.12/site-packages/pydatastructs/trees/heaps.py deleted file mode 100644 index 12133a6f1..000000000 --- a/lib/python3.12/site-packages/pydatastructs/trees/heaps.py +++ /dev/null @@ -1,582 +0,0 @@ -from pydatastructs.utils.misc_util import ( - _check_type, TreeNode, BinomialTreeNode, - Backend, raise_if_backend_is_not_python) -from pydatastructs.linear_data_structures.arrays import ( - DynamicOneDimensionalArray, Array) -from pydatastructs.miscellaneous_data_structures.binomial_trees import BinomialTree - -__all__ = [ - 'BinaryHeap', - 'TernaryHeap', - 'DHeap', - 'BinomialHeap' -] - -class Heap(object): - """ - Abstract class for representing heaps. 
- """ - pass - - -class DHeap(Heap): - """ - Represents D-ary Heap. - - Parameters - ========== - - elements: list, tuple, Array - Optional, by default 'None'. - list/tuple/Array of initial TreeNode in Heap. - heap_property: str - If the key stored in each node is - either greater than or equal to - the keys in the node's children - then pass 'max'. - If the key stored in each node is - either less than or equal to - the keys in the node's children - then pass 'min'. - By default, the heap property is - set to 'min'. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs.trees.heaps import DHeap - >>> min_heap = DHeap(heap_property="min", d=3) - >>> min_heap.insert(1, 1) - >>> min_heap.insert(5, 5) - >>> min_heap.insert(7, 7) - >>> min_heap.extract().key - 1 - >>> min_heap.insert(4, 4) - >>> min_heap.extract().key - 4 - - >>> max_heap = DHeap(heap_property='max', d=2) - >>> max_heap.insert(1, 1) - >>> max_heap.insert(5, 5) - >>> max_heap.insert(7, 7) - >>> max_heap.extract().key - 7 - >>> max_heap.insert(6, 6) - >>> max_heap.extract().key - 6 - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/D-ary_heap - """ - __slots__ = ['_comp', 'heap', 'd', 'heap_property', '_last_pos_filled'] - - def __new__(cls, elements=None, heap_property="min", d=4, - **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = Heap.__new__(cls) - obj.heap_property = heap_property - obj.d = d - if heap_property == "min": - obj._comp = lambda key_parent, key_child: key_parent <= key_child - elif heap_property == "max": - obj._comp = lambda key_parent, key_child: key_parent >= key_child - else: - raise ValueError("%s is invalid heap property"%(heap_property)) - if elements is None: - elements = DynamicOneDimensionalArray(TreeNode, 0) - elif _check_type(elements, (list,tuple)): - elements = DynamicOneDimensionalArray(TreeNode, len(elements), elements) - elif _check_type(elements, Array): - elements = DynamicOneDimensionalArray(TreeNode, len(elements), elements._data) - else: - raise ValueError(f'Expected a list/tuple/Array of TreeNode got {type(elements)}') - obj.heap = elements - obj._last_pos_filled = obj.heap._last_pos_filled - obj._build() - return obj - - @classmethod - def methods(cls): - return ['__new__', 'insert', 'extract', '__str__', 'is_empty'] - - def _build(self): - for i in range(self._last_pos_filled + 1): - self.heap[i]._leftmost, self.heap[i]._rightmost = \ - self.d*i + 1, self.d*i + self.d - for i in range((self._last_pos_filled + 1)//self.d, -1, -1): - self._heapify(i) - - def _swap(self, idx1, idx2): - idx1_key, idx1_data = \ - self.heap[idx1].key, self.heap[idx1].data - self.heap[idx1].key, self.heap[idx1].data = \ - self.heap[idx2].key, self.heap[idx2].data - self.heap[idx2].key, self.heap[idx2].data = \ - idx1_key, idx1_data - - def _heapify(self, i): - while True: - target = i - l = self.d*i + 1 - r = self.d*i + self.d - - for j in range(l, r+1): - if j <= self._last_pos_filled: - target = j if self._comp(self.heap[j].key, self.heap[target].key) \ - else target - else: - break - - if 
target != i: - self._swap(target, i) - i = target - else: - break - - def insert(self, key, data=None): - """ - Insert a new element to the heap according to heap property. - - Parameters - ========== - - key - The key for comparison. - data - The data to be inserted. - - Returns - ======= - - None - """ - new_node = TreeNode(key, data) - self.heap.append(new_node) - self._last_pos_filled += 1 - i = self._last_pos_filled - self.heap[i]._leftmost, self.heap[i]._rightmost = self.d*i + 1, self.d*i + self.d - - while True: - parent = (i - 1)//self.d - if i == 0 or self._comp(self.heap[parent].key, self.heap[i].key): - break - else: - self._swap(i, parent) - i = parent - - def extract(self): - """ - Extract root element of the Heap. - - Returns - ======= - - root_element: TreeNode - The TreeNode at the root of the heap, - if the heap is not empty. - - None - If the heap is empty. - """ - if self._last_pos_filled == -1: - raise IndexError("Heap is empty.") - else: - element_to_be_extracted = TreeNode(self.heap[0].key, self.heap[0].data) - self._swap(0, self._last_pos_filled) - self.heap.delete(self._last_pos_filled) - self._last_pos_filled -= 1 - self._heapify(0) - return element_to_be_extracted - - def __str__(self): - to_be_printed = ['' for i in range(self._last_pos_filled + 1)] - for i in range(self._last_pos_filled + 1): - node = self.heap[i] - if node._leftmost <= self._last_pos_filled: - if node._rightmost <= self._last_pos_filled: - children = list(range(node._leftmost, node._rightmost + 1)) - else: - children = list(range(node._leftmost, self._last_pos_filled + 1)) - else: - children = [] - to_be_printed[i] = (node.key, node.data, children) - return str(to_be_printed) - - @property - def is_empty(self): - """ - Checks if the heap is empty. - """ - return self.heap._last_pos_filled == -1 - - -class BinaryHeap(DHeap): - """ - Represents Binary Heap. - - Parameters - ========== - - elements: list, tuple - Optional, by default 'None'. 
- List/tuple of initial elements in Heap. - heap_property: str - If the key stored in each node is - either greater than or equal to - the keys in the node's children - then pass 'max'. - If the key stored in each node is - either less than or equal to - the keys in the node's children - then pass 'min'. - By default, the heap property is - set to 'min'. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs.trees.heaps import BinaryHeap - >>> min_heap = BinaryHeap(heap_property="min") - >>> min_heap.insert(1, 1) - >>> min_heap.insert(5, 5) - >>> min_heap.insert(7, 7) - >>> min_heap.extract().key - 1 - >>> min_heap.insert(4, 4) - >>> min_heap.extract().key - 4 - - >>> max_heap = BinaryHeap(heap_property='max') - >>> max_heap.insert(1, 1) - >>> max_heap.insert(5, 5) - >>> max_heap.insert(7, 7) - >>> max_heap.extract().key - 7 - >>> max_heap.insert(6, 6) - >>> max_heap.extract().key - 6 - - References - ========== - - .. [1] https://en.m.wikipedia.org/wiki/Binary_heap - """ - def __new__(cls, elements=None, heap_property="min", - **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = DHeap.__new__(cls, elements, heap_property, 2) - return obj - - @classmethod - def methods(cls): - return ['__new__'] - - -class TernaryHeap(DHeap): - """ - Represents Ternary Heap. - - Parameters - ========== - - elements: list, tuple - Optional, by default 'None'. - List/tuple of initial elements in Heap. - heap_property: str - If the key stored in each node is - either greater than or equal to - the keys in the node's children - then pass 'max'. - If the key stored in each node is - either less than or equal to - the keys in the node's children - then pass 'min'. - By default, the heap property is - set to 'min'. - backend: pydatastructs.Backend - The backend to be used. 
- Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs.trees.heaps import TernaryHeap - >>> min_heap = TernaryHeap(heap_property="min") - >>> min_heap.insert(1, 1) - >>> min_heap.insert(5, 5) - >>> min_heap.insert(7, 7) - >>> min_heap.insert(3, 3) - >>> min_heap.extract().key - 1 - >>> min_heap.insert(4, 4) - >>> min_heap.extract().key - 3 - - >>> max_heap = TernaryHeap(heap_property='max') - >>> max_heap.insert(1, 1) - >>> max_heap.insert(5, 5) - >>> max_heap.insert(7, 7) - >>> min_heap.insert(3, 3) - >>> max_heap.extract().key - 7 - >>> max_heap.insert(6, 6) - >>> max_heap.extract().key - 6 - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/D-ary_heap - .. [2] https://ece.uwaterloo.ca/~dwharder/aads/Algorithms/d-ary_heaps/Ternary_heaps/ - """ - def __new__(cls, elements=None, heap_property="min", - **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = DHeap.__new__(cls, elements, heap_property, 3) - return obj - - @classmethod - def methods(cls): - return ['__new__'] - - -class BinomialHeap(Heap): - """ - Represents binomial heap. - - Parameters - ========== - - root_list: list/tuple/Array - By default, [] - The list of BinomialTree object references - in sorted order. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs import BinomialHeap - >>> b = BinomialHeap() - >>> b.insert(1, 1) - >>> b.insert(2, 2) - >>> b.find_minimum().key - 1 - >>> b.find_minimum().children[0].key - 2 - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Binomial_heap - """ - __slots__ = ['root_list'] - - def __new__(cls, root_list=None, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - if root_list is None: - root_list = [] - if not all((_check_type(root, BinomialTree)) - for root in root_list): - raise TypeError("The root_list should contain " - "references to objects of BinomialTree.") - obj = Heap.__new__(cls) - obj.root_list = root_list - return obj - - @classmethod - def methods(cls): - return ['__new__', 'merge_tree', 'merge', 'insert', - 'find_minimum', 'is_emtpy', 'decrease_key', 'delete', - 'delete_minimum'] - - def merge_tree(self, tree1, tree2): - """ - Merges two BinomialTree objects. - - Parameters - ========== - - tree1: BinomialTree - - tree2: BinomialTree - """ - if (not _check_type(tree1, BinomialTree)) or \ - (not _check_type(tree2, BinomialTree)): - raise TypeError("Both the trees should be of type " - "BinomalTree.") - ret_value = None - if tree1.root.key <= tree2.root.key: - tree1.add_sub_tree(tree2) - ret_value = tree1 - else: - tree2.add_sub_tree(tree1) - ret_value = tree2 - return ret_value - - def _merge_heap_last_new_tree(self, new_root_list, new_tree): - """ - Merges last tree node in root list with the incoming tree. - """ - pos = -1 - if len(new_root_list) > 0 and new_root_list[pos].order == new_tree.order: - new_root_list[pos] = self.merge_tree(new_root_list[pos], new_tree) - else: - new_root_list.append(new_tree) - - def merge(self, other_heap): - """ - Merges current binomial heap with the given binomial heap. 
- - Parameters - ========== - - other_heap: BinomialHeap - """ - if not _check_type(other_heap, BinomialHeap): - raise TypeError("Other heap is not of type BinomialHeap.") - new_root_list = [] - i, j = 0, 0 - while (i < len(self.root_list)) and \ - (j < len(other_heap.root_list)): - new_tree = None - while self.root_list[i] is None: - i += 1 - while other_heap.root_list[j] is None: - j += 1 - if self.root_list[i].order == other_heap.root_list[j].order: - new_tree = self.merge_tree(self.root_list[i], - other_heap.root_list[j]) - i += 1 - j += 1 - else: - if self.root_list[i].order < other_heap.root_list[j].order: - new_tree = self.root_list[i] - i += 1 - else: - new_tree = other_heap.root_list[j] - j += 1 - self._merge_heap_last_new_tree(new_root_list, new_tree) - - while i < len(self.root_list): - new_tree = self.root_list[i] - self._merge_heap_last_new_tree(new_root_list, new_tree) - i += 1 - while j < len(other_heap.root_list): - new_tree = other_heap.root_list[j] - self._merge_heap_last_new_tree(new_root_list, new_tree) - j += 1 - self.root_list = new_root_list - - def insert(self, key, data=None): - """ - Inserts new node with the given key and data. - - key - The key of the node which can be operated - upon by relational operators. - - data - The data to be stored in the new node. - """ - new_node = BinomialTreeNode(key, data) - new_tree = BinomialTree(root=new_node, order=0) - new_heap = BinomialHeap(root_list=[new_tree]) - self.merge(new_heap) - - def find_minimum(self, **kwargs): - """ - Finds the node with the minimum key. 
- - Returns - ======= - - min_node: BinomialTreeNode - """ - if self.is_empty: - raise IndexError("Binomial heap is empty.") - min_node = None - idx, min_idx = 0, None - for tree in self.root_list: - if ((min_node is None) or - (tree is not None and tree.root is not None and - min_node.key > tree.root.key)): - min_node = tree.root - min_idx = idx - idx += 1 - if kwargs.get('get_index', None) is not None: - return min_node, min_idx - return min_node - - def delete_minimum(self): - """ - Deletes the node with minimum key. - """ - min_node, min_idx = self.find_minimum(get_index=True) - child_root_list = [] - for k, child in enumerate(min_node.children): - if child is not None: - child_root_list.append(BinomialTree(root=child, order=k)) - self.root_list.remove(self.root_list[min_idx]) - child_heap = BinomialHeap(root_list=child_root_list) - self.merge(child_heap) - - @property - def is_empty(self): - return not self.root_list - - def decrease_key(self, node, new_key): - """ - Decreases the key of the given node. - - Parameters - ========== - - node: BinomialTreeNode - The node whose key is to be reduced. - new_key - The new key of the given node, - should be less than the current key. - """ - if node.key <= new_key: - raise ValueError("The new key " - "should be less than current node's key.") - node.key = new_key - while ((not node.is_root) and - (node.parent.key > node.key)): - node.parent.key, node.key = \ - node.key, node.parent.key - node.parent.data, node.data = \ - node.data, node.parent.data - node = node.parent - - def delete(self, node): - """ - Deletes the given node. - - Parameters - ========== - - node: BinomialTreeNode - The node which is to be deleted. 
- """ - self.decrease_key(node, self.find_minimum().key - 1) - self.delete_minimum() diff --git a/lib/python3.12/site-packages/pydatastructs/trees/m_ary_trees.py b/lib/python3.12/site-packages/pydatastructs/trees/m_ary_trees.py deleted file mode 100644 index a06fda9ee..000000000 --- a/lib/python3.12/site-packages/pydatastructs/trees/m_ary_trees.py +++ /dev/null @@ -1,172 +0,0 @@ -from pydatastructs.utils import MAryTreeNode -from pydatastructs.linear_data_structures.arrays import ArrayForTrees -from pydatastructs.utils.misc_util import ( - Backend, raise_if_backend_is_not_python) - -__all__ = [ - 'MAryTree' -] - -class MAryTree(object): - """ - Abstract m-ary tree. - - Parameters - ========== - - key - Required if tree is to be instantiated with - root otherwise not needed. - root_data - Optional, the root node of the binary tree. - If not of type MAryTreeNode, it will consider - root as data and a new root node will - be created. - comp: lambda - Optional, A lambda function which will be used - for comparison of keys. Should return a - bool value. By default it implements less - than operator. - is_order_statistic: bool - Set it to True, if you want to use the - order statistic features of the tree. - max_children - Optional, specifies the maximum number of children - a node can have. Defaults to 2 in case nothing is - specified. - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/M-ary_tree - """ - - __slots__ = ['root_idx', 'max_children', 'comparator', 'tree', 'size', - 'is_order_statistic'] - - - def __new__(cls, key=None, root_data=None, comp=None, - is_order_statistic=False, max_children=2, - **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = object.__new__(cls) - if key is None and root_data is not None: - raise ValueError('Key required.') - key = None if root_data is None else key - root = MAryTreeNode(key, root_data) - root.is_root = True - obj.root_idx = 0 - obj.max_children = max_children - obj.tree, obj.size = ArrayForTrees(MAryTreeNode, [root]), 1 - obj.comparator = lambda key1, key2: key1 < key2 \ - if comp is None else comp - obj.is_order_statistic = is_order_statistic - return obj - - @classmethod - def methods(cls): - return ['__new__', '__str__'] - - def insert(self, key, data=None): - """ - Inserts data by the passed key using iterative - algorithm. - - Parameters - ========== - - key - The key for comparison. - data - The data to be inserted. - - Returns - ======= - - None - """ - raise NotImplementedError("This is an abstract method.") - - def delete(self, key, **kwargs): - """ - Deletes the data with the passed key - using iterative algorithm. - - Parameters - ========== - - key - The key of the node which is - to be deleted. - - Returns - ======= - - True - If the node is deleted successfully. - - None - If the node to be deleted doesn't exists. - - Note - ==== - - The node is deleted means that the connection to that - node are removed but the it is still in tree. - """ - raise NotImplementedError("This is an abstract method.") - - def search(self, key, **kwargs): - """ - Searches for the data in the binary search tree - using iterative algorithm. - - Parameters - ========== - - key - The key for searching. - parent: bool - If true then returns index of the - parent of the node with the passed - key. 
- By default, False - - Returns - ======= - - int - If the node with the passed key is - in the tree. - tuple - The index of the searched node and - the index of the parent of that node. - None - In all other cases. - """ - raise NotImplementedError("This is an abstract method.") - - def to_binary_tree(self): - """ - Converts an m-ary tree to a binary tree. - - Returns - ======= - - TreeNode - The root of the newly created binary tree. - """ - raise NotImplementedError("This is an abstract method.") - - - def __str__(self): - to_be_printed = ['' for i in range(self.tree._last_pos_filled + 1)] - for i in range(self.tree._last_pos_filled + 1): - if self.tree[i] is not None: - node = self.tree[i] - to_be_printed[i] = (node.key, node.data) - for j in node.children: - if j is not None: - to_be_printed[i].append(j) - return str(to_be_printed) diff --git a/lib/python3.12/site-packages/pydatastructs/trees/space_partitioning_trees.py b/lib/python3.12/site-packages/pydatastructs/trees/space_partitioning_trees.py deleted file mode 100644 index f13c1f280..000000000 --- a/lib/python3.12/site-packages/pydatastructs/trees/space_partitioning_trees.py +++ /dev/null @@ -1,242 +0,0 @@ -from pydatastructs.utils import TreeNode -from collections import deque as Queue -from pydatastructs.utils.misc_util import ( - _check_type, Backend, - raise_if_backend_is_not_python) - -__all__ = [ - 'OneDimensionalSegmentTree' -] - -class OneDimensionalSegmentTree(object): - """ - Represents one dimensional segment trees. - - Parameters - ========== - - segs: list/tuple/set - The segs should contains tuples/list/set of size 2 - denoting the start and end points of the intervals. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. 
- - Examples - ======== - - >>> from pydatastructs import OneDimensionalSegmentTree as ODST - >>> segt = ODST([(3, 8), (9, 20)]) - >>> segt.build() - >>> segt.tree[0].key - [False, 2, 3, False] - >>> len(segt.query(4)) - 1 - - Note - ==== - - All the segments are assumed to be closed intervals, - i.e., the ends are points of segments are also included in - computation. - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Segment_tree - - """ - - __slots__ = ['segments', 'tree', 'root_idx', 'cache'] - - def __new__(cls, segs, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = object.__new__(cls) - if any((not isinstance(seg, (tuple, list, set)) or len(seg) != 2) - for seg in segs): - raise ValueError('%s is invalid set of intervals'%(segs)) - for i in range(len(segs)): - segs[i] = list(segs[i]) - segs[i].sort() - obj.segments = list(segs) - obj.tree, obj.root_idx, obj.cache = [], None, False - return obj - - @classmethod - def methods(cls): - return ['build', 'query', '__str__'] - - def _union(self, i1, i2): - """ - Helper function for taking union of two - intervals. - """ - return TreeNode([i1.key[0], i1.key[1], i2.key[2], i2.key[3]], None) - - def _intersect(self, i1, i2): - """ - Helper function for finding intersection of two - intervals. - """ - if i1 is None or i2 is None: - return False - if i1.key[2] < i2.key[1] or i2.key[2] < i1.key[1]: - return False - c1, c2 = None, None - if i1.key[2] == i2.key[1]: - c1 = (i1.key[3] and i2.key[0]) - if i2.key[2] == i1.key[1]: - c2 = (i2.key[3] and i1.key[0]) - if c1 is False and c2 is False: - return False - return True - - def _contains(self, i1, i2): - """ - Helper function for checking if the first interval - is contained in second interval. 
- """ - if i1 is None or i2 is None: - return False - if i1.key[1] < i2.key[1] and i1.key[2] > i2.key[2]: - return True - if i1.key[1] == i2.key[1] and i1.key[2] > i2.key[2]: - return (i1.key[0] or not i2.key[0]) - if i1.key[1] < i2.key[1] and i1.key[2] == i2.key[2]: - return i1.key[3] or not i2.key[3] - if i1.key[1] == i2.key[1] and i1.key[2] == i2.key[2]: - return not ((not i1.key[3] and i2.key[3]) or (not i1.key[0] and i2.key[0])) - return False - - def _iterate(self, calls, I, idx): - """ - Helper function for filling the calls - stack. Used for imitating the stack based - approach used in recursion. - """ - if self.tree[idx].right is None: - rc = None - else: - rc = self.tree[self.tree[idx].right] - if self.tree[idx].left is None: - lc = None - else: - lc = self.tree[self.tree[idx].left] - if self._intersect(I, rc): - calls.append(self.tree[idx].right) - if self._intersect(I, lc): - calls.append(self.tree[idx].left) - return calls - - def build(self): - """ - Builds the segment tree from the segments, - using iterative algorithm based on queues. 
- """ - if self.cache: - return None - endpoints = [] - for segment in self.segments: - endpoints.extend(segment) - endpoints.sort() - - elem_int = Queue() - elem_int.append(TreeNode([False, endpoints[0] - 1, endpoints[0], False], None)) - i = 0 - while i < len(endpoints) - 1: - elem_int.append(TreeNode([True, endpoints[i], endpoints[i], True], None)) - elem_int.append(TreeNode([False, endpoints[i], endpoints[i+1], False], None)) - i += 1 - elem_int.append(TreeNode([True, endpoints[i], endpoints[i], True], None)) - elem_int.append(TreeNode([False, endpoints[i], endpoints[i] + 1, False], None)) - - self.tree = [] - while len(elem_int) > 1: - m = len(elem_int) - while m >= 2: - I1 = elem_int.popleft() - I2 = elem_int.popleft() - I = self._union(I1, I2) - I.left = len(self.tree) - I.right = len(self.tree) + 1 - self.tree.append(I1), self.tree.append(I2) - elem_int.append(I) - m -= 2 - if m & 1 == 1: - Il = elem_int.popleft() - elem_int.append(Il) - - Ir = elem_int.popleft() - Ir.left, Ir.right = -3, -2 - self.tree.append(Ir) - self.root_idx = -1 - - for segment in self.segments: - I = TreeNode([True, segment[0], segment[1], True], None) - calls = [self.root_idx] - while calls: - idx = calls.pop() - if self._contains(I, self.tree[idx]): - if self.tree[idx].data is None: - self.tree[idx].data = [] - self.tree[idx].data.append(I) - continue - calls = self._iterate(calls, I, idx) - self.cache = True - - def query(self, qx, init_node=None): - """ - Queries the segment tree. - - Parameters - ========== - - qx: int/float - The query point - - init_node: int - The index of the node from which the query process - is to be started. - - Returns - ======= - - intervals: set - The set of the intervals which contain the query - point. - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Segment_tree - """ - if not self.cache: - self.build() - if init_node is None: - init_node = self.root_idx - qn = TreeNode([True, qx, qx, True], None) - intervals = [] - calls = [init_node] - while calls: - idx = calls.pop() - if _check_type(self.tree[idx].data, list): - intervals.extend(self.tree[idx].data) - calls = self._iterate(calls, qn, idx) - return set(intervals) - - def __str__(self): - """ - Used for printing. - """ - if not self.cache: - self.build() - str_tree = [] - for seg in self.tree: - if seg.data is None: - data = None - else: - data = [str(sd) for sd in seg.data] - str_tree.append((seg.left, seg.key, data, seg.right)) - return str(str_tree) diff --git a/lib/python3.12/site-packages/pydatastructs/trees/tests/__init__.py b/lib/python3.12/site-packages/pydatastructs/trees/tests/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/lib/python3.12/site-packages/pydatastructs/trees/tests/test_binary_trees.py b/lib/python3.12/site-packages/pydatastructs/trees/tests/test_binary_trees.py deleted file mode 100644 index 826100b78..000000000 --- a/lib/python3.12/site-packages/pydatastructs/trees/tests/test_binary_trees.py +++ /dev/null @@ -1,820 +0,0 @@ -from pydatastructs.trees.binary_trees import ( - BinaryTree, BinarySearchTree, BinaryTreeTraversal, AVLTree, - ArrayForTrees, BinaryIndexedTree, SelfBalancingBinaryTree, SplayTree, CartesianTree, Treap, RedBlackTree) -from pydatastructs.utils.raises_util import raises -from pydatastructs.utils.misc_util import TreeNode -from copy import deepcopy -from pydatastructs.utils.misc_util import Backend -import random -from pydatastructs.utils._backend.cpp import _nodes - -def _test_BinarySearchTree(backend): - BST = BinarySearchTree - b = BST(8, 8, backend=backend) - b.delete(8) - b.insert(8, 8) - b.insert(3, 3) - b.insert(10, 10) - b.insert(1, 1) - b.insert(6, 6) - b.insert(4, 4) - b.insert(7, 7) - b.insert(14, 14) - b.insert(13, 13) - # Explicit check for 
the __str__ method of Binary Trees Class - assert str(b) == \ - ("[(1, 8, 8, 2), (3, 3, 3, 4), (None, 10, 10, 7), (None, 1, 1, None), " - "(5, 6, 6, 6), (None, 4, 4, None), (None, 7, 7, None), (8, 14, 14, None), " - "(None, 13, 13, None)]") - assert b.root_idx == 0 - - assert b.tree[0].left == 1 - assert b.tree[0].key == 8 - assert b.tree[0].data == 8 - assert b.tree[0].right == 2 - - trav = BinaryTreeTraversal(b, backend=backend) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [1, 3, 4, 6, 7, 8, 10, 13, 14] - assert [node.key for node in pre_order] == [8, 3, 1, 6, 4, 7, 10, 14, 13] - - assert b.search(10) == 2 - assert b.search(-1) is None - assert b.delete(13) is True - assert b.search(13) is None - assert b.delete(10) is True - assert b.search(10) is None - assert b.delete(3) is True - assert b.search(3) is None - assert b.delete(13) is None - - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [1, 4, 6, 7, 8, 14] - assert [node.key for node in pre_order] == [8, 4, 1, 6, 7, 14] - - b.delete(7) - b.delete(6) - b.delete(1) - b.delete(4) - - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [8, 14] - assert [node.key for node in pre_order] == [8, 14] - - bc = BST(1, 1, backend=backend) - assert bc.insert(1, 2) is None - - b = BST(-8, 8, backend=backend) - b.insert(-3, 3) - b.insert(-10, 10) - b.insert(-1, 1) - b.insert(-6, 6) - b.insert(-4, 4) - b.insert(-7, 7) - b.insert(-14, 14) - b.insert(-13, 13) - - b.delete(-13) - b.delete(-10) - b.delete(-3) - b.delete(-13) - assert str(b) == "[(7, -8, 8, 1), (4, -1, 1, None), '', '', (6, -6, 6, 5), (None, -4, 4, None), (None, -7, 7, None), (None, -14, 14, None)]" - - bl = BST(backend=backend) - nodes = [50, 30, 
90, 70, 100, 60, 80, 55, 20, 40, 15, 10, 16, 17, 18] - for node in nodes: - bl.insert(node, node) - - assert bl.lowest_common_ancestor(80, 55, 2) == 70 - assert bl.lowest_common_ancestor(60, 70, 2) == 70 - assert bl.lowest_common_ancestor(18, 18, 2) == 18 - assert bl.lowest_common_ancestor(40, 90, 2) == 50 - - assert bl.lowest_common_ancestor(18, 10, 2) == 15 - assert bl.lowest_common_ancestor(55, 100, 2) == 90 - assert bl.lowest_common_ancestor(16, 80, 2) == 50 - assert bl.lowest_common_ancestor(30, 55, 2) == 50 - - assert raises(ValueError, lambda: bl.lowest_common_ancestor(60, 200, 2)) - assert raises(ValueError, lambda: bl.lowest_common_ancestor(200, 60, 2)) - assert raises(ValueError, lambda: bl.lowest_common_ancestor(-3, 4, 2)) - - assert bl.lowest_common_ancestor(80, 55, 1) == 70 - assert bl.lowest_common_ancestor(60, 70, 1) == 70 - assert bl.lowest_common_ancestor(18, 18, 1) == 18 - assert bl.lowest_common_ancestor(40, 90, 1) == 50 - - assert bl.lowest_common_ancestor(18, 10, 1) == 15 - assert bl.lowest_common_ancestor(55, 100, 1) == 90 - assert bl.lowest_common_ancestor(16, 80, 1) == 50 - assert bl.lowest_common_ancestor(30, 55, 1) == 50 - - assert raises(ValueError, lambda: bl.lowest_common_ancestor(60, 200, 1)) - assert raises(ValueError, lambda: bl.lowest_common_ancestor(200, 60, 1)) - assert raises(ValueError, lambda: bl.lowest_common_ancestor(-3, 4, 1)) - -def test_BinarySearchTree(): - _test_BinarySearchTree(Backend.PYTHON) - -def test_cpp_BinarySearchTree(): - _test_BinarySearchTree(Backend.CPP) - -def _test_BinaryTreeTraversal(backend): - BST = BinarySearchTree - BTT = BinaryTreeTraversal - b = BST('F', 'F', backend=backend) - b.insert('B', 'B') - b.insert('A', 'A') - b.insert('G', 'G') - b.insert('D', 'D') - b.insert('C', 'C') - b.insert('E', 'E') - b.insert('I', 'I') - b.insert('H', 'H') - - trav = BTT(b, backend=backend) - pre = trav.depth_first_search(order='pre_order') - assert [node.key for node in pre] == ['F', 'B', 'A', 'D', 'C', 'E', 'G', 
'I', 'H'] - - ino = trav.depth_first_search() - assert [node.key for node in ino] == ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I'] - - out = trav.depth_first_search(order='out_order') - assert [node.key for node in out] == ['I', 'H', 'G', 'F', 'E', 'D', 'C', 'B', 'A'] - - post = trav.depth_first_search(order='post_order') - assert [node.key for node in post] == ['A', 'C', 'E', 'D', 'B', 'H', 'I', 'G', 'F'] - - bfs = trav.breadth_first_search() - assert [node.key for node in bfs] == ['F', 'B', 'G', 'A', 'D', 'I', 'C', 'E', 'H'] - - assert raises(NotImplementedError, lambda: trav.breadth_first_search(strategy='iddfs')) - assert raises(NotImplementedError, lambda: trav.depth_first_search(order='in_out_order')) - assert raises(TypeError, lambda: BTT(1)) - -def test_BinaryTreeTraversal(): - _test_BinaryTreeTraversal(Backend.PYTHON) - -def test_cpp_BinaryTreeTraversal(): - _test_BinaryTreeTraversal(Backend.CPP) - -def _test_AVLTree(backend): - a = AVLTree('M', 'M', backend=backend) - a.insert('N', 'N') - a.insert('O', 'O') - a.insert('L', 'L') - a.insert('K', 'K') - a.insert('Q', 'Q') - a.insert('P', 'P') - a.insert('H', 'H') - a.insert('I', 'I') - a.insert('A', 'A') - assert a.root_idx == 1 - - trav = BinaryTreeTraversal(a, backend=backend) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == ['A', 'H', 'I', 'K', 'L', 'M', 'N', 'O', 'P', 'Q'] - assert [node.key for node in pre_order] == ['N', 'I', 'H', 'A', 'L', 'K', 'M', 'P', 'O', 'Q'] - - assert [a.balance_factor(a.tree[i]) for i in range(a.tree.size) if a.tree[i] is not None] == \ - [0, -1, 0, 0, 0, 0, 0, -1, 0, 0] - a1 = AVLTree(1, 1, backend=backend) - a1.insert(2, 2) - a1.insert(3, 3) - a1.insert(4, 4) - a1.insert(5, 5) - - trav = BinaryTreeTraversal(a1, backend=backend) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in 
in_order] == [1, 2, 3, 4, 5] - assert [node.key for node in pre_order] == [2, 1, 4, 3, 5] - - a3 = AVLTree(-1, 1, backend=backend) - a3.insert(-2, 2) - a3.insert(-3, 3) - a3.insert(-4, 4) - a3.insert(-5, 5) - - trav = BinaryTreeTraversal(a3, backend=backend) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [-5, -4, -3, -2, -1] - assert [node.key for node in pre_order] == [-2, -4, -5, -3, -1] - - a2 = AVLTree(backend=backend) - a2.insert(1, 1) - a2.insert(1, 1) - - trav = BinaryTreeTraversal(a2, backend=backend) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [1] - assert [node.key for node in pre_order] == [1] - - a3 = AVLTree(backend=backend) - a3.set_tree( ArrayForTrees(TreeNode, 0, backend=backend) ) - for i in range(0,7): - a3.tree.append(TreeNode(i, i, backend=backend)) - a3.tree[0].left = 1 - a3.tree[0].right = 6 - a3.tree[1].left = 5 - a3.tree[1].right = 2 - a3.tree[2].left = 3 - a3.tree[2].right = 4 - a3._left_right_rotate(0, 1) - assert str(a3) == "[(4, 0, 0, 6), (5, 1, 1, 3), (1, 2, 2, 0), (None, 3, 3, None), (None, 4, 4, None), (None, 5, 5, None), (None, 6, 6, None)]" - - trav = BinaryTreeTraversal(a3, backend=backend) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [5, 1, 3, 2, 4, 0, 6] - assert [node.key for node in pre_order] == [2, 1, 5, 3, 0, 4, 6] - - a4 = AVLTree(backend=backend) - a4.set_tree( ArrayForTrees(TreeNode, 0, backend=backend) ) - for i in range(0,7): - a4.tree.append(TreeNode(i, i,backend=backend)) - a4.tree[0].left = 1 - a4.tree[0].right = 2 - a4.tree[2].left = 3 - a4.tree[2].right = 4 - a4.tree[3].left = 5 - a4.tree[3].right = 6 - a4._right_left_rotate(0, 2) - - trav = BinaryTreeTraversal(a4, backend=backend) - 
in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [1, 0, 5, 3, 6, 2, 4] - assert [node.key for node in pre_order] == [3,0,1,5,2,6,4] - - a5 = AVLTree(is_order_statistic=True,backend=backend) - if backend==Backend.PYTHON: - a5.set_tree( ArrayForTrees(TreeNode, [ - TreeNode(10, 10), - TreeNode(5, 5), - TreeNode(17, 17), - TreeNode(2, 2), - TreeNode(9, 9), - TreeNode(12, 12), - TreeNode(20, 20), - TreeNode(3, 3), - TreeNode(11, 11), - TreeNode(15, 15), - TreeNode(18, 18), - TreeNode(30, 30), - TreeNode(13, 13), - TreeNode(33, 33) - ]) ) - else: - a5.set_tree( ArrayForTrees(_nodes.TreeNode, [ - TreeNode(10, 10,backend=backend), - TreeNode(5, 5,backend=backend), - TreeNode(17, 17,backend=backend), - TreeNode(2, 2,backend=backend), - TreeNode(9, 9,backend=backend), - TreeNode(12, 12,backend=backend), - TreeNode(20, 20,backend=backend), - TreeNode(3, 3,backend=backend), - TreeNode(11, 11,backend=backend), - TreeNode(15, 15,backend=backend), - TreeNode(18, 18,backend=backend), - TreeNode(30, 30,backend=backend), - TreeNode(13, 13,backend=backend), - TreeNode(33, 33,backend=backend) - ],backend=backend) ) - - a5.tree[0].left, a5.tree[0].right, a5.tree[0].parent, a5.tree[0].height = \ - 1, 2, None, 4 - a5.tree[1].left, a5.tree[1].right, a5.tree[1].parent, a5.tree[1].height = \ - 3, 4, 0, 2 - a5.tree[2].left, a5.tree[2].right, a5.tree[2].parent, a5.tree[2].height = \ - 5, 6, 0, 3 - a5.tree[3].left, a5.tree[3].right, a5.tree[3].parent, a5.tree[3].height = \ - None, 7, 1, 1 - a5.tree[4].left, a5.tree[4].right, a5.tree[4].parent, a5.tree[4].height = \ - None, None, 1, 0 - a5.tree[5].left, a5.tree[5].right, a5.tree[5].parent, a5.tree[5].height = \ - 8, 9, 2, 2 - a5.tree[6].left, a5.tree[6].right, a5.tree[6].parent, a5.tree[6].height = \ - 10, 11, 2, 2 - a5.tree[7].left, a5.tree[7].right, a5.tree[7].parent, a5.tree[7].height = \ - None, None, 3, 0 - a5.tree[8].left, 
a5.tree[8].right, a5.tree[8].parent, a5.tree[8].height = \ - None, None, 5, 0 - a5.tree[9].left, a5.tree[9].right, a5.tree[9].parent, a5.tree[9].height = \ - 12, None, 5, 1 - a5.tree[10].left, a5.tree[10].right, a5.tree[10].parent, a5.tree[10].height = \ - None, None, 6, 0 - a5.tree[11].left, a5.tree[11].right, a5.tree[11].parent, a5.tree[11].height = \ - None, 13, 6, 1 - a5.tree[12].left, a5.tree[12].right, a5.tree[12].parent, a5.tree[12].height = \ - None, None, 9, 0 - a5.tree[13].left, a5.tree[13].right, a5.tree[13].parent, a5.tree[13].height = \ - None, None, 11, 0 - - # testing order statistics - a5.tree[0].size = 14 - a5.tree[1].size = 4 - a5.tree[2].size = 9 - a5.tree[3].size = 2 - a5.tree[4].size = 1 - a5.tree[5].size = 4 - a5.tree[6].size = 4 - a5.tree[7].size = 1 - a5.tree[8].size = 1 - a5.tree[9].size = 2 - a5.tree[10].size = 1 - a5.tree[11].size = 2 - a5.tree[12].size = 1 - a5.tree[13].size = 1 - assert str(a5) == "[(1, 10, 10, 2), (3, 5, 5, 4), (5, 17, 17, 6), (None, 2, 2, 7), (None, 9, 9, None), (8, 12, 12, 9), (10, 20, 20, 11), (None, 3, 3, None), (None, 11, 11, None), (12, 15, 15, None), (None, 18, 18, None), (None, 30, 30, 13), (None, 13, 13, None), (None, 33, 33, None)]" - - assert raises(ValueError, lambda: a5.select(0)) - assert raises(ValueError, lambda: a5.select(15)) - - assert a5.rank(-1) is None - def test_select_rank(expected_output): - if backend==Backend.PYTHON: - output = [] - for i in range(len(expected_output)): - output.append(a5.select(i + 1).key) - assert output == expected_output - output = [] - expected_ranks = [i + 1 for i in range(len(expected_output))] - for i in range(len(expected_output)): - output.append(a5.rank(expected_output[i])) - assert output == expected_ranks - - test_select_rank([2, 3, 5, 9, 10, 11, 12, 13, 15, 17, 18, 20, 30, 33]) - a5.delete(9) - a5.delete(13) - a5.delete(20) - assert str(a5) == "[(7, 10, 10, 5), (None, 5, 5, None), (0, 17, 17, 6), (None, 2, 2, None), '', (8, 12, 12, 9), (10, 30, 30, 13), (3, 3, 
3, 1), (None, 11, 11, None), (None, 15, 15, None), (None, 18, 18, None), '', '', (None, 33, 33, None)]" - - trav = BinaryTreeTraversal(a5, backend=backend) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [2, 3, 5, 10, 11, 12, 15, 17, 18, 30, 33] - assert [node.key for node in pre_order] == [17, 10, 3, 2, 5, 12, 11, 15, 30, 18, 33] - - test_select_rank([2, 3, 5, 10, 11, 12, 15, 17, 18, 30, 33]) - a5.delete(10) - a5.delete(17) - assert str(a5) == "[(7, 11, 11, 5), (None, 5, 5, None), (0, 18, 18, 6), (None, 2, 2, None), '', (None, 12, 12, 9), (None, 30, 30, 13), (3, 3, 3, 1), '', (None, 15, 15, None), '', '', '', (None, 33, 33, None)]" - test_select_rank([2, 3, 5, 11, 12, 15, 18, 30, 33]) - a5.delete(11) - a5.delete(30) - test_select_rank([2, 3, 5, 12, 15, 18, 33]) - a5.delete(12) - test_select_rank([2, 3, 5, 15, 18, 33]) - a5.delete(15) - test_select_rank([2, 3, 5, 18, 33]) - a5.delete(18) - test_select_rank([2, 3, 5, 33]) - a5.delete(33) - test_select_rank([2, 3, 5]) - a5.delete(5) - test_select_rank([2, 3]) - a5.delete(3) - test_select_rank([2]) - a5.delete(2) - test_select_rank([]) - assert str(a5) == "[(None, None, None, None)]" - -def test_AVLTree(): - _test_AVLTree(backend=Backend.PYTHON) -def test_cpp_AVLTree(): - _test_AVLTree(backend=Backend.CPP) - -def _test_BinaryIndexedTree(backend): - - FT = BinaryIndexedTree - - t = FT([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], backend=backend) - - assert t.get_sum(0, 2) == 6 - assert t.get_sum(0, 4) == 15 - assert t.get_sum(0, 9) == 55 - t.update(0, 100) - assert t.get_sum(0, 2) == 105 - assert t.get_sum(0, 4) == 114 - assert t.get_sum(1, 9) == 54 - -def test_BinaryIndexedTree(): - _test_BinaryIndexedTree(Backend.PYTHON) - -def test_cpp_BinaryIndexedTree(): - _test_BinaryIndexedTree(Backend.CPP) - -def _test_CartesianTree(backend): - tree = CartesianTree(backend=backend) - tree.insert(3, 1, 3) - tree.insert(1, 6, 1) - 
tree.insert(0, 9, 0) - tree.insert(5, 11, 5) - tree.insert(4, 14, 4) - tree.insert(9, 17, 9) - tree.insert(7, 22, 7) - tree.insert(6, 42, 6) - tree.insert(8, 49, 8) - tree.insert(2, 99, 2) - # Explicit check for the redefined __str__ method of Cartesian Trees Class - - trav = BinaryTreeTraversal(tree, backend=backend) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] - assert [node.key for node in pre_order] == [3, 1, 0, 2, 5, 4, 9, 7, 6, 8] - - tree.insert(1.5, 4, 1.5) - - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [0, 1, 1.5, 2, 3, 4, 5, 6, 7, 8, 9] - assert [node.key for node in pre_order] == [3, 1.5, 1, 0, 2, 5, 4, 9, 7, 6, 8] - - k = tree.search(1.5) - assert tree.tree[tree.tree[k].parent].key == 3 - tree.delete(1.5) - assert tree.root_idx == 0 - tree.tree[tree.tree[tree.root_idx].left].key == 1 - tree.delete(8) - assert tree.search(8) is None - tree.delete(7) - assert tree.search(7) is None - tree.delete(3) - assert tree.search(3) is None - assert tree.delete(18) is None - -def test_CartesianTree(): - _test_CartesianTree(backend=Backend.PYTHON) - -def test_cpp_CartesianTree(): - _test_CartesianTree(backend=Backend.CPP) - -def _test_Treap(backend): - - random.seed(0) - tree = Treap(backend=backend) - tree.insert(7, 7) - tree.insert(2, 2) - tree.insert(3, 3) - tree.insert(4, 4) - tree.insert(5, 5) - - assert isinstance(tree.tree[0].priority, float) - tree.delete(1) - assert tree.search(1) is None - assert tree.search(2) == 1 - assert tree.delete(1) is None - -def test_Treap(): - _test_Treap(Backend.PYTHON) - -def test_cpp_Treap(): - _test_Treap(Backend.CPP) - -def _test_SelfBalancingBinaryTree(backend): - """ - https://github.com/codezonediitj/pydatastructs/issues/234 - """ - tree = 
SelfBalancingBinaryTree(backend=backend) - tree.insert(5, 5) - tree.insert(5.5, 5.5) - tree.insert(4.5, 4.5) - tree.insert(4.6, 4.6) - tree.insert(4.4, 4.4) - tree.insert(4.55, 4.55) - tree.insert(4.65, 4.65) - original_tree = str(tree) - tree._right_rotate(3, 5) - - assert str(tree) == "[(2, 5, 5, 1), (None, 5.5, 5.5, None), (4, 4.5, 4.5, 5), (None, 4.6, 4.6, 6), (None, 4.4, 4.4, None), (None, 4.55, 4.55, 3), (None, 4.65, 4.65, None)]" - assert tree.tree[3].parent == 5 - assert tree.tree[2].right != 3 - assert tree.tree[tree.tree[5].parent].right == 5 - assert tree.root_idx == 0 - - trav = BinaryTreeTraversal(tree, backend=backend) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [4.4, 4.5, 4.55, 4.6, 4.65, 5, 5.5] - assert [node.key for node in pre_order] == [5, 4.5, 4.4, 4.55, 4.6, 4.65, 5.5] - - assert tree.tree[tree.tree[3].parent].right == 3 - tree._left_rotate(5, 3) - assert str(tree) == original_tree - tree.insert(4.54, 4.54) - tree.insert(4.56, 4.56) - tree._left_rotate(5, 8) - assert tree.tree[tree.tree[8].parent].left == 8 - assert str(tree) == "[(2, 5, 5, 1), (None, 5.5, 5.5, None), (4, 4.5, 4.5, 3), (8, 4.6, 4.6, 6), (None, 4.4, 4.4, None), (7, 4.55, 4.55, None), (None, 4.65, 4.65, None), (None, 4.54, 4.54, None), (5, 4.56, 4.56, None)]" - - tree._left_right_rotate(0, 2) - assert str(tree) == "[(6, 5, 5, 1), (None, 5.5, 5.5, None), (4, 4.5, 4.5, 8), (2, 4.6, 4.6, 0), (None, 4.4, 4.4, None), (7, 4.55, 4.55, None), (None, 4.65, 4.65, None), (None, 4.54, 4.54, None), (5, 4.56, 4.56, None)]" - - tree._right_left_rotate(0, 2) - assert str(tree) == "[(6, 5, 5, None), (None, 5.5, 5.5, None), (None, 4.5, 4.5, 8), (2, 4.6, 4.6, 4), (0, 4.4, 4.4, 2), (7, 4.55, 4.55, None), (None, 4.65, 4.65, None), (None, 4.54, 4.54, None), (5, 4.56, 4.56, None)]" - -def test_SelfBalancingBinaryTree(): - _test_SelfBalancingBinaryTree(Backend.PYTHON) -def 
test_cpp_SelfBalancingBinaryTree(): - _test_SelfBalancingBinaryTree(Backend.CPP) - -def _test_SplayTree(backend): - t = SplayTree(100, 100, backend=backend) - t.insert(50, 50) - t.insert(200, 200) - t.insert(40, 40) - t.insert(30, 30) - t.insert(20, 20) - t.insert(55, 55) - assert str(t) == "[(None, 100, 100, None), (None, 50, 50, None), (0, 200, 200, None), (None, 40, 40, 1), (5, 30, 30, 3), (None, 20, 20, None), (4, 55, 55, 2)]" - assert t.root_idx == 6 - - trav = BinaryTreeTraversal(t, backend=backend) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [20, 30, 40, 50, 55, 100, 200] - assert [node.key for node in pre_order] == [55, 30, 20, 40, 50, 200, 100] - - t.delete(40) - - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [20, 30, 50, 55, 100, 200] - assert [node.key for node in pre_order] == [50, 30, 20, 55, 200, 100] - - t.delete(150) - - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [20, 30, 50, 55, 100, 200] - assert [node.key for node in pre_order] == [50, 30, 20, 55, 200, 100] - - t1 = SplayTree(1000, 1000, backend=backend) - t1.insert(2000, 2000) - - trav2 = BinaryTreeTraversal(t1, backend=backend) - in_order = trav2.depth_first_search(order='in_order') - pre_order = trav2.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [1000, 2000] - assert [node.key for node in pre_order] == [2000, 1000] - - t.join(t1) - assert str(t) == "[(None, 100, 100, None), '', (6, 200, 200, 8), (4, 50, 50, None), (5, 30, 30, None), (None, 20, 20, None), (3, 55, 55, 0), (None, 1000, 1000, None), (7, 2000, 2000, None), '']" - - if backend == Backend.PYTHON: - trav3 = BinaryTreeTraversal(t, backend=backend) - in_order = 
trav3.depth_first_search(order='in_order') - pre_order = trav3.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [20, 30, 50, 55, 100, 200, 1000, 2000] - assert [node.key for node in pre_order] == [200, 55, 50, 30, 20, 100, 2000, 1000] - - s = t.split(200) - assert str(s) == "[(1, 2000, 2000, None), (None, 1000, 1000, None)]" - - trav4 = BinaryTreeTraversal(s, backend=backend) - in_order = trav4.depth_first_search(order='in_order') - pre_order = trav4.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [1000, 2000] - assert [node.key for node in pre_order] == [2000, 1000] - - if backend == Backend.PYTHON: - trav5 = BinaryTreeTraversal(t, backend=backend) - in_order = trav5.depth_first_search(order='in_order') - pre_order = trav5.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [20, 30, 50, 55, 100, 200] - assert [node.key for node in pre_order] == [200, 55, 50, 30, 20, 100] - -def test_SplayTree(): - _test_SplayTree(Backend.PYTHON) - -def test_cpp_SplayTree(): - _test_SplayTree(Backend.CPP) - -def _test_RedBlackTree(backend): - tree = RedBlackTree(backend=backend) - tree.insert(10, 10) - tree.insert(18, 18) - tree.insert(7, 7) - tree.insert(15, 15) - tree.insert(16, 16) - tree.insert(30, 30) - tree.insert(25, 25) - tree.insert(40, 40) - tree.insert(60, 60) - tree.insert(2, 2) - tree.insert(17, 17) - tree.insert(6, 6) - assert str(tree) == "[(11, 10, 10, 3), (10, 18, 18, None), (None, 7, 7, None), (None, 15, 15, None), (0, 16, 16, 6), (None, 30, 30, None), (1, 25, 25, 7), (5, 40, 40, 8), (None, 60, 60, None), (None, 2, 2, None), (None, 17, 17, None), (9, 6, 6, 2)]" - assert tree.root_idx == 4 - - trav = BinaryTreeTraversal(tree, backend=backend) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [2, 6, 7, 10, 15, 16, 17, 18, 25, 30, 40, 60] - assert [node.key for node 
in pre_order] == [16, 10, 6, 2, 7, 15, 25, 18, 17, 40, 30, 60] - - assert tree.lower_bound(0) == 2 - assert tree.lower_bound(2) == 2 - assert tree.lower_bound(3) == 6 - assert tree.lower_bound(7) == 7 - assert tree.lower_bound(25) == 25 - assert tree.lower_bound(32) == 40 - assert tree.lower_bound(41) == 60 - assert tree.lower_bound(60) == 60 - assert tree.lower_bound(61) is None - - assert tree.upper_bound(0) == 2 - assert tree.upper_bound(2) == 6 - assert tree.upper_bound(3) == 6 - assert tree.upper_bound(7) == 10 - assert tree.upper_bound(25) == 30 - assert tree.upper_bound(32) == 40 - assert tree.upper_bound(41) == 60 - assert tree.upper_bound(60) is None - assert tree.upper_bound(61) is None - - tree = RedBlackTree(backend=backend) - - assert tree.lower_bound(1) is None - assert tree.upper_bound(0) is None - - tree.insert(10) - tree.insert(20) - tree.insert(30) - tree.insert(40) - tree.insert(50) - tree.insert(60) - tree.insert(70) - tree.insert(80) - tree.insert(90) - tree.insert(100) - tree.insert(110) - tree.insert(120) - tree.insert(130) - tree.insert(140) - tree.insert(150) - tree.insert(160) - tree.insert(170) - tree.insert(180) - assert str(tree) == "[(None, 10, None, None), (0, 20, None, 2), (None, 30, None, None), (1, 40, None, 5), (None, 50, None, None), (4, 60, None, 6), (None, 70, None, None), (3, 80, None, 11), (None, 90, None, None), (8, 100, None, 10), (None, 110, None, None), (9, 120, None, 13), (None, 130, None, None), (12, 140, None, 15), (None, 150, None, None), (14, 160, None, 16), (None, 170, None, 17), (None, 180, None, None)]" - - assert tree._get_sibling(7) is None - - trav = BinaryTreeTraversal(tree, backend=backend) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [10, 20, 30, 40, 50, 60, 70, 80, 90, - 100, 110, 120, 130, 140, 150, 160, 170, 180] - assert [node.key for node in pre_order] == [80, 40, 20, 10, 30, 60, 50, 70, 120, 
100, - 90, 110, 140, 130, 160, 150, 170, 180] - - tree.delete(180) - tree.delete(130) - tree.delete(110) - tree.delete(190) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, - 120, 140, 150, 160, 170] - assert [node.key for node in pre_order] == [80, 40, 20, 10, 30, 60, 50, 70, 120, 100, - 90, 160, 140, 150, 170] - - tree.delete(170) - tree.delete(100) - tree.delete(60) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [10, 20, 30, 40, 50, 70, 80, 90, 120, 140, 150, 160] - assert [node.key for node in pre_order] == [80, 40, 20, 10, 30, 50, 70, 120, 90, 150, 140, 160] - - tree.delete(70) - tree.delete(140) - - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [10, 20, 30, 40, 50, 80, 90, 120, 150, 160] - assert [node.key for node in pre_order] == [80, 40, 20, 10, 30, 50, 120, 90, 150, 160] - - tree.delete(150) - tree.delete(120) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [10, 20, 30, 40, 50, 80, 90, 160] - assert [node.key for node in pre_order] == [40, 20, 10, 30, 80, 50, 90, 160] - - tree.delete(50) - tree.delete(80) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [10, 20, 30, 40, 90, 160] - assert [node.key for node in pre_order] == [40, 20, 10, 30, 90, 160] - - tree.delete(30) - tree.delete(20) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [10, 40, 90, 160] - assert [node.key for node 
in pre_order] == [40, 10, 90, 160] - - tree.delete(10) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [40, 90, 160] - assert [node.key for node in pre_order] == [90, 40, 160] - - tree.delete(40) - tree.delete(90) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [160] - assert [node.key for node in pre_order] == [160] - - tree.delete(160) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order if node.key is not None] == [] - assert [node.key for node in pre_order if node.key is not None] == [] - - tree = RedBlackTree(backend=backend) - tree.insert(50) - tree.insert(40) - tree.insert(30) - tree.insert(20) - tree.insert(10) - tree.insert(5) - - trav = BinaryTreeTraversal(tree, backend=backend) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [5, 10, 20, 30, 40, 50] - assert [node.key for node in pre_order] == [40, 20, 10, 5, 30, 50] - - assert tree.search(50) == 0 - assert tree.search(20) == 3 - assert tree.search(30) == 2 - tree.delete(50) - tree.delete(20) - tree.delete(30) - assert tree.search(50) is None - assert tree.search(20) is None - assert tree.search(30) is None - - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [5, 10, 40] - assert [node.key for node in pre_order] == [10, 5, 40] - - tree = RedBlackTree(backend=backend) - tree.insert(10) - tree.insert(5) - tree.insert(20) - tree.insert(15) - - trav = BinaryTreeTraversal(tree, backend=backend) - in_order = trav.depth_first_search(order='in_order') - pre_order = 
trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [5, 10, 15, 20] - assert [node.key for node in pre_order] == [10, 5, 20, 15] - - tree.delete(5) - - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [10, 15, 20] - assert [node.key for node in pre_order] == [15, 10, 20] - - tree = RedBlackTree(backend=backend) - tree.insert(10) - tree.insert(5) - tree.insert(20) - tree.insert(15) - tree.insert(2) - tree.insert(6) - - trav = BinaryTreeTraversal(tree,backend=backend) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [2, 5, 6, 10, 15, 20] - assert [node.key for node in pre_order] == [10, 5, 2, 6, 20, 15] - - tree.delete(10) - - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [2, 5, 6, 15, 20] - assert [node.key for node in pre_order] == [6, 5, 2, 20, 15] - -def test_RedBlackTree(): - _test_RedBlackTree(Backend.PYTHON) - -def test_cpp_RedBlackTree(): - _test_RedBlackTree(Backend.CPP) diff --git a/lib/python3.12/site-packages/pydatastructs/trees/tests/test_heaps.py b/lib/python3.12/site-packages/pydatastructs/trees/tests/test_heaps.py deleted file mode 100644 index dece2f132..000000000 --- a/lib/python3.12/site-packages/pydatastructs/trees/tests/test_heaps.py +++ /dev/null @@ -1,236 +0,0 @@ -from pydatastructs.trees.heaps import BinaryHeap, TernaryHeap, BinomialHeap, DHeap -from pydatastructs.linear_data_structures.arrays import DynamicOneDimensionalArray -from pydatastructs.miscellaneous_data_structures.binomial_trees import BinomialTree -from pydatastructs.utils.misc_util import TreeNode, BinomialTreeNode -from pydatastructs.utils.raises_util import raises -from collections import deque as Queue - -def test_BinaryHeap(): 
- - max_heap = BinaryHeap(heap_property="max") - - assert raises(IndexError, lambda: max_heap.extract()) - - max_heap.insert(100, 100) - max_heap.insert(19, 19) - max_heap.insert(36, 36) - max_heap.insert(17, 17) - max_heap.insert(3, 3) - max_heap.insert(25, 25) - max_heap.insert(1, 1) - max_heap.insert(2, 2) - max_heap.insert(7, 7) - assert str(max_heap) == \ - ("[(100, 100, [1, 2]), (19, 19, [3, 4]), " - "(36, 36, [5, 6]), (17, 17, [7, 8]), " - "(3, 3, []), (25, 25, []), (1, 1, []), " - "(2, 2, []), (7, 7, [])]") - - assert max_heap.extract().key == 100 - - expected_sorted_elements = [36, 25, 19, 17, 7, 3, 2, 1] - l = max_heap.heap[0].left - l = max_heap.heap[0].right - sorted_elements = [] - for _ in range(8): - sorted_elements.append(max_heap.extract().key) - assert expected_sorted_elements == sorted_elements - - elements = [ - TreeNode(7, 7), TreeNode(25, 25), TreeNode(100, 100), - TreeNode(1, 1), TreeNode(2, 2), TreeNode(3, 3), - TreeNode(17, 17), TreeNode(19, 19), TreeNode(36, 36) - ] - min_heap = BinaryHeap(elements=elements, heap_property="min") - assert min_heap.extract().key == 1 - - expected_sorted_elements = [2, 3, 7, 17, 19, 25, 36, 100] - sorted_elements = [min_heap.extract().key for _ in range(8)] - assert expected_sorted_elements == sorted_elements - - non_TreeNode_elements = [ - (7, 7), TreeNode(25, 25), TreeNode(100, 100), - TreeNode(1, 1), (2, 2), TreeNode(3, 3), - TreeNode(17, 17), TreeNode(19, 19), TreeNode(36, 36) - ] - assert raises(TypeError, lambda: - BinaryHeap(elements = non_TreeNode_elements, heap_property='min')) - - non_TreeNode_elements = DynamicOneDimensionalArray(int, 0) - non_TreeNode_elements.append(1) - non_TreeNode_elements.append(2) - assert raises(TypeError, lambda: - BinaryHeap(elements = non_TreeNode_elements, heap_property='min')) - - non_heapable = "[1, 2, 3]" - assert raises(ValueError, lambda: - BinaryHeap(elements = non_heapable, heap_property='min')) - -def test_TernaryHeap(): - max_heap = 
TernaryHeap(heap_property="max") - assert raises(IndexError, lambda: max_heap.extract()) - max_heap.insert(100, 100) - max_heap.insert(19, 19) - max_heap.insert(36, 36) - max_heap.insert(17, 17) - max_heap.insert(3, 3) - max_heap.insert(25, 25) - max_heap.insert(1, 1) - max_heap.insert(2, 2) - max_heap.insert(7, 7) - assert str(max_heap) == \ - ('[(100, 100, [1, 2, 3]), (25, 25, [4, 5, 6]), ' - '(36, 36, [7, 8]), (17, 17, []), ' - '(3, 3, []), (19, 19, []), (1, 1, []), ' - '(2, 2, []), (7, 7, [])]') - - assert max_heap.extract().key == 100 - - expected_sorted_elements = [36, 25, 19, 17, 7, 3, 2, 1] - sorted_elements = [] - for _ in range(8): - sorted_elements.append(max_heap.extract().key) - assert expected_sorted_elements == sorted_elements - - elements = [ - TreeNode(7, 7), TreeNode(25, 25), TreeNode(100, 100), - TreeNode(1, 1), TreeNode(2, 2), TreeNode(3, 3), - TreeNode(17, 17), TreeNode(19, 19), TreeNode(36, 36) - ] - min_heap = TernaryHeap(elements=elements, heap_property="min") - expected_extracted_element = min_heap.heap[0].key - assert min_heap.extract().key == expected_extracted_element - - expected_sorted_elements = [2, 3, 7, 17, 19, 25, 36, 100] - sorted_elements = [min_heap.extract().key for _ in range(8)] - assert expected_sorted_elements == sorted_elements - -def test_DHeap(): - assert raises(ValueError, lambda: DHeap(heap_property="none", d=4)) - max_heap = DHeap(heap_property="max", d=5) - assert raises(IndexError, lambda: max_heap.extract()) - max_heap.insert(100, 100) - max_heap.insert(19, 19) - max_heap.insert(36, 36) - max_heap.insert(17, 17) - max_heap.insert(3, 3) - max_heap.insert(25, 25) - max_heap.insert(1, 1) - max_heap = DHeap(max_heap.heap, heap_property="max", d=4) - max_heap.insert(2, 2) - max_heap.insert(7, 7) - assert str(max_heap) == \ - ('[(100, 100, [1, 2, 3, 4]), (25, 25, [5, 6, 7, 8]), ' - '(36, 36, []), (17, 17, []), (3, 3, []), (19, 19, []), ' - '(1, 1, []), (2, 2, []), (7, 7, [])]') - - assert max_heap.extract().key == 100 - 
- expected_sorted_elements = [36, 25, 19, 17, 7, 3, 2, 1] - sorted_elements = [] - for _ in range(8): - sorted_elements.append(max_heap.extract().key) - assert expected_sorted_elements == sorted_elements - - elements = [ - TreeNode(7, 7), TreeNode(25, 25), TreeNode(100, 100), - TreeNode(1, 1), TreeNode(2, 2), TreeNode(3, 3), - TreeNode(17, 17), TreeNode(19, 19), TreeNode(36, 36) - ] - min_heap = DHeap(elements=DynamicOneDimensionalArray(TreeNode, 9, elements), heap_property="min") - assert min_heap.extract().key == 1 - - expected_sorted_elements = [2, 3, 7, 17, 19, 25, 36, 100] - sorted_elements = [min_heap.extract().key for _ in range(8)] - assert expected_sorted_elements == sorted_elements - -def test_BinomialHeap(): - - # Corner cases - assert raises(TypeError, lambda: - BinomialHeap( - root_list=[BinomialTreeNode(1, 1), None]) - ) is True - tree1 = BinomialTree(BinomialTreeNode(1, 1), 0) - tree2 = BinomialTree(BinomialTreeNode(2, 2), 0) - bh = BinomialHeap(root_list=[tree1, tree2]) - assert raises(TypeError, lambda: - bh.merge_tree(BinomialTreeNode(2, 2), None)) - assert raises(TypeError, lambda: - bh.merge(None)) - - # Testing BinomialHeap.merge - nodes = [BinomialTreeNode(1, 1), # 0 - BinomialTreeNode(3, 3), # 1 - BinomialTreeNode(9, 9), # 2 - BinomialTreeNode(11, 11), # 3 - BinomialTreeNode(6, 6), # 4 - BinomialTreeNode(14, 14), # 5 - BinomialTreeNode(2, 2), # 6 - BinomialTreeNode(7, 7), # 7 - BinomialTreeNode(4, 4), # 8 - BinomialTreeNode(8, 8), # 9 - BinomialTreeNode(12, 12), # 10 - BinomialTreeNode(10, 10), # 11 - BinomialTreeNode(5, 5), # 12 - BinomialTreeNode(21, 21)] # 13 - - nodes[2].add_children(nodes[3]) - nodes[4].add_children(nodes[5]) - nodes[6].add_children(nodes[9], nodes[8], nodes[7]) - nodes[7].add_children(nodes[11], nodes[10]) - nodes[8].add_children(nodes[12]) - nodes[10].add_children(nodes[13]) - - tree11 = BinomialTree(nodes[0], 0) - tree12 = BinomialTree(nodes[2], 1) - tree13 = BinomialTree(nodes[6], 3) - tree21 = BinomialTree(nodes[1], 
0) - - heap1 = BinomialHeap(root_list=[tree11, tree12, tree13]) - heap2 = BinomialHeap(root_list=[tree21]) - - def bfs(heap): - bfs_trav = [] - for i in range(len(heap.root_list)): - layer = [] - bfs_q = Queue() - bfs_q.append(heap.root_list[i].root) - while len(bfs_q) != 0: - curr_node = bfs_q.popleft() - if curr_node is not None: - layer.append(curr_node.key) - for _i in range(curr_node.children._last_pos_filled + 1): - bfs_q.append(curr_node.children[_i]) - if layer != []: - bfs_trav.append(layer) - return bfs_trav - - heap1.merge(heap2) - expected_bfs_trav = [[1, 3, 9, 11], [2, 8, 4, 7, 5, 10, 12, 21]] - assert bfs(heap1) == expected_bfs_trav - - # Testing Binomial.find_minimum - assert heap1.find_minimum().key == 1 - - # Testing Binomial.delete_minimum - heap1.delete_minimum() - assert bfs(heap1) == [[3], [9, 11], [2, 8, 4, 7, 5, 10, 12, 21]] - assert raises(ValueError, lambda: heap1.decrease_key(nodes[3], 15)) - heap1.decrease_key(nodes[3], 0) - assert bfs(heap1) == [[3], [0, 9], [2, 8, 4, 7, 5, 10, 12, 21]] - heap1.delete(nodes[12]) - assert bfs(heap1) == [[3, 8], [0, 9, 2, 7, 4, 10, 12, 21]] - - # Testing BinomialHeap.insert - heap = BinomialHeap() - assert raises(IndexError, lambda: heap.find_minimum()) - heap.insert(1, 1) - heap.insert(3, 3) - heap.insert(6, 6) - heap.insert(9, 9) - heap.insert(14, 14) - heap.insert(11, 11) - heap.insert(2, 2) - heap.insert(7, 7) - assert bfs(heap) == [[1, 3, 6, 2, 9, 7, 11, 14]] diff --git a/lib/python3.12/site-packages/pydatastructs/trees/tests/test_m_ary_trees.py b/lib/python3.12/site-packages/pydatastructs/trees/tests/test_m_ary_trees.py deleted file mode 100644 index 6cbc84ace..000000000 --- a/lib/python3.12/site-packages/pydatastructs/trees/tests/test_m_ary_trees.py +++ /dev/null @@ -1,5 +0,0 @@ -from pydatastructs import MAryTree - -def test_MAryTree(): - m = MAryTree(1, 1) - assert str(m) == '[(1, 1)]' diff --git a/lib/python3.12/site-packages/pydatastructs/trees/tests/test_space_partitioning_tree.py 
b/lib/python3.12/site-packages/pydatastructs/trees/tests/test_space_partitioning_tree.py deleted file mode 100644 index 99f0e84cc..000000000 --- a/lib/python3.12/site-packages/pydatastructs/trees/tests/test_space_partitioning_tree.py +++ /dev/null @@ -1,20 +0,0 @@ -from pydatastructs import OneDimensionalSegmentTree -from pydatastructs.utils.raises_util import raises - -def test_OneDimensionalSegmentTree(): - ODST = OneDimensionalSegmentTree - segt = ODST([(0, 5), (1, 6), (9, 13), (1, 2), (3, 8), (9, 20)]) - assert segt.cache is False - segt2 = ODST([(1, 4)]) - assert str(segt2) == ("[(None, [False, 0, 1, False], None, None), " - "(None, [True, 1, 1, True], ['(None, [True, 1, 4, True], None, None)'], " - "None), (None, [False, 1, 4, False], None, None), (None, [True, 4, 4, True], " - "None, None), (0, [False, 0, 1, True], None, 1), (2, [False, 1, 4, True], " - "['(None, [True, 1, 4, True], None, None)'], 3), (4, [False, 0, 4, True], " - "None, 5), (None, [False, 4, 5, False], None, None), (-3, [False, 0, 5, " - "False], None, -2)]") - assert len(segt.query(1.5)) == 3 - assert segt.cache is True - assert len(segt.query(-1)) == 0 - assert len(segt.query(2.8)) == 2 - assert raises(ValueError, lambda: ODST([(1, 2, 3)])) diff --git a/lib/python3.12/site-packages/pydatastructs/utils/__init__.py b/lib/python3.12/site-packages/pydatastructs/utils/__init__.py deleted file mode 100644 index c4971be32..000000000 --- a/lib/python3.12/site-packages/pydatastructs/utils/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -__all__ = [] - -from . 
import ( - misc_util, - testing_util, -) - -from .misc_util import ( - TreeNode, - MAryTreeNode, - LinkedListNode, - BinomialTreeNode, - AdjacencyListGraphNode, - AdjacencyMatrixGraphNode, - GraphEdge, - Set, - CartesianTreeNode, - RedBlackTreeNode, - TrieNode, - SkipNode, - summation, - greatest_common_divisor, - minimum, - Backend -) -from .testing_util import test - -__all__.extend(misc_util.__all__) -__all__.extend(testing_util.__all__) diff --git a/lib/python3.12/site-packages/pydatastructs/utils/_backend/__init__.py b/lib/python3.12/site-packages/pydatastructs/utils/_backend/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/lib/python3.12/site-packages/pydatastructs/utils/misc_util.py b/lib/python3.12/site-packages/pydatastructs/utils/misc_util.py deleted file mode 100644 index 3672c58b9..000000000 --- a/lib/python3.12/site-packages/pydatastructs/utils/misc_util.py +++ /dev/null @@ -1,632 +0,0 @@ -import math, pydatastructs -from enum import Enum -from pydatastructs.utils._backend.cpp import _nodes, _graph_utils - -__all__ = [ - 'TreeNode', - 'MAryTreeNode', - 'LinkedListNode', - 'BinomialTreeNode', - 'AdjacencyListGraphNode', - 'AdjacencyMatrixGraphNode', - 'GraphEdge', - 'Set', - 'CartesianTreeNode', - 'RedBlackTreeNode', - 'TrieNode', - 'SkipNode', - 'minimum', - 'summation', - 'greatest_common_divisor', - 'Backend' -] - - -class Backend(Enum): - - PYTHON = 'Python' - CPP = 'Cpp' - LLVM = 'Llvm' - - def __str__(self): - return self.value - -def raise_if_backend_is_not_python(api, backend): - if backend != Backend.PYTHON: - raise ValueError("As of {} version, only {} backend is supported for {} API".format( - pydatastructs.__version__, str(Backend.PYTHON), api)) - -_check_type = lambda a, t: isinstance(a, t) -NoneType = type(None) - -class Node(object): - """ - Abstract class representing a node. - """ - pass - -class TreeNode(Node): - """ - Represents node in trees. 
- - Parameters - ========== - - key - Required for comparison operations. - data - Any valid data to be stored in the node. - left: int - Optional, index of the left child node. - right: int - Optional, index of the right child node. - backend: pydatastructs.Backend - The backend to be used. Available backends: Python and C++ - Optional, by default, the Python backend is used. For faster execution, use the C++ backend. - """ - - __slots__ = ['key', 'data', 'left', 'right', 'is_root', - 'height', 'parent', 'size'] - - @classmethod - def methods(cls): - return ['__new__', '__str__'] - - def __new__(cls, key, data=None, **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.CPP: - return _nodes.TreeNode(key, data, **kwargs) - obj = Node.__new__(cls) - obj.data, obj.key = data, key - obj.left, obj.right, obj.parent, obj.height, obj.size = \ - None, None, None, 0, 1 - obj.is_root = False - return obj - - def __str__(self): - """ - Used for printing. - """ - return str((self.left, self.key, self.data, self.right)) - -class CartesianTreeNode(TreeNode): - """ - Represents node in cartesian trees. - - Parameters - ========== - - key - Required for comparison operations. - data - Any valid data to be stored in the node. - priority: int - An integer value for heap property. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - """ - __slots__ = ['key', 'data', 'priority'] - - def __new__(cls, key, priority, data=None, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = TreeNode.__new__(cls, key, data) - obj.priority = priority - return obj - - def __str__(self): - """ - Used for printing. - """ - return str((self.left, self.key, self.priority, self.data, self.right)) - -class RedBlackTreeNode(TreeNode): - """ - Represents node in red-black trees. - - Parameters - ========== - - key - Required for comparison operations. 
- data - Any valid data to be stored in the node. - color - 0 for black and 1 for red. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - """ - __slots__ = ['key', 'data', 'color'] - - @classmethod - def methods(cls): - return ['__new__'] - - def __new__(cls, key, data=None, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = TreeNode.__new__(cls, key, data) - obj.color = 1 - return obj - -class BinomialTreeNode(TreeNode): - """ - Represents node in binomial trees. - - Parameters - ========== - - key - Required for comparison operations. - data - Any valid data to be stored in the node. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Note - ==== - - The following are the data members of the class: - - parent: BinomialTreeNode - A reference to the BinomialTreeNode object - which is a prent of this. - children: DynamicOneDimensionalArray - An array of references to BinomialTreeNode objects - which are children this node. - is_root: bool, by default, False - If the current node is a root of the tree then - set it to True otherwise False. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - """ - __slots__ = ['parent', 'key', 'children', 'data', 'is_root'] - - @classmethod - def methods(cls): - return ['__new__', 'add_children', '__str__'] - - def __new__(cls, key, data=None, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - from pydatastructs.linear_data_structures.arrays import DynamicOneDimensionalArray - obj = Node.__new__(cls) - obj.data, obj.key = data, key - obj.children, obj.parent, obj.is_root = ( - DynamicOneDimensionalArray(BinomialTreeNode, 0), - None, - False - ) - return obj - - def add_children(self, *children): - """ - Adds children of current node. 
- """ - for child in children: - self.children.append(child) - child.parent = self - - def __str__(self): - """ - For printing the key and data. - """ - return str((self.key, self.data)) - -class MAryTreeNode(TreeNode): - """ - Represents node in an M-ary trees. - - Parameters - ========== - - key - Required for comparison operations. - data - Any valid data to be stored in the node. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Note - ==== - - The following are the data members of the class: - - children: DynamicOneDimensionalArray - An array of indices which stores the children of - this node in the M-ary tree array - is_root: bool, by default, False - If the current node is a root of the tree then - set it to True otherwise False. - """ - __slots__ = ['key', 'children', 'data', 'is_root'] - - @classmethod - def methods(cls): - return ['__new__', 'add_children', '__str__'] - - def __new__(cls, key, data=None, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - from pydatastructs.linear_data_structures.arrays import DynamicOneDimensionalArray - obj = Node.__new__(cls) - obj.data = data - obj.key = key - obj.is_root = False - obj.children = DynamicOneDimensionalArray(int, 0) - return obj - - def add_children(self, *children): - """ - Adds children of current node. - """ - for child in children: - self.children.append(child) - - def __str__(self): - return str((self.key, self.data)) - - -class LinkedListNode(Node): - """ - Represents node in linked lists. - - Parameters - ========== - - key - Any valid identifier to uniquely - identify the node in the linked list. - data - Any valid data to be stored in the node. - links - List of names of attributes which should - be used as links to other nodes. - addrs - List of address of nodes to be assigned to - each of the attributes in links. - backend: pydatastructs.Backend - The backend to be used. 
- Optional, by default, the best available - backend is used. - """ - @classmethod - def methods(cls): - return ['__new__', '__str__'] - - def __new__(cls, key, data=None, links=None, addrs=None, - **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - if links is None: - links = ['next'] - if addrs is None: - addrs = [None] - obj = Node.__new__(cls) - obj.key = key - obj.data = data - for link, addr in zip(links, addrs): - obj.__setattr__(link, addr) - obj.__slots__ = ['key', 'data'] + links - return obj - - def __str__(self): - return str((self.key, self.data)) - -class SkipNode(Node): - """ - Represents node in linked lists. - - Parameters - ========== - - key - Any valid identifier to uniquely - identify the node in the skip list. - data - Any valid data to be stored in the node. - next - Reference to the node lying just forward - to the current node. - Optional, by default, None. - down - Reference to the node lying just below the - current node. - Optional, by default, None. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - """ - - __slots__ = ['key', 'data', 'next', 'down'] - - def __new__(cls, key, data=None, next=None, down=None, - **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = Node.__new__(cls) - obj.key, obj.data = key, data - obj.next, obj.down = next, down - return obj - - def __str__(self): - return str((self.key, self.data)) - -class GraphNode(Node): - """ - Abastract class for graph nodes/vertices. - """ - def __str__(self): - return str((self.name, self.data)) - -class AdjacencyListGraphNode(GraphNode): - """ - Represents nodes for adjacency list implementation - of graphs. - - Parameters - ========== - - name: str - The name of the node by which it is identified - in the graph. Must be unique. - data - The data to be stored at each graph node. 
- adjacency_list: list - Any valid iterator to initialize the adjacent - nodes of the current node. - Optional, by default, None - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - """ - @classmethod - def methods(cls): - return ['__new__', 'add_adjacent_node', - 'remove_adjacent_node'] - - def __new__(cls, name, data=None, adjacency_list=[], - **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.PYTHON: - obj = GraphNode.__new__(cls) - obj.name, obj.data = str(name), data - obj._impl = 'adjacency_list' - if len(adjacency_list) > 0: - for node in adjacency_list: - obj.__setattr__(node.name, node) - obj.adjacent = adjacency_list if len(adjacency_list) > 0 \ - else [] - return obj - else: - return _graph_utils.AdjacencyListGraphNode(name, data, adjacency_list) - - def add_adjacent_node(self, name, data=None): - """ - Adds adjacent node to the current node's - adjacency list with given name and data. - """ - if hasattr(self, name): - getattr(self, name).data = data - else: - new_node = AdjacencyListGraphNode(name, data) - self.__setattr__(new_node.name, new_node) - self.adjacent.append(new_node.name) - - def remove_adjacent_node(self, name): - """ - Removes node with given name from - adjacency list. - """ - if not hasattr(self, name): - raise ValueError("%s is not adjacent to %s"%(name, self.name)) - self.adjacent.remove(name) - delattr(self, name) - -class AdjacencyMatrixGraphNode(GraphNode): - """ - Represents nodes for adjacency matrix implementation - of graphs. - - Parameters - ========== - - name: str - The index of the node in the AdjacencyMatrix. - data - The data to be stored at each graph node. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. 
- """ - __slots__ = ['name', 'data'] - - @classmethod - def methods(cls): - return ['__new__'] - - def __new__(cls, name, data=None, - **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.PYTHON: - obj = GraphNode.__new__(cls) - obj.name, obj.data, obj.is_connected = \ - str(name), data, None - obj._impl = 'adjacency_matrix' - return obj - else: - return _graph_utils.AdjacencyMatrixGraphNode(str(name), data) - -class GraphEdge(object): - """ - Represents the concept of edges in graphs. - - Parameters - ========== - - node1: GraphNode or it's child classes - The source node of the edge. - node2: GraphNode or it's child classes - The target node of the edge. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - """ - @classmethod - def methods(cls): - return ['__new__', '__str__'] - - def __new__(cls, node1, node2, value=None, - **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.PYTHON: - obj = object.__new__(cls) - obj.source, obj.target = node1, node2 - obj.value = value - return obj - else: - return _graph_utils.GraphEdge(node1, node2, value) - - def __str__(self): - return str((self.source.name, self.target.name)) - -class Set(object): - """ - Represents a set in a forest of disjoint sets. - - Parameters - ========== - - key: Hashable python object - The key which uniquely identifies - the set. - data: Python object - The data to be stored in the set. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. 
- """ - - __slots__ = ['parent', 'size', 'key', 'data'] - - @classmethod - def methods(cls): - return ['__new__'] - - def __new__(cls, key, data=None, - **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = object.__new__(cls) - obj.key = key - obj.data = data - obj.parent, obj.size = [None]*2 - return obj - -class TrieNode(Node): - """ - Represents nodes in the trie data structure. - - Parameters - ========== - - char: The character stored in the current node. - Optional, by default None. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - """ - - __slots__ = ['char', '_children', 'is_terminal'] - - @classmethod - def methods(cls): - return ['__new__', 'add_child', 'get_child', 'remove_child'] - - def __new__(cls, char=None, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = Node.__new__(cls) - obj.char = char - obj._children = {} - obj.is_terminal = False - return obj - - def add_child(self, trie_node) -> None: - self._children[trie_node.char] = trie_node - - def get_child(self, char: str): - return self._children.get(char, None) - - def remove_child(self, char: str) -> None: - self._children.pop(char) - -def _comp(u, v, tcomp): - """ - Overloaded comparator for comparing - two values where any one of them can be - `None`. - """ - if u is None and v is not None: - return False - elif u is not None and v is None: - return True - elif u is None and v is None: - return False - else: - return tcomp(u, v) - -def _check_range_query_inputs(input, bounds): - start, end = input - if start >= end: - raise ValueError("Input (%d, %d) range is empty."%(start, end)) - if start < bounds[0] or end > bounds[1]: - raise IndexError("Input (%d, %d) range is out of " - "bounds of array indices (%d, %d)." 
- %(start, end, bounds[0], bounds[1])) - -def minimum(x_y): - if len(x_y) == 1: - return x_y[0] - - x, y = x_y - if x is None or y is None: - return x if y is None else y - - return min(x, y) - -def greatest_common_divisor(x_y): - if len(x_y) == 1: - return x_y[0] - - x, y = x_y - if x is None or y is None: - return x if y is None else y - - return math.gcd(x, y) - -def summation(x_y): - if len(x_y) == 1: - return x_y[0] - - x, y = x_y - if x is None or y is None: - return x if y is None else y - - return x + y diff --git a/lib/python3.12/site-packages/pydatastructs/utils/raises_util.py b/lib/python3.12/site-packages/pydatastructs/utils/raises_util.py deleted file mode 100644 index 3a324d38d..000000000 --- a/lib/python3.12/site-packages/pydatastructs/utils/raises_util.py +++ /dev/null @@ -1,17 +0,0 @@ -import pytest - -def raises(exception, code): - """ - Utility for testing exceptions. - - Parameters - ========== - - exception - A valid python exception - code: lambda - Code that causes exception - """ - with pytest.raises(exception): - code() - return True diff --git a/lib/python3.12/site-packages/pydatastructs/utils/testing_util.py b/lib/python3.12/site-packages/pydatastructs/utils/testing_util.py deleted file mode 100644 index e5c0627b5..000000000 --- a/lib/python3.12/site-packages/pydatastructs/utils/testing_util.py +++ /dev/null @@ -1,83 +0,0 @@ -import os -import pathlib -import glob -import types - -__all__ = ['test'] - - -# Root pydatastructs directory -ROOT_DIR = pathlib.Path(os.path.abspath(__file__)).parents[1] - - -SKIP_FILES = ['testing_util.py'] - -def test(submodules=None, only_benchmarks=False, - benchmarks_size=1000, **kwargs): - """ - Runs the library tests using pytest - - Parameters - ========== - - submodules: Optional, list[str] - List of submodules test to run. By default runs - all the tests - """ - try: - import pytest - except ImportError: - raise Exception("pytest must be installed. 
Use `pip install pytest` " - "to install it.") - - # set benchmarks size - os.environ["PYDATASTRUCTS_BENCHMARK_SIZE"] = str(benchmarks_size) - test_files = [] - if submodules: - if not isinstance(submodules, (list, tuple)): - submodules = [submodules] - for path in glob.glob(f'{ROOT_DIR}/**/test_*.py', recursive=True): - skip_test = False - for skip in SKIP_FILES: - if skip in path: - skip_test = True - break - if skip_test: - continue - for sub_var in submodules: - if isinstance(sub_var, types.ModuleType): - sub = sub_var.__name__.split('.')[-1] - elif isinstance(sub_var, str): - sub = sub_var - else: - raise Exception("Submodule should be of type: str or module") - if sub in path: - if not only_benchmarks: - if 'benchmarks' not in path: - test_files.append(path) - else: - if 'benchmarks' in path: - test_files.append(path) - break - else: - for path in glob.glob(f'{ROOT_DIR}/**/test_*.py', recursive=True): - skip_test = False - for skip in SKIP_FILES: - if skip in path: - skip_test = True - break - if skip_test: - continue - if not only_benchmarks: - if 'benchmarks' not in path: - test_files.append(path) - else: - if 'benchmarks' in path: - test_files.append(path) - - extra_args = [] - if kwargs.get("n", False) is not False: - extra_args.append("-n") - extra_args.append(str(kwargs["n"])) - - pytest.main(extra_args + test_files) diff --git a/lib/python3.12/site-packages/pydatastructs/utils/tests/__init__.py b/lib/python3.12/site-packages/pydatastructs/utils/tests/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/lib/python3.12/site-packages/pydatastructs/utils/tests/test_code_quality.py b/lib/python3.12/site-packages/pydatastructs/utils/tests/test_code_quality.py deleted file mode 100644 index 67afe49e8..000000000 --- a/lib/python3.12/site-packages/pydatastructs/utils/tests/test_code_quality.py +++ /dev/null @@ -1,239 +0,0 @@ -import os, re, sys, pydatastructs, inspect -from typing import Type -import pytest - -def _list_files(checker): - 
root_path = os.path.abspath( - os.path.join( - os.path.split(__file__)[0], - os.pardir, os.pardir)) - code_files = [] - for (dirpath, _, filenames) in os.walk(root_path): - for _file in filenames: - if checker(_file): - code_files.append(os.path.join(dirpath, _file)) - return code_files - -checker = lambda _file: (re.match(r".*\.py$", _file) or - re.match(r".*\.cpp$", _file) or - re.match(r".*\.hpp$", _file)) -code_files = _list_files(checker) - -def test_trailing_white_spaces(): - messages = [("The following places in your code " - "end with white spaces.")] - msg = "{}:{}" - for file_path in code_files: - file = open(file_path, "r") - line = file.readline() - line_number = 1 - while line != "": - if line.endswith(" \n") or line.endswith("\t\n") \ - or line.endswith(" ") or line.endswith("\t"): - messages.append(msg.format(file_path, line_number)) - line = file.readline() - line_number += 1 - file.close() - - if len(messages) > 1: - assert False, '\n'.join(messages) - -def test_final_new_lines(): - messages = [("The following files in your code " - "do not end with a single new line.")] - msg1 = "No new line in {}:{}" - msg2 = "More than one new line in {}:{}" - for file_path in code_files: - file = open(file_path, "r") - lines = [] - line = file.readline() - while line != "": - lines.append(line) - line = file.readline() - if lines: - if lines[-1][-1] != "\n": - messages.append(msg1.format(file_path, len(lines))) - if lines[-1] == "\n" and lines[-2][-1] == "\n": - messages.append(msg2.format(file_path, len(lines))) - file.close() - - if len(messages) > 1: - assert False, '\n'.join(messages) - -def test_comparison_True_False_None(): - messages = [("The following places in your code " - "use `!=` or `==` for comparing True/False/None." 
- "Please use `is` instead.")] - msg = "{}:{}" - checker = lambda _file: re.match(r".*\.py$", _file) - py_files = _list_files(checker) - for file_path in py_files: - if file_path.find("test_code_quality.py") == -1: - file = open(file_path, "r") - line = file.readline() - line_number = 1 - while line != "": - if ((line.find("== True") != -1) or - (line.find("== False") != -1) or - (line.find("== None") != -1) or - (line.find("!= True") != -1) or - (line.find("!= False") != -1) or - (line.find("!= None") != -1)): - messages.append(msg.format(file_path, line_number)) - line = file.readline() - line_number += 1 - file.close() - - if len(messages) > 1: - assert False, '\n'.join(messages) - -@pytest.mark.xfail -def test_reinterpret_cast(): - - def is_variable(str): - for ch in str: - if not (ch == '_' or ch.isalnum()): - return False - return True - - checker = lambda _file: (re.match(r".*\.cpp$", _file) or - re.match(r".*\.hpp$", _file)) - cpp_files = _list_files(checker) - messages = [("The following lines should use reinterpret_cast" - " to cast pointers from one type to another")] - msg = "Casting to {} at {}:{}" - for file_path in cpp_files: - file = open(file_path, "r") - line = file.readline() - line_number = 1 - while line != "": - found_open = False - between_open_close = "" - for char in line: - if char == '(': - found_open = True - elif char == ')': - if (between_open_close and - between_open_close[-1] == '*' and - is_variable(between_open_close[:-1])): - messages.append(msg.format(between_open_close[:-1], - file_path, line_number)) - between_open_close = "" - found_open = False - elif char != ' ' and found_open: - between_open_close += char - line = file.readline() - line_number += 1 - file.close() - - if len(messages) > 1: - assert False, '\n'.join(messages) - -def test_presence_of_tabs(): - messages = [("The following places in your code " - "use tabs instead of spaces.")] - msg = "{}:{}" - for file_path in code_files: - file = open(file_path, "r") - 
line_number = 1 - line = file.readline() - while line != "": - if (line.find('\t') != -1): - messages.append(msg.format(file_path, line_number)) - line = file.readline() - line_number += 1 - file.close() - - if len(messages) > 1: - assert False, '\n'.join(messages) - -def _apis(): - import pydatastructs as pyds - return [ - pyds.graphs.adjacency_list.AdjacencyList, - pyds.graphs.adjacency_matrix.AdjacencyMatrix, - pyds.DoublyLinkedList, pyds.SinglyLinkedList, - pyds.SinglyCircularLinkedList, - pyds.DoublyCircularLinkedList, - pyds.OneDimensionalArray, pyds.MultiDimensionalArray, - pyds.DynamicOneDimensionalArray, - pyds.trees.BinaryTree, pyds.BinarySearchTree, - pyds.AVLTree, pyds.SplayTree, pyds.BinaryTreeTraversal, - pyds.DHeap, pyds.BinaryHeap, pyds.TernaryHeap, pyds.BinomialHeap, - pyds.MAryTree, pyds.OneDimensionalSegmentTree, - pyds.Queue, pyds.miscellaneous_data_structures.queue.ArrayQueue, - pyds.miscellaneous_data_structures.queue.LinkedListQueue, - pyds.PriorityQueue, - pyds.miscellaneous_data_structures.queue.LinkedListPriorityQueue, - pyds.miscellaneous_data_structures.queue.BinaryHeapPriorityQueue, - pyds.miscellaneous_data_structures.queue.BinomialHeapPriorityQueue, - pyds.Stack, pyds.miscellaneous_data_structures.stack.ArrayStack, - pyds.miscellaneous_data_structures.stack.LinkedListStack, - pyds.DisjointSetForest, pyds.BinomialTree, pyds.TreeNode, pyds.MAryTreeNode, - pyds.LinkedListNode, pyds.BinomialTreeNode, pyds.AdjacencyListGraphNode, - pyds.AdjacencyMatrixGraphNode, pyds.GraphEdge, pyds.Set, pyds.BinaryIndexedTree, - pyds.CartesianTree, pyds.CartesianTreeNode, pyds.Treap, pyds.RedBlackTreeNode, pyds.RedBlackTree, - pyds.Trie, pyds.TrieNode, pyds.SkipList, pyds.RangeQueryStatic, pyds.RangeQueryDynamic, pyds.SparseTable, - pyds.miscellaneous_data_structures.segment_tree.OneDimensionalArraySegmentTree, - pyds.bubble_sort, pyds.linear_search, pyds.binary_search, pyds.jump_search, - pyds.selection_sort, pyds.insertion_sort, pyds.quick_sort, 
pyds.intro_sort] - -def test_public_api(): - pyds = pydatastructs - apis = _apis() - print("\n\nAPI Report") - print("==========") - for name in apis: - if inspect.isclass(name): - _class = name - mro = _class.__mro__ - must_methods = _class.methods() - print("\n" + str(name)) - print("Methods Implemented") - print(must_methods) - print("Parent Classes") - print(mro[1:]) - for supercls in mro: - if supercls != _class: - for method in must_methods: - if hasattr(supercls, method) and \ - getattr(supercls, method) == \ - getattr(_class, method): - assert False, ("%s class doesn't " - "have %s method implemented."%( - _class, method - )) - -def test_backend_argument_message(): - - import pydatastructs as pyds - backend_implemented = [ - pyds.OneDimensionalArray, - pyds.DynamicOneDimensionalArray, - pyds.quick_sort, - pyds.AdjacencyListGraphNode, - pyds.AdjacencyMatrixGraphNode, - pyds.GraphEdge - ] - - def call_and_raise(api, pos_args_count=0): - try: - if pos_args_count == 0: - api(backend=None) - elif pos_args_count == 1: - api(None, backend=None) - elif pos_args_count == 2: - api(None, None, backend=None) - except ValueError as value_error: - assert str(api) in value_error.args[0] - except TypeError as type_error: - max_pos_args_count = 2 - if pos_args_count <= max_pos_args_count: - call_and_raise(api, pos_args_count + 1) - else: - raise type_error - - apis = _apis() - for api in apis: - if api not in backend_implemented: - call_and_raise(api, 0) diff --git a/lib/python3.12/site-packages/pydatastructs/utils/tests/test_misc_util.py b/lib/python3.12/site-packages/pydatastructs/utils/tests/test_misc_util.py deleted file mode 100644 index 13ba2ec8e..000000000 --- a/lib/python3.12/site-packages/pydatastructs/utils/tests/test_misc_util.py +++ /dev/null @@ -1,84 +0,0 @@ -from pydatastructs.utils import (TreeNode, AdjacencyListGraphNode, AdjacencyMatrixGraphNode, - GraphEdge, BinomialTreeNode, MAryTreeNode, CartesianTreeNode, RedBlackTreeNode, SkipNode) -from 
pydatastructs.utils.raises_util import raises -from pydatastructs.utils.misc_util import Backend - -def test_cpp_TreeNode(): - n = TreeNode(1,100,backend=Backend.CPP) - assert str(n) == "(None, 1, 100, None)" - -def test_AdjacencyListGraphNode(): - g_1 = AdjacencyListGraphNode('g_1', 1) - g_2 = AdjacencyListGraphNode('g_2', 2) - g = AdjacencyListGraphNode('g', 0, adjacency_list=[g_1, g_2]) - g.add_adjacent_node('g_3', 3) - assert g.g_1.name == 'g_1' - assert g.g_2.name == 'g_2' - assert g.g_3.name == 'g_3' - g.remove_adjacent_node('g_3') - assert hasattr(g, 'g_3') is False - assert raises(ValueError, lambda: g.remove_adjacent_node('g_3')) - g.add_adjacent_node('g_1', 4) - assert g.g_1.data == 4 - assert str(g) == "('g', 0)" - - h_1 = AdjacencyListGraphNode('h_1', 1, backend = Backend.CPP) - h_2 = AdjacencyListGraphNode('h_2', 2, backend = Backend.CPP) - assert str(h_1) == "('h_1', 1)" - h = AdjacencyListGraphNode('h', 0, adjacency_list = [h_1, h_2], backend = Backend.CPP) - h.add_adjacent_node('h_3', 3) - assert h.adjacent['h_1'].name == 'h_1' - assert h.adjacent['h_2'].name == 'h_2' - assert h.adjacent['h_3'].name == 'h_3' - h.remove_adjacent_node('h_3') - assert 'h_3' not in h.adjacent - assert raises(ValueError, lambda: h.remove_adjacent_node('h_3')) - h.add_adjacent_node('h_1', 4) - assert h.adjacent['h_1'] == 4 - assert str(h) == "('h', 0)" - h_5 = AdjacencyListGraphNode('h_5', h_1, backend = Backend.CPP) - assert h_5.data == h_1 - -def test_AdjacencyMatrixGraphNode(): - g = AdjacencyMatrixGraphNode("1", 3) - g2 = AdjacencyMatrixGraphNode("1", 3, backend = Backend.CPP) - assert str(g) == "('1', 3)" - assert str(g2) == "('1', 3)" - g3 = AdjacencyListGraphNode("3", g2, backend = Backend.CPP) - assert g3.data == g2 - - -def test_GraphEdge(): - g_1 = AdjacencyListGraphNode('g_1', 1) - g_2 = AdjacencyListGraphNode('g_2', 2) - e = GraphEdge(g_1, g_2, value=2) - assert str(e) == "('g_1', 'g_2')" - - h_1 = AdjacencyListGraphNode('h_1', 1, backend = Backend.CPP) - h_2 
= AdjacencyListGraphNode('h_2', 2, backend = Backend.CPP) - e2 = GraphEdge(h_1, h_2, value = 2, backend = Backend.CPP) - assert str(e2) == "('h_1', 'h_2', 2)" - -def test_BinomialTreeNode(): - b = BinomialTreeNode(1,1) - b.add_children(*[BinomialTreeNode(i,i) for i in range(2,10)]) - assert str(b) == '(1, 1)' - assert str(b.children) == "['(2, 2)', '(3, 3)', '(4, 4)', '(5, 5)', '(6, 6)', '(7, 7)', '(8, 8)', '(9, 9)']" - -def test_MAryTreeNode(): - m = MAryTreeNode(1, 1) - m.add_children(*list(range(2, 10))) - assert str(m) == "(1, 1)" - assert str(m.children) == "['2', '3', '4', '5', '6', '7', '8', '9']" - -def test_CartesianTreeNode(): - c = CartesianTreeNode(1, 1, 1) - assert str(c) == "(None, 1, 1, 1, None)" - -def test_RedBlackTreeNode(): - c = RedBlackTreeNode(1, 1) - assert str(c) == "(None, 1, 1, None)" - -def test_SkipNode(): - c = SkipNode(1) - assert str(c) == '(1, None)' From c1ea567887355996442c4456d1aa741b896eeba6 Mon Sep 17 00:00:00 2001 From: Prerak Singh Date: Thu, 2 Oct 2025 13:10:09 +0530 Subject: [PATCH 28/47] bug fix --- .../site-packages/pydatastructs/__init__.py | 8 + .../pydatastructs/graphs/__init__.py | 28 + .../pydatastructs/graphs/_backend/__init__.py | 0 .../pydatastructs/graphs/adjacency_list.py | 101 + .../pydatastructs/graphs/adjacency_matrix.py | 100 + .../pydatastructs/graphs/algorithms.py | 1386 ++++++++++++ .../pydatastructs/graphs/graph.py | 163 ++ .../pydatastructs/graphs/tests/__init__.py | 0 .../graphs/tests/test_adjacency_list.py | 83 + .../graphs/tests/test_adjacency_matrix.py | 53 + .../graphs/tests/test_algorithms.py | 596 +++++ .../linear_data_structures/__init__.py | 53 + .../_backend/__init__.py | 0 .../linear_data_structures/algorithms.py | 2010 +++++++++++++++++ .../linear_data_structures/arrays.py | 473 ++++ .../linear_data_structures/linked_lists.py | 819 +++++++ .../linear_data_structures/tests/__init__.py | 0 .../tests/test_algorithms.py | 423 ++++ .../tests/test_arrays.py | 157 ++ .../tests/test_linked_lists.py | 
193 ++ .../miscellaneous_data_structures/__init__.py | 51 + .../_backend/__init__.py | 0 .../algorithms.py | 335 +++ .../binomial_trees.py | 91 + .../disjoint_set.py | 143 ++ .../miscellaneous_data_structures/multiset.py | 42 + .../miscellaneous_data_structures/queue.py | 498 ++++ .../segment_tree.py | 225 ++ .../sparse_table.py | 108 + .../miscellaneous_data_structures/stack.py | 200 ++ .../tests/__init__.py | 0 .../tests/test_binomial_trees.py | 17 + .../tests/test_disjoint_set.py | 70 + .../tests/test_multiset.py | 39 + .../tests/test_queue.py | 116 + .../tests/test_range_query_dynamic.py | 71 + .../tests/test_range_query_static.py | 63 + .../tests/test_stack.py | 77 + .../pydatastructs/strings/__init__.py | 18 + .../pydatastructs/strings/algorithms.py | 247 ++ .../pydatastructs/strings/tests/__init__.py | 0 .../strings/tests/test_algorithms.py | 76 + .../pydatastructs/strings/tests/test_trie.py | 49 + .../pydatastructs/strings/trie.py | 201 ++ .../pydatastructs/trees/__init__.py | 40 + .../pydatastructs/trees/_backend/__init__.py | 0 .../pydatastructs/trees/binary_trees.py | 1888 ++++++++++++++++ .../pydatastructs/trees/heaps.py | 582 +++++ .../pydatastructs/trees/m_ary_trees.py | 172 ++ .../trees/space_partitioning_trees.py | 242 ++ .../pydatastructs/trees/tests/__init__.py | 0 .../trees/tests/test_binary_trees.py | 820 +++++++ .../pydatastructs/trees/tests/test_heaps.py | 236 ++ .../trees/tests/test_m_ary_trees.py | 5 + .../tests/test_space_partitioning_tree.py | 20 + .../pydatastructs/utils/__init__.py | 29 + .../pydatastructs/utils/_backend/__init__.py | 0 .../pydatastructs/utils/misc_util.py | 632 ++++++ .../pydatastructs/utils/raises_util.py | 17 + .../pydatastructs/utils/testing_util.py | 83 + .../pydatastructs/utils/tests/__init__.py | 0 .../utils/tests/test_code_quality.py | 239 ++ .../utils/tests/test_misc_util.py | 84 + 63 files changed, 14472 insertions(+) create mode 100644 lib/python3.12/site-packages/pydatastructs/__init__.py create mode 100644 
lib/python3.12/site-packages/pydatastructs/graphs/__init__.py create mode 100644 lib/python3.12/site-packages/pydatastructs/graphs/_backend/__init__.py create mode 100644 lib/python3.12/site-packages/pydatastructs/graphs/adjacency_list.py create mode 100644 lib/python3.12/site-packages/pydatastructs/graphs/adjacency_matrix.py create mode 100644 lib/python3.12/site-packages/pydatastructs/graphs/algorithms.py create mode 100644 lib/python3.12/site-packages/pydatastructs/graphs/graph.py create mode 100644 lib/python3.12/site-packages/pydatastructs/graphs/tests/__init__.py create mode 100644 lib/python3.12/site-packages/pydatastructs/graphs/tests/test_adjacency_list.py create mode 100644 lib/python3.12/site-packages/pydatastructs/graphs/tests/test_adjacency_matrix.py create mode 100644 lib/python3.12/site-packages/pydatastructs/graphs/tests/test_algorithms.py create mode 100644 lib/python3.12/site-packages/pydatastructs/linear_data_structures/__init__.py create mode 100644 lib/python3.12/site-packages/pydatastructs/linear_data_structures/_backend/__init__.py create mode 100644 lib/python3.12/site-packages/pydatastructs/linear_data_structures/algorithms.py create mode 100644 lib/python3.12/site-packages/pydatastructs/linear_data_structures/arrays.py create mode 100644 lib/python3.12/site-packages/pydatastructs/linear_data_structures/linked_lists.py create mode 100644 lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/__init__.py create mode 100644 lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_algorithms.py create mode 100644 lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_arrays.py create mode 100644 lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_linked_lists.py create mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/__init__.py create mode 100644 
lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/_backend/__init__.py create mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/algorithms.py create mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/binomial_trees.py create mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/disjoint_set.py create mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/multiset.py create mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/queue.py create mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/segment_tree.py create mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/sparse_table.py create mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/stack.py create mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/__init__.py create mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_binomial_trees.py create mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_disjoint_set.py create mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_multiset.py create mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_queue.py create mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_range_query_dynamic.py create mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_range_query_static.py create mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_stack.py create mode 100644 lib/python3.12/site-packages/pydatastructs/strings/__init__.py create mode 100644 
lib/python3.12/site-packages/pydatastructs/strings/algorithms.py create mode 100644 lib/python3.12/site-packages/pydatastructs/strings/tests/__init__.py create mode 100644 lib/python3.12/site-packages/pydatastructs/strings/tests/test_algorithms.py create mode 100644 lib/python3.12/site-packages/pydatastructs/strings/tests/test_trie.py create mode 100644 lib/python3.12/site-packages/pydatastructs/strings/trie.py create mode 100644 lib/python3.12/site-packages/pydatastructs/trees/__init__.py create mode 100644 lib/python3.12/site-packages/pydatastructs/trees/_backend/__init__.py create mode 100644 lib/python3.12/site-packages/pydatastructs/trees/binary_trees.py create mode 100644 lib/python3.12/site-packages/pydatastructs/trees/heaps.py create mode 100644 lib/python3.12/site-packages/pydatastructs/trees/m_ary_trees.py create mode 100644 lib/python3.12/site-packages/pydatastructs/trees/space_partitioning_trees.py create mode 100644 lib/python3.12/site-packages/pydatastructs/trees/tests/__init__.py create mode 100644 lib/python3.12/site-packages/pydatastructs/trees/tests/test_binary_trees.py create mode 100644 lib/python3.12/site-packages/pydatastructs/trees/tests/test_heaps.py create mode 100644 lib/python3.12/site-packages/pydatastructs/trees/tests/test_m_ary_trees.py create mode 100644 lib/python3.12/site-packages/pydatastructs/trees/tests/test_space_partitioning_tree.py create mode 100644 lib/python3.12/site-packages/pydatastructs/utils/__init__.py create mode 100644 lib/python3.12/site-packages/pydatastructs/utils/_backend/__init__.py create mode 100644 lib/python3.12/site-packages/pydatastructs/utils/misc_util.py create mode 100644 lib/python3.12/site-packages/pydatastructs/utils/raises_util.py create mode 100644 lib/python3.12/site-packages/pydatastructs/utils/testing_util.py create mode 100644 lib/python3.12/site-packages/pydatastructs/utils/tests/__init__.py create mode 100644 lib/python3.12/site-packages/pydatastructs/utils/tests/test_code_quality.py create 
mode 100644 lib/python3.12/site-packages/pydatastructs/utils/tests/test_misc_util.py diff --git a/lib/python3.12/site-packages/pydatastructs/__init__.py b/lib/python3.12/site-packages/pydatastructs/__init__.py new file mode 100644 index 000000000..27cc5a202 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/__init__.py @@ -0,0 +1,8 @@ +from .utils import * +from .linear_data_structures import * +from .trees import * +from .miscellaneous_data_structures import * +from .graphs import * +from .strings import * + +__version__ = "1.0.1-dev" diff --git a/lib/python3.12/site-packages/pydatastructs/graphs/__init__.py b/lib/python3.12/site-packages/pydatastructs/graphs/__init__.py new file mode 100644 index 000000000..21e0a5f35 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/graphs/__init__.py @@ -0,0 +1,28 @@ +__all__ = [] + +from . import graph +from .graph import ( + Graph +) +__all__.extend(graph.__all__) + +from . import algorithms +from . import adjacency_list +from . 
import adjacency_matrix + +from .algorithms import ( + breadth_first_search, + breadth_first_search_parallel, + minimum_spanning_tree, + minimum_spanning_tree_parallel, + strongly_connected_components, + depth_first_search, + shortest_paths, + all_pair_shortest_paths, + topological_sort, + topological_sort_parallel, + max_flow, + find_bridges +) + +__all__.extend(algorithms.__all__) diff --git a/lib/python3.12/site-packages/pydatastructs/graphs/_backend/__init__.py b/lib/python3.12/site-packages/pydatastructs/graphs/_backend/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/lib/python3.12/site-packages/pydatastructs/graphs/adjacency_list.py b/lib/python3.12/site-packages/pydatastructs/graphs/adjacency_list.py new file mode 100644 index 000000000..bd901b380 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/graphs/adjacency_list.py @@ -0,0 +1,101 @@ +from pydatastructs.graphs.graph import Graph +from pydatastructs.graphs._backend.cpp import _graph +from pydatastructs.utils.misc_util import ( + GraphEdge, Backend, raise_if_backend_is_not_python) + +__all__ = [ + 'AdjacencyList' +] + +class AdjacencyList(Graph): + """ + Adjacency list implementation of graphs. 
+ + See also + ======== + + pydatastructs.graphs.graph.Graph + """ + def __new__(cls, *vertices, **kwargs): + + backend = kwargs.get('backend', Backend.PYTHON) + if backend == Backend.PYTHON: + obj = object.__new__(cls) + for vertex in vertices: + obj.__setattr__(vertex.name, vertex) + obj.vertices = [vertex.name for vertex in vertices] + obj.edge_weights = {} + obj._impl = 'adjacency_list' + return obj + else: + graph = _graph.AdjacencyListGraph() + for vertice in vertices: + graph.add_vertex(vertice) + return graph + + @classmethod + def methods(self): + return ['is_adjacent', 'neighbors', + 'add_vertex', 'remove_vertex', 'add_edge', + 'get_edge', 'remove_edge', '__new__'] + + def is_adjacent(self, node1, node2): + node1 = self.__getattribute__(node1) + return hasattr(node1, node2) + + def num_vertices(self): + return len(self.vertices) + + def num_edges(self): + return sum(len(self.neighbors(v)) for v in self.vertices) + + def neighbors(self, node): + node = self.__getattribute__(node) + return [self.__getattribute__(name) for name in node.adjacent] + + def add_vertex(self, node): + if not hasattr(self, node.name): + self.vertices.append(node.name) + self.__setattr__(node.name, node) + + def remove_vertex(self, name): + delattr(self, name) + self.vertices.remove(name) + for node in self.vertices: + node_obj = self.__getattribute__(node) + if hasattr(node_obj, name): + delattr(node_obj, name) + node_obj.adjacent.remove(name) + + def add_edge(self, source, target, cost=None): + source, target = str(source), str(target) + error_msg = ("Vertex %s is not present in the graph." + "Call Graph.add_vertex to add a new" + "vertex. Graph.add_edge is only responsible" + "for adding edges and it will not add new" + "vertices on its own. 
This is done to maintain" + "clear separation between the functionality of" + "these two methods.") + if not hasattr(self, source): + raise ValueError(error_msg % (source)) + if not hasattr(self, target): + raise ValueError(error_msg % (target)) + + source, target = self.__getattribute__(source), \ + self.__getattribute__(target) + source.add_adjacent_node(target.name) + if cost is not None: + self.edge_weights[source.name + "_" + target.name] = \ + GraphEdge(source, target, cost) + + def get_edge(self, source, target): + return self.edge_weights.get( + source + "_" + target, + None) + + def remove_edge(self, source, target): + source, target = self.__getattribute__(source), \ + self.__getattribute__(target) + source.remove_adjacent_node(target.name) + self.edge_weights.pop(source.name + "_" + target.name, + None) diff --git a/lib/python3.12/site-packages/pydatastructs/graphs/adjacency_matrix.py b/lib/python3.12/site-packages/pydatastructs/graphs/adjacency_matrix.py new file mode 100644 index 000000000..9c2326b86 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/graphs/adjacency_matrix.py @@ -0,0 +1,100 @@ +from pydatastructs.graphs.graph import Graph +from pydatastructs.graphs._backend.cpp import _graph +from pydatastructs.utils.misc_util import ( + GraphEdge, raise_if_backend_is_not_python, + Backend) + +__all__ = [ + 'AdjacencyMatrix' +] + +class AdjacencyMatrix(Graph): + """ + Adjacency matrix implementation of graphs. 
+ + See also + ======== + + pydatastructs.graphs.graph.Graph + """ + def __new__(cls, *vertices, **kwargs): + backend = kwargs.get('backend', Backend.PYTHON) + if backend == Backend.PYTHON: + obj = object.__new__(cls) + obj.vertices = [vertex.name for vertex in vertices] + for vertex in vertices: + obj.__setattr__(vertex.name, vertex) + obj.matrix = {} + for vertex in vertices: + obj.matrix[vertex.name] = {} + obj.edge_weights = {} + obj._impl = 'adjacency_matrix' + return obj + else: + return _graph.AdjacencyMatrixGraph(vertices) + + @classmethod + def methods(self): + return ['is_adjacent', 'neighbors', + 'add_edge', 'get_edge', 'remove_edge', + '__new__'] + + def is_adjacent(self, node1, node2): + node1, node2 = str(node1), str(node2) + row = self.matrix.get(node1, {}) + return row.get(node2, False) is not False + + def num_vertices(self): + return len(self.vertices) + + def num_edges(self): + return sum(len(v) for v in self.matrix.values()) + + def neighbors(self, node): + node = str(node) + neighbors = [] + row = self.matrix.get(node, {}) + for node, presence in row.items(): + if presence: + neighbors.append(self.__getattribute__( + str(node))) + return neighbors + + def add_vertex(self, node): + raise NotImplementedError("Currently we allow " + "adjacency matrix for static graphs only") + + def remove_vertex(self, node): + raise NotImplementedError("Currently we allow " + "adjacency matrix for static graphs only.") + + def add_edge(self, source, target, cost=None): + source, target = str(source), str(target) + error_msg = ("Vertex %s is not present in the graph." + "Call Graph.add_vertex to add a new" + "vertex. Graph.add_edge is only responsible" + "for adding edges and it will not add new" + "vertices on its own. 
This is done to maintain" + "clear separation between the functionality of" + "these two methods.") + if source not in self.matrix: + raise ValueError(error_msg % (source)) + if target not in self.matrix: + raise ValueError(error_msg % (target)) + + self.matrix[source][target] = True + if cost is not None: + self.edge_weights[source + "_" + target] = \ + GraphEdge(self.__getattribute__(source), + self.__getattribute__(target), + cost) + + def get_edge(self, source, target): + return self.edge_weights.get( + str(source) + "_" + str(target), + None) + + def remove_edge(self, source, target): + source, target = str(source), str(target) + self.matrix[source][target] = False + self.edge_weights.pop(str(source) + "_" + str(target), None) diff --git a/lib/python3.12/site-packages/pydatastructs/graphs/algorithms.py b/lib/python3.12/site-packages/pydatastructs/graphs/algorithms.py new file mode 100644 index 000000000..9324b7278 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/graphs/algorithms.py @@ -0,0 +1,1386 @@ +""" +Contains algorithms associated with graph +data structure. 
+""" +from collections import deque +from concurrent.futures import ThreadPoolExecutor +from pydatastructs.utils.misc_util import ( + _comp, raise_if_backend_is_not_python, Backend, AdjacencyListGraphNode) +from pydatastructs.miscellaneous_data_structures import ( + DisjointSetForest, PriorityQueue) +from pydatastructs.graphs.graph import Graph +from pydatastructs.linear_data_structures.algorithms import merge_sort_parallel +from pydatastructs import PriorityQueue + +__all__ = [ + 'breadth_first_search', + 'breadth_first_search_parallel', + 'minimum_spanning_tree', + 'minimum_spanning_tree_parallel', + 'strongly_connected_components', + 'depth_first_search', + 'shortest_paths', + 'all_pair_shortest_paths', + 'topological_sort', + 'topological_sort_parallel', + 'max_flow', + 'find_bridges' +] + +Stack = Queue = deque + +def breadth_first_search( + graph, source_node, operation, *args, **kwargs): + """ + Implementation of serial breadth first search(BFS) + algorithm. + + Parameters + ========== + + graph: Graph + The graph on which BFS is to be performed. + source_node: str + The name of the source node from where the BFS is + to be initiated. + operation: function + The function which is to be applied + on every node when it is visited. + The prototype which is to be followed is, + `function_name(curr_node, next_node, + arg_1, arg_2, . . ., arg_n)`. + Here, the first two arguments denote, the + current node and the node next to current node. + The rest of the arguments are optional and you can + provide your own stuff there. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Note + ==== + + You should pass all the arguments which you are going + to use in the prototype of your `operation` after + passing the operation function. 
+ + Examples + ======== + + >>> from pydatastructs import Graph, AdjacencyListGraphNode + >>> V1 = AdjacencyListGraphNode("V1") + >>> V2 = AdjacencyListGraphNode("V2") + >>> V3 = AdjacencyListGraphNode("V3") + >>> G = Graph(V1, V2, V3) + >>> from pydatastructs import breadth_first_search + >>> def f(curr_node, next_node, dest_node): + ... return curr_node != dest_node + ... + >>> G.add_edge(V1.name, V2.name) + >>> G.add_edge(V2.name, V3.name) + >>> breadth_first_search(G, V1.name, f, V3.name) + """ + backend = kwargs.get('backend', Backend.PYTHON) + if backend == Backend.PYTHON: + import pydatastructs.graphs.algorithms as algorithms + func = "_breadth_first_search_" + graph._impl + if not hasattr(algorithms, func): + raise NotImplementedError( + "Currently breadth first search isn't implemented for " + "%s graphs."%(graph._impl)) + return getattr(algorithms, func)( + graph, source_node, operation, *args, **kwargs) + else: + from pydatastructs.graphs._backend.cpp._algorithms import bfs_adjacency_list, bfs_adjacency_matrix + if (graph._impl == "adjacency_list"): + extra_args = args if args else () + return bfs_adjacency_list(graph, source_node, operation, extra_args) + if (graph._impl == "adjacency_matrix"): + extra_args = args if args else () + return bfs_adjacency_matrix(graph, source_node, operation, extra_args) + +def _breadth_first_search_adjacency_list( + graph, source_node, operation, *args, **kwargs): + bfs_queue = Queue() + visited = {} + bfs_queue.append(source_node) + visited[source_node] = True + while len(bfs_queue) != 0: + curr_node = bfs_queue.popleft() + next_nodes = graph.neighbors(curr_node) + if len(next_nodes) != 0: + for next_node in next_nodes: + if visited.get(next_node.name, False) is False: + status = operation(curr_node, next_node.name, *args, **kwargs) + if not status: + return None + bfs_queue.append(next_node.name) + visited[next_node.name] = True + else: + status = operation(curr_node, "", *args, **kwargs) + if not status: + return None 
+ +_breadth_first_search_adjacency_matrix = _breadth_first_search_adjacency_list + +def breadth_first_search_parallel( + graph, source_node, num_threads, operation, *args, **kwargs): + """ + Parallel implementation of breadth first search on graphs. + + Parameters + ========== + + graph: Graph + The graph on which BFS is to be performed. + source_node: str + The name of the source node from where the BFS is + to be initiated. + num_threads: int + Number of threads to be used for computation. + operation: function + The function which is to be applied + on every node when it is visited. + The prototype which is to be followed is, + `function_name(curr_node, next_node, + arg_1, arg_2, . . ., arg_n)`. + Here, the first two arguments denote, the + current node and the node next to current node. + The rest of the arguments are optional and you can + provide your own stuff there. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Note + ==== + + You should pass all the arguments which you are going + to use in the prototype of your `operation` after + passing the operation function. + + Examples + ======== + + >>> from pydatastructs import Graph, AdjacencyListGraphNode + >>> V1 = AdjacencyListGraphNode("V1") + >>> V2 = AdjacencyListGraphNode("V2") + >>> V3 = AdjacencyListGraphNode("V3") + >>> G = Graph(V1, V2, V3) + >>> from pydatastructs import breadth_first_search_parallel + >>> def f(curr_node, next_node, dest_node): + ... return curr_node != dest_node + ... 
+ >>> G.add_edge(V1.name, V2.name) + >>> G.add_edge(V2.name, V3.name) + >>> breadth_first_search_parallel(G, V1.name, 3, f, V3.name) + """ + raise_if_backend_is_not_python( + breadth_first_search_parallel, kwargs.get('backend', Backend.PYTHON)) + import pydatastructs.graphs.algorithms as algorithms + func = "_breadth_first_search_parallel_" + graph._impl + if not hasattr(algorithms, func): + raise NotImplementedError( + "Currently breadth first search isn't implemented for " + "%s graphs."%(graph._impl)) + return getattr(algorithms, func)( + graph, source_node, num_threads, operation, *args, **kwargs) + +def _generate_layer(**kwargs): + _args, _kwargs = kwargs.get('args'), kwargs.get('kwargs') + (graph, curr_node, next_layer, visited, operation) = _args[0:5] + op_args, op_kwargs = _args[5:], _kwargs + next_nodes = graph.neighbors(curr_node) + status = True + if len(next_nodes) != 0: + for next_node in next_nodes: + if visited.get(next_node, False) is False: + status = status and operation(curr_node, next_node.name, *op_args, **op_kwargs) + next_layer.add(next_node.name) + visited[next_node.name] = True + else: + status = status and operation(curr_node, "", *op_args, **op_kwargs) + return status + +def _breadth_first_search_parallel_adjacency_list( + graph, source_node, num_threads, operation, *args, **kwargs): + visited, layers = {}, {} + layers[0] = set() + layers[0].add(source_node) + visited[source_node] = True + layer = 0 + while len(layers[layer]) != 0: + layers[layer+1] = set() + with ThreadPoolExecutor(max_workers=num_threads) as Executor: + for node in layers[layer]: + status = Executor.submit( + _generate_layer, args= + (graph, node, layers[layer+1], visited, + operation, *args), kwargs=kwargs).result() + layer += 1 + if not status: + return None + +_breadth_first_search_parallel_adjacency_matrix = _breadth_first_search_parallel_adjacency_list + +def _generate_mst_object(graph): + mst = Graph(*[getattr(graph, str(v)) for v in graph.vertices]) + return mst 
+ +def _sort_edges(graph, num_threads=None): + edges = list(graph.edge_weights.items()) + if num_threads is None: + sort_key = lambda item: item[1].value + return sorted(edges, key=sort_key) + + merge_sort_parallel(edges, num_threads, + comp=lambda u,v: u[1].value <= v[1].value) + return edges + +def _minimum_spanning_tree_kruskal_adjacency_list(graph): + mst = _generate_mst_object(graph) + dsf = DisjointSetForest() + for v in graph.vertices: + dsf.make_set(v) + for _, edge in _sort_edges(graph): + u, v = edge.source.name, edge.target.name + if dsf.find_root(u) is not dsf.find_root(v): + mst.add_edge(u, v, edge.value) + mst.add_edge(v, u, edge.value) + dsf.union(u, v) + return mst + +_minimum_spanning_tree_kruskal_adjacency_matrix = \ + _minimum_spanning_tree_kruskal_adjacency_list + +def _minimum_spanning_tree_prim_adjacency_list(graph): + q = PriorityQueue(implementation='binomial_heap') + e = {} + mst = Graph(implementation='adjacency_list') + q.push(next(iter(graph.vertices)), 0) + while not q.is_empty: + v = q.pop() + if not hasattr(mst, v): + mst.add_vertex(graph.__getattribute__(v)) + if e.get(v, None) is not None: + edge = e[v] + mst.add_vertex(edge.target) + mst.add_edge(edge.source.name, edge.target.name, edge.value) + mst.add_edge(edge.target.name, edge.source.name, edge.value) + for w_node in graph.neighbors(v): + w = w_node.name + vw = graph.edge_weights[v + '_' + w] + q.push(w, vw.value) + if e.get(w, None) is None or \ + e[w].value > vw.value: + e[w] = vw + return mst + +def minimum_spanning_tree(graph, algorithm, **kwargs): + """ + Computes a minimum spanning tree for the given + graph and algorithm. + + Parameters + ========== + + graph: Graph + The graph whose minimum spanning tree + has to be computed. + algorithm: str + The algorithm which should be used for + computing a minimum spanning tree. + Currently the following algorithms are + supported, + + 'kruskal' -> Kruskal's algorithm as given in [1]. 
+ + 'prim' -> Prim's algorithm as given in [2]. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Returns + ======= + + mst: Graph + A minimum spanning tree using the implementation + same as the graph provided in the input. + + Examples + ======== + + >>> from pydatastructs import Graph, AdjacencyListGraphNode + >>> from pydatastructs import minimum_spanning_tree + >>> u = AdjacencyListGraphNode('u') + >>> v = AdjacencyListGraphNode('v') + >>> G = Graph(u, v) + >>> G.add_edge(u.name, v.name, 3) + >>> mst = minimum_spanning_tree(G, 'kruskal') + >>> u_n = mst.neighbors(u.name) + >>> mst.get_edge(u.name, u_n[0].name).value + 3 + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Kruskal%27s_algorithm + .. [2] https://en.wikipedia.org/wiki/Prim%27s_algorithm + + Note + ==== + + The concept of minimum spanning tree is valid only for + connected and undirected graphs. So, this function + should be used only for such graphs. Using with other + types of graphs may lead to unwanted results. + """ + backend = kwargs.get('backend', Backend.PYTHON) + if backend == Backend.PYTHON: + import pydatastructs.graphs.algorithms as algorithms + func = "_minimum_spanning_tree_" + algorithm + "_" + graph._impl + if not hasattr(algorithms, func): + raise NotImplementedError( + "Currently %s algoithm for %s implementation of graphs " + "isn't implemented for finding minimum spanning trees." 
+ %(algorithm, graph._impl)) + return getattr(algorithms, func)(graph) + else: + from pydatastructs.graphs._backend.cpp._algorithms import minimum_spanning_tree_prim_adjacency_list + if graph._impl == "adjacency_list" and algorithm == 'prim': + return minimum_spanning_tree_prim_adjacency_list(graph) + +def _minimum_spanning_tree_parallel_kruskal_adjacency_list(graph, num_threads): + mst = _generate_mst_object(graph) + dsf = DisjointSetForest() + for v in graph.vertices: + dsf.make_set(v) + edges = _sort_edges(graph, num_threads) + for _, edge in edges: + u, v = edge.source.name, edge.target.name + if dsf.find_root(u) is not dsf.find_root(v): + mst.add_edge(u, v, edge.value) + mst.add_edge(v, u, edge.value) + dsf.union(u, v) + return mst + +_minimum_spanning_tree_parallel_kruskal_adjacency_matrix = \ + _minimum_spanning_tree_parallel_kruskal_adjacency_list + +def _find_min(q, v, i): + if not q.is_empty: + v[i] = q.peek + else: + v[i] = None + +def _minimum_spanning_tree_parallel_prim_adjacency_list(graph, num_threads): + q = [PriorityQueue(implementation='binomial_heap') for _ in range(num_threads)] + e = [{} for _ in range(num_threads)] + v2q = {} + mst = Graph(implementation='adjacency_list') + + itr = iter(graph.vertices) + for i in range(len(graph.vertices)): + v2q[next(itr)] = i%len(q) + q[0].push(next(iter(graph.vertices)), 0) + + while True: + + _vs = [None for _ in range(num_threads)] + with ThreadPoolExecutor(max_workers=num_threads) as Executor: + for i in range(num_threads): + Executor.submit(_find_min, q[i], _vs, i).result() + v = None + + for i in range(num_threads): + if _comp(_vs[i], v, lambda u, v: u.key < v.key): + v = _vs[i] + if v is None: + break + v = v.data + idx = v2q[v] + q[idx].pop() + + if not hasattr(mst, v): + mst.add_vertex(graph.__getattribute__(v)) + if e[idx].get(v, None) is not None: + edge = e[idx][v] + mst.add_vertex(edge.target) + mst.add_edge(edge.source.name, edge.target.name, edge.value) + mst.add_edge(edge.target.name, 
edge.source.name, edge.value) + for w_node in graph.neighbors(v): + w = w_node.name + vw = graph.edge_weights[v + '_' + w] + j = v2q[w] + q[j].push(w, vw.value) + if e[j].get(w, None) is None or \ + e[j][w].value > vw.value: + e[j][w] = vw + + return mst + +def minimum_spanning_tree_parallel(graph, algorithm, num_threads, **kwargs): + """ + Computes a minimum spanning tree for the given + graph and algorithm using the given number of threads. + + Parameters + ========== + + graph: Graph + The graph whose minimum spanning tree + has to be computed. + algorithm: str + The algorithm which should be used for + computing a minimum spanning tree. + Currently the following algorithms are + supported, + + 'kruskal' -> Kruskal's algorithm as given in [1]. + + 'prim' -> Prim's algorithm as given in [2]. + num_threads: int + The number of threads to be used. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Returns + ======= + + mst: Graph + A minimum spanning tree using the implementation + same as the graph provided in the input. + + Examples + ======== + + >>> from pydatastructs import Graph, AdjacencyListGraphNode + >>> from pydatastructs import minimum_spanning_tree_parallel + >>> u = AdjacencyListGraphNode('u') + >>> v = AdjacencyListGraphNode('v') + >>> G = Graph(u, v) + >>> G.add_edge(u.name, v.name, 3) + >>> mst = minimum_spanning_tree_parallel(G, 'kruskal', 3) + >>> u_n = mst.neighbors(u.name) + >>> mst.get_edge(u.name, u_n[0].name).value + 3 + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Kruskal%27s_algorithm#Parallel_algorithm + .. [2] https://en.wikipedia.org/wiki/Prim%27s_algorithm#Parallel_algorithm + + Note + ==== + + The concept of minimum spanning tree is valid only for + connected and undirected graphs. So, this function + should be used only for such graphs. Using with other + types of graphs will lead to unwanted results. 
+ """ + raise_if_backend_is_not_python( + minimum_spanning_tree_parallel, kwargs.get('backend', Backend.PYTHON)) + import pydatastructs.graphs.algorithms as algorithms + func = "_minimum_spanning_tree_parallel_" + algorithm + "_" + graph._impl + if not hasattr(algorithms, func): + raise NotImplementedError( + "Currently %s algoithm for %s implementation of graphs " + "isn't implemented for finding minimum spanning trees." + %(algorithm, graph._impl)) + return getattr(algorithms, func)(graph, num_threads) + +def _visit(graph, vertex, visited, incoming, L): + stack = [vertex] + while stack: + top = stack[-1] + if not visited.get(top, False): + visited[top] = True + for node in graph.neighbors(top): + if incoming.get(node.name, None) is None: + incoming[node.name] = [] + incoming[node.name].append(top) + if not visited.get(node.name, False): + stack.append(node.name) + if top is stack[-1]: + L.append(stack.pop()) + +def _assign(graph, u, incoming, assigned, component): + stack = [u] + while stack: + top = stack[-1] + if not assigned.get(top, False): + assigned[top] = True + component.add(top) + for u in incoming[top]: + if not assigned.get(u, False): + stack.append(u) + if top is stack[-1]: + stack.pop() + +def _strongly_connected_components_kosaraju_adjacency_list(graph): + visited, incoming, L = {}, {}, [] + for u in graph.vertices: + if not visited.get(u, False): + _visit(graph, u, visited, incoming, L) + + assigned = {} + components = [] + for i in range(-1, -len(L) - 1, -1): + comp = set() + if not assigned.get(L[i], False): + _assign(graph, L[i], incoming, assigned, comp) + if comp: + components.append(comp) + + return components + +_strongly_connected_components_kosaraju_adjacency_matrix = \ + _strongly_connected_components_kosaraju_adjacency_list + +def _tarjan_dfs(u, graph, index, stack, indices, low_links, on_stacks, components): + indices[u] = index[0] + low_links[u] = index[0] + index[0] += 1 + stack.append(u) + on_stacks[u] = True + + for node in 
graph.neighbors(u): + v = node.name + if indices[v] == -1: + _tarjan_dfs(v, graph, index, stack, indices, low_links, on_stacks, components) + low_links[u] = min(low_links[u], low_links[v]) + elif on_stacks[v]: + low_links[u] = min(low_links[u], low_links[v]) + + if low_links[u] == indices[u]: + component = set() + while stack: + w = stack.pop() + on_stacks[w] = False + component.add(w) + if w == u: + break + components.append(component) + +def _strongly_connected_components_tarjan_adjacency_list(graph): + index = [0] # mutable object + stack = Stack([]) + indices, low_links, on_stacks = {}, {}, {} + + for u in graph.vertices: + indices[u] = -1 + low_links[u] = -1 + on_stacks[u] = False + + components = [] + + for u in graph.vertices: + if indices[u] == -1: + _tarjan_dfs(u, graph, index, stack, indices, low_links, on_stacks, components) + + return components + +_strongly_connected_components_tarjan_adjacency_matrix = \ + _strongly_connected_components_tarjan_adjacency_list + +def strongly_connected_components(graph, algorithm, **kwargs): + """ + Computes strongly connected components for the given + graph and algorithm. + + Parameters + ========== + + graph: Graph + The graph whose minimum spanning tree + has to be computed. + algorithm: str + The algorithm which should be used for + computing strongly connected components. + Currently the following algorithms are + supported, + + 'kosaraju' -> Kosaraju's algorithm as given in [1]. + 'tarjan' -> Tarjan's algorithm as given in [2]. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Returns + ======= + + components: list + Python list with each element as set of vertices. 
+ + Examples + ======== + + >>> from pydatastructs import Graph, AdjacencyListGraphNode + >>> from pydatastructs import strongly_connected_components + >>> v1, v2, v3 = [AdjacencyListGraphNode(i) for i in range(3)] + >>> g = Graph(v1, v2, v3) + >>> g.add_edge(v1.name, v2.name) + >>> g.add_edge(v2.name, v3.name) + >>> g.add_edge(v3.name, v1.name) + >>> scc = strongly_connected_components(g, 'kosaraju') + >>> scc == [{'2', '0', '1'}] + True + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Kosaraju%27s_algorithm + .. [2] https://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm + + """ + raise_if_backend_is_not_python( + strongly_connected_components, kwargs.get('backend', Backend.PYTHON)) + import pydatastructs.graphs.algorithms as algorithms + func = "_strongly_connected_components_" + algorithm + "_" + graph._impl + if not hasattr(algorithms, func): + raise NotImplementedError( + "Currently %s algoithm for %s implementation of graphs " + "isn't implemented for finding strongly connected components." + %(algorithm, graph._impl)) + return getattr(algorithms, func)(graph) + +def depth_first_search( + graph, source_node, operation, *args, **kwargs): + """ + Implementation of depth first search (DFS) + algorithm. + + Parameters + ========== + + graph: Graph + The graph on which DFS is to be performed. + source_node: str + The name of the source node from where the DFS is + to be initiated. + operation: function + The function which is to be applied + on every node when it is visited. + The prototype which is to be followed is, + `function_name(curr_node, next_node, + arg_1, arg_2, . . ., arg_n)`. + Here, the first two arguments denote, the + current node and the node next to current node. + The rest of the arguments are optional and you can + provide your own stuff there. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. 
+ + Note + ==== + + You should pass all the arguments which you are going + to use in the prototype of your `operation` after + passing the operation function. + + Examples + ======== + + >>> from pydatastructs import Graph, AdjacencyListGraphNode + >>> V1 = AdjacencyListGraphNode("V1") + >>> V2 = AdjacencyListGraphNode("V2") + >>> V3 = AdjacencyListGraphNode("V3") + >>> G = Graph(V1, V2, V3) + >>> from pydatastructs import depth_first_search + >>> def f(curr_node, next_node, dest_node): + ... return curr_node != dest_node + ... + >>> G.add_edge(V1.name, V2.name) + >>> G.add_edge(V2.name, V3.name) + >>> depth_first_search(G, V1.name, f, V3.name) + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Depth-first_search + """ + raise_if_backend_is_not_python( + depth_first_search, kwargs.get('backend', Backend.PYTHON)) + import pydatastructs.graphs.algorithms as algorithms + func = "_depth_first_search_" + graph._impl + if not hasattr(algorithms, func): + raise NotImplementedError( + "Currently depth first search isn't implemented for " + "%s graphs."%(graph._impl)) + return getattr(algorithms, func)( + graph, source_node, operation, *args, **kwargs) + +def _depth_first_search_adjacency_list( + graph, source_node, operation, *args, **kwargs): + dfs_stack = Stack() + visited = {} + dfs_stack.append(source_node) + visited[source_node] = True + while len(dfs_stack) != 0: + curr_node = dfs_stack.pop() + next_nodes = graph.neighbors(curr_node) + if len(next_nodes) != 0: + for next_node in next_nodes: + if next_node.name not in visited: + status = operation(curr_node, next_node.name, *args, **kwargs) + if not status: + return None + dfs_stack.append(next_node.name) + visited[next_node.name] = True + else: + status = operation(curr_node, "", *args, **kwargs) + if not status: + return None + +_depth_first_search_adjacency_matrix = _depth_first_search_adjacency_list + +def shortest_paths(graph: Graph, algorithm: str, + source: str, target: str="", + **kwargs) -> 
tuple: + """ + Finds shortest paths in the given graph from a given source. + + Parameters + ========== + + graph: Graph + The graph under consideration. + algorithm: str + The algorithm to be used. Currently, the following algorithms + are implemented, + + 'bellman_ford' -> Bellman-Ford algorithm as given in [1] + + 'dijkstra' -> Dijkstra algorithm as given in [2]. + source: str + The name of the source the node. + target: str + The name of the target node. + Optional, by default, all pair shortest paths + are returned. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Returns + ======= + + (distances, predecessors): (dict, dict) + If target is not provided and algorithm used + is 'bellman_ford'/'dijkstra'. + (distances[target], predecessors): (float, dict) + If target is provided and algorithm used is + 'bellman_ford'/'dijkstra'. + + Examples + ======== + + >>> from pydatastructs import Graph, AdjacencyListGraphNode + >>> from pydatastructs import shortest_paths + >>> V1 = AdjacencyListGraphNode("V1") + >>> V2 = AdjacencyListGraphNode("V2") + >>> V3 = AdjacencyListGraphNode("V3") + >>> G = Graph(V1, V2, V3) + >>> G.add_edge('V2', 'V3', 10) + >>> G.add_edge('V1', 'V2', 11) + >>> shortest_paths(G, 'bellman_ford', 'V1') + ({'V1': 0, 'V2': 11, 'V3': 21}, {'V1': None, 'V2': 'V1', 'V3': 'V2'}) + >>> shortest_paths(G, 'dijkstra', 'V1') + ({'V2': 11, 'V3': 21, 'V1': 0}, {'V1': None, 'V2': 'V1', 'V3': 'V2'}) + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Bellman%E2%80%93Ford_algorithm + .. 
[2] https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm + """ + backend = kwargs.get('backend', Backend.PYTHON) + if (backend == Backend.PYTHON): + import pydatastructs.graphs.algorithms as algorithms + func = "_" + algorithm + "_" + graph._impl + if not hasattr(algorithms, func): + raise NotImplementedError( + "Currently %s algorithm isn't implemented for " + "finding shortest paths in graphs."%(algorithm)) + return getattr(algorithms, func)(graph, source, target) + else: + from pydatastructs.graphs._backend.cpp._algorithms import shortest_paths_dijkstra_adjacency_list + if graph._impl == "adjacency_list" and algorithm == 'dijkstra': + return shortest_paths_dijkstra_adjacency_list(graph, source, target) + +def _bellman_ford_adjacency_list(graph: Graph, source: str, target: str) -> tuple: + distances, predecessor, visited, cnts = {}, {}, {}, {} + + for v in graph.vertices: + distances[v] = float('inf') + predecessor[v] = None + visited[v] = False + cnts[v] = 0 + distances[source] = 0 + verticy_num = len(graph.vertices) + + que = Queue([source]) + + while que: + u = que.popleft() + visited[u] = False + neighbors = graph.neighbors(u) + for neighbor in neighbors: + v = neighbor.name + edge_str = u + '_' + v + if distances[u] != float('inf') and distances[u] + graph.edge_weights[edge_str].value < distances[v]: + distances[v] = distances[u] + graph.edge_weights[edge_str].value + predecessor[v] = u + cnts[v] = cnts[u] + 1 + if cnts[v] >= verticy_num: + raise ValueError("Graph contains a negative weight cycle.") + if not visited[v]: + que.append(v) + visited[v] = True + + if target != "": + return (distances[target], predecessor) + return (distances, predecessor) + +_bellman_ford_adjacency_matrix = _bellman_ford_adjacency_list + +def _dijkstra_adjacency_list(graph: Graph, start: str, target: str): + V = len(graph.vertices) + visited, dist, pred = {}, {}, {} + for v in graph.vertices: + visited[v] = False + pred[v] = None + if v != start: + dist[v] = float('inf') + 
dist[start] = 0 + pq = PriorityQueue(implementation='binomial_heap') + for vertex in dist: + pq.push(vertex, dist[vertex]) + for _ in range(V): + u = pq.pop() + visited[u] = True + for v in graph.vertices: + edge_str = u + '_' + v + if (edge_str in graph.edge_weights and graph.edge_weights[edge_str].value >= 0 and + visited[v] is False and dist[v] > dist[u] + graph.edge_weights[edge_str].value): + dist[v] = dist[u] + graph.edge_weights[edge_str].value + pred[v] = u + pq.push(v, dist[v]) + + if target != "": + return (dist[target], pred) + return dist, pred + +_dijkstra_adjacency_matrix = _dijkstra_adjacency_list + +def all_pair_shortest_paths(graph: Graph, algorithm: str, + **kwargs) -> tuple: + """ + Finds shortest paths between all pairs of vertices in the given graph. + + Parameters + ========== + + graph: Graph + The graph under consideration. + algorithm: str + The algorithm to be used. Currently, the following algorithms + are implemented, + + 'floyd_warshall' -> Floyd Warshall algorithm as given in [1]. + 'johnson' -> Johnson's Algorithm as given in [2] + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Returns + ======= + + (distances, predecessors): (dict, dict) + + Examples + ======== + + >>> from pydatastructs import Graph, AdjacencyListGraphNode + >>> from pydatastructs import all_pair_shortest_paths + >>> V1 = AdjacencyListGraphNode("V1") + >>> V2 = AdjacencyListGraphNode("V2") + >>> V3 = AdjacencyListGraphNode("V3") + >>> G = Graph(V1, V2, V3) + >>> G.add_edge('V2', 'V3', 10) + >>> G.add_edge('V1', 'V2', 11) + >>> G.add_edge('V3', 'V1', 5) + >>> dist, _ = all_pair_shortest_paths(G, 'floyd_warshall') + >>> dist['V1']['V3'] + 21 + >>> dist['V3']['V1'] + 5 + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Floyd%E2%80%93Warshall_algorithm + .. 
[2] https://en.wikipedia.org/wiki/Johnson's_algorithm + """ + raise_if_backend_is_not_python( + all_pair_shortest_paths, kwargs.get('backend', Backend.PYTHON)) + import pydatastructs.graphs.algorithms as algorithms + func = "_" + algorithm + "_" + graph._impl + if not hasattr(algorithms, func): + raise NotImplementedError( + "Currently %s algorithm isn't implemented for " + "finding shortest paths in graphs."%(algorithm)) + return getattr(algorithms, func)(graph) + +def _floyd_warshall_adjacency_list(graph: Graph): + dist, next_vertex = {}, {} + V, E = graph.vertices, graph.edge_weights + + for v in V: + dist[v] = {} + next_vertex[v] = {} + + for name, edge in E.items(): + dist[edge.source.name][edge.target.name] = edge.value + next_vertex[edge.source.name][edge.target.name] = edge.source.name + + for v in V: + dist[v][v] = 0 + next_vertex[v][v] = v + + for k in V: + for i in V: + for j in V: + dist_i_j = dist.get(i, {}).get(j, float('inf')) + dist_i_k = dist.get(i, {}).get(k, float('inf')) + dist_k_j = dist.get(k, {}).get(j, float('inf')) + next_i_k = next_vertex.get(i + '_' + k, None) + if dist_i_j > dist_i_k + dist_k_j: + dist[i][j] = dist_i_k + dist_k_j + next_vertex[i][j] = next_i_k + + return (dist, next_vertex) + +_floyd_warshall_adjacency_matrix = _floyd_warshall_adjacency_list + +def _johnson_adjacency_list(graph: Graph): + new_vertex = AdjacencyListGraphNode('__q__') + graph.add_vertex(new_vertex) + + for vertex in graph.vertices: + if vertex != '__q__': + graph.add_edge('__q__', vertex, 0) + + distances, predecessors = shortest_paths(graph, 'bellman_ford', '__q__') + + edges_to_remove = [] + for edge in graph.edge_weights: + edge_node = graph.edge_weights[edge] + if edge_node.source.name == '__q__': + edges_to_remove.append((edge_node.source.name, edge_node.target.name)) + + for u, v in edges_to_remove: + graph.remove_edge(u, v) + graph.remove_vertex('__q__') + + for edge in graph.edge_weights: + edge_node = graph.edge_weights[edge] + u, v = 
edge_node.source.name, edge_node.target.name + graph.edge_weights[edge].value += (distances[u] - distances[v]) + + all_distances = {} + all_next_vertex = {} + + for vertex in graph.vertices: + u = vertex + dijkstra_dist, dijkstra_pred = shortest_paths(graph, 'dijkstra', u) + all_distances[u] = {} + all_next_vertex[u] = {} + for v in graph.vertices: + if dijkstra_pred[v] is None or dijkstra_pred[v] == u : + all_next_vertex[u][v] = u + else: + all_next_vertex[u][v] = None + if v in dijkstra_dist: + all_distances[u][v] = dijkstra_dist[v] - distances[u] + distances[v] + else: + all_distances[u][v] = float('inf') + + return (all_distances, all_next_vertex) + +def topological_sort(graph: Graph, algorithm: str, + **kwargs) -> list: + """ + Performs topological sort on the given graph using given algorithm. + + Parameters + ========== + + graph: Graph + The graph under consideration. + algorithm: str + The algorithm to be used. + Currently, following are supported, + + 'kahn' -> Kahn's algorithm as given in [1]. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Returns + ======= + + list + The list of topologically sorted vertices. + + Examples + ======== + + >>> from pydatastructs import Graph, AdjacencyListGraphNode, topological_sort + >>> v_1 = AdjacencyListGraphNode('v_1') + >>> v_2 = AdjacencyListGraphNode('v_2') + >>> graph = Graph(v_1, v_2) + >>> graph.add_edge('v_1', 'v_2') + >>> topological_sort(graph, 'kahn') + ['v_1', 'v_2'] + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/Topological_sorting#Kahn's_algorithm + """ + raise_if_backend_is_not_python( + topological_sort, kwargs.get('backend', Backend.PYTHON)) + import pydatastructs.graphs.algorithms as algorithms + func = "_" + algorithm + "_" + graph._impl + if not hasattr(algorithms, func): + raise NotImplementedError( + "Currently %s algorithm isn't implemented for " + "performing topological sort on %s graphs."%(algorithm, graph._impl)) + return getattr(algorithms, func)(graph) + +def _kahn_adjacency_list(graph: Graph) -> list: + S = Queue() + in_degree = {u: 0 for u in graph.vertices} + for u in graph.vertices: + for v in graph.neighbors(u): + in_degree[v.name] += 1 + for u in graph.vertices: + if in_degree[u] == 0: + S.append(u) + in_degree.pop(u) + + L = [] + while S: + n = S.popleft() + L.append(n) + for m in graph.neighbors(n): + graph.remove_edge(n, m.name) + in_degree[m.name] -= 1 + if in_degree[m.name] == 0: + S.append(m.name) + in_degree.pop(m.name) + + if in_degree: + raise ValueError("Graph is not acyclic.") + return L + +def topological_sort_parallel(graph: Graph, algorithm: str, num_threads: int, + **kwargs) -> list: + """ + Performs topological sort on the given graph using given algorithm using + given number of threads. + + Parameters + ========== + + graph: Graph + The graph under consideration. + algorithm: str + The algorithm to be used. + Currently, following are supported, + + 'kahn' -> Kahn's algorithm as given in [1]. + num_threads: int + The maximum number of threads to be used. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Returns + ======= + + list + The list of topologically sorted vertices. 
+ + Examples + ======== + + >>> from pydatastructs import Graph, AdjacencyListGraphNode, topological_sort_parallel + >>> v_1 = AdjacencyListGraphNode('v_1') + >>> v_2 = AdjacencyListGraphNode('v_2') + >>> graph = Graph(v_1, v_2) + >>> graph.add_edge('v_1', 'v_2') + >>> topological_sort_parallel(graph, 'kahn', 1) + ['v_1', 'v_2'] + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Topological_sorting#Kahn's_algorithm + """ + raise_if_backend_is_not_python( + topological_sort_parallel, kwargs.get('backend', Backend.PYTHON)) + import pydatastructs.graphs.algorithms as algorithms + func = "_" + algorithm + "_" + graph._impl + '_parallel' + if not hasattr(algorithms, func): + raise NotImplementedError( + "Currently %s algorithm isn't implemented for " + "performing topological sort on %s graphs."%(algorithm, graph._impl)) + return getattr(algorithms, func)(graph, num_threads) + +def _kahn_adjacency_list_parallel(graph: Graph, num_threads: int) -> list: + num_vertices = len(graph.vertices) + + def _collect_source_nodes(graph: Graph) -> list: + S = [] + in_degree = {u: 0 for u in graph.vertices} + for u in graph.vertices: + for v in graph.neighbors(u): + in_degree[v.name] += 1 + for u in in_degree: + if in_degree[u] == 0: + S.append(u) + return list(S) + + def _job(graph: Graph, u: str): + for v in graph.neighbors(u): + graph.remove_edge(u, v.name) + + L = [] + source_nodes = _collect_source_nodes(graph) + while source_nodes: + with ThreadPoolExecutor(max_workers=num_threads) as Executor: + for node in source_nodes: + L.append(node) + Executor.submit(_job, graph, node) + for node in source_nodes: + graph.remove_vertex(node) + source_nodes = _collect_source_nodes(graph) + + if len(L) != num_vertices: + raise ValueError("Graph is not acyclic.") + return L + + +def _breadth_first_search_max_flow(graph: Graph, source_node, sink_node, flow_passed, for_dinic=False): + bfs_queue = Queue() + parent, currentPathC = {}, {} + currentPathC[source_node] = float('inf') 
+ bfs_queue.append(source_node) + while len(bfs_queue) != 0: + curr_node = bfs_queue.popleft() + next_nodes = graph.neighbors(curr_node) + if len(next_nodes) != 0: + for next_node in next_nodes: + capacity = graph.get_edge(curr_node, next_node.name).value + fp = flow_passed.get((curr_node, next_node.name), 0) + if capacity and parent.get(next_node.name, False) is False and capacity - fp > 0: + parent[next_node.name] = curr_node + next_flow = min(currentPathC[curr_node], capacity - fp) + currentPathC[next_node.name] = next_flow + if next_node.name == sink_node and not for_dinic: + return (next_flow, parent) + bfs_queue.append(next_node.name) + return (0, parent) + + +def _max_flow_edmonds_karp_(graph: Graph, source, sink): + m_flow = 0 + flow_passed = {} + new_flow, parent = _breadth_first_search_max_flow(graph, source, sink, flow_passed) + while new_flow != 0: + m_flow += new_flow + current = sink + while current != source: + prev = parent[current] + fp = flow_passed.get((prev, current), 0) + flow_passed[(prev, current)] = fp + new_flow + fp = flow_passed.get((current, prev), 0) + flow_passed[(current, prev)] = fp - new_flow + current = prev + new_flow, parent = _breadth_first_search_max_flow(graph, source, sink, flow_passed) + return m_flow + + +def _depth_first_search_max_flow_dinic(graph: Graph, u, parent, sink_node, flow, flow_passed): + if u == sink_node: + return flow + + next_nodes = graph.neighbors(u) + if len(next_nodes) != 0: + for next_node in next_nodes: + capacity = graph.get_edge(u, next_node.name).value + fp = flow_passed.get((u, next_node.name), 0) + parent_cond = parent.get(next_node.name, None) + if parent_cond and parent_cond == u and capacity - fp > 0: + path_flow = _depth_first_search_max_flow_dinic(graph, + next_node.name, + parent, sink_node, + min(flow, capacity - fp), flow_passed) + if path_flow > 0: + fp = flow_passed.get((u, next_node.name), 0) + flow_passed[(u, next_node.name)] = fp + path_flow + fp = flow_passed.get((next_node.name, u), 
0) + flow_passed[(next_node.name, u)] = fp - path_flow + return path_flow + return 0 + + +def _max_flow_dinic_(graph: Graph, source, sink): + max_flow = 0 + flow_passed = {} + while True: + next_flow, parent = _breadth_first_search_max_flow(graph, source, sink, flow_passed, True) + if parent.get(sink, False) is False: + break + + while True: + path_flow = _depth_first_search_max_flow_dinic(graph, source, + parent, sink, + float('inf'), + flow_passed) + if path_flow <= 0: + break + max_flow += path_flow + + return max_flow + + +def max_flow(graph, source, sink, algorithm='edmonds_karp', **kwargs): + raise_if_backend_is_not_python( + max_flow, kwargs.get('backend', Backend.PYTHON)) + + import pydatastructs.graphs.algorithms as algorithms + func = "_max_flow_" + algorithm + "_" + if not hasattr(algorithms, func): + raise NotImplementedError( + f"Currently {algorithm} algorithm isn't implemented for " + "performing max flow on graphs.") + return getattr(algorithms, func)(graph, source, sink) + + +def find_bridges(graph): + """ + Finds all bridges in an undirected graph using Tarjan's Algorithm. + + Parameters + ========== + graph : Graph + An undirected graph instance. + + Returns + ========== + List[tuple] + A list of bridges, where each bridge is represented as a tuple (u, v) + with u <= v. + + Example + ======== + >>> from pydatastructs import Graph, AdjacencyListGraphNode, find_bridges + >>> v0 = AdjacencyListGraphNode(0) + >>> v1 = AdjacencyListGraphNode(1) + >>> v2 = AdjacencyListGraphNode(2) + >>> v3 = AdjacencyListGraphNode(3) + >>> v4 = AdjacencyListGraphNode(4) + >>> graph = Graph(v0, v1, v2, v3, v4, implementation='adjacency_list') + >>> graph.add_edge(v0.name, v1.name) + >>> graph.add_edge(v1.name, v2.name) + >>> graph.add_edge(v2.name, v3.name) + >>> graph.add_edge(v3.name, v4.name) + >>> find_bridges(graph) + [('0', '1'), ('1', '2'), ('2', '3'), ('3', '4')] + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/Bridge_(graph_theory) + """ + + vertices = list(graph.vertices) + processed_vertices = [] + for v in vertices: + if hasattr(v, "name"): + processed_vertices.append(v.name) + else: + processed_vertices.append(v) + + n = len(processed_vertices) + adj = {v: [] for v in processed_vertices} + for v in processed_vertices: + for neighbor in graph.neighbors(v): + if hasattr(neighbor, "name"): + nbr = neighbor.name + else: + nbr = neighbor + adj[v].append(nbr) + + mapping = {v: idx for idx, v in enumerate(processed_vertices)} + inv_mapping = {idx: v for v, idx in mapping.items()} + + n_adj = [[] for _ in range(n)] + for v in processed_vertices: + idx_v = mapping[v] + for u in adj[v]: + idx_u = mapping[u] + n_adj[idx_v].append(idx_u) + + visited = [False] * n + disc = [0] * n + low = [0] * n + parent = [-1] * n + bridges_idx = [] + time = 0 + + def dfs(u): + nonlocal time + visited[u] = True + disc[u] = low[u] = time + time += 1 + for v in n_adj[u]: + if not visited[v]: + parent[v] = u + dfs(v) + low[u] = min(low[u], low[v]) + if low[v] > disc[u]: + bridges_idx.append((u, v)) + elif v != parent[u]: + low[u] = min(low[u], disc[v]) + + for i in range(n): + if not visited[i]: + dfs(i) + + bridges = [] + for u, v in bridges_idx: + a = inv_mapping[u] + b = inv_mapping[v] + if a <= b: + bridges.append((a, b)) + else: + bridges.append((b, a)) + bridges.sort() + return bridges diff --git a/lib/python3.12/site-packages/pydatastructs/graphs/graph.py b/lib/python3.12/site-packages/pydatastructs/graphs/graph.py new file mode 100644 index 000000000..39c2692e3 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/graphs/graph.py @@ -0,0 +1,163 @@ + +from pydatastructs.utils.misc_util import Backend, raise_if_backend_is_not_python + +__all__ = [ + 'Graph' +] + +class Graph(object): + """ + Represents generic concept of graphs. + + Parameters + ========== + + implementation: str + The implementation to be used for storing + graph in memory. 
It can be figured out + from type of the vertices(if passed at construction). + Currently the following implementations are supported, + + 'adjacency_list' -> Adjacency list implementation. + + 'adjacency_matrix' -> Adjacency matrix implementation. + + By default, 'adjacency_list'. + vertices: GraphNode(s) + For AdjacencyList implementation vertices + can be passed for initializing the graph. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Examples + ======== + + >>> from pydatastructs.graphs import Graph + >>> from pydatastructs.utils import AdjacencyListGraphNode + >>> v_1 = AdjacencyListGraphNode('v_1', 1) + >>> v_2 = AdjacencyListGraphNode('v_2', 2) + >>> g = Graph(v_1, v_2) + >>> g.add_edge('v_1', 'v_2') + >>> g.add_edge('v_2', 'v_1') + >>> g.is_adjacent('v_1', 'v_2') + True + >>> g.is_adjacent('v_2', 'v_1') + True + >>> g.remove_edge('v_1', 'v_2') + >>> g.is_adjacent('v_1', 'v_2') + False + >>> g.is_adjacent('v_2', 'v_1') + True + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Graph_(abstract_data_type) + + Note + ==== + + Make sure to create nodes (AdjacencyListGraphNode or AdjacencyMatrixGraphNode) + and them in your graph using Graph.add_vertex before adding edges whose + end points require either of the nodes that you added. In other words, + Graph.add_edge doesn't add new nodes on its own if the input + nodes are not already present in the Graph. 
+ + """ + + __slots__ = ['_impl'] + + def __new__(cls, *args, **kwargs): + backend = kwargs.get('backend', Backend.PYTHON) + try: + default_impl = args[0]._impl if args else 'adjacency_list' + except: + default_impl = 'adjacency_list' + implementation = kwargs.get('implementation', default_impl) + if implementation == 'adjacency_list': + from pydatastructs.graphs.adjacency_list import AdjacencyList + obj = AdjacencyList(*args, **kwargs) + return obj + elif implementation == 'adjacency_matrix': + from pydatastructs.graphs.adjacency_matrix import AdjacencyMatrix + obj = AdjacencyMatrix(*args, **kwargs) + return obj + else: + raise NotImplementedError("%s implementation is not a part " + "of the library currently."%(implementation)) + + def is_adjacent(self, node1, node2): + """ + Checks if the nodes with the given + with the given names are adjacent + to each other. + """ + raise NotImplementedError( + "This is an abstract method.") + + def neighbors(self, node): + """ + Lists the neighbors of the node + with given name. + """ + raise NotImplementedError( + "This is an abstract method.") + + def add_vertex(self, node): + """ + Adds the input vertex to the node, or does nothing + if the input vertex is already in the graph. + """ + raise NotImplementedError( + "This is an abstract method.") + + def remove_vertex(self, node): + """ + Removes the input vertex along with all the edges + pointing towards it. + """ + raise NotImplementedError( + "This is an abstract method.") + + def add_edge(self, source, target, cost=None): + """ + Adds the edge starting at first parameter + i.e., source and ending at the second + parameter i.e., target. + """ + raise NotImplementedError( + "This is an abstract method.") + + def get_edge(self, source, target): + """ + Returns GraphEdge object if there + is an edge between source and target + otherwise None. 
+ """ + raise NotImplementedError( + "This is an abstract method.") + + def remove_edge(self, source, target): + """ + Removes the edge starting at first parameter + i.e., source and ending at the second + parameter i.e., target. + """ + raise NotImplementedError( + "This is an abstract method.") + + def num_vertices(self): + """ + Number of vertices + """ + raise NotImplementedError( + "This is an abstract method.") + + def num_edges(self): + """ + Number of edges + """ + raise NotImplementedError( + "This is an abstract method.") diff --git a/lib/python3.12/site-packages/pydatastructs/graphs/tests/__init__.py b/lib/python3.12/site-packages/pydatastructs/graphs/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_adjacency_list.py b/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_adjacency_list.py new file mode 100644 index 000000000..3a9cdb14f --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_adjacency_list.py @@ -0,0 +1,83 @@ +from pydatastructs.graphs import Graph +from pydatastructs.utils import AdjacencyListGraphNode +from pydatastructs.utils.raises_util import raises +from pydatastructs.utils.misc_util import Backend + +def test_adjacency_list(): + v_1 = AdjacencyListGraphNode('v_1', 1) + v_2 = AdjacencyListGraphNode('v_2', 2) + g = Graph(v_1, v_2, implementation='adjacency_list') + v_3 = AdjacencyListGraphNode('v_3', 3) + g.add_vertex(v_2) + g.add_vertex(v_3) + g.add_edge('v_1', 'v_2') + g.add_edge('v_2', 'v_3') + g.add_edge('v_3', 'v_1') + assert g.is_adjacent('v_1', 'v_2') is True + assert g.is_adjacent('v_2', 'v_3') is True + assert g.is_adjacent('v_3', 'v_1') is True + assert g.is_adjacent('v_2', 'v_1') is False + assert g.is_adjacent('v_3', 'v_2') is False + assert g.is_adjacent('v_1', 'v_3') is False + neighbors = g.neighbors('v_1') + assert neighbors == [v_2] + v = AdjacencyListGraphNode('v', 4) + g.add_vertex(v) + 
g.add_edge('v_1', 'v', 0) + g.add_edge('v_2', 'v', 0) + g.add_edge('v_3', 'v', 0) + assert g.is_adjacent('v_1', 'v') is True + assert g.is_adjacent('v_2', 'v') is True + assert g.is_adjacent('v_3', 'v') is True + e1 = g.get_edge('v_1', 'v') + e2 = g.get_edge('v_2', 'v') + e3 = g.get_edge('v_3', 'v') + assert (e1.source.name, e1.target.name) == ('v_1', 'v') + assert (e2.source.name, e2.target.name) == ('v_2', 'v') + assert (e3.source.name, e3.target.name) == ('v_3', 'v') + g.remove_edge('v_1', 'v') + assert g.is_adjacent('v_1', 'v') is False + g.remove_vertex('v') + assert g.is_adjacent('v_2', 'v') is False + assert g.is_adjacent('v_3', 'v') is False + + assert raises(ValueError, lambda: g.add_edge('u', 'v')) + assert raises(ValueError, lambda: g.add_edge('v', 'x')) + + v_4 = AdjacencyListGraphNode('v_4', 4, backend = Backend.CPP) + v_5 = AdjacencyListGraphNode('v_5', 5, backend = Backend.CPP) + g2 = Graph(v_4,v_5,implementation = 'adjacency_list', backend = Backend.CPP) + v_6 = AdjacencyListGraphNode('v_6', 6, backend = Backend.CPP) + assert raises(ValueError, lambda: g2.add_vertex(v_5)) + g2.add_vertex(v_6) + g2.add_edge('v_4', 'v_5') + g2.add_edge('v_5', 'v_6') + g2.add_edge('v_4', 'v_6') + assert g2.is_adjacent('v_4', 'v_5') is True + assert g2.is_adjacent('v_5', 'v_6') is True + assert g2.is_adjacent('v_4', 'v_6') is True + assert g2.is_adjacent('v_5', 'v_4') is False + assert g2.is_adjacent('v_6', 'v_5') is False + assert g2.is_adjacent('v_6', 'v_4') is False + assert g2.num_edges() == 3 + assert g2.num_vertices() == 3 + neighbors = g2.neighbors('v_4') + assert neighbors == [v_6, v_5] + v = AdjacencyListGraphNode('v', 4, backend = Backend.CPP) + g2.add_vertex(v) + g2.add_edge('v_4', 'v', 0) + g2.add_edge('v_5', 'v', 0) + g2.add_edge('v_6', 'v', "h") + assert g2.is_adjacent('v_4', 'v') is True + assert g2.is_adjacent('v_5', 'v') is True + assert g2.is_adjacent('v_6', 'v') is True + e1 = g2.get_edge('v_4', 'v') + e2 = g2.get_edge('v_5', 'v') + e3 = 
g2.get_edge('v_6', 'v') + assert (str(e1)) == "('v_4', 'v', 0)" + assert (str(e2)) == "('v_5', 'v', 0)" + assert (str(e3)) == "('v_6', 'v', h)" + g2.remove_edge('v_4', 'v') + assert g2.is_adjacent('v_4', 'v') is False + g2.remove_vertex('v') + assert raises(ValueError, lambda: g2.add_edge('v_4', 'v')) diff --git a/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_adjacency_matrix.py b/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_adjacency_matrix.py new file mode 100644 index 000000000..27dc81790 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_adjacency_matrix.py @@ -0,0 +1,53 @@ +from pydatastructs.graphs import Graph +from pydatastructs.utils import AdjacencyMatrixGraphNode +from pydatastructs.utils.raises_util import raises +from pydatastructs.utils.misc_util import Backend + +def test_AdjacencyMatrix(): + v_0 = AdjacencyMatrixGraphNode(0, 0) + v_1 = AdjacencyMatrixGraphNode(1, 1) + v_2 = AdjacencyMatrixGraphNode(2, 2) + g = Graph(v_0, v_1, v_2) + g.add_edge(0, 1, 0) + g.add_edge(1, 2, 0) + g.add_edge(2, 0, 0) + e1 = g.get_edge(0, 1) + e2 = g.get_edge(1, 2) + e3 = g.get_edge(2, 0) + assert (e1.source.name, e1.target.name) == ('0', '1') + assert (e2.source.name, e2.target.name) == ('1', '2') + assert (e3.source.name, e3.target.name) == ('2', '0') + assert g.is_adjacent(0, 1) is True + assert g.is_adjacent(1, 2) is True + assert g.is_adjacent(2, 0) is True + assert g.is_adjacent(1, 0) is False + assert g.is_adjacent(2, 1) is False + assert g.is_adjacent(0, 2) is False + neighbors = g.neighbors(0) + assert neighbors == [v_1] + g.remove_edge(0, 1) + assert g.is_adjacent(0, 1) is False + assert raises(ValueError, lambda: g.add_edge('u', 'v')) + assert raises(ValueError, lambda: g.add_edge('v', 'x')) + assert raises(ValueError, lambda: g.add_edge(2, 3)) + assert raises(ValueError, lambda: g.add_edge(3, 2)) + + v_3 = AdjacencyMatrixGraphNode('0', 0, backend = Backend.CPP) + v_4 = AdjacencyMatrixGraphNode('1', 
1, backend = Backend.CPP) + v_5 = AdjacencyMatrixGraphNode('2', 2, backend = Backend.CPP) + g2 = Graph(v_3, v_4, v_5, implementation = 'adjacency_matrix', backend = Backend.CPP) + g2.add_edge('0', '1', 0) + g2.add_edge('1', '2', 0) + g2.add_edge('2', '0', 0) + assert g2.is_adjacent('0', '1') is True + assert g2.is_adjacent('1', '2') is True + assert g2.is_adjacent('2', '0') is True + assert g2.is_adjacent('1', '0') is False + assert g2.is_adjacent('2', '1') is False + assert g2.is_adjacent('0', '2') is False + neighbors = g2.neighbors('0') + assert neighbors == [v_4] + g2.remove_edge('0', '1') + assert g2.is_adjacent('0', '1') is False + assert raises(ValueError, lambda: g2.add_edge('u', 'v')) + assert raises(ValueError, lambda: g2.add_edge('v', 'x')) diff --git a/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_algorithms.py b/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_algorithms.py new file mode 100644 index 000000000..04ebcccda --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_algorithms.py @@ -0,0 +1,596 @@ +from pydatastructs import (breadth_first_search, Graph, +breadth_first_search_parallel, minimum_spanning_tree, +minimum_spanning_tree_parallel, strongly_connected_components, +depth_first_search, shortest_paths,all_pair_shortest_paths, topological_sort, +topological_sort_parallel, max_flow, find_bridges) +from pydatastructs.utils.raises_util import raises +from pydatastructs.utils.misc_util import AdjacencyListGraphNode, AdjacencyMatrixGraphNode +from pydatastructs.graphs._backend.cpp import _graph +from pydatastructs.graphs._backend.cpp import _algorithms +from pydatastructs.utils.misc_util import Backend + +def test_breadth_first_search(): + + def _test_breadth_first_search(ds): + import pydatastructs.utils.misc_util as utils + GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") + + V1 = GraphNode(0) + V2 = GraphNode(1) + V3 = GraphNode(2) + + G1 = Graph(V1, V2, V3) + + assert 
G1.num_vertices() == 3 + + edges = [ + (V1.name, V2.name), + (V2.name, V3.name), + (V1.name, V3.name) + ] + + for edge in edges: + G1.add_edge(*edge) + + assert G1.num_edges() == len(edges) + + parent = {} + def bfs_tree(curr_node, next_node, parent): + if next_node != "": + parent[next_node] = curr_node + return True + + breadth_first_search(G1, V1.name, bfs_tree, parent) + assert (parent[V3.name] == V1.name and parent[V2.name] == V1.name) or \ + (parent[V3.name] == V2.name and parent[V2.name] == V1.name) + + if (ds=='List'): + parent = {} + V9 = AdjacencyListGraphNode("9",0,backend = Backend.CPP) + V10 = AdjacencyListGraphNode("10",0,backend = Backend.CPP) + V11 = AdjacencyListGraphNode("11",0,backend = Backend.CPP) + G2 = Graph(V9, V10, V11,implementation = 'adjacency_list', backend = Backend.CPP) + assert G2.num_vertices()==3 + G2.add_edge("9", "10") + G2.add_edge("10", "11") + breadth_first_search(G2, "9", bfs_tree, parent, backend = Backend.CPP) + assert parent[V10] == V9 + assert parent[V11] == V10 + + if (ds == 'Matrix'): + parent3 = {} + V12 = AdjacencyMatrixGraphNode("12", 0, backend = Backend.CPP) + V13 = AdjacencyMatrixGraphNode("13", 0, backend = Backend.CPP) + V14 = AdjacencyMatrixGraphNode("14", 0, backend = Backend.CPP) + G3 = Graph(V12, V13, V14, implementation = 'adjacency_matrix', backend = Backend.CPP) + assert G3.num_vertices() == 3 + G3.add_edge("12", "13") + G3.add_edge("13", "14") + breadth_first_search(G3, "12", bfs_tree, parent3, backend = Backend.CPP) + assert parent3[V13] == V12 + assert parent3[V14] == V13 + + V4 = GraphNode(0) + V5 = GraphNode(1) + V6 = GraphNode(2) + V7 = GraphNode(3) + V8 = GraphNode(4) + + edges = [ + (V4.name, V5.name), + (V5.name, V6.name), + (V6.name, V7.name), + (V6.name, V4.name), + (V7.name, V8.name) + ] + + G2 = Graph(V4, V5, V6, V7, V8) + + for edge in edges: + G2.add_edge(*edge) + + assert G2.num_edges() == len(edges) + + path = [] + def path_finder(curr_node, next_node, dest_node, parent, path): + if 
next_node != "": + parent[next_node] = curr_node + if curr_node == dest_node: + node = curr_node + path.append(node) + while node is not None: + if parent.get(node, None) is not None: + path.append(parent[node]) + node = parent.get(node, None) + path.reverse() + return False + return True + + parent.clear() + breadth_first_search(G2, V4.name, path_finder, V7.name, parent, path) + assert path == [V4.name, V5.name, V6.name, V7.name] + + _test_breadth_first_search("List") + _test_breadth_first_search("Matrix") + +def test_breadth_first_search_parallel(): + + def _test_breadth_first_search_parallel(ds): + import pydatastructs.utils.misc_util as utils + GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") + + V1 = GraphNode(0) + V2 = GraphNode(1) + V3 = GraphNode(2) + V4 = GraphNode(3) + V5 = GraphNode(4) + V6 = GraphNode(5) + V7 = GraphNode(6) + V8 = GraphNode(7) + + + G1 = Graph(V1, V2, V3, V4, V5, V6, V7, V8) + + edges = [ + (V1.name, V2.name), + (V1.name, V3.name), + (V1.name, V4.name), + (V2.name, V5.name), + (V2.name, V6.name), + (V3.name, V6.name), + (V3.name, V7.name), + (V4.name, V7.name), + (V4.name, V8.name) + ] + + for edge in edges: + G1.add_edge(*edge) + + parent = {} + def bfs_tree(curr_node, next_node, parent): + if next_node != "": + parent[next_node] = curr_node + return True + + breadth_first_search_parallel(G1, V1.name, 5, bfs_tree, parent) + assert (parent[V2.name] == V1.name and parent[V3.name] == V1.name and + parent[V4.name] == V1.name and parent[V5.name] == V2.name and + (parent[V6.name] in (V2.name, V3.name)) and + (parent[V7.name] in (V3.name, V4.name)) and (parent[V8.name] == V4.name)) + + _test_breadth_first_search_parallel("List") + _test_breadth_first_search_parallel("Matrix") + +def test_minimum_spanning_tree(): + + def _test_minimum_spanning_tree(func, ds, algorithm, *args): + import pydatastructs.utils.misc_util as utils + GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") + a, b, c, d, e = [GraphNode(x) for x in [0, 1, 2, 
3, 4]] + graph = Graph(a, b, c, d, e) + graph.add_edge(a.name, c.name, 10) + graph.add_edge(c.name, a.name, 10) + graph.add_edge(a.name, d.name, 7) + graph.add_edge(d.name, a.name, 7) + graph.add_edge(c.name, d.name, 9) + graph.add_edge(d.name, c.name, 9) + graph.add_edge(d.name, b.name, 32) + graph.add_edge(b.name, d.name, 32) + graph.add_edge(d.name, e.name, 23) + graph.add_edge(e.name, d.name, 23) + mst = func(graph, algorithm, *args) + expected_mst = [('0_3', 7), ('2_3', 9), ('3_4', 23), ('3_1', 32), + ('3_0', 7), ('3_2', 9), ('4_3', 23), ('1_3', 32)] + assert len(expected_mst) == len(mst.edge_weights.items()) + for k, v in mst.edge_weights.items(): + assert (k, v.value) in expected_mst + + def _test_minimum_spanning_tree_cpp(ds, algorithm, *args): + if (ds == 'List' and algorithm == "prim"): + a1 = AdjacencyListGraphNode('a', 0, backend = Backend.CPP) + b1 = AdjacencyListGraphNode('b', 0, backend = Backend.CPP) + c1 = AdjacencyListGraphNode('c', 0, backend = Backend.CPP) + d1 = AdjacencyListGraphNode('d', 0, backend = Backend.CPP) + e1 = AdjacencyListGraphNode('e', 0, backend = Backend.CPP) + g = Graph(a1, b1, c1, d1, e1, backend = Backend.CPP) + g.add_edge(a1.name, c1.name, 10) + g.add_edge(c1.name, a1.name, 10) + g.add_edge(a1.name, d1.name, 7) + g.add_edge(d1.name, a1.name, 7) + g.add_edge(c1.name, d1.name, 9) + g.add_edge(d1.name, c1.name, 9) + g.add_edge(d1.name, b1.name, 32) + g.add_edge(b1.name, d1.name, 32) + g.add_edge(d1.name, e1.name, 23) + g.add_edge(e1.name, d1.name, 23) + mst = minimum_spanning_tree(g, "prim", backend = Backend.CPP) + expected_mst = ["('a', 'd', 7)", "('d', 'c', 9)", "('e', 'd', 23)", "('b', 'd', 32)", + "('d', 'a', 7)", "('c', 'd', 9)", "('d', 'e', 23)", "('d', 'b', 32)"] + assert str(mst.get_edge('a', 'd')) in expected_mst + assert str(mst.get_edge('e', 'd')) in expected_mst + assert str(mst.get_edge('d', 'c')) in expected_mst + assert str(mst.get_edge('b', 'd')) in expected_mst + assert mst.num_edges() == 8 + 
a=AdjacencyListGraphNode('0', 0, backend = Backend.CPP) + b=AdjacencyListGraphNode('1', 0, backend = Backend.CPP) + c=AdjacencyListGraphNode('2', 0, backend = Backend.CPP) + d=AdjacencyListGraphNode('3', 0, backend = Backend.CPP) + g2 = Graph(a,b,c,d,backend = Backend.CPP) + g2.add_edge('0', '1', 74) + g2.add_edge('1', '0', 74) + g2.add_edge('0', '3', 55) + g2.add_edge('3', '0', 55) + g2.add_edge('1', '2', 74) + g2.add_edge('2', '1', 74) + mst2=minimum_spanning_tree(g2, "prim", backend = Backend.CPP) + assert mst2.num_edges() == 6 + + fmst = minimum_spanning_tree + fmstp = minimum_spanning_tree_parallel + _test_minimum_spanning_tree(fmst, "List", "kruskal") + _test_minimum_spanning_tree(fmst, "Matrix", "kruskal") + _test_minimum_spanning_tree(fmst, "List", "prim") + _test_minimum_spanning_tree(fmstp, "List", "kruskal", 3) + _test_minimum_spanning_tree(fmstp, "Matrix", "kruskal", 3) + _test_minimum_spanning_tree(fmstp, "List", "prim", 3) + _test_minimum_spanning_tree_cpp("List", "prim") + +def test_strongly_connected_components(): + + def _test_strongly_connected_components(func, ds, algorithm, *args): + import pydatastructs.utils.misc_util as utils + GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") + a, b, c, d, e, f, g, h = \ + [GraphNode(chr(x)) for x in range(ord('a'), ord('h') + 1)] + graph = Graph(a, b, c, d, e, f, g, h) + graph.add_edge(a.name, b.name) + graph.add_edge(b.name, c.name) + graph.add_edge(b.name, f.name) + graph.add_edge(b.name, e.name) + graph.add_edge(c.name, d.name) + graph.add_edge(c.name, g.name) + graph.add_edge(d.name, h.name) + graph.add_edge(d.name, c.name) + graph.add_edge(e.name, f.name) + graph.add_edge(e.name, a.name) + graph.add_edge(f.name, g.name) + graph.add_edge(g.name, f.name) + graph.add_edge(h.name, d.name) + graph.add_edge(h.name, g.name) + comps = func(graph, algorithm) + expected_comps = [{'e', 'a', 'b'}, {'d', 'c', 'h'}, {'g', 'f'}] + assert comps.sort() == expected_comps.sort() + + scc = 
strongly_connected_components + _test_strongly_connected_components(scc, "List", "kosaraju") + _test_strongly_connected_components(scc, "Matrix", "kosaraju") + _test_strongly_connected_components(scc, "List", "tarjan") + _test_strongly_connected_components(scc, "Matrix", "tarjan") + +def test_depth_first_search(): + + def _test_depth_first_search(ds): + import pydatastructs.utils.misc_util as utils + GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") + + V1 = GraphNode(0) + V2 = GraphNode(1) + V3 = GraphNode(2) + + G1 = Graph(V1, V2, V3) + + edges = [ + (V1.name, V2.name), + (V2.name, V3.name), + (V1.name, V3.name) + ] + + for edge in edges: + G1.add_edge(*edge) + + parent = {} + def dfs_tree(curr_node, next_node, parent): + if next_node != "": + parent[next_node] = curr_node + return True + + depth_first_search(G1, V1.name, dfs_tree, parent) + assert (parent[V3.name] == V1.name and parent[V2.name] == V1.name) or \ + (parent[V3.name] == V2.name and parent[V2.name] == V1.name) + + V4 = GraphNode(0) + V5 = GraphNode(1) + V6 = GraphNode(2) + V7 = GraphNode(3) + V8 = GraphNode(4) + + edges = [ + (V4.name, V5.name), + (V5.name, V6.name), + (V6.name, V7.name), + (V6.name, V4.name), + (V7.name, V8.name) + ] + + G2 = Graph(V4, V5, V6, V7, V8) + + for edge in edges: + G2.add_edge(*edge) + + path = [] + def path_finder(curr_node, next_node, dest_node, parent, path): + if next_node != "": + parent[next_node] = curr_node + if curr_node == dest_node: + node = curr_node + path.append(node) + while node is not None: + if parent.get(node, None) is not None: + path.append(parent[node]) + node = parent.get(node, None) + path.reverse() + return False + return True + + parent.clear() + depth_first_search(G2, V4.name, path_finder, V7.name, parent, path) + assert path == [V4.name, V5.name, V6.name, V7.name] + + _test_depth_first_search("List") + _test_depth_first_search("Matrix") + +def test_shortest_paths(): + + def _test_shortest_paths_positive_edges(ds, algorithm): + import 
pydatastructs.utils.misc_util as utils + GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") + vertices = [GraphNode('S'), GraphNode('C'), + GraphNode('SLC'), GraphNode('SF'), + GraphNode('D')] + + graph = Graph(*vertices) + graph.add_edge('S', 'SLC', 2) + graph.add_edge('C', 'S', 4) + graph.add_edge('C', 'D', 2) + graph.add_edge('SLC', 'C', 2) + graph.add_edge('SLC', 'D', 3) + graph.add_edge('SF', 'SLC', 2) + graph.add_edge('SF', 'S', 2) + graph.add_edge('D', 'SF', 3) + dist, pred = shortest_paths(graph, algorithm, 'SLC') + assert dist == {'S': 6, 'C': 2, 'SLC': 0, 'SF': 6, 'D': 3} + assert pred == {'S': 'C', 'C': 'SLC', 'SLC': None, 'SF': 'D', 'D': 'SLC'} + dist, pred = shortest_paths(graph, algorithm, 'SLC', 'SF') + assert dist == 6 + assert pred == {'S': 'C', 'C': 'SLC', 'SLC': None, 'SF': 'D', 'D': 'SLC'} + graph.remove_edge('SLC', 'D') + graph.add_edge('D', 'SLC', -10) + assert raises(ValueError, lambda: shortest_paths(graph, 'bellman_ford', 'SLC')) + + if (ds == 'List' and algorithm == 'dijkstra'): + vertices2 = [AdjacencyListGraphNode('S', 0, backend = Backend.CPP), AdjacencyListGraphNode('C', 0, backend = Backend.CPP), + AdjacencyListGraphNode('SLC', 0, backend = Backend.CPP), AdjacencyListGraphNode('SF', 0, backend = Backend.CPP), + AdjacencyListGraphNode('D', 0, backend = Backend.CPP)] + graph2 = Graph(*vertices2, backend = Backend.CPP) + graph2.add_edge('S', 'SLC', 2) + graph2.add_edge('C', 'S', 4) + graph2.add_edge('C', 'D', 2) + graph2.add_edge('SLC', 'C', 2) + graph2.add_edge('SLC', 'D', 3) + graph2.add_edge('SF', 'SLC', 2) + graph2.add_edge('SF', 'S', 2) + graph2.add_edge('D', 'SF', 3) + (dist2, pred2) = shortest_paths(graph2, algorithm, 'SLC', backend = Backend.CPP) + assert dist2 == {'S': 6, 'C': 2, 'SLC': 0, 'SF': 6, 'D': 3} + assert pred2 == {'S': 'C', 'C': 'SLC', 'SLC': None, 'SF': 'D', 'D': 'SLC'} + + + + def _test_shortest_paths_negative_edges(ds, algorithm): + import pydatastructs.utils.misc_util as utils + GraphNode = getattr(utils, 
"Adjacency" + ds + "GraphNode") + vertices = [GraphNode('s'), GraphNode('a'), + GraphNode('b'), GraphNode('c'), + GraphNode('d')] + + graph = Graph(*vertices) + graph.add_edge('s', 'a', 3) + graph.add_edge('s', 'b', 2) + graph.add_edge('a', 'c', 1) + graph.add_edge('b', 'd', 1) + graph.add_edge('b', 'a', -2) + graph.add_edge('c', 'd', 1) + dist, pred = shortest_paths(graph, algorithm, 's') + assert dist == {'s': 0, 'a': 0, 'b': 2, 'c': 1, 'd': 2} + assert pred == {'s': None, 'a': 'b', 'b': 's', 'c': 'a', 'd': 'c'} + dist, pred = shortest_paths(graph, algorithm, 's', 'd') + assert dist == 2 + assert pred == {'s': None, 'a': 'b', 'b': 's', 'c': 'a', 'd': 'c'} + + _test_shortest_paths_positive_edges("List", 'bellman_ford') + _test_shortest_paths_positive_edges("Matrix", 'bellman_ford') + _test_shortest_paths_negative_edges("List", 'bellman_ford') + _test_shortest_paths_negative_edges("Matrix", 'bellman_ford') + _test_shortest_paths_positive_edges("List", 'dijkstra') + _test_shortest_paths_positive_edges("Matrix", 'dijkstra') + +def test_all_pair_shortest_paths(): + + def _test_shortest_paths_negative_edges(ds, algorithm): + import pydatastructs.utils.misc_util as utils + GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") + vertices = [GraphNode('1'), GraphNode('2'), + GraphNode('3'), GraphNode('4')] + + graph = Graph(*vertices) + graph.add_edge('1', '3', -2) + graph.add_edge('2', '1', 4) + graph.add_edge('2', '3', 3) + graph.add_edge('3', '4', 2) + graph.add_edge('4', '2', -1) + dist, next_v = all_pair_shortest_paths(graph, algorithm) + assert dist == {'1': {'3': -2, '1': 0, '4': 0, '2': -1}, + '2': {'1': 4, '3': 2, '2': 0, '4': 4}, + '3': {'4': 2, '3': 0, '1': 5, '2': 1}, + '4': {'2': -1, '4': 0, '1': 3, '3': 1}} + assert next_v == {'1': {'3': '1', '1': '1', '4': None, '2': None}, + '2': {'1': '2', '3': None, '2': '2', '4': None}, + '3': {'4': '3', '3': '3', '1': None, '2': None}, + '4': {'2': '4', '4': '4', '1': None, '3': None}} + + 
_test_shortest_paths_negative_edges("List", 'floyd_warshall') + _test_shortest_paths_negative_edges("Matrix", 'floyd_warshall') + _test_shortest_paths_negative_edges("List", 'johnson') + +def test_topological_sort(): + + def _test_topological_sort(func, ds, algorithm, threads=None): + import pydatastructs.utils.misc_util as utils + GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") + vertices = [GraphNode('2'), GraphNode('3'), GraphNode('5'), + GraphNode('7'), GraphNode('8'), GraphNode('10'), + GraphNode('11'), GraphNode('9')] + + graph = Graph(*vertices) + graph.add_edge('5', '11') + graph.add_edge('7', '11') + graph.add_edge('7', '8') + graph.add_edge('3', '8') + graph.add_edge('3', '10') + graph.add_edge('11', '2') + graph.add_edge('11', '9') + graph.add_edge('11', '10') + graph.add_edge('8', '9') + if threads is not None: + l = func(graph, algorithm, threads) + else: + l = func(graph, algorithm) + assert all([(l1 in l[0:3]) for l1 in ('3', '5', '7')] + + [(l2 in l[3:5]) for l2 in ('8', '11')] + + [(l3 in l[5:]) for l3 in ('10', '9', '2')]) + + _test_topological_sort(topological_sort, "List", "kahn") + _test_topological_sort(topological_sort_parallel, "List", "kahn", 3) + + +def test_max_flow(): + def _test_max_flow(ds, algorithm): + import pydatastructs.utils.misc_util as utils + GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") + + a = GraphNode('a') + b = GraphNode('b') + c = GraphNode('c') + d = GraphNode('d') + e = GraphNode('e') + + G = Graph(a, b, c, d, e) + + G.add_edge('a', 'b', 3) + G.add_edge('a', 'c', 4) + G.add_edge('b', 'c', 2) + G.add_edge('b', 'd', 3) + G.add_edge('c', 'd', 1) + G.add_edge('d', 'e', 6) + + assert max_flow(G, 'a', 'e', algorithm) == 4 + assert max_flow(G, 'a', 'c', algorithm) == 6 + + a = GraphNode('a') + b = GraphNode('b') + c = GraphNode('c') + d = GraphNode('d') + e = GraphNode('e') + f = GraphNode('f') + + G2 = Graph(a, b, c, d, e, f) + + G2.add_edge('a', 'b', 16) + G2.add_edge('a', 'c', 13) + G2.add_edge('b', 
'c', 10) + G2.add_edge('b', 'd', 12) + G2.add_edge('c', 'b', 4) + G2.add_edge('c', 'e', 14) + G2.add_edge('d', 'c', 9) + G2.add_edge('d', 'f', 20) + G2.add_edge('e', 'd', 7) + G2.add_edge('e', 'f', 4) + + assert max_flow(G2, 'a', 'f', algorithm) == 23 + assert max_flow(G2, 'a', 'd', algorithm) == 19 + + a = GraphNode('a') + b = GraphNode('b') + c = GraphNode('c') + d = GraphNode('d') + + G3 = Graph(a, b, c, d) + + G3.add_edge('a', 'b', 3) + G3.add_edge('a', 'c', 2) + G3.add_edge('b', 'c', 2) + G3.add_edge('b', 'd', 3) + G3.add_edge('c', 'd', 2) + + assert max_flow(G3, 'a', 'd', algorithm) == 5 + assert max_flow(G3, 'a', 'b', algorithm) == 3 + + + _test_max_flow("List", "edmonds_karp") + _test_max_flow("Matrix", "edmonds_karp") + _test_max_flow("List", "dinic") + _test_max_flow("Matrix", "dinic") + + +def test_find_bridges(): + def _test_find_bridges(ds): + import pydatastructs.utils.misc_util as utils + GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") + + impl = 'adjacency_list' if ds == "List" else 'adjacency_matrix' + + v0 = GraphNode(0) + v1 = GraphNode(1) + v2 = GraphNode(2) + v3 = GraphNode(3) + v4 = GraphNode(4) + + G1 = Graph(v0, v1, v2, v3, v4, implementation=impl) + G1.add_edge(v0.name, v1.name) + G1.add_edge(v1.name, v2.name) + G1.add_edge(v2.name, v3.name) + G1.add_edge(v3.name, v4.name) + + bridges = find_bridges(G1) + expected_bridges = [('0', '1'), ('1', '2'), ('2', '3'), ('3', '4')] + assert sorted(bridges) == sorted(expected_bridges) + + u0 = GraphNode(0) + u1 = GraphNode(1) + u2 = GraphNode(2) + + G2 = Graph(u0, u1, u2, implementation=impl) + G2.add_edge(u0.name, u1.name) + G2.add_edge(u1.name, u2.name) + G2.add_edge(u2.name, u0.name) + + bridges = find_bridges(G2) + assert bridges == [] + + w0 = GraphNode(0) + w1 = GraphNode(1) + w2 = GraphNode(2) + w3 = GraphNode(3) + w4 = GraphNode(4) + + G3 = Graph(w0, w1, w2, w3, w4, implementation=impl) + G3.add_edge(w0.name, w1.name) + G3.add_edge(w1.name, w2.name) + G3.add_edge(w3.name, w4.name) + 
+ bridges = find_bridges(G3) + expected_bridges = [('0', '1'), ('1', '2'), ('3', '4')] + assert sorted(bridges) == sorted(expected_bridges) + + _test_find_bridges("List") + _test_find_bridges("Matrix") diff --git a/lib/python3.12/site-packages/pydatastructs/linear_data_structures/__init__.py b/lib/python3.12/site-packages/pydatastructs/linear_data_structures/__init__.py new file mode 100644 index 000000000..c6b3341d2 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/linear_data_structures/__init__.py @@ -0,0 +1,53 @@ +__all__ = [] + +from . import ( + arrays, + linked_lists, + algorithms, +) + +from .arrays import ( + OneDimensionalArray, + DynamicOneDimensionalArray, + MultiDimensionalArray, + ArrayForTrees +) +__all__.extend(arrays.__all__) + +from .linked_lists import ( + SinglyLinkedList, + DoublyLinkedList, + SinglyCircularLinkedList, + DoublyCircularLinkedList, + SkipList +) +__all__.extend(linked_lists.__all__) + +from .algorithms import ( + merge_sort_parallel, + brick_sort, + brick_sort_parallel, + heapsort, + matrix_multiply_parallel, + counting_sort, + bucket_sort, + cocktail_shaker_sort, + quick_sort, + longest_common_subsequence, + is_ordered, + upper_bound, + lower_bound, + longest_increasing_subsequence, + next_permutation, + prev_permutation, + bubble_sort, + linear_search, + binary_search, + jump_search, + selection_sort, + insertion_sort, + intro_sort, + shell_sort, + radix_sort +) +__all__.extend(algorithms.__all__) diff --git a/lib/python3.12/site-packages/pydatastructs/linear_data_structures/_backend/__init__.py b/lib/python3.12/site-packages/pydatastructs/linear_data_structures/_backend/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/lib/python3.12/site-packages/pydatastructs/linear_data_structures/algorithms.py b/lib/python3.12/site-packages/pydatastructs/linear_data_structures/algorithms.py new file mode 100644 index 000000000..6d383fdca --- /dev/null +++ 
b/lib/python3.12/site-packages/pydatastructs/linear_data_structures/algorithms.py @@ -0,0 +1,2010 @@ +from pydatastructs.linear_data_structures.arrays import ( + OneDimensionalArray, DynamicArray, DynamicOneDimensionalArray, Array) +from pydatastructs.linear_data_structures._backend.cpp import _algorithms, _arrays +from pydatastructs.utils.misc_util import ( + _check_type, _comp, Backend, + raise_if_backend_is_not_python) +from concurrent.futures import ThreadPoolExecutor +from math import log, floor, sqrt + +__all__ = [ + 'merge_sort_parallel', + 'brick_sort', + 'brick_sort_parallel', + 'heapsort', + 'matrix_multiply_parallel', + 'counting_sort', + 'bucket_sort', + 'cocktail_shaker_sort', + 'quick_sort', + 'longest_common_subsequence', + 'is_ordered', + 'upper_bound', + 'lower_bound', + 'longest_increasing_subsequence', + 'next_permutation', + 'prev_permutation', + 'bubble_sort', + 'linear_search', + 'binary_search', + 'jump_search', + 'selection_sort', + 'insertion_sort', + 'intro_sort', + 'shell_sort', + 'radix_sort' +] + +def _merge(array, sl, el, sr, er, end, comp): + l, r = [], [] + for i in range(sl, el + 1): + if i <= end: + l.append(array[i]) + array[i] = None + for i in range(sr, er + 1): + if i <= end: + r.append(array[i]) + array[i] = None + i, j, k = 0, 0, sl + while i < len(l) and j < len(r): + if _comp(l[i], r[j], comp): + array[k] = l[i] + i += 1 + else: + array[k] = r[j] + j += 1 + k += 1 + + while i < len(l): + array[k] = l[i] + i += 1 + k += 1 + + while j < len(r): + array[k] = r[j] + j += 1 + k += 1 + +def merge_sort_parallel(array, num_threads, **kwargs): + """ + Implements parallel merge sort. + + Parameters + ========== + + array: Array + The array which is to be sorted. + num_threads: int + The maximum number of threads + to be used for sorting. + start: int + The starting index of the portion + which is to be sorted. + Optional, by default 0 + end: int + The ending index of the portion which + is to be sorted. 
+ Optional, by default the index + of the last position filled. + comp: lambda/function + The comparator which is to be used + for sorting. If the function returns + False then only swapping is performed. + Optional, by default, less than or + equal to is used for comparing two + values. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Examples + ======== + + >>> from pydatastructs import OneDimensionalArray, merge_sort_parallel + >>> arr = OneDimensionalArray(int,[3, 2, 1]) + >>> merge_sort_parallel(arr, 3) + >>> [arr[0], arr[1], arr[2]] + [1, 2, 3] + >>> merge_sort_parallel(arr, 3, comp=lambda u, v: u > v) + >>> [arr[0], arr[1], arr[2]] + [3, 2, 1] + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Merge_sort + """ + raise_if_backend_is_not_python( + merge_sort_parallel, kwargs.get('backend', Backend.PYTHON)) + start = kwargs.get('start', 0) + end = kwargs.get('end', len(array) - 1) + comp = kwargs.get("comp", lambda u, v: u <= v) + for size in range(floor(log(end - start + 1, 2)) + 1): + pow_2 = 2**size + with ThreadPoolExecutor(max_workers=num_threads) as Executor: + i = start + while i <= end: + Executor.submit( + _merge, + array, + i, i + pow_2 - 1, + i + pow_2, i + 2*pow_2 - 1, + end, comp).result() + i = i + 2*pow_2 + + if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)): + array._modify(True) + +def brick_sort(array, **kwargs): + """ + Implements Brick Sort / Odd Even sorting algorithm + + Parameters + ========== + + array: Array + The array which is to be sorted. + start: int + The starting index of the portion + which is to be sorted. + Optional, by default 0 + end: int + The ending index of the portion which + is to be sorted. + Optional, by default the index + of the last position filled. + comp: lambda/function + The comparator which is to be used + for sorting. If the function returns + False then only swapping is performed. 
+ Optional, by default, less than or + equal to is used for comparing two + values. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Examples + ======== + >>> from pydatastructs import OneDimensionalArray, brick_sort + >>> arr = OneDimensionalArray(int,[3, 2, 1]) + >>> brick_sort(arr) + >>> [arr[0], arr[1], arr[2]] + [1, 2, 3] + >>> brick_sort(arr, comp=lambda u, v: u > v) + >>> [arr[0], arr[1], arr[2]] + [3, 2, 1] + + References + ========== + .. [1] https://www.geeksforgeeks.org/odd-even-sort-brick-sort/ + """ + raise_if_backend_is_not_python( + brick_sort, kwargs.get('backend', Backend.PYTHON)) + start = kwargs.get('start', 0) + end = kwargs.get('end', len(array) - 1) + comp = kwargs.get("comp", lambda u, v: u <= v) + + is_sorted = False + while is_sorted is False: + is_sorted = True + for i in range(start+1, end, 2): + if _comp(array[i+1], array[i], comp): + array[i], array[i+1] = array[i+1], array[i] + is_sorted = False + for i in range(start, end, 2): + if _comp(array[i+1], array[i], comp): + array[i], array[i+1] = array[i+1], array[i] + is_sorted = False + + if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)): + array._modify(True) + +def _brick_sort_swap(array, i, j, comp, is_sorted): + if _comp(array[j], array[i], comp): + array[i], array[j] = array[j], array[i] + is_sorted[0] = False + +def brick_sort_parallel(array, num_threads, **kwargs): + """ + Implements Concurrent Brick Sort / Odd Even sorting algorithm + + Parameters + ========== + + array: Array/list + The array which is to be sorted. + num_threads: int + The maximum number of threads + to be used for sorting. + start: int + The starting index of the portion + which is to be sorted. + Optional, by default 0 + end: int + The ending index of the portion which + is to be sorted. + Optional, by default the index + of the last position filled. 
+ comp: lambda/function + The comparator which is to be used + for sorting. If the function returns + False then only swapping is performed. + Optional, by default, less than or + equal to is used for comparing two + values. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Examples + ======== + + >>> from pydatastructs import OneDimensionalArray, brick_sort_parallel + >>> arr = OneDimensionalArray(int,[3, 2, 1]) + >>> brick_sort_parallel(arr, num_threads=5) + >>> [arr[0], arr[1], arr[2]] + [1, 2, 3] + >>> brick_sort_parallel(arr, num_threads=5, comp=lambda u, v: u > v) + >>> [arr[0], arr[1], arr[2]] + [3, 2, 1] + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Odd%E2%80%93even_sort + """ + raise_if_backend_is_not_python( + brick_sort_parallel, kwargs.get('backend', Backend.PYTHON)) + start = kwargs.get('start', 0) + end = kwargs.get('end', len(array) - 1) + comp = kwargs.get("comp", lambda u, v: u <= v) + + is_sorted = [False] + with ThreadPoolExecutor(max_workers=num_threads) as Executor: + while is_sorted[0] is False: + is_sorted[0] = True + for i in range(start + 1, end, 2): + Executor.submit(_brick_sort_swap, array, i, i + 1, comp, is_sorted).result() + + for i in range(start, end, 2): + Executor.submit(_brick_sort_swap, array, i, i + 1, comp, is_sorted).result() + + if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)): + array._modify(True) + +def heapsort(array, **kwargs): + """ + Implements Heapsort algorithm. + + Parameters + ========== + + array: Array + The array which is to be sorted. + start: int + The starting index of the portion + which is to be sorted. + Optional, by default 0 + end: int + The ending index of the portion which + is to be sorted. + Optional, by default the index + of the last position filled. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. 
+ + Examples + ======== + + >>> from pydatastructs import OneDimensionalArray, heapsort + >>> arr = OneDimensionalArray(int,[3, 2, 1]) + >>> heapsort(arr) + >>> [arr[0], arr[1], arr[2]] + [1, 2, 3] + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Heapsort + + Note + ==== + + This function does not support custom comparators as is the case with + other sorting functions in this file. + """ + raise_if_backend_is_not_python( + heapsort, kwargs.get('backend', Backend.PYTHON)) + from pydatastructs.trees.heaps import BinaryHeap + + start = kwargs.get('start', 0) + end = kwargs.get('end', len(array) - 1) + + h = BinaryHeap(heap_property="min") + for i in range(start, end+1): + if array[i] is not None: + h.insert(array[i]) + array[i] = None + + i = start + while not h.is_empty: + array[i] = h.extract().key + i += 1 + + if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)): + array._modify(True) + +def counting_sort(array: Array, **kwargs) -> Array: + """ + Performs counting sort on the given array. + + Parameters + ========== + + array: Array + The array which is to be sorted. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Returns + ======= + + output: Array + The sorted array. + + Examples + ======== + + >>> from pydatastructs import DynamicOneDimensionalArray as DODA, counting_sort + >>> arr = DODA(int, [5, 78, 1, 0]) + >>> out = counting_sort(arr) + >>> str(out) + "['0', '1', '5', '78']" + >>> arr.delete(2) + >>> out = counting_sort(arr) + >>> str(out) + "['0', '5', '78']" + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Counting_sort + + Note + ==== + + Since counting sort is a non-comparison sorting algorithm, + custom comparators aren't allowed. + The output array doesn't contain any `None` value. 
+ """ + raise_if_backend_is_not_python( + counting_sort, kwargs.get('backend', Backend.PYTHON)) + max_val, min_val = array[0], array[0] + none_count = 0 + for i in range(len(array)): + if array[i] is not None: + if max_val is None or max_val < array[i]: + max_val = array[i] + if min_val is None or array[i] < min_val: + min_val = array[i] + else: + none_count += 1 + if min_val is None or max_val is None: + return array + + count = [0 for _ in range(max_val - min_val + 1)] + for i in range(len(array)): + if array[i] is not None: + count[array[i] - min_val] += 1 + + total = 0 + for i in range(max_val - min_val + 1): + count[i], total = total, count[i] + total + + output = type(array)(array._dtype, + [array[i] for i in range(len(array)) + if array[i] is not None]) + if _check_type(output, DynamicArray): + output._modify(force=True) + + for i in range(len(array)): + x = array[i] + if x is not None: + output[count[x-min_val]] = x + count[x-min_val] += 1 + + return output + +def _matrix_multiply_helper(m1, m2, row, col): + s = 0 + for i in range(len(m1)): + s += m1[row][i] * m2[i][col] + return s + +def matrix_multiply_parallel(matrix_1, matrix_2, num_threads): + """ + Implements concurrent Matrix multiplication + + Parameters + ========== + + matrix_1: Any matrix representation + Left matrix + matrix_2: Any matrix representation + Right matrix + num_threads: int + The maximum number of threads + to be used for multiplication. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Raises + ====== + + ValueError + When the columns in matrix_1 are not equal to the rows in matrix_2 + + Returns + ======= + + C: list + The result of matrix multiplication. 
+ + Examples + ======== + + >>> from pydatastructs import matrix_multiply_parallel + >>> I = [[1, 1, 0], [0, 1, 0], [0, 0, 1]] + >>> J = [[2, 1, 2], [1, 2, 1], [2, 2, 2]] + >>> matrix_multiply_parallel(I, J, num_threads=5) + [[3, 3, 3], [1, 2, 1], [2, 2, 2]] + + References + ========== + .. [1] https://www3.nd.edu/~zxu2/acms60212-40212/Lec-07-3.pdf + """ + row_matrix_1, col_matrix_1 = len(matrix_1), len(matrix_1[0]) + row_matrix_2, col_matrix_2 = len(matrix_2), len(matrix_2[0]) + + if col_matrix_1 != row_matrix_2: + raise ValueError("Matrix size mismatch: %s * %s"%( + (row_matrix_1, col_matrix_1), (row_matrix_2, col_matrix_2))) + + C = [[None for i in range(col_matrix_1)] for j in range(row_matrix_2)] + + with ThreadPoolExecutor(max_workers=num_threads) as Executor: + for i in range(row_matrix_1): + for j in range(col_matrix_2): + C[i][j] = Executor.submit(_matrix_multiply_helper, + matrix_1, + matrix_2, + i, j).result() + + return C + +def _bucket_sort_helper(bucket: Array) -> Array: + for i in range(1, len(bucket)): + key = bucket[i] + j = i - 1 + while j >= 0 and bucket[j] > key: + bucket[j+1] = bucket[j] + j -= 1 + bucket[j+1] = key + return bucket + +def bucket_sort(array: Array, **kwargs) -> Array: + """ + Performs bucket sort on the given array. + + Parameters + ========== + + array: Array + The array which is to be sorted. + start: int + The starting index of the portion + which is to be sorted. + Optional, by default 0 + end: int + The ending index of the portion which + is to be sorted. + Optional, by default the index + of the last position filled. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Returns + ======= + + output: Array + The sorted array. 
+ + Examples + ======== + + >>> from pydatastructs import DynamicOneDimensionalArray as DODA, bucket_sort + >>> arr = DODA(int, [5, 78, 1, 0]) + >>> out = bucket_sort(arr) + >>> str(out) + "['0', '1', '5', '78']" + >>> arr.delete(2) + >>> out = bucket_sort(arr) + >>> str(out) + "['0', '1', '78']" + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Bucket_sort + + Note + ==== + + This function does not support custom comparators as is the case with + other sorting functions in this file. + """ + raise_if_backend_is_not_python( + bucket_sort, kwargs.get('backend', Backend.PYTHON)) + start = kwargs.get('start', 0) + end = kwargs.get('end', len(array) - 1) + + #Find maximum value in the list and use length of the list to determine which value in the list goes into which bucket + max_value = None + for i in range(start, end+1): + if array[i] is not None: + max_value = array[i] + + count = 0 + for i in range(start, end+1): + if array[i] is not None: + count += 1 + if array[i] > max_value: + max_value = array[i] + + number_of_null_values = end - start + 1 - count + size = max_value // count + + # Create n empty buckets where n is equal to the length of the input list + buckets_list = [[] for _ in range(count)] + + # Put list elements into different buckets based on the size + for i in range(start, end + 1): + if array[i] is not None: + j = array[i] // size + if j is not count: + buckets_list[j].append(array[i]) + else: + buckets_list[count-1].append(array[i]) + + # Sort elements within the buckets using Insertion Sort + for z in range(count): + _bucket_sort_helper(buckets_list[z]) + + # Concatenate buckets with sorted elements into a single array + sorted_list = [] + for x in range(count): + sorted_list.extend(buckets_list[x]) + for i in range(end, end - number_of_null_values, -1): + array[i] = None + for i in range(start, end - number_of_null_values + 1): + array[i] = sorted_list[i-start] + if _check_type(array, (DynamicArray, 
def cocktail_shaker_sort(array: Array, **kwargs) -> Array:
    """
    Performs cocktail shaker sort on the given array.

    Parameters
    ==========

    array: Array
        The array which is to be sorted.
    start: int
        The starting index of the portion to sort. Optional, by default 0.
    end: int
        The ending index (inclusive) of the portion to sort.
        Optional, by default the index of the last position filled.
    comp: lambda/function
        The comparator used for sorting; a swap is performed only when
        it returns False. Optional, by default less-than-or-equal.
    backend: pydatastructs.Backend
        The backend to be used. Optional, by default the best available.

    Returns
    =======

    output: Array
        The sorted array.

    Examples
    ========

    >>> from pydatastructs import OneDimensionalArray as ODA, cocktail_shaker_sort
    >>> arr = ODA(int, [5, 78, 1, 0])
    >>> out = cocktail_shaker_sort(arr)
    >>> str(out)
    '[0, 1, 5, 78]'

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Cocktail_shaker_sort
    """
    raise_if_backend_is_not_python(
        cocktail_shaker_sort, kwargs.get('backend', Backend.PYTHON))

    low = kwargs.get('start', 0)
    high = kwargs.get('end', len(array) - 1)
    comp = kwargs.get("comp", lambda u, v: u <= v)

    clean_pass = False
    while not clean_pass and high - low >= 1:
        clean_pass = True

        # Forward sweep: bubble the largest element up toward `high`.
        for idx in range(low, high):
            if _comp(array[idx], array[idx + 1], comp) is False:
                array[idx], array[idx + 1] = array[idx + 1], array[idx]
                clean_pass = False
        high -= 1

        # Backward sweep: bubble the smallest element down toward `low`.
        for idx in range(high, low, -1):
            if _comp(array[idx - 1], array[idx], comp) is False:
                array[idx - 1], array[idx] = array[idx], array[idx - 1]
                clean_pass = False
        low += 1

    if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)):
        array._modify(True)

    return array
def quick_sort(array: Array, **kwargs) -> Array:
    """
    Performs an iterative (explicit-stack) quick sort on the given array.

    Parameters
    ==========

    array: Array
        The array which is to be sorted.
    start: int
        The starting index of the portion to sort. Optional, by default 0.
    end: int
        The ending index (inclusive) of the portion to sort.
        Optional, by default the index of the last position filled.
    comp: lambda/function
        The comparator used for sorting; a swap is performed only when
        it returns False. Optional, by default less-than-or-equal.
    pick_pivot_element: lambda/function
        Pivot-picking logic; accepts `low`, `high` and `array`.
        Optional, by default picks the element at index `high`.
    backend: pydatastructs.Backend
        The backend to be used. Optional, by default the best available.

    Returns
    =======

    output: Array
        The sorted array.

    Examples
    ========

    >>> from pydatastructs import OneDimensionalArray as ODA, quick_sort
    >>> arr = ODA(int, [5, 78, 1, 0])
    >>> out = quick_sort(arr)
    >>> str(out)
    '[0, 1, 5, 78]'

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Quicksort
    """
    backend = kwargs.pop("backend", Backend.PYTHON)
    if backend == Backend.CPP:
        return _algorithms.quick_sort(array, **kwargs)
    from pydatastructs import Stack
    comp = kwargs.get("comp", lambda u, v: u <= v)
    pick_pivot_element = kwargs.get("pick_pivot_element",
                                    lambda low, high, array: array[high])

    def _partition(left, right):
        # Lomuto-style partition: elements ordered before the pivot end up
        # left of the returned split index.
        boundary = left - 1
        pivot = pick_pivot_element(left, right, array)
        for scan in range(left, right):
            if _comp(array[scan], pivot, comp) is True:
                boundary += 1
                array[boundary], array[scan] = array[scan], array[boundary]
        array[boundary + 1], array[right] = array[right], array[boundary + 1]
        return boundary + 1

    lower = kwargs.get('start', 0)
    upper = kwargs.get('end', len(array) - 1)

    # Pending sub-ranges are kept as (low, high) pairs on an explicit stack.
    pending = Stack()
    pending.push(lower)
    pending.push(upper)

    while pending.is_empty is False:
        high = pending.pop()
        low = pending.pop()
        split = _partition(low, high)
        if split - 1 > low:
            pending.push(low)
            pending.push(split - 1)
        if split + 1 < high:
            pending.push(split + 1)
            pending.push(high)

    if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)):
        array._modify(True)

    return array
def longest_common_subsequence(seq1: OneDimensionalArray, seq2: OneDimensionalArray,
                               **kwargs) -> OneDimensionalArray:
    """
    Finds the longest common subsequence between the
    two given sequences.

    Parameters
    ==========

    seq1: OneDimensionalArray
        The first sequence.
    seq2: OneDimensionalArray
        The second sequence.
    backend: pydatastructs.Backend
        The backend to be used. Optional, by default the best available.

    Returns
    =======

    output: OneDimensionalArray
        The longest common subsequence.

    Examples
    ========

    >>> from pydatastructs import longest_common_subsequence as LCS, OneDimensionalArray as ODA
    >>> arr1 = ODA(str, ['A', 'B', 'C', 'D', 'E'])
    >>> arr2 = ODA(str, ['A', 'B', 'C', 'G' ,'D', 'E', 'F'])
    >>> lcs = LCS(arr1, arr2)
    >>> str(lcs)
    "['A', 'B', 'C', 'D', 'E']"

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Longest_common_subsequence_problem

    Note
    ====

    The data types of elements across both the sequences
    should be same and should be comparable.
    """
    raise_if_backend_is_not_python(
        longest_common_subsequence, kwargs.get('backend', Backend.PYTHON))
    rows, cols = len(seq1), len(seq2)

    # Classic DP, kept as rolling rows of (length, subsequence) pairs.
    prev_row = [(0, []) for _ in range(cols + 1)]
    for i in range(1, rows + 1):
        curr_row = [(0, []) for _ in range(cols + 1)]
        for j in range(1, cols + 1):
            if seq1[i-1] == seq2[j-1]:
                extended = prev_row[j-1][1][:]
                extended.append(seq1[i-1])
                curr_row[j] = (prev_row[j-1][0] + 1, extended)
            elif prev_row[j][0] > curr_row[j-1][0]:
                curr_row[j] = prev_row[j]
            else:
                curr_row[j] = curr_row[j-1]
        prev_row = curr_row

    return OneDimensionalArray(seq1._dtype, prev_row[cols][-1])
def is_ordered(array, **kwargs):
    """
    Checks whether the given array is ordered or not.

    Parameters
    ==========

    array: OneDimensionalArray
        The array checked for the specified ordering among its elements.
    start: int
        The starting index of the portion under consideration.
        Optional, by default 0.
    end: int
        The ending index (inclusive) of the portion under consideration.
        Optional, by default the index of the last position filled.
    comp: lambda/function
        The comparator specifying the desired ordering.
        Optional, by default less-than-or-equal.
    backend: pydatastructs.Backend
        The backend to be used. Optional, by default the best available.

    Returns
    =======

    True if the specified ordering is present
    from start to end (inclusive) otherwise False.

    Examples
    ========

    >>> from pydatastructs import OneDimensionalArray, is_ordered
    >>> arr = OneDimensionalArray(int, [1, 2, 3, 4])
    >>> is_ordered(arr)
    True
    >>> arr1 = OneDimensionalArray(int, [1, 2, 3])
    >>> is_ordered(arr1, start=0, end=1, comp=lambda u, v: u > v)
    False
    """
    backend = kwargs.pop("backend", Backend.PYTHON)
    if backend == Backend.CPP:
        return _algorithms.is_ordered(array, **kwargs)
    first = kwargs.get('start', 0)
    last = kwargs.get('end', len(array) - 1)
    comp = kwargs.get("comp", lambda u, v: u <= v)

    # A pair violates the ordering when comp(current, previous) holds.
    # NOTE(review): with the default `<=` comparator this reports equal
    # neighbors as unordered (strictly-increasing check) — presumably
    # intended; verify against callers before changing.
    for pos in range(first + 1, last + 1):
        current, previous = array[pos], array[pos - 1]
        if current is None or previous is None:
            continue  # empty slots (DynamicArray) never break the ordering
        if comp(current, previous):
            return False
    return True
def upper_bound(array, value, **kwargs):
    """
    Finds the index of the first occurrence of an element greater than the given
    value according to specified order, in the given OneDimensionalArray using a
    variation of binary search method.

    Parameters
    ==========

    array: OneDimensionalArray
        The array in which the upper bound has to be found.
    start: int
        The starting index of the portion of the array in which the upper bound
        of a given value has to be looked for.
        Optional, by default 0
    end: int, optional
        The ending index of the portion of the array in which the upper bound
        of a given value has to be looked for.
        Optional, by default the index
        of the last position filled.
    comp: lambda/function
        The comparator which is to be used
        for specifying the desired ordering.
        Optional, by default, less than
        is used for comparing two values.
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available
        backend is used.

    Returns
    =======

    index: int
        Index of the upper bound of the given value in the given OneDimensionalArray.

    Examples
    ========

    >>> from pydatastructs import upper_bound, OneDimensionalArray as ODA
    >>> arr1 = ODA(int, [4, 5, 5, 6, 7])
    >>> ub = upper_bound(arr1, 5, start=0, end=4)
    >>> ub
    3
    >>> arr2 = ODA(int, [7, 6, 5, 5, 4])
    >>> ub = upper_bound(arr2, 5, comp=lambda x, y: x > y)
    >>> ub
    4

    Note
    ====

    DynamicOneDimensionalArray objects may not work as expected.
    """
    raise_if_backend_is_not_python(
        upper_bound, kwargs.get('backend', Backend.PYTHON))
    start = kwargs.get('start', 0)
    # Unlike most functions in this file, `end` here is exclusive
    # (defaults to len(array), not len(array) - 1).
    end = kwargs.get('end', len(array))
    comp = kwargs.get('comp', lambda x, y: x < y)
    # If no element compares greater than `value`, the answer is `end`.
    index = end
    inclusive_end = end - 1
    if comp(value, array[start]):
        index = start
    while start <= inclusive_end:
        mid = (start + inclusive_end)//2
        if not comp(value, array[mid]):
            # array[mid] is not greater than value: bound lies to the right.
            start = mid + 1
        else:
            # array[mid] is greater: record candidate, keep searching left.
            index = mid
            inclusive_end = mid - 1
    return index
def lower_bound(array, value, **kwargs):
    """
    Finds the index of the first occurrence of an element which is not
    less than the given value according to specified order,
    in the given OneDimensionalArray using a variation of binary search method.

    Parameters
    ==========

    array: OneDimensionalArray
        The array in which the lower bound has to be found.
    start: int
        The starting index of the portion of the array in which the lower bound
        of a given value has to be looked for.
        Optional, by default 0
    end: int, optional
        The ending index of the portion of the array in which the lower bound
        of a given value has to be looked for.
        Optional, by default the index
        of the last position filled.
    comp: lambda/function
        The comparator which is to be used
        for specifying the desired ordering.
        Optional, by default, less than
        is used for comparing two values.
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available
        backend is used.

    Returns
    =======

    index: int
        Index of the lower bound of the given value in the given OneDimensionalArray

    Examples
    ========

    >>> from pydatastructs import lower_bound, OneDimensionalArray as ODA
    >>> arr1 = ODA(int, [4, 5, 5, 6, 7])
    >>> lb = lower_bound(arr1, 5, end=4, comp=lambda x, y : x < y)
    >>> lb
    1
    >>> arr = ODA(int, [7, 6, 5, 5, 4])
    >>> lb = lower_bound(arr, 5, start=0, comp=lambda x, y : x > y)
    >>> lb
    2

    Note
    ====

    DynamicOneDimensionalArray objects may not work as expected.
    """
    raise_if_backend_is_not_python(
        lower_bound, kwargs.get('backend', Backend.PYTHON))
    start = kwargs.get('start', 0)
    # `end` is exclusive here (defaults to len(array)), mirroring upper_bound.
    end = kwargs.get('end', len(array))
    comp = kwargs.get('comp', lambda x, y: x < y)
    # If every element compares less than `value`, the answer is `end`.
    index = end
    inclusive_end = end - 1
    if not comp(array[start], value):
        index = start
    while start <= inclusive_end:
        mid = (start + inclusive_end)//2
        if comp(array[mid], value):
            # array[mid] is still less than value: bound lies to the right.
            start = mid + 1
        else:
            # array[mid] >= value: record candidate, keep searching left.
            index = mid
            inclusive_end = mid - 1
    return index
def longest_increasing_subsequence(array, **kwargs):
    """
    Returns the longest increasing subsequence (as a OneDimensionalArray) that
    can be obtained from a given OneDimensionalArray. A subsequence
    of an array is an ordered subset of the array's elements having the same
    sequential ordering as the original array. Here, an increasing
    sequence stands for a strictly increasing sequence of numbers.

    Parameters
    ==========

    array: OneDimensionalArray
        The given array in the form of a OneDimensionalArray
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available
        backend is used.

    Returns
    =======

    output: OneDimensionalArray
        Returns the longest increasing subsequence that can be obtained
        from the given array

    Examples
    ========

    >>> from pydatastructs import lower_bound, OneDimensionalArray as ODA
    >>> from pydatastructs import longest_increasing_subsequence as LIS
    >>> array = ODA(int, [2, 5, 3, 7, 11, 8, 10, 13, 6])
    >>> longest_inc_subsequence = LIS(array)
    >>> str(longest_inc_subsequence)
    '[2, 3, 7, 8, 10, 13]'
    >>> array2 = ODA(int, [3, 4, -1, 5, 8, 2, 2 ,2, 3, 12, 7, 9, 10])
    >>> longest_inc_subsequence = LIS(array2)
    >>> str(longest_inc_subsequence)
    '[-1, 2, 3, 7, 9, 10]'
    """
    raise_if_backend_is_not_python(
        longest_increasing_subsequence,
        kwargs.get('backend', Backend.PYTHON))
    n = len(array)
    # dp[k] holds the index of the smallest known tail element of an
    # increasing subsequence of length k + 1 (patience-sorting technique).
    dp = OneDimensionalArray(int, n)
    dp.fill(0)
    # parent[i] is the index of the predecessor of array[i] in the best
    # subsequence ending at i; -1 marks the start of a chain.
    parent = OneDimensionalArray(int, n)
    parent.fill(-1)
    length = 0
    for i in range(1, n):
        if array[i] <= array[dp[0]]:
            # New smallest tail for subsequences of length 1.
            dp[0] = i
        elif array[dp[length]] < array[i]:
            # Extends the longest subsequence found so far.
            length += 1
            dp[length] = i
            parent[i] = dp[length - 1]
        else:
            # Replace the first tail that is >= array[i]; binary search over
            # the current tails (lower_bound works on a plain list here).
            curr_array = [array[dp[i]] for i in range(length)]
            ceil = lower_bound(curr_array, array[i])
            dp[ceil] = i
            parent[i] = dp[ceil - 1]
    # Walk parent links backwards from the end of the longest chain, then
    # reverse the collected values into the result array.
    ans = DynamicOneDimensionalArray(int, 0)
    last_index = dp[length]
    while last_index != -1:
        ans.append(array[last_index])
        last_index = parent[last_index]
    n = ans._last_pos_filled + 1
    ans_ODA = OneDimensionalArray(int, n)
    for i in range(n):
        ans_ODA[n-1-i] = ans[i]
    return ans_ODA
def _permutation_util(array, start, end, comp, perm_comp):
    """Shared engine for next_permutation/prev_permutation.

    Copies array[start:end + 1] into a working buffer, finds the rightmost
    pivot where the ordering (as defined by `perm_comp`) breaks, swaps it
    with its successor found by binary search, and reverses the suffix.
    Returns (found, permuted_array): `found` is False when the input was
    already at the extreme permutation (the buffer is then fully reversed,
    i.e. wrapped around to the first/last permutation).
    """
    size = end - start + 1
    permute = OneDimensionalArray(int, size)
    # Work on a copy so the input array is never mutated.
    for i, j in zip(range(start, end + 1), range(size)):
        permute[j] = array[i]
    # Scan right-to-left for the pivot: the first position where the
    # suffix stops being ordered under perm_comp.
    i = size - 1
    while i > 0 and perm_comp(permute[i - 1], permute[i], comp):
        i -= 1
    if i > 0:
        # Binary search the suffix for the element to exchange with the pivot.
        left, right = i, size - 1
        while left <= right:
            mid = left + (right - left) // 2
            if not perm_comp(permute[i - 1], permute[mid], comp):
                left = mid + 1
            else:
                right = mid - 1
        permute[i - 1], permute[left - 1] = \
            permute[left - 1], permute[i - 1]
    # Reverse the suffix (the whole buffer when no pivot was found).
    left, right = i, size - 1
    while left < right:
        permute[left], permute[right] = permute[right], permute[left]
        left += 1
        right -= 1
    result = True if i > 0 else False
    return result, permute
def next_permutation(array, **kwargs):
    """
    If the function can determine the next higher permutation, it
    returns `True` and the permutation in a new array.
    If that is not possible, because it is already at the largest possible
    permutation, it returns the elements according to the first permutation
    and returns `False` and the permutation in a new array.

    Parameters
    ==========

    array: OneDimensionalArray
        The array which is to be used for finding next permutation.
    start: int
        The starting index of the considered portion of the array.
        Optional, by default 0.
    end: int, optional
        The ending index of the considered portion of the array.
        Optional, by default the index of the last position filled.
    comp: lambda/function
        The comparator specifying the desired lexicographical ordering.
        Optional, by default less-than.
    backend: pydatastructs.Backend
        The backend to be used. Optional, by default the best available.

    Returns
    =======

    output: bool, OneDimensionalArray
        First element is `True` if the function can rearrange
        the given portion of the input array as a lexicographically
        greater permutation, otherwise returns `False`.
        Second element is an array having the next permutation.

    Examples
    ========

    >>> from pydatastructs import next_permutation, OneDimensionalArray as ODA
    >>> array = ODA(int, [1, 2, 3, 4])
    >>> is_greater, next_permute = next_permutation(array)
    >>> is_greater, str(next_permute)
    (True, '[1, 2, 4, 3]')
    >>> array = ODA(int, [3, 2, 1])
    >>> is_greater, next_permute = next_permutation(array)
    >>> is_greater, str(next_permute)
    (False, '[1, 2, 3]')

    References
    ==========

    .. [1] http://www.cplusplus.com/reference/algorithm/next_permutation/
    """
    raise_if_backend_is_not_python(
        next_permutation, kwargs.get('backend', Backend.PYTHON))
    first = kwargs.get('start', 0)
    last = kwargs.get('end', len(array) - 1)
    comp = kwargs.get('comp', lambda x, y: x < y)

    def _not_less(x, y, _comp):
        # Inverted comparator: the pivot search walks over positions where
        # x is NOT ordered before y under the user's comparator.
        return not _comp(x, y)

    return _permutation_util(array, first, last, comp, _not_less)
def prev_permutation(array, **kwargs):
    """
    If the function can determine the next lower permutation, it
    returns `True` and the permutation in a new array.
    If that is not possible, because it is already at the lowest possible
    permutation, it returns the elements according to the last permutation
    and returns `False` and the permutation in a new array.

    Parameters
    ==========

    array: OneDimensionalArray
        The array which is to be used for finding previous permutation.
    start: int
        The starting index of the considered portion of the array.
        Optional, by default 0.
    end: int, optional
        The ending index of the considered portion of the array.
        Optional, by default the index of the last position filled.
    comp: lambda/function
        The comparator specifying the desired lexicographical ordering.
        Optional, by default less-than.
    backend: pydatastructs.Backend
        The backend to be used. Optional, by default the best available.

    Returns
    =======

    output: bool, OneDimensionalArray
        First element is `True` if the function can rearrange
        the given portion of the input array as a lexicographically
        smaller permutation, otherwise returns `False`.
        Second element is an array having the previous permutation.

    Examples
    ========

    >>> from pydatastructs import prev_permutation, OneDimensionalArray as ODA
    >>> array = ODA(int, [1, 2, 4, 3])
    >>> is_lower, prev_permute = prev_permutation(array)
    >>> is_lower, str(prev_permute)
    (True, '[1, 2, 3, 4]')
    >>> array = ODA(int, [1, 2, 3, 4])
    >>> is_lower, prev_permute = prev_permutation(array)
    >>> is_lower, str(prev_permute)
    (False, '[4, 3, 2, 1]')

    References
    ==========

    .. [1] http://www.cplusplus.com/reference/algorithm/prev_permutation/
    """
    raise_if_backend_is_not_python(
        prev_permutation, kwargs.get('backend', Backend.PYTHON))
    first = kwargs.get('start', 0)
    last = kwargs.get('end', len(array) - 1)
    comp = kwargs.get('comp', lambda x, y: x < y)

    def _is_less(x, y, _comp):
        # Direct comparator: pivot search walks over positions that are
        # still ordered under the user's comparator.
        return bool(_comp(x, y))

    return _permutation_util(array, first, last, comp, _is_less)
def bubble_sort(array, **kwargs):
    """
    Implements bubble sort algorithm.

    Parameters
    ==========

    array: Array
        The array which is to be sorted.
    start: int
        The starting index of the portion
        which is to be sorted.
        Optional, by default 0
    end: int
        The ending index of the portion which
        is to be sorted.
        Optional, by default the index
        of the last position filled.
    comp: lambda/function
        The comparator which is to be used
        for sorting. If the function returns
        False then only swapping is performed.
        Optional, by default, less than or
        equal to is used for comparing two
        values.
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available
        backend is used.

    Returns
    =======

    output: Array
        The sorted array.

    Examples
    ========

    >>> from pydatastructs import OneDimensionalArray, bubble_sort
    >>> arr = OneDimensionalArray(int,[3, 2, 1])
    >>> out = bubble_sort(arr)
    >>> str(out)
    '[1, 2, 3]'
    >>> out = bubble_sort(arr, comp=lambda u, v: u > v)
    >>> str(out)
    '[3, 2, 1]'

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Bubble_sort
    """
    backend = kwargs.pop("backend", Backend.PYTHON)
    if backend == Backend.CPP:
        return _algorithms.bubble_sort(array, **kwargs)
    if backend == Backend.LLVM:
        return _algorithms.bubble_sort_llvm(array, **kwargs)
    start = kwargs.get('start', 0)
    end = kwargs.get('end', len(array) - 1)
    comp = kwargs.get("comp", lambda u, v: u <= v)

    # `end - start` passes suffice for the [start, end] sub-range; the
    # previous version always ran len(array) - 1 passes, over-iterating
    # whenever only a slice of the array is being sorted.
    for _ in range(end - start):
        swapped = False
        for j in range(start, end):
            if not _comp(array[j], array[j + 1], comp):
                array[j], array[j + 1] = array[j + 1], array[j]
                swapped = True
        if not swapped:
            break  # range already ordered; remaining passes are no-ops

    if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)):
        array._modify(True)

    return array
def selection_sort(array, **kwargs):
    """
    Implements selection sort algorithm.

    Parameters
    ==========

    array: Array
        The array which is to be sorted.
    start: int
        The starting index of the portion to sort. Optional, by default 0.
    end: int
        The ending index (inclusive) of the portion to sort.
        Optional, by default the index of the last position filled.
    comp: lambda/function
        The comparator used for sorting; a swap is performed only when
        it returns False. Optional, by default less-than-or-equal.
    backend: pydatastructs.Backend
        The backend to be used. Optional, by default the best available.

    Returns
    =======

    output: Array
        The sorted array.

    Examples
    ========

    >>> from pydatastructs import OneDimensionalArray, selection_sort
    >>> arr = OneDimensionalArray(int,[3, 2, 1])
    >>> out = selection_sort(arr)
    >>> str(out)
    '[1, 2, 3]'
    >>> out = selection_sort(arr, comp=lambda u, v: u > v)
    >>> str(out)
    '[3, 2, 1]'

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Selection_sort
    """
    backend = kwargs.pop("backend", Backend.PYTHON)
    if backend == Backend.CPP:
        # NOTE(review): this dispatches to _algorithms.bubble_sort, not a
        # selection-sort backend — looks like a copy-paste from bubble_sort;
        # confirm whether _algorithms exposes selection_sort before changing.
        return _algorithms.bubble_sort(array, **kwargs)
    first = kwargs.get('start', 0)
    last = kwargs.get('end', len(array) - 1)
    comp = kwargs.get('comp', lambda u, v: u <= v)

    for slot in range(first, last + 1):
        # Locate the element that should occupy `slot` under `comp`.
        best = slot
        for candidate in range(slot + 1, last + 1):
            if not _comp(array[best], array[candidate], comp):
                best = candidate
        if best != slot:
            array[slot], array[best] = array[best], array[slot]

    if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)):
        array._modify(True)

    return array
def insertion_sort(array, **kwargs):
    """
    Implements insertion sort algorithm.

    Parameters
    ==========

    array: Array
        The array which is to be sorted.
    start: int
        The starting index of the portion to sort. Optional, by default 0.
    end: int
        The ending index (inclusive) of the portion to sort.
        Optional, by default the index of the last position filled.
    comp: lambda/function
        The comparator used for sorting; a shift is performed only when
        it returns False. Optional, by default less-than-or-equal.
    backend: pydatastructs.Backend
        The backend to be used. Optional, by default the best available.

    Returns
    =======

    output: Array
        The sorted array.

    Examples
    ========

    >>> from pydatastructs import OneDimensionalArray, insertion_sort
    >>> arr = OneDimensionalArray(int,[3, 2, 1])
    >>> out = insertion_sort(arr)
    >>> str(out)
    '[1, 2, 3]'
    >>> out = insertion_sort(arr, comp=lambda u, v: u > v)
    >>> str(out)
    '[3, 2, 1]'

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Insertion_sort
    """
    backend = kwargs.pop("backend", Backend.PYTHON)
    if backend == Backend.CPP:
        return _algorithms.insertion_sort(array, **kwargs)
    first = kwargs.get('start', 0)
    last = kwargs.get('end', len(array) - 1)
    comp = kwargs.get('comp', lambda u, v: u <= v)

    for pos in range(first + 1, last + 1):
        key = array[pos]
        # Shift larger elements right until the hole is key's slot.
        hole = pos
        while hole > first and not _comp(array[hole - 1], key, comp):
            array[hole] = array[hole - 1]
            hole -= 1
        array[hole] = key

    if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)):
        array._modify(True)

    return array
def linear_search(array, value, **kwargs):
    """
    Implements linear search algorithm.

    Parameters
    ==========

    array: OneDimensionalArray
        The array which is to be searched.
    value:
        The value which is to be searched
        inside the array.
    start: int
        The starting index of the portion to search. Optional, by default 0.
    end: int
        The ending index (inclusive) of the portion to search.
        Optional, by default the index of the last position filled.
    backend: pydatastructs.Backend
        The backend to be used. Optional, by default the best available.

    Returns
    =======

    output: int
        The index of value if found.
        If not found, returns None.

    Examples
    ========

    >>> from pydatastructs import OneDimensionalArray, linear_search
    >>> arr = OneDimensionalArray(int,[3, 2, 1])
    >>> linear_search(arr, 2)
    1

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Linear_search
    """
    backend = kwargs.pop("backend", Backend.PYTHON)
    if backend == Backend.CPP:
        return _algorithms.linear_search(array, value, **kwargs)
    first = kwargs.get('start', 0)
    last = kwargs.get('end', len(array) - 1)

    # Scan left to right; first match wins.
    position = first
    while position <= last:
        if array[position] == value:
            return position
        position += 1

    return None
def binary_search(array, value, **kwargs):
    """
    Implements binary search algorithm.

    Parameters
    ==========

    array: OneDimensionalArray
        The array which is to be searched.
    value:
        The value which is to be searched
        inside the array.
    start: int
        The starting index of the portion
        which is to be searched.
        Optional, by default 0
    end: int
        The ending index of the portion which
        is to be searched.
        Optional, by default the index
        of the last position filled.
    comp: lambda/function
        The comparator which is to be used
        for performing comparisons.
        Optional, by default, less than or
        equal to is used for comparing two
        values.
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available
        backend is used.

    Returns
    =======

    output: int
        The index of elem if found.
        If not found, returns None.

    Examples
    ========

    >>> from pydatastructs import OneDimensionalArray, binary_search
    >>> arr = OneDimensionalArray(int,[1, 2, 3, 5, 10, 12])
    >>> binary_search(arr, 5)
    3

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Binary_search_algorithm

    Note
    ====

    This algorithm assumes that the portion of the array
    to be searched is already sorted.
    """
    backend = kwargs.pop("backend", Backend.PYTHON)
    if backend == Backend.CPP:
        return _algorithms.binary_search(array, value, **kwargs)
    start = kwargs.get('start', 0)
    end = kwargs.get('end', len(array) - 1)
    comp = kwargs.get("comp", lambda u, v: u <= v)

    left = start
    right = end
    while left <= right:
        # Python ints are arbitrary precision, so the plain midpoint cannot
        # overflow; the previous split-halves expression
        # (left//2 + right//2 + left % 2 * right % 2) was needless obfuscation.
        middle = (left + right) // 2
        if array[middle] == value:
            return middle
        if comp(array[middle], value):
            left = middle + 1
        else:
            right = middle - 1

    return None
def jump_search(array, value, **kwargs):
    """
    Implements jump search algorithm.

    Parameters
    ==========

    array: OneDimensionalArray
        The array which is to be searched.
    value:
        The value which is to be searched
        inside the array.
    start: int
        The starting index of the portion
        which is to be searched.
        Optional, by default 0
    end: int
        The ending index of the portion which
        is to be searched.
        Optional, by default the index
        of the last position filled.
    comp: lambda/function
        The comparator which is to be used
        for performing comparisons.
        Optional, by default, less than
        is used for comparing two values.
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available
        backend is used.

    Returns
    =======

    output: int
        The index of elem if found.
        If not found, returns None.

    Examples
    ========

    >>> from pydatastructs import OneDimensionalArray, jump_search
    >>> arr = OneDimensionalArray(int,[1, 2, 3, 5, 10, 12])
    >>> jump_search(arr, 5)
    3

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Jump_search

    Note
    ====

    This algorithm assumes that the portion of the array
    to be searched is already sorted.
    """
    backend = kwargs.pop("backend", Backend.PYTHON)
    if backend == Backend.CPP:
        return _algorithms.jump_search(array, value, **kwargs)
    start = kwargs.get('start', 0)
    end = kwargs.get('end', len(array) - 1)
    comp = kwargs.get("comp", lambda u, v: u < v)

    # max(..., 1) prevents a zero step (and a stuck loop) on empty or
    # single-element ranges.
    step = max(int(sqrt(end - start + 1)), 1)
    # NOTE(review): the first probe is at index `step`, not `start + step`;
    # the trailing linear scan starting at `prev` appears to compensate for
    # start > 0, but verify before relying on sub-range searches.
    current_position = step
    prev = start
    while comp(array[min(current_position, end)], value):
        prev = current_position
        current_position += step
        if prev > end:
            return None
    # Linear scan within the block that may contain `value`.
    while prev <= min(current_position, end):
        if array[prev] == value:
            return prev
        prev += 1

    return None
def intro_sort(array, **kwargs) -> Array:
    """
    Performs intro sort on the given array.

    Parameters
    ==========

    array: Array
        The array which is to be sorted.
    start: int
        The starting index of the portion
        which is to be sorted.
        Optional, by default 0
    end: int
        The ending index of the portion which
        is to be sorted.
        Optional, by default the index
        of the last position filled.
    maxdepth: Enables the user to define the maximum
        recursion depth, takes value 2*log(length(A))
        by default (ref: Wikipedia[1]).
    ins_threshold: Threshold under which insertion
        sort has to be performed, default value is
        16 (ref: Wikipedia[1]).
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available
        backend is used.

    Returns
    =======

    output: Array
        The sorted array.

    Examples
    ========

    >>> from pydatastructs import OneDimensionalArray as ODA, intro_sort
    >>> arr = ODA(int, [5, 78, 1, 0])
    >>> out = intro_sort(arr)
    >>> str(out)
    '[0, 1, 5, 78]'
    >>> arr = ODA(int, [21, 37, 5])
    >>> out = intro_sort(arr)
    >>> str(out)
    '[5, 21, 37]'

    Note
    ====

    This function does not support custom comparators as
    is the case with other sorting functions in this file.
    This is because of heapsort's limitation.

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Introsort
    """
    raise_if_backend_is_not_python(
        intro_sort, kwargs.get('backend', Backend.PYTHON))

    # Always sorts in increasing order, this is because of
    # heapsort's limitation
    comp = lambda u, v: u <= v
    lower = kwargs.get('start', 0)
    upper = kwargs.get('end', len(array) - 1)
    n = upper - lower + 1
    if n <= 0:
        maxdepth = 0
    else:
        # Default recursion budget: 2 * floor(log2(n)).
        maxdepth = kwargs.get("maxdepth", int(2 * (log(n)/log(2))))

    ins_threshold = kwargs.get("ins_threshold", 16)

    def partition(array, lower, upper):
        # Hoare-style partition with array[lower] as the pivot; returns the
        # pivot's final index.
        pivot = array[lower]
        left = lower + 1
        right = upper
        done = False
        while not done:
            # Advance left past elements <= pivot, retreat right past
            # elements >= pivot, then swap the out-of-place pair.
            while left <= right and _comp(array[left], pivot, comp):
                left += 1
            while _comp(pivot, array[right], comp) and right >= left:
                right -= 1
            if right < left:
                done = True
            else:
                array[left], array[right] = array[right], array[left]
                left+=1
                right-=1

        # Place the pivot between the two halves.
        array[lower], array[right] = array[right], array[lower]
        return right

    if n < ins_threshold:
        # Small ranges: insertion sort is fastest.
        return insertion_sort(array, start=lower, end=upper)
    elif maxdepth == 0:
        # Recursion budget exhausted: fall back to heapsort to keep the
        # worst case at O(n log n).
        heapsort(array, start=lower, end=upper)
        return array
    else:
        p = partition(array, lower, upper)

        intro_sort(array, start=lower, end=p-1, maxdepth=maxdepth-1, ins_threshold=ins_threshold)
        intro_sort(array, start=p+1, end=upper, maxdepth=maxdepth-1, ins_threshold=ins_threshold)

        return array
+ backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Returns + ======= + + output: Array + The sorted array. + + Examples + ======== + + >>> from pydatastructs.linear_data_structures.algorithms import OneDimensionalArray, shell_sort + >>> arr = OneDimensionalArray(int, [3, 2, 1]) + >>> out = shell_sort(arr) + >>> str(out) + '[1, 2, 3]' + >>> out = shell_sort(arr, comp=lambda u, v: u > v) + >>> str(out) + '[3, 2, 1]' + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Shellsort + """ + start = int(kwargs.get('start', 0)) + end = int(kwargs.get('end', len(array) - 1)) + comp = kwargs.get('comp', lambda u, v: u <= v) + + n = end - start + 1 + gap = n // 2 + while gap > 0: + for i in range(start + gap, end + 1): + temp = array[i] + j = i + while j >= start + gap and not _comp(array[j - gap], temp, comp): + array[j] = array[j - gap] + j -= gap + array[j] = temp + gap //= 2 + + if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)): + array._modify(True) + + return array + +def radix_sort(array, *args, **kwargs): + """ + Implements radix sort algorithm for non-negative integers. + + Parameters + ========== + + array: Array + The array which is to be sorted. Must contain non-negative integers. + start: int + The starting index of the portion + which is to be sorted. + Optional, by default 0 + end: int + The ending index of the portion which + is to be sorted. + Optional, by default the index + of the last position filled. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Returns + ======= + + output: Array + The sorted array. 
+ + Examples + ======== + + >>> from pydatastructs.linear_data_structures.algorithms import OneDimensionalArray, radix_sort + >>> arr = OneDimensionalArray(int, [170, 45, 75, 90, 802, 24, 2, 66]) + >>> out = radix_sort(arr) + >>> str(out) + '[2, 24, 45, 66, 75, 90, 170, 802]' + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Radix_sort + """ + start = int(kwargs.get('start', 0)) + end = int(kwargs.get('end', len(array) - 1)) + + n = end - start + 1 + max_val = array[start] + for i in range(start + 1, end + 1): + if array[i] is not None and array[i] > max_val: + max_val = array[i] + exp = 1 + while max_val // exp > 0: + count = [0] * 10 + output = [None] * n + + for i in range(start, end + 1): + if array[i] is not None: + digit = (array[i] // exp) % 10 + count[digit] += 1 + + for i in range(1, 10): + count[i] += count[i - 1] + + for i in range(end, start - 1, -1): + if array[i] is not None: + digit = (array[i] // exp) % 10 + count[digit] -= 1 + output[count[digit]] = array[i] + + for i in range(n): + array[start + i] = output[i] + + exp *= 10 + + if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)): + array._modify(True) + + return array diff --git a/lib/python3.12/site-packages/pydatastructs/linear_data_structures/arrays.py b/lib/python3.12/site-packages/pydatastructs/linear_data_structures/arrays.py new file mode 100644 index 000000000..2e0c3fd97 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/linear_data_structures/arrays.py @@ -0,0 +1,473 @@ +from pydatastructs.utils.misc_util import ( + _check_type, NoneType, Backend, + raise_if_backend_is_not_python) +from pydatastructs.linear_data_structures._backend.cpp import _arrays + +__all__ = [ + 'OneDimensionalArray', + 'MultiDimensionalArray', + 'DynamicOneDimensionalArray' +] + +class Array(object): + """ + Abstract class for arrays in pydatastructs. 
+ """ + def __str__(self) -> str: + return str(self._data) + +class OneDimensionalArray(Array): + """ + Represents one dimensional static arrays of + fixed size. + + Parameters + ========== + + dtype: type + A valid object type. + size: int + The number of elements in the array. + elements: list + The elements in the array, all should + be of same type. + init: a python type + The initial value with which the element has + to be initialized. By default none, used only + when the data is not given. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Raises + ====== + + ValueError + When the number of elements in the list do not + match with the size. + More than three parameters are passed as arguments. + Types of arguments is not as mentioned in the docstring. + + Note + ==== + + At least one parameter should be passed as an argument along + with the dtype. + + Examples + ======== + + >>> from pydatastructs import OneDimensionalArray + >>> arr = OneDimensionalArray(int, 5) + >>> arr.fill(6) + >>> arr[0] + 6 + >>> arr[0] = 7.2 + >>> arr[0] + 7 + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/Array_data_structure#One-dimensional_arrays + """ + + __slots__ = ['_size', '_data', '_dtype'] + + def __new__(cls, dtype=NoneType, *args, **kwargs): + backend = kwargs.get('backend', Backend.PYTHON) + if backend == Backend.CPP: + return _arrays.OneDimensionalArray(dtype, *args, **kwargs) + if dtype is NoneType: + raise ValueError("Data type is not defined.") + if len(args) not in (1, 2): + raise ValueError("Too few arguments to create a 1D array," + " pass either size of the array" + " or list of elements or both.") + obj = Array.__new__(cls) + obj._dtype = dtype + if len(args) == 2: + if _check_type(args[0], list) and \ + _check_type(args[1], int): + for i in range(len(args[0])): + if _check_type(args[0][i], dtype) is False: + args[0][i] = dtype(args[0][i]) + size, data = args[1], list(args[0]) + elif _check_type(args[1], list) and \ + _check_type(args[0], int): + for i in range(len(args[1])): + if _check_type(args[1][i], dtype) is False: + args[1][i] = dtype(args[1][i]) + size, data = args[0], list(args[1]) + else: + raise TypeError("Expected type of size is int and " + "expected type of data is list/tuple.") + if size != len(data): + raise ValueError("Conflict in the size, %s and length of data, %s" + %(size, len(data))) + obj._size, obj._data = size, data + + elif len(args) == 1: + if _check_type(args[0], int): + obj._size = args[0] + init = kwargs.get('init', None) + obj._data = [init for i in range(args[0])] + elif _check_type(args[0], (list, tuple)): + for i in range(len(args[0])): + if _check_type(args[0][i], dtype) is False: + args[0][i] = dtype(args[0][i]) + obj._size, obj._data = len(args[0]), \ + list(args[0]) + else: + raise TypeError("Expected type of size is int and " + "expected type of data is list/tuple.") + + return obj + + @classmethod + def methods(cls): + return ['__new__', '__getitem__', + '__setitem__', 'fill', '__len__'] + + def __getitem__(self, i): + if i >= self._size or i < 0: + raise 
IndexError(("Index, {} out of range, " + "[{}, {}).".format(i, 0, self._size))) + return self._data.__getitem__(i) + + def __setitem__(self, idx, elem): + if elem is None: + self._data[idx] = None + else: + if _check_type(elem, self._dtype) is False: + elem = self._dtype(elem) + self._data[idx] = elem + + def fill(self, elem): + elem = self._dtype(elem) + for i in range(self._size): + self._data[i] = elem + + def __len__(self): + return self._size + +class MultiDimensionalArray(Array): + """ + Represents a multi-dimensional array. + + Parameters + ========== + + dtype: type + A valid object type. + *args: int + The dimensions of the array. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Raises + ====== + + IndexError + Index goes out of boundaries, or + the number of index given is not + the same as the number of dimensions. + ValueError + When there's no dimensions or the + dimension size is 0. + + Examples + ======== + + >>> from pydatastructs import MultiDimensionalArray as MDA + >>> arr = MDA(int, 5, 6, 9) + >>> arr.fill(32) + >>> arr[3, 0, 0] + 32 + >>> arr[3, 0, 0] = 7 + >>> arr[3, 0, 0] + 7 + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/Array_data_structure#Multidimensional_arrays + + """ + __slots__ = ['_sizes', '_data', '_dtype'] + + def __new__(cls, dtype: type = NoneType, *args, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + if dtype is NoneType: + raise ValueError("Data type is not defined.") + elif not args: + raise ValueError("Too few arguments to create a " + "multi dimensional array, pass dimensions.") + if len(args) == 1: + obj = Array.__new__(cls) + obj._dtype = dtype + obj._sizes = (args[0], 1) + obj._data = [None] * args[0] + return obj + + dimensions = args + for dimension in dimensions: + if dimension < 1: + raise ValueError("Size of dimension cannot be less than 1") + n_dimensions = len(dimensions) + d_sizes = [] + index = 0 + while n_dimensions > 1: + size = dimensions[index] + for i in range(index+1, len(dimensions)): + size = size * dimensions[i] + d_sizes.append(size) + n_dimensions -= 1 + index += 1 + d_sizes.append(dimensions[index]) + d_sizes.append(1) + obj = Array.__new__(cls) + obj._dtype = dtype + obj._sizes = tuple(d_sizes) + obj._data = [None] * obj._sizes[1] * dimensions[0] + return obj + + @classmethod + def methods(cls) -> list: + return ['__new__', '__getitem__', '__setitem__', 'fill', 'shape'] + + def __getitem__(self, indices): + self._compare_shape(indices) + if isinstance(indices, int): + return self._data[indices] + position = 0 + for i in range(0, len(indices)): + position += self._sizes[i + 1] * indices[i] + return self._data[position] + + def __setitem__(self, indices, element) -> None: + self._compare_shape(indices) + if isinstance(indices, int): + self._data[indices] = element + else: + position = 0 + for i in range(0, len(indices)): + position += self._sizes[i + 1] * indices[i] + self._data[position] = element + + def _compare_shape(self, indices) -> None: + indices = [indices] if isinstance(indices, int) else indices + if len(indices) != len(self._sizes) - 1: + raise 
IndexError("Shape mismatch, current shape is %s" % str(self.shape)) + if any(indices[i] >= self._sizes[i] for i in range(len(indices))): + raise IndexError("Index out of range.") + + def fill(self, element) -> None: + element = self._dtype(element) + for i in range(len(self._data)): + self._data[i] = element + + @property + def shape(self) -> tuple: + shape = [] + size = len(self._sizes) + for i in range(1, size): + shape.append(self._sizes[i-1]//self._sizes[i]) + return tuple(shape) + +class DynamicArray(Array): + """ + Abstract class for dynamic arrays. + """ + pass + +class DynamicOneDimensionalArray(DynamicArray, OneDimensionalArray): + """ + Represents resizable and dynamic one + dimensional arrays. + + Parameters + ========== + + dtype: type + A valid object type. + size: int + The number of elements in the array. + elements: list/tuple + The elements in the array, all should + be of same type. + init: a python type + The inital value with which the element has + to be initialized. By default none, used only + when the data is not given. + load_factor: float, by default 0.25 + The number below which if the ratio, Num(T)/Size(T) + falls then the array is contracted such that at + most only half the positions are filled. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Raises + ====== + + ValueError + When the number of elements in the list do not + match with the size. + More than three parameters are passed as arguments. + Types of arguments is not as mentioned in the docstring. + The load factor is not of floating point type. + + Note + ==== + + At least one parameter should be passed as an argument along + with the dtype. + Num(T) means the number of positions which are not None in the + array. + Size(T) means the maximum number of elements that the array can hold. 
+ + Examples + ======== + + >>> from pydatastructs import DynamicOneDimensionalArray as DODA + >>> arr = DODA(int, 0) + >>> arr.append(1) + >>> arr.append(2) + >>> arr[0] + 1 + >>> arr.delete(0) + >>> arr[0] + >>> arr[1] + 2 + >>> arr.append(3) + >>> arr.append(4) + >>> [arr[i] for i in range(arr.size)] + [None, 2, 3, 4, None, None, None] + + References + ========== + + .. [1] http://www.cs.nthu.edu.tw/~wkhon/algo09/lectures/lecture16.pdf + """ + + __slots__ = ['_load_factor', '_num', '_last_pos_filled', '_size'] + + def __new__(cls, dtype=NoneType, *args, **kwargs): + backend = kwargs.get("backend", Backend.PYTHON) + if backend == Backend.CPP: + return _arrays.DynamicOneDimensionalArray(dtype, *args, **kwargs) + obj = super().__new__(cls, dtype, *args, **kwargs) + obj._load_factor = float(kwargs.get('load_factor', 0.25)) + obj._num = 0 if obj._size == 0 or obj[0] is None else obj._size + obj._last_pos_filled = obj._num - 1 + return obj + + @classmethod + def methods(cls): + return ['__new__', '_modify', + 'append', 'delete', 'size', + '__str__', '__reversed__'] + + def _modify(self, force=False): + """ + Contracts the array if Num(T)/Size(T) falls + below load factor. 
+ """ + if force: + i = -1 + while self._data[i] is None: + i -= 1 + self._last_pos_filled = i%self._size + if (self._num/self._size < self._load_factor): + arr_new = OneDimensionalArray(self._dtype, 2*self._num + 1) + j = 0 + for i in range(self._last_pos_filled + 1): + if self._data[i] is not None: + arr_new[j] = self[i] + j += 1 + self._last_pos_filled = j - 1 + self._data = arr_new._data + self._size = arr_new._size + + def append(self, el): + if self._last_pos_filled + 1 == self._size: + arr_new = OneDimensionalArray(self._dtype, 2*self._size + 1) + for i in range(self._last_pos_filled + 1): + arr_new[i] = self[i] + arr_new[self._last_pos_filled + 1] = el + self._size = arr_new._size + self._data = arr_new._data + else: + self[self._last_pos_filled + 1] = el + self._last_pos_filled += 1 + self._num += 1 + self._modify() + + def delete(self, idx): + if idx <= self._last_pos_filled and idx >= 0 and \ + self[idx] is not None: + self[idx] = None + self._num -= 1 + if self._last_pos_filled == idx: + self._last_pos_filled -= 1 + return self._modify() + + @property + def size(self): + return self._size + + def __str__(self): + to_be_printed = ['' for _ in range(self._last_pos_filled + 1)] + for i in range(self._last_pos_filled + 1): + if self._data[i] is not None: + to_be_printed[i] = str(self._data[i]) + return str(to_be_printed) + + def __reversed__(self): + for i in range(self._last_pos_filled, -1, -1): + yield self._data[i] + +class ArrayForTrees(DynamicOneDimensionalArray): + """ + Utility dynamic array for storing nodes of a tree. + + Parameters + ========== + + backend: pydatastructs.Backend + The backend to be used. Available backends: Python and C++ + Optional, by default, the Python backend is used. For faster execution, use the C++ backend. 
+ + See Also + ======== + + pydatastructs.linear_data_structures.arrays.DynamicOneDimensionalArray + """ + def _modify(self): + if self._num/self._size < self._load_factor: + new_indices = {} + arr_new = OneDimensionalArray(self._dtype, 2*self._num + 1) + j = 0 + for i in range(self._last_pos_filled + 1): + if self[i] is not None: + arr_new[j] = self[i] + new_indices[self[i].key] = j + j += 1 + for i in range(j): + if arr_new[i].left is not None: + arr_new[i].left = new_indices[self[arr_new[i].left].key] + if arr_new[i].right is not None: + arr_new[i].right = new_indices[self[arr_new[i].right].key] + if arr_new[i].parent is not None: + arr_new[i].parent = new_indices[self[arr_new[i].parent].key] + self._last_pos_filled = j - 1 + self._data = arr_new._data + self._size = arr_new._size + return new_indices + return None diff --git a/lib/python3.12/site-packages/pydatastructs/linear_data_structures/linked_lists.py b/lib/python3.12/site-packages/pydatastructs/linear_data_structures/linked_lists.py new file mode 100644 index 000000000..09178daf1 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/linear_data_structures/linked_lists.py @@ -0,0 +1,819 @@ +import math, random +from pydatastructs.utils.misc_util import _check_type, LinkedListNode, SkipNode +from pydatastructs.utils.misc_util import ( + Backend, raise_if_backend_is_not_python) + +__all__ = [ + 'SinglyLinkedList', + 'DoublyLinkedList', + 'SinglyCircularLinkedList', + 'DoublyCircularLinkedList', + 'SkipList' +] + +class LinkedList(object): + """ + Abstract class for Linked List. + """ + __slots__ = ['head', 'size'] + + def __len__(self): + return self.size + + @property + def is_empty(self): + return self.size == 0 + + def search(self, key): + curr_node = self.head + while curr_node is not None: + if curr_node.key == key: + return curr_node + curr_node = curr_node.next + if curr_node is self.head: + return None + return None + + def __str__(self): + """ + For printing the linked list. 
+ """ + elements = [] + current_node = self.head + while current_node is not None: + elements.append(str(current_node)) + current_node = current_node.next + if current_node == self.head: + break + return str(elements) + + def insert_after(self, prev_node, key, data=None): + """ + Inserts a new node after the prev_node. + + Parameters + ========== + + prev_node: LinkedListNode + The node after which the + new node is to be inserted. + + key + Any valid identifier to uniquely + identify the node in the linked list. + + data + Any valid data to be stored in the node. + """ + raise NotImplementedError('This is an abstract method') + + def insert_at(self, index, key, data=None): + """ + Inserts a new node at the input index. + + Parameters + ========== + + index: int + An integer satisfying python indexing properties. + + key + Any valid identifier to uniquely + identify the node in the linked list. + + data + Any valid data to be stored in the node. + """ + raise NotImplementedError('This is an abstract method') + + def extract(self, index): + """ + Extracts the node at the index of the list. + + Parameters + ========== + + index: int + An integer satisfying python indexing properties. + + Returns + ======= + + current_node: LinkedListNode + The node at index i. + """ + raise NotImplementedError('This is an abstract method') + + def __getitem__(self, index): + """ + Returns + ======= + + current_node: LinkedListNode + The node at given index. + """ + if index < 0: + index = self.size + index + + if index >= self.size: + raise IndexError('%d index is out of range.'%(index)) + + counter = 0 + current_node = self.head + while counter != index: + current_node = current_node.next + counter += 1 + return current_node + + def appendleft(self, key, data=None): + """ + Pushes a new node at the start i.e., + the left of the list. + + Parameters + ========== + + key + Any valid identifier to uniquely + identify the node in the linked list. 
+ + data + Any valid data to be stored in the node. + """ + self.insert_at(0, key, data) + + def append(self, key, data=None): + """ + Appends a new node at the end of the list. + + Parameters + ========== + + key + Any valid identifier to uniquely + identify the node in the linked list. + + data + Any valid data to be stored in the node. + """ + self.insert_at(self.size, key, data) + + def insert_before(self, next_node, key, data=None): + """ + Inserts a new node before the next_node. + + Parameters + ========== + + next_node: LinkedListNode + The node before which the + new node is to be inserted. + + key + Any valid identifier to uniquely + identify the node in the linked list. + + data + Any valid data to be stored in the node. + """ + raise NotImplementedError('This is an abstract method') + + def popleft(self): + """ + Extracts the Node from the left + i.e. start of the list. + + Returns + ======= + + old_head: LinkedListNode + The leftmost element of linked + list. + """ + return self.extract(0) + + def popright(self): + """ + Extracts the node from the right + of the linked list. + + Returns + ======= + + old_tail: LinkedListNode + The leftmost element of linked + list. + """ + return self.extract(-1) + +class DoublyLinkedList(LinkedList): + """ + Represents Doubly Linked List + + Parameters + ========== + + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Examples + ======== + + >>> from pydatastructs import DoublyLinkedList + >>> dll = DoublyLinkedList() + >>> dll.append(6) + >>> dll[0].key + 6 + >>> dll.head.key + 6 + >>> dll.append(5) + >>> dll.appendleft(2) + >>> str(dll) + "['(2, None)', '(6, None)', '(5, None)']" + >>> dll[0].key = 7.2 + >>> dll.extract(1).key + 6 + >>> str(dll) + "['(7.2, None)', '(5, None)']" + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/Doubly_linked_list + + """ + __slots__ = ['head', 'tail', 'size'] + + def __new__(cls, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = LinkedList.__new__(cls) + obj.head = None + obj.tail = None + obj.size = 0 + return obj + + @classmethod + def methods(cls): + return ['__new__', 'insert_after', + 'insert_before', 'insert_at', 'extract'] + + def insert_after(self, prev_node, key, data=None): + self.size += 1 + new_node = LinkedListNode(key, data, + links=['next', 'prev'], + addrs=[None, None]) + new_node.next = prev_node.next + if new_node.next is not None: + new_node.next.prev = new_node + prev_node.next = new_node + new_node.prev = prev_node + + if new_node.next is None: + self.tail = new_node + + def insert_before(self, next_node, key, data=None): + self.size += 1 + new_node = LinkedListNode(key, data, + links=['next', 'prev'], + addrs=[None, None]) + new_node.prev = next_node.prev + next_node.prev = new_node + new_node.next = next_node + if new_node.prev is not None: + new_node.prev.next = new_node + else: + self.head = new_node + + def insert_at(self, index, key, data=None): + if self.size == 0 and (index in (0, -1)): + index = 0 + + if index < 0: + index = self.size + index + + if index > self.size: + raise IndexError('%d index is out of range.'%(index)) + + self.size += 1 + new_node = LinkedListNode(key, data, + links=['next', 'prev'], + addrs=[None, None]) + if self.size == 1: + self.head, self.tail = \ + new_node, new_node + elif index == self.size - 1: + new_node.prev = self.tail + new_node.next = self.tail.next + self.tail.next = new_node + self.tail = new_node + else: + counter = 0 + current_node = self.head + prev_node = None + while counter != index: + prev_node = current_node + current_node = current_node.next + counter += 1 + new_node.prev = prev_node + new_node.next = current_node + if prev_node is not None: + prev_node.next = new_node + if current_node is not 
None: + current_node.prev = new_node + if new_node.next is None: + self.tail = new_node + if new_node.prev is None: + self.head = new_node + + def extract(self, index): + if self.is_empty: + raise ValueError("The list is empty.") + + if index < 0: + index = self.size + index + + if index >= self.size: + raise IndexError('%d is out of range.'%(index)) + + self.size -= 1 + counter = 0 + current_node = self.head + prev_node = None + while counter != index: + prev_node = current_node + current_node = current_node.next + counter += 1 + if prev_node is not None: + prev_node.next = current_node.next + if current_node.next is not None: + current_node.next.prev = prev_node + if index == 0: + self.head = current_node.next + if index == self.size: + self.tail = current_node.prev + return current_node + +class SinglyLinkedList(LinkedList): + """ + Represents Singly Linked List + + Parameters + ========== + + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Examples + ======== + + >>> from pydatastructs import SinglyLinkedList + >>> sll = SinglyLinkedList() + >>> sll.append(6) + >>> sll[0].key + 6 + >>> sll.head.key + 6 + >>> sll.append(5) + >>> sll.appendleft(2) + >>> str(sll) + "['(2, None)', '(6, None)', '(5, None)']" + >>> sll[0].key = 7.2 + >>> sll.extract(1).key + 6 + >>> str(sll) + "['(7.2, None)', '(5, None)']" + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/Singly_linked_list + + """ + __slots__ = ['head', 'tail', 'size'] + + def __new__(cls, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = LinkedList.__new__(cls) + obj.head = None + obj.tail = None + obj.size = 0 + return obj + + @classmethod + def methods(cls): + return ['insert_after', 'insert_at', + 'extract'] + + def insert_after(self, prev_node, key, data=None): + self.size += 1 + new_node = LinkedListNode(key, data, + links=['next'], + addrs=[None]) + new_node.next = prev_node.next + prev_node.next = new_node + + if new_node.next is None: + self.tail = new_node + + def insert_at(self, index, key, data=None): + if self.size == 0 and (index in (0, -1)): + index = 0 + + if index < 0: + index = self.size + index + + if index > self.size: + raise IndexError('%d index is out of range.'%(index)) + + self.size += 1 + new_node = LinkedListNode(key, data, + links=['next'], + addrs=[None]) + if self.size == 1: + self.head, self.tail = \ + new_node, new_node + elif index == self.size - 1: + new_node.next = self.tail.next + self.tail.next = new_node + self.tail = new_node + else: + counter = 0 + current_node = self.head + prev_node = None + while counter != index: + prev_node = current_node + current_node = current_node.next + counter += 1 + new_node.next = current_node + if prev_node is not None: + prev_node.next = new_node + if new_node.next is None: + self.tail = new_node + if index == 0: + self.head = new_node + + def extract(self, index): + if self.is_empty: + raise ValueError("The list is empty.") + + if index < 0: + index = self.size + index + + if index >= self.size: + raise IndexError('%d is out of range.'%(index)) + + self.size -= 1 + counter = 0 + current_node = self.head + prev_node = None + while counter != index: + prev_node = current_node + current_node = current_node.next + counter += 1 + if prev_node is not None: + prev_node.next = current_node.next + if index == 0: + 
self.head = current_node.next + if index == self.size: + self.tail = prev_node + return current_node + +class SinglyCircularLinkedList(SinglyLinkedList): + """ + Represents Singly Circular Linked List. + + Parameters + ========== + + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + + Examples + ======== + + >>> from pydatastructs import SinglyCircularLinkedList + >>> scll = SinglyCircularLinkedList() + >>> scll.append(6) + >>> scll[0].key + 6 + >>> scll.head.key + 6 + >>> scll.append(5) + >>> scll.appendleft(2) + >>> str(scll) + "['(2, None)', '(6, None)', '(5, None)']" + >>> scll[0].key = 7.2 + >>> scll.extract(1).key + 6 + >>> str(scll) + "['(7.2, None)', '(5, None)']" + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Linked_list#Circular_linked_list + + """ + + @classmethod + def methods(cls): + return ['insert_after', 'insert_at', 'extract'] + + def insert_after(self, prev_node, key, data=None): + super(SinglyCircularLinkedList, self).\ + insert_after(prev_node, key, data) + if prev_node.next.next == self.head: + self.tail = prev_node.next + + def insert_at(self, index, key, data=None): + super(SinglyCircularLinkedList, self).insert_at(index, key, data) + if self.size == 1: + self.head.next = self.head + new_node = self.__getitem__(index) + if index == 0: + self.tail.next = new_node + if new_node.next == self.head: + self.tail = new_node + + def extract(self, index): + node = super(SinglyCircularLinkedList, self).extract(index) + if self.tail is None: + self.head = None + elif index == 0: + self.tail.next = self.head + return node + +class DoublyCircularLinkedList(DoublyLinkedList): + """ + Represents Doubly Circular Linked List + + Parameters + ========== + + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. 
class SkipList(object):
    """
    Represents Skip List

    Parameters
    ==========

    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available
        backend is used.

    Examples
    ========

    >>> from pydatastructs import SkipList
    >>> sl = SkipList()
    >>> sl.insert(6)
    >>> sl.insert(1)
    >>> sl.insert(3)
    >>> node = sl.extract(1)
    >>> str(node)
    '(1, None)'
    >>> sl.insert(4)
    >>> sl.insert(2)
    >>> sl.search(4)
    True
    >>> sl.search(10)
    False

    """

    # NOTE(review): 'seed' is declared in __slots__ but never assigned
    # anywhere in this class body -- presumably reserved for seeding the
    # promotion coin flips; confirm before relying on it.
    __slots__ = ['head', 'tail', '_levels', '_num_nodes', 'seed']

    def __new__(cls, **kwargs):
        raise_if_backend_is_not_python(
            cls, kwargs.get('backend', Backend.PYTHON))
        obj = object.__new__(cls)
        obj.head, obj.tail = None, None
        obj._num_nodes = 0
        obj._levels = 0
        # Start with a single empty level so the -inf/+inf sentinels
        # (head/tail) always exist, even for an empty list.
        obj._add_level()
        return obj

    @classmethod
    def methods(cls):
        return ['__new__', 'levels', 'search',
            'extract', '__str__', 'size']

    def _add_level(self):
        # Push a new empty level on top: each level is bounded by a
        # -inf head sentinel and a +inf tail sentinel, and the new
        # sentinels link down to the previous top level's sentinels.
        self.tail = SkipNode(math.inf, next=None, down=self.tail)
        self.head = SkipNode(-math.inf, next=self.tail, down=self.head)
        self._levels += 1

    @property
    def levels(self):
        """
        Returns the number of levels in the
        current skip list.
        """
        return self._levels

    def _search(self, key) -> list:
        # Walk from the top-left sentinel. On each level, move right while
        # the successor's key is still < key, then drop one level, recording
        # the last node visited on that level. The returned path is ordered
        # top level first; path[-1] is the bottom-level predecessor of key.
        path = []
        node = self.head
        while node:
            if node.next.key >= key:
                path.append(node)
                node = node.down
            else:
                node = node.next
        return path

    def search(self, key) -> bool:
        # The bottom-level predecessor's successor is key iff key is present.
        return self._search(key)[-1].next.key == key

    def insert(self, key, data=None):
        """
        Inserts a new node to the skip list.

        Parameters
        ==========

        key
            Any valid identifier to uniquely
            identify the node in the linked list.

        data
            Any valid data to be stored in the node.
        """
        path = self._search(key)
        tip = path[-1]
        # Always link the new node into the bottom level.
        below = SkipNode(key=key, data=data, next=tip.next)
        tip.next = below
        total_level = self._levels
        level = 1
        # Promote the node upwards with probability 1/2 per level
        # (getrandbits(1) is already 0 or 1; the % 2 is redundant but
        # harmless). A promotion past the current top adds a new level.
        while random.getrandbits(1) % 2 == 0 and level <= total_level:
            if level == total_level:
                self._add_level()
                prev = self.head
            else:
                # path is top-to-bottom, so the predecessor `level` levels
                # above the bottom sits at index total_level - 1 - level.
                prev = path[total_level - 1 - level]
            # Only the bottom copy carries the payload; upper copies are
            # index nodes with data=None, linked via `down`.
            below = SkipNode(key=key, data=None, next=prev.next, down=below)
            prev.next = below
            level += 1
        self._num_nodes += 1

    @property
    def size(self):
        return self._num_nodes

    def extract(self, key):
        """
        Extracts the node with the given key in the skip list.

        Parameters
        ==========

        key
            The key of the node under consideration.

        Returns
        =======

        return_node: SkipNode
            The node with given key.
        """
        path = self._search(key)
        tip = path[-1]
        if tip.next.key != key:
            raise KeyError('Node with key %s is not there in %s'%(key, self))
        # Return a detached copy so internal links are not exposed.
        return_node = SkipNode(tip.next.key, tip.next.data)
        total_level = self._levels
        level = total_level - 1
        # Unlink the node from every level that contains it, working
        # upward from the bottom of `path` (which is top-to-bottom).
        while level >= 0 and path[level].next.key == key:
            path[level].next = path[level].next.next
            level -= 1
        # Drop consecutive top levels that became empty (head -> tail only).
        walk = self.head
        while walk is not None:
            if walk.next is self.tail:
                self._levels -= 1
                self.head = walk.down
                self.tail = self.tail.down
                walk = walk.down
            else:
                break
        self._num_nodes -= 1
        # Never leave the list without at least one (empty) level.
        if self._levels == 0:
            self._add_level()
        return return_node

    def __str__(self):
        # First pass: assign each non-sentinel node a row (its level) and,
        # using the bottom level only, a column per distinct key.
        node2row = {}
        node2col = {}
        walk = self.head
        curr_level = self._levels - 1
        while walk is not None:
            curr_node = walk
            col = 0
            while curr_node is not None:
                if curr_node.key != math.inf and curr_node.key != -math.inf:
                    node2row[curr_node] = curr_level
                    if walk.down is None:
                        # Bottom level: columns are numbered left to right.
                        node2col[curr_node.key] = col
                        col += 1
                curr_node = curr_node.next
            walk = walk.down
            curr_level -= 1
        # Second pass: fill a levels x num_nodes matrix of node reprs,
        # defaulting to the string 'None' for absent cells.
        sl_mat = [[str(None) for _ in range(self._num_nodes)] for _ in range(self._levels)]
        walk = self.head
        while walk is not None:
            curr_node = walk
            while curr_node is not None:
                if curr_node in node2row:
                    row = node2row[curr_node]
                    col = node2col[curr_node.key]
                    sl_mat[row][col] = str(curr_node)
                curr_node = curr_node.next
            walk = walk.down
        # Render top level first (reverse row order).
        sl_str = ""
        for level_list in sl_mat[::-1]:
            for node_str in level_list:
                sl_str += node_str + " "
            if len(sl_str) > 0:
                sl_str += "\n"
        return sl_str
228, 688, 247, 373, 696, None, + None, None, None, None, None, + None, None, None, None, None, + None, None, None, None] + sort(arr, *args, **kwargs, start=2, end=10) + assert arr._data == expected_arr_1 + sort(arr, *args, **kwargs) + expected_arr_2 = [102, 134, 228, 247, 362, 373, 448, + 480, 548, 686, 688, 696, 779, + None, None, None, None, None, None, + None, None, None, None, None, + None, None, None, None, None, None, None] + assert arr._data == expected_arr_2 + assert (arr._last_pos_filled, arr._num, arr._size) == (12, 13, 31) + + arr = DynamicOneDimensionalArray(int, 0, backend=Backend.CPP) + int_idx = 0 + for _ in range(n): + arr.append(generated_ints[int_idx]) + int_idx += 1 + for _ in range(n//3): + arr.delete(generated_ints[int_idx]) + int_idx += 1 + sort(arr, *args, **kwargs, start=2, end=10) + for i in range(len(expected_arr_1)): + assert arr[i] == expected_arr_1[i] + sort(arr, *args, **kwargs) + for i in range(len(expected_arr_2)): + assert arr[i] == expected_arr_2[i] + assert (arr._last_pos_filled, arr._num, arr.size) == (12, 13, 31) + + n = random.randint(10, 20) + arr = OneDimensionalArray(int, n) + generated_ints.clear() + for i in range(n): + integer = random.randint(1, 1000) + arr[i] = integer + generated_ints.append(integer) + expected_arr_3 = [42, 695, 147, 500, 768, + 998, 473, 732, 728, 426, + 709, 910] + sort(arr, *args, **kwargs, start=2, end=5) + assert arr._data == expected_arr_3 + + arr = OneDimensionalArray(int, n, backend=Backend.CPP) + int_idx = 0 + for i in range(n): + arr[i] = generated_ints[int_idx] + int_idx += 1 + sort(arr, *args, **kwargs, start=2, end=5) + for i in range(len(expected_arr_3)): + assert arr[i] == expected_arr_3[i] + +def test_merge_sort_parallel(): + _test_common_sort(merge_sort_parallel, num_threads=5) + +def test_brick_sort(): + _test_common_sort(brick_sort) + +def test_brick_sort_parallel(): + _test_common_sort(brick_sort_parallel, num_threads=3) + +def test_heapsort(): + _test_common_sort(heapsort) + +def 
test_bucket_sort(): + _test_common_sort(bucket_sort) + +def test_counting_sort(): + random.seed(1000) + + n = random.randint(10, 20) + arr = DynamicOneDimensionalArray(int, 0) + for _ in range(n): + arr.append(random.randint(1, 1000)) + for _ in range(n//3): + arr.delete(random.randint(0, n//2)) + + expected_arr = [102, 134, 228, 247, 362, 373, 448, + 480, 548, 686, 688, 696, 779] + assert counting_sort(arr)._data == expected_arr + +def test_cocktail_shaker_sort(): + _test_common_sort(cocktail_shaker_sort) + +def test_quick_sort(): + _test_common_sort(quick_sort) + _test_common_sort(quick_sort, backend=Backend.CPP) + +def test_intro_sort(): + _test_common_sort(intro_sort) + +def test_bubble_sort(): + _test_common_sort(bubble_sort) + _test_common_sort(bubble_sort, backend=Backend.CPP) + _test_common_sort(bubble_sort, backend=Backend.LLVM) + +def test_selection_sort(): + _test_common_sort(selection_sort) + _test_common_sort(selection_sort, backend=Backend.CPP) + +def test_insertion_sort(): + _test_common_sort(insertion_sort) + _test_common_sort(insertion_sort, backend=Backend.CPP) + +def test_matrix_multiply_parallel(): + ODA = OneDimensionalArray + + expected_result = [[3, 3, 3], [1, 2, 1], [2, 2, 2]] + + I = ODA(ODA, [ODA(int, [1, 1, 0]), ODA(int, [0, 1, 0]), ODA(int, [0, 0, 1])]) + J = ODA(ODA, [ODA(int, [2, 1, 2]), ODA(int, [1, 2, 1]), ODA(int, [2, 2, 2])]) + output = matrix_multiply_parallel(I, J, num_threads=5) + assert expected_result == output + + I = [[1, 1, 0], [0, 1, 0], [0, 0, 1]] + J = [[2, 1, 2], [1, 2, 1], [2, 2, 2]] + output = matrix_multiply_parallel(I, J, num_threads=5) + assert expected_result == output + + I = [[1, 1, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]] + J = [[2, 1, 2], [1, 2, 1], [2, 2, 2]] + assert raises(ValueError, lambda: matrix_multiply_parallel(I, J, num_threads=5)) + + I = [[1, 1, 0], [0, 1, 0], [0, 0, 1]] + J = [[2, 1, 2], [1, 2, 1], [2, 2, 2]] + output = matrix_multiply_parallel(I, J, num_threads=1) + assert expected_result == output + 
+def test_longest_common_sequence(): + ODA = OneDimensionalArray + expected_result = "['A', 'S', 'C', 'I', 'I']" + + str1 = ODA(str, ['A', 'A', 'S', 'C', 'C', 'I', 'I']) + str2 = ODA(str, ['A', 'S', 'S', 'C', 'I', 'I', 'I', 'I']) + output = longest_common_subsequence(str1, str2) + assert str(output) == expected_result + + expected_result = "['O', 'V', 'A']" + + I = ODA(str, ['O', 'V', 'A', 'L']) + J = ODA(str, ['F', 'O', 'R', 'V', 'A', 'E', 'W']) + output = longest_common_subsequence(I, J) + assert str(output) == expected_result + + X = ODA(int, [1, 2, 3, 4, 5, 6, 6, 5, 4, 3, 2, 1]) + Y = ODA(int, [1, 2, 3, 4, 4, 3, 2, 1]) + output = longest_common_subsequence(X, Y) + assert str(output) == '[1, 2, 3, 4, 4, 3, 2, 1]' + + Z = ODA(int, []) + output = longest_common_subsequence(Y, Z) + assert str(output) == '[]' + +def test_is_ordered(): + def _test_inner_ordered(*args, **kwargs): + ODA = OneDimensionalArray + DODA = DynamicOneDimensionalArray + + expected_result = True + arr = ODA(int, [1, 2, 5, 6]) + output = is_ordered(arr, **kwargs) + assert output == expected_result + + expected_result = False + arr1 = ODA(int, [4, 3, 2, 1]) + output = is_ordered(arr1, **kwargs) + assert output == expected_result + + expected_result = True + arr2 = ODA(int, [6, 1, 2, 3, 4, 5]) + output = is_ordered(arr2, start=1, end=5, **kwargs) + assert output == expected_result + + expected_result = True + arr3 = ODA(int, [0, -1, -2, -3, -4, 4]) + output = is_ordered(arr3, start=1, end=4, + comp=lambda u, v: u > v, **kwargs) + assert output == expected_result + + expected_result = True + arr4 = DODA(int, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + arr4.delete(0) + output = is_ordered(arr4, **kwargs) + assert output == expected_result + + _test_inner_ordered() + _test_inner_ordered(backend=Backend.CPP) + + +def test_upper_bound(): + ODA = OneDimensionalArray + arr1 = ODA(int, [3, 3, 3]) + output = upper_bound(arr1, 3) + expected_result = 3 + assert expected_result == output + + arr2 = ODA(int, [4, 4, 5, 
6]) + output = upper_bound(arr2, 4, end=3) + expected_result = 2 + assert expected_result == output + + arr3 = ODA(int, [6, 6, 7, 8, 9]) + output = upper_bound(arr3, 5, start=2, end=4) + expected_result = 2 + assert expected_result == output + + arr4 = ODA(int, [3, 4, 4, 6]) + output = upper_bound(arr4, 5, start=1, end=3) + expected_result = 3 + assert expected_result == output + + arr5 = ODA(int, [7, 6, 6, 6, 6, 5, 4, 3]) + output = upper_bound(arr5, 6, comp=lambda x, y: x > y) + expected_result = 5 + assert expected_result == output + + arr6 = ODA(int, [7, 6, 6, 6, 6, 5, 4, 3]) + output = upper_bound(arr6, 2, start=2, comp=lambda x, y: x > y) + expected_result = 8 + assert expected_result == output + + arr7 = ODA(int, [7, 6, 6, 6, 6, 5, 4, 3]) + output = upper_bound(arr7, 9, start=3, end=7, comp=lambda x, y: x > y) + expected_result = 3 + assert expected_result == output + + arr8 = ODA(int, [7, 6, 6, 6, 6, 5, 4, 3]) + output = upper_bound(arr8, 6, end=3, comp=lambda x, y: x > y) + expected_result = 3 + assert expected_result == output + + +def test_lower_bound(): + ODA = OneDimensionalArray + arr1 = ODA(int, [3, 3, 3]) + output = lower_bound(arr1, 3, start=1) + expected_result = 1 + assert expected_result == output + + arr2 = ODA(int, [4, 4, 4, 4, 5, 6]) + output = lower_bound(arr2, 5, end=3) + expected_result = 3 + assert expected_result == output + + arr3 = ODA(int, [6, 6, 7, 8, 9]) + output = lower_bound(arr3, 5, end=3) + expected_result = 0 + assert expected_result == output + + arr4 = ODA(int, [3, 4, 4, 4]) + output = lower_bound(arr4, 5) + expected_result = 4 + assert expected_result == output + + arr5 = ODA(int, [7, 6, 6, 6, 6, 5, 4, 3]) + output = lower_bound(arr5, 5, comp=lambda x, y: x > y) + expected_result = 5 + assert expected_result == output + + arr6 = ODA(int, [7, 6, 6, 6, 6, 5, 4, 3]) + output = lower_bound(arr6, 2, start=4, comp=lambda x, y: x > y) + expected_result = 8 + assert expected_result == output + + arr7 = ODA(int, [7, 6, 6, 6, 6, 5, 4, 
3]) + output = lower_bound(arr7, 9, end=5, comp=lambda x, y: x > y) + expected_result = 0 + assert expected_result == output + + arr8 = ODA(int, [7, 6, 6, 6, 6, 5, 4, 3]) + output = lower_bound(arr8, 6, end=3, comp=lambda x, y: x > y) + expected_result = 1 + assert expected_result == output + +def test_longest_increasing_subsequence(): + ODA = OneDimensionalArray + + arr1 = ODA(int, [2, 5, 3, 7, 11, 8, 10, 13, 6]) + output = longest_increasing_subsequence(arr1) + expected_result = [2, 3, 7, 8, 10, 13] + assert str(expected_result) == str(output) + + arr2 = ODA(int, [3, 4, -1, 5, 8, 2, 2, 2, 3, 12, 7, 9, 10]) + output = longest_increasing_subsequence(arr2) + expected_result = [-1, 2, 3, 7, 9, 10] + assert str(expected_result) == str(output) + + arr3 = ODA(int, [6, 6, 6, 19, 9]) + output = longest_increasing_subsequence(arr3) + expected_result = [6, 9] + assert str(expected_result) == str(output) + + arr4 = ODA(int, [5, 4, 4, 3, 3, 6, 6, 8]) + output = longest_increasing_subsequence(arr4) + expected_result = [3, 6, 8] + assert str(expected_result) == str(output) + + arr5 = ODA(int, [7, 6, 6, 6, 5, 4, 3]) + output = longest_increasing_subsequence(arr5) + expected_result = [3] + assert str(expected_result) == str(output) + +def _test_permutation_common(array, expected_perms, func): + num_perms = len(expected_perms) + + output = [] + for _ in range(num_perms): + signal, array = func(array) + output.append(array) + if not signal: + break + + assert len(output) == len(expected_perms) + for perm1, perm2 in zip(output, expected_perms): + assert str(perm1) == str(perm2) + +def test_next_permutation(): + ODA = OneDimensionalArray + + array = ODA(int, [1, 2, 3]) + expected_perms = [[1, 3, 2], [2, 1, 3], + [2, 3, 1], [3, 1, 2], + [3, 2, 1], [1, 2, 3]] + _test_permutation_common(array, expected_perms, next_permutation) + +def test_prev_permutation(): + ODA = OneDimensionalArray + + array = ODA(int, [3, 2, 1]) + expected_perms = [[3, 1, 2], [2, 3, 1], + [2, 1, 3], [1, 3, 2], + 
[1, 2, 3], [3, 2, 1]] + _test_permutation_common(array, expected_perms, prev_permutation) + +def test_next_prev_permutation(): + ODA = OneDimensionalArray + random.seed(1000) + + for i in range(100): + data = set(random.sample(range(1, 10000), 10)) + array = ODA(int, list(data)) + + _, next_array = next_permutation(array) + _, orig_array = prev_permutation(next_array) + assert str(orig_array) == str(array) + + _, prev_array = prev_permutation(array) + _, orig_array = next_permutation(prev_array) + assert str(orig_array) == str(array) + +def _test_common_search(search_func, sort_array=True, **kwargs): + ODA = OneDimensionalArray + + array = ODA(int, [1, 2, 5, 7, 10, 29, 40]) + for i in range(len(array)): + assert i == search_func(array, array[i], **kwargs) + + checker_array = [None, None, 2, 3, 4, 5, None] + for i in range(len(array)): + assert checker_array[i] == search_func(array, array[i], start=2, end=5, **kwargs) + + random.seed(1000) + + for i in range(25): + data = list(set(random.sample(range(1, 10000), 100))) + + if sort_array: + data.sort() + + array = ODA(int, list(data)) + + for i in range(len(array)): + assert search_func(array, array[i], **kwargs) == i + + for _ in range(50): + assert search_func(array, random.randint(10001, 50000), **kwargs) is None + +def test_linear_search(): + _test_common_search(linear_search, sort_array=False) + _test_common_search(linear_search, sort_array=False, backend=Backend.CPP) + +def test_binary_search(): + _test_common_search(binary_search) + _test_common_search(binary_search, backend=Backend.CPP) + +def test_jump_search(): + _test_common_search(jump_search) + _test_common_search(jump_search, backend=Backend.CPP) + +def test_shell_sort(): + _test_common_sort(shell_sort) + +def test_radix_sort(): + _test_common_sort(radix_sort) diff --git a/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_arrays.py b/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_arrays.py new file 
mode 100644 index 000000000..886510113 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_arrays.py @@ -0,0 +1,157 @@ +from pydatastructs.linear_data_structures import ( + OneDimensionalArray, DynamicOneDimensionalArray, + MultiDimensionalArray, ArrayForTrees) +from pydatastructs.utils.misc_util import Backend +from pydatastructs.utils.raises_util import raises +from pydatastructs.utils import TreeNode +from pydatastructs.utils._backend.cpp import _nodes + +def test_OneDimensionalArray(): + ODA = OneDimensionalArray + A = ODA(int, 5, [1.0, 2, 3, 4, 5], init=6) + A[1] = 2.0 + assert str(A) == '[1, 2, 3, 4, 5]' + assert A + assert ODA(int, [1.0, 2, 3, 4, 5], 5) + assert ODA(int, 5) + assert ODA(int, [1.0, 2, 3]) + assert raises(IndexError, lambda: A[7]) + assert raises(IndexError, lambda: A[-1]) + assert raises(ValueError, lambda: ODA()) + assert raises(ValueError, lambda: ODA(int, 1, 2, 3)) + assert raises(TypeError, lambda: ODA(int, 5.0, set([1, 2, 3]))) + assert raises(TypeError, lambda: ODA(int, 5.0)) + assert raises(TypeError, lambda: ODA(int, set([1, 2, 3]))) + assert raises(ValueError, lambda: ODA(int, 3, [1])) + + A = ODA(int, 5, [1, 2, 3, 4, 5], init=6, backend=Backend.CPP) + A[1] = 2 + assert str(A) == "['1', '2', '3', '4', '5']" + assert A + assert ODA(int, [1, 2, 3, 4, 5], 5, backend=Backend.CPP) + assert ODA(int, 5, backend=Backend.CPP) + assert ODA(int, [1, 2, 3], backend=Backend.CPP) + assert raises(TypeError, lambda: ODA(int, [1.0, 2, 3, 4, 5], 5, backend=Backend.CPP)) + assert raises(TypeError, lambda: ODA(int, [1.0, 2, 3], backend=Backend.CPP)) + assert raises(IndexError, lambda: A[7]) + assert raises(IndexError, lambda: A[-1]) + assert raises(ValueError, lambda: ODA(backend=Backend.CPP)) + assert raises(ValueError, lambda: ODA(int, 1, 2, 3, backend=Backend.CPP)) + assert raises(TypeError, lambda: ODA(int, 5.0, set([1, 2, 3]), backend=Backend.CPP)) + assert raises(TypeError, lambda: ODA(int, 5.0, 
backend=Backend.CPP)) + assert raises(TypeError, lambda: ODA(int, set([1, 2, 3]), backend=Backend.CPP)) + assert raises(ValueError, lambda: ODA(int, 3, [1], backend=Backend.CPP)) + assert raises(ValueError, lambda: ODA(int, 3, [1], backend=Backend.CPP)) + assert raises(TypeError, lambda: A.fill(2.0)) + + +def test_MultiDimensionalArray(): + assert raises(ValueError, lambda: MultiDimensionalArray(int, 2, -1, 3)) + assert MultiDimensionalArray(int, 10).shape == (10,) + array = MultiDimensionalArray(int, 5, 9, 3, 8) + assert array.shape == (5, 9, 3, 8) + array.fill(5) + array[1, 3, 2, 5] = 2.0 + assert array + assert array[1, 3, 2, 5] == 2.0 + assert array[1, 3, 0, 5] == 5 + assert array[1, 2, 2, 5] == 5 + assert array[2, 3, 2, 5] == 5 + assert raises(IndexError, lambda: array[5]) + assert raises(IndexError, lambda: array[4, 10]) + assert raises(IndexError, lambda: array[-1]) + assert raises(IndexError, lambda: array[2, 3, 2, 8]) + assert raises(ValueError, lambda: MultiDimensionalArray()) + assert raises(ValueError, lambda: MultiDimensionalArray(int)) + assert raises(TypeError, lambda: MultiDimensionalArray(int, 5, 6, "")) + array = MultiDimensionalArray(int, 3, 2, 2) + array.fill(1) + array[0, 0, 0] = 0 + array[0, 0, 1] = 0 + array[1, 0, 0] = 0 + array[2, 1, 1] = 0 + assert str(array) == '[0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0]' + array = MultiDimensionalArray(int, 4) + assert array.shape == (4,) + array.fill(5) + array[3] = 3 + assert array[3] == 3 + +def test_DynamicOneDimensionalArray(): + DODA = DynamicOneDimensionalArray + A = DODA(int, 0) + A.append(1) + A.append(2) + A.append(3) + A.append(4) + assert str(A) == "['1', '2', '3', '4']" + A.delete(0) + A.delete(0) + A.delete(15) + A.delete(-1) + A.delete(1) + A.delete(2) + assert A._data == [4, None, None] + assert str(A) == "['4']" + assert A.size == 3 + A.fill(4) + assert A._data == [4, 4, 4] + b = DynamicOneDimensionalArray(int, 0) + b.append(1) + b.append(2) + b.append(3) + b.append(4) + b.append(5) + assert 
b._data == [1, 2, 3, 4, 5, None, None] + assert list(reversed(b)) == [5, 4, 3, 2, 1] + + A = DODA(int, 0, backend=Backend.CPP) + A.append(1) + A.append(2) + A.append(3) + A.append(4) + assert str(A) == "['1', '2', '3', '4']" + A.delete(0) + A.delete(0) + A.delete(15) + A.delete(-1) + A.delete(1) + A.delete(2) + assert [A[i] for i in range(A.size)] == [4, None, None] + assert A.size == 3 + A.fill(4) + assert [A[0], A[1], A[2]] == [4, 4, 4] + b = DODA(int, 0, backend=Backend.CPP) + b.append(1) + b.append(2) + b.append(3) + b.append(4) + b.append(5) + assert [b[i] for i in range(b.size)] == [1, 2, 3, 4, 5, None, None] + +def test_DynamicOneDimensionalArray2(): + DODA = DynamicOneDimensionalArray + root = TreeNode(1, 100) + A = DODA(TreeNode, [root]) + assert str(A[0]) == "(None, 1, 100, None)" + +def _test_ArrayForTrees(backend): + AFT = ArrayForTrees + root = TreeNode(1, 100,backend=backend) + if backend==Backend.PYTHON: + A = AFT(TreeNode, [root], backend=backend) + B = AFT(TreeNode, 0, backend=backend) + else: + A = AFT(_nodes.TreeNode, [root], backend=backend) + B = AFT(_nodes.TreeNode, 0, backend=backend) + assert str(A) == "['(None, 1, 100, None)']" + node = TreeNode(2, 200, backend=backend) + A.append(node) + assert str(A) == "['(None, 1, 100, None)', '(None, 2, 200, None)']" + assert str(B) == "[]" + +def test_ArrayForTrees(): + _test_ArrayForTrees(Backend.PYTHON) + +def test_cpp_ArrayForTrees(): + _test_ArrayForTrees(Backend.CPP) diff --git a/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_linked_lists.py b/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_linked_lists.py new file mode 100644 index 000000000..b7f172ddc --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_linked_lists.py @@ -0,0 +1,193 @@ +from pydatastructs.linear_data_structures import DoublyLinkedList, SinglyLinkedList, SinglyCircularLinkedList, DoublyCircularLinkedList, SkipList +from 
pydatastructs.utils.raises_util import raises +import copy, random + +def test_DoublyLinkedList(): + random.seed(1000) + dll = DoublyLinkedList() + assert raises(IndexError, lambda: dll[2]) + dll.appendleft(5) + dll.append(1) + dll.appendleft(2) + dll.append(3) + dll.insert_after(dll[-1], 4) + dll.insert_after(dll[2], 6) + dll.insert_before(dll[4], 1.1) + dll.insert_before(dll[0], 7) + dll.insert_at(0, 2) + dll.insert_at(-1, 9) + dll.extract(2) + assert dll.popleft().key == 2 + assert dll.popright().key == 4 + assert dll.search(3) == dll[-2] + assert dll.search(-1) is None + dll[-2].key = 0 + assert str(dll) == ("['(7, None)', '(5, None)', '(1, None)', " + "'(6, None)', '(1.1, None)', '(0, None)', " + "'(9, None)']") + assert len(dll) == 7 + assert raises(IndexError, lambda: dll.insert_at(8, None)) + assert raises(IndexError, lambda: dll.extract(20)) + dll_copy = DoublyCircularLinkedList() + for i in range(dll.size): + dll_copy.append(dll[i]) + for i in range(len(dll)): + if i%2 == 0: + dll.popleft() + else: + dll.popright() + assert str(dll) == "[]" + for _ in range(len(dll_copy)): + index = random.randint(0, len(dll_copy) - 1) + dll_copy.extract(index) + assert str(dll_copy) == "[]" + assert raises(ValueError, lambda: dll_copy.extract(1)) + +def test_SinglyLinkedList(): + random.seed(1000) + sll = SinglyLinkedList() + assert raises(IndexError, lambda: sll[2]) + sll.appendleft(5) + sll.append(1) + sll.appendleft(2) + sll.append(3) + sll.insert_after(sll[1], 4) + sll.insert_after(sll[-1], 6) + sll.insert_at(0, 2) + sll.insert_at(-1, 9) + sll.extract(2) + assert sll.popleft().key == 2 + assert sll.popright().key == 6 + sll[-2].key = 0 + assert str(sll) == ("['(2, None)', '(4, None)', '(1, None)', " + "'(0, None)', '(9, None)']") + assert len(sll) == 5 + assert raises(IndexError, lambda: sll.insert_at(6, None)) + assert raises(IndexError, lambda: sll.extract(20)) + sll_copy = DoublyCircularLinkedList() + for i in range(sll.size): + sll_copy.append(sll[i]) + for i in 
range(len(sll)): + if i%2 == 0: + sll.popleft() + else: + sll.popright() + assert str(sll) == "[]" + for _ in range(len(sll_copy)): + index = random.randint(0, len(sll_copy) - 1) + sll_copy.extract(index) + assert str(sll_copy) == "[]" + assert raises(ValueError, lambda: sll_copy.extract(1)) + +def test_SinglyCircularLinkedList(): + random.seed(1000) + scll = SinglyCircularLinkedList() + assert raises(IndexError, lambda: scll[2]) + scll.appendleft(5) + scll.append(1) + scll.appendleft(2) + scll.append(3) + scll.insert_after(scll[1], 4) + scll.insert_after(scll[-1], 6) + scll.insert_at(0, 2) + scll.insert_at(-1, 9) + scll.extract(2) + assert scll.popleft().key == 2 + assert scll.popright().key == 6 + assert scll.search(-1) is None + scll[-2].key = 0 + assert str(scll) == ("['(2, None)', '(4, None)', '(1, None)', " + "'(0, None)', '(9, None)']") + assert len(scll) == 5 + assert raises(IndexError, lambda: scll.insert_at(6, None)) + assert raises(IndexError, lambda: scll.extract(20)) + scll_copy = DoublyCircularLinkedList() + for i in range(scll.size): + scll_copy.append(scll[i]) + for i in range(len(scll)): + if i%2 == 0: + scll.popleft() + else: + scll.popright() + assert str(scll) == "[]" + for _ in range(len(scll_copy)): + index = random.randint(0, len(scll_copy) - 1) + scll_copy.extract(index) + assert str(scll_copy) == "[]" + assert raises(ValueError, lambda: scll_copy.extract(1)) + +def test_DoublyCircularLinkedList(): + random.seed(1000) + dcll = DoublyCircularLinkedList() + assert raises(IndexError, lambda: dcll[2]) + dcll.appendleft(5) + dcll.append(1) + dcll.appendleft(2) + dcll.append(3) + dcll.insert_after(dcll[-1], 4) + dcll.insert_after(dcll[2], 6) + dcll.insert_before(dcll[4], 1) + dcll.insert_before(dcll[0], 7) + dcll.insert_at(0, 2) + dcll.insert_at(-1, 9) + dcll.extract(2) + assert dcll.popleft().key == 2 + assert dcll.popright().key == 4 + dcll[-2].key = 0 + assert str(dcll) == ("['(7, None)', '(5, None)', '(1, None)', " + "'(6, None)', '(1, None)', 
'(0, None)', " + "'(9, None)']") + assert len(dcll) == 7 + assert raises(IndexError, lambda: dcll.insert_at(8, None)) + assert raises(IndexError, lambda: dcll.extract(20)) + dcll_copy = DoublyCircularLinkedList() + for i in range(dcll.size): + dcll_copy.append(dcll[i]) + for i in range(len(dcll)): + if i%2 == 0: + dcll.popleft() + else: + dcll.popright() + assert str(dcll) == "[]" + for _ in range(len(dcll_copy)): + index = random.randint(0, len(dcll_copy) - 1) + dcll_copy.extract(index) + assert str(dcll_copy) == "[]" + assert raises(ValueError, lambda: dcll_copy.extract(1)) + +def test_SkipList(): + random.seed(0) + sl = SkipList() + sl.insert(2) + sl.insert(10) + sl.insert(92) + sl.insert(1) + sl.insert(4) + sl.insert(27) + sl.extract(10) + assert str(sl) == ("(1, None) None None None None \n" + "(1, None) None None None None \n" + "(1, None) (2, None) (4, None) (27, None) (92, None) \n") + assert raises(KeyError, lambda: sl.extract(15)) + assert sl.search(1) is True + assert sl.search(47) is False + + sl = SkipList() + + for a in range(0, 20, 2): + sl.insert(a) + assert sl.search(16) is True + for a in range(4, 20, 4): + sl.extract(a) + assert sl.search(10) is True + for a in range(4, 20, 4): + sl.insert(a) + for a in range(0, 20, 2): + sl.extract(a) + assert sl.search(3) is False + + li = SkipList() + li.insert(1) + li.insert(2) + assert li.levels == 1 + assert li.size == 2 diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/__init__.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/__init__.py new file mode 100644 index 000000000..6ed099769 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/__init__.py @@ -0,0 +1,51 @@ +__all__ = [] + +from . 
class RangeQueryStatic:
    """
    Produces results for range queries of different kinds
    by using specified data structure.

    Parameters
    ==========

    array: OneDimensionalArray
        The array for which we need to answer queries.
        All the elements should be of type `int`.
    func: callable
        The function to be used for generating results
        of a query. It should accept only one tuple as an
        argument. The size of the tuple will be either 1 or 2
        and any one of the elements can be `None`. You can treat
        `None` in whatever way you want according to the query
        you are performing. For example, in case of range minimum
        queries, `None` can be treated as infinity. We provide
        the following which can be used as an argument value for this
        parameter,

        `minimum` - For range minimum queries.

        `greatest_common_divisor` - For queries finding greatest
        common divisor of a range.

        `summation` - For range sum queries.
    data_structure: str
        The data structure to be used for performing
        range queries.
        Currently the following data structures are supported,

        'array' -> Array data structure.
        Each query takes O(end - start) time asymptotically.

        'sparse_table' -> Sparse table data structure.
        Each query takes O(log(end - start)) time
        asymptotically.

        By default, 'sparse_table'.
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available
        backend is used.

    Examples
    ========

    >>> from pydatastructs import OneDimensionalArray, RangeQueryStatic
    >>> from pydatastructs import minimum
    >>> arr = OneDimensionalArray(int, [4, 6, 1, 5, 7, 3])
    >>> RMQ = RangeQueryStatic(arr, minimum)
    >>> RMQ.query(3, 4)
    5
    >>> RMQ.query(0, 4)
    1
    >>> RMQ.query(0, 2)
    1

    Note
    ====

    The array once passed as an input should not be modified
    once the `RangeQueryStatic` constructor is called. If you
    have updated the array, then you need to create a new
    `RangeQueryStatic` object with this updated array.
    """

    def __new__(cls, array, func, data_structure='sparse_table', **kwargs):
        raise_if_backend_is_not_python(
            cls, kwargs.get('backend', Backend.PYTHON))
        if len(array) == 0:
            raise ValueError("Input %s array is empty."%(array))

        # Dispatch to the concrete implementation for the requested
        # data structure; this class itself is never instantiated.
        if data_structure == 'array':
            return RangeQueryStaticArray(array, func)
        elif data_structure == 'sparse_table':
            return RangeQueryStaticSparseTable(array, func)
        else:
            raise NotImplementedError(
                "Currently %s data structure for range "
                "query without updates isn't implemented yet."
                % (data_structure))

    @classmethod
    def methods(cls):
        return ['query']

    def query(self, start, end):
        """
        Method to perform a query in the [start, end] range, with both
        endpoints inclusive (see the class-level examples; the original
        docstring said "[start, end)" which contradicts the examples
        and the array implementation's `end - start + 1` sizing).

        Parameters
        ==========

        start: int
            The starting index of the range.
        end: int
            The ending index of the range (inclusive).
        """
        # Bug fix: the original signature was `def query(start, end)` —
        # missing `self` — so calling it on an instance raised a
        # confusing TypeError instead of NotImplementedError.
        raise NotImplementedError(
            "This is an abstract method.")
class RangeQueryDynamic:
    """
    Produces results for range queries of different kinds
    while allowing point updates by using specified
    data structure.

    Parameters
    ==========

    array: OneDimensionalArray
        The array for which we need to answer queries.
        All the elements should be of type `int`.
    func: callable
        The function to be used for generating results
        of a query. It should accept only one tuple as an
        argument. The size of the tuple will be either 1 or 2
        and any one of the elements can be `None`. You can treat
        `None` in whatever way you want according to the query
        you are performing. For example, in case of range minimum
        queries, `None` can be treated as infinity. We provide
        the following which can be used as an argument value for this
        parameter,

        `minimum` - For range minimum queries.

        `greatest_common_divisor` - For queries finding greatest
        common divisor of a range.

        `summation` - For range sum queries.
    data_structure: str
        The data structure to be used for performing
        range queries.
        Currently the following data structures are supported,

        'array' -> Array data structure.
        Each query takes O(end - start) time asymptotically.
        Each point update takes O(1) time asymptotically.

        'segment_tree' -> Segment tree data structure.
        Each query takes O(log(end - start)) time
        asymptotically.
        Each point update takes O(log(len(array))) time
        asymptotically.

        By default, 'segment_tree'.
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available
        backend is used.

    Examples
    ========

    >>> from pydatastructs import OneDimensionalArray, RangeQueryDynamic
    >>> from pydatastructs import minimum
    >>> arr = OneDimensionalArray(int, [4, 6, 1, 5, 7, 3])
    >>> RMQ = RangeQueryDynamic(arr, minimum)
    >>> RMQ.query(3, 4)
    5
    >>> RMQ.query(0, 4)
    1
    >>> RMQ.query(0, 2)
    1
    >>> RMQ.update(2, 0)
    >>> RMQ.query(0, 2)
    0

    Note
    ====

    The array once passed as an input should be modified
    only with `RangeQueryDynamic.update` method.
    """

    def __new__(cls, array, func, data_structure='segment_tree', **kwargs):
        raise_if_backend_is_not_python(
            cls, kwargs.get('backend', Backend.PYTHON))

        if len(array) == 0:
            raise ValueError("Input %s array is empty."%(array))

        # Dispatch to the concrete implementation for the requested
        # data structure; this class itself is never instantiated.
        if data_structure == 'array':
            return RangeQueryDynamicArray(array, func, **kwargs)
        elif data_structure == 'segment_tree':
            return RangeQueryDynamicSegmentTree(array, func, **kwargs)
        else:
            raise NotImplementedError(
                "Currently %s data structure for range "
                "query with point updates isn't implemented yet."
                % (data_structure))

    @classmethod
    def methods(cls):
        return ['query', 'update']

    def query(self, start, end):
        """
        Method to perform a query in the [start, end] range, with both
        endpoints inclusive (see the class-level examples).

        Parameters
        ==========

        start: int
            The starting index of the range.
        end: int
            The ending index of the range (inclusive).
        """
        # Bug fix: original signature `def query(start, end)` was
        # missing `self`, turning instance calls into TypeErrors.
        raise NotImplementedError(
            "This is an abstract method.")

    def update(self, index, value):
        """
        Method to update index with a new value.

        Parameters
        ==========

        index: int
            The index to be updated.
        value: int
            The new value.
        """
        raise NotImplementedError(
            "This is an abstract method.")
+ By default, None + order: int + The order of the binomial tree. + By default, None + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Examples + ======== + + >>> from pydatastructs import BinomialTree, BinomialTreeNode + >>> root = BinomialTreeNode(1, 1) + >>> tree = BinomialTree(root, 0) + >>> tree.is_empty + False + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Binomial_heap + """ + __slots__ = ['root', 'order'] + + def __new__(cls, root=None, order=None, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + if root is not None and \ + not _check_type(root, BinomialTreeNode): + raise TypeError("%s i.e., root should be of " + "type BinomialTreeNode."%(root)) + if order is not None and not _check_type(order, int): + raise TypeError("%s i.e., order should be of " + "type int."%(order)) + obj = object.__new__(cls) + if root is not None: + root.is_root = True + obj.root = root + obj.order = order + return obj + + @classmethod + def methods(cls): + return ['add_sub_tree', '__new__', 'is_empty'] + + def add_sub_tree(self, other_tree): + """ + Adds a sub tree to current tree. + + Parameters + ========== + + other_tree: BinomialTree + + Raises + ====== + + ValueError: If order of the two trees + are different. 
+ """ + if not _check_type(other_tree, BinomialTree): + raise TypeError("%s i.e., other_tree should be of " + "type BinomialTree"%(other_tree)) + if self.order != other_tree.order: + raise ValueError("Orders of both the trees should be same.") + self.root.children.append(other_tree.root) + other_tree.root.parent = self.root + other_tree.root.is_root = False + self.order += 1 + + @property + def is_empty(self): + return self.root is None diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/disjoint_set.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/disjoint_set.py new file mode 100644 index 000000000..9a5caef5b --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/disjoint_set.py @@ -0,0 +1,143 @@ +from pydatastructs.utils import Set +from pydatastructs.utils.misc_util import ( + Backend, raise_if_backend_is_not_python) + +__all__ = ['DisjointSetForest'] + +class DisjointSetForest(object): + """ + Represents a forest of disjoint set trees. + + Parameters + ========== + + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Examples + ======== + + >>> from pydatastructs import DisjointSetForest + >>> dst = DisjointSetForest() + >>> dst.make_set(1) + >>> dst.make_set(2) + >>> dst.union(1, 2) + >>> dst.find_root(2).key + 1 + >>> dst.make_root(2) + >>> dst.find_root(2).key + 2 + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Disjoint-set_data_structure + """ + + __slots__ = ['tree'] + + def __new__(cls, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = object.__new__(cls) + obj.tree = dict() + return obj + + @classmethod + def methods(cls): + return ['make_set', '__new__', 'find_root', 'union'] + + def make_set(self, key, data=None): + """ + Adds a singleton set to the tree + of disjoint sets with given key + and optionally data. 
+ """ + if self.tree.get(key, None) is None: + new_set = Set(key, data) + self.tree[key] = new_set + new_set.parent = new_set + new_set.size = 1 + + def find_root(self, key): + """ + Finds the root of the set + with the given key by path + splitting algorithm. + """ + if self.tree.get(key, None) is None: + raise KeyError("Invalid key, %s"%(key)) + _set = self.tree[key] + while _set.parent is not _set: + _set, _set.parent = _set.parent, _set.parent.parent + return _set + + def union(self, key1, key2): + """ + Takes the union of the two + disjoint set trees with given + keys. The union is done by size. + """ + x_root = self.find_root(key1) + y_root = self.find_root(key2) + + if x_root is not y_root: + if x_root.size < y_root.size: + x_root, y_root = y_root, x_root + + y_root.parent = x_root + x_root.size += y_root.size + + def make_root(self, key): + """ + Finds the set to which the key belongs + and makes it as the root of the set. + """ + if self.tree.get(key, None) is None: + raise KeyError("Invalid key, %s"%(key)) + + key_set = self.tree[key] + if key_set.parent is not key_set: + current_parent = key_set.parent + # Remove this key subtree size from all its ancestors + while current_parent.parent is not current_parent: + current_parent.size -= key_set.size + current_parent = current_parent.parent + + all_set_size = current_parent.size # This is the root node + current_parent.size -= key_set.size + + # Make parent of current root as key + current_parent.parent = key_set + # size of new root will be same as previous root's size + key_set.size = all_set_size + # Make parent of key as itself + key_set.parent = key_set + + def find_size(self, key): + """ + Finds the size of set to which the key belongs. + """ + if self.tree.get(key, None) is None: + raise KeyError("Invalid key, %s"%(key)) + + return self.find_root(key).size + + def disjoint_sets(self): + """ + Returns a list of disjoint sets in the data structure. 
+ """ + result = dict() + for key in self.tree.keys(): + parent = self.find_root(key).key + members = result.get(parent, []) + members.append(key) + result[parent] = members + sorted_groups = [] + for v in result.values(): + sorted_groups.append(v) + sorted_groups[-1].sort() + sorted_groups.sort() + return sorted_groups diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/multiset.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/multiset.py new file mode 100644 index 000000000..397978224 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/multiset.py @@ -0,0 +1,42 @@ +__all__ = [ + 'Multiset' +] + + +class Multiset: + def __init__(self, *args): + # TODO: Implement dict in pydatastructs + self.counter = dict() + from pydatastructs.trees import RedBlackTree + self.tree = RedBlackTree() + self._n = 0 + for arg in args: + self.add(arg) + + def add(self, element): + self.counter[element] = self.counter.get(element, 0) + 1 + self._n += 1 + if self.counter[element] == 1: + self.tree.insert(element) + + def remove(self, element): + if self.counter[element] == 1: + self.tree.delete(element) + if self.counter.get(element, 0) > 0: + self._n -= 1 + self.counter[element] -= 1 + + def lower_bound(self, element): + return self.tree.lower_bound(element) + + def upper_bound(self, element): + return self.tree.upper_bound(element) + + def __contains__(self, element): + return self.counter.get(element, 0) > 0 + + def __len__(self): + return self._n + + def count(self, element): + return self.counter.get(element, 0) diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/queue.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/queue.py new file mode 100644 index 000000000..033ef9af3 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/queue.py @@ -0,0 +1,498 @@ +from 
class Queue(object):
    """Representation of queue data structure.

    This class is a factory/abstract base: instantiating it returns one
    of the concrete queue implementations, and the operation methods
    below are abstract stubs.

    Parameters
    ==========

    implementation : str
        Implementation to be used for queue.
        By default, 'array'
    items : list/tuple
        Optional, by default, None
        The inital items in the queue.
    dtype : A valid python type
        Optional, by default NoneType if item
        is None.
        Required only for 'array' implementation.
    double_ended : bool
        Optional, by default, False.
        Set to True if the queue should support
        additional, appendleft and pop operations
        from left and right sides respectively.
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available
        backend is used.

    Examples
    ========

    >>> from pydatastructs import Queue
    >>> q = Queue()
    >>> q.append(1)
    >>> q.append(2)
    >>> q.append(3)
    >>> q.popleft()
    1
    >>> len(q)
    2

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Queue_(abstract_data_type)
    """

    def __new__(cls, implementation='array', **kwargs):
        raise_if_backend_is_not_python(
            cls, kwargs.get('backend', Backend.PYTHON))
        initial = kwargs.get('items', None)
        double_ended = kwargs.get('double_ended', False)
        if implementation == 'array':
            return ArrayQueue(initial, kwargs.get('dtype', int), double_ended)
        if implementation == 'linked_list':
            return LinkedListQueue(initial, double_ended)
        raise NotImplementedError(
            "%s hasn't been implemented yet."%(implementation))

    @classmethod
    def methods(cls):
        return ['__new__']

    def _double_ended_check(self):
        # Shared guard used by operations that only double ended
        # queues support.
        if not self._double_ended:
            raise NotImplementedError(
                "This method is only supported for "
                "double ended queues.")

    def append(self, *args, **kwargs):
        # Abstract: implemented by the concrete queue classes.
        raise NotImplementedError(
            "This is an abstract method.")

    def appendleft(self, *args, **kwargs):
        raise NotImplementedError(
            "This is an abstract method.")

    def pop(self, *args, **kwargs):
        raise NotImplementedError(
            "This is an abstract method.")

    def popleft(self, *args, **kwargs):
        raise NotImplementedError(
            "This is an abstract method.")

    @property
    def is_empty(self):
        raise NotImplementedError(
            "This is an abstract method.")
    def popleft(self):
        """
        Removes and returns the element at the front of the queue.

        Returns a deep copy of the stored element so the caller's value
        cannot be affected by later moves inside the backing array.

        Raises
        ======

        IndexError
            If the queue is empty.
        """
        if self.is_empty:
            raise IndexError("Queue is empty.")
        # Copy out the front element before delete() mutates the storage.
        return_value = dc(self.items[self._front])
        front_temp = self._front
        if self._front == self._rear:
            # That was the last element: reset both sentinels to "empty".
            self._front = -1
            self._rear = -1
        else:
            # NOTE(review): when occupancy after this removal drops below
            # the load factor, DynamicOneDimensionalArray.delete presumably
            # compacts the live elements to the start of the buffer, which
            # is why the front index is reset to 0 here — confirm against
            # the dynamic array implementation.
            if (self.items._num - 1)/self.items._size < \
                self.items._load_factor:
                self._front = 0
            else:
                self._front += 1
        self.items.delete(front_temp)
        return return_value
SinglyLinkedList() + if items is None: + pass + elif type(items) in (list, tuple): + for x in items: + obj.append(x) + else: + raise TypeError("Expected type: list/tuple") + obj._double_ended = double_ended + return obj + + @classmethod + def methods(cls): + return ['__new__', 'append', 'appendleft', 'pop', 'popleft', + 'is_empty', '__len__', '__str__', 'front', 'rear'] + + def append(self, x): + self.queue.append(x) + + def appendleft(self, x): + self._double_ended_check() + if self._double_ended: + self.queue.appendleft(x) + + def pop(self): + self._double_ended_check() + if self.is_empty: + raise IndexError("Queue is empty.") + return_value = self.queue.popright() + return return_value + + def popleft(self): + if self.is_empty: + raise IndexError("Queue is empty.") + return_value = self.queue.popleft() + return return_value + + @property + def is_empty(self): + return self.__len__() == 0 + + @property + def front(self): + return self.queue.head + + @property + def rear(self): + return self.queue.tail + + def __len__(self): + return self.queue.size + + def __str__(self): + return str(self.queue) + +class PriorityQueue(object): + """ + Represents the concept of priority queue. + + Parameters + ========== + + implementation: str + The implementation which is to be + used for supporting operations + of priority queue. + The following implementations are supported, + + 'linked_list' -> Linked list implementation. + + 'binary_heap' -> Binary heap implementation. + + 'binomial_heap' -> Binomial heap implementation. + Doesn't support custom comparators, minimum + key data is extracted in every pop. + + Optional, by default, 'binary_heap' implementation + is used. + comp: function + The comparator to be used while comparing priorities. + Must return a bool object. + By default, `lambda u, v: u < v` is used to compare + priorities i.e., minimum priority elements are extracted + by pop operation. + backend: pydatastructs.Backend + The backend to be used. 
+ Optional, by default, the best available + backend is used. + + Examples + ======== + + >>> from pydatastructs import PriorityQueue + >>> pq = PriorityQueue() + >>> pq.push(1, 2) + >>> pq.push(2, 3) + >>> pq.pop() + 1 + >>> pq2 = PriorityQueue(comp=lambda u, v: u > v) + >>> pq2.push(1, 2) + >>> pq2.push(2, 3) + >>> pq2.pop() + 2 + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Priority_queue + """ + + def __new__(cls, implementation='binary_heap', **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + comp = kwargs.get("comp", lambda u, v: u < v) + if implementation == 'linked_list': + return LinkedListPriorityQueue(comp) + elif implementation == 'binary_heap': + return BinaryHeapPriorityQueue(comp) + elif implementation == 'binomial_heap': + return BinomialHeapPriorityQueue() + else: + raise NotImplementedError( + "%s implementation is not currently supported " + "by priority queue.") + + @classmethod + def methods(cls): + return ['__new__'] + + def push(self, value, priority): + """ + Pushes the value to the priority queue + according to the given priority. + + value + Value to be pushed. + priority + Priority to be given to the value. + """ + raise NotImplementedError( + "This is an abstract method.") + + def pop(self): + """ + Pops out the value from the priority queue. + """ + raise NotImplementedError( + "This is an abstract method.") + + @property + def peek(self): + """ + Returns the pointer to the value which will be + popped out by `pop` method. + """ + raise NotImplementedError( + "This is an abstract method.") + + @property + def is_empty(self): + """ + Checks if the priority queue is empty. 
+ """ + raise NotImplementedError( + "This is an abstract method.") + +class LinkedListPriorityQueue(PriorityQueue): + + __slots__ = ['items', 'comp'] + + @classmethod + def methods(cls): + return ['__new__', 'push', 'pop', 'peek', 'is_empty'] + + def __new__(cls, comp, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = object.__new__(cls) + obj.items = SinglyLinkedList() + obj.comp = comp + return obj + + def push(self, value, priority): + self.items.append(priority, value) + + def pop(self): + _, max_i = self._find_peek(return_index=True) + pop_val = self.items.extract(max_i) + return pop_val.data + + def _find_peek(self, return_index=False): + if self.is_empty: + raise IndexError("Priority queue is empty.") + + walk = self.items.head + i, max_i, max_p = 0, 0, walk + while walk is not None: + if self.comp(walk.key, max_p.key): + max_i = i + max_p = walk + i += 1 + walk = walk.next + if return_index: + return max_p, max_i + return max_p + + @property + def peek(self): + return self._find_peek() + + @property + def is_empty(self): + return self.items.size == 0 + +class BinaryHeapPriorityQueue(PriorityQueue): + + __slots__ = ['items'] + + @classmethod + def methods(cls): + return ['__new__', 'push', 'pop', 'peek', 'is_empty'] + + def __new__(cls, comp, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = object.__new__(cls) + obj.items = BinaryHeap() + obj.items._comp = comp + return obj + + def push(self, value, priority): + self.items.insert(priority, value) + + def pop(self): + node = self.items.extract() + return node.data + + @property + def peek(self): + if self.items.is_empty: + raise IndexError("Priority queue is empty.") + return self.items.heap[0] + + @property + def is_empty(self): + return self.items.is_empty + +class BinomialHeapPriorityQueue(PriorityQueue): + + __slots__ = ['items'] + + @classmethod + def methods(cls): + return ['__new__', 'push', 'pop', 
from .stack import Stack
from pydatastructs.utils.misc_util import (TreeNode,
    Backend, raise_if_backend_is_not_python)

__all__ = ['ArraySegmentTree']

class ArraySegmentTree(object):
    """
    Represents the segment tree data structure,
    defined on arrays.

    Parameters
    ==========

    array: Array
        The array to be used for filling the segment tree.
    func: callable
        The function to be used for filling the segment tree.
        It should accept only one tuple as an argument. The
        size of the tuple will be either 1 or 2 and any one
        of the elements can be `None`. You can treat `None` in
        whatever way you want. For example, in case of minimum
        values, `None` can be treated as infinity. We provide
        the following which can be used as an argument value for this
        parameter,

        `minimum` - For range minimum queries.

        `greatest_common_divisor` - For queries finding greatest
        common divisor of a range.

        `summation` - For range sum queries.
    dimensions: int
        The number of dimensions of the array to be used
        for the segment tree.
        Optional, by default 1.
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available
        backend is used.

    Examples
    ========

    >>> from pydatastructs import ArraySegmentTree, minimum
    >>> from pydatastructs import OneDimensionalArray
    >>> arr = OneDimensionalArray(int, [1, 2, 3, 4, 5])
    >>> s_t = ArraySegmentTree(arr, minimum)
    >>> s_t.build()
    >>> s_t.query(0, 1)
    1
    >>> s_t.query(1, 3)
    2
    >>> s_t.update(2, -1)
    >>> s_t.query(1, 3)
    -1
    >>> arr = OneDimensionalArray(int, [1, 2])
    >>> s_t = ArraySegmentTree(arr, minimum)
    >>> s_t.build()
    >>> str(s_t)
    "['((0, 1), 1)', '((0, 0), 1)', '', '', '((1, 1), 2)', '', '']"

    References
    ==========

    .. [1] https://cp-algorithms.com/data_structures/segment_tree.html
    """
    def __new__(cls, array, func, **kwargs):
        # Dispatch to the concrete implementation based on the
        # dimensionality of the input array; only 1-D is supported.
        dimensions = kwargs.pop("dimensions", 1)
        if dimensions == 1:
            return OneDimensionalArraySegmentTree(array, func, **kwargs)
        else:
            raise NotImplementedError("ArraySegmentTree do not support "
                "{}-dimensional arrays as of now.".format(dimensions))

    def build(self):
        """
        Generates segment tree nodes when called.
        Nothing happens if nodes are already generated.
        """
        raise NotImplementedError(
            "This is an abstract method.")

    def update(self, index, value):
        """
        Updates the value at given index.
        """
        raise NotImplementedError(
            "This is an abstract method.")

    def query(self, start, end):
        """
        Queries [start, end] range according
        to the function provided while constructing
        `ArraySegmentTree` object.
        """
        raise NotImplementedError(
            "This is an abstract method.")

    def __str__(self):
        # Iterative pre-order traversal; absent children are rendered
        # as empty strings so the tree's shape is visible in the output.
        recursion_stack = Stack(implementation='linked_list')
        recursion_stack.push(self._root)
        to_be_printed = []
        while not recursion_stack.is_empty:
            node = recursion_stack.pop().key
            if node is not None:
                to_be_printed.append(str((node.key, node.data)))
            else:
                to_be_printed.append('')
            if node is not None:
                recursion_stack.push(node.right)
                recursion_stack.push(node.left)
        return str(to_be_printed)


class OneDimensionalArraySegmentTree(ArraySegmentTree):
    """
    Segment tree defined on one dimensional arrays.
    Instantiate via `ArraySegmentTree` with the default
    `dimensions` argument.
    """

    __slots__ = ["_func", "_array", "_root", "_backend"]

    def __new__(cls, array, func, **kwargs):
        backend = kwargs.get('backend', Backend.PYTHON)
        raise_if_backend_is_not_python(cls, backend)

        obj = object.__new__(cls)
        obj._func = func
        obj._array = array
        obj._root = None  # created lazily by `build`
        obj._backend = backend
        return obj

    @classmethod
    def methods(cls):
        return ['__new__', 'build', 'update',
        'query']

    @property
    def is_ready(self):
        # The tree is usable only after `build` has created the root.
        return self._root is not None

    def build(self):
        """
        Builds the tree iteratively (post-order): a node's value is
        computed only after both children exist and hold their values.
        Idempotent - does nothing if the tree is already built.
        """
        if self.is_ready:
            return

        recursion_stack = Stack(implementation='linked_list')
        node = TreeNode((0, len(self._array) - 1), None, backend=self._backend)
        node.is_root = True
        self._root = node
        recursion_stack.push(node)

        while not recursion_stack.is_empty:
            node = recursion_stack.peek.key
            start, end = node.key
            if start == end:
                node.data = self._array[start]
                recursion_stack.pop()
                continue

            if (node.left is not None and
                node.right is not None):
                recursion_stack.pop()
                node.data = self._func((node.left.data, node.right.data))
            else:
                mid = (start + end) // 2
                if node.left is None:
                    # Pass the backend explicitly so children are
                    # consistent with the root node created above.
                    left_node = TreeNode((start, mid), None, backend=self._backend)
                    node.left = left_node
                    recursion_stack.push(left_node)
                if node.right is None:
                    right_node = TreeNode((mid + 1, end), None, backend=self._backend)
                    node.right = right_node
                    recursion_stack.push(right_node)

    def update(self, index, value):
        """
        Updates the value at `index` and recomputes every ancestor
        on the root-to-leaf path, iteratively. Each stack entry is a
        `(tree_node, updated_child)` pair; a non-None child marks the
        second visit, at which point the node is recomputed.
        """
        if not self.is_ready:
            raise ValueError("{} tree is not built yet. ".format(self) +
                             "Call .build method to prepare the segment tree.")

        recursion_stack = Stack(implementation='linked_list')
        recursion_stack.push((self._root, None))

        while not recursion_stack.is_empty:
            node, child = recursion_stack.peek.key
            start, end = node.key
            if start == end:
                self._array[index] = value
                node.data = value
                recursion_stack.pop()
                if not recursion_stack.is_empty:
                    parent_node = recursion_stack.pop()
                    recursion_stack.push((parent_node.key[0], node))
                continue

            if child is not None:
                node.data = self._func((node.left.data, node.right.data))
                recursion_stack.pop()
                if not recursion_stack.is_empty:
                    parent_node = recursion_stack.pop()
                    recursion_stack.push((parent_node.key[0], node))
            else:
                mid = (start + end) // 2
                if start <= index and index <= mid:
                    recursion_stack.push((node.left, None))
                else:
                    recursion_stack.push((node.right, None))

    def _query(self, node, start, end, l, r):
        # Standard recursive range decomposition; disjoint segments
        # contribute `None`, which `self._func` is expected to absorb.
        if r < start or end < l:
            return None

        if l <= start and end <= r:
            return node.data

        mid = (start + end) // 2
        left_result = self._query(node.left, start, mid, l, r)
        right_result = self._query(node.right, mid + 1, end, l, r)
        return self._func((left_result, right_result))

    def query(self, start, end):
        """
        Answers a [start, end] query using the function supplied at
        construction time. Raises ValueError if `build` was not called.
        """
        if not self.is_ready:
            raise ValueError("{} tree is not built yet. ".format(self) +
                             "Call .build method to prepare the segment tree.")

        return self._query(self._root, 0, len(self._array) - 1,
                           start, end)
".format(self) + + "Call .build method to prepare the segment tree.") + + return self._query(self._root, 0, len(self._array) - 1, + start, end) diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/sparse_table.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/sparse_table.py new file mode 100644 index 000000000..55ec4e9b3 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/sparse_table.py @@ -0,0 +1,108 @@ +from pydatastructs.linear_data_structures.arrays import OneDimensionalArray +from pydatastructs.utils.misc_util import ( + Backend, raise_if_backend_is_not_python) +import math + +__all__ = ['SparseTable'] + + +class SparseTable(object): + """ + Represents the sparse table data structure. + + Parameters + ========== + + array: OneDimensionalArray + The array to be used for filling the sparse table. + func: callable + The function to be used for filling the sparse table. + It should accept only one tuple as an argument. The + size of the tuple will be either 1 or 2 and any one + of the elements can be `None`. You can treat `None` in + whatever way you want. For example, in case of minimum + values, `None` can be treated as infinity. We provide + the following which can be used as an argument value for this + parameter, + + `minimum` - For range minimum queries. + + `greatest_common_divisor` - For queries finding greatest + common divisor of a range. + + `summation` - For range sum queries. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Examples + ======== + + >>> from pydatastructs import SparseTable, minimum + >>> from pydatastructs import OneDimensionalArray + >>> arr = OneDimensionalArray(int, [1, 2, 3, 4, 5]) + >>> s_t = SparseTable(arr, minimum) + >>> str(s_t) + "['[1, 1, 1]', '[2, 2, 2]', '[3, 3, None]', '[4, 4, None]', '[5, None, None]']" + + References + ========== + + .. 
[1] https://cp-algorithms.com/data_structures/sparse-table.html + """ + + __slots__ = ['_table', 'func'] + + def __new__(cls, array, func, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + + # TODO: If possible remove the following check. + if len(array) == 0: + raise ValueError("Input %s array is empty."%(array)) + + obj = object.__new__(cls) + size = len(array) + log_size = int(math.log2(size)) + 1 + obj._table = [OneDimensionalArray(int, log_size) for _ in range(size)] + obj.func = func + + for i in range(size): + obj._table[i][0] = func((array[i],)) + + for j in range(1, log_size + 1): + for i in range(size - (1 << j) + 1): + obj._table[i][j] = func((obj._table[i][j - 1], + obj._table[i + (1 << (j - 1))][j - 1])) + + return obj + + @classmethod + def methods(cls): + return ['query', '__str__'] + + def query(self, start, end): + """ + Method to perform a query on sparse table in [start, end) + range. + + Parameters + ========== + + start: int + The starting index of the range. + end: int + The ending index of the range. 
+ """ + j = int(math.log2(end - start + 1)) + 1 + answer = None + while j >= 0: + if start + (1 << j) - 1 <= end: + answer = self.func((answer, self._table[start][j])) + start += 1 << j + j -= 1 + return answer + + def __str__(self): + return str([str(array) for array in self._table]) diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/stack.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/stack.py new file mode 100644 index 000000000..38f72b43f --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/stack.py @@ -0,0 +1,200 @@ +from pydatastructs.linear_data_structures import DynamicOneDimensionalArray, SinglyLinkedList +from pydatastructs.miscellaneous_data_structures._backend.cpp import _stack +from pydatastructs.utils.misc_util import ( + _check_type, NoneType, Backend, + raise_if_backend_is_not_python) +from copy import deepcopy as dc + +__all__ = [ + 'Stack' +] + +class Stack(object): + """Representation of stack data structure + + Parameters + ========== + + implementation : str + Implementation to be used for stack. + By default, 'array' + Currently only supports 'array' + implementation. + items : list/tuple + Optional, by default, None + The inital items in the stack. + For array implementation. + dtype : A valid python type + Optional, by default NoneType if item + is None, otherwise takes the data + type of DynamicOneDimensionalArray + For array implementation. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Examples + ======== + + >>> from pydatastructs import Stack + >>> s = Stack() + >>> s.push(1) + >>> s.push(2) + >>> s.push(3) + >>> str(s) + '[1, 2, 3]' + >>> s.pop() + 3 + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/Stack_(abstract_data_type) + """ + + def __new__(cls, implementation='array', **kwargs): + backend = kwargs.get('backend', Backend.PYTHON) + if implementation == 'array': + items = kwargs.get('items', None) + dtype = kwargs.get('dtype', int) + if backend == Backend.CPP: + return _stack.ArrayStack(items, dtype) + + return ArrayStack(items, dtype) + if implementation == 'linked_list': + raise_if_backend_is_not_python(cls, backend) + + return LinkedListStack( + kwargs.get('items', None) + ) + raise NotImplementedError( + "%s hasn't been implemented yet."%(implementation)) + + @classmethod + def methods(cls): + return ['__new__'] + + def push(self, *args, **kwargs): + raise NotImplementedError( + "This is an abstract method.") + + def pop(self, *args, **kwargs): + raise NotImplementedError( + "This is an abstract method.") + + @property + def is_empty(self): + raise NotImplementedError( + "This is an abstract method.") + + @property + def peek(self): + raise NotImplementedError( + "This is an abstract method.") + +class ArrayStack(Stack): + + __slots__ = ['items'] + + def __new__(cls, items=None, dtype=NoneType, + **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + if items is None: + items = DynamicOneDimensionalArray(dtype, 0) + else: + items = DynamicOneDimensionalArray(dtype, items) + obj = object.__new__(cls) + obj.items = items + return obj + + @classmethod + def methods(cls): + return ['__new__', 'push', 'pop', 'is_emtpy', + 'peek', '__len__', '__str__'] + + def push(self, x): + if self.is_empty: + self.items._dtype = type(x) + self.items.append(x) + + def pop(self): + if self.is_empty: + raise IndexError("Stack is empty") + + top_element = dc(self.items[self.items._last_pos_filled]) + self.items.delete(self.items._last_pos_filled) + return top_element + + @property + def is_empty(self): + return self.items._last_pos_filled == -1 + + @property + def peek(self): + return 
self.items[self.items._last_pos_filled] + + def __len__(self): + return self.items._num + + def __str__(self): + """ + Used for printing. + """ + return str(self.items._data) + + +class LinkedListStack(Stack): + + __slots__ = ['stack'] + + def __new__(cls, items=None, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = object.__new__(cls) + obj.stack = SinglyLinkedList() + if items is None: + pass + elif type(items) in (list, tuple): + for x in items: + obj.push(x) + else: + raise TypeError("Expected type: list/tuple") + return obj + + @classmethod + def methods(cls): + return ['__new__', 'push', 'pop', 'is_emtpy', + 'peek', '__len__', '__str__'] + + def push(self, x): + self.stack.appendleft(x) + + def pop(self): + if self.is_empty: + raise IndexError("Stack is empty") + return self.stack.popleft() + + @property + def is_empty(self): + return self.__len__() == 0 + + @property + def peek(self): + return self.stack.head + + @property + def size(self): + return self.stack.size + + def __len__(self): + return self.stack.size + + def __str__(self): + elements = [] + current_node = self.peek + while current_node is not None: + elements.append(str(current_node)) + current_node = current_node.next + return str(elements[::-1]) diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/__init__.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_binomial_trees.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_binomial_trees.py new file mode 100644 index 000000000..1275e9aec --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_binomial_trees.py @@ -0,0 +1,17 @@ +from 
# ---- tests/test_binomial_trees.py ----
from pydatastructs.miscellaneous_data_structures.binomial_trees import BinomialTree
from pydatastructs.utils.raises_util import raises
from pydatastructs.utils.misc_util import BinomialTreeNode, _check_type
from pydatastructs import DisjointSetForest
from pydatastructs.miscellaneous_data_structures import Multiset, Queue
from pydatastructs.miscellaneous_data_structures.queue import (
    ArrayQueue, LinkedListQueue, PriorityQueue,
    LinkedListPriorityQueue)

# only tests the corner cases
def test_BinomialTree():
    assert raises(TypeError, lambda: BinomialTree(1, 1))
    assert raises(TypeError, lambda: BinomialTree(None, 1.5))

    bt = BinomialTree()
    assert raises(TypeError, lambda: bt.add_sub_tree(None))
    bt1 = BinomialTree(BinomialTreeNode(1, 1), 0)
    node = BinomialTreeNode(2, 2)
    node.add_children(BinomialTreeNode(3, 3))
    bt2 = BinomialTree(node, 1)
    # Sub-trees of different order cannot be merged.
    assert raises(ValueError, lambda: bt1.add_sub_tree(bt2))
    assert bt1.is_empty is False

# ---- tests/test_disjoint_set.py ----
def test_DisjointSetForest():

    dst = DisjointSetForest()
    for i in range(8):
        dst.make_set(i + 1)

    dst.union(1, 2)
    dst.union(1, 5)
    assert dst.find_size(2) == 3
    dst.union(1, 6)
    dst.union(1, 8)
    dst.union(3, 4)
    assert dst.find_size(3) == 2

    assert (dst.find_root(1) == dst.find_root(2) ==
            dst.find_root(5) == dst.find_root(6) == dst.find_root(8))
    assert dst.disjoint_sets() == [[1, 2, 5, 6, 8], [3, 4], [7]]
    assert dst.find_root(3) == dst.find_root(4)
    assert dst.find_root(7).key == 7

    assert raises(KeyError, lambda: dst.find_root(9))
    assert raises(KeyError, lambda: dst.find_size(9))
    dst.union(3, 1)
    assert dst.find_root(3).key == 1
    assert dst.find_root(5).key == 1
    dst.make_root(6)
    assert dst.disjoint_sets() == [[1, 2, 3, 4, 5, 6, 8], [7]]
    assert dst.find_root(3).key == 6
    assert dst.find_root(5).key == 6
    dst.make_root(5)
    assert dst.find_root(1).key == 5
    assert dst.find_root(5).key == 5
    assert raises(KeyError, lambda: dst.make_root(9))

    dst = DisjointSetForest()
    for i in range(6):
        dst.make_set(i)
    assert dst.tree[2].size == 1
    dst.union(2, 3)
    assert dst.tree[2].size == 2
    assert dst.tree[3].size == 1
    dst.union(1, 4)
    dst.union(2, 4)
    assert dst.disjoint_sets() == [[0], [1, 2, 3, 4], [5]]
    # current tree
    ###############
    #       2
    #      / \
    #     1   3
    #    /
    #   4
    ###############
    assert dst.tree[2].size == 4
    assert dst.tree[1].size == 2
    assert dst.tree[3].size == dst.tree[4].size == 1
    dst.make_root(4)
    # New tree
    ###############
    #       4
    #       |
    #       2
    #      / \
    #     1   3
    ###############
    assert dst.tree[4].size == 4
    assert dst.tree[2].size == 3
    assert dst.tree[1].size == dst.tree[3].size == 1

# ---- tests/test_multiset.py ----
def test_Multiset():

    ms = Multiset()
    ms.add(5)
    ms.add(5)
    ms.add(3)
    ms.add(7)
    assert len(ms) == 4
    assert 5 in ms
    assert ms.count(5) == 2
    assert ms.count(3) == 1
    assert ms.count(-3) == 0
    assert 4 not in ms
    ms.remove(5)
    assert 5 in ms
    assert ms.lower_bound(5) == 5
    assert ms.upper_bound(5) == 7

    ms = Multiset(5, 3, 7, 2)

    assert len(ms) == 4
    assert 5 in ms
    assert ms.count(7) == 1
    assert 4 not in ms
    assert ms.lower_bound(3) == 3
    assert ms.upper_bound(3) == 5
    assert ms.upper_bound(7) is None

    ms.remove(5)

    assert len(ms) == 3
    assert 5 not in ms

    ms.add(4)

    assert 4 in ms
    assert len(ms) == 4

# ---- tests/test_queue.py ----
def test_Queue():
    q = Queue(implementation='array')
    q1 = Queue()
    assert _check_type(q, ArrayQueue) is True
    assert _check_type(q1, ArrayQueue) is True
    q2 = Queue(implementation='linked_list')
    assert _check_type(q2, LinkedListQueue) is True
    assert raises(NotImplementedError, lambda: Queue(implementation=''))

def test_ArrayQueue():
    q1 = Queue()
    assert raises(IndexError, lambda: q1.popleft())
    q1 = Queue(implementation='array', items=[0])
    q1.append(1)
    q1.append(2)
    q1.append(3)
    assert str(q1) == '[0, 1, 2, 3]'
    assert len(q1) == 4
    assert q1.popleft() == 0
    assert q1.popleft() == 1
    assert len(q1) == 2
    assert q1.popleft() == 2
    assert q1.popleft() == 3
    assert len(q1) == 0

    q2 = Queue(implementation='array', items=[0], double_ended=True)
    q2.append(1)
    q2.append(2)
    q2.appendleft(3)
    assert str(q2) == '[3, 0, 1, 2]'
    assert len(q2) == 4
    assert q2.popleft() == 3
    assert q2.pop() == 2
    assert len(q2) == 2
    assert q2.popleft() == 0
    assert q2.pop() == 1
    assert len(q2) == 0

    # appendleft is only available on double-ended queues.
    q1 = Queue(implementation='array', items=[0])
    assert raises(NotImplementedError, lambda: q1.appendleft(2))


def test_LinkedListQueue():
    q1 = Queue(implementation='linked_list')
    q1.append(1)
    assert raises(TypeError, lambda: Queue(implementation='linked_list', items={0, 1}))
    q1 = Queue(implementation='linked_list', items=[0, 1])
    q1.append(2)
    q1.append(3)
    assert str(q1) == ("['(0, None)', '(1, None)', "
                       "'(2, None)', '(3, None)']")
    assert len(q1) == 4
    assert q1.popleft().key == 0
    assert q1.popleft().key == 1
    assert len(q1) == 2
    assert q1.popleft().key == 2
    assert q1.popleft().key == 3
    assert len(q1) == 0
    assert raises(IndexError, lambda: q1.popleft())

    q1 = Queue(implementation='linked_list', items=['a', None, type, {}])
    assert len(q1) == 4

    front = q1.front
    assert front.key == q1.popleft().key

    rear = q1.rear
    for _ in range(len(q1) - 1):
        q1.popleft()

    assert rear.key == q1.popleft().key

    q1 = Queue(implementation='linked_list', double_ended=True)
    q1.appendleft(1)
    q2 = Queue(implementation='linked_list', items=[0, 1])
    assert raises(NotImplementedError, lambda: q2.appendleft(1))
    q1 = Queue(implementation='linked_list', items=[0, 1], double_ended=True)
    q1.appendleft(2)
    q1.append(3)
    assert str(q1) == "['(2, None)', '(0, None)', '(1, None)', '(3, None)']"
    assert len(q1) == 4
    assert q1.popleft().key == 2
    assert q1.pop().key == 3
    assert len(q1) == 2
    assert q1.pop().key == 1
    assert q1.popleft().key == 0
    assert len(q1) == 0
    assert raises(IndexError, lambda: q1.popleft())

def test_PriorityQueue():
    pq1 = PriorityQueue(implementation='linked_list')
    assert _check_type(pq1, LinkedListPriorityQueue) is True
    # NOTE: previously asserted on Queue(implementation='') which
    # exercised the wrong class; test PriorityQueue itself.
    assert raises(NotImplementedError, lambda: PriorityQueue(implementation=''))

def test_ImplementationPriorityQueue():
    impls = ['linked_list', 'binomial_heap', 'binary_heap']
    for impl in impls:
        pq1 = PriorityQueue(implementation=impl)
        # Lowest priority value should be served first.
        pq1.push(1, 4)
        pq1.push(2, 3)
        pq1.push(3, 2)
        assert pq1.peek.data == 3
        assert pq1.pop() == 3
        assert pq1.peek.data == 2
        assert pq1.pop() == 2
        assert pq1.peek.data == 1
        assert pq1.pop() == 1
        assert pq1.is_empty is True
        assert raises(IndexError, lambda: pq1.peek)
from pydatastructs import (
    RangeQueryDynamic, minimum,
    greatest_common_divisor, summation,
    OneDimensionalArray)
from pydatastructs.utils.raises_util import raises
import random, math
from copy import deepcopy

def _test_RangeQueryDynamic_common(func, gen_expected):
    """Shared driver: builds random arrays, checks every (i, j) query
    against a brute-force oracle, then mutates half the entries via
    `update` and re-checks all queries."""

    # Empty arrays are rejected up front.
    array = OneDimensionalArray(int, [])
    assert raises(ValueError, lambda: RangeQueryDynamic(array, func))

    array = OneDimensionalArray(int, [1])
    rq = RangeQueryDynamic(array, func)
    assert rq.query(0, 0) == 1
    assert raises(ValueError, lambda: rq.query(0, -1))
    assert raises(IndexError, lambda: rq.query(0, 1))

    array_sizes = [3, 6, 12, 24, 48, 96]
    random.seed(0)
    for array_size in array_sizes:
        inputs = []
        for i in range(array_size):
            for j in range(i + 1, array_size):
                inputs.append((i, j))

        data_structures = ["array", "segment_tree"]
        for ds in data_structures:
            data = random.sample(range(-2*array_size, 2*array_size), array_size)
            array = OneDimensionalArray(int, data)
            rmq = RangeQueryDynamic(array, func, data_structure=ds)
            for input in inputs:
                assert rmq.query(input[0], input[1]) == gen_expected(data, input[0], input[1])

            data_copy = deepcopy(data)
            for _ in range(array_size//2):
                index = random.randint(0, array_size - 1)
                value = random.randint(0, 4 * array_size)
                data_copy[index] = value
                rmq.update(index, value)

            for input in inputs:
                assert rmq.query(input[0], input[1]) == gen_expected(data_copy, input[0], input[1])

def test_RangeQueryDynamic_minimum():

    def _gen_minimum_expected(data, i, j):
        return min(data[i:j + 1])

    _test_RangeQueryDynamic_common(minimum, _gen_minimum_expected)

def test_RangeQueryDynamic_greatest_common_divisor():

    def _gen_gcd_expected(data, i, j):
        if j == i:
            return data[i]
        else:
            expected_gcd = math.gcd(data[i], data[i + 1])
            for idx in range(i + 2, j + 1):
                expected_gcd = math.gcd(expected_gcd, data[idx])
            return expected_gcd

    _test_RangeQueryDynamic_common(greatest_common_divisor, _gen_gcd_expected)

def test_RangeQueryDynamic_summation():

    def _gen_summation_expected(data, i, j):
        return sum(data[i:j + 1])

    # No `return` here - test functions should not return values.
    _test_RangeQueryDynamic_common(summation, _gen_summation_expected)
"sparse_table"] + for ds in data_structures: + rmq = RangeQueryStatic(array, func, data_structure=ds) + for input, correct in zip(inputs, expected): + assert rmq.query(input[0], input[1]) == correct + +def test_RangeQueryStatic_minimum(): + + def _gen_minimum_expected(data, i, j): + return min(data[i:j + 1]) + + _test_RangeQueryStatic_common(minimum, _gen_minimum_expected) + +def test_RangeQueryStatic_greatest_common_divisor(): + + def _gen_gcd_expected(data, i, j): + if j == i: + return data[i] + else: + expected_gcd = math.gcd(data[i], data[i + 1]) + for idx in range(i + 2, j + 1): + expected_gcd = math.gcd(expected_gcd, data[idx]) + return expected_gcd + + _test_RangeQueryStatic_common(greatest_common_divisor, _gen_gcd_expected) + +def test_RangeQueryStatic_summation(): + + def _gen_summation_expected(data, i, j): + return sum(data[i:j + 1]) + + return _test_RangeQueryStatic_common(summation, _gen_summation_expected) diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_stack.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_stack.py new file mode 100644 index 000000000..2d9d08b82 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_stack.py @@ -0,0 +1,77 @@ +from pydatastructs.miscellaneous_data_structures import Stack +from pydatastructs.miscellaneous_data_structures.stack import ArrayStack, LinkedListStack +from pydatastructs.miscellaneous_data_structures._backend.cpp import _stack +from pydatastructs.utils.raises_util import raises +from pydatastructs.utils.misc_util import _check_type, Backend + + +def test_Stack(): + s = Stack(implementation='array') + s1 = Stack() + assert _check_type(s, ArrayStack) is True + assert _check_type(s1, ArrayStack) is True + s2 = Stack(implementation='linked_list') + assert _check_type(s2, LinkedListStack) is True + assert raises(NotImplementedError, lambda: Stack(implementation='')) + + s3 = 
Stack(backend=Backend.CPP) + assert _check_type(s3, _stack.ArrayStack) is True + s4 = Stack(implementation="array", backend=Backend.CPP) + assert _check_type(s4, _stack.ArrayStack) is True + +def test_ArrayStack(): + s = Stack(implementation='array') + s.push(1) + s.push(2) + s.push(3) + assert s.peek == 3 + assert str(s) == '[1, 2, 3]' + assert s.pop() == 3 + assert s.pop() == 2 + assert s.pop() == 1 + assert s.is_empty is True + assert raises(IndexError, lambda : s.pop()) + _s = Stack(items=[1, 2, 3]) + assert str(_s) == '[1, 2, 3]' + assert len(_s) == 3 + + # Cpp test + s1 = Stack(implementation="array", backend=Backend.CPP) + s1.push(1) + s1.push(2) + s1.push(3) + assert s1.peek == 3 + assert str(s1) == "['1', '2', '3']" + assert s1.pop() == 3 + assert s1.pop() == 2 + assert s1.pop() == 1 + assert s1.is_empty is True + assert raises(IndexError, lambda : s1.pop()) + _s1 = Stack(items=[1, 2, 3], backend=Backend.CPP) + assert str(_s1) == "['1', '2', '3']" + assert len(_s1) == 3 + +def test_LinkedListStack(): + s = Stack(implementation='linked_list') + s.push(1) + s.push(2) + s.push(3) + assert s.peek.key == 3 + assert str(s) == ("['(1, None)', '(2, None)', '(3, None)']") + assert s.pop().key == 3 + assert s.pop().key == 2 + assert s.pop().key == 1 + assert s.is_empty is True + assert raises(IndexError, lambda : s.pop()) + assert str(s) == '[]' + _s = Stack(implementation='linked_list',items=[1, 2, 3]) + assert str(_s) == "['(1, None)', '(2, None)', '(3, None)']" + assert len(_s) == 3 + + s = Stack(implementation='linked_list',items=['a',None,type,{}]) + assert len(s) == 4 + assert s.size == 4 + + peek = s.peek + assert peek.key == s.pop().key + assert raises(TypeError, lambda: Stack(implementation='linked_list', items={0, 1})) diff --git a/lib/python3.12/site-packages/pydatastructs/strings/__init__.py b/lib/python3.12/site-packages/pydatastructs/strings/__init__.py new file mode 100644 index 000000000..33930b426 --- /dev/null +++ 
# ---- pydatastructs/strings/__init__.py ----
__all__ = []

from . import (
    trie,
    algorithms
)

from .trie import (
    Trie
)

__all__.extend(trie.__all__)

from .algorithms import (
    find
)

__all__.extend(algorithms.__all__)

# ---- pydatastructs/strings/algorithms.py ----
from pydatastructs.linear_data_structures.arrays import (
    DynamicOneDimensionalArray, OneDimensionalArray)
from pydatastructs.utils.misc_util import (
    Backend, raise_if_backend_is_not_python)

__all__ = [
    'find'
]

# Base and modulus for the polynomial rolling hash (Rabin-Karp).
PRIME_NUMBER, MOD = 257, 1000000007

def find(text, query, algorithm, **kwargs):
    """
    Finds occurrence of a query string within the text string.

    Parameters
    ==========

    text: str
        The string on which query is to be performed.
    query: str
        The string which is to be searched in the text.
    algorithm: str
        The algorithm which should be used for
        searching.
        Currently the following algorithms are
        supported,

        'kmp' -> Knuth-Morris-Pratt as given in [1].

        'rabin_karp' -> Rabin-Karp algorithm as given in [2].

        'boyer_moore' -> Boyer-Moore algorithm as given in [3].

        'z_function' -> Z-function algorithm as given in [4].

    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available
        backend is used.

    Returns
    =======

    DynamicOneDimensionalArray
        An array of starting positions of the portions
        in the text which match with the given query.

    Examples
    ========

    >>> from pydatastructs.strings.algorithms import find
    >>> text = "abcdefabcabe"
    >>> pos = find(text, "ab", algorithm="kmp")
    >>> str(pos)
    "['0', '6', '9']"
    >>> pos = find(text, "abc", algorithm="kmp")
    >>> str(pos)
    "['0', '6']"
    >>> pos = find(text, "abe", algorithm="kmp")
    >>> str(pos)
    "['9']"
    >>> pos = find(text, "abed", algorithm="kmp")
    >>> str(pos)
    '[]'

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Knuth%E2%80%93Morris%E2%80%93Pratt_algorithm
    .. [2] https://en.wikipedia.org/wiki/Rabin%E2%80%93Karp_algorithm
    .. [3] https://en.wikipedia.org/wiki/Boyer%E2%80%93Moore_string-search_algorithm
    .. [4] https://usaco.guide/CPH.pdf#page=257
    """
    raise_if_backend_is_not_python(
        find, kwargs.get('backend', Backend.PYTHON))
    import pydatastructs.strings.algorithms as algorithms
    # Dispatch by name: each supported algorithm is a module-level
    # helper prefixed with an underscore (e.g. 'kmp' -> _kmp).
    func = "_" + algorithm
    if not hasattr(algorithms, func):
        raise NotImplementedError(
        "Currently %s algorithm for searching strings "
        "inside a text isn't implemented yet."
        %(algorithm))
    return getattr(algorithms, func)(text, query)


def _knuth_morris_pratt(text, query):
    if len(text) == 0 or len(query) == 0:
        return DynamicOneDimensionalArray(int, 0)
    kmp_table = _build_kmp_table(query)
    return _do_match(text, query, kmp_table)

# Alias so `find(..., algorithm='kmp')` dispatch resolves.
_kmp = _knuth_morris_pratt

def _build_kmp_table(query):
    # Standard KMP failure table; kmp_table[i] is the fallback index
    # to resume matching at after a mismatch at query[i].
    pos, cnd = 1, 0
    kmp_table = OneDimensionalArray(int, len(query) + 1)

    kmp_table[0] = -1

    while pos < len(query):
        if query[pos] == query[cnd]:
            kmp_table[pos] = kmp_table[cnd]
        else:
            kmp_table[pos] = cnd
            while cnd >= 0 and query[pos] != query[cnd]:
                cnd = kmp_table[cnd]
        pos, cnd = pos + 1, cnd + 1
    kmp_table[pos] = cnd

    return kmp_table



def _do_match(string, query, kmp_table):
    # Scan `string` once, falling back through the table on mismatch.
    j, k = 0, 0
    positions = DynamicOneDimensionalArray(int, 0)

    while j < len(string):
        if query[k] == string[j]:
            j = j + 1
            k = k + 1
            if k == len(query):
                positions.append(j - k)
                k = kmp_table[k]
        else:
            k = kmp_table[k]
            if k < 0:
                j = j + 1
                k = k + 1

    return positions

def _p_pow(length, p=PRIME_NUMBER, m=MOD):
    # Precomputed powers of the hash base modulo m.
    p_pow = OneDimensionalArray(int, length)
    p_pow[0] = 1
    for i in range(1, length):
        p_pow[i] = (p_pow[i-1] * p) % m
    return p_pow

def _hash_str(string, p=PRIME_NUMBER, m=MOD):
    # Polynomial rolling hash of the whole string.
    hash_value = 0
    p_pow = _p_pow(len(string), p, m)
    for i in range(len(string)):
        hash_value = (hash_value + ord(string[i]) * p_pow[i]) % m
    return hash_value

def _rabin_karp(text, query):
    t = len(text)
    q = len(query)
    positions = DynamicOneDimensionalArray(int, 0)
    if q == 0 or t == 0:
        return positions

    query_hash = _hash_str(query)
    # text_hash[i] is the hash of text[:i]; window hashes come from
    # prefix differences, compared after aligning by p_pow[i].
    text_hash = OneDimensionalArray(int, t + 1)
    text_hash.fill(0)
    p_pow = _p_pow(t)

    for i in range(t):
        text_hash[i+1] = (text_hash[i] + ord(text[i]) * p_pow[i]) % MOD
    for i in range(t - q + 1):
        curr_hash = (text_hash[i + q] + MOD - text_hash[i]) % MOD
        if curr_hash == (query_hash * p_pow[i]) % MOD:
            positions.append(i)

    return positions
_boyer_moore(text, query): + positions = DynamicOneDimensionalArray(int, 0) + text_length, query_length = len(text), len(query) + + if text_length == 0 or query_length == 0: + return positions + + # Preprocessing Step + bad_match_table = dict() + for i in range(query_length): + bad_match_table[query[i]] = i + + shift = 0 + # Matching procedure + while shift <= text_length-query_length: + j = query_length - 1 + while j >= 0 and query[j] == text[shift + j]: + j -= 1 + if j < 0: + positions.append(shift) + if shift + query_length < text_length: + if text[shift + query_length] in bad_match_table: + shift += query_length - bad_match_table[text[shift + query_length]] + else: + shift += query_length + 1 + else: + shift += 1 + else: + letter_pos = text[shift + j] + if letter_pos in bad_match_table: + shift += max(1, j - bad_match_table[letter_pos]) + else: + shift += max(1, j + 1) + return positions + +def _z_vector(text, query): + string = text + if query != "": + string = query + str("$") + text + + z_fct = OneDimensionalArray(int, len(string)) + z_fct.fill(0) + + curr_pos = 1 + seg_left = 0 + seg_right = 0 + + for curr_pos in range(1,len(string)): + if curr_pos <= seg_right: + z_fct[curr_pos] = min(seg_right - curr_pos + 1, z_fct[curr_pos - seg_left]) + + while curr_pos + z_fct[curr_pos] < len(string) and \ + string[z_fct[curr_pos]] == string[curr_pos + z_fct[curr_pos]]: + z_fct[curr_pos] += 1 + + if curr_pos + z_fct[curr_pos] - 1 > seg_right: + seg_left = curr_pos + seg_right = curr_pos + z_fct[curr_pos] - 1 + + final_z_fct = DynamicOneDimensionalArray(int, 0) + start_index = 0 + if query != "": + start_index = len(query) + 1 + for pos in range(start_index, len(string)): + final_z_fct.append(z_fct[pos]) + + return final_z_fct + +def _z_function(text, query): + positions = DynamicOneDimensionalArray(int, 0) + if len(text) == 0 or len(query) == 0: + return positions + + fct = _z_vector(text, query) + for pos in range(len(fct)): + if fct[pos] == len(query): + 
positions.append(pos) + + return positions diff --git a/lib/python3.12/site-packages/pydatastructs/strings/tests/__init__.py b/lib/python3.12/site-packages/pydatastructs/strings/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/lib/python3.12/site-packages/pydatastructs/strings/tests/test_algorithms.py b/lib/python3.12/site-packages/pydatastructs/strings/tests/test_algorithms.py new file mode 100644 index 000000000..37622cf80 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/strings/tests/test_algorithms.py @@ -0,0 +1,76 @@ +from pydatastructs.strings import find + +import random, string + +def test_kmp(): + _test_common_string_matching('kmp') + +def test_rka(): + _test_common_string_matching('rabin_karp') + +def test_bm(): + _test_common_string_matching('boyer_moore') + +def test_zf(): + _test_common_string_matching('z_function') + +def _test_common_string_matching(algorithm): + true_text_pattern_dictionary = { + "Knuth-Morris-Pratt": "-Morris-", + "abcabcabcabdabcabdabcabca": "abcabdabcabca", + "aefcdfaecdaefaefcdaefeaefcdcdeae": "aefcdaefeaefcd", + "aaaaaaaa": "aaa", + "fullstringmatch": "fullstringmatch", + "z-function": "z-fun" + } + for test_case_key in true_text_pattern_dictionary: + text = test_case_key + query = true_text_pattern_dictionary[test_case_key] + positions = find(text, query, algorithm) + for i in range(positions._last_pos_filled): + p = positions[i] + assert text[p:p + len(query)] == query + + false_text_pattern_dictionary = { + "Knuth-Morris-Pratt": "-Pratt-", + "abcabcabcabdabcabdabcabca": "qwertyuiopzxcvbnm", + "aefcdfaecdaefaefcdaefeaefcdcdeae": "cdaefaefe", + "fullstringmatch": "fullstrinmatch", + "z-function": "function-", + "abc": "", + "": "abc" + } + + for test_case_key in false_text_pattern_dictionary: + text = test_case_key + query = false_text_pattern_dictionary[test_case_key] + positions = find(text, query, algorithm) + assert positions.size == 0 + + random.seed(1000) + + def 
gen_random_string(length): + ascii = string.ascii_uppercase + digits = string.digits + return ''.join(random.choices(ascii + digits, k=length)) + + for _ in range(100): + query = gen_random_string(random.randint(3, 10)) + num_times = random.randint(1, 10) + freq = 0 + text = "" + while freq < num_times: + rand_str = gen_random_string(random.randint(5, 10)) + if rand_str != query: + freq += 1 + text += query + rand_str + query + positions = find(text, query, algorithm) + assert positions._num == num_times * 2 + for i in range(positions._last_pos_filled): + p = positions[i] + assert text[p:p + len(query)] == query + + text = gen_random_string(len(query)) + if text != query: + positions = find(text, query, algorithm) + assert positions.size == 0 diff --git a/lib/python3.12/site-packages/pydatastructs/strings/tests/test_trie.py b/lib/python3.12/site-packages/pydatastructs/strings/tests/test_trie.py new file mode 100644 index 000000000..059104708 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/strings/tests/test_trie.py @@ -0,0 +1,49 @@ +from pydatastructs import Trie + +def test_Trie(): + + strings = ["A", "to", "tea", "ted", "ten", "i", + "in", "inn", "Amfn", "snbr"] + trie = Trie() + for string in strings: + trie.insert(string) + + prefix_strings = ["te", "t", "Am", "snb"] + + for string in strings: + assert trie.is_inserted(string) + + for string in strings[::-1]: + assert trie.is_inserted(string) + + for string in prefix_strings: + assert trie.is_present(string) + assert not trie.is_inserted(string) + + assert sorted(trie.strings_with_prefix("t")) == ['tea', 'ted', 'ten', 'to'] + assert sorted(trie.strings_with_prefix("te")) == ["tea", "ted", "ten"] + assert trie.strings_with_prefix("i") == ["i", "in", "inn"] + assert trie.strings_with_prefix("a") == [] + + remove_order = ["to", "tea", "ted", "ten", "inn", "in", "A"] + + assert trie.delete("z") is None + + for string in remove_order: + trie.delete(string) + for present in strings: + if present == 
string: + assert not trie.is_inserted(present) + else: + assert trie.is_present(present) + assert trie.is_inserted(present) + strings.remove(string) + + prefix_strings_1 = ["dict", "dicts", "dicts_lists_tuples"] + trie_1 = Trie() + + for i in range(len(prefix_strings_1)): + trie_1.insert(prefix_strings_1[i]) + for j in range(i + 1): + assert trie_1.is_inserted(prefix_strings_1[j]) + assert trie_1.is_present(prefix_strings_1[j]) diff --git a/lib/python3.12/site-packages/pydatastructs/strings/trie.py b/lib/python3.12/site-packages/pydatastructs/strings/trie.py new file mode 100644 index 000000000..cdf6666cf --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/strings/trie.py @@ -0,0 +1,201 @@ +from pydatastructs.utils.misc_util import ( + TrieNode, Backend, + raise_if_backend_is_not_python) +from collections import deque +import copy + +__all__ = [ + 'Trie' +] + +Stack = Queue = deque + +class Trie(object): + """ + Represents the trie data structure for storing strings. + + Parameters + ========== + + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Examples + ======== + + >>> from pydatastructs import Trie + >>> trie = Trie() + >>> trie.insert("a") + >>> trie.insert("aa") + >>> trie.strings_with_prefix("a") + ['a', 'aa'] + >>> trie.is_present("aa") + True + >>> trie.delete("aa") + True + >>> trie.is_present("aa") + False + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Trie + """ + + __slots__ = ['root'] + + @classmethod + def methods(cls): + return ['__new__', 'insert', 'is_present', 'delete', + 'strings_with_prefix'] + + def __new__(cls, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = object.__new__(cls) + obj.root = TrieNode() + return obj + + def insert(self, string: str) -> None: + """ + Inserts the given string into the trie. 
+ + Parameters + ========== + + string: str + + Returns + ======= + + None + """ + walk = self.root + for char in string: + if walk.get_child(char) is None: + newNode = TrieNode(char) + walk.add_child(newNode) + walk = newNode + else: + walk = walk.get_child(char) + walk.is_terminal = True + + def is_present(self, string: str) -> bool: + """ + Checks if the given string is present as a prefix in the trie. + + Parameters + ========== + + string: str + + Returns + ======= + + True if the given string is present as a prefix; + False in all other cases. + """ + walk = self.root + for char in string: + if walk.get_child(char) is None: + return False + walk = walk.get_child(char) + return True + + def is_inserted(self, string: str) -> bool: + """ + Checks if the given string was inserted in the trie. + + Parameters + ========== + + string: str + + Returns + ======= + + True if the given string was inserted in trie; + False in all other cases. + """ + walk = self.root + for char in string: + if walk.get_child(char) is None: + return False + walk = walk.get_child(char) + return walk.is_terminal + + def delete(self, string: str) -> bool: + """ + Deletes the given string from the trie. + + Parameters + ========== + + string: str + + Returns + ======= + + True if successfully deleted; + None if the string is not present in the trie. + """ + path = [] + walk = self.root + size = len(string) + for i in range(size): + char = string[i] + path.append(walk) + if walk.get_child(char) is None: + return None + walk = walk.get_child(char) + path.append(walk) + i = len(path) - 1 + path[i].is_terminal = False + while not path[i]._children and i >= 1: + path[i-1].remove_child(path[i].char) + i -= 1 + if path[i].is_terminal: + return True + return True + + def strings_with_prefix(self, string: str) -> list: + """ + Generates a list of all strings with the given prefix. 
+ + Parameters + ========== + + string: str + + Returns + ======= + + strings: list + The list of strings with the given prefix. + """ + + def _collect(prefix: str, node: TrieNode, strings: list) -> str: + TrieNode_stack = Stack() + TrieNode_stack.append((node, prefix)) + while TrieNode_stack: + walk, curr_prefix = TrieNode_stack.pop() + if walk.is_terminal: + strings.append(curr_prefix + walk.char) + for child in walk._children: + TrieNode_stack.append((walk.get_child(child), curr_prefix + walk.char)) + + strings = [] + prefix = "" + walk = self.root + for char in string: + walk = walk.get_child(char) + if walk is None: + return strings + prefix += char + if walk.is_terminal: + strings.append(walk.char) + for child in walk._children: + _collect(prefix, walk.get_child(child), strings) + return strings diff --git a/lib/python3.12/site-packages/pydatastructs/trees/__init__.py b/lib/python3.12/site-packages/pydatastructs/trees/__init__.py new file mode 100644 index 000000000..892730122 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/trees/__init__.py @@ -0,0 +1,40 @@ +__all__ = [] + +from . 
import ( + binary_trees, + m_ary_trees, + space_partitioning_trees, + heaps, +) + +from .binary_trees import ( + BinaryTree, + BinarySearchTree, + BinaryTreeTraversal, + AVLTree, + BinaryIndexedTree, + CartesianTree, + Treap, + SplayTree, + RedBlackTree +) +__all__.extend(binary_trees.__all__) + +from .m_ary_trees import ( + MAryTreeNode, MAryTree +) + +__all__.extend(m_ary_trees.__all__) + +from .space_partitioning_trees import ( + OneDimensionalSegmentTree +) +__all__.extend(space_partitioning_trees.__all__) + +from .heaps import ( + BinaryHeap, + TernaryHeap, + DHeap, + BinomialHeap +) +__all__.extend(heaps.__all__) diff --git a/lib/python3.12/site-packages/pydatastructs/trees/_backend/__init__.py b/lib/python3.12/site-packages/pydatastructs/trees/_backend/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/lib/python3.12/site-packages/pydatastructs/trees/binary_trees.py b/lib/python3.12/site-packages/pydatastructs/trees/binary_trees.py new file mode 100644 index 000000000..48446d1d4 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/trees/binary_trees.py @@ -0,0 +1,1888 @@ +import random +from collections import deque as Queue +from pydatastructs.utils import TreeNode, CartesianTreeNode, RedBlackTreeNode +from pydatastructs.miscellaneous_data_structures import Stack +from pydatastructs.linear_data_structures import OneDimensionalArray +from pydatastructs.linear_data_structures.arrays import ArrayForTrees +from pydatastructs.utils.misc_util import Backend +from pydatastructs.trees._backend.cpp import _trees + +__all__ = [ + 'AVLTree', + 'BinaryTree', + 'BinarySearchTree', + 'BinaryTreeTraversal', + 'BinaryIndexedTree', + 'CartesianTree', + 'Treap', + 'SplayTree', + 'RedBlackTree' +] + +class BinaryTree(object): + """ + Abstract binary tree. + + Parameters + ========== + + key + Required if tree is to be instantiated with + root otherwise not needed. + root_data + Optional, the root node of the binary tree. 
+ If not of type TreeNode, it will consider + root as data and a new root node will + be created. + comp: lambda/function + Optional, A lambda function which will be used + for comparison of keys. Should return a + bool value. By default it implements less + than operator. + is_order_statistic: bool + Set it to True, if you want to use the + order statistic features of the tree. + backend: pydatastructs.Backend + The backend to be used. Available backends: Python and C++ + Optional, by default, the Python backend is used. For faster execution, use the C++ backend. + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Binary_tree + """ + + __slots__ = ['root_idx', 'comparator', 'tree', 'size', + 'is_order_statistic'] + + def __new__(cls, key=None, root_data=None, comp=None, + is_order_statistic=False, **kwargs): + backend = kwargs.get('backend', Backend.PYTHON) + if backend == Backend.CPP: + if comp is None: + comp = lambda key1, key2: key1 < key2 + return _trees.BinaryTree(key, root_data, comp, is_order_statistic, **kwargs) # If any argument is not given, then it is passed as None, except for comp + obj = object.__new__(cls) + if key is None and root_data is not None: + raise ValueError('Key required.') + key = None if root_data is None else key + root = TreeNode(key, root_data) + root.is_root = True + obj.root_idx = 0 + obj.tree, obj.size = ArrayForTrees(TreeNode, [root]), 1 + obj.comparator = lambda key1, key2: key1 < key2 \ + if comp is None else comp + obj.is_order_statistic = is_order_statistic + return obj + + @classmethod + def methods(cls): + return ['__new__', '__str__'] + + def insert(self, key, data=None): + """ + Inserts data by the passed key using iterative + algorithm. + + Parameters + ========== + + key + The key for comparison. + + data + The data to be inserted. 
+ + Returns + ======= + + None + """ + raise NotImplementedError("This is an abstract method.") + + def delete(self, key, **kwargs): + """ + Deletes the data with the passed key + using iterative algorithm. + + Parameters + ========== + + key + The key of the node which is + to be deleted. + balancing_info: bool + Optional, by default, False + The information needed for updating + the tree is returned if this parameter + is set to True. It is not meant for + user facing APIs. + + Returns + ======= + + True + If the node is deleted successfully. + None + If the node to be deleted doesn't exists. + + Note + ==== + + The node is deleted means that the connection to that + node are removed but the it is still in three. This + is being done to keep the complexity of deletion, O(logn). + """ + raise NotImplementedError("This is an abstract method.") + + def search(self, key, **kwargs): + """ + Searches for the data in the binary search tree + using iterative algorithm. + + Parameters + ========== + + key + The key for searching. + parent: bool + If true then returns index of the + parent of the node with the passed + key. + By default, False + + Returns + ======= + + int + If the node with the passed key is + in the tree. + tuple + The index of the searched node and + the index of the parent of that node. + None + In all other cases. + """ + raise NotImplementedError("This is an abstract method.") + + + def __str__(self): + to_be_printed = ['' for i in range(self.tree._last_pos_filled + 1)] + for i in range(self.tree._last_pos_filled + 1): + if self.tree[i] is not None: + node = self.tree[i] + to_be_printed[i] = (node.left, node.key, node.data, node.right) + return str(to_be_printed) + +class BinarySearchTree(BinaryTree): + """ + Represents binary search trees. 
+ + Examples + ======== + + >>> from pydatastructs.trees import BinarySearchTree as BST + >>> b = BST() + >>> b.insert(1, 1) + >>> b.insert(2, 2) + >>> child = b.tree[b.root_idx].right + >>> b.tree[child].data + 2 + >>> b.search(1) + 0 + >>> b.search(-1) is None + True + >>> b.delete(1) is True + True + >>> b.search(1) is None + True + >>> b.delete(2) is True + True + >>> b.search(2) is None + True + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Binary_search_tree + + See Also + ======== + + pydatastructs.trees.binary_tree.BinaryTree + """ + + @classmethod + def methods(cls): + return ['insert', 'search', 'delete', 'select', + 'rank', 'lowest_common_ancestor'] + + left_size = lambda self, node: self.tree[node.left].size \ + if node.left is not None else 0 + right_size = lambda self, node: self.tree[node.right].size \ + if node.right is not None else 0 + def __new__(cls, key=None, root_data=None, comp=None, + is_order_statistic=False, **kwargs): + backend = kwargs.get('backend', Backend.PYTHON) + if backend == Backend.CPP: + if comp is None: + comp = lambda key1, key2: key1 < key2 + return _trees.BinarySearchTree(key, root_data, comp, is_order_statistic, **kwargs) # If any argument is not given, then it is passed as None, except for comp + return super().__new__(cls, key, root_data, comp, is_order_statistic, **kwargs) + + def _update_size(self, start_idx): + if self.is_order_statistic: + walk = start_idx + while walk is not None: + self.tree[walk].size = ( + self.left_size(self.tree[walk]) + + self.right_size(self.tree[walk]) + 1) + walk = self.tree[walk].parent + + def insert(self, key, data=None): + res = self.search(key) + if res is not None: + self.tree[res].data = data + return None + walk = self.root_idx + if self.tree[walk].key is None: + self.tree[walk].key = key + self.tree[walk].data = data + return None + new_node, prev_node, flag = TreeNode(key, data), self.root_idx, True + while flag: + if not self.comparator(key, 
self.tree[walk].key): + if self.tree[walk].right is None: + new_node.parent = prev_node + self.tree.append(new_node) + self.tree[walk].right = self.size + self.size += 1 + flag = False + prev_node = walk = self.tree[walk].right + else: + if self.tree[walk].left is None: + new_node.parent = prev_node + self.tree.append(new_node) + self.tree[walk].left = self.size + self.size += 1 + flag = False + prev_node = walk = self.tree[walk].left + self._update_size(walk) + + def search(self, key, **kwargs): + ret_parent = kwargs.get('parent', False) + parent = None + walk = self.root_idx + if self.tree[walk].key is None: + return None + while walk is not None: + if self.tree[walk].key == key: + break + parent = walk + if self.comparator(key, self.tree[walk].key): + walk = self.tree[walk].left + else: + walk = self.tree[walk].right + return (walk, parent) if ret_parent else walk + + def _bound_helper(self, node_idx, bound_key, is_upper=False): + if node_idx is None: + return None + if self.tree[node_idx].key is None: + return None + + if self.tree[node_idx].key == bound_key: + if not is_upper: + return self.tree[node_idx].key + else: + return self._bound_helper(self.tree[node_idx].right, + bound_key, is_upper) + + if self.comparator(self.tree[node_idx].key, bound_key): + return self._bound_helper(self.tree[node_idx].right, + bound_key, is_upper) + else: + res_bound = self._bound_helper(self.tree[node_idx].left, + bound_key, is_upper) + return res_bound if res_bound is not None else self.tree[node_idx].key + + + def lower_bound(self, key, **kwargs): + """ + Finds the lower bound of the given key in the tree + + Parameters + ========== + + key + The key for comparison + + Examples + ======== + + >>> from pydatastructs.trees import BinarySearchTree as BST + >>> b = BST() + >>> b.insert(10, 10) + >>> b.insert(18, 18) + >>> b.insert(7, 7) + >>> b.lower_bound(9) + 10 + >>> b.lower_bound(7) + 7 + >>> b.lower_bound(20) is None + True + + Returns + ======= + + value + The lower bound 
of the given key. + Returns None if the value doesn't exist + """ + return self._bound_helper(self.root_idx, key) + + + def upper_bound(self, key, **kwargs): + """ + Finds the upper bound of the given key in the tree + + Parameters + ========== + + key + The key for comparison + + Examples + ======== + + >>> from pydatastructs.trees import BinarySearchTree as BST + >>> b = BST() + >>> b.insert(10, 10) + >>> b.insert(18, 18) + >>> b.insert(7, 7) + >>> b.upper_bound(9) + 10 + >>> b.upper_bound(7) + 10 + >>> b.upper_bound(20) is None + True + + Returns + ======= + + value + The upper bound of the given key. + Returns None if the value doesn't exist + """ + return self._bound_helper(self.root_idx, key, True) + + + def delete(self, key, **kwargs): + (walk, parent) = self.search(key, parent=True) + a = None + if walk is None: + return None + if self.tree[walk].left is None and \ + self.tree[walk].right is None: + if parent is None: + self.tree[self.root_idx].data = None + self.tree[self.root_idx].key = None + else: + if self.tree[parent].left == walk: + self.tree[parent].left = None + else: + self.tree[parent].right = None + a = parent + par_key, root_key = (self.tree[parent].key, + self.tree[self.root_idx].key) + new_indices = self.tree.delete(walk) + if new_indices is not None: + a = new_indices[par_key] + self.root_idx = new_indices[root_key] + self._update_size(a) + + elif self.tree[walk].left is not None and \ + self.tree[walk].right is not None: + twalk = self.tree[walk].right + par = walk + flag = False + while self.tree[twalk].left is not None: + flag = True + par = twalk + twalk = self.tree[twalk].left + self.tree[walk].data = self.tree[twalk].data + self.tree[walk].key = self.tree[twalk].key + if flag: + self.tree[par].left = self.tree[twalk].right + else: + self.tree[par].right = self.tree[twalk].right + if self.tree[twalk].right is not None: + self.tree[self.tree[twalk].right].parent = par + if twalk is not None: + a = par + par_key, root_key = 
(self.tree[par].key, + self.tree[self.root_idx].key) + new_indices = self.tree.delete(twalk) + if new_indices is not None: + a = new_indices[par_key] + self.root_idx = new_indices[root_key] + self._update_size(a) + + else: + if self.tree[walk].left is not None: + child = self.tree[walk].left + else: + child = self.tree[walk].right + if parent is None: + self.tree[self.root_idx].left = self.tree[child].left + self.tree[self.root_idx].right = self.tree[child].right + self.tree[self.root_idx].data = self.tree[child].data + self.tree[self.root_idx].key = self.tree[child].key + self.tree[self.root_idx].parent = None + root_key = self.tree[self.root_idx].key + new_indices = self.tree.delete(child) + if new_indices is not None: + self.root_idx = new_indices[root_key] + else: + if self.tree[parent].left == walk: + self.tree[parent].left = child + else: + self.tree[parent].right = child + self.tree[child].parent = parent + a = parent + par_key, root_key = (self.tree[parent].key, + self.tree[self.root_idx].key) + new_indices = self.tree.delete(walk) + if new_indices is not None: + parent = new_indices[par_key] + self.tree[child].parent = new_indices[par_key] + a = new_indices[par_key] + self.root_idx = new_indices[root_key] + self._update_size(a) + + if kwargs.get("balancing_info", False) is not False: + return a + return True + + def select(self, i): + """ + Finds the i-th smallest node in the tree. + + Parameters + ========== + + i: int + A positive integer + + Returns + ======= + + n: TreeNode + The node with the i-th smallest key + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/Order_statistic_tree + """ + i -= 1 # The algorithm is based on zero indexing + if i < 0: + raise ValueError("Expected a positive integer, got %d"%(i + 1)) + if i >= self.tree._num: + raise ValueError("%d is greater than the size of the " + "tree which is, %d"%(i + 1, self.tree._num)) + walk = self.root_idx + while walk is not None: + l = self.left_size(self.tree[walk]) + if i == l: + return self.tree[walk] + left_walk = self.tree[walk].left + right_walk = self.tree[walk].right + if left_walk is None and right_walk is None: + raise IndexError("The traversal is terminated " + "due to no child nodes ahead.") + if i < l: + if left_walk is not None and \ + self.comparator(self.tree[left_walk].key, + self.tree[walk].key): + walk = left_walk + else: + walk = right_walk + else: + if right_walk is not None and \ + not self.comparator(self.tree[right_walk].key, + self.tree[walk].key): + walk = right_walk + else: + walk = left_walk + i -= (l + 1) + + def rank(self, x): + """ + Finds the rank of the given node, i.e. + its index in the sorted list of nodes + of the tree. + + Parameters + ========== + + x: key + The key of the node whose rank is to be found out. + """ + walk = self.search(x) + if walk is None: + return None + r = self.left_size(self.tree[walk]) + 1 + while self.tree[walk].key != self.tree[self.root_idx].key: + p = self.tree[walk].parent + if walk == self.tree[p].right: + r += self.left_size(self.tree[p]) + 1 + walk = p + return r + + def _simple_path(self, key, root): + """ + Utility funtion to find the simple path between root and node. 
+ + Parameters + ========== + + key: Node.key + Key of the node to be searched + + Returns + ======= + + path: list + """ + + stack = Stack() + stack.push(root) + path = [] + node_idx = -1 + + while not stack.is_empty: + node = stack.pop() + if self.tree[node].key == key: + node_idx = node + break + if self.tree[node].left: + stack.push(self.tree[node].left) + if self.tree[node].right: + stack.push(self.tree[node].right) + + if node_idx == -1: + return path + + while node_idx != 0: + path.append(node_idx) + node_idx = self.tree[node_idx].parent + path.append(0) + path.reverse() + + return path + + def _lca_1(self, j, k): + root = self.root_idx + path1 = self._simple_path(j, root) + path2 = self._simple_path(k, root) + if not path1 or not path2: + raise ValueError("One of two path doesn't exists. See %s, %s" + %(path1, path2)) + + n, m = len(path1), len(path2) + i = j = 0 + while i < n and j < m: + if path1[i] != path2[j]: + return self.tree[path1[i - 1]].key + i += 1 + j += 1 + if path1 < path2: + return self.tree[path1[-1]].key + return self.tree[path2[-1]].key + + def _lca_2(self, j, k): + curr_root = self.root_idx + u, v = self.search(j), self.search(k) + if (u is None) or (v is None): + raise ValueError("One of the nodes with key %s " + "or %s doesn't exits"%(j, k)) + u_left = self.comparator(self.tree[u].key, \ + self.tree[curr_root].key) + v_left = self.comparator(self.tree[v].key, \ + self.tree[curr_root].key) + + while not (u_left ^ v_left): + if u_left and v_left: + curr_root = self.tree[curr_root].left + else: + curr_root = self.tree[curr_root].right + + if curr_root == u or curr_root == v: + if curr_root is None: + return None + return self.tree[curr_root].key + u_left = self.comparator(self.tree[u].key, \ + self.tree[curr_root].key) + v_left = self.comparator(self.tree[v].key, \ + self.tree[curr_root].key) + + if curr_root is None: + return curr_root + return self.tree[curr_root].key + + def lowest_common_ancestor(self, j, k, algorithm=1): + + """ + 
Computes the lowest common ancestor of two nodes. + + Parameters + ========== + + j: Node.key + Key of first node + + k: Node.key + Key of second node + + algorithm: int + The algorithm to be used for computing the + lowest common ancestor. + Optional, by default uses algorithm 1. + + 1 -> Determines the lowest common ancestor by finding + the first intersection of the paths from v and w + to the root. + + 2 -> Modifed version of the algorithm given in the + following publication, + D. Harel. A linear time algorithm for the + lowest common ancestors problem. In 21s + Annual Symposium On Foundations of + Computer Science, pages 308-319, 1980. + + Returns + ======= + + Node.key + The key of the lowest common ancestor in the tree. + if both the nodes are present in the tree. + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Lowest_common_ancestor + + .. [2] https://pdfs.semanticscholar.org/e75b/386cc554214aa0ebd6bd6dbdd0e490da3739.pdf + + """ + return getattr(self, "_lca_"+str(algorithm))(j, k) + +class SelfBalancingBinaryTree(BinarySearchTree): + """ + Represents Base class for all rotation based balancing trees like AVL tree, Red Black tree, Splay Tree. 
+ """ + def __new__(cls, key=None, root_data=None, comp=None, + is_order_statistic=False, **kwargs): + backend = kwargs.get('backend', Backend.PYTHON) + if backend == Backend.CPP: + if comp is None: + comp = lambda key1, key2: key1 < key2 + return _trees.SelfBalancingBinaryTree(key, root_data, comp, is_order_statistic, **kwargs) # If any argument is not given, then it is passed as None, except for comp + return super().__new__(cls, key, root_data, comp, is_order_statistic, **kwargs) + + def _right_rotate(self, j, k): + y = self.tree[k].right + if y is not None: + self.tree[y].parent = j + self.tree[j].left = y + self.tree[k].parent = self.tree[j].parent + if self.tree[k].parent is not None: + if self.tree[self.tree[k].parent].left == j: + self.tree[self.tree[k].parent].left = k + else: + self.tree[self.tree[k].parent].right = k + self.tree[j].parent = k + self.tree[k].right = j + kp = self.tree[k].parent + if kp is None: + self.root_idx = k + + def _left_right_rotate(self, j, k): + i = self.tree[k].right + v, w = self.tree[i].left, self.tree[i].right + self.tree[k].right, self.tree[j].left = v, w + if v is not None: + self.tree[v].parent = k + if w is not None: + self.tree[w].parent = j + self.tree[i].left, self.tree[i].right, self.tree[i].parent = \ + k, j, self.tree[j].parent + self.tree[k].parent, self.tree[j].parent = i, i + ip = self.tree[i].parent + if ip is not None: + if self.tree[ip].left == j: + self.tree[ip].left = i + else: + self.tree[ip].right = i + else: + self.root_idx = i + + def _right_left_rotate(self, j, k): + i = self.tree[k].left + v, w = self.tree[i].left, self.tree[i].right + self.tree[k].left, self.tree[j].right = w, v + if v is not None: + self.tree[v].parent = j + if w is not None: + self.tree[w].parent = k + self.tree[i].right, self.tree[i].left, self.tree[i].parent = \ + k, j, self.tree[j].parent + self.tree[k].parent, self.tree[j].parent = i, i + ip = self.tree[i].parent + if ip is not None: + if self.tree[ip].left == j: + 
self.tree[ip].left = i + else: + self.tree[ip].right = i + else: + self.root_idx = i + + def _left_rotate(self, j, k): + y = self.tree[k].left + if y is not None: + self.tree[y].parent = j + self.tree[j].right = y + self.tree[k].parent = self.tree[j].parent + if self.tree[k].parent is not None: + if self.tree[self.tree[k].parent].left == j: + self.tree[self.tree[k].parent].left = k + else: + self.tree[self.tree[k].parent].right = k + self.tree[j].parent = k + self.tree[k].left = j + kp = self.tree[k].parent + if kp is None: + self.root_idx = k + +class CartesianTree(SelfBalancingBinaryTree): + """ + Represents cartesian trees. + + Examples + ======== + + >>> from pydatastructs.trees import CartesianTree as CT + >>> c = CT() + >>> c.insert(1, 4, 1) + >>> c.insert(2, 3, 2) + >>> child = c.tree[c.root_idx].left + >>> c.tree[child].data + 1 + >>> c.search(1) + 0 + >>> c.search(-1) is None + True + >>> c.delete(1) is True + True + >>> c.search(1) is None + True + >>> c.delete(2) is True + True + >>> c.search(2) is None + True + + References + ========== + + .. 
[1] https://www.cs.princeton.edu/courses/archive/spr09/cos423/Lectures/geo-st.pdf + + See Also + ======== + + pydatastructs.trees.binary_trees.SelfBalancingBinaryTree + """ + def __new__(cls, key=None, root_data=None, comp=None, + is_order_statistic=False, **kwargs): + backend = kwargs.get('backend', Backend.PYTHON) + if backend == Backend.CPP: + if comp is None: + comp = lambda key1, key2: key1 < key2 + return _trees.CartesianTree(key, root_data, comp, is_order_statistic, **kwargs) # If any argument is not given, then it is passed as None, except for comp + return super().__new__(cls, key, root_data, comp, is_order_statistic, **kwargs) + + @classmethod + def methods(cls): + return ['__new__', '__str__', 'insert', 'delete'] + + def _bubble_up(self, node_idx): + node = self.tree[node_idx] + parent_idx = self.tree[node_idx].parent + parent = self.tree[parent_idx] + while (node.parent is not None) and (parent.priority > node.priority): + if parent.right == node_idx: + self._left_rotate(parent_idx, node_idx) + else: + self._right_rotate(parent_idx, node_idx) + node = self.tree[node_idx] + parent_idx = self.tree[node_idx].parent + if parent_idx is not None: + parent = self.tree[parent_idx] + if node.parent is None: + self.tree[node_idx].is_root = True + + def _trickle_down(self, node_idx): + node = self.tree[node_idx] + while node.left is not None or node.right is not None: + if node.left is None: + self._left_rotate(node_idx, self.tree[node_idx].right) + elif node.right is None: + self._right_rotate(node_idx, self.tree[node_idx].left) + elif self.tree[node.left].priority < self.tree[node.right].priority: + self._right_rotate(node_idx, self.tree[node_idx].left) + else: + self._left_rotate(node_idx, self.tree[node_idx].right) + node = self.tree[node_idx] + + def insert(self, key, priority, data=None): + super(CartesianTree, self).insert(key, data) + node_idx = super(CartesianTree, self).search(key) + node = self.tree[node_idx] + new_node = CartesianTreeNode(key, 
priority, data) + new_node.parent = node.parent + new_node.left = node.left + new_node.right = node.right + self.tree[node_idx] = new_node + if node.is_root: + self.tree[node_idx].is_root = True + else: + self._bubble_up(node_idx) + + def delete(self, key, **kwargs): + balancing_info = kwargs.get('balancing_info', False) + node_idx = super(CartesianTree, self).search(key) + if node_idx is not None: + self._trickle_down(node_idx) + return super(CartesianTree, self).delete(key, balancing_info = balancing_info) + + def __str__(self): + to_be_printed = ['' for i in range(self.tree._last_pos_filled + 1)] + for i in range(self.tree._last_pos_filled + 1): + if self.tree[i] is not None: + node = self.tree[i] + to_be_printed[i] = (node.left, node.key, node.priority, node.data, node.right) + return str(to_be_printed) + +class Treap(CartesianTree): + """ + Represents treaps. + + Examples + ======== + + >>> from pydatastructs.trees import Treap as T + >>> t = T() + >>> t.insert(1, 1) + >>> t.insert(2, 2) + >>> t.search(1) + 0 + >>> t.search(-1) is None + True + >>> t.delete(1) is True + True + >>> t.search(1) is None + True + >>> t.delete(2) is True + True + >>> t.search(2) is None + True + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Treap + + """ + def __new__(cls, key=None, root_data=None, comp=None, + is_order_statistic=False, **kwargs): + backend = kwargs.get('backend', Backend.PYTHON) + if backend == Backend.CPP: + if comp is None: + comp = lambda key1, key2: key1 < key2 + return _trees.Treap(key, root_data, comp, is_order_statistic, **kwargs) # If any argument is not given, then it is passed as None, except for comp + return super().__new__(cls, key, root_data, comp, is_order_statistic, **kwargs) + + @classmethod + def methods(cls): + return ['__new__', 'insert'] + + def insert(self, key, data=None): + priority = random.random() + super(Treap, self).insert(key, priority, data) + +class AVLTree(SelfBalancingBinaryTree): + """ + Represents AVL trees. 
+ + References + ========== + + .. [1] https://courses.cs.washington.edu/courses/cse373/06sp/handouts/lecture12.pdf + .. [2] https://en.wikipedia.org/wiki/AVL_tree + .. [3] http://faculty.cs.niu.edu/~freedman/340/340notes/340avl2.htm + + See Also + ======== + + pydatastructs.trees.binary_trees.BinaryTree + """ + + def __new__(cls, key=None, root_data=None, comp=None, + is_order_statistic=False, **kwargs): + backend = kwargs.get('backend', Backend.PYTHON) + if backend == Backend.CPP: + if comp is None: + comp = lambda key1, key2: key1 < key2 + return _trees.AVLTree(key, root_data, comp, is_order_statistic, **kwargs) # If any argument is not given, then it is passed as None, except for comp + return super().__new__(cls, key, root_data, comp, is_order_statistic, **kwargs) + + @classmethod + def methods(cls): + return ['__new__', 'set_tree', 'insert', 'delete'] + + left_height = lambda self, node: self.tree[node.left].height \ + if node.left is not None else -1 + right_height = lambda self, node: self.tree[node.right].height \ + if node.right is not None else -1 + balance_factor = lambda self, node: self.right_height(node) - \ + self.left_height(node) + + def set_tree(self, arr): + self.tree = arr + + def _right_rotate(self, j, k): + super(AVLTree, self)._right_rotate(j, k) + self.tree[j].height = max(self.left_height(self.tree[j]), + self.right_height(self.tree[j])) + 1 + if self.is_order_statistic: + self.tree[j].size = (self.left_size(self.tree[j]) + + self.right_size(self.tree[j]) + 1) + + def _left_right_rotate(self, j, k): + super(AVLTree, self)._left_right_rotate(j, k) + self.tree[j].height = max(self.left_height(self.tree[j]), + self.right_height(self.tree[j])) + 1 + self.tree[k].height = max(self.left_height(self.tree[k]), + self.right_height(self.tree[k])) + 1 + if self.is_order_statistic: + self.tree[j].size = (self.left_size(self.tree[j]) + + self.right_size(self.tree[j]) + 1) + self.tree[k].size = (self.left_size(self.tree[k]) + + 
self.right_size(self.tree[k]) + 1) + + def _right_left_rotate(self, j, k): + super(AVLTree, self)._right_left_rotate(j, k) + self.tree[j].height = max(self.left_height(self.tree[j]), + self.right_height(self.tree[j])) + 1 + self.tree[k].height = max(self.left_height(self.tree[k]), + self.right_height(self.tree[k])) + 1 + if self.is_order_statistic: + self.tree[j].size = (self.left_size(self.tree[j]) + + self.right_size(self.tree[j]) + 1) + self.tree[k].size = (self.left_size(self.tree[k]) + + self.right_size(self.tree[k]) + 1) + + def _left_rotate(self, j, k): + super(AVLTree, self)._left_rotate(j, k) + self.tree[j].height = max(self.left_height(self.tree[j]), + self.right_height(self.tree[j])) + 1 + self.tree[k].height = max(self.left_height(self.tree[k]), + self.right_height(self.tree[k])) + 1 + if self.is_order_statistic: + self.tree[j].size = (self.left_size(self.tree[j]) + + self.right_size(self.tree[j]) + 1) + + def _balance_insertion(self, curr, last): + walk = last + path = Queue() + path.append(curr), path.append(last) + while walk is not None: + self.tree[walk].height = max(self.left_height(self.tree[walk]), + self.right_height(self.tree[walk])) + 1 + if self.is_order_statistic: + self.tree[walk].size = (self.left_size(self.tree[walk]) + + self.right_size(self.tree[walk]) + 1) + last = path.popleft() + last2last = path.popleft() + if self.balance_factor(self.tree[walk]) not in (1, 0, -1): + l = self.tree[walk].left + if l is not None and l == last and self.tree[l].left == last2last: + self._right_rotate(walk, last) + r = self.tree[walk].right + if r is not None and r == last and self.tree[r].right == last2last: + self._left_rotate(walk, last) + if l is not None and l == last and self.tree[l].right == last2last: + self._left_right_rotate(walk, last) + if r is not None and r == last and self.tree[r].left == last2last: + self._right_left_rotate(walk, last) + path.append(walk), path.append(last) + walk = self.tree[walk].parent + + def insert(self, key, 
data=None): + super(AVLTree, self).insert(key, data) + self._balance_insertion(self.size - 1, self.tree[self.size-1].parent) + + def _balance_deletion(self, start_idx, key): + walk = start_idx + while walk is not None: + self.tree[walk].height = max(self.left_height(self.tree[walk]), + self.right_height(self.tree[walk])) + 1 + if self.is_order_statistic: + self.tree[walk].size = (self.left_size(self.tree[walk]) + + self.right_size(self.tree[walk]) + 1) + if self.balance_factor(self.tree[walk]) not in (1, 0, -1): + if self.balance_factor(self.tree[walk]) < 0: + b = self.tree[walk].left + if self.balance_factor(self.tree[b]) <= 0: + self._right_rotate(walk, b) + else: + self._left_right_rotate(walk, b) + else: + b = self.tree[walk].right + if self.balance_factor(self.tree[b]) >= 0: + self._left_rotate(walk, b) + else: + self._right_left_rotate(walk, b) + walk = self.tree[walk].parent + + + def delete(self, key, **kwargs): + a = super(AVLTree, self).delete(key, balancing_info=True) + self._balance_deletion(a, key) + return True + +class SplayTree(SelfBalancingBinaryTree): + """ + Represents Splay Trees. + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/Splay_tree + + """ + + def __new__(cls, key=None, root_data=None, comp=None, + is_order_statistic=False, **kwargs): + backend = kwargs.get('backend', Backend.PYTHON) + if backend == Backend.CPP: + if comp is None: + comp = lambda key1, key2: key1 < key2 + return _trees.SplayTree(key, root_data, comp, is_order_statistic, **kwargs) # If any argument is not given, then it is passed as None, except for comp + return super().__new__(cls, key, root_data, comp, is_order_statistic, **kwargs) + + @classmethod + def methods(cls): + return ['__new__', 'insert', 'delete', 'join', 'split'] + + def _zig(self, x, p): + if self.tree[p].left == x: + super(SplayTree, self)._right_rotate(p, x) + else: + super(SplayTree, self)._left_rotate(p, x) + + def _zig_zig(self, x, p): + super(SplayTree, self)._right_rotate(self.tree[p].parent, p) + super(SplayTree, self)._right_rotate(p, x) + + def _zig_zag(self, p): + super(SplayTree, self)._left_right_rotate(self.tree[p].parent, p) + + def _zag_zag(self, x, p): + super(SplayTree, self)._left_rotate(self.tree[p].parent, p) + super(SplayTree, self)._left_rotate(p, x) + + def _zag_zig(self, p): + super(SplayTree, self)._right_left_rotate(self.tree[p].parent, p) + + def splay(self, x, p): + while self.tree[x].parent is not None: + if self.tree[p].parent is None: + self._zig(x, p) + elif self.tree[p].left == x and \ + self.tree[self.tree[p].parent].left == p: + self._zig_zig(x, p) + elif self.tree[p].right == x and \ + self.tree[self.tree[p].parent].right == p: + self._zag_zag(x, p) + elif self.tree[p].left == x and \ + self.tree[self.tree[p].parent].right == p: + self._zag_zig(p) + else: + self._zig_zag(p) + p = self.tree[x].parent + + def insert(self, key, x): + super(SelfBalancingBinaryTree, self).insert(key, x) + e, p = super(SelfBalancingBinaryTree, self).search(key, parent=True) + self.tree[self.size-1].parent = p + self.splay(e, p) + + def delete(self, x): + e, p = super(SelfBalancingBinaryTree, 
self).search(x, parent=True) + if e is None: + return + self.splay(e, p) + status = super(SelfBalancingBinaryTree, self).delete(x) + return status + + def join(self, other): + """ + Joins two trees current and other such that all elements of + the current splay tree are smaller than the elements of the other tree. + + Parameters + ========== + + other: SplayTree + SplayTree which needs to be joined with the self tree. + + """ + maxm = self.root_idx + while self.tree[maxm].right is not None: + maxm = self.tree[maxm].right + minm = other.root_idx + while other.tree[minm].left is not None: + minm = other.tree[minm].left + if not self.comparator(self.tree[maxm].key, + other.tree[minm].key): + raise ValueError("Elements of %s aren't less " + "than that of %s"%(self, other)) + self.splay(maxm, self.tree[maxm].parent) + idx_update = self.tree._size + for node in other.tree: + if node is not None: + node_copy = TreeNode(node.key, node.data) + if node.left is not None: + node_copy.left = node.left + idx_update + if node.right is not None: + node_copy.right = node.right + idx_update + self.tree.append(node_copy) + else: + self.tree.append(node) + self.tree[self.root_idx].right = \ + other.root_idx + idx_update + + def split(self, x): + """ + Splits current splay tree into two trees such that one tree contains nodes + with key less than or equal to x and the other tree containing + nodes with key greater than x. + + Parameters + ========== + + x: key + Key of the element on the basis of which split is performed. + + Returns + ======= + + other: SplayTree + SplayTree containing elements with key greater than x. 
+ + """ + e, p = super(SelfBalancingBinaryTree, self).search(x, parent=True) + if e is None: + return + self.splay(e, p) + other = SplayTree(None, None) + if self.tree[self.root_idx].right is not None: + traverse = BinaryTreeTraversal(self) + elements = traverse.depth_first_search(order='pre_order', node=self.tree[self.root_idx].right) + for i in range(len(elements)): + super(SelfBalancingBinaryTree, other).insert(elements[i].key, elements[i].data) + for j in range(len(elements) - 1, -1, -1): + e, p = super(SelfBalancingBinaryTree, self).search(elements[j].key, parent=True) + self.tree[e] = None + self.tree[self.root_idx].right = None + return other + +class RedBlackTree(SelfBalancingBinaryTree): + """ + Represents Red Black trees. + + Examples + ======== + + >>> from pydatastructs.trees import RedBlackTree as RB + >>> b = RB() + >>> b.insert(1, 1) + >>> b.insert(2, 2) + >>> child = b.tree[b.root_idx].right + >>> b.tree[child].data + 2 + >>> b.search(1) + 0 + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/Red%E2%80%93black_tree + + See Also + ======== + + pydatastructs.trees.binary_trees.SelfBalancingBinaryTree + """ + + def __new__(cls, key=None, root_data=None, comp=None, + is_order_statistic=False, **kwargs): + backend = kwargs.get('backend', Backend.PYTHON) + if backend == Backend.CPP: + if comp is None: + comp = lambda key1, key2: key1 < key2 + return _trees.RedBlackTree(key, root_data, comp, is_order_statistic, **kwargs) # If any argument is not given, then it is passed as None, except for comp + return super().__new__(cls, key, root_data, comp, is_order_statistic, **kwargs) + + @classmethod + def methods(cls): + return ['__new__', 'insert', 'delete'] + + def _get_parent(self, node_idx): + return self.tree[node_idx].parent + + def _get_grand_parent(self, node_idx): + parent_idx=self._get_parent(node_idx) + return self.tree[parent_idx].parent + + def _get_sibling(self, node_idx): + parent_idx=self._get_parent(node_idx) + if parent_idx is None: + return None + node = self.tree[parent_idx] + if node_idx==node.left: + sibling_idx=node.right + return sibling_idx + else: + sibling_idx=node.left + return sibling_idx + + def _get_uncle(self, node_idx): + parent_idx=self._get_parent(node_idx) + return self._get_sibling(parent_idx) + + def _is_onleft(self, node_idx): + parent = self._get_parent(node_idx) + if self.tree[parent].left == node_idx: + return True + return False + + def _is_onright(self, node_idx): + if self._is_onleft(node_idx) is False: + return True + return False + + def __fix_insert(self, node_idx): + while self._get_parent(node_idx) is not None and \ + self.tree[self._get_parent(node_idx)].color == 1 and self.tree[node_idx].color==1: + parent_idx=self._get_parent(node_idx) + grand_parent_idx=self._get_grand_parent(node_idx) + uncle_idx = self._get_uncle(node_idx) + if uncle_idx is not None and self.tree[uncle_idx].color == 1: + self.tree[uncle_idx].color = 0 + self.tree[parent_idx].color = 0 + 
self.tree[grand_parent_idx].color = 1 + node_idx= grand_parent_idx + else: + self.tree[self.root_idx].is_root=False + if self._is_onright(parent_idx): + if self._is_onleft(node_idx): + self._right_rotate(parent_idx, node_idx) + node_idx=parent_idx + parent_idx=self._get_parent(node_idx) + node_idx=parent_idx + parent_idx=self._get_parent(node_idx) + self._left_rotate(parent_idx, node_idx) + elif self._is_onleft(parent_idx): + if self._is_onright(node_idx): + self._left_rotate(parent_idx, node_idx) + node_idx=parent_idx + parent_idx=self._get_parent(node_idx) + node_idx=parent_idx + parent_idx=self._get_parent(node_idx) + self._right_rotate(parent_idx, node_idx) + self.tree[node_idx].color = 0 + self.tree[parent_idx].color = 1 + self.tree[self.root_idx].is_root=True + if self.tree[node_idx].is_root: + break + self.tree[self.root_idx].color=0 + + def insert(self, key, data=None): + super(RedBlackTree, self).insert(key, data) + node_idx = super(RedBlackTree, self).search(key) + node = self.tree[node_idx] + new_node = RedBlackTreeNode(key, data) + new_node.parent = node.parent + new_node.left = node.left + new_node.right = node.right + self.tree[node_idx] = new_node + if node.is_root: + self.tree[node_idx].is_root = True + self.tree[node_idx].color=0 + elif self.tree[self.tree[node_idx].parent].color==1: + self.__fix_insert(node_idx) + + def _find_predecessor(self, node_idx): + while self.tree[node_idx].right is not None: + node_idx = self.tree[node_idx].right + return node_idx + + def _transplant_values(self, node_idx1, node_idx2): + parent = self.tree[node_idx1].parent + if self.tree[node_idx1].is_root and self._has_one_child(node_idx1): + self.tree[self.root_idx].key = self.tree[node_idx2].key + self.tree[self.root_idx].data = self.tree[node_idx2].data + self.tree[self.root_idx].left = self.tree[node_idx2].left + self.tree[self.root_idx].right = self.tree[node_idx2].right + self.tree[node_idx1].parent = None + return self.tree[self.root_idx].key + else: + 
self.tree[node_idx1].key = self.tree[node_idx2].key + self.tree[node_idx1].data = self.tree[node_idx2].data + + def _has_one_child(self, node_idx): + if self._is_leaf(node_idx) is False and self._has_two_child(node_idx) is False: + return True + return False + + def _is_leaf(self, node_idx): + if self.tree[node_idx].left is None and self.tree[node_idx].right is None: + return True + return False + + def _has_two_child(self, node_idx): + if self.tree[node_idx].left is not None and self.tree[node_idx].right is not None: + return True + return False + + def __has_red_child(self, node_idx): + left_idx = self.tree[node_idx].left + right_idx = self.tree[node_idx].right + if (left_idx is not None and self.tree[left_idx].color == 1) or \ + (right_idx is not None and self.tree[right_idx].color == 1): + return True + return False + + def _replace_node(self, node_idx): + if self._is_leaf(node_idx): + return None + elif self._has_one_child(node_idx): + if self.tree[node_idx].left is not None: + child = self.tree[node_idx].left + else: + child = self.tree[node_idx].right + return child + else: + return self._find_predecessor(self.tree[node_idx].left) + + def __walk1_walk_isblack(self, color, node_idx1): + if (node_idx1 is None or self.tree[node_idx1].color == 0) and (color == 0): + return True + return False + + def __left_left_siblingcase(self, node_idx): + left_idx = self.tree[node_idx].left + parent = self._get_parent(node_idx) + parent_color = self.tree[parent].color + self.tree[left_idx].color = self.tree[node_idx].color + self.tree[node_idx].color = parent_color + self._right_rotate(parent, node_idx) + + def __right_left_siblingcase(self, node_idx): + left_idx = self.tree[node_idx].left + parent = self._get_parent(node_idx) + parent_color = self.tree[parent].color + self.tree[left_idx].color = parent_color + self._right_rotate(node_idx, left_idx) + child = self._get_parent(node_idx) + self._left_rotate(parent, child) + + def __left_right_siblingcase(self, node_idx): + 
right_idx = self.tree[node_idx].right + parent = self._get_parent(node_idx) + parent_color = self.tree[parent].color + self.tree[right_idx].color = parent_color + self._left_rotate(node_idx, right_idx) + child = self._get_parent(node_idx) + self._right_rotate(parent, child) + + def __right_right_siblingcase(self, node_idx): + right_idx = self.tree[node_idx].right + parent = self._get_parent(node_idx) + parent_color = self.tree[parent].color + self.tree[right_idx].color = self.tree[node_idx].color + self.tree[node_idx].color = parent_color + self._left_rotate(parent, node_idx) + + def __fix_deletion(self, node_idx): + node = self.tree[node_idx] + color = node.color + while node_idx!= self.root_idx and color == 0: + sibling_idx = self._get_sibling(node_idx) + parent_idx = self._get_parent(node_idx) + if sibling_idx is None: + node_idx = parent_idx + continue + else: + if self.tree[sibling_idx].color == 1: + self.tree[self.root_idx].is_root = False + self.tree[parent_idx].color = 1 + self.tree[sibling_idx].color = 0 + if self._is_onleft(sibling_idx): + self._right_rotate(parent_idx, sibling_idx) + else: + self._left_rotate(parent_idx, sibling_idx) + self.tree[self.root_idx].is_root = True + continue + else: + if self.__has_red_child(sibling_idx): + self.tree[self.root_idx].is_root = False + left_idx = self.tree[sibling_idx].left + if self.tree[sibling_idx].left is not None and \ + self.tree[left_idx].color == 1: + if self._is_onleft(sibling_idx): + self.__left_left_siblingcase(sibling_idx) + else: + self.__right_left_siblingcase(sibling_idx) + else: + if self._is_onleft(sibling_idx): + self.__left_right_siblingcase(sibling_idx) + else: + self.__right_right_siblingcase(sibling_idx) + self.tree[self.root_idx].is_root = True + self.tree[parent_idx].color = 0 + else: + self.tree[sibling_idx].color = 1 + if self.tree[parent_idx].color == 0: + node_idx = parent_idx + continue + else: + self.tree[parent_idx].color = 0 + color = 1 + + def _remove_node(self, node_idx): + 
parent = self._get_parent(node_idx) + a = parent + if self._is_leaf(node_idx): + par_key, root_key = (self.tree[parent].key, self.tree[self.root_idx].key) + new_indices = self.tree.delete(node_idx) + if new_indices is not None: + a = new_indices[par_key] + self.root_idx = new_indices[root_key] + elif self._has_one_child(node_idx): + child = self._replace_node(node_idx) + parent = self._get_parent(node_idx) + par_key, root_key = (self.tree[parent].key, self.tree[self.root_idx].key) + new_indices = self.tree.delete(node_idx) + self._update_size(a) + + def _delete_root(self, node_idx, node_idx1): + if self._is_leaf(node_idx): + self.tree[self.root_idx].data = None + self.tree[self.root_idx].key = None + elif self._has_one_child(node_idx): + root_key = self._transplant_values(node_idx, node_idx1) + new_indices = self.tree.delete(node_idx1) + if new_indices is not None: + self.root_idx = new_indices[root_key] + + def __leaf_case(self, node_idx, node_idx1): + walk = node_idx + walk1 = node_idx1 + parent = self._get_parent(node_idx) + color = self.tree[walk].color + if parent is None: + self._delete_root(walk, walk1) + else: + if self.__walk1_walk_isblack(color, walk1): + self.__fix_deletion(walk) + else: + sibling_idx = self._get_sibling(walk) + if sibling_idx is not None: + self.tree[sibling_idx].color = 1 + if self._is_onleft(walk): + self.tree[parent].left = None + else: + self.tree[parent].right = None + self._remove_node(walk) + + def __one_child_case(self, node_idx, node_idx1): + walk = node_idx + walk1 = node_idx1 + walk_original_color = self.tree[walk].color + parent = self._get_parent(node_idx) + if parent is None: + self._delete_root(walk, walk1) + else: + if self._is_onleft(walk): + self.tree[parent].left = walk1 + else: + self.tree[parent].right = walk1 + self.tree[walk1].parent = parent + a = self._remove_node(walk) + if self.__walk1_walk_isblack(walk_original_color, walk1): + self.__fix_deletion(walk1) + else: + self.tree[walk1].color = 0 + + def 
__two_child_case(self, node_idx): + walk = node_idx + successor = self._replace_node(walk) + self._transplant_values(walk, successor) + walk = successor + walk1 = self._replace_node(walk) + return walk, walk1 + + def delete(self, key, **kwargs): + walk = super(RedBlackTree, self).search(key) + if walk is not None: + walk1 = self._replace_node(walk) + if self._has_two_child(walk): + walk, walk1 = self.__two_child_case(walk) + if self._is_leaf(walk): + self.__leaf_case(walk, walk1) + elif self._has_one_child(walk): + self.__one_child_case(walk, walk1) + return True + else: + return None + +class BinaryTreeTraversal(object): + """ + Represents the traversals possible in + a binary tree. + + Parameters + ========== + + tree: BinaryTree + The binary tree for whose traversal + is to be done. + backend: pydatastructs.Backend + The backend to be used. Available backends: Python and C++ + Optional, by default, the Python backend is used. For faster execution, use the C++ backend. + + Traversals + ========== + + - Depth First Search + In Order, Post Order, Pre Order Out Order + + - Breadth First Search + + Examples + ======== + + >>> from pydatastructs import BinarySearchTree as BST + >>> from pydatastructs import BinaryTreeTraversal as BTT + >>> b = BST(2, 2) + >>> b.insert(1, 1) + >>> b.insert(3, 3) + >>> trav = BTT(b) + >>> dfs = trav.depth_first_search() + >>> [str(n) for n in dfs] + ['(None, 1, 1, None)', '(1, 2, 2, 2)', '(None, 3, 3, None)'] + >>> bfs = trav.breadth_first_search() + >>> [str(n) for n in bfs] + ['(1, 2, 2, 2)', '(None, 1, 1, None)', '(None, 3, 3, None)'] + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/Tree_traversal + """ + + @classmethod + def methods(cls): + return ['__new__', 'depth_first_search', + 'breadth_first_search'] + + __slots__ = ['tree'] + + def __new__(cls, tree, **kwargs): + backend = kwargs.get('backend', Backend.PYTHON) + if backend == Backend.CPP: + return _trees.BinaryTreeTraversal(tree, **kwargs) + if not isinstance(tree, BinaryTree): + raise TypeError("%s is not a binary tree"%(tree)) + obj = object.__new__(cls) + obj.tree = tree + return obj + + def _pre_order(self, node): + """ + Utility method for computing pre-order + of a binary tree using iterative algorithm. + """ + visit = [] + tree, size = self.tree.tree, self.tree.size + s = Stack() + s.push(node) + while not s.is_empty: + node = s.pop() + visit.append(tree[node]) + if tree[node].right is not None: + s.push(tree[node].right) + if tree[node].left is not None: + s.push(tree[node].left) + return visit + + def _in_order(self, node): + """ + Utility method for computing in-order + of a binary tree using iterative algorithm. + """ + visit = [] + tree, size = self.tree.tree, self.tree.size + s = Stack() + while not s.is_empty or node is not None: + if node is not None: + s.push(node) + node = tree[node].left + else: + node = s.pop() + visit.append(tree[node]) + node = tree[node].right + return visit + + def _post_order(self, node): + """ + Utility method for computing post-order + of a binary tree using iterative algorithm. 
+ """ + visit = [] + tree, size = self.tree.tree, self.tree.size + s = Stack() + s.push(node) + last = OneDimensionalArray(int, size) + last.fill(False) + while not s.is_empty: + node = s.peek + l, r = tree[node].left, tree[node].right + cl, cr = l is None or last[l], r is None or last[r] + if cl and cr: + s.pop() + visit.append(tree[node]) + last[node] = True + continue + if not cr: + s.push(r) + if not cl: + s.push(l) + return visit + + def _out_order(self, node): + """ + Utility method for computing out-order + of a binary tree using iterative algorithm. + """ + return reversed(self._in_order(node)) + + def depth_first_search(self, order='in_order', node=None): + """ + Computes the depth first search traversal of the binary + trees. + + Parameters + ========== + + order : str + One of the strings, 'in_order', 'post_order', + 'pre_order', 'out_order'. + By default, it is set to, 'in_order'. + node : int + The index of the node from where the traversal + is to be instantiated. + + Returns + ======= + + list + Each element is of type 'TreeNode'. + """ + if node is None: + node = self.tree.root_idx + if order not in ('in_order', 'post_order', 'pre_order', 'out_order'): + raise NotImplementedError( + "%s order is not implemented yet." + "We only support `in_order`, `post_order`, " + "`pre_order` and `out_order` traversals.") + return getattr(self, '_' + order)(node) + + def breadth_first_search(self, node=None, strategy='queue'): + """ + Computes the breadth first search traversal of a binary tree. + + Parameters + ========== + + node : int + The index of the node from where the traversal has to be instantiated. + By default, set to, root index. + + strategy : str + The strategy using which the computation has to happen. + By default, it is set 'queue'. + + Returns + ======= + + list + Each element of the list is of type `TreeNode`. 
+ """ + # TODO: IMPLEMENT ITERATIVE DEEPENING-DEPTH FIRST SEARCH STRATEGY + strategies = ('queue',) + if strategy not in strategies: + raise NotImplementedError( + "%s startegy is not implemented yet"%(strategy)) + if node is None: + node = self.tree.root_idx + q, visit, tree = Queue(), [], self.tree.tree + q.append(node) + while len(q) > 0: + node = q.popleft() + visit.append(tree[node]) + if tree[node].left is not None: + q.append(tree[node].left) + if tree[node].right is not None: + q.append(tree[node].right) + return visit + +class BinaryIndexedTree(object): + """ + Represents binary indexed trees + a.k.a fenwick trees. + + Parameters + ========== + + array: list/tuple + The array whose elements are to be + considered for the queries. + backend: pydatastructs.Backend + The backend to be used. Available backends: Python and C++ + Optional, by default, the Python backend is used. For faster execution, use the C++ backend. + + Examples + ======== + + >>> from pydatastructs import BinaryIndexedTree + >>> bit = BinaryIndexedTree([1, 2, 3]) + >>> bit.get_sum(0, 2) + 6 + >>> bit.update(0, 100) + >>> bit.get_sum(0, 2) + 105 + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Fenwick_tree + """ + + __slots__ = ['tree', 'array', 'flag'] + + def __new__(cls, array, **kwargs): + backend = kwargs.get('backend', Backend.PYTHON) + if backend == Backend.CPP: + return _trees.BinaryIndexedTree(type(array[0]), array, **kwargs) + obj = object.__new__(cls) + obj.array = OneDimensionalArray(type(array[0]), array) + obj.tree = [0] * (obj.array._size + 2) + obj.flag = [0] * (obj.array._size) + for index in range(obj.array._size): + obj.update(index, array[index]) + return obj + + @classmethod + def methods(cls): + return ['update', 'get_prefix_sum', + 'get_sum'] + + def update(self, index, value): + """ + Updates value at the given index. + + Parameters + ========== + + index: int + Index of element to be updated. + + value + The value to be inserted. 
+ """ + _index, _value = index, value + if self.flag[index] == 0: + self.flag[index] = 1 + index += 1 + while index < self.array._size + 1: + self.tree[index] += value + index = index + (index & (-index)) + else: + value = value - self.array[index] + index += 1 + while index < self.array._size + 1: + self.tree[index] += value + index = index + (index & (-index)) + self.array[_index] = _value + + def get_prefix_sum(self, index): + """ + Computes sum of elements from index 0 to given index. + + Parameters + ========== + + index: int + Index till which sum has to be calculated. + + Returns + ======= + + sum: int + The required sum. + """ + index += 1 + sum = 0 + while index > 0: + sum += self.tree[index] + index = index - (index & (-index)) + return sum + + def get_sum(self, left_index, right_index): + """ + Get sum of elements from left index to right index. + + Parameters + ========== + + left_index: int + Starting index from where sum has to be computed. + + right_index: int + Ending index till where sum has to be computed. + + Returns + ======= + + sum: int + The required sum + """ + if left_index >= 1: + return self.get_prefix_sum(right_index) - \ + self.get_prefix_sum(left_index - 1) + else: + return self.get_prefix_sum(right_index) diff --git a/lib/python3.12/site-packages/pydatastructs/trees/heaps.py b/lib/python3.12/site-packages/pydatastructs/trees/heaps.py new file mode 100644 index 000000000..12133a6f1 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/trees/heaps.py @@ -0,0 +1,582 @@ +from pydatastructs.utils.misc_util import ( + _check_type, TreeNode, BinomialTreeNode, + Backend, raise_if_backend_is_not_python) +from pydatastructs.linear_data_structures.arrays import ( + DynamicOneDimensionalArray, Array) +from pydatastructs.miscellaneous_data_structures.binomial_trees import BinomialTree + +__all__ = [ + 'BinaryHeap', + 'TernaryHeap', + 'DHeap', + 'BinomialHeap' +] + +class Heap(object): + """ + Abstract class for representing heaps. 
+ """ + pass + + +class DHeap(Heap): + """ + Represents D-ary Heap. + + Parameters + ========== + + elements: list, tuple, Array + Optional, by default 'None'. + list/tuple/Array of initial TreeNode in Heap. + heap_property: str + If the key stored in each node is + either greater than or equal to + the keys in the node's children + then pass 'max'. + If the key stored in each node is + either less than or equal to + the keys in the node's children + then pass 'min'. + By default, the heap property is + set to 'min'. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Examples + ======== + + >>> from pydatastructs.trees.heaps import DHeap + >>> min_heap = DHeap(heap_property="min", d=3) + >>> min_heap.insert(1, 1) + >>> min_heap.insert(5, 5) + >>> min_heap.insert(7, 7) + >>> min_heap.extract().key + 1 + >>> min_heap.insert(4, 4) + >>> min_heap.extract().key + 4 + + >>> max_heap = DHeap(heap_property='max', d=2) + >>> max_heap.insert(1, 1) + >>> max_heap.insert(5, 5) + >>> max_heap.insert(7, 7) + >>> max_heap.extract().key + 7 + >>> max_heap.insert(6, 6) + >>> max_heap.extract().key + 6 + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/D-ary_heap + """ + __slots__ = ['_comp', 'heap', 'd', 'heap_property', '_last_pos_filled'] + + def __new__(cls, elements=None, heap_property="min", d=4, + **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = Heap.__new__(cls) + obj.heap_property = heap_property + obj.d = d + if heap_property == "min": + obj._comp = lambda key_parent, key_child: key_parent <= key_child + elif heap_property == "max": + obj._comp = lambda key_parent, key_child: key_parent >= key_child + else: + raise ValueError("%s is invalid heap property"%(heap_property)) + if elements is None: + elements = DynamicOneDimensionalArray(TreeNode, 0) + elif _check_type(elements, (list,tuple)): + elements = DynamicOneDimensionalArray(TreeNode, len(elements), elements) + elif _check_type(elements, Array): + elements = DynamicOneDimensionalArray(TreeNode, len(elements), elements._data) + else: + raise ValueError(f'Expected a list/tuple/Array of TreeNode got {type(elements)}') + obj.heap = elements + obj._last_pos_filled = obj.heap._last_pos_filled + obj._build() + return obj + + @classmethod + def methods(cls): + return ['__new__', 'insert', 'extract', '__str__', 'is_empty'] + + def _build(self): + for i in range(self._last_pos_filled + 1): + self.heap[i]._leftmost, self.heap[i]._rightmost = \ + self.d*i + 1, self.d*i + self.d + for i in range((self._last_pos_filled + 1)//self.d, -1, -1): + self._heapify(i) + + def _swap(self, idx1, idx2): + idx1_key, idx1_data = \ + self.heap[idx1].key, self.heap[idx1].data + self.heap[idx1].key, self.heap[idx1].data = \ + self.heap[idx2].key, self.heap[idx2].data + self.heap[idx2].key, self.heap[idx2].data = \ + idx1_key, idx1_data + + def _heapify(self, i): + while True: + target = i + l = self.d*i + 1 + r = self.d*i + self.d + + for j in range(l, r+1): + if j <= self._last_pos_filled: + target = j if self._comp(self.heap[j].key, self.heap[target].key) \ + else target + else: + break + + if 
target != i: + self._swap(target, i) + i = target + else: + break + + def insert(self, key, data=None): + """ + Insert a new element to the heap according to heap property. + + Parameters + ========== + + key + The key for comparison. + data + The data to be inserted. + + Returns + ======= + + None + """ + new_node = TreeNode(key, data) + self.heap.append(new_node) + self._last_pos_filled += 1 + i = self._last_pos_filled + self.heap[i]._leftmost, self.heap[i]._rightmost = self.d*i + 1, self.d*i + self.d + + while True: + parent = (i - 1)//self.d + if i == 0 or self._comp(self.heap[parent].key, self.heap[i].key): + break + else: + self._swap(i, parent) + i = parent + + def extract(self): + """ + Extract root element of the Heap. + + Returns + ======= + + root_element: TreeNode + The TreeNode at the root of the heap, + if the heap is not empty. + + None + If the heap is empty. + """ + if self._last_pos_filled == -1: + raise IndexError("Heap is empty.") + else: + element_to_be_extracted = TreeNode(self.heap[0].key, self.heap[0].data) + self._swap(0, self._last_pos_filled) + self.heap.delete(self._last_pos_filled) + self._last_pos_filled -= 1 + self._heapify(0) + return element_to_be_extracted + + def __str__(self): + to_be_printed = ['' for i in range(self._last_pos_filled + 1)] + for i in range(self._last_pos_filled + 1): + node = self.heap[i] + if node._leftmost <= self._last_pos_filled: + if node._rightmost <= self._last_pos_filled: + children = list(range(node._leftmost, node._rightmost + 1)) + else: + children = list(range(node._leftmost, self._last_pos_filled + 1)) + else: + children = [] + to_be_printed[i] = (node.key, node.data, children) + return str(to_be_printed) + + @property + def is_empty(self): + """ + Checks if the heap is empty. + """ + return self.heap._last_pos_filled == -1 + + +class BinaryHeap(DHeap): + """ + Represents Binary Heap. + + Parameters + ========== + + elements: list, tuple + Optional, by default 'None'. 
+ List/tuple of initial elements in Heap. + heap_property: str + If the key stored in each node is + either greater than or equal to + the keys in the node's children + then pass 'max'. + If the key stored in each node is + either less than or equal to + the keys in the node's children + then pass 'min'. + By default, the heap property is + set to 'min'. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Examples + ======== + + >>> from pydatastructs.trees.heaps import BinaryHeap + >>> min_heap = BinaryHeap(heap_property="min") + >>> min_heap.insert(1, 1) + >>> min_heap.insert(5, 5) + >>> min_heap.insert(7, 7) + >>> min_heap.extract().key + 1 + >>> min_heap.insert(4, 4) + >>> min_heap.extract().key + 4 + + >>> max_heap = BinaryHeap(heap_property='max') + >>> max_heap.insert(1, 1) + >>> max_heap.insert(5, 5) + >>> max_heap.insert(7, 7) + >>> max_heap.extract().key + 7 + >>> max_heap.insert(6, 6) + >>> max_heap.extract().key + 6 + + References + ========== + + .. [1] https://en.m.wikipedia.org/wiki/Binary_heap + """ + def __new__(cls, elements=None, heap_property="min", + **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = DHeap.__new__(cls, elements, heap_property, 2) + return obj + + @classmethod + def methods(cls): + return ['__new__'] + + +class TernaryHeap(DHeap): + """ + Represents Ternary Heap. + + Parameters + ========== + + elements: list, tuple + Optional, by default 'None'. + List/tuple of initial elements in Heap. + heap_property: str + If the key stored in each node is + either greater than or equal to + the keys in the node's children + then pass 'max'. + If the key stored in each node is + either less than or equal to + the keys in the node's children + then pass 'min'. + By default, the heap property is + set to 'min'. + backend: pydatastructs.Backend + The backend to be used. 
+ Optional, by default, the best available + backend is used. + + Examples + ======== + + >>> from pydatastructs.trees.heaps import TernaryHeap + >>> min_heap = TernaryHeap(heap_property="min") + >>> min_heap.insert(1, 1) + >>> min_heap.insert(5, 5) + >>> min_heap.insert(7, 7) + >>> min_heap.insert(3, 3) + >>> min_heap.extract().key + 1 + >>> min_heap.insert(4, 4) + >>> min_heap.extract().key + 3 + + >>> max_heap = TernaryHeap(heap_property='max') + >>> max_heap.insert(1, 1) + >>> max_heap.insert(5, 5) + >>> max_heap.insert(7, 7) + >>> min_heap.insert(3, 3) + >>> max_heap.extract().key + 7 + >>> max_heap.insert(6, 6) + >>> max_heap.extract().key + 6 + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/D-ary_heap + .. [2] https://ece.uwaterloo.ca/~dwharder/aads/Algorithms/d-ary_heaps/Ternary_heaps/ + """ + def __new__(cls, elements=None, heap_property="min", + **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = DHeap.__new__(cls, elements, heap_property, 3) + return obj + + @classmethod + def methods(cls): + return ['__new__'] + + +class BinomialHeap(Heap): + """ + Represents binomial heap. + + Parameters + ========== + + root_list: list/tuple/Array + By default, [] + The list of BinomialTree object references + in sorted order. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. + + Examples + ======== + + >>> from pydatastructs import BinomialHeap + >>> b = BinomialHeap() + >>> b.insert(1, 1) + >>> b.insert(2, 2) + >>> b.find_minimum().key + 1 + >>> b.find_minimum().children[0].key + 2 + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/Binomial_heap + """ + __slots__ = ['root_list'] + + def __new__(cls, root_list=None, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + if root_list is None: + root_list = [] + if not all((_check_type(root, BinomialTree)) + for root in root_list): + raise TypeError("The root_list should contain " + "references to objects of BinomialTree.") + obj = Heap.__new__(cls) + obj.root_list = root_list + return obj + + @classmethod + def methods(cls): + return ['__new__', 'merge_tree', 'merge', 'insert', + 'find_minimum', 'is_emtpy', 'decrease_key', 'delete', + 'delete_minimum'] + + def merge_tree(self, tree1, tree2): + """ + Merges two BinomialTree objects. + + Parameters + ========== + + tree1: BinomialTree + + tree2: BinomialTree + """ + if (not _check_type(tree1, BinomialTree)) or \ + (not _check_type(tree2, BinomialTree)): + raise TypeError("Both the trees should be of type " + "BinomalTree.") + ret_value = None + if tree1.root.key <= tree2.root.key: + tree1.add_sub_tree(tree2) + ret_value = tree1 + else: + tree2.add_sub_tree(tree1) + ret_value = tree2 + return ret_value + + def _merge_heap_last_new_tree(self, new_root_list, new_tree): + """ + Merges last tree node in root list with the incoming tree. + """ + pos = -1 + if len(new_root_list) > 0 and new_root_list[pos].order == new_tree.order: + new_root_list[pos] = self.merge_tree(new_root_list[pos], new_tree) + else: + new_root_list.append(new_tree) + + def merge(self, other_heap): + """ + Merges current binomial heap with the given binomial heap. 
+ + Parameters + ========== + + other_heap: BinomialHeap + """ + if not _check_type(other_heap, BinomialHeap): + raise TypeError("Other heap is not of type BinomialHeap.") + new_root_list = [] + i, j = 0, 0 + while (i < len(self.root_list)) and \ + (j < len(other_heap.root_list)): + new_tree = None + while self.root_list[i] is None: + i += 1 + while other_heap.root_list[j] is None: + j += 1 + if self.root_list[i].order == other_heap.root_list[j].order: + new_tree = self.merge_tree(self.root_list[i], + other_heap.root_list[j]) + i += 1 + j += 1 + else: + if self.root_list[i].order < other_heap.root_list[j].order: + new_tree = self.root_list[i] + i += 1 + else: + new_tree = other_heap.root_list[j] + j += 1 + self._merge_heap_last_new_tree(new_root_list, new_tree) + + while i < len(self.root_list): + new_tree = self.root_list[i] + self._merge_heap_last_new_tree(new_root_list, new_tree) + i += 1 + while j < len(other_heap.root_list): + new_tree = other_heap.root_list[j] + self._merge_heap_last_new_tree(new_root_list, new_tree) + j += 1 + self.root_list = new_root_list + + def insert(self, key, data=None): + """ + Inserts new node with the given key and data. + + key + The key of the node which can be operated + upon by relational operators. + + data + The data to be stored in the new node. + """ + new_node = BinomialTreeNode(key, data) + new_tree = BinomialTree(root=new_node, order=0) + new_heap = BinomialHeap(root_list=[new_tree]) + self.merge(new_heap) + + def find_minimum(self, **kwargs): + """ + Finds the node with the minimum key. 
+ + Returns + ======= + + min_node: BinomialTreeNode + """ + if self.is_empty: + raise IndexError("Binomial heap is empty.") + min_node = None + idx, min_idx = 0, None + for tree in self.root_list: + if ((min_node is None) or + (tree is not None and tree.root is not None and + min_node.key > tree.root.key)): + min_node = tree.root + min_idx = idx + idx += 1 + if kwargs.get('get_index', None) is not None: + return min_node, min_idx + return min_node + + def delete_minimum(self): + """ + Deletes the node with minimum key. + """ + min_node, min_idx = self.find_minimum(get_index=True) + child_root_list = [] + for k, child in enumerate(min_node.children): + if child is not None: + child_root_list.append(BinomialTree(root=child, order=k)) + self.root_list.remove(self.root_list[min_idx]) + child_heap = BinomialHeap(root_list=child_root_list) + self.merge(child_heap) + + @property + def is_empty(self): + return not self.root_list + + def decrease_key(self, node, new_key): + """ + Decreases the key of the given node. + + Parameters + ========== + + node: BinomialTreeNode + The node whose key is to be reduced. + new_key + The new key of the given node, + should be less than the current key. + """ + if node.key <= new_key: + raise ValueError("The new key " + "should be less than current node's key.") + node.key = new_key + while ((not node.is_root) and + (node.parent.key > node.key)): + node.parent.key, node.key = \ + node.key, node.parent.key + node.parent.data, node.data = \ + node.data, node.parent.data + node = node.parent + + def delete(self, node): + """ + Deletes the given node. + + Parameters + ========== + + node: BinomialTreeNode + The node which is to be deleted. 
+ """ + self.decrease_key(node, self.find_minimum().key - 1) + self.delete_minimum() diff --git a/lib/python3.12/site-packages/pydatastructs/trees/m_ary_trees.py b/lib/python3.12/site-packages/pydatastructs/trees/m_ary_trees.py new file mode 100644 index 000000000..a06fda9ee --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/trees/m_ary_trees.py @@ -0,0 +1,172 @@ +from pydatastructs.utils import MAryTreeNode +from pydatastructs.linear_data_structures.arrays import ArrayForTrees +from pydatastructs.utils.misc_util import ( + Backend, raise_if_backend_is_not_python) + +__all__ = [ + 'MAryTree' +] + +class MAryTree(object): + """ + Abstract m-ary tree. + + Parameters + ========== + + key + Required if tree is to be instantiated with + root otherwise not needed. + root_data + Optional, the root node of the binary tree. + If not of type MAryTreeNode, it will consider + root as data and a new root node will + be created. + comp: lambda + Optional, A lambda function which will be used + for comparison of keys. Should return a + bool value. By default it implements less + than operator. + is_order_statistic: bool + Set it to True, if you want to use the + order statistic features of the tree. + max_children + Optional, specifies the maximum number of children + a node can have. Defaults to 2 in case nothing is + specified. + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/M-ary_tree + """ + + __slots__ = ['root_idx', 'max_children', 'comparator', 'tree', 'size', + 'is_order_statistic'] + + + def __new__(cls, key=None, root_data=None, comp=None, + is_order_statistic=False, max_children=2, + **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = object.__new__(cls) + if key is None and root_data is not None: + raise ValueError('Key required.') + key = None if root_data is None else key + root = MAryTreeNode(key, root_data) + root.is_root = True + obj.root_idx = 0 + obj.max_children = max_children + obj.tree, obj.size = ArrayForTrees(MAryTreeNode, [root]), 1 + obj.comparator = lambda key1, key2: key1 < key2 \ + if comp is None else comp + obj.is_order_statistic = is_order_statistic + return obj + + @classmethod + def methods(cls): + return ['__new__', '__str__'] + + def insert(self, key, data=None): + """ + Inserts data by the passed key using iterative + algorithm. + + Parameters + ========== + + key + The key for comparison. + data + The data to be inserted. + + Returns + ======= + + None + """ + raise NotImplementedError("This is an abstract method.") + + def delete(self, key, **kwargs): + """ + Deletes the data with the passed key + using iterative algorithm. + + Parameters + ========== + + key + The key of the node which is + to be deleted. + + Returns + ======= + + True + If the node is deleted successfully. + + None + If the node to be deleted doesn't exists. + + Note + ==== + + The node is deleted means that the connection to that + node are removed but the it is still in tree. + """ + raise NotImplementedError("This is an abstract method.") + + def search(self, key, **kwargs): + """ + Searches for the data in the binary search tree + using iterative algorithm. + + Parameters + ========== + + key + The key for searching. + parent: bool + If true then returns index of the + parent of the node with the passed + key. 
+ By default, False + + Returns + ======= + + int + If the node with the passed key is + in the tree. + tuple + The index of the searched node and + the index of the parent of that node. + None + In all other cases. + """ + raise NotImplementedError("This is an abstract method.") + + def to_binary_tree(self): + """ + Converts an m-ary tree to a binary tree. + + Returns + ======= + + TreeNode + The root of the newly created binary tree. + """ + raise NotImplementedError("This is an abstract method.") + + + def __str__(self): + to_be_printed = ['' for i in range(self.tree._last_pos_filled + 1)] + for i in range(self.tree._last_pos_filled + 1): + if self.tree[i] is not None: + node = self.tree[i] + to_be_printed[i] = (node.key, node.data) + for j in node.children: + if j is not None: + to_be_printed[i].append(j) + return str(to_be_printed) diff --git a/lib/python3.12/site-packages/pydatastructs/trees/space_partitioning_trees.py b/lib/python3.12/site-packages/pydatastructs/trees/space_partitioning_trees.py new file mode 100644 index 000000000..f13c1f280 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/trees/space_partitioning_trees.py @@ -0,0 +1,242 @@ +from pydatastructs.utils import TreeNode +from collections import deque as Queue +from pydatastructs.utils.misc_util import ( + _check_type, Backend, + raise_if_backend_is_not_python) + +__all__ = [ + 'OneDimensionalSegmentTree' +] + +class OneDimensionalSegmentTree(object): + """ + Represents one dimensional segment trees. + + Parameters + ========== + + segs: list/tuple/set + The segs should contains tuples/list/set of size 2 + denoting the start and end points of the intervals. + backend: pydatastructs.Backend + The backend to be used. + Optional, by default, the best available + backend is used. 
+ + Examples + ======== + + >>> from pydatastructs import OneDimensionalSegmentTree as ODST + >>> segt = ODST([(3, 8), (9, 20)]) + >>> segt.build() + >>> segt.tree[0].key + [False, 2, 3, False] + >>> len(segt.query(4)) + 1 + + Note + ==== + + All the segments are assumed to be closed intervals, + i.e., the ends are points of segments are also included in + computation. + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Segment_tree + + """ + + __slots__ = ['segments', 'tree', 'root_idx', 'cache'] + + def __new__(cls, segs, **kwargs): + raise_if_backend_is_not_python( + cls, kwargs.get('backend', Backend.PYTHON)) + obj = object.__new__(cls) + if any((not isinstance(seg, (tuple, list, set)) or len(seg) != 2) + for seg in segs): + raise ValueError('%s is invalid set of intervals'%(segs)) + for i in range(len(segs)): + segs[i] = list(segs[i]) + segs[i].sort() + obj.segments = list(segs) + obj.tree, obj.root_idx, obj.cache = [], None, False + return obj + + @classmethod + def methods(cls): + return ['build', 'query', '__str__'] + + def _union(self, i1, i2): + """ + Helper function for taking union of two + intervals. + """ + return TreeNode([i1.key[0], i1.key[1], i2.key[2], i2.key[3]], None) + + def _intersect(self, i1, i2): + """ + Helper function for finding intersection of two + intervals. + """ + if i1 is None or i2 is None: + return False + if i1.key[2] < i2.key[1] or i2.key[2] < i1.key[1]: + return False + c1, c2 = None, None + if i1.key[2] == i2.key[1]: + c1 = (i1.key[3] and i2.key[0]) + if i2.key[2] == i1.key[1]: + c2 = (i2.key[3] and i1.key[0]) + if c1 is False and c2 is False: + return False + return True + + def _contains(self, i1, i2): + """ + Helper function for checking if the first interval + is contained in second interval. 
+ """ + if i1 is None or i2 is None: + return False + if i1.key[1] < i2.key[1] and i1.key[2] > i2.key[2]: + return True + if i1.key[1] == i2.key[1] and i1.key[2] > i2.key[2]: + return (i1.key[0] or not i2.key[0]) + if i1.key[1] < i2.key[1] and i1.key[2] == i2.key[2]: + return i1.key[3] or not i2.key[3] + if i1.key[1] == i2.key[1] and i1.key[2] == i2.key[2]: + return not ((not i1.key[3] and i2.key[3]) or (not i1.key[0] and i2.key[0])) + return False + + def _iterate(self, calls, I, idx): + """ + Helper function for filling the calls + stack. Used for imitating the stack based + approach used in recursion. + """ + if self.tree[idx].right is None: + rc = None + else: + rc = self.tree[self.tree[idx].right] + if self.tree[idx].left is None: + lc = None + else: + lc = self.tree[self.tree[idx].left] + if self._intersect(I, rc): + calls.append(self.tree[idx].right) + if self._intersect(I, lc): + calls.append(self.tree[idx].left) + return calls + + def build(self): + """ + Builds the segment tree from the segments, + using iterative algorithm based on queues. 
+ """ + if self.cache: + return None + endpoints = [] + for segment in self.segments: + endpoints.extend(segment) + endpoints.sort() + + elem_int = Queue() + elem_int.append(TreeNode([False, endpoints[0] - 1, endpoints[0], False], None)) + i = 0 + while i < len(endpoints) - 1: + elem_int.append(TreeNode([True, endpoints[i], endpoints[i], True], None)) + elem_int.append(TreeNode([False, endpoints[i], endpoints[i+1], False], None)) + i += 1 + elem_int.append(TreeNode([True, endpoints[i], endpoints[i], True], None)) + elem_int.append(TreeNode([False, endpoints[i], endpoints[i] + 1, False], None)) + + self.tree = [] + while len(elem_int) > 1: + m = len(elem_int) + while m >= 2: + I1 = elem_int.popleft() + I2 = elem_int.popleft() + I = self._union(I1, I2) + I.left = len(self.tree) + I.right = len(self.tree) + 1 + self.tree.append(I1), self.tree.append(I2) + elem_int.append(I) + m -= 2 + if m & 1 == 1: + Il = elem_int.popleft() + elem_int.append(Il) + + Ir = elem_int.popleft() + Ir.left, Ir.right = -3, -2 + self.tree.append(Ir) + self.root_idx = -1 + + for segment in self.segments: + I = TreeNode([True, segment[0], segment[1], True], None) + calls = [self.root_idx] + while calls: + idx = calls.pop() + if self._contains(I, self.tree[idx]): + if self.tree[idx].data is None: + self.tree[idx].data = [] + self.tree[idx].data.append(I) + continue + calls = self._iterate(calls, I, idx) + self.cache = True + + def query(self, qx, init_node=None): + """ + Queries the segment tree. + + Parameters + ========== + + qx: int/float + The query point + + init_node: int + The index of the node from which the query process + is to be started. + + Returns + ======= + + intervals: set + The set of the intervals which contain the query + point. + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/Segment_tree + """ + if not self.cache: + self.build() + if init_node is None: + init_node = self.root_idx + qn = TreeNode([True, qx, qx, True], None) + intervals = [] + calls = [init_node] + while calls: + idx = calls.pop() + if _check_type(self.tree[idx].data, list): + intervals.extend(self.tree[idx].data) + calls = self._iterate(calls, qn, idx) + return set(intervals) + + def __str__(self): + """ + Used for printing. + """ + if not self.cache: + self.build() + str_tree = [] + for seg in self.tree: + if seg.data is None: + data = None + else: + data = [str(sd) for sd in seg.data] + str_tree.append((seg.left, seg.key, data, seg.right)) + return str(str_tree) diff --git a/lib/python3.12/site-packages/pydatastructs/trees/tests/__init__.py b/lib/python3.12/site-packages/pydatastructs/trees/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/lib/python3.12/site-packages/pydatastructs/trees/tests/test_binary_trees.py b/lib/python3.12/site-packages/pydatastructs/trees/tests/test_binary_trees.py new file mode 100644 index 000000000..826100b78 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/trees/tests/test_binary_trees.py @@ -0,0 +1,820 @@ +from pydatastructs.trees.binary_trees import ( + BinaryTree, BinarySearchTree, BinaryTreeTraversal, AVLTree, + ArrayForTrees, BinaryIndexedTree, SelfBalancingBinaryTree, SplayTree, CartesianTree, Treap, RedBlackTree) +from pydatastructs.utils.raises_util import raises +from pydatastructs.utils.misc_util import TreeNode +from copy import deepcopy +from pydatastructs.utils.misc_util import Backend +import random +from pydatastructs.utils._backend.cpp import _nodes + +def _test_BinarySearchTree(backend): + BST = BinarySearchTree + b = BST(8, 8, backend=backend) + b.delete(8) + b.insert(8, 8) + b.insert(3, 3) + b.insert(10, 10) + b.insert(1, 1) + b.insert(6, 6) + b.insert(4, 4) + b.insert(7, 7) + b.insert(14, 14) + b.insert(13, 13) + # Explicit check for the 
__str__ method of Binary Trees Class + assert str(b) == \ + ("[(1, 8, 8, 2), (3, 3, 3, 4), (None, 10, 10, 7), (None, 1, 1, None), " + "(5, 6, 6, 6), (None, 4, 4, None), (None, 7, 7, None), (8, 14, 14, None), " + "(None, 13, 13, None)]") + assert b.root_idx == 0 + + assert b.tree[0].left == 1 + assert b.tree[0].key == 8 + assert b.tree[0].data == 8 + assert b.tree[0].right == 2 + + trav = BinaryTreeTraversal(b, backend=backend) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [1, 3, 4, 6, 7, 8, 10, 13, 14] + assert [node.key for node in pre_order] == [8, 3, 1, 6, 4, 7, 10, 14, 13] + + assert b.search(10) == 2 + assert b.search(-1) is None + assert b.delete(13) is True + assert b.search(13) is None + assert b.delete(10) is True + assert b.search(10) is None + assert b.delete(3) is True + assert b.search(3) is None + assert b.delete(13) is None + + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [1, 4, 6, 7, 8, 14] + assert [node.key for node in pre_order] == [8, 4, 1, 6, 7, 14] + + b.delete(7) + b.delete(6) + b.delete(1) + b.delete(4) + + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [8, 14] + assert [node.key for node in pre_order] == [8, 14] + + bc = BST(1, 1, backend=backend) + assert bc.insert(1, 2) is None + + b = BST(-8, 8, backend=backend) + b.insert(-3, 3) + b.insert(-10, 10) + b.insert(-1, 1) + b.insert(-6, 6) + b.insert(-4, 4) + b.insert(-7, 7) + b.insert(-14, 14) + b.insert(-13, 13) + + b.delete(-13) + b.delete(-10) + b.delete(-3) + b.delete(-13) + assert str(b) == "[(7, -8, 8, 1), (4, -1, 1, None), '', '', (6, -6, 6, 5), (None, -4, 4, None), (None, -7, 7, None), (None, -14, 14, None)]" + + bl = BST(backend=backend) + nodes = [50, 30, 90, 
70, 100, 60, 80, 55, 20, 40, 15, 10, 16, 17, 18] + for node in nodes: + bl.insert(node, node) + + assert bl.lowest_common_ancestor(80, 55, 2) == 70 + assert bl.lowest_common_ancestor(60, 70, 2) == 70 + assert bl.lowest_common_ancestor(18, 18, 2) == 18 + assert bl.lowest_common_ancestor(40, 90, 2) == 50 + + assert bl.lowest_common_ancestor(18, 10, 2) == 15 + assert bl.lowest_common_ancestor(55, 100, 2) == 90 + assert bl.lowest_common_ancestor(16, 80, 2) == 50 + assert bl.lowest_common_ancestor(30, 55, 2) == 50 + + assert raises(ValueError, lambda: bl.lowest_common_ancestor(60, 200, 2)) + assert raises(ValueError, lambda: bl.lowest_common_ancestor(200, 60, 2)) + assert raises(ValueError, lambda: bl.lowest_common_ancestor(-3, 4, 2)) + + assert bl.lowest_common_ancestor(80, 55, 1) == 70 + assert bl.lowest_common_ancestor(60, 70, 1) == 70 + assert bl.lowest_common_ancestor(18, 18, 1) == 18 + assert bl.lowest_common_ancestor(40, 90, 1) == 50 + + assert bl.lowest_common_ancestor(18, 10, 1) == 15 + assert bl.lowest_common_ancestor(55, 100, 1) == 90 + assert bl.lowest_common_ancestor(16, 80, 1) == 50 + assert bl.lowest_common_ancestor(30, 55, 1) == 50 + + assert raises(ValueError, lambda: bl.lowest_common_ancestor(60, 200, 1)) + assert raises(ValueError, lambda: bl.lowest_common_ancestor(200, 60, 1)) + assert raises(ValueError, lambda: bl.lowest_common_ancestor(-3, 4, 1)) + +def test_BinarySearchTree(): + _test_BinarySearchTree(Backend.PYTHON) + +def test_cpp_BinarySearchTree(): + _test_BinarySearchTree(Backend.CPP) + +def _test_BinaryTreeTraversal(backend): + BST = BinarySearchTree + BTT = BinaryTreeTraversal + b = BST('F', 'F', backend=backend) + b.insert('B', 'B') + b.insert('A', 'A') + b.insert('G', 'G') + b.insert('D', 'D') + b.insert('C', 'C') + b.insert('E', 'E') + b.insert('I', 'I') + b.insert('H', 'H') + + trav = BTT(b, backend=backend) + pre = trav.depth_first_search(order='pre_order') + assert [node.key for node in pre] == ['F', 'B', 'A', 'D', 'C', 'E', 'G', 'I', 
'H'] + + ino = trav.depth_first_search() + assert [node.key for node in ino] == ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I'] + + out = trav.depth_first_search(order='out_order') + assert [node.key for node in out] == ['I', 'H', 'G', 'F', 'E', 'D', 'C', 'B', 'A'] + + post = trav.depth_first_search(order='post_order') + assert [node.key for node in post] == ['A', 'C', 'E', 'D', 'B', 'H', 'I', 'G', 'F'] + + bfs = trav.breadth_first_search() + assert [node.key for node in bfs] == ['F', 'B', 'G', 'A', 'D', 'I', 'C', 'E', 'H'] + + assert raises(NotImplementedError, lambda: trav.breadth_first_search(strategy='iddfs')) + assert raises(NotImplementedError, lambda: trav.depth_first_search(order='in_out_order')) + assert raises(TypeError, lambda: BTT(1)) + +def test_BinaryTreeTraversal(): + _test_BinaryTreeTraversal(Backend.PYTHON) + +def test_cpp_BinaryTreeTraversal(): + _test_BinaryTreeTraversal(Backend.CPP) + +def _test_AVLTree(backend): + a = AVLTree('M', 'M', backend=backend) + a.insert('N', 'N') + a.insert('O', 'O') + a.insert('L', 'L') + a.insert('K', 'K') + a.insert('Q', 'Q') + a.insert('P', 'P') + a.insert('H', 'H') + a.insert('I', 'I') + a.insert('A', 'A') + assert a.root_idx == 1 + + trav = BinaryTreeTraversal(a, backend=backend) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == ['A', 'H', 'I', 'K', 'L', 'M', 'N', 'O', 'P', 'Q'] + assert [node.key for node in pre_order] == ['N', 'I', 'H', 'A', 'L', 'K', 'M', 'P', 'O', 'Q'] + + assert [a.balance_factor(a.tree[i]) for i in range(a.tree.size) if a.tree[i] is not None] == \ + [0, -1, 0, 0, 0, 0, 0, -1, 0, 0] + a1 = AVLTree(1, 1, backend=backend) + a1.insert(2, 2) + a1.insert(3, 3) + a1.insert(4, 4) + a1.insert(5, 5) + + trav = BinaryTreeTraversal(a1, backend=backend) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in 
in_order] == [1, 2, 3, 4, 5] + assert [node.key for node in pre_order] == [2, 1, 4, 3, 5] + + a3 = AVLTree(-1, 1, backend=backend) + a3.insert(-2, 2) + a3.insert(-3, 3) + a3.insert(-4, 4) + a3.insert(-5, 5) + + trav = BinaryTreeTraversal(a3, backend=backend) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [-5, -4, -3, -2, -1] + assert [node.key for node in pre_order] == [-2, -4, -5, -3, -1] + + a2 = AVLTree(backend=backend) + a2.insert(1, 1) + a2.insert(1, 1) + + trav = BinaryTreeTraversal(a2, backend=backend) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [1] + assert [node.key for node in pre_order] == [1] + + a3 = AVLTree(backend=backend) + a3.set_tree( ArrayForTrees(TreeNode, 0, backend=backend) ) + for i in range(0,7): + a3.tree.append(TreeNode(i, i, backend=backend)) + a3.tree[0].left = 1 + a3.tree[0].right = 6 + a3.tree[1].left = 5 + a3.tree[1].right = 2 + a3.tree[2].left = 3 + a3.tree[2].right = 4 + a3._left_right_rotate(0, 1) + assert str(a3) == "[(4, 0, 0, 6), (5, 1, 1, 3), (1, 2, 2, 0), (None, 3, 3, None), (None, 4, 4, None), (None, 5, 5, None), (None, 6, 6, None)]" + + trav = BinaryTreeTraversal(a3, backend=backend) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [5, 1, 3, 2, 4, 0, 6] + assert [node.key for node in pre_order] == [2, 1, 5, 3, 0, 4, 6] + + a4 = AVLTree(backend=backend) + a4.set_tree( ArrayForTrees(TreeNode, 0, backend=backend) ) + for i in range(0,7): + a4.tree.append(TreeNode(i, i,backend=backend)) + a4.tree[0].left = 1 + a4.tree[0].right = 2 + a4.tree[2].left = 3 + a4.tree[2].right = 4 + a4.tree[3].left = 5 + a4.tree[3].right = 6 + a4._right_left_rotate(0, 2) + + trav = BinaryTreeTraversal(a4, backend=backend) + 
in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [1, 0, 5, 3, 6, 2, 4] + assert [node.key for node in pre_order] == [3,0,1,5,2,6,4] + + a5 = AVLTree(is_order_statistic=True,backend=backend) + if backend==Backend.PYTHON: + a5.set_tree( ArrayForTrees(TreeNode, [ + TreeNode(10, 10), + TreeNode(5, 5), + TreeNode(17, 17), + TreeNode(2, 2), + TreeNode(9, 9), + TreeNode(12, 12), + TreeNode(20, 20), + TreeNode(3, 3), + TreeNode(11, 11), + TreeNode(15, 15), + TreeNode(18, 18), + TreeNode(30, 30), + TreeNode(13, 13), + TreeNode(33, 33) + ]) ) + else: + a5.set_tree( ArrayForTrees(_nodes.TreeNode, [ + TreeNode(10, 10,backend=backend), + TreeNode(5, 5,backend=backend), + TreeNode(17, 17,backend=backend), + TreeNode(2, 2,backend=backend), + TreeNode(9, 9,backend=backend), + TreeNode(12, 12,backend=backend), + TreeNode(20, 20,backend=backend), + TreeNode(3, 3,backend=backend), + TreeNode(11, 11,backend=backend), + TreeNode(15, 15,backend=backend), + TreeNode(18, 18,backend=backend), + TreeNode(30, 30,backend=backend), + TreeNode(13, 13,backend=backend), + TreeNode(33, 33,backend=backend) + ],backend=backend) ) + + a5.tree[0].left, a5.tree[0].right, a5.tree[0].parent, a5.tree[0].height = \ + 1, 2, None, 4 + a5.tree[1].left, a5.tree[1].right, a5.tree[1].parent, a5.tree[1].height = \ + 3, 4, 0, 2 + a5.tree[2].left, a5.tree[2].right, a5.tree[2].parent, a5.tree[2].height = \ + 5, 6, 0, 3 + a5.tree[3].left, a5.tree[3].right, a5.tree[3].parent, a5.tree[3].height = \ + None, 7, 1, 1 + a5.tree[4].left, a5.tree[4].right, a5.tree[4].parent, a5.tree[4].height = \ + None, None, 1, 0 + a5.tree[5].left, a5.tree[5].right, a5.tree[5].parent, a5.tree[5].height = \ + 8, 9, 2, 2 + a5.tree[6].left, a5.tree[6].right, a5.tree[6].parent, a5.tree[6].height = \ + 10, 11, 2, 2 + a5.tree[7].left, a5.tree[7].right, a5.tree[7].parent, a5.tree[7].height = \ + None, None, 3, 0 + a5.tree[8].left, 
a5.tree[8].right, a5.tree[8].parent, a5.tree[8].height = \ + None, None, 5, 0 + a5.tree[9].left, a5.tree[9].right, a5.tree[9].parent, a5.tree[9].height = \ + 12, None, 5, 1 + a5.tree[10].left, a5.tree[10].right, a5.tree[10].parent, a5.tree[10].height = \ + None, None, 6, 0 + a5.tree[11].left, a5.tree[11].right, a5.tree[11].parent, a5.tree[11].height = \ + None, 13, 6, 1 + a5.tree[12].left, a5.tree[12].right, a5.tree[12].parent, a5.tree[12].height = \ + None, None, 9, 0 + a5.tree[13].left, a5.tree[13].right, a5.tree[13].parent, a5.tree[13].height = \ + None, None, 11, 0 + + # testing order statistics + a5.tree[0].size = 14 + a5.tree[1].size = 4 + a5.tree[2].size = 9 + a5.tree[3].size = 2 + a5.tree[4].size = 1 + a5.tree[5].size = 4 + a5.tree[6].size = 4 + a5.tree[7].size = 1 + a5.tree[8].size = 1 + a5.tree[9].size = 2 + a5.tree[10].size = 1 + a5.tree[11].size = 2 + a5.tree[12].size = 1 + a5.tree[13].size = 1 + assert str(a5) == "[(1, 10, 10, 2), (3, 5, 5, 4), (5, 17, 17, 6), (None, 2, 2, 7), (None, 9, 9, None), (8, 12, 12, 9), (10, 20, 20, 11), (None, 3, 3, None), (None, 11, 11, None), (12, 15, 15, None), (None, 18, 18, None), (None, 30, 30, 13), (None, 13, 13, None), (None, 33, 33, None)]" + + assert raises(ValueError, lambda: a5.select(0)) + assert raises(ValueError, lambda: a5.select(15)) + + assert a5.rank(-1) is None + def test_select_rank(expected_output): + if backend==Backend.PYTHON: + output = [] + for i in range(len(expected_output)): + output.append(a5.select(i + 1).key) + assert output == expected_output + output = [] + expected_ranks = [i + 1 for i in range(len(expected_output))] + for i in range(len(expected_output)): + output.append(a5.rank(expected_output[i])) + assert output == expected_ranks + + test_select_rank([2, 3, 5, 9, 10, 11, 12, 13, 15, 17, 18, 20, 30, 33]) + a5.delete(9) + a5.delete(13) + a5.delete(20) + assert str(a5) == "[(7, 10, 10, 5), (None, 5, 5, None), (0, 17, 17, 6), (None, 2, 2, None), '', (8, 12, 12, 9), (10, 30, 30, 13), (3, 3, 
3, 1), (None, 11, 11, None), (None, 15, 15, None), (None, 18, 18, None), '', '', (None, 33, 33, None)]" + + trav = BinaryTreeTraversal(a5, backend=backend) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [2, 3, 5, 10, 11, 12, 15, 17, 18, 30, 33] + assert [node.key for node in pre_order] == [17, 10, 3, 2, 5, 12, 11, 15, 30, 18, 33] + + test_select_rank([2, 3, 5, 10, 11, 12, 15, 17, 18, 30, 33]) + a5.delete(10) + a5.delete(17) + assert str(a5) == "[(7, 11, 11, 5), (None, 5, 5, None), (0, 18, 18, 6), (None, 2, 2, None), '', (None, 12, 12, 9), (None, 30, 30, 13), (3, 3, 3, 1), '', (None, 15, 15, None), '', '', '', (None, 33, 33, None)]" + test_select_rank([2, 3, 5, 11, 12, 15, 18, 30, 33]) + a5.delete(11) + a5.delete(30) + test_select_rank([2, 3, 5, 12, 15, 18, 33]) + a5.delete(12) + test_select_rank([2, 3, 5, 15, 18, 33]) + a5.delete(15) + test_select_rank([2, 3, 5, 18, 33]) + a5.delete(18) + test_select_rank([2, 3, 5, 33]) + a5.delete(33) + test_select_rank([2, 3, 5]) + a5.delete(5) + test_select_rank([2, 3]) + a5.delete(3) + test_select_rank([2]) + a5.delete(2) + test_select_rank([]) + assert str(a5) == "[(None, None, None, None)]" + +def test_AVLTree(): + _test_AVLTree(backend=Backend.PYTHON) +def test_cpp_AVLTree(): + _test_AVLTree(backend=Backend.CPP) + +def _test_BinaryIndexedTree(backend): + + FT = BinaryIndexedTree + + t = FT([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], backend=backend) + + assert t.get_sum(0, 2) == 6 + assert t.get_sum(0, 4) == 15 + assert t.get_sum(0, 9) == 55 + t.update(0, 100) + assert t.get_sum(0, 2) == 105 + assert t.get_sum(0, 4) == 114 + assert t.get_sum(1, 9) == 54 + +def test_BinaryIndexedTree(): + _test_BinaryIndexedTree(Backend.PYTHON) + +def test_cpp_BinaryIndexedTree(): + _test_BinaryIndexedTree(Backend.CPP) + +def _test_CartesianTree(backend): + tree = CartesianTree(backend=backend) + tree.insert(3, 1, 3) + tree.insert(1, 6, 1) + 
tree.insert(0, 9, 0) + tree.insert(5, 11, 5) + tree.insert(4, 14, 4) + tree.insert(9, 17, 9) + tree.insert(7, 22, 7) + tree.insert(6, 42, 6) + tree.insert(8, 49, 8) + tree.insert(2, 99, 2) + # Explicit check for the redefined __str__ method of Cartesian Trees Class + + trav = BinaryTreeTraversal(tree, backend=backend) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + assert [node.key for node in pre_order] == [3, 1, 0, 2, 5, 4, 9, 7, 6, 8] + + tree.insert(1.5, 4, 1.5) + + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [0, 1, 1.5, 2, 3, 4, 5, 6, 7, 8, 9] + assert [node.key for node in pre_order] == [3, 1.5, 1, 0, 2, 5, 4, 9, 7, 6, 8] + + k = tree.search(1.5) + assert tree.tree[tree.tree[k].parent].key == 3 + tree.delete(1.5) + assert tree.root_idx == 0 + tree.tree[tree.tree[tree.root_idx].left].key == 1 + tree.delete(8) + assert tree.search(8) is None + tree.delete(7) + assert tree.search(7) is None + tree.delete(3) + assert tree.search(3) is None + assert tree.delete(18) is None + +def test_CartesianTree(): + _test_CartesianTree(backend=Backend.PYTHON) + +def test_cpp_CartesianTree(): + _test_CartesianTree(backend=Backend.CPP) + +def _test_Treap(backend): + + random.seed(0) + tree = Treap(backend=backend) + tree.insert(7, 7) + tree.insert(2, 2) + tree.insert(3, 3) + tree.insert(4, 4) + tree.insert(5, 5) + + assert isinstance(tree.tree[0].priority, float) + tree.delete(1) + assert tree.search(1) is None + assert tree.search(2) == 1 + assert tree.delete(1) is None + +def test_Treap(): + _test_Treap(Backend.PYTHON) + +def test_cpp_Treap(): + _test_Treap(Backend.CPP) + +def _test_SelfBalancingBinaryTree(backend): + """ + https://github.com/codezonediitj/pydatastructs/issues/234 + """ + tree = 
SelfBalancingBinaryTree(backend=backend) + tree.insert(5, 5) + tree.insert(5.5, 5.5) + tree.insert(4.5, 4.5) + tree.insert(4.6, 4.6) + tree.insert(4.4, 4.4) + tree.insert(4.55, 4.55) + tree.insert(4.65, 4.65) + original_tree = str(tree) + tree._right_rotate(3, 5) + + assert str(tree) == "[(2, 5, 5, 1), (None, 5.5, 5.5, None), (4, 4.5, 4.5, 5), (None, 4.6, 4.6, 6), (None, 4.4, 4.4, None), (None, 4.55, 4.55, 3), (None, 4.65, 4.65, None)]" + assert tree.tree[3].parent == 5 + assert tree.tree[2].right != 3 + assert tree.tree[tree.tree[5].parent].right == 5 + assert tree.root_idx == 0 + + trav = BinaryTreeTraversal(tree, backend=backend) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [4.4, 4.5, 4.55, 4.6, 4.65, 5, 5.5] + assert [node.key for node in pre_order] == [5, 4.5, 4.4, 4.55, 4.6, 4.65, 5.5] + + assert tree.tree[tree.tree[3].parent].right == 3 + tree._left_rotate(5, 3) + assert str(tree) == original_tree + tree.insert(4.54, 4.54) + tree.insert(4.56, 4.56) + tree._left_rotate(5, 8) + assert tree.tree[tree.tree[8].parent].left == 8 + assert str(tree) == "[(2, 5, 5, 1), (None, 5.5, 5.5, None), (4, 4.5, 4.5, 3), (8, 4.6, 4.6, 6), (None, 4.4, 4.4, None), (7, 4.55, 4.55, None), (None, 4.65, 4.65, None), (None, 4.54, 4.54, None), (5, 4.56, 4.56, None)]" + + tree._left_right_rotate(0, 2) + assert str(tree) == "[(6, 5, 5, 1), (None, 5.5, 5.5, None), (4, 4.5, 4.5, 8), (2, 4.6, 4.6, 0), (None, 4.4, 4.4, None), (7, 4.55, 4.55, None), (None, 4.65, 4.65, None), (None, 4.54, 4.54, None), (5, 4.56, 4.56, None)]" + + tree._right_left_rotate(0, 2) + assert str(tree) == "[(6, 5, 5, None), (None, 5.5, 5.5, None), (None, 4.5, 4.5, 8), (2, 4.6, 4.6, 4), (0, 4.4, 4.4, 2), (7, 4.55, 4.55, None), (None, 4.65, 4.65, None), (None, 4.54, 4.54, None), (5, 4.56, 4.56, None)]" + +def test_SelfBalancingBinaryTree(): + _test_SelfBalancingBinaryTree(Backend.PYTHON) +def 
test_cpp_SelfBalancingBinaryTree(): + _test_SelfBalancingBinaryTree(Backend.CPP) + +def _test_SplayTree(backend): + t = SplayTree(100, 100, backend=backend) + t.insert(50, 50) + t.insert(200, 200) + t.insert(40, 40) + t.insert(30, 30) + t.insert(20, 20) + t.insert(55, 55) + assert str(t) == "[(None, 100, 100, None), (None, 50, 50, None), (0, 200, 200, None), (None, 40, 40, 1), (5, 30, 30, 3), (None, 20, 20, None), (4, 55, 55, 2)]" + assert t.root_idx == 6 + + trav = BinaryTreeTraversal(t, backend=backend) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [20, 30, 40, 50, 55, 100, 200] + assert [node.key for node in pre_order] == [55, 30, 20, 40, 50, 200, 100] + + t.delete(40) + + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [20, 30, 50, 55, 100, 200] + assert [node.key for node in pre_order] == [50, 30, 20, 55, 200, 100] + + t.delete(150) + + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [20, 30, 50, 55, 100, 200] + assert [node.key for node in pre_order] == [50, 30, 20, 55, 200, 100] + + t1 = SplayTree(1000, 1000, backend=backend) + t1.insert(2000, 2000) + + trav2 = BinaryTreeTraversal(t1, backend=backend) + in_order = trav2.depth_first_search(order='in_order') + pre_order = trav2.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [1000, 2000] + assert [node.key for node in pre_order] == [2000, 1000] + + t.join(t1) + assert str(t) == "[(None, 100, 100, None), '', (6, 200, 200, 8), (4, 50, 50, None), (5, 30, 30, None), (None, 20, 20, None), (3, 55, 55, 0), (None, 1000, 1000, None), (7, 2000, 2000, None), '']" + + if backend == Backend.PYTHON: + trav3 = BinaryTreeTraversal(t, backend=backend) + in_order = 
trav3.depth_first_search(order='in_order') + pre_order = trav3.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [20, 30, 50, 55, 100, 200, 1000, 2000] + assert [node.key for node in pre_order] == [200, 55, 50, 30, 20, 100, 2000, 1000] + + s = t.split(200) + assert str(s) == "[(1, 2000, 2000, None), (None, 1000, 1000, None)]" + + trav4 = BinaryTreeTraversal(s, backend=backend) + in_order = trav4.depth_first_search(order='in_order') + pre_order = trav4.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [1000, 2000] + assert [node.key for node in pre_order] == [2000, 1000] + + if backend == Backend.PYTHON: + trav5 = BinaryTreeTraversal(t, backend=backend) + in_order = trav5.depth_first_search(order='in_order') + pre_order = trav5.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [20, 30, 50, 55, 100, 200] + assert [node.key for node in pre_order] == [200, 55, 50, 30, 20, 100] + +def test_SplayTree(): + _test_SplayTree(Backend.PYTHON) + +def test_cpp_SplayTree(): + _test_SplayTree(Backend.CPP) + +def _test_RedBlackTree(backend): + tree = RedBlackTree(backend=backend) + tree.insert(10, 10) + tree.insert(18, 18) + tree.insert(7, 7) + tree.insert(15, 15) + tree.insert(16, 16) + tree.insert(30, 30) + tree.insert(25, 25) + tree.insert(40, 40) + tree.insert(60, 60) + tree.insert(2, 2) + tree.insert(17, 17) + tree.insert(6, 6) + assert str(tree) == "[(11, 10, 10, 3), (10, 18, 18, None), (None, 7, 7, None), (None, 15, 15, None), (0, 16, 16, 6), (None, 30, 30, None), (1, 25, 25, 7), (5, 40, 40, 8), (None, 60, 60, None), (None, 2, 2, None), (None, 17, 17, None), (9, 6, 6, 2)]" + assert tree.root_idx == 4 + + trav = BinaryTreeTraversal(tree, backend=backend) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [2, 6, 7, 10, 15, 16, 17, 18, 25, 30, 40, 60] + assert [node.key for node 
in pre_order] == [16, 10, 6, 2, 7, 15, 25, 18, 17, 40, 30, 60] + + assert tree.lower_bound(0) == 2 + assert tree.lower_bound(2) == 2 + assert tree.lower_bound(3) == 6 + assert tree.lower_bound(7) == 7 + assert tree.lower_bound(25) == 25 + assert tree.lower_bound(32) == 40 + assert tree.lower_bound(41) == 60 + assert tree.lower_bound(60) == 60 + assert tree.lower_bound(61) is None + + assert tree.upper_bound(0) == 2 + assert tree.upper_bound(2) == 6 + assert tree.upper_bound(3) == 6 + assert tree.upper_bound(7) == 10 + assert tree.upper_bound(25) == 30 + assert tree.upper_bound(32) == 40 + assert tree.upper_bound(41) == 60 + assert tree.upper_bound(60) is None + assert tree.upper_bound(61) is None + + tree = RedBlackTree(backend=backend) + + assert tree.lower_bound(1) is None + assert tree.upper_bound(0) is None + + tree.insert(10) + tree.insert(20) + tree.insert(30) + tree.insert(40) + tree.insert(50) + tree.insert(60) + tree.insert(70) + tree.insert(80) + tree.insert(90) + tree.insert(100) + tree.insert(110) + tree.insert(120) + tree.insert(130) + tree.insert(140) + tree.insert(150) + tree.insert(160) + tree.insert(170) + tree.insert(180) + assert str(tree) == "[(None, 10, None, None), (0, 20, None, 2), (None, 30, None, None), (1, 40, None, 5), (None, 50, None, None), (4, 60, None, 6), (None, 70, None, None), (3, 80, None, 11), (None, 90, None, None), (8, 100, None, 10), (None, 110, None, None), (9, 120, None, 13), (None, 130, None, None), (12, 140, None, 15), (None, 150, None, None), (14, 160, None, 16), (None, 170, None, 17), (None, 180, None, None)]" + + assert tree._get_sibling(7) is None + + trav = BinaryTreeTraversal(tree, backend=backend) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [10, 20, 30, 40, 50, 60, 70, 80, 90, + 100, 110, 120, 130, 140, 150, 160, 170, 180] + assert [node.key for node in pre_order] == [80, 40, 20, 10, 30, 60, 50, 70, 120, 
100, + 90, 110, 140, 130, 160, 150, 170, 180] + + tree.delete(180) + tree.delete(130) + tree.delete(110) + tree.delete(190) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, + 120, 140, 150, 160, 170] + assert [node.key for node in pre_order] == [80, 40, 20, 10, 30, 60, 50, 70, 120, 100, + 90, 160, 140, 150, 170] + + tree.delete(170) + tree.delete(100) + tree.delete(60) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [10, 20, 30, 40, 50, 70, 80, 90, 120, 140, 150, 160] + assert [node.key for node in pre_order] == [80, 40, 20, 10, 30, 50, 70, 120, 90, 150, 140, 160] + + tree.delete(70) + tree.delete(140) + + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [10, 20, 30, 40, 50, 80, 90, 120, 150, 160] + assert [node.key for node in pre_order] == [80, 40, 20, 10, 30, 50, 120, 90, 150, 160] + + tree.delete(150) + tree.delete(120) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [10, 20, 30, 40, 50, 80, 90, 160] + assert [node.key for node in pre_order] == [40, 20, 10, 30, 80, 50, 90, 160] + + tree.delete(50) + tree.delete(80) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [10, 20, 30, 40, 90, 160] + assert [node.key for node in pre_order] == [40, 20, 10, 30, 90, 160] + + tree.delete(30) + tree.delete(20) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [10, 40, 90, 160] + assert [node.key for node 
in pre_order] == [40, 10, 90, 160] + + tree.delete(10) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [40, 90, 160] + assert [node.key for node in pre_order] == [90, 40, 160] + + tree.delete(40) + tree.delete(90) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [160] + assert [node.key for node in pre_order] == [160] + + tree.delete(160) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order if node.key is not None] == [] + assert [node.key for node in pre_order if node.key is not None] == [] + + tree = RedBlackTree(backend=backend) + tree.insert(50) + tree.insert(40) + tree.insert(30) + tree.insert(20) + tree.insert(10) + tree.insert(5) + + trav = BinaryTreeTraversal(tree, backend=backend) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [5, 10, 20, 30, 40, 50] + assert [node.key for node in pre_order] == [40, 20, 10, 5, 30, 50] + + assert tree.search(50) == 0 + assert tree.search(20) == 3 + assert tree.search(30) == 2 + tree.delete(50) + tree.delete(20) + tree.delete(30) + assert tree.search(50) is None + assert tree.search(20) is None + assert tree.search(30) is None + + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [5, 10, 40] + assert [node.key for node in pre_order] == [10, 5, 40] + + tree = RedBlackTree(backend=backend) + tree.insert(10) + tree.insert(5) + tree.insert(20) + tree.insert(15) + + trav = BinaryTreeTraversal(tree, backend=backend) + in_order = trav.depth_first_search(order='in_order') + pre_order = 
trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [5, 10, 15, 20] + assert [node.key for node in pre_order] == [10, 5, 20, 15] + + tree.delete(5) + + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [10, 15, 20] + assert [node.key for node in pre_order] == [15, 10, 20] + + tree = RedBlackTree(backend=backend) + tree.insert(10) + tree.insert(5) + tree.insert(20) + tree.insert(15) + tree.insert(2) + tree.insert(6) + + trav = BinaryTreeTraversal(tree,backend=backend) + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [2, 5, 6, 10, 15, 20] + assert [node.key for node in pre_order] == [10, 5, 2, 6, 20, 15] + + tree.delete(10) + + in_order = trav.depth_first_search(order='in_order') + pre_order = trav.depth_first_search(order='pre_order') + assert [node.key for node in in_order] == [2, 5, 6, 15, 20] + assert [node.key for node in pre_order] == [6, 5, 2, 20, 15] + +def test_RedBlackTree(): + _test_RedBlackTree(Backend.PYTHON) + +def test_cpp_RedBlackTree(): + _test_RedBlackTree(Backend.CPP) diff --git a/lib/python3.12/site-packages/pydatastructs/trees/tests/test_heaps.py b/lib/python3.12/site-packages/pydatastructs/trees/tests/test_heaps.py new file mode 100644 index 000000000..dece2f132 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/trees/tests/test_heaps.py @@ -0,0 +1,236 @@ +from pydatastructs.trees.heaps import BinaryHeap, TernaryHeap, BinomialHeap, DHeap +from pydatastructs.linear_data_structures.arrays import DynamicOneDimensionalArray +from pydatastructs.miscellaneous_data_structures.binomial_trees import BinomialTree +from pydatastructs.utils.misc_util import TreeNode, BinomialTreeNode +from pydatastructs.utils.raises_util import raises +from collections import deque as Queue + +def test_BinaryHeap(): + + 
max_heap = BinaryHeap(heap_property="max") + + assert raises(IndexError, lambda: max_heap.extract()) + + max_heap.insert(100, 100) + max_heap.insert(19, 19) + max_heap.insert(36, 36) + max_heap.insert(17, 17) + max_heap.insert(3, 3) + max_heap.insert(25, 25) + max_heap.insert(1, 1) + max_heap.insert(2, 2) + max_heap.insert(7, 7) + assert str(max_heap) == \ + ("[(100, 100, [1, 2]), (19, 19, [3, 4]), " + "(36, 36, [5, 6]), (17, 17, [7, 8]), " + "(3, 3, []), (25, 25, []), (1, 1, []), " + "(2, 2, []), (7, 7, [])]") + + assert max_heap.extract().key == 100 + + expected_sorted_elements = [36, 25, 19, 17, 7, 3, 2, 1] + l = max_heap.heap[0].left + l = max_heap.heap[0].right + sorted_elements = [] + for _ in range(8): + sorted_elements.append(max_heap.extract().key) + assert expected_sorted_elements == sorted_elements + + elements = [ + TreeNode(7, 7), TreeNode(25, 25), TreeNode(100, 100), + TreeNode(1, 1), TreeNode(2, 2), TreeNode(3, 3), + TreeNode(17, 17), TreeNode(19, 19), TreeNode(36, 36) + ] + min_heap = BinaryHeap(elements=elements, heap_property="min") + assert min_heap.extract().key == 1 + + expected_sorted_elements = [2, 3, 7, 17, 19, 25, 36, 100] + sorted_elements = [min_heap.extract().key for _ in range(8)] + assert expected_sorted_elements == sorted_elements + + non_TreeNode_elements = [ + (7, 7), TreeNode(25, 25), TreeNode(100, 100), + TreeNode(1, 1), (2, 2), TreeNode(3, 3), + TreeNode(17, 17), TreeNode(19, 19), TreeNode(36, 36) + ] + assert raises(TypeError, lambda: + BinaryHeap(elements = non_TreeNode_elements, heap_property='min')) + + non_TreeNode_elements = DynamicOneDimensionalArray(int, 0) + non_TreeNode_elements.append(1) + non_TreeNode_elements.append(2) + assert raises(TypeError, lambda: + BinaryHeap(elements = non_TreeNode_elements, heap_property='min')) + + non_heapable = "[1, 2, 3]" + assert raises(ValueError, lambda: + BinaryHeap(elements = non_heapable, heap_property='min')) + +def test_TernaryHeap(): + max_heap = TernaryHeap(heap_property="max") 
+ assert raises(IndexError, lambda: max_heap.extract()) + max_heap.insert(100, 100) + max_heap.insert(19, 19) + max_heap.insert(36, 36) + max_heap.insert(17, 17) + max_heap.insert(3, 3) + max_heap.insert(25, 25) + max_heap.insert(1, 1) + max_heap.insert(2, 2) + max_heap.insert(7, 7) + assert str(max_heap) == \ + ('[(100, 100, [1, 2, 3]), (25, 25, [4, 5, 6]), ' + '(36, 36, [7, 8]), (17, 17, []), ' + '(3, 3, []), (19, 19, []), (1, 1, []), ' + '(2, 2, []), (7, 7, [])]') + + assert max_heap.extract().key == 100 + + expected_sorted_elements = [36, 25, 19, 17, 7, 3, 2, 1] + sorted_elements = [] + for _ in range(8): + sorted_elements.append(max_heap.extract().key) + assert expected_sorted_elements == sorted_elements + + elements = [ + TreeNode(7, 7), TreeNode(25, 25), TreeNode(100, 100), + TreeNode(1, 1), TreeNode(2, 2), TreeNode(3, 3), + TreeNode(17, 17), TreeNode(19, 19), TreeNode(36, 36) + ] + min_heap = TernaryHeap(elements=elements, heap_property="min") + expected_extracted_element = min_heap.heap[0].key + assert min_heap.extract().key == expected_extracted_element + + expected_sorted_elements = [2, 3, 7, 17, 19, 25, 36, 100] + sorted_elements = [min_heap.extract().key for _ in range(8)] + assert expected_sorted_elements == sorted_elements + +def test_DHeap(): + assert raises(ValueError, lambda: DHeap(heap_property="none", d=4)) + max_heap = DHeap(heap_property="max", d=5) + assert raises(IndexError, lambda: max_heap.extract()) + max_heap.insert(100, 100) + max_heap.insert(19, 19) + max_heap.insert(36, 36) + max_heap.insert(17, 17) + max_heap.insert(3, 3) + max_heap.insert(25, 25) + max_heap.insert(1, 1) + max_heap = DHeap(max_heap.heap, heap_property="max", d=4) + max_heap.insert(2, 2) + max_heap.insert(7, 7) + assert str(max_heap) == \ + ('[(100, 100, [1, 2, 3, 4]), (25, 25, [5, 6, 7, 8]), ' + '(36, 36, []), (17, 17, []), (3, 3, []), (19, 19, []), ' + '(1, 1, []), (2, 2, []), (7, 7, [])]') + + assert max_heap.extract().key == 100 + + expected_sorted_elements = [36, 
25, 19, 17, 7, 3, 2, 1] + sorted_elements = [] + for _ in range(8): + sorted_elements.append(max_heap.extract().key) + assert expected_sorted_elements == sorted_elements + + elements = [ + TreeNode(7, 7), TreeNode(25, 25), TreeNode(100, 100), + TreeNode(1, 1), TreeNode(2, 2), TreeNode(3, 3), + TreeNode(17, 17), TreeNode(19, 19), TreeNode(36, 36) + ] + min_heap = DHeap(elements=DynamicOneDimensionalArray(TreeNode, 9, elements), heap_property="min") + assert min_heap.extract().key == 1 + + expected_sorted_elements = [2, 3, 7, 17, 19, 25, 36, 100] + sorted_elements = [min_heap.extract().key for _ in range(8)] + assert expected_sorted_elements == sorted_elements + +def test_BinomialHeap(): + + # Corner cases + assert raises(TypeError, lambda: + BinomialHeap( + root_list=[BinomialTreeNode(1, 1), None]) + ) is True + tree1 = BinomialTree(BinomialTreeNode(1, 1), 0) + tree2 = BinomialTree(BinomialTreeNode(2, 2), 0) + bh = BinomialHeap(root_list=[tree1, tree2]) + assert raises(TypeError, lambda: + bh.merge_tree(BinomialTreeNode(2, 2), None)) + assert raises(TypeError, lambda: + bh.merge(None)) + + # Testing BinomialHeap.merge + nodes = [BinomialTreeNode(1, 1), # 0 + BinomialTreeNode(3, 3), # 1 + BinomialTreeNode(9, 9), # 2 + BinomialTreeNode(11, 11), # 3 + BinomialTreeNode(6, 6), # 4 + BinomialTreeNode(14, 14), # 5 + BinomialTreeNode(2, 2), # 6 + BinomialTreeNode(7, 7), # 7 + BinomialTreeNode(4, 4), # 8 + BinomialTreeNode(8, 8), # 9 + BinomialTreeNode(12, 12), # 10 + BinomialTreeNode(10, 10), # 11 + BinomialTreeNode(5, 5), # 12 + BinomialTreeNode(21, 21)] # 13 + + nodes[2].add_children(nodes[3]) + nodes[4].add_children(nodes[5]) + nodes[6].add_children(nodes[9], nodes[8], nodes[7]) + nodes[7].add_children(nodes[11], nodes[10]) + nodes[8].add_children(nodes[12]) + nodes[10].add_children(nodes[13]) + + tree11 = BinomialTree(nodes[0], 0) + tree12 = BinomialTree(nodes[2], 1) + tree13 = BinomialTree(nodes[6], 3) + tree21 = BinomialTree(nodes[1], 0) + + heap1 = 
BinomialHeap(root_list=[tree11, tree12, tree13]) + heap2 = BinomialHeap(root_list=[tree21]) + + def bfs(heap): + bfs_trav = [] + for i in range(len(heap.root_list)): + layer = [] + bfs_q = Queue() + bfs_q.append(heap.root_list[i].root) + while len(bfs_q) != 0: + curr_node = bfs_q.popleft() + if curr_node is not None: + layer.append(curr_node.key) + for _i in range(curr_node.children._last_pos_filled + 1): + bfs_q.append(curr_node.children[_i]) + if layer != []: + bfs_trav.append(layer) + return bfs_trav + + heap1.merge(heap2) + expected_bfs_trav = [[1, 3, 9, 11], [2, 8, 4, 7, 5, 10, 12, 21]] + assert bfs(heap1) == expected_bfs_trav + + # Testing Binomial.find_minimum + assert heap1.find_minimum().key == 1 + + # Testing Binomial.delete_minimum + heap1.delete_minimum() + assert bfs(heap1) == [[3], [9, 11], [2, 8, 4, 7, 5, 10, 12, 21]] + assert raises(ValueError, lambda: heap1.decrease_key(nodes[3], 15)) + heap1.decrease_key(nodes[3], 0) + assert bfs(heap1) == [[3], [0, 9], [2, 8, 4, 7, 5, 10, 12, 21]] + heap1.delete(nodes[12]) + assert bfs(heap1) == [[3, 8], [0, 9, 2, 7, 4, 10, 12, 21]] + + # Testing BinomialHeap.insert + heap = BinomialHeap() + assert raises(IndexError, lambda: heap.find_minimum()) + heap.insert(1, 1) + heap.insert(3, 3) + heap.insert(6, 6) + heap.insert(9, 9) + heap.insert(14, 14) + heap.insert(11, 11) + heap.insert(2, 2) + heap.insert(7, 7) + assert bfs(heap) == [[1, 3, 6, 2, 9, 7, 11, 14]] diff --git a/lib/python3.12/site-packages/pydatastructs/trees/tests/test_m_ary_trees.py b/lib/python3.12/site-packages/pydatastructs/trees/tests/test_m_ary_trees.py new file mode 100644 index 000000000..6cbc84ace --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/trees/tests/test_m_ary_trees.py @@ -0,0 +1,5 @@ +from pydatastructs import MAryTree + +def test_MAryTree(): + m = MAryTree(1, 1) + assert str(m) == '[(1, 1)]' diff --git a/lib/python3.12/site-packages/pydatastructs/trees/tests/test_space_partitioning_tree.py 
b/lib/python3.12/site-packages/pydatastructs/trees/tests/test_space_partitioning_tree.py new file mode 100644 index 000000000..99f0e84cc --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/trees/tests/test_space_partitioning_tree.py @@ -0,0 +1,20 @@ +from pydatastructs import OneDimensionalSegmentTree +from pydatastructs.utils.raises_util import raises + +def test_OneDimensionalSegmentTree(): + ODST = OneDimensionalSegmentTree + segt = ODST([(0, 5), (1, 6), (9, 13), (1, 2), (3, 8), (9, 20)]) + assert segt.cache is False + segt2 = ODST([(1, 4)]) + assert str(segt2) == ("[(None, [False, 0, 1, False], None, None), " + "(None, [True, 1, 1, True], ['(None, [True, 1, 4, True], None, None)'], " + "None), (None, [False, 1, 4, False], None, None), (None, [True, 4, 4, True], " + "None, None), (0, [False, 0, 1, True], None, 1), (2, [False, 1, 4, True], " + "['(None, [True, 1, 4, True], None, None)'], 3), (4, [False, 0, 4, True], " + "None, 5), (None, [False, 4, 5, False], None, None), (-3, [False, 0, 5, " + "False], None, -2)]") + assert len(segt.query(1.5)) == 3 + assert segt.cache is True + assert len(segt.query(-1)) == 0 + assert len(segt.query(2.8)) == 2 + assert raises(ValueError, lambda: ODST([(1, 2, 3)])) diff --git a/lib/python3.12/site-packages/pydatastructs/utils/__init__.py b/lib/python3.12/site-packages/pydatastructs/utils/__init__.py new file mode 100644 index 000000000..c4971be32 --- /dev/null +++ b/lib/python3.12/site-packages/pydatastructs/utils/__init__.py @@ -0,0 +1,29 @@ +__all__ = [] + +from . 
# ============================================================
# pydatastructs/utils/misc_util.py  (+ raises_util.py, below)
# Reconstructed from whitespace-mangled patch payload.
# ============================================================
import math
from enum import Enum

__all__ = [
    'TreeNode',
    'MAryTreeNode',
    'LinkedListNode',
    'BinomialTreeNode',
    'AdjacencyListGraphNode',
    'AdjacencyMatrixGraphNode',
    'GraphEdge',
    'Set',
    'CartesianTreeNode',
    'RedBlackTreeNode',
    'TrieNode',
    'SkipNode',
    'minimum',
    'summation',
    'greatest_common_divisor',
    'Backend'
]


class Backend(Enum):
    """Execution backends supported by pydatastructs."""

    PYTHON = 'Python'
    CPP = 'Cpp'
    LLVM = 'Llvm'

    def __str__(self):
        return self.value

def raise_if_backend_is_not_python(api, backend):
    """
    Raise ``ValueError`` when *api* is requested with any backend
    other than the Python one.

    ``pydatastructs`` is imported lazily so that this module stays
    importable on its own (and to avoid a circular import at start-up).
    """
    if backend != Backend.PYTHON:
        import pydatastructs
        raise ValueError("As of {} version, only {} backend is supported for {} API".format(
            pydatastructs.__version__, str(Backend.PYTHON), api))

# Small helpers shared across the code base.
_check_type = lambda a, t: isinstance(a, t)
NoneType = type(None)

class Node(object):
    """
    Abstract class representing a node.
    """
    pass

class TreeNode(Node):
    """
    Represents a node in trees.

    Parameters
    ==========

    key
        Required for comparison operations.
    data
        Any valid data to be stored in the node.
    left: int
        Optional, index of the left child node.
    right: int
        Optional, index of the right child node.
    backend: pydatastructs.Backend
        The backend to be used. Available backends: Python and C++.
        Optional, by default, the Python backend is used.
    """

    __slots__ = ['key', 'data', 'left', 'right', 'is_root',
                 'height', 'parent', 'size']

    @classmethod
    def methods(cls):
        return ['__new__', '__str__']

    def __new__(cls, key, data=None, **kwargs):
        backend = kwargs.get('backend', Backend.PYTHON)
        if backend == Backend.CPP:
            # Lazy import: the compiled extension is optional.
            from pydatastructs.utils._backend.cpp import _nodes
            return _nodes.TreeNode(key, data, **kwargs)
        obj = Node.__new__(cls)
        obj.data, obj.key = data, key
        obj.left, obj.right, obj.parent, obj.height, obj.size = \
            None, None, None, 0, 1
        obj.is_root = False
        return obj

    def __str__(self):
        """
        Used for printing.
        """
        return str((self.left, self.key, self.data, self.right))

class CartesianTreeNode(TreeNode):
    """
    Represents a node in cartesian trees.

    Parameters
    ==========

    key
        Required for comparison operations.
    priority: int
        An integer value for the heap property.
    data
        Any valid data to be stored in the node.
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available backend is used.
    """
    __slots__ = ['key', 'data', 'priority']

    def __new__(cls, key, priority, data=None, **kwargs):
        raise_if_backend_is_not_python(
            cls, kwargs.get('backend', Backend.PYTHON))
        obj = TreeNode.__new__(cls, key, data)
        obj.priority = priority
        return obj

    def __str__(self):
        """
        Used for printing.
        """
        return str((self.left, self.key, self.priority, self.data, self.right))

class RedBlackTreeNode(TreeNode):
    """
    Represents a node in red-black trees.

    Parameters
    ==========

    key
        Required for comparison operations.
    data
        Any valid data to be stored in the node.
    color
        0 for black and 1 for red; new nodes start red.
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available backend is used.
    """
    __slots__ = ['key', 'data', 'color']

    @classmethod
    def methods(cls):
        return ['__new__']

    def __new__(cls, key, data=None, **kwargs):
        raise_if_backend_is_not_python(
            cls, kwargs.get('backend', Backend.PYTHON))
        obj = TreeNode.__new__(cls, key, data)
        obj.color = 1
        return obj

class BinomialTreeNode(TreeNode):
    """
    Represents a node in binomial trees.

    Parameters
    ==========

    key
        Required for comparison operations.
    data
        Any valid data to be stored in the node.
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available backend is used.

    Note
    ====

    Data members:

    parent: BinomialTreeNode
        The parent of this node.
    children: DynamicOneDimensionalArray
        References to the children of this node.
    is_root: bool, by default, False
        True if this node is the root of its tree.
    """
    __slots__ = ['parent', 'key', 'children', 'data', 'is_root']

    @classmethod
    def methods(cls):
        return ['__new__', 'add_children', '__str__']

    def __new__(cls, key, data=None, **kwargs):
        raise_if_backend_is_not_python(
            cls, kwargs.get('backend', Backend.PYTHON))
        # Imported here to avoid a circular import with linear_data_structures.
        from pydatastructs.linear_data_structures.arrays import DynamicOneDimensionalArray
        obj = Node.__new__(cls)
        obj.data, obj.key = data, key
        obj.children, obj.parent, obj.is_root = (
            DynamicOneDimensionalArray(BinomialTreeNode, 0),
            None,
            False
        )
        return obj

    def add_children(self, *children):
        """
        Adds the given nodes as children of the current node.
        """
        for child in children:
            self.children.append(child)
            child.parent = self

    def __str__(self):
        """
        For printing the key and data.
        """
        return str((self.key, self.data))

class MAryTreeNode(TreeNode):
    """
    Represents a node in M-ary trees.

    Parameters
    ==========

    key
        Required for comparison operations.
    data
        Any valid data to be stored in the node.
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available backend is used.

    Note
    ====

    Data members:

    children: DynamicOneDimensionalArray
        Indices of the children of this node in the M-ary tree array.
    is_root: bool, by default, False
        True if this node is the root of the tree.
    """
    __slots__ = ['key', 'children', 'data', 'is_root']

    @classmethod
    def methods(cls):
        return ['__new__', 'add_children', '__str__']

    def __new__(cls, key, data=None, **kwargs):
        raise_if_backend_is_not_python(
            cls, kwargs.get('backend', Backend.PYTHON))
        from pydatastructs.linear_data_structures.arrays import DynamicOneDimensionalArray
        obj = Node.__new__(cls)
        obj.data = data
        obj.key = key
        obj.is_root = False
        obj.children = DynamicOneDimensionalArray(int, 0)
        return obj

    def add_children(self, *children):
        """
        Adds the given child indices to the current node.
        """
        for child in children:
            self.children.append(child)

    def __str__(self):
        return str((self.key, self.data))


class LinkedListNode(Node):
    """
    Represents a node in linked lists.

    Parameters
    ==========

    key
        Any valid identifier to uniquely identify
        the node in the linked list.
    data
        Any valid data to be stored in the node.
    links
        Names of the attributes to be used as links to other nodes.
    addrs
        Nodes to be assigned to each attribute in ``links``.
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available backend is used.
    """
    @classmethod
    def methods(cls):
        return ['__new__', '__str__']

    def __new__(cls, key, data=None, links=None, addrs=None,
                **kwargs):
        raise_if_backend_is_not_python(
            cls, kwargs.get('backend', Backend.PYTHON))
        if links is None:
            links = ['next']
        if addrs is None:
            addrs = [None]
        obj = Node.__new__(cls)
        obj.key = key
        obj.data = data
        for link, addr in zip(links, addrs):
            obj.__setattr__(link, addr)
        obj.__slots__ = ['key', 'data'] + links
        return obj

    def __str__(self):
        return str((self.key, self.data))

class SkipNode(Node):
    """
    Represents a node in skip lists.

    Parameters
    ==========

    key
        Any valid identifier to uniquely identify
        the node in the skip list.
    data
        Any valid data to be stored in the node.
    next
        The node lying just forward of the current node.
        Optional, by default, None.
    down
        The node lying just below the current node.
        Optional, by default, None.
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available backend is used.
    """

    __slots__ = ['key', 'data', 'next', 'down']

    def __new__(cls, key, data=None, next=None, down=None,
                **kwargs):
        raise_if_backend_is_not_python(
            cls, kwargs.get('backend', Backend.PYTHON))
        obj = Node.__new__(cls)
        obj.key, obj.data = key, data
        obj.next, obj.down = next, down
        return obj

    def __str__(self):
        return str((self.key, self.data))

class GraphNode(Node):
    """
    Abstract class for graph nodes/vertices.
    """
    def __str__(self):
        return str((self.name, self.data))

class AdjacencyListGraphNode(GraphNode):
    """
    Represents nodes for the adjacency-list implementation of graphs.

    Parameters
    ==========

    name: str
        The name of the node by which it is identified
        in the graph. Must be unique.
    data
        The data to be stored at each graph node.
    adjacency_list: list
        Any valid iterator to initialize the adjacent
        nodes of the current node.
        Optional, by default, None.
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available backend is used.
    """
    @classmethod
    def methods(cls):
        return ['__new__', 'add_adjacent_node',
                'remove_adjacent_node']

    def __new__(cls, name, data=None, adjacency_list=None,
                **kwargs):
        # FIX: the original default was a mutable ``[]`` shared across
        # calls; ``None`` is now the sentinel and behaves identically.
        if adjacency_list is None:
            adjacency_list = []
        backend = kwargs.get('backend', Backend.PYTHON)
        if backend == Backend.PYTHON:
            obj = GraphNode.__new__(cls)
            obj.name, obj.data = str(name), data
            obj._impl = 'adjacency_list'
            if len(adjacency_list) > 0:
                for node in adjacency_list:
                    obj.__setattr__(node.name, node)
            obj.adjacent = adjacency_list if len(adjacency_list) > 0 \
                else []
            return obj
        else:
            # Lazy import: the compiled extension is optional.
            from pydatastructs.utils._backend.cpp import _graph_utils
            return _graph_utils.AdjacencyListGraphNode(name, data, adjacency_list)

    def add_adjacent_node(self, name, data=None):
        """
        Adds an adjacent node to the current node's
        adjacency list with the given name and data.
        """
        if hasattr(self, name):
            getattr(self, name).data = data
        else:
            new_node = AdjacencyListGraphNode(name, data)
            self.__setattr__(new_node.name, new_node)
            self.adjacent.append(new_node.name)

    def remove_adjacent_node(self, name):
        """
        Removes the node with the given name from the adjacency list.
        """
        if not hasattr(self, name):
            raise ValueError("%s is not adjacent to %s"%(name, self.name))
        self.adjacent.remove(name)
        delattr(self, name)

class AdjacencyMatrixGraphNode(GraphNode):
    """
    Represents nodes for the adjacency-matrix implementation of graphs.

    Parameters
    ==========

    name: str
        The index of the node in the AdjacencyMatrix.
    data
        The data to be stored at each graph node.
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available backend is used.
    """
    __slots__ = ['name', 'data']

    @classmethod
    def methods(cls):
        return ['__new__']

    def __new__(cls, name, data=None,
                **kwargs):
        backend = kwargs.get('backend', Backend.PYTHON)
        if backend == Backend.PYTHON:
            obj = GraphNode.__new__(cls)
            obj.name, obj.data, obj.is_connected = \
                str(name), data, None
            obj._impl = 'adjacency_matrix'
            return obj
        else:
            from pydatastructs.utils._backend.cpp import _graph_utils
            return _graph_utils.AdjacencyMatrixGraphNode(str(name), data)

class GraphEdge(object):
    """
    Represents an edge in graphs.

    Parameters
    ==========

    node1: GraphNode or its child classes
        The source node of the edge.
    node2: GraphNode or its child classes
        The target node of the edge.
    value
        Optional weight/value carried by the edge.
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available backend is used.
    """
    @classmethod
    def methods(cls):
        return ['__new__', '__str__']

    def __new__(cls, node1, node2, value=None,
                **kwargs):
        backend = kwargs.get('backend', Backend.PYTHON)
        if backend == Backend.PYTHON:
            obj = object.__new__(cls)
            obj.source, obj.target = node1, node2
            obj.value = value
            return obj
        else:
            from pydatastructs.utils._backend.cpp import _graph_utils
            return _graph_utils.GraphEdge(node1, node2, value)

    def __str__(self):
        return str((self.source.name, self.target.name))

class Set(object):
    """
    Represents a set in a forest of disjoint sets.

    Parameters
    ==========

    key: Hashable python object
        The key which uniquely identifies the set.
    data: Python object
        The data to be stored in the set.
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available backend is used.
    """

    __slots__ = ['parent', 'size', 'key', 'data']

    @classmethod
    def methods(cls):
        return ['__new__']

    def __new__(cls, key, data=None,
                **kwargs):
        raise_if_backend_is_not_python(
            cls, kwargs.get('backend', Backend.PYTHON))
        obj = object.__new__(cls)
        obj.key = key
        obj.data = data
        obj.parent, obj.size = [None]*2
        return obj

class TrieNode(Node):
    """
    Represents nodes in the trie data structure.

    Parameters
    ==========

    char
        The character stored in the current node.
        Optional, by default, None.
    backend: pydatastructs.Backend
        The backend to be used.
        Optional, by default, the best available backend is used.
    """

    __slots__ = ['char', '_children', 'is_terminal']

    @classmethod
    def methods(cls):
        return ['__new__', 'add_child', 'get_child', 'remove_child']

    def __new__(cls, char=None, **kwargs):
        raise_if_backend_is_not_python(
            cls, kwargs.get('backend', Backend.PYTHON))
        obj = Node.__new__(cls)
        obj.char = char
        obj._children = {}
        obj.is_terminal = False
        return obj

    def add_child(self, trie_node) -> None:
        self._children[trie_node.char] = trie_node

    def get_child(self, char: str):
        # Returns None when no child exists for ``char``.
        return self._children.get(char, None)

    def remove_child(self, char: str) -> None:
        # NOTE(review): raises KeyError for a missing child — preserved.
        self._children.pop(char)

def _comp(u, v, tcomp):
    """
    Overloaded comparator for comparing two values
    where either of them can be ``None``.
    """
    if u is None and v is not None:
        return False
    elif u is not None and v is None:
        return True
    elif u is None and v is None:
        return False
    else:
        return tcomp(u, v)

def _check_range_query_inputs(interval, bounds):
    """
    Validates a half-open query range against array ``bounds``.

    Raises
    ======

    ValueError
        If the range is empty.
    IndexError
        If the range falls outside ``bounds``.
    """
    # Parameter renamed from ``input`` which shadowed the builtin.
    start, end = interval
    if start >= end:
        raise ValueError("Input (%d, %d) range is empty."%(start, end))
    if start < bounds[0] or end > bounds[1]:
        raise IndexError("Input (%d, %d) range is out of "
                         "bounds of array indices (%d, %d)."
                         %(start, end, bounds[0], bounds[1]))

def minimum(x_y):
    """Binary minimum that tolerates ``None`` operands."""
    if len(x_y) == 1:
        return x_y[0]

    x, y = x_y
    if x is None or y is None:
        return x if y is None else y

    return min(x, y)

def greatest_common_divisor(x_y):
    """Binary gcd that tolerates ``None`` operands."""
    if len(x_y) == 1:
        return x_y[0]

    x, y = x_y
    if x is None or y is None:
        return x if y is None else y

    return math.gcd(x, y)

def summation(x_y):
    """Binary sum that tolerates ``None`` operands."""
    if len(x_y) == 1:
        return x_y[0]

    x, y = x_y
    if x is None or y is None:
        return x if y is None else y

    return x + y

# ============================================================
# pydatastructs/utils/raises_util.py
# ============================================================

def raises(exception, code):
    """
    Utility for testing exceptions.

    Parameters
    ==========

    exception
        A valid python exception.
    code: lambda
        Code that causes the exception.
    """
    # Lazy import: pytest is only needed while the test-suite runs.
    import pytest
    with pytest.raises(exception):
        code()
    return True
# ============================================================
# pydatastructs/utils/tests/test_code_quality.py
# Reconstructed from whitespace-mangled patch payload.
# ============================================================
import os, re, sys, pydatastructs, inspect
from typing import Type
import pytest

def _list_files(checker):
    """Walk the package root and collect every file accepted by *checker*."""
    root_path = os.path.abspath(
        os.path.join(
            os.path.split(__file__)[0],
            os.pardir, os.pardir))
    code_files = []
    for (dirpath, _, filenames) in os.walk(root_path):
        for _file in filenames:
            if checker(_file):
                code_files.append(os.path.join(dirpath, _file))
    return code_files

# All Python and C++ sources are subject to the checks below.
checker = lambda _file: (re.match(r".*\.py$", _file) or
                         re.match(r".*\.cpp$", _file) or
                         re.match(r".*\.hpp$", _file))
code_files = _list_files(checker)

def test_trailing_white_spaces():
    """No source line may end with spaces or tabs."""
    messages = [("The following places in your code "
                 "end with white spaces.")]
    msg = "{}:{}"
    for file_path in code_files:
        # ``with`` guarantees the handle is closed even if an assert fires.
        with open(file_path, "r") as file:
            line = file.readline()
            line_number = 1
            while line != "":
                if line.endswith(" \n") or line.endswith("\t\n") \
                    or line.endswith(" ") or line.endswith("\t"):
                    messages.append(msg.format(file_path, line_number))
                line = file.readline()
                line_number += 1

    if len(messages) > 1:
        assert False, '\n'.join(messages)

def test_final_new_lines():
    """Every source file must end with exactly one newline."""
    messages = [("The following files in your code "
                 "do not end with a single new line.")]
    msg1 = "No new line in {}:{}"
    msg2 = "More than one new line in {}:{}"
    for file_path in code_files:
        with open(file_path, "r") as file:
            lines = []
            line = file.readline()
            while line != "":
                lines.append(line)
                line = file.readline()
            if lines:
                if lines[-1][-1] != "\n":
                    messages.append(msg1.format(file_path, len(lines)))
                # NOTE(review): assumes >= 2 lines whenever the last line is
                # blank; a file containing only "\n" would raise IndexError.
                if lines[-1] == "\n" and lines[-2][-1] == "\n":
                    messages.append(msg2.format(file_path, len(lines)))

    if len(messages) > 1:
        assert False, '\n'.join(messages)

def test_comparison_True_False_None():
    """True/False/None must be compared with ``is``, not ``==``/``!=``."""
    messages = [("The following places in your code "
                 "use `!=` or `==` for comparing True/False/None."
                 "Please use `is` instead.")]
    msg = "{}:{}"
    checker = lambda _file: re.match(r".*\.py$", _file)
    py_files = _list_files(checker)
    for file_path in py_files:
        # This file deliberately contains the offending literals.
        if file_path.find("test_code_quality.py") == -1:
            with open(file_path, "r") as file:
                line = file.readline()
                line_number = 1
                while line != "":
                    if ((line.find("== True") != -1) or
                        (line.find("== False") != -1) or
                        (line.find("== None") != -1) or
                        (line.find("!= True") != -1) or
                        (line.find("!= False") != -1) or
                        (line.find("!= None") != -1)):
                        messages.append(msg.format(file_path, line_number))
                    line = file.readline()
                    line_number += 1

    if len(messages) > 1:
        assert False, '\n'.join(messages)

@pytest.mark.xfail
def test_reinterpret_cast():
    """C++ sources should use reinterpret_cast for pointer casts."""

    def is_variable(token):
        # Parameter renamed from ``str`` which shadowed the builtin.
        for ch in token:
            if not (ch == '_' or ch.isalnum()):
                return False
        return True

    checker = lambda _file: (re.match(r".*\.cpp$", _file) or
                             re.match(r".*\.hpp$", _file))
    cpp_files = _list_files(checker)
    messages = [("The following lines should use reinterpret_cast"
                 " to cast pointers from one type to another")]
    msg = "Casting to {} at {}:{}"
    for file_path in cpp_files:
        with open(file_path, "r") as file:
            line = file.readline()
            line_number = 1
            while line != "":
                found_open = False
                between_open_close = ""
                for char in line:
                    if char == '(':
                        found_open = True
                    elif char == ')':
                        # A parenthesised ``(T*)`` token is a C-style cast.
                        if (between_open_close and
                            between_open_close[-1] == '*' and
                            is_variable(between_open_close[:-1])):
                            messages.append(msg.format(between_open_close[:-1],
                                                       file_path, line_number))
                        between_open_close = ""
                        found_open = False
                    elif char != ' ' and found_open:
                        between_open_close += char
                line = file.readline()
                line_number += 1

    if len(messages) > 1:
        assert False, '\n'.join(messages)

def test_presence_of_tabs():
    """Sources must be indented with spaces, never tabs."""
    messages = [("The following places in your code "
                 "use tabs instead of spaces.")]
    msg = "{}:{}"
    for file_path in code_files:
        with open(file_path, "r") as file:
            line_number = 1
            line = file.readline()
            while line != "":
                if (line.find('\t') != -1):
                    messages.append(msg.format(file_path, line_number))
                line = file.readline()
                line_number += 1

    if len(messages) > 1:
        assert False, '\n'.join(messages)

def _apis():
    """Return every public API object that the checks below must cover."""
    import pydatastructs as pyds
    return [
        pyds.graphs.adjacency_list.AdjacencyList,
        pyds.graphs.adjacency_matrix.AdjacencyMatrix,
        pyds.DoublyLinkedList, pyds.SinglyLinkedList,
        pyds.SinglyCircularLinkedList,
        pyds.DoublyCircularLinkedList,
        pyds.OneDimensionalArray, pyds.MultiDimensionalArray,
        pyds.DynamicOneDimensionalArray,
        pyds.trees.BinaryTree, pyds.BinarySearchTree,
        pyds.AVLTree, pyds.SplayTree, pyds.BinaryTreeTraversal,
        pyds.DHeap, pyds.BinaryHeap, pyds.TernaryHeap, pyds.BinomialHeap,
        pyds.MAryTree, pyds.OneDimensionalSegmentTree,
        pyds.Queue, pyds.miscellaneous_data_structures.queue.ArrayQueue,
        pyds.miscellaneous_data_structures.queue.LinkedListQueue,
        pyds.PriorityQueue,
        pyds.miscellaneous_data_structures.queue.LinkedListPriorityQueue,
        pyds.miscellaneous_data_structures.queue.BinaryHeapPriorityQueue,
        pyds.miscellaneous_data_structures.queue.BinomialHeapPriorityQueue,
        pyds.Stack, pyds.miscellaneous_data_structures.stack.ArrayStack,
        pyds.miscellaneous_data_structures.stack.LinkedListStack,
        pyds.DisjointSetForest, pyds.BinomialTree, pyds.TreeNode, pyds.MAryTreeNode,
        pyds.LinkedListNode, pyds.BinomialTreeNode, pyds.AdjacencyListGraphNode,
        pyds.AdjacencyMatrixGraphNode, pyds.GraphEdge, pyds.Set, pyds.BinaryIndexedTree,
        pyds.CartesianTree, pyds.CartesianTreeNode, pyds.Treap, pyds.RedBlackTreeNode, pyds.RedBlackTree,
        pyds.Trie, pyds.TrieNode, pyds.SkipList, pyds.RangeQueryStatic, pyds.RangeQueryDynamic, pyds.SparseTable,
        pyds.miscellaneous_data_structures.segment_tree.OneDimensionalArraySegmentTree,
        pyds.bubble_sort, pyds.linear_search, pyds.binary_search, pyds.jump_search,
        pyds.selection_sort, pyds.insertion_sort, pyds.quick_sort, pyds.intro_sort]

def test_public_api():
    """Each class must re-declare every method listed by its ``methods()``."""
    pyds = pydatastructs
    apis = _apis()
    print("\n\nAPI Report")
    print("==========")
    for name in apis:
        if inspect.isclass(name):
            _class = name
            mro = _class.__mro__
            must_methods = _class.methods()
            print("\n" + str(name))
            print("Methods Implemented")
            print(must_methods)
            print("Parent Classes")
            print(mro[1:])
            for supercls in mro:
                if supercls != _class:
                    for method in must_methods:
                        # A method inherited unchanged from a parent means the
                        # class claims an implementation it does not have.
                        if hasattr(supercls, method) and \
                            getattr(supercls, method) == \
                            getattr(_class, method):
                            assert False, ("%s class doesn't "
                                "have %s method implemented."%(
                                    _class, method
                                ))

def test_backend_argument_message():
    """APIs without a C++ backend must mention themselves in the error."""
    import pydatastructs as pyds
    backend_implemented = [
        pyds.OneDimensionalArray,
        pyds.DynamicOneDimensionalArray,
        pyds.quick_sort,
        pyds.AdjacencyListGraphNode,
        pyds.AdjacencyMatrixGraphNode,
        pyds.GraphEdge
    ]

    def call_and_raise(api, pos_args_count=0):
        # Retry with more positional arguments until the signature matches.
        try:
            if pos_args_count == 0:
                api(backend=None)
            elif pos_args_count == 1:
                api(None, backend=None)
            elif pos_args_count == 2:
                api(None, None, backend=None)
        except ValueError as value_error:
            assert str(api) in value_error.args[0]
        except TypeError as type_error:
            max_pos_args_count = 2
            if pos_args_count <= max_pos_args_count:
                call_and_raise(api, pos_args_count + 1)
            else:
                raise type_error

    apis = _apis()
    for api in apis:
        if api not in backend_implemented:
            call_and_raise(api, 0)
# ============================================================
# pydatastructs/utils/tests/test_misc_util.py
# Reconstructed from whitespace-mangled patch payload.
# ============================================================
from pydatastructs.utils import (TreeNode, AdjacencyListGraphNode, AdjacencyMatrixGraphNode,
    GraphEdge, BinomialTreeNode, MAryTreeNode, CartesianTreeNode, RedBlackTreeNode, SkipNode)
from pydatastructs.utils.raises_util import raises
from pydatastructs.utils.misc_util import Backend

def test_cpp_TreeNode():
    """C++-backend TreeNode prints like the Python one."""
    n = TreeNode(1, 100, backend=Backend.CPP)
    assert str(n) == "(None, 1, 100, None)"

def test_AdjacencyListGraphNode():
    """Python and C++ adjacency-list nodes: construction and (dis)connection."""
    g_1 = AdjacencyListGraphNode('g_1', 1)
    g_2 = AdjacencyListGraphNode('g_2', 2)
    g = AdjacencyListGraphNode('g', 0, adjacency_list=[g_1, g_2])
    g.add_adjacent_node('g_3', 3)
    assert g.g_1.name == 'g_1'
    assert g.g_2.name == 'g_2'
    assert g.g_3.name == 'g_3'
    g.remove_adjacent_node('g_3')
    assert hasattr(g, 'g_3') is False
    assert raises(ValueError, lambda: g.remove_adjacent_node('g_3'))
    # Re-adding an existing node only updates its data.
    g.add_adjacent_node('g_1', 4)
    assert g.g_1.data == 4
    assert str(g) == "('g', 0)"

    h_1 = AdjacencyListGraphNode('h_1', 1, backend = Backend.CPP)
    h_2 = AdjacencyListGraphNode('h_2', 2, backend = Backend.CPP)
    assert str(h_1) == "('h_1', 1)"
    h = AdjacencyListGraphNode('h', 0, adjacency_list = [h_1, h_2], backend = Backend.CPP)
    h.add_adjacent_node('h_3', 3)
    assert h.adjacent['h_1'].name == 'h_1'
    assert h.adjacent['h_2'].name == 'h_2'
    assert h.adjacent['h_3'].name == 'h_3'
    h.remove_adjacent_node('h_3')
    assert 'h_3' not in h.adjacent
    assert raises(ValueError, lambda: h.remove_adjacent_node('h_3'))
    h.add_adjacent_node('h_1', 4)
    assert h.adjacent['h_1'] == 4
    assert str(h) == "('h', 0)"
    h_5 = AdjacencyListGraphNode('h_5', h_1, backend = Backend.CPP)
    assert h_5.data == h_1

def test_AdjacencyMatrixGraphNode():
    """Both backends stringify matrix nodes identically."""
    g = AdjacencyMatrixGraphNode("1", 3)
    g2 = AdjacencyMatrixGraphNode("1", 3, backend = Backend.CPP)
    assert str(g) == "('1', 3)"
    assert str(g2) == "('1', 3)"
    g3 = AdjacencyListGraphNode("3", g2, backend = Backend.CPP)
    assert g3.data == g2


def test_GraphEdge():
    """Edge stringification; the C++ backend also prints the value."""
    g_1 = AdjacencyListGraphNode('g_1', 1)
    g_2 = AdjacencyListGraphNode('g_2', 2)
    e = GraphEdge(g_1, g_2, value=2)
    assert str(e) == "('g_1', 'g_2')"

    h_1 = AdjacencyListGraphNode('h_1', 1, backend = Backend.CPP)
    h_2 = AdjacencyListGraphNode('h_2', 2, backend = Backend.CPP)
    e2 = GraphEdge(h_1, h_2, value = 2, backend = Backend.CPP)
    assert str(e2) == "('h_1', 'h_2', 2)"

def test_BinomialTreeNode():
    b = BinomialTreeNode(1, 1)
    b.add_children(*[BinomialTreeNode(i, i) for i in range(2, 10)])
    assert str(b) == '(1, 1)'
    assert str(b.children) == "['(2, 2)', '(3, 3)', '(4, 4)', '(5, 5)', '(6, 6)', '(7, 7)', '(8, 8)', '(9, 9)']"

def test_MAryTreeNode():
    m = MAryTreeNode(1, 1)
    m.add_children(*list(range(2, 10)))
    assert str(m) == "(1, 1)"
    assert str(m.children) == "['2', '3', '4', '5', '6', '7', '8', '9']"

def test_CartesianTreeNode():
    c = CartesianTreeNode(1, 1, 1)
    assert str(c) == "(None, 1, 1, 1, None)"

def test_RedBlackTreeNode():
    c = RedBlackTreeNode(1, 1)
    assert str(c) == "(None, 1, 1, None)"

def test_SkipNode():
    c = SkipNode(1)
    assert str(c) == '(1, None)'
.../tests/test_linked_lists.py | 193 -- .../miscellaneous_data_structures/__init__.py | 51 - .../_backend/__init__.py | 0 .../algorithms.py | 335 --- .../binomial_trees.py | 91 - .../disjoint_set.py | 143 -- .../miscellaneous_data_structures/multiset.py | 42 - .../miscellaneous_data_structures/queue.py | 498 ---- .../segment_tree.py | 225 -- .../sparse_table.py | 108 - .../miscellaneous_data_structures/stack.py | 200 -- .../tests/__init__.py | 0 .../tests/test_binomial_trees.py | 17 - .../tests/test_disjoint_set.py | 70 - .../tests/test_multiset.py | 39 - .../tests/test_queue.py | 116 - .../tests/test_range_query_dynamic.py | 71 - .../tests/test_range_query_static.py | 63 - .../tests/test_stack.py | 77 - .../pydatastructs/strings/__init__.py | 18 - .../pydatastructs/strings/algorithms.py | 247 -- .../pydatastructs/strings/tests/__init__.py | 0 .../strings/tests/test_algorithms.py | 76 - .../pydatastructs/strings/tests/test_trie.py | 49 - .../pydatastructs/strings/trie.py | 201 -- .../pydatastructs/trees/__init__.py | 40 - .../pydatastructs/trees/_backend/__init__.py | 0 .../pydatastructs/trees/binary_trees.py | 1888 ---------------- .../pydatastructs/trees/heaps.py | 582 ----- .../pydatastructs/trees/m_ary_trees.py | 172 -- .../trees/space_partitioning_trees.py | 242 -- .../pydatastructs/trees/tests/__init__.py | 0 .../trees/tests/test_binary_trees.py | 820 ------- .../pydatastructs/trees/tests/test_heaps.py | 236 -- .../trees/tests/test_m_ary_trees.py | 5 - .../tests/test_space_partitioning_tree.py | 20 - .../pydatastructs/utils/__init__.py | 29 - .../pydatastructs/utils/_backend/__init__.py | 0 .../pydatastructs/utils/misc_util.py | 632 ------ .../pydatastructs/utils/raises_util.py | 17 - .../pydatastructs/utils/testing_util.py | 83 - .../pydatastructs/utils/tests/__init__.py | 0 .../utils/tests/test_code_quality.py | 239 -- .../utils/tests/test_misc_util.py | 84 - .../site-packages/pydatastructs/__init__.py | 8 - .../pydatastructs/graphs/__init__.py | 28 - 
.../pydatastructs/graphs/_backend/__init__.py | 0 .../pydatastructs/graphs/adjacency_list.py | 101 - .../pydatastructs/graphs/adjacency_matrix.py | 100 - .../pydatastructs/graphs/algorithms.py | 1386 ------------ .../pydatastructs/graphs/graph.py | 163 -- .../pydatastructs/graphs/tests/__init__.py | 0 .../graphs/tests/test_adjacency_list.py | 83 - .../graphs/tests/test_adjacency_matrix.py | 53 - .../graphs/tests/test_algorithms.py | 596 ----- .../linear_data_structures/__init__.py | 53 - .../_backend/__init__.py | 0 .../linear_data_structures/algorithms.py | 2010 ----------------- .../linear_data_structures/arrays.py | 473 ---- .../linear_data_structures/linked_lists.py | 819 ------- .../linear_data_structures/tests/__init__.py | 0 .../tests/test_algorithms.py | 423 ---- .../tests/test_arrays.py | 157 -- .../tests/test_linked_lists.py | 193 -- .../miscellaneous_data_structures/__init__.py | 51 - .../_backend/__init__.py | 0 .../algorithms.py | 335 --- .../binomial_trees.py | 91 - .../disjoint_set.py | 143 -- .../miscellaneous_data_structures/multiset.py | 42 - .../miscellaneous_data_structures/queue.py | 498 ---- .../segment_tree.py | 225 -- .../sparse_table.py | 108 - .../miscellaneous_data_structures/stack.py | 200 -- .../tests/__init__.py | 0 .../tests/test_binomial_trees.py | 17 - .../tests/test_disjoint_set.py | 70 - .../tests/test_multiset.py | 39 - .../tests/test_queue.py | 116 - .../tests/test_range_query_dynamic.py | 71 - .../tests/test_range_query_static.py | 63 - .../tests/test_stack.py | 77 - .../pydatastructs/strings/__init__.py | 18 - .../pydatastructs/strings/algorithms.py | 247 -- .../pydatastructs/strings/tests/__init__.py | 0 .../strings/tests/test_algorithms.py | 76 - .../pydatastructs/strings/tests/test_trie.py | 49 - .../pydatastructs/strings/trie.py | 201 -- .../pydatastructs/trees/__init__.py | 40 - .../pydatastructs/trees/_backend/__init__.py | 0 .../pydatastructs/trees/binary_trees.py | 1888 ---------------- .../pydatastructs/trees/heaps.py 
| 582 ----- .../pydatastructs/trees/m_ary_trees.py | 172 -- .../trees/space_partitioning_trees.py | 242 -- .../pydatastructs/trees/tests/__init__.py | 0 .../trees/tests/test_binary_trees.py | 820 ------- .../pydatastructs/trees/tests/test_heaps.py | 236 -- .../trees/tests/test_m_ary_trees.py | 5 - .../tests/test_space_partitioning_tree.py | 20 - .../pydatastructs/utils/__init__.py | 29 - .../pydatastructs/utils/_backend/__init__.py | 0 .../pydatastructs/utils/misc_util.py | 632 ------ .../pydatastructs/utils/raises_util.py | 17 - .../pydatastructs/utils/testing_util.py | 83 - .../pydatastructs/utils/tests/__init__.py | 0 .../utils/tests/test_code_quality.py | 239 -- .../utils/tests/test_misc_util.py | 84 - pydatastructs/graphs/_backend/cpp/graph.cpp | 52 +- pydatastructs/graphs/algorithms.py | 6 +- pydatastructs/graphs/meson.build | 13 +- pydatastructs/graphs/tests/test_algorithms.py | 1 - .../_backend/cpp/AdjacencyListGraphNode.hpp | 45 - .../_backend/cpp/AdjacencyMatrixGraphNode.hpp | 40 - .../utils/_backend/cpp/GraphEdge.hpp | 40 - .../utils/_backend/cpp/GraphNode.hpp | 41 +- .../utils/_backend/cpp/graph_utils.cpp | 161 ++ pydatastructs/utils/meson.build | 12 +- pydatastructs/utils/misc_util.py | 12 +- 138 files changed, 221 insertions(+), 29165 deletions(-) delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/__init__.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/__init__.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/_backend/__init__.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/adjacency_list.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/adjacency_matrix.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/algorithms.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/graph.py delete 
mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/tests/__init__.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_adjacency_list.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_adjacency_matrix.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_algorithms.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/__init__.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/_backend/__init__.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/algorithms.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/arrays.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/linked_lists.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/__init__.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_algorithms.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_arrays.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_linked_lists.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/__init__.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/_backend/__init__.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/algorithms.py delete mode 100644 
build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/binomial_trees.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/disjoint_set.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/multiset.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/queue.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/segment_tree.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/sparse_table.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/stack.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/__init__.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_binomial_trees.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_disjoint_set.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_multiset.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_queue.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_range_query_dynamic.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_range_query_static.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_stack.py delete mode 100644 
build-install/usr/lib/python3.12/site-packages/pydatastructs/strings/__init__.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/strings/algorithms.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/strings/tests/__init__.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/strings/tests/test_algorithms.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/strings/tests/test_trie.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/strings/trie.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/__init__.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/_backend/__init__.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/binary_trees.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/heaps.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/m_ary_trees.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/space_partitioning_trees.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/tests/__init__.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/tests/test_binary_trees.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/tests/test_heaps.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/tests/test_m_ary_trees.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/tests/test_space_partitioning_tree.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/__init__.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/_backend/__init__.py delete mode 
100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/misc_util.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/raises_util.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/testing_util.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/tests/__init__.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/tests/test_code_quality.py delete mode 100644 build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/tests/test_misc_util.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/__init__.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/graphs/__init__.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/graphs/_backend/__init__.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/graphs/adjacency_list.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/graphs/adjacency_matrix.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/graphs/algorithms.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/graphs/graph.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/graphs/tests/__init__.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/graphs/tests/test_adjacency_list.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/graphs/tests/test_adjacency_matrix.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/graphs/tests/test_algorithms.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/linear_data_structures/__init__.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/linear_data_structures/_backend/__init__.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/linear_data_structures/algorithms.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/linear_data_structures/arrays.py delete mode 100644 
lib/python3.12/site-packages/pydatastructs/linear_data_structures/linked_lists.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/__init__.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_algorithms.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_arrays.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_linked_lists.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/__init__.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/_backend/__init__.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/algorithms.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/binomial_trees.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/disjoint_set.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/multiset.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/queue.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/segment_tree.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/sparse_table.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/stack.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/__init__.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_binomial_trees.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_disjoint_set.py delete mode 100644 
lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_multiset.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_queue.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_range_query_dynamic.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_range_query_static.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_stack.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/strings/__init__.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/strings/algorithms.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/strings/tests/__init__.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/strings/tests/test_algorithms.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/strings/tests/test_trie.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/strings/trie.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/trees/__init__.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/trees/_backend/__init__.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/trees/binary_trees.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/trees/heaps.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/trees/m_ary_trees.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/trees/space_partitioning_trees.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/trees/tests/__init__.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/trees/tests/test_binary_trees.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/trees/tests/test_heaps.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/trees/tests/test_m_ary_trees.py delete mode 100644 
lib/python3.12/site-packages/pydatastructs/trees/tests/test_space_partitioning_tree.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/utils/__init__.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/utils/_backend/__init__.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/utils/misc_util.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/utils/raises_util.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/utils/testing_util.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/utils/tests/__init__.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/utils/tests/test_code_quality.py delete mode 100644 lib/python3.12/site-packages/pydatastructs/utils/tests/test_misc_util.py diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index acf6e6105..c1b995530 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -44,14 +44,11 @@ jobs: CXXFLAGS: "-std=c++17 --coverage" CFLAGS: "--coverage" run: | - meson setup build --prefix=$PWD - meson compile -C build - meson install -C build --skip-subprojects - + spin build -v # coverage tests - name: Run tests run: | - python -m pytest pydatastructs/ -v --ignore-glob="**/benchmarks/**" + spin test -v - name: Capture Coverage Data with lcov run: | @@ -110,13 +107,11 @@ jobs: env: CXXFLAGS: "-std=c++17" run: | - meson setup build --prefix=$PWD - meson compile -C build - meson install -C build --skip-subprojects + spin build -v - name: Run tests run: | - python -m pytest pydatastructs/ -v --ignore-glob="**/benchmarks/**" + spin test -v - name: Build Documentation run: | @@ -156,12 +151,10 @@ jobs: MACOSX_DEPLOYMENT_TARGET: 11.0 CXXFLAGS: "-std=c++17" run: | - meson setup build --prefix=$PWD - meson compile -C build - meson install -C build --skip-subprojects + spin build -v - name: Run tests run: | - python -m pytest pydatastructs/ -v --ignore-glob="**/benchmarks/**" + spin test -v - name: Build Documentation run: 
| diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/__init__.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/__init__.py deleted file mode 100644 index 27cc5a202..000000000 --- a/build-install/usr/lib/python3.12/site-packages/pydatastructs/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -from .utils import * -from .linear_data_structures import * -from .trees import * -from .miscellaneous_data_structures import * -from .graphs import * -from .strings import * - -__version__ = "1.0.1-dev" diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/__init__.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/__init__.py deleted file mode 100644 index 21e0a5f35..000000000 --- a/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/__init__.py +++ /dev/null @@ -1,28 +0,0 @@ -__all__ = [] - -from . import graph -from .graph import ( - Graph -) -__all__.extend(graph.__all__) - -from . import algorithms -from . import adjacency_list -from . 
import adjacency_matrix - -from .algorithms import ( - breadth_first_search, - breadth_first_search_parallel, - minimum_spanning_tree, - minimum_spanning_tree_parallel, - strongly_connected_components, - depth_first_search, - shortest_paths, - all_pair_shortest_paths, - topological_sort, - topological_sort_parallel, - max_flow, - find_bridges -) - -__all__.extend(algorithms.__all__) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/_backend/__init__.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/_backend/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/adjacency_list.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/adjacency_list.py deleted file mode 100644 index bd901b380..000000000 --- a/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/adjacency_list.py +++ /dev/null @@ -1,101 +0,0 @@ -from pydatastructs.graphs.graph import Graph -from pydatastructs.graphs._backend.cpp import _graph -from pydatastructs.utils.misc_util import ( - GraphEdge, Backend, raise_if_backend_is_not_python) - -__all__ = [ - 'AdjacencyList' -] - -class AdjacencyList(Graph): - """ - Adjacency list implementation of graphs. 
- - See also - ======== - - pydatastructs.graphs.graph.Graph - """ - def __new__(cls, *vertices, **kwargs): - - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.PYTHON: - obj = object.__new__(cls) - for vertex in vertices: - obj.__setattr__(vertex.name, vertex) - obj.vertices = [vertex.name for vertex in vertices] - obj.edge_weights = {} - obj._impl = 'adjacency_list' - return obj - else: - graph = _graph.AdjacencyListGraph() - for vertice in vertices: - graph.add_vertex(vertice) - return graph - - @classmethod - def methods(self): - return ['is_adjacent', 'neighbors', - 'add_vertex', 'remove_vertex', 'add_edge', - 'get_edge', 'remove_edge', '__new__'] - - def is_adjacent(self, node1, node2): - node1 = self.__getattribute__(node1) - return hasattr(node1, node2) - - def num_vertices(self): - return len(self.vertices) - - def num_edges(self): - return sum(len(self.neighbors(v)) for v in self.vertices) - - def neighbors(self, node): - node = self.__getattribute__(node) - return [self.__getattribute__(name) for name in node.adjacent] - - def add_vertex(self, node): - if not hasattr(self, node.name): - self.vertices.append(node.name) - self.__setattr__(node.name, node) - - def remove_vertex(self, name): - delattr(self, name) - self.vertices.remove(name) - for node in self.vertices: - node_obj = self.__getattribute__(node) - if hasattr(node_obj, name): - delattr(node_obj, name) - node_obj.adjacent.remove(name) - - def add_edge(self, source, target, cost=None): - source, target = str(source), str(target) - error_msg = ("Vertex %s is not present in the graph." - "Call Graph.add_vertex to add a new" - "vertex. Graph.add_edge is only responsible" - "for adding edges and it will not add new" - "vertices on its own. 
This is done to maintain" - "clear separation between the functionality of" - "these two methods.") - if not hasattr(self, source): - raise ValueError(error_msg % (source)) - if not hasattr(self, target): - raise ValueError(error_msg % (target)) - - source, target = self.__getattribute__(source), \ - self.__getattribute__(target) - source.add_adjacent_node(target.name) - if cost is not None: - self.edge_weights[source.name + "_" + target.name] = \ - GraphEdge(source, target, cost) - - def get_edge(self, source, target): - return self.edge_weights.get( - source + "_" + target, - None) - - def remove_edge(self, source, target): - source, target = self.__getattribute__(source), \ - self.__getattribute__(target) - source.remove_adjacent_node(target.name) - self.edge_weights.pop(source.name + "_" + target.name, - None) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/adjacency_matrix.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/adjacency_matrix.py deleted file mode 100644 index 9c2326b86..000000000 --- a/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/adjacency_matrix.py +++ /dev/null @@ -1,100 +0,0 @@ -from pydatastructs.graphs.graph import Graph -from pydatastructs.graphs._backend.cpp import _graph -from pydatastructs.utils.misc_util import ( - GraphEdge, raise_if_backend_is_not_python, - Backend) - -__all__ = [ - 'AdjacencyMatrix' -] - -class AdjacencyMatrix(Graph): - """ - Adjacency matrix implementation of graphs. 
- - See also - ======== - - pydatastructs.graphs.graph.Graph - """ - def __new__(cls, *vertices, **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.PYTHON: - obj = object.__new__(cls) - obj.vertices = [vertex.name for vertex in vertices] - for vertex in vertices: - obj.__setattr__(vertex.name, vertex) - obj.matrix = {} - for vertex in vertices: - obj.matrix[vertex.name] = {} - obj.edge_weights = {} - obj._impl = 'adjacency_matrix' - return obj - else: - return _graph.AdjacencyMatrixGraph(vertices) - - @classmethod - def methods(self): - return ['is_adjacent', 'neighbors', - 'add_edge', 'get_edge', 'remove_edge', - '__new__'] - - def is_adjacent(self, node1, node2): - node1, node2 = str(node1), str(node2) - row = self.matrix.get(node1, {}) - return row.get(node2, False) is not False - - def num_vertices(self): - return len(self.vertices) - - def num_edges(self): - return sum(len(v) for v in self.matrix.values()) - - def neighbors(self, node): - node = str(node) - neighbors = [] - row = self.matrix.get(node, {}) - for node, presence in row.items(): - if presence: - neighbors.append(self.__getattribute__( - str(node))) - return neighbors - - def add_vertex(self, node): - raise NotImplementedError("Currently we allow " - "adjacency matrix for static graphs only") - - def remove_vertex(self, node): - raise NotImplementedError("Currently we allow " - "adjacency matrix for static graphs only.") - - def add_edge(self, source, target, cost=None): - source, target = str(source), str(target) - error_msg = ("Vertex %s is not present in the graph." - "Call Graph.add_vertex to add a new" - "vertex. Graph.add_edge is only responsible" - "for adding edges and it will not add new" - "vertices on its own. 
This is done to maintain" - "clear separation between the functionality of" - "these two methods.") - if source not in self.matrix: - raise ValueError(error_msg % (source)) - if target not in self.matrix: - raise ValueError(error_msg % (target)) - - self.matrix[source][target] = True - if cost is not None: - self.edge_weights[source + "_" + target] = \ - GraphEdge(self.__getattribute__(source), - self.__getattribute__(target), - cost) - - def get_edge(self, source, target): - return self.edge_weights.get( - str(source) + "_" + str(target), - None) - - def remove_edge(self, source, target): - source, target = str(source), str(target) - self.matrix[source][target] = False - self.edge_weights.pop(str(source) + "_" + str(target), None) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/algorithms.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/algorithms.py deleted file mode 100644 index 9324b7278..000000000 --- a/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/algorithms.py +++ /dev/null @@ -1,1386 +0,0 @@ -""" -Contains algorithms associated with graph -data structure. 
-""" -from collections import deque -from concurrent.futures import ThreadPoolExecutor -from pydatastructs.utils.misc_util import ( - _comp, raise_if_backend_is_not_python, Backend, AdjacencyListGraphNode) -from pydatastructs.miscellaneous_data_structures import ( - DisjointSetForest, PriorityQueue) -from pydatastructs.graphs.graph import Graph -from pydatastructs.linear_data_structures.algorithms import merge_sort_parallel -from pydatastructs import PriorityQueue - -__all__ = [ - 'breadth_first_search', - 'breadth_first_search_parallel', - 'minimum_spanning_tree', - 'minimum_spanning_tree_parallel', - 'strongly_connected_components', - 'depth_first_search', - 'shortest_paths', - 'all_pair_shortest_paths', - 'topological_sort', - 'topological_sort_parallel', - 'max_flow', - 'find_bridges' -] - -Stack = Queue = deque - -def breadth_first_search( - graph, source_node, operation, *args, **kwargs): - """ - Implementation of serial breadth first search(BFS) - algorithm. - - Parameters - ========== - - graph: Graph - The graph on which BFS is to be performed. - source_node: str - The name of the source node from where the BFS is - to be initiated. - operation: function - The function which is to be applied - on every node when it is visited. - The prototype which is to be followed is, - `function_name(curr_node, next_node, - arg_1, arg_2, . . ., arg_n)`. - Here, the first two arguments denote, the - current node and the node next to current node. - The rest of the arguments are optional and you can - provide your own stuff there. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Note - ==== - - You should pass all the arguments which you are going - to use in the prototype of your `operation` after - passing the operation function. 
- - Examples - ======== - - >>> from pydatastructs import Graph, AdjacencyListGraphNode - >>> V1 = AdjacencyListGraphNode("V1") - >>> V2 = AdjacencyListGraphNode("V2") - >>> V3 = AdjacencyListGraphNode("V3") - >>> G = Graph(V1, V2, V3) - >>> from pydatastructs import breadth_first_search - >>> def f(curr_node, next_node, dest_node): - ... return curr_node != dest_node - ... - >>> G.add_edge(V1.name, V2.name) - >>> G.add_edge(V2.name, V3.name) - >>> breadth_first_search(G, V1.name, f, V3.name) - """ - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.PYTHON: - import pydatastructs.graphs.algorithms as algorithms - func = "_breadth_first_search_" + graph._impl - if not hasattr(algorithms, func): - raise NotImplementedError( - "Currently breadth first search isn't implemented for " - "%s graphs."%(graph._impl)) - return getattr(algorithms, func)( - graph, source_node, operation, *args, **kwargs) - else: - from pydatastructs.graphs._backend.cpp._algorithms import bfs_adjacency_list, bfs_adjacency_matrix - if (graph._impl == "adjacency_list"): - extra_args = args if args else () - return bfs_adjacency_list(graph, source_node, operation, extra_args) - if (graph._impl == "adjacency_matrix"): - extra_args = args if args else () - return bfs_adjacency_matrix(graph, source_node, operation, extra_args) - -def _breadth_first_search_adjacency_list( - graph, source_node, operation, *args, **kwargs): - bfs_queue = Queue() - visited = {} - bfs_queue.append(source_node) - visited[source_node] = True - while len(bfs_queue) != 0: - curr_node = bfs_queue.popleft() - next_nodes = graph.neighbors(curr_node) - if len(next_nodes) != 0: - for next_node in next_nodes: - if visited.get(next_node.name, False) is False: - status = operation(curr_node, next_node.name, *args, **kwargs) - if not status: - return None - bfs_queue.append(next_node.name) - visited[next_node.name] = True - else: - status = operation(curr_node, "", *args, **kwargs) - if not status: - return None 
- -_breadth_first_search_adjacency_matrix = _breadth_first_search_adjacency_list - -def breadth_first_search_parallel( - graph, source_node, num_threads, operation, *args, **kwargs): - """ - Parallel implementation of breadth first search on graphs. - - Parameters - ========== - - graph: Graph - The graph on which BFS is to be performed. - source_node: str - The name of the source node from where the BFS is - to be initiated. - num_threads: int - Number of threads to be used for computation. - operation: function - The function which is to be applied - on every node when it is visited. - The prototype which is to be followed is, - `function_name(curr_node, next_node, - arg_1, arg_2, . . ., arg_n)`. - Here, the first two arguments denote, the - current node and the node next to current node. - The rest of the arguments are optional and you can - provide your own stuff there. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Note - ==== - - You should pass all the arguments which you are going - to use in the prototype of your `operation` after - passing the operation function. - - Examples - ======== - - >>> from pydatastructs import Graph, AdjacencyListGraphNode - >>> V1 = AdjacencyListGraphNode("V1") - >>> V2 = AdjacencyListGraphNode("V2") - >>> V3 = AdjacencyListGraphNode("V3") - >>> G = Graph(V1, V2, V3) - >>> from pydatastructs import breadth_first_search_parallel - >>> def f(curr_node, next_node, dest_node): - ... return curr_node != dest_node - ... 
- >>> G.add_edge(V1.name, V2.name) - >>> G.add_edge(V2.name, V3.name) - >>> breadth_first_search_parallel(G, V1.name, 3, f, V3.name) - """ - raise_if_backend_is_not_python( - breadth_first_search_parallel, kwargs.get('backend', Backend.PYTHON)) - import pydatastructs.graphs.algorithms as algorithms - func = "_breadth_first_search_parallel_" + graph._impl - if not hasattr(algorithms, func): - raise NotImplementedError( - "Currently breadth first search isn't implemented for " - "%s graphs."%(graph._impl)) - return getattr(algorithms, func)( - graph, source_node, num_threads, operation, *args, **kwargs) - -def _generate_layer(**kwargs): - _args, _kwargs = kwargs.get('args'), kwargs.get('kwargs') - (graph, curr_node, next_layer, visited, operation) = _args[0:5] - op_args, op_kwargs = _args[5:], _kwargs - next_nodes = graph.neighbors(curr_node) - status = True - if len(next_nodes) != 0: - for next_node in next_nodes: - if visited.get(next_node, False) is False: - status = status and operation(curr_node, next_node.name, *op_args, **op_kwargs) - next_layer.add(next_node.name) - visited[next_node.name] = True - else: - status = status and operation(curr_node, "", *op_args, **op_kwargs) - return status - -def _breadth_first_search_parallel_adjacency_list( - graph, source_node, num_threads, operation, *args, **kwargs): - visited, layers = {}, {} - layers[0] = set() - layers[0].add(source_node) - visited[source_node] = True - layer = 0 - while len(layers[layer]) != 0: - layers[layer+1] = set() - with ThreadPoolExecutor(max_workers=num_threads) as Executor: - for node in layers[layer]: - status = Executor.submit( - _generate_layer, args= - (graph, node, layers[layer+1], visited, - operation, *args), kwargs=kwargs).result() - layer += 1 - if not status: - return None - -_breadth_first_search_parallel_adjacency_matrix = _breadth_first_search_parallel_adjacency_list - -def _generate_mst_object(graph): - mst = Graph(*[getattr(graph, str(v)) for v in graph.vertices]) - return mst 
- -def _sort_edges(graph, num_threads=None): - edges = list(graph.edge_weights.items()) - if num_threads is None: - sort_key = lambda item: item[1].value - return sorted(edges, key=sort_key) - - merge_sort_parallel(edges, num_threads, - comp=lambda u,v: u[1].value <= v[1].value) - return edges - -def _minimum_spanning_tree_kruskal_adjacency_list(graph): - mst = _generate_mst_object(graph) - dsf = DisjointSetForest() - for v in graph.vertices: - dsf.make_set(v) - for _, edge in _sort_edges(graph): - u, v = edge.source.name, edge.target.name - if dsf.find_root(u) is not dsf.find_root(v): - mst.add_edge(u, v, edge.value) - mst.add_edge(v, u, edge.value) - dsf.union(u, v) - return mst - -_minimum_spanning_tree_kruskal_adjacency_matrix = \ - _minimum_spanning_tree_kruskal_adjacency_list - -def _minimum_spanning_tree_prim_adjacency_list(graph): - q = PriorityQueue(implementation='binomial_heap') - e = {} - mst = Graph(implementation='adjacency_list') - q.push(next(iter(graph.vertices)), 0) - while not q.is_empty: - v = q.pop() - if not hasattr(mst, v): - mst.add_vertex(graph.__getattribute__(v)) - if e.get(v, None) is not None: - edge = e[v] - mst.add_vertex(edge.target) - mst.add_edge(edge.source.name, edge.target.name, edge.value) - mst.add_edge(edge.target.name, edge.source.name, edge.value) - for w_node in graph.neighbors(v): - w = w_node.name - vw = graph.edge_weights[v + '_' + w] - q.push(w, vw.value) - if e.get(w, None) is None or \ - e[w].value > vw.value: - e[w] = vw - return mst - -def minimum_spanning_tree(graph, algorithm, **kwargs): - """ - Computes a minimum spanning tree for the given - graph and algorithm. - - Parameters - ========== - - graph: Graph - The graph whose minimum spanning tree - has to be computed. - algorithm: str - The algorithm which should be used for - computing a minimum spanning tree. - Currently the following algorithms are - supported, - - 'kruskal' -> Kruskal's algorithm as given in [1]. 
- - 'prim' -> Prim's algorithm as given in [2]. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - mst: Graph - A minimum spanning tree using the implementation - same as the graph provided in the input. - - Examples - ======== - - >>> from pydatastructs import Graph, AdjacencyListGraphNode - >>> from pydatastructs import minimum_spanning_tree - >>> u = AdjacencyListGraphNode('u') - >>> v = AdjacencyListGraphNode('v') - >>> G = Graph(u, v) - >>> G.add_edge(u.name, v.name, 3) - >>> mst = minimum_spanning_tree(G, 'kruskal') - >>> u_n = mst.neighbors(u.name) - >>> mst.get_edge(u.name, u_n[0].name).value - 3 - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Kruskal%27s_algorithm - .. [2] https://en.wikipedia.org/wiki/Prim%27s_algorithm - - Note - ==== - - The concept of minimum spanning tree is valid only for - connected and undirected graphs. So, this function - should be used only for such graphs. Using with other - types of graphs may lead to unwanted results. - """ - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.PYTHON: - import pydatastructs.graphs.algorithms as algorithms - func = "_minimum_spanning_tree_" + algorithm + "_" + graph._impl - if not hasattr(algorithms, func): - raise NotImplementedError( - "Currently %s algoithm for %s implementation of graphs " - "isn't implemented for finding minimum spanning trees." 
- %(algorithm, graph._impl)) - return getattr(algorithms, func)(graph) - else: - from pydatastructs.graphs._backend.cpp._algorithms import minimum_spanning_tree_prim_adjacency_list - if graph._impl == "adjacency_list" and algorithm == 'prim': - return minimum_spanning_tree_prim_adjacency_list(graph) - -def _minimum_spanning_tree_parallel_kruskal_adjacency_list(graph, num_threads): - mst = _generate_mst_object(graph) - dsf = DisjointSetForest() - for v in graph.vertices: - dsf.make_set(v) - edges = _sort_edges(graph, num_threads) - for _, edge in edges: - u, v = edge.source.name, edge.target.name - if dsf.find_root(u) is not dsf.find_root(v): - mst.add_edge(u, v, edge.value) - mst.add_edge(v, u, edge.value) - dsf.union(u, v) - return mst - -_minimum_spanning_tree_parallel_kruskal_adjacency_matrix = \ - _minimum_spanning_tree_parallel_kruskal_adjacency_list - -def _find_min(q, v, i): - if not q.is_empty: - v[i] = q.peek - else: - v[i] = None - -def _minimum_spanning_tree_parallel_prim_adjacency_list(graph, num_threads): - q = [PriorityQueue(implementation='binomial_heap') for _ in range(num_threads)] - e = [{} for _ in range(num_threads)] - v2q = {} - mst = Graph(implementation='adjacency_list') - - itr = iter(graph.vertices) - for i in range(len(graph.vertices)): - v2q[next(itr)] = i%len(q) - q[0].push(next(iter(graph.vertices)), 0) - - while True: - - _vs = [None for _ in range(num_threads)] - with ThreadPoolExecutor(max_workers=num_threads) as Executor: - for i in range(num_threads): - Executor.submit(_find_min, q[i], _vs, i).result() - v = None - - for i in range(num_threads): - if _comp(_vs[i], v, lambda u, v: u.key < v.key): - v = _vs[i] - if v is None: - break - v = v.data - idx = v2q[v] - q[idx].pop() - - if not hasattr(mst, v): - mst.add_vertex(graph.__getattribute__(v)) - if e[idx].get(v, None) is not None: - edge = e[idx][v] - mst.add_vertex(edge.target) - mst.add_edge(edge.source.name, edge.target.name, edge.value) - mst.add_edge(edge.target.name, 
edge.source.name, edge.value) - for w_node in graph.neighbors(v): - w = w_node.name - vw = graph.edge_weights[v + '_' + w] - j = v2q[w] - q[j].push(w, vw.value) - if e[j].get(w, None) is None or \ - e[j][w].value > vw.value: - e[j][w] = vw - - return mst - -def minimum_spanning_tree_parallel(graph, algorithm, num_threads, **kwargs): - """ - Computes a minimum spanning tree for the given - graph and algorithm using the given number of threads. - - Parameters - ========== - - graph: Graph - The graph whose minimum spanning tree - has to be computed. - algorithm: str - The algorithm which should be used for - computing a minimum spanning tree. - Currently the following algorithms are - supported, - - 'kruskal' -> Kruskal's algorithm as given in [1]. - - 'prim' -> Prim's algorithm as given in [2]. - num_threads: int - The number of threads to be used. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - mst: Graph - A minimum spanning tree using the implementation - same as the graph provided in the input. - - Examples - ======== - - >>> from pydatastructs import Graph, AdjacencyListGraphNode - >>> from pydatastructs import minimum_spanning_tree_parallel - >>> u = AdjacencyListGraphNode('u') - >>> v = AdjacencyListGraphNode('v') - >>> G = Graph(u, v) - >>> G.add_edge(u.name, v.name, 3) - >>> mst = minimum_spanning_tree_parallel(G, 'kruskal', 3) - >>> u_n = mst.neighbors(u.name) - >>> mst.get_edge(u.name, u_n[0].name).value - 3 - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Kruskal%27s_algorithm#Parallel_algorithm - .. [2] https://en.wikipedia.org/wiki/Prim%27s_algorithm#Parallel_algorithm - - Note - ==== - - The concept of minimum spanning tree is valid only for - connected and undirected graphs. So, this function - should be used only for such graphs. Using with other - types of graphs will lead to unwanted results. 
- """ - raise_if_backend_is_not_python( - minimum_spanning_tree_parallel, kwargs.get('backend', Backend.PYTHON)) - import pydatastructs.graphs.algorithms as algorithms - func = "_minimum_spanning_tree_parallel_" + algorithm + "_" + graph._impl - if not hasattr(algorithms, func): - raise NotImplementedError( - "Currently %s algoithm for %s implementation of graphs " - "isn't implemented for finding minimum spanning trees." - %(algorithm, graph._impl)) - return getattr(algorithms, func)(graph, num_threads) - -def _visit(graph, vertex, visited, incoming, L): - stack = [vertex] - while stack: - top = stack[-1] - if not visited.get(top, False): - visited[top] = True - for node in graph.neighbors(top): - if incoming.get(node.name, None) is None: - incoming[node.name] = [] - incoming[node.name].append(top) - if not visited.get(node.name, False): - stack.append(node.name) - if top is stack[-1]: - L.append(stack.pop()) - -def _assign(graph, u, incoming, assigned, component): - stack = [u] - while stack: - top = stack[-1] - if not assigned.get(top, False): - assigned[top] = True - component.add(top) - for u in incoming[top]: - if not assigned.get(u, False): - stack.append(u) - if top is stack[-1]: - stack.pop() - -def _strongly_connected_components_kosaraju_adjacency_list(graph): - visited, incoming, L = {}, {}, [] - for u in graph.vertices: - if not visited.get(u, False): - _visit(graph, u, visited, incoming, L) - - assigned = {} - components = [] - for i in range(-1, -len(L) - 1, -1): - comp = set() - if not assigned.get(L[i], False): - _assign(graph, L[i], incoming, assigned, comp) - if comp: - components.append(comp) - - return components - -_strongly_connected_components_kosaraju_adjacency_matrix = \ - _strongly_connected_components_kosaraju_adjacency_list - -def _tarjan_dfs(u, graph, index, stack, indices, low_links, on_stacks, components): - indices[u] = index[0] - low_links[u] = index[0] - index[0] += 1 - stack.append(u) - on_stacks[u] = True - - for node in 
graph.neighbors(u): - v = node.name - if indices[v] == -1: - _tarjan_dfs(v, graph, index, stack, indices, low_links, on_stacks, components) - low_links[u] = min(low_links[u], low_links[v]) - elif on_stacks[v]: - low_links[u] = min(low_links[u], low_links[v]) - - if low_links[u] == indices[u]: - component = set() - while stack: - w = stack.pop() - on_stacks[w] = False - component.add(w) - if w == u: - break - components.append(component) - -def _strongly_connected_components_tarjan_adjacency_list(graph): - index = [0] # mutable object - stack = Stack([]) - indices, low_links, on_stacks = {}, {}, {} - - for u in graph.vertices: - indices[u] = -1 - low_links[u] = -1 - on_stacks[u] = False - - components = [] - - for u in graph.vertices: - if indices[u] == -1: - _tarjan_dfs(u, graph, index, stack, indices, low_links, on_stacks, components) - - return components - -_strongly_connected_components_tarjan_adjacency_matrix = \ - _strongly_connected_components_tarjan_adjacency_list - -def strongly_connected_components(graph, algorithm, **kwargs): - """ - Computes strongly connected components for the given - graph and algorithm. - - Parameters - ========== - - graph: Graph - The graph whose minimum spanning tree - has to be computed. - algorithm: str - The algorithm which should be used for - computing strongly connected components. - Currently the following algorithms are - supported, - - 'kosaraju' -> Kosaraju's algorithm as given in [1]. - 'tarjan' -> Tarjan's algorithm as given in [2]. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - components: list - Python list with each element as set of vertices. 
- - Examples - ======== - - >>> from pydatastructs import Graph, AdjacencyListGraphNode - >>> from pydatastructs import strongly_connected_components - >>> v1, v2, v3 = [AdjacencyListGraphNode(i) for i in range(3)] - >>> g = Graph(v1, v2, v3) - >>> g.add_edge(v1.name, v2.name) - >>> g.add_edge(v2.name, v3.name) - >>> g.add_edge(v3.name, v1.name) - >>> scc = strongly_connected_components(g, 'kosaraju') - >>> scc == [{'2', '0', '1'}] - True - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Kosaraju%27s_algorithm - .. [2] https://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm - - """ - raise_if_backend_is_not_python( - strongly_connected_components, kwargs.get('backend', Backend.PYTHON)) - import pydatastructs.graphs.algorithms as algorithms - func = "_strongly_connected_components_" + algorithm + "_" + graph._impl - if not hasattr(algorithms, func): - raise NotImplementedError( - "Currently %s algoithm for %s implementation of graphs " - "isn't implemented for finding strongly connected components." - %(algorithm, graph._impl)) - return getattr(algorithms, func)(graph) - -def depth_first_search( - graph, source_node, operation, *args, **kwargs): - """ - Implementation of depth first search (DFS) - algorithm. - - Parameters - ========== - - graph: Graph - The graph on which DFS is to be performed. - source_node: str - The name of the source node from where the DFS is - to be initiated. - operation: function - The function which is to be applied - on every node when it is visited. - The prototype which is to be followed is, - `function_name(curr_node, next_node, - arg_1, arg_2, . . ., arg_n)`. - Here, the first two arguments denote, the - current node and the node next to current node. - The rest of the arguments are optional and you can - provide your own stuff there. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. 
- - Note - ==== - - You should pass all the arguments which you are going - to use in the prototype of your `operation` after - passing the operation function. - - Examples - ======== - - >>> from pydatastructs import Graph, AdjacencyListGraphNode - >>> V1 = AdjacencyListGraphNode("V1") - >>> V2 = AdjacencyListGraphNode("V2") - >>> V3 = AdjacencyListGraphNode("V3") - >>> G = Graph(V1, V2, V3) - >>> from pydatastructs import depth_first_search - >>> def f(curr_node, next_node, dest_node): - ... return curr_node != dest_node - ... - >>> G.add_edge(V1.name, V2.name) - >>> G.add_edge(V2.name, V3.name) - >>> depth_first_search(G, V1.name, f, V3.name) - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Depth-first_search - """ - raise_if_backend_is_not_python( - depth_first_search, kwargs.get('backend', Backend.PYTHON)) - import pydatastructs.graphs.algorithms as algorithms - func = "_depth_first_search_" + graph._impl - if not hasattr(algorithms, func): - raise NotImplementedError( - "Currently depth first search isn't implemented for " - "%s graphs."%(graph._impl)) - return getattr(algorithms, func)( - graph, source_node, operation, *args, **kwargs) - -def _depth_first_search_adjacency_list( - graph, source_node, operation, *args, **kwargs): - dfs_stack = Stack() - visited = {} - dfs_stack.append(source_node) - visited[source_node] = True - while len(dfs_stack) != 0: - curr_node = dfs_stack.pop() - next_nodes = graph.neighbors(curr_node) - if len(next_nodes) != 0: - for next_node in next_nodes: - if next_node.name not in visited: - status = operation(curr_node, next_node.name, *args, **kwargs) - if not status: - return None - dfs_stack.append(next_node.name) - visited[next_node.name] = True - else: - status = operation(curr_node, "", *args, **kwargs) - if not status: - return None - -_depth_first_search_adjacency_matrix = _depth_first_search_adjacency_list - -def shortest_paths(graph: Graph, algorithm: str, - source: str, target: str="", - **kwargs) -> 
tuple: - """ - Finds shortest paths in the given graph from a given source. - - Parameters - ========== - - graph: Graph - The graph under consideration. - algorithm: str - The algorithm to be used. Currently, the following algorithms - are implemented, - - 'bellman_ford' -> Bellman-Ford algorithm as given in [1] - - 'dijkstra' -> Dijkstra algorithm as given in [2]. - source: str - The name of the source the node. - target: str - The name of the target node. - Optional, by default, all pair shortest paths - are returned. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - (distances, predecessors): (dict, dict) - If target is not provided and algorithm used - is 'bellman_ford'/'dijkstra'. - (distances[target], predecessors): (float, dict) - If target is provided and algorithm used is - 'bellman_ford'/'dijkstra'. - - Examples - ======== - - >>> from pydatastructs import Graph, AdjacencyListGraphNode - >>> from pydatastructs import shortest_paths - >>> V1 = AdjacencyListGraphNode("V1") - >>> V2 = AdjacencyListGraphNode("V2") - >>> V3 = AdjacencyListGraphNode("V3") - >>> G = Graph(V1, V2, V3) - >>> G.add_edge('V2', 'V3', 10) - >>> G.add_edge('V1', 'V2', 11) - >>> shortest_paths(G, 'bellman_ford', 'V1') - ({'V1': 0, 'V2': 11, 'V3': 21}, {'V1': None, 'V2': 'V1', 'V3': 'V2'}) - >>> shortest_paths(G, 'dijkstra', 'V1') - ({'V2': 11, 'V3': 21, 'V1': 0}, {'V1': None, 'V2': 'V1', 'V3': 'V2'}) - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Bellman%E2%80%93Ford_algorithm - .. 
[2] https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm - """ - backend = kwargs.get('backend', Backend.PYTHON) - if (backend == Backend.PYTHON): - import pydatastructs.graphs.algorithms as algorithms - func = "_" + algorithm + "_" + graph._impl - if not hasattr(algorithms, func): - raise NotImplementedError( - "Currently %s algorithm isn't implemented for " - "finding shortest paths in graphs."%(algorithm)) - return getattr(algorithms, func)(graph, source, target) - else: - from pydatastructs.graphs._backend.cpp._algorithms import shortest_paths_dijkstra_adjacency_list - if graph._impl == "adjacency_list" and algorithm == 'dijkstra': - return shortest_paths_dijkstra_adjacency_list(graph, source, target) - -def _bellman_ford_adjacency_list(graph: Graph, source: str, target: str) -> tuple: - distances, predecessor, visited, cnts = {}, {}, {}, {} - - for v in graph.vertices: - distances[v] = float('inf') - predecessor[v] = None - visited[v] = False - cnts[v] = 0 - distances[source] = 0 - verticy_num = len(graph.vertices) - - que = Queue([source]) - - while que: - u = que.popleft() - visited[u] = False - neighbors = graph.neighbors(u) - for neighbor in neighbors: - v = neighbor.name - edge_str = u + '_' + v - if distances[u] != float('inf') and distances[u] + graph.edge_weights[edge_str].value < distances[v]: - distances[v] = distances[u] + graph.edge_weights[edge_str].value - predecessor[v] = u - cnts[v] = cnts[u] + 1 - if cnts[v] >= verticy_num: - raise ValueError("Graph contains a negative weight cycle.") - if not visited[v]: - que.append(v) - visited[v] = True - - if target != "": - return (distances[target], predecessor) - return (distances, predecessor) - -_bellman_ford_adjacency_matrix = _bellman_ford_adjacency_list - -def _dijkstra_adjacency_list(graph: Graph, start: str, target: str): - V = len(graph.vertices) - visited, dist, pred = {}, {}, {} - for v in graph.vertices: - visited[v] = False - pred[v] = None - if v != start: - dist[v] = float('inf') - 
dist[start] = 0 - pq = PriorityQueue(implementation='binomial_heap') - for vertex in dist: - pq.push(vertex, dist[vertex]) - for _ in range(V): - u = pq.pop() - visited[u] = True - for v in graph.vertices: - edge_str = u + '_' + v - if (edge_str in graph.edge_weights and graph.edge_weights[edge_str].value >= 0 and - visited[v] is False and dist[v] > dist[u] + graph.edge_weights[edge_str].value): - dist[v] = dist[u] + graph.edge_weights[edge_str].value - pred[v] = u - pq.push(v, dist[v]) - - if target != "": - return (dist[target], pred) - return dist, pred - -_dijkstra_adjacency_matrix = _dijkstra_adjacency_list - -def all_pair_shortest_paths(graph: Graph, algorithm: str, - **kwargs) -> tuple: - """ - Finds shortest paths between all pairs of vertices in the given graph. - - Parameters - ========== - - graph: Graph - The graph under consideration. - algorithm: str - The algorithm to be used. Currently, the following algorithms - are implemented, - - 'floyd_warshall' -> Floyd Warshall algorithm as given in [1]. - 'johnson' -> Johnson's Algorithm as given in [2] - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - (distances, predecessors): (dict, dict) - - Examples - ======== - - >>> from pydatastructs import Graph, AdjacencyListGraphNode - >>> from pydatastructs import all_pair_shortest_paths - >>> V1 = AdjacencyListGraphNode("V1") - >>> V2 = AdjacencyListGraphNode("V2") - >>> V3 = AdjacencyListGraphNode("V3") - >>> G = Graph(V1, V2, V3) - >>> G.add_edge('V2', 'V3', 10) - >>> G.add_edge('V1', 'V2', 11) - >>> G.add_edge('V3', 'V1', 5) - >>> dist, _ = all_pair_shortest_paths(G, 'floyd_warshall') - >>> dist['V1']['V3'] - 21 - >>> dist['V3']['V1'] - 5 - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Floyd%E2%80%93Warshall_algorithm - .. 
[2] https://en.wikipedia.org/wiki/Johnson's_algorithm - """ - raise_if_backend_is_not_python( - all_pair_shortest_paths, kwargs.get('backend', Backend.PYTHON)) - import pydatastructs.graphs.algorithms as algorithms - func = "_" + algorithm + "_" + graph._impl - if not hasattr(algorithms, func): - raise NotImplementedError( - "Currently %s algorithm isn't implemented for " - "finding shortest paths in graphs."%(algorithm)) - return getattr(algorithms, func)(graph) - -def _floyd_warshall_adjacency_list(graph: Graph): - dist, next_vertex = {}, {} - V, E = graph.vertices, graph.edge_weights - - for v in V: - dist[v] = {} - next_vertex[v] = {} - - for name, edge in E.items(): - dist[edge.source.name][edge.target.name] = edge.value - next_vertex[edge.source.name][edge.target.name] = edge.source.name - - for v in V: - dist[v][v] = 0 - next_vertex[v][v] = v - - for k in V: - for i in V: - for j in V: - dist_i_j = dist.get(i, {}).get(j, float('inf')) - dist_i_k = dist.get(i, {}).get(k, float('inf')) - dist_k_j = dist.get(k, {}).get(j, float('inf')) - next_i_k = next_vertex.get(i + '_' + k, None) - if dist_i_j > dist_i_k + dist_k_j: - dist[i][j] = dist_i_k + dist_k_j - next_vertex[i][j] = next_i_k - - return (dist, next_vertex) - -_floyd_warshall_adjacency_matrix = _floyd_warshall_adjacency_list - -def _johnson_adjacency_list(graph: Graph): - new_vertex = AdjacencyListGraphNode('__q__') - graph.add_vertex(new_vertex) - - for vertex in graph.vertices: - if vertex != '__q__': - graph.add_edge('__q__', vertex, 0) - - distances, predecessors = shortest_paths(graph, 'bellman_ford', '__q__') - - edges_to_remove = [] - for edge in graph.edge_weights: - edge_node = graph.edge_weights[edge] - if edge_node.source.name == '__q__': - edges_to_remove.append((edge_node.source.name, edge_node.target.name)) - - for u, v in edges_to_remove: - graph.remove_edge(u, v) - graph.remove_vertex('__q__') - - for edge in graph.edge_weights: - edge_node = graph.edge_weights[edge] - u, v = 
edge_node.source.name, edge_node.target.name - graph.edge_weights[edge].value += (distances[u] - distances[v]) - - all_distances = {} - all_next_vertex = {} - - for vertex in graph.vertices: - u = vertex - dijkstra_dist, dijkstra_pred = shortest_paths(graph, 'dijkstra', u) - all_distances[u] = {} - all_next_vertex[u] = {} - for v in graph.vertices: - if dijkstra_pred[v] is None or dijkstra_pred[v] == u : - all_next_vertex[u][v] = u - else: - all_next_vertex[u][v] = None - if v in dijkstra_dist: - all_distances[u][v] = dijkstra_dist[v] - distances[u] + distances[v] - else: - all_distances[u][v] = float('inf') - - return (all_distances, all_next_vertex) - -def topological_sort(graph: Graph, algorithm: str, - **kwargs) -> list: - """ - Performs topological sort on the given graph using given algorithm. - - Parameters - ========== - - graph: Graph - The graph under consideration. - algorithm: str - The algorithm to be used. - Currently, following are supported, - - 'kahn' -> Kahn's algorithm as given in [1]. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - list - The list of topologically sorted vertices. - - Examples - ======== - - >>> from pydatastructs import Graph, AdjacencyListGraphNode, topological_sort - >>> v_1 = AdjacencyListGraphNode('v_1') - >>> v_2 = AdjacencyListGraphNode('v_2') - >>> graph = Graph(v_1, v_2) - >>> graph.add_edge('v_1', 'v_2') - >>> topological_sort(graph, 'kahn') - ['v_1', 'v_2'] - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Topological_sorting#Kahn's_algorithm - """ - raise_if_backend_is_not_python( - topological_sort, kwargs.get('backend', Backend.PYTHON)) - import pydatastructs.graphs.algorithms as algorithms - func = "_" + algorithm + "_" + graph._impl - if not hasattr(algorithms, func): - raise NotImplementedError( - "Currently %s algorithm isn't implemented for " - "performing topological sort on %s graphs."%(algorithm, graph._impl)) - return getattr(algorithms, func)(graph) - -def _kahn_adjacency_list(graph: Graph) -> list: - S = Queue() - in_degree = {u: 0 for u in graph.vertices} - for u in graph.vertices: - for v in graph.neighbors(u): - in_degree[v.name] += 1 - for u in graph.vertices: - if in_degree[u] == 0: - S.append(u) - in_degree.pop(u) - - L = [] - while S: - n = S.popleft() - L.append(n) - for m in graph.neighbors(n): - graph.remove_edge(n, m.name) - in_degree[m.name] -= 1 - if in_degree[m.name] == 0: - S.append(m.name) - in_degree.pop(m.name) - - if in_degree: - raise ValueError("Graph is not acyclic.") - return L - -def topological_sort_parallel(graph: Graph, algorithm: str, num_threads: int, - **kwargs) -> list: - """ - Performs topological sort on the given graph using given algorithm using - given number of threads. - - Parameters - ========== - - graph: Graph - The graph under consideration. - algorithm: str - The algorithm to be used. - Currently, following are supported, - - 'kahn' -> Kahn's algorithm as given in [1]. - num_threads: int - The maximum number of threads to be used. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - list - The list of topologically sorted vertices. 
- - Examples - ======== - - >>> from pydatastructs import Graph, AdjacencyListGraphNode, topological_sort_parallel - >>> v_1 = AdjacencyListGraphNode('v_1') - >>> v_2 = AdjacencyListGraphNode('v_2') - >>> graph = Graph(v_1, v_2) - >>> graph.add_edge('v_1', 'v_2') - >>> topological_sort_parallel(graph, 'kahn', 1) - ['v_1', 'v_2'] - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Topological_sorting#Kahn's_algorithm - """ - raise_if_backend_is_not_python( - topological_sort_parallel, kwargs.get('backend', Backend.PYTHON)) - import pydatastructs.graphs.algorithms as algorithms - func = "_" + algorithm + "_" + graph._impl + '_parallel' - if not hasattr(algorithms, func): - raise NotImplementedError( - "Currently %s algorithm isn't implemented for " - "performing topological sort on %s graphs."%(algorithm, graph._impl)) - return getattr(algorithms, func)(graph, num_threads) - -def _kahn_adjacency_list_parallel(graph: Graph, num_threads: int) -> list: - num_vertices = len(graph.vertices) - - def _collect_source_nodes(graph: Graph) -> list: - S = [] - in_degree = {u: 0 for u in graph.vertices} - for u in graph.vertices: - for v in graph.neighbors(u): - in_degree[v.name] += 1 - for u in in_degree: - if in_degree[u] == 0: - S.append(u) - return list(S) - - def _job(graph: Graph, u: str): - for v in graph.neighbors(u): - graph.remove_edge(u, v.name) - - L = [] - source_nodes = _collect_source_nodes(graph) - while source_nodes: - with ThreadPoolExecutor(max_workers=num_threads) as Executor: - for node in source_nodes: - L.append(node) - Executor.submit(_job, graph, node) - for node in source_nodes: - graph.remove_vertex(node) - source_nodes = _collect_source_nodes(graph) - - if len(L) != num_vertices: - raise ValueError("Graph is not acyclic.") - return L - - -def _breadth_first_search_max_flow(graph: Graph, source_node, sink_node, flow_passed, for_dinic=False): - bfs_queue = Queue() - parent, currentPathC = {}, {} - currentPathC[source_node] = float('inf') 
- bfs_queue.append(source_node) - while len(bfs_queue) != 0: - curr_node = bfs_queue.popleft() - next_nodes = graph.neighbors(curr_node) - if len(next_nodes) != 0: - for next_node in next_nodes: - capacity = graph.get_edge(curr_node, next_node.name).value - fp = flow_passed.get((curr_node, next_node.name), 0) - if capacity and parent.get(next_node.name, False) is False and capacity - fp > 0: - parent[next_node.name] = curr_node - next_flow = min(currentPathC[curr_node], capacity - fp) - currentPathC[next_node.name] = next_flow - if next_node.name == sink_node and not for_dinic: - return (next_flow, parent) - bfs_queue.append(next_node.name) - return (0, parent) - - -def _max_flow_edmonds_karp_(graph: Graph, source, sink): - m_flow = 0 - flow_passed = {} - new_flow, parent = _breadth_first_search_max_flow(graph, source, sink, flow_passed) - while new_flow != 0: - m_flow += new_flow - current = sink - while current != source: - prev = parent[current] - fp = flow_passed.get((prev, current), 0) - flow_passed[(prev, current)] = fp + new_flow - fp = flow_passed.get((current, prev), 0) - flow_passed[(current, prev)] = fp - new_flow - current = prev - new_flow, parent = _breadth_first_search_max_flow(graph, source, sink, flow_passed) - return m_flow - - -def _depth_first_search_max_flow_dinic(graph: Graph, u, parent, sink_node, flow, flow_passed): - if u == sink_node: - return flow - - next_nodes = graph.neighbors(u) - if len(next_nodes) != 0: - for next_node in next_nodes: - capacity = graph.get_edge(u, next_node.name).value - fp = flow_passed.get((u, next_node.name), 0) - parent_cond = parent.get(next_node.name, None) - if parent_cond and parent_cond == u and capacity - fp > 0: - path_flow = _depth_first_search_max_flow_dinic(graph, - next_node.name, - parent, sink_node, - min(flow, capacity - fp), flow_passed) - if path_flow > 0: - fp = flow_passed.get((u, next_node.name), 0) - flow_passed[(u, next_node.name)] = fp + path_flow - fp = flow_passed.get((next_node.name, u), 
0) - flow_passed[(next_node.name, u)] = fp - path_flow - return path_flow - return 0 - - -def _max_flow_dinic_(graph: Graph, source, sink): - max_flow = 0 - flow_passed = {} - while True: - next_flow, parent = _breadth_first_search_max_flow(graph, source, sink, flow_passed, True) - if parent.get(sink, False) is False: - break - - while True: - path_flow = _depth_first_search_max_flow_dinic(graph, source, - parent, sink, - float('inf'), - flow_passed) - if path_flow <= 0: - break - max_flow += path_flow - - return max_flow - - -def max_flow(graph, source, sink, algorithm='edmonds_karp', **kwargs): - raise_if_backend_is_not_python( - max_flow, kwargs.get('backend', Backend.PYTHON)) - - import pydatastructs.graphs.algorithms as algorithms - func = "_max_flow_" + algorithm + "_" - if not hasattr(algorithms, func): - raise NotImplementedError( - f"Currently {algorithm} algorithm isn't implemented for " - "performing max flow on graphs.") - return getattr(algorithms, func)(graph, source, sink) - - -def find_bridges(graph): - """ - Finds all bridges in an undirected graph using Tarjan's Algorithm. - - Parameters - ========== - graph : Graph - An undirected graph instance. - - Returns - ========== - List[tuple] - A list of bridges, where each bridge is represented as a tuple (u, v) - with u <= v. - - Example - ======== - >>> from pydatastructs import Graph, AdjacencyListGraphNode, find_bridges - >>> v0 = AdjacencyListGraphNode(0) - >>> v1 = AdjacencyListGraphNode(1) - >>> v2 = AdjacencyListGraphNode(2) - >>> v3 = AdjacencyListGraphNode(3) - >>> v4 = AdjacencyListGraphNode(4) - >>> graph = Graph(v0, v1, v2, v3, v4, implementation='adjacency_list') - >>> graph.add_edge(v0.name, v1.name) - >>> graph.add_edge(v1.name, v2.name) - >>> graph.add_edge(v2.name, v3.name) - >>> graph.add_edge(v3.name, v4.name) - >>> find_bridges(graph) - [('0', '1'), ('1', '2'), ('2', '3'), ('3', '4')] - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Bridge_(graph_theory) - """ - - vertices = list(graph.vertices) - processed_vertices = [] - for v in vertices: - if hasattr(v, "name"): - processed_vertices.append(v.name) - else: - processed_vertices.append(v) - - n = len(processed_vertices) - adj = {v: [] for v in processed_vertices} - for v in processed_vertices: - for neighbor in graph.neighbors(v): - if hasattr(neighbor, "name"): - nbr = neighbor.name - else: - nbr = neighbor - adj[v].append(nbr) - - mapping = {v: idx for idx, v in enumerate(processed_vertices)} - inv_mapping = {idx: v for v, idx in mapping.items()} - - n_adj = [[] for _ in range(n)] - for v in processed_vertices: - idx_v = mapping[v] - for u in adj[v]: - idx_u = mapping[u] - n_adj[idx_v].append(idx_u) - - visited = [False] * n - disc = [0] * n - low = [0] * n - parent = [-1] * n - bridges_idx = [] - time = 0 - - def dfs(u): - nonlocal time - visited[u] = True - disc[u] = low[u] = time - time += 1 - for v in n_adj[u]: - if not visited[v]: - parent[v] = u - dfs(v) - low[u] = min(low[u], low[v]) - if low[v] > disc[u]: - bridges_idx.append((u, v)) - elif v != parent[u]: - low[u] = min(low[u], disc[v]) - - for i in range(n): - if not visited[i]: - dfs(i) - - bridges = [] - for u, v in bridges_idx: - a = inv_mapping[u] - b = inv_mapping[v] - if a <= b: - bridges.append((a, b)) - else: - bridges.append((b, a)) - bridges.sort() - return bridges diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/graph.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/graph.py deleted file mode 100644 index 39c2692e3..000000000 --- a/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/graph.py +++ /dev/null @@ -1,163 +0,0 @@ - -from pydatastructs.utils.misc_util import Backend, raise_if_backend_is_not_python - -__all__ = [ - 'Graph' -] - -class Graph(object): - """ - Represents generic concept of graphs. 
- - Parameters - ========== - - implementation: str - The implementation to be used for storing - graph in memory. It can be figured out - from type of the vertices(if passed at construction). - Currently the following implementations are supported, - - 'adjacency_list' -> Adjacency list implementation. - - 'adjacency_matrix' -> Adjacency matrix implementation. - - By default, 'adjacency_list'. - vertices: GraphNode(s) - For AdjacencyList implementation vertices - can be passed for initializing the graph. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs.graphs import Graph - >>> from pydatastructs.utils import AdjacencyListGraphNode - >>> v_1 = AdjacencyListGraphNode('v_1', 1) - >>> v_2 = AdjacencyListGraphNode('v_2', 2) - >>> g = Graph(v_1, v_2) - >>> g.add_edge('v_1', 'v_2') - >>> g.add_edge('v_2', 'v_1') - >>> g.is_adjacent('v_1', 'v_2') - True - >>> g.is_adjacent('v_2', 'v_1') - True - >>> g.remove_edge('v_1', 'v_2') - >>> g.is_adjacent('v_1', 'v_2') - False - >>> g.is_adjacent('v_2', 'v_1') - True - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Graph_(abstract_data_type) - - Note - ==== - - Make sure to create nodes (AdjacencyListGraphNode or AdjacencyMatrixGraphNode) - and them in your graph using Graph.add_vertex before adding edges whose - end points require either of the nodes that you added. In other words, - Graph.add_edge doesn't add new nodes on its own if the input - nodes are not already present in the Graph. 
- - """ - - __slots__ = ['_impl'] - - def __new__(cls, *args, **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - try: - default_impl = args[0]._impl if args else 'adjacency_list' - except: - default_impl = 'adjacency_list' - implementation = kwargs.get('implementation', default_impl) - if implementation == 'adjacency_list': - from pydatastructs.graphs.adjacency_list import AdjacencyList - obj = AdjacencyList(*args, **kwargs) - return obj - elif implementation == 'adjacency_matrix': - from pydatastructs.graphs.adjacency_matrix import AdjacencyMatrix - obj = AdjacencyMatrix(*args, **kwargs) - return obj - else: - raise NotImplementedError("%s implementation is not a part " - "of the library currently."%(implementation)) - - def is_adjacent(self, node1, node2): - """ - Checks if the nodes with the given - with the given names are adjacent - to each other. - """ - raise NotImplementedError( - "This is an abstract method.") - - def neighbors(self, node): - """ - Lists the neighbors of the node - with given name. - """ - raise NotImplementedError( - "This is an abstract method.") - - def add_vertex(self, node): - """ - Adds the input vertex to the node, or does nothing - if the input vertex is already in the graph. - """ - raise NotImplementedError( - "This is an abstract method.") - - def remove_vertex(self, node): - """ - Removes the input vertex along with all the edges - pointing towards it. - """ - raise NotImplementedError( - "This is an abstract method.") - - def add_edge(self, source, target, cost=None): - """ - Adds the edge starting at first parameter - i.e., source and ending at the second - parameter i.e., target. - """ - raise NotImplementedError( - "This is an abstract method.") - - def get_edge(self, source, target): - """ - Returns GraphEdge object if there - is an edge between source and target - otherwise None. 
- """ - raise NotImplementedError( - "This is an abstract method.") - - def remove_edge(self, source, target): - """ - Removes the edge starting at first parameter - i.e., source and ending at the second - parameter i.e., target. - """ - raise NotImplementedError( - "This is an abstract method.") - - def num_vertices(self): - """ - Number of vertices - """ - raise NotImplementedError( - "This is an abstract method.") - - def num_edges(self): - """ - Number of edges - """ - raise NotImplementedError( - "This is an abstract method.") diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/tests/__init__.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/tests/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_adjacency_list.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_adjacency_list.py deleted file mode 100644 index 3a9cdb14f..000000000 --- a/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_adjacency_list.py +++ /dev/null @@ -1,83 +0,0 @@ -from pydatastructs.graphs import Graph -from pydatastructs.utils import AdjacencyListGraphNode -from pydatastructs.utils.raises_util import raises -from pydatastructs.utils.misc_util import Backend - -def test_adjacency_list(): - v_1 = AdjacencyListGraphNode('v_1', 1) - v_2 = AdjacencyListGraphNode('v_2', 2) - g = Graph(v_1, v_2, implementation='adjacency_list') - v_3 = AdjacencyListGraphNode('v_3', 3) - g.add_vertex(v_2) - g.add_vertex(v_3) - g.add_edge('v_1', 'v_2') - g.add_edge('v_2', 'v_3') - g.add_edge('v_3', 'v_1') - assert g.is_adjacent('v_1', 'v_2') is True - assert g.is_adjacent('v_2', 'v_3') is True - assert g.is_adjacent('v_3', 'v_1') is True - assert g.is_adjacent('v_2', 'v_1') is False - assert g.is_adjacent('v_3', 'v_2') is False - assert g.is_adjacent('v_1', 'v_3') is False - neighbors = 
g.neighbors('v_1') - assert neighbors == [v_2] - v = AdjacencyListGraphNode('v', 4) - g.add_vertex(v) - g.add_edge('v_1', 'v', 0) - g.add_edge('v_2', 'v', 0) - g.add_edge('v_3', 'v', 0) - assert g.is_adjacent('v_1', 'v') is True - assert g.is_adjacent('v_2', 'v') is True - assert g.is_adjacent('v_3', 'v') is True - e1 = g.get_edge('v_1', 'v') - e2 = g.get_edge('v_2', 'v') - e3 = g.get_edge('v_3', 'v') - assert (e1.source.name, e1.target.name) == ('v_1', 'v') - assert (e2.source.name, e2.target.name) == ('v_2', 'v') - assert (e3.source.name, e3.target.name) == ('v_3', 'v') - g.remove_edge('v_1', 'v') - assert g.is_adjacent('v_1', 'v') is False - g.remove_vertex('v') - assert g.is_adjacent('v_2', 'v') is False - assert g.is_adjacent('v_3', 'v') is False - - assert raises(ValueError, lambda: g.add_edge('u', 'v')) - assert raises(ValueError, lambda: g.add_edge('v', 'x')) - - v_4 = AdjacencyListGraphNode('v_4', 4, backend = Backend.CPP) - v_5 = AdjacencyListGraphNode('v_5', 5, backend = Backend.CPP) - g2 = Graph(v_4,v_5,implementation = 'adjacency_list', backend = Backend.CPP) - v_6 = AdjacencyListGraphNode('v_6', 6, backend = Backend.CPP) - assert raises(ValueError, lambda: g2.add_vertex(v_5)) - g2.add_vertex(v_6) - g2.add_edge('v_4', 'v_5') - g2.add_edge('v_5', 'v_6') - g2.add_edge('v_4', 'v_6') - assert g2.is_adjacent('v_4', 'v_5') is True - assert g2.is_adjacent('v_5', 'v_6') is True - assert g2.is_adjacent('v_4', 'v_6') is True - assert g2.is_adjacent('v_5', 'v_4') is False - assert g2.is_adjacent('v_6', 'v_5') is False - assert g2.is_adjacent('v_6', 'v_4') is False - assert g2.num_edges() == 3 - assert g2.num_vertices() == 3 - neighbors = g2.neighbors('v_4') - assert neighbors == [v_6, v_5] - v = AdjacencyListGraphNode('v', 4, backend = Backend.CPP) - g2.add_vertex(v) - g2.add_edge('v_4', 'v', 0) - g2.add_edge('v_5', 'v', 0) - g2.add_edge('v_6', 'v', "h") - assert g2.is_adjacent('v_4', 'v') is True - assert g2.is_adjacent('v_5', 'v') is True - assert 
g2.is_adjacent('v_6', 'v') is True - e1 = g2.get_edge('v_4', 'v') - e2 = g2.get_edge('v_5', 'v') - e3 = g2.get_edge('v_6', 'v') - assert (str(e1)) == "('v_4', 'v', 0)" - assert (str(e2)) == "('v_5', 'v', 0)" - assert (str(e3)) == "('v_6', 'v', h)" - g2.remove_edge('v_4', 'v') - assert g2.is_adjacent('v_4', 'v') is False - g2.remove_vertex('v') - assert raises(ValueError, lambda: g2.add_edge('v_4', 'v')) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_adjacency_matrix.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_adjacency_matrix.py deleted file mode 100644 index 27dc81790..000000000 --- a/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_adjacency_matrix.py +++ /dev/null @@ -1,53 +0,0 @@ -from pydatastructs.graphs import Graph -from pydatastructs.utils import AdjacencyMatrixGraphNode -from pydatastructs.utils.raises_util import raises -from pydatastructs.utils.misc_util import Backend - -def test_AdjacencyMatrix(): - v_0 = AdjacencyMatrixGraphNode(0, 0) - v_1 = AdjacencyMatrixGraphNode(1, 1) - v_2 = AdjacencyMatrixGraphNode(2, 2) - g = Graph(v_0, v_1, v_2) - g.add_edge(0, 1, 0) - g.add_edge(1, 2, 0) - g.add_edge(2, 0, 0) - e1 = g.get_edge(0, 1) - e2 = g.get_edge(1, 2) - e3 = g.get_edge(2, 0) - assert (e1.source.name, e1.target.name) == ('0', '1') - assert (e2.source.name, e2.target.name) == ('1', '2') - assert (e3.source.name, e3.target.name) == ('2', '0') - assert g.is_adjacent(0, 1) is True - assert g.is_adjacent(1, 2) is True - assert g.is_adjacent(2, 0) is True - assert g.is_adjacent(1, 0) is False - assert g.is_adjacent(2, 1) is False - assert g.is_adjacent(0, 2) is False - neighbors = g.neighbors(0) - assert neighbors == [v_1] - g.remove_edge(0, 1) - assert g.is_adjacent(0, 1) is False - assert raises(ValueError, lambda: g.add_edge('u', 'v')) - assert raises(ValueError, lambda: g.add_edge('v', 'x')) - assert raises(ValueError, lambda: g.add_edge(2, 
3)) - assert raises(ValueError, lambda: g.add_edge(3, 2)) - - v_3 = AdjacencyMatrixGraphNode('0', 0, backend = Backend.CPP) - v_4 = AdjacencyMatrixGraphNode('1', 1, backend = Backend.CPP) - v_5 = AdjacencyMatrixGraphNode('2', 2, backend = Backend.CPP) - g2 = Graph(v_3, v_4, v_5, implementation = 'adjacency_matrix', backend = Backend.CPP) - g2.add_edge('0', '1', 0) - g2.add_edge('1', '2', 0) - g2.add_edge('2', '0', 0) - assert g2.is_adjacent('0', '1') is True - assert g2.is_adjacent('1', '2') is True - assert g2.is_adjacent('2', '0') is True - assert g2.is_adjacent('1', '0') is False - assert g2.is_adjacent('2', '1') is False - assert g2.is_adjacent('0', '2') is False - neighbors = g2.neighbors('0') - assert neighbors == [v_4] - g2.remove_edge('0', '1') - assert g2.is_adjacent('0', '1') is False - assert raises(ValueError, lambda: g2.add_edge('u', 'v')) - assert raises(ValueError, lambda: g2.add_edge('v', 'x')) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_algorithms.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_algorithms.py deleted file mode 100644 index 04ebcccda..000000000 --- a/build-install/usr/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_algorithms.py +++ /dev/null @@ -1,596 +0,0 @@ -from pydatastructs import (breadth_first_search, Graph, -breadth_first_search_parallel, minimum_spanning_tree, -minimum_spanning_tree_parallel, strongly_connected_components, -depth_first_search, shortest_paths,all_pair_shortest_paths, topological_sort, -topological_sort_parallel, max_flow, find_bridges) -from pydatastructs.utils.raises_util import raises -from pydatastructs.utils.misc_util import AdjacencyListGraphNode, AdjacencyMatrixGraphNode -from pydatastructs.graphs._backend.cpp import _graph -from pydatastructs.graphs._backend.cpp import _algorithms -from pydatastructs.utils.misc_util import Backend - -def test_breadth_first_search(): - - def _test_breadth_first_search(ds): 
- import pydatastructs.utils.misc_util as utils - GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") - - V1 = GraphNode(0) - V2 = GraphNode(1) - V3 = GraphNode(2) - - G1 = Graph(V1, V2, V3) - - assert G1.num_vertices() == 3 - - edges = [ - (V1.name, V2.name), - (V2.name, V3.name), - (V1.name, V3.name) - ] - - for edge in edges: - G1.add_edge(*edge) - - assert G1.num_edges() == len(edges) - - parent = {} - def bfs_tree(curr_node, next_node, parent): - if next_node != "": - parent[next_node] = curr_node - return True - - breadth_first_search(G1, V1.name, bfs_tree, parent) - assert (parent[V3.name] == V1.name and parent[V2.name] == V1.name) or \ - (parent[V3.name] == V2.name and parent[V2.name] == V1.name) - - if (ds=='List'): - parent = {} - V9 = AdjacencyListGraphNode("9",0,backend = Backend.CPP) - V10 = AdjacencyListGraphNode("10",0,backend = Backend.CPP) - V11 = AdjacencyListGraphNode("11",0,backend = Backend.CPP) - G2 = Graph(V9, V10, V11,implementation = 'adjacency_list', backend = Backend.CPP) - assert G2.num_vertices()==3 - G2.add_edge("9", "10") - G2.add_edge("10", "11") - breadth_first_search(G2, "9", bfs_tree, parent, backend = Backend.CPP) - assert parent[V10] == V9 - assert parent[V11] == V10 - - if (ds == 'Matrix'): - parent3 = {} - V12 = AdjacencyMatrixGraphNode("12", 0, backend = Backend.CPP) - V13 = AdjacencyMatrixGraphNode("13", 0, backend = Backend.CPP) - V14 = AdjacencyMatrixGraphNode("14", 0, backend = Backend.CPP) - G3 = Graph(V12, V13, V14, implementation = 'adjacency_matrix', backend = Backend.CPP) - assert G3.num_vertices() == 3 - G3.add_edge("12", "13") - G3.add_edge("13", "14") - breadth_first_search(G3, "12", bfs_tree, parent3, backend = Backend.CPP) - assert parent3[V13] == V12 - assert parent3[V14] == V13 - - V4 = GraphNode(0) - V5 = GraphNode(1) - V6 = GraphNode(2) - V7 = GraphNode(3) - V8 = GraphNode(4) - - edges = [ - (V4.name, V5.name), - (V5.name, V6.name), - (V6.name, V7.name), - (V6.name, V4.name), - (V7.name, V8.name) - ] 
- - G2 = Graph(V4, V5, V6, V7, V8) - - for edge in edges: - G2.add_edge(*edge) - - assert G2.num_edges() == len(edges) - - path = [] - def path_finder(curr_node, next_node, dest_node, parent, path): - if next_node != "": - parent[next_node] = curr_node - if curr_node == dest_node: - node = curr_node - path.append(node) - while node is not None: - if parent.get(node, None) is not None: - path.append(parent[node]) - node = parent.get(node, None) - path.reverse() - return False - return True - - parent.clear() - breadth_first_search(G2, V4.name, path_finder, V7.name, parent, path) - assert path == [V4.name, V5.name, V6.name, V7.name] - - _test_breadth_first_search("List") - _test_breadth_first_search("Matrix") - -def test_breadth_first_search_parallel(): - - def _test_breadth_first_search_parallel(ds): - import pydatastructs.utils.misc_util as utils - GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") - - V1 = GraphNode(0) - V2 = GraphNode(1) - V3 = GraphNode(2) - V4 = GraphNode(3) - V5 = GraphNode(4) - V6 = GraphNode(5) - V7 = GraphNode(6) - V8 = GraphNode(7) - - - G1 = Graph(V1, V2, V3, V4, V5, V6, V7, V8) - - edges = [ - (V1.name, V2.name), - (V1.name, V3.name), - (V1.name, V4.name), - (V2.name, V5.name), - (V2.name, V6.name), - (V3.name, V6.name), - (V3.name, V7.name), - (V4.name, V7.name), - (V4.name, V8.name) - ] - - for edge in edges: - G1.add_edge(*edge) - - parent = {} - def bfs_tree(curr_node, next_node, parent): - if next_node != "": - parent[next_node] = curr_node - return True - - breadth_first_search_parallel(G1, V1.name, 5, bfs_tree, parent) - assert (parent[V2.name] == V1.name and parent[V3.name] == V1.name and - parent[V4.name] == V1.name and parent[V5.name] == V2.name and - (parent[V6.name] in (V2.name, V3.name)) and - (parent[V7.name] in (V3.name, V4.name)) and (parent[V8.name] == V4.name)) - - _test_breadth_first_search_parallel("List") - _test_breadth_first_search_parallel("Matrix") - -def test_minimum_spanning_tree(): - - def 
_test_minimum_spanning_tree(func, ds, algorithm, *args): - import pydatastructs.utils.misc_util as utils - GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") - a, b, c, d, e = [GraphNode(x) for x in [0, 1, 2, 3, 4]] - graph = Graph(a, b, c, d, e) - graph.add_edge(a.name, c.name, 10) - graph.add_edge(c.name, a.name, 10) - graph.add_edge(a.name, d.name, 7) - graph.add_edge(d.name, a.name, 7) - graph.add_edge(c.name, d.name, 9) - graph.add_edge(d.name, c.name, 9) - graph.add_edge(d.name, b.name, 32) - graph.add_edge(b.name, d.name, 32) - graph.add_edge(d.name, e.name, 23) - graph.add_edge(e.name, d.name, 23) - mst = func(graph, algorithm, *args) - expected_mst = [('0_3', 7), ('2_3', 9), ('3_4', 23), ('3_1', 32), - ('3_0', 7), ('3_2', 9), ('4_3', 23), ('1_3', 32)] - assert len(expected_mst) == len(mst.edge_weights.items()) - for k, v in mst.edge_weights.items(): - assert (k, v.value) in expected_mst - - def _test_minimum_spanning_tree_cpp(ds, algorithm, *args): - if (ds == 'List' and algorithm == "prim"): - a1 = AdjacencyListGraphNode('a', 0, backend = Backend.CPP) - b1 = AdjacencyListGraphNode('b', 0, backend = Backend.CPP) - c1 = AdjacencyListGraphNode('c', 0, backend = Backend.CPP) - d1 = AdjacencyListGraphNode('d', 0, backend = Backend.CPP) - e1 = AdjacencyListGraphNode('e', 0, backend = Backend.CPP) - g = Graph(a1, b1, c1, d1, e1, backend = Backend.CPP) - g.add_edge(a1.name, c1.name, 10) - g.add_edge(c1.name, a1.name, 10) - g.add_edge(a1.name, d1.name, 7) - g.add_edge(d1.name, a1.name, 7) - g.add_edge(c1.name, d1.name, 9) - g.add_edge(d1.name, c1.name, 9) - g.add_edge(d1.name, b1.name, 32) - g.add_edge(b1.name, d1.name, 32) - g.add_edge(d1.name, e1.name, 23) - g.add_edge(e1.name, d1.name, 23) - mst = minimum_spanning_tree(g, "prim", backend = Backend.CPP) - expected_mst = ["('a', 'd', 7)", "('d', 'c', 9)", "('e', 'd', 23)", "('b', 'd', 32)", - "('d', 'a', 7)", "('c', 'd', 9)", "('d', 'e', 23)", "('d', 'b', 32)"] - assert str(mst.get_edge('a', 'd')) in 
expected_mst - assert str(mst.get_edge('e', 'd')) in expected_mst - assert str(mst.get_edge('d', 'c')) in expected_mst - assert str(mst.get_edge('b', 'd')) in expected_mst - assert mst.num_edges() == 8 - a=AdjacencyListGraphNode('0', 0, backend = Backend.CPP) - b=AdjacencyListGraphNode('1', 0, backend = Backend.CPP) - c=AdjacencyListGraphNode('2', 0, backend = Backend.CPP) - d=AdjacencyListGraphNode('3', 0, backend = Backend.CPP) - g2 = Graph(a,b,c,d,backend = Backend.CPP) - g2.add_edge('0', '1', 74) - g2.add_edge('1', '0', 74) - g2.add_edge('0', '3', 55) - g2.add_edge('3', '0', 55) - g2.add_edge('1', '2', 74) - g2.add_edge('2', '1', 74) - mst2=minimum_spanning_tree(g2, "prim", backend = Backend.CPP) - assert mst2.num_edges() == 6 - - fmst = minimum_spanning_tree - fmstp = minimum_spanning_tree_parallel - _test_minimum_spanning_tree(fmst, "List", "kruskal") - _test_minimum_spanning_tree(fmst, "Matrix", "kruskal") - _test_minimum_spanning_tree(fmst, "List", "prim") - _test_minimum_spanning_tree(fmstp, "List", "kruskal", 3) - _test_minimum_spanning_tree(fmstp, "Matrix", "kruskal", 3) - _test_minimum_spanning_tree(fmstp, "List", "prim", 3) - _test_minimum_spanning_tree_cpp("List", "prim") - -def test_strongly_connected_components(): - - def _test_strongly_connected_components(func, ds, algorithm, *args): - import pydatastructs.utils.misc_util as utils - GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") - a, b, c, d, e, f, g, h = \ - [GraphNode(chr(x)) for x in range(ord('a'), ord('h') + 1)] - graph = Graph(a, b, c, d, e, f, g, h) - graph.add_edge(a.name, b.name) - graph.add_edge(b.name, c.name) - graph.add_edge(b.name, f.name) - graph.add_edge(b.name, e.name) - graph.add_edge(c.name, d.name) - graph.add_edge(c.name, g.name) - graph.add_edge(d.name, h.name) - graph.add_edge(d.name, c.name) - graph.add_edge(e.name, f.name) - graph.add_edge(e.name, a.name) - graph.add_edge(f.name, g.name) - graph.add_edge(g.name, f.name) - graph.add_edge(h.name, d.name) - 
graph.add_edge(h.name, g.name) - comps = func(graph, algorithm) - expected_comps = [{'e', 'a', 'b'}, {'d', 'c', 'h'}, {'g', 'f'}] - assert comps.sort() == expected_comps.sort() - - scc = strongly_connected_components - _test_strongly_connected_components(scc, "List", "kosaraju") - _test_strongly_connected_components(scc, "Matrix", "kosaraju") - _test_strongly_connected_components(scc, "List", "tarjan") - _test_strongly_connected_components(scc, "Matrix", "tarjan") - -def test_depth_first_search(): - - def _test_depth_first_search(ds): - import pydatastructs.utils.misc_util as utils - GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") - - V1 = GraphNode(0) - V2 = GraphNode(1) - V3 = GraphNode(2) - - G1 = Graph(V1, V2, V3) - - edges = [ - (V1.name, V2.name), - (V2.name, V3.name), - (V1.name, V3.name) - ] - - for edge in edges: - G1.add_edge(*edge) - - parent = {} - def dfs_tree(curr_node, next_node, parent): - if next_node != "": - parent[next_node] = curr_node - return True - - depth_first_search(G1, V1.name, dfs_tree, parent) - assert (parent[V3.name] == V1.name and parent[V2.name] == V1.name) or \ - (parent[V3.name] == V2.name and parent[V2.name] == V1.name) - - V4 = GraphNode(0) - V5 = GraphNode(1) - V6 = GraphNode(2) - V7 = GraphNode(3) - V8 = GraphNode(4) - - edges = [ - (V4.name, V5.name), - (V5.name, V6.name), - (V6.name, V7.name), - (V6.name, V4.name), - (V7.name, V8.name) - ] - - G2 = Graph(V4, V5, V6, V7, V8) - - for edge in edges: - G2.add_edge(*edge) - - path = [] - def path_finder(curr_node, next_node, dest_node, parent, path): - if next_node != "": - parent[next_node] = curr_node - if curr_node == dest_node: - node = curr_node - path.append(node) - while node is not None: - if parent.get(node, None) is not None: - path.append(parent[node]) - node = parent.get(node, None) - path.reverse() - return False - return True - - parent.clear() - depth_first_search(G2, V4.name, path_finder, V7.name, parent, path) - assert path == [V4.name, V5.name, 
V6.name, V7.name] - - _test_depth_first_search("List") - _test_depth_first_search("Matrix") - -def test_shortest_paths(): - - def _test_shortest_paths_positive_edges(ds, algorithm): - import pydatastructs.utils.misc_util as utils - GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") - vertices = [GraphNode('S'), GraphNode('C'), - GraphNode('SLC'), GraphNode('SF'), - GraphNode('D')] - - graph = Graph(*vertices) - graph.add_edge('S', 'SLC', 2) - graph.add_edge('C', 'S', 4) - graph.add_edge('C', 'D', 2) - graph.add_edge('SLC', 'C', 2) - graph.add_edge('SLC', 'D', 3) - graph.add_edge('SF', 'SLC', 2) - graph.add_edge('SF', 'S', 2) - graph.add_edge('D', 'SF', 3) - dist, pred = shortest_paths(graph, algorithm, 'SLC') - assert dist == {'S': 6, 'C': 2, 'SLC': 0, 'SF': 6, 'D': 3} - assert pred == {'S': 'C', 'C': 'SLC', 'SLC': None, 'SF': 'D', 'D': 'SLC'} - dist, pred = shortest_paths(graph, algorithm, 'SLC', 'SF') - assert dist == 6 - assert pred == {'S': 'C', 'C': 'SLC', 'SLC': None, 'SF': 'D', 'D': 'SLC'} - graph.remove_edge('SLC', 'D') - graph.add_edge('D', 'SLC', -10) - assert raises(ValueError, lambda: shortest_paths(graph, 'bellman_ford', 'SLC')) - - if (ds == 'List' and algorithm == 'dijkstra'): - vertices2 = [AdjacencyListGraphNode('S', 0, backend = Backend.CPP), AdjacencyListGraphNode('C', 0, backend = Backend.CPP), - AdjacencyListGraphNode('SLC', 0, backend = Backend.CPP), AdjacencyListGraphNode('SF', 0, backend = Backend.CPP), - AdjacencyListGraphNode('D', 0, backend = Backend.CPP)] - graph2 = Graph(*vertices2, backend = Backend.CPP) - graph2.add_edge('S', 'SLC', 2) - graph2.add_edge('C', 'S', 4) - graph2.add_edge('C', 'D', 2) - graph2.add_edge('SLC', 'C', 2) - graph2.add_edge('SLC', 'D', 3) - graph2.add_edge('SF', 'SLC', 2) - graph2.add_edge('SF', 'S', 2) - graph2.add_edge('D', 'SF', 3) - (dist2, pred2) = shortest_paths(graph2, algorithm, 'SLC', backend = Backend.CPP) - assert dist2 == {'S': 6, 'C': 2, 'SLC': 0, 'SF': 6, 'D': 3} - assert pred2 == {'S': 
'C', 'C': 'SLC', 'SLC': None, 'SF': 'D', 'D': 'SLC'} - - - - def _test_shortest_paths_negative_edges(ds, algorithm): - import pydatastructs.utils.misc_util as utils - GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") - vertices = [GraphNode('s'), GraphNode('a'), - GraphNode('b'), GraphNode('c'), - GraphNode('d')] - - graph = Graph(*vertices) - graph.add_edge('s', 'a', 3) - graph.add_edge('s', 'b', 2) - graph.add_edge('a', 'c', 1) - graph.add_edge('b', 'd', 1) - graph.add_edge('b', 'a', -2) - graph.add_edge('c', 'd', 1) - dist, pred = shortest_paths(graph, algorithm, 's') - assert dist == {'s': 0, 'a': 0, 'b': 2, 'c': 1, 'd': 2} - assert pred == {'s': None, 'a': 'b', 'b': 's', 'c': 'a', 'd': 'c'} - dist, pred = shortest_paths(graph, algorithm, 's', 'd') - assert dist == 2 - assert pred == {'s': None, 'a': 'b', 'b': 's', 'c': 'a', 'd': 'c'} - - _test_shortest_paths_positive_edges("List", 'bellman_ford') - _test_shortest_paths_positive_edges("Matrix", 'bellman_ford') - _test_shortest_paths_negative_edges("List", 'bellman_ford') - _test_shortest_paths_negative_edges("Matrix", 'bellman_ford') - _test_shortest_paths_positive_edges("List", 'dijkstra') - _test_shortest_paths_positive_edges("Matrix", 'dijkstra') - -def test_all_pair_shortest_paths(): - - def _test_shortest_paths_negative_edges(ds, algorithm): - import pydatastructs.utils.misc_util as utils - GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") - vertices = [GraphNode('1'), GraphNode('2'), - GraphNode('3'), GraphNode('4')] - - graph = Graph(*vertices) - graph.add_edge('1', '3', -2) - graph.add_edge('2', '1', 4) - graph.add_edge('2', '3', 3) - graph.add_edge('3', '4', 2) - graph.add_edge('4', '2', -1) - dist, next_v = all_pair_shortest_paths(graph, algorithm) - assert dist == {'1': {'3': -2, '1': 0, '4': 0, '2': -1}, - '2': {'1': 4, '3': 2, '2': 0, '4': 4}, - '3': {'4': 2, '3': 0, '1': 5, '2': 1}, - '4': {'2': -1, '4': 0, '1': 3, '3': 1}} - assert next_v == {'1': {'3': '1', '1': '1', '4': None, 
'2': None}, - '2': {'1': '2', '3': None, '2': '2', '4': None}, - '3': {'4': '3', '3': '3', '1': None, '2': None}, - '4': {'2': '4', '4': '4', '1': None, '3': None}} - - _test_shortest_paths_negative_edges("List", 'floyd_warshall') - _test_shortest_paths_negative_edges("Matrix", 'floyd_warshall') - _test_shortest_paths_negative_edges("List", 'johnson') - -def test_topological_sort(): - - def _test_topological_sort(func, ds, algorithm, threads=None): - import pydatastructs.utils.misc_util as utils - GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") - vertices = [GraphNode('2'), GraphNode('3'), GraphNode('5'), - GraphNode('7'), GraphNode('8'), GraphNode('10'), - GraphNode('11'), GraphNode('9')] - - graph = Graph(*vertices) - graph.add_edge('5', '11') - graph.add_edge('7', '11') - graph.add_edge('7', '8') - graph.add_edge('3', '8') - graph.add_edge('3', '10') - graph.add_edge('11', '2') - graph.add_edge('11', '9') - graph.add_edge('11', '10') - graph.add_edge('8', '9') - if threads is not None: - l = func(graph, algorithm, threads) - else: - l = func(graph, algorithm) - assert all([(l1 in l[0:3]) for l1 in ('3', '5', '7')] + - [(l2 in l[3:5]) for l2 in ('8', '11')] + - [(l3 in l[5:]) for l3 in ('10', '9', '2')]) - - _test_topological_sort(topological_sort, "List", "kahn") - _test_topological_sort(topological_sort_parallel, "List", "kahn", 3) - - -def test_max_flow(): - def _test_max_flow(ds, algorithm): - import pydatastructs.utils.misc_util as utils - GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") - - a = GraphNode('a') - b = GraphNode('b') - c = GraphNode('c') - d = GraphNode('d') - e = GraphNode('e') - - G = Graph(a, b, c, d, e) - - G.add_edge('a', 'b', 3) - G.add_edge('a', 'c', 4) - G.add_edge('b', 'c', 2) - G.add_edge('b', 'd', 3) - G.add_edge('c', 'd', 1) - G.add_edge('d', 'e', 6) - - assert max_flow(G, 'a', 'e', algorithm) == 4 - assert max_flow(G, 'a', 'c', algorithm) == 6 - - a = GraphNode('a') - b = GraphNode('b') - c = GraphNode('c') - d 
= GraphNode('d') - e = GraphNode('e') - f = GraphNode('f') - - G2 = Graph(a, b, c, d, e, f) - - G2.add_edge('a', 'b', 16) - G2.add_edge('a', 'c', 13) - G2.add_edge('b', 'c', 10) - G2.add_edge('b', 'd', 12) - G2.add_edge('c', 'b', 4) - G2.add_edge('c', 'e', 14) - G2.add_edge('d', 'c', 9) - G2.add_edge('d', 'f', 20) - G2.add_edge('e', 'd', 7) - G2.add_edge('e', 'f', 4) - - assert max_flow(G2, 'a', 'f', algorithm) == 23 - assert max_flow(G2, 'a', 'd', algorithm) == 19 - - a = GraphNode('a') - b = GraphNode('b') - c = GraphNode('c') - d = GraphNode('d') - - G3 = Graph(a, b, c, d) - - G3.add_edge('a', 'b', 3) - G3.add_edge('a', 'c', 2) - G3.add_edge('b', 'c', 2) - G3.add_edge('b', 'd', 3) - G3.add_edge('c', 'd', 2) - - assert max_flow(G3, 'a', 'd', algorithm) == 5 - assert max_flow(G3, 'a', 'b', algorithm) == 3 - - - _test_max_flow("List", "edmonds_karp") - _test_max_flow("Matrix", "edmonds_karp") - _test_max_flow("List", "dinic") - _test_max_flow("Matrix", "dinic") - - -def test_find_bridges(): - def _test_find_bridges(ds): - import pydatastructs.utils.misc_util as utils - GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") - - impl = 'adjacency_list' if ds == "List" else 'adjacency_matrix' - - v0 = GraphNode(0) - v1 = GraphNode(1) - v2 = GraphNode(2) - v3 = GraphNode(3) - v4 = GraphNode(4) - - G1 = Graph(v0, v1, v2, v3, v4, implementation=impl) - G1.add_edge(v0.name, v1.name) - G1.add_edge(v1.name, v2.name) - G1.add_edge(v2.name, v3.name) - G1.add_edge(v3.name, v4.name) - - bridges = find_bridges(G1) - expected_bridges = [('0', '1'), ('1', '2'), ('2', '3'), ('3', '4')] - assert sorted(bridges) == sorted(expected_bridges) - - u0 = GraphNode(0) - u1 = GraphNode(1) - u2 = GraphNode(2) - - G2 = Graph(u0, u1, u2, implementation=impl) - G2.add_edge(u0.name, u1.name) - G2.add_edge(u1.name, u2.name) - G2.add_edge(u2.name, u0.name) - - bridges = find_bridges(G2) - assert bridges == [] - - w0 = GraphNode(0) - w1 = GraphNode(1) - w2 = GraphNode(2) - w3 = GraphNode(3) - w4 
= GraphNode(4) - - G3 = Graph(w0, w1, w2, w3, w4, implementation=impl) - G3.add_edge(w0.name, w1.name) - G3.add_edge(w1.name, w2.name) - G3.add_edge(w3.name, w4.name) - - bridges = find_bridges(G3) - expected_bridges = [('0', '1'), ('1', '2'), ('3', '4')] - assert sorted(bridges) == sorted(expected_bridges) - - _test_find_bridges("List") - _test_find_bridges("Matrix") diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/__init__.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/__init__.py deleted file mode 100644 index c6b3341d2..000000000 --- a/build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/__init__.py +++ /dev/null @@ -1,53 +0,0 @@ -__all__ = [] - -from . import ( - arrays, - linked_lists, - algorithms, -) - -from .arrays import ( - OneDimensionalArray, - DynamicOneDimensionalArray, - MultiDimensionalArray, - ArrayForTrees -) -__all__.extend(arrays.__all__) - -from .linked_lists import ( - SinglyLinkedList, - DoublyLinkedList, - SinglyCircularLinkedList, - DoublyCircularLinkedList, - SkipList -) -__all__.extend(linked_lists.__all__) - -from .algorithms import ( - merge_sort_parallel, - brick_sort, - brick_sort_parallel, - heapsort, - matrix_multiply_parallel, - counting_sort, - bucket_sort, - cocktail_shaker_sort, - quick_sort, - longest_common_subsequence, - is_ordered, - upper_bound, - lower_bound, - longest_increasing_subsequence, - next_permutation, - prev_permutation, - bubble_sort, - linear_search, - binary_search, - jump_search, - selection_sort, - insertion_sort, - intro_sort, - shell_sort, - radix_sort -) -__all__.extend(algorithms.__all__) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/_backend/__init__.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/_backend/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git 
a/build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/algorithms.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/algorithms.py deleted file mode 100644 index 6d383fdca..000000000 --- a/build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/algorithms.py +++ /dev/null @@ -1,2010 +0,0 @@ -from pydatastructs.linear_data_structures.arrays import ( - OneDimensionalArray, DynamicArray, DynamicOneDimensionalArray, Array) -from pydatastructs.linear_data_structures._backend.cpp import _algorithms, _arrays -from pydatastructs.utils.misc_util import ( - _check_type, _comp, Backend, - raise_if_backend_is_not_python) -from concurrent.futures import ThreadPoolExecutor -from math import log, floor, sqrt - -__all__ = [ - 'merge_sort_parallel', - 'brick_sort', - 'brick_sort_parallel', - 'heapsort', - 'matrix_multiply_parallel', - 'counting_sort', - 'bucket_sort', - 'cocktail_shaker_sort', - 'quick_sort', - 'longest_common_subsequence', - 'is_ordered', - 'upper_bound', - 'lower_bound', - 'longest_increasing_subsequence', - 'next_permutation', - 'prev_permutation', - 'bubble_sort', - 'linear_search', - 'binary_search', - 'jump_search', - 'selection_sort', - 'insertion_sort', - 'intro_sort', - 'shell_sort', - 'radix_sort' -] - -def _merge(array, sl, el, sr, er, end, comp): - l, r = [], [] - for i in range(sl, el + 1): - if i <= end: - l.append(array[i]) - array[i] = None - for i in range(sr, er + 1): - if i <= end: - r.append(array[i]) - array[i] = None - i, j, k = 0, 0, sl - while i < len(l) and j < len(r): - if _comp(l[i], r[j], comp): - array[k] = l[i] - i += 1 - else: - array[k] = r[j] - j += 1 - k += 1 - - while i < len(l): - array[k] = l[i] - i += 1 - k += 1 - - while j < len(r): - array[k] = r[j] - j += 1 - k += 1 - -def merge_sort_parallel(array, num_threads, **kwargs): - """ - Implements parallel merge sort. 
- - Parameters - ========== - - array: Array - The array which is to be sorted. - num_threads: int - The maximum number of threads - to be used for sorting. - start: int - The starting index of the portion - which is to be sorted. - Optional, by default 0 - end: int - The ending index of the portion which - is to be sorted. - Optional, by default the index - of the last position filled. - comp: lambda/function - The comparator which is to be used - for sorting. If the function returns - False then only swapping is performed. - Optional, by default, less than or - equal to is used for comparing two - values. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs import OneDimensionalArray, merge_sort_parallel - >>> arr = OneDimensionalArray(int,[3, 2, 1]) - >>> merge_sort_parallel(arr, 3) - >>> [arr[0], arr[1], arr[2]] - [1, 2, 3] - >>> merge_sort_parallel(arr, 3, comp=lambda u, v: u > v) - >>> [arr[0], arr[1], arr[2]] - [3, 2, 1] - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Merge_sort - """ - raise_if_backend_is_not_python( - merge_sort_parallel, kwargs.get('backend', Backend.PYTHON)) - start = kwargs.get('start', 0) - end = kwargs.get('end', len(array) - 1) - comp = kwargs.get("comp", lambda u, v: u <= v) - for size in range(floor(log(end - start + 1, 2)) + 1): - pow_2 = 2**size - with ThreadPoolExecutor(max_workers=num_threads) as Executor: - i = start - while i <= end: - Executor.submit( - _merge, - array, - i, i + pow_2 - 1, - i + pow_2, i + 2*pow_2 - 1, - end, comp).result() - i = i + 2*pow_2 - - if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)): - array._modify(True) - -def brick_sort(array, **kwargs): - """ - Implements Brick Sort / Odd Even sorting algorithm - - Parameters - ========== - - array: Array - The array which is to be sorted. 
- start: int - The starting index of the portion - which is to be sorted. - Optional, by default 0 - end: int - The ending index of the portion which - is to be sorted. - Optional, by default the index - of the last position filled. - comp: lambda/function - The comparator which is to be used - for sorting. If the function returns - False then only swapping is performed. - Optional, by default, less than or - equal to is used for comparing two - values. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - >>> from pydatastructs import OneDimensionalArray, brick_sort - >>> arr = OneDimensionalArray(int,[3, 2, 1]) - >>> brick_sort(arr) - >>> [arr[0], arr[1], arr[2]] - [1, 2, 3] - >>> brick_sort(arr, comp=lambda u, v: u > v) - >>> [arr[0], arr[1], arr[2]] - [3, 2, 1] - - References - ========== - .. [1] https://www.geeksforgeeks.org/odd-even-sort-brick-sort/ - """ - raise_if_backend_is_not_python( - brick_sort, kwargs.get('backend', Backend.PYTHON)) - start = kwargs.get('start', 0) - end = kwargs.get('end', len(array) - 1) - comp = kwargs.get("comp", lambda u, v: u <= v) - - is_sorted = False - while is_sorted is False: - is_sorted = True - for i in range(start+1, end, 2): - if _comp(array[i+1], array[i], comp): - array[i], array[i+1] = array[i+1], array[i] - is_sorted = False - for i in range(start, end, 2): - if _comp(array[i+1], array[i], comp): - array[i], array[i+1] = array[i+1], array[i] - is_sorted = False - - if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)): - array._modify(True) - -def _brick_sort_swap(array, i, j, comp, is_sorted): - if _comp(array[j], array[i], comp): - array[i], array[j] = array[j], array[i] - is_sorted[0] = False - -def brick_sort_parallel(array, num_threads, **kwargs): - """ - Implements Concurrent Brick Sort / Odd Even sorting algorithm - - Parameters - ========== - - array: Array/list - The array which is to be sorted. 
- num_threads: int - The maximum number of threads - to be used for sorting. - start: int - The starting index of the portion - which is to be sorted. - Optional, by default 0 - end: int - The ending index of the portion which - is to be sorted. - Optional, by default the index - of the last position filled. - comp: lambda/function - The comparator which is to be used - for sorting. If the function returns - False then only swapping is performed. - Optional, by default, less than or - equal to is used for comparing two - values. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs import OneDimensionalArray, brick_sort_parallel - >>> arr = OneDimensionalArray(int,[3, 2, 1]) - >>> brick_sort_parallel(arr, num_threads=5) - >>> [arr[0], arr[1], arr[2]] - [1, 2, 3] - >>> brick_sort_parallel(arr, num_threads=5, comp=lambda u, v: u > v) - >>> [arr[0], arr[1], arr[2]] - [3, 2, 1] - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Odd%E2%80%93even_sort - """ - raise_if_backend_is_not_python( - brick_sort_parallel, kwargs.get('backend', Backend.PYTHON)) - start = kwargs.get('start', 0) - end = kwargs.get('end', len(array) - 1) - comp = kwargs.get("comp", lambda u, v: u <= v) - - is_sorted = [False] - with ThreadPoolExecutor(max_workers=num_threads) as Executor: - while is_sorted[0] is False: - is_sorted[0] = True - for i in range(start + 1, end, 2): - Executor.submit(_brick_sort_swap, array, i, i + 1, comp, is_sorted).result() - - for i in range(start, end, 2): - Executor.submit(_brick_sort_swap, array, i, i + 1, comp, is_sorted).result() - - if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)): - array._modify(True) - -def heapsort(array, **kwargs): - """ - Implements Heapsort algorithm. - - Parameters - ========== - - array: Array - The array which is to be sorted. 
- start: int - The starting index of the portion - which is to be sorted. - Optional, by default 0 - end: int - The ending index of the portion which - is to be sorted. - Optional, by default the index - of the last position filled. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs import OneDimensionalArray, heapsort - >>> arr = OneDimensionalArray(int,[3, 2, 1]) - >>> heapsort(arr) - >>> [arr[0], arr[1], arr[2]] - [1, 2, 3] - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Heapsort - - Note - ==== - - This function does not support custom comparators as is the case with - other sorting functions in this file. - """ - raise_if_backend_is_not_python( - heapsort, kwargs.get('backend', Backend.PYTHON)) - from pydatastructs.trees.heaps import BinaryHeap - - start = kwargs.get('start', 0) - end = kwargs.get('end', len(array) - 1) - - h = BinaryHeap(heap_property="min") - for i in range(start, end+1): - if array[i] is not None: - h.insert(array[i]) - array[i] = None - - i = start - while not h.is_empty: - array[i] = h.extract().key - i += 1 - - if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)): - array._modify(True) - -def counting_sort(array: Array, **kwargs) -> Array: - """ - Performs counting sort on the given array. - - Parameters - ========== - - array: Array - The array which is to be sorted. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - output: Array - The sorted array. - - Examples - ======== - - >>> from pydatastructs import DynamicOneDimensionalArray as DODA, counting_sort - >>> arr = DODA(int, [5, 78, 1, 0]) - >>> out = counting_sort(arr) - >>> str(out) - "['0', '1', '5', '78']" - >>> arr.delete(2) - >>> out = counting_sort(arr) - >>> str(out) - "['0', '5', '78']" - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Counting_sort - - Note - ==== - - Since, counting sort is a non-comparison sorting algorithm, - custom comparators aren't allowed. - The ouput array doesn't contain any `None` value. - """ - raise_if_backend_is_not_python( - counting_sort, kwargs.get('backend', Backend.PYTHON)) - max_val, min_val = array[0], array[0] - none_count = 0 - for i in range(len(array)): - if array[i] is not None: - if max_val is None or max_val < array[i]: - max_val = array[i] - if min_val is None or array[i] < min_val: - min_val = array[i] - else: - none_count += 1 - if min_val is None or max_val is None: - return array - - count = [0 for _ in range(max_val - min_val + 1)] - for i in range(len(array)): - if array[i] is not None: - count[array[i] - min_val] += 1 - - total = 0 - for i in range(max_val - min_val + 1): - count[i], total = total, count[i] + total - - output = type(array)(array._dtype, - [array[i] for i in range(len(array)) - if array[i] is not None]) - if _check_type(output, DynamicArray): - output._modify(force=True) - - for i in range(len(array)): - x = array[i] - if x is not None: - output[count[x-min_val]] = x - count[x-min_val] += 1 - - return output - -def _matrix_multiply_helper(m1, m2, row, col): - s = 0 - for i in range(len(m1)): - s += m1[row][i] * m2[i][col] - return s - -def matrix_multiply_parallel(matrix_1, matrix_2, num_threads): - """ - Implements concurrent Matrix multiplication - - Parameters - ========== - - matrix_1: Any matrix representation - Left matrix - matrix_2: Any matrix representation - Right matrix - num_threads: int - The maximum number of threads - to be used for multiplication. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Raises - ====== - - ValueError - When the columns in matrix_1 are not equal to the rows in matrix_2 - - Returns - ======= - - C: list - The result of matrix multiplication. 
- - Examples - ======== - - >>> from pydatastructs import matrix_multiply_parallel - >>> I = [[1, 1, 0], [0, 1, 0], [0, 0, 1]] - >>> J = [[2, 1, 2], [1, 2, 1], [2, 2, 2]] - >>> matrix_multiply_parallel(I, J, num_threads=5) - [[3, 3, 3], [1, 2, 1], [2, 2, 2]] - - References - ========== - .. [1] https://www3.nd.edu/~zxu2/acms60212-40212/Lec-07-3.pdf - """ - row_matrix_1, col_matrix_1 = len(matrix_1), len(matrix_1[0]) - row_matrix_2, col_matrix_2 = len(matrix_2), len(matrix_2[0]) - - if col_matrix_1 != row_matrix_2: - raise ValueError("Matrix size mismatch: %s * %s"%( - (row_matrix_1, col_matrix_1), (row_matrix_2, col_matrix_2))) - - C = [[None for i in range(col_matrix_1)] for j in range(row_matrix_2)] - - with ThreadPoolExecutor(max_workers=num_threads) as Executor: - for i in range(row_matrix_1): - for j in range(col_matrix_2): - C[i][j] = Executor.submit(_matrix_multiply_helper, - matrix_1, - matrix_2, - i, j).result() - - return C - -def _bucket_sort_helper(bucket: Array) -> Array: - for i in range(1, len(bucket)): - key = bucket[i] - j = i - 1 - while j >= 0 and bucket[j] > key: - bucket[j+1] = bucket[j] - j -= 1 - bucket[j+1] = key - return bucket - -def bucket_sort(array: Array, **kwargs) -> Array: - """ - Performs bucket sort on the given array. - - Parameters - ========== - - array: Array - The array which is to be sorted. - start: int - The starting index of the portion - which is to be sorted. - Optional, by default 0 - end: int - The ending index of the portion which - is to be sorted. - Optional, by default the index - of the last position filled. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - output: Array - The sorted array. 
- - Examples - ======== - - >>> from pydatastructs import DynamicOneDimensionalArray as DODA, bucket_sort - >>> arr = DODA(int, [5, 78, 1, 0]) - >>> out = bucket_sort(arr) - >>> str(out) - "['0', '1', '5', '78']" - >>> arr.delete(2) - >>> out = bucket_sort(arr) - >>> str(out) - "['0', '1', '78']" - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Bucket_sort - - Note - ==== - - This function does not support custom comparators as is the case with - other sorting functions in this file. - """ - raise_if_backend_is_not_python( - bucket_sort, kwargs.get('backend', Backend.PYTHON)) - start = kwargs.get('start', 0) - end = kwargs.get('end', len(array) - 1) - - #Find maximum value in the list and use length of the list to determine which value in the list goes into which bucket - max_value = None - for i in range(start, end+1): - if array[i] is not None: - max_value = array[i] - - count = 0 - for i in range(start, end+1): - if array[i] is not None: - count += 1 - if array[i] > max_value: - max_value = array[i] - - number_of_null_values = end - start + 1 - count - size = max_value // count - - # Create n empty buckets where n is equal to the length of the input list - buckets_list = [[] for _ in range(count)] - - # Put list elements into different buckets based on the size - for i in range(start, end + 1): - if array[i] is not None: - j = array[i] // size - if j is not count: - buckets_list[j].append(array[i]) - else: - buckets_list[count-1].append(array[i]) - - # Sort elements within the buckets using Insertion Sort - for z in range(count): - _bucket_sort_helper(buckets_list[z]) - - # Concatenate buckets with sorted elements into a single array - sorted_list = [] - for x in range(count): - sorted_list.extend(buckets_list[x]) - for i in range(end, end - number_of_null_values, -1): - array[i] = None - for i in range(start, end - number_of_null_values + 1): - array[i] = sorted_list[i-start] - if _check_type(array, (DynamicArray, 
_arrays.DynamicOneDimensionalArray)): - array._modify(True) - return array - -def cocktail_shaker_sort(array: Array, **kwargs) -> Array: - """ - Performs cocktail sort on the given array. - - Parameters - ========== - - array: Array - The array which is to be sorted. - start: int - The starting index of the portion - which is to be sorted. - Optional, by default 0 - end: int - The ending index of the portion which - is to be sorted. - Optional, by default the index - of the last position filled. - comp: lambda/function - The comparator which is to be used - for sorting. If the function returns - False then only swapping is performed. - Optional, by default, less than or - equal to is used for comparing two - values. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - output: Array - The sorted array. - - Examples - ======== - - >>> from pydatastructs import OneDimensionalArray as ODA, cocktail_shaker_sort - >>> arr = ODA(int, [5, 78, 1, 0]) - >>> out = cocktail_shaker_sort(arr) - >>> str(out) - '[0, 1, 5, 78]' - >>> arr = ODA(int, [21, 37, 5]) - >>> out = cocktail_shaker_sort(arr) - >>> str(out) - '[5, 21, 37]' - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Cocktail_shaker_sort - """ - raise_if_backend_is_not_python( - cocktail_shaker_sort, kwargs.get('backend', Backend.PYTHON)) - def swap(i, j): - array[i], array[j] = array[j], array[i] - - lower = kwargs.get('start', 0) - upper = kwargs.get('end', len(array) - 1) - comp = kwargs.get("comp", lambda u, v: u <= v) - - swapping = False - while (not swapping and upper - lower >= 1): - - swapping = True - for j in range(lower, upper): - if _comp(array[j], array[j+1], comp) is False: - swap(j + 1, j) - swapping = False - - upper = upper - 1 - for j in range(upper, lower, -1): - if _comp(array[j-1], array[j], comp) is False: - swap(j, j - 1) - swapping = False - lower = lower + 1 - - if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)): - array._modify(True) - - return array - -def quick_sort(array: Array, **kwargs) -> Array: - """ - Performs quick sort on the given array. - - Parameters - ========== - - array: Array - The array which is to be sorted. - start: int - The starting index of the portion - which is to be sorted. - Optional, by default 0 - end: int - The ending index of the portion which - is to be sorted. - Optional, by default the index - of the last position filled. - comp: lambda/function - The comparator which is to be used - for sorting. If the function returns - False then only swapping is performed. - Optional, by default, less than or - equal to is used for comparing two - values. - pick_pivot_element: lambda/function - The function implementing the pivot picking - logic for quick sort. Should accept, `low`, - `high`, and `array` in this order, where `low` - represents the left end of the current partition, - `high` represents the right end, and `array` is - the original input array to `quick_sort` function. - Optional, by default, picks the element at `high` - index of the current partition as pivot. - backend: pydatastructs.Backend - The backend to be used. 
- Optional, by default, the best available - backend is used. - - Returns - ======= - - output: Array - The sorted array. - - Examples - ======== - - >>> from pydatastructs import OneDimensionalArray as ODA, quick_sort - >>> arr = ODA(int, [5, 78, 1, 0]) - >>> out = quick_sort(arr) - >>> str(out) - '[0, 1, 5, 78]' - >>> arr = ODA(int, [21, 37, 5]) - >>> out = quick_sort(arr) - >>> str(out) - '[5, 21, 37]' - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Quicksort - """ - backend = kwargs.pop("backend", Backend.PYTHON) - if backend == Backend.CPP: - return _algorithms.quick_sort(array, **kwargs) - from pydatastructs import Stack - comp = kwargs.get("comp", lambda u, v: u <= v) - pick_pivot_element = kwargs.get("pick_pivot_element", - lambda low, high, array: array[high]) - - def partition(low, high, pick_pivot_element): - i = (low - 1) - x = pick_pivot_element(low, high, array) - for j in range(low , high): - if _comp(array[j], x, comp) is True: - i = i + 1 - array[i], array[j] = array[j], array[i] - array[i + 1], array[high] = array[high], array[i + 1] - return (i + 1) - - lower = kwargs.get('start', 0) - upper = kwargs.get('end', len(array) - 1) - stack = Stack() - - stack.push(lower) - stack.push(upper) - - while stack.is_empty is False: - high = stack.pop() - low = stack.pop() - p = partition(low, high, pick_pivot_element) - if p - 1 > low: - stack.push(low) - stack.push(p - 1) - if p + 1 < high: - stack.push(p + 1) - stack.push(high) - - if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)): - array._modify(True) - - return array - -def longest_common_subsequence(seq1: OneDimensionalArray, seq2: OneDimensionalArray, - **kwargs) -> OneDimensionalArray: - """ - Finds the longest common subsequence between the - two given sequences. - - Parameters - ======== - - seq1: OneDimensionalArray - The first sequence. - seq2: OneDimensionalArray - The second sequence. - backend: pydatastructs.Backend - The backend to be used. 
- Optional, by default, the best available - backend is used. - - Returns - ======= - - output: OneDimensionalArray - The longest common subsequence. - - Examples - ======== - - >>> from pydatastructs import longest_common_subsequence as LCS, OneDimensionalArray as ODA - >>> arr1 = ODA(str, ['A', 'B', 'C', 'D', 'E']) - >>> arr2 = ODA(str, ['A', 'B', 'C', 'G' ,'D', 'E', 'F']) - >>> lcs = LCS(arr1, arr2) - >>> str(lcs) - "['A', 'B', 'C', 'D', 'E']" - >>> arr1 = ODA(str, ['A', 'P', 'P']) - >>> arr2 = ODA(str, ['A', 'p', 'P', 'S', 'P']) - >>> lcs = LCS(arr1, arr2) - >>> str(lcs) - "['A', 'P', 'P']" - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Longest_common_subsequence_problem - - Note - ==== - - The data types of elements across both the sequences - should be same and should be comparable. - """ - raise_if_backend_is_not_python( - longest_common_subsequence, kwargs.get('backend', Backend.PYTHON)) - row = len(seq1) - col = len(seq2) - check_mat = {0: [(0, []) for _ in range(col + 1)]} - - for i in range(1, row + 1): - check_mat[i] = [(0, []) for _ in range(col + 1)] - for j in range(1, col + 1): - if seq1[i-1] == seq2[j-1]: - temp = check_mat[i-1][j-1][1][:] - temp.append(seq1[i-1]) - check_mat[i][j] = (check_mat[i-1][j-1][0] + 1, temp) - else: - if check_mat[i-1][j][0] > check_mat[i][j-1][0]: - check_mat[i][j] = check_mat[i-1][j] - else: - check_mat[i][j] = check_mat[i][j-1] - - return OneDimensionalArray(seq1._dtype, check_mat[row][col][-1]) - -def is_ordered(array, **kwargs): - """ - Checks whether the given array is ordered or not. - - Parameters - ========== - - array: OneDimensionalArray - The array which is to be checked for having - specified ordering among its elements. - start: int - The starting index of the portion of the array - under consideration. - Optional, by default 0 - end: int - The ending index of the portion of the array - under consideration. - Optional, by default the index - of the last position filled. 
- comp: lambda/function - The comparator which is to be used - for specifying the desired ordering. - Optional, by default, less than or - equal to is used for comparing two - values. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - True if the specified ordering is present - from start to end (inclusive) otherwise False. - - Examples - ======== - - >>> from pydatastructs import OneDimensionalArray, is_ordered - >>> arr = OneDimensionalArray(int, [1, 2, 3, 4]) - >>> is_ordered(arr) - True - >>> arr1 = OneDimensionalArray(int, [1, 2, 3]) - >>> is_ordered(arr1, start=0, end=1, comp=lambda u, v: u > v) - False - """ - backend = kwargs.pop("backend", Backend.PYTHON) - if backend == Backend.CPP: - return _algorithms.is_ordered(array, **kwargs) - lower = kwargs.get('start', 0) - upper = kwargs.get('end', len(array) - 1) - comp = kwargs.get("comp", lambda u, v: u <= v) - - for i in range(lower + 1, upper + 1): - if array[i] is None or array[i - 1] is None: - continue - if comp(array[i], array[i - 1]): - return False - return True - -def upper_bound(array, value, **kwargs): - """ - Finds the index of the first occurence of an element greater than the given - value according to specified order, in the given OneDimensionalArray using a variation of binary search method. - - Parameters - ========== - - array: OneDimensionalArray - The array in which the upper bound has to be found. - start: int - The staring index of the portion of the array in which the upper bound - of a given value has to be looked for. - Optional, by default 0 - end: int, optional - The ending index of the portion of the array in which the upper bound - of a given value has to be looked for. - Optional, by default the index - of the last position filled. - comp: lambda/function - The comparator which is to be used - for specifying the desired ordering. 
- Optional, by default, less than or - equal to is used for comparing two - values. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - index: int - Index of the upper bound of the given value in the given OneDimensionalArray. - - Examples - ======== - - >>> from pydatastructs import upper_bound, OneDimensionalArray as ODA - >>> arr1 = ODA(int, [4, 5, 5, 6, 7]) - >>> ub = upper_bound(arr1, 5, start=0, end=4) - >>> ub - 3 - >>> arr2 = ODA(int, [7, 6, 5, 5, 4]) - >>> ub = upper_bound(arr2, 5, comp=lambda x, y: x > y) - >>> ub - 4 - - Note - ==== - - DynamicOneDimensionalArray objects may not work as expected. - """ - raise_if_backend_is_not_python( - upper_bound, kwargs.get('backend', Backend.PYTHON)) - start = kwargs.get('start', 0) - end = kwargs.get('end', len(array)) - comp = kwargs.get('comp', lambda x, y: x < y) - index = end - inclusive_end = end - 1 - if comp(value, array[start]): - index = start - while start <= inclusive_end: - mid = (start + inclusive_end)//2 - if not comp(value, array[mid]): - start = mid + 1 - else: - index = mid - inclusive_end = mid - 1 - return index - -def lower_bound(array, value, **kwargs): - """ - Finds the the index of the first occurence of an element which is not - less than the given value according to specified order, - in the given OneDimensionalArray using a variation of binary search method. - - Parameters - ========== - - array: OneDimensionalArray - The array in which the lower bound has to be found. - start: int - The staring index of the portion of the array in which the upper bound - of a given value has to be looked for. - Optional, by default 0 - end: int, optional - The ending index of the portion of the array in which the upper bound - of a given value has to be looked for. - Optional, by default the index - of the last position filled. 
- comp: lambda/function - The comparator which is to be used - for specifying the desired ordering. - Optional, by default, less than or - equal to is used for comparing two - values. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - index: int - Index of the lower bound of the given value in the given OneDimensionalArray - - Examples - ======== - - >>> from pydatastructs import lower_bound, OneDimensionalArray as ODA - >>> arr1 = ODA(int, [4, 5, 5, 6, 7]) - >>> lb = lower_bound(arr1, 5, end=4, comp=lambda x, y : x < y) - >>> lb - 1 - >>> arr = ODA(int, [7, 6, 5, 5, 4]) - >>> lb = lower_bound(arr, 5, start=0, comp=lambda x, y : x > y) - >>> lb - 2 - - Note - ==== - - DynamicOneDimensionalArray objects may not work as expected. - """ - raise_if_backend_is_not_python( - lower_bound, kwargs.get('backend', Backend.PYTHON)) - start = kwargs.get('start', 0) - end = kwargs.get('end', len(array)) - comp = kwargs.get('comp', lambda x, y: x < y) - index = end - inclusive_end = end - 1 - if not comp(array[start], value): - index = start - while start <= inclusive_end: - mid = (start + inclusive_end)//2 - if comp(array[mid], value): - start = mid + 1 - else: - index = mid - inclusive_end = mid - 1 - return index - -def longest_increasing_subsequence(array, **kwargs): - """ - Returns the longest increasing subsequence (as a OneDimensionalArray) that - can be obtained from a given OneDimensionalArray. A subsequence - of an array is an ordered subset of the array's elements having the same - sequential ordering as the original array. Here, an increasing - sequence stands for a strictly increasing sequence of numbers. - - Parameters - ========== - - array: OneDimensionalArray - The given array in the form of a OneDimensionalArray - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. 
- - Returns - ======= - - output: OneDimensionalArray - Returns the longest increasing subsequence that can be obtained - from the given array - - Examples - ======== - - >>> from pydatastructs import lower_bound, OneDimensionalArray as ODA - >>> from pydatastructs import longest_increasing_subsequence as LIS - >>> array = ODA(int, [2, 5, 3, 7, 11, 8, 10, 13, 6]) - >>> longest_inc_subsequence = LIS(array) - >>> str(longest_inc_subsequence) - '[2, 3, 7, 8, 10, 13]' - >>> array2 = ODA(int, [3, 4, -1, 5, 8, 2, 2 ,2, 3, 12, 7, 9, 10]) - >>> longest_inc_subsequence = LIS(array2) - >>> str(longest_inc_subsequence) - '[-1, 2, 3, 7, 9, 10]' - """ - raise_if_backend_is_not_python( - longest_increasing_subsequence, - kwargs.get('backend', Backend.PYTHON)) - n = len(array) - dp = OneDimensionalArray(int, n) - dp.fill(0) - parent = OneDimensionalArray(int, n) - parent.fill(-1) - length = 0 - for i in range(1, n): - if array[i] <= array[dp[0]]: - dp[0] = i - elif array[dp[length]] < array[i]: - length += 1 - dp[length] = i - parent[i] = dp[length - 1] - else: - curr_array = [array[dp[i]] for i in range(length)] - ceil = lower_bound(curr_array, array[i]) - dp[ceil] = i - parent[i] = dp[ceil - 1] - ans = DynamicOneDimensionalArray(int, 0) - last_index = dp[length] - while last_index != -1: - ans.append(array[last_index]) - last_index = parent[last_index] - n = ans._last_pos_filled + 1 - ans_ODA = OneDimensionalArray(int, n) - for i in range(n): - ans_ODA[n-1-i] = ans[i] - return ans_ODA - -def _permutation_util(array, start, end, comp, perm_comp): - size = end - start + 1 - permute = OneDimensionalArray(int, size) - for i, j in zip(range(start, end + 1), range(size)): - permute[j] = array[i] - i = size - 1 - while i > 0 and perm_comp(permute[i - 1], permute[i], comp): - i -= 1 - if i > 0: - left, right = i, size - 1 - while left <= right: - mid = left + (right - left) // 2 - if not perm_comp(permute[i - 1], permute[mid], comp): - left = mid + 1 - else: - right = mid - 1 - 
permute[i - 1], permute[left - 1] = \ - permute[left - 1], permute[i - 1] - left, right = i, size - 1 - while left < right: - permute[left], permute[right] = permute[right], permute[left] - left += 1 - right -= 1 - result = True if i > 0 else False - return result, permute - -def next_permutation(array, **kwargs): - """ - If the function can determine the next higher permutation, it - returns `True` and the permutation in a new array. - If that is not possible, because it is already at the largest possible - permutation, it returns the elements according to the first permutation - and returns `False` and the permutation in a new array. - - Parameters - ========== - - array: OneDimensionalArray - The array which is to be used for finding next permutation. - start: int - The staring index of the considered portion of the array. - Optional, by default 0 - end: int, optional - The ending index of the considered portion of the array. - Optional, by default the index of the last position filled. - comp: lambda/function - The comparator which is to be used for specifying the - desired lexicographical ordering. - Optional, by default, less than is - used for comparing two values. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - - Returns - ======= - - output: bool, OneDimensionalArray - First element is `True` if the function can rearrange - the given portion of the input array as a lexicographically - greater permutation, otherwise returns `False`. - Second element is an array having the next permutation. 
- - - Examples - ======== - - >>> from pydatastructs import next_permutation, OneDimensionalArray as ODA - >>> array = ODA(int, [1, 2, 3, 4]) - >>> is_greater, next_permute = next_permutation(array) - >>> is_greater, str(next_permute) - (True, '[1, 2, 4, 3]') - >>> array = ODA(int, [3, 2, 1]) - >>> is_greater, next_permute = next_permutation(array) - >>> is_greater, str(next_permute) - (False, '[1, 2, 3]') - - References - ========== - - .. [1] http://www.cplusplus.com/reference/algorithm/next_permutation/ - """ - raise_if_backend_is_not_python( - next_permutation, kwargs.get('backend', Backend.PYTHON)) - start = kwargs.get('start', 0) - end = kwargs.get('end', len(array) - 1) - comp = kwargs.get('comp', lambda x, y: x < y) - - def _next_permutation_comp(x, y, _comp): - if _comp(x, y): - return False - else: - return True - - return _permutation_util(array, start, end, comp, - _next_permutation_comp) - -def prev_permutation(array, **kwargs): - """ - If the function can determine the next lower permutation, it - returns `True` and the permutation in a new array. - If that is not possible, because it is already at the lowest possible - permutation, it returns the elements according to the last permutation - and returns `False` and the permutation in a new array. - - Parameters - ========== - - array: OneDimensionalArray - The array which is to be used for finding next permutation. - start: int - The staring index of the considered portion of the array. - Optional, by default 0 - end: int, optional - The ending index of the considered portion of the array. - Optional, by default the index of the last position filled. - comp: lambda/function - The comparator which is to be used for specifying the - desired lexicographical ordering. - Optional, by default, less than is - used for comparing two values. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. 
- - - Returns - ======= - - output: bool, OneDimensionalArray - First element is `True` if the function can rearrange - the given portion of the input array as a lexicographically - smaller permutation, otherwise returns `False`. - Second element is an array having the previous permutation. - - - Examples - ======== - - >>> from pydatastructs import prev_permutation, OneDimensionalArray as ODA - >>> array = ODA(int, [1, 2, 4, 3]) - >>> is_lower, prev_permute = prev_permutation(array) - >>> is_lower, str(prev_permute) - (True, '[1, 2, 3, 4]') - >>> array = ODA(int, [1, 2, 3, 4]) - >>> is_lower, prev_permute = prev_permutation(array) - >>> is_lower, str(prev_permute) - (False, '[4, 3, 2, 1]') - - References - ========== - - .. [1] http://www.cplusplus.com/reference/algorithm/prev_permutation/ - """ - raise_if_backend_is_not_python( - prev_permutation, kwargs.get('backend', Backend.PYTHON)) - start = kwargs.get('start', 0) - end = kwargs.get('end', len(array) - 1) - comp = kwargs.get('comp', lambda x, y: x < y) - - def _prev_permutation_comp(x, y, _comp): - if _comp(x, y): - return True - else: - return False - - return _permutation_util(array, start, end, comp, - _prev_permutation_comp) - -def bubble_sort(array, **kwargs): - """ - Implements bubble sort algorithm. - - Parameters - ========== - - array: Array - The array which is to be sorted. - start: int - The starting index of the portion - which is to be sorted. - Optional, by default 0 - end: int - The ending index of the portion which - is to be sorted. - Optional, by default the index - of the last position filled. - comp: lambda/function - The comparator which is to be used - for sorting. If the function returns - False then only swapping is performed. - Optional, by default, less than or - equal to is used for comparing two - values. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. 
- - Returns - ======= - - output: Array - The sorted array. - - Examples - ======== - - >>> from pydatastructs import OneDimensionalArray, bubble_sort - >>> arr = OneDimensionalArray(int,[3, 2, 1]) - >>> out = bubble_sort(arr) - >>> str(out) - '[1, 2, 3]' - >>> out = bubble_sort(arr, comp=lambda u, v: u > v) - >>> str(out) - '[3, 2, 1]' - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Bubble_sort - """ - backend = kwargs.pop("backend", Backend.PYTHON) - if backend == Backend.CPP: - return _algorithms.bubble_sort(array, **kwargs) - if backend == Backend.LLVM: - return _algorithms.bubble_sort_llvm(array, **kwargs) - start = kwargs.get('start', 0) - end = kwargs.get('end', len(array) - 1) - comp = kwargs.get("comp", lambda u, v: u <= v) - arr_len = len(array) - for i in range(arr_len - 1): - for j in range(start , end): - if not _comp(array[j], array[j + 1], comp): - array[j], array[j + 1] = array[j + 1], array[j] - - if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)): - array._modify(True) - - return array - -def selection_sort(array, **kwargs): - """ - Implements selection sort algorithm. - - Parameters - ========== - - array: Array - The array which is to be sorted. - start: int - The starting index of the portion - which is to be sorted. - Optional, by default 0 - end: int - The ending index of the portion which - is to be sorted. - Optional, by default the index - of the last position filled. - comp: lambda/function - The comparator which is to be used - for sorting. If the function returns - False then only swapping is performed. - Optional, by default, less than or - equal to is used for comparing two - values. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - output: Array - The sorted array. 
- - Examples - ======== - - >>> from pydatastructs import OneDimensionalArray, selection_sort - >>> arr = OneDimensionalArray(int,[3, 2, 1]) - >>> out = selection_sort(arr) - >>> str(out) - '[1, 2, 3]' - >>> out = selection_sort(arr, comp=lambda u, v: u > v) - >>> str(out) - '[3, 2, 1]' - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Selection_sort - """ - backend = kwargs.pop("backend", Backend.PYTHON) - if backend == Backend.CPP: - return _algorithms.bubble_sort(array, **kwargs) - start = kwargs.get('start', 0) - end = kwargs.get('end', len(array) - 1) - comp = kwargs.get('comp', lambda u, v: u <= v) - - for i in range(start, end + 1): - jMin = i - for j in range(i + 1, end + 1): - if not _comp(array[jMin], array[j], comp): - jMin = j - if jMin != i: - array[i], array[jMin] = array[jMin], array[i] - - if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)): - array._modify(True) - - return array - -def insertion_sort(array, **kwargs): - """ - Implements insertion sort algorithm. - - Parameters - ========== - - array: Array - The array which is to be sorted. - start: int - The starting index of the portion - which is to be sorted. - Optional, by default 0 - end: int - The ending index of the portion which - is to be sorted. - Optional, by default the index - of the last position filled. - comp: lambda/function - The comparator which is to be used - for sorting. If the function returns - False then only swapping is performed. - Optional, by default, less than or - equal to is used for comparing two - values. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - output: Array - The sorted array. 
- - Examples - ======== - - >>> from pydatastructs import OneDimensionalArray, insertion_sort - >>> arr = OneDimensionalArray(int,[3, 2, 1]) - >>> out = insertion_sort(arr) - >>> str(out) - '[1, 2, 3]' - >>> out = insertion_sort(arr, comp=lambda u, v: u > v) - >>> str(out) - '[3, 2, 1]' - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Insertion_sort - """ - backend = kwargs.pop("backend", Backend.PYTHON) - if backend == Backend.CPP: - return _algorithms.insertion_sort(array, **kwargs) - start = kwargs.get('start', 0) - end = kwargs.get('end', len(array) - 1) - comp = kwargs.get('comp', lambda u, v: u <= v) - - for i in range(start + 1, end + 1): - temp = array[i] - j = i - while j > start and not _comp(array[j - 1], temp, comp): - array[j] = array[j - 1] - j -= 1 - array[j] = temp - - if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)): - array._modify(True) - - return array - -def linear_search(array, value, **kwargs): - """ - Implements linear search algorithm. - - Parameters - ========== - - array: OneDimensionalArray - The array which is to be searched. - value: - The value which is to be searched - inside the array. - start: int - The starting index of the portion - which is to be searched. - Optional, by default 0 - end: int - The ending index of the portion which - is to be searched. - Optional, by default the index - of the last position filled. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - output: int - The index of value if found. - If not found, returns None. - - Examples - ======== - - >>> from pydatastructs import OneDimensionalArray, linear_search - >>> arr = OneDimensionalArray(int,[3, 2, 1]) - >>> linear_search(arr, 2) - 1 - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Linear_search - """ - backend = kwargs.pop("backend", Backend.PYTHON) - if backend == Backend.CPP: - return _algorithms.linear_search(array, value, **kwargs) - start = kwargs.get('start', 0) - end = kwargs.get('end', len(array) - 1) - - for i in range(start, end + 1): - if array[i] == value: - return i - - return None - -def binary_search(array, value, **kwargs): - """ - Implements binary search algorithm. - - Parameters - ========== - - array: OneDimensionalArray - The array which is to be searched. - value: - The value which is to be searched - inside the array. - start: int - The starting index of the portion - which is to be searched. - Optional, by default 0 - end: int - The ending index of the portion which - is to be searched. - Optional, by default the index - of the last position filled. - comp: lambda/function - The comparator which is to be used - for performing comparisons. - Optional, by default, less than or - equal to is used for comparing two - values. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - output: int - The index of elem if found. - If not found, returns None. - - Examples - ======== - - >>> from pydatastructs import OneDimensionalArray, binary_search - >>> arr = OneDimensionalArray(int,[1, 2, 3, 5, 10, 12]) - >>> binary_search(arr, 5) - 3 - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Binary_search_algorithm - - Note - ==== - - This algorithm assumes that the portion of the array - to be searched is already sorted. 
- """ - backend = kwargs.pop("backend", Backend.PYTHON) - if backend == Backend.CPP: - return _algorithms.binary_search(array, value, **kwargs) - start = kwargs.get('start', 0) - end = kwargs.get('end', len(array) - 1) - comp = kwargs.get("comp", lambda u, v: u <= v) - - left = start - right = end - while left <= right: - middle = left//2 + right//2 + left % 2 * right % 2 - if array[middle] == value: - return middle - if comp(array[middle], value): - left = middle + 1 - else: - right = middle - 1 - - return None - -def jump_search(array, value, **kwargs): - """ - Implements jump search algorithm. - - Parameters - ========== - - array: OneDimensionalArray - The array which is to be searched. - value: - The value which is to be searched - inside the array. - start: int - The starting index of the portion - which is to be searched. - Optional, by default 0 - end: int - The ending index of the portion which - is to be searched. - Optional, by default the index - of the last position filled. - comp: lambda/function - The comparator which is to be used - for performing comparisons. - Optional, by default, less than or - equal to is used for comparing two - values. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - output: int - The index of elem if found. - If not found, returns None. - - Examples - ======== - - >>> from pydatastructs import OneDimensionalArray, jump_search - >>> arr = OneDimensionalArray(int,[1, 2, 3, 5, 10, 12]) - >>> linear_search(arr, 5) - 3 - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Jump_search - - Note - ==== - - This algorithm assumes that the portion of the array - to be searched is already sorted. 
- """ - backend = kwargs.pop("backend", Backend.PYTHON) - if backend == Backend.CPP: - return _algorithms.jump_search(array, value, **kwargs) - start = kwargs.get('start', 0) - end = kwargs.get('end', len(array) - 1) - comp = kwargs.get("comp", lambda u, v: u < v) - - step = int(sqrt(end - start + 1)) - current_position = step - prev = start - while comp(array[min(current_position, end)], value): - prev = current_position - current_position += step - if prev > end: - return None - while prev <= min(current_position, end): - if array[prev] == value: - return prev - prev += 1 - - return None - -def intro_sort(array, **kwargs) -> Array: - """ - Performs intro sort on the given array. - - Parameters - ========== - - array: Array - The array which is to be sorted. - start: int - The starting index of the portion - which is to be sorted. - Optional, by default 0 - end: int - The ending index of the portion which - is to be sorted. - Optional, by default the index - of the last position filled. - maxdepth: Enables the user to define the maximum - recursion depth, takes value 2*log(length(A)) - by default (ref: Wikipedia[1]). - ins_threshold: Threshold under which insertion - sort has to be performed, default value is - 16 (ref: Wikipedia[1]). - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - output: Array - The sorted array. - - Examples - ======== - - >>> from pydatastructs import OneDimensionalArray as ODA, intro_sort - >>> arr = ODA(int, [5, 78, 1, 0]) - >>> out = intro_sort(arr) - >>> str(out) - '[0, 1, 5, 78]' - >>> arr = ODA(int, [21, 37, 5]) - >>> out = intro_sort(arr) - >>> str(out) - '[5, 21, 37]' - - Note - ==== - - This function does not support custom comparators as - is the case with other sorting functions in this file. - This is because of heapsort's limitation. - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Introsort - """ - raise_if_backend_is_not_python( - intro_sort, kwargs.get('backend', Backend.PYTHON)) - - # Always sorts in increasing order, this is because of - # heapsort's limitation - comp = lambda u, v: u <= v - lower = kwargs.get('start', 0) - upper = kwargs.get('end', len(array) - 1) - n = upper - lower + 1 - if n <= 0: - maxdepth = 0 - else: - maxdepth = kwargs.get("maxdepth", int(2 * (log(n)/log(2)))) - - ins_threshold = kwargs.get("ins_threshold", 16) - - def partition(array, lower, upper): - pivot = array[lower] - left = lower + 1 - right = upper - done = False - while not done: - while left <= right and _comp(array[left], pivot, comp): - left += 1 - while _comp(pivot, array[right], comp) and right >= left: - right -= 1 - if right < left: - done = True - else: - array[left], array[right] = array[right], array[left] - left+=1 - right-=1 - - array[lower], array[right] = array[right], array[lower] - return right - - if n < ins_threshold: - return insertion_sort(array, start=lower, end=upper) - elif maxdepth == 0: - heapsort(array, start=lower, end=upper) - return array - else: - p = partition(array, lower, upper) - - intro_sort(array, start=lower, end=p-1, maxdepth=maxdepth-1, ins_threshold=ins_threshold) - intro_sort(array, start=p+1, end=upper, maxdepth=maxdepth-1, ins_threshold=ins_threshold) - - return array - -def shell_sort(array, *args, **kwargs): - """ - Implements shell sort algorithm. - - Parameters - ========== - - array: Array - The array which is to be sorted. - start: int - The starting index of the portion - which is to be sorted. - Optional, by default 0 - end: int - The ending index of the portion which - is to be sorted. - Optional, by default the index - of the last position filled. - comp: lambda/function - The comparator which is to be used - for sorting. If the function returns - False then only swapping is performed. - Optional, by default, less than or - equal to is used for comparing two - values. 
- backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - output: Array - The sorted array. - - Examples - ======== - - >>> from pydatastructs.linear_data_structures.algorithms import OneDimensionalArray, shell_sort - >>> arr = OneDimensionalArray(int, [3, 2, 1]) - >>> out = shell_sort(arr) - >>> str(out) - '[1, 2, 3]' - >>> out = shell_sort(arr, comp=lambda u, v: u > v) - >>> str(out) - '[3, 2, 1]' - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Shellsort - """ - start = int(kwargs.get('start', 0)) - end = int(kwargs.get('end', len(array) - 1)) - comp = kwargs.get('comp', lambda u, v: u <= v) - - n = end - start + 1 - gap = n // 2 - while gap > 0: - for i in range(start + gap, end + 1): - temp = array[i] - j = i - while j >= start + gap and not _comp(array[j - gap], temp, comp): - array[j] = array[j - gap] - j -= gap - array[j] = temp - gap //= 2 - - if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)): - array._modify(True) - - return array - -def radix_sort(array, *args, **kwargs): - """ - Implements radix sort algorithm for non-negative integers. - - Parameters - ========== - - array: Array - The array which is to be sorted. Must contain non-negative integers. - start: int - The starting index of the portion - which is to be sorted. - Optional, by default 0 - end: int - The ending index of the portion which - is to be sorted. - Optional, by default the index - of the last position filled. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - output: Array - The sorted array. 
- - Examples - ======== - - >>> from pydatastructs.linear_data_structures.algorithms import OneDimensionalArray, radix_sort - >>> arr = OneDimensionalArray(int, [170, 45, 75, 90, 802, 24, 2, 66]) - >>> out = radix_sort(arr) - >>> str(out) - '[2, 24, 45, 66, 75, 90, 170, 802]' - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Radix_sort - """ - start = int(kwargs.get('start', 0)) - end = int(kwargs.get('end', len(array) - 1)) - - n = end - start + 1 - max_val = array[start] - for i in range(start + 1, end + 1): - if array[i] is not None and array[i] > max_val: - max_val = array[i] - exp = 1 - while max_val // exp > 0: - count = [0] * 10 - output = [None] * n - - for i in range(start, end + 1): - if array[i] is not None: - digit = (array[i] // exp) % 10 - count[digit] += 1 - - for i in range(1, 10): - count[i] += count[i - 1] - - for i in range(end, start - 1, -1): - if array[i] is not None: - digit = (array[i] // exp) % 10 - count[digit] -= 1 - output[count[digit]] = array[i] - - for i in range(n): - array[start + i] = output[i] - - exp *= 10 - - if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)): - array._modify(True) - - return array diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/arrays.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/arrays.py deleted file mode 100644 index 2e0c3fd97..000000000 --- a/build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/arrays.py +++ /dev/null @@ -1,473 +0,0 @@ -from pydatastructs.utils.misc_util import ( - _check_type, NoneType, Backend, - raise_if_backend_is_not_python) -from pydatastructs.linear_data_structures._backend.cpp import _arrays - -__all__ = [ - 'OneDimensionalArray', - 'MultiDimensionalArray', - 'DynamicOneDimensionalArray' -] - -class Array(object): - """ - Abstract class for arrays in pydatastructs. 
- """ - def __str__(self) -> str: - return str(self._data) - -class OneDimensionalArray(Array): - """ - Represents one dimensional static arrays of - fixed size. - - Parameters - ========== - - dtype: type - A valid object type. - size: int - The number of elements in the array. - elements: list - The elements in the array, all should - be of same type. - init: a python type - The initial value with which the element has - to be initialized. By default none, used only - when the data is not given. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Raises - ====== - - ValueError - When the number of elements in the list do not - match with the size. - More than three parameters are passed as arguments. - Types of arguments is not as mentioned in the docstring. - - Note - ==== - - At least one parameter should be passed as an argument along - with the dtype. - - Examples - ======== - - >>> from pydatastructs import OneDimensionalArray - >>> arr = OneDimensionalArray(int, 5) - >>> arr.fill(6) - >>> arr[0] - 6 - >>> arr[0] = 7.2 - >>> arr[0] - 7 - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Array_data_structure#One-dimensional_arrays - """ - - __slots__ = ['_size', '_data', '_dtype'] - - def __new__(cls, dtype=NoneType, *args, **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.CPP: - return _arrays.OneDimensionalArray(dtype, *args, **kwargs) - if dtype is NoneType: - raise ValueError("Data type is not defined.") - if len(args) not in (1, 2): - raise ValueError("Too few arguments to create a 1D array," - " pass either size of the array" - " or list of elements or both.") - obj = Array.__new__(cls) - obj._dtype = dtype - if len(args) == 2: - if _check_type(args[0], list) and \ - _check_type(args[1], int): - for i in range(len(args[0])): - if _check_type(args[0][i], dtype) is False: - args[0][i] = dtype(args[0][i]) - size, data = args[1], list(args[0]) - elif _check_type(args[1], list) and \ - _check_type(args[0], int): - for i in range(len(args[1])): - if _check_type(args[1][i], dtype) is False: - args[1][i] = dtype(args[1][i]) - size, data = args[0], list(args[1]) - else: - raise TypeError("Expected type of size is int and " - "expected type of data is list/tuple.") - if size != len(data): - raise ValueError("Conflict in the size, %s and length of data, %s" - %(size, len(data))) - obj._size, obj._data = size, data - - elif len(args) == 1: - if _check_type(args[0], int): - obj._size = args[0] - init = kwargs.get('init', None) - obj._data = [init for i in range(args[0])] - elif _check_type(args[0], (list, tuple)): - for i in range(len(args[0])): - if _check_type(args[0][i], dtype) is False: - args[0][i] = dtype(args[0][i]) - obj._size, obj._data = len(args[0]), \ - list(args[0]) - else: - raise TypeError("Expected type of size is int and " - "expected type of data is list/tuple.") - - return obj - - @classmethod - def methods(cls): - return ['__new__', '__getitem__', - '__setitem__', 'fill', '__len__'] - - def __getitem__(self, i): - if i >= self._size or i < 0: - raise 
IndexError(("Index, {} out of range, " - "[{}, {}).".format(i, 0, self._size))) - return self._data.__getitem__(i) - - def __setitem__(self, idx, elem): - if elem is None: - self._data[idx] = None - else: - if _check_type(elem, self._dtype) is False: - elem = self._dtype(elem) - self._data[idx] = elem - - def fill(self, elem): - elem = self._dtype(elem) - for i in range(self._size): - self._data[i] = elem - - def __len__(self): - return self._size - -class MultiDimensionalArray(Array): - """ - Represents a multi-dimensional array. - - Parameters - ========== - - dtype: type - A valid object type. - *args: int - The dimensions of the array. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Raises - ====== - - IndexError - Index goes out of boundaries, or - the number of index given is not - the same as the number of dimensions. - ValueError - When there's no dimensions or the - dimension size is 0. - - Examples - ======== - - >>> from pydatastructs import MultiDimensionalArray as MDA - >>> arr = MDA(int, 5, 6, 9) - >>> arr.fill(32) - >>> arr[3, 0, 0] - 32 - >>> arr[3, 0, 0] = 7 - >>> arr[3, 0, 0] - 7 - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Array_data_structure#Multidimensional_arrays - - """ - __slots__ = ['_sizes', '_data', '_dtype'] - - def __new__(cls, dtype: type = NoneType, *args, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - if dtype is NoneType: - raise ValueError("Data type is not defined.") - elif not args: - raise ValueError("Too few arguments to create a " - "multi dimensional array, pass dimensions.") - if len(args) == 1: - obj = Array.__new__(cls) - obj._dtype = dtype - obj._sizes = (args[0], 1) - obj._data = [None] * args[0] - return obj - - dimensions = args - for dimension in dimensions: - if dimension < 1: - raise ValueError("Size of dimension cannot be less than 1") - n_dimensions = len(dimensions) - d_sizes = [] - index = 0 - while n_dimensions > 1: - size = dimensions[index] - for i in range(index+1, len(dimensions)): - size = size * dimensions[i] - d_sizes.append(size) - n_dimensions -= 1 - index += 1 - d_sizes.append(dimensions[index]) - d_sizes.append(1) - obj = Array.__new__(cls) - obj._dtype = dtype - obj._sizes = tuple(d_sizes) - obj._data = [None] * obj._sizes[1] * dimensions[0] - return obj - - @classmethod - def methods(cls) -> list: - return ['__new__', '__getitem__', '__setitem__', 'fill', 'shape'] - - def __getitem__(self, indices): - self._compare_shape(indices) - if isinstance(indices, int): - return self._data[indices] - position = 0 - for i in range(0, len(indices)): - position += self._sizes[i + 1] * indices[i] - return self._data[position] - - def __setitem__(self, indices, element) -> None: - self._compare_shape(indices) - if isinstance(indices, int): - self._data[indices] = element - else: - position = 0 - for i in range(0, len(indices)): - position += self._sizes[i + 1] * indices[i] - self._data[position] = element - - def _compare_shape(self, indices) -> None: - indices = [indices] if isinstance(indices, int) else indices - if len(indices) != len(self._sizes) - 1: - raise 
IndexError("Shape mismatch, current shape is %s" % str(self.shape)) - if any(indices[i] >= self._sizes[i] for i in range(len(indices))): - raise IndexError("Index out of range.") - - def fill(self, element) -> None: - element = self._dtype(element) - for i in range(len(self._data)): - self._data[i] = element - - @property - def shape(self) -> tuple: - shape = [] - size = len(self._sizes) - for i in range(1, size): - shape.append(self._sizes[i-1]//self._sizes[i]) - return tuple(shape) - -class DynamicArray(Array): - """ - Abstract class for dynamic arrays. - """ - pass - -class DynamicOneDimensionalArray(DynamicArray, OneDimensionalArray): - """ - Represents resizable and dynamic one - dimensional arrays. - - Parameters - ========== - - dtype: type - A valid object type. - size: int - The number of elements in the array. - elements: list/tuple - The elements in the array, all should - be of same type. - init: a python type - The inital value with which the element has - to be initialized. By default none, used only - when the data is not given. - load_factor: float, by default 0.25 - The number below which if the ratio, Num(T)/Size(T) - falls then the array is contracted such that at - most only half the positions are filled. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Raises - ====== - - ValueError - When the number of elements in the list do not - match with the size. - More than three parameters are passed as arguments. - Types of arguments is not as mentioned in the docstring. - The load factor is not of floating point type. - - Note - ==== - - At least one parameter should be passed as an argument along - with the dtype. - Num(T) means the number of positions which are not None in the - array. - Size(T) means the maximum number of elements that the array can hold. 
- - Examples - ======== - - >>> from pydatastructs import DynamicOneDimensionalArray as DODA - >>> arr = DODA(int, 0) - >>> arr.append(1) - >>> arr.append(2) - >>> arr[0] - 1 - >>> arr.delete(0) - >>> arr[0] - >>> arr[1] - 2 - >>> arr.append(3) - >>> arr.append(4) - >>> [arr[i] for i in range(arr.size)] - [None, 2, 3, 4, None, None, None] - - References - ========== - - .. [1] http://www.cs.nthu.edu.tw/~wkhon/algo09/lectures/lecture16.pdf - """ - - __slots__ = ['_load_factor', '_num', '_last_pos_filled', '_size'] - - def __new__(cls, dtype=NoneType, *args, **kwargs): - backend = kwargs.get("backend", Backend.PYTHON) - if backend == Backend.CPP: - return _arrays.DynamicOneDimensionalArray(dtype, *args, **kwargs) - obj = super().__new__(cls, dtype, *args, **kwargs) - obj._load_factor = float(kwargs.get('load_factor', 0.25)) - obj._num = 0 if obj._size == 0 or obj[0] is None else obj._size - obj._last_pos_filled = obj._num - 1 - return obj - - @classmethod - def methods(cls): - return ['__new__', '_modify', - 'append', 'delete', 'size', - '__str__', '__reversed__'] - - def _modify(self, force=False): - """ - Contracts the array if Num(T)/Size(T) falls - below load factor. 
- """ - if force: - i = -1 - while self._data[i] is None: - i -= 1 - self._last_pos_filled = i%self._size - if (self._num/self._size < self._load_factor): - arr_new = OneDimensionalArray(self._dtype, 2*self._num + 1) - j = 0 - for i in range(self._last_pos_filled + 1): - if self._data[i] is not None: - arr_new[j] = self[i] - j += 1 - self._last_pos_filled = j - 1 - self._data = arr_new._data - self._size = arr_new._size - - def append(self, el): - if self._last_pos_filled + 1 == self._size: - arr_new = OneDimensionalArray(self._dtype, 2*self._size + 1) - for i in range(self._last_pos_filled + 1): - arr_new[i] = self[i] - arr_new[self._last_pos_filled + 1] = el - self._size = arr_new._size - self._data = arr_new._data - else: - self[self._last_pos_filled + 1] = el - self._last_pos_filled += 1 - self._num += 1 - self._modify() - - def delete(self, idx): - if idx <= self._last_pos_filled and idx >= 0 and \ - self[idx] is not None: - self[idx] = None - self._num -= 1 - if self._last_pos_filled == idx: - self._last_pos_filled -= 1 - return self._modify() - - @property - def size(self): - return self._size - - def __str__(self): - to_be_printed = ['' for _ in range(self._last_pos_filled + 1)] - for i in range(self._last_pos_filled + 1): - if self._data[i] is not None: - to_be_printed[i] = str(self._data[i]) - return str(to_be_printed) - - def __reversed__(self): - for i in range(self._last_pos_filled, -1, -1): - yield self._data[i] - -class ArrayForTrees(DynamicOneDimensionalArray): - """ - Utility dynamic array for storing nodes of a tree. - - Parameters - ========== - - backend: pydatastructs.Backend - The backend to be used. Available backends: Python and C++ - Optional, by default, the Python backend is used. For faster execution, use the C++ backend. 
- - See Also - ======== - - pydatastructs.linear_data_structures.arrays.DynamicOneDimensionalArray - """ - def _modify(self): - if self._num/self._size < self._load_factor: - new_indices = {} - arr_new = OneDimensionalArray(self._dtype, 2*self._num + 1) - j = 0 - for i in range(self._last_pos_filled + 1): - if self[i] is not None: - arr_new[j] = self[i] - new_indices[self[i].key] = j - j += 1 - for i in range(j): - if arr_new[i].left is not None: - arr_new[i].left = new_indices[self[arr_new[i].left].key] - if arr_new[i].right is not None: - arr_new[i].right = new_indices[self[arr_new[i].right].key] - if arr_new[i].parent is not None: - arr_new[i].parent = new_indices[self[arr_new[i].parent].key] - self._last_pos_filled = j - 1 - self._data = arr_new._data - self._size = arr_new._size - return new_indices - return None diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/linked_lists.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/linked_lists.py deleted file mode 100644 index 09178daf1..000000000 --- a/build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/linked_lists.py +++ /dev/null @@ -1,819 +0,0 @@ -import math, random -from pydatastructs.utils.misc_util import _check_type, LinkedListNode, SkipNode -from pydatastructs.utils.misc_util import ( - Backend, raise_if_backend_is_not_python) - -__all__ = [ - 'SinglyLinkedList', - 'DoublyLinkedList', - 'SinglyCircularLinkedList', - 'DoublyCircularLinkedList', - 'SkipList' -] - -class LinkedList(object): - """ - Abstract class for Linked List. 
- """ - __slots__ = ['head', 'size'] - - def __len__(self): - return self.size - - @property - def is_empty(self): - return self.size == 0 - - def search(self, key): - curr_node = self.head - while curr_node is not None: - if curr_node.key == key: - return curr_node - curr_node = curr_node.next - if curr_node is self.head: - return None - return None - - def __str__(self): - """ - For printing the linked list. - """ - elements = [] - current_node = self.head - while current_node is not None: - elements.append(str(current_node)) - current_node = current_node.next - if current_node == self.head: - break - return str(elements) - - def insert_after(self, prev_node, key, data=None): - """ - Inserts a new node after the prev_node. - - Parameters - ========== - - prev_node: LinkedListNode - The node after which the - new node is to be inserted. - - key - Any valid identifier to uniquely - identify the node in the linked list. - - data - Any valid data to be stored in the node. - """ - raise NotImplementedError('This is an abstract method') - - def insert_at(self, index, key, data=None): - """ - Inserts a new node at the input index. - - Parameters - ========== - - index: int - An integer satisfying python indexing properties. - - key - Any valid identifier to uniquely - identify the node in the linked list. - - data - Any valid data to be stored in the node. - """ - raise NotImplementedError('This is an abstract method') - - def extract(self, index): - """ - Extracts the node at the index of the list. - - Parameters - ========== - - index: int - An integer satisfying python indexing properties. - - Returns - ======= - - current_node: LinkedListNode - The node at index i. - """ - raise NotImplementedError('This is an abstract method') - - def __getitem__(self, index): - """ - Returns - ======= - - current_node: LinkedListNode - The node at given index. 
- """ - if index < 0: - index = self.size + index - - if index >= self.size: - raise IndexError('%d index is out of range.'%(index)) - - counter = 0 - current_node = self.head - while counter != index: - current_node = current_node.next - counter += 1 - return current_node - - def appendleft(self, key, data=None): - """ - Pushes a new node at the start i.e., - the left of the list. - - Parameters - ========== - - key - Any valid identifier to uniquely - identify the node in the linked list. - - data - Any valid data to be stored in the node. - """ - self.insert_at(0, key, data) - - def append(self, key, data=None): - """ - Appends a new node at the end of the list. - - Parameters - ========== - - key - Any valid identifier to uniquely - identify the node in the linked list. - - data - Any valid data to be stored in the node. - """ - self.insert_at(self.size, key, data) - - def insert_before(self, next_node, key, data=None): - """ - Inserts a new node before the next_node. - - Parameters - ========== - - next_node: LinkedListNode - The node before which the - new node is to be inserted. - - key - Any valid identifier to uniquely - identify the node in the linked list. - - data - Any valid data to be stored in the node. - """ - raise NotImplementedError('This is an abstract method') - - def popleft(self): - """ - Extracts the Node from the left - i.e. start of the list. - - Returns - ======= - - old_head: LinkedListNode - The leftmost element of linked - list. - """ - return self.extract(0) - - def popright(self): - """ - Extracts the node from the right - of the linked list. - - Returns - ======= - - old_tail: LinkedListNode - The leftmost element of linked - list. - """ - return self.extract(-1) - -class DoublyLinkedList(LinkedList): - """ - Represents Doubly Linked List - - Parameters - ========== - - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. 
- - Examples - ======== - - >>> from pydatastructs import DoublyLinkedList - >>> dll = DoublyLinkedList() - >>> dll.append(6) - >>> dll[0].key - 6 - >>> dll.head.key - 6 - >>> dll.append(5) - >>> dll.appendleft(2) - >>> str(dll) - "['(2, None)', '(6, None)', '(5, None)']" - >>> dll[0].key = 7.2 - >>> dll.extract(1).key - 6 - >>> str(dll) - "['(7.2, None)', '(5, None)']" - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Doubly_linked_list - - """ - __slots__ = ['head', 'tail', 'size'] - - def __new__(cls, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = LinkedList.__new__(cls) - obj.head = None - obj.tail = None - obj.size = 0 - return obj - - @classmethod - def methods(cls): - return ['__new__', 'insert_after', - 'insert_before', 'insert_at', 'extract'] - - def insert_after(self, prev_node, key, data=None): - self.size += 1 - new_node = LinkedListNode(key, data, - links=['next', 'prev'], - addrs=[None, None]) - new_node.next = prev_node.next - if new_node.next is not None: - new_node.next.prev = new_node - prev_node.next = new_node - new_node.prev = prev_node - - if new_node.next is None: - self.tail = new_node - - def insert_before(self, next_node, key, data=None): - self.size += 1 - new_node = LinkedListNode(key, data, - links=['next', 'prev'], - addrs=[None, None]) - new_node.prev = next_node.prev - next_node.prev = new_node - new_node.next = next_node - if new_node.prev is not None: - new_node.prev.next = new_node - else: - self.head = new_node - - def insert_at(self, index, key, data=None): - if self.size == 0 and (index in (0, -1)): - index = 0 - - if index < 0: - index = self.size + index - - if index > self.size: - raise IndexError('%d index is out of range.'%(index)) - - self.size += 1 - new_node = LinkedListNode(key, data, - links=['next', 'prev'], - addrs=[None, None]) - if self.size == 1: - self.head, self.tail = \ - new_node, new_node - elif index == self.size - 1: - new_node.prev = 
self.tail - new_node.next = self.tail.next - self.tail.next = new_node - self.tail = new_node - else: - counter = 0 - current_node = self.head - prev_node = None - while counter != index: - prev_node = current_node - current_node = current_node.next - counter += 1 - new_node.prev = prev_node - new_node.next = current_node - if prev_node is not None: - prev_node.next = new_node - if current_node is not None: - current_node.prev = new_node - if new_node.next is None: - self.tail = new_node - if new_node.prev is None: - self.head = new_node - - def extract(self, index): - if self.is_empty: - raise ValueError("The list is empty.") - - if index < 0: - index = self.size + index - - if index >= self.size: - raise IndexError('%d is out of range.'%(index)) - - self.size -= 1 - counter = 0 - current_node = self.head - prev_node = None - while counter != index: - prev_node = current_node - current_node = current_node.next - counter += 1 - if prev_node is not None: - prev_node.next = current_node.next - if current_node.next is not None: - current_node.next.prev = prev_node - if index == 0: - self.head = current_node.next - if index == self.size: - self.tail = current_node.prev - return current_node - -class SinglyLinkedList(LinkedList): - """ - Represents Singly Linked List - - Parameters - ========== - - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs import SinglyLinkedList - >>> sll = SinglyLinkedList() - >>> sll.append(6) - >>> sll[0].key - 6 - >>> sll.head.key - 6 - >>> sll.append(5) - >>> sll.appendleft(2) - >>> str(sll) - "['(2, None)', '(6, None)', '(5, None)']" - >>> sll[0].key = 7.2 - >>> sll.extract(1).key - 6 - >>> str(sll) - "['(7.2, None)', '(5, None)']" - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Singly_linked_list - - """ - __slots__ = ['head', 'tail', 'size'] - - def __new__(cls, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = LinkedList.__new__(cls) - obj.head = None - obj.tail = None - obj.size = 0 - return obj - - @classmethod - def methods(cls): - return ['insert_after', 'insert_at', - 'extract'] - - def insert_after(self, prev_node, key, data=None): - self.size += 1 - new_node = LinkedListNode(key, data, - links=['next'], - addrs=[None]) - new_node.next = prev_node.next - prev_node.next = new_node - - if new_node.next is None: - self.tail = new_node - - def insert_at(self, index, key, data=None): - if self.size == 0 and (index in (0, -1)): - index = 0 - - if index < 0: - index = self.size + index - - if index > self.size: - raise IndexError('%d index is out of range.'%(index)) - - self.size += 1 - new_node = LinkedListNode(key, data, - links=['next'], - addrs=[None]) - if self.size == 1: - self.head, self.tail = \ - new_node, new_node - elif index == self.size - 1: - new_node.next = self.tail.next - self.tail.next = new_node - self.tail = new_node - else: - counter = 0 - current_node = self.head - prev_node = None - while counter != index: - prev_node = current_node - current_node = current_node.next - counter += 1 - new_node.next = current_node - if prev_node is not None: - prev_node.next = new_node - if new_node.next is None: - self.tail = new_node - if index == 0: - self.head = new_node - - def extract(self, index): - if self.is_empty: - raise ValueError("The list is empty.") - - if index < 0: - index = self.size + index - - if index >= self.size: - raise IndexError('%d is out of range.'%(index)) - - self.size -= 1 - counter = 0 - current_node = self.head - prev_node = None - while counter != index: - prev_node = current_node - current_node = current_node.next - counter += 1 - if prev_node is not None: - prev_node.next = current_node.next - if index == 0: - 
self.head = current_node.next - if index == self.size: - self.tail = prev_node - return current_node - -class SinglyCircularLinkedList(SinglyLinkedList): - """ - Represents Singly Circular Linked List. - - Parameters - ========== - - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - - Examples - ======== - - >>> from pydatastructs import SinglyCircularLinkedList - >>> scll = SinglyCircularLinkedList() - >>> scll.append(6) - >>> scll[0].key - 6 - >>> scll.head.key - 6 - >>> scll.append(5) - >>> scll.appendleft(2) - >>> str(scll) - "['(2, None)', '(6, None)', '(5, None)']" - >>> scll[0].key = 7.2 - >>> scll.extract(1).key - 6 - >>> str(scll) - "['(7.2, None)', '(5, None)']" - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Linked_list#Circular_linked_list - - """ - - @classmethod - def methods(cls): - return ['insert_after', 'insert_at', 'extract'] - - def insert_after(self, prev_node, key, data=None): - super(SinglyCircularLinkedList, self).\ - insert_after(prev_node, key, data) - if prev_node.next.next == self.head: - self.tail = prev_node.next - - def insert_at(self, index, key, data=None): - super(SinglyCircularLinkedList, self).insert_at(index, key, data) - if self.size == 1: - self.head.next = self.head - new_node = self.__getitem__(index) - if index == 0: - self.tail.next = new_node - if new_node.next == self.head: - self.tail = new_node - - def extract(self, index): - node = super(SinglyCircularLinkedList, self).extract(index) - if self.tail is None: - self.head = None - elif index == 0: - self.tail.next = self.head - return node - -class DoublyCircularLinkedList(DoublyLinkedList): - """ - Represents Doubly Circular Linked List - - Parameters - ========== - - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. 
- - Examples - ======== - - >>> from pydatastructs import DoublyCircularLinkedList - >>> dcll = DoublyCircularLinkedList() - >>> dcll.append(6) - >>> dcll[0].key - 6 - >>> dcll.head.key - 6 - >>> dcll.append(5) - >>> dcll.appendleft(2) - >>> str(dcll) - "['(2, None)', '(6, None)', '(5, None)']" - >>> dcll[0].key = 7.2 - >>> dcll.extract(1).key - 6 - >>> str(dcll) - "['(7.2, None)', '(5, None)']" - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Doubly_linked_list#Circular_doubly_linked_lists - - """ - - @classmethod - def methods(cls): - return ['insert_after', 'insert_before', - 'insert_at', 'extract'] - - def insert_after(self, prev_node, key, data=None): - super(DoublyCircularLinkedList, self)\ - .insert_after(prev_node, key, data) - if prev_node.next.next == self.head: - self.tail = prev_node.next - - def insert_before(self, next_node, key, data=None): - super(DoublyCircularLinkedList, self).\ - insert_before(next_node, key, data) - if next_node == self.head: - self.head = next_node.prev - - def insert_at(self, index, key, data=None): - super(DoublyCircularLinkedList, self).\ - insert_at(index, key, data) - if self.size == 1: - self.head.next = self.head - self.head.prev = self.head - new_node = self.__getitem__(index) - if index == 0: - self.tail.next = new_node - new_node.prev = self.tail - if new_node.next == self.head: - self.tail = new_node - new_node.next = self.head - self.head.prev = new_node - - def extract(self, index): - node = super(DoublyCircularLinkedList, self).extract(index) - if self.tail is None: - self.head = None - elif index == 0: - self.tail.next = self.head - return node - -class SkipList(object): - """ - Represents Skip List - - Parameters - ========== - - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. 
- - Examples - ======== - - >>> from pydatastructs import SkipList - >>> sl = SkipList() - >>> sl.insert(6) - >>> sl.insert(1) - >>> sl.insert(3) - >>> node = sl.extract(1) - >>> str(node) - '(1, None)' - >>> sl.insert(4) - >>> sl.insert(2) - >>> sl.search(4) - True - >>> sl.search(10) - False - - """ - - __slots__ = ['head', 'tail', '_levels', '_num_nodes', 'seed'] - - def __new__(cls, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = object.__new__(cls) - obj.head, obj.tail = None, None - obj._num_nodes = 0 - obj._levels = 0 - obj._add_level() - return obj - - @classmethod - def methods(cls): - return ['__new__', 'levels', 'search', - 'extract', '__str__', 'size'] - - def _add_level(self): - self.tail = SkipNode(math.inf, next=None, down=self.tail) - self.head = SkipNode(-math.inf, next=self.tail, down=self.head) - self._levels += 1 - - @property - def levels(self): - """ - Returns the number of levels in the - current skip list. - """ - return self._levels - - def _search(self, key) -> list: - path = [] - node = self.head - while node: - if node.next.key >= key: - path.append(node) - node = node.down - else: - node = node.next - return path - - def search(self, key) -> bool: - return self._search(key)[-1].next.key == key - - def insert(self, key, data=None): - """ - Inserts a new node to the skip list. - - Parameters - ========== - - key - Any valid identifier to uniquely - identify the node in the linked list. - - data - Any valid data to be stored in the node. 
- """ - path = self._search(key) - tip = path[-1] - below = SkipNode(key=key, data=data, next=tip.next) - tip.next = below - total_level = self._levels - level = 1 - while random.getrandbits(1) % 2 == 0 and level <= total_level: - if level == total_level: - self._add_level() - prev = self.head - else: - prev = path[total_level - 1 - level] - below = SkipNode(key=key, data=None, next=prev.next, down=below) - prev.next = below - level += 1 - self._num_nodes += 1 - - @property - def size(self): - return self._num_nodes - - def extract(self, key): - """ - Extracts the node with the given key in the skip list. - - Parameters - ========== - - key - The key of the node under consideration. - - Returns - ======= - - return_node: SkipNode - The node with given key. - """ - path = self._search(key) - tip = path[-1] - if tip.next.key != key: - raise KeyError('Node with key %s is not there in %s'%(key, self)) - return_node = SkipNode(tip.next.key, tip.next.data) - total_level = self._levels - level = total_level - 1 - while level >= 0 and path[level].next.key == key: - path[level].next = path[level].next.next - level -= 1 - walk = self.head - while walk is not None: - if walk.next is self.tail: - self._levels -= 1 - self.head = walk.down - self.tail = self.tail.down - walk = walk.down - else: - break - self._num_nodes -= 1 - if self._levels == 0: - self._add_level() - return return_node - - def __str__(self): - node2row = {} - node2col = {} - walk = self.head - curr_level = self._levels - 1 - while walk is not None: - curr_node = walk - col = 0 - while curr_node is not None: - if curr_node.key != math.inf and curr_node.key != -math.inf: - node2row[curr_node] = curr_level - if walk.down is None: - node2col[curr_node.key] = col - col += 1 - curr_node = curr_node.next - walk = walk.down - curr_level -= 1 - sl_mat = [[str(None) for _ in range(self._num_nodes)] for _ in range(self._levels)] - walk = self.head - while walk is not None: - curr_node = walk - while curr_node is not 
None: - if curr_node in node2row: - row = node2row[curr_node] - col = node2col[curr_node.key] - sl_mat[row][col] = str(curr_node) - curr_node = curr_node.next - walk = walk.down - sl_str = "" - for level_list in sl_mat[::-1]: - for node_str in level_list: - sl_str += node_str + " " - if len(sl_str) > 0: - sl_str += "\n" - return sl_str diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/__init__.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_algorithms.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_algorithms.py deleted file mode 100644 index 3e287bb74..000000000 --- a/build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_algorithms.py +++ /dev/null @@ -1,423 +0,0 @@ -from pydatastructs import ( - merge_sort_parallel, DynamicOneDimensionalArray, - OneDimensionalArray, brick_sort, brick_sort_parallel, - heapsort, matrix_multiply_parallel, counting_sort, bucket_sort, - cocktail_shaker_sort, quick_sort, longest_common_subsequence, is_ordered, - upper_bound, lower_bound, longest_increasing_subsequence, next_permutation, - prev_permutation, bubble_sort, linear_search, binary_search, jump_search, - selection_sort, insertion_sort, intro_sort, shell_sort, radix_sort, Backend) - -from pydatastructs.utils.raises_util import raises -import random - -def _test_common_sort(sort, *args, **kwargs): - random.seed(1000) - - n = random.randint(10, 20) - arr = DynamicOneDimensionalArray(int, 0) - generated_ints = [] - for _ in range(n): - integer = random.randint(1, 1000) - generated_ints.append(integer) - arr.append(integer) - for _ in range(n//3): - integer = random.randint(0, n//2) - generated_ints.append(integer) - 
arr.delete(integer) - expected_arr_1 = [686, 779, 102, 134, 362, 448, - 480, 548, None, None, None, - 228, 688, 247, 373, 696, None, - None, None, None, None, None, - None, None, None, None, None, - None, None, None, None] - sort(arr, *args, **kwargs, start=2, end=10) - assert arr._data == expected_arr_1 - sort(arr, *args, **kwargs) - expected_arr_2 = [102, 134, 228, 247, 362, 373, 448, - 480, 548, 686, 688, 696, 779, - None, None, None, None, None, None, - None, None, None, None, None, - None, None, None, None, None, None, None] - assert arr._data == expected_arr_2 - assert (arr._last_pos_filled, arr._num, arr._size) == (12, 13, 31) - - arr = DynamicOneDimensionalArray(int, 0, backend=Backend.CPP) - int_idx = 0 - for _ in range(n): - arr.append(generated_ints[int_idx]) - int_idx += 1 - for _ in range(n//3): - arr.delete(generated_ints[int_idx]) - int_idx += 1 - sort(arr, *args, **kwargs, start=2, end=10) - for i in range(len(expected_arr_1)): - assert arr[i] == expected_arr_1[i] - sort(arr, *args, **kwargs) - for i in range(len(expected_arr_2)): - assert arr[i] == expected_arr_2[i] - assert (arr._last_pos_filled, arr._num, arr.size) == (12, 13, 31) - - n = random.randint(10, 20) - arr = OneDimensionalArray(int, n) - generated_ints.clear() - for i in range(n): - integer = random.randint(1, 1000) - arr[i] = integer - generated_ints.append(integer) - expected_arr_3 = [42, 695, 147, 500, 768, - 998, 473, 732, 728, 426, - 709, 910] - sort(arr, *args, **kwargs, start=2, end=5) - assert arr._data == expected_arr_3 - - arr = OneDimensionalArray(int, n, backend=Backend.CPP) - int_idx = 0 - for i in range(n): - arr[i] = generated_ints[int_idx] - int_idx += 1 - sort(arr, *args, **kwargs, start=2, end=5) - for i in range(len(expected_arr_3)): - assert arr[i] == expected_arr_3[i] - -def test_merge_sort_parallel(): - _test_common_sort(merge_sort_parallel, num_threads=5) - -def test_brick_sort(): - _test_common_sort(brick_sort) - -def test_brick_sort_parallel(): - 
_test_common_sort(brick_sort_parallel, num_threads=3) - -def test_heapsort(): - _test_common_sort(heapsort) - -def test_bucket_sort(): - _test_common_sort(bucket_sort) - -def test_counting_sort(): - random.seed(1000) - - n = random.randint(10, 20) - arr = DynamicOneDimensionalArray(int, 0) - for _ in range(n): - arr.append(random.randint(1, 1000)) - for _ in range(n//3): - arr.delete(random.randint(0, n//2)) - - expected_arr = [102, 134, 228, 247, 362, 373, 448, - 480, 548, 686, 688, 696, 779] - assert counting_sort(arr)._data == expected_arr - -def test_cocktail_shaker_sort(): - _test_common_sort(cocktail_shaker_sort) - -def test_quick_sort(): - _test_common_sort(quick_sort) - _test_common_sort(quick_sort, backend=Backend.CPP) - -def test_intro_sort(): - _test_common_sort(intro_sort) - -def test_bubble_sort(): - _test_common_sort(bubble_sort) - _test_common_sort(bubble_sort, backend=Backend.CPP) - _test_common_sort(bubble_sort, backend=Backend.LLVM) - -def test_selection_sort(): - _test_common_sort(selection_sort) - _test_common_sort(selection_sort, backend=Backend.CPP) - -def test_insertion_sort(): - _test_common_sort(insertion_sort) - _test_common_sort(insertion_sort, backend=Backend.CPP) - -def test_matrix_multiply_parallel(): - ODA = OneDimensionalArray - - expected_result = [[3, 3, 3], [1, 2, 1], [2, 2, 2]] - - I = ODA(ODA, [ODA(int, [1, 1, 0]), ODA(int, [0, 1, 0]), ODA(int, [0, 0, 1])]) - J = ODA(ODA, [ODA(int, [2, 1, 2]), ODA(int, [1, 2, 1]), ODA(int, [2, 2, 2])]) - output = matrix_multiply_parallel(I, J, num_threads=5) - assert expected_result == output - - I = [[1, 1, 0], [0, 1, 0], [0, 0, 1]] - J = [[2, 1, 2], [1, 2, 1], [2, 2, 2]] - output = matrix_multiply_parallel(I, J, num_threads=5) - assert expected_result == output - - I = [[1, 1, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]] - J = [[2, 1, 2], [1, 2, 1], [2, 2, 2]] - assert raises(ValueError, lambda: matrix_multiply_parallel(I, J, num_threads=5)) - - I = [[1, 1, 0], [0, 1, 0], [0, 0, 1]] - J = [[2, 1, 2], 
[1, 2, 1], [2, 2, 2]] - output = matrix_multiply_parallel(I, J, num_threads=1) - assert expected_result == output - -def test_longest_common_sequence(): - ODA = OneDimensionalArray - expected_result = "['A', 'S', 'C', 'I', 'I']" - - str1 = ODA(str, ['A', 'A', 'S', 'C', 'C', 'I', 'I']) - str2 = ODA(str, ['A', 'S', 'S', 'C', 'I', 'I', 'I', 'I']) - output = longest_common_subsequence(str1, str2) - assert str(output) == expected_result - - expected_result = "['O', 'V', 'A']" - - I = ODA(str, ['O', 'V', 'A', 'L']) - J = ODA(str, ['F', 'O', 'R', 'V', 'A', 'E', 'W']) - output = longest_common_subsequence(I, J) - assert str(output) == expected_result - - X = ODA(int, [1, 2, 3, 4, 5, 6, 6, 5, 4, 3, 2, 1]) - Y = ODA(int, [1, 2, 3, 4, 4, 3, 2, 1]) - output = longest_common_subsequence(X, Y) - assert str(output) == '[1, 2, 3, 4, 4, 3, 2, 1]' - - Z = ODA(int, []) - output = longest_common_subsequence(Y, Z) - assert str(output) == '[]' - -def test_is_ordered(): - def _test_inner_ordered(*args, **kwargs): - ODA = OneDimensionalArray - DODA = DynamicOneDimensionalArray - - expected_result = True - arr = ODA(int, [1, 2, 5, 6]) - output = is_ordered(arr, **kwargs) - assert output == expected_result - - expected_result = False - arr1 = ODA(int, [4, 3, 2, 1]) - output = is_ordered(arr1, **kwargs) - assert output == expected_result - - expected_result = True - arr2 = ODA(int, [6, 1, 2, 3, 4, 5]) - output = is_ordered(arr2, start=1, end=5, **kwargs) - assert output == expected_result - - expected_result = True - arr3 = ODA(int, [0, -1, -2, -3, -4, 4]) - output = is_ordered(arr3, start=1, end=4, - comp=lambda u, v: u > v, **kwargs) - assert output == expected_result - - expected_result = True - arr4 = DODA(int, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) - arr4.delete(0) - output = is_ordered(arr4, **kwargs) - assert output == expected_result - - _test_inner_ordered() - _test_inner_ordered(backend=Backend.CPP) - - -def test_upper_bound(): - ODA = OneDimensionalArray - arr1 = ODA(int, [3, 3, 3]) - 
output = upper_bound(arr1, 3) - expected_result = 3 - assert expected_result == output - - arr2 = ODA(int, [4, 4, 5, 6]) - output = upper_bound(arr2, 4, end=3) - expected_result = 2 - assert expected_result == output - - arr3 = ODA(int, [6, 6, 7, 8, 9]) - output = upper_bound(arr3, 5, start=2, end=4) - expected_result = 2 - assert expected_result == output - - arr4 = ODA(int, [3, 4, 4, 6]) - output = upper_bound(arr4, 5, start=1, end=3) - expected_result = 3 - assert expected_result == output - - arr5 = ODA(int, [7, 6, 6, 6, 6, 5, 4, 3]) - output = upper_bound(arr5, 6, comp=lambda x, y: x > y) - expected_result = 5 - assert expected_result == output - - arr6 = ODA(int, [7, 6, 6, 6, 6, 5, 4, 3]) - output = upper_bound(arr6, 2, start=2, comp=lambda x, y: x > y) - expected_result = 8 - assert expected_result == output - - arr7 = ODA(int, [7, 6, 6, 6, 6, 5, 4, 3]) - output = upper_bound(arr7, 9, start=3, end=7, comp=lambda x, y: x > y) - expected_result = 3 - assert expected_result == output - - arr8 = ODA(int, [7, 6, 6, 6, 6, 5, 4, 3]) - output = upper_bound(arr8, 6, end=3, comp=lambda x, y: x > y) - expected_result = 3 - assert expected_result == output - - -def test_lower_bound(): - ODA = OneDimensionalArray - arr1 = ODA(int, [3, 3, 3]) - output = lower_bound(arr1, 3, start=1) - expected_result = 1 - assert expected_result == output - - arr2 = ODA(int, [4, 4, 4, 4, 5, 6]) - output = lower_bound(arr2, 5, end=3) - expected_result = 3 - assert expected_result == output - - arr3 = ODA(int, [6, 6, 7, 8, 9]) - output = lower_bound(arr3, 5, end=3) - expected_result = 0 - assert expected_result == output - - arr4 = ODA(int, [3, 4, 4, 4]) - output = lower_bound(arr4, 5) - expected_result = 4 - assert expected_result == output - - arr5 = ODA(int, [7, 6, 6, 6, 6, 5, 4, 3]) - output = lower_bound(arr5, 5, comp=lambda x, y: x > y) - expected_result = 5 - assert expected_result == output - - arr6 = ODA(int, [7, 6, 6, 6, 6, 5, 4, 3]) - output = lower_bound(arr6, 2, start=4, 
comp=lambda x, y: x > y) - expected_result = 8 - assert expected_result == output - - arr7 = ODA(int, [7, 6, 6, 6, 6, 5, 4, 3]) - output = lower_bound(arr7, 9, end=5, comp=lambda x, y: x > y) - expected_result = 0 - assert expected_result == output - - arr8 = ODA(int, [7, 6, 6, 6, 6, 5, 4, 3]) - output = lower_bound(arr8, 6, end=3, comp=lambda x, y: x > y) - expected_result = 1 - assert expected_result == output - -def test_longest_increasing_subsequence(): - ODA = OneDimensionalArray - - arr1 = ODA(int, [2, 5, 3, 7, 11, 8, 10, 13, 6]) - output = longest_increasing_subsequence(arr1) - expected_result = [2, 3, 7, 8, 10, 13] - assert str(expected_result) == str(output) - - arr2 = ODA(int, [3, 4, -1, 5, 8, 2, 2, 2, 3, 12, 7, 9, 10]) - output = longest_increasing_subsequence(arr2) - expected_result = [-1, 2, 3, 7, 9, 10] - assert str(expected_result) == str(output) - - arr3 = ODA(int, [6, 6, 6, 19, 9]) - output = longest_increasing_subsequence(arr3) - expected_result = [6, 9] - assert str(expected_result) == str(output) - - arr4 = ODA(int, [5, 4, 4, 3, 3, 6, 6, 8]) - output = longest_increasing_subsequence(arr4) - expected_result = [3, 6, 8] - assert str(expected_result) == str(output) - - arr5 = ODA(int, [7, 6, 6, 6, 5, 4, 3]) - output = longest_increasing_subsequence(arr5) - expected_result = [3] - assert str(expected_result) == str(output) - -def _test_permutation_common(array, expected_perms, func): - num_perms = len(expected_perms) - - output = [] - for _ in range(num_perms): - signal, array = func(array) - output.append(array) - if not signal: - break - - assert len(output) == len(expected_perms) - for perm1, perm2 in zip(output, expected_perms): - assert str(perm1) == str(perm2) - -def test_next_permutation(): - ODA = OneDimensionalArray - - array = ODA(int, [1, 2, 3]) - expected_perms = [[1, 3, 2], [2, 1, 3], - [2, 3, 1], [3, 1, 2], - [3, 2, 1], [1, 2, 3]] - _test_permutation_common(array, expected_perms, next_permutation) - -def test_prev_permutation(): - ODA 
= OneDimensionalArray - - array = ODA(int, [3, 2, 1]) - expected_perms = [[3, 1, 2], [2, 3, 1], - [2, 1, 3], [1, 3, 2], - [1, 2, 3], [3, 2, 1]] - _test_permutation_common(array, expected_perms, prev_permutation) - -def test_next_prev_permutation(): - ODA = OneDimensionalArray - random.seed(1000) - - for i in range(100): - data = set(random.sample(range(1, 10000), 10)) - array = ODA(int, list(data)) - - _, next_array = next_permutation(array) - _, orig_array = prev_permutation(next_array) - assert str(orig_array) == str(array) - - _, prev_array = prev_permutation(array) - _, orig_array = next_permutation(prev_array) - assert str(orig_array) == str(array) - -def _test_common_search(search_func, sort_array=True, **kwargs): - ODA = OneDimensionalArray - - array = ODA(int, [1, 2, 5, 7, 10, 29, 40]) - for i in range(len(array)): - assert i == search_func(array, array[i], **kwargs) - - checker_array = [None, None, 2, 3, 4, 5, None] - for i in range(len(array)): - assert checker_array[i] == search_func(array, array[i], start=2, end=5, **kwargs) - - random.seed(1000) - - for i in range(25): - data = list(set(random.sample(range(1, 10000), 100))) - - if sort_array: - data.sort() - - array = ODA(int, list(data)) - - for i in range(len(array)): - assert search_func(array, array[i], **kwargs) == i - - for _ in range(50): - assert search_func(array, random.randint(10001, 50000), **kwargs) is None - -def test_linear_search(): - _test_common_search(linear_search, sort_array=False) - _test_common_search(linear_search, sort_array=False, backend=Backend.CPP) - -def test_binary_search(): - _test_common_search(binary_search) - _test_common_search(binary_search, backend=Backend.CPP) - -def test_jump_search(): - _test_common_search(jump_search) - _test_common_search(jump_search, backend=Backend.CPP) - -def test_shell_sort(): - _test_common_sort(shell_sort) - -def test_radix_sort(): - _test_common_sort(radix_sort) diff --git 
a/build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_arrays.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_arrays.py deleted file mode 100644 index 886510113..000000000 --- a/build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_arrays.py +++ /dev/null @@ -1,157 +0,0 @@ -from pydatastructs.linear_data_structures import ( - OneDimensionalArray, DynamicOneDimensionalArray, - MultiDimensionalArray, ArrayForTrees) -from pydatastructs.utils.misc_util import Backend -from pydatastructs.utils.raises_util import raises -from pydatastructs.utils import TreeNode -from pydatastructs.utils._backend.cpp import _nodes - -def test_OneDimensionalArray(): - ODA = OneDimensionalArray - A = ODA(int, 5, [1.0, 2, 3, 4, 5], init=6) - A[1] = 2.0 - assert str(A) == '[1, 2, 3, 4, 5]' - assert A - assert ODA(int, [1.0, 2, 3, 4, 5], 5) - assert ODA(int, 5) - assert ODA(int, [1.0, 2, 3]) - assert raises(IndexError, lambda: A[7]) - assert raises(IndexError, lambda: A[-1]) - assert raises(ValueError, lambda: ODA()) - assert raises(ValueError, lambda: ODA(int, 1, 2, 3)) - assert raises(TypeError, lambda: ODA(int, 5.0, set([1, 2, 3]))) - assert raises(TypeError, lambda: ODA(int, 5.0)) - assert raises(TypeError, lambda: ODA(int, set([1, 2, 3]))) - assert raises(ValueError, lambda: ODA(int, 3, [1])) - - A = ODA(int, 5, [1, 2, 3, 4, 5], init=6, backend=Backend.CPP) - A[1] = 2 - assert str(A) == "['1', '2', '3', '4', '5']" - assert A - assert ODA(int, [1, 2, 3, 4, 5], 5, backend=Backend.CPP) - assert ODA(int, 5, backend=Backend.CPP) - assert ODA(int, [1, 2, 3], backend=Backend.CPP) - assert raises(TypeError, lambda: ODA(int, [1.0, 2, 3, 4, 5], 5, backend=Backend.CPP)) - assert raises(TypeError, lambda: ODA(int, [1.0, 2, 3], backend=Backend.CPP)) - assert raises(IndexError, lambda: A[7]) - assert raises(IndexError, lambda: A[-1]) - assert raises(ValueError, lambda: 
ODA(backend=Backend.CPP)) - assert raises(ValueError, lambda: ODA(int, 1, 2, 3, backend=Backend.CPP)) - assert raises(TypeError, lambda: ODA(int, 5.0, set([1, 2, 3]), backend=Backend.CPP)) - assert raises(TypeError, lambda: ODA(int, 5.0, backend=Backend.CPP)) - assert raises(TypeError, lambda: ODA(int, set([1, 2, 3]), backend=Backend.CPP)) - assert raises(ValueError, lambda: ODA(int, 3, [1], backend=Backend.CPP)) - assert raises(ValueError, lambda: ODA(int, 3, [1], backend=Backend.CPP)) - assert raises(TypeError, lambda: A.fill(2.0)) - - -def test_MultiDimensionalArray(): - assert raises(ValueError, lambda: MultiDimensionalArray(int, 2, -1, 3)) - assert MultiDimensionalArray(int, 10).shape == (10,) - array = MultiDimensionalArray(int, 5, 9, 3, 8) - assert array.shape == (5, 9, 3, 8) - array.fill(5) - array[1, 3, 2, 5] = 2.0 - assert array - assert array[1, 3, 2, 5] == 2.0 - assert array[1, 3, 0, 5] == 5 - assert array[1, 2, 2, 5] == 5 - assert array[2, 3, 2, 5] == 5 - assert raises(IndexError, lambda: array[5]) - assert raises(IndexError, lambda: array[4, 10]) - assert raises(IndexError, lambda: array[-1]) - assert raises(IndexError, lambda: array[2, 3, 2, 8]) - assert raises(ValueError, lambda: MultiDimensionalArray()) - assert raises(ValueError, lambda: MultiDimensionalArray(int)) - assert raises(TypeError, lambda: MultiDimensionalArray(int, 5, 6, "")) - array = MultiDimensionalArray(int, 3, 2, 2) - array.fill(1) - array[0, 0, 0] = 0 - array[0, 0, 1] = 0 - array[1, 0, 0] = 0 - array[2, 1, 1] = 0 - assert str(array) == '[0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0]' - array = MultiDimensionalArray(int, 4) - assert array.shape == (4,) - array.fill(5) - array[3] = 3 - assert array[3] == 3 - -def test_DynamicOneDimensionalArray(): - DODA = DynamicOneDimensionalArray - A = DODA(int, 0) - A.append(1) - A.append(2) - A.append(3) - A.append(4) - assert str(A) == "['1', '2', '3', '4']" - A.delete(0) - A.delete(0) - A.delete(15) - A.delete(-1) - A.delete(1) - A.delete(2) - assert 
A._data == [4, None, None] - assert str(A) == "['4']" - assert A.size == 3 - A.fill(4) - assert A._data == [4, 4, 4] - b = DynamicOneDimensionalArray(int, 0) - b.append(1) - b.append(2) - b.append(3) - b.append(4) - b.append(5) - assert b._data == [1, 2, 3, 4, 5, None, None] - assert list(reversed(b)) == [5, 4, 3, 2, 1] - - A = DODA(int, 0, backend=Backend.CPP) - A.append(1) - A.append(2) - A.append(3) - A.append(4) - assert str(A) == "['1', '2', '3', '4']" - A.delete(0) - A.delete(0) - A.delete(15) - A.delete(-1) - A.delete(1) - A.delete(2) - assert [A[i] for i in range(A.size)] == [4, None, None] - assert A.size == 3 - A.fill(4) - assert [A[0], A[1], A[2]] == [4, 4, 4] - b = DODA(int, 0, backend=Backend.CPP) - b.append(1) - b.append(2) - b.append(3) - b.append(4) - b.append(5) - assert [b[i] for i in range(b.size)] == [1, 2, 3, 4, 5, None, None] - -def test_DynamicOneDimensionalArray2(): - DODA = DynamicOneDimensionalArray - root = TreeNode(1, 100) - A = DODA(TreeNode, [root]) - assert str(A[0]) == "(None, 1, 100, None)" - -def _test_ArrayForTrees(backend): - AFT = ArrayForTrees - root = TreeNode(1, 100,backend=backend) - if backend==Backend.PYTHON: - A = AFT(TreeNode, [root], backend=backend) - B = AFT(TreeNode, 0, backend=backend) - else: - A = AFT(_nodes.TreeNode, [root], backend=backend) - B = AFT(_nodes.TreeNode, 0, backend=backend) - assert str(A) == "['(None, 1, 100, None)']" - node = TreeNode(2, 200, backend=backend) - A.append(node) - assert str(A) == "['(None, 1, 100, None)', '(None, 2, 200, None)']" - assert str(B) == "[]" - -def test_ArrayForTrees(): - _test_ArrayForTrees(Backend.PYTHON) - -def test_cpp_ArrayForTrees(): - _test_ArrayForTrees(Backend.CPP) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_linked_lists.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_linked_lists.py deleted file mode 100644 index b7f172ddc..000000000 --- 
a/build-install/usr/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_linked_lists.py +++ /dev/null @@ -1,193 +0,0 @@ -from pydatastructs.linear_data_structures import DoublyLinkedList, SinglyLinkedList, SinglyCircularLinkedList, DoublyCircularLinkedList, SkipList -from pydatastructs.utils.raises_util import raises -import copy, random - -def test_DoublyLinkedList(): - random.seed(1000) - dll = DoublyLinkedList() - assert raises(IndexError, lambda: dll[2]) - dll.appendleft(5) - dll.append(1) - dll.appendleft(2) - dll.append(3) - dll.insert_after(dll[-1], 4) - dll.insert_after(dll[2], 6) - dll.insert_before(dll[4], 1.1) - dll.insert_before(dll[0], 7) - dll.insert_at(0, 2) - dll.insert_at(-1, 9) - dll.extract(2) - assert dll.popleft().key == 2 - assert dll.popright().key == 4 - assert dll.search(3) == dll[-2] - assert dll.search(-1) is None - dll[-2].key = 0 - assert str(dll) == ("['(7, None)', '(5, None)', '(1, None)', " - "'(6, None)', '(1.1, None)', '(0, None)', " - "'(9, None)']") - assert len(dll) == 7 - assert raises(IndexError, lambda: dll.insert_at(8, None)) - assert raises(IndexError, lambda: dll.extract(20)) - dll_copy = DoublyCircularLinkedList() - for i in range(dll.size): - dll_copy.append(dll[i]) - for i in range(len(dll)): - if i%2 == 0: - dll.popleft() - else: - dll.popright() - assert str(dll) == "[]" - for _ in range(len(dll_copy)): - index = random.randint(0, len(dll_copy) - 1) - dll_copy.extract(index) - assert str(dll_copy) == "[]" - assert raises(ValueError, lambda: dll_copy.extract(1)) - -def test_SinglyLinkedList(): - random.seed(1000) - sll = SinglyLinkedList() - assert raises(IndexError, lambda: sll[2]) - sll.appendleft(5) - sll.append(1) - sll.appendleft(2) - sll.append(3) - sll.insert_after(sll[1], 4) - sll.insert_after(sll[-1], 6) - sll.insert_at(0, 2) - sll.insert_at(-1, 9) - sll.extract(2) - assert sll.popleft().key == 2 - assert sll.popright().key == 6 - sll[-2].key = 0 - assert str(sll) == ("['(2, None)', 
'(4, None)', '(1, None)', " - "'(0, None)', '(9, None)']") - assert len(sll) == 5 - assert raises(IndexError, lambda: sll.insert_at(6, None)) - assert raises(IndexError, lambda: sll.extract(20)) - sll_copy = DoublyCircularLinkedList() - for i in range(sll.size): - sll_copy.append(sll[i]) - for i in range(len(sll)): - if i%2 == 0: - sll.popleft() - else: - sll.popright() - assert str(sll) == "[]" - for _ in range(len(sll_copy)): - index = random.randint(0, len(sll_copy) - 1) - sll_copy.extract(index) - assert str(sll_copy) == "[]" - assert raises(ValueError, lambda: sll_copy.extract(1)) - -def test_SinglyCircularLinkedList(): - random.seed(1000) - scll = SinglyCircularLinkedList() - assert raises(IndexError, lambda: scll[2]) - scll.appendleft(5) - scll.append(1) - scll.appendleft(2) - scll.append(3) - scll.insert_after(scll[1], 4) - scll.insert_after(scll[-1], 6) - scll.insert_at(0, 2) - scll.insert_at(-1, 9) - scll.extract(2) - assert scll.popleft().key == 2 - assert scll.popright().key == 6 - assert scll.search(-1) is None - scll[-2].key = 0 - assert str(scll) == ("['(2, None)', '(4, None)', '(1, None)', " - "'(0, None)', '(9, None)']") - assert len(scll) == 5 - assert raises(IndexError, lambda: scll.insert_at(6, None)) - assert raises(IndexError, lambda: scll.extract(20)) - scll_copy = DoublyCircularLinkedList() - for i in range(scll.size): - scll_copy.append(scll[i]) - for i in range(len(scll)): - if i%2 == 0: - scll.popleft() - else: - scll.popright() - assert str(scll) == "[]" - for _ in range(len(scll_copy)): - index = random.randint(0, len(scll_copy) - 1) - scll_copy.extract(index) - assert str(scll_copy) == "[]" - assert raises(ValueError, lambda: scll_copy.extract(1)) - -def test_DoublyCircularLinkedList(): - random.seed(1000) - dcll = DoublyCircularLinkedList() - assert raises(IndexError, lambda: dcll[2]) - dcll.appendleft(5) - dcll.append(1) - dcll.appendleft(2) - dcll.append(3) - dcll.insert_after(dcll[-1], 4) - dcll.insert_after(dcll[2], 6) - 
dcll.insert_before(dcll[4], 1) - dcll.insert_before(dcll[0], 7) - dcll.insert_at(0, 2) - dcll.insert_at(-1, 9) - dcll.extract(2) - assert dcll.popleft().key == 2 - assert dcll.popright().key == 4 - dcll[-2].key = 0 - assert str(dcll) == ("['(7, None)', '(5, None)', '(1, None)', " - "'(6, None)', '(1, None)', '(0, None)', " - "'(9, None)']") - assert len(dcll) == 7 - assert raises(IndexError, lambda: dcll.insert_at(8, None)) - assert raises(IndexError, lambda: dcll.extract(20)) - dcll_copy = DoublyCircularLinkedList() - for i in range(dcll.size): - dcll_copy.append(dcll[i]) - for i in range(len(dcll)): - if i%2 == 0: - dcll.popleft() - else: - dcll.popright() - assert str(dcll) == "[]" - for _ in range(len(dcll_copy)): - index = random.randint(0, len(dcll_copy) - 1) - dcll_copy.extract(index) - assert str(dcll_copy) == "[]" - assert raises(ValueError, lambda: dcll_copy.extract(1)) - -def test_SkipList(): - random.seed(0) - sl = SkipList() - sl.insert(2) - sl.insert(10) - sl.insert(92) - sl.insert(1) - sl.insert(4) - sl.insert(27) - sl.extract(10) - assert str(sl) == ("(1, None) None None None None \n" - "(1, None) None None None None \n" - "(1, None) (2, None) (4, None) (27, None) (92, None) \n") - assert raises(KeyError, lambda: sl.extract(15)) - assert sl.search(1) is True - assert sl.search(47) is False - - sl = SkipList() - - for a in range(0, 20, 2): - sl.insert(a) - assert sl.search(16) is True - for a in range(4, 20, 4): - sl.extract(a) - assert sl.search(10) is True - for a in range(4, 20, 4): - sl.insert(a) - for a in range(0, 20, 2): - sl.extract(a) - assert sl.search(3) is False - - li = SkipList() - li.insert(1) - li.insert(2) - assert li.levels == 1 - assert li.size == 2 diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/__init__.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/__init__.py deleted file mode 100644 index 6ed099769..000000000 --- 
a/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/__init__.py +++ /dev/null @@ -1,51 +0,0 @@ -__all__ = [] - -from . import ( - stack, - binomial_trees, - queue, - disjoint_set, - sparse_table, -) - -from .binomial_trees import ( - BinomialTree -) -__all__.extend(binomial_trees.__all__) - -from .stack import ( - Stack, -) -__all__.extend(stack.__all__) - -from .queue import ( - Queue, - PriorityQueue -) -__all__.extend(queue.__all__) - -from .disjoint_set import ( - DisjointSetForest, -) -__all__.extend(disjoint_set.__all__) - -from .sparse_table import ( - SparseTable, -) -__all__.extend(sparse_table.__all__) - -from .segment_tree import ( - ArraySegmentTree, -) -__all__.extend(segment_tree.__all__) - -from .algorithms import ( - RangeQueryStatic, - RangeQueryDynamic -) -__all__.extend(algorithms.__all__) - -from .multiset import ( - Multiset -) -__all__.extend(multiset.__all__) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/_backend/__init__.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/_backend/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/algorithms.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/algorithms.py deleted file mode 100644 index 3c2f86516..000000000 --- a/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/algorithms.py +++ /dev/null @@ -1,335 +0,0 @@ -from pydatastructs.miscellaneous_data_structures.sparse_table import SparseTable -from pydatastructs.miscellaneous_data_structures.segment_tree import ArraySegmentTree -from pydatastructs.utils.misc_util import ( - _check_range_query_inputs, Backend, - raise_if_backend_is_not_python) - -__all__ = [ - 'RangeQueryStatic', - 'RangeQueryDynamic' -] - - 
-class RangeQueryStatic: - """ - Produces results for range queries of different kinds - by using specified data structure. - - Parameters - ========== - - array: OneDimensionalArray - The array for which we need to answer queries. - All the elements should be of type `int`. - func: callable - The function to be used for generating results - of a query. It should accept only one tuple as an - argument. The size of the tuple will be either 1 or 2 - and any one of the elements can be `None`. You can treat - `None` in whatever way you want according to the query - you are performing. For example, in case of range minimum - queries, `None` can be treated as infinity. We provide - the following which can be used as an argument value for this - parameter, - - `minimum` - For range minimum queries. - - `greatest_common_divisor` - For queries finding greatest - common divisor of a range. - - `summation` - For range sum queries. - data_structure: str - The data structure to be used for performing - range queries. - Currently the following data structures are supported, - - 'array' -> Array data structure. - Each query takes O(end - start) time asymptotically. - - 'sparse_table' -> Sparse table data structure. - Each query takes O(log(end - start)) time - asymptotically. - - By default, 'sparse_table'. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs import OneDimensionalArray, RangeQueryStatic - >>> from pydatastructs import minimum - >>> arr = OneDimensionalArray(int, [4, 6, 1, 5, 7, 3]) - >>> RMQ = RangeQueryStatic(arr, minimum) - >>> RMQ.query(3, 4) - 5 - >>> RMQ.query(0, 4) - 1 - >>> RMQ.query(0, 2) - 1 - - Note - ==== - - The array once passed as an input should not be modified - once the `RangeQueryStatic` constructor is called. If you - have updated the array, then you need to create a new - `RangeQueryStatic` object with this updated array. 
- """ - - def __new__(cls, array, func, data_structure='sparse_table', **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - if len(array) == 0: - raise ValueError("Input %s array is empty."%(array)) - - if data_structure == 'array': - return RangeQueryStaticArray(array, func) - elif data_structure == 'sparse_table': - return RangeQueryStaticSparseTable(array, func) - else: - raise NotImplementedError( - "Currently %s data structure for range " - "query without updates isn't implemented yet." - % (data_structure)) - - @classmethod - def methods(cls): - return ['query'] - - def query(start, end): - """ - Method to perform a query in [start, end) range. - - Parameters - ========== - - start: int - The starting index of the range. - end: int - The ending index of the range. - """ - raise NotImplementedError( - "This is an abstract method.") - - -class RangeQueryStaticSparseTable(RangeQueryStatic): - - __slots__ = ["sparse_table", "bounds"] - - def __new__(cls, array, func, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = object.__new__(cls) - sparse_table = SparseTable(array, func) - obj.bounds = (0, len(array)) - obj.sparse_table = sparse_table - return obj - - @classmethod - def methods(cls): - return ['query'] - - def query(self, start, end): - _check_range_query_inputs((start, end + 1), self.bounds) - return self.sparse_table.query(start, end) - - -class RangeQueryStaticArray(RangeQueryStatic): - - __slots__ = ["array", "func"] - - def __new__(cls, array, func): - obj = object.__new__(cls) - obj.array = array - obj.func = func - return obj - - @classmethod - def methods(cls): - return ['query'] - - def query(self, start, end): - _check_range_query_inputs((start, end + 1), (0, len(self.array))) - - rsize = end - start + 1 - - if rsize == 1: - return self.func((self.array[start],)) - - query_ans = self.func((self.array[start], self.array[start + 1])) - for i in range(start + 2, 
end + 1): - query_ans = self.func((query_ans, self.array[i])) - return query_ans - -class RangeQueryDynamic: - """ - Produces results for range queries of different kinds - while allowing point updates by using specified - data structure. - - Parameters - ========== - - array: OneDimensionalArray - The array for which we need to answer queries. - All the elements should be of type `int`. - func: callable - The function to be used for generating results - of a query. It should accept only one tuple as an - argument. The size of the tuple will be either 1 or 2 - and any one of the elements can be `None`. You can treat - `None` in whatever way you want according to the query - you are performing. For example, in case of range minimum - queries, `None` can be treated as infinity. We provide - the following which can be used as an argument value for this - parameter, - - `minimum` - For range minimum queries. - - `greatest_common_divisor` - For queries finding greatest - common divisor of a range. - - `summation` - For range sum queries. - data_structure: str - The data structure to be used for performing - range queries. - Currently the following data structures are supported, - - 'array' -> Array data structure. - Each query takes O(end - start) time asymptotically. - Each point update takes O(1) time asymptotically. - - 'segment_tree' -> Segment tree data structure. - Each query takes O(log(end - start)) time - asymptotically. - Each point update takes O(log(len(array))) time - asymptotically. - - By default, 'segment_tree'. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. 
- - Examples - ======== - - >>> from pydatastructs import OneDimensionalArray, RangeQueryDynamic - >>> from pydatastructs import minimum - >>> arr = OneDimensionalArray(int, [4, 6, 1, 5, 7, 3]) - >>> RMQ = RangeQueryDynamic(arr, minimum) - >>> RMQ.query(3, 4) - 5 - >>> RMQ.query(0, 4) - 1 - >>> RMQ.query(0, 2) - 1 - >>> RMQ.update(2, 0) - >>> RMQ.query(0, 2) - 0 - - Note - ==== - - The array once passed as an input should be modified - only with `RangeQueryDynamic.update` method. - """ - - def __new__(cls, array, func, data_structure='segment_tree', **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - - if len(array) == 0: - raise ValueError("Input %s array is empty."%(array)) - - if data_structure == 'array': - return RangeQueryDynamicArray(array, func, **kwargs) - elif data_structure == 'segment_tree': - return RangeQueryDynamicSegmentTree(array, func, **kwargs) - else: - raise NotImplementedError( - "Currently %s data structure for range " - "query with point updates isn't implemented yet." - % (data_structure)) - - @classmethod - def methods(cls): - return ['query', 'update'] - - def query(start, end): - """ - Method to perform a query in [start, end) range. - - Parameters - ========== - - start: int - The starting index of the range. - end: int - The ending index of the range. - """ - raise NotImplementedError( - "This is an abstract method.") - - def update(self, index, value): - """ - Method to update index with a new value. - - Parameters - ========== - - index: int - The index to be update. - value: int - The new value. 
- """ - raise NotImplementedError( - "This is an abstract method.") - -class RangeQueryDynamicArray(RangeQueryDynamic): - - __slots__ = ["range_query_static"] - - def __new__(cls, array, func, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = object.__new__(cls) - obj.range_query_static = RangeQueryStaticArray(array, func) - return obj - - @classmethod - def methods(cls): - return ['query', 'update'] - - def query(self, start, end): - return self.range_query_static.query(start, end) - - def update(self, index, value): - self.range_query_static.array[index] = value - -class RangeQueryDynamicSegmentTree(RangeQueryDynamic): - - __slots__ = ["segment_tree", "bounds"] - - def __new__(cls, array, func, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.pop('backend', Backend.PYTHON)) - obj = object.__new__(cls) - obj.segment_tree = ArraySegmentTree(array, func, dimensions=1) - obj.segment_tree.build() - obj.bounds = (0, len(array)) - return obj - - @classmethod - def methods(cls): - return ['query', 'update'] - - def query(self, start, end): - _check_range_query_inputs((start, end + 1), self.bounds) - return self.segment_tree.query(start, end) - - def update(self, index, value): - self.segment_tree.update(index, value) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/binomial_trees.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/binomial_trees.py deleted file mode 100644 index 9ea91d828..000000000 --- a/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/binomial_trees.py +++ /dev/null @@ -1,91 +0,0 @@ -from pydatastructs.utils.misc_util import ( - BinomialTreeNode, _check_type, Backend, - raise_if_backend_is_not_python) - -__all__ = [ - 'BinomialTree' -] - -class BinomialTree(object): - """ - Represents binomial trees - - Parameters - ========== - - root: BinomialTreeNode - 
The root of the binomial tree. - By default, None - order: int - The order of the binomial tree. - By default, None - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs import BinomialTree, BinomialTreeNode - >>> root = BinomialTreeNode(1, 1) - >>> tree = BinomialTree(root, 0) - >>> tree.is_empty - False - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Binomial_heap - """ - __slots__ = ['root', 'order'] - - def __new__(cls, root=None, order=None, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - if root is not None and \ - not _check_type(root, BinomialTreeNode): - raise TypeError("%s i.e., root should be of " - "type BinomialTreeNode."%(root)) - if order is not None and not _check_type(order, int): - raise TypeError("%s i.e., order should be of " - "type int."%(order)) - obj = object.__new__(cls) - if root is not None: - root.is_root = True - obj.root = root - obj.order = order - return obj - - @classmethod - def methods(cls): - return ['add_sub_tree', '__new__', 'is_empty'] - - def add_sub_tree(self, other_tree): - """ - Adds a sub tree to current tree. - - Parameters - ========== - - other_tree: BinomialTree - - Raises - ====== - - ValueError: If order of the two trees - are different. 
- """ - if not _check_type(other_tree, BinomialTree): - raise TypeError("%s i.e., other_tree should be of " - "type BinomialTree"%(other_tree)) - if self.order != other_tree.order: - raise ValueError("Orders of both the trees should be same.") - self.root.children.append(other_tree.root) - other_tree.root.parent = self.root - other_tree.root.is_root = False - self.order += 1 - - @property - def is_empty(self): - return self.root is None diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/disjoint_set.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/disjoint_set.py deleted file mode 100644 index 9a5caef5b..000000000 --- a/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/disjoint_set.py +++ /dev/null @@ -1,143 +0,0 @@ -from pydatastructs.utils import Set -from pydatastructs.utils.misc_util import ( - Backend, raise_if_backend_is_not_python) - -__all__ = ['DisjointSetForest'] - -class DisjointSetForest(object): - """ - Represents a forest of disjoint set trees. - - Parameters - ========== - - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs import DisjointSetForest - >>> dst = DisjointSetForest() - >>> dst.make_set(1) - >>> dst.make_set(2) - >>> dst.union(1, 2) - >>> dst.find_root(2).key - 1 - >>> dst.make_root(2) - >>> dst.find_root(2).key - 2 - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Disjoint-set_data_structure - """ - - __slots__ = ['tree'] - - def __new__(cls, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = object.__new__(cls) - obj.tree = dict() - return obj - - @classmethod - def methods(cls): - return ['make_set', '__new__', 'find_root', 'union'] - - def make_set(self, key, data=None): - """ - Adds a singleton set to the tree - of disjoint sets with given key - and optionally data. - """ - if self.tree.get(key, None) is None: - new_set = Set(key, data) - self.tree[key] = new_set - new_set.parent = new_set - new_set.size = 1 - - def find_root(self, key): - """ - Finds the root of the set - with the given key by path - splitting algorithm. - """ - if self.tree.get(key, None) is None: - raise KeyError("Invalid key, %s"%(key)) - _set = self.tree[key] - while _set.parent is not _set: - _set, _set.parent = _set.parent, _set.parent.parent - return _set - - def union(self, key1, key2): - """ - Takes the union of the two - disjoint set trees with given - keys. The union is done by size. - """ - x_root = self.find_root(key1) - y_root = self.find_root(key2) - - if x_root is not y_root: - if x_root.size < y_root.size: - x_root, y_root = y_root, x_root - - y_root.parent = x_root - x_root.size += y_root.size - - def make_root(self, key): - """ - Finds the set to which the key belongs - and makes it as the root of the set. 
- """ - if self.tree.get(key, None) is None: - raise KeyError("Invalid key, %s"%(key)) - - key_set = self.tree[key] - if key_set.parent is not key_set: - current_parent = key_set.parent - # Remove this key subtree size from all its ancestors - while current_parent.parent is not current_parent: - current_parent.size -= key_set.size - current_parent = current_parent.parent - - all_set_size = current_parent.size # This is the root node - current_parent.size -= key_set.size - - # Make parent of current root as key - current_parent.parent = key_set - # size of new root will be same as previous root's size - key_set.size = all_set_size - # Make parent of key as itself - key_set.parent = key_set - - def find_size(self, key): - """ - Finds the size of set to which the key belongs. - """ - if self.tree.get(key, None) is None: - raise KeyError("Invalid key, %s"%(key)) - - return self.find_root(key).size - - def disjoint_sets(self): - """ - Returns a list of disjoint sets in the data structure. - """ - result = dict() - for key in self.tree.keys(): - parent = self.find_root(key).key - members = result.get(parent, []) - members.append(key) - result[parent] = members - sorted_groups = [] - for v in result.values(): - sorted_groups.append(v) - sorted_groups[-1].sort() - sorted_groups.sort() - return sorted_groups diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/multiset.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/multiset.py deleted file mode 100644 index 397978224..000000000 --- a/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/multiset.py +++ /dev/null @@ -1,42 +0,0 @@ -__all__ = [ - 'Multiset' -] - - -class Multiset: - def __init__(self, *args): - # TODO: Implement dict in pydatastructs - self.counter = dict() - from pydatastructs.trees import RedBlackTree - self.tree = RedBlackTree() - self._n = 0 - for arg in args: - 
self.add(arg) - - def add(self, element): - self.counter[element] = self.counter.get(element, 0) + 1 - self._n += 1 - if self.counter[element] == 1: - self.tree.insert(element) - - def remove(self, element): - if self.counter[element] == 1: - self.tree.delete(element) - if self.counter.get(element, 0) > 0: - self._n -= 1 - self.counter[element] -= 1 - - def lower_bound(self, element): - return self.tree.lower_bound(element) - - def upper_bound(self, element): - return self.tree.upper_bound(element) - - def __contains__(self, element): - return self.counter.get(element, 0) > 0 - - def __len__(self): - return self._n - - def count(self, element): - return self.counter.get(element, 0) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/queue.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/queue.py deleted file mode 100644 index 033ef9af3..000000000 --- a/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/queue.py +++ /dev/null @@ -1,498 +0,0 @@ -from pydatastructs.linear_data_structures import DynamicOneDimensionalArray, SinglyLinkedList -from pydatastructs.utils.misc_util import ( - NoneType, Backend, raise_if_backend_is_not_python) -from pydatastructs.trees.heaps import BinaryHeap, BinomialHeap -from copy import deepcopy as dc - -__all__ = [ - 'Queue', - 'PriorityQueue' -] - -class Queue(object): - """Representation of queue data structure. - - Parameters - ========== - - implementation : str - Implementation to be used for queue. - By default, 'array' - items : list/tuple - Optional, by default, None - The inital items in the queue. - dtype : A valid python type - Optional, by default NoneType if item - is None. - Required only for 'array' implementation. - double_ended : bool - Optional, by default, False. 
- Set to True if the queue should support - additional, appendleft and pop operations - from left and right sides respectively. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs import Queue - >>> q = Queue() - >>> q.append(1) - >>> q.append(2) - >>> q.append(3) - >>> q.popleft() - 1 - >>> len(q) - 2 - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Queue_(abstract_data_type) - """ - - def __new__(cls, implementation='array', **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - if implementation == 'array': - return ArrayQueue( - kwargs.get('items', None), - kwargs.get('dtype', int), - kwargs.get('double_ended', False)) - elif implementation == 'linked_list': - return LinkedListQueue( - kwargs.get('items', None), - kwargs.get('double_ended', False) - ) - else: - raise NotImplementedError( - "%s hasn't been implemented yet."%(implementation)) - - @classmethod - def methods(cls): - return ['__new__'] - - def _double_ended_check(self): - if not self._double_ended: - raise NotImplementedError( - "This method is only supported for " - "double ended queues.") - - def append(self, *args, **kwargs): - raise NotImplementedError( - "This is an abstract method.") - - def appendleft(self, *args, **kwargs): - raise NotImplementedError( - "This is an abstract method.") - - def pop(self, *args, **kwargs): - raise NotImplementedError( - "This is an abstract method.") - - def popleft(self, *args, **kwargs): - raise NotImplementedError( - "This is an abstract method.") - - @property - def is_empty(self): - raise NotImplementedError( - "This is an abstract method.") - - -class ArrayQueue(Queue): - - __slots__ = ['_front', '_rear', '_double_ended'] - - def __new__(cls, items=None, dtype=NoneType, double_ended=False, - **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) 
- if items is None: - items = DynamicOneDimensionalArray(dtype, 0) - else: - dtype = type(items[0]) - items = DynamicOneDimensionalArray(dtype, items) - obj = object.__new__(cls) - obj.items, obj._front = items, -1 - if items.size == 0: - obj._front = -1 - obj._rear = -1 - else: - obj._front = 0 - obj._rear = items._num - 1 - obj._double_ended = double_ended - return obj - - @classmethod - def methods(cls): - return ['__new__', 'append', 'appendleft', 'popleft', - 'pop', 'is_empty', '__len__', '__str__', 'front', - 'rear'] - - def append(self, x): - if self.is_empty: - self._front = 0 - self.items._dtype = type(x) - self.items.append(x) - self._rear += 1 - - def appendleft(self, x): - self._double_ended_check() - temp = [] - if self.is_empty: - self._front = 0 - self._rear = -1 - self.items._dtype = type(x) - temp.append(x) - for i in range(self._front, self._rear + 1): - temp.append(self.items._data[i]) - self.items = DynamicOneDimensionalArray(type(temp[0]), temp) - self._rear += 1 - - def popleft(self): - if self.is_empty: - raise IndexError("Queue is empty.") - return_value = dc(self.items[self._front]) - front_temp = self._front - if self._front == self._rear: - self._front = -1 - self._rear = -1 - else: - if (self.items._num - 1)/self.items._size < \ - self.items._load_factor: - self._front = 0 - else: - self._front += 1 - self.items.delete(front_temp) - return return_value - - def pop(self): - self._double_ended_check() - if self.is_empty: - raise IndexError("Queue is empty.") - - return_value = dc(self.items[self._rear]) - rear_temp = self._rear - if self._front == self._rear: - self._front = -1 - self._rear = -1 - else: - if (self.items._num - 1)/self.items._size < \ - self.items._load_factor: - self._front = 0 - else: - self._rear -= 1 - self.items.delete(rear_temp) - return return_value - - @property - def front(self): - return self._front - - @property - def rear(self): - return self._rear - - @property - def is_empty(self): - return self.__len__() == 0 
- - def __len__(self): - return self.items._num - - def __str__(self): - _data = [] - for i in range(self._front, self._rear + 1): - _data.append(self.items._data[i]) - return str(_data) - -class LinkedListQueue(Queue): - - __slots__ = ['queue', '_double_ended'] - - def __new__(cls, items=None, double_ended=False, - **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = object.__new__(cls) - obj.queue = SinglyLinkedList() - if items is None: - pass - elif type(items) in (list, tuple): - for x in items: - obj.append(x) - else: - raise TypeError("Expected type: list/tuple") - obj._double_ended = double_ended - return obj - - @classmethod - def methods(cls): - return ['__new__', 'append', 'appendleft', 'pop', 'popleft', - 'is_empty', '__len__', '__str__', 'front', 'rear'] - - def append(self, x): - self.queue.append(x) - - def appendleft(self, x): - self._double_ended_check() - if self._double_ended: - self.queue.appendleft(x) - - def pop(self): - self._double_ended_check() - if self.is_empty: - raise IndexError("Queue is empty.") - return_value = self.queue.popright() - return return_value - - def popleft(self): - if self.is_empty: - raise IndexError("Queue is empty.") - return_value = self.queue.popleft() - return return_value - - @property - def is_empty(self): - return self.__len__() == 0 - - @property - def front(self): - return self.queue.head - - @property - def rear(self): - return self.queue.tail - - def __len__(self): - return self.queue.size - - def __str__(self): - return str(self.queue) - -class PriorityQueue(object): - """ - Represents the concept of priority queue. - - Parameters - ========== - - implementation: str - The implementation which is to be - used for supporting operations - of priority queue. - The following implementations are supported, - - 'linked_list' -> Linked list implementation. - - 'binary_heap' -> Binary heap implementation. - - 'binomial_heap' -> Binomial heap implementation. 
- Doesn't support custom comparators, minimum - key data is extracted in every pop. - - Optional, by default, 'binary_heap' implementation - is used. - comp: function - The comparator to be used while comparing priorities. - Must return a bool object. - By default, `lambda u, v: u < v` is used to compare - priorities i.e., minimum priority elements are extracted - by pop operation. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs import PriorityQueue - >>> pq = PriorityQueue() - >>> pq.push(1, 2) - >>> pq.push(2, 3) - >>> pq.pop() - 1 - >>> pq2 = PriorityQueue(comp=lambda u, v: u > v) - >>> pq2.push(1, 2) - >>> pq2.push(2, 3) - >>> pq2.pop() - 2 - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Priority_queue - """ - - def __new__(cls, implementation='binary_heap', **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - comp = kwargs.get("comp", lambda u, v: u < v) - if implementation == 'linked_list': - return LinkedListPriorityQueue(comp) - elif implementation == 'binary_heap': - return BinaryHeapPriorityQueue(comp) - elif implementation == 'binomial_heap': - return BinomialHeapPriorityQueue() - else: - raise NotImplementedError( - "%s implementation is not currently supported " - "by priority queue.") - - @classmethod - def methods(cls): - return ['__new__'] - - def push(self, value, priority): - """ - Pushes the value to the priority queue - according to the given priority. - - value - Value to be pushed. - priority - Priority to be given to the value. - """ - raise NotImplementedError( - "This is an abstract method.") - - def pop(self): - """ - Pops out the value from the priority queue. - """ - raise NotImplementedError( - "This is an abstract method.") - - @property - def peek(self): - """ - Returns the pointer to the value which will be - popped out by `pop` method. 
- """ - raise NotImplementedError( - "This is an abstract method.") - - @property - def is_empty(self): - """ - Checks if the priority queue is empty. - """ - raise NotImplementedError( - "This is an abstract method.") - -class LinkedListPriorityQueue(PriorityQueue): - - __slots__ = ['items', 'comp'] - - @classmethod - def methods(cls): - return ['__new__', 'push', 'pop', 'peek', 'is_empty'] - - def __new__(cls, comp, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = object.__new__(cls) - obj.items = SinglyLinkedList() - obj.comp = comp - return obj - - def push(self, value, priority): - self.items.append(priority, value) - - def pop(self): - _, max_i = self._find_peek(return_index=True) - pop_val = self.items.extract(max_i) - return pop_val.data - - def _find_peek(self, return_index=False): - if self.is_empty: - raise IndexError("Priority queue is empty.") - - walk = self.items.head - i, max_i, max_p = 0, 0, walk - while walk is not None: - if self.comp(walk.key, max_p.key): - max_i = i - max_p = walk - i += 1 - walk = walk.next - if return_index: - return max_p, max_i - return max_p - - @property - def peek(self): - return self._find_peek() - - @property - def is_empty(self): - return self.items.size == 0 - -class BinaryHeapPriorityQueue(PriorityQueue): - - __slots__ = ['items'] - - @classmethod - def methods(cls): - return ['__new__', 'push', 'pop', 'peek', 'is_empty'] - - def __new__(cls, comp, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = object.__new__(cls) - obj.items = BinaryHeap() - obj.items._comp = comp - return obj - - def push(self, value, priority): - self.items.insert(priority, value) - - def pop(self): - node = self.items.extract() - return node.data - - @property - def peek(self): - if self.items.is_empty: - raise IndexError("Priority queue is empty.") - return self.items.heap[0] - - @property - def is_empty(self): - return self.items.is_empty - 
-class BinomialHeapPriorityQueue(PriorityQueue): - - __slots__ = ['items'] - - @classmethod - def methods(cls): - return ['__new__', 'push', 'pop', 'peek', 'is_empty'] - - def __new__(cls, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = object.__new__(cls) - obj.items = BinomialHeap() - return obj - - def push(self, value, priority): - self.items.insert(priority, value) - - def pop(self): - node = self.items.find_minimum() - self.items.delete_minimum() - return node.data - - @property - def peek(self): - return self.items.find_minimum() - - @property - def is_empty(self): - return self.items.is_empty diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/segment_tree.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/segment_tree.py deleted file mode 100644 index 0895ba6da..000000000 --- a/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/segment_tree.py +++ /dev/null @@ -1,225 +0,0 @@ -from .stack import Stack -from pydatastructs.utils.misc_util import (TreeNode, - Backend, raise_if_backend_is_not_python) - -__all__ = ['ArraySegmentTree'] - -class ArraySegmentTree(object): - """ - Represents the segment tree data structure, - defined on arrays. - - Parameters - ========== - - array: Array - The array to be used for filling the segment tree. - func: callable - The function to be used for filling the segment tree. - It should accept only one tuple as an argument. The - size of the tuple will be either 1 or 2 and any one - of the elements can be `None`. You can treat `None` in - whatever way you want. For example, in case of minimum - values, `None` can be treated as infinity. We provide - the following which can be used as an argument value for this - parameter, - - `minimum` - For range minimum queries. 
- - `greatest_common_divisor` - For queries finding greatest - common divisor of a range. - - `summation` - For range sum queries. - dimensions: int - The number of dimensions of the array to be used - for the segment tree. - Optional, by default 1. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs import ArraySegmentTree, minimum - >>> from pydatastructs import OneDimensionalArray - >>> arr = OneDimensionalArray(int, [1, 2, 3, 4, 5]) - >>> s_t = ArraySegmentTree(arr, minimum) - >>> s_t.build() - >>> s_t.query(0, 1) - 1 - >>> s_t.query(1, 3) - 2 - >>> s_t.update(2, -1) - >>> s_t.query(1, 3) - -1 - >>> arr = OneDimensionalArray(int, [1, 2]) - >>> s_t = ArraySegmentTree(arr, minimum) - >>> s_t.build() - >>> str(s_t) - "['((0, 1), 1)', '((0, 0), 1)', '', '', '((1, 1), 2)', '', '']" - - References - ========== - - .. [1] https://cp-algorithms.com/data_structures/segment_tree.html - """ - def __new__(cls, array, func, **kwargs): - - dimensions = kwargs.pop("dimensions", 1) - if dimensions == 1: - return OneDimensionalArraySegmentTree(array, func, **kwargs) - else: - raise NotImplementedError("ArraySegmentTree do not support " - "{}-dimensional arrays as of now.".format(dimensions)) - - def build(self): - """ - Generates segment tree nodes when called. - Nothing happens if nodes are already generated. - """ - raise NotImplementedError( - "This is an abstract method.") - - def update(self, index, value): - """ - Updates the value at given index. - """ - raise NotImplementedError( - "This is an abstract method.") - - def query(self, start, end): - """ - Queries [start, end] range according - to the function provided while constructing - `ArraySegmentTree` object. 
- """ - raise NotImplementedError( - "This is an abstract method.") - - def __str__(self): - recursion_stack = Stack(implementation='linked_list') - recursion_stack.push(self._root) - to_be_printed = [] - while not recursion_stack.is_empty: - node = recursion_stack.pop().key - if node is not None: - to_be_printed.append(str((node.key, node.data))) - else: - to_be_printed.append('') - if node is not None: - recursion_stack.push(node.right) - recursion_stack.push(node.left) - return str(to_be_printed) - - -class OneDimensionalArraySegmentTree(ArraySegmentTree): - - __slots__ = ["_func", "_array", "_root", "_backend"] - - def __new__(cls, array, func, **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - raise_if_backend_is_not_python(cls, backend) - - obj = object.__new__(cls) - obj._func = func - obj._array = array - obj._root = None - obj._backend = backend - return obj - - @classmethod - def methods(self): - return ['__new__', 'build', 'update', - 'query'] - - @property - def is_ready(self): - return self._root is not None - - def build(self): - if self.is_ready: - return - - recursion_stack = Stack(implementation='linked_list') - node = TreeNode((0, len(self._array) - 1), None, backend=self._backend) - node.is_root = True - self._root = node - recursion_stack.push(node) - - while not recursion_stack.is_empty: - node = recursion_stack.peek.key - start, end = node.key - if start == end: - node.data = self._array[start] - recursion_stack.pop() - continue - - if (node.left is not None and - node.right is not None): - recursion_stack.pop() - node.data = self._func((node.left.data, node.right.data)) - else: - mid = (start + end) // 2 - if node.left is None: - left_node = TreeNode((start, mid), None) - node.left = left_node - recursion_stack.push(left_node) - if node.right is None: - right_node = TreeNode((mid + 1, end), None) - node.right = right_node - recursion_stack.push(right_node) - - def update(self, index, value): - if not self.is_ready: - raise 
ValueError("{} tree is not built yet. ".format(self) + - "Call .build method to prepare the segment tree.") - - recursion_stack = Stack(implementation='linked_list') - recursion_stack.push((self._root, None)) - - while not recursion_stack.is_empty: - node, child = recursion_stack.peek.key - start, end = node.key - if start == end: - self._array[index] = value - node.data = value - recursion_stack.pop() - if not recursion_stack.is_empty: - parent_node = recursion_stack.pop() - recursion_stack.push((parent_node.key[0], node)) - continue - - if child is not None: - node.data = self._func((node.left.data, node.right.data)) - recursion_stack.pop() - if not recursion_stack.is_empty: - parent_node = recursion_stack.pop() - recursion_stack.push((parent_node.key[0], node)) - else: - mid = (start + end) // 2 - if start <= index and index <= mid: - recursion_stack.push((node.left, None)) - else: - recursion_stack.push((node.right, None)) - - def _query(self, node, start, end, l, r): - if r < start or end < l: - return None - - if l <= start and end <= r: - return node.data - - mid = (start + end) // 2 - left_result = self._query(node.left, start, mid, l, r) - right_result = self._query(node.right, mid + 1, end, l, r) - return self._func((left_result, right_result)) - - def query(self, start, end): - if not self.is_ready: - raise ValueError("{} tree is not built yet. 
".format(self) + - "Call .build method to prepare the segment tree.") - - return self._query(self._root, 0, len(self._array) - 1, - start, end) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/sparse_table.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/sparse_table.py deleted file mode 100644 index 55ec4e9b3..000000000 --- a/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/sparse_table.py +++ /dev/null @@ -1,108 +0,0 @@ -from pydatastructs.linear_data_structures.arrays import OneDimensionalArray -from pydatastructs.utils.misc_util import ( - Backend, raise_if_backend_is_not_python) -import math - -__all__ = ['SparseTable'] - - -class SparseTable(object): - """ - Represents the sparse table data structure. - - Parameters - ========== - - array: OneDimensionalArray - The array to be used for filling the sparse table. - func: callable - The function to be used for filling the sparse table. - It should accept only one tuple as an argument. The - size of the tuple will be either 1 or 2 and any one - of the elements can be `None`. You can treat `None` in - whatever way you want. For example, in case of minimum - values, `None` can be treated as infinity. We provide - the following which can be used as an argument value for this - parameter, - - `minimum` - For range minimum queries. - - `greatest_common_divisor` - For queries finding greatest - common divisor of a range. - - `summation` - For range sum queries. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. 
- - Examples - ======== - - >>> from pydatastructs import SparseTable, minimum - >>> from pydatastructs import OneDimensionalArray - >>> arr = OneDimensionalArray(int, [1, 2, 3, 4, 5]) - >>> s_t = SparseTable(arr, minimum) - >>> str(s_t) - "['[1, 1, 1]', '[2, 2, 2]', '[3, 3, None]', '[4, 4, None]', '[5, None, None]']" - - References - ========== - - .. [1] https://cp-algorithms.com/data_structures/sparse-table.html - """ - - __slots__ = ['_table', 'func'] - - def __new__(cls, array, func, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - - # TODO: If possible remove the following check. - if len(array) == 0: - raise ValueError("Input %s array is empty."%(array)) - - obj = object.__new__(cls) - size = len(array) - log_size = int(math.log2(size)) + 1 - obj._table = [OneDimensionalArray(int, log_size) for _ in range(size)] - obj.func = func - - for i in range(size): - obj._table[i][0] = func((array[i],)) - - for j in range(1, log_size + 1): - for i in range(size - (1 << j) + 1): - obj._table[i][j] = func((obj._table[i][j - 1], - obj._table[i + (1 << (j - 1))][j - 1])) - - return obj - - @classmethod - def methods(cls): - return ['query', '__str__'] - - def query(self, start, end): - """ - Method to perform a query on sparse table in [start, end) - range. - - Parameters - ========== - - start: int - The starting index of the range. - end: int - The ending index of the range. 
- """ - j = int(math.log2(end - start + 1)) + 1 - answer = None - while j >= 0: - if start + (1 << j) - 1 <= end: - answer = self.func((answer, self._table[start][j])) - start += 1 << j - j -= 1 - return answer - - def __str__(self): - return str([str(array) for array in self._table]) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/stack.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/stack.py deleted file mode 100644 index 38f72b43f..000000000 --- a/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/stack.py +++ /dev/null @@ -1,200 +0,0 @@ -from pydatastructs.linear_data_structures import DynamicOneDimensionalArray, SinglyLinkedList -from pydatastructs.miscellaneous_data_structures._backend.cpp import _stack -from pydatastructs.utils.misc_util import ( - _check_type, NoneType, Backend, - raise_if_backend_is_not_python) -from copy import deepcopy as dc - -__all__ = [ - 'Stack' -] - -class Stack(object): - """Representation of stack data structure - - Parameters - ========== - - implementation : str - Implementation to be used for stack. - By default, 'array' - Currently only supports 'array' - implementation. - items : list/tuple - Optional, by default, None - The inital items in the stack. - For array implementation. - dtype : A valid python type - Optional, by default NoneType if item - is None, otherwise takes the data - type of DynamicOneDimensionalArray - For array implementation. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs import Stack - >>> s = Stack() - >>> s.push(1) - >>> s.push(2) - >>> s.push(3) - >>> str(s) - '[1, 2, 3]' - >>> s.pop() - 3 - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Stack_(abstract_data_type) - """ - - def __new__(cls, implementation='array', **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if implementation == 'array': - items = kwargs.get('items', None) - dtype = kwargs.get('dtype', int) - if backend == Backend.CPP: - return _stack.ArrayStack(items, dtype) - - return ArrayStack(items, dtype) - if implementation == 'linked_list': - raise_if_backend_is_not_python(cls, backend) - - return LinkedListStack( - kwargs.get('items', None) - ) - raise NotImplementedError( - "%s hasn't been implemented yet."%(implementation)) - - @classmethod - def methods(cls): - return ['__new__'] - - def push(self, *args, **kwargs): - raise NotImplementedError( - "This is an abstract method.") - - def pop(self, *args, **kwargs): - raise NotImplementedError( - "This is an abstract method.") - - @property - def is_empty(self): - raise NotImplementedError( - "This is an abstract method.") - - @property - def peek(self): - raise NotImplementedError( - "This is an abstract method.") - -class ArrayStack(Stack): - - __slots__ = ['items'] - - def __new__(cls, items=None, dtype=NoneType, - **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - if items is None: - items = DynamicOneDimensionalArray(dtype, 0) - else: - items = DynamicOneDimensionalArray(dtype, items) - obj = object.__new__(cls) - obj.items = items - return obj - - @classmethod - def methods(cls): - return ['__new__', 'push', 'pop', 'is_emtpy', - 'peek', '__len__', '__str__'] - - def push(self, x): - if self.is_empty: - self.items._dtype = type(x) - self.items.append(x) - - def pop(self): - if self.is_empty: - raise IndexError("Stack is empty") - - top_element = dc(self.items[self.items._last_pos_filled]) - self.items.delete(self.items._last_pos_filled) - return top_element - - @property - def is_empty(self): - return self.items._last_pos_filled == -1 - - @property - def peek(self): - return 
self.items[self.items._last_pos_filled] - - def __len__(self): - return self.items._num - - def __str__(self): - """ - Used for printing. - """ - return str(self.items._data) - - -class LinkedListStack(Stack): - - __slots__ = ['stack'] - - def __new__(cls, items=None, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = object.__new__(cls) - obj.stack = SinglyLinkedList() - if items is None: - pass - elif type(items) in (list, tuple): - for x in items: - obj.push(x) - else: - raise TypeError("Expected type: list/tuple") - return obj - - @classmethod - def methods(cls): - return ['__new__', 'push', 'pop', 'is_emtpy', - 'peek', '__len__', '__str__'] - - def push(self, x): - self.stack.appendleft(x) - - def pop(self): - if self.is_empty: - raise IndexError("Stack is empty") - return self.stack.popleft() - - @property - def is_empty(self): - return self.__len__() == 0 - - @property - def peek(self): - return self.stack.head - - @property - def size(self): - return self.stack.size - - def __len__(self): - return self.stack.size - - def __str__(self): - elements = [] - current_node = self.peek - while current_node is not None: - elements.append(str(current_node)) - current_node = current_node.next - return str(elements[::-1]) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/__init__.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_binomial_trees.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_binomial_trees.py deleted file mode 100644 index 1275e9aec..000000000 --- 
a/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_binomial_trees.py +++ /dev/null @@ -1,17 +0,0 @@ -from pydatastructs.miscellaneous_data_structures.binomial_trees import BinomialTree -from pydatastructs.utils.raises_util import raises -from pydatastructs.utils.misc_util import BinomialTreeNode - -# only tests the corner cases -def test_BinomialTree(): - assert raises(TypeError, lambda: BinomialTree(1, 1)) - assert raises(TypeError, lambda: BinomialTree(None, 1.5)) - - bt = BinomialTree() - assert raises(TypeError, lambda: bt.add_sub_tree(None)) - bt1 = BinomialTree(BinomialTreeNode(1, 1), 0) - node = BinomialTreeNode(2, 2) - node.add_children(BinomialTreeNode(3, 3)) - bt2 = BinomialTree(node, 1) - assert raises(ValueError, lambda: bt1.add_sub_tree(bt2)) - assert bt1.is_empty is False diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_disjoint_set.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_disjoint_set.py deleted file mode 100644 index fcabd3112..000000000 --- a/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_disjoint_set.py +++ /dev/null @@ -1,70 +0,0 @@ -from pydatastructs import DisjointSetForest -from pydatastructs.utils.raises_util import raises - -def test_DisjointSetForest(): - - dst = DisjointSetForest() - for i in range(8): - dst.make_set(i+1) - - dst.union(1, 2) - dst.union(1, 5) - assert dst.find_size(2) == 3 - dst.union(1, 6) - dst.union(1, 8) - dst.union(3, 4) - assert dst.find_size(3) == 2 - - assert (dst.find_root(1) == dst.find_root(2) == - dst.find_root(5) == dst.find_root(6) == dst.find_root(8)) - assert dst.disjoint_sets() == [[1, 2, 5, 6, 8], [3, 4], [7]] - assert dst.find_root(3) == dst.find_root(4) - assert dst.find_root(7).key == 7 - - assert raises(KeyError, lambda: dst.find_root(9)) - assert raises(KeyError, 
lambda: dst.find_size(9)) - dst.union(3, 1) - assert dst.find_root(3).key == 1 - assert dst.find_root(5).key == 1 - dst.make_root(6) - assert dst.disjoint_sets() == [[1, 2, 3, 4, 5, 6, 8], [7]] - assert dst.find_root(3).key == 6 - assert dst.find_root(5).key == 6 - dst.make_root(5) - assert dst.find_root(1).key == 5 - assert dst.find_root(5).key == 5 - assert raises(KeyError, lambda: dst.make_root(9)) - - dst = DisjointSetForest() - for i in range(6): - dst.make_set(i) - assert dst.tree[2].size == 1 - dst.union(2, 3) - assert dst.tree[2].size == 2 - assert dst.tree[3].size == 1 - dst.union(1, 4) - dst.union(2, 4) - assert dst.disjoint_sets() == [[0], [1, 2, 3, 4], [5]] - # current tree - ############### - # 2 - # / \ - # 1 3 - # / - # 4 - ############### - assert dst.tree[2].size == 4 - assert dst.tree[1].size == 2 - assert dst.tree[3].size == dst.tree[4].size == 1 - dst.make_root(4) - # New tree - ############### - # 4 - # | - # 2 - # / \ - # 1 3 - ############### - assert dst.tree[4].size == 4 - assert dst.tree[2].size == 3 - assert dst.tree[1].size == dst.tree[3].size == 1 diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_multiset.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_multiset.py deleted file mode 100644 index fb412704a..000000000 --- a/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_multiset.py +++ /dev/null @@ -1,39 +0,0 @@ -from pydatastructs.miscellaneous_data_structures import Multiset - -def test_Multiset(): - - ms = Multiset() - ms.add(5) - ms.add(5) - ms.add(3) - ms.add(7) - assert len(ms) == 4 - assert 5 in ms - assert ms.count(5) == 2 - assert ms.count(3) == 1 - assert ms.count(-3) == 0 - assert not 4 in ms - ms.remove(5) - assert 5 in ms - assert ms.lower_bound(5) == 5 - assert ms.upper_bound(5) == 7 - - ms = Multiset(5, 3, 7, 2) - - assert len(ms) == 4 - assert 5 in 
ms - assert ms.count(7) == 1 - assert not 4 in ms - assert ms.lower_bound(3) == 3 - assert ms.upper_bound(3) == 5 - assert ms.upper_bound(7) is None - - ms.remove(5) - - assert len(ms) == 3 - assert not 5 in ms - - ms.add(4) - - assert 4 in ms - assert len(ms) == 4 diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_queue.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_queue.py deleted file mode 100644 index 81e1e996e..000000000 --- a/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_queue.py +++ /dev/null @@ -1,116 +0,0 @@ -from pydatastructs.miscellaneous_data_structures import Queue -from pydatastructs.miscellaneous_data_structures.queue import ( - ArrayQueue, LinkedListQueue, PriorityQueue, - LinkedListPriorityQueue) -from pydatastructs.utils.raises_util import raises -from pydatastructs.utils.misc_util import _check_type - -def test_Queue(): - q = Queue(implementation='array') - q1 = Queue() - assert _check_type(q, ArrayQueue) is True - assert _check_type(q1, ArrayQueue) is True - q2 = Queue(implementation='linked_list') - assert _check_type(q2, LinkedListQueue) is True - assert raises(NotImplementedError, lambda: Queue(implementation='')) - -def test_ArrayQueue(): - q1 = Queue() - raises(IndexError, lambda: q1.popleft()) - q1 = Queue(implementation='array', items=[0]) - q1.append(1) - q1.append(2) - q1.append(3) - assert str(q1) == '[0, 1, 2, 3]' - assert len(q1) == 4 - assert q1.popleft() == 0 - assert q1.popleft() == 1 - assert len(q1) == 2 - assert q1.popleft() == 2 - assert q1.popleft() == 3 - assert len(q1) == 0 - - q2 = Queue(implementation='array', items=[0], double_ended=True) - q2.append(1) - q2.append(2) - q2.appendleft(3) - assert str(q2) == '[3, 0, 1, 2]' - assert len(q2) == 4 - assert q2.popleft() == 3 - assert q2.pop() == 2 - assert len(q2) == 2 - assert q2.popleft() == 0 - 
assert q2.pop() == 1 - assert len(q2) == 0 - - q1 = Queue(implementation='array', items=[0]) - assert raises(NotImplementedError, lambda: q1.appendleft(2)) - - -def test_LinkedListQueue(): - q1 = Queue(implementation='linked_list') - q1.append(1) - assert raises(TypeError, lambda: Queue(implementation='linked_list', items={0, 1})) - q1 = Queue(implementation='linked_list', items = [0, 1]) - q1.append(2) - q1.append(3) - assert str(q1) == ("['(0, None)', '(1, None)', " - "'(2, None)', '(3, None)']") - assert len(q1) == 4 - assert q1.popleft().key == 0 - assert q1.popleft().key == 1 - assert len(q1) == 2 - assert q1.popleft().key == 2 - assert q1.popleft().key == 3 - assert len(q1) == 0 - raises(IndexError, lambda: q1.popleft()) - - q1 = Queue(implementation='linked_list',items=['a',None,type,{}]) - assert len(q1) == 4 - - front = q1.front - assert front.key == q1.popleft().key - - rear = q1.rear - for _ in range(len(q1)-1): - q1.popleft() - - assert rear.key == q1.popleft().key - - q1 = Queue(implementation='linked_list', double_ended=True) - q1.appendleft(1) - q2 = Queue(implementation='linked_list', items=[0, 1]) - assert raises(NotImplementedError, lambda: q2.appendleft(1)) - q1 = Queue(implementation='linked_list', items = [0, 1], double_ended=True) - q1.appendleft(2) - q1.append(3) - assert str(q1) == "['(2, None)', '(0, None)', '(1, None)', '(3, None)']" - assert len(q1) == 4 - assert q1.popleft().key == 2 - assert q1.pop().key == 3 - assert len(q1) == 2 - assert q1.pop().key == 1 - assert q1.popleft().key == 0 - assert len(q1) == 0 - assert raises(IndexError, lambda: q1.popleft()) - -def test_PriorityQueue(): - pq1 = PriorityQueue(implementation='linked_list') - assert _check_type(pq1, LinkedListPriorityQueue) is True - assert raises(NotImplementedError, lambda: Queue(implementation='')) - -def test_ImplementationPriorityQueue(): - impls = ['linked_list', 'binomial_heap', 'binary_heap'] - for impl in impls: - pq1 = PriorityQueue(implementation=impl) - 
pq1.push(1, 4) - pq1.push(2, 3) - pq1.push(3, 2) - assert pq1.peek.data == 3 - assert pq1.pop() == 3 - assert pq1.peek.data == 2 - assert pq1.pop() == 2 - assert pq1.peek.data == 1 - assert pq1.pop() == 1 - assert pq1.is_empty is True - assert raises(IndexError, lambda: pq1.peek) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_range_query_dynamic.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_range_query_dynamic.py deleted file mode 100644 index f655c546d..000000000 --- a/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_range_query_dynamic.py +++ /dev/null @@ -1,71 +0,0 @@ -from pydatastructs import ( - RangeQueryDynamic, minimum, - greatest_common_divisor, summation, - OneDimensionalArray) -from pydatastructs.utils.raises_util import raises -import random, math -from copy import deepcopy - -def _test_RangeQueryDynamic_common(func, gen_expected): - - array = OneDimensionalArray(int, []) - raises(ValueError, lambda: RangeQueryDynamic(array, func)) - - array = OneDimensionalArray(int, [1]) - rq = RangeQueryDynamic(array, func) - assert rq.query(0, 0) == 1 - raises(ValueError, lambda: rq.query(0, -1)) - raises(IndexError, lambda: rq.query(0, 1)) - - array_sizes = [3, 6, 12, 24, 48, 96] - random.seed(0) - for array_size in array_sizes: - inputs = [] - for i in range(array_size): - for j in range(i + 1, array_size): - inputs.append((i, j)) - - data_structures = ["array", "segment_tree"] - for ds in data_structures: - data = random.sample(range(-2*array_size, 2*array_size), array_size) - array = OneDimensionalArray(int, data) - rmq = RangeQueryDynamic(array, func, data_structure=ds) - for input in inputs: - assert rmq.query(input[0], input[1]) == gen_expected(data, input[0], input[1]) - - data_copy = deepcopy(data) - for _ in range(array_size//2): - index = random.randint(0, array_size - 1) - 
value = random.randint(0, 4 * array_size) - data_copy[index] = value - rmq.update(index, value) - - for input in inputs: - assert rmq.query(input[0], input[1]) == gen_expected(data_copy, input[0], input[1]) - -def test_RangeQueryDynamic_minimum(): - - def _gen_minimum_expected(data, i, j): - return min(data[i:j + 1]) - - _test_RangeQueryDynamic_common(minimum, _gen_minimum_expected) - -def test_RangeQueryDynamic_greatest_common_divisor(): - - def _gen_gcd_expected(data, i, j): - if j == i: - return data[i] - else: - expected_gcd = math.gcd(data[i], data[i + 1]) - for idx in range(i + 2, j + 1): - expected_gcd = math.gcd(expected_gcd, data[idx]) - return expected_gcd - - _test_RangeQueryDynamic_common(greatest_common_divisor, _gen_gcd_expected) - -def test_RangeQueryDynamic_summation(): - - def _gen_summation_expected(data, i, j): - return sum(data[i:j + 1]) - - return _test_RangeQueryDynamic_common(summation, _gen_summation_expected) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_range_query_static.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_range_query_static.py deleted file mode 100644 index e898653c9..000000000 --- a/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_range_query_static.py +++ /dev/null @@ -1,63 +0,0 @@ -from pydatastructs import ( - RangeQueryStatic, minimum, - greatest_common_divisor, summation, - OneDimensionalArray) -from pydatastructs.utils.raises_util import raises -import random, math - -def _test_RangeQueryStatic_common(func, gen_expected): - - array = OneDimensionalArray(int, []) - raises(ValueError, lambda: RangeQueryStatic(array, func)) - - array = OneDimensionalArray(int, [1]) - rq = RangeQueryStatic(array, func) - assert rq.query(0, 0) == 1 - raises(ValueError, lambda: rq.query(0, -1)) - raises(IndexError, lambda: rq.query(0, 1)) - - array_sizes = [3, 6, 
12, 24, 48, 96] - random.seed(0) - for array_size in array_sizes: - data = random.sample(range(-2*array_size, 2*array_size), array_size) - array = OneDimensionalArray(int, data) - - expected = [] - inputs = [] - for i in range(array_size): - for j in range(i + 1, array_size): - inputs.append((i, j)) - expected.append(gen_expected(data, i, j)) - - data_structures = ["array", "sparse_table"] - for ds in data_structures: - rmq = RangeQueryStatic(array, func, data_structure=ds) - for input, correct in zip(inputs, expected): - assert rmq.query(input[0], input[1]) == correct - -def test_RangeQueryStatic_minimum(): - - def _gen_minimum_expected(data, i, j): - return min(data[i:j + 1]) - - _test_RangeQueryStatic_common(minimum, _gen_minimum_expected) - -def test_RangeQueryStatic_greatest_common_divisor(): - - def _gen_gcd_expected(data, i, j): - if j == i: - return data[i] - else: - expected_gcd = math.gcd(data[i], data[i + 1]) - for idx in range(i + 2, j + 1): - expected_gcd = math.gcd(expected_gcd, data[idx]) - return expected_gcd - - _test_RangeQueryStatic_common(greatest_common_divisor, _gen_gcd_expected) - -def test_RangeQueryStatic_summation(): - - def _gen_summation_expected(data, i, j): - return sum(data[i:j + 1]) - - return _test_RangeQueryStatic_common(summation, _gen_summation_expected) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_stack.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_stack.py deleted file mode 100644 index 2d9d08b82..000000000 --- a/build-install/usr/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_stack.py +++ /dev/null @@ -1,77 +0,0 @@ -from pydatastructs.miscellaneous_data_structures import Stack -from pydatastructs.miscellaneous_data_structures.stack import ArrayStack, LinkedListStack -from pydatastructs.miscellaneous_data_structures._backend.cpp import _stack -from 
pydatastructs.utils.raises_util import raises -from pydatastructs.utils.misc_util import _check_type, Backend - - -def test_Stack(): - s = Stack(implementation='array') - s1 = Stack() - assert _check_type(s, ArrayStack) is True - assert _check_type(s1, ArrayStack) is True - s2 = Stack(implementation='linked_list') - assert _check_type(s2, LinkedListStack) is True - assert raises(NotImplementedError, lambda: Stack(implementation='')) - - s3 = Stack(backend=Backend.CPP) - assert _check_type(s3, _stack.ArrayStack) is True - s4 = Stack(implementation="array", backend=Backend.CPP) - assert _check_type(s4, _stack.ArrayStack) is True - -def test_ArrayStack(): - s = Stack(implementation='array') - s.push(1) - s.push(2) - s.push(3) - assert s.peek == 3 - assert str(s) == '[1, 2, 3]' - assert s.pop() == 3 - assert s.pop() == 2 - assert s.pop() == 1 - assert s.is_empty is True - assert raises(IndexError, lambda : s.pop()) - _s = Stack(items=[1, 2, 3]) - assert str(_s) == '[1, 2, 3]' - assert len(_s) == 3 - - # Cpp test - s1 = Stack(implementation="array", backend=Backend.CPP) - s1.push(1) - s1.push(2) - s1.push(3) - assert s1.peek == 3 - assert str(s1) == "['1', '2', '3']" - assert s1.pop() == 3 - assert s1.pop() == 2 - assert s1.pop() == 1 - assert s1.is_empty is True - assert raises(IndexError, lambda : s1.pop()) - _s1 = Stack(items=[1, 2, 3], backend=Backend.CPP) - assert str(_s1) == "['1', '2', '3']" - assert len(_s1) == 3 - -def test_LinkedListStack(): - s = Stack(implementation='linked_list') - s.push(1) - s.push(2) - s.push(3) - assert s.peek.key == 3 - assert str(s) == ("['(1, None)', '(2, None)', '(3, None)']") - assert s.pop().key == 3 - assert s.pop().key == 2 - assert s.pop().key == 1 - assert s.is_empty is True - assert raises(IndexError, lambda : s.pop()) - assert str(s) == '[]' - _s = Stack(implementation='linked_list',items=[1, 2, 3]) - assert str(_s) == "['(1, None)', '(2, None)', '(3, None)']" - assert len(_s) == 3 - - s = 
Stack(implementation='linked_list',items=['a',None,type,{}]) - assert len(s) == 4 - assert s.size == 4 - - peek = s.peek - assert peek.key == s.pop().key - assert raises(TypeError, lambda: Stack(implementation='linked_list', items={0, 1})) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/strings/__init__.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/strings/__init__.py deleted file mode 100644 index 33930b426..000000000 --- a/build-install/usr/lib/python3.12/site-packages/pydatastructs/strings/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -__all__ = [] - -from . import ( - trie, - algorithms -) - -from .trie import ( - Trie -) - -__all__.extend(trie.__all__) - -from .algorithms import ( - find -) - -__all__.extend(algorithms.__all__) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/strings/algorithms.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/strings/algorithms.py deleted file mode 100644 index 1e26b9411..000000000 --- a/build-install/usr/lib/python3.12/site-packages/pydatastructs/strings/algorithms.py +++ /dev/null @@ -1,247 +0,0 @@ -from pydatastructs.linear_data_structures.arrays import ( - DynamicOneDimensionalArray, OneDimensionalArray) -from pydatastructs.utils.misc_util import ( - Backend, raise_if_backend_is_not_python) - -__all__ = [ - 'find' -] - -PRIME_NUMBER, MOD = 257, 1000000007 - -def find(text, query, algorithm, **kwargs): - """ - Finds occurrence of a query string within the text string. - - Parameters - ========== - - text: str - The string on which query is to be performed. - query: str - The string which is to be searched in the text. - algorithm: str - The algorithm which should be used for - searching. - Currently the following algorithms are - supported, - - 'kmp' -> Knuth-Morris-Pratt as given in [1]. - - 'rabin_karp' -> Rabin–Karp algorithm as given in [2]. - - 'boyer_moore' -> Boyer-Moore algorithm as given in [3]. 
- - 'z_function' -> Z-function algorithm as given in [4]. - - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - DynamicOneDimensionalArray - An array of starting positions of the portions - in the text which match with the given query. - - Examples - ======== - - >>> from pydatastructs.strings.algorithms import find - >>> text = "abcdefabcabe" - >>> pos = find(text, "ab", algorithm="kmp") - >>> str(pos) - "['0', '6', '9']" - >>> pos = find(text, "abc", algorithm="kmp") - >>> str(pos) - "['0', '6']" - >>> pos = find(text, "abe", algorithm="kmp") - >>> str(pos) - "['9']" - >>> pos = find(text, "abed", algorithm="kmp") - >>> str(pos) - '[]' - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Knuth%E2%80%93Morris%E2%80%93Pratt_algorithm - .. [2] https://en.wikipedia.org/wiki/Rabin%E2%80%93Karp_algorithm - .. [3] https://en.wikipedia.org/wiki/Boyer%E2%80%93Moore_string-search_algorithm - .. [4] https://usaco.guide/CPH.pdf#page=257 - """ - raise_if_backend_is_not_python( - find, kwargs.get('backend', Backend.PYTHON)) - import pydatastructs.strings.algorithms as algorithms - func = "_" + algorithm - if not hasattr(algorithms, func): - raise NotImplementedError( - "Currently %s algoithm for searching strings " - "inside a text isn't implemented yet." 
- %(algorithm)) - return getattr(algorithms, func)(text, query) - - -def _knuth_morris_pratt(text, query): - if len(text) == 0 or len(query) == 0: - return DynamicOneDimensionalArray(int, 0) - kmp_table = _build_kmp_table(query) - return _do_match(text, query, kmp_table) - -_kmp = _knuth_morris_pratt - -def _build_kmp_table(query): - pos, cnd = 1, 0 - kmp_table = OneDimensionalArray(int, len(query) + 1) - - kmp_table[0] = -1 - - while pos < len(query): - if query[pos] == query[cnd]: - kmp_table[pos] = kmp_table[cnd] - else: - kmp_table[pos] = cnd - while cnd >= 0 and query[pos] != query[cnd]: - cnd = kmp_table[cnd] - pos, cnd = pos + 1, cnd + 1 - kmp_table[pos] = cnd - - return kmp_table - - - -def _do_match(string, query, kmp_table): - j, k = 0, 0 - positions = DynamicOneDimensionalArray(int, 0) - - while j < len(string): - if query[k] == string[j]: - j = j + 1 - k = k + 1 - if k == len(query): - positions.append(j - k) - k = kmp_table[k] - else: - k = kmp_table[k] - if k < 0: - j = j + 1 - k = k + 1 - - return positions - -def _p_pow(length, p=PRIME_NUMBER, m=MOD): - p_pow = OneDimensionalArray(int, length) - p_pow[0] = 1 - for i in range(1, length): - p_pow[i] = (p_pow[i-1] * p) % m - return p_pow - -def _hash_str(string, p=PRIME_NUMBER, m=MOD): - hash_value = 0 - p_pow = _p_pow(len(string), p, m) - for i in range(len(string)): - hash_value = (hash_value + ord(string[i]) * p_pow[i]) % m - return hash_value - -def _rabin_karp(text, query): - t = len(text) - q = len(query) - positions = DynamicOneDimensionalArray(int, 0) - if q == 0 or t == 0: - return positions - - query_hash = _hash_str(query) - text_hash = OneDimensionalArray(int, t + 1) - text_hash.fill(0) - p_pow = _p_pow(t) - - for i in range(t): - text_hash[i+1] = (text_hash[i] + ord(text[i]) * p_pow[i]) % MOD - for i in range(t - q + 1): - curr_hash = (text_hash[i + q] + MOD - text_hash[i]) % MOD - if curr_hash == (query_hash * p_pow[i]) % MOD: - positions.append(i) - - return positions - -def 
_boyer_moore(text, query): - positions = DynamicOneDimensionalArray(int, 0) - text_length, query_length = len(text), len(query) - - if text_length == 0 or query_length == 0: - return positions - - # Preprocessing Step - bad_match_table = dict() - for i in range(query_length): - bad_match_table[query[i]] = i - - shift = 0 - # Matching procedure - while shift <= text_length-query_length: - j = query_length - 1 - while j >= 0 and query[j] == text[shift + j]: - j -= 1 - if j < 0: - positions.append(shift) - if shift + query_length < text_length: - if text[shift + query_length] in bad_match_table: - shift += query_length - bad_match_table[text[shift + query_length]] - else: - shift += query_length + 1 - else: - shift += 1 - else: - letter_pos = text[shift + j] - if letter_pos in bad_match_table: - shift += max(1, j - bad_match_table[letter_pos]) - else: - shift += max(1, j + 1) - return positions - -def _z_vector(text, query): - string = text - if query != "": - string = query + str("$") + text - - z_fct = OneDimensionalArray(int, len(string)) - z_fct.fill(0) - - curr_pos = 1 - seg_left = 0 - seg_right = 0 - - for curr_pos in range(1,len(string)): - if curr_pos <= seg_right: - z_fct[curr_pos] = min(seg_right - curr_pos + 1, z_fct[curr_pos - seg_left]) - - while curr_pos + z_fct[curr_pos] < len(string) and \ - string[z_fct[curr_pos]] == string[curr_pos + z_fct[curr_pos]]: - z_fct[curr_pos] += 1 - - if curr_pos + z_fct[curr_pos] - 1 > seg_right: - seg_left = curr_pos - seg_right = curr_pos + z_fct[curr_pos] - 1 - - final_z_fct = DynamicOneDimensionalArray(int, 0) - start_index = 0 - if query != "": - start_index = len(query) + 1 - for pos in range(start_index, len(string)): - final_z_fct.append(z_fct[pos]) - - return final_z_fct - -def _z_function(text, query): - positions = DynamicOneDimensionalArray(int, 0) - if len(text) == 0 or len(query) == 0: - return positions - - fct = _z_vector(text, query) - for pos in range(len(fct)): - if fct[pos] == len(query): - 
positions.append(pos) - - return positions diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/strings/tests/__init__.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/strings/tests/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/strings/tests/test_algorithms.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/strings/tests/test_algorithms.py deleted file mode 100644 index 37622cf80..000000000 --- a/build-install/usr/lib/python3.12/site-packages/pydatastructs/strings/tests/test_algorithms.py +++ /dev/null @@ -1,76 +0,0 @@ -from pydatastructs.strings import find - -import random, string - -def test_kmp(): - _test_common_string_matching('kmp') - -def test_rka(): - _test_common_string_matching('rabin_karp') - -def test_bm(): - _test_common_string_matching('boyer_moore') - -def test_zf(): - _test_common_string_matching('z_function') - -def _test_common_string_matching(algorithm): - true_text_pattern_dictionary = { - "Knuth-Morris-Pratt": "-Morris-", - "abcabcabcabdabcabdabcabca": "abcabdabcabca", - "aefcdfaecdaefaefcdaefeaefcdcdeae": "aefcdaefeaefcd", - "aaaaaaaa": "aaa", - "fullstringmatch": "fullstringmatch", - "z-function": "z-fun" - } - for test_case_key in true_text_pattern_dictionary: - text = test_case_key - query = true_text_pattern_dictionary[test_case_key] - positions = find(text, query, algorithm) - for i in range(positions._last_pos_filled): - p = positions[i] - assert text[p:p + len(query)] == query - - false_text_pattern_dictionary = { - "Knuth-Morris-Pratt": "-Pratt-", - "abcabcabcabdabcabdabcabca": "qwertyuiopzxcvbnm", - "aefcdfaecdaefaefcdaefeaefcdcdeae": "cdaefaefe", - "fullstringmatch": "fullstrinmatch", - "z-function": "function-", - "abc": "", - "": "abc" - } - - for test_case_key in false_text_pattern_dictionary: - text = test_case_key - query = false_text_pattern_dictionary[test_case_key] - positions = 
find(text, query, algorithm) - assert positions.size == 0 - - random.seed(1000) - - def gen_random_string(length): - ascii = string.ascii_uppercase - digits = string.digits - return ''.join(random.choices(ascii + digits, k=length)) - - for _ in range(100): - query = gen_random_string(random.randint(3, 10)) - num_times = random.randint(1, 10) - freq = 0 - text = "" - while freq < num_times: - rand_str = gen_random_string(random.randint(5, 10)) - if rand_str != query: - freq += 1 - text += query + rand_str + query - positions = find(text, query, algorithm) - assert positions._num == num_times * 2 - for i in range(positions._last_pos_filled): - p = positions[i] - assert text[p:p + len(query)] == query - - text = gen_random_string(len(query)) - if text != query: - positions = find(text, query, algorithm) - assert positions.size == 0 diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/strings/tests/test_trie.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/strings/tests/test_trie.py deleted file mode 100644 index 059104708..000000000 --- a/build-install/usr/lib/python3.12/site-packages/pydatastructs/strings/tests/test_trie.py +++ /dev/null @@ -1,49 +0,0 @@ -from pydatastructs import Trie - -def test_Trie(): - - strings = ["A", "to", "tea", "ted", "ten", "i", - "in", "inn", "Amfn", "snbr"] - trie = Trie() - for string in strings: - trie.insert(string) - - prefix_strings = ["te", "t", "Am", "snb"] - - for string in strings: - assert trie.is_inserted(string) - - for string in strings[::-1]: - assert trie.is_inserted(string) - - for string in prefix_strings: - assert trie.is_present(string) - assert not trie.is_inserted(string) - - assert sorted(trie.strings_with_prefix("t")) == ['tea', 'ted', 'ten', 'to'] - assert sorted(trie.strings_with_prefix("te")) == ["tea", "ted", "ten"] - assert trie.strings_with_prefix("i") == ["i", "in", "inn"] - assert trie.strings_with_prefix("a") == [] - - remove_order = ["to", "tea", "ted", "ten", "inn", 
"in", "A"] - - assert trie.delete("z") is None - - for string in remove_order: - trie.delete(string) - for present in strings: - if present == string: - assert not trie.is_inserted(present) - else: - assert trie.is_present(present) - assert trie.is_inserted(present) - strings.remove(string) - - prefix_strings_1 = ["dict", "dicts", "dicts_lists_tuples"] - trie_1 = Trie() - - for i in range(len(prefix_strings_1)): - trie_1.insert(prefix_strings_1[i]) - for j in range(i + 1): - assert trie_1.is_inserted(prefix_strings_1[j]) - assert trie_1.is_present(prefix_strings_1[j]) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/strings/trie.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/strings/trie.py deleted file mode 100644 index cdf6666cf..000000000 --- a/build-install/usr/lib/python3.12/site-packages/pydatastructs/strings/trie.py +++ /dev/null @@ -1,201 +0,0 @@ -from pydatastructs.utils.misc_util import ( - TrieNode, Backend, - raise_if_backend_is_not_python) -from collections import deque -import copy - -__all__ = [ - 'Trie' -] - -Stack = Queue = deque - -class Trie(object): - """ - Represents the trie data structure for storing strings. - - Parameters - ========== - - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs import Trie - >>> trie = Trie() - >>> trie.insert("a") - >>> trie.insert("aa") - >>> trie.strings_with_prefix("a") - ['a', 'aa'] - >>> trie.is_present("aa") - True - >>> trie.delete("aa") - True - >>> trie.is_present("aa") - False - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Trie - """ - - __slots__ = ['root'] - - @classmethod - def methods(cls): - return ['__new__', 'insert', 'is_present', 'delete', - 'strings_with_prefix'] - - def __new__(cls, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = object.__new__(cls) - obj.root = TrieNode() - return obj - - def insert(self, string: str) -> None: - """ - Inserts the given string into the trie. - - Parameters - ========== - - string: str - - Returns - ======= - - None - """ - walk = self.root - for char in string: - if walk.get_child(char) is None: - newNode = TrieNode(char) - walk.add_child(newNode) - walk = newNode - else: - walk = walk.get_child(char) - walk.is_terminal = True - - def is_present(self, string: str) -> bool: - """ - Checks if the given string is present as a prefix in the trie. - - Parameters - ========== - - string: str - - Returns - ======= - - True if the given string is present as a prefix; - False in all other cases. - """ - walk = self.root - for char in string: - if walk.get_child(char) is None: - return False - walk = walk.get_child(char) - return True - - def is_inserted(self, string: str) -> bool: - """ - Checks if the given string was inserted in the trie. - - Parameters - ========== - - string: str - - Returns - ======= - - True if the given string was inserted in trie; - False in all other cases. - """ - walk = self.root - for char in string: - if walk.get_child(char) is None: - return False - walk = walk.get_child(char) - return walk.is_terminal - - def delete(self, string: str) -> bool: - """ - Deletes the given string from the trie. - - Parameters - ========== - - string: str - - Returns - ======= - - True if successfully deleted; - None if the string is not present in the trie. 
- """ - path = [] - walk = self.root - size = len(string) - for i in range(size): - char = string[i] - path.append(walk) - if walk.get_child(char) is None: - return None - walk = walk.get_child(char) - path.append(walk) - i = len(path) - 1 - path[i].is_terminal = False - while not path[i]._children and i >= 1: - path[i-1].remove_child(path[i].char) - i -= 1 - if path[i].is_terminal: - return True - return True - - def strings_with_prefix(self, string: str) -> list: - """ - Generates a list of all strings with the given prefix. - - Parameters - ========== - - string: str - - Returns - ======= - - strings: list - The list of strings with the given prefix. - """ - - def _collect(prefix: str, node: TrieNode, strings: list) -> str: - TrieNode_stack = Stack() - TrieNode_stack.append((node, prefix)) - while TrieNode_stack: - walk, curr_prefix = TrieNode_stack.pop() - if walk.is_terminal: - strings.append(curr_prefix + walk.char) - for child in walk._children: - TrieNode_stack.append((walk.get_child(child), curr_prefix + walk.char)) - - strings = [] - prefix = "" - walk = self.root - for char in string: - walk = walk.get_child(char) - if walk is None: - return strings - prefix += char - if walk.is_terminal: - strings.append(walk.char) - for child in walk._children: - _collect(prefix, walk.get_child(child), strings) - return strings diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/__init__.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/__init__.py deleted file mode 100644 index 892730122..000000000 --- a/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/__init__.py +++ /dev/null @@ -1,40 +0,0 @@ -__all__ = [] - -from . 
import ( - binary_trees, - m_ary_trees, - space_partitioning_trees, - heaps, -) - -from .binary_trees import ( - BinaryTree, - BinarySearchTree, - BinaryTreeTraversal, - AVLTree, - BinaryIndexedTree, - CartesianTree, - Treap, - SplayTree, - RedBlackTree -) -__all__.extend(binary_trees.__all__) - -from .m_ary_trees import ( - MAryTreeNode, MAryTree -) - -__all__.extend(m_ary_trees.__all__) - -from .space_partitioning_trees import ( - OneDimensionalSegmentTree -) -__all__.extend(space_partitioning_trees.__all__) - -from .heaps import ( - BinaryHeap, - TernaryHeap, - DHeap, - BinomialHeap -) -__all__.extend(heaps.__all__) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/_backend/__init__.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/_backend/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/binary_trees.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/binary_trees.py deleted file mode 100644 index 48446d1d4..000000000 --- a/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/binary_trees.py +++ /dev/null @@ -1,1888 +0,0 @@ -import random -from collections import deque as Queue -from pydatastructs.utils import TreeNode, CartesianTreeNode, RedBlackTreeNode -from pydatastructs.miscellaneous_data_structures import Stack -from pydatastructs.linear_data_structures import OneDimensionalArray -from pydatastructs.linear_data_structures.arrays import ArrayForTrees -from pydatastructs.utils.misc_util import Backend -from pydatastructs.trees._backend.cpp import _trees - -__all__ = [ - 'AVLTree', - 'BinaryTree', - 'BinarySearchTree', - 'BinaryTreeTraversal', - 'BinaryIndexedTree', - 'CartesianTree', - 'Treap', - 'SplayTree', - 'RedBlackTree' -] - -class BinaryTree(object): - """ - Abstract binary tree. 
- - Parameters - ========== - - key - Required if tree is to be instantiated with - root otherwise not needed. - root_data - Optional, the root node of the binary tree. - If not of type TreeNode, it will consider - root as data and a new root node will - be created. - comp: lambda/function - Optional, A lambda function which will be used - for comparison of keys. Should return a - bool value. By default it implements less - than operator. - is_order_statistic: bool - Set it to True, if you want to use the - order statistic features of the tree. - backend: pydatastructs.Backend - The backend to be used. Available backends: Python and C++ - Optional, by default, the Python backend is used. For faster execution, use the C++ backend. - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Binary_tree - """ - - __slots__ = ['root_idx', 'comparator', 'tree', 'size', - 'is_order_statistic'] - - def __new__(cls, key=None, root_data=None, comp=None, - is_order_statistic=False, **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.CPP: - if comp is None: - comp = lambda key1, key2: key1 < key2 - return _trees.BinaryTree(key, root_data, comp, is_order_statistic, **kwargs) # If any argument is not given, then it is passed as None, except for comp - obj = object.__new__(cls) - if key is None and root_data is not None: - raise ValueError('Key required.') - key = None if root_data is None else key - root = TreeNode(key, root_data) - root.is_root = True - obj.root_idx = 0 - obj.tree, obj.size = ArrayForTrees(TreeNode, [root]), 1 - obj.comparator = lambda key1, key2: key1 < key2 \ - if comp is None else comp - obj.is_order_statistic = is_order_statistic - return obj - - @classmethod - def methods(cls): - return ['__new__', '__str__'] - - def insert(self, key, data=None): - """ - Inserts data by the passed key using iterative - algorithm. - - Parameters - ========== - - key - The key for comparison. - - data - The data to be inserted. 
- - Returns - ======= - - None - """ - raise NotImplementedError("This is an abstract method.") - - def delete(self, key, **kwargs): - """ - Deletes the data with the passed key - using iterative algorithm. - - Parameters - ========== - - key - The key of the node which is - to be deleted. - balancing_info: bool - Optional, by default, False - The information needed for updating - the tree is returned if this parameter - is set to True. It is not meant for - user facing APIs. - - Returns - ======= - - True - If the node is deleted successfully. - None - If the node to be deleted doesn't exists. - - Note - ==== - - The node is deleted means that the connection to that - node are removed but the it is still in three. This - is being done to keep the complexity of deletion, O(logn). - """ - raise NotImplementedError("This is an abstract method.") - - def search(self, key, **kwargs): - """ - Searches for the data in the binary search tree - using iterative algorithm. - - Parameters - ========== - - key - The key for searching. - parent: bool - If true then returns index of the - parent of the node with the passed - key. - By default, False - - Returns - ======= - - int - If the node with the passed key is - in the tree. - tuple - The index of the searched node and - the index of the parent of that node. - None - In all other cases. - """ - raise NotImplementedError("This is an abstract method.") - - - def __str__(self): - to_be_printed = ['' for i in range(self.tree._last_pos_filled + 1)] - for i in range(self.tree._last_pos_filled + 1): - if self.tree[i] is not None: - node = self.tree[i] - to_be_printed[i] = (node.left, node.key, node.data, node.right) - return str(to_be_printed) - -class BinarySearchTree(BinaryTree): - """ - Represents binary search trees. 
- - Examples - ======== - - >>> from pydatastructs.trees import BinarySearchTree as BST - >>> b = BST() - >>> b.insert(1, 1) - >>> b.insert(2, 2) - >>> child = b.tree[b.root_idx].right - >>> b.tree[child].data - 2 - >>> b.search(1) - 0 - >>> b.search(-1) is None - True - >>> b.delete(1) is True - True - >>> b.search(1) is None - True - >>> b.delete(2) is True - True - >>> b.search(2) is None - True - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Binary_search_tree - - See Also - ======== - - pydatastructs.trees.binary_tree.BinaryTree - """ - - @classmethod - def methods(cls): - return ['insert', 'search', 'delete', 'select', - 'rank', 'lowest_common_ancestor'] - - left_size = lambda self, node: self.tree[node.left].size \ - if node.left is not None else 0 - right_size = lambda self, node: self.tree[node.right].size \ - if node.right is not None else 0 - def __new__(cls, key=None, root_data=None, comp=None, - is_order_statistic=False, **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.CPP: - if comp is None: - comp = lambda key1, key2: key1 < key2 - return _trees.BinarySearchTree(key, root_data, comp, is_order_statistic, **kwargs) # If any argument is not given, then it is passed as None, except for comp - return super().__new__(cls, key, root_data, comp, is_order_statistic, **kwargs) - - def _update_size(self, start_idx): - if self.is_order_statistic: - walk = start_idx - while walk is not None: - self.tree[walk].size = ( - self.left_size(self.tree[walk]) + - self.right_size(self.tree[walk]) + 1) - walk = self.tree[walk].parent - - def insert(self, key, data=None): - res = self.search(key) - if res is not None: - self.tree[res].data = data - return None - walk = self.root_idx - if self.tree[walk].key is None: - self.tree[walk].key = key - self.tree[walk].data = data - return None - new_node, prev_node, flag = TreeNode(key, data), self.root_idx, True - while flag: - if not self.comparator(key, 
self.tree[walk].key): - if self.tree[walk].right is None: - new_node.parent = prev_node - self.tree.append(new_node) - self.tree[walk].right = self.size - self.size += 1 - flag = False - prev_node = walk = self.tree[walk].right - else: - if self.tree[walk].left is None: - new_node.parent = prev_node - self.tree.append(new_node) - self.tree[walk].left = self.size - self.size += 1 - flag = False - prev_node = walk = self.tree[walk].left - self._update_size(walk) - - def search(self, key, **kwargs): - ret_parent = kwargs.get('parent', False) - parent = None - walk = self.root_idx - if self.tree[walk].key is None: - return None - while walk is not None: - if self.tree[walk].key == key: - break - parent = walk - if self.comparator(key, self.tree[walk].key): - walk = self.tree[walk].left - else: - walk = self.tree[walk].right - return (walk, parent) if ret_parent else walk - - def _bound_helper(self, node_idx, bound_key, is_upper=False): - if node_idx is None: - return None - if self.tree[node_idx].key is None: - return None - - if self.tree[node_idx].key == bound_key: - if not is_upper: - return self.tree[node_idx].key - else: - return self._bound_helper(self.tree[node_idx].right, - bound_key, is_upper) - - if self.comparator(self.tree[node_idx].key, bound_key): - return self._bound_helper(self.tree[node_idx].right, - bound_key, is_upper) - else: - res_bound = self._bound_helper(self.tree[node_idx].left, - bound_key, is_upper) - return res_bound if res_bound is not None else self.tree[node_idx].key - - - def lower_bound(self, key, **kwargs): - """ - Finds the lower bound of the given key in the tree - - Parameters - ========== - - key - The key for comparison - - Examples - ======== - - >>> from pydatastructs.trees import BinarySearchTree as BST - >>> b = BST() - >>> b.insert(10, 10) - >>> b.insert(18, 18) - >>> b.insert(7, 7) - >>> b.lower_bound(9) - 10 - >>> b.lower_bound(7) - 7 - >>> b.lower_bound(20) is None - True - - Returns - ======= - - value - The lower bound 
of the given key. - Returns None if the value doesn't exist - """ - return self._bound_helper(self.root_idx, key) - - - def upper_bound(self, key, **kwargs): - """ - Finds the upper bound of the given key in the tree - - Parameters - ========== - - key - The key for comparison - - Examples - ======== - - >>> from pydatastructs.trees import BinarySearchTree as BST - >>> b = BST() - >>> b.insert(10, 10) - >>> b.insert(18, 18) - >>> b.insert(7, 7) - >>> b.upper_bound(9) - 10 - >>> b.upper_bound(7) - 10 - >>> b.upper_bound(20) is None - True - - Returns - ======= - - value - The upper bound of the given key. - Returns None if the value doesn't exist - """ - return self._bound_helper(self.root_idx, key, True) - - - def delete(self, key, **kwargs): - (walk, parent) = self.search(key, parent=True) - a = None - if walk is None: - return None - if self.tree[walk].left is None and \ - self.tree[walk].right is None: - if parent is None: - self.tree[self.root_idx].data = None - self.tree[self.root_idx].key = None - else: - if self.tree[parent].left == walk: - self.tree[parent].left = None - else: - self.tree[parent].right = None - a = parent - par_key, root_key = (self.tree[parent].key, - self.tree[self.root_idx].key) - new_indices = self.tree.delete(walk) - if new_indices is not None: - a = new_indices[par_key] - self.root_idx = new_indices[root_key] - self._update_size(a) - - elif self.tree[walk].left is not None and \ - self.tree[walk].right is not None: - twalk = self.tree[walk].right - par = walk - flag = False - while self.tree[twalk].left is not None: - flag = True - par = twalk - twalk = self.tree[twalk].left - self.tree[walk].data = self.tree[twalk].data - self.tree[walk].key = self.tree[twalk].key - if flag: - self.tree[par].left = self.tree[twalk].right - else: - self.tree[par].right = self.tree[twalk].right - if self.tree[twalk].right is not None: - self.tree[self.tree[twalk].right].parent = par - if twalk is not None: - a = par - par_key, root_key = 
(self.tree[par].key, - self.tree[self.root_idx].key) - new_indices = self.tree.delete(twalk) - if new_indices is not None: - a = new_indices[par_key] - self.root_idx = new_indices[root_key] - self._update_size(a) - - else: - if self.tree[walk].left is not None: - child = self.tree[walk].left - else: - child = self.tree[walk].right - if parent is None: - self.tree[self.root_idx].left = self.tree[child].left - self.tree[self.root_idx].right = self.tree[child].right - self.tree[self.root_idx].data = self.tree[child].data - self.tree[self.root_idx].key = self.tree[child].key - self.tree[self.root_idx].parent = None - root_key = self.tree[self.root_idx].key - new_indices = self.tree.delete(child) - if new_indices is not None: - self.root_idx = new_indices[root_key] - else: - if self.tree[parent].left == walk: - self.tree[parent].left = child - else: - self.tree[parent].right = child - self.tree[child].parent = parent - a = parent - par_key, root_key = (self.tree[parent].key, - self.tree[self.root_idx].key) - new_indices = self.tree.delete(walk) - if new_indices is not None: - parent = new_indices[par_key] - self.tree[child].parent = new_indices[par_key] - a = new_indices[par_key] - self.root_idx = new_indices[root_key] - self._update_size(a) - - if kwargs.get("balancing_info", False) is not False: - return a - return True - - def select(self, i): - """ - Finds the i-th smallest node in the tree. - - Parameters - ========== - - i: int - A positive integer - - Returns - ======= - - n: TreeNode - The node with the i-th smallest key - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Order_statistic_tree - """ - i -= 1 # The algorithm is based on zero indexing - if i < 0: - raise ValueError("Expected a positive integer, got %d"%(i + 1)) - if i >= self.tree._num: - raise ValueError("%d is greater than the size of the " - "tree which is, %d"%(i + 1, self.tree._num)) - walk = self.root_idx - while walk is not None: - l = self.left_size(self.tree[walk]) - if i == l: - return self.tree[walk] - left_walk = self.tree[walk].left - right_walk = self.tree[walk].right - if left_walk is None and right_walk is None: - raise IndexError("The traversal is terminated " - "due to no child nodes ahead.") - if i < l: - if left_walk is not None and \ - self.comparator(self.tree[left_walk].key, - self.tree[walk].key): - walk = left_walk - else: - walk = right_walk - else: - if right_walk is not None and \ - not self.comparator(self.tree[right_walk].key, - self.tree[walk].key): - walk = right_walk - else: - walk = left_walk - i -= (l + 1) - - def rank(self, x): - """ - Finds the rank of the given node, i.e. - its index in the sorted list of nodes - of the tree. - - Parameters - ========== - - x: key - The key of the node whose rank is to be found out. - """ - walk = self.search(x) - if walk is None: - return None - r = self.left_size(self.tree[walk]) + 1 - while self.tree[walk].key != self.tree[self.root_idx].key: - p = self.tree[walk].parent - if walk == self.tree[p].right: - r += self.left_size(self.tree[p]) + 1 - walk = p - return r - - def _simple_path(self, key, root): - """ - Utility funtion to find the simple path between root and node. 
- - Parameters - ========== - - key: Node.key - Key of the node to be searched - - Returns - ======= - - path: list - """ - - stack = Stack() - stack.push(root) - path = [] - node_idx = -1 - - while not stack.is_empty: - node = stack.pop() - if self.tree[node].key == key: - node_idx = node - break - if self.tree[node].left: - stack.push(self.tree[node].left) - if self.tree[node].right: - stack.push(self.tree[node].right) - - if node_idx == -1: - return path - - while node_idx != 0: - path.append(node_idx) - node_idx = self.tree[node_idx].parent - path.append(0) - path.reverse() - - return path - - def _lca_1(self, j, k): - root = self.root_idx - path1 = self._simple_path(j, root) - path2 = self._simple_path(k, root) - if not path1 or not path2: - raise ValueError("One of two path doesn't exists. See %s, %s" - %(path1, path2)) - - n, m = len(path1), len(path2) - i = j = 0 - while i < n and j < m: - if path1[i] != path2[j]: - return self.tree[path1[i - 1]].key - i += 1 - j += 1 - if path1 < path2: - return self.tree[path1[-1]].key - return self.tree[path2[-1]].key - - def _lca_2(self, j, k): - curr_root = self.root_idx - u, v = self.search(j), self.search(k) - if (u is None) or (v is None): - raise ValueError("One of the nodes with key %s " - "or %s doesn't exits"%(j, k)) - u_left = self.comparator(self.tree[u].key, \ - self.tree[curr_root].key) - v_left = self.comparator(self.tree[v].key, \ - self.tree[curr_root].key) - - while not (u_left ^ v_left): - if u_left and v_left: - curr_root = self.tree[curr_root].left - else: - curr_root = self.tree[curr_root].right - - if curr_root == u or curr_root == v: - if curr_root is None: - return None - return self.tree[curr_root].key - u_left = self.comparator(self.tree[u].key, \ - self.tree[curr_root].key) - v_left = self.comparator(self.tree[v].key, \ - self.tree[curr_root].key) - - if curr_root is None: - return curr_root - return self.tree[curr_root].key - - def lowest_common_ancestor(self, j, k, algorithm=1): - - """ - 
Computes the lowest common ancestor of two nodes. - - Parameters - ========== - - j: Node.key - Key of first node - - k: Node.key - Key of second node - - algorithm: int - The algorithm to be used for computing the - lowest common ancestor. - Optional, by default uses algorithm 1. - - 1 -> Determines the lowest common ancestor by finding - the first intersection of the paths from v and w - to the root. - - 2 -> Modifed version of the algorithm given in the - following publication, - D. Harel. A linear time algorithm for the - lowest common ancestors problem. In 21s - Annual Symposium On Foundations of - Computer Science, pages 308-319, 1980. - - Returns - ======= - - Node.key - The key of the lowest common ancestor in the tree. - if both the nodes are present in the tree. - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Lowest_common_ancestor - - .. [2] https://pdfs.semanticscholar.org/e75b/386cc554214aa0ebd6bd6dbdd0e490da3739.pdf - - """ - return getattr(self, "_lca_"+str(algorithm))(j, k) - -class SelfBalancingBinaryTree(BinarySearchTree): - """ - Represents Base class for all rotation based balancing trees like AVL tree, Red Black tree, Splay Tree. 
- """ - def __new__(cls, key=None, root_data=None, comp=None, - is_order_statistic=False, **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.CPP: - if comp is None: - comp = lambda key1, key2: key1 < key2 - return _trees.SelfBalancingBinaryTree(key, root_data, comp, is_order_statistic, **kwargs) # If any argument is not given, then it is passed as None, except for comp - return super().__new__(cls, key, root_data, comp, is_order_statistic, **kwargs) - - def _right_rotate(self, j, k): - y = self.tree[k].right - if y is not None: - self.tree[y].parent = j - self.tree[j].left = y - self.tree[k].parent = self.tree[j].parent - if self.tree[k].parent is not None: - if self.tree[self.tree[k].parent].left == j: - self.tree[self.tree[k].parent].left = k - else: - self.tree[self.tree[k].parent].right = k - self.tree[j].parent = k - self.tree[k].right = j - kp = self.tree[k].parent - if kp is None: - self.root_idx = k - - def _left_right_rotate(self, j, k): - i = self.tree[k].right - v, w = self.tree[i].left, self.tree[i].right - self.tree[k].right, self.tree[j].left = v, w - if v is not None: - self.tree[v].parent = k - if w is not None: - self.tree[w].parent = j - self.tree[i].left, self.tree[i].right, self.tree[i].parent = \ - k, j, self.tree[j].parent - self.tree[k].parent, self.tree[j].parent = i, i - ip = self.tree[i].parent - if ip is not None: - if self.tree[ip].left == j: - self.tree[ip].left = i - else: - self.tree[ip].right = i - else: - self.root_idx = i - - def _right_left_rotate(self, j, k): - i = self.tree[k].left - v, w = self.tree[i].left, self.tree[i].right - self.tree[k].left, self.tree[j].right = w, v - if v is not None: - self.tree[v].parent = j - if w is not None: - self.tree[w].parent = k - self.tree[i].right, self.tree[i].left, self.tree[i].parent = \ - k, j, self.tree[j].parent - self.tree[k].parent, self.tree[j].parent = i, i - ip = self.tree[i].parent - if ip is not None: - if self.tree[ip].left == j: - 
self.tree[ip].left = i - else: - self.tree[ip].right = i - else: - self.root_idx = i - - def _left_rotate(self, j, k): - y = self.tree[k].left - if y is not None: - self.tree[y].parent = j - self.tree[j].right = y - self.tree[k].parent = self.tree[j].parent - if self.tree[k].parent is not None: - if self.tree[self.tree[k].parent].left == j: - self.tree[self.tree[k].parent].left = k - else: - self.tree[self.tree[k].parent].right = k - self.tree[j].parent = k - self.tree[k].left = j - kp = self.tree[k].parent - if kp is None: - self.root_idx = k - -class CartesianTree(SelfBalancingBinaryTree): - """ - Represents cartesian trees. - - Examples - ======== - - >>> from pydatastructs.trees import CartesianTree as CT - >>> c = CT() - >>> c.insert(1, 4, 1) - >>> c.insert(2, 3, 2) - >>> child = c.tree[c.root_idx].left - >>> c.tree[child].data - 1 - >>> c.search(1) - 0 - >>> c.search(-1) is None - True - >>> c.delete(1) is True - True - >>> c.search(1) is None - True - >>> c.delete(2) is True - True - >>> c.search(2) is None - True - - References - ========== - - .. 
[1] https://www.cs.princeton.edu/courses/archive/spr09/cos423/Lectures/geo-st.pdf - - See Also - ======== - - pydatastructs.trees.binary_trees.SelfBalancingBinaryTree - """ - def __new__(cls, key=None, root_data=None, comp=None, - is_order_statistic=False, **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.CPP: - if comp is None: - comp = lambda key1, key2: key1 < key2 - return _trees.CartesianTree(key, root_data, comp, is_order_statistic, **kwargs) # If any argument is not given, then it is passed as None, except for comp - return super().__new__(cls, key, root_data, comp, is_order_statistic, **kwargs) - - @classmethod - def methods(cls): - return ['__new__', '__str__', 'insert', 'delete'] - - def _bubble_up(self, node_idx): - node = self.tree[node_idx] - parent_idx = self.tree[node_idx].parent - parent = self.tree[parent_idx] - while (node.parent is not None) and (parent.priority > node.priority): - if parent.right == node_idx: - self._left_rotate(parent_idx, node_idx) - else: - self._right_rotate(parent_idx, node_idx) - node = self.tree[node_idx] - parent_idx = self.tree[node_idx].parent - if parent_idx is not None: - parent = self.tree[parent_idx] - if node.parent is None: - self.tree[node_idx].is_root = True - - def _trickle_down(self, node_idx): - node = self.tree[node_idx] - while node.left is not None or node.right is not None: - if node.left is None: - self._left_rotate(node_idx, self.tree[node_idx].right) - elif node.right is None: - self._right_rotate(node_idx, self.tree[node_idx].left) - elif self.tree[node.left].priority < self.tree[node.right].priority: - self._right_rotate(node_idx, self.tree[node_idx].left) - else: - self._left_rotate(node_idx, self.tree[node_idx].right) - node = self.tree[node_idx] - - def insert(self, key, priority, data=None): - super(CartesianTree, self).insert(key, data) - node_idx = super(CartesianTree, self).search(key) - node = self.tree[node_idx] - new_node = CartesianTreeNode(key, 
priority, data) - new_node.parent = node.parent - new_node.left = node.left - new_node.right = node.right - self.tree[node_idx] = new_node - if node.is_root: - self.tree[node_idx].is_root = True - else: - self._bubble_up(node_idx) - - def delete(self, key, **kwargs): - balancing_info = kwargs.get('balancing_info', False) - node_idx = super(CartesianTree, self).search(key) - if node_idx is not None: - self._trickle_down(node_idx) - return super(CartesianTree, self).delete(key, balancing_info = balancing_info) - - def __str__(self): - to_be_printed = ['' for i in range(self.tree._last_pos_filled + 1)] - for i in range(self.tree._last_pos_filled + 1): - if self.tree[i] is not None: - node = self.tree[i] - to_be_printed[i] = (node.left, node.key, node.priority, node.data, node.right) - return str(to_be_printed) - -class Treap(CartesianTree): - """ - Represents treaps. - - Examples - ======== - - >>> from pydatastructs.trees import Treap as T - >>> t = T() - >>> t.insert(1, 1) - >>> t.insert(2, 2) - >>> t.search(1) - 0 - >>> t.search(-1) is None - True - >>> t.delete(1) is True - True - >>> t.search(1) is None - True - >>> t.delete(2) is True - True - >>> t.search(2) is None - True - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Treap - - """ - def __new__(cls, key=None, root_data=None, comp=None, - is_order_statistic=False, **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.CPP: - if comp is None: - comp = lambda key1, key2: key1 < key2 - return _trees.Treap(key, root_data, comp, is_order_statistic, **kwargs) # If any argument is not given, then it is passed as None, except for comp - return super().__new__(cls, key, root_data, comp, is_order_statistic, **kwargs) - - @classmethod - def methods(cls): - return ['__new__', 'insert'] - - def insert(self, key, data=None): - priority = random.random() - super(Treap, self).insert(key, priority, data) - -class AVLTree(SelfBalancingBinaryTree): - """ - Represents AVL trees. 
- - References - ========== - - .. [1] https://courses.cs.washington.edu/courses/cse373/06sp/handouts/lecture12.pdf - .. [2] https://en.wikipedia.org/wiki/AVL_tree - .. [3] http://faculty.cs.niu.edu/~freedman/340/340notes/340avl2.htm - - See Also - ======== - - pydatastructs.trees.binary_trees.BinaryTree - """ - - def __new__(cls, key=None, root_data=None, comp=None, - is_order_statistic=False, **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.CPP: - if comp is None: - comp = lambda key1, key2: key1 < key2 - return _trees.AVLTree(key, root_data, comp, is_order_statistic, **kwargs) # If any argument is not given, then it is passed as None, except for comp - return super().__new__(cls, key, root_data, comp, is_order_statistic, **kwargs) - - @classmethod - def methods(cls): - return ['__new__', 'set_tree', 'insert', 'delete'] - - left_height = lambda self, node: self.tree[node.left].height \ - if node.left is not None else -1 - right_height = lambda self, node: self.tree[node.right].height \ - if node.right is not None else -1 - balance_factor = lambda self, node: self.right_height(node) - \ - self.left_height(node) - - def set_tree(self, arr): - self.tree = arr - - def _right_rotate(self, j, k): - super(AVLTree, self)._right_rotate(j, k) - self.tree[j].height = max(self.left_height(self.tree[j]), - self.right_height(self.tree[j])) + 1 - if self.is_order_statistic: - self.tree[j].size = (self.left_size(self.tree[j]) + - self.right_size(self.tree[j]) + 1) - - def _left_right_rotate(self, j, k): - super(AVLTree, self)._left_right_rotate(j, k) - self.tree[j].height = max(self.left_height(self.tree[j]), - self.right_height(self.tree[j])) + 1 - self.tree[k].height = max(self.left_height(self.tree[k]), - self.right_height(self.tree[k])) + 1 - if self.is_order_statistic: - self.tree[j].size = (self.left_size(self.tree[j]) + - self.right_size(self.tree[j]) + 1) - self.tree[k].size = (self.left_size(self.tree[k]) + - 
self.right_size(self.tree[k]) + 1) - - def _right_left_rotate(self, j, k): - super(AVLTree, self)._right_left_rotate(j, k) - self.tree[j].height = max(self.left_height(self.tree[j]), - self.right_height(self.tree[j])) + 1 - self.tree[k].height = max(self.left_height(self.tree[k]), - self.right_height(self.tree[k])) + 1 - if self.is_order_statistic: - self.tree[j].size = (self.left_size(self.tree[j]) + - self.right_size(self.tree[j]) + 1) - self.tree[k].size = (self.left_size(self.tree[k]) + - self.right_size(self.tree[k]) + 1) - - def _left_rotate(self, j, k): - super(AVLTree, self)._left_rotate(j, k) - self.tree[j].height = max(self.left_height(self.tree[j]), - self.right_height(self.tree[j])) + 1 - self.tree[k].height = max(self.left_height(self.tree[k]), - self.right_height(self.tree[k])) + 1 - if self.is_order_statistic: - self.tree[j].size = (self.left_size(self.tree[j]) + - self.right_size(self.tree[j]) + 1) - - def _balance_insertion(self, curr, last): - walk = last - path = Queue() - path.append(curr), path.append(last) - while walk is not None: - self.tree[walk].height = max(self.left_height(self.tree[walk]), - self.right_height(self.tree[walk])) + 1 - if self.is_order_statistic: - self.tree[walk].size = (self.left_size(self.tree[walk]) + - self.right_size(self.tree[walk]) + 1) - last = path.popleft() - last2last = path.popleft() - if self.balance_factor(self.tree[walk]) not in (1, 0, -1): - l = self.tree[walk].left - if l is not None and l == last and self.tree[l].left == last2last: - self._right_rotate(walk, last) - r = self.tree[walk].right - if r is not None and r == last and self.tree[r].right == last2last: - self._left_rotate(walk, last) - if l is not None and l == last and self.tree[l].right == last2last: - self._left_right_rotate(walk, last) - if r is not None and r == last and self.tree[r].left == last2last: - self._right_left_rotate(walk, last) - path.append(walk), path.append(last) - walk = self.tree[walk].parent - - def insert(self, key, 
data=None): - super(AVLTree, self).insert(key, data) - self._balance_insertion(self.size - 1, self.tree[self.size-1].parent) - - def _balance_deletion(self, start_idx, key): - walk = start_idx - while walk is not None: - self.tree[walk].height = max(self.left_height(self.tree[walk]), - self.right_height(self.tree[walk])) + 1 - if self.is_order_statistic: - self.tree[walk].size = (self.left_size(self.tree[walk]) + - self.right_size(self.tree[walk]) + 1) - if self.balance_factor(self.tree[walk]) not in (1, 0, -1): - if self.balance_factor(self.tree[walk]) < 0: - b = self.tree[walk].left - if self.balance_factor(self.tree[b]) <= 0: - self._right_rotate(walk, b) - else: - self._left_right_rotate(walk, b) - else: - b = self.tree[walk].right - if self.balance_factor(self.tree[b]) >= 0: - self._left_rotate(walk, b) - else: - self._right_left_rotate(walk, b) - walk = self.tree[walk].parent - - - def delete(self, key, **kwargs): - a = super(AVLTree, self).delete(key, balancing_info=True) - self._balance_deletion(a, key) - return True - -class SplayTree(SelfBalancingBinaryTree): - """ - Represents Splay Trees. - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Splay_tree - - """ - - def __new__(cls, key=None, root_data=None, comp=None, - is_order_statistic=False, **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.CPP: - if comp is None: - comp = lambda key1, key2: key1 < key2 - return _trees.SplayTree(key, root_data, comp, is_order_statistic, **kwargs) # If any argument is not given, then it is passed as None, except for comp - return super().__new__(cls, key, root_data, comp, is_order_statistic, **kwargs) - - @classmethod - def methods(cls): - return ['__new__', 'insert', 'delete', 'join', 'split'] - - def _zig(self, x, p): - if self.tree[p].left == x: - super(SplayTree, self)._right_rotate(p, x) - else: - super(SplayTree, self)._left_rotate(p, x) - - def _zig_zig(self, x, p): - super(SplayTree, self)._right_rotate(self.tree[p].parent, p) - super(SplayTree, self)._right_rotate(p, x) - - def _zig_zag(self, p): - super(SplayTree, self)._left_right_rotate(self.tree[p].parent, p) - - def _zag_zag(self, x, p): - super(SplayTree, self)._left_rotate(self.tree[p].parent, p) - super(SplayTree, self)._left_rotate(p, x) - - def _zag_zig(self, p): - super(SplayTree, self)._right_left_rotate(self.tree[p].parent, p) - - def splay(self, x, p): - while self.tree[x].parent is not None: - if self.tree[p].parent is None: - self._zig(x, p) - elif self.tree[p].left == x and \ - self.tree[self.tree[p].parent].left == p: - self._zig_zig(x, p) - elif self.tree[p].right == x and \ - self.tree[self.tree[p].parent].right == p: - self._zag_zag(x, p) - elif self.tree[p].left == x and \ - self.tree[self.tree[p].parent].right == p: - self._zag_zig(p) - else: - self._zig_zag(p) - p = self.tree[x].parent - - def insert(self, key, x): - super(SelfBalancingBinaryTree, self).insert(key, x) - e, p = super(SelfBalancingBinaryTree, self).search(key, parent=True) - self.tree[self.size-1].parent = p - self.splay(e, p) - - def delete(self, x): - e, p = super(SelfBalancingBinaryTree, 
self).search(x, parent=True) - if e is None: - return - self.splay(e, p) - status = super(SelfBalancingBinaryTree, self).delete(x) - return status - - def join(self, other): - """ - Joins two trees current and other such that all elements of - the current splay tree are smaller than the elements of the other tree. - - Parameters - ========== - - other: SplayTree - SplayTree which needs to be joined with the self tree. - - """ - maxm = self.root_idx - while self.tree[maxm].right is not None: - maxm = self.tree[maxm].right - minm = other.root_idx - while other.tree[minm].left is not None: - minm = other.tree[minm].left - if not self.comparator(self.tree[maxm].key, - other.tree[minm].key): - raise ValueError("Elements of %s aren't less " - "than that of %s"%(self, other)) - self.splay(maxm, self.tree[maxm].parent) - idx_update = self.tree._size - for node in other.tree: - if node is not None: - node_copy = TreeNode(node.key, node.data) - if node.left is not None: - node_copy.left = node.left + idx_update - if node.right is not None: - node_copy.right = node.right + idx_update - self.tree.append(node_copy) - else: - self.tree.append(node) - self.tree[self.root_idx].right = \ - other.root_idx + idx_update - - def split(self, x): - """ - Splits current splay tree into two trees such that one tree contains nodes - with key less than or equal to x and the other tree containing - nodes with key greater than x. - - Parameters - ========== - - x: key - Key of the element on the basis of which split is performed. - - Returns - ======= - - other: SplayTree - SplayTree containing elements with key greater than x. 
- - """ - e, p = super(SelfBalancingBinaryTree, self).search(x, parent=True) - if e is None: - return - self.splay(e, p) - other = SplayTree(None, None) - if self.tree[self.root_idx].right is not None: - traverse = BinaryTreeTraversal(self) - elements = traverse.depth_first_search(order='pre_order', node=self.tree[self.root_idx].right) - for i in range(len(elements)): - super(SelfBalancingBinaryTree, other).insert(elements[i].key, elements[i].data) - for j in range(len(elements) - 1, -1, -1): - e, p = super(SelfBalancingBinaryTree, self).search(elements[j].key, parent=True) - self.tree[e] = None - self.tree[self.root_idx].right = None - return other - -class RedBlackTree(SelfBalancingBinaryTree): - """ - Represents Red Black trees. - - Examples - ======== - - >>> from pydatastructs.trees import RedBlackTree as RB - >>> b = RB() - >>> b.insert(1, 1) - >>> b.insert(2, 2) - >>> child = b.tree[b.root_idx].right - >>> b.tree[child].data - 2 - >>> b.search(1) - 0 - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Red%E2%80%93black_tree - - See Also - ======== - - pydatastructs.trees.binary_trees.SelfBalancingBinaryTree - """ - - def __new__(cls, key=None, root_data=None, comp=None, - is_order_statistic=False, **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.CPP: - if comp is None: - comp = lambda key1, key2: key1 < key2 - return _trees.RedBlackTree(key, root_data, comp, is_order_statistic, **kwargs) # If any argument is not given, then it is passed as None, except for comp - return super().__new__(cls, key, root_data, comp, is_order_statistic, **kwargs) - - @classmethod - def methods(cls): - return ['__new__', 'insert', 'delete'] - - def _get_parent(self, node_idx): - return self.tree[node_idx].parent - - def _get_grand_parent(self, node_idx): - parent_idx=self._get_parent(node_idx) - return self.tree[parent_idx].parent - - def _get_sibling(self, node_idx): - parent_idx=self._get_parent(node_idx) - if parent_idx is None: - return None - node = self.tree[parent_idx] - if node_idx==node.left: - sibling_idx=node.right - return sibling_idx - else: - sibling_idx=node.left - return sibling_idx - - def _get_uncle(self, node_idx): - parent_idx=self._get_parent(node_idx) - return self._get_sibling(parent_idx) - - def _is_onleft(self, node_idx): - parent = self._get_parent(node_idx) - if self.tree[parent].left == node_idx: - return True - return False - - def _is_onright(self, node_idx): - if self._is_onleft(node_idx) is False: - return True - return False - - def __fix_insert(self, node_idx): - while self._get_parent(node_idx) is not None and \ - self.tree[self._get_parent(node_idx)].color == 1 and self.tree[node_idx].color==1: - parent_idx=self._get_parent(node_idx) - grand_parent_idx=self._get_grand_parent(node_idx) - uncle_idx = self._get_uncle(node_idx) - if uncle_idx is not None and self.tree[uncle_idx].color == 1: - self.tree[uncle_idx].color = 0 - self.tree[parent_idx].color = 0 - 
self.tree[grand_parent_idx].color = 1 - node_idx= grand_parent_idx - else: - self.tree[self.root_idx].is_root=False - if self._is_onright(parent_idx): - if self._is_onleft(node_idx): - self._right_rotate(parent_idx, node_idx) - node_idx=parent_idx - parent_idx=self._get_parent(node_idx) - node_idx=parent_idx - parent_idx=self._get_parent(node_idx) - self._left_rotate(parent_idx, node_idx) - elif self._is_onleft(parent_idx): - if self._is_onright(node_idx): - self._left_rotate(parent_idx, node_idx) - node_idx=parent_idx - parent_idx=self._get_parent(node_idx) - node_idx=parent_idx - parent_idx=self._get_parent(node_idx) - self._right_rotate(parent_idx, node_idx) - self.tree[node_idx].color = 0 - self.tree[parent_idx].color = 1 - self.tree[self.root_idx].is_root=True - if self.tree[node_idx].is_root: - break - self.tree[self.root_idx].color=0 - - def insert(self, key, data=None): - super(RedBlackTree, self).insert(key, data) - node_idx = super(RedBlackTree, self).search(key) - node = self.tree[node_idx] - new_node = RedBlackTreeNode(key, data) - new_node.parent = node.parent - new_node.left = node.left - new_node.right = node.right - self.tree[node_idx] = new_node - if node.is_root: - self.tree[node_idx].is_root = True - self.tree[node_idx].color=0 - elif self.tree[self.tree[node_idx].parent].color==1: - self.__fix_insert(node_idx) - - def _find_predecessor(self, node_idx): - while self.tree[node_idx].right is not None: - node_idx = self.tree[node_idx].right - return node_idx - - def _transplant_values(self, node_idx1, node_idx2): - parent = self.tree[node_idx1].parent - if self.tree[node_idx1].is_root and self._has_one_child(node_idx1): - self.tree[self.root_idx].key = self.tree[node_idx2].key - self.tree[self.root_idx].data = self.tree[node_idx2].data - self.tree[self.root_idx].left = self.tree[node_idx2].left - self.tree[self.root_idx].right = self.tree[node_idx2].right - self.tree[node_idx1].parent = None - return self.tree[self.root_idx].key - else: - 
self.tree[node_idx1].key = self.tree[node_idx2].key - self.tree[node_idx1].data = self.tree[node_idx2].data - - def _has_one_child(self, node_idx): - if self._is_leaf(node_idx) is False and self._has_two_child(node_idx) is False: - return True - return False - - def _is_leaf(self, node_idx): - if self.tree[node_idx].left is None and self.tree[node_idx].right is None: - return True - return False - - def _has_two_child(self, node_idx): - if self.tree[node_idx].left is not None and self.tree[node_idx].right is not None: - return True - return False - - def __has_red_child(self, node_idx): - left_idx = self.tree[node_idx].left - right_idx = self.tree[node_idx].right - if (left_idx is not None and self.tree[left_idx].color == 1) or \ - (right_idx is not None and self.tree[right_idx].color == 1): - return True - return False - - def _replace_node(self, node_idx): - if self._is_leaf(node_idx): - return None - elif self._has_one_child(node_idx): - if self.tree[node_idx].left is not None: - child = self.tree[node_idx].left - else: - child = self.tree[node_idx].right - return child - else: - return self._find_predecessor(self.tree[node_idx].left) - - def __walk1_walk_isblack(self, color, node_idx1): - if (node_idx1 is None or self.tree[node_idx1].color == 0) and (color == 0): - return True - return False - - def __left_left_siblingcase(self, node_idx): - left_idx = self.tree[node_idx].left - parent = self._get_parent(node_idx) - parent_color = self.tree[parent].color - self.tree[left_idx].color = self.tree[node_idx].color - self.tree[node_idx].color = parent_color - self._right_rotate(parent, node_idx) - - def __right_left_siblingcase(self, node_idx): - left_idx = self.tree[node_idx].left - parent = self._get_parent(node_idx) - parent_color = self.tree[parent].color - self.tree[left_idx].color = parent_color - self._right_rotate(node_idx, left_idx) - child = self._get_parent(node_idx) - self._left_rotate(parent, child) - - def __left_right_siblingcase(self, node_idx): - 
right_idx = self.tree[node_idx].right - parent = self._get_parent(node_idx) - parent_color = self.tree[parent].color - self.tree[right_idx].color = parent_color - self._left_rotate(node_idx, right_idx) - child = self._get_parent(node_idx) - self._right_rotate(parent, child) - - def __right_right_siblingcase(self, node_idx): - right_idx = self.tree[node_idx].right - parent = self._get_parent(node_idx) - parent_color = self.tree[parent].color - self.tree[right_idx].color = self.tree[node_idx].color - self.tree[node_idx].color = parent_color - self._left_rotate(parent, node_idx) - - def __fix_deletion(self, node_idx): - node = self.tree[node_idx] - color = node.color - while node_idx!= self.root_idx and color == 0: - sibling_idx = self._get_sibling(node_idx) - parent_idx = self._get_parent(node_idx) - if sibling_idx is None: - node_idx = parent_idx - continue - else: - if self.tree[sibling_idx].color == 1: - self.tree[self.root_idx].is_root = False - self.tree[parent_idx].color = 1 - self.tree[sibling_idx].color = 0 - if self._is_onleft(sibling_idx): - self._right_rotate(parent_idx, sibling_idx) - else: - self._left_rotate(parent_idx, sibling_idx) - self.tree[self.root_idx].is_root = True - continue - else: - if self.__has_red_child(sibling_idx): - self.tree[self.root_idx].is_root = False - left_idx = self.tree[sibling_idx].left - if self.tree[sibling_idx].left is not None and \ - self.tree[left_idx].color == 1: - if self._is_onleft(sibling_idx): - self.__left_left_siblingcase(sibling_idx) - else: - self.__right_left_siblingcase(sibling_idx) - else: - if self._is_onleft(sibling_idx): - self.__left_right_siblingcase(sibling_idx) - else: - self.__right_right_siblingcase(sibling_idx) - self.tree[self.root_idx].is_root = True - self.tree[parent_idx].color = 0 - else: - self.tree[sibling_idx].color = 1 - if self.tree[parent_idx].color == 0: - node_idx = parent_idx - continue - else: - self.tree[parent_idx].color = 0 - color = 1 - - def _remove_node(self, node_idx): - 
parent = self._get_parent(node_idx) - a = parent - if self._is_leaf(node_idx): - par_key, root_key = (self.tree[parent].key, self.tree[self.root_idx].key) - new_indices = self.tree.delete(node_idx) - if new_indices is not None: - a = new_indices[par_key] - self.root_idx = new_indices[root_key] - elif self._has_one_child(node_idx): - child = self._replace_node(node_idx) - parent = self._get_parent(node_idx) - par_key, root_key = (self.tree[parent].key, self.tree[self.root_idx].key) - new_indices = self.tree.delete(node_idx) - self._update_size(a) - - def _delete_root(self, node_idx, node_idx1): - if self._is_leaf(node_idx): - self.tree[self.root_idx].data = None - self.tree[self.root_idx].key = None - elif self._has_one_child(node_idx): - root_key = self._transplant_values(node_idx, node_idx1) - new_indices = self.tree.delete(node_idx1) - if new_indices is not None: - self.root_idx = new_indices[root_key] - - def __leaf_case(self, node_idx, node_idx1): - walk = node_idx - walk1 = node_idx1 - parent = self._get_parent(node_idx) - color = self.tree[walk].color - if parent is None: - self._delete_root(walk, walk1) - else: - if self.__walk1_walk_isblack(color, walk1): - self.__fix_deletion(walk) - else: - sibling_idx = self._get_sibling(walk) - if sibling_idx is not None: - self.tree[sibling_idx].color = 1 - if self._is_onleft(walk): - self.tree[parent].left = None - else: - self.tree[parent].right = None - self._remove_node(walk) - - def __one_child_case(self, node_idx, node_idx1): - walk = node_idx - walk1 = node_idx1 - walk_original_color = self.tree[walk].color - parent = self._get_parent(node_idx) - if parent is None: - self._delete_root(walk, walk1) - else: - if self._is_onleft(walk): - self.tree[parent].left = walk1 - else: - self.tree[parent].right = walk1 - self.tree[walk1].parent = parent - a = self._remove_node(walk) - if self.__walk1_walk_isblack(walk_original_color, walk1): - self.__fix_deletion(walk1) - else: - self.tree[walk1].color = 0 - - def 
__two_child_case(self, node_idx): - walk = node_idx - successor = self._replace_node(walk) - self._transplant_values(walk, successor) - walk = successor - walk1 = self._replace_node(walk) - return walk, walk1 - - def delete(self, key, **kwargs): - walk = super(RedBlackTree, self).search(key) - if walk is not None: - walk1 = self._replace_node(walk) - if self._has_two_child(walk): - walk, walk1 = self.__two_child_case(walk) - if self._is_leaf(walk): - self.__leaf_case(walk, walk1) - elif self._has_one_child(walk): - self.__one_child_case(walk, walk1) - return True - else: - return None - -class BinaryTreeTraversal(object): - """ - Represents the traversals possible in - a binary tree. - - Parameters - ========== - - tree: BinaryTree - The binary tree for whose traversal - is to be done. - backend: pydatastructs.Backend - The backend to be used. Available backends: Python and C++ - Optional, by default, the Python backend is used. For faster execution, use the C++ backend. - - Traversals - ========== - - - Depth First Search - In Order, Post Order, Pre Order Out Order - - - Breadth First Search - - Examples - ======== - - >>> from pydatastructs import BinarySearchTree as BST - >>> from pydatastructs import BinaryTreeTraversal as BTT - >>> b = BST(2, 2) - >>> b.insert(1, 1) - >>> b.insert(3, 3) - >>> trav = BTT(b) - >>> dfs = trav.depth_first_search() - >>> [str(n) for n in dfs] - ['(None, 1, 1, None)', '(1, 2, 2, 2)', '(None, 3, 3, None)'] - >>> bfs = trav.breadth_first_search() - >>> [str(n) for n in bfs] - ['(1, 2, 2, 2)', '(None, 1, 1, None)', '(None, 3, 3, None)'] - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Tree_traversal - """ - - @classmethod - def methods(cls): - return ['__new__', 'depth_first_search', - 'breadth_first_search'] - - __slots__ = ['tree'] - - def __new__(cls, tree, **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.CPP: - return _trees.BinaryTreeTraversal(tree, **kwargs) - if not isinstance(tree, BinaryTree): - raise TypeError("%s is not a binary tree"%(tree)) - obj = object.__new__(cls) - obj.tree = tree - return obj - - def _pre_order(self, node): - """ - Utility method for computing pre-order - of a binary tree using iterative algorithm. - """ - visit = [] - tree, size = self.tree.tree, self.tree.size - s = Stack() - s.push(node) - while not s.is_empty: - node = s.pop() - visit.append(tree[node]) - if tree[node].right is not None: - s.push(tree[node].right) - if tree[node].left is not None: - s.push(tree[node].left) - return visit - - def _in_order(self, node): - """ - Utility method for computing in-order - of a binary tree using iterative algorithm. - """ - visit = [] - tree, size = self.tree.tree, self.tree.size - s = Stack() - while not s.is_empty or node is not None: - if node is not None: - s.push(node) - node = tree[node].left - else: - node = s.pop() - visit.append(tree[node]) - node = tree[node].right - return visit - - def _post_order(self, node): - """ - Utility method for computing post-order - of a binary tree using iterative algorithm. 
- """ - visit = [] - tree, size = self.tree.tree, self.tree.size - s = Stack() - s.push(node) - last = OneDimensionalArray(int, size) - last.fill(False) - while not s.is_empty: - node = s.peek - l, r = tree[node].left, tree[node].right - cl, cr = l is None or last[l], r is None or last[r] - if cl and cr: - s.pop() - visit.append(tree[node]) - last[node] = True - continue - if not cr: - s.push(r) - if not cl: - s.push(l) - return visit - - def _out_order(self, node): - """ - Utility method for computing out-order - of a binary tree using iterative algorithm. - """ - return reversed(self._in_order(node)) - - def depth_first_search(self, order='in_order', node=None): - """ - Computes the depth first search traversal of the binary - trees. - - Parameters - ========== - - order : str - One of the strings, 'in_order', 'post_order', - 'pre_order', 'out_order'. - By default, it is set to, 'in_order'. - node : int - The index of the node from where the traversal - is to be instantiated. - - Returns - ======= - - list - Each element is of type 'TreeNode'. - """ - if node is None: - node = self.tree.root_idx - if order not in ('in_order', 'post_order', 'pre_order', 'out_order'): - raise NotImplementedError( - "%s order is not implemented yet." - "We only support `in_order`, `post_order`, " - "`pre_order` and `out_order` traversals.") - return getattr(self, '_' + order)(node) - - def breadth_first_search(self, node=None, strategy='queue'): - """ - Computes the breadth first search traversal of a binary tree. - - Parameters - ========== - - node : int - The index of the node from where the traversal has to be instantiated. - By default, set to, root index. - - strategy : str - The strategy using which the computation has to happen. - By default, it is set 'queue'. - - Returns - ======= - - list - Each element of the list is of type `TreeNode`. 
- """ - # TODO: IMPLEMENT ITERATIVE DEEPENING-DEPTH FIRST SEARCH STRATEGY - strategies = ('queue',) - if strategy not in strategies: - raise NotImplementedError( - "%s startegy is not implemented yet"%(strategy)) - if node is None: - node = self.tree.root_idx - q, visit, tree = Queue(), [], self.tree.tree - q.append(node) - while len(q) > 0: - node = q.popleft() - visit.append(tree[node]) - if tree[node].left is not None: - q.append(tree[node].left) - if tree[node].right is not None: - q.append(tree[node].right) - return visit - -class BinaryIndexedTree(object): - """ - Represents binary indexed trees - a.k.a fenwick trees. - - Parameters - ========== - - array: list/tuple - The array whose elements are to be - considered for the queries. - backend: pydatastructs.Backend - The backend to be used. Available backends: Python and C++ - Optional, by default, the Python backend is used. For faster execution, use the C++ backend. - - Examples - ======== - - >>> from pydatastructs import BinaryIndexedTree - >>> bit = BinaryIndexedTree([1, 2, 3]) - >>> bit.get_sum(0, 2) - 6 - >>> bit.update(0, 100) - >>> bit.get_sum(0, 2) - 105 - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Fenwick_tree - """ - - __slots__ = ['tree', 'array', 'flag'] - - def __new__(cls, array, **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.CPP: - return _trees.BinaryIndexedTree(type(array[0]), array, **kwargs) - obj = object.__new__(cls) - obj.array = OneDimensionalArray(type(array[0]), array) - obj.tree = [0] * (obj.array._size + 2) - obj.flag = [0] * (obj.array._size) - for index in range(obj.array._size): - obj.update(index, array[index]) - return obj - - @classmethod - def methods(cls): - return ['update', 'get_prefix_sum', - 'get_sum'] - - def update(self, index, value): - """ - Updates value at the given index. - - Parameters - ========== - - index: int - Index of element to be updated. - - value - The value to be inserted. 
- """ - _index, _value = index, value - if self.flag[index] == 0: - self.flag[index] = 1 - index += 1 - while index < self.array._size + 1: - self.tree[index] += value - index = index + (index & (-index)) - else: - value = value - self.array[index] - index += 1 - while index < self.array._size + 1: - self.tree[index] += value - index = index + (index & (-index)) - self.array[_index] = _value - - def get_prefix_sum(self, index): - """ - Computes sum of elements from index 0 to given index. - - Parameters - ========== - - index: int - Index till which sum has to be calculated. - - Returns - ======= - - sum: int - The required sum. - """ - index += 1 - sum = 0 - while index > 0: - sum += self.tree[index] - index = index - (index & (-index)) - return sum - - def get_sum(self, left_index, right_index): - """ - Get sum of elements from left index to right index. - - Parameters - ========== - - left_index: int - Starting index from where sum has to be computed. - - right_index: int - Ending index till where sum has to be computed. 
- - Returns - ======= - - sum: int - The required sum - """ - if left_index >= 1: - return self.get_prefix_sum(right_index) - \ - self.get_prefix_sum(left_index - 1) - else: - return self.get_prefix_sum(right_index) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/heaps.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/heaps.py deleted file mode 100644 index 12133a6f1..000000000 --- a/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/heaps.py +++ /dev/null @@ -1,582 +0,0 @@ -from pydatastructs.utils.misc_util import ( - _check_type, TreeNode, BinomialTreeNode, - Backend, raise_if_backend_is_not_python) -from pydatastructs.linear_data_structures.arrays import ( - DynamicOneDimensionalArray, Array) -from pydatastructs.miscellaneous_data_structures.binomial_trees import BinomialTree - -__all__ = [ - 'BinaryHeap', - 'TernaryHeap', - 'DHeap', - 'BinomialHeap' -] - -class Heap(object): - """ - Abstract class for representing heaps. - """ - pass - - -class DHeap(Heap): - """ - Represents D-ary Heap. - - Parameters - ========== - - elements: list, tuple, Array - Optional, by default 'None'. - list/tuple/Array of initial TreeNode in Heap. - heap_property: str - If the key stored in each node is - either greater than or equal to - the keys in the node's children - then pass 'max'. - If the key stored in each node is - either less than or equal to - the keys in the node's children - then pass 'min'. - By default, the heap property is - set to 'min'. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. 
- - Examples - ======== - - >>> from pydatastructs.trees.heaps import DHeap - >>> min_heap = DHeap(heap_property="min", d=3) - >>> min_heap.insert(1, 1) - >>> min_heap.insert(5, 5) - >>> min_heap.insert(7, 7) - >>> min_heap.extract().key - 1 - >>> min_heap.insert(4, 4) - >>> min_heap.extract().key - 4 - - >>> max_heap = DHeap(heap_property='max', d=2) - >>> max_heap.insert(1, 1) - >>> max_heap.insert(5, 5) - >>> max_heap.insert(7, 7) - >>> max_heap.extract().key - 7 - >>> max_heap.insert(6, 6) - >>> max_heap.extract().key - 6 - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/D-ary_heap - """ - __slots__ = ['_comp', 'heap', 'd', 'heap_property', '_last_pos_filled'] - - def __new__(cls, elements=None, heap_property="min", d=4, - **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = Heap.__new__(cls) - obj.heap_property = heap_property - obj.d = d - if heap_property == "min": - obj._comp = lambda key_parent, key_child: key_parent <= key_child - elif heap_property == "max": - obj._comp = lambda key_parent, key_child: key_parent >= key_child - else: - raise ValueError("%s is invalid heap property"%(heap_property)) - if elements is None: - elements = DynamicOneDimensionalArray(TreeNode, 0) - elif _check_type(elements, (list,tuple)): - elements = DynamicOneDimensionalArray(TreeNode, len(elements), elements) - elif _check_type(elements, Array): - elements = DynamicOneDimensionalArray(TreeNode, len(elements), elements._data) - else: - raise ValueError(f'Expected a list/tuple/Array of TreeNode got {type(elements)}') - obj.heap = elements - obj._last_pos_filled = obj.heap._last_pos_filled - obj._build() - return obj - - @classmethod - def methods(cls): - return ['__new__', 'insert', 'extract', '__str__', 'is_empty'] - - def _build(self): - for i in range(self._last_pos_filled + 1): - self.heap[i]._leftmost, self.heap[i]._rightmost = \ - self.d*i + 1, self.d*i + self.d - for i in range((self._last_pos_filled + 
1)//self.d, -1, -1): - self._heapify(i) - - def _swap(self, idx1, idx2): - idx1_key, idx1_data = \ - self.heap[idx1].key, self.heap[idx1].data - self.heap[idx1].key, self.heap[idx1].data = \ - self.heap[idx2].key, self.heap[idx2].data - self.heap[idx2].key, self.heap[idx2].data = \ - idx1_key, idx1_data - - def _heapify(self, i): - while True: - target = i - l = self.d*i + 1 - r = self.d*i + self.d - - for j in range(l, r+1): - if j <= self._last_pos_filled: - target = j if self._comp(self.heap[j].key, self.heap[target].key) \ - else target - else: - break - - if target != i: - self._swap(target, i) - i = target - else: - break - - def insert(self, key, data=None): - """ - Insert a new element to the heap according to heap property. - - Parameters - ========== - - key - The key for comparison. - data - The data to be inserted. - - Returns - ======= - - None - """ - new_node = TreeNode(key, data) - self.heap.append(new_node) - self._last_pos_filled += 1 - i = self._last_pos_filled - self.heap[i]._leftmost, self.heap[i]._rightmost = self.d*i + 1, self.d*i + self.d - - while True: - parent = (i - 1)//self.d - if i == 0 or self._comp(self.heap[parent].key, self.heap[i].key): - break - else: - self._swap(i, parent) - i = parent - - def extract(self): - """ - Extract root element of the Heap. - - Returns - ======= - - root_element: TreeNode - The TreeNode at the root of the heap, - if the heap is not empty. - - None - If the heap is empty. 
- """ - if self._last_pos_filled == -1: - raise IndexError("Heap is empty.") - else: - element_to_be_extracted = TreeNode(self.heap[0].key, self.heap[0].data) - self._swap(0, self._last_pos_filled) - self.heap.delete(self._last_pos_filled) - self._last_pos_filled -= 1 - self._heapify(0) - return element_to_be_extracted - - def __str__(self): - to_be_printed = ['' for i in range(self._last_pos_filled + 1)] - for i in range(self._last_pos_filled + 1): - node = self.heap[i] - if node._leftmost <= self._last_pos_filled: - if node._rightmost <= self._last_pos_filled: - children = list(range(node._leftmost, node._rightmost + 1)) - else: - children = list(range(node._leftmost, self._last_pos_filled + 1)) - else: - children = [] - to_be_printed[i] = (node.key, node.data, children) - return str(to_be_printed) - - @property - def is_empty(self): - """ - Checks if the heap is empty. - """ - return self.heap._last_pos_filled == -1 - - -class BinaryHeap(DHeap): - """ - Represents Binary Heap. - - Parameters - ========== - - elements: list, tuple - Optional, by default 'None'. - List/tuple of initial elements in Heap. - heap_property: str - If the key stored in each node is - either greater than or equal to - the keys in the node's children - then pass 'max'. - If the key stored in each node is - either less than or equal to - the keys in the node's children - then pass 'min'. - By default, the heap property is - set to 'min'. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. 
- - Examples - ======== - - >>> from pydatastructs.trees.heaps import BinaryHeap - >>> min_heap = BinaryHeap(heap_property="min") - >>> min_heap.insert(1, 1) - >>> min_heap.insert(5, 5) - >>> min_heap.insert(7, 7) - >>> min_heap.extract().key - 1 - >>> min_heap.insert(4, 4) - >>> min_heap.extract().key - 4 - - >>> max_heap = BinaryHeap(heap_property='max') - >>> max_heap.insert(1, 1) - >>> max_heap.insert(5, 5) - >>> max_heap.insert(7, 7) - >>> max_heap.extract().key - 7 - >>> max_heap.insert(6, 6) - >>> max_heap.extract().key - 6 - - References - ========== - - .. [1] https://en.m.wikipedia.org/wiki/Binary_heap - """ - def __new__(cls, elements=None, heap_property="min", - **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = DHeap.__new__(cls, elements, heap_property, 2) - return obj - - @classmethod - def methods(cls): - return ['__new__'] - - -class TernaryHeap(DHeap): - """ - Represents Ternary Heap. - - Parameters - ========== - - elements: list, tuple - Optional, by default 'None'. - List/tuple of initial elements in Heap. - heap_property: str - If the key stored in each node is - either greater than or equal to - the keys in the node's children - then pass 'max'. - If the key stored in each node is - either less than or equal to - the keys in the node's children - then pass 'min'. - By default, the heap property is - set to 'min'. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. 
- - Examples - ======== - - >>> from pydatastructs.trees.heaps import TernaryHeap - >>> min_heap = TernaryHeap(heap_property="min") - >>> min_heap.insert(1, 1) - >>> min_heap.insert(5, 5) - >>> min_heap.insert(7, 7) - >>> min_heap.insert(3, 3) - >>> min_heap.extract().key - 1 - >>> min_heap.insert(4, 4) - >>> min_heap.extract().key - 3 - - >>> max_heap = TernaryHeap(heap_property='max') - >>> max_heap.insert(1, 1) - >>> max_heap.insert(5, 5) - >>> max_heap.insert(7, 7) - >>> min_heap.insert(3, 3) - >>> max_heap.extract().key - 7 - >>> max_heap.insert(6, 6) - >>> max_heap.extract().key - 6 - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/D-ary_heap - .. [2] https://ece.uwaterloo.ca/~dwharder/aads/Algorithms/d-ary_heaps/Ternary_heaps/ - """ - def __new__(cls, elements=None, heap_property="min", - **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = DHeap.__new__(cls, elements, heap_property, 3) - return obj - - @classmethod - def methods(cls): - return ['__new__'] - - -class BinomialHeap(Heap): - """ - Represents binomial heap. - - Parameters - ========== - - root_list: list/tuple/Array - By default, [] - The list of BinomialTree object references - in sorted order. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs import BinomialHeap - >>> b = BinomialHeap() - >>> b.insert(1, 1) - >>> b.insert(2, 2) - >>> b.find_minimum().key - 1 - >>> b.find_minimum().children[0].key - 2 - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Binomial_heap - """ - __slots__ = ['root_list'] - - def __new__(cls, root_list=None, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - if root_list is None: - root_list = [] - if not all((_check_type(root, BinomialTree)) - for root in root_list): - raise TypeError("The root_list should contain " - "references to objects of BinomialTree.") - obj = Heap.__new__(cls) - obj.root_list = root_list - return obj - - @classmethod - def methods(cls): - return ['__new__', 'merge_tree', 'merge', 'insert', - 'find_minimum', 'is_emtpy', 'decrease_key', 'delete', - 'delete_minimum'] - - def merge_tree(self, tree1, tree2): - """ - Merges two BinomialTree objects. - - Parameters - ========== - - tree1: BinomialTree - - tree2: BinomialTree - """ - if (not _check_type(tree1, BinomialTree)) or \ - (not _check_type(tree2, BinomialTree)): - raise TypeError("Both the trees should be of type " - "BinomalTree.") - ret_value = None - if tree1.root.key <= tree2.root.key: - tree1.add_sub_tree(tree2) - ret_value = tree1 - else: - tree2.add_sub_tree(tree1) - ret_value = tree2 - return ret_value - - def _merge_heap_last_new_tree(self, new_root_list, new_tree): - """ - Merges last tree node in root list with the incoming tree. - """ - pos = -1 - if len(new_root_list) > 0 and new_root_list[pos].order == new_tree.order: - new_root_list[pos] = self.merge_tree(new_root_list[pos], new_tree) - else: - new_root_list.append(new_tree) - - def merge(self, other_heap): - """ - Merges current binomial heap with the given binomial heap. 
- - Parameters - ========== - - other_heap: BinomialHeap - """ - if not _check_type(other_heap, BinomialHeap): - raise TypeError("Other heap is not of type BinomialHeap.") - new_root_list = [] - i, j = 0, 0 - while (i < len(self.root_list)) and \ - (j < len(other_heap.root_list)): - new_tree = None - while self.root_list[i] is None: - i += 1 - while other_heap.root_list[j] is None: - j += 1 - if self.root_list[i].order == other_heap.root_list[j].order: - new_tree = self.merge_tree(self.root_list[i], - other_heap.root_list[j]) - i += 1 - j += 1 - else: - if self.root_list[i].order < other_heap.root_list[j].order: - new_tree = self.root_list[i] - i += 1 - else: - new_tree = other_heap.root_list[j] - j += 1 - self._merge_heap_last_new_tree(new_root_list, new_tree) - - while i < len(self.root_list): - new_tree = self.root_list[i] - self._merge_heap_last_new_tree(new_root_list, new_tree) - i += 1 - while j < len(other_heap.root_list): - new_tree = other_heap.root_list[j] - self._merge_heap_last_new_tree(new_root_list, new_tree) - j += 1 - self.root_list = new_root_list - - def insert(self, key, data=None): - """ - Inserts new node with the given key and data. - - key - The key of the node which can be operated - upon by relational operators. - - data - The data to be stored in the new node. - """ - new_node = BinomialTreeNode(key, data) - new_tree = BinomialTree(root=new_node, order=0) - new_heap = BinomialHeap(root_list=[new_tree]) - self.merge(new_heap) - - def find_minimum(self, **kwargs): - """ - Finds the node with the minimum key. 
- - Returns - ======= - - min_node: BinomialTreeNode - """ - if self.is_empty: - raise IndexError("Binomial heap is empty.") - min_node = None - idx, min_idx = 0, None - for tree in self.root_list: - if ((min_node is None) or - (tree is not None and tree.root is not None and - min_node.key > tree.root.key)): - min_node = tree.root - min_idx = idx - idx += 1 - if kwargs.get('get_index', None) is not None: - return min_node, min_idx - return min_node - - def delete_minimum(self): - """ - Deletes the node with minimum key. - """ - min_node, min_idx = self.find_minimum(get_index=True) - child_root_list = [] - for k, child in enumerate(min_node.children): - if child is not None: - child_root_list.append(BinomialTree(root=child, order=k)) - self.root_list.remove(self.root_list[min_idx]) - child_heap = BinomialHeap(root_list=child_root_list) - self.merge(child_heap) - - @property - def is_empty(self): - return not self.root_list - - def decrease_key(self, node, new_key): - """ - Decreases the key of the given node. - - Parameters - ========== - - node: BinomialTreeNode - The node whose key is to be reduced. - new_key - The new key of the given node, - should be less than the current key. - """ - if node.key <= new_key: - raise ValueError("The new key " - "should be less than current node's key.") - node.key = new_key - while ((not node.is_root) and - (node.parent.key > node.key)): - node.parent.key, node.key = \ - node.key, node.parent.key - node.parent.data, node.data = \ - node.data, node.parent.data - node = node.parent - - def delete(self, node): - """ - Deletes the given node. - - Parameters - ========== - - node: BinomialTreeNode - The node which is to be deleted. 
- """ - self.decrease_key(node, self.find_minimum().key - 1) - self.delete_minimum() diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/m_ary_trees.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/m_ary_trees.py deleted file mode 100644 index a06fda9ee..000000000 --- a/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/m_ary_trees.py +++ /dev/null @@ -1,172 +0,0 @@ -from pydatastructs.utils import MAryTreeNode -from pydatastructs.linear_data_structures.arrays import ArrayForTrees -from pydatastructs.utils.misc_util import ( - Backend, raise_if_backend_is_not_python) - -__all__ = [ - 'MAryTree' -] - -class MAryTree(object): - """ - Abstract m-ary tree. - - Parameters - ========== - - key - Required if tree is to be instantiated with - root otherwise not needed. - root_data - Optional, the root node of the binary tree. - If not of type MAryTreeNode, it will consider - root as data and a new root node will - be created. - comp: lambda - Optional, A lambda function which will be used - for comparison of keys. Should return a - bool value. By default it implements less - than operator. - is_order_statistic: bool - Set it to True, if you want to use the - order statistic features of the tree. - max_children - Optional, specifies the maximum number of children - a node can have. Defaults to 2 in case nothing is - specified. - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/M-ary_tree - """ - - __slots__ = ['root_idx', 'max_children', 'comparator', 'tree', 'size', - 'is_order_statistic'] - - - def __new__(cls, key=None, root_data=None, comp=None, - is_order_statistic=False, max_children=2, - **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = object.__new__(cls) - if key is None and root_data is not None: - raise ValueError('Key required.') - key = None if root_data is None else key - root = MAryTreeNode(key, root_data) - root.is_root = True - obj.root_idx = 0 - obj.max_children = max_children - obj.tree, obj.size = ArrayForTrees(MAryTreeNode, [root]), 1 - obj.comparator = lambda key1, key2: key1 < key2 \ - if comp is None else comp - obj.is_order_statistic = is_order_statistic - return obj - - @classmethod - def methods(cls): - return ['__new__', '__str__'] - - def insert(self, key, data=None): - """ - Inserts data by the passed key using iterative - algorithm. - - Parameters - ========== - - key - The key for comparison. - data - The data to be inserted. - - Returns - ======= - - None - """ - raise NotImplementedError("This is an abstract method.") - - def delete(self, key, **kwargs): - """ - Deletes the data with the passed key - using iterative algorithm. - - Parameters - ========== - - key - The key of the node which is - to be deleted. - - Returns - ======= - - True - If the node is deleted successfully. - - None - If the node to be deleted doesn't exists. - - Note - ==== - - The node is deleted means that the connection to that - node are removed but the it is still in tree. - """ - raise NotImplementedError("This is an abstract method.") - - def search(self, key, **kwargs): - """ - Searches for the data in the binary search tree - using iterative algorithm. - - Parameters - ========== - - key - The key for searching. - parent: bool - If true then returns index of the - parent of the node with the passed - key. 
- By default, False - - Returns - ======= - - int - If the node with the passed key is - in the tree. - tuple - The index of the searched node and - the index of the parent of that node. - None - In all other cases. - """ - raise NotImplementedError("This is an abstract method.") - - def to_binary_tree(self): - """ - Converts an m-ary tree to a binary tree. - - Returns - ======= - - TreeNode - The root of the newly created binary tree. - """ - raise NotImplementedError("This is an abstract method.") - - - def __str__(self): - to_be_printed = ['' for i in range(self.tree._last_pos_filled + 1)] - for i in range(self.tree._last_pos_filled + 1): - if self.tree[i] is not None: - node = self.tree[i] - to_be_printed[i] = (node.key, node.data) - for j in node.children: - if j is not None: - to_be_printed[i].append(j) - return str(to_be_printed) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/space_partitioning_trees.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/space_partitioning_trees.py deleted file mode 100644 index f13c1f280..000000000 --- a/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/space_partitioning_trees.py +++ /dev/null @@ -1,242 +0,0 @@ -from pydatastructs.utils import TreeNode -from collections import deque as Queue -from pydatastructs.utils.misc_util import ( - _check_type, Backend, - raise_if_backend_is_not_python) - -__all__ = [ - 'OneDimensionalSegmentTree' -] - -class OneDimensionalSegmentTree(object): - """ - Represents one dimensional segment trees. - - Parameters - ========== - - segs: list/tuple/set - The segs should contains tuples/list/set of size 2 - denoting the start and end points of the intervals. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. 
- - Examples - ======== - - >>> from pydatastructs import OneDimensionalSegmentTree as ODST - >>> segt = ODST([(3, 8), (9, 20)]) - >>> segt.build() - >>> segt.tree[0].key - [False, 2, 3, False] - >>> len(segt.query(4)) - 1 - - Note - ==== - - All the segments are assumed to be closed intervals, - i.e., the ends are points of segments are also included in - computation. - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Segment_tree - - """ - - __slots__ = ['segments', 'tree', 'root_idx', 'cache'] - - def __new__(cls, segs, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = object.__new__(cls) - if any((not isinstance(seg, (tuple, list, set)) or len(seg) != 2) - for seg in segs): - raise ValueError('%s is invalid set of intervals'%(segs)) - for i in range(len(segs)): - segs[i] = list(segs[i]) - segs[i].sort() - obj.segments = list(segs) - obj.tree, obj.root_idx, obj.cache = [], None, False - return obj - - @classmethod - def methods(cls): - return ['build', 'query', '__str__'] - - def _union(self, i1, i2): - """ - Helper function for taking union of two - intervals. - """ - return TreeNode([i1.key[0], i1.key[1], i2.key[2], i2.key[3]], None) - - def _intersect(self, i1, i2): - """ - Helper function for finding intersection of two - intervals. - """ - if i1 is None or i2 is None: - return False - if i1.key[2] < i2.key[1] or i2.key[2] < i1.key[1]: - return False - c1, c2 = None, None - if i1.key[2] == i2.key[1]: - c1 = (i1.key[3] and i2.key[0]) - if i2.key[2] == i1.key[1]: - c2 = (i2.key[3] and i1.key[0]) - if c1 is False and c2 is False: - return False - return True - - def _contains(self, i1, i2): - """ - Helper function for checking if the first interval - is contained in second interval. 
- """ - if i1 is None or i2 is None: - return False - if i1.key[1] < i2.key[1] and i1.key[2] > i2.key[2]: - return True - if i1.key[1] == i2.key[1] and i1.key[2] > i2.key[2]: - return (i1.key[0] or not i2.key[0]) - if i1.key[1] < i2.key[1] and i1.key[2] == i2.key[2]: - return i1.key[3] or not i2.key[3] - if i1.key[1] == i2.key[1] and i1.key[2] == i2.key[2]: - return not ((not i1.key[3] and i2.key[3]) or (not i1.key[0] and i2.key[0])) - return False - - def _iterate(self, calls, I, idx): - """ - Helper function for filling the calls - stack. Used for imitating the stack based - approach used in recursion. - """ - if self.tree[idx].right is None: - rc = None - else: - rc = self.tree[self.tree[idx].right] - if self.tree[idx].left is None: - lc = None - else: - lc = self.tree[self.tree[idx].left] - if self._intersect(I, rc): - calls.append(self.tree[idx].right) - if self._intersect(I, lc): - calls.append(self.tree[idx].left) - return calls - - def build(self): - """ - Builds the segment tree from the segments, - using iterative algorithm based on queues. 
- """ - if self.cache: - return None - endpoints = [] - for segment in self.segments: - endpoints.extend(segment) - endpoints.sort() - - elem_int = Queue() - elem_int.append(TreeNode([False, endpoints[0] - 1, endpoints[0], False], None)) - i = 0 - while i < len(endpoints) - 1: - elem_int.append(TreeNode([True, endpoints[i], endpoints[i], True], None)) - elem_int.append(TreeNode([False, endpoints[i], endpoints[i+1], False], None)) - i += 1 - elem_int.append(TreeNode([True, endpoints[i], endpoints[i], True], None)) - elem_int.append(TreeNode([False, endpoints[i], endpoints[i] + 1, False], None)) - - self.tree = [] - while len(elem_int) > 1: - m = len(elem_int) - while m >= 2: - I1 = elem_int.popleft() - I2 = elem_int.popleft() - I = self._union(I1, I2) - I.left = len(self.tree) - I.right = len(self.tree) + 1 - self.tree.append(I1), self.tree.append(I2) - elem_int.append(I) - m -= 2 - if m & 1 == 1: - Il = elem_int.popleft() - elem_int.append(Il) - - Ir = elem_int.popleft() - Ir.left, Ir.right = -3, -2 - self.tree.append(Ir) - self.root_idx = -1 - - for segment in self.segments: - I = TreeNode([True, segment[0], segment[1], True], None) - calls = [self.root_idx] - while calls: - idx = calls.pop() - if self._contains(I, self.tree[idx]): - if self.tree[idx].data is None: - self.tree[idx].data = [] - self.tree[idx].data.append(I) - continue - calls = self._iterate(calls, I, idx) - self.cache = True - - def query(self, qx, init_node=None): - """ - Queries the segment tree. - - Parameters - ========== - - qx: int/float - The query point - - init_node: int - The index of the node from which the query process - is to be started. - - Returns - ======= - - intervals: set - The set of the intervals which contain the query - point. - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Segment_tree - """ - if not self.cache: - self.build() - if init_node is None: - init_node = self.root_idx - qn = TreeNode([True, qx, qx, True], None) - intervals = [] - calls = [init_node] - while calls: - idx = calls.pop() - if _check_type(self.tree[idx].data, list): - intervals.extend(self.tree[idx].data) - calls = self._iterate(calls, qn, idx) - return set(intervals) - - def __str__(self): - """ - Used for printing. - """ - if not self.cache: - self.build() - str_tree = [] - for seg in self.tree: - if seg.data is None: - data = None - else: - data = [str(sd) for sd in seg.data] - str_tree.append((seg.left, seg.key, data, seg.right)) - return str(str_tree) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/tests/__init__.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/tests/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/tests/test_binary_trees.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/tests/test_binary_trees.py deleted file mode 100644 index 826100b78..000000000 --- a/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/tests/test_binary_trees.py +++ /dev/null @@ -1,820 +0,0 @@ -from pydatastructs.trees.binary_trees import ( - BinaryTree, BinarySearchTree, BinaryTreeTraversal, AVLTree, - ArrayForTrees, BinaryIndexedTree, SelfBalancingBinaryTree, SplayTree, CartesianTree, Treap, RedBlackTree) -from pydatastructs.utils.raises_util import raises -from pydatastructs.utils.misc_util import TreeNode -from copy import deepcopy -from pydatastructs.utils.misc_util import Backend -import random -from pydatastructs.utils._backend.cpp import _nodes - -def _test_BinarySearchTree(backend): - BST = BinarySearchTree - b = BST(8, 8, backend=backend) - b.delete(8) - b.insert(8, 8) - b.insert(3, 3) - b.insert(10, 10) - b.insert(1, 1) - b.insert(6, 6) - 
b.insert(4, 4) - b.insert(7, 7) - b.insert(14, 14) - b.insert(13, 13) - # Explicit check for the __str__ method of Binary Trees Class - assert str(b) == \ - ("[(1, 8, 8, 2), (3, 3, 3, 4), (None, 10, 10, 7), (None, 1, 1, None), " - "(5, 6, 6, 6), (None, 4, 4, None), (None, 7, 7, None), (8, 14, 14, None), " - "(None, 13, 13, None)]") - assert b.root_idx == 0 - - assert b.tree[0].left == 1 - assert b.tree[0].key == 8 - assert b.tree[0].data == 8 - assert b.tree[0].right == 2 - - trav = BinaryTreeTraversal(b, backend=backend) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [1, 3, 4, 6, 7, 8, 10, 13, 14] - assert [node.key for node in pre_order] == [8, 3, 1, 6, 4, 7, 10, 14, 13] - - assert b.search(10) == 2 - assert b.search(-1) is None - assert b.delete(13) is True - assert b.search(13) is None - assert b.delete(10) is True - assert b.search(10) is None - assert b.delete(3) is True - assert b.search(3) is None - assert b.delete(13) is None - - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [1, 4, 6, 7, 8, 14] - assert [node.key for node in pre_order] == [8, 4, 1, 6, 7, 14] - - b.delete(7) - b.delete(6) - b.delete(1) - b.delete(4) - - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [8, 14] - assert [node.key for node in pre_order] == [8, 14] - - bc = BST(1, 1, backend=backend) - assert bc.insert(1, 2) is None - - b = BST(-8, 8, backend=backend) - b.insert(-3, 3) - b.insert(-10, 10) - b.insert(-1, 1) - b.insert(-6, 6) - b.insert(-4, 4) - b.insert(-7, 7) - b.insert(-14, 14) - b.insert(-13, 13) - - b.delete(-13) - b.delete(-10) - b.delete(-3) - b.delete(-13) - assert str(b) == "[(7, -8, 8, 1), (4, -1, 1, None), '', '', (6, -6, 6, 5), (None, -4, 4, None), 
(None, -7, 7, None), (None, -14, 14, None)]" - - bl = BST(backend=backend) - nodes = [50, 30, 90, 70, 100, 60, 80, 55, 20, 40, 15, 10, 16, 17, 18] - for node in nodes: - bl.insert(node, node) - - assert bl.lowest_common_ancestor(80, 55, 2) == 70 - assert bl.lowest_common_ancestor(60, 70, 2) == 70 - assert bl.lowest_common_ancestor(18, 18, 2) == 18 - assert bl.lowest_common_ancestor(40, 90, 2) == 50 - - assert bl.lowest_common_ancestor(18, 10, 2) == 15 - assert bl.lowest_common_ancestor(55, 100, 2) == 90 - assert bl.lowest_common_ancestor(16, 80, 2) == 50 - assert bl.lowest_common_ancestor(30, 55, 2) == 50 - - assert raises(ValueError, lambda: bl.lowest_common_ancestor(60, 200, 2)) - assert raises(ValueError, lambda: bl.lowest_common_ancestor(200, 60, 2)) - assert raises(ValueError, lambda: bl.lowest_common_ancestor(-3, 4, 2)) - - assert bl.lowest_common_ancestor(80, 55, 1) == 70 - assert bl.lowest_common_ancestor(60, 70, 1) == 70 - assert bl.lowest_common_ancestor(18, 18, 1) == 18 - assert bl.lowest_common_ancestor(40, 90, 1) == 50 - - assert bl.lowest_common_ancestor(18, 10, 1) == 15 - assert bl.lowest_common_ancestor(55, 100, 1) == 90 - assert bl.lowest_common_ancestor(16, 80, 1) == 50 - assert bl.lowest_common_ancestor(30, 55, 1) == 50 - - assert raises(ValueError, lambda: bl.lowest_common_ancestor(60, 200, 1)) - assert raises(ValueError, lambda: bl.lowest_common_ancestor(200, 60, 1)) - assert raises(ValueError, lambda: bl.lowest_common_ancestor(-3, 4, 1)) - -def test_BinarySearchTree(): - _test_BinarySearchTree(Backend.PYTHON) - -def test_cpp_BinarySearchTree(): - _test_BinarySearchTree(Backend.CPP) - -def _test_BinaryTreeTraversal(backend): - BST = BinarySearchTree - BTT = BinaryTreeTraversal - b = BST('F', 'F', backend=backend) - b.insert('B', 'B') - b.insert('A', 'A') - b.insert('G', 'G') - b.insert('D', 'D') - b.insert('C', 'C') - b.insert('E', 'E') - b.insert('I', 'I') - b.insert('H', 'H') - - trav = BTT(b, backend=backend) - pre = 
trav.depth_first_search(order='pre_order') - assert [node.key for node in pre] == ['F', 'B', 'A', 'D', 'C', 'E', 'G', 'I', 'H'] - - ino = trav.depth_first_search() - assert [node.key for node in ino] == ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I'] - - out = trav.depth_first_search(order='out_order') - assert [node.key for node in out] == ['I', 'H', 'G', 'F', 'E', 'D', 'C', 'B', 'A'] - - post = trav.depth_first_search(order='post_order') - assert [node.key for node in post] == ['A', 'C', 'E', 'D', 'B', 'H', 'I', 'G', 'F'] - - bfs = trav.breadth_first_search() - assert [node.key for node in bfs] == ['F', 'B', 'G', 'A', 'D', 'I', 'C', 'E', 'H'] - - assert raises(NotImplementedError, lambda: trav.breadth_first_search(strategy='iddfs')) - assert raises(NotImplementedError, lambda: trav.depth_first_search(order='in_out_order')) - assert raises(TypeError, lambda: BTT(1)) - -def test_BinaryTreeTraversal(): - _test_BinaryTreeTraversal(Backend.PYTHON) - -def test_cpp_BinaryTreeTraversal(): - _test_BinaryTreeTraversal(Backend.CPP) - -def _test_AVLTree(backend): - a = AVLTree('M', 'M', backend=backend) - a.insert('N', 'N') - a.insert('O', 'O') - a.insert('L', 'L') - a.insert('K', 'K') - a.insert('Q', 'Q') - a.insert('P', 'P') - a.insert('H', 'H') - a.insert('I', 'I') - a.insert('A', 'A') - assert a.root_idx == 1 - - trav = BinaryTreeTraversal(a, backend=backend) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == ['A', 'H', 'I', 'K', 'L', 'M', 'N', 'O', 'P', 'Q'] - assert [node.key for node in pre_order] == ['N', 'I', 'H', 'A', 'L', 'K', 'M', 'P', 'O', 'Q'] - - assert [a.balance_factor(a.tree[i]) for i in range(a.tree.size) if a.tree[i] is not None] == \ - [0, -1, 0, 0, 0, 0, 0, -1, 0, 0] - a1 = AVLTree(1, 1, backend=backend) - a1.insert(2, 2) - a1.insert(3, 3) - a1.insert(4, 4) - a1.insert(5, 5) - - trav = BinaryTreeTraversal(a1, backend=backend) - in_order = 
trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [1, 2, 3, 4, 5] - assert [node.key for node in pre_order] == [2, 1, 4, 3, 5] - - a3 = AVLTree(-1, 1, backend=backend) - a3.insert(-2, 2) - a3.insert(-3, 3) - a3.insert(-4, 4) - a3.insert(-5, 5) - - trav = BinaryTreeTraversal(a3, backend=backend) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [-5, -4, -3, -2, -1] - assert [node.key for node in pre_order] == [-2, -4, -5, -3, -1] - - a2 = AVLTree(backend=backend) - a2.insert(1, 1) - a2.insert(1, 1) - - trav = BinaryTreeTraversal(a2, backend=backend) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [1] - assert [node.key for node in pre_order] == [1] - - a3 = AVLTree(backend=backend) - a3.set_tree( ArrayForTrees(TreeNode, 0, backend=backend) ) - for i in range(0,7): - a3.tree.append(TreeNode(i, i, backend=backend)) - a3.tree[0].left = 1 - a3.tree[0].right = 6 - a3.tree[1].left = 5 - a3.tree[1].right = 2 - a3.tree[2].left = 3 - a3.tree[2].right = 4 - a3._left_right_rotate(0, 1) - assert str(a3) == "[(4, 0, 0, 6), (5, 1, 1, 3), (1, 2, 2, 0), (None, 3, 3, None), (None, 4, 4, None), (None, 5, 5, None), (None, 6, 6, None)]" - - trav = BinaryTreeTraversal(a3, backend=backend) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [5, 1, 3, 2, 4, 0, 6] - assert [node.key for node in pre_order] == [2, 1, 5, 3, 0, 4, 6] - - a4 = AVLTree(backend=backend) - a4.set_tree( ArrayForTrees(TreeNode, 0, backend=backend) ) - for i in range(0,7): - a4.tree.append(TreeNode(i, i,backend=backend)) - a4.tree[0].left = 1 - a4.tree[0].right = 2 - a4.tree[2].left = 3 - a4.tree[2].right = 4 - 
a4.tree[3].left = 5 - a4.tree[3].right = 6 - a4._right_left_rotate(0, 2) - - trav = BinaryTreeTraversal(a4, backend=backend) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [1, 0, 5, 3, 6, 2, 4] - assert [node.key for node in pre_order] == [3,0,1,5,2,6,4] - - a5 = AVLTree(is_order_statistic=True,backend=backend) - if backend==Backend.PYTHON: - a5.set_tree( ArrayForTrees(TreeNode, [ - TreeNode(10, 10), - TreeNode(5, 5), - TreeNode(17, 17), - TreeNode(2, 2), - TreeNode(9, 9), - TreeNode(12, 12), - TreeNode(20, 20), - TreeNode(3, 3), - TreeNode(11, 11), - TreeNode(15, 15), - TreeNode(18, 18), - TreeNode(30, 30), - TreeNode(13, 13), - TreeNode(33, 33) - ]) ) - else: - a5.set_tree( ArrayForTrees(_nodes.TreeNode, [ - TreeNode(10, 10,backend=backend), - TreeNode(5, 5,backend=backend), - TreeNode(17, 17,backend=backend), - TreeNode(2, 2,backend=backend), - TreeNode(9, 9,backend=backend), - TreeNode(12, 12,backend=backend), - TreeNode(20, 20,backend=backend), - TreeNode(3, 3,backend=backend), - TreeNode(11, 11,backend=backend), - TreeNode(15, 15,backend=backend), - TreeNode(18, 18,backend=backend), - TreeNode(30, 30,backend=backend), - TreeNode(13, 13,backend=backend), - TreeNode(33, 33,backend=backend) - ],backend=backend) ) - - a5.tree[0].left, a5.tree[0].right, a5.tree[0].parent, a5.tree[0].height = \ - 1, 2, None, 4 - a5.tree[1].left, a5.tree[1].right, a5.tree[1].parent, a5.tree[1].height = \ - 3, 4, 0, 2 - a5.tree[2].left, a5.tree[2].right, a5.tree[2].parent, a5.tree[2].height = \ - 5, 6, 0, 3 - a5.tree[3].left, a5.tree[3].right, a5.tree[3].parent, a5.tree[3].height = \ - None, 7, 1, 1 - a5.tree[4].left, a5.tree[4].right, a5.tree[4].parent, a5.tree[4].height = \ - None, None, 1, 0 - a5.tree[5].left, a5.tree[5].right, a5.tree[5].parent, a5.tree[5].height = \ - 8, 9, 2, 2 - a5.tree[6].left, a5.tree[6].right, a5.tree[6].parent, a5.tree[6].height = \ - 10, 11, 2, 2 - 
a5.tree[7].left, a5.tree[7].right, a5.tree[7].parent, a5.tree[7].height = \ - None, None, 3, 0 - a5.tree[8].left, a5.tree[8].right, a5.tree[8].parent, a5.tree[8].height = \ - None, None, 5, 0 - a5.tree[9].left, a5.tree[9].right, a5.tree[9].parent, a5.tree[9].height = \ - 12, None, 5, 1 - a5.tree[10].left, a5.tree[10].right, a5.tree[10].parent, a5.tree[10].height = \ - None, None, 6, 0 - a5.tree[11].left, a5.tree[11].right, a5.tree[11].parent, a5.tree[11].height = \ - None, 13, 6, 1 - a5.tree[12].left, a5.tree[12].right, a5.tree[12].parent, a5.tree[12].height = \ - None, None, 9, 0 - a5.tree[13].left, a5.tree[13].right, a5.tree[13].parent, a5.tree[13].height = \ - None, None, 11, 0 - - # testing order statistics - a5.tree[0].size = 14 - a5.tree[1].size = 4 - a5.tree[2].size = 9 - a5.tree[3].size = 2 - a5.tree[4].size = 1 - a5.tree[5].size = 4 - a5.tree[6].size = 4 - a5.tree[7].size = 1 - a5.tree[8].size = 1 - a5.tree[9].size = 2 - a5.tree[10].size = 1 - a5.tree[11].size = 2 - a5.tree[12].size = 1 - a5.tree[13].size = 1 - assert str(a5) == "[(1, 10, 10, 2), (3, 5, 5, 4), (5, 17, 17, 6), (None, 2, 2, 7), (None, 9, 9, None), (8, 12, 12, 9), (10, 20, 20, 11), (None, 3, 3, None), (None, 11, 11, None), (12, 15, 15, None), (None, 18, 18, None), (None, 30, 30, 13), (None, 13, 13, None), (None, 33, 33, None)]" - - assert raises(ValueError, lambda: a5.select(0)) - assert raises(ValueError, lambda: a5.select(15)) - - assert a5.rank(-1) is None - def test_select_rank(expected_output): - if backend==Backend.PYTHON: - output = [] - for i in range(len(expected_output)): - output.append(a5.select(i + 1).key) - assert output == expected_output - output = [] - expected_ranks = [i + 1 for i in range(len(expected_output))] - for i in range(len(expected_output)): - output.append(a5.rank(expected_output[i])) - assert output == expected_ranks - - test_select_rank([2, 3, 5, 9, 10, 11, 12, 13, 15, 17, 18, 20, 30, 33]) - a5.delete(9) - a5.delete(13) - a5.delete(20) - assert str(a5) == "[(7, 
10, 10, 5), (None, 5, 5, None), (0, 17, 17, 6), (None, 2, 2, None), '', (8, 12, 12, 9), (10, 30, 30, 13), (3, 3, 3, 1), (None, 11, 11, None), (None, 15, 15, None), (None, 18, 18, None), '', '', (None, 33, 33, None)]" - - trav = BinaryTreeTraversal(a5, backend=backend) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [2, 3, 5, 10, 11, 12, 15, 17, 18, 30, 33] - assert [node.key for node in pre_order] == [17, 10, 3, 2, 5, 12, 11, 15, 30, 18, 33] - - test_select_rank([2, 3, 5, 10, 11, 12, 15, 17, 18, 30, 33]) - a5.delete(10) - a5.delete(17) - assert str(a5) == "[(7, 11, 11, 5), (None, 5, 5, None), (0, 18, 18, 6), (None, 2, 2, None), '', (None, 12, 12, 9), (None, 30, 30, 13), (3, 3, 3, 1), '', (None, 15, 15, None), '', '', '', (None, 33, 33, None)]" - test_select_rank([2, 3, 5, 11, 12, 15, 18, 30, 33]) - a5.delete(11) - a5.delete(30) - test_select_rank([2, 3, 5, 12, 15, 18, 33]) - a5.delete(12) - test_select_rank([2, 3, 5, 15, 18, 33]) - a5.delete(15) - test_select_rank([2, 3, 5, 18, 33]) - a5.delete(18) - test_select_rank([2, 3, 5, 33]) - a5.delete(33) - test_select_rank([2, 3, 5]) - a5.delete(5) - test_select_rank([2, 3]) - a5.delete(3) - test_select_rank([2]) - a5.delete(2) - test_select_rank([]) - assert str(a5) == "[(None, None, None, None)]" - -def test_AVLTree(): - _test_AVLTree(backend=Backend.PYTHON) -def test_cpp_AVLTree(): - _test_AVLTree(backend=Backend.CPP) - -def _test_BinaryIndexedTree(backend): - - FT = BinaryIndexedTree - - t = FT([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], backend=backend) - - assert t.get_sum(0, 2) == 6 - assert t.get_sum(0, 4) == 15 - assert t.get_sum(0, 9) == 55 - t.update(0, 100) - assert t.get_sum(0, 2) == 105 - assert t.get_sum(0, 4) == 114 - assert t.get_sum(1, 9) == 54 - -def test_BinaryIndexedTree(): - _test_BinaryIndexedTree(Backend.PYTHON) - -def test_cpp_BinaryIndexedTree(): - _test_BinaryIndexedTree(Backend.CPP) - -def 
_test_CartesianTree(backend): - tree = CartesianTree(backend=backend) - tree.insert(3, 1, 3) - tree.insert(1, 6, 1) - tree.insert(0, 9, 0) - tree.insert(5, 11, 5) - tree.insert(4, 14, 4) - tree.insert(9, 17, 9) - tree.insert(7, 22, 7) - tree.insert(6, 42, 6) - tree.insert(8, 49, 8) - tree.insert(2, 99, 2) - # Explicit check for the redefined __str__ method of Cartesian Trees Class - - trav = BinaryTreeTraversal(tree, backend=backend) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] - assert [node.key for node in pre_order] == [3, 1, 0, 2, 5, 4, 9, 7, 6, 8] - - tree.insert(1.5, 4, 1.5) - - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [0, 1, 1.5, 2, 3, 4, 5, 6, 7, 8, 9] - assert [node.key for node in pre_order] == [3, 1.5, 1, 0, 2, 5, 4, 9, 7, 6, 8] - - k = tree.search(1.5) - assert tree.tree[tree.tree[k].parent].key == 3 - tree.delete(1.5) - assert tree.root_idx == 0 - tree.tree[tree.tree[tree.root_idx].left].key == 1 - tree.delete(8) - assert tree.search(8) is None - tree.delete(7) - assert tree.search(7) is None - tree.delete(3) - assert tree.search(3) is None - assert tree.delete(18) is None - -def test_CartesianTree(): - _test_CartesianTree(backend=Backend.PYTHON) - -def test_cpp_CartesianTree(): - _test_CartesianTree(backend=Backend.CPP) - -def _test_Treap(backend): - - random.seed(0) - tree = Treap(backend=backend) - tree.insert(7, 7) - tree.insert(2, 2) - tree.insert(3, 3) - tree.insert(4, 4) - tree.insert(5, 5) - - assert isinstance(tree.tree[0].priority, float) - tree.delete(1) - assert tree.search(1) is None - assert tree.search(2) == 1 - assert tree.delete(1) is None - -def test_Treap(): - _test_Treap(Backend.PYTHON) - -def test_cpp_Treap(): - _test_Treap(Backend.CPP) - -def 
_test_SelfBalancingBinaryTree(backend): - """ - https://github.com/codezonediitj/pydatastructs/issues/234 - """ - tree = SelfBalancingBinaryTree(backend=backend) - tree.insert(5, 5) - tree.insert(5.5, 5.5) - tree.insert(4.5, 4.5) - tree.insert(4.6, 4.6) - tree.insert(4.4, 4.4) - tree.insert(4.55, 4.55) - tree.insert(4.65, 4.65) - original_tree = str(tree) - tree._right_rotate(3, 5) - - assert str(tree) == "[(2, 5, 5, 1), (None, 5.5, 5.5, None), (4, 4.5, 4.5, 5), (None, 4.6, 4.6, 6), (None, 4.4, 4.4, None), (None, 4.55, 4.55, 3), (None, 4.65, 4.65, None)]" - assert tree.tree[3].parent == 5 - assert tree.tree[2].right != 3 - assert tree.tree[tree.tree[5].parent].right == 5 - assert tree.root_idx == 0 - - trav = BinaryTreeTraversal(tree, backend=backend) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [4.4, 4.5, 4.55, 4.6, 4.65, 5, 5.5] - assert [node.key for node in pre_order] == [5, 4.5, 4.4, 4.55, 4.6, 4.65, 5.5] - - assert tree.tree[tree.tree[3].parent].right == 3 - tree._left_rotate(5, 3) - assert str(tree) == original_tree - tree.insert(4.54, 4.54) - tree.insert(4.56, 4.56) - tree._left_rotate(5, 8) - assert tree.tree[tree.tree[8].parent].left == 8 - assert str(tree) == "[(2, 5, 5, 1), (None, 5.5, 5.5, None), (4, 4.5, 4.5, 3), (8, 4.6, 4.6, 6), (None, 4.4, 4.4, None), (7, 4.55, 4.55, None), (None, 4.65, 4.65, None), (None, 4.54, 4.54, None), (5, 4.56, 4.56, None)]" - - tree._left_right_rotate(0, 2) - assert str(tree) == "[(6, 5, 5, 1), (None, 5.5, 5.5, None), (4, 4.5, 4.5, 8), (2, 4.6, 4.6, 0), (None, 4.4, 4.4, None), (7, 4.55, 4.55, None), (None, 4.65, 4.65, None), (None, 4.54, 4.54, None), (5, 4.56, 4.56, None)]" - - tree._right_left_rotate(0, 2) - assert str(tree) == "[(6, 5, 5, None), (None, 5.5, 5.5, None), (None, 4.5, 4.5, 8), (2, 4.6, 4.6, 4), (0, 4.4, 4.4, 2), (7, 4.55, 4.55, None), (None, 4.65, 4.65, None), (None, 4.54, 4.54, None), (5, 4.56, 
4.56, None)]" - -def test_SelfBalancingBinaryTree(): - _test_SelfBalancingBinaryTree(Backend.PYTHON) -def test_cpp_SelfBalancingBinaryTree(): - _test_SelfBalancingBinaryTree(Backend.CPP) - -def _test_SplayTree(backend): - t = SplayTree(100, 100, backend=backend) - t.insert(50, 50) - t.insert(200, 200) - t.insert(40, 40) - t.insert(30, 30) - t.insert(20, 20) - t.insert(55, 55) - assert str(t) == "[(None, 100, 100, None), (None, 50, 50, None), (0, 200, 200, None), (None, 40, 40, 1), (5, 30, 30, 3), (None, 20, 20, None), (4, 55, 55, 2)]" - assert t.root_idx == 6 - - trav = BinaryTreeTraversal(t, backend=backend) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [20, 30, 40, 50, 55, 100, 200] - assert [node.key for node in pre_order] == [55, 30, 20, 40, 50, 200, 100] - - t.delete(40) - - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [20, 30, 50, 55, 100, 200] - assert [node.key for node in pre_order] == [50, 30, 20, 55, 200, 100] - - t.delete(150) - - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [20, 30, 50, 55, 100, 200] - assert [node.key for node in pre_order] == [50, 30, 20, 55, 200, 100] - - t1 = SplayTree(1000, 1000, backend=backend) - t1.insert(2000, 2000) - - trav2 = BinaryTreeTraversal(t1, backend=backend) - in_order = trav2.depth_first_search(order='in_order') - pre_order = trav2.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [1000, 2000] - assert [node.key for node in pre_order] == [2000, 1000] - - t.join(t1) - assert str(t) == "[(None, 100, 100, None), '', (6, 200, 200, 8), (4, 50, 50, None), (5, 30, 30, None), (None, 20, 20, None), (3, 55, 55, 0), (None, 1000, 1000, None), (7, 2000, 2000, None), '']" - - 
if backend == Backend.PYTHON: - trav3 = BinaryTreeTraversal(t, backend=backend) - in_order = trav3.depth_first_search(order='in_order') - pre_order = trav3.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [20, 30, 50, 55, 100, 200, 1000, 2000] - assert [node.key for node in pre_order] == [200, 55, 50, 30, 20, 100, 2000, 1000] - - s = t.split(200) - assert str(s) == "[(1, 2000, 2000, None), (None, 1000, 1000, None)]" - - trav4 = BinaryTreeTraversal(s, backend=backend) - in_order = trav4.depth_first_search(order='in_order') - pre_order = trav4.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [1000, 2000] - assert [node.key for node in pre_order] == [2000, 1000] - - if backend == Backend.PYTHON: - trav5 = BinaryTreeTraversal(t, backend=backend) - in_order = trav5.depth_first_search(order='in_order') - pre_order = trav5.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [20, 30, 50, 55, 100, 200] - assert [node.key for node in pre_order] == [200, 55, 50, 30, 20, 100] - -def test_SplayTree(): - _test_SplayTree(Backend.PYTHON) - -def test_cpp_SplayTree(): - _test_SplayTree(Backend.CPP) - -def _test_RedBlackTree(backend): - tree = RedBlackTree(backend=backend) - tree.insert(10, 10) - tree.insert(18, 18) - tree.insert(7, 7) - tree.insert(15, 15) - tree.insert(16, 16) - tree.insert(30, 30) - tree.insert(25, 25) - tree.insert(40, 40) - tree.insert(60, 60) - tree.insert(2, 2) - tree.insert(17, 17) - tree.insert(6, 6) - assert str(tree) == "[(11, 10, 10, 3), (10, 18, 18, None), (None, 7, 7, None), (None, 15, 15, None), (0, 16, 16, 6), (None, 30, 30, None), (1, 25, 25, 7), (5, 40, 40, 8), (None, 60, 60, None), (None, 2, 2, None), (None, 17, 17, None), (9, 6, 6, 2)]" - assert tree.root_idx == 4 - - trav = BinaryTreeTraversal(tree, backend=backend) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for 
node in in_order] == [2, 6, 7, 10, 15, 16, 17, 18, 25, 30, 40, 60] - assert [node.key for node in pre_order] == [16, 10, 6, 2, 7, 15, 25, 18, 17, 40, 30, 60] - - assert tree.lower_bound(0) == 2 - assert tree.lower_bound(2) == 2 - assert tree.lower_bound(3) == 6 - assert tree.lower_bound(7) == 7 - assert tree.lower_bound(25) == 25 - assert tree.lower_bound(32) == 40 - assert tree.lower_bound(41) == 60 - assert tree.lower_bound(60) == 60 - assert tree.lower_bound(61) is None - - assert tree.upper_bound(0) == 2 - assert tree.upper_bound(2) == 6 - assert tree.upper_bound(3) == 6 - assert tree.upper_bound(7) == 10 - assert tree.upper_bound(25) == 30 - assert tree.upper_bound(32) == 40 - assert tree.upper_bound(41) == 60 - assert tree.upper_bound(60) is None - assert tree.upper_bound(61) is None - - tree = RedBlackTree(backend=backend) - - assert tree.lower_bound(1) is None - assert tree.upper_bound(0) is None - - tree.insert(10) - tree.insert(20) - tree.insert(30) - tree.insert(40) - tree.insert(50) - tree.insert(60) - tree.insert(70) - tree.insert(80) - tree.insert(90) - tree.insert(100) - tree.insert(110) - tree.insert(120) - tree.insert(130) - tree.insert(140) - tree.insert(150) - tree.insert(160) - tree.insert(170) - tree.insert(180) - assert str(tree) == "[(None, 10, None, None), (0, 20, None, 2), (None, 30, None, None), (1, 40, None, 5), (None, 50, None, None), (4, 60, None, 6), (None, 70, None, None), (3, 80, None, 11), (None, 90, None, None), (8, 100, None, 10), (None, 110, None, None), (9, 120, None, 13), (None, 130, None, None), (12, 140, None, 15), (None, 150, None, None), (14, 160, None, 16), (None, 170, None, 17), (None, 180, None, None)]" - - assert tree._get_sibling(7) is None - - trav = BinaryTreeTraversal(tree, backend=backend) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [10, 20, 30, 40, 50, 60, 70, 80, 90, - 100, 110, 120, 130, 140, 150, 160, 
170, 180] - assert [node.key for node in pre_order] == [80, 40, 20, 10, 30, 60, 50, 70, 120, 100, - 90, 110, 140, 130, 160, 150, 170, 180] - - tree.delete(180) - tree.delete(130) - tree.delete(110) - tree.delete(190) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, - 120, 140, 150, 160, 170] - assert [node.key for node in pre_order] == [80, 40, 20, 10, 30, 60, 50, 70, 120, 100, - 90, 160, 140, 150, 170] - - tree.delete(170) - tree.delete(100) - tree.delete(60) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [10, 20, 30, 40, 50, 70, 80, 90, 120, 140, 150, 160] - assert [node.key for node in pre_order] == [80, 40, 20, 10, 30, 50, 70, 120, 90, 150, 140, 160] - - tree.delete(70) - tree.delete(140) - - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [10, 20, 30, 40, 50, 80, 90, 120, 150, 160] - assert [node.key for node in pre_order] == [80, 40, 20, 10, 30, 50, 120, 90, 150, 160] - - tree.delete(150) - tree.delete(120) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [10, 20, 30, 40, 50, 80, 90, 160] - assert [node.key for node in pre_order] == [40, 20, 10, 30, 80, 50, 90, 160] - - tree.delete(50) - tree.delete(80) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [10, 20, 30, 40, 90, 160] - assert [node.key for node in pre_order] == [40, 20, 10, 30, 90, 160] - - tree.delete(30) - tree.delete(20) - in_order = trav.depth_first_search(order='in_order') - pre_order = 
trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [10, 40, 90, 160] - assert [node.key for node in pre_order] == [40, 10, 90, 160] - - tree.delete(10) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [40, 90, 160] - assert [node.key for node in pre_order] == [90, 40, 160] - - tree.delete(40) - tree.delete(90) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [160] - assert [node.key for node in pre_order] == [160] - - tree.delete(160) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order if node.key is not None] == [] - assert [node.key for node in pre_order if node.key is not None] == [] - - tree = RedBlackTree(backend=backend) - tree.insert(50) - tree.insert(40) - tree.insert(30) - tree.insert(20) - tree.insert(10) - tree.insert(5) - - trav = BinaryTreeTraversal(tree, backend=backend) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [5, 10, 20, 30, 40, 50] - assert [node.key for node in pre_order] == [40, 20, 10, 5, 30, 50] - - assert tree.search(50) == 0 - assert tree.search(20) == 3 - assert tree.search(30) == 2 - tree.delete(50) - tree.delete(20) - tree.delete(30) - assert tree.search(50) is None - assert tree.search(20) is None - assert tree.search(30) is None - - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [5, 10, 40] - assert [node.key for node in pre_order] == [10, 5, 40] - - tree = RedBlackTree(backend=backend) - tree.insert(10) - tree.insert(5) - tree.insert(20) - tree.insert(15) - - trav = 
BinaryTreeTraversal(tree, backend=backend) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [5, 10, 15, 20] - assert [node.key for node in pre_order] == [10, 5, 20, 15] - - tree.delete(5) - - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [10, 15, 20] - assert [node.key for node in pre_order] == [15, 10, 20] - - tree = RedBlackTree(backend=backend) - tree.insert(10) - tree.insert(5) - tree.insert(20) - tree.insert(15) - tree.insert(2) - tree.insert(6) - - trav = BinaryTreeTraversal(tree,backend=backend) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [2, 5, 6, 10, 15, 20] - assert [node.key for node in pre_order] == [10, 5, 2, 6, 20, 15] - - tree.delete(10) - - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [2, 5, 6, 15, 20] - assert [node.key for node in pre_order] == [6, 5, 2, 20, 15] - -def test_RedBlackTree(): - _test_RedBlackTree(Backend.PYTHON) - -def test_cpp_RedBlackTree(): - _test_RedBlackTree(Backend.CPP) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/tests/test_heaps.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/tests/test_heaps.py deleted file mode 100644 index dece2f132..000000000 --- a/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/tests/test_heaps.py +++ /dev/null @@ -1,236 +0,0 @@ -from pydatastructs.trees.heaps import BinaryHeap, TernaryHeap, BinomialHeap, DHeap -from pydatastructs.linear_data_structures.arrays import DynamicOneDimensionalArray -from pydatastructs.miscellaneous_data_structures.binomial_trees import BinomialTree -from 
pydatastructs.utils.misc_util import TreeNode, BinomialTreeNode -from pydatastructs.utils.raises_util import raises -from collections import deque as Queue - -def test_BinaryHeap(): - - max_heap = BinaryHeap(heap_property="max") - - assert raises(IndexError, lambda: max_heap.extract()) - - max_heap.insert(100, 100) - max_heap.insert(19, 19) - max_heap.insert(36, 36) - max_heap.insert(17, 17) - max_heap.insert(3, 3) - max_heap.insert(25, 25) - max_heap.insert(1, 1) - max_heap.insert(2, 2) - max_heap.insert(7, 7) - assert str(max_heap) == \ - ("[(100, 100, [1, 2]), (19, 19, [3, 4]), " - "(36, 36, [5, 6]), (17, 17, [7, 8]), " - "(3, 3, []), (25, 25, []), (1, 1, []), " - "(2, 2, []), (7, 7, [])]") - - assert max_heap.extract().key == 100 - - expected_sorted_elements = [36, 25, 19, 17, 7, 3, 2, 1] - l = max_heap.heap[0].left - l = max_heap.heap[0].right - sorted_elements = [] - for _ in range(8): - sorted_elements.append(max_heap.extract().key) - assert expected_sorted_elements == sorted_elements - - elements = [ - TreeNode(7, 7), TreeNode(25, 25), TreeNode(100, 100), - TreeNode(1, 1), TreeNode(2, 2), TreeNode(3, 3), - TreeNode(17, 17), TreeNode(19, 19), TreeNode(36, 36) - ] - min_heap = BinaryHeap(elements=elements, heap_property="min") - assert min_heap.extract().key == 1 - - expected_sorted_elements = [2, 3, 7, 17, 19, 25, 36, 100] - sorted_elements = [min_heap.extract().key for _ in range(8)] - assert expected_sorted_elements == sorted_elements - - non_TreeNode_elements = [ - (7, 7), TreeNode(25, 25), TreeNode(100, 100), - TreeNode(1, 1), (2, 2), TreeNode(3, 3), - TreeNode(17, 17), TreeNode(19, 19), TreeNode(36, 36) - ] - assert raises(TypeError, lambda: - BinaryHeap(elements = non_TreeNode_elements, heap_property='min')) - - non_TreeNode_elements = DynamicOneDimensionalArray(int, 0) - non_TreeNode_elements.append(1) - non_TreeNode_elements.append(2) - assert raises(TypeError, lambda: - BinaryHeap(elements = non_TreeNode_elements, heap_property='min')) - - 
non_heapable = "[1, 2, 3]" - assert raises(ValueError, lambda: - BinaryHeap(elements = non_heapable, heap_property='min')) - -def test_TernaryHeap(): - max_heap = TernaryHeap(heap_property="max") - assert raises(IndexError, lambda: max_heap.extract()) - max_heap.insert(100, 100) - max_heap.insert(19, 19) - max_heap.insert(36, 36) - max_heap.insert(17, 17) - max_heap.insert(3, 3) - max_heap.insert(25, 25) - max_heap.insert(1, 1) - max_heap.insert(2, 2) - max_heap.insert(7, 7) - assert str(max_heap) == \ - ('[(100, 100, [1, 2, 3]), (25, 25, [4, 5, 6]), ' - '(36, 36, [7, 8]), (17, 17, []), ' - '(3, 3, []), (19, 19, []), (1, 1, []), ' - '(2, 2, []), (7, 7, [])]') - - assert max_heap.extract().key == 100 - - expected_sorted_elements = [36, 25, 19, 17, 7, 3, 2, 1] - sorted_elements = [] - for _ in range(8): - sorted_elements.append(max_heap.extract().key) - assert expected_sorted_elements == sorted_elements - - elements = [ - TreeNode(7, 7), TreeNode(25, 25), TreeNode(100, 100), - TreeNode(1, 1), TreeNode(2, 2), TreeNode(3, 3), - TreeNode(17, 17), TreeNode(19, 19), TreeNode(36, 36) - ] - min_heap = TernaryHeap(elements=elements, heap_property="min") - expected_extracted_element = min_heap.heap[0].key - assert min_heap.extract().key == expected_extracted_element - - expected_sorted_elements = [2, 3, 7, 17, 19, 25, 36, 100] - sorted_elements = [min_heap.extract().key for _ in range(8)] - assert expected_sorted_elements == sorted_elements - -def test_DHeap(): - assert raises(ValueError, lambda: DHeap(heap_property="none", d=4)) - max_heap = DHeap(heap_property="max", d=5) - assert raises(IndexError, lambda: max_heap.extract()) - max_heap.insert(100, 100) - max_heap.insert(19, 19) - max_heap.insert(36, 36) - max_heap.insert(17, 17) - max_heap.insert(3, 3) - max_heap.insert(25, 25) - max_heap.insert(1, 1) - max_heap = DHeap(max_heap.heap, heap_property="max", d=4) - max_heap.insert(2, 2) - max_heap.insert(7, 7) - assert str(max_heap) == \ - ('[(100, 100, [1, 2, 3, 4]), (25, 
25, [5, 6, 7, 8]), ' - '(36, 36, []), (17, 17, []), (3, 3, []), (19, 19, []), ' - '(1, 1, []), (2, 2, []), (7, 7, [])]') - - assert max_heap.extract().key == 100 - - expected_sorted_elements = [36, 25, 19, 17, 7, 3, 2, 1] - sorted_elements = [] - for _ in range(8): - sorted_elements.append(max_heap.extract().key) - assert expected_sorted_elements == sorted_elements - - elements = [ - TreeNode(7, 7), TreeNode(25, 25), TreeNode(100, 100), - TreeNode(1, 1), TreeNode(2, 2), TreeNode(3, 3), - TreeNode(17, 17), TreeNode(19, 19), TreeNode(36, 36) - ] - min_heap = DHeap(elements=DynamicOneDimensionalArray(TreeNode, 9, elements), heap_property="min") - assert min_heap.extract().key == 1 - - expected_sorted_elements = [2, 3, 7, 17, 19, 25, 36, 100] - sorted_elements = [min_heap.extract().key for _ in range(8)] - assert expected_sorted_elements == sorted_elements - -def test_BinomialHeap(): - - # Corner cases - assert raises(TypeError, lambda: - BinomialHeap( - root_list=[BinomialTreeNode(1, 1), None]) - ) is True - tree1 = BinomialTree(BinomialTreeNode(1, 1), 0) - tree2 = BinomialTree(BinomialTreeNode(2, 2), 0) - bh = BinomialHeap(root_list=[tree1, tree2]) - assert raises(TypeError, lambda: - bh.merge_tree(BinomialTreeNode(2, 2), None)) - assert raises(TypeError, lambda: - bh.merge(None)) - - # Testing BinomialHeap.merge - nodes = [BinomialTreeNode(1, 1), # 0 - BinomialTreeNode(3, 3), # 1 - BinomialTreeNode(9, 9), # 2 - BinomialTreeNode(11, 11), # 3 - BinomialTreeNode(6, 6), # 4 - BinomialTreeNode(14, 14), # 5 - BinomialTreeNode(2, 2), # 6 - BinomialTreeNode(7, 7), # 7 - BinomialTreeNode(4, 4), # 8 - BinomialTreeNode(8, 8), # 9 - BinomialTreeNode(12, 12), # 10 - BinomialTreeNode(10, 10), # 11 - BinomialTreeNode(5, 5), # 12 - BinomialTreeNode(21, 21)] # 13 - - nodes[2].add_children(nodes[3]) - nodes[4].add_children(nodes[5]) - nodes[6].add_children(nodes[9], nodes[8], nodes[7]) - nodes[7].add_children(nodes[11], nodes[10]) - nodes[8].add_children(nodes[12]) - 
nodes[10].add_children(nodes[13]) - - tree11 = BinomialTree(nodes[0], 0) - tree12 = BinomialTree(nodes[2], 1) - tree13 = BinomialTree(nodes[6], 3) - tree21 = BinomialTree(nodes[1], 0) - - heap1 = BinomialHeap(root_list=[tree11, tree12, tree13]) - heap2 = BinomialHeap(root_list=[tree21]) - - def bfs(heap): - bfs_trav = [] - for i in range(len(heap.root_list)): - layer = [] - bfs_q = Queue() - bfs_q.append(heap.root_list[i].root) - while len(bfs_q) != 0: - curr_node = bfs_q.popleft() - if curr_node is not None: - layer.append(curr_node.key) - for _i in range(curr_node.children._last_pos_filled + 1): - bfs_q.append(curr_node.children[_i]) - if layer != []: - bfs_trav.append(layer) - return bfs_trav - - heap1.merge(heap2) - expected_bfs_trav = [[1, 3, 9, 11], [2, 8, 4, 7, 5, 10, 12, 21]] - assert bfs(heap1) == expected_bfs_trav - - # Testing Binomial.find_minimum - assert heap1.find_minimum().key == 1 - - # Testing Binomial.delete_minimum - heap1.delete_minimum() - assert bfs(heap1) == [[3], [9, 11], [2, 8, 4, 7, 5, 10, 12, 21]] - assert raises(ValueError, lambda: heap1.decrease_key(nodes[3], 15)) - heap1.decrease_key(nodes[3], 0) - assert bfs(heap1) == [[3], [0, 9], [2, 8, 4, 7, 5, 10, 12, 21]] - heap1.delete(nodes[12]) - assert bfs(heap1) == [[3, 8], [0, 9, 2, 7, 4, 10, 12, 21]] - - # Testing BinomialHeap.insert - heap = BinomialHeap() - assert raises(IndexError, lambda: heap.find_minimum()) - heap.insert(1, 1) - heap.insert(3, 3) - heap.insert(6, 6) - heap.insert(9, 9) - heap.insert(14, 14) - heap.insert(11, 11) - heap.insert(2, 2) - heap.insert(7, 7) - assert bfs(heap) == [[1, 3, 6, 2, 9, 7, 11, 14]] diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/tests/test_m_ary_trees.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/tests/test_m_ary_trees.py deleted file mode 100644 index 6cbc84ace..000000000 --- a/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/tests/test_m_ary_trees.py +++ /dev/null @@ 
-1,5 +0,0 @@ -from pydatastructs import MAryTree - -def test_MAryTree(): - m = MAryTree(1, 1) - assert str(m) == '[(1, 1)]' diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/tests/test_space_partitioning_tree.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/tests/test_space_partitioning_tree.py deleted file mode 100644 index 99f0e84cc..000000000 --- a/build-install/usr/lib/python3.12/site-packages/pydatastructs/trees/tests/test_space_partitioning_tree.py +++ /dev/null @@ -1,20 +0,0 @@ -from pydatastructs import OneDimensionalSegmentTree -from pydatastructs.utils.raises_util import raises - -def test_OneDimensionalSegmentTree(): - ODST = OneDimensionalSegmentTree - segt = ODST([(0, 5), (1, 6), (9, 13), (1, 2), (3, 8), (9, 20)]) - assert segt.cache is False - segt2 = ODST([(1, 4)]) - assert str(segt2) == ("[(None, [False, 0, 1, False], None, None), " - "(None, [True, 1, 1, True], ['(None, [True, 1, 4, True], None, None)'], " - "None), (None, [False, 1, 4, False], None, None), (None, [True, 4, 4, True], " - "None, None), (0, [False, 0, 1, True], None, 1), (2, [False, 1, 4, True], " - "['(None, [True, 1, 4, True], None, None)'], 3), (4, [False, 0, 4, True], " - "None, 5), (None, [False, 4, 5, False], None, None), (-3, [False, 0, 5, " - "False], None, -2)]") - assert len(segt.query(1.5)) == 3 - assert segt.cache is True - assert len(segt.query(-1)) == 0 - assert len(segt.query(2.8)) == 2 - assert raises(ValueError, lambda: ODST([(1, 2, 3)])) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/__init__.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/__init__.py deleted file mode 100644 index c4971be32..000000000 --- a/build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -__all__ = [] - -from . 
import ( - misc_util, - testing_util, -) - -from .misc_util import ( - TreeNode, - MAryTreeNode, - LinkedListNode, - BinomialTreeNode, - AdjacencyListGraphNode, - AdjacencyMatrixGraphNode, - GraphEdge, - Set, - CartesianTreeNode, - RedBlackTreeNode, - TrieNode, - SkipNode, - summation, - greatest_common_divisor, - minimum, - Backend -) -from .testing_util import test - -__all__.extend(misc_util.__all__) -__all__.extend(testing_util.__all__) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/_backend/__init__.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/_backend/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/misc_util.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/misc_util.py deleted file mode 100644 index 3672c58b9..000000000 --- a/build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/misc_util.py +++ /dev/null @@ -1,632 +0,0 @@ -import math, pydatastructs -from enum import Enum -from pydatastructs.utils._backend.cpp import _nodes, _graph_utils - -__all__ = [ - 'TreeNode', - 'MAryTreeNode', - 'LinkedListNode', - 'BinomialTreeNode', - 'AdjacencyListGraphNode', - 'AdjacencyMatrixGraphNode', - 'GraphEdge', - 'Set', - 'CartesianTreeNode', - 'RedBlackTreeNode', - 'TrieNode', - 'SkipNode', - 'minimum', - 'summation', - 'greatest_common_divisor', - 'Backend' -] - - -class Backend(Enum): - - PYTHON = 'Python' - CPP = 'Cpp' - LLVM = 'Llvm' - - def __str__(self): - return self.value - -def raise_if_backend_is_not_python(api, backend): - if backend != Backend.PYTHON: - raise ValueError("As of {} version, only {} backend is supported for {} API".format( - pydatastructs.__version__, str(Backend.PYTHON), api)) - -_check_type = lambda a, t: isinstance(a, t) -NoneType = type(None) - -class Node(object): - """ - Abstract class representing a node. 
- """ - pass - -class TreeNode(Node): - """ - Represents node in trees. - - Parameters - ========== - - key - Required for comparison operations. - data - Any valid data to be stored in the node. - left: int - Optional, index of the left child node. - right: int - Optional, index of the right child node. - backend: pydatastructs.Backend - The backend to be used. Available backends: Python and C++ - Optional, by default, the Python backend is used. For faster execution, use the C++ backend. - """ - - __slots__ = ['key', 'data', 'left', 'right', 'is_root', - 'height', 'parent', 'size'] - - @classmethod - def methods(cls): - return ['__new__', '__str__'] - - def __new__(cls, key, data=None, **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.CPP: - return _nodes.TreeNode(key, data, **kwargs) - obj = Node.__new__(cls) - obj.data, obj.key = data, key - obj.left, obj.right, obj.parent, obj.height, obj.size = \ - None, None, None, 0, 1 - obj.is_root = False - return obj - - def __str__(self): - """ - Used for printing. - """ - return str((self.left, self.key, self.data, self.right)) - -class CartesianTreeNode(TreeNode): - """ - Represents node in cartesian trees. - - Parameters - ========== - - key - Required for comparison operations. - data - Any valid data to be stored in the node. - priority: int - An integer value for heap property. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - """ - __slots__ = ['key', 'data', 'priority'] - - def __new__(cls, key, priority, data=None, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = TreeNode.__new__(cls, key, data) - obj.priority = priority - return obj - - def __str__(self): - """ - Used for printing. - """ - return str((self.left, self.key, self.priority, self.data, self.right)) - -class RedBlackTreeNode(TreeNode): - """ - Represents node in red-black trees. 
- - Parameters - ========== - - key - Required for comparison operations. - data - Any valid data to be stored in the node. - color - 0 for black and 1 for red. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - """ - __slots__ = ['key', 'data', 'color'] - - @classmethod - def methods(cls): - return ['__new__'] - - def __new__(cls, key, data=None, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = TreeNode.__new__(cls, key, data) - obj.color = 1 - return obj - -class BinomialTreeNode(TreeNode): - """ - Represents node in binomial trees. - - Parameters - ========== - - key - Required for comparison operations. - data - Any valid data to be stored in the node. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Note - ==== - - The following are the data members of the class: - - parent: BinomialTreeNode - A reference to the BinomialTreeNode object - which is a prent of this. - children: DynamicOneDimensionalArray - An array of references to BinomialTreeNode objects - which are children this node. - is_root: bool, by default, False - If the current node is a root of the tree then - set it to True otherwise False. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. 
- """ - __slots__ = ['parent', 'key', 'children', 'data', 'is_root'] - - @classmethod - def methods(cls): - return ['__new__', 'add_children', '__str__'] - - def __new__(cls, key, data=None, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - from pydatastructs.linear_data_structures.arrays import DynamicOneDimensionalArray - obj = Node.__new__(cls) - obj.data, obj.key = data, key - obj.children, obj.parent, obj.is_root = ( - DynamicOneDimensionalArray(BinomialTreeNode, 0), - None, - False - ) - return obj - - def add_children(self, *children): - """ - Adds children of current node. - """ - for child in children: - self.children.append(child) - child.parent = self - - def __str__(self): - """ - For printing the key and data. - """ - return str((self.key, self.data)) - -class MAryTreeNode(TreeNode): - """ - Represents node in an M-ary trees. - - Parameters - ========== - - key - Required for comparison operations. - data - Any valid data to be stored in the node. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Note - ==== - - The following are the data members of the class: - - children: DynamicOneDimensionalArray - An array of indices which stores the children of - this node in the M-ary tree array - is_root: bool, by default, False - If the current node is a root of the tree then - set it to True otherwise False. 
- """ - __slots__ = ['key', 'children', 'data', 'is_root'] - - @classmethod - def methods(cls): - return ['__new__', 'add_children', '__str__'] - - def __new__(cls, key, data=None, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - from pydatastructs.linear_data_structures.arrays import DynamicOneDimensionalArray - obj = Node.__new__(cls) - obj.data = data - obj.key = key - obj.is_root = False - obj.children = DynamicOneDimensionalArray(int, 0) - return obj - - def add_children(self, *children): - """ - Adds children of current node. - """ - for child in children: - self.children.append(child) - - def __str__(self): - return str((self.key, self.data)) - - -class LinkedListNode(Node): - """ - Represents node in linked lists. - - Parameters - ========== - - key - Any valid identifier to uniquely - identify the node in the linked list. - data - Any valid data to be stored in the node. - links - List of names of attributes which should - be used as links to other nodes. - addrs - List of address of nodes to be assigned to - each of the attributes in links. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - """ - @classmethod - def methods(cls): - return ['__new__', '__str__'] - - def __new__(cls, key, data=None, links=None, addrs=None, - **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - if links is None: - links = ['next'] - if addrs is None: - addrs = [None] - obj = Node.__new__(cls) - obj.key = key - obj.data = data - for link, addr in zip(links, addrs): - obj.__setattr__(link, addr) - obj.__slots__ = ['key', 'data'] + links - return obj - - def __str__(self): - return str((self.key, self.data)) - -class SkipNode(Node): - """ - Represents node in linked lists. - - Parameters - ========== - - key - Any valid identifier to uniquely - identify the node in the skip list. 
- data - Any valid data to be stored in the node. - next - Reference to the node lying just forward - to the current node. - Optional, by default, None. - down - Reference to the node lying just below the - current node. - Optional, by default, None. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - """ - - __slots__ = ['key', 'data', 'next', 'down'] - - def __new__(cls, key, data=None, next=None, down=None, - **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = Node.__new__(cls) - obj.key, obj.data = key, data - obj.next, obj.down = next, down - return obj - - def __str__(self): - return str((self.key, self.data)) - -class GraphNode(Node): - """ - Abastract class for graph nodes/vertices. - """ - def __str__(self): - return str((self.name, self.data)) - -class AdjacencyListGraphNode(GraphNode): - """ - Represents nodes for adjacency list implementation - of graphs. - - Parameters - ========== - - name: str - The name of the node by which it is identified - in the graph. Must be unique. - data - The data to be stored at each graph node. - adjacency_list: list - Any valid iterator to initialize the adjacent - nodes of the current node. - Optional, by default, None - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. 
- """ - @classmethod - def methods(cls): - return ['__new__', 'add_adjacent_node', - 'remove_adjacent_node'] - - def __new__(cls, name, data=None, adjacency_list=[], - **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.PYTHON: - obj = GraphNode.__new__(cls) - obj.name, obj.data = str(name), data - obj._impl = 'adjacency_list' - if len(adjacency_list) > 0: - for node in adjacency_list: - obj.__setattr__(node.name, node) - obj.adjacent = adjacency_list if len(adjacency_list) > 0 \ - else [] - return obj - else: - return _graph_utils.AdjacencyListGraphNode(name, data, adjacency_list) - - def add_adjacent_node(self, name, data=None): - """ - Adds adjacent node to the current node's - adjacency list with given name and data. - """ - if hasattr(self, name): - getattr(self, name).data = data - else: - new_node = AdjacencyListGraphNode(name, data) - self.__setattr__(new_node.name, new_node) - self.adjacent.append(new_node.name) - - def remove_adjacent_node(self, name): - """ - Removes node with given name from - adjacency list. - """ - if not hasattr(self, name): - raise ValueError("%s is not adjacent to %s"%(name, self.name)) - self.adjacent.remove(name) - delattr(self, name) - -class AdjacencyMatrixGraphNode(GraphNode): - """ - Represents nodes for adjacency matrix implementation - of graphs. - - Parameters - ========== - - name: str - The index of the node in the AdjacencyMatrix. - data - The data to be stored at each graph node. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. 
- """ - __slots__ = ['name', 'data'] - - @classmethod - def methods(cls): - return ['__new__'] - - def __new__(cls, name, data=None, - **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.PYTHON: - obj = GraphNode.__new__(cls) - obj.name, obj.data, obj.is_connected = \ - str(name), data, None - obj._impl = 'adjacency_matrix' - return obj - else: - return _graph_utils.AdjacencyMatrixGraphNode(str(name), data) - -class GraphEdge(object): - """ - Represents the concept of edges in graphs. - - Parameters - ========== - - node1: GraphNode or it's child classes - The source node of the edge. - node2: GraphNode or it's child classes - The target node of the edge. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - """ - @classmethod - def methods(cls): - return ['__new__', '__str__'] - - def __new__(cls, node1, node2, value=None, - **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.PYTHON: - obj = object.__new__(cls) - obj.source, obj.target = node1, node2 - obj.value = value - return obj - else: - return _graph_utils.GraphEdge(node1, node2, value) - - def __str__(self): - return str((self.source.name, self.target.name)) - -class Set(object): - """ - Represents a set in a forest of disjoint sets. - - Parameters - ========== - - key: Hashable python object - The key which uniquely identifies - the set. - data: Python object - The data to be stored in the set. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. 
- """ - - __slots__ = ['parent', 'size', 'key', 'data'] - - @classmethod - def methods(cls): - return ['__new__'] - - def __new__(cls, key, data=None, - **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = object.__new__(cls) - obj.key = key - obj.data = data - obj.parent, obj.size = [None]*2 - return obj - -class TrieNode(Node): - """ - Represents nodes in the trie data structure. - - Parameters - ========== - - char: The character stored in the current node. - Optional, by default None. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - """ - - __slots__ = ['char', '_children', 'is_terminal'] - - @classmethod - def methods(cls): - return ['__new__', 'add_child', 'get_child', 'remove_child'] - - def __new__(cls, char=None, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = Node.__new__(cls) - obj.char = char - obj._children = {} - obj.is_terminal = False - return obj - - def add_child(self, trie_node) -> None: - self._children[trie_node.char] = trie_node - - def get_child(self, char: str): - return self._children.get(char, None) - - def remove_child(self, char: str) -> None: - self._children.pop(char) - -def _comp(u, v, tcomp): - """ - Overloaded comparator for comparing - two values where any one of them can be - `None`. - """ - if u is None and v is not None: - return False - elif u is not None and v is None: - return True - elif u is None and v is None: - return False - else: - return tcomp(u, v) - -def _check_range_query_inputs(input, bounds): - start, end = input - if start >= end: - raise ValueError("Input (%d, %d) range is empty."%(start, end)) - if start < bounds[0] or end > bounds[1]: - raise IndexError("Input (%d, %d) range is out of " - "bounds of array indices (%d, %d)." 
- %(start, end, bounds[0], bounds[1])) - -def minimum(x_y): - if len(x_y) == 1: - return x_y[0] - - x, y = x_y - if x is None or y is None: - return x if y is None else y - - return min(x, y) - -def greatest_common_divisor(x_y): - if len(x_y) == 1: - return x_y[0] - - x, y = x_y - if x is None or y is None: - return x if y is None else y - - return math.gcd(x, y) - -def summation(x_y): - if len(x_y) == 1: - return x_y[0] - - x, y = x_y - if x is None or y is None: - return x if y is None else y - - return x + y diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/raises_util.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/raises_util.py deleted file mode 100644 index 3a324d38d..000000000 --- a/build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/raises_util.py +++ /dev/null @@ -1,17 +0,0 @@ -import pytest - -def raises(exception, code): - """ - Utility for testing exceptions. - - Parameters - ========== - - exception - A valid python exception - code: lambda - Code that causes exception - """ - with pytest.raises(exception): - code() - return True diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/testing_util.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/testing_util.py deleted file mode 100644 index e5c0627b5..000000000 --- a/build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/testing_util.py +++ /dev/null @@ -1,83 +0,0 @@ -import os -import pathlib -import glob -import types - -__all__ = ['test'] - - -# Root pydatastructs directory -ROOT_DIR = pathlib.Path(os.path.abspath(__file__)).parents[1] - - -SKIP_FILES = ['testing_util.py'] - -def test(submodules=None, only_benchmarks=False, - benchmarks_size=1000, **kwargs): - """ - Runs the library tests using pytest - - Parameters - ========== - - submodules: Optional, list[str] - List of submodules test to run. 
By default runs - all the tests - """ - try: - import pytest - except ImportError: - raise Exception("pytest must be installed. Use `pip install pytest` " - "to install it.") - - # set benchmarks size - os.environ["PYDATASTRUCTS_BENCHMARK_SIZE"] = str(benchmarks_size) - test_files = [] - if submodules: - if not isinstance(submodules, (list, tuple)): - submodules = [submodules] - for path in glob.glob(f'{ROOT_DIR}/**/test_*.py', recursive=True): - skip_test = False - for skip in SKIP_FILES: - if skip in path: - skip_test = True - break - if skip_test: - continue - for sub_var in submodules: - if isinstance(sub_var, types.ModuleType): - sub = sub_var.__name__.split('.')[-1] - elif isinstance(sub_var, str): - sub = sub_var - else: - raise Exception("Submodule should be of type: str or module") - if sub in path: - if not only_benchmarks: - if 'benchmarks' not in path: - test_files.append(path) - else: - if 'benchmarks' in path: - test_files.append(path) - break - else: - for path in glob.glob(f'{ROOT_DIR}/**/test_*.py', recursive=True): - skip_test = False - for skip in SKIP_FILES: - if skip in path: - skip_test = True - break - if skip_test: - continue - if not only_benchmarks: - if 'benchmarks' not in path: - test_files.append(path) - else: - if 'benchmarks' in path: - test_files.append(path) - - extra_args = [] - if kwargs.get("n", False) is not False: - extra_args.append("-n") - extra_args.append(str(kwargs["n"])) - - pytest.main(extra_args + test_files) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/tests/__init__.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/tests/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/tests/test_code_quality.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/tests/test_code_quality.py deleted file mode 100644 index 67afe49e8..000000000 --- 
a/build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/tests/test_code_quality.py +++ /dev/null @@ -1,239 +0,0 @@ -import os, re, sys, pydatastructs, inspect -from typing import Type -import pytest - -def _list_files(checker): - root_path = os.path.abspath( - os.path.join( - os.path.split(__file__)[0], - os.pardir, os.pardir)) - code_files = [] - for (dirpath, _, filenames) in os.walk(root_path): - for _file in filenames: - if checker(_file): - code_files.append(os.path.join(dirpath, _file)) - return code_files - -checker = lambda _file: (re.match(r".*\.py$", _file) or - re.match(r".*\.cpp$", _file) or - re.match(r".*\.hpp$", _file)) -code_files = _list_files(checker) - -def test_trailing_white_spaces(): - messages = [("The following places in your code " - "end with white spaces.")] - msg = "{}:{}" - for file_path in code_files: - file = open(file_path, "r") - line = file.readline() - line_number = 1 - while line != "": - if line.endswith(" \n") or line.endswith("\t\n") \ - or line.endswith(" ") or line.endswith("\t"): - messages.append(msg.format(file_path, line_number)) - line = file.readline() - line_number += 1 - file.close() - - if len(messages) > 1: - assert False, '\n'.join(messages) - -def test_final_new_lines(): - messages = [("The following files in your code " - "do not end with a single new line.")] - msg1 = "No new line in {}:{}" - msg2 = "More than one new line in {}:{}" - for file_path in code_files: - file = open(file_path, "r") - lines = [] - line = file.readline() - while line != "": - lines.append(line) - line = file.readline() - if lines: - if lines[-1][-1] != "\n": - messages.append(msg1.format(file_path, len(lines))) - if lines[-1] == "\n" and lines[-2][-1] == "\n": - messages.append(msg2.format(file_path, len(lines))) - file.close() - - if len(messages) > 1: - assert False, '\n'.join(messages) - -def test_comparison_True_False_None(): - messages = [("The following places in your code " - "use `!=` or `==` for comparing 
True/False/None." - "Please use `is` instead.")] - msg = "{}:{}" - checker = lambda _file: re.match(r".*\.py$", _file) - py_files = _list_files(checker) - for file_path in py_files: - if file_path.find("test_code_quality.py") == -1: - file = open(file_path, "r") - line = file.readline() - line_number = 1 - while line != "": - if ((line.find("== True") != -1) or - (line.find("== False") != -1) or - (line.find("== None") != -1) or - (line.find("!= True") != -1) or - (line.find("!= False") != -1) or - (line.find("!= None") != -1)): - messages.append(msg.format(file_path, line_number)) - line = file.readline() - line_number += 1 - file.close() - - if len(messages) > 1: - assert False, '\n'.join(messages) - -@pytest.mark.xfail -def test_reinterpret_cast(): - - def is_variable(str): - for ch in str: - if not (ch == '_' or ch.isalnum()): - return False - return True - - checker = lambda _file: (re.match(r".*\.cpp$", _file) or - re.match(r".*\.hpp$", _file)) - cpp_files = _list_files(checker) - messages = [("The following lines should use reinterpret_cast" - " to cast pointers from one type to another")] - msg = "Casting to {} at {}:{}" - for file_path in cpp_files: - file = open(file_path, "r") - line = file.readline() - line_number = 1 - while line != "": - found_open = False - between_open_close = "" - for char in line: - if char == '(': - found_open = True - elif char == ')': - if (between_open_close and - between_open_close[-1] == '*' and - is_variable(between_open_close[:-1])): - messages.append(msg.format(between_open_close[:-1], - file_path, line_number)) - between_open_close = "" - found_open = False - elif char != ' ' and found_open: - between_open_close += char - line = file.readline() - line_number += 1 - file.close() - - if len(messages) > 1: - assert False, '\n'.join(messages) - -def test_presence_of_tabs(): - messages = [("The following places in your code " - "use tabs instead of spaces.")] - msg = "{}:{}" - for file_path in code_files: - file = 
open(file_path, "r") - line_number = 1 - line = file.readline() - while line != "": - if (line.find('\t') != -1): - messages.append(msg.format(file_path, line_number)) - line = file.readline() - line_number += 1 - file.close() - - if len(messages) > 1: - assert False, '\n'.join(messages) - -def _apis(): - import pydatastructs as pyds - return [ - pyds.graphs.adjacency_list.AdjacencyList, - pyds.graphs.adjacency_matrix.AdjacencyMatrix, - pyds.DoublyLinkedList, pyds.SinglyLinkedList, - pyds.SinglyCircularLinkedList, - pyds.DoublyCircularLinkedList, - pyds.OneDimensionalArray, pyds.MultiDimensionalArray, - pyds.DynamicOneDimensionalArray, - pyds.trees.BinaryTree, pyds.BinarySearchTree, - pyds.AVLTree, pyds.SplayTree, pyds.BinaryTreeTraversal, - pyds.DHeap, pyds.BinaryHeap, pyds.TernaryHeap, pyds.BinomialHeap, - pyds.MAryTree, pyds.OneDimensionalSegmentTree, - pyds.Queue, pyds.miscellaneous_data_structures.queue.ArrayQueue, - pyds.miscellaneous_data_structures.queue.LinkedListQueue, - pyds.PriorityQueue, - pyds.miscellaneous_data_structures.queue.LinkedListPriorityQueue, - pyds.miscellaneous_data_structures.queue.BinaryHeapPriorityQueue, - pyds.miscellaneous_data_structures.queue.BinomialHeapPriorityQueue, - pyds.Stack, pyds.miscellaneous_data_structures.stack.ArrayStack, - pyds.miscellaneous_data_structures.stack.LinkedListStack, - pyds.DisjointSetForest, pyds.BinomialTree, pyds.TreeNode, pyds.MAryTreeNode, - pyds.LinkedListNode, pyds.BinomialTreeNode, pyds.AdjacencyListGraphNode, - pyds.AdjacencyMatrixGraphNode, pyds.GraphEdge, pyds.Set, pyds.BinaryIndexedTree, - pyds.CartesianTree, pyds.CartesianTreeNode, pyds.Treap, pyds.RedBlackTreeNode, pyds.RedBlackTree, - pyds.Trie, pyds.TrieNode, pyds.SkipList, pyds.RangeQueryStatic, pyds.RangeQueryDynamic, pyds.SparseTable, - pyds.miscellaneous_data_structures.segment_tree.OneDimensionalArraySegmentTree, - pyds.bubble_sort, pyds.linear_search, pyds.binary_search, pyds.jump_search, - pyds.selection_sort, pyds.insertion_sort, 
pyds.quick_sort, pyds.intro_sort] - -def test_public_api(): - pyds = pydatastructs - apis = _apis() - print("\n\nAPI Report") - print("==========") - for name in apis: - if inspect.isclass(name): - _class = name - mro = _class.__mro__ - must_methods = _class.methods() - print("\n" + str(name)) - print("Methods Implemented") - print(must_methods) - print("Parent Classes") - print(mro[1:]) - for supercls in mro: - if supercls != _class: - for method in must_methods: - if hasattr(supercls, method) and \ - getattr(supercls, method) == \ - getattr(_class, method): - assert False, ("%s class doesn't " - "have %s method implemented."%( - _class, method - )) - -def test_backend_argument_message(): - - import pydatastructs as pyds - backend_implemented = [ - pyds.OneDimensionalArray, - pyds.DynamicOneDimensionalArray, - pyds.quick_sort, - pyds.AdjacencyListGraphNode, - pyds.AdjacencyMatrixGraphNode, - pyds.GraphEdge - ] - - def call_and_raise(api, pos_args_count=0): - try: - if pos_args_count == 0: - api(backend=None) - elif pos_args_count == 1: - api(None, backend=None) - elif pos_args_count == 2: - api(None, None, backend=None) - except ValueError as value_error: - assert str(api) in value_error.args[0] - except TypeError as type_error: - max_pos_args_count = 2 - if pos_args_count <= max_pos_args_count: - call_and_raise(api, pos_args_count + 1) - else: - raise type_error - - apis = _apis() - for api in apis: - if api not in backend_implemented: - call_and_raise(api, 0) diff --git a/build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/tests/test_misc_util.py b/build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/tests/test_misc_util.py deleted file mode 100644 index 13ba2ec8e..000000000 --- a/build-install/usr/lib/python3.12/site-packages/pydatastructs/utils/tests/test_misc_util.py +++ /dev/null @@ -1,84 +0,0 @@ -from pydatastructs.utils import (TreeNode, AdjacencyListGraphNode, AdjacencyMatrixGraphNode, - GraphEdge, BinomialTreeNode, 
MAryTreeNode, CartesianTreeNode, RedBlackTreeNode, SkipNode) -from pydatastructs.utils.raises_util import raises -from pydatastructs.utils.misc_util import Backend - -def test_cpp_TreeNode(): - n = TreeNode(1,100,backend=Backend.CPP) - assert str(n) == "(None, 1, 100, None)" - -def test_AdjacencyListGraphNode(): - g_1 = AdjacencyListGraphNode('g_1', 1) - g_2 = AdjacencyListGraphNode('g_2', 2) - g = AdjacencyListGraphNode('g', 0, adjacency_list=[g_1, g_2]) - g.add_adjacent_node('g_3', 3) - assert g.g_1.name == 'g_1' - assert g.g_2.name == 'g_2' - assert g.g_3.name == 'g_3' - g.remove_adjacent_node('g_3') - assert hasattr(g, 'g_3') is False - assert raises(ValueError, lambda: g.remove_adjacent_node('g_3')) - g.add_adjacent_node('g_1', 4) - assert g.g_1.data == 4 - assert str(g) == "('g', 0)" - - h_1 = AdjacencyListGraphNode('h_1', 1, backend = Backend.CPP) - h_2 = AdjacencyListGraphNode('h_2', 2, backend = Backend.CPP) - assert str(h_1) == "('h_1', 1)" - h = AdjacencyListGraphNode('h', 0, adjacency_list = [h_1, h_2], backend = Backend.CPP) - h.add_adjacent_node('h_3', 3) - assert h.adjacent['h_1'].name == 'h_1' - assert h.adjacent['h_2'].name == 'h_2' - assert h.adjacent['h_3'].name == 'h_3' - h.remove_adjacent_node('h_3') - assert 'h_3' not in h.adjacent - assert raises(ValueError, lambda: h.remove_adjacent_node('h_3')) - h.add_adjacent_node('h_1', 4) - assert h.adjacent['h_1'] == 4 - assert str(h) == "('h', 0)" - h_5 = AdjacencyListGraphNode('h_5', h_1, backend = Backend.CPP) - assert h_5.data == h_1 - -def test_AdjacencyMatrixGraphNode(): - g = AdjacencyMatrixGraphNode("1", 3) - g2 = AdjacencyMatrixGraphNode("1", 3, backend = Backend.CPP) - assert str(g) == "('1', 3)" - assert str(g2) == "('1', 3)" - g3 = AdjacencyListGraphNode("3", g2, backend = Backend.CPP) - assert g3.data == g2 - - -def test_GraphEdge(): - g_1 = AdjacencyListGraphNode('g_1', 1) - g_2 = AdjacencyListGraphNode('g_2', 2) - e = GraphEdge(g_1, g_2, value=2) - assert str(e) == "('g_1', 'g_2')" - - 
h_1 = AdjacencyListGraphNode('h_1', 1, backend = Backend.CPP) - h_2 = AdjacencyListGraphNode('h_2', 2, backend = Backend.CPP) - e2 = GraphEdge(h_1, h_2, value = 2, backend = Backend.CPP) - assert str(e2) == "('h_1', 'h_2', 2)" - -def test_BinomialTreeNode(): - b = BinomialTreeNode(1,1) - b.add_children(*[BinomialTreeNode(i,i) for i in range(2,10)]) - assert str(b) == '(1, 1)' - assert str(b.children) == "['(2, 2)', '(3, 3)', '(4, 4)', '(5, 5)', '(6, 6)', '(7, 7)', '(8, 8)', '(9, 9)']" - -def test_MAryTreeNode(): - m = MAryTreeNode(1, 1) - m.add_children(*list(range(2, 10))) - assert str(m) == "(1, 1)" - assert str(m.children) == "['2', '3', '4', '5', '6', '7', '8', '9']" - -def test_CartesianTreeNode(): - c = CartesianTreeNode(1, 1, 1) - assert str(c) == "(None, 1, 1, 1, None)" - -def test_RedBlackTreeNode(): - c = RedBlackTreeNode(1, 1) - assert str(c) == "(None, 1, 1, None)" - -def test_SkipNode(): - c = SkipNode(1) - assert str(c) == '(1, None)' diff --git a/lib/python3.12/site-packages/pydatastructs/__init__.py b/lib/python3.12/site-packages/pydatastructs/__init__.py deleted file mode 100644 index 27cc5a202..000000000 --- a/lib/python3.12/site-packages/pydatastructs/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -from .utils import * -from .linear_data_structures import * -from .trees import * -from .miscellaneous_data_structures import * -from .graphs import * -from .strings import * - -__version__ = "1.0.1-dev" diff --git a/lib/python3.12/site-packages/pydatastructs/graphs/__init__.py b/lib/python3.12/site-packages/pydatastructs/graphs/__init__.py deleted file mode 100644 index 21e0a5f35..000000000 --- a/lib/python3.12/site-packages/pydatastructs/graphs/__init__.py +++ /dev/null @@ -1,28 +0,0 @@ -__all__ = [] - -from . import graph -from .graph import ( - Graph -) -__all__.extend(graph.__all__) - -from . import algorithms -from . import adjacency_list -from . 
import adjacency_matrix - -from .algorithms import ( - breadth_first_search, - breadth_first_search_parallel, - minimum_spanning_tree, - minimum_spanning_tree_parallel, - strongly_connected_components, - depth_first_search, - shortest_paths, - all_pair_shortest_paths, - topological_sort, - topological_sort_parallel, - max_flow, - find_bridges -) - -__all__.extend(algorithms.__all__) diff --git a/lib/python3.12/site-packages/pydatastructs/graphs/_backend/__init__.py b/lib/python3.12/site-packages/pydatastructs/graphs/_backend/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/lib/python3.12/site-packages/pydatastructs/graphs/adjacency_list.py b/lib/python3.12/site-packages/pydatastructs/graphs/adjacency_list.py deleted file mode 100644 index bd901b380..000000000 --- a/lib/python3.12/site-packages/pydatastructs/graphs/adjacency_list.py +++ /dev/null @@ -1,101 +0,0 @@ -from pydatastructs.graphs.graph import Graph -from pydatastructs.graphs._backend.cpp import _graph -from pydatastructs.utils.misc_util import ( - GraphEdge, Backend, raise_if_backend_is_not_python) - -__all__ = [ - 'AdjacencyList' -] - -class AdjacencyList(Graph): - """ - Adjacency list implementation of graphs. 
- - See also - ======== - - pydatastructs.graphs.graph.Graph - """ - def __new__(cls, *vertices, **kwargs): - - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.PYTHON: - obj = object.__new__(cls) - for vertex in vertices: - obj.__setattr__(vertex.name, vertex) - obj.vertices = [vertex.name for vertex in vertices] - obj.edge_weights = {} - obj._impl = 'adjacency_list' - return obj - else: - graph = _graph.AdjacencyListGraph() - for vertice in vertices: - graph.add_vertex(vertice) - return graph - - @classmethod - def methods(self): - return ['is_adjacent', 'neighbors', - 'add_vertex', 'remove_vertex', 'add_edge', - 'get_edge', 'remove_edge', '__new__'] - - def is_adjacent(self, node1, node2): - node1 = self.__getattribute__(node1) - return hasattr(node1, node2) - - def num_vertices(self): - return len(self.vertices) - - def num_edges(self): - return sum(len(self.neighbors(v)) for v in self.vertices) - - def neighbors(self, node): - node = self.__getattribute__(node) - return [self.__getattribute__(name) for name in node.adjacent] - - def add_vertex(self, node): - if not hasattr(self, node.name): - self.vertices.append(node.name) - self.__setattr__(node.name, node) - - def remove_vertex(self, name): - delattr(self, name) - self.vertices.remove(name) - for node in self.vertices: - node_obj = self.__getattribute__(node) - if hasattr(node_obj, name): - delattr(node_obj, name) - node_obj.adjacent.remove(name) - - def add_edge(self, source, target, cost=None): - source, target = str(source), str(target) - error_msg = ("Vertex %s is not present in the graph." - "Call Graph.add_vertex to add a new" - "vertex. Graph.add_edge is only responsible" - "for adding edges and it will not add new" - "vertices on its own. 
This is done to maintain" - "clear separation between the functionality of" - "these two methods.") - if not hasattr(self, source): - raise ValueError(error_msg % (source)) - if not hasattr(self, target): - raise ValueError(error_msg % (target)) - - source, target = self.__getattribute__(source), \ - self.__getattribute__(target) - source.add_adjacent_node(target.name) - if cost is not None: - self.edge_weights[source.name + "_" + target.name] = \ - GraphEdge(source, target, cost) - - def get_edge(self, source, target): - return self.edge_weights.get( - source + "_" + target, - None) - - def remove_edge(self, source, target): - source, target = self.__getattribute__(source), \ - self.__getattribute__(target) - source.remove_adjacent_node(target.name) - self.edge_weights.pop(source.name + "_" + target.name, - None) diff --git a/lib/python3.12/site-packages/pydatastructs/graphs/adjacency_matrix.py b/lib/python3.12/site-packages/pydatastructs/graphs/adjacency_matrix.py deleted file mode 100644 index 9c2326b86..000000000 --- a/lib/python3.12/site-packages/pydatastructs/graphs/adjacency_matrix.py +++ /dev/null @@ -1,100 +0,0 @@ -from pydatastructs.graphs.graph import Graph -from pydatastructs.graphs._backend.cpp import _graph -from pydatastructs.utils.misc_util import ( - GraphEdge, raise_if_backend_is_not_python, - Backend) - -__all__ = [ - 'AdjacencyMatrix' -] - -class AdjacencyMatrix(Graph): - """ - Adjacency matrix implementation of graphs. 
- - See also - ======== - - pydatastructs.graphs.graph.Graph - """ - def __new__(cls, *vertices, **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.PYTHON: - obj = object.__new__(cls) - obj.vertices = [vertex.name for vertex in vertices] - for vertex in vertices: - obj.__setattr__(vertex.name, vertex) - obj.matrix = {} - for vertex in vertices: - obj.matrix[vertex.name] = {} - obj.edge_weights = {} - obj._impl = 'adjacency_matrix' - return obj - else: - return _graph.AdjacencyMatrixGraph(vertices) - - @classmethod - def methods(self): - return ['is_adjacent', 'neighbors', - 'add_edge', 'get_edge', 'remove_edge', - '__new__'] - - def is_adjacent(self, node1, node2): - node1, node2 = str(node1), str(node2) - row = self.matrix.get(node1, {}) - return row.get(node2, False) is not False - - def num_vertices(self): - return len(self.vertices) - - def num_edges(self): - return sum(len(v) for v in self.matrix.values()) - - def neighbors(self, node): - node = str(node) - neighbors = [] - row = self.matrix.get(node, {}) - for node, presence in row.items(): - if presence: - neighbors.append(self.__getattribute__( - str(node))) - return neighbors - - def add_vertex(self, node): - raise NotImplementedError("Currently we allow " - "adjacency matrix for static graphs only") - - def remove_vertex(self, node): - raise NotImplementedError("Currently we allow " - "adjacency matrix for static graphs only.") - - def add_edge(self, source, target, cost=None): - source, target = str(source), str(target) - error_msg = ("Vertex %s is not present in the graph." - "Call Graph.add_vertex to add a new" - "vertex. Graph.add_edge is only responsible" - "for adding edges and it will not add new" - "vertices on its own. 
This is done to maintain" - "clear separation between the functionality of" - "these two methods.") - if source not in self.matrix: - raise ValueError(error_msg % (source)) - if target not in self.matrix: - raise ValueError(error_msg % (target)) - - self.matrix[source][target] = True - if cost is not None: - self.edge_weights[source + "_" + target] = \ - GraphEdge(self.__getattribute__(source), - self.__getattribute__(target), - cost) - - def get_edge(self, source, target): - return self.edge_weights.get( - str(source) + "_" + str(target), - None) - - def remove_edge(self, source, target): - source, target = str(source), str(target) - self.matrix[source][target] = False - self.edge_weights.pop(str(source) + "_" + str(target), None) diff --git a/lib/python3.12/site-packages/pydatastructs/graphs/algorithms.py b/lib/python3.12/site-packages/pydatastructs/graphs/algorithms.py deleted file mode 100644 index 9324b7278..000000000 --- a/lib/python3.12/site-packages/pydatastructs/graphs/algorithms.py +++ /dev/null @@ -1,1386 +0,0 @@ -""" -Contains algorithms associated with graph -data structure. 
-""" -from collections import deque -from concurrent.futures import ThreadPoolExecutor -from pydatastructs.utils.misc_util import ( - _comp, raise_if_backend_is_not_python, Backend, AdjacencyListGraphNode) -from pydatastructs.miscellaneous_data_structures import ( - DisjointSetForest, PriorityQueue) -from pydatastructs.graphs.graph import Graph -from pydatastructs.linear_data_structures.algorithms import merge_sort_parallel -from pydatastructs import PriorityQueue - -__all__ = [ - 'breadth_first_search', - 'breadth_first_search_parallel', - 'minimum_spanning_tree', - 'minimum_spanning_tree_parallel', - 'strongly_connected_components', - 'depth_first_search', - 'shortest_paths', - 'all_pair_shortest_paths', - 'topological_sort', - 'topological_sort_parallel', - 'max_flow', - 'find_bridges' -] - -Stack = Queue = deque - -def breadth_first_search( - graph, source_node, operation, *args, **kwargs): - """ - Implementation of serial breadth first search(BFS) - algorithm. - - Parameters - ========== - - graph: Graph - The graph on which BFS is to be performed. - source_node: str - The name of the source node from where the BFS is - to be initiated. - operation: function - The function which is to be applied - on every node when it is visited. - The prototype which is to be followed is, - `function_name(curr_node, next_node, - arg_1, arg_2, . . ., arg_n)`. - Here, the first two arguments denote, the - current node and the node next to current node. - The rest of the arguments are optional and you can - provide your own stuff there. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Note - ==== - - You should pass all the arguments which you are going - to use in the prototype of your `operation` after - passing the operation function. 
- - Examples - ======== - - >>> from pydatastructs import Graph, AdjacencyListGraphNode - >>> V1 = AdjacencyListGraphNode("V1") - >>> V2 = AdjacencyListGraphNode("V2") - >>> V3 = AdjacencyListGraphNode("V3") - >>> G = Graph(V1, V2, V3) - >>> from pydatastructs import breadth_first_search - >>> def f(curr_node, next_node, dest_node): - ... return curr_node != dest_node - ... - >>> G.add_edge(V1.name, V2.name) - >>> G.add_edge(V2.name, V3.name) - >>> breadth_first_search(G, V1.name, f, V3.name) - """ - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.PYTHON: - import pydatastructs.graphs.algorithms as algorithms - func = "_breadth_first_search_" + graph._impl - if not hasattr(algorithms, func): - raise NotImplementedError( - "Currently breadth first search isn't implemented for " - "%s graphs."%(graph._impl)) - return getattr(algorithms, func)( - graph, source_node, operation, *args, **kwargs) - else: - from pydatastructs.graphs._backend.cpp._algorithms import bfs_adjacency_list, bfs_adjacency_matrix - if (graph._impl == "adjacency_list"): - extra_args = args if args else () - return bfs_adjacency_list(graph, source_node, operation, extra_args) - if (graph._impl == "adjacency_matrix"): - extra_args = args if args else () - return bfs_adjacency_matrix(graph, source_node, operation, extra_args) - -def _breadth_first_search_adjacency_list( - graph, source_node, operation, *args, **kwargs): - bfs_queue = Queue() - visited = {} - bfs_queue.append(source_node) - visited[source_node] = True - while len(bfs_queue) != 0: - curr_node = bfs_queue.popleft() - next_nodes = graph.neighbors(curr_node) - if len(next_nodes) != 0: - for next_node in next_nodes: - if visited.get(next_node.name, False) is False: - status = operation(curr_node, next_node.name, *args, **kwargs) - if not status: - return None - bfs_queue.append(next_node.name) - visited[next_node.name] = True - else: - status = operation(curr_node, "", *args, **kwargs) - if not status: - return None 
- -_breadth_first_search_adjacency_matrix = _breadth_first_search_adjacency_list - -def breadth_first_search_parallel( - graph, source_node, num_threads, operation, *args, **kwargs): - """ - Parallel implementation of breadth first search on graphs. - - Parameters - ========== - - graph: Graph - The graph on which BFS is to be performed. - source_node: str - The name of the source node from where the BFS is - to be initiated. - num_threads: int - Number of threads to be used for computation. - operation: function - The function which is to be applied - on every node when it is visited. - The prototype which is to be followed is, - `function_name(curr_node, next_node, - arg_1, arg_2, . . ., arg_n)`. - Here, the first two arguments denote, the - current node and the node next to current node. - The rest of the arguments are optional and you can - provide your own stuff there. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Note - ==== - - You should pass all the arguments which you are going - to use in the prototype of your `operation` after - passing the operation function. - - Examples - ======== - - >>> from pydatastructs import Graph, AdjacencyListGraphNode - >>> V1 = AdjacencyListGraphNode("V1") - >>> V2 = AdjacencyListGraphNode("V2") - >>> V3 = AdjacencyListGraphNode("V3") - >>> G = Graph(V1, V2, V3) - >>> from pydatastructs import breadth_first_search_parallel - >>> def f(curr_node, next_node, dest_node): - ... return curr_node != dest_node - ... 
- >>> G.add_edge(V1.name, V2.name) - >>> G.add_edge(V2.name, V3.name) - >>> breadth_first_search_parallel(G, V1.name, 3, f, V3.name) - """ - raise_if_backend_is_not_python( - breadth_first_search_parallel, kwargs.get('backend', Backend.PYTHON)) - import pydatastructs.graphs.algorithms as algorithms - func = "_breadth_first_search_parallel_" + graph._impl - if not hasattr(algorithms, func): - raise NotImplementedError( - "Currently breadth first search isn't implemented for " - "%s graphs."%(graph._impl)) - return getattr(algorithms, func)( - graph, source_node, num_threads, operation, *args, **kwargs) - -def _generate_layer(**kwargs): - _args, _kwargs = kwargs.get('args'), kwargs.get('kwargs') - (graph, curr_node, next_layer, visited, operation) = _args[0:5] - op_args, op_kwargs = _args[5:], _kwargs - next_nodes = graph.neighbors(curr_node) - status = True - if len(next_nodes) != 0: - for next_node in next_nodes: - if visited.get(next_node, False) is False: - status = status and operation(curr_node, next_node.name, *op_args, **op_kwargs) - next_layer.add(next_node.name) - visited[next_node.name] = True - else: - status = status and operation(curr_node, "", *op_args, **op_kwargs) - return status - -def _breadth_first_search_parallel_adjacency_list( - graph, source_node, num_threads, operation, *args, **kwargs): - visited, layers = {}, {} - layers[0] = set() - layers[0].add(source_node) - visited[source_node] = True - layer = 0 - while len(layers[layer]) != 0: - layers[layer+1] = set() - with ThreadPoolExecutor(max_workers=num_threads) as Executor: - for node in layers[layer]: - status = Executor.submit( - _generate_layer, args= - (graph, node, layers[layer+1], visited, - operation, *args), kwargs=kwargs).result() - layer += 1 - if not status: - return None - -_breadth_first_search_parallel_adjacency_matrix = _breadth_first_search_parallel_adjacency_list - -def _generate_mst_object(graph): - mst = Graph(*[getattr(graph, str(v)) for v in graph.vertices]) - return mst 
- -def _sort_edges(graph, num_threads=None): - edges = list(graph.edge_weights.items()) - if num_threads is None: - sort_key = lambda item: item[1].value - return sorted(edges, key=sort_key) - - merge_sort_parallel(edges, num_threads, - comp=lambda u,v: u[1].value <= v[1].value) - return edges - -def _minimum_spanning_tree_kruskal_adjacency_list(graph): - mst = _generate_mst_object(graph) - dsf = DisjointSetForest() - for v in graph.vertices: - dsf.make_set(v) - for _, edge in _sort_edges(graph): - u, v = edge.source.name, edge.target.name - if dsf.find_root(u) is not dsf.find_root(v): - mst.add_edge(u, v, edge.value) - mst.add_edge(v, u, edge.value) - dsf.union(u, v) - return mst - -_minimum_spanning_tree_kruskal_adjacency_matrix = \ - _minimum_spanning_tree_kruskal_adjacency_list - -def _minimum_spanning_tree_prim_adjacency_list(graph): - q = PriorityQueue(implementation='binomial_heap') - e = {} - mst = Graph(implementation='adjacency_list') - q.push(next(iter(graph.vertices)), 0) - while not q.is_empty: - v = q.pop() - if not hasattr(mst, v): - mst.add_vertex(graph.__getattribute__(v)) - if e.get(v, None) is not None: - edge = e[v] - mst.add_vertex(edge.target) - mst.add_edge(edge.source.name, edge.target.name, edge.value) - mst.add_edge(edge.target.name, edge.source.name, edge.value) - for w_node in graph.neighbors(v): - w = w_node.name - vw = graph.edge_weights[v + '_' + w] - q.push(w, vw.value) - if e.get(w, None) is None or \ - e[w].value > vw.value: - e[w] = vw - return mst - -def minimum_spanning_tree(graph, algorithm, **kwargs): - """ - Computes a minimum spanning tree for the given - graph and algorithm. - - Parameters - ========== - - graph: Graph - The graph whose minimum spanning tree - has to be computed. - algorithm: str - The algorithm which should be used for - computing a minimum spanning tree. - Currently the following algorithms are - supported, - - 'kruskal' -> Kruskal's algorithm as given in [1]. 
- - 'prim' -> Prim's algorithm as given in [2]. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - mst: Graph - A minimum spanning tree using the implementation - same as the graph provided in the input. - - Examples - ======== - - >>> from pydatastructs import Graph, AdjacencyListGraphNode - >>> from pydatastructs import minimum_spanning_tree - >>> u = AdjacencyListGraphNode('u') - >>> v = AdjacencyListGraphNode('v') - >>> G = Graph(u, v) - >>> G.add_edge(u.name, v.name, 3) - >>> mst = minimum_spanning_tree(G, 'kruskal') - >>> u_n = mst.neighbors(u.name) - >>> mst.get_edge(u.name, u_n[0].name).value - 3 - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Kruskal%27s_algorithm - .. [2] https://en.wikipedia.org/wiki/Prim%27s_algorithm - - Note - ==== - - The concept of minimum spanning tree is valid only for - connected and undirected graphs. So, this function - should be used only for such graphs. Using with other - types of graphs may lead to unwanted results. - """ - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.PYTHON: - import pydatastructs.graphs.algorithms as algorithms - func = "_minimum_spanning_tree_" + algorithm + "_" + graph._impl - if not hasattr(algorithms, func): - raise NotImplementedError( - "Currently %s algoithm for %s implementation of graphs " - "isn't implemented for finding minimum spanning trees." 
- %(algorithm, graph._impl)) - return getattr(algorithms, func)(graph) - else: - from pydatastructs.graphs._backend.cpp._algorithms import minimum_spanning_tree_prim_adjacency_list - if graph._impl == "adjacency_list" and algorithm == 'prim': - return minimum_spanning_tree_prim_adjacency_list(graph) - -def _minimum_spanning_tree_parallel_kruskal_adjacency_list(graph, num_threads): - mst = _generate_mst_object(graph) - dsf = DisjointSetForest() - for v in graph.vertices: - dsf.make_set(v) - edges = _sort_edges(graph, num_threads) - for _, edge in edges: - u, v = edge.source.name, edge.target.name - if dsf.find_root(u) is not dsf.find_root(v): - mst.add_edge(u, v, edge.value) - mst.add_edge(v, u, edge.value) - dsf.union(u, v) - return mst - -_minimum_spanning_tree_parallel_kruskal_adjacency_matrix = \ - _minimum_spanning_tree_parallel_kruskal_adjacency_list - -def _find_min(q, v, i): - if not q.is_empty: - v[i] = q.peek - else: - v[i] = None - -def _minimum_spanning_tree_parallel_prim_adjacency_list(graph, num_threads): - q = [PriorityQueue(implementation='binomial_heap') for _ in range(num_threads)] - e = [{} for _ in range(num_threads)] - v2q = {} - mst = Graph(implementation='adjacency_list') - - itr = iter(graph.vertices) - for i in range(len(graph.vertices)): - v2q[next(itr)] = i%len(q) - q[0].push(next(iter(graph.vertices)), 0) - - while True: - - _vs = [None for _ in range(num_threads)] - with ThreadPoolExecutor(max_workers=num_threads) as Executor: - for i in range(num_threads): - Executor.submit(_find_min, q[i], _vs, i).result() - v = None - - for i in range(num_threads): - if _comp(_vs[i], v, lambda u, v: u.key < v.key): - v = _vs[i] - if v is None: - break - v = v.data - idx = v2q[v] - q[idx].pop() - - if not hasattr(mst, v): - mst.add_vertex(graph.__getattribute__(v)) - if e[idx].get(v, None) is not None: - edge = e[idx][v] - mst.add_vertex(edge.target) - mst.add_edge(edge.source.name, edge.target.name, edge.value) - mst.add_edge(edge.target.name, 
edge.source.name, edge.value) - for w_node in graph.neighbors(v): - w = w_node.name - vw = graph.edge_weights[v + '_' + w] - j = v2q[w] - q[j].push(w, vw.value) - if e[j].get(w, None) is None or \ - e[j][w].value > vw.value: - e[j][w] = vw - - return mst - -def minimum_spanning_tree_parallel(graph, algorithm, num_threads, **kwargs): - """ - Computes a minimum spanning tree for the given - graph and algorithm using the given number of threads. - - Parameters - ========== - - graph: Graph - The graph whose minimum spanning tree - has to be computed. - algorithm: str - The algorithm which should be used for - computing a minimum spanning tree. - Currently the following algorithms are - supported, - - 'kruskal' -> Kruskal's algorithm as given in [1]. - - 'prim' -> Prim's algorithm as given in [2]. - num_threads: int - The number of threads to be used. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - mst: Graph - A minimum spanning tree using the implementation - same as the graph provided in the input. - - Examples - ======== - - >>> from pydatastructs import Graph, AdjacencyListGraphNode - >>> from pydatastructs import minimum_spanning_tree_parallel - >>> u = AdjacencyListGraphNode('u') - >>> v = AdjacencyListGraphNode('v') - >>> G = Graph(u, v) - >>> G.add_edge(u.name, v.name, 3) - >>> mst = minimum_spanning_tree_parallel(G, 'kruskal', 3) - >>> u_n = mst.neighbors(u.name) - >>> mst.get_edge(u.name, u_n[0].name).value - 3 - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Kruskal%27s_algorithm#Parallel_algorithm - .. [2] https://en.wikipedia.org/wiki/Prim%27s_algorithm#Parallel_algorithm - - Note - ==== - - The concept of minimum spanning tree is valid only for - connected and undirected graphs. So, this function - should be used only for such graphs. Using with other - types of graphs will lead to unwanted results. 
- """ - raise_if_backend_is_not_python( - minimum_spanning_tree_parallel, kwargs.get('backend', Backend.PYTHON)) - import pydatastructs.graphs.algorithms as algorithms - func = "_minimum_spanning_tree_parallel_" + algorithm + "_" + graph._impl - if not hasattr(algorithms, func): - raise NotImplementedError( - "Currently %s algoithm for %s implementation of graphs " - "isn't implemented for finding minimum spanning trees." - %(algorithm, graph._impl)) - return getattr(algorithms, func)(graph, num_threads) - -def _visit(graph, vertex, visited, incoming, L): - stack = [vertex] - while stack: - top = stack[-1] - if not visited.get(top, False): - visited[top] = True - for node in graph.neighbors(top): - if incoming.get(node.name, None) is None: - incoming[node.name] = [] - incoming[node.name].append(top) - if not visited.get(node.name, False): - stack.append(node.name) - if top is stack[-1]: - L.append(stack.pop()) - -def _assign(graph, u, incoming, assigned, component): - stack = [u] - while stack: - top = stack[-1] - if not assigned.get(top, False): - assigned[top] = True - component.add(top) - for u in incoming[top]: - if not assigned.get(u, False): - stack.append(u) - if top is stack[-1]: - stack.pop() - -def _strongly_connected_components_kosaraju_adjacency_list(graph): - visited, incoming, L = {}, {}, [] - for u in graph.vertices: - if not visited.get(u, False): - _visit(graph, u, visited, incoming, L) - - assigned = {} - components = [] - for i in range(-1, -len(L) - 1, -1): - comp = set() - if not assigned.get(L[i], False): - _assign(graph, L[i], incoming, assigned, comp) - if comp: - components.append(comp) - - return components - -_strongly_connected_components_kosaraju_adjacency_matrix = \ - _strongly_connected_components_kosaraju_adjacency_list - -def _tarjan_dfs(u, graph, index, stack, indices, low_links, on_stacks, components): - indices[u] = index[0] - low_links[u] = index[0] - index[0] += 1 - stack.append(u) - on_stacks[u] = True - - for node in 
graph.neighbors(u): - v = node.name - if indices[v] == -1: - _tarjan_dfs(v, graph, index, stack, indices, low_links, on_stacks, components) - low_links[u] = min(low_links[u], low_links[v]) - elif on_stacks[v]: - low_links[u] = min(low_links[u], low_links[v]) - - if low_links[u] == indices[u]: - component = set() - while stack: - w = stack.pop() - on_stacks[w] = False - component.add(w) - if w == u: - break - components.append(component) - -def _strongly_connected_components_tarjan_adjacency_list(graph): - index = [0] # mutable object - stack = Stack([]) - indices, low_links, on_stacks = {}, {}, {} - - for u in graph.vertices: - indices[u] = -1 - low_links[u] = -1 - on_stacks[u] = False - - components = [] - - for u in graph.vertices: - if indices[u] == -1: - _tarjan_dfs(u, graph, index, stack, indices, low_links, on_stacks, components) - - return components - -_strongly_connected_components_tarjan_adjacency_matrix = \ - _strongly_connected_components_tarjan_adjacency_list - -def strongly_connected_components(graph, algorithm, **kwargs): - """ - Computes strongly connected components for the given - graph and algorithm. - - Parameters - ========== - - graph: Graph - The graph whose minimum spanning tree - has to be computed. - algorithm: str - The algorithm which should be used for - computing strongly connected components. - Currently the following algorithms are - supported, - - 'kosaraju' -> Kosaraju's algorithm as given in [1]. - 'tarjan' -> Tarjan's algorithm as given in [2]. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - components: list - Python list with each element as set of vertices. 
- - Examples - ======== - - >>> from pydatastructs import Graph, AdjacencyListGraphNode - >>> from pydatastructs import strongly_connected_components - >>> v1, v2, v3 = [AdjacencyListGraphNode(i) for i in range(3)] - >>> g = Graph(v1, v2, v3) - >>> g.add_edge(v1.name, v2.name) - >>> g.add_edge(v2.name, v3.name) - >>> g.add_edge(v3.name, v1.name) - >>> scc = strongly_connected_components(g, 'kosaraju') - >>> scc == [{'2', '0', '1'}] - True - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Kosaraju%27s_algorithm - .. [2] https://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm - - """ - raise_if_backend_is_not_python( - strongly_connected_components, kwargs.get('backend', Backend.PYTHON)) - import pydatastructs.graphs.algorithms as algorithms - func = "_strongly_connected_components_" + algorithm + "_" + graph._impl - if not hasattr(algorithms, func): - raise NotImplementedError( - "Currently %s algoithm for %s implementation of graphs " - "isn't implemented for finding strongly connected components." - %(algorithm, graph._impl)) - return getattr(algorithms, func)(graph) - -def depth_first_search( - graph, source_node, operation, *args, **kwargs): - """ - Implementation of depth first search (DFS) - algorithm. - - Parameters - ========== - - graph: Graph - The graph on which DFS is to be performed. - source_node: str - The name of the source node from where the DFS is - to be initiated. - operation: function - The function which is to be applied - on every node when it is visited. - The prototype which is to be followed is, - `function_name(curr_node, next_node, - arg_1, arg_2, . . ., arg_n)`. - Here, the first two arguments denote, the - current node and the node next to current node. - The rest of the arguments are optional and you can - provide your own stuff there. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. 
- - Note - ==== - - You should pass all the arguments which you are going - to use in the prototype of your `operation` after - passing the operation function. - - Examples - ======== - - >>> from pydatastructs import Graph, AdjacencyListGraphNode - >>> V1 = AdjacencyListGraphNode("V1") - >>> V2 = AdjacencyListGraphNode("V2") - >>> V3 = AdjacencyListGraphNode("V3") - >>> G = Graph(V1, V2, V3) - >>> from pydatastructs import depth_first_search - >>> def f(curr_node, next_node, dest_node): - ... return curr_node != dest_node - ... - >>> G.add_edge(V1.name, V2.name) - >>> G.add_edge(V2.name, V3.name) - >>> depth_first_search(G, V1.name, f, V3.name) - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Depth-first_search - """ - raise_if_backend_is_not_python( - depth_first_search, kwargs.get('backend', Backend.PYTHON)) - import pydatastructs.graphs.algorithms as algorithms - func = "_depth_first_search_" + graph._impl - if not hasattr(algorithms, func): - raise NotImplementedError( - "Currently depth first search isn't implemented for " - "%s graphs."%(graph._impl)) - return getattr(algorithms, func)( - graph, source_node, operation, *args, **kwargs) - -def _depth_first_search_adjacency_list( - graph, source_node, operation, *args, **kwargs): - dfs_stack = Stack() - visited = {} - dfs_stack.append(source_node) - visited[source_node] = True - while len(dfs_stack) != 0: - curr_node = dfs_stack.pop() - next_nodes = graph.neighbors(curr_node) - if len(next_nodes) != 0: - for next_node in next_nodes: - if next_node.name not in visited: - status = operation(curr_node, next_node.name, *args, **kwargs) - if not status: - return None - dfs_stack.append(next_node.name) - visited[next_node.name] = True - else: - status = operation(curr_node, "", *args, **kwargs) - if not status: - return None - -_depth_first_search_adjacency_matrix = _depth_first_search_adjacency_list - -def shortest_paths(graph: Graph, algorithm: str, - source: str, target: str="", - **kwargs) -> 
tuple: - """ - Finds shortest paths in the given graph from a given source. - - Parameters - ========== - - graph: Graph - The graph under consideration. - algorithm: str - The algorithm to be used. Currently, the following algorithms - are implemented, - - 'bellman_ford' -> Bellman-Ford algorithm as given in [1] - - 'dijkstra' -> Dijkstra algorithm as given in [2]. - source: str - The name of the source the node. - target: str - The name of the target node. - Optional, by default, all pair shortest paths - are returned. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - (distances, predecessors): (dict, dict) - If target is not provided and algorithm used - is 'bellman_ford'/'dijkstra'. - (distances[target], predecessors): (float, dict) - If target is provided and algorithm used is - 'bellman_ford'/'dijkstra'. - - Examples - ======== - - >>> from pydatastructs import Graph, AdjacencyListGraphNode - >>> from pydatastructs import shortest_paths - >>> V1 = AdjacencyListGraphNode("V1") - >>> V2 = AdjacencyListGraphNode("V2") - >>> V3 = AdjacencyListGraphNode("V3") - >>> G = Graph(V1, V2, V3) - >>> G.add_edge('V2', 'V3', 10) - >>> G.add_edge('V1', 'V2', 11) - >>> shortest_paths(G, 'bellman_ford', 'V1') - ({'V1': 0, 'V2': 11, 'V3': 21}, {'V1': None, 'V2': 'V1', 'V3': 'V2'}) - >>> shortest_paths(G, 'dijkstra', 'V1') - ({'V2': 11, 'V3': 21, 'V1': 0}, {'V1': None, 'V2': 'V1', 'V3': 'V2'}) - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Bellman%E2%80%93Ford_algorithm - .. 
[2] https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm - """ - backend = kwargs.get('backend', Backend.PYTHON) - if (backend == Backend.PYTHON): - import pydatastructs.graphs.algorithms as algorithms - func = "_" + algorithm + "_" + graph._impl - if not hasattr(algorithms, func): - raise NotImplementedError( - "Currently %s algorithm isn't implemented for " - "finding shortest paths in graphs."%(algorithm)) - return getattr(algorithms, func)(graph, source, target) - else: - from pydatastructs.graphs._backend.cpp._algorithms import shortest_paths_dijkstra_adjacency_list - if graph._impl == "adjacency_list" and algorithm == 'dijkstra': - return shortest_paths_dijkstra_adjacency_list(graph, source, target) - -def _bellman_ford_adjacency_list(graph: Graph, source: str, target: str) -> tuple: - distances, predecessor, visited, cnts = {}, {}, {}, {} - - for v in graph.vertices: - distances[v] = float('inf') - predecessor[v] = None - visited[v] = False - cnts[v] = 0 - distances[source] = 0 - verticy_num = len(graph.vertices) - - que = Queue([source]) - - while que: - u = que.popleft() - visited[u] = False - neighbors = graph.neighbors(u) - for neighbor in neighbors: - v = neighbor.name - edge_str = u + '_' + v - if distances[u] != float('inf') and distances[u] + graph.edge_weights[edge_str].value < distances[v]: - distances[v] = distances[u] + graph.edge_weights[edge_str].value - predecessor[v] = u - cnts[v] = cnts[u] + 1 - if cnts[v] >= verticy_num: - raise ValueError("Graph contains a negative weight cycle.") - if not visited[v]: - que.append(v) - visited[v] = True - - if target != "": - return (distances[target], predecessor) - return (distances, predecessor) - -_bellman_ford_adjacency_matrix = _bellman_ford_adjacency_list - -def _dijkstra_adjacency_list(graph: Graph, start: str, target: str): - V = len(graph.vertices) - visited, dist, pred = {}, {}, {} - for v in graph.vertices: - visited[v] = False - pred[v] = None - if v != start: - dist[v] = float('inf') - 
dist[start] = 0 - pq = PriorityQueue(implementation='binomial_heap') - for vertex in dist: - pq.push(vertex, dist[vertex]) - for _ in range(V): - u = pq.pop() - visited[u] = True - for v in graph.vertices: - edge_str = u + '_' + v - if (edge_str in graph.edge_weights and graph.edge_weights[edge_str].value >= 0 and - visited[v] is False and dist[v] > dist[u] + graph.edge_weights[edge_str].value): - dist[v] = dist[u] + graph.edge_weights[edge_str].value - pred[v] = u - pq.push(v, dist[v]) - - if target != "": - return (dist[target], pred) - return dist, pred - -_dijkstra_adjacency_matrix = _dijkstra_adjacency_list - -def all_pair_shortest_paths(graph: Graph, algorithm: str, - **kwargs) -> tuple: - """ - Finds shortest paths between all pairs of vertices in the given graph. - - Parameters - ========== - - graph: Graph - The graph under consideration. - algorithm: str - The algorithm to be used. Currently, the following algorithms - are implemented, - - 'floyd_warshall' -> Floyd Warshall algorithm as given in [1]. - 'johnson' -> Johnson's Algorithm as given in [2] - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - (distances, predecessors): (dict, dict) - - Examples - ======== - - >>> from pydatastructs import Graph, AdjacencyListGraphNode - >>> from pydatastructs import all_pair_shortest_paths - >>> V1 = AdjacencyListGraphNode("V1") - >>> V2 = AdjacencyListGraphNode("V2") - >>> V3 = AdjacencyListGraphNode("V3") - >>> G = Graph(V1, V2, V3) - >>> G.add_edge('V2', 'V3', 10) - >>> G.add_edge('V1', 'V2', 11) - >>> G.add_edge('V3', 'V1', 5) - >>> dist, _ = all_pair_shortest_paths(G, 'floyd_warshall') - >>> dist['V1']['V3'] - 21 - >>> dist['V3']['V1'] - 5 - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Floyd%E2%80%93Warshall_algorithm - .. 
[2] https://en.wikipedia.org/wiki/Johnson's_algorithm - """ - raise_if_backend_is_not_python( - all_pair_shortest_paths, kwargs.get('backend', Backend.PYTHON)) - import pydatastructs.graphs.algorithms as algorithms - func = "_" + algorithm + "_" + graph._impl - if not hasattr(algorithms, func): - raise NotImplementedError( - "Currently %s algorithm isn't implemented for " - "finding shortest paths in graphs."%(algorithm)) - return getattr(algorithms, func)(graph) - -def _floyd_warshall_adjacency_list(graph: Graph): - dist, next_vertex = {}, {} - V, E = graph.vertices, graph.edge_weights - - for v in V: - dist[v] = {} - next_vertex[v] = {} - - for name, edge in E.items(): - dist[edge.source.name][edge.target.name] = edge.value - next_vertex[edge.source.name][edge.target.name] = edge.source.name - - for v in V: - dist[v][v] = 0 - next_vertex[v][v] = v - - for k in V: - for i in V: - for j in V: - dist_i_j = dist.get(i, {}).get(j, float('inf')) - dist_i_k = dist.get(i, {}).get(k, float('inf')) - dist_k_j = dist.get(k, {}).get(j, float('inf')) - next_i_k = next_vertex.get(i + '_' + k, None) - if dist_i_j > dist_i_k + dist_k_j: - dist[i][j] = dist_i_k + dist_k_j - next_vertex[i][j] = next_i_k - - return (dist, next_vertex) - -_floyd_warshall_adjacency_matrix = _floyd_warshall_adjacency_list - -def _johnson_adjacency_list(graph: Graph): - new_vertex = AdjacencyListGraphNode('__q__') - graph.add_vertex(new_vertex) - - for vertex in graph.vertices: - if vertex != '__q__': - graph.add_edge('__q__', vertex, 0) - - distances, predecessors = shortest_paths(graph, 'bellman_ford', '__q__') - - edges_to_remove = [] - for edge in graph.edge_weights: - edge_node = graph.edge_weights[edge] - if edge_node.source.name == '__q__': - edges_to_remove.append((edge_node.source.name, edge_node.target.name)) - - for u, v in edges_to_remove: - graph.remove_edge(u, v) - graph.remove_vertex('__q__') - - for edge in graph.edge_weights: - edge_node = graph.edge_weights[edge] - u, v = 
edge_node.source.name, edge_node.target.name - graph.edge_weights[edge].value += (distances[u] - distances[v]) - - all_distances = {} - all_next_vertex = {} - - for vertex in graph.vertices: - u = vertex - dijkstra_dist, dijkstra_pred = shortest_paths(graph, 'dijkstra', u) - all_distances[u] = {} - all_next_vertex[u] = {} - for v in graph.vertices: - if dijkstra_pred[v] is None or dijkstra_pred[v] == u : - all_next_vertex[u][v] = u - else: - all_next_vertex[u][v] = None - if v in dijkstra_dist: - all_distances[u][v] = dijkstra_dist[v] - distances[u] + distances[v] - else: - all_distances[u][v] = float('inf') - - return (all_distances, all_next_vertex) - -def topological_sort(graph: Graph, algorithm: str, - **kwargs) -> list: - """ - Performs topological sort on the given graph using given algorithm. - - Parameters - ========== - - graph: Graph - The graph under consideration. - algorithm: str - The algorithm to be used. - Currently, following are supported, - - 'kahn' -> Kahn's algorithm as given in [1]. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - list - The list of topologically sorted vertices. - - Examples - ======== - - >>> from pydatastructs import Graph, AdjacencyListGraphNode, topological_sort - >>> v_1 = AdjacencyListGraphNode('v_1') - >>> v_2 = AdjacencyListGraphNode('v_2') - >>> graph = Graph(v_1, v_2) - >>> graph.add_edge('v_1', 'v_2') - >>> topological_sort(graph, 'kahn') - ['v_1', 'v_2'] - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Topological_sorting#Kahn's_algorithm - """ - raise_if_backend_is_not_python( - topological_sort, kwargs.get('backend', Backend.PYTHON)) - import pydatastructs.graphs.algorithms as algorithms - func = "_" + algorithm + "_" + graph._impl - if not hasattr(algorithms, func): - raise NotImplementedError( - "Currently %s algorithm isn't implemented for " - "performing topological sort on %s graphs."%(algorithm, graph._impl)) - return getattr(algorithms, func)(graph) - -def _kahn_adjacency_list(graph: Graph) -> list: - S = Queue() - in_degree = {u: 0 for u in graph.vertices} - for u in graph.vertices: - for v in graph.neighbors(u): - in_degree[v.name] += 1 - for u in graph.vertices: - if in_degree[u] == 0: - S.append(u) - in_degree.pop(u) - - L = [] - while S: - n = S.popleft() - L.append(n) - for m in graph.neighbors(n): - graph.remove_edge(n, m.name) - in_degree[m.name] -= 1 - if in_degree[m.name] == 0: - S.append(m.name) - in_degree.pop(m.name) - - if in_degree: - raise ValueError("Graph is not acyclic.") - return L - -def topological_sort_parallel(graph: Graph, algorithm: str, num_threads: int, - **kwargs) -> list: - """ - Performs topological sort on the given graph using given algorithm using - given number of threads. - - Parameters - ========== - - graph: Graph - The graph under consideration. - algorithm: str - The algorithm to be used. - Currently, following are supported, - - 'kahn' -> Kahn's algorithm as given in [1]. - num_threads: int - The maximum number of threads to be used. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - list - The list of topologically sorted vertices. 
- - Examples - ======== - - >>> from pydatastructs import Graph, AdjacencyListGraphNode, topological_sort_parallel - >>> v_1 = AdjacencyListGraphNode('v_1') - >>> v_2 = AdjacencyListGraphNode('v_2') - >>> graph = Graph(v_1, v_2) - >>> graph.add_edge('v_1', 'v_2') - >>> topological_sort_parallel(graph, 'kahn', 1) - ['v_1', 'v_2'] - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Topological_sorting#Kahn's_algorithm - """ - raise_if_backend_is_not_python( - topological_sort_parallel, kwargs.get('backend', Backend.PYTHON)) - import pydatastructs.graphs.algorithms as algorithms - func = "_" + algorithm + "_" + graph._impl + '_parallel' - if not hasattr(algorithms, func): - raise NotImplementedError( - "Currently %s algorithm isn't implemented for " - "performing topological sort on %s graphs."%(algorithm, graph._impl)) - return getattr(algorithms, func)(graph, num_threads) - -def _kahn_adjacency_list_parallel(graph: Graph, num_threads: int) -> list: - num_vertices = len(graph.vertices) - - def _collect_source_nodes(graph: Graph) -> list: - S = [] - in_degree = {u: 0 for u in graph.vertices} - for u in graph.vertices: - for v in graph.neighbors(u): - in_degree[v.name] += 1 - for u in in_degree: - if in_degree[u] == 0: - S.append(u) - return list(S) - - def _job(graph: Graph, u: str): - for v in graph.neighbors(u): - graph.remove_edge(u, v.name) - - L = [] - source_nodes = _collect_source_nodes(graph) - while source_nodes: - with ThreadPoolExecutor(max_workers=num_threads) as Executor: - for node in source_nodes: - L.append(node) - Executor.submit(_job, graph, node) - for node in source_nodes: - graph.remove_vertex(node) - source_nodes = _collect_source_nodes(graph) - - if len(L) != num_vertices: - raise ValueError("Graph is not acyclic.") - return L - - -def _breadth_first_search_max_flow(graph: Graph, source_node, sink_node, flow_passed, for_dinic=False): - bfs_queue = Queue() - parent, currentPathC = {}, {} - currentPathC[source_node] = float('inf') 
- bfs_queue.append(source_node) - while len(bfs_queue) != 0: - curr_node = bfs_queue.popleft() - next_nodes = graph.neighbors(curr_node) - if len(next_nodes) != 0: - for next_node in next_nodes: - capacity = graph.get_edge(curr_node, next_node.name).value - fp = flow_passed.get((curr_node, next_node.name), 0) - if capacity and parent.get(next_node.name, False) is False and capacity - fp > 0: - parent[next_node.name] = curr_node - next_flow = min(currentPathC[curr_node], capacity - fp) - currentPathC[next_node.name] = next_flow - if next_node.name == sink_node and not for_dinic: - return (next_flow, parent) - bfs_queue.append(next_node.name) - return (0, parent) - - -def _max_flow_edmonds_karp_(graph: Graph, source, sink): - m_flow = 0 - flow_passed = {} - new_flow, parent = _breadth_first_search_max_flow(graph, source, sink, flow_passed) - while new_flow != 0: - m_flow += new_flow - current = sink - while current != source: - prev = parent[current] - fp = flow_passed.get((prev, current), 0) - flow_passed[(prev, current)] = fp + new_flow - fp = flow_passed.get((current, prev), 0) - flow_passed[(current, prev)] = fp - new_flow - current = prev - new_flow, parent = _breadth_first_search_max_flow(graph, source, sink, flow_passed) - return m_flow - - -def _depth_first_search_max_flow_dinic(graph: Graph, u, parent, sink_node, flow, flow_passed): - if u == sink_node: - return flow - - next_nodes = graph.neighbors(u) - if len(next_nodes) != 0: - for next_node in next_nodes: - capacity = graph.get_edge(u, next_node.name).value - fp = flow_passed.get((u, next_node.name), 0) - parent_cond = parent.get(next_node.name, None) - if parent_cond and parent_cond == u and capacity - fp > 0: - path_flow = _depth_first_search_max_flow_dinic(graph, - next_node.name, - parent, sink_node, - min(flow, capacity - fp), flow_passed) - if path_flow > 0: - fp = flow_passed.get((u, next_node.name), 0) - flow_passed[(u, next_node.name)] = fp + path_flow - fp = flow_passed.get((next_node.name, u), 
0) - flow_passed[(next_node.name, u)] = fp - path_flow - return path_flow - return 0 - - -def _max_flow_dinic_(graph: Graph, source, sink): - max_flow = 0 - flow_passed = {} - while True: - next_flow, parent = _breadth_first_search_max_flow(graph, source, sink, flow_passed, True) - if parent.get(sink, False) is False: - break - - while True: - path_flow = _depth_first_search_max_flow_dinic(graph, source, - parent, sink, - float('inf'), - flow_passed) - if path_flow <= 0: - break - max_flow += path_flow - - return max_flow - - -def max_flow(graph, source, sink, algorithm='edmonds_karp', **kwargs): - raise_if_backend_is_not_python( - max_flow, kwargs.get('backend', Backend.PYTHON)) - - import pydatastructs.graphs.algorithms as algorithms - func = "_max_flow_" + algorithm + "_" - if not hasattr(algorithms, func): - raise NotImplementedError( - f"Currently {algorithm} algorithm isn't implemented for " - "performing max flow on graphs.") - return getattr(algorithms, func)(graph, source, sink) - - -def find_bridges(graph): - """ - Finds all bridges in an undirected graph using Tarjan's Algorithm. - - Parameters - ========== - graph : Graph - An undirected graph instance. - - Returns - ========== - List[tuple] - A list of bridges, where each bridge is represented as a tuple (u, v) - with u <= v. - - Example - ======== - >>> from pydatastructs import Graph, AdjacencyListGraphNode, find_bridges - >>> v0 = AdjacencyListGraphNode(0) - >>> v1 = AdjacencyListGraphNode(1) - >>> v2 = AdjacencyListGraphNode(2) - >>> v3 = AdjacencyListGraphNode(3) - >>> v4 = AdjacencyListGraphNode(4) - >>> graph = Graph(v0, v1, v2, v3, v4, implementation='adjacency_list') - >>> graph.add_edge(v0.name, v1.name) - >>> graph.add_edge(v1.name, v2.name) - >>> graph.add_edge(v2.name, v3.name) - >>> graph.add_edge(v3.name, v4.name) - >>> find_bridges(graph) - [('0', '1'), ('1', '2'), ('2', '3'), ('3', '4')] - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Bridge_(graph_theory) - """ - - vertices = list(graph.vertices) - processed_vertices = [] - for v in vertices: - if hasattr(v, "name"): - processed_vertices.append(v.name) - else: - processed_vertices.append(v) - - n = len(processed_vertices) - adj = {v: [] for v in processed_vertices} - for v in processed_vertices: - for neighbor in graph.neighbors(v): - if hasattr(neighbor, "name"): - nbr = neighbor.name - else: - nbr = neighbor - adj[v].append(nbr) - - mapping = {v: idx for idx, v in enumerate(processed_vertices)} - inv_mapping = {idx: v for v, idx in mapping.items()} - - n_adj = [[] for _ in range(n)] - for v in processed_vertices: - idx_v = mapping[v] - for u in adj[v]: - idx_u = mapping[u] - n_adj[idx_v].append(idx_u) - - visited = [False] * n - disc = [0] * n - low = [0] * n - parent = [-1] * n - bridges_idx = [] - time = 0 - - def dfs(u): - nonlocal time - visited[u] = True - disc[u] = low[u] = time - time += 1 - for v in n_adj[u]: - if not visited[v]: - parent[v] = u - dfs(v) - low[u] = min(low[u], low[v]) - if low[v] > disc[u]: - bridges_idx.append((u, v)) - elif v != parent[u]: - low[u] = min(low[u], disc[v]) - - for i in range(n): - if not visited[i]: - dfs(i) - - bridges = [] - for u, v in bridges_idx: - a = inv_mapping[u] - b = inv_mapping[v] - if a <= b: - bridges.append((a, b)) - else: - bridges.append((b, a)) - bridges.sort() - return bridges diff --git a/lib/python3.12/site-packages/pydatastructs/graphs/graph.py b/lib/python3.12/site-packages/pydatastructs/graphs/graph.py deleted file mode 100644 index 39c2692e3..000000000 --- a/lib/python3.12/site-packages/pydatastructs/graphs/graph.py +++ /dev/null @@ -1,163 +0,0 @@ - -from pydatastructs.utils.misc_util import Backend, raise_if_backend_is_not_python - -__all__ = [ - 'Graph' -] - -class Graph(object): - """ - Represents generic concept of graphs. - - Parameters - ========== - - implementation: str - The implementation to be used for storing - graph in memory. 
It can be figured out - from type of the vertices(if passed at construction). - Currently the following implementations are supported, - - 'adjacency_list' -> Adjacency list implementation. - - 'adjacency_matrix' -> Adjacency matrix implementation. - - By default, 'adjacency_list'. - vertices: GraphNode(s) - For AdjacencyList implementation vertices - can be passed for initializing the graph. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs.graphs import Graph - >>> from pydatastructs.utils import AdjacencyListGraphNode - >>> v_1 = AdjacencyListGraphNode('v_1', 1) - >>> v_2 = AdjacencyListGraphNode('v_2', 2) - >>> g = Graph(v_1, v_2) - >>> g.add_edge('v_1', 'v_2') - >>> g.add_edge('v_2', 'v_1') - >>> g.is_adjacent('v_1', 'v_2') - True - >>> g.is_adjacent('v_2', 'v_1') - True - >>> g.remove_edge('v_1', 'v_2') - >>> g.is_adjacent('v_1', 'v_2') - False - >>> g.is_adjacent('v_2', 'v_1') - True - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Graph_(abstract_data_type) - - Note - ==== - - Make sure to create nodes (AdjacencyListGraphNode or AdjacencyMatrixGraphNode) - and them in your graph using Graph.add_vertex before adding edges whose - end points require either of the nodes that you added. In other words, - Graph.add_edge doesn't add new nodes on its own if the input - nodes are not already present in the Graph. 
- - """ - - __slots__ = ['_impl'] - - def __new__(cls, *args, **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - try: - default_impl = args[0]._impl if args else 'adjacency_list' - except: - default_impl = 'adjacency_list' - implementation = kwargs.get('implementation', default_impl) - if implementation == 'adjacency_list': - from pydatastructs.graphs.adjacency_list import AdjacencyList - obj = AdjacencyList(*args, **kwargs) - return obj - elif implementation == 'adjacency_matrix': - from pydatastructs.graphs.adjacency_matrix import AdjacencyMatrix - obj = AdjacencyMatrix(*args, **kwargs) - return obj - else: - raise NotImplementedError("%s implementation is not a part " - "of the library currently."%(implementation)) - - def is_adjacent(self, node1, node2): - """ - Checks if the nodes with the given - with the given names are adjacent - to each other. - """ - raise NotImplementedError( - "This is an abstract method.") - - def neighbors(self, node): - """ - Lists the neighbors of the node - with given name. - """ - raise NotImplementedError( - "This is an abstract method.") - - def add_vertex(self, node): - """ - Adds the input vertex to the node, or does nothing - if the input vertex is already in the graph. - """ - raise NotImplementedError( - "This is an abstract method.") - - def remove_vertex(self, node): - """ - Removes the input vertex along with all the edges - pointing towards it. - """ - raise NotImplementedError( - "This is an abstract method.") - - def add_edge(self, source, target, cost=None): - """ - Adds the edge starting at first parameter - i.e., source and ending at the second - parameter i.e., target. - """ - raise NotImplementedError( - "This is an abstract method.") - - def get_edge(self, source, target): - """ - Returns GraphEdge object if there - is an edge between source and target - otherwise None. 
- """ - raise NotImplementedError( - "This is an abstract method.") - - def remove_edge(self, source, target): - """ - Removes the edge starting at first parameter - i.e., source and ending at the second - parameter i.e., target. - """ - raise NotImplementedError( - "This is an abstract method.") - - def num_vertices(self): - """ - Number of vertices - """ - raise NotImplementedError( - "This is an abstract method.") - - def num_edges(self): - """ - Number of edges - """ - raise NotImplementedError( - "This is an abstract method.") diff --git a/lib/python3.12/site-packages/pydatastructs/graphs/tests/__init__.py b/lib/python3.12/site-packages/pydatastructs/graphs/tests/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_adjacency_list.py b/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_adjacency_list.py deleted file mode 100644 index 3a9cdb14f..000000000 --- a/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_adjacency_list.py +++ /dev/null @@ -1,83 +0,0 @@ -from pydatastructs.graphs import Graph -from pydatastructs.utils import AdjacencyListGraphNode -from pydatastructs.utils.raises_util import raises -from pydatastructs.utils.misc_util import Backend - -def test_adjacency_list(): - v_1 = AdjacencyListGraphNode('v_1', 1) - v_2 = AdjacencyListGraphNode('v_2', 2) - g = Graph(v_1, v_2, implementation='adjacency_list') - v_3 = AdjacencyListGraphNode('v_3', 3) - g.add_vertex(v_2) - g.add_vertex(v_3) - g.add_edge('v_1', 'v_2') - g.add_edge('v_2', 'v_3') - g.add_edge('v_3', 'v_1') - assert g.is_adjacent('v_1', 'v_2') is True - assert g.is_adjacent('v_2', 'v_3') is True - assert g.is_adjacent('v_3', 'v_1') is True - assert g.is_adjacent('v_2', 'v_1') is False - assert g.is_adjacent('v_3', 'v_2') is False - assert g.is_adjacent('v_1', 'v_3') is False - neighbors = g.neighbors('v_1') - assert neighbors == [v_2] - v = AdjacencyListGraphNode('v', 4) - g.add_vertex(v) 
- g.add_edge('v_1', 'v', 0) - g.add_edge('v_2', 'v', 0) - g.add_edge('v_3', 'v', 0) - assert g.is_adjacent('v_1', 'v') is True - assert g.is_adjacent('v_2', 'v') is True - assert g.is_adjacent('v_3', 'v') is True - e1 = g.get_edge('v_1', 'v') - e2 = g.get_edge('v_2', 'v') - e3 = g.get_edge('v_3', 'v') - assert (e1.source.name, e1.target.name) == ('v_1', 'v') - assert (e2.source.name, e2.target.name) == ('v_2', 'v') - assert (e3.source.name, e3.target.name) == ('v_3', 'v') - g.remove_edge('v_1', 'v') - assert g.is_adjacent('v_1', 'v') is False - g.remove_vertex('v') - assert g.is_adjacent('v_2', 'v') is False - assert g.is_adjacent('v_3', 'v') is False - - assert raises(ValueError, lambda: g.add_edge('u', 'v')) - assert raises(ValueError, lambda: g.add_edge('v', 'x')) - - v_4 = AdjacencyListGraphNode('v_4', 4, backend = Backend.CPP) - v_5 = AdjacencyListGraphNode('v_5', 5, backend = Backend.CPP) - g2 = Graph(v_4,v_5,implementation = 'adjacency_list', backend = Backend.CPP) - v_6 = AdjacencyListGraphNode('v_6', 6, backend = Backend.CPP) - assert raises(ValueError, lambda: g2.add_vertex(v_5)) - g2.add_vertex(v_6) - g2.add_edge('v_4', 'v_5') - g2.add_edge('v_5', 'v_6') - g2.add_edge('v_4', 'v_6') - assert g2.is_adjacent('v_4', 'v_5') is True - assert g2.is_adjacent('v_5', 'v_6') is True - assert g2.is_adjacent('v_4', 'v_6') is True - assert g2.is_adjacent('v_5', 'v_4') is False - assert g2.is_adjacent('v_6', 'v_5') is False - assert g2.is_adjacent('v_6', 'v_4') is False - assert g2.num_edges() == 3 - assert g2.num_vertices() == 3 - neighbors = g2.neighbors('v_4') - assert neighbors == [v_6, v_5] - v = AdjacencyListGraphNode('v', 4, backend = Backend.CPP) - g2.add_vertex(v) - g2.add_edge('v_4', 'v', 0) - g2.add_edge('v_5', 'v', 0) - g2.add_edge('v_6', 'v', "h") - assert g2.is_adjacent('v_4', 'v') is True - assert g2.is_adjacent('v_5', 'v') is True - assert g2.is_adjacent('v_6', 'v') is True - e1 = g2.get_edge('v_4', 'v') - e2 = g2.get_edge('v_5', 'v') - e3 = 
g2.get_edge('v_6', 'v') - assert (str(e1)) == "('v_4', 'v', 0)" - assert (str(e2)) == "('v_5', 'v', 0)" - assert (str(e3)) == "('v_6', 'v', h)" - g2.remove_edge('v_4', 'v') - assert g2.is_adjacent('v_4', 'v') is False - g2.remove_vertex('v') - assert raises(ValueError, lambda: g2.add_edge('v_4', 'v')) diff --git a/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_adjacency_matrix.py b/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_adjacency_matrix.py deleted file mode 100644 index 27dc81790..000000000 --- a/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_adjacency_matrix.py +++ /dev/null @@ -1,53 +0,0 @@ -from pydatastructs.graphs import Graph -from pydatastructs.utils import AdjacencyMatrixGraphNode -from pydatastructs.utils.raises_util import raises -from pydatastructs.utils.misc_util import Backend - -def test_AdjacencyMatrix(): - v_0 = AdjacencyMatrixGraphNode(0, 0) - v_1 = AdjacencyMatrixGraphNode(1, 1) - v_2 = AdjacencyMatrixGraphNode(2, 2) - g = Graph(v_0, v_1, v_2) - g.add_edge(0, 1, 0) - g.add_edge(1, 2, 0) - g.add_edge(2, 0, 0) - e1 = g.get_edge(0, 1) - e2 = g.get_edge(1, 2) - e3 = g.get_edge(2, 0) - assert (e1.source.name, e1.target.name) == ('0', '1') - assert (e2.source.name, e2.target.name) == ('1', '2') - assert (e3.source.name, e3.target.name) == ('2', '0') - assert g.is_adjacent(0, 1) is True - assert g.is_adjacent(1, 2) is True - assert g.is_adjacent(2, 0) is True - assert g.is_adjacent(1, 0) is False - assert g.is_adjacent(2, 1) is False - assert g.is_adjacent(0, 2) is False - neighbors = g.neighbors(0) - assert neighbors == [v_1] - g.remove_edge(0, 1) - assert g.is_adjacent(0, 1) is False - assert raises(ValueError, lambda: g.add_edge('u', 'v')) - assert raises(ValueError, lambda: g.add_edge('v', 'x')) - assert raises(ValueError, lambda: g.add_edge(2, 3)) - assert raises(ValueError, lambda: g.add_edge(3, 2)) - - v_3 = AdjacencyMatrixGraphNode('0', 0, backend = Backend.CPP) - v_4 = 
AdjacencyMatrixGraphNode('1', 1, backend = Backend.CPP) - v_5 = AdjacencyMatrixGraphNode('2', 2, backend = Backend.CPP) - g2 = Graph(v_3, v_4, v_5, implementation = 'adjacency_matrix', backend = Backend.CPP) - g2.add_edge('0', '1', 0) - g2.add_edge('1', '2', 0) - g2.add_edge('2', '0', 0) - assert g2.is_adjacent('0', '1') is True - assert g2.is_adjacent('1', '2') is True - assert g2.is_adjacent('2', '0') is True - assert g2.is_adjacent('1', '0') is False - assert g2.is_adjacent('2', '1') is False - assert g2.is_adjacent('0', '2') is False - neighbors = g2.neighbors('0') - assert neighbors == [v_4] - g2.remove_edge('0', '1') - assert g2.is_adjacent('0', '1') is False - assert raises(ValueError, lambda: g2.add_edge('u', 'v')) - assert raises(ValueError, lambda: g2.add_edge('v', 'x')) diff --git a/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_algorithms.py b/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_algorithms.py deleted file mode 100644 index 04ebcccda..000000000 --- a/lib/python3.12/site-packages/pydatastructs/graphs/tests/test_algorithms.py +++ /dev/null @@ -1,596 +0,0 @@ -from pydatastructs import (breadth_first_search, Graph, -breadth_first_search_parallel, minimum_spanning_tree, -minimum_spanning_tree_parallel, strongly_connected_components, -depth_first_search, shortest_paths,all_pair_shortest_paths, topological_sort, -topological_sort_parallel, max_flow, find_bridges) -from pydatastructs.utils.raises_util import raises -from pydatastructs.utils.misc_util import AdjacencyListGraphNode, AdjacencyMatrixGraphNode -from pydatastructs.graphs._backend.cpp import _graph -from pydatastructs.graphs._backend.cpp import _algorithms -from pydatastructs.utils.misc_util import Backend - -def test_breadth_first_search(): - - def _test_breadth_first_search(ds): - import pydatastructs.utils.misc_util as utils - GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") - - V1 = GraphNode(0) - V2 = GraphNode(1) - V3 = GraphNode(2) - - G1 = 
Graph(V1, V2, V3) - - assert G1.num_vertices() == 3 - - edges = [ - (V1.name, V2.name), - (V2.name, V3.name), - (V1.name, V3.name) - ] - - for edge in edges: - G1.add_edge(*edge) - - assert G1.num_edges() == len(edges) - - parent = {} - def bfs_tree(curr_node, next_node, parent): - if next_node != "": - parent[next_node] = curr_node - return True - - breadth_first_search(G1, V1.name, bfs_tree, parent) - assert (parent[V3.name] == V1.name and parent[V2.name] == V1.name) or \ - (parent[V3.name] == V2.name and parent[V2.name] == V1.name) - - if (ds=='List'): - parent = {} - V9 = AdjacencyListGraphNode("9",0,backend = Backend.CPP) - V10 = AdjacencyListGraphNode("10",0,backend = Backend.CPP) - V11 = AdjacencyListGraphNode("11",0,backend = Backend.CPP) - G2 = Graph(V9, V10, V11,implementation = 'adjacency_list', backend = Backend.CPP) - assert G2.num_vertices()==3 - G2.add_edge("9", "10") - G2.add_edge("10", "11") - breadth_first_search(G2, "9", bfs_tree, parent, backend = Backend.CPP) - assert parent[V10] == V9 - assert parent[V11] == V10 - - if (ds == 'Matrix'): - parent3 = {} - V12 = AdjacencyMatrixGraphNode("12", 0, backend = Backend.CPP) - V13 = AdjacencyMatrixGraphNode("13", 0, backend = Backend.CPP) - V14 = AdjacencyMatrixGraphNode("14", 0, backend = Backend.CPP) - G3 = Graph(V12, V13, V14, implementation = 'adjacency_matrix', backend = Backend.CPP) - assert G3.num_vertices() == 3 - G3.add_edge("12", "13") - G3.add_edge("13", "14") - breadth_first_search(G3, "12", bfs_tree, parent3, backend = Backend.CPP) - assert parent3[V13] == V12 - assert parent3[V14] == V13 - - V4 = GraphNode(0) - V5 = GraphNode(1) - V6 = GraphNode(2) - V7 = GraphNode(3) - V8 = GraphNode(4) - - edges = [ - (V4.name, V5.name), - (V5.name, V6.name), - (V6.name, V7.name), - (V6.name, V4.name), - (V7.name, V8.name) - ] - - G2 = Graph(V4, V5, V6, V7, V8) - - for edge in edges: - G2.add_edge(*edge) - - assert G2.num_edges() == len(edges) - - path = [] - def path_finder(curr_node, next_node, 
dest_node, parent, path): - if next_node != "": - parent[next_node] = curr_node - if curr_node == dest_node: - node = curr_node - path.append(node) - while node is not None: - if parent.get(node, None) is not None: - path.append(parent[node]) - node = parent.get(node, None) - path.reverse() - return False - return True - - parent.clear() - breadth_first_search(G2, V4.name, path_finder, V7.name, parent, path) - assert path == [V4.name, V5.name, V6.name, V7.name] - - _test_breadth_first_search("List") - _test_breadth_first_search("Matrix") - -def test_breadth_first_search_parallel(): - - def _test_breadth_first_search_parallel(ds): - import pydatastructs.utils.misc_util as utils - GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") - - V1 = GraphNode(0) - V2 = GraphNode(1) - V3 = GraphNode(2) - V4 = GraphNode(3) - V5 = GraphNode(4) - V6 = GraphNode(5) - V7 = GraphNode(6) - V8 = GraphNode(7) - - - G1 = Graph(V1, V2, V3, V4, V5, V6, V7, V8) - - edges = [ - (V1.name, V2.name), - (V1.name, V3.name), - (V1.name, V4.name), - (V2.name, V5.name), - (V2.name, V6.name), - (V3.name, V6.name), - (V3.name, V7.name), - (V4.name, V7.name), - (V4.name, V8.name) - ] - - for edge in edges: - G1.add_edge(*edge) - - parent = {} - def bfs_tree(curr_node, next_node, parent): - if next_node != "": - parent[next_node] = curr_node - return True - - breadth_first_search_parallel(G1, V1.name, 5, bfs_tree, parent) - assert (parent[V2.name] == V1.name and parent[V3.name] == V1.name and - parent[V4.name] == V1.name and parent[V5.name] == V2.name and - (parent[V6.name] in (V2.name, V3.name)) and - (parent[V7.name] in (V3.name, V4.name)) and (parent[V8.name] == V4.name)) - - _test_breadth_first_search_parallel("List") - _test_breadth_first_search_parallel("Matrix") - -def test_minimum_spanning_tree(): - - def _test_minimum_spanning_tree(func, ds, algorithm, *args): - import pydatastructs.utils.misc_util as utils - GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") - a, b, c, d, e = 
[GraphNode(x) for x in [0, 1, 2, 3, 4]] - graph = Graph(a, b, c, d, e) - graph.add_edge(a.name, c.name, 10) - graph.add_edge(c.name, a.name, 10) - graph.add_edge(a.name, d.name, 7) - graph.add_edge(d.name, a.name, 7) - graph.add_edge(c.name, d.name, 9) - graph.add_edge(d.name, c.name, 9) - graph.add_edge(d.name, b.name, 32) - graph.add_edge(b.name, d.name, 32) - graph.add_edge(d.name, e.name, 23) - graph.add_edge(e.name, d.name, 23) - mst = func(graph, algorithm, *args) - expected_mst = [('0_3', 7), ('2_3', 9), ('3_4', 23), ('3_1', 32), - ('3_0', 7), ('3_2', 9), ('4_3', 23), ('1_3', 32)] - assert len(expected_mst) == len(mst.edge_weights.items()) - for k, v in mst.edge_weights.items(): - assert (k, v.value) in expected_mst - - def _test_minimum_spanning_tree_cpp(ds, algorithm, *args): - if (ds == 'List' and algorithm == "prim"): - a1 = AdjacencyListGraphNode('a', 0, backend = Backend.CPP) - b1 = AdjacencyListGraphNode('b', 0, backend = Backend.CPP) - c1 = AdjacencyListGraphNode('c', 0, backend = Backend.CPP) - d1 = AdjacencyListGraphNode('d', 0, backend = Backend.CPP) - e1 = AdjacencyListGraphNode('e', 0, backend = Backend.CPP) - g = Graph(a1, b1, c1, d1, e1, backend = Backend.CPP) - g.add_edge(a1.name, c1.name, 10) - g.add_edge(c1.name, a1.name, 10) - g.add_edge(a1.name, d1.name, 7) - g.add_edge(d1.name, a1.name, 7) - g.add_edge(c1.name, d1.name, 9) - g.add_edge(d1.name, c1.name, 9) - g.add_edge(d1.name, b1.name, 32) - g.add_edge(b1.name, d1.name, 32) - g.add_edge(d1.name, e1.name, 23) - g.add_edge(e1.name, d1.name, 23) - mst = minimum_spanning_tree(g, "prim", backend = Backend.CPP) - expected_mst = ["('a', 'd', 7)", "('d', 'c', 9)", "('e', 'd', 23)", "('b', 'd', 32)", - "('d', 'a', 7)", "('c', 'd', 9)", "('d', 'e', 23)", "('d', 'b', 32)"] - assert str(mst.get_edge('a', 'd')) in expected_mst - assert str(mst.get_edge('e', 'd')) in expected_mst - assert str(mst.get_edge('d', 'c')) in expected_mst - assert str(mst.get_edge('b', 'd')) in expected_mst - assert 
mst.num_edges() == 8 - a=AdjacencyListGraphNode('0', 0, backend = Backend.CPP) - b=AdjacencyListGraphNode('1', 0, backend = Backend.CPP) - c=AdjacencyListGraphNode('2', 0, backend = Backend.CPP) - d=AdjacencyListGraphNode('3', 0, backend = Backend.CPP) - g2 = Graph(a,b,c,d,backend = Backend.CPP) - g2.add_edge('0', '1', 74) - g2.add_edge('1', '0', 74) - g2.add_edge('0', '3', 55) - g2.add_edge('3', '0', 55) - g2.add_edge('1', '2', 74) - g2.add_edge('2', '1', 74) - mst2=minimum_spanning_tree(g2, "prim", backend = Backend.CPP) - assert mst2.num_edges() == 6 - - fmst = minimum_spanning_tree - fmstp = minimum_spanning_tree_parallel - _test_minimum_spanning_tree(fmst, "List", "kruskal") - _test_minimum_spanning_tree(fmst, "Matrix", "kruskal") - _test_minimum_spanning_tree(fmst, "List", "prim") - _test_minimum_spanning_tree(fmstp, "List", "kruskal", 3) - _test_minimum_spanning_tree(fmstp, "Matrix", "kruskal", 3) - _test_minimum_spanning_tree(fmstp, "List", "prim", 3) - _test_minimum_spanning_tree_cpp("List", "prim") - -def test_strongly_connected_components(): - - def _test_strongly_connected_components(func, ds, algorithm, *args): - import pydatastructs.utils.misc_util as utils - GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") - a, b, c, d, e, f, g, h = \ - [GraphNode(chr(x)) for x in range(ord('a'), ord('h') + 1)] - graph = Graph(a, b, c, d, e, f, g, h) - graph.add_edge(a.name, b.name) - graph.add_edge(b.name, c.name) - graph.add_edge(b.name, f.name) - graph.add_edge(b.name, e.name) - graph.add_edge(c.name, d.name) - graph.add_edge(c.name, g.name) - graph.add_edge(d.name, h.name) - graph.add_edge(d.name, c.name) - graph.add_edge(e.name, f.name) - graph.add_edge(e.name, a.name) - graph.add_edge(f.name, g.name) - graph.add_edge(g.name, f.name) - graph.add_edge(h.name, d.name) - graph.add_edge(h.name, g.name) - comps = func(graph, algorithm) - expected_comps = [{'e', 'a', 'b'}, {'d', 'c', 'h'}, {'g', 'f'}] - assert comps.sort() == expected_comps.sort() - - scc = 
strongly_connected_components - _test_strongly_connected_components(scc, "List", "kosaraju") - _test_strongly_connected_components(scc, "Matrix", "kosaraju") - _test_strongly_connected_components(scc, "List", "tarjan") - _test_strongly_connected_components(scc, "Matrix", "tarjan") - -def test_depth_first_search(): - - def _test_depth_first_search(ds): - import pydatastructs.utils.misc_util as utils - GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") - - V1 = GraphNode(0) - V2 = GraphNode(1) - V3 = GraphNode(2) - - G1 = Graph(V1, V2, V3) - - edges = [ - (V1.name, V2.name), - (V2.name, V3.name), - (V1.name, V3.name) - ] - - for edge in edges: - G1.add_edge(*edge) - - parent = {} - def dfs_tree(curr_node, next_node, parent): - if next_node != "": - parent[next_node] = curr_node - return True - - depth_first_search(G1, V1.name, dfs_tree, parent) - assert (parent[V3.name] == V1.name and parent[V2.name] == V1.name) or \ - (parent[V3.name] == V2.name and parent[V2.name] == V1.name) - - V4 = GraphNode(0) - V5 = GraphNode(1) - V6 = GraphNode(2) - V7 = GraphNode(3) - V8 = GraphNode(4) - - edges = [ - (V4.name, V5.name), - (V5.name, V6.name), - (V6.name, V7.name), - (V6.name, V4.name), - (V7.name, V8.name) - ] - - G2 = Graph(V4, V5, V6, V7, V8) - - for edge in edges: - G2.add_edge(*edge) - - path = [] - def path_finder(curr_node, next_node, dest_node, parent, path): - if next_node != "": - parent[next_node] = curr_node - if curr_node == dest_node: - node = curr_node - path.append(node) - while node is not None: - if parent.get(node, None) is not None: - path.append(parent[node]) - node = parent.get(node, None) - path.reverse() - return False - return True - - parent.clear() - depth_first_search(G2, V4.name, path_finder, V7.name, parent, path) - assert path == [V4.name, V5.name, V6.name, V7.name] - - _test_depth_first_search("List") - _test_depth_first_search("Matrix") - -def test_shortest_paths(): - - def _test_shortest_paths_positive_edges(ds, algorithm): - import 
pydatastructs.utils.misc_util as utils - GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") - vertices = [GraphNode('S'), GraphNode('C'), - GraphNode('SLC'), GraphNode('SF'), - GraphNode('D')] - - graph = Graph(*vertices) - graph.add_edge('S', 'SLC', 2) - graph.add_edge('C', 'S', 4) - graph.add_edge('C', 'D', 2) - graph.add_edge('SLC', 'C', 2) - graph.add_edge('SLC', 'D', 3) - graph.add_edge('SF', 'SLC', 2) - graph.add_edge('SF', 'S', 2) - graph.add_edge('D', 'SF', 3) - dist, pred = shortest_paths(graph, algorithm, 'SLC') - assert dist == {'S': 6, 'C': 2, 'SLC': 0, 'SF': 6, 'D': 3} - assert pred == {'S': 'C', 'C': 'SLC', 'SLC': None, 'SF': 'D', 'D': 'SLC'} - dist, pred = shortest_paths(graph, algorithm, 'SLC', 'SF') - assert dist == 6 - assert pred == {'S': 'C', 'C': 'SLC', 'SLC': None, 'SF': 'D', 'D': 'SLC'} - graph.remove_edge('SLC', 'D') - graph.add_edge('D', 'SLC', -10) - assert raises(ValueError, lambda: shortest_paths(graph, 'bellman_ford', 'SLC')) - - if (ds == 'List' and algorithm == 'dijkstra'): - vertices2 = [AdjacencyListGraphNode('S', 0, backend = Backend.CPP), AdjacencyListGraphNode('C', 0, backend = Backend.CPP), - AdjacencyListGraphNode('SLC', 0, backend = Backend.CPP), AdjacencyListGraphNode('SF', 0, backend = Backend.CPP), - AdjacencyListGraphNode('D', 0, backend = Backend.CPP)] - graph2 = Graph(*vertices2, backend = Backend.CPP) - graph2.add_edge('S', 'SLC', 2) - graph2.add_edge('C', 'S', 4) - graph2.add_edge('C', 'D', 2) - graph2.add_edge('SLC', 'C', 2) - graph2.add_edge('SLC', 'D', 3) - graph2.add_edge('SF', 'SLC', 2) - graph2.add_edge('SF', 'S', 2) - graph2.add_edge('D', 'SF', 3) - (dist2, pred2) = shortest_paths(graph2, algorithm, 'SLC', backend = Backend.CPP) - assert dist2 == {'S': 6, 'C': 2, 'SLC': 0, 'SF': 6, 'D': 3} - assert pred2 == {'S': 'C', 'C': 'SLC', 'SLC': None, 'SF': 'D', 'D': 'SLC'} - - - - def _test_shortest_paths_negative_edges(ds, algorithm): - import pydatastructs.utils.misc_util as utils - GraphNode = getattr(utils, 
"Adjacency" + ds + "GraphNode") - vertices = [GraphNode('s'), GraphNode('a'), - GraphNode('b'), GraphNode('c'), - GraphNode('d')] - - graph = Graph(*vertices) - graph.add_edge('s', 'a', 3) - graph.add_edge('s', 'b', 2) - graph.add_edge('a', 'c', 1) - graph.add_edge('b', 'd', 1) - graph.add_edge('b', 'a', -2) - graph.add_edge('c', 'd', 1) - dist, pred = shortest_paths(graph, algorithm, 's') - assert dist == {'s': 0, 'a': 0, 'b': 2, 'c': 1, 'd': 2} - assert pred == {'s': None, 'a': 'b', 'b': 's', 'c': 'a', 'd': 'c'} - dist, pred = shortest_paths(graph, algorithm, 's', 'd') - assert dist == 2 - assert pred == {'s': None, 'a': 'b', 'b': 's', 'c': 'a', 'd': 'c'} - - _test_shortest_paths_positive_edges("List", 'bellman_ford') - _test_shortest_paths_positive_edges("Matrix", 'bellman_ford') - _test_shortest_paths_negative_edges("List", 'bellman_ford') - _test_shortest_paths_negative_edges("Matrix", 'bellman_ford') - _test_shortest_paths_positive_edges("List", 'dijkstra') - _test_shortest_paths_positive_edges("Matrix", 'dijkstra') - -def test_all_pair_shortest_paths(): - - def _test_shortest_paths_negative_edges(ds, algorithm): - import pydatastructs.utils.misc_util as utils - GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") - vertices = [GraphNode('1'), GraphNode('2'), - GraphNode('3'), GraphNode('4')] - - graph = Graph(*vertices) - graph.add_edge('1', '3', -2) - graph.add_edge('2', '1', 4) - graph.add_edge('2', '3', 3) - graph.add_edge('3', '4', 2) - graph.add_edge('4', '2', -1) - dist, next_v = all_pair_shortest_paths(graph, algorithm) - assert dist == {'1': {'3': -2, '1': 0, '4': 0, '2': -1}, - '2': {'1': 4, '3': 2, '2': 0, '4': 4}, - '3': {'4': 2, '3': 0, '1': 5, '2': 1}, - '4': {'2': -1, '4': 0, '1': 3, '3': 1}} - assert next_v == {'1': {'3': '1', '1': '1', '4': None, '2': None}, - '2': {'1': '2', '3': None, '2': '2', '4': None}, - '3': {'4': '3', '3': '3', '1': None, '2': None}, - '4': {'2': '4', '4': '4', '1': None, '3': None}} - - 
_test_shortest_paths_negative_edges("List", 'floyd_warshall') - _test_shortest_paths_negative_edges("Matrix", 'floyd_warshall') - _test_shortest_paths_negative_edges("List", 'johnson') - -def test_topological_sort(): - - def _test_topological_sort(func, ds, algorithm, threads=None): - import pydatastructs.utils.misc_util as utils - GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") - vertices = [GraphNode('2'), GraphNode('3'), GraphNode('5'), - GraphNode('7'), GraphNode('8'), GraphNode('10'), - GraphNode('11'), GraphNode('9')] - - graph = Graph(*vertices) - graph.add_edge('5', '11') - graph.add_edge('7', '11') - graph.add_edge('7', '8') - graph.add_edge('3', '8') - graph.add_edge('3', '10') - graph.add_edge('11', '2') - graph.add_edge('11', '9') - graph.add_edge('11', '10') - graph.add_edge('8', '9') - if threads is not None: - l = func(graph, algorithm, threads) - else: - l = func(graph, algorithm) - assert all([(l1 in l[0:3]) for l1 in ('3', '5', '7')] + - [(l2 in l[3:5]) for l2 in ('8', '11')] + - [(l3 in l[5:]) for l3 in ('10', '9', '2')]) - - _test_topological_sort(topological_sort, "List", "kahn") - _test_topological_sort(topological_sort_parallel, "List", "kahn", 3) - - -def test_max_flow(): - def _test_max_flow(ds, algorithm): - import pydatastructs.utils.misc_util as utils - GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") - - a = GraphNode('a') - b = GraphNode('b') - c = GraphNode('c') - d = GraphNode('d') - e = GraphNode('e') - - G = Graph(a, b, c, d, e) - - G.add_edge('a', 'b', 3) - G.add_edge('a', 'c', 4) - G.add_edge('b', 'c', 2) - G.add_edge('b', 'd', 3) - G.add_edge('c', 'd', 1) - G.add_edge('d', 'e', 6) - - assert max_flow(G, 'a', 'e', algorithm) == 4 - assert max_flow(G, 'a', 'c', algorithm) == 6 - - a = GraphNode('a') - b = GraphNode('b') - c = GraphNode('c') - d = GraphNode('d') - e = GraphNode('e') - f = GraphNode('f') - - G2 = Graph(a, b, c, d, e, f) - - G2.add_edge('a', 'b', 16) - G2.add_edge('a', 'c', 13) - G2.add_edge('b', 
'c', 10) - G2.add_edge('b', 'd', 12) - G2.add_edge('c', 'b', 4) - G2.add_edge('c', 'e', 14) - G2.add_edge('d', 'c', 9) - G2.add_edge('d', 'f', 20) - G2.add_edge('e', 'd', 7) - G2.add_edge('e', 'f', 4) - - assert max_flow(G2, 'a', 'f', algorithm) == 23 - assert max_flow(G2, 'a', 'd', algorithm) == 19 - - a = GraphNode('a') - b = GraphNode('b') - c = GraphNode('c') - d = GraphNode('d') - - G3 = Graph(a, b, c, d) - - G3.add_edge('a', 'b', 3) - G3.add_edge('a', 'c', 2) - G3.add_edge('b', 'c', 2) - G3.add_edge('b', 'd', 3) - G3.add_edge('c', 'd', 2) - - assert max_flow(G3, 'a', 'd', algorithm) == 5 - assert max_flow(G3, 'a', 'b', algorithm) == 3 - - - _test_max_flow("List", "edmonds_karp") - _test_max_flow("Matrix", "edmonds_karp") - _test_max_flow("List", "dinic") - _test_max_flow("Matrix", "dinic") - - -def test_find_bridges(): - def _test_find_bridges(ds): - import pydatastructs.utils.misc_util as utils - GraphNode = getattr(utils, "Adjacency" + ds + "GraphNode") - - impl = 'adjacency_list' if ds == "List" else 'adjacency_matrix' - - v0 = GraphNode(0) - v1 = GraphNode(1) - v2 = GraphNode(2) - v3 = GraphNode(3) - v4 = GraphNode(4) - - G1 = Graph(v0, v1, v2, v3, v4, implementation=impl) - G1.add_edge(v0.name, v1.name) - G1.add_edge(v1.name, v2.name) - G1.add_edge(v2.name, v3.name) - G1.add_edge(v3.name, v4.name) - - bridges = find_bridges(G1) - expected_bridges = [('0', '1'), ('1', '2'), ('2', '3'), ('3', '4')] - assert sorted(bridges) == sorted(expected_bridges) - - u0 = GraphNode(0) - u1 = GraphNode(1) - u2 = GraphNode(2) - - G2 = Graph(u0, u1, u2, implementation=impl) - G2.add_edge(u0.name, u1.name) - G2.add_edge(u1.name, u2.name) - G2.add_edge(u2.name, u0.name) - - bridges = find_bridges(G2) - assert bridges == [] - - w0 = GraphNode(0) - w1 = GraphNode(1) - w2 = GraphNode(2) - w3 = GraphNode(3) - w4 = GraphNode(4) - - G3 = Graph(w0, w1, w2, w3, w4, implementation=impl) - G3.add_edge(w0.name, w1.name) - G3.add_edge(w1.name, w2.name) - G3.add_edge(w3.name, w4.name) - 
- bridges = find_bridges(G3) - expected_bridges = [('0', '1'), ('1', '2'), ('3', '4')] - assert sorted(bridges) == sorted(expected_bridges) - - _test_find_bridges("List") - _test_find_bridges("Matrix") diff --git a/lib/python3.12/site-packages/pydatastructs/linear_data_structures/__init__.py b/lib/python3.12/site-packages/pydatastructs/linear_data_structures/__init__.py deleted file mode 100644 index c6b3341d2..000000000 --- a/lib/python3.12/site-packages/pydatastructs/linear_data_structures/__init__.py +++ /dev/null @@ -1,53 +0,0 @@ -__all__ = [] - -from . import ( - arrays, - linked_lists, - algorithms, -) - -from .arrays import ( - OneDimensionalArray, - DynamicOneDimensionalArray, - MultiDimensionalArray, - ArrayForTrees -) -__all__.extend(arrays.__all__) - -from .linked_lists import ( - SinglyLinkedList, - DoublyLinkedList, - SinglyCircularLinkedList, - DoublyCircularLinkedList, - SkipList -) -__all__.extend(linked_lists.__all__) - -from .algorithms import ( - merge_sort_parallel, - brick_sort, - brick_sort_parallel, - heapsort, - matrix_multiply_parallel, - counting_sort, - bucket_sort, - cocktail_shaker_sort, - quick_sort, - longest_common_subsequence, - is_ordered, - upper_bound, - lower_bound, - longest_increasing_subsequence, - next_permutation, - prev_permutation, - bubble_sort, - linear_search, - binary_search, - jump_search, - selection_sort, - insertion_sort, - intro_sort, - shell_sort, - radix_sort -) -__all__.extend(algorithms.__all__) diff --git a/lib/python3.12/site-packages/pydatastructs/linear_data_structures/_backend/__init__.py b/lib/python3.12/site-packages/pydatastructs/linear_data_structures/_backend/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/lib/python3.12/site-packages/pydatastructs/linear_data_structures/algorithms.py b/lib/python3.12/site-packages/pydatastructs/linear_data_structures/algorithms.py deleted file mode 100644 index 6d383fdca..000000000 --- 
a/lib/python3.12/site-packages/pydatastructs/linear_data_structures/algorithms.py +++ /dev/null @@ -1,2010 +0,0 @@ -from pydatastructs.linear_data_structures.arrays import ( - OneDimensionalArray, DynamicArray, DynamicOneDimensionalArray, Array) -from pydatastructs.linear_data_structures._backend.cpp import _algorithms, _arrays -from pydatastructs.utils.misc_util import ( - _check_type, _comp, Backend, - raise_if_backend_is_not_python) -from concurrent.futures import ThreadPoolExecutor -from math import log, floor, sqrt - -__all__ = [ - 'merge_sort_parallel', - 'brick_sort', - 'brick_sort_parallel', - 'heapsort', - 'matrix_multiply_parallel', - 'counting_sort', - 'bucket_sort', - 'cocktail_shaker_sort', - 'quick_sort', - 'longest_common_subsequence', - 'is_ordered', - 'upper_bound', - 'lower_bound', - 'longest_increasing_subsequence', - 'next_permutation', - 'prev_permutation', - 'bubble_sort', - 'linear_search', - 'binary_search', - 'jump_search', - 'selection_sort', - 'insertion_sort', - 'intro_sort', - 'shell_sort', - 'radix_sort' -] - -def _merge(array, sl, el, sr, er, end, comp): - l, r = [], [] - for i in range(sl, el + 1): - if i <= end: - l.append(array[i]) - array[i] = None - for i in range(sr, er + 1): - if i <= end: - r.append(array[i]) - array[i] = None - i, j, k = 0, 0, sl - while i < len(l) and j < len(r): - if _comp(l[i], r[j], comp): - array[k] = l[i] - i += 1 - else: - array[k] = r[j] - j += 1 - k += 1 - - while i < len(l): - array[k] = l[i] - i += 1 - k += 1 - - while j < len(r): - array[k] = r[j] - j += 1 - k += 1 - -def merge_sort_parallel(array, num_threads, **kwargs): - """ - Implements parallel merge sort. - - Parameters - ========== - - array: Array - The array which is to be sorted. - num_threads: int - The maximum number of threads - to be used for sorting. - start: int - The starting index of the portion - which is to be sorted. - Optional, by default 0 - end: int - The ending index of the portion which - is to be sorted. 
- Optional, by default the index - of the last position filled. - comp: lambda/function - The comparator which is to be used - for sorting. If the function returns - False then only swapping is performed. - Optional, by default, less than or - equal to is used for comparing two - values. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs import OneDimensionalArray, merge_sort_parallel - >>> arr = OneDimensionalArray(int,[3, 2, 1]) - >>> merge_sort_parallel(arr, 3) - >>> [arr[0], arr[1], arr[2]] - [1, 2, 3] - >>> merge_sort_parallel(arr, 3, comp=lambda u, v: u > v) - >>> [arr[0], arr[1], arr[2]] - [3, 2, 1] - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Merge_sort - """ - raise_if_backend_is_not_python( - merge_sort_parallel, kwargs.get('backend', Backend.PYTHON)) - start = kwargs.get('start', 0) - end = kwargs.get('end', len(array) - 1) - comp = kwargs.get("comp", lambda u, v: u <= v) - for size in range(floor(log(end - start + 1, 2)) + 1): - pow_2 = 2**size - with ThreadPoolExecutor(max_workers=num_threads) as Executor: - i = start - while i <= end: - Executor.submit( - _merge, - array, - i, i + pow_2 - 1, - i + pow_2, i + 2*pow_2 - 1, - end, comp).result() - i = i + 2*pow_2 - - if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)): - array._modify(True) - -def brick_sort(array, **kwargs): - """ - Implements Brick Sort / Odd Even sorting algorithm - - Parameters - ========== - - array: Array - The array which is to be sorted. - start: int - The starting index of the portion - which is to be sorted. - Optional, by default 0 - end: int - The ending index of the portion which - is to be sorted. - Optional, by default the index - of the last position filled. - comp: lambda/function - The comparator which is to be used - for sorting. If the function returns - False then only swapping is performed. 
- Optional, by default, less than or - equal to is used for comparing two - values. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - >>> from pydatastructs import OneDimensionalArray, brick_sort - >>> arr = OneDimensionalArray(int,[3, 2, 1]) - >>> brick_sort(arr) - >>> [arr[0], arr[1], arr[2]] - [1, 2, 3] - >>> brick_sort(arr, comp=lambda u, v: u > v) - >>> [arr[0], arr[1], arr[2]] - [3, 2, 1] - - References - ========== - .. [1] https://www.geeksforgeeks.org/odd-even-sort-brick-sort/ - """ - raise_if_backend_is_not_python( - brick_sort, kwargs.get('backend', Backend.PYTHON)) - start = kwargs.get('start', 0) - end = kwargs.get('end', len(array) - 1) - comp = kwargs.get("comp", lambda u, v: u <= v) - - is_sorted = False - while is_sorted is False: - is_sorted = True - for i in range(start+1, end, 2): - if _comp(array[i+1], array[i], comp): - array[i], array[i+1] = array[i+1], array[i] - is_sorted = False - for i in range(start, end, 2): - if _comp(array[i+1], array[i], comp): - array[i], array[i+1] = array[i+1], array[i] - is_sorted = False - - if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)): - array._modify(True) - -def _brick_sort_swap(array, i, j, comp, is_sorted): - if _comp(array[j], array[i], comp): - array[i], array[j] = array[j], array[i] - is_sorted[0] = False - -def brick_sort_parallel(array, num_threads, **kwargs): - """ - Implements Concurrent Brick Sort / Odd Even sorting algorithm - - Parameters - ========== - - array: Array/list - The array which is to be sorted. - num_threads: int - The maximum number of threads - to be used for sorting. - start: int - The starting index of the portion - which is to be sorted. - Optional, by default 0 - end: int - The ending index of the portion which - is to be sorted. - Optional, by default the index - of the last position filled. 
- comp: lambda/function - The comparator which is to be used - for sorting. If the function returns - False then only swapping is performed. - Optional, by default, less than or - equal to is used for comparing two - values. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs import OneDimensionalArray, brick_sort_parallel - >>> arr = OneDimensionalArray(int,[3, 2, 1]) - >>> brick_sort_parallel(arr, num_threads=5) - >>> [arr[0], arr[1], arr[2]] - [1, 2, 3] - >>> brick_sort_parallel(arr, num_threads=5, comp=lambda u, v: u > v) - >>> [arr[0], arr[1], arr[2]] - [3, 2, 1] - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Odd%E2%80%93even_sort - """ - raise_if_backend_is_not_python( - brick_sort_parallel, kwargs.get('backend', Backend.PYTHON)) - start = kwargs.get('start', 0) - end = kwargs.get('end', len(array) - 1) - comp = kwargs.get("comp", lambda u, v: u <= v) - - is_sorted = [False] - with ThreadPoolExecutor(max_workers=num_threads) as Executor: - while is_sorted[0] is False: - is_sorted[0] = True - for i in range(start + 1, end, 2): - Executor.submit(_brick_sort_swap, array, i, i + 1, comp, is_sorted).result() - - for i in range(start, end, 2): - Executor.submit(_brick_sort_swap, array, i, i + 1, comp, is_sorted).result() - - if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)): - array._modify(True) - -def heapsort(array, **kwargs): - """ - Implements Heapsort algorithm. - - Parameters - ========== - - array: Array - The array which is to be sorted. - start: int - The starting index of the portion - which is to be sorted. - Optional, by default 0 - end: int - The ending index of the portion which - is to be sorted. - Optional, by default the index - of the last position filled. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. 
- - Examples - ======== - - >>> from pydatastructs import OneDimensionalArray, heapsort - >>> arr = OneDimensionalArray(int,[3, 2, 1]) - >>> heapsort(arr) - >>> [arr[0], arr[1], arr[2]] - [1, 2, 3] - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Heapsort - - Note - ==== - - This function does not support custom comparators as is the case with - other sorting functions in this file. - """ - raise_if_backend_is_not_python( - heapsort, kwargs.get('backend', Backend.PYTHON)) - from pydatastructs.trees.heaps import BinaryHeap - - start = kwargs.get('start', 0) - end = kwargs.get('end', len(array) - 1) - - h = BinaryHeap(heap_property="min") - for i in range(start, end+1): - if array[i] is not None: - h.insert(array[i]) - array[i] = None - - i = start - while not h.is_empty: - array[i] = h.extract().key - i += 1 - - if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)): - array._modify(True) - -def counting_sort(array: Array, **kwargs) -> Array: - """ - Performs counting sort on the given array. - - Parameters - ========== - - array: Array - The array which is to be sorted. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - output: Array - The sorted array. - - Examples - ======== - - >>> from pydatastructs import DynamicOneDimensionalArray as DODA, counting_sort - >>> arr = DODA(int, [5, 78, 1, 0]) - >>> out = counting_sort(arr) - >>> str(out) - "['0', '1', '5', '78']" - >>> arr.delete(2) - >>> out = counting_sort(arr) - >>> str(out) - "['0', '5', '78']" - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Counting_sort - - Note - ==== - - Since, counting sort is a non-comparison sorting algorithm, - custom comparators aren't allowed. - The ouput array doesn't contain any `None` value. 
- """ - raise_if_backend_is_not_python( - counting_sort, kwargs.get('backend', Backend.PYTHON)) - max_val, min_val = array[0], array[0] - none_count = 0 - for i in range(len(array)): - if array[i] is not None: - if max_val is None or max_val < array[i]: - max_val = array[i] - if min_val is None or array[i] < min_val: - min_val = array[i] - else: - none_count += 1 - if min_val is None or max_val is None: - return array - - count = [0 for _ in range(max_val - min_val + 1)] - for i in range(len(array)): - if array[i] is not None: - count[array[i] - min_val] += 1 - - total = 0 - for i in range(max_val - min_val + 1): - count[i], total = total, count[i] + total - - output = type(array)(array._dtype, - [array[i] for i in range(len(array)) - if array[i] is not None]) - if _check_type(output, DynamicArray): - output._modify(force=True) - - for i in range(len(array)): - x = array[i] - if x is not None: - output[count[x-min_val]] = x - count[x-min_val] += 1 - - return output - -def _matrix_multiply_helper(m1, m2, row, col): - s = 0 - for i in range(len(m1)): - s += m1[row][i] * m2[i][col] - return s - -def matrix_multiply_parallel(matrix_1, matrix_2, num_threads): - """ - Implements concurrent Matrix multiplication - - Parameters - ========== - - matrix_1: Any matrix representation - Left matrix - matrix_2: Any matrix representation - Right matrix - num_threads: int - The maximum number of threads - to be used for multiplication. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Raises - ====== - - ValueError - When the columns in matrix_1 are not equal to the rows in matrix_2 - - Returns - ======= - - C: list - The result of matrix multiplication. 
- - Examples - ======== - - >>> from pydatastructs import matrix_multiply_parallel - >>> I = [[1, 1, 0], [0, 1, 0], [0, 0, 1]] - >>> J = [[2, 1, 2], [1, 2, 1], [2, 2, 2]] - >>> matrix_multiply_parallel(I, J, num_threads=5) - [[3, 3, 3], [1, 2, 1], [2, 2, 2]] - - References - ========== - .. [1] https://www3.nd.edu/~zxu2/acms60212-40212/Lec-07-3.pdf - """ - row_matrix_1, col_matrix_1 = len(matrix_1), len(matrix_1[0]) - row_matrix_2, col_matrix_2 = len(matrix_2), len(matrix_2[0]) - - if col_matrix_1 != row_matrix_2: - raise ValueError("Matrix size mismatch: %s * %s"%( - (row_matrix_1, col_matrix_1), (row_matrix_2, col_matrix_2))) - - C = [[None for i in range(col_matrix_1)] for j in range(row_matrix_2)] - - with ThreadPoolExecutor(max_workers=num_threads) as Executor: - for i in range(row_matrix_1): - for j in range(col_matrix_2): - C[i][j] = Executor.submit(_matrix_multiply_helper, - matrix_1, - matrix_2, - i, j).result() - - return C - -def _bucket_sort_helper(bucket: Array) -> Array: - for i in range(1, len(bucket)): - key = bucket[i] - j = i - 1 - while j >= 0 and bucket[j] > key: - bucket[j+1] = bucket[j] - j -= 1 - bucket[j+1] = key - return bucket - -def bucket_sort(array: Array, **kwargs) -> Array: - """ - Performs bucket sort on the given array. - - Parameters - ========== - - array: Array - The array which is to be sorted. - start: int - The starting index of the portion - which is to be sorted. - Optional, by default 0 - end: int - The ending index of the portion which - is to be sorted. - Optional, by default the index - of the last position filled. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - output: Array - The sorted array. 
- - Examples - ======== - - >>> from pydatastructs import DynamicOneDimensionalArray as DODA, bucket_sort - >>> arr = DODA(int, [5, 78, 1, 0]) - >>> out = bucket_sort(arr) - >>> str(out) - "['0', '1', '5', '78']" - >>> arr.delete(2) - >>> out = bucket_sort(arr) - >>> str(out) - "['0', '1', '78']" - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Bucket_sort - - Note - ==== - - This function does not support custom comparators as is the case with - other sorting functions in this file. - """ - raise_if_backend_is_not_python( - bucket_sort, kwargs.get('backend', Backend.PYTHON)) - start = kwargs.get('start', 0) - end = kwargs.get('end', len(array) - 1) - - #Find maximum value in the list and use length of the list to determine which value in the list goes into which bucket - max_value = None - for i in range(start, end+1): - if array[i] is not None: - max_value = array[i] - - count = 0 - for i in range(start, end+1): - if array[i] is not None: - count += 1 - if array[i] > max_value: - max_value = array[i] - - number_of_null_values = end - start + 1 - count - size = max_value // count - - # Create n empty buckets where n is equal to the length of the input list - buckets_list = [[] for _ in range(count)] - - # Put list elements into different buckets based on the size - for i in range(start, end + 1): - if array[i] is not None: - j = array[i] // size - if j is not count: - buckets_list[j].append(array[i]) - else: - buckets_list[count-1].append(array[i]) - - # Sort elements within the buckets using Insertion Sort - for z in range(count): - _bucket_sort_helper(buckets_list[z]) - - # Concatenate buckets with sorted elements into a single array - sorted_list = [] - for x in range(count): - sorted_list.extend(buckets_list[x]) - for i in range(end, end - number_of_null_values, -1): - array[i] = None - for i in range(start, end - number_of_null_values + 1): - array[i] = sorted_list[i-start] - if _check_type(array, (DynamicArray, 
_arrays.DynamicOneDimensionalArray)): - array._modify(True) - return array - -def cocktail_shaker_sort(array: Array, **kwargs) -> Array: - """ - Performs cocktail sort on the given array. - - Parameters - ========== - - array: Array - The array which is to be sorted. - start: int - The starting index of the portion - which is to be sorted. - Optional, by default 0 - end: int - The ending index of the portion which - is to be sorted. - Optional, by default the index - of the last position filled. - comp: lambda/function - The comparator which is to be used - for sorting. If the function returns - False then only swapping is performed. - Optional, by default, less than or - equal to is used for comparing two - values. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - output: Array - The sorted array. - - Examples - ======== - - >>> from pydatastructs import OneDimensionalArray as ODA, cocktail_shaker_sort - >>> arr = ODA(int, [5, 78, 1, 0]) - >>> out = cocktail_shaker_sort(arr) - >>> str(out) - '[0, 1, 5, 78]' - >>> arr = ODA(int, [21, 37, 5]) - >>> out = cocktail_shaker_sort(arr) - >>> str(out) - '[5, 21, 37]' - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Cocktail_shaker_sort - """ - raise_if_backend_is_not_python( - cocktail_shaker_sort, kwargs.get('backend', Backend.PYTHON)) - def swap(i, j): - array[i], array[j] = array[j], array[i] - - lower = kwargs.get('start', 0) - upper = kwargs.get('end', len(array) - 1) - comp = kwargs.get("comp", lambda u, v: u <= v) - - swapping = False - while (not swapping and upper - lower >= 1): - - swapping = True - for j in range(lower, upper): - if _comp(array[j], array[j+1], comp) is False: - swap(j + 1, j) - swapping = False - - upper = upper - 1 - for j in range(upper, lower, -1): - if _comp(array[j-1], array[j], comp) is False: - swap(j, j - 1) - swapping = False - lower = lower + 1 - - if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)): - array._modify(True) - - return array - -def quick_sort(array: Array, **kwargs) -> Array: - """ - Performs quick sort on the given array. - - Parameters - ========== - - array: Array - The array which is to be sorted. - start: int - The starting index of the portion - which is to be sorted. - Optional, by default 0 - end: int - The ending index of the portion which - is to be sorted. - Optional, by default the index - of the last position filled. - comp: lambda/function - The comparator which is to be used - for sorting. If the function returns - False then only swapping is performed. - Optional, by default, less than or - equal to is used for comparing two - values. - pick_pivot_element: lambda/function - The function implementing the pivot picking - logic for quick sort. Should accept, `low`, - `high`, and `array` in this order, where `low` - represents the left end of the current partition, - `high` represents the right end, and `array` is - the original input array to `quick_sort` function. - Optional, by default, picks the element at `high` - index of the current partition as pivot. - backend: pydatastructs.Backend - The backend to be used. 
- Optional, by default, the best available - backend is used. - - Returns - ======= - - output: Array - The sorted array. - - Examples - ======== - - >>> from pydatastructs import OneDimensionalArray as ODA, quick_sort - >>> arr = ODA(int, [5, 78, 1, 0]) - >>> out = quick_sort(arr) - >>> str(out) - '[0, 1, 5, 78]' - >>> arr = ODA(int, [21, 37, 5]) - >>> out = quick_sort(arr) - >>> str(out) - '[5, 21, 37]' - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Quicksort - """ - backend = kwargs.pop("backend", Backend.PYTHON) - if backend == Backend.CPP: - return _algorithms.quick_sort(array, **kwargs) - from pydatastructs import Stack - comp = kwargs.get("comp", lambda u, v: u <= v) - pick_pivot_element = kwargs.get("pick_pivot_element", - lambda low, high, array: array[high]) - - def partition(low, high, pick_pivot_element): - i = (low - 1) - x = pick_pivot_element(low, high, array) - for j in range(low , high): - if _comp(array[j], x, comp) is True: - i = i + 1 - array[i], array[j] = array[j], array[i] - array[i + 1], array[high] = array[high], array[i + 1] - return (i + 1) - - lower = kwargs.get('start', 0) - upper = kwargs.get('end', len(array) - 1) - stack = Stack() - - stack.push(lower) - stack.push(upper) - - while stack.is_empty is False: - high = stack.pop() - low = stack.pop() - p = partition(low, high, pick_pivot_element) - if p - 1 > low: - stack.push(low) - stack.push(p - 1) - if p + 1 < high: - stack.push(p + 1) - stack.push(high) - - if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)): - array._modify(True) - - return array - -def longest_common_subsequence(seq1: OneDimensionalArray, seq2: OneDimensionalArray, - **kwargs) -> OneDimensionalArray: - """ - Finds the longest common subsequence between the - two given sequences. - - Parameters - ======== - - seq1: OneDimensionalArray - The first sequence. - seq2: OneDimensionalArray - The second sequence. - backend: pydatastructs.Backend - The backend to be used. 
- Optional, by default, the best available - backend is used. - - Returns - ======= - - output: OneDimensionalArray - The longest common subsequence. - - Examples - ======== - - >>> from pydatastructs import longest_common_subsequence as LCS, OneDimensionalArray as ODA - >>> arr1 = ODA(str, ['A', 'B', 'C', 'D', 'E']) - >>> arr2 = ODA(str, ['A', 'B', 'C', 'G' ,'D', 'E', 'F']) - >>> lcs = LCS(arr1, arr2) - >>> str(lcs) - "['A', 'B', 'C', 'D', 'E']" - >>> arr1 = ODA(str, ['A', 'P', 'P']) - >>> arr2 = ODA(str, ['A', 'p', 'P', 'S', 'P']) - >>> lcs = LCS(arr1, arr2) - >>> str(lcs) - "['A', 'P', 'P']" - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Longest_common_subsequence_problem - - Note - ==== - - The data types of elements across both the sequences - should be same and should be comparable. - """ - raise_if_backend_is_not_python( - longest_common_subsequence, kwargs.get('backend', Backend.PYTHON)) - row = len(seq1) - col = len(seq2) - check_mat = {0: [(0, []) for _ in range(col + 1)]} - - for i in range(1, row + 1): - check_mat[i] = [(0, []) for _ in range(col + 1)] - for j in range(1, col + 1): - if seq1[i-1] == seq2[j-1]: - temp = check_mat[i-1][j-1][1][:] - temp.append(seq1[i-1]) - check_mat[i][j] = (check_mat[i-1][j-1][0] + 1, temp) - else: - if check_mat[i-1][j][0] > check_mat[i][j-1][0]: - check_mat[i][j] = check_mat[i-1][j] - else: - check_mat[i][j] = check_mat[i][j-1] - - return OneDimensionalArray(seq1._dtype, check_mat[row][col][-1]) - -def is_ordered(array, **kwargs): - """ - Checks whether the given array is ordered or not. - - Parameters - ========== - - array: OneDimensionalArray - The array which is to be checked for having - specified ordering among its elements. - start: int - The starting index of the portion of the array - under consideration. - Optional, by default 0 - end: int - The ending index of the portion of the array - under consideration. - Optional, by default the index - of the last position filled. 
- comp: lambda/function - The comparator which is to be used - for specifying the desired ordering. - Optional, by default, less than or - equal to is used for comparing two - values. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - True if the specified ordering is present - from start to end (inclusive) otherwise False. - - Examples - ======== - - >>> from pydatastructs import OneDimensionalArray, is_ordered - >>> arr = OneDimensionalArray(int, [1, 2, 3, 4]) - >>> is_ordered(arr) - True - >>> arr1 = OneDimensionalArray(int, [1, 2, 3]) - >>> is_ordered(arr1, start=0, end=1, comp=lambda u, v: u > v) - False - """ - backend = kwargs.pop("backend", Backend.PYTHON) - if backend == Backend.CPP: - return _algorithms.is_ordered(array, **kwargs) - lower = kwargs.get('start', 0) - upper = kwargs.get('end', len(array) - 1) - comp = kwargs.get("comp", lambda u, v: u <= v) - - for i in range(lower + 1, upper + 1): - if array[i] is None or array[i - 1] is None: - continue - if comp(array[i], array[i - 1]): - return False - return True - -def upper_bound(array, value, **kwargs): - """ - Finds the index of the first occurence of an element greater than the given - value according to specified order, in the given OneDimensionalArray using a variation of binary search method. - - Parameters - ========== - - array: OneDimensionalArray - The array in which the upper bound has to be found. - start: int - The staring index of the portion of the array in which the upper bound - of a given value has to be looked for. - Optional, by default 0 - end: int, optional - The ending index of the portion of the array in which the upper bound - of a given value has to be looked for. - Optional, by default the index - of the last position filled. - comp: lambda/function - The comparator which is to be used - for specifying the desired ordering. 
- Optional, by default, less than or - equal to is used for comparing two - values. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - index: int - Index of the upper bound of the given value in the given OneDimensionalArray. - - Examples - ======== - - >>> from pydatastructs import upper_bound, OneDimensionalArray as ODA - >>> arr1 = ODA(int, [4, 5, 5, 6, 7]) - >>> ub = upper_bound(arr1, 5, start=0, end=4) - >>> ub - 3 - >>> arr2 = ODA(int, [7, 6, 5, 5, 4]) - >>> ub = upper_bound(arr2, 5, comp=lambda x, y: x > y) - >>> ub - 4 - - Note - ==== - - DynamicOneDimensionalArray objects may not work as expected. - """ - raise_if_backend_is_not_python( - upper_bound, kwargs.get('backend', Backend.PYTHON)) - start = kwargs.get('start', 0) - end = kwargs.get('end', len(array)) - comp = kwargs.get('comp', lambda x, y: x < y) - index = end - inclusive_end = end - 1 - if comp(value, array[start]): - index = start - while start <= inclusive_end: - mid = (start + inclusive_end)//2 - if not comp(value, array[mid]): - start = mid + 1 - else: - index = mid - inclusive_end = mid - 1 - return index - -def lower_bound(array, value, **kwargs): - """ - Finds the the index of the first occurence of an element which is not - less than the given value according to specified order, - in the given OneDimensionalArray using a variation of binary search method. - - Parameters - ========== - - array: OneDimensionalArray - The array in which the lower bound has to be found. - start: int - The staring index of the portion of the array in which the upper bound - of a given value has to be looked for. - Optional, by default 0 - end: int, optional - The ending index of the portion of the array in which the upper bound - of a given value has to be looked for. - Optional, by default the index - of the last position filled. 
- comp: lambda/function - The comparator which is to be used - for specifying the desired ordering. - Optional, by default, less than or - equal to is used for comparing two - values. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - index: int - Index of the lower bound of the given value in the given OneDimensionalArray - - Examples - ======== - - >>> from pydatastructs import lower_bound, OneDimensionalArray as ODA - >>> arr1 = ODA(int, [4, 5, 5, 6, 7]) - >>> lb = lower_bound(arr1, 5, end=4, comp=lambda x, y : x < y) - >>> lb - 1 - >>> arr = ODA(int, [7, 6, 5, 5, 4]) - >>> lb = lower_bound(arr, 5, start=0, comp=lambda x, y : x > y) - >>> lb - 2 - - Note - ==== - - DynamicOneDimensionalArray objects may not work as expected. - """ - raise_if_backend_is_not_python( - lower_bound, kwargs.get('backend', Backend.PYTHON)) - start = kwargs.get('start', 0) - end = kwargs.get('end', len(array)) - comp = kwargs.get('comp', lambda x, y: x < y) - index = end - inclusive_end = end - 1 - if not comp(array[start], value): - index = start - while start <= inclusive_end: - mid = (start + inclusive_end)//2 - if comp(array[mid], value): - start = mid + 1 - else: - index = mid - inclusive_end = mid - 1 - return index - -def longest_increasing_subsequence(array, **kwargs): - """ - Returns the longest increasing subsequence (as a OneDimensionalArray) that - can be obtained from a given OneDimensionalArray. A subsequence - of an array is an ordered subset of the array's elements having the same - sequential ordering as the original array. Here, an increasing - sequence stands for a strictly increasing sequence of numbers. - - Parameters - ========== - - array: OneDimensionalArray - The given array in the form of a OneDimensionalArray - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. 
- - Returns - ======= - - output: OneDimensionalArray - Returns the longest increasing subsequence that can be obtained - from the given array - - Examples - ======== - - >>> from pydatastructs import lower_bound, OneDimensionalArray as ODA - >>> from pydatastructs import longest_increasing_subsequence as LIS - >>> array = ODA(int, [2, 5, 3, 7, 11, 8, 10, 13, 6]) - >>> longest_inc_subsequence = LIS(array) - >>> str(longest_inc_subsequence) - '[2, 3, 7, 8, 10, 13]' - >>> array2 = ODA(int, [3, 4, -1, 5, 8, 2, 2 ,2, 3, 12, 7, 9, 10]) - >>> longest_inc_subsequence = LIS(array2) - >>> str(longest_inc_subsequence) - '[-1, 2, 3, 7, 9, 10]' - """ - raise_if_backend_is_not_python( - longest_increasing_subsequence, - kwargs.get('backend', Backend.PYTHON)) - n = len(array) - dp = OneDimensionalArray(int, n) - dp.fill(0) - parent = OneDimensionalArray(int, n) - parent.fill(-1) - length = 0 - for i in range(1, n): - if array[i] <= array[dp[0]]: - dp[0] = i - elif array[dp[length]] < array[i]: - length += 1 - dp[length] = i - parent[i] = dp[length - 1] - else: - curr_array = [array[dp[i]] for i in range(length)] - ceil = lower_bound(curr_array, array[i]) - dp[ceil] = i - parent[i] = dp[ceil - 1] - ans = DynamicOneDimensionalArray(int, 0) - last_index = dp[length] - while last_index != -1: - ans.append(array[last_index]) - last_index = parent[last_index] - n = ans._last_pos_filled + 1 - ans_ODA = OneDimensionalArray(int, n) - for i in range(n): - ans_ODA[n-1-i] = ans[i] - return ans_ODA - -def _permutation_util(array, start, end, comp, perm_comp): - size = end - start + 1 - permute = OneDimensionalArray(int, size) - for i, j in zip(range(start, end + 1), range(size)): - permute[j] = array[i] - i = size - 1 - while i > 0 and perm_comp(permute[i - 1], permute[i], comp): - i -= 1 - if i > 0: - left, right = i, size - 1 - while left <= right: - mid = left + (right - left) // 2 - if not perm_comp(permute[i - 1], permute[mid], comp): - left = mid + 1 - else: - right = mid - 1 - 
permute[i - 1], permute[left - 1] = \ - permute[left - 1], permute[i - 1] - left, right = i, size - 1 - while left < right: - permute[left], permute[right] = permute[right], permute[left] - left += 1 - right -= 1 - result = True if i > 0 else False - return result, permute - -def next_permutation(array, **kwargs): - """ - If the function can determine the next higher permutation, it - returns `True` and the permutation in a new array. - If that is not possible, because it is already at the largest possible - permutation, it returns the elements according to the first permutation - and returns `False` and the permutation in a new array. - - Parameters - ========== - - array: OneDimensionalArray - The array which is to be used for finding next permutation. - start: int - The staring index of the considered portion of the array. - Optional, by default 0 - end: int, optional - The ending index of the considered portion of the array. - Optional, by default the index of the last position filled. - comp: lambda/function - The comparator which is to be used for specifying the - desired lexicographical ordering. - Optional, by default, less than is - used for comparing two values. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - - Returns - ======= - - output: bool, OneDimensionalArray - First element is `True` if the function can rearrange - the given portion of the input array as a lexicographically - greater permutation, otherwise returns `False`. - Second element is an array having the next permutation. 
- - - Examples - ======== - - >>> from pydatastructs import next_permutation, OneDimensionalArray as ODA - >>> array = ODA(int, [1, 2, 3, 4]) - >>> is_greater, next_permute = next_permutation(array) - >>> is_greater, str(next_permute) - (True, '[1, 2, 4, 3]') - >>> array = ODA(int, [3, 2, 1]) - >>> is_greater, next_permute = next_permutation(array) - >>> is_greater, str(next_permute) - (False, '[1, 2, 3]') - - References - ========== - - .. [1] http://www.cplusplus.com/reference/algorithm/next_permutation/ - """ - raise_if_backend_is_not_python( - next_permutation, kwargs.get('backend', Backend.PYTHON)) - start = kwargs.get('start', 0) - end = kwargs.get('end', len(array) - 1) - comp = kwargs.get('comp', lambda x, y: x < y) - - def _next_permutation_comp(x, y, _comp): - if _comp(x, y): - return False - else: - return True - - return _permutation_util(array, start, end, comp, - _next_permutation_comp) - -def prev_permutation(array, **kwargs): - """ - If the function can determine the next lower permutation, it - returns `True` and the permutation in a new array. - If that is not possible, because it is already at the lowest possible - permutation, it returns the elements according to the last permutation - and returns `False` and the permutation in a new array. - - Parameters - ========== - - array: OneDimensionalArray - The array which is to be used for finding next permutation. - start: int - The staring index of the considered portion of the array. - Optional, by default 0 - end: int, optional - The ending index of the considered portion of the array. - Optional, by default the index of the last position filled. - comp: lambda/function - The comparator which is to be used for specifying the - desired lexicographical ordering. - Optional, by default, less than is - used for comparing two values. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. 
- - - Returns - ======= - - output: bool, OneDimensionalArray - First element is `True` if the function can rearrange - the given portion of the input array as a lexicographically - smaller permutation, otherwise returns `False`. - Second element is an array having the previous permutation. - - - Examples - ======== - - >>> from pydatastructs import prev_permutation, OneDimensionalArray as ODA - >>> array = ODA(int, [1, 2, 4, 3]) - >>> is_lower, prev_permute = prev_permutation(array) - >>> is_lower, str(prev_permute) - (True, '[1, 2, 3, 4]') - >>> array = ODA(int, [1, 2, 3, 4]) - >>> is_lower, prev_permute = prev_permutation(array) - >>> is_lower, str(prev_permute) - (False, '[4, 3, 2, 1]') - - References - ========== - - .. [1] http://www.cplusplus.com/reference/algorithm/prev_permutation/ - """ - raise_if_backend_is_not_python( - prev_permutation, kwargs.get('backend', Backend.PYTHON)) - start = kwargs.get('start', 0) - end = kwargs.get('end', len(array) - 1) - comp = kwargs.get('comp', lambda x, y: x < y) - - def _prev_permutation_comp(x, y, _comp): - if _comp(x, y): - return True - else: - return False - - return _permutation_util(array, start, end, comp, - _prev_permutation_comp) - -def bubble_sort(array, **kwargs): - """ - Implements bubble sort algorithm. - - Parameters - ========== - - array: Array - The array which is to be sorted. - start: int - The starting index of the portion - which is to be sorted. - Optional, by default 0 - end: int - The ending index of the portion which - is to be sorted. - Optional, by default the index - of the last position filled. - comp: lambda/function - The comparator which is to be used - for sorting. If the function returns - False then only swapping is performed. - Optional, by default, less than or - equal to is used for comparing two - values. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. 
- - Returns - ======= - - output: Array - The sorted array. - - Examples - ======== - - >>> from pydatastructs import OneDimensionalArray, bubble_sort - >>> arr = OneDimensionalArray(int,[3, 2, 1]) - >>> out = bubble_sort(arr) - >>> str(out) - '[1, 2, 3]' - >>> out = bubble_sort(arr, comp=lambda u, v: u > v) - >>> str(out) - '[3, 2, 1]' - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Bubble_sort - """ - backend = kwargs.pop("backend", Backend.PYTHON) - if backend == Backend.CPP: - return _algorithms.bubble_sort(array, **kwargs) - if backend == Backend.LLVM: - return _algorithms.bubble_sort_llvm(array, **kwargs) - start = kwargs.get('start', 0) - end = kwargs.get('end', len(array) - 1) - comp = kwargs.get("comp", lambda u, v: u <= v) - arr_len = len(array) - for i in range(arr_len - 1): - for j in range(start , end): - if not _comp(array[j], array[j + 1], comp): - array[j], array[j + 1] = array[j + 1], array[j] - - if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)): - array._modify(True) - - return array - -def selection_sort(array, **kwargs): - """ - Implements selection sort algorithm. - - Parameters - ========== - - array: Array - The array which is to be sorted. - start: int - The starting index of the portion - which is to be sorted. - Optional, by default 0 - end: int - The ending index of the portion which - is to be sorted. - Optional, by default the index - of the last position filled. - comp: lambda/function - The comparator which is to be used - for sorting. If the function returns - False then only swapping is performed. - Optional, by default, less than or - equal to is used for comparing two - values. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - output: Array - The sorted array. 
- - Examples - ======== - - >>> from pydatastructs import OneDimensionalArray, selection_sort - >>> arr = OneDimensionalArray(int,[3, 2, 1]) - >>> out = selection_sort(arr) - >>> str(out) - '[1, 2, 3]' - >>> out = selection_sort(arr, comp=lambda u, v: u > v) - >>> str(out) - '[3, 2, 1]' - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Selection_sort - """ - backend = kwargs.pop("backend", Backend.PYTHON) - if backend == Backend.CPP: - return _algorithms.bubble_sort(array, **kwargs) - start = kwargs.get('start', 0) - end = kwargs.get('end', len(array) - 1) - comp = kwargs.get('comp', lambda u, v: u <= v) - - for i in range(start, end + 1): - jMin = i - for j in range(i + 1, end + 1): - if not _comp(array[jMin], array[j], comp): - jMin = j - if jMin != i: - array[i], array[jMin] = array[jMin], array[i] - - if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)): - array._modify(True) - - return array - -def insertion_sort(array, **kwargs): - """ - Implements insertion sort algorithm. - - Parameters - ========== - - array: Array - The array which is to be sorted. - start: int - The starting index of the portion - which is to be sorted. - Optional, by default 0 - end: int - The ending index of the portion which - is to be sorted. - Optional, by default the index - of the last position filled. - comp: lambda/function - The comparator which is to be used - for sorting. If the function returns - False then only swapping is performed. - Optional, by default, less than or - equal to is used for comparing two - values. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - output: Array - The sorted array. 
- - Examples - ======== - - >>> from pydatastructs import OneDimensionalArray, insertion_sort - >>> arr = OneDimensionalArray(int,[3, 2, 1]) - >>> out = insertion_sort(arr) - >>> str(out) - '[1, 2, 3]' - >>> out = insertion_sort(arr, comp=lambda u, v: u > v) - >>> str(out) - '[3, 2, 1]' - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Insertion_sort - """ - backend = kwargs.pop("backend", Backend.PYTHON) - if backend == Backend.CPP: - return _algorithms.insertion_sort(array, **kwargs) - start = kwargs.get('start', 0) - end = kwargs.get('end', len(array) - 1) - comp = kwargs.get('comp', lambda u, v: u <= v) - - for i in range(start + 1, end + 1): - temp = array[i] - j = i - while j > start and not _comp(array[j - 1], temp, comp): - array[j] = array[j - 1] - j -= 1 - array[j] = temp - - if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)): - array._modify(True) - - return array - -def linear_search(array, value, **kwargs): - """ - Implements linear search algorithm. - - Parameters - ========== - - array: OneDimensionalArray - The array which is to be searched. - value: - The value which is to be searched - inside the array. - start: int - The starting index of the portion - which is to be searched. - Optional, by default 0 - end: int - The ending index of the portion which - is to be searched. - Optional, by default the index - of the last position filled. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - output: int - The index of value if found. - If not found, returns None. - - Examples - ======== - - >>> from pydatastructs import OneDimensionalArray, linear_search - >>> arr = OneDimensionalArray(int,[3, 2, 1]) - >>> linear_search(arr, 2) - 1 - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Linear_search - """ - backend = kwargs.pop("backend", Backend.PYTHON) - if backend == Backend.CPP: - return _algorithms.linear_search(array, value, **kwargs) - start = kwargs.get('start', 0) - end = kwargs.get('end', len(array) - 1) - - for i in range(start, end + 1): - if array[i] == value: - return i - - return None - -def binary_search(array, value, **kwargs): - """ - Implements binary search algorithm. - - Parameters - ========== - - array: OneDimensionalArray - The array which is to be searched. - value: - The value which is to be searched - inside the array. - start: int - The starting index of the portion - which is to be searched. - Optional, by default 0 - end: int - The ending index of the portion which - is to be searched. - Optional, by default the index - of the last position filled. - comp: lambda/function - The comparator which is to be used - for performing comparisons. - Optional, by default, less than or - equal to is used for comparing two - values. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - output: int - The index of elem if found. - If not found, returns None. - - Examples - ======== - - >>> from pydatastructs import OneDimensionalArray, binary_search - >>> arr = OneDimensionalArray(int,[1, 2, 3, 5, 10, 12]) - >>> binary_search(arr, 5) - 3 - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Binary_search_algorithm - - Note - ==== - - This algorithm assumes that the portion of the array - to be searched is already sorted. 
- """ - backend = kwargs.pop("backend", Backend.PYTHON) - if backend == Backend.CPP: - return _algorithms.binary_search(array, value, **kwargs) - start = kwargs.get('start', 0) - end = kwargs.get('end', len(array) - 1) - comp = kwargs.get("comp", lambda u, v: u <= v) - - left = start - right = end - while left <= right: - middle = left//2 + right//2 + left % 2 * right % 2 - if array[middle] == value: - return middle - if comp(array[middle], value): - left = middle + 1 - else: - right = middle - 1 - - return None - -def jump_search(array, value, **kwargs): - """ - Implements jump search algorithm. - - Parameters - ========== - - array: OneDimensionalArray - The array which is to be searched. - value: - The value which is to be searched - inside the array. - start: int - The starting index of the portion - which is to be searched. - Optional, by default 0 - end: int - The ending index of the portion which - is to be searched. - Optional, by default the index - of the last position filled. - comp: lambda/function - The comparator which is to be used - for performing comparisons. - Optional, by default, less than or - equal to is used for comparing two - values. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - output: int - The index of elem if found. - If not found, returns None. - - Examples - ======== - - >>> from pydatastructs import OneDimensionalArray, jump_search - >>> arr = OneDimensionalArray(int,[1, 2, 3, 5, 10, 12]) - >>> linear_search(arr, 5) - 3 - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Jump_search - - Note - ==== - - This algorithm assumes that the portion of the array - to be searched is already sorted. 
- """ - backend = kwargs.pop("backend", Backend.PYTHON) - if backend == Backend.CPP: - return _algorithms.jump_search(array, value, **kwargs) - start = kwargs.get('start', 0) - end = kwargs.get('end', len(array) - 1) - comp = kwargs.get("comp", lambda u, v: u < v) - - step = int(sqrt(end - start + 1)) - current_position = step - prev = start - while comp(array[min(current_position, end)], value): - prev = current_position - current_position += step - if prev > end: - return None - while prev <= min(current_position, end): - if array[prev] == value: - return prev - prev += 1 - - return None - -def intro_sort(array, **kwargs) -> Array: - """ - Performs intro sort on the given array. - - Parameters - ========== - - array: Array - The array which is to be sorted. - start: int - The starting index of the portion - which is to be sorted. - Optional, by default 0 - end: int - The ending index of the portion which - is to be sorted. - Optional, by default the index - of the last position filled. - maxdepth: Enables the user to define the maximum - recursion depth, takes value 2*log(length(A)) - by default (ref: Wikipedia[1]). - ins_threshold: Threshold under which insertion - sort has to be performed, default value is - 16 (ref: Wikipedia[1]). - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - output: Array - The sorted array. - - Examples - ======== - - >>> from pydatastructs import OneDimensionalArray as ODA, intro_sort - >>> arr = ODA(int, [5, 78, 1, 0]) - >>> out = intro_sort(arr) - >>> str(out) - '[0, 1, 5, 78]' - >>> arr = ODA(int, [21, 37, 5]) - >>> out = intro_sort(arr) - >>> str(out) - '[5, 21, 37]' - - Note - ==== - - This function does not support custom comparators as - is the case with other sorting functions in this file. - This is because of heapsort's limitation. - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Introsort - """ - raise_if_backend_is_not_python( - intro_sort, kwargs.get('backend', Backend.PYTHON)) - - # Always sorts in increasing order, this is because of - # heapsort's limitation - comp = lambda u, v: u <= v - lower = kwargs.get('start', 0) - upper = kwargs.get('end', len(array) - 1) - n = upper - lower + 1 - if n <= 0: - maxdepth = 0 - else: - maxdepth = kwargs.get("maxdepth", int(2 * (log(n)/log(2)))) - - ins_threshold = kwargs.get("ins_threshold", 16) - - def partition(array, lower, upper): - pivot = array[lower] - left = lower + 1 - right = upper - done = False - while not done: - while left <= right and _comp(array[left], pivot, comp): - left += 1 - while _comp(pivot, array[right], comp) and right >= left: - right -= 1 - if right < left: - done = True - else: - array[left], array[right] = array[right], array[left] - left+=1 - right-=1 - - array[lower], array[right] = array[right], array[lower] - return right - - if n < ins_threshold: - return insertion_sort(array, start=lower, end=upper) - elif maxdepth == 0: - heapsort(array, start=lower, end=upper) - return array - else: - p = partition(array, lower, upper) - - intro_sort(array, start=lower, end=p-1, maxdepth=maxdepth-1, ins_threshold=ins_threshold) - intro_sort(array, start=p+1, end=upper, maxdepth=maxdepth-1, ins_threshold=ins_threshold) - - return array - -def shell_sort(array, *args, **kwargs): - """ - Implements shell sort algorithm. - - Parameters - ========== - - array: Array - The array which is to be sorted. - start: int - The starting index of the portion - which is to be sorted. - Optional, by default 0 - end: int - The ending index of the portion which - is to be sorted. - Optional, by default the index - of the last position filled. - comp: lambda/function - The comparator which is to be used - for sorting. If the function returns - False then only swapping is performed. - Optional, by default, less than or - equal to is used for comparing two - values. 
- backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - output: Array - The sorted array. - - Examples - ======== - - >>> from pydatastructs.linear_data_structures.algorithms import OneDimensionalArray, shell_sort - >>> arr = OneDimensionalArray(int, [3, 2, 1]) - >>> out = shell_sort(arr) - >>> str(out) - '[1, 2, 3]' - >>> out = shell_sort(arr, comp=lambda u, v: u > v) - >>> str(out) - '[3, 2, 1]' - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Shellsort - """ - start = int(kwargs.get('start', 0)) - end = int(kwargs.get('end', len(array) - 1)) - comp = kwargs.get('comp', lambda u, v: u <= v) - - n = end - start + 1 - gap = n // 2 - while gap > 0: - for i in range(start + gap, end + 1): - temp = array[i] - j = i - while j >= start + gap and not _comp(array[j - gap], temp, comp): - array[j] = array[j - gap] - j -= gap - array[j] = temp - gap //= 2 - - if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)): - array._modify(True) - - return array - -def radix_sort(array, *args, **kwargs): - """ - Implements radix sort algorithm for non-negative integers. - - Parameters - ========== - - array: Array - The array which is to be sorted. Must contain non-negative integers. - start: int - The starting index of the portion - which is to be sorted. - Optional, by default 0 - end: int - The ending index of the portion which - is to be sorted. - Optional, by default the index - of the last position filled. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - output: Array - The sorted array. 
- - Examples - ======== - - >>> from pydatastructs.linear_data_structures.algorithms import OneDimensionalArray, radix_sort - >>> arr = OneDimensionalArray(int, [170, 45, 75, 90, 802, 24, 2, 66]) - >>> out = radix_sort(arr) - >>> str(out) - '[2, 24, 45, 66, 75, 90, 170, 802]' - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Radix_sort - """ - start = int(kwargs.get('start', 0)) - end = int(kwargs.get('end', len(array) - 1)) - - n = end - start + 1 - max_val = array[start] - for i in range(start + 1, end + 1): - if array[i] is not None and array[i] > max_val: - max_val = array[i] - exp = 1 - while max_val // exp > 0: - count = [0] * 10 - output = [None] * n - - for i in range(start, end + 1): - if array[i] is not None: - digit = (array[i] // exp) % 10 - count[digit] += 1 - - for i in range(1, 10): - count[i] += count[i - 1] - - for i in range(end, start - 1, -1): - if array[i] is not None: - digit = (array[i] // exp) % 10 - count[digit] -= 1 - output[count[digit]] = array[i] - - for i in range(n): - array[start + i] = output[i] - - exp *= 10 - - if _check_type(array, (DynamicArray, _arrays.DynamicOneDimensionalArray)): - array._modify(True) - - return array diff --git a/lib/python3.12/site-packages/pydatastructs/linear_data_structures/arrays.py b/lib/python3.12/site-packages/pydatastructs/linear_data_structures/arrays.py deleted file mode 100644 index 2e0c3fd97..000000000 --- a/lib/python3.12/site-packages/pydatastructs/linear_data_structures/arrays.py +++ /dev/null @@ -1,473 +0,0 @@ -from pydatastructs.utils.misc_util import ( - _check_type, NoneType, Backend, - raise_if_backend_is_not_python) -from pydatastructs.linear_data_structures._backend.cpp import _arrays - -__all__ = [ - 'OneDimensionalArray', - 'MultiDimensionalArray', - 'DynamicOneDimensionalArray' -] - -class Array(object): - """ - Abstract class for arrays in pydatastructs. 
- """ - def __str__(self) -> str: - return str(self._data) - -class OneDimensionalArray(Array): - """ - Represents one dimensional static arrays of - fixed size. - - Parameters - ========== - - dtype: type - A valid object type. - size: int - The number of elements in the array. - elements: list - The elements in the array, all should - be of same type. - init: a python type - The initial value with which the element has - to be initialized. By default none, used only - when the data is not given. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Raises - ====== - - ValueError - When the number of elements in the list do not - match with the size. - More than three parameters are passed as arguments. - Types of arguments is not as mentioned in the docstring. - - Note - ==== - - At least one parameter should be passed as an argument along - with the dtype. - - Examples - ======== - - >>> from pydatastructs import OneDimensionalArray - >>> arr = OneDimensionalArray(int, 5) - >>> arr.fill(6) - >>> arr[0] - 6 - >>> arr[0] = 7.2 - >>> arr[0] - 7 - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Array_data_structure#One-dimensional_arrays - """ - - __slots__ = ['_size', '_data', '_dtype'] - - def __new__(cls, dtype=NoneType, *args, **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.CPP: - return _arrays.OneDimensionalArray(dtype, *args, **kwargs) - if dtype is NoneType: - raise ValueError("Data type is not defined.") - if len(args) not in (1, 2): - raise ValueError("Too few arguments to create a 1D array," - " pass either size of the array" - " or list of elements or both.") - obj = Array.__new__(cls) - obj._dtype = dtype - if len(args) == 2: - if _check_type(args[0], list) and \ - _check_type(args[1], int): - for i in range(len(args[0])): - if _check_type(args[0][i], dtype) is False: - args[0][i] = dtype(args[0][i]) - size, data = args[1], list(args[0]) - elif _check_type(args[1], list) and \ - _check_type(args[0], int): - for i in range(len(args[1])): - if _check_type(args[1][i], dtype) is False: - args[1][i] = dtype(args[1][i]) - size, data = args[0], list(args[1]) - else: - raise TypeError("Expected type of size is int and " - "expected type of data is list/tuple.") - if size != len(data): - raise ValueError("Conflict in the size, %s and length of data, %s" - %(size, len(data))) - obj._size, obj._data = size, data - - elif len(args) == 1: - if _check_type(args[0], int): - obj._size = args[0] - init = kwargs.get('init', None) - obj._data = [init for i in range(args[0])] - elif _check_type(args[0], (list, tuple)): - for i in range(len(args[0])): - if _check_type(args[0][i], dtype) is False: - args[0][i] = dtype(args[0][i]) - obj._size, obj._data = len(args[0]), \ - list(args[0]) - else: - raise TypeError("Expected type of size is int and " - "expected type of data is list/tuple.") - - return obj - - @classmethod - def methods(cls): - return ['__new__', '__getitem__', - '__setitem__', 'fill', '__len__'] - - def __getitem__(self, i): - if i >= self._size or i < 0: - raise 
IndexError(("Index, {} out of range, " - "[{}, {}).".format(i, 0, self._size))) - return self._data.__getitem__(i) - - def __setitem__(self, idx, elem): - if elem is None: - self._data[idx] = None - else: - if _check_type(elem, self._dtype) is False: - elem = self._dtype(elem) - self._data[idx] = elem - - def fill(self, elem): - elem = self._dtype(elem) - for i in range(self._size): - self._data[i] = elem - - def __len__(self): - return self._size - -class MultiDimensionalArray(Array): - """ - Represents a multi-dimensional array. - - Parameters - ========== - - dtype: type - A valid object type. - *args: int - The dimensions of the array. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Raises - ====== - - IndexError - Index goes out of boundaries, or - the number of index given is not - the same as the number of dimensions. - ValueError - When there's no dimensions or the - dimension size is 0. - - Examples - ======== - - >>> from pydatastructs import MultiDimensionalArray as MDA - >>> arr = MDA(int, 5, 6, 9) - >>> arr.fill(32) - >>> arr[3, 0, 0] - 32 - >>> arr[3, 0, 0] = 7 - >>> arr[3, 0, 0] - 7 - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Array_data_structure#Multidimensional_arrays - - """ - __slots__ = ['_sizes', '_data', '_dtype'] - - def __new__(cls, dtype: type = NoneType, *args, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - if dtype is NoneType: - raise ValueError("Data type is not defined.") - elif not args: - raise ValueError("Too few arguments to create a " - "multi dimensional array, pass dimensions.") - if len(args) == 1: - obj = Array.__new__(cls) - obj._dtype = dtype - obj._sizes = (args[0], 1) - obj._data = [None] * args[0] - return obj - - dimensions = args - for dimension in dimensions: - if dimension < 1: - raise ValueError("Size of dimension cannot be less than 1") - n_dimensions = len(dimensions) - d_sizes = [] - index = 0 - while n_dimensions > 1: - size = dimensions[index] - for i in range(index+1, len(dimensions)): - size = size * dimensions[i] - d_sizes.append(size) - n_dimensions -= 1 - index += 1 - d_sizes.append(dimensions[index]) - d_sizes.append(1) - obj = Array.__new__(cls) - obj._dtype = dtype - obj._sizes = tuple(d_sizes) - obj._data = [None] * obj._sizes[1] * dimensions[0] - return obj - - @classmethod - def methods(cls) -> list: - return ['__new__', '__getitem__', '__setitem__', 'fill', 'shape'] - - def __getitem__(self, indices): - self._compare_shape(indices) - if isinstance(indices, int): - return self._data[indices] - position = 0 - for i in range(0, len(indices)): - position += self._sizes[i + 1] * indices[i] - return self._data[position] - - def __setitem__(self, indices, element) -> None: - self._compare_shape(indices) - if isinstance(indices, int): - self._data[indices] = element - else: - position = 0 - for i in range(0, len(indices)): - position += self._sizes[i + 1] * indices[i] - self._data[position] = element - - def _compare_shape(self, indices) -> None: - indices = [indices] if isinstance(indices, int) else indices - if len(indices) != len(self._sizes) - 1: - raise 
IndexError("Shape mismatch, current shape is %s" % str(self.shape)) - if any(indices[i] >= self._sizes[i] for i in range(len(indices))): - raise IndexError("Index out of range.") - - def fill(self, element) -> None: - element = self._dtype(element) - for i in range(len(self._data)): - self._data[i] = element - - @property - def shape(self) -> tuple: - shape = [] - size = len(self._sizes) - for i in range(1, size): - shape.append(self._sizes[i-1]//self._sizes[i]) - return tuple(shape) - -class DynamicArray(Array): - """ - Abstract class for dynamic arrays. - """ - pass - -class DynamicOneDimensionalArray(DynamicArray, OneDimensionalArray): - """ - Represents resizable and dynamic one - dimensional arrays. - - Parameters - ========== - - dtype: type - A valid object type. - size: int - The number of elements in the array. - elements: list/tuple - The elements in the array, all should - be of same type. - init: a python type - The inital value with which the element has - to be initialized. By default none, used only - when the data is not given. - load_factor: float, by default 0.25 - The number below which if the ratio, Num(T)/Size(T) - falls then the array is contracted such that at - most only half the positions are filled. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Raises - ====== - - ValueError - When the number of elements in the list do not - match with the size. - More than three parameters are passed as arguments. - Types of arguments is not as mentioned in the docstring. - The load factor is not of floating point type. - - Note - ==== - - At least one parameter should be passed as an argument along - with the dtype. - Num(T) means the number of positions which are not None in the - array. - Size(T) means the maximum number of elements that the array can hold. 
- - Examples - ======== - - >>> from pydatastructs import DynamicOneDimensionalArray as DODA - >>> arr = DODA(int, 0) - >>> arr.append(1) - >>> arr.append(2) - >>> arr[0] - 1 - >>> arr.delete(0) - >>> arr[0] - >>> arr[1] - 2 - >>> arr.append(3) - >>> arr.append(4) - >>> [arr[i] for i in range(arr.size)] - [None, 2, 3, 4, None, None, None] - - References - ========== - - .. [1] http://www.cs.nthu.edu.tw/~wkhon/algo09/lectures/lecture16.pdf - """ - - __slots__ = ['_load_factor', '_num', '_last_pos_filled', '_size'] - - def __new__(cls, dtype=NoneType, *args, **kwargs): - backend = kwargs.get("backend", Backend.PYTHON) - if backend == Backend.CPP: - return _arrays.DynamicOneDimensionalArray(dtype, *args, **kwargs) - obj = super().__new__(cls, dtype, *args, **kwargs) - obj._load_factor = float(kwargs.get('load_factor', 0.25)) - obj._num = 0 if obj._size == 0 or obj[0] is None else obj._size - obj._last_pos_filled = obj._num - 1 - return obj - - @classmethod - def methods(cls): - return ['__new__', '_modify', - 'append', 'delete', 'size', - '__str__', '__reversed__'] - - def _modify(self, force=False): - """ - Contracts the array if Num(T)/Size(T) falls - below load factor. 
- """ - if force: - i = -1 - while self._data[i] is None: - i -= 1 - self._last_pos_filled = i%self._size - if (self._num/self._size < self._load_factor): - arr_new = OneDimensionalArray(self._dtype, 2*self._num + 1) - j = 0 - for i in range(self._last_pos_filled + 1): - if self._data[i] is not None: - arr_new[j] = self[i] - j += 1 - self._last_pos_filled = j - 1 - self._data = arr_new._data - self._size = arr_new._size - - def append(self, el): - if self._last_pos_filled + 1 == self._size: - arr_new = OneDimensionalArray(self._dtype, 2*self._size + 1) - for i in range(self._last_pos_filled + 1): - arr_new[i] = self[i] - arr_new[self._last_pos_filled + 1] = el - self._size = arr_new._size - self._data = arr_new._data - else: - self[self._last_pos_filled + 1] = el - self._last_pos_filled += 1 - self._num += 1 - self._modify() - - def delete(self, idx): - if idx <= self._last_pos_filled and idx >= 0 and \ - self[idx] is not None: - self[idx] = None - self._num -= 1 - if self._last_pos_filled == idx: - self._last_pos_filled -= 1 - return self._modify() - - @property - def size(self): - return self._size - - def __str__(self): - to_be_printed = ['' for _ in range(self._last_pos_filled + 1)] - for i in range(self._last_pos_filled + 1): - if self._data[i] is not None: - to_be_printed[i] = str(self._data[i]) - return str(to_be_printed) - - def __reversed__(self): - for i in range(self._last_pos_filled, -1, -1): - yield self._data[i] - -class ArrayForTrees(DynamicOneDimensionalArray): - """ - Utility dynamic array for storing nodes of a tree. - - Parameters - ========== - - backend: pydatastructs.Backend - The backend to be used. Available backends: Python and C++ - Optional, by default, the Python backend is used. For faster execution, use the C++ backend. 
- - See Also - ======== - - pydatastructs.linear_data_structures.arrays.DynamicOneDimensionalArray - """ - def _modify(self): - if self._num/self._size < self._load_factor: - new_indices = {} - arr_new = OneDimensionalArray(self._dtype, 2*self._num + 1) - j = 0 - for i in range(self._last_pos_filled + 1): - if self[i] is not None: - arr_new[j] = self[i] - new_indices[self[i].key] = j - j += 1 - for i in range(j): - if arr_new[i].left is not None: - arr_new[i].left = new_indices[self[arr_new[i].left].key] - if arr_new[i].right is not None: - arr_new[i].right = new_indices[self[arr_new[i].right].key] - if arr_new[i].parent is not None: - arr_new[i].parent = new_indices[self[arr_new[i].parent].key] - self._last_pos_filled = j - 1 - self._data = arr_new._data - self._size = arr_new._size - return new_indices - return None diff --git a/lib/python3.12/site-packages/pydatastructs/linear_data_structures/linked_lists.py b/lib/python3.12/site-packages/pydatastructs/linear_data_structures/linked_lists.py deleted file mode 100644 index 09178daf1..000000000 --- a/lib/python3.12/site-packages/pydatastructs/linear_data_structures/linked_lists.py +++ /dev/null @@ -1,819 +0,0 @@ -import math, random -from pydatastructs.utils.misc_util import _check_type, LinkedListNode, SkipNode -from pydatastructs.utils.misc_util import ( - Backend, raise_if_backend_is_not_python) - -__all__ = [ - 'SinglyLinkedList', - 'DoublyLinkedList', - 'SinglyCircularLinkedList', - 'DoublyCircularLinkedList', - 'SkipList' -] - -class LinkedList(object): - """ - Abstract class for Linked List. - """ - __slots__ = ['head', 'size'] - - def __len__(self): - return self.size - - @property - def is_empty(self): - return self.size == 0 - - def search(self, key): - curr_node = self.head - while curr_node is not None: - if curr_node.key == key: - return curr_node - curr_node = curr_node.next - if curr_node is self.head: - return None - return None - - def __str__(self): - """ - For printing the linked list. 
- """ - elements = [] - current_node = self.head - while current_node is not None: - elements.append(str(current_node)) - current_node = current_node.next - if current_node == self.head: - break - return str(elements) - - def insert_after(self, prev_node, key, data=None): - """ - Inserts a new node after the prev_node. - - Parameters - ========== - - prev_node: LinkedListNode - The node after which the - new node is to be inserted. - - key - Any valid identifier to uniquely - identify the node in the linked list. - - data - Any valid data to be stored in the node. - """ - raise NotImplementedError('This is an abstract method') - - def insert_at(self, index, key, data=None): - """ - Inserts a new node at the input index. - - Parameters - ========== - - index: int - An integer satisfying python indexing properties. - - key - Any valid identifier to uniquely - identify the node in the linked list. - - data - Any valid data to be stored in the node. - """ - raise NotImplementedError('This is an abstract method') - - def extract(self, index): - """ - Extracts the node at the index of the list. - - Parameters - ========== - - index: int - An integer satisfying python indexing properties. - - Returns - ======= - - current_node: LinkedListNode - The node at index i. - """ - raise NotImplementedError('This is an abstract method') - - def __getitem__(self, index): - """ - Returns - ======= - - current_node: LinkedListNode - The node at given index. - """ - if index < 0: - index = self.size + index - - if index >= self.size: - raise IndexError('%d index is out of range.'%(index)) - - counter = 0 - current_node = self.head - while counter != index: - current_node = current_node.next - counter += 1 - return current_node - - def appendleft(self, key, data=None): - """ - Pushes a new node at the start i.e., - the left of the list. - - Parameters - ========== - - key - Any valid identifier to uniquely - identify the node in the linked list. 
- - data - Any valid data to be stored in the node. - """ - self.insert_at(0, key, data) - - def append(self, key, data=None): - """ - Appends a new node at the end of the list. - - Parameters - ========== - - key - Any valid identifier to uniquely - identify the node in the linked list. - - data - Any valid data to be stored in the node. - """ - self.insert_at(self.size, key, data) - - def insert_before(self, next_node, key, data=None): - """ - Inserts a new node before the next_node. - - Parameters - ========== - - next_node: LinkedListNode - The node before which the - new node is to be inserted. - - key - Any valid identifier to uniquely - identify the node in the linked list. - - data - Any valid data to be stored in the node. - """ - raise NotImplementedError('This is an abstract method') - - def popleft(self): - """ - Extracts the Node from the left - i.e. start of the list. - - Returns - ======= - - old_head: LinkedListNode - The leftmost element of linked - list. - """ - return self.extract(0) - - def popright(self): - """ - Extracts the node from the right - of the linked list. - - Returns - ======= - - old_tail: LinkedListNode - The leftmost element of linked - list. - """ - return self.extract(-1) - -class DoublyLinkedList(LinkedList): - """ - Represents Doubly Linked List - - Parameters - ========== - - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs import DoublyLinkedList - >>> dll = DoublyLinkedList() - >>> dll.append(6) - >>> dll[0].key - 6 - >>> dll.head.key - 6 - >>> dll.append(5) - >>> dll.appendleft(2) - >>> str(dll) - "['(2, None)', '(6, None)', '(5, None)']" - >>> dll[0].key = 7.2 - >>> dll.extract(1).key - 6 - >>> str(dll) - "['(7.2, None)', '(5, None)']" - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Doubly_linked_list - - """ - __slots__ = ['head', 'tail', 'size'] - - def __new__(cls, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = LinkedList.__new__(cls) - obj.head = None - obj.tail = None - obj.size = 0 - return obj - - @classmethod - def methods(cls): - return ['__new__', 'insert_after', - 'insert_before', 'insert_at', 'extract'] - - def insert_after(self, prev_node, key, data=None): - self.size += 1 - new_node = LinkedListNode(key, data, - links=['next', 'prev'], - addrs=[None, None]) - new_node.next = prev_node.next - if new_node.next is not None: - new_node.next.prev = new_node - prev_node.next = new_node - new_node.prev = prev_node - - if new_node.next is None: - self.tail = new_node - - def insert_before(self, next_node, key, data=None): - self.size += 1 - new_node = LinkedListNode(key, data, - links=['next', 'prev'], - addrs=[None, None]) - new_node.prev = next_node.prev - next_node.prev = new_node - new_node.next = next_node - if new_node.prev is not None: - new_node.prev.next = new_node - else: - self.head = new_node - - def insert_at(self, index, key, data=None): - if self.size == 0 and (index in (0, -1)): - index = 0 - - if index < 0: - index = self.size + index - - if index > self.size: - raise IndexError('%d index is out of range.'%(index)) - - self.size += 1 - new_node = LinkedListNode(key, data, - links=['next', 'prev'], - addrs=[None, None]) - if self.size == 1: - self.head, self.tail = \ - new_node, new_node - elif index == self.size - 1: - new_node.prev = self.tail - new_node.next = self.tail.next - self.tail.next = new_node - self.tail = new_node - else: - counter = 0 - current_node = self.head - prev_node = None - while counter != index: - prev_node = current_node - current_node = current_node.next - counter += 1 - new_node.prev = prev_node - new_node.next = current_node - if prev_node is not None: - prev_node.next = new_node - if current_node is not 
None: - current_node.prev = new_node - if new_node.next is None: - self.tail = new_node - if new_node.prev is None: - self.head = new_node - - def extract(self, index): - if self.is_empty: - raise ValueError("The list is empty.") - - if index < 0: - index = self.size + index - - if index >= self.size: - raise IndexError('%d is out of range.'%(index)) - - self.size -= 1 - counter = 0 - current_node = self.head - prev_node = None - while counter != index: - prev_node = current_node - current_node = current_node.next - counter += 1 - if prev_node is not None: - prev_node.next = current_node.next - if current_node.next is not None: - current_node.next.prev = prev_node - if index == 0: - self.head = current_node.next - if index == self.size: - self.tail = current_node.prev - return current_node - -class SinglyLinkedList(LinkedList): - """ - Represents Singly Linked List - - Parameters - ========== - - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs import SinglyLinkedList - >>> sll = SinglyLinkedList() - >>> sll.append(6) - >>> sll[0].key - 6 - >>> sll.head.key - 6 - >>> sll.append(5) - >>> sll.appendleft(2) - >>> str(sll) - "['(2, None)', '(6, None)', '(5, None)']" - >>> sll[0].key = 7.2 - >>> sll.extract(1).key - 6 - >>> str(sll) - "['(7.2, None)', '(5, None)']" - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Singly_linked_list - - """ - __slots__ = ['head', 'tail', 'size'] - - def __new__(cls, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = LinkedList.__new__(cls) - obj.head = None - obj.tail = None - obj.size = 0 - return obj - - @classmethod - def methods(cls): - return ['insert_after', 'insert_at', - 'extract'] - - def insert_after(self, prev_node, key, data=None): - self.size += 1 - new_node = LinkedListNode(key, data, - links=['next'], - addrs=[None]) - new_node.next = prev_node.next - prev_node.next = new_node - - if new_node.next is None: - self.tail = new_node - - def insert_at(self, index, key, data=None): - if self.size == 0 and (index in (0, -1)): - index = 0 - - if index < 0: - index = self.size + index - - if index > self.size: - raise IndexError('%d index is out of range.'%(index)) - - self.size += 1 - new_node = LinkedListNode(key, data, - links=['next'], - addrs=[None]) - if self.size == 1: - self.head, self.tail = \ - new_node, new_node - elif index == self.size - 1: - new_node.next = self.tail.next - self.tail.next = new_node - self.tail = new_node - else: - counter = 0 - current_node = self.head - prev_node = None - while counter != index: - prev_node = current_node - current_node = current_node.next - counter += 1 - new_node.next = current_node - if prev_node is not None: - prev_node.next = new_node - if new_node.next is None: - self.tail = new_node - if index == 0: - self.head = new_node - - def extract(self, index): - if self.is_empty: - raise ValueError("The list is empty.") - - if index < 0: - index = self.size + index - - if index >= self.size: - raise IndexError('%d is out of range.'%(index)) - - self.size -= 1 - counter = 0 - current_node = self.head - prev_node = None - while counter != index: - prev_node = current_node - current_node = current_node.next - counter += 1 - if prev_node is not None: - prev_node.next = current_node.next - if index == 0: - 
self.head = current_node.next - if index == self.size: - self.tail = prev_node - return current_node - -class SinglyCircularLinkedList(SinglyLinkedList): - """ - Represents Singly Circular Linked List. - - Parameters - ========== - - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - - Examples - ======== - - >>> from pydatastructs import SinglyCircularLinkedList - >>> scll = SinglyCircularLinkedList() - >>> scll.append(6) - >>> scll[0].key - 6 - >>> scll.head.key - 6 - >>> scll.append(5) - >>> scll.appendleft(2) - >>> str(scll) - "['(2, None)', '(6, None)', '(5, None)']" - >>> scll[0].key = 7.2 - >>> scll.extract(1).key - 6 - >>> str(scll) - "['(7.2, None)', '(5, None)']" - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Linked_list#Circular_linked_list - - """ - - @classmethod - def methods(cls): - return ['insert_after', 'insert_at', 'extract'] - - def insert_after(self, prev_node, key, data=None): - super(SinglyCircularLinkedList, self).\ - insert_after(prev_node, key, data) - if prev_node.next.next == self.head: - self.tail = prev_node.next - - def insert_at(self, index, key, data=None): - super(SinglyCircularLinkedList, self).insert_at(index, key, data) - if self.size == 1: - self.head.next = self.head - new_node = self.__getitem__(index) - if index == 0: - self.tail.next = new_node - if new_node.next == self.head: - self.tail = new_node - - def extract(self, index): - node = super(SinglyCircularLinkedList, self).extract(index) - if self.tail is None: - self.head = None - elif index == 0: - self.tail.next = self.head - return node - -class DoublyCircularLinkedList(DoublyLinkedList): - """ - Represents Doubly Circular Linked List - - Parameters - ========== - - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. 
- - Examples - ======== - - >>> from pydatastructs import DoublyCircularLinkedList - >>> dcll = DoublyCircularLinkedList() - >>> dcll.append(6) - >>> dcll[0].key - 6 - >>> dcll.head.key - 6 - >>> dcll.append(5) - >>> dcll.appendleft(2) - >>> str(dcll) - "['(2, None)', '(6, None)', '(5, None)']" - >>> dcll[0].key = 7.2 - >>> dcll.extract(1).key - 6 - >>> str(dcll) - "['(7.2, None)', '(5, None)']" - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Doubly_linked_list#Circular_doubly_linked_lists - - """ - - @classmethod - def methods(cls): - return ['insert_after', 'insert_before', - 'insert_at', 'extract'] - - def insert_after(self, prev_node, key, data=None): - super(DoublyCircularLinkedList, self)\ - .insert_after(prev_node, key, data) - if prev_node.next.next == self.head: - self.tail = prev_node.next - - def insert_before(self, next_node, key, data=None): - super(DoublyCircularLinkedList, self).\ - insert_before(next_node, key, data) - if next_node == self.head: - self.head = next_node.prev - - def insert_at(self, index, key, data=None): - super(DoublyCircularLinkedList, self).\ - insert_at(index, key, data) - if self.size == 1: - self.head.next = self.head - self.head.prev = self.head - new_node = self.__getitem__(index) - if index == 0: - self.tail.next = new_node - new_node.prev = self.tail - if new_node.next == self.head: - self.tail = new_node - new_node.next = self.head - self.head.prev = new_node - - def extract(self, index): - node = super(DoublyCircularLinkedList, self).extract(index) - if self.tail is None: - self.head = None - elif index == 0: - self.tail.next = self.head - return node - -class SkipList(object): - """ - Represents Skip List - - Parameters - ========== - - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. 
- - Examples - ======== - - >>> from pydatastructs import SkipList - >>> sl = SkipList() - >>> sl.insert(6) - >>> sl.insert(1) - >>> sl.insert(3) - >>> node = sl.extract(1) - >>> str(node) - '(1, None)' - >>> sl.insert(4) - >>> sl.insert(2) - >>> sl.search(4) - True - >>> sl.search(10) - False - - """ - - __slots__ = ['head', 'tail', '_levels', '_num_nodes', 'seed'] - - def __new__(cls, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = object.__new__(cls) - obj.head, obj.tail = None, None - obj._num_nodes = 0 - obj._levels = 0 - obj._add_level() - return obj - - @classmethod - def methods(cls): - return ['__new__', 'levels', 'search', - 'extract', '__str__', 'size'] - - def _add_level(self): - self.tail = SkipNode(math.inf, next=None, down=self.tail) - self.head = SkipNode(-math.inf, next=self.tail, down=self.head) - self._levels += 1 - - @property - def levels(self): - """ - Returns the number of levels in the - current skip list. - """ - return self._levels - - def _search(self, key) -> list: - path = [] - node = self.head - while node: - if node.next.key >= key: - path.append(node) - node = node.down - else: - node = node.next - return path - - def search(self, key) -> bool: - return self._search(key)[-1].next.key == key - - def insert(self, key, data=None): - """ - Inserts a new node to the skip list. - - Parameters - ========== - - key - Any valid identifier to uniquely - identify the node in the linked list. - - data - Any valid data to be stored in the node. 
- """ - path = self._search(key) - tip = path[-1] - below = SkipNode(key=key, data=data, next=tip.next) - tip.next = below - total_level = self._levels - level = 1 - while random.getrandbits(1) % 2 == 0 and level <= total_level: - if level == total_level: - self._add_level() - prev = self.head - else: - prev = path[total_level - 1 - level] - below = SkipNode(key=key, data=None, next=prev.next, down=below) - prev.next = below - level += 1 - self._num_nodes += 1 - - @property - def size(self): - return self._num_nodes - - def extract(self, key): - """ - Extracts the node with the given key in the skip list. - - Parameters - ========== - - key - The key of the node under consideration. - - Returns - ======= - - return_node: SkipNode - The node with given key. - """ - path = self._search(key) - tip = path[-1] - if tip.next.key != key: - raise KeyError('Node with key %s is not there in %s'%(key, self)) - return_node = SkipNode(tip.next.key, tip.next.data) - total_level = self._levels - level = total_level - 1 - while level >= 0 and path[level].next.key == key: - path[level].next = path[level].next.next - level -= 1 - walk = self.head - while walk is not None: - if walk.next is self.tail: - self._levels -= 1 - self.head = walk.down - self.tail = self.tail.down - walk = walk.down - else: - break - self._num_nodes -= 1 - if self._levels == 0: - self._add_level() - return return_node - - def __str__(self): - node2row = {} - node2col = {} - walk = self.head - curr_level = self._levels - 1 - while walk is not None: - curr_node = walk - col = 0 - while curr_node is not None: - if curr_node.key != math.inf and curr_node.key != -math.inf: - node2row[curr_node] = curr_level - if walk.down is None: - node2col[curr_node.key] = col - col += 1 - curr_node = curr_node.next - walk = walk.down - curr_level -= 1 - sl_mat = [[str(None) for _ in range(self._num_nodes)] for _ in range(self._levels)] - walk = self.head - while walk is not None: - curr_node = walk - while curr_node is not 
None: - if curr_node in node2row: - row = node2row[curr_node] - col = node2col[curr_node.key] - sl_mat[row][col] = str(curr_node) - curr_node = curr_node.next - walk = walk.down - sl_str = "" - for level_list in sl_mat[::-1]: - for node_str in level_list: - sl_str += node_str + " " - if len(sl_str) > 0: - sl_str += "\n" - return sl_str diff --git a/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/__init__.py b/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_algorithms.py b/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_algorithms.py deleted file mode 100644 index 3e287bb74..000000000 --- a/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_algorithms.py +++ /dev/null @@ -1,423 +0,0 @@ -from pydatastructs import ( - merge_sort_parallel, DynamicOneDimensionalArray, - OneDimensionalArray, brick_sort, brick_sort_parallel, - heapsort, matrix_multiply_parallel, counting_sort, bucket_sort, - cocktail_shaker_sort, quick_sort, longest_common_subsequence, is_ordered, - upper_bound, lower_bound, longest_increasing_subsequence, next_permutation, - prev_permutation, bubble_sort, linear_search, binary_search, jump_search, - selection_sort, insertion_sort, intro_sort, shell_sort, radix_sort, Backend) - -from pydatastructs.utils.raises_util import raises -import random - -def _test_common_sort(sort, *args, **kwargs): - random.seed(1000) - - n = random.randint(10, 20) - arr = DynamicOneDimensionalArray(int, 0) - generated_ints = [] - for _ in range(n): - integer = random.randint(1, 1000) - generated_ints.append(integer) - arr.append(integer) - for _ in range(n//3): - integer = random.randint(0, n//2) - generated_ints.append(integer) - arr.delete(integer) - expected_arr_1 = [686, 779, 102, 134, 362, 448, - 480, 548, None, None, 
None, - 228, 688, 247, 373, 696, None, - None, None, None, None, None, - None, None, None, None, None, - None, None, None, None] - sort(arr, *args, **kwargs, start=2, end=10) - assert arr._data == expected_arr_1 - sort(arr, *args, **kwargs) - expected_arr_2 = [102, 134, 228, 247, 362, 373, 448, - 480, 548, 686, 688, 696, 779, - None, None, None, None, None, None, - None, None, None, None, None, - None, None, None, None, None, None, None] - assert arr._data == expected_arr_2 - assert (arr._last_pos_filled, arr._num, arr._size) == (12, 13, 31) - - arr = DynamicOneDimensionalArray(int, 0, backend=Backend.CPP) - int_idx = 0 - for _ in range(n): - arr.append(generated_ints[int_idx]) - int_idx += 1 - for _ in range(n//3): - arr.delete(generated_ints[int_idx]) - int_idx += 1 - sort(arr, *args, **kwargs, start=2, end=10) - for i in range(len(expected_arr_1)): - assert arr[i] == expected_arr_1[i] - sort(arr, *args, **kwargs) - for i in range(len(expected_arr_2)): - assert arr[i] == expected_arr_2[i] - assert (arr._last_pos_filled, arr._num, arr.size) == (12, 13, 31) - - n = random.randint(10, 20) - arr = OneDimensionalArray(int, n) - generated_ints.clear() - for i in range(n): - integer = random.randint(1, 1000) - arr[i] = integer - generated_ints.append(integer) - expected_arr_3 = [42, 695, 147, 500, 768, - 998, 473, 732, 728, 426, - 709, 910] - sort(arr, *args, **kwargs, start=2, end=5) - assert arr._data == expected_arr_3 - - arr = OneDimensionalArray(int, n, backend=Backend.CPP) - int_idx = 0 - for i in range(n): - arr[i] = generated_ints[int_idx] - int_idx += 1 - sort(arr, *args, **kwargs, start=2, end=5) - for i in range(len(expected_arr_3)): - assert arr[i] == expected_arr_3[i] - -def test_merge_sort_parallel(): - _test_common_sort(merge_sort_parallel, num_threads=5) - -def test_brick_sort(): - _test_common_sort(brick_sort) - -def test_brick_sort_parallel(): - _test_common_sort(brick_sort_parallel, num_threads=3) - -def test_heapsort(): - _test_common_sort(heapsort) 
- -def test_bucket_sort(): - _test_common_sort(bucket_sort) - -def test_counting_sort(): - random.seed(1000) - - n = random.randint(10, 20) - arr = DynamicOneDimensionalArray(int, 0) - for _ in range(n): - arr.append(random.randint(1, 1000)) - for _ in range(n//3): - arr.delete(random.randint(0, n//2)) - - expected_arr = [102, 134, 228, 247, 362, 373, 448, - 480, 548, 686, 688, 696, 779] - assert counting_sort(arr)._data == expected_arr - -def test_cocktail_shaker_sort(): - _test_common_sort(cocktail_shaker_sort) - -def test_quick_sort(): - _test_common_sort(quick_sort) - _test_common_sort(quick_sort, backend=Backend.CPP) - -def test_intro_sort(): - _test_common_sort(intro_sort) - -def test_bubble_sort(): - _test_common_sort(bubble_sort) - _test_common_sort(bubble_sort, backend=Backend.CPP) - _test_common_sort(bubble_sort, backend=Backend.LLVM) - -def test_selection_sort(): - _test_common_sort(selection_sort) - _test_common_sort(selection_sort, backend=Backend.CPP) - -def test_insertion_sort(): - _test_common_sort(insertion_sort) - _test_common_sort(insertion_sort, backend=Backend.CPP) - -def test_matrix_multiply_parallel(): - ODA = OneDimensionalArray - - expected_result = [[3, 3, 3], [1, 2, 1], [2, 2, 2]] - - I = ODA(ODA, [ODA(int, [1, 1, 0]), ODA(int, [0, 1, 0]), ODA(int, [0, 0, 1])]) - J = ODA(ODA, [ODA(int, [2, 1, 2]), ODA(int, [1, 2, 1]), ODA(int, [2, 2, 2])]) - output = matrix_multiply_parallel(I, J, num_threads=5) - assert expected_result == output - - I = [[1, 1, 0], [0, 1, 0], [0, 0, 1]] - J = [[2, 1, 2], [1, 2, 1], [2, 2, 2]] - output = matrix_multiply_parallel(I, J, num_threads=5) - assert expected_result == output - - I = [[1, 1, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]] - J = [[2, 1, 2], [1, 2, 1], [2, 2, 2]] - assert raises(ValueError, lambda: matrix_multiply_parallel(I, J, num_threads=5)) - - I = [[1, 1, 0], [0, 1, 0], [0, 0, 1]] - J = [[2, 1, 2], [1, 2, 1], [2, 2, 2]] - output = matrix_multiply_parallel(I, J, num_threads=1) - assert expected_result == 
output - -def test_longest_common_sequence(): - ODA = OneDimensionalArray - expected_result = "['A', 'S', 'C', 'I', 'I']" - - str1 = ODA(str, ['A', 'A', 'S', 'C', 'C', 'I', 'I']) - str2 = ODA(str, ['A', 'S', 'S', 'C', 'I', 'I', 'I', 'I']) - output = longest_common_subsequence(str1, str2) - assert str(output) == expected_result - - expected_result = "['O', 'V', 'A']" - - I = ODA(str, ['O', 'V', 'A', 'L']) - J = ODA(str, ['F', 'O', 'R', 'V', 'A', 'E', 'W']) - output = longest_common_subsequence(I, J) - assert str(output) == expected_result - - X = ODA(int, [1, 2, 3, 4, 5, 6, 6, 5, 4, 3, 2, 1]) - Y = ODA(int, [1, 2, 3, 4, 4, 3, 2, 1]) - output = longest_common_subsequence(X, Y) - assert str(output) == '[1, 2, 3, 4, 4, 3, 2, 1]' - - Z = ODA(int, []) - output = longest_common_subsequence(Y, Z) - assert str(output) == '[]' - -def test_is_ordered(): - def _test_inner_ordered(*args, **kwargs): - ODA = OneDimensionalArray - DODA = DynamicOneDimensionalArray - - expected_result = True - arr = ODA(int, [1, 2, 5, 6]) - output = is_ordered(arr, **kwargs) - assert output == expected_result - - expected_result = False - arr1 = ODA(int, [4, 3, 2, 1]) - output = is_ordered(arr1, **kwargs) - assert output == expected_result - - expected_result = True - arr2 = ODA(int, [6, 1, 2, 3, 4, 5]) - output = is_ordered(arr2, start=1, end=5, **kwargs) - assert output == expected_result - - expected_result = True - arr3 = ODA(int, [0, -1, -2, -3, -4, 4]) - output = is_ordered(arr3, start=1, end=4, - comp=lambda u, v: u > v, **kwargs) - assert output == expected_result - - expected_result = True - arr4 = DODA(int, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) - arr4.delete(0) - output = is_ordered(arr4, **kwargs) - assert output == expected_result - - _test_inner_ordered() - _test_inner_ordered(backend=Backend.CPP) - - -def test_upper_bound(): - ODA = OneDimensionalArray - arr1 = ODA(int, [3, 3, 3]) - output = upper_bound(arr1, 3) - expected_result = 3 - assert expected_result == output - - arr2 = ODA(int, 
[4, 4, 5, 6]) - output = upper_bound(arr2, 4, end=3) - expected_result = 2 - assert expected_result == output - - arr3 = ODA(int, [6, 6, 7, 8, 9]) - output = upper_bound(arr3, 5, start=2, end=4) - expected_result = 2 - assert expected_result == output - - arr4 = ODA(int, [3, 4, 4, 6]) - output = upper_bound(arr4, 5, start=1, end=3) - expected_result = 3 - assert expected_result == output - - arr5 = ODA(int, [7, 6, 6, 6, 6, 5, 4, 3]) - output = upper_bound(arr5, 6, comp=lambda x, y: x > y) - expected_result = 5 - assert expected_result == output - - arr6 = ODA(int, [7, 6, 6, 6, 6, 5, 4, 3]) - output = upper_bound(arr6, 2, start=2, comp=lambda x, y: x > y) - expected_result = 8 - assert expected_result == output - - arr7 = ODA(int, [7, 6, 6, 6, 6, 5, 4, 3]) - output = upper_bound(arr7, 9, start=3, end=7, comp=lambda x, y: x > y) - expected_result = 3 - assert expected_result == output - - arr8 = ODA(int, [7, 6, 6, 6, 6, 5, 4, 3]) - output = upper_bound(arr8, 6, end=3, comp=lambda x, y: x > y) - expected_result = 3 - assert expected_result == output - - -def test_lower_bound(): - ODA = OneDimensionalArray - arr1 = ODA(int, [3, 3, 3]) - output = lower_bound(arr1, 3, start=1) - expected_result = 1 - assert expected_result == output - - arr2 = ODA(int, [4, 4, 4, 4, 5, 6]) - output = lower_bound(arr2, 5, end=3) - expected_result = 3 - assert expected_result == output - - arr3 = ODA(int, [6, 6, 7, 8, 9]) - output = lower_bound(arr3, 5, end=3) - expected_result = 0 - assert expected_result == output - - arr4 = ODA(int, [3, 4, 4, 4]) - output = lower_bound(arr4, 5) - expected_result = 4 - assert expected_result == output - - arr5 = ODA(int, [7, 6, 6, 6, 6, 5, 4, 3]) - output = lower_bound(arr5, 5, comp=lambda x, y: x > y) - expected_result = 5 - assert expected_result == output - - arr6 = ODA(int, [7, 6, 6, 6, 6, 5, 4, 3]) - output = lower_bound(arr6, 2, start=4, comp=lambda x, y: x > y) - expected_result = 8 - assert expected_result == output - - arr7 = ODA(int, [7, 6, 6, 
6, 6, 5, 4, 3]) - output = lower_bound(arr7, 9, end=5, comp=lambda x, y: x > y) - expected_result = 0 - assert expected_result == output - - arr8 = ODA(int, [7, 6, 6, 6, 6, 5, 4, 3]) - output = lower_bound(arr8, 6, end=3, comp=lambda x, y: x > y) - expected_result = 1 - assert expected_result == output - -def test_longest_increasing_subsequence(): - ODA = OneDimensionalArray - - arr1 = ODA(int, [2, 5, 3, 7, 11, 8, 10, 13, 6]) - output = longest_increasing_subsequence(arr1) - expected_result = [2, 3, 7, 8, 10, 13] - assert str(expected_result) == str(output) - - arr2 = ODA(int, [3, 4, -1, 5, 8, 2, 2, 2, 3, 12, 7, 9, 10]) - output = longest_increasing_subsequence(arr2) - expected_result = [-1, 2, 3, 7, 9, 10] - assert str(expected_result) == str(output) - - arr3 = ODA(int, [6, 6, 6, 19, 9]) - output = longest_increasing_subsequence(arr3) - expected_result = [6, 9] - assert str(expected_result) == str(output) - - arr4 = ODA(int, [5, 4, 4, 3, 3, 6, 6, 8]) - output = longest_increasing_subsequence(arr4) - expected_result = [3, 6, 8] - assert str(expected_result) == str(output) - - arr5 = ODA(int, [7, 6, 6, 6, 5, 4, 3]) - output = longest_increasing_subsequence(arr5) - expected_result = [3] - assert str(expected_result) == str(output) - -def _test_permutation_common(array, expected_perms, func): - num_perms = len(expected_perms) - - output = [] - for _ in range(num_perms): - signal, array = func(array) - output.append(array) - if not signal: - break - - assert len(output) == len(expected_perms) - for perm1, perm2 in zip(output, expected_perms): - assert str(perm1) == str(perm2) - -def test_next_permutation(): - ODA = OneDimensionalArray - - array = ODA(int, [1, 2, 3]) - expected_perms = [[1, 3, 2], [2, 1, 3], - [2, 3, 1], [3, 1, 2], - [3, 2, 1], [1, 2, 3]] - _test_permutation_common(array, expected_perms, next_permutation) - -def test_prev_permutation(): - ODA = OneDimensionalArray - - array = ODA(int, [3, 2, 1]) - expected_perms = [[3, 1, 2], [2, 3, 1], - [2, 1, 3], [1, 
3, 2], - [1, 2, 3], [3, 2, 1]] - _test_permutation_common(array, expected_perms, prev_permutation) - -def test_next_prev_permutation(): - ODA = OneDimensionalArray - random.seed(1000) - - for i in range(100): - data = set(random.sample(range(1, 10000), 10)) - array = ODA(int, list(data)) - - _, next_array = next_permutation(array) - _, orig_array = prev_permutation(next_array) - assert str(orig_array) == str(array) - - _, prev_array = prev_permutation(array) - _, orig_array = next_permutation(prev_array) - assert str(orig_array) == str(array) - -def _test_common_search(search_func, sort_array=True, **kwargs): - ODA = OneDimensionalArray - - array = ODA(int, [1, 2, 5, 7, 10, 29, 40]) - for i in range(len(array)): - assert i == search_func(array, array[i], **kwargs) - - checker_array = [None, None, 2, 3, 4, 5, None] - for i in range(len(array)): - assert checker_array[i] == search_func(array, array[i], start=2, end=5, **kwargs) - - random.seed(1000) - - for i in range(25): - data = list(set(random.sample(range(1, 10000), 100))) - - if sort_array: - data.sort() - - array = ODA(int, list(data)) - - for i in range(len(array)): - assert search_func(array, array[i], **kwargs) == i - - for _ in range(50): - assert search_func(array, random.randint(10001, 50000), **kwargs) is None - -def test_linear_search(): - _test_common_search(linear_search, sort_array=False) - _test_common_search(linear_search, sort_array=False, backend=Backend.CPP) - -def test_binary_search(): - _test_common_search(binary_search) - _test_common_search(binary_search, backend=Backend.CPP) - -def test_jump_search(): - _test_common_search(jump_search) - _test_common_search(jump_search, backend=Backend.CPP) - -def test_shell_sort(): - _test_common_sort(shell_sort) - -def test_radix_sort(): - _test_common_sort(radix_sort) diff --git a/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_arrays.py b/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_arrays.py 
deleted file mode 100644 index 886510113..000000000 --- a/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_arrays.py +++ /dev/null @@ -1,157 +0,0 @@ -from pydatastructs.linear_data_structures import ( - OneDimensionalArray, DynamicOneDimensionalArray, - MultiDimensionalArray, ArrayForTrees) -from pydatastructs.utils.misc_util import Backend -from pydatastructs.utils.raises_util import raises -from pydatastructs.utils import TreeNode -from pydatastructs.utils._backend.cpp import _nodes - -def test_OneDimensionalArray(): - ODA = OneDimensionalArray - A = ODA(int, 5, [1.0, 2, 3, 4, 5], init=6) - A[1] = 2.0 - assert str(A) == '[1, 2, 3, 4, 5]' - assert A - assert ODA(int, [1.0, 2, 3, 4, 5], 5) - assert ODA(int, 5) - assert ODA(int, [1.0, 2, 3]) - assert raises(IndexError, lambda: A[7]) - assert raises(IndexError, lambda: A[-1]) - assert raises(ValueError, lambda: ODA()) - assert raises(ValueError, lambda: ODA(int, 1, 2, 3)) - assert raises(TypeError, lambda: ODA(int, 5.0, set([1, 2, 3]))) - assert raises(TypeError, lambda: ODA(int, 5.0)) - assert raises(TypeError, lambda: ODA(int, set([1, 2, 3]))) - assert raises(ValueError, lambda: ODA(int, 3, [1])) - - A = ODA(int, 5, [1, 2, 3, 4, 5], init=6, backend=Backend.CPP) - A[1] = 2 - assert str(A) == "['1', '2', '3', '4', '5']" - assert A - assert ODA(int, [1, 2, 3, 4, 5], 5, backend=Backend.CPP) - assert ODA(int, 5, backend=Backend.CPP) - assert ODA(int, [1, 2, 3], backend=Backend.CPP) - assert raises(TypeError, lambda: ODA(int, [1.0, 2, 3, 4, 5], 5, backend=Backend.CPP)) - assert raises(TypeError, lambda: ODA(int, [1.0, 2, 3], backend=Backend.CPP)) - assert raises(IndexError, lambda: A[7]) - assert raises(IndexError, lambda: A[-1]) - assert raises(ValueError, lambda: ODA(backend=Backend.CPP)) - assert raises(ValueError, lambda: ODA(int, 1, 2, 3, backend=Backend.CPP)) - assert raises(TypeError, lambda: ODA(int, 5.0, set([1, 2, 3]), backend=Backend.CPP)) - assert raises(TypeError, lambda: ODA(int, 
5.0, backend=Backend.CPP)) - assert raises(TypeError, lambda: ODA(int, set([1, 2, 3]), backend=Backend.CPP)) - assert raises(ValueError, lambda: ODA(int, 3, [1], backend=Backend.CPP)) - assert raises(ValueError, lambda: ODA(int, 3, [1], backend=Backend.CPP)) - assert raises(TypeError, lambda: A.fill(2.0)) - - -def test_MultiDimensionalArray(): - assert raises(ValueError, lambda: MultiDimensionalArray(int, 2, -1, 3)) - assert MultiDimensionalArray(int, 10).shape == (10,) - array = MultiDimensionalArray(int, 5, 9, 3, 8) - assert array.shape == (5, 9, 3, 8) - array.fill(5) - array[1, 3, 2, 5] = 2.0 - assert array - assert array[1, 3, 2, 5] == 2.0 - assert array[1, 3, 0, 5] == 5 - assert array[1, 2, 2, 5] == 5 - assert array[2, 3, 2, 5] == 5 - assert raises(IndexError, lambda: array[5]) - assert raises(IndexError, lambda: array[4, 10]) - assert raises(IndexError, lambda: array[-1]) - assert raises(IndexError, lambda: array[2, 3, 2, 8]) - assert raises(ValueError, lambda: MultiDimensionalArray()) - assert raises(ValueError, lambda: MultiDimensionalArray(int)) - assert raises(TypeError, lambda: MultiDimensionalArray(int, 5, 6, "")) - array = MultiDimensionalArray(int, 3, 2, 2) - array.fill(1) - array[0, 0, 0] = 0 - array[0, 0, 1] = 0 - array[1, 0, 0] = 0 - array[2, 1, 1] = 0 - assert str(array) == '[0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0]' - array = MultiDimensionalArray(int, 4) - assert array.shape == (4,) - array.fill(5) - array[3] = 3 - assert array[3] == 3 - -def test_DynamicOneDimensionalArray(): - DODA = DynamicOneDimensionalArray - A = DODA(int, 0) - A.append(1) - A.append(2) - A.append(3) - A.append(4) - assert str(A) == "['1', '2', '3', '4']" - A.delete(0) - A.delete(0) - A.delete(15) - A.delete(-1) - A.delete(1) - A.delete(2) - assert A._data == [4, None, None] - assert str(A) == "['4']" - assert A.size == 3 - A.fill(4) - assert A._data == [4, 4, 4] - b = DynamicOneDimensionalArray(int, 0) - b.append(1) - b.append(2) - b.append(3) - b.append(4) - b.append(5) - 
assert b._data == [1, 2, 3, 4, 5, None, None] - assert list(reversed(b)) == [5, 4, 3, 2, 1] - - A = DODA(int, 0, backend=Backend.CPP) - A.append(1) - A.append(2) - A.append(3) - A.append(4) - assert str(A) == "['1', '2', '3', '4']" - A.delete(0) - A.delete(0) - A.delete(15) - A.delete(-1) - A.delete(1) - A.delete(2) - assert [A[i] for i in range(A.size)] == [4, None, None] - assert A.size == 3 - A.fill(4) - assert [A[0], A[1], A[2]] == [4, 4, 4] - b = DODA(int, 0, backend=Backend.CPP) - b.append(1) - b.append(2) - b.append(3) - b.append(4) - b.append(5) - assert [b[i] for i in range(b.size)] == [1, 2, 3, 4, 5, None, None] - -def test_DynamicOneDimensionalArray2(): - DODA = DynamicOneDimensionalArray - root = TreeNode(1, 100) - A = DODA(TreeNode, [root]) - assert str(A[0]) == "(None, 1, 100, None)" - -def _test_ArrayForTrees(backend): - AFT = ArrayForTrees - root = TreeNode(1, 100,backend=backend) - if backend==Backend.PYTHON: - A = AFT(TreeNode, [root], backend=backend) - B = AFT(TreeNode, 0, backend=backend) - else: - A = AFT(_nodes.TreeNode, [root], backend=backend) - B = AFT(_nodes.TreeNode, 0, backend=backend) - assert str(A) == "['(None, 1, 100, None)']" - node = TreeNode(2, 200, backend=backend) - A.append(node) - assert str(A) == "['(None, 1, 100, None)', '(None, 2, 200, None)']" - assert str(B) == "[]" - -def test_ArrayForTrees(): - _test_ArrayForTrees(Backend.PYTHON) - -def test_cpp_ArrayForTrees(): - _test_ArrayForTrees(Backend.CPP) diff --git a/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_linked_lists.py b/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_linked_lists.py deleted file mode 100644 index b7f172ddc..000000000 --- a/lib/python3.12/site-packages/pydatastructs/linear_data_structures/tests/test_linked_lists.py +++ /dev/null @@ -1,193 +0,0 @@ -from pydatastructs.linear_data_structures import DoublyLinkedList, SinglyLinkedList, SinglyCircularLinkedList, DoublyCircularLinkedList, SkipList 
-from pydatastructs.utils.raises_util import raises -import copy, random - -def test_DoublyLinkedList(): - random.seed(1000) - dll = DoublyLinkedList() - assert raises(IndexError, lambda: dll[2]) - dll.appendleft(5) - dll.append(1) - dll.appendleft(2) - dll.append(3) - dll.insert_after(dll[-1], 4) - dll.insert_after(dll[2], 6) - dll.insert_before(dll[4], 1.1) - dll.insert_before(dll[0], 7) - dll.insert_at(0, 2) - dll.insert_at(-1, 9) - dll.extract(2) - assert dll.popleft().key == 2 - assert dll.popright().key == 4 - assert dll.search(3) == dll[-2] - assert dll.search(-1) is None - dll[-2].key = 0 - assert str(dll) == ("['(7, None)', '(5, None)', '(1, None)', " - "'(6, None)', '(1.1, None)', '(0, None)', " - "'(9, None)']") - assert len(dll) == 7 - assert raises(IndexError, lambda: dll.insert_at(8, None)) - assert raises(IndexError, lambda: dll.extract(20)) - dll_copy = DoublyCircularLinkedList() - for i in range(dll.size): - dll_copy.append(dll[i]) - for i in range(len(dll)): - if i%2 == 0: - dll.popleft() - else: - dll.popright() - assert str(dll) == "[]" - for _ in range(len(dll_copy)): - index = random.randint(0, len(dll_copy) - 1) - dll_copy.extract(index) - assert str(dll_copy) == "[]" - assert raises(ValueError, lambda: dll_copy.extract(1)) - -def test_SinglyLinkedList(): - random.seed(1000) - sll = SinglyLinkedList() - assert raises(IndexError, lambda: sll[2]) - sll.appendleft(5) - sll.append(1) - sll.appendleft(2) - sll.append(3) - sll.insert_after(sll[1], 4) - sll.insert_after(sll[-1], 6) - sll.insert_at(0, 2) - sll.insert_at(-1, 9) - sll.extract(2) - assert sll.popleft().key == 2 - assert sll.popright().key == 6 - sll[-2].key = 0 - assert str(sll) == ("['(2, None)', '(4, None)', '(1, None)', " - "'(0, None)', '(9, None)']") - assert len(sll) == 5 - assert raises(IndexError, lambda: sll.insert_at(6, None)) - assert raises(IndexError, lambda: sll.extract(20)) - sll_copy = DoublyCircularLinkedList() - for i in range(sll.size): - sll_copy.append(sll[i]) - for 
i in range(len(sll)): - if i%2 == 0: - sll.popleft() - else: - sll.popright() - assert str(sll) == "[]" - for _ in range(len(sll_copy)): - index = random.randint(0, len(sll_copy) - 1) - sll_copy.extract(index) - assert str(sll_copy) == "[]" - assert raises(ValueError, lambda: sll_copy.extract(1)) - -def test_SinglyCircularLinkedList(): - random.seed(1000) - scll = SinglyCircularLinkedList() - assert raises(IndexError, lambda: scll[2]) - scll.appendleft(5) - scll.append(1) - scll.appendleft(2) - scll.append(3) - scll.insert_after(scll[1], 4) - scll.insert_after(scll[-1], 6) - scll.insert_at(0, 2) - scll.insert_at(-1, 9) - scll.extract(2) - assert scll.popleft().key == 2 - assert scll.popright().key == 6 - assert scll.search(-1) is None - scll[-2].key = 0 - assert str(scll) == ("['(2, None)', '(4, None)', '(1, None)', " - "'(0, None)', '(9, None)']") - assert len(scll) == 5 - assert raises(IndexError, lambda: scll.insert_at(6, None)) - assert raises(IndexError, lambda: scll.extract(20)) - scll_copy = DoublyCircularLinkedList() - for i in range(scll.size): - scll_copy.append(scll[i]) - for i in range(len(scll)): - if i%2 == 0: - scll.popleft() - else: - scll.popright() - assert str(scll) == "[]" - for _ in range(len(scll_copy)): - index = random.randint(0, len(scll_copy) - 1) - scll_copy.extract(index) - assert str(scll_copy) == "[]" - assert raises(ValueError, lambda: scll_copy.extract(1)) - -def test_DoublyCircularLinkedList(): - random.seed(1000) - dcll = DoublyCircularLinkedList() - assert raises(IndexError, lambda: dcll[2]) - dcll.appendleft(5) - dcll.append(1) - dcll.appendleft(2) - dcll.append(3) - dcll.insert_after(dcll[-1], 4) - dcll.insert_after(dcll[2], 6) - dcll.insert_before(dcll[4], 1) - dcll.insert_before(dcll[0], 7) - dcll.insert_at(0, 2) - dcll.insert_at(-1, 9) - dcll.extract(2) - assert dcll.popleft().key == 2 - assert dcll.popright().key == 4 - dcll[-2].key = 0 - assert str(dcll) == ("['(7, None)', '(5, None)', '(1, None)', " - "'(6, None)', '(1, 
None)', '(0, None)', " - "'(9, None)']") - assert len(dcll) == 7 - assert raises(IndexError, lambda: dcll.insert_at(8, None)) - assert raises(IndexError, lambda: dcll.extract(20)) - dcll_copy = DoublyCircularLinkedList() - for i in range(dcll.size): - dcll_copy.append(dcll[i]) - for i in range(len(dcll)): - if i%2 == 0: - dcll.popleft() - else: - dcll.popright() - assert str(dcll) == "[]" - for _ in range(len(dcll_copy)): - index = random.randint(0, len(dcll_copy) - 1) - dcll_copy.extract(index) - assert str(dcll_copy) == "[]" - assert raises(ValueError, lambda: dcll_copy.extract(1)) - -def test_SkipList(): - random.seed(0) - sl = SkipList() - sl.insert(2) - sl.insert(10) - sl.insert(92) - sl.insert(1) - sl.insert(4) - sl.insert(27) - sl.extract(10) - assert str(sl) == ("(1, None) None None None None \n" - "(1, None) None None None None \n" - "(1, None) (2, None) (4, None) (27, None) (92, None) \n") - assert raises(KeyError, lambda: sl.extract(15)) - assert sl.search(1) is True - assert sl.search(47) is False - - sl = SkipList() - - for a in range(0, 20, 2): - sl.insert(a) - assert sl.search(16) is True - for a in range(4, 20, 4): - sl.extract(a) - assert sl.search(10) is True - for a in range(4, 20, 4): - sl.insert(a) - for a in range(0, 20, 2): - sl.extract(a) - assert sl.search(3) is False - - li = SkipList() - li.insert(1) - li.insert(2) - assert li.levels == 1 - assert li.size == 2 diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/__init__.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/__init__.py deleted file mode 100644 index 6ed099769..000000000 --- a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/__init__.py +++ /dev/null @@ -1,51 +0,0 @@ -__all__ = [] - -from . 
import ( - stack, - binomial_trees, - queue, - disjoint_set, - sparse_table, -) - -from .binomial_trees import ( - BinomialTree -) -__all__.extend(binomial_trees.__all__) - -from .stack import ( - Stack, -) -__all__.extend(stack.__all__) - -from .queue import ( - Queue, - PriorityQueue -) -__all__.extend(queue.__all__) - -from .disjoint_set import ( - DisjointSetForest, -) -__all__.extend(disjoint_set.__all__) - -from .sparse_table import ( - SparseTable, -) -__all__.extend(sparse_table.__all__) - -from .segment_tree import ( - ArraySegmentTree, -) -__all__.extend(segment_tree.__all__) - -from .algorithms import ( - RangeQueryStatic, - RangeQueryDynamic -) -__all__.extend(algorithms.__all__) - -from .multiset import ( - Multiset -) -__all__.extend(multiset.__all__) diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/_backend/__init__.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/_backend/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/algorithms.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/algorithms.py deleted file mode 100644 index 3c2f86516..000000000 --- a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/algorithms.py +++ /dev/null @@ -1,335 +0,0 @@ -from pydatastructs.miscellaneous_data_structures.sparse_table import SparseTable -from pydatastructs.miscellaneous_data_structures.segment_tree import ArraySegmentTree -from pydatastructs.utils.misc_util import ( - _check_range_query_inputs, Backend, - raise_if_backend_is_not_python) - -__all__ = [ - 'RangeQueryStatic', - 'RangeQueryDynamic' -] - - -class RangeQueryStatic: - """ - Produces results for range queries of different kinds - by using specified data structure. - - Parameters - ========== - - array: OneDimensionalArray - The array for which we need to answer queries. 
- All the elements should be of type `int`. - func: callable - The function to be used for generating results - of a query. It should accept only one tuple as an - argument. The size of the tuple will be either 1 or 2 - and any one of the elements can be `None`. You can treat - `None` in whatever way you want according to the query - you are performing. For example, in case of range minimum - queries, `None` can be treated as infinity. We provide - the following which can be used as an argument value for this - parameter, - - `minimum` - For range minimum queries. - - `greatest_common_divisor` - For queries finding greatest - common divisor of a range. - - `summation` - For range sum queries. - data_structure: str - The data structure to be used for performing - range queries. - Currently the following data structures are supported, - - 'array' -> Array data structure. - Each query takes O(end - start) time asymptotically. - - 'sparse_table' -> Sparse table data structure. - Each query takes O(log(end - start)) time - asymptotically. - - By default, 'sparse_table'. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs import OneDimensionalArray, RangeQueryStatic - >>> from pydatastructs import minimum - >>> arr = OneDimensionalArray(int, [4, 6, 1, 5, 7, 3]) - >>> RMQ = RangeQueryStatic(arr, minimum) - >>> RMQ.query(3, 4) - 5 - >>> RMQ.query(0, 4) - 1 - >>> RMQ.query(0, 2) - 1 - - Note - ==== - - The array once passed as an input should not be modified - once the `RangeQueryStatic` constructor is called. If you - have updated the array, then you need to create a new - `RangeQueryStatic` object with this updated array. 
- """ - - def __new__(cls, array, func, data_structure='sparse_table', **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - if len(array) == 0: - raise ValueError("Input %s array is empty."%(array)) - - if data_structure == 'array': - return RangeQueryStaticArray(array, func) - elif data_structure == 'sparse_table': - return RangeQueryStaticSparseTable(array, func) - else: - raise NotImplementedError( - "Currently %s data structure for range " - "query without updates isn't implemented yet." - % (data_structure)) - - @classmethod - def methods(cls): - return ['query'] - - def query(start, end): - """ - Method to perform a query in [start, end) range. - - Parameters - ========== - - start: int - The starting index of the range. - end: int - The ending index of the range. - """ - raise NotImplementedError( - "This is an abstract method.") - - -class RangeQueryStaticSparseTable(RangeQueryStatic): - - __slots__ = ["sparse_table", "bounds"] - - def __new__(cls, array, func, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = object.__new__(cls) - sparse_table = SparseTable(array, func) - obj.bounds = (0, len(array)) - obj.sparse_table = sparse_table - return obj - - @classmethod - def methods(cls): - return ['query'] - - def query(self, start, end): - _check_range_query_inputs((start, end + 1), self.bounds) - return self.sparse_table.query(start, end) - - -class RangeQueryStaticArray(RangeQueryStatic): - - __slots__ = ["array", "func"] - - def __new__(cls, array, func): - obj = object.__new__(cls) - obj.array = array - obj.func = func - return obj - - @classmethod - def methods(cls): - return ['query'] - - def query(self, start, end): - _check_range_query_inputs((start, end + 1), (0, len(self.array))) - - rsize = end - start + 1 - - if rsize == 1: - return self.func((self.array[start],)) - - query_ans = self.func((self.array[start], self.array[start + 1])) - for i in range(start + 2, 
end + 1): - query_ans = self.func((query_ans, self.array[i])) - return query_ans - -class RangeQueryDynamic: - """ - Produces results for range queries of different kinds - while allowing point updates by using specified - data structure. - - Parameters - ========== - - array: OneDimensionalArray - The array for which we need to answer queries. - All the elements should be of type `int`. - func: callable - The function to be used for generating results - of a query. It should accept only one tuple as an - argument. The size of the tuple will be either 1 or 2 - and any one of the elements can be `None`. You can treat - `None` in whatever way you want according to the query - you are performing. For example, in case of range minimum - queries, `None` can be treated as infinity. We provide - the following which can be used as an argument value for this - parameter, - - `minimum` - For range minimum queries. - - `greatest_common_divisor` - For queries finding greatest - common divisor of a range. - - `summation` - For range sum queries. - data_structure: str - The data structure to be used for performing - range queries. - Currently the following data structures are supported, - - 'array' -> Array data structure. - Each query takes O(end - start) time asymptotically. - Each point update takes O(1) time asymptotically. - - 'segment_tree' -> Segment tree data structure. - Each query takes O(log(end - start)) time - asymptotically. - Each point update takes O(log(len(array))) time - asymptotically. - - By default, 'segment_tree'. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. 
- - Examples - ======== - - >>> from pydatastructs import OneDimensionalArray, RangeQueryDynamic - >>> from pydatastructs import minimum - >>> arr = OneDimensionalArray(int, [4, 6, 1, 5, 7, 3]) - >>> RMQ = RangeQueryDynamic(arr, minimum) - >>> RMQ.query(3, 4) - 5 - >>> RMQ.query(0, 4) - 1 - >>> RMQ.query(0, 2) - 1 - >>> RMQ.update(2, 0) - >>> RMQ.query(0, 2) - 0 - - Note - ==== - - The array once passed as an input should be modified - only with `RangeQueryDynamic.update` method. - """ - - def __new__(cls, array, func, data_structure='segment_tree', **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - - if len(array) == 0: - raise ValueError("Input %s array is empty."%(array)) - - if data_structure == 'array': - return RangeQueryDynamicArray(array, func, **kwargs) - elif data_structure == 'segment_tree': - return RangeQueryDynamicSegmentTree(array, func, **kwargs) - else: - raise NotImplementedError( - "Currently %s data structure for range " - "query with point updates isn't implemented yet." - % (data_structure)) - - @classmethod - def methods(cls): - return ['query', 'update'] - - def query(start, end): - """ - Method to perform a query in [start, end) range. - - Parameters - ========== - - start: int - The starting index of the range. - end: int - The ending index of the range. - """ - raise NotImplementedError( - "This is an abstract method.") - - def update(self, index, value): - """ - Method to update index with a new value. - - Parameters - ========== - - index: int - The index to be update. - value: int - The new value. 
- """ - raise NotImplementedError( - "This is an abstract method.") - -class RangeQueryDynamicArray(RangeQueryDynamic): - - __slots__ = ["range_query_static"] - - def __new__(cls, array, func, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = object.__new__(cls) - obj.range_query_static = RangeQueryStaticArray(array, func) - return obj - - @classmethod - def methods(cls): - return ['query', 'update'] - - def query(self, start, end): - return self.range_query_static.query(start, end) - - def update(self, index, value): - self.range_query_static.array[index] = value - -class RangeQueryDynamicSegmentTree(RangeQueryDynamic): - - __slots__ = ["segment_tree", "bounds"] - - def __new__(cls, array, func, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.pop('backend', Backend.PYTHON)) - obj = object.__new__(cls) - obj.segment_tree = ArraySegmentTree(array, func, dimensions=1) - obj.segment_tree.build() - obj.bounds = (0, len(array)) - return obj - - @classmethod - def methods(cls): - return ['query', 'update'] - - def query(self, start, end): - _check_range_query_inputs((start, end + 1), self.bounds) - return self.segment_tree.query(start, end) - - def update(self, index, value): - self.segment_tree.update(index, value) diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/binomial_trees.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/binomial_trees.py deleted file mode 100644 index 9ea91d828..000000000 --- a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/binomial_trees.py +++ /dev/null @@ -1,91 +0,0 @@ -from pydatastructs.utils.misc_util import ( - BinomialTreeNode, _check_type, Backend, - raise_if_backend_is_not_python) - -__all__ = [ - 'BinomialTree' -] - -class BinomialTree(object): - """ - Represents binomial trees - - Parameters - ========== - - root: BinomialTreeNode - The root of the binomial tree. 
- By default, None - order: int - The order of the binomial tree. - By default, None - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs import BinomialTree, BinomialTreeNode - >>> root = BinomialTreeNode(1, 1) - >>> tree = BinomialTree(root, 0) - >>> tree.is_empty - False - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Binomial_heap - """ - __slots__ = ['root', 'order'] - - def __new__(cls, root=None, order=None, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - if root is not None and \ - not _check_type(root, BinomialTreeNode): - raise TypeError("%s i.e., root should be of " - "type BinomialTreeNode."%(root)) - if order is not None and not _check_type(order, int): - raise TypeError("%s i.e., order should be of " - "type int."%(order)) - obj = object.__new__(cls) - if root is not None: - root.is_root = True - obj.root = root - obj.order = order - return obj - - @classmethod - def methods(cls): - return ['add_sub_tree', '__new__', 'is_empty'] - - def add_sub_tree(self, other_tree): - """ - Adds a sub tree to current tree. - - Parameters - ========== - - other_tree: BinomialTree - - Raises - ====== - - ValueError: If order of the two trees - are different. 
- """ - if not _check_type(other_tree, BinomialTree): - raise TypeError("%s i.e., other_tree should be of " - "type BinomialTree"%(other_tree)) - if self.order != other_tree.order: - raise ValueError("Orders of both the trees should be same.") - self.root.children.append(other_tree.root) - other_tree.root.parent = self.root - other_tree.root.is_root = False - self.order += 1 - - @property - def is_empty(self): - return self.root is None diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/disjoint_set.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/disjoint_set.py deleted file mode 100644 index 9a5caef5b..000000000 --- a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/disjoint_set.py +++ /dev/null @@ -1,143 +0,0 @@ -from pydatastructs.utils import Set -from pydatastructs.utils.misc_util import ( - Backend, raise_if_backend_is_not_python) - -__all__ = ['DisjointSetForest'] - -class DisjointSetForest(object): - """ - Represents a forest of disjoint set trees. - - Parameters - ========== - - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs import DisjointSetForest - >>> dst = DisjointSetForest() - >>> dst.make_set(1) - >>> dst.make_set(2) - >>> dst.union(1, 2) - >>> dst.find_root(2).key - 1 - >>> dst.make_root(2) - >>> dst.find_root(2).key - 2 - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Disjoint-set_data_structure - """ - - __slots__ = ['tree'] - - def __new__(cls, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = object.__new__(cls) - obj.tree = dict() - return obj - - @classmethod - def methods(cls): - return ['make_set', '__new__', 'find_root', 'union'] - - def make_set(self, key, data=None): - """ - Adds a singleton set to the tree - of disjoint sets with given key - and optionally data. - """ - if self.tree.get(key, None) is None: - new_set = Set(key, data) - self.tree[key] = new_set - new_set.parent = new_set - new_set.size = 1 - - def find_root(self, key): - """ - Finds the root of the set - with the given key by path - splitting algorithm. - """ - if self.tree.get(key, None) is None: - raise KeyError("Invalid key, %s"%(key)) - _set = self.tree[key] - while _set.parent is not _set: - _set, _set.parent = _set.parent, _set.parent.parent - return _set - - def union(self, key1, key2): - """ - Takes the union of the two - disjoint set trees with given - keys. The union is done by size. - """ - x_root = self.find_root(key1) - y_root = self.find_root(key2) - - if x_root is not y_root: - if x_root.size < y_root.size: - x_root, y_root = y_root, x_root - - y_root.parent = x_root - x_root.size += y_root.size - - def make_root(self, key): - """ - Finds the set to which the key belongs - and makes it as the root of the set. 
- """ - if self.tree.get(key, None) is None: - raise KeyError("Invalid key, %s"%(key)) - - key_set = self.tree[key] - if key_set.parent is not key_set: - current_parent = key_set.parent - # Remove this key subtree size from all its ancestors - while current_parent.parent is not current_parent: - current_parent.size -= key_set.size - current_parent = current_parent.parent - - all_set_size = current_parent.size # This is the root node - current_parent.size -= key_set.size - - # Make parent of current root as key - current_parent.parent = key_set - # size of new root will be same as previous root's size - key_set.size = all_set_size - # Make parent of key as itself - key_set.parent = key_set - - def find_size(self, key): - """ - Finds the size of set to which the key belongs. - """ - if self.tree.get(key, None) is None: - raise KeyError("Invalid key, %s"%(key)) - - return self.find_root(key).size - - def disjoint_sets(self): - """ - Returns a list of disjoint sets in the data structure. - """ - result = dict() - for key in self.tree.keys(): - parent = self.find_root(key).key - members = result.get(parent, []) - members.append(key) - result[parent] = members - sorted_groups = [] - for v in result.values(): - sorted_groups.append(v) - sorted_groups[-1].sort() - sorted_groups.sort() - return sorted_groups diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/multiset.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/multiset.py deleted file mode 100644 index 397978224..000000000 --- a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/multiset.py +++ /dev/null @@ -1,42 +0,0 @@ -__all__ = [ - 'Multiset' -] - - -class Multiset: - def __init__(self, *args): - # TODO: Implement dict in pydatastructs - self.counter = dict() - from pydatastructs.trees import RedBlackTree - self.tree = RedBlackTree() - self._n = 0 - for arg in args: - self.add(arg) - - def add(self, element): - 
self.counter[element] = self.counter.get(element, 0) + 1 - self._n += 1 - if self.counter[element] == 1: - self.tree.insert(element) - - def remove(self, element): - if self.counter[element] == 1: - self.tree.delete(element) - if self.counter.get(element, 0) > 0: - self._n -= 1 - self.counter[element] -= 1 - - def lower_bound(self, element): - return self.tree.lower_bound(element) - - def upper_bound(self, element): - return self.tree.upper_bound(element) - - def __contains__(self, element): - return self.counter.get(element, 0) > 0 - - def __len__(self): - return self._n - - def count(self, element): - return self.counter.get(element, 0) diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/queue.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/queue.py deleted file mode 100644 index 033ef9af3..000000000 --- a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/queue.py +++ /dev/null @@ -1,498 +0,0 @@ -from pydatastructs.linear_data_structures import DynamicOneDimensionalArray, SinglyLinkedList -from pydatastructs.utils.misc_util import ( - NoneType, Backend, raise_if_backend_is_not_python) -from pydatastructs.trees.heaps import BinaryHeap, BinomialHeap -from copy import deepcopy as dc - -__all__ = [ - 'Queue', - 'PriorityQueue' -] - -class Queue(object): - """Representation of queue data structure. - - Parameters - ========== - - implementation : str - Implementation to be used for queue. - By default, 'array' - items : list/tuple - Optional, by default, None - The inital items in the queue. - dtype : A valid python type - Optional, by default NoneType if item - is None. - Required only for 'array' implementation. - double_ended : bool - Optional, by default, False. - Set to True if the queue should support - additional, appendleft and pop operations - from left and right sides respectively. - backend: pydatastructs.Backend - The backend to be used. 
- Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs import Queue - >>> q = Queue() - >>> q.append(1) - >>> q.append(2) - >>> q.append(3) - >>> q.popleft() - 1 - >>> len(q) - 2 - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Queue_(abstract_data_type) - """ - - def __new__(cls, implementation='array', **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - if implementation == 'array': - return ArrayQueue( - kwargs.get('items', None), - kwargs.get('dtype', int), - kwargs.get('double_ended', False)) - elif implementation == 'linked_list': - return LinkedListQueue( - kwargs.get('items', None), - kwargs.get('double_ended', False) - ) - else: - raise NotImplementedError( - "%s hasn't been implemented yet."%(implementation)) - - @classmethod - def methods(cls): - return ['__new__'] - - def _double_ended_check(self): - if not self._double_ended: - raise NotImplementedError( - "This method is only supported for " - "double ended queues.") - - def append(self, *args, **kwargs): - raise NotImplementedError( - "This is an abstract method.") - - def appendleft(self, *args, **kwargs): - raise NotImplementedError( - "This is an abstract method.") - - def pop(self, *args, **kwargs): - raise NotImplementedError( - "This is an abstract method.") - - def popleft(self, *args, **kwargs): - raise NotImplementedError( - "This is an abstract method.") - - @property - def is_empty(self): - raise NotImplementedError( - "This is an abstract method.") - - -class ArrayQueue(Queue): - - __slots__ = ['_front', '_rear', '_double_ended'] - - def __new__(cls, items=None, dtype=NoneType, double_ended=False, - **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - if items is None: - items = DynamicOneDimensionalArray(dtype, 0) - else: - dtype = type(items[0]) - items = DynamicOneDimensionalArray(dtype, items) - obj = object.__new__(cls) - 
obj.items, obj._front = items, -1 - if items.size == 0: - obj._front = -1 - obj._rear = -1 - else: - obj._front = 0 - obj._rear = items._num - 1 - obj._double_ended = double_ended - return obj - - @classmethod - def methods(cls): - return ['__new__', 'append', 'appendleft', 'popleft', - 'pop', 'is_empty', '__len__', '__str__', 'front', - 'rear'] - - def append(self, x): - if self.is_empty: - self._front = 0 - self.items._dtype = type(x) - self.items.append(x) - self._rear += 1 - - def appendleft(self, x): - self._double_ended_check() - temp = [] - if self.is_empty: - self._front = 0 - self._rear = -1 - self.items._dtype = type(x) - temp.append(x) - for i in range(self._front, self._rear + 1): - temp.append(self.items._data[i]) - self.items = DynamicOneDimensionalArray(type(temp[0]), temp) - self._rear += 1 - - def popleft(self): - if self.is_empty: - raise IndexError("Queue is empty.") - return_value = dc(self.items[self._front]) - front_temp = self._front - if self._front == self._rear: - self._front = -1 - self._rear = -1 - else: - if (self.items._num - 1)/self.items._size < \ - self.items._load_factor: - self._front = 0 - else: - self._front += 1 - self.items.delete(front_temp) - return return_value - - def pop(self): - self._double_ended_check() - if self.is_empty: - raise IndexError("Queue is empty.") - - return_value = dc(self.items[self._rear]) - rear_temp = self._rear - if self._front == self._rear: - self._front = -1 - self._rear = -1 - else: - if (self.items._num - 1)/self.items._size < \ - self.items._load_factor: - self._front = 0 - else: - self._rear -= 1 - self.items.delete(rear_temp) - return return_value - - @property - def front(self): - return self._front - - @property - def rear(self): - return self._rear - - @property - def is_empty(self): - return self.__len__() == 0 - - def __len__(self): - return self.items._num - - def __str__(self): - _data = [] - for i in range(self._front, self._rear + 1): - _data.append(self.items._data[i]) - return 
str(_data) - -class LinkedListQueue(Queue): - - __slots__ = ['queue', '_double_ended'] - - def __new__(cls, items=None, double_ended=False, - **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = object.__new__(cls) - obj.queue = SinglyLinkedList() - if items is None: - pass - elif type(items) in (list, tuple): - for x in items: - obj.append(x) - else: - raise TypeError("Expected type: list/tuple") - obj._double_ended = double_ended - return obj - - @classmethod - def methods(cls): - return ['__new__', 'append', 'appendleft', 'pop', 'popleft', - 'is_empty', '__len__', '__str__', 'front', 'rear'] - - def append(self, x): - self.queue.append(x) - - def appendleft(self, x): - self._double_ended_check() - if self._double_ended: - self.queue.appendleft(x) - - def pop(self): - self._double_ended_check() - if self.is_empty: - raise IndexError("Queue is empty.") - return_value = self.queue.popright() - return return_value - - def popleft(self): - if self.is_empty: - raise IndexError("Queue is empty.") - return_value = self.queue.popleft() - return return_value - - @property - def is_empty(self): - return self.__len__() == 0 - - @property - def front(self): - return self.queue.head - - @property - def rear(self): - return self.queue.tail - - def __len__(self): - return self.queue.size - - def __str__(self): - return str(self.queue) - -class PriorityQueue(object): - """ - Represents the concept of priority queue. - - Parameters - ========== - - implementation: str - The implementation which is to be - used for supporting operations - of priority queue. - The following implementations are supported, - - 'linked_list' -> Linked list implementation. - - 'binary_heap' -> Binary heap implementation. - - 'binomial_heap' -> Binomial heap implementation. - Doesn't support custom comparators, minimum - key data is extracted in every pop. - - Optional, by default, 'binary_heap' implementation - is used. 
- comp: function - The comparator to be used while comparing priorities. - Must return a bool object. - By default, `lambda u, v: u < v` is used to compare - priorities i.e., minimum priority elements are extracted - by pop operation. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs import PriorityQueue - >>> pq = PriorityQueue() - >>> pq.push(1, 2) - >>> pq.push(2, 3) - >>> pq.pop() - 1 - >>> pq2 = PriorityQueue(comp=lambda u, v: u > v) - >>> pq2.push(1, 2) - >>> pq2.push(2, 3) - >>> pq2.pop() - 2 - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Priority_queue - """ - - def __new__(cls, implementation='binary_heap', **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - comp = kwargs.get("comp", lambda u, v: u < v) - if implementation == 'linked_list': - return LinkedListPriorityQueue(comp) - elif implementation == 'binary_heap': - return BinaryHeapPriorityQueue(comp) - elif implementation == 'binomial_heap': - return BinomialHeapPriorityQueue() - else: - raise NotImplementedError( - "%s implementation is not currently supported " - "by priority queue.") - - @classmethod - def methods(cls): - return ['__new__'] - - def push(self, value, priority): - """ - Pushes the value to the priority queue - according to the given priority. - - value - Value to be pushed. - priority - Priority to be given to the value. - """ - raise NotImplementedError( - "This is an abstract method.") - - def pop(self): - """ - Pops out the value from the priority queue. - """ - raise NotImplementedError( - "This is an abstract method.") - - @property - def peek(self): - """ - Returns the pointer to the value which will be - popped out by `pop` method. - """ - raise NotImplementedError( - "This is an abstract method.") - - @property - def is_empty(self): - """ - Checks if the priority queue is empty. 
- """ - raise NotImplementedError( - "This is an abstract method.") - -class LinkedListPriorityQueue(PriorityQueue): - - __slots__ = ['items', 'comp'] - - @classmethod - def methods(cls): - return ['__new__', 'push', 'pop', 'peek', 'is_empty'] - - def __new__(cls, comp, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = object.__new__(cls) - obj.items = SinglyLinkedList() - obj.comp = comp - return obj - - def push(self, value, priority): - self.items.append(priority, value) - - def pop(self): - _, max_i = self._find_peek(return_index=True) - pop_val = self.items.extract(max_i) - return pop_val.data - - def _find_peek(self, return_index=False): - if self.is_empty: - raise IndexError("Priority queue is empty.") - - walk = self.items.head - i, max_i, max_p = 0, 0, walk - while walk is not None: - if self.comp(walk.key, max_p.key): - max_i = i - max_p = walk - i += 1 - walk = walk.next - if return_index: - return max_p, max_i - return max_p - - @property - def peek(self): - return self._find_peek() - - @property - def is_empty(self): - return self.items.size == 0 - -class BinaryHeapPriorityQueue(PriorityQueue): - - __slots__ = ['items'] - - @classmethod - def methods(cls): - return ['__new__', 'push', 'pop', 'peek', 'is_empty'] - - def __new__(cls, comp, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = object.__new__(cls) - obj.items = BinaryHeap() - obj.items._comp = comp - return obj - - def push(self, value, priority): - self.items.insert(priority, value) - - def pop(self): - node = self.items.extract() - return node.data - - @property - def peek(self): - if self.items.is_empty: - raise IndexError("Priority queue is empty.") - return self.items.heap[0] - - @property - def is_empty(self): - return self.items.is_empty - -class BinomialHeapPriorityQueue(PriorityQueue): - - __slots__ = ['items'] - - @classmethod - def methods(cls): - return ['__new__', 'push', 'pop', 
'peek', 'is_empty'] - - def __new__(cls, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = object.__new__(cls) - obj.items = BinomialHeap() - return obj - - def push(self, value, priority): - self.items.insert(priority, value) - - def pop(self): - node = self.items.find_minimum() - self.items.delete_minimum() - return node.data - - @property - def peek(self): - return self.items.find_minimum() - - @property - def is_empty(self): - return self.items.is_empty diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/segment_tree.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/segment_tree.py deleted file mode 100644 index 0895ba6da..000000000 --- a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/segment_tree.py +++ /dev/null @@ -1,225 +0,0 @@ -from .stack import Stack -from pydatastructs.utils.misc_util import (TreeNode, - Backend, raise_if_backend_is_not_python) - -__all__ = ['ArraySegmentTree'] - -class ArraySegmentTree(object): - """ - Represents the segment tree data structure, - defined on arrays. - - Parameters - ========== - - array: Array - The array to be used for filling the segment tree. - func: callable - The function to be used for filling the segment tree. - It should accept only one tuple as an argument. The - size of the tuple will be either 1 or 2 and any one - of the elements can be `None`. You can treat `None` in - whatever way you want. For example, in case of minimum - values, `None` can be treated as infinity. We provide - the following which can be used as an argument value for this - parameter, - - `minimum` - For range minimum queries. - - `greatest_common_divisor` - For queries finding greatest - common divisor of a range. - - `summation` - For range sum queries. - dimensions: int - The number of dimensions of the array to be used - for the segment tree. - Optional, by default 1. 
- backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs import ArraySegmentTree, minimum - >>> from pydatastructs import OneDimensionalArray - >>> arr = OneDimensionalArray(int, [1, 2, 3, 4, 5]) - >>> s_t = ArraySegmentTree(arr, minimum) - >>> s_t.build() - >>> s_t.query(0, 1) - 1 - >>> s_t.query(1, 3) - 2 - >>> s_t.update(2, -1) - >>> s_t.query(1, 3) - -1 - >>> arr = OneDimensionalArray(int, [1, 2]) - >>> s_t = ArraySegmentTree(arr, minimum) - >>> s_t.build() - >>> str(s_t) - "['((0, 1), 1)', '((0, 0), 1)', '', '', '((1, 1), 2)', '', '']" - - References - ========== - - .. [1] https://cp-algorithms.com/data_structures/segment_tree.html - """ - def __new__(cls, array, func, **kwargs): - - dimensions = kwargs.pop("dimensions", 1) - if dimensions == 1: - return OneDimensionalArraySegmentTree(array, func, **kwargs) - else: - raise NotImplementedError("ArraySegmentTree do not support " - "{}-dimensional arrays as of now.".format(dimensions)) - - def build(self): - """ - Generates segment tree nodes when called. - Nothing happens if nodes are already generated. - """ - raise NotImplementedError( - "This is an abstract method.") - - def update(self, index, value): - """ - Updates the value at given index. - """ - raise NotImplementedError( - "This is an abstract method.") - - def query(self, start, end): - """ - Queries [start, end] range according - to the function provided while constructing - `ArraySegmentTree` object. 
- """ - raise NotImplementedError( - "This is an abstract method.") - - def __str__(self): - recursion_stack = Stack(implementation='linked_list') - recursion_stack.push(self._root) - to_be_printed = [] - while not recursion_stack.is_empty: - node = recursion_stack.pop().key - if node is not None: - to_be_printed.append(str((node.key, node.data))) - else: - to_be_printed.append('') - if node is not None: - recursion_stack.push(node.right) - recursion_stack.push(node.left) - return str(to_be_printed) - - -class OneDimensionalArraySegmentTree(ArraySegmentTree): - - __slots__ = ["_func", "_array", "_root", "_backend"] - - def __new__(cls, array, func, **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - raise_if_backend_is_not_python(cls, backend) - - obj = object.__new__(cls) - obj._func = func - obj._array = array - obj._root = None - obj._backend = backend - return obj - - @classmethod - def methods(self): - return ['__new__', 'build', 'update', - 'query'] - - @property - def is_ready(self): - return self._root is not None - - def build(self): - if self.is_ready: - return - - recursion_stack = Stack(implementation='linked_list') - node = TreeNode((0, len(self._array) - 1), None, backend=self._backend) - node.is_root = True - self._root = node - recursion_stack.push(node) - - while not recursion_stack.is_empty: - node = recursion_stack.peek.key - start, end = node.key - if start == end: - node.data = self._array[start] - recursion_stack.pop() - continue - - if (node.left is not None and - node.right is not None): - recursion_stack.pop() - node.data = self._func((node.left.data, node.right.data)) - else: - mid = (start + end) // 2 - if node.left is None: - left_node = TreeNode((start, mid), None) - node.left = left_node - recursion_stack.push(left_node) - if node.right is None: - right_node = TreeNode((mid + 1, end), None) - node.right = right_node - recursion_stack.push(right_node) - - def update(self, index, value): - if not self.is_ready: - raise 
ValueError("{} tree is not built yet. ".format(self) + - "Call .build method to prepare the segment tree.") - - recursion_stack = Stack(implementation='linked_list') - recursion_stack.push((self._root, None)) - - while not recursion_stack.is_empty: - node, child = recursion_stack.peek.key - start, end = node.key - if start == end: - self._array[index] = value - node.data = value - recursion_stack.pop() - if not recursion_stack.is_empty: - parent_node = recursion_stack.pop() - recursion_stack.push((parent_node.key[0], node)) - continue - - if child is not None: - node.data = self._func((node.left.data, node.right.data)) - recursion_stack.pop() - if not recursion_stack.is_empty: - parent_node = recursion_stack.pop() - recursion_stack.push((parent_node.key[0], node)) - else: - mid = (start + end) // 2 - if start <= index and index <= mid: - recursion_stack.push((node.left, None)) - else: - recursion_stack.push((node.right, None)) - - def _query(self, node, start, end, l, r): - if r < start or end < l: - return None - - if l <= start and end <= r: - return node.data - - mid = (start + end) // 2 - left_result = self._query(node.left, start, mid, l, r) - right_result = self._query(node.right, mid + 1, end, l, r) - return self._func((left_result, right_result)) - - def query(self, start, end): - if not self.is_ready: - raise ValueError("{} tree is not built yet. 
".format(self) + - "Call .build method to prepare the segment tree.") - - return self._query(self._root, 0, len(self._array) - 1, - start, end) diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/sparse_table.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/sparse_table.py deleted file mode 100644 index 55ec4e9b3..000000000 --- a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/sparse_table.py +++ /dev/null @@ -1,108 +0,0 @@ -from pydatastructs.linear_data_structures.arrays import OneDimensionalArray -from pydatastructs.utils.misc_util import ( - Backend, raise_if_backend_is_not_python) -import math - -__all__ = ['SparseTable'] - - -class SparseTable(object): - """ - Represents the sparse table data structure. - - Parameters - ========== - - array: OneDimensionalArray - The array to be used for filling the sparse table. - func: callable - The function to be used for filling the sparse table. - It should accept only one tuple as an argument. The - size of the tuple will be either 1 or 2 and any one - of the elements can be `None`. You can treat `None` in - whatever way you want. For example, in case of minimum - values, `None` can be treated as infinity. We provide - the following which can be used as an argument value for this - parameter, - - `minimum` - For range minimum queries. - - `greatest_common_divisor` - For queries finding greatest - common divisor of a range. - - `summation` - For range sum queries. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs import SparseTable, minimum - >>> from pydatastructs import OneDimensionalArray - >>> arr = OneDimensionalArray(int, [1, 2, 3, 4, 5]) - >>> s_t = SparseTable(arr, minimum) - >>> str(s_t) - "['[1, 1, 1]', '[2, 2, 2]', '[3, 3, None]', '[4, 4, None]', '[5, None, None]']" - - References - ========== - - .. 
[1] https://cp-algorithms.com/data_structures/sparse-table.html - """ - - __slots__ = ['_table', 'func'] - - def __new__(cls, array, func, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - - # TODO: If possible remove the following check. - if len(array) == 0: - raise ValueError("Input %s array is empty."%(array)) - - obj = object.__new__(cls) - size = len(array) - log_size = int(math.log2(size)) + 1 - obj._table = [OneDimensionalArray(int, log_size) for _ in range(size)] - obj.func = func - - for i in range(size): - obj._table[i][0] = func((array[i],)) - - for j in range(1, log_size + 1): - for i in range(size - (1 << j) + 1): - obj._table[i][j] = func((obj._table[i][j - 1], - obj._table[i + (1 << (j - 1))][j - 1])) - - return obj - - @classmethod - def methods(cls): - return ['query', '__str__'] - - def query(self, start, end): - """ - Method to perform a query on sparse table in [start, end) - range. - - Parameters - ========== - - start: int - The starting index of the range. - end: int - The ending index of the range. 
- """ - j = int(math.log2(end - start + 1)) + 1 - answer = None - while j >= 0: - if start + (1 << j) - 1 <= end: - answer = self.func((answer, self._table[start][j])) - start += 1 << j - j -= 1 - return answer - - def __str__(self): - return str([str(array) for array in self._table]) diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/stack.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/stack.py deleted file mode 100644 index 38f72b43f..000000000 --- a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/stack.py +++ /dev/null @@ -1,200 +0,0 @@ -from pydatastructs.linear_data_structures import DynamicOneDimensionalArray, SinglyLinkedList -from pydatastructs.miscellaneous_data_structures._backend.cpp import _stack -from pydatastructs.utils.misc_util import ( - _check_type, NoneType, Backend, - raise_if_backend_is_not_python) -from copy import deepcopy as dc - -__all__ = [ - 'Stack' -] - -class Stack(object): - """Representation of stack data structure - - Parameters - ========== - - implementation : str - Implementation to be used for stack. - By default, 'array' - Currently only supports 'array' - implementation. - items : list/tuple - Optional, by default, None - The inital items in the stack. - For array implementation. - dtype : A valid python type - Optional, by default NoneType if item - is None, otherwise takes the data - type of DynamicOneDimensionalArray - For array implementation. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs import Stack - >>> s = Stack() - >>> s.push(1) - >>> s.push(2) - >>> s.push(3) - >>> str(s) - '[1, 2, 3]' - >>> s.pop() - 3 - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Stack_(abstract_data_type) - """ - - def __new__(cls, implementation='array', **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if implementation == 'array': - items = kwargs.get('items', None) - dtype = kwargs.get('dtype', int) - if backend == Backend.CPP: - return _stack.ArrayStack(items, dtype) - - return ArrayStack(items, dtype) - if implementation == 'linked_list': - raise_if_backend_is_not_python(cls, backend) - - return LinkedListStack( - kwargs.get('items', None) - ) - raise NotImplementedError( - "%s hasn't been implemented yet."%(implementation)) - - @classmethod - def methods(cls): - return ['__new__'] - - def push(self, *args, **kwargs): - raise NotImplementedError( - "This is an abstract method.") - - def pop(self, *args, **kwargs): - raise NotImplementedError( - "This is an abstract method.") - - @property - def is_empty(self): - raise NotImplementedError( - "This is an abstract method.") - - @property - def peek(self): - raise NotImplementedError( - "This is an abstract method.") - -class ArrayStack(Stack): - - __slots__ = ['items'] - - def __new__(cls, items=None, dtype=NoneType, - **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - if items is None: - items = DynamicOneDimensionalArray(dtype, 0) - else: - items = DynamicOneDimensionalArray(dtype, items) - obj = object.__new__(cls) - obj.items = items - return obj - - @classmethod - def methods(cls): - return ['__new__', 'push', 'pop', 'is_emtpy', - 'peek', '__len__', '__str__'] - - def push(self, x): - if self.is_empty: - self.items._dtype = type(x) - self.items.append(x) - - def pop(self): - if self.is_empty: - raise IndexError("Stack is empty") - - top_element = dc(self.items[self.items._last_pos_filled]) - self.items.delete(self.items._last_pos_filled) - return top_element - - @property - def is_empty(self): - return self.items._last_pos_filled == -1 - - @property - def peek(self): - return 
self.items[self.items._last_pos_filled] - - def __len__(self): - return self.items._num - - def __str__(self): - """ - Used for printing. - """ - return str(self.items._data) - - -class LinkedListStack(Stack): - - __slots__ = ['stack'] - - def __new__(cls, items=None, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = object.__new__(cls) - obj.stack = SinglyLinkedList() - if items is None: - pass - elif type(items) in (list, tuple): - for x in items: - obj.push(x) - else: - raise TypeError("Expected type: list/tuple") - return obj - - @classmethod - def methods(cls): - return ['__new__', 'push', 'pop', 'is_emtpy', - 'peek', '__len__', '__str__'] - - def push(self, x): - self.stack.appendleft(x) - - def pop(self): - if self.is_empty: - raise IndexError("Stack is empty") - return self.stack.popleft() - - @property - def is_empty(self): - return self.__len__() == 0 - - @property - def peek(self): - return self.stack.head - - @property - def size(self): - return self.stack.size - - def __len__(self): - return self.stack.size - - def __str__(self): - elements = [] - current_node = self.peek - while current_node is not None: - elements.append(str(current_node)) - current_node = current_node.next - return str(elements[::-1]) diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/__init__.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_binomial_trees.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_binomial_trees.py deleted file mode 100644 index 1275e9aec..000000000 --- a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_binomial_trees.py +++ /dev/null @@ -1,17 +0,0 @@ -from 
pydatastructs.miscellaneous_data_structures.binomial_trees import BinomialTree -from pydatastructs.utils.raises_util import raises -from pydatastructs.utils.misc_util import BinomialTreeNode - -# only tests the corner cases -def test_BinomialTree(): - assert raises(TypeError, lambda: BinomialTree(1, 1)) - assert raises(TypeError, lambda: BinomialTree(None, 1.5)) - - bt = BinomialTree() - assert raises(TypeError, lambda: bt.add_sub_tree(None)) - bt1 = BinomialTree(BinomialTreeNode(1, 1), 0) - node = BinomialTreeNode(2, 2) - node.add_children(BinomialTreeNode(3, 3)) - bt2 = BinomialTree(node, 1) - assert raises(ValueError, lambda: bt1.add_sub_tree(bt2)) - assert bt1.is_empty is False diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_disjoint_set.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_disjoint_set.py deleted file mode 100644 index fcabd3112..000000000 --- a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_disjoint_set.py +++ /dev/null @@ -1,70 +0,0 @@ -from pydatastructs import DisjointSetForest -from pydatastructs.utils.raises_util import raises - -def test_DisjointSetForest(): - - dst = DisjointSetForest() - for i in range(8): - dst.make_set(i+1) - - dst.union(1, 2) - dst.union(1, 5) - assert dst.find_size(2) == 3 - dst.union(1, 6) - dst.union(1, 8) - dst.union(3, 4) - assert dst.find_size(3) == 2 - - assert (dst.find_root(1) == dst.find_root(2) == - dst.find_root(5) == dst.find_root(6) == dst.find_root(8)) - assert dst.disjoint_sets() == [[1, 2, 5, 6, 8], [3, 4], [7]] - assert dst.find_root(3) == dst.find_root(4) - assert dst.find_root(7).key == 7 - - assert raises(KeyError, lambda: dst.find_root(9)) - assert raises(KeyError, lambda: dst.find_size(9)) - dst.union(3, 1) - assert dst.find_root(3).key == 1 - assert dst.find_root(5).key == 1 - dst.make_root(6) - assert dst.disjoint_sets() == [[1, 2, 3, 4, 5, 6, 8], [7]] - assert 
dst.find_root(3).key == 6 - assert dst.find_root(5).key == 6 - dst.make_root(5) - assert dst.find_root(1).key == 5 - assert dst.find_root(5).key == 5 - assert raises(KeyError, lambda: dst.make_root(9)) - - dst = DisjointSetForest() - for i in range(6): - dst.make_set(i) - assert dst.tree[2].size == 1 - dst.union(2, 3) - assert dst.tree[2].size == 2 - assert dst.tree[3].size == 1 - dst.union(1, 4) - dst.union(2, 4) - assert dst.disjoint_sets() == [[0], [1, 2, 3, 4], [5]] - # current tree - ############### - # 2 - # / \ - # 1 3 - # / - # 4 - ############### - assert dst.tree[2].size == 4 - assert dst.tree[1].size == 2 - assert dst.tree[3].size == dst.tree[4].size == 1 - dst.make_root(4) - # New tree - ############### - # 4 - # | - # 2 - # / \ - # 1 3 - ############### - assert dst.tree[4].size == 4 - assert dst.tree[2].size == 3 - assert dst.tree[1].size == dst.tree[3].size == 1 diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_multiset.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_multiset.py deleted file mode 100644 index fb412704a..000000000 --- a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_multiset.py +++ /dev/null @@ -1,39 +0,0 @@ -from pydatastructs.miscellaneous_data_structures import Multiset - -def test_Multiset(): - - ms = Multiset() - ms.add(5) - ms.add(5) - ms.add(3) - ms.add(7) - assert len(ms) == 4 - assert 5 in ms - assert ms.count(5) == 2 - assert ms.count(3) == 1 - assert ms.count(-3) == 0 - assert not 4 in ms - ms.remove(5) - assert 5 in ms - assert ms.lower_bound(5) == 5 - assert ms.upper_bound(5) == 7 - - ms = Multiset(5, 3, 7, 2) - - assert len(ms) == 4 - assert 5 in ms - assert ms.count(7) == 1 - assert not 4 in ms - assert ms.lower_bound(3) == 3 - assert ms.upper_bound(3) == 5 - assert ms.upper_bound(7) is None - - ms.remove(5) - - assert len(ms) == 3 - assert not 5 in ms - - ms.add(4) - - assert 4 in ms - assert 
len(ms) == 4 diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_queue.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_queue.py deleted file mode 100644 index 81e1e996e..000000000 --- a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_queue.py +++ /dev/null @@ -1,116 +0,0 @@ -from pydatastructs.miscellaneous_data_structures import Queue -from pydatastructs.miscellaneous_data_structures.queue import ( - ArrayQueue, LinkedListQueue, PriorityQueue, - LinkedListPriorityQueue) -from pydatastructs.utils.raises_util import raises -from pydatastructs.utils.misc_util import _check_type - -def test_Queue(): - q = Queue(implementation='array') - q1 = Queue() - assert _check_type(q, ArrayQueue) is True - assert _check_type(q1, ArrayQueue) is True - q2 = Queue(implementation='linked_list') - assert _check_type(q2, LinkedListQueue) is True - assert raises(NotImplementedError, lambda: Queue(implementation='')) - -def test_ArrayQueue(): - q1 = Queue() - raises(IndexError, lambda: q1.popleft()) - q1 = Queue(implementation='array', items=[0]) - q1.append(1) - q1.append(2) - q1.append(3) - assert str(q1) == '[0, 1, 2, 3]' - assert len(q1) == 4 - assert q1.popleft() == 0 - assert q1.popleft() == 1 - assert len(q1) == 2 - assert q1.popleft() == 2 - assert q1.popleft() == 3 - assert len(q1) == 0 - - q2 = Queue(implementation='array', items=[0], double_ended=True) - q2.append(1) - q2.append(2) - q2.appendleft(3) - assert str(q2) == '[3, 0, 1, 2]' - assert len(q2) == 4 - assert q2.popleft() == 3 - assert q2.pop() == 2 - assert len(q2) == 2 - assert q2.popleft() == 0 - assert q2.pop() == 1 - assert len(q2) == 0 - - q1 = Queue(implementation='array', items=[0]) - assert raises(NotImplementedError, lambda: q1.appendleft(2)) - - -def test_LinkedListQueue(): - q1 = Queue(implementation='linked_list') - q1.append(1) - assert raises(TypeError, lambda: 
Queue(implementation='linked_list', items={0, 1})) - q1 = Queue(implementation='linked_list', items = [0, 1]) - q1.append(2) - q1.append(3) - assert str(q1) == ("['(0, None)', '(1, None)', " - "'(2, None)', '(3, None)']") - assert len(q1) == 4 - assert q1.popleft().key == 0 - assert q1.popleft().key == 1 - assert len(q1) == 2 - assert q1.popleft().key == 2 - assert q1.popleft().key == 3 - assert len(q1) == 0 - raises(IndexError, lambda: q1.popleft()) - - q1 = Queue(implementation='linked_list',items=['a',None,type,{}]) - assert len(q1) == 4 - - front = q1.front - assert front.key == q1.popleft().key - - rear = q1.rear - for _ in range(len(q1)-1): - q1.popleft() - - assert rear.key == q1.popleft().key - - q1 = Queue(implementation='linked_list', double_ended=True) - q1.appendleft(1) - q2 = Queue(implementation='linked_list', items=[0, 1]) - assert raises(NotImplementedError, lambda: q2.appendleft(1)) - q1 = Queue(implementation='linked_list', items = [0, 1], double_ended=True) - q1.appendleft(2) - q1.append(3) - assert str(q1) == "['(2, None)', '(0, None)', '(1, None)', '(3, None)']" - assert len(q1) == 4 - assert q1.popleft().key == 2 - assert q1.pop().key == 3 - assert len(q1) == 2 - assert q1.pop().key == 1 - assert q1.popleft().key == 0 - assert len(q1) == 0 - assert raises(IndexError, lambda: q1.popleft()) - -def test_PriorityQueue(): - pq1 = PriorityQueue(implementation='linked_list') - assert _check_type(pq1, LinkedListPriorityQueue) is True - assert raises(NotImplementedError, lambda: Queue(implementation='')) - -def test_ImplementationPriorityQueue(): - impls = ['linked_list', 'binomial_heap', 'binary_heap'] - for impl in impls: - pq1 = PriorityQueue(implementation=impl) - pq1.push(1, 4) - pq1.push(2, 3) - pq1.push(3, 2) - assert pq1.peek.data == 3 - assert pq1.pop() == 3 - assert pq1.peek.data == 2 - assert pq1.pop() == 2 - assert pq1.peek.data == 1 - assert pq1.pop() == 1 - assert pq1.is_empty is True - assert raises(IndexError, lambda: pq1.peek) diff 
--git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_range_query_dynamic.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_range_query_dynamic.py deleted file mode 100644 index f655c546d..000000000 --- a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_range_query_dynamic.py +++ /dev/null @@ -1,71 +0,0 @@ -from pydatastructs import ( - RangeQueryDynamic, minimum, - greatest_common_divisor, summation, - OneDimensionalArray) -from pydatastructs.utils.raises_util import raises -import random, math -from copy import deepcopy - -def _test_RangeQueryDynamic_common(func, gen_expected): - - array = OneDimensionalArray(int, []) - raises(ValueError, lambda: RangeQueryDynamic(array, func)) - - array = OneDimensionalArray(int, [1]) - rq = RangeQueryDynamic(array, func) - assert rq.query(0, 0) == 1 - raises(ValueError, lambda: rq.query(0, -1)) - raises(IndexError, lambda: rq.query(0, 1)) - - array_sizes = [3, 6, 12, 24, 48, 96] - random.seed(0) - for array_size in array_sizes: - inputs = [] - for i in range(array_size): - for j in range(i + 1, array_size): - inputs.append((i, j)) - - data_structures = ["array", "segment_tree"] - for ds in data_structures: - data = random.sample(range(-2*array_size, 2*array_size), array_size) - array = OneDimensionalArray(int, data) - rmq = RangeQueryDynamic(array, func, data_structure=ds) - for input in inputs: - assert rmq.query(input[0], input[1]) == gen_expected(data, input[0], input[1]) - - data_copy = deepcopy(data) - for _ in range(array_size//2): - index = random.randint(0, array_size - 1) - value = random.randint(0, 4 * array_size) - data_copy[index] = value - rmq.update(index, value) - - for input in inputs: - assert rmq.query(input[0], input[1]) == gen_expected(data_copy, input[0], input[1]) - -def test_RangeQueryDynamic_minimum(): - - def _gen_minimum_expected(data, i, j): - return min(data[i:j + 1]) - - 
_test_RangeQueryDynamic_common(minimum, _gen_minimum_expected) - -def test_RangeQueryDynamic_greatest_common_divisor(): - - def _gen_gcd_expected(data, i, j): - if j == i: - return data[i] - else: - expected_gcd = math.gcd(data[i], data[i + 1]) - for idx in range(i + 2, j + 1): - expected_gcd = math.gcd(expected_gcd, data[idx]) - return expected_gcd - - _test_RangeQueryDynamic_common(greatest_common_divisor, _gen_gcd_expected) - -def test_RangeQueryDynamic_summation(): - - def _gen_summation_expected(data, i, j): - return sum(data[i:j + 1]) - - return _test_RangeQueryDynamic_common(summation, _gen_summation_expected) diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_range_query_static.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_range_query_static.py deleted file mode 100644 index e898653c9..000000000 --- a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_range_query_static.py +++ /dev/null @@ -1,63 +0,0 @@ -from pydatastructs import ( - RangeQueryStatic, minimum, - greatest_common_divisor, summation, - OneDimensionalArray) -from pydatastructs.utils.raises_util import raises -import random, math - -def _test_RangeQueryStatic_common(func, gen_expected): - - array = OneDimensionalArray(int, []) - raises(ValueError, lambda: RangeQueryStatic(array, func)) - - array = OneDimensionalArray(int, [1]) - rq = RangeQueryStatic(array, func) - assert rq.query(0, 0) == 1 - raises(ValueError, lambda: rq.query(0, -1)) - raises(IndexError, lambda: rq.query(0, 1)) - - array_sizes = [3, 6, 12, 24, 48, 96] - random.seed(0) - for array_size in array_sizes: - data = random.sample(range(-2*array_size, 2*array_size), array_size) - array = OneDimensionalArray(int, data) - - expected = [] - inputs = [] - for i in range(array_size): - for j in range(i + 1, array_size): - inputs.append((i, j)) - expected.append(gen_expected(data, i, j)) - - data_structures = ["array", 
"sparse_table"] - for ds in data_structures: - rmq = RangeQueryStatic(array, func, data_structure=ds) - for input, correct in zip(inputs, expected): - assert rmq.query(input[0], input[1]) == correct - -def test_RangeQueryStatic_minimum(): - - def _gen_minimum_expected(data, i, j): - return min(data[i:j + 1]) - - _test_RangeQueryStatic_common(minimum, _gen_minimum_expected) - -def test_RangeQueryStatic_greatest_common_divisor(): - - def _gen_gcd_expected(data, i, j): - if j == i: - return data[i] - else: - expected_gcd = math.gcd(data[i], data[i + 1]) - for idx in range(i + 2, j + 1): - expected_gcd = math.gcd(expected_gcd, data[idx]) - return expected_gcd - - _test_RangeQueryStatic_common(greatest_common_divisor, _gen_gcd_expected) - -def test_RangeQueryStatic_summation(): - - def _gen_summation_expected(data, i, j): - return sum(data[i:j + 1]) - - return _test_RangeQueryStatic_common(summation, _gen_summation_expected) diff --git a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_stack.py b/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_stack.py deleted file mode 100644 index 2d9d08b82..000000000 --- a/lib/python3.12/site-packages/pydatastructs/miscellaneous_data_structures/tests/test_stack.py +++ /dev/null @@ -1,77 +0,0 @@ -from pydatastructs.miscellaneous_data_structures import Stack -from pydatastructs.miscellaneous_data_structures.stack import ArrayStack, LinkedListStack -from pydatastructs.miscellaneous_data_structures._backend.cpp import _stack -from pydatastructs.utils.raises_util import raises -from pydatastructs.utils.misc_util import _check_type, Backend - - -def test_Stack(): - s = Stack(implementation='array') - s1 = Stack() - assert _check_type(s, ArrayStack) is True - assert _check_type(s1, ArrayStack) is True - s2 = Stack(implementation='linked_list') - assert _check_type(s2, LinkedListStack) is True - assert raises(NotImplementedError, lambda: Stack(implementation='')) - - s3 = 
Stack(backend=Backend.CPP) - assert _check_type(s3, _stack.ArrayStack) is True - s4 = Stack(implementation="array", backend=Backend.CPP) - assert _check_type(s4, _stack.ArrayStack) is True - -def test_ArrayStack(): - s = Stack(implementation='array') - s.push(1) - s.push(2) - s.push(3) - assert s.peek == 3 - assert str(s) == '[1, 2, 3]' - assert s.pop() == 3 - assert s.pop() == 2 - assert s.pop() == 1 - assert s.is_empty is True - assert raises(IndexError, lambda : s.pop()) - _s = Stack(items=[1, 2, 3]) - assert str(_s) == '[1, 2, 3]' - assert len(_s) == 3 - - # Cpp test - s1 = Stack(implementation="array", backend=Backend.CPP) - s1.push(1) - s1.push(2) - s1.push(3) - assert s1.peek == 3 - assert str(s1) == "['1', '2', '3']" - assert s1.pop() == 3 - assert s1.pop() == 2 - assert s1.pop() == 1 - assert s1.is_empty is True - assert raises(IndexError, lambda : s1.pop()) - _s1 = Stack(items=[1, 2, 3], backend=Backend.CPP) - assert str(_s1) == "['1', '2', '3']" - assert len(_s1) == 3 - -def test_LinkedListStack(): - s = Stack(implementation='linked_list') - s.push(1) - s.push(2) - s.push(3) - assert s.peek.key == 3 - assert str(s) == ("['(1, None)', '(2, None)', '(3, None)']") - assert s.pop().key == 3 - assert s.pop().key == 2 - assert s.pop().key == 1 - assert s.is_empty is True - assert raises(IndexError, lambda : s.pop()) - assert str(s) == '[]' - _s = Stack(implementation='linked_list',items=[1, 2, 3]) - assert str(_s) == "['(1, None)', '(2, None)', '(3, None)']" - assert len(_s) == 3 - - s = Stack(implementation='linked_list',items=['a',None,type,{}]) - assert len(s) == 4 - assert s.size == 4 - - peek = s.peek - assert peek.key == s.pop().key - assert raises(TypeError, lambda: Stack(implementation='linked_list', items={0, 1})) diff --git a/lib/python3.12/site-packages/pydatastructs/strings/__init__.py b/lib/python3.12/site-packages/pydatastructs/strings/__init__.py deleted file mode 100644 index 33930b426..000000000 --- 
a/lib/python3.12/site-packages/pydatastructs/strings/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -__all__ = [] - -from . import ( - trie, - algorithms -) - -from .trie import ( - Trie -) - -__all__.extend(trie.__all__) - -from .algorithms import ( - find -) - -__all__.extend(algorithms.__all__) diff --git a/lib/python3.12/site-packages/pydatastructs/strings/algorithms.py b/lib/python3.12/site-packages/pydatastructs/strings/algorithms.py deleted file mode 100644 index 1e26b9411..000000000 --- a/lib/python3.12/site-packages/pydatastructs/strings/algorithms.py +++ /dev/null @@ -1,247 +0,0 @@ -from pydatastructs.linear_data_structures.arrays import ( - DynamicOneDimensionalArray, OneDimensionalArray) -from pydatastructs.utils.misc_util import ( - Backend, raise_if_backend_is_not_python) - -__all__ = [ - 'find' -] - -PRIME_NUMBER, MOD = 257, 1000000007 - -def find(text, query, algorithm, **kwargs): - """ - Finds occurrence of a query string within the text string. - - Parameters - ========== - - text: str - The string on which query is to be performed. - query: str - The string which is to be searched in the text. - algorithm: str - The algorithm which should be used for - searching. - Currently the following algorithms are - supported, - - 'kmp' -> Knuth-Morris-Pratt as given in [1]. - - 'rabin_karp' -> Rabin–Karp algorithm as given in [2]. - - 'boyer_moore' -> Boyer-Moore algorithm as given in [3]. - - 'z_function' -> Z-function algorithm as given in [4]. - - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Returns - ======= - - DynamicOneDimensionalArray - An array of starting positions of the portions - in the text which match with the given query. 
- - Examples - ======== - - >>> from pydatastructs.strings.algorithms import find - >>> text = "abcdefabcabe" - >>> pos = find(text, "ab", algorithm="kmp") - >>> str(pos) - "['0', '6', '9']" - >>> pos = find(text, "abc", algorithm="kmp") - >>> str(pos) - "['0', '6']" - >>> pos = find(text, "abe", algorithm="kmp") - >>> str(pos) - "['9']" - >>> pos = find(text, "abed", algorithm="kmp") - >>> str(pos) - '[]' - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Knuth%E2%80%93Morris%E2%80%93Pratt_algorithm - .. [2] https://en.wikipedia.org/wiki/Rabin%E2%80%93Karp_algorithm - .. [3] https://en.wikipedia.org/wiki/Boyer%E2%80%93Moore_string-search_algorithm - .. [4] https://usaco.guide/CPH.pdf#page=257 - """ - raise_if_backend_is_not_python( - find, kwargs.get('backend', Backend.PYTHON)) - import pydatastructs.strings.algorithms as algorithms - func = "_" + algorithm - if not hasattr(algorithms, func): - raise NotImplementedError( - "Currently %s algoithm for searching strings " - "inside a text isn't implemented yet." 
- %(algorithm)) - return getattr(algorithms, func)(text, query) - - -def _knuth_morris_pratt(text, query): - if len(text) == 0 or len(query) == 0: - return DynamicOneDimensionalArray(int, 0) - kmp_table = _build_kmp_table(query) - return _do_match(text, query, kmp_table) - -_kmp = _knuth_morris_pratt - -def _build_kmp_table(query): - pos, cnd = 1, 0 - kmp_table = OneDimensionalArray(int, len(query) + 1) - - kmp_table[0] = -1 - - while pos < len(query): - if query[pos] == query[cnd]: - kmp_table[pos] = kmp_table[cnd] - else: - kmp_table[pos] = cnd - while cnd >= 0 and query[pos] != query[cnd]: - cnd = kmp_table[cnd] - pos, cnd = pos + 1, cnd + 1 - kmp_table[pos] = cnd - - return kmp_table - - - -def _do_match(string, query, kmp_table): - j, k = 0, 0 - positions = DynamicOneDimensionalArray(int, 0) - - while j < len(string): - if query[k] == string[j]: - j = j + 1 - k = k + 1 - if k == len(query): - positions.append(j - k) - k = kmp_table[k] - else: - k = kmp_table[k] - if k < 0: - j = j + 1 - k = k + 1 - - return positions - -def _p_pow(length, p=PRIME_NUMBER, m=MOD): - p_pow = OneDimensionalArray(int, length) - p_pow[0] = 1 - for i in range(1, length): - p_pow[i] = (p_pow[i-1] * p) % m - return p_pow - -def _hash_str(string, p=PRIME_NUMBER, m=MOD): - hash_value = 0 - p_pow = _p_pow(len(string), p, m) - for i in range(len(string)): - hash_value = (hash_value + ord(string[i]) * p_pow[i]) % m - return hash_value - -def _rabin_karp(text, query): - t = len(text) - q = len(query) - positions = DynamicOneDimensionalArray(int, 0) - if q == 0 or t == 0: - return positions - - query_hash = _hash_str(query) - text_hash = OneDimensionalArray(int, t + 1) - text_hash.fill(0) - p_pow = _p_pow(t) - - for i in range(t): - text_hash[i+1] = (text_hash[i] + ord(text[i]) * p_pow[i]) % MOD - for i in range(t - q + 1): - curr_hash = (text_hash[i + q] + MOD - text_hash[i]) % MOD - if curr_hash == (query_hash * p_pow[i]) % MOD: - positions.append(i) - - return positions - -def 
_boyer_moore(text, query): - positions = DynamicOneDimensionalArray(int, 0) - text_length, query_length = len(text), len(query) - - if text_length == 0 or query_length == 0: - return positions - - # Preprocessing Step - bad_match_table = dict() - for i in range(query_length): - bad_match_table[query[i]] = i - - shift = 0 - # Matching procedure - while shift <= text_length-query_length: - j = query_length - 1 - while j >= 0 and query[j] == text[shift + j]: - j -= 1 - if j < 0: - positions.append(shift) - if shift + query_length < text_length: - if text[shift + query_length] in bad_match_table: - shift += query_length - bad_match_table[text[shift + query_length]] - else: - shift += query_length + 1 - else: - shift += 1 - else: - letter_pos = text[shift + j] - if letter_pos in bad_match_table: - shift += max(1, j - bad_match_table[letter_pos]) - else: - shift += max(1, j + 1) - return positions - -def _z_vector(text, query): - string = text - if query != "": - string = query + str("$") + text - - z_fct = OneDimensionalArray(int, len(string)) - z_fct.fill(0) - - curr_pos = 1 - seg_left = 0 - seg_right = 0 - - for curr_pos in range(1,len(string)): - if curr_pos <= seg_right: - z_fct[curr_pos] = min(seg_right - curr_pos + 1, z_fct[curr_pos - seg_left]) - - while curr_pos + z_fct[curr_pos] < len(string) and \ - string[z_fct[curr_pos]] == string[curr_pos + z_fct[curr_pos]]: - z_fct[curr_pos] += 1 - - if curr_pos + z_fct[curr_pos] - 1 > seg_right: - seg_left = curr_pos - seg_right = curr_pos + z_fct[curr_pos] - 1 - - final_z_fct = DynamicOneDimensionalArray(int, 0) - start_index = 0 - if query != "": - start_index = len(query) + 1 - for pos in range(start_index, len(string)): - final_z_fct.append(z_fct[pos]) - - return final_z_fct - -def _z_function(text, query): - positions = DynamicOneDimensionalArray(int, 0) - if len(text) == 0 or len(query) == 0: - return positions - - fct = _z_vector(text, query) - for pos in range(len(fct)): - if fct[pos] == len(query): - 
positions.append(pos) - - return positions diff --git a/lib/python3.12/site-packages/pydatastructs/strings/tests/__init__.py b/lib/python3.12/site-packages/pydatastructs/strings/tests/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/lib/python3.12/site-packages/pydatastructs/strings/tests/test_algorithms.py b/lib/python3.12/site-packages/pydatastructs/strings/tests/test_algorithms.py deleted file mode 100644 index 37622cf80..000000000 --- a/lib/python3.12/site-packages/pydatastructs/strings/tests/test_algorithms.py +++ /dev/null @@ -1,76 +0,0 @@ -from pydatastructs.strings import find - -import random, string - -def test_kmp(): - _test_common_string_matching('kmp') - -def test_rka(): - _test_common_string_matching('rabin_karp') - -def test_bm(): - _test_common_string_matching('boyer_moore') - -def test_zf(): - _test_common_string_matching('z_function') - -def _test_common_string_matching(algorithm): - true_text_pattern_dictionary = { - "Knuth-Morris-Pratt": "-Morris-", - "abcabcabcabdabcabdabcabca": "abcabdabcabca", - "aefcdfaecdaefaefcdaefeaefcdcdeae": "aefcdaefeaefcd", - "aaaaaaaa": "aaa", - "fullstringmatch": "fullstringmatch", - "z-function": "z-fun" - } - for test_case_key in true_text_pattern_dictionary: - text = test_case_key - query = true_text_pattern_dictionary[test_case_key] - positions = find(text, query, algorithm) - for i in range(positions._last_pos_filled): - p = positions[i] - assert text[p:p + len(query)] == query - - false_text_pattern_dictionary = { - "Knuth-Morris-Pratt": "-Pratt-", - "abcabcabcabdabcabdabcabca": "qwertyuiopzxcvbnm", - "aefcdfaecdaefaefcdaefeaefcdcdeae": "cdaefaefe", - "fullstringmatch": "fullstrinmatch", - "z-function": "function-", - "abc": "", - "": "abc" - } - - for test_case_key in false_text_pattern_dictionary: - text = test_case_key - query = false_text_pattern_dictionary[test_case_key] - positions = find(text, query, algorithm) - assert positions.size == 0 - - random.seed(1000) - - def 
gen_random_string(length): - ascii = string.ascii_uppercase - digits = string.digits - return ''.join(random.choices(ascii + digits, k=length)) - - for _ in range(100): - query = gen_random_string(random.randint(3, 10)) - num_times = random.randint(1, 10) - freq = 0 - text = "" - while freq < num_times: - rand_str = gen_random_string(random.randint(5, 10)) - if rand_str != query: - freq += 1 - text += query + rand_str + query - positions = find(text, query, algorithm) - assert positions._num == num_times * 2 - for i in range(positions._last_pos_filled): - p = positions[i] - assert text[p:p + len(query)] == query - - text = gen_random_string(len(query)) - if text != query: - positions = find(text, query, algorithm) - assert positions.size == 0 diff --git a/lib/python3.12/site-packages/pydatastructs/strings/tests/test_trie.py b/lib/python3.12/site-packages/pydatastructs/strings/tests/test_trie.py deleted file mode 100644 index 059104708..000000000 --- a/lib/python3.12/site-packages/pydatastructs/strings/tests/test_trie.py +++ /dev/null @@ -1,49 +0,0 @@ -from pydatastructs import Trie - -def test_Trie(): - - strings = ["A", "to", "tea", "ted", "ten", "i", - "in", "inn", "Amfn", "snbr"] - trie = Trie() - for string in strings: - trie.insert(string) - - prefix_strings = ["te", "t", "Am", "snb"] - - for string in strings: - assert trie.is_inserted(string) - - for string in strings[::-1]: - assert trie.is_inserted(string) - - for string in prefix_strings: - assert trie.is_present(string) - assert not trie.is_inserted(string) - - assert sorted(trie.strings_with_prefix("t")) == ['tea', 'ted', 'ten', 'to'] - assert sorted(trie.strings_with_prefix("te")) == ["tea", "ted", "ten"] - assert trie.strings_with_prefix("i") == ["i", "in", "inn"] - assert trie.strings_with_prefix("a") == [] - - remove_order = ["to", "tea", "ted", "ten", "inn", "in", "A"] - - assert trie.delete("z") is None - - for string in remove_order: - trie.delete(string) - for present in strings: - if present == 
string: - assert not trie.is_inserted(present) - else: - assert trie.is_present(present) - assert trie.is_inserted(present) - strings.remove(string) - - prefix_strings_1 = ["dict", "dicts", "dicts_lists_tuples"] - trie_1 = Trie() - - for i in range(len(prefix_strings_1)): - trie_1.insert(prefix_strings_1[i]) - for j in range(i + 1): - assert trie_1.is_inserted(prefix_strings_1[j]) - assert trie_1.is_present(prefix_strings_1[j]) diff --git a/lib/python3.12/site-packages/pydatastructs/strings/trie.py b/lib/python3.12/site-packages/pydatastructs/strings/trie.py deleted file mode 100644 index cdf6666cf..000000000 --- a/lib/python3.12/site-packages/pydatastructs/strings/trie.py +++ /dev/null @@ -1,201 +0,0 @@ -from pydatastructs.utils.misc_util import ( - TrieNode, Backend, - raise_if_backend_is_not_python) -from collections import deque -import copy - -__all__ = [ - 'Trie' -] - -Stack = Queue = deque - -class Trie(object): - """ - Represents the trie data structure for storing strings. - - Parameters - ========== - - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs import Trie - >>> trie = Trie() - >>> trie.insert("a") - >>> trie.insert("aa") - >>> trie.strings_with_prefix("a") - ['a', 'aa'] - >>> trie.is_present("aa") - True - >>> trie.delete("aa") - True - >>> trie.is_present("aa") - False - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Trie - """ - - __slots__ = ['root'] - - @classmethod - def methods(cls): - return ['__new__', 'insert', 'is_present', 'delete', - 'strings_with_prefix'] - - def __new__(cls, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = object.__new__(cls) - obj.root = TrieNode() - return obj - - def insert(self, string: str) -> None: - """ - Inserts the given string into the trie. 
- - Parameters - ========== - - string: str - - Returns - ======= - - None - """ - walk = self.root - for char in string: - if walk.get_child(char) is None: - newNode = TrieNode(char) - walk.add_child(newNode) - walk = newNode - else: - walk = walk.get_child(char) - walk.is_terminal = True - - def is_present(self, string: str) -> bool: - """ - Checks if the given string is present as a prefix in the trie. - - Parameters - ========== - - string: str - - Returns - ======= - - True if the given string is present as a prefix; - False in all other cases. - """ - walk = self.root - for char in string: - if walk.get_child(char) is None: - return False - walk = walk.get_child(char) - return True - - def is_inserted(self, string: str) -> bool: - """ - Checks if the given string was inserted in the trie. - - Parameters - ========== - - string: str - - Returns - ======= - - True if the given string was inserted in trie; - False in all other cases. - """ - walk = self.root - for char in string: - if walk.get_child(char) is None: - return False - walk = walk.get_child(char) - return walk.is_terminal - - def delete(self, string: str) -> bool: - """ - Deletes the given string from the trie. - - Parameters - ========== - - string: str - - Returns - ======= - - True if successfully deleted; - None if the string is not present in the trie. - """ - path = [] - walk = self.root - size = len(string) - for i in range(size): - char = string[i] - path.append(walk) - if walk.get_child(char) is None: - return None - walk = walk.get_child(char) - path.append(walk) - i = len(path) - 1 - path[i].is_terminal = False - while not path[i]._children and i >= 1: - path[i-1].remove_child(path[i].char) - i -= 1 - if path[i].is_terminal: - return True - return True - - def strings_with_prefix(self, string: str) -> list: - """ - Generates a list of all strings with the given prefix. 
- - Parameters - ========== - - string: str - - Returns - ======= - - strings: list - The list of strings with the given prefix. - """ - - def _collect(prefix: str, node: TrieNode, strings: list) -> str: - TrieNode_stack = Stack() - TrieNode_stack.append((node, prefix)) - while TrieNode_stack: - walk, curr_prefix = TrieNode_stack.pop() - if walk.is_terminal: - strings.append(curr_prefix + walk.char) - for child in walk._children: - TrieNode_stack.append((walk.get_child(child), curr_prefix + walk.char)) - - strings = [] - prefix = "" - walk = self.root - for char in string: - walk = walk.get_child(char) - if walk is None: - return strings - prefix += char - if walk.is_terminal: - strings.append(walk.char) - for child in walk._children: - _collect(prefix, walk.get_child(child), strings) - return strings diff --git a/lib/python3.12/site-packages/pydatastructs/trees/__init__.py b/lib/python3.12/site-packages/pydatastructs/trees/__init__.py deleted file mode 100644 index 892730122..000000000 --- a/lib/python3.12/site-packages/pydatastructs/trees/__init__.py +++ /dev/null @@ -1,40 +0,0 @@ -__all__ = [] - -from . 
import ( - binary_trees, - m_ary_trees, - space_partitioning_trees, - heaps, -) - -from .binary_trees import ( - BinaryTree, - BinarySearchTree, - BinaryTreeTraversal, - AVLTree, - BinaryIndexedTree, - CartesianTree, - Treap, - SplayTree, - RedBlackTree -) -__all__.extend(binary_trees.__all__) - -from .m_ary_trees import ( - MAryTreeNode, MAryTree -) - -__all__.extend(m_ary_trees.__all__) - -from .space_partitioning_trees import ( - OneDimensionalSegmentTree -) -__all__.extend(space_partitioning_trees.__all__) - -from .heaps import ( - BinaryHeap, - TernaryHeap, - DHeap, - BinomialHeap -) -__all__.extend(heaps.__all__) diff --git a/lib/python3.12/site-packages/pydatastructs/trees/_backend/__init__.py b/lib/python3.12/site-packages/pydatastructs/trees/_backend/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/lib/python3.12/site-packages/pydatastructs/trees/binary_trees.py b/lib/python3.12/site-packages/pydatastructs/trees/binary_trees.py deleted file mode 100644 index 48446d1d4..000000000 --- a/lib/python3.12/site-packages/pydatastructs/trees/binary_trees.py +++ /dev/null @@ -1,1888 +0,0 @@ -import random -from collections import deque as Queue -from pydatastructs.utils import TreeNode, CartesianTreeNode, RedBlackTreeNode -from pydatastructs.miscellaneous_data_structures import Stack -from pydatastructs.linear_data_structures import OneDimensionalArray -from pydatastructs.linear_data_structures.arrays import ArrayForTrees -from pydatastructs.utils.misc_util import Backend -from pydatastructs.trees._backend.cpp import _trees - -__all__ = [ - 'AVLTree', - 'BinaryTree', - 'BinarySearchTree', - 'BinaryTreeTraversal', - 'BinaryIndexedTree', - 'CartesianTree', - 'Treap', - 'SplayTree', - 'RedBlackTree' -] - -class BinaryTree(object): - """ - Abstract binary tree. - - Parameters - ========== - - key - Required if tree is to be instantiated with - root otherwise not needed. - root_data - Optional, the root node of the binary tree. 
- If not of type TreeNode, it will consider - root as data and a new root node will - be created. - comp: lambda/function - Optional, A lambda function which will be used - for comparison of keys. Should return a - bool value. By default it implements less - than operator. - is_order_statistic: bool - Set it to True, if you want to use the - order statistic features of the tree. - backend: pydatastructs.Backend - The backend to be used. Available backends: Python and C++ - Optional, by default, the Python backend is used. For faster execution, use the C++ backend. - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Binary_tree - """ - - __slots__ = ['root_idx', 'comparator', 'tree', 'size', - 'is_order_statistic'] - - def __new__(cls, key=None, root_data=None, comp=None, - is_order_statistic=False, **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.CPP: - if comp is None: - comp = lambda key1, key2: key1 < key2 - return _trees.BinaryTree(key, root_data, comp, is_order_statistic, **kwargs) # If any argument is not given, then it is passed as None, except for comp - obj = object.__new__(cls) - if key is None and root_data is not None: - raise ValueError('Key required.') - key = None if root_data is None else key - root = TreeNode(key, root_data) - root.is_root = True - obj.root_idx = 0 - obj.tree, obj.size = ArrayForTrees(TreeNode, [root]), 1 - obj.comparator = lambda key1, key2: key1 < key2 \ - if comp is None else comp - obj.is_order_statistic = is_order_statistic - return obj - - @classmethod - def methods(cls): - return ['__new__', '__str__'] - - def insert(self, key, data=None): - """ - Inserts data by the passed key using iterative - algorithm. - - Parameters - ========== - - key - The key for comparison. - - data - The data to be inserted. 
- - Returns - ======= - - None - """ - raise NotImplementedError("This is an abstract method.") - - def delete(self, key, **kwargs): - """ - Deletes the data with the passed key - using iterative algorithm. - - Parameters - ========== - - key - The key of the node which is - to be deleted. - balancing_info: bool - Optional, by default, False - The information needed for updating - the tree is returned if this parameter - is set to True. It is not meant for - user facing APIs. - - Returns - ======= - - True - If the node is deleted successfully. - None - If the node to be deleted doesn't exists. - - Note - ==== - - The node is deleted means that the connection to that - node are removed but the it is still in three. This - is being done to keep the complexity of deletion, O(logn). - """ - raise NotImplementedError("This is an abstract method.") - - def search(self, key, **kwargs): - """ - Searches for the data in the binary search tree - using iterative algorithm. - - Parameters - ========== - - key - The key for searching. - parent: bool - If true then returns index of the - parent of the node with the passed - key. - By default, False - - Returns - ======= - - int - If the node with the passed key is - in the tree. - tuple - The index of the searched node and - the index of the parent of that node. - None - In all other cases. - """ - raise NotImplementedError("This is an abstract method.") - - - def __str__(self): - to_be_printed = ['' for i in range(self.tree._last_pos_filled + 1)] - for i in range(self.tree._last_pos_filled + 1): - if self.tree[i] is not None: - node = self.tree[i] - to_be_printed[i] = (node.left, node.key, node.data, node.right) - return str(to_be_printed) - -class BinarySearchTree(BinaryTree): - """ - Represents binary search trees. 
- - Examples - ======== - - >>> from pydatastructs.trees import BinarySearchTree as BST - >>> b = BST() - >>> b.insert(1, 1) - >>> b.insert(2, 2) - >>> child = b.tree[b.root_idx].right - >>> b.tree[child].data - 2 - >>> b.search(1) - 0 - >>> b.search(-1) is None - True - >>> b.delete(1) is True - True - >>> b.search(1) is None - True - >>> b.delete(2) is True - True - >>> b.search(2) is None - True - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Binary_search_tree - - See Also - ======== - - pydatastructs.trees.binary_tree.BinaryTree - """ - - @classmethod - def methods(cls): - return ['insert', 'search', 'delete', 'select', - 'rank', 'lowest_common_ancestor'] - - left_size = lambda self, node: self.tree[node.left].size \ - if node.left is not None else 0 - right_size = lambda self, node: self.tree[node.right].size \ - if node.right is not None else 0 - def __new__(cls, key=None, root_data=None, comp=None, - is_order_statistic=False, **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.CPP: - if comp is None: - comp = lambda key1, key2: key1 < key2 - return _trees.BinarySearchTree(key, root_data, comp, is_order_statistic, **kwargs) # If any argument is not given, then it is passed as None, except for comp - return super().__new__(cls, key, root_data, comp, is_order_statistic, **kwargs) - - def _update_size(self, start_idx): - if self.is_order_statistic: - walk = start_idx - while walk is not None: - self.tree[walk].size = ( - self.left_size(self.tree[walk]) + - self.right_size(self.tree[walk]) + 1) - walk = self.tree[walk].parent - - def insert(self, key, data=None): - res = self.search(key) - if res is not None: - self.tree[res].data = data - return None - walk = self.root_idx - if self.tree[walk].key is None: - self.tree[walk].key = key - self.tree[walk].data = data - return None - new_node, prev_node, flag = TreeNode(key, data), self.root_idx, True - while flag: - if not self.comparator(key, 
self.tree[walk].key): - if self.tree[walk].right is None: - new_node.parent = prev_node - self.tree.append(new_node) - self.tree[walk].right = self.size - self.size += 1 - flag = False - prev_node = walk = self.tree[walk].right - else: - if self.tree[walk].left is None: - new_node.parent = prev_node - self.tree.append(new_node) - self.tree[walk].left = self.size - self.size += 1 - flag = False - prev_node = walk = self.tree[walk].left - self._update_size(walk) - - def search(self, key, **kwargs): - ret_parent = kwargs.get('parent', False) - parent = None - walk = self.root_idx - if self.tree[walk].key is None: - return None - while walk is not None: - if self.tree[walk].key == key: - break - parent = walk - if self.comparator(key, self.tree[walk].key): - walk = self.tree[walk].left - else: - walk = self.tree[walk].right - return (walk, parent) if ret_parent else walk - - def _bound_helper(self, node_idx, bound_key, is_upper=False): - if node_idx is None: - return None - if self.tree[node_idx].key is None: - return None - - if self.tree[node_idx].key == bound_key: - if not is_upper: - return self.tree[node_idx].key - else: - return self._bound_helper(self.tree[node_idx].right, - bound_key, is_upper) - - if self.comparator(self.tree[node_idx].key, bound_key): - return self._bound_helper(self.tree[node_idx].right, - bound_key, is_upper) - else: - res_bound = self._bound_helper(self.tree[node_idx].left, - bound_key, is_upper) - return res_bound if res_bound is not None else self.tree[node_idx].key - - - def lower_bound(self, key, **kwargs): - """ - Finds the lower bound of the given key in the tree - - Parameters - ========== - - key - The key for comparison - - Examples - ======== - - >>> from pydatastructs.trees import BinarySearchTree as BST - >>> b = BST() - >>> b.insert(10, 10) - >>> b.insert(18, 18) - >>> b.insert(7, 7) - >>> b.lower_bound(9) - 10 - >>> b.lower_bound(7) - 7 - >>> b.lower_bound(20) is None - True - - Returns - ======= - - value - The lower bound 
of the given key. - Returns None if the value doesn't exist - """ - return self._bound_helper(self.root_idx, key) - - - def upper_bound(self, key, **kwargs): - """ - Finds the upper bound of the given key in the tree - - Parameters - ========== - - key - The key for comparison - - Examples - ======== - - >>> from pydatastructs.trees import BinarySearchTree as BST - >>> b = BST() - >>> b.insert(10, 10) - >>> b.insert(18, 18) - >>> b.insert(7, 7) - >>> b.upper_bound(9) - 10 - >>> b.upper_bound(7) - 10 - >>> b.upper_bound(20) is None - True - - Returns - ======= - - value - The upper bound of the given key. - Returns None if the value doesn't exist - """ - return self._bound_helper(self.root_idx, key, True) - - - def delete(self, key, **kwargs): - (walk, parent) = self.search(key, parent=True) - a = None - if walk is None: - return None - if self.tree[walk].left is None and \ - self.tree[walk].right is None: - if parent is None: - self.tree[self.root_idx].data = None - self.tree[self.root_idx].key = None - else: - if self.tree[parent].left == walk: - self.tree[parent].left = None - else: - self.tree[parent].right = None - a = parent - par_key, root_key = (self.tree[parent].key, - self.tree[self.root_idx].key) - new_indices = self.tree.delete(walk) - if new_indices is not None: - a = new_indices[par_key] - self.root_idx = new_indices[root_key] - self._update_size(a) - - elif self.tree[walk].left is not None and \ - self.tree[walk].right is not None: - twalk = self.tree[walk].right - par = walk - flag = False - while self.tree[twalk].left is not None: - flag = True - par = twalk - twalk = self.tree[twalk].left - self.tree[walk].data = self.tree[twalk].data - self.tree[walk].key = self.tree[twalk].key - if flag: - self.tree[par].left = self.tree[twalk].right - else: - self.tree[par].right = self.tree[twalk].right - if self.tree[twalk].right is not None: - self.tree[self.tree[twalk].right].parent = par - if twalk is not None: - a = par - par_key, root_key = 
(self.tree[par].key, - self.tree[self.root_idx].key) - new_indices = self.tree.delete(twalk) - if new_indices is not None: - a = new_indices[par_key] - self.root_idx = new_indices[root_key] - self._update_size(a) - - else: - if self.tree[walk].left is not None: - child = self.tree[walk].left - else: - child = self.tree[walk].right - if parent is None: - self.tree[self.root_idx].left = self.tree[child].left - self.tree[self.root_idx].right = self.tree[child].right - self.tree[self.root_idx].data = self.tree[child].data - self.tree[self.root_idx].key = self.tree[child].key - self.tree[self.root_idx].parent = None - root_key = self.tree[self.root_idx].key - new_indices = self.tree.delete(child) - if new_indices is not None: - self.root_idx = new_indices[root_key] - else: - if self.tree[parent].left == walk: - self.tree[parent].left = child - else: - self.tree[parent].right = child - self.tree[child].parent = parent - a = parent - par_key, root_key = (self.tree[parent].key, - self.tree[self.root_idx].key) - new_indices = self.tree.delete(walk) - if new_indices is not None: - parent = new_indices[par_key] - self.tree[child].parent = new_indices[par_key] - a = new_indices[par_key] - self.root_idx = new_indices[root_key] - self._update_size(a) - - if kwargs.get("balancing_info", False) is not False: - return a - return True - - def select(self, i): - """ - Finds the i-th smallest node in the tree. - - Parameters - ========== - - i: int - A positive integer - - Returns - ======= - - n: TreeNode - The node with the i-th smallest key - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Order_statistic_tree - """ - i -= 1 # The algorithm is based on zero indexing - if i < 0: - raise ValueError("Expected a positive integer, got %d"%(i + 1)) - if i >= self.tree._num: - raise ValueError("%d is greater than the size of the " - "tree which is, %d"%(i + 1, self.tree._num)) - walk = self.root_idx - while walk is not None: - l = self.left_size(self.tree[walk]) - if i == l: - return self.tree[walk] - left_walk = self.tree[walk].left - right_walk = self.tree[walk].right - if left_walk is None and right_walk is None: - raise IndexError("The traversal is terminated " - "due to no child nodes ahead.") - if i < l: - if left_walk is not None and \ - self.comparator(self.tree[left_walk].key, - self.tree[walk].key): - walk = left_walk - else: - walk = right_walk - else: - if right_walk is not None and \ - not self.comparator(self.tree[right_walk].key, - self.tree[walk].key): - walk = right_walk - else: - walk = left_walk - i -= (l + 1) - - def rank(self, x): - """ - Finds the rank of the given node, i.e. - its index in the sorted list of nodes - of the tree. - - Parameters - ========== - - x: key - The key of the node whose rank is to be found out. - """ - walk = self.search(x) - if walk is None: - return None - r = self.left_size(self.tree[walk]) + 1 - while self.tree[walk].key != self.tree[self.root_idx].key: - p = self.tree[walk].parent - if walk == self.tree[p].right: - r += self.left_size(self.tree[p]) + 1 - walk = p - return r - - def _simple_path(self, key, root): - """ - Utility funtion to find the simple path between root and node. 
- - Parameters - ========== - - key: Node.key - Key of the node to be searched - - Returns - ======= - - path: list - """ - - stack = Stack() - stack.push(root) - path = [] - node_idx = -1 - - while not stack.is_empty: - node = stack.pop() - if self.tree[node].key == key: - node_idx = node - break - if self.tree[node].left: - stack.push(self.tree[node].left) - if self.tree[node].right: - stack.push(self.tree[node].right) - - if node_idx == -1: - return path - - while node_idx != 0: - path.append(node_idx) - node_idx = self.tree[node_idx].parent - path.append(0) - path.reverse() - - return path - - def _lca_1(self, j, k): - root = self.root_idx - path1 = self._simple_path(j, root) - path2 = self._simple_path(k, root) - if not path1 or not path2: - raise ValueError("One of two path doesn't exists. See %s, %s" - %(path1, path2)) - - n, m = len(path1), len(path2) - i = j = 0 - while i < n and j < m: - if path1[i] != path2[j]: - return self.tree[path1[i - 1]].key - i += 1 - j += 1 - if path1 < path2: - return self.tree[path1[-1]].key - return self.tree[path2[-1]].key - - def _lca_2(self, j, k): - curr_root = self.root_idx - u, v = self.search(j), self.search(k) - if (u is None) or (v is None): - raise ValueError("One of the nodes with key %s " - "or %s doesn't exits"%(j, k)) - u_left = self.comparator(self.tree[u].key, \ - self.tree[curr_root].key) - v_left = self.comparator(self.tree[v].key, \ - self.tree[curr_root].key) - - while not (u_left ^ v_left): - if u_left and v_left: - curr_root = self.tree[curr_root].left - else: - curr_root = self.tree[curr_root].right - - if curr_root == u or curr_root == v: - if curr_root is None: - return None - return self.tree[curr_root].key - u_left = self.comparator(self.tree[u].key, \ - self.tree[curr_root].key) - v_left = self.comparator(self.tree[v].key, \ - self.tree[curr_root].key) - - if curr_root is None: - return curr_root - return self.tree[curr_root].key - - def lowest_common_ancestor(self, j, k, algorithm=1): - - """ - 
Computes the lowest common ancestor of two nodes. - - Parameters - ========== - - j: Node.key - Key of first node - - k: Node.key - Key of second node - - algorithm: int - The algorithm to be used for computing the - lowest common ancestor. - Optional, by default uses algorithm 1. - - 1 -> Determines the lowest common ancestor by finding - the first intersection of the paths from v and w - to the root. - - 2 -> Modifed version of the algorithm given in the - following publication, - D. Harel. A linear time algorithm for the - lowest common ancestors problem. In 21s - Annual Symposium On Foundations of - Computer Science, pages 308-319, 1980. - - Returns - ======= - - Node.key - The key of the lowest common ancestor in the tree. - if both the nodes are present in the tree. - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Lowest_common_ancestor - - .. [2] https://pdfs.semanticscholar.org/e75b/386cc554214aa0ebd6bd6dbdd0e490da3739.pdf - - """ - return getattr(self, "_lca_"+str(algorithm))(j, k) - -class SelfBalancingBinaryTree(BinarySearchTree): - """ - Represents Base class for all rotation based balancing trees like AVL tree, Red Black tree, Splay Tree. 
- """ - def __new__(cls, key=None, root_data=None, comp=None, - is_order_statistic=False, **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.CPP: - if comp is None: - comp = lambda key1, key2: key1 < key2 - return _trees.SelfBalancingBinaryTree(key, root_data, comp, is_order_statistic, **kwargs) # If any argument is not given, then it is passed as None, except for comp - return super().__new__(cls, key, root_data, comp, is_order_statistic, **kwargs) - - def _right_rotate(self, j, k): - y = self.tree[k].right - if y is not None: - self.tree[y].parent = j - self.tree[j].left = y - self.tree[k].parent = self.tree[j].parent - if self.tree[k].parent is not None: - if self.tree[self.tree[k].parent].left == j: - self.tree[self.tree[k].parent].left = k - else: - self.tree[self.tree[k].parent].right = k - self.tree[j].parent = k - self.tree[k].right = j - kp = self.tree[k].parent - if kp is None: - self.root_idx = k - - def _left_right_rotate(self, j, k): - i = self.tree[k].right - v, w = self.tree[i].left, self.tree[i].right - self.tree[k].right, self.tree[j].left = v, w - if v is not None: - self.tree[v].parent = k - if w is not None: - self.tree[w].parent = j - self.tree[i].left, self.tree[i].right, self.tree[i].parent = \ - k, j, self.tree[j].parent - self.tree[k].parent, self.tree[j].parent = i, i - ip = self.tree[i].parent - if ip is not None: - if self.tree[ip].left == j: - self.tree[ip].left = i - else: - self.tree[ip].right = i - else: - self.root_idx = i - - def _right_left_rotate(self, j, k): - i = self.tree[k].left - v, w = self.tree[i].left, self.tree[i].right - self.tree[k].left, self.tree[j].right = w, v - if v is not None: - self.tree[v].parent = j - if w is not None: - self.tree[w].parent = k - self.tree[i].right, self.tree[i].left, self.tree[i].parent = \ - k, j, self.tree[j].parent - self.tree[k].parent, self.tree[j].parent = i, i - ip = self.tree[i].parent - if ip is not None: - if self.tree[ip].left == j: - 
self.tree[ip].left = i - else: - self.tree[ip].right = i - else: - self.root_idx = i - - def _left_rotate(self, j, k): - y = self.tree[k].left - if y is not None: - self.tree[y].parent = j - self.tree[j].right = y - self.tree[k].parent = self.tree[j].parent - if self.tree[k].parent is not None: - if self.tree[self.tree[k].parent].left == j: - self.tree[self.tree[k].parent].left = k - else: - self.tree[self.tree[k].parent].right = k - self.tree[j].parent = k - self.tree[k].left = j - kp = self.tree[k].parent - if kp is None: - self.root_idx = k - -class CartesianTree(SelfBalancingBinaryTree): - """ - Represents cartesian trees. - - Examples - ======== - - >>> from pydatastructs.trees import CartesianTree as CT - >>> c = CT() - >>> c.insert(1, 4, 1) - >>> c.insert(2, 3, 2) - >>> child = c.tree[c.root_idx].left - >>> c.tree[child].data - 1 - >>> c.search(1) - 0 - >>> c.search(-1) is None - True - >>> c.delete(1) is True - True - >>> c.search(1) is None - True - >>> c.delete(2) is True - True - >>> c.search(2) is None - True - - References - ========== - - .. 
[1] https://www.cs.princeton.edu/courses/archive/spr09/cos423/Lectures/geo-st.pdf - - See Also - ======== - - pydatastructs.trees.binary_trees.SelfBalancingBinaryTree - """ - def __new__(cls, key=None, root_data=None, comp=None, - is_order_statistic=False, **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.CPP: - if comp is None: - comp = lambda key1, key2: key1 < key2 - return _trees.CartesianTree(key, root_data, comp, is_order_statistic, **kwargs) # If any argument is not given, then it is passed as None, except for comp - return super().__new__(cls, key, root_data, comp, is_order_statistic, **kwargs) - - @classmethod - def methods(cls): - return ['__new__', '__str__', 'insert', 'delete'] - - def _bubble_up(self, node_idx): - node = self.tree[node_idx] - parent_idx = self.tree[node_idx].parent - parent = self.tree[parent_idx] - while (node.parent is not None) and (parent.priority > node.priority): - if parent.right == node_idx: - self._left_rotate(parent_idx, node_idx) - else: - self._right_rotate(parent_idx, node_idx) - node = self.tree[node_idx] - parent_idx = self.tree[node_idx].parent - if parent_idx is not None: - parent = self.tree[parent_idx] - if node.parent is None: - self.tree[node_idx].is_root = True - - def _trickle_down(self, node_idx): - node = self.tree[node_idx] - while node.left is not None or node.right is not None: - if node.left is None: - self._left_rotate(node_idx, self.tree[node_idx].right) - elif node.right is None: - self._right_rotate(node_idx, self.tree[node_idx].left) - elif self.tree[node.left].priority < self.tree[node.right].priority: - self._right_rotate(node_idx, self.tree[node_idx].left) - else: - self._left_rotate(node_idx, self.tree[node_idx].right) - node = self.tree[node_idx] - - def insert(self, key, priority, data=None): - super(CartesianTree, self).insert(key, data) - node_idx = super(CartesianTree, self).search(key) - node = self.tree[node_idx] - new_node = CartesianTreeNode(key, 
priority, data) - new_node.parent = node.parent - new_node.left = node.left - new_node.right = node.right - self.tree[node_idx] = new_node - if node.is_root: - self.tree[node_idx].is_root = True - else: - self._bubble_up(node_idx) - - def delete(self, key, **kwargs): - balancing_info = kwargs.get('balancing_info', False) - node_idx = super(CartesianTree, self).search(key) - if node_idx is not None: - self._trickle_down(node_idx) - return super(CartesianTree, self).delete(key, balancing_info = balancing_info) - - def __str__(self): - to_be_printed = ['' for i in range(self.tree._last_pos_filled + 1)] - for i in range(self.tree._last_pos_filled + 1): - if self.tree[i] is not None: - node = self.tree[i] - to_be_printed[i] = (node.left, node.key, node.priority, node.data, node.right) - return str(to_be_printed) - -class Treap(CartesianTree): - """ - Represents treaps. - - Examples - ======== - - >>> from pydatastructs.trees import Treap as T - >>> t = T() - >>> t.insert(1, 1) - >>> t.insert(2, 2) - >>> t.search(1) - 0 - >>> t.search(-1) is None - True - >>> t.delete(1) is True - True - >>> t.search(1) is None - True - >>> t.delete(2) is True - True - >>> t.search(2) is None - True - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Treap - - """ - def __new__(cls, key=None, root_data=None, comp=None, - is_order_statistic=False, **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.CPP: - if comp is None: - comp = lambda key1, key2: key1 < key2 - return _trees.Treap(key, root_data, comp, is_order_statistic, **kwargs) # If any argument is not given, then it is passed as None, except for comp - return super().__new__(cls, key, root_data, comp, is_order_statistic, **kwargs) - - @classmethod - def methods(cls): - return ['__new__', 'insert'] - - def insert(self, key, data=None): - priority = random.random() - super(Treap, self).insert(key, priority, data) - -class AVLTree(SelfBalancingBinaryTree): - """ - Represents AVL trees. 
- - References - ========== - - .. [1] https://courses.cs.washington.edu/courses/cse373/06sp/handouts/lecture12.pdf - .. [2] https://en.wikipedia.org/wiki/AVL_tree - .. [3] http://faculty.cs.niu.edu/~freedman/340/340notes/340avl2.htm - - See Also - ======== - - pydatastructs.trees.binary_trees.BinaryTree - """ - - def __new__(cls, key=None, root_data=None, comp=None, - is_order_statistic=False, **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.CPP: - if comp is None: - comp = lambda key1, key2: key1 < key2 - return _trees.AVLTree(key, root_data, comp, is_order_statistic, **kwargs) # If any argument is not given, then it is passed as None, except for comp - return super().__new__(cls, key, root_data, comp, is_order_statistic, **kwargs) - - @classmethod - def methods(cls): - return ['__new__', 'set_tree', 'insert', 'delete'] - - left_height = lambda self, node: self.tree[node.left].height \ - if node.left is not None else -1 - right_height = lambda self, node: self.tree[node.right].height \ - if node.right is not None else -1 - balance_factor = lambda self, node: self.right_height(node) - \ - self.left_height(node) - - def set_tree(self, arr): - self.tree = arr - - def _right_rotate(self, j, k): - super(AVLTree, self)._right_rotate(j, k) - self.tree[j].height = max(self.left_height(self.tree[j]), - self.right_height(self.tree[j])) + 1 - if self.is_order_statistic: - self.tree[j].size = (self.left_size(self.tree[j]) + - self.right_size(self.tree[j]) + 1) - - def _left_right_rotate(self, j, k): - super(AVLTree, self)._left_right_rotate(j, k) - self.tree[j].height = max(self.left_height(self.tree[j]), - self.right_height(self.tree[j])) + 1 - self.tree[k].height = max(self.left_height(self.tree[k]), - self.right_height(self.tree[k])) + 1 - if self.is_order_statistic: - self.tree[j].size = (self.left_size(self.tree[j]) + - self.right_size(self.tree[j]) + 1) - self.tree[k].size = (self.left_size(self.tree[k]) + - 
self.right_size(self.tree[k]) + 1) - - def _right_left_rotate(self, j, k): - super(AVLTree, self)._right_left_rotate(j, k) - self.tree[j].height = max(self.left_height(self.tree[j]), - self.right_height(self.tree[j])) + 1 - self.tree[k].height = max(self.left_height(self.tree[k]), - self.right_height(self.tree[k])) + 1 - if self.is_order_statistic: - self.tree[j].size = (self.left_size(self.tree[j]) + - self.right_size(self.tree[j]) + 1) - self.tree[k].size = (self.left_size(self.tree[k]) + - self.right_size(self.tree[k]) + 1) - - def _left_rotate(self, j, k): - super(AVLTree, self)._left_rotate(j, k) - self.tree[j].height = max(self.left_height(self.tree[j]), - self.right_height(self.tree[j])) + 1 - self.tree[k].height = max(self.left_height(self.tree[k]), - self.right_height(self.tree[k])) + 1 - if self.is_order_statistic: - self.tree[j].size = (self.left_size(self.tree[j]) + - self.right_size(self.tree[j]) + 1) - - def _balance_insertion(self, curr, last): - walk = last - path = Queue() - path.append(curr), path.append(last) - while walk is not None: - self.tree[walk].height = max(self.left_height(self.tree[walk]), - self.right_height(self.tree[walk])) + 1 - if self.is_order_statistic: - self.tree[walk].size = (self.left_size(self.tree[walk]) + - self.right_size(self.tree[walk]) + 1) - last = path.popleft() - last2last = path.popleft() - if self.balance_factor(self.tree[walk]) not in (1, 0, -1): - l = self.tree[walk].left - if l is not None and l == last and self.tree[l].left == last2last: - self._right_rotate(walk, last) - r = self.tree[walk].right - if r is not None and r == last and self.tree[r].right == last2last: - self._left_rotate(walk, last) - if l is not None and l == last and self.tree[l].right == last2last: - self._left_right_rotate(walk, last) - if r is not None and r == last and self.tree[r].left == last2last: - self._right_left_rotate(walk, last) - path.append(walk), path.append(last) - walk = self.tree[walk].parent - - def insert(self, key, 
data=None): - super(AVLTree, self).insert(key, data) - self._balance_insertion(self.size - 1, self.tree[self.size-1].parent) - - def _balance_deletion(self, start_idx, key): - walk = start_idx - while walk is not None: - self.tree[walk].height = max(self.left_height(self.tree[walk]), - self.right_height(self.tree[walk])) + 1 - if self.is_order_statistic: - self.tree[walk].size = (self.left_size(self.tree[walk]) + - self.right_size(self.tree[walk]) + 1) - if self.balance_factor(self.tree[walk]) not in (1, 0, -1): - if self.balance_factor(self.tree[walk]) < 0: - b = self.tree[walk].left - if self.balance_factor(self.tree[b]) <= 0: - self._right_rotate(walk, b) - else: - self._left_right_rotate(walk, b) - else: - b = self.tree[walk].right - if self.balance_factor(self.tree[b]) >= 0: - self._left_rotate(walk, b) - else: - self._right_left_rotate(walk, b) - walk = self.tree[walk].parent - - - def delete(self, key, **kwargs): - a = super(AVLTree, self).delete(key, balancing_info=True) - self._balance_deletion(a, key) - return True - -class SplayTree(SelfBalancingBinaryTree): - """ - Represents Splay Trees. - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Splay_tree - - """ - - def __new__(cls, key=None, root_data=None, comp=None, - is_order_statistic=False, **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.CPP: - if comp is None: - comp = lambda key1, key2: key1 < key2 - return _trees.SplayTree(key, root_data, comp, is_order_statistic, **kwargs) # If any argument is not given, then it is passed as None, except for comp - return super().__new__(cls, key, root_data, comp, is_order_statistic, **kwargs) - - @classmethod - def methods(cls): - return ['__new__', 'insert', 'delete', 'join', 'split'] - - def _zig(self, x, p): - if self.tree[p].left == x: - super(SplayTree, self)._right_rotate(p, x) - else: - super(SplayTree, self)._left_rotate(p, x) - - def _zig_zig(self, x, p): - super(SplayTree, self)._right_rotate(self.tree[p].parent, p) - super(SplayTree, self)._right_rotate(p, x) - - def _zig_zag(self, p): - super(SplayTree, self)._left_right_rotate(self.tree[p].parent, p) - - def _zag_zag(self, x, p): - super(SplayTree, self)._left_rotate(self.tree[p].parent, p) - super(SplayTree, self)._left_rotate(p, x) - - def _zag_zig(self, p): - super(SplayTree, self)._right_left_rotate(self.tree[p].parent, p) - - def splay(self, x, p): - while self.tree[x].parent is not None: - if self.tree[p].parent is None: - self._zig(x, p) - elif self.tree[p].left == x and \ - self.tree[self.tree[p].parent].left == p: - self._zig_zig(x, p) - elif self.tree[p].right == x and \ - self.tree[self.tree[p].parent].right == p: - self._zag_zag(x, p) - elif self.tree[p].left == x and \ - self.tree[self.tree[p].parent].right == p: - self._zag_zig(p) - else: - self._zig_zag(p) - p = self.tree[x].parent - - def insert(self, key, x): - super(SelfBalancingBinaryTree, self).insert(key, x) - e, p = super(SelfBalancingBinaryTree, self).search(key, parent=True) - self.tree[self.size-1].parent = p - self.splay(e, p) - - def delete(self, x): - e, p = super(SelfBalancingBinaryTree, 
self).search(x, parent=True) - if e is None: - return - self.splay(e, p) - status = super(SelfBalancingBinaryTree, self).delete(x) - return status - - def join(self, other): - """ - Joins two trees current and other such that all elements of - the current splay tree are smaller than the elements of the other tree. - - Parameters - ========== - - other: SplayTree - SplayTree which needs to be joined with the self tree. - - """ - maxm = self.root_idx - while self.tree[maxm].right is not None: - maxm = self.tree[maxm].right - minm = other.root_idx - while other.tree[minm].left is not None: - minm = other.tree[minm].left - if not self.comparator(self.tree[maxm].key, - other.tree[minm].key): - raise ValueError("Elements of %s aren't less " - "than that of %s"%(self, other)) - self.splay(maxm, self.tree[maxm].parent) - idx_update = self.tree._size - for node in other.tree: - if node is not None: - node_copy = TreeNode(node.key, node.data) - if node.left is not None: - node_copy.left = node.left + idx_update - if node.right is not None: - node_copy.right = node.right + idx_update - self.tree.append(node_copy) - else: - self.tree.append(node) - self.tree[self.root_idx].right = \ - other.root_idx + idx_update - - def split(self, x): - """ - Splits current splay tree into two trees such that one tree contains nodes - with key less than or equal to x and the other tree containing - nodes with key greater than x. - - Parameters - ========== - - x: key - Key of the element on the basis of which split is performed. - - Returns - ======= - - other: SplayTree - SplayTree containing elements with key greater than x. 
- - """ - e, p = super(SelfBalancingBinaryTree, self).search(x, parent=True) - if e is None: - return - self.splay(e, p) - other = SplayTree(None, None) - if self.tree[self.root_idx].right is not None: - traverse = BinaryTreeTraversal(self) - elements = traverse.depth_first_search(order='pre_order', node=self.tree[self.root_idx].right) - for i in range(len(elements)): - super(SelfBalancingBinaryTree, other).insert(elements[i].key, elements[i].data) - for j in range(len(elements) - 1, -1, -1): - e, p = super(SelfBalancingBinaryTree, self).search(elements[j].key, parent=True) - self.tree[e] = None - self.tree[self.root_idx].right = None - return other - -class RedBlackTree(SelfBalancingBinaryTree): - """ - Represents Red Black trees. - - Examples - ======== - - >>> from pydatastructs.trees import RedBlackTree as RB - >>> b = RB() - >>> b.insert(1, 1) - >>> b.insert(2, 2) - >>> child = b.tree[b.root_idx].right - >>> b.tree[child].data - 2 - >>> b.search(1) - 0 - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Red%E2%80%93black_tree - - See Also - ======== - - pydatastructs.trees.binary_trees.SelfBalancingBinaryTree - """ - - def __new__(cls, key=None, root_data=None, comp=None, - is_order_statistic=False, **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.CPP: - if comp is None: - comp = lambda key1, key2: key1 < key2 - return _trees.RedBlackTree(key, root_data, comp, is_order_statistic, **kwargs) # If any argument is not given, then it is passed as None, except for comp - return super().__new__(cls, key, root_data, comp, is_order_statistic, **kwargs) - - @classmethod - def methods(cls): - return ['__new__', 'insert', 'delete'] - - def _get_parent(self, node_idx): - return self.tree[node_idx].parent - - def _get_grand_parent(self, node_idx): - parent_idx=self._get_parent(node_idx) - return self.tree[parent_idx].parent - - def _get_sibling(self, node_idx): - parent_idx=self._get_parent(node_idx) - if parent_idx is None: - return None - node = self.tree[parent_idx] - if node_idx==node.left: - sibling_idx=node.right - return sibling_idx - else: - sibling_idx=node.left - return sibling_idx - - def _get_uncle(self, node_idx): - parent_idx=self._get_parent(node_idx) - return self._get_sibling(parent_idx) - - def _is_onleft(self, node_idx): - parent = self._get_parent(node_idx) - if self.tree[parent].left == node_idx: - return True - return False - - def _is_onright(self, node_idx): - if self._is_onleft(node_idx) is False: - return True - return False - - def __fix_insert(self, node_idx): - while self._get_parent(node_idx) is not None and \ - self.tree[self._get_parent(node_idx)].color == 1 and self.tree[node_idx].color==1: - parent_idx=self._get_parent(node_idx) - grand_parent_idx=self._get_grand_parent(node_idx) - uncle_idx = self._get_uncle(node_idx) - if uncle_idx is not None and self.tree[uncle_idx].color == 1: - self.tree[uncle_idx].color = 0 - self.tree[parent_idx].color = 0 - 
self.tree[grand_parent_idx].color = 1 - node_idx= grand_parent_idx - else: - self.tree[self.root_idx].is_root=False - if self._is_onright(parent_idx): - if self._is_onleft(node_idx): - self._right_rotate(parent_idx, node_idx) - node_idx=parent_idx - parent_idx=self._get_parent(node_idx) - node_idx=parent_idx - parent_idx=self._get_parent(node_idx) - self._left_rotate(parent_idx, node_idx) - elif self._is_onleft(parent_idx): - if self._is_onright(node_idx): - self._left_rotate(parent_idx, node_idx) - node_idx=parent_idx - parent_idx=self._get_parent(node_idx) - node_idx=parent_idx - parent_idx=self._get_parent(node_idx) - self._right_rotate(parent_idx, node_idx) - self.tree[node_idx].color = 0 - self.tree[parent_idx].color = 1 - self.tree[self.root_idx].is_root=True - if self.tree[node_idx].is_root: - break - self.tree[self.root_idx].color=0 - - def insert(self, key, data=None): - super(RedBlackTree, self).insert(key, data) - node_idx = super(RedBlackTree, self).search(key) - node = self.tree[node_idx] - new_node = RedBlackTreeNode(key, data) - new_node.parent = node.parent - new_node.left = node.left - new_node.right = node.right - self.tree[node_idx] = new_node - if node.is_root: - self.tree[node_idx].is_root = True - self.tree[node_idx].color=0 - elif self.tree[self.tree[node_idx].parent].color==1: - self.__fix_insert(node_idx) - - def _find_predecessor(self, node_idx): - while self.tree[node_idx].right is not None: - node_idx = self.tree[node_idx].right - return node_idx - - def _transplant_values(self, node_idx1, node_idx2): - parent = self.tree[node_idx1].parent - if self.tree[node_idx1].is_root and self._has_one_child(node_idx1): - self.tree[self.root_idx].key = self.tree[node_idx2].key - self.tree[self.root_idx].data = self.tree[node_idx2].data - self.tree[self.root_idx].left = self.tree[node_idx2].left - self.tree[self.root_idx].right = self.tree[node_idx2].right - self.tree[node_idx1].parent = None - return self.tree[self.root_idx].key - else: - 
self.tree[node_idx1].key = self.tree[node_idx2].key - self.tree[node_idx1].data = self.tree[node_idx2].data - - def _has_one_child(self, node_idx): - if self._is_leaf(node_idx) is False and self._has_two_child(node_idx) is False: - return True - return False - - def _is_leaf(self, node_idx): - if self.tree[node_idx].left is None and self.tree[node_idx].right is None: - return True - return False - - def _has_two_child(self, node_idx): - if self.tree[node_idx].left is not None and self.tree[node_idx].right is not None: - return True - return False - - def __has_red_child(self, node_idx): - left_idx = self.tree[node_idx].left - right_idx = self.tree[node_idx].right - if (left_idx is not None and self.tree[left_idx].color == 1) or \ - (right_idx is not None and self.tree[right_idx].color == 1): - return True - return False - - def _replace_node(self, node_idx): - if self._is_leaf(node_idx): - return None - elif self._has_one_child(node_idx): - if self.tree[node_idx].left is not None: - child = self.tree[node_idx].left - else: - child = self.tree[node_idx].right - return child - else: - return self._find_predecessor(self.tree[node_idx].left) - - def __walk1_walk_isblack(self, color, node_idx1): - if (node_idx1 is None or self.tree[node_idx1].color == 0) and (color == 0): - return True - return False - - def __left_left_siblingcase(self, node_idx): - left_idx = self.tree[node_idx].left - parent = self._get_parent(node_idx) - parent_color = self.tree[parent].color - self.tree[left_idx].color = self.tree[node_idx].color - self.tree[node_idx].color = parent_color - self._right_rotate(parent, node_idx) - - def __right_left_siblingcase(self, node_idx): - left_idx = self.tree[node_idx].left - parent = self._get_parent(node_idx) - parent_color = self.tree[parent].color - self.tree[left_idx].color = parent_color - self._right_rotate(node_idx, left_idx) - child = self._get_parent(node_idx) - self._left_rotate(parent, child) - - def __left_right_siblingcase(self, node_idx): - 
right_idx = self.tree[node_idx].right - parent = self._get_parent(node_idx) - parent_color = self.tree[parent].color - self.tree[right_idx].color = parent_color - self._left_rotate(node_idx, right_idx) - child = self._get_parent(node_idx) - self._right_rotate(parent, child) - - def __right_right_siblingcase(self, node_idx): - right_idx = self.tree[node_idx].right - parent = self._get_parent(node_idx) - parent_color = self.tree[parent].color - self.tree[right_idx].color = self.tree[node_idx].color - self.tree[node_idx].color = parent_color - self._left_rotate(parent, node_idx) - - def __fix_deletion(self, node_idx): - node = self.tree[node_idx] - color = node.color - while node_idx!= self.root_idx and color == 0: - sibling_idx = self._get_sibling(node_idx) - parent_idx = self._get_parent(node_idx) - if sibling_idx is None: - node_idx = parent_idx - continue - else: - if self.tree[sibling_idx].color == 1: - self.tree[self.root_idx].is_root = False - self.tree[parent_idx].color = 1 - self.tree[sibling_idx].color = 0 - if self._is_onleft(sibling_idx): - self._right_rotate(parent_idx, sibling_idx) - else: - self._left_rotate(parent_idx, sibling_idx) - self.tree[self.root_idx].is_root = True - continue - else: - if self.__has_red_child(sibling_idx): - self.tree[self.root_idx].is_root = False - left_idx = self.tree[sibling_idx].left - if self.tree[sibling_idx].left is not None and \ - self.tree[left_idx].color == 1: - if self._is_onleft(sibling_idx): - self.__left_left_siblingcase(sibling_idx) - else: - self.__right_left_siblingcase(sibling_idx) - else: - if self._is_onleft(sibling_idx): - self.__left_right_siblingcase(sibling_idx) - else: - self.__right_right_siblingcase(sibling_idx) - self.tree[self.root_idx].is_root = True - self.tree[parent_idx].color = 0 - else: - self.tree[sibling_idx].color = 1 - if self.tree[parent_idx].color == 0: - node_idx = parent_idx - continue - else: - self.tree[parent_idx].color = 0 - color = 1 - - def _remove_node(self, node_idx): - 
parent = self._get_parent(node_idx) - a = parent - if self._is_leaf(node_idx): - par_key, root_key = (self.tree[parent].key, self.tree[self.root_idx].key) - new_indices = self.tree.delete(node_idx) - if new_indices is not None: - a = new_indices[par_key] - self.root_idx = new_indices[root_key] - elif self._has_one_child(node_idx): - child = self._replace_node(node_idx) - parent = self._get_parent(node_idx) - par_key, root_key = (self.tree[parent].key, self.tree[self.root_idx].key) - new_indices = self.tree.delete(node_idx) - self._update_size(a) - - def _delete_root(self, node_idx, node_idx1): - if self._is_leaf(node_idx): - self.tree[self.root_idx].data = None - self.tree[self.root_idx].key = None - elif self._has_one_child(node_idx): - root_key = self._transplant_values(node_idx, node_idx1) - new_indices = self.tree.delete(node_idx1) - if new_indices is not None: - self.root_idx = new_indices[root_key] - - def __leaf_case(self, node_idx, node_idx1): - walk = node_idx - walk1 = node_idx1 - parent = self._get_parent(node_idx) - color = self.tree[walk].color - if parent is None: - self._delete_root(walk, walk1) - else: - if self.__walk1_walk_isblack(color, walk1): - self.__fix_deletion(walk) - else: - sibling_idx = self._get_sibling(walk) - if sibling_idx is not None: - self.tree[sibling_idx].color = 1 - if self._is_onleft(walk): - self.tree[parent].left = None - else: - self.tree[parent].right = None - self._remove_node(walk) - - def __one_child_case(self, node_idx, node_idx1): - walk = node_idx - walk1 = node_idx1 - walk_original_color = self.tree[walk].color - parent = self._get_parent(node_idx) - if parent is None: - self._delete_root(walk, walk1) - else: - if self._is_onleft(walk): - self.tree[parent].left = walk1 - else: - self.tree[parent].right = walk1 - self.tree[walk1].parent = parent - a = self._remove_node(walk) - if self.__walk1_walk_isblack(walk_original_color, walk1): - self.__fix_deletion(walk1) - else: - self.tree[walk1].color = 0 - - def 
__two_child_case(self, node_idx): - walk = node_idx - successor = self._replace_node(walk) - self._transplant_values(walk, successor) - walk = successor - walk1 = self._replace_node(walk) - return walk, walk1 - - def delete(self, key, **kwargs): - walk = super(RedBlackTree, self).search(key) - if walk is not None: - walk1 = self._replace_node(walk) - if self._has_two_child(walk): - walk, walk1 = self.__two_child_case(walk) - if self._is_leaf(walk): - self.__leaf_case(walk, walk1) - elif self._has_one_child(walk): - self.__one_child_case(walk, walk1) - return True - else: - return None - -class BinaryTreeTraversal(object): - """ - Represents the traversals possible in - a binary tree. - - Parameters - ========== - - tree: BinaryTree - The binary tree for whose traversal - is to be done. - backend: pydatastructs.Backend - The backend to be used. Available backends: Python and C++ - Optional, by default, the Python backend is used. For faster execution, use the C++ backend. - - Traversals - ========== - - - Depth First Search - In Order, Post Order, Pre Order Out Order - - - Breadth First Search - - Examples - ======== - - >>> from pydatastructs import BinarySearchTree as BST - >>> from pydatastructs import BinaryTreeTraversal as BTT - >>> b = BST(2, 2) - >>> b.insert(1, 1) - >>> b.insert(3, 3) - >>> trav = BTT(b) - >>> dfs = trav.depth_first_search() - >>> [str(n) for n in dfs] - ['(None, 1, 1, None)', '(1, 2, 2, 2)', '(None, 3, 3, None)'] - >>> bfs = trav.breadth_first_search() - >>> [str(n) for n in bfs] - ['(1, 2, 2, 2)', '(None, 1, 1, None)', '(None, 3, 3, None)'] - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Tree_traversal - """ - - @classmethod - def methods(cls): - return ['__new__', 'depth_first_search', - 'breadth_first_search'] - - __slots__ = ['tree'] - - def __new__(cls, tree, **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.CPP: - return _trees.BinaryTreeTraversal(tree, **kwargs) - if not isinstance(tree, BinaryTree): - raise TypeError("%s is not a binary tree"%(tree)) - obj = object.__new__(cls) - obj.tree = tree - return obj - - def _pre_order(self, node): - """ - Utility method for computing pre-order - of a binary tree using iterative algorithm. - """ - visit = [] - tree, size = self.tree.tree, self.tree.size - s = Stack() - s.push(node) - while not s.is_empty: - node = s.pop() - visit.append(tree[node]) - if tree[node].right is not None: - s.push(tree[node].right) - if tree[node].left is not None: - s.push(tree[node].left) - return visit - - def _in_order(self, node): - """ - Utility method for computing in-order - of a binary tree using iterative algorithm. - """ - visit = [] - tree, size = self.tree.tree, self.tree.size - s = Stack() - while not s.is_empty or node is not None: - if node is not None: - s.push(node) - node = tree[node].left - else: - node = s.pop() - visit.append(tree[node]) - node = tree[node].right - return visit - - def _post_order(self, node): - """ - Utility method for computing post-order - of a binary tree using iterative algorithm. 
- """ - visit = [] - tree, size = self.tree.tree, self.tree.size - s = Stack() - s.push(node) - last = OneDimensionalArray(int, size) - last.fill(False) - while not s.is_empty: - node = s.peek - l, r = tree[node].left, tree[node].right - cl, cr = l is None or last[l], r is None or last[r] - if cl and cr: - s.pop() - visit.append(tree[node]) - last[node] = True - continue - if not cr: - s.push(r) - if not cl: - s.push(l) - return visit - - def _out_order(self, node): - """ - Utility method for computing out-order - of a binary tree using iterative algorithm. - """ - return reversed(self._in_order(node)) - - def depth_first_search(self, order='in_order', node=None): - """ - Computes the depth first search traversal of the binary - trees. - - Parameters - ========== - - order : str - One of the strings, 'in_order', 'post_order', - 'pre_order', 'out_order'. - By default, it is set to, 'in_order'. - node : int - The index of the node from where the traversal - is to be instantiated. - - Returns - ======= - - list - Each element is of type 'TreeNode'. - """ - if node is None: - node = self.tree.root_idx - if order not in ('in_order', 'post_order', 'pre_order', 'out_order'): - raise NotImplementedError( - "%s order is not implemented yet." - "We only support `in_order`, `post_order`, " - "`pre_order` and `out_order` traversals.") - return getattr(self, '_' + order)(node) - - def breadth_first_search(self, node=None, strategy='queue'): - """ - Computes the breadth first search traversal of a binary tree. - - Parameters - ========== - - node : int - The index of the node from where the traversal has to be instantiated. - By default, set to, root index. - - strategy : str - The strategy using which the computation has to happen. - By default, it is set 'queue'. - - Returns - ======= - - list - Each element of the list is of type `TreeNode`. 
- """ - # TODO: IMPLEMENT ITERATIVE DEEPENING-DEPTH FIRST SEARCH STRATEGY - strategies = ('queue',) - if strategy not in strategies: - raise NotImplementedError( - "%s startegy is not implemented yet"%(strategy)) - if node is None: - node = self.tree.root_idx - q, visit, tree = Queue(), [], self.tree.tree - q.append(node) - while len(q) > 0: - node = q.popleft() - visit.append(tree[node]) - if tree[node].left is not None: - q.append(tree[node].left) - if tree[node].right is not None: - q.append(tree[node].right) - return visit - -class BinaryIndexedTree(object): - """ - Represents binary indexed trees - a.k.a fenwick trees. - - Parameters - ========== - - array: list/tuple - The array whose elements are to be - considered for the queries. - backend: pydatastructs.Backend - The backend to be used. Available backends: Python and C++ - Optional, by default, the Python backend is used. For faster execution, use the C++ backend. - - Examples - ======== - - >>> from pydatastructs import BinaryIndexedTree - >>> bit = BinaryIndexedTree([1, 2, 3]) - >>> bit.get_sum(0, 2) - 6 - >>> bit.update(0, 100) - >>> bit.get_sum(0, 2) - 105 - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Fenwick_tree - """ - - __slots__ = ['tree', 'array', 'flag'] - - def __new__(cls, array, **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.CPP: - return _trees.BinaryIndexedTree(type(array[0]), array, **kwargs) - obj = object.__new__(cls) - obj.array = OneDimensionalArray(type(array[0]), array) - obj.tree = [0] * (obj.array._size + 2) - obj.flag = [0] * (obj.array._size) - for index in range(obj.array._size): - obj.update(index, array[index]) - return obj - - @classmethod - def methods(cls): - return ['update', 'get_prefix_sum', - 'get_sum'] - - def update(self, index, value): - """ - Updates value at the given index. - - Parameters - ========== - - index: int - Index of element to be updated. - - value - The value to be inserted. 
- """ - _index, _value = index, value - if self.flag[index] == 0: - self.flag[index] = 1 - index += 1 - while index < self.array._size + 1: - self.tree[index] += value - index = index + (index & (-index)) - else: - value = value - self.array[index] - index += 1 - while index < self.array._size + 1: - self.tree[index] += value - index = index + (index & (-index)) - self.array[_index] = _value - - def get_prefix_sum(self, index): - """ - Computes sum of elements from index 0 to given index. - - Parameters - ========== - - index: int - Index till which sum has to be calculated. - - Returns - ======= - - sum: int - The required sum. - """ - index += 1 - sum = 0 - while index > 0: - sum += self.tree[index] - index = index - (index & (-index)) - return sum - - def get_sum(self, left_index, right_index): - """ - Get sum of elements from left index to right index. - - Parameters - ========== - - left_index: int - Starting index from where sum has to be computed. - - right_index: int - Ending index till where sum has to be computed. - - Returns - ======= - - sum: int - The required sum - """ - if left_index >= 1: - return self.get_prefix_sum(right_index) - \ - self.get_prefix_sum(left_index - 1) - else: - return self.get_prefix_sum(right_index) diff --git a/lib/python3.12/site-packages/pydatastructs/trees/heaps.py b/lib/python3.12/site-packages/pydatastructs/trees/heaps.py deleted file mode 100644 index 12133a6f1..000000000 --- a/lib/python3.12/site-packages/pydatastructs/trees/heaps.py +++ /dev/null @@ -1,582 +0,0 @@ -from pydatastructs.utils.misc_util import ( - _check_type, TreeNode, BinomialTreeNode, - Backend, raise_if_backend_is_not_python) -from pydatastructs.linear_data_structures.arrays import ( - DynamicOneDimensionalArray, Array) -from pydatastructs.miscellaneous_data_structures.binomial_trees import BinomialTree - -__all__ = [ - 'BinaryHeap', - 'TernaryHeap', - 'DHeap', - 'BinomialHeap' -] - -class Heap(object): - """ - Abstract class for representing heaps. 
- """ - pass - - -class DHeap(Heap): - """ - Represents D-ary Heap. - - Parameters - ========== - - elements: list, tuple, Array - Optional, by default 'None'. - list/tuple/Array of initial TreeNode in Heap. - heap_property: str - If the key stored in each node is - either greater than or equal to - the keys in the node's children - then pass 'max'. - If the key stored in each node is - either less than or equal to - the keys in the node's children - then pass 'min'. - By default, the heap property is - set to 'min'. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs.trees.heaps import DHeap - >>> min_heap = DHeap(heap_property="min", d=3) - >>> min_heap.insert(1, 1) - >>> min_heap.insert(5, 5) - >>> min_heap.insert(7, 7) - >>> min_heap.extract().key - 1 - >>> min_heap.insert(4, 4) - >>> min_heap.extract().key - 4 - - >>> max_heap = DHeap(heap_property='max', d=2) - >>> max_heap.insert(1, 1) - >>> max_heap.insert(5, 5) - >>> max_heap.insert(7, 7) - >>> max_heap.extract().key - 7 - >>> max_heap.insert(6, 6) - >>> max_heap.extract().key - 6 - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/D-ary_heap - """ - __slots__ = ['_comp', 'heap', 'd', 'heap_property', '_last_pos_filled'] - - def __new__(cls, elements=None, heap_property="min", d=4, - **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = Heap.__new__(cls) - obj.heap_property = heap_property - obj.d = d - if heap_property == "min": - obj._comp = lambda key_parent, key_child: key_parent <= key_child - elif heap_property == "max": - obj._comp = lambda key_parent, key_child: key_parent >= key_child - else: - raise ValueError("%s is invalid heap property"%(heap_property)) - if elements is None: - elements = DynamicOneDimensionalArray(TreeNode, 0) - elif _check_type(elements, (list,tuple)): - elements = DynamicOneDimensionalArray(TreeNode, len(elements), elements) - elif _check_type(elements, Array): - elements = DynamicOneDimensionalArray(TreeNode, len(elements), elements._data) - else: - raise ValueError(f'Expected a list/tuple/Array of TreeNode got {type(elements)}') - obj.heap = elements - obj._last_pos_filled = obj.heap._last_pos_filled - obj._build() - return obj - - @classmethod - def methods(cls): - return ['__new__', 'insert', 'extract', '__str__', 'is_empty'] - - def _build(self): - for i in range(self._last_pos_filled + 1): - self.heap[i]._leftmost, self.heap[i]._rightmost = \ - self.d*i + 1, self.d*i + self.d - for i in range((self._last_pos_filled + 1)//self.d, -1, -1): - self._heapify(i) - - def _swap(self, idx1, idx2): - idx1_key, idx1_data = \ - self.heap[idx1].key, self.heap[idx1].data - self.heap[idx1].key, self.heap[idx1].data = \ - self.heap[idx2].key, self.heap[idx2].data - self.heap[idx2].key, self.heap[idx2].data = \ - idx1_key, idx1_data - - def _heapify(self, i): - while True: - target = i - l = self.d*i + 1 - r = self.d*i + self.d - - for j in range(l, r+1): - if j <= self._last_pos_filled: - target = j if self._comp(self.heap[j].key, self.heap[target].key) \ - else target - else: - break - - if 
target != i: - self._swap(target, i) - i = target - else: - break - - def insert(self, key, data=None): - """ - Insert a new element to the heap according to heap property. - - Parameters - ========== - - key - The key for comparison. - data - The data to be inserted. - - Returns - ======= - - None - """ - new_node = TreeNode(key, data) - self.heap.append(new_node) - self._last_pos_filled += 1 - i = self._last_pos_filled - self.heap[i]._leftmost, self.heap[i]._rightmost = self.d*i + 1, self.d*i + self.d - - while True: - parent = (i - 1)//self.d - if i == 0 or self._comp(self.heap[parent].key, self.heap[i].key): - break - else: - self._swap(i, parent) - i = parent - - def extract(self): - """ - Extract root element of the Heap. - - Returns - ======= - - root_element: TreeNode - The TreeNode at the root of the heap, - if the heap is not empty. - - None - If the heap is empty. - """ - if self._last_pos_filled == -1: - raise IndexError("Heap is empty.") - else: - element_to_be_extracted = TreeNode(self.heap[0].key, self.heap[0].data) - self._swap(0, self._last_pos_filled) - self.heap.delete(self._last_pos_filled) - self._last_pos_filled -= 1 - self._heapify(0) - return element_to_be_extracted - - def __str__(self): - to_be_printed = ['' for i in range(self._last_pos_filled + 1)] - for i in range(self._last_pos_filled + 1): - node = self.heap[i] - if node._leftmost <= self._last_pos_filled: - if node._rightmost <= self._last_pos_filled: - children = list(range(node._leftmost, node._rightmost + 1)) - else: - children = list(range(node._leftmost, self._last_pos_filled + 1)) - else: - children = [] - to_be_printed[i] = (node.key, node.data, children) - return str(to_be_printed) - - @property - def is_empty(self): - """ - Checks if the heap is empty. - """ - return self.heap._last_pos_filled == -1 - - -class BinaryHeap(DHeap): - """ - Represents Binary Heap. - - Parameters - ========== - - elements: list, tuple - Optional, by default 'None'. 
- List/tuple of initial elements in Heap. - heap_property: str - If the key stored in each node is - either greater than or equal to - the keys in the node's children - then pass 'max'. - If the key stored in each node is - either less than or equal to - the keys in the node's children - then pass 'min'. - By default, the heap property is - set to 'min'. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs.trees.heaps import BinaryHeap - >>> min_heap = BinaryHeap(heap_property="min") - >>> min_heap.insert(1, 1) - >>> min_heap.insert(5, 5) - >>> min_heap.insert(7, 7) - >>> min_heap.extract().key - 1 - >>> min_heap.insert(4, 4) - >>> min_heap.extract().key - 4 - - >>> max_heap = BinaryHeap(heap_property='max') - >>> max_heap.insert(1, 1) - >>> max_heap.insert(5, 5) - >>> max_heap.insert(7, 7) - >>> max_heap.extract().key - 7 - >>> max_heap.insert(6, 6) - >>> max_heap.extract().key - 6 - - References - ========== - - .. [1] https://en.m.wikipedia.org/wiki/Binary_heap - """ - def __new__(cls, elements=None, heap_property="min", - **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = DHeap.__new__(cls, elements, heap_property, 2) - return obj - - @classmethod - def methods(cls): - return ['__new__'] - - -class TernaryHeap(DHeap): - """ - Represents Ternary Heap. - - Parameters - ========== - - elements: list, tuple - Optional, by default 'None'. - List/tuple of initial elements in Heap. - heap_property: str - If the key stored in each node is - either greater than or equal to - the keys in the node's children - then pass 'max'. - If the key stored in each node is - either less than or equal to - the keys in the node's children - then pass 'min'. - By default, the heap property is - set to 'min'. - backend: pydatastructs.Backend - The backend to be used. 
- Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs.trees.heaps import TernaryHeap - >>> min_heap = TernaryHeap(heap_property="min") - >>> min_heap.insert(1, 1) - >>> min_heap.insert(5, 5) - >>> min_heap.insert(7, 7) - >>> min_heap.insert(3, 3) - >>> min_heap.extract().key - 1 - >>> min_heap.insert(4, 4) - >>> min_heap.extract().key - 3 - - >>> max_heap = TernaryHeap(heap_property='max') - >>> max_heap.insert(1, 1) - >>> max_heap.insert(5, 5) - >>> max_heap.insert(7, 7) - >>> min_heap.insert(3, 3) - >>> max_heap.extract().key - 7 - >>> max_heap.insert(6, 6) - >>> max_heap.extract().key - 6 - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/D-ary_heap - .. [2] https://ece.uwaterloo.ca/~dwharder/aads/Algorithms/d-ary_heaps/Ternary_heaps/ - """ - def __new__(cls, elements=None, heap_property="min", - **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = DHeap.__new__(cls, elements, heap_property, 3) - return obj - - @classmethod - def methods(cls): - return ['__new__'] - - -class BinomialHeap(Heap): - """ - Represents binomial heap. - - Parameters - ========== - - root_list: list/tuple/Array - By default, [] - The list of BinomialTree object references - in sorted order. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Examples - ======== - - >>> from pydatastructs import BinomialHeap - >>> b = BinomialHeap() - >>> b.insert(1, 1) - >>> b.insert(2, 2) - >>> b.find_minimum().key - 1 - >>> b.find_minimum().children[0].key - 2 - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Binomial_heap - """ - __slots__ = ['root_list'] - - def __new__(cls, root_list=None, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - if root_list is None: - root_list = [] - if not all((_check_type(root, BinomialTree)) - for root in root_list): - raise TypeError("The root_list should contain " - "references to objects of BinomialTree.") - obj = Heap.__new__(cls) - obj.root_list = root_list - return obj - - @classmethod - def methods(cls): - return ['__new__', 'merge_tree', 'merge', 'insert', - 'find_minimum', 'is_emtpy', 'decrease_key', 'delete', - 'delete_minimum'] - - def merge_tree(self, tree1, tree2): - """ - Merges two BinomialTree objects. - - Parameters - ========== - - tree1: BinomialTree - - tree2: BinomialTree - """ - if (not _check_type(tree1, BinomialTree)) or \ - (not _check_type(tree2, BinomialTree)): - raise TypeError("Both the trees should be of type " - "BinomalTree.") - ret_value = None - if tree1.root.key <= tree2.root.key: - tree1.add_sub_tree(tree2) - ret_value = tree1 - else: - tree2.add_sub_tree(tree1) - ret_value = tree2 - return ret_value - - def _merge_heap_last_new_tree(self, new_root_list, new_tree): - """ - Merges last tree node in root list with the incoming tree. - """ - pos = -1 - if len(new_root_list) > 0 and new_root_list[pos].order == new_tree.order: - new_root_list[pos] = self.merge_tree(new_root_list[pos], new_tree) - else: - new_root_list.append(new_tree) - - def merge(self, other_heap): - """ - Merges current binomial heap with the given binomial heap. 
- - Parameters - ========== - - other_heap: BinomialHeap - """ - if not _check_type(other_heap, BinomialHeap): - raise TypeError("Other heap is not of type BinomialHeap.") - new_root_list = [] - i, j = 0, 0 - while (i < len(self.root_list)) and \ - (j < len(other_heap.root_list)): - new_tree = None - while self.root_list[i] is None: - i += 1 - while other_heap.root_list[j] is None: - j += 1 - if self.root_list[i].order == other_heap.root_list[j].order: - new_tree = self.merge_tree(self.root_list[i], - other_heap.root_list[j]) - i += 1 - j += 1 - else: - if self.root_list[i].order < other_heap.root_list[j].order: - new_tree = self.root_list[i] - i += 1 - else: - new_tree = other_heap.root_list[j] - j += 1 - self._merge_heap_last_new_tree(new_root_list, new_tree) - - while i < len(self.root_list): - new_tree = self.root_list[i] - self._merge_heap_last_new_tree(new_root_list, new_tree) - i += 1 - while j < len(other_heap.root_list): - new_tree = other_heap.root_list[j] - self._merge_heap_last_new_tree(new_root_list, new_tree) - j += 1 - self.root_list = new_root_list - - def insert(self, key, data=None): - """ - Inserts new node with the given key and data. - - key - The key of the node which can be operated - upon by relational operators. - - data - The data to be stored in the new node. - """ - new_node = BinomialTreeNode(key, data) - new_tree = BinomialTree(root=new_node, order=0) - new_heap = BinomialHeap(root_list=[new_tree]) - self.merge(new_heap) - - def find_minimum(self, **kwargs): - """ - Finds the node with the minimum key. 
- - Returns - ======= - - min_node: BinomialTreeNode - """ - if self.is_empty: - raise IndexError("Binomial heap is empty.") - min_node = None - idx, min_idx = 0, None - for tree in self.root_list: - if ((min_node is None) or - (tree is not None and tree.root is not None and - min_node.key > tree.root.key)): - min_node = tree.root - min_idx = idx - idx += 1 - if kwargs.get('get_index', None) is not None: - return min_node, min_idx - return min_node - - def delete_minimum(self): - """ - Deletes the node with minimum key. - """ - min_node, min_idx = self.find_minimum(get_index=True) - child_root_list = [] - for k, child in enumerate(min_node.children): - if child is not None: - child_root_list.append(BinomialTree(root=child, order=k)) - self.root_list.remove(self.root_list[min_idx]) - child_heap = BinomialHeap(root_list=child_root_list) - self.merge(child_heap) - - @property - def is_empty(self): - return not self.root_list - - def decrease_key(self, node, new_key): - """ - Decreases the key of the given node. - - Parameters - ========== - - node: BinomialTreeNode - The node whose key is to be reduced. - new_key - The new key of the given node, - should be less than the current key. - """ - if node.key <= new_key: - raise ValueError("The new key " - "should be less than current node's key.") - node.key = new_key - while ((not node.is_root) and - (node.parent.key > node.key)): - node.parent.key, node.key = \ - node.key, node.parent.key - node.parent.data, node.data = \ - node.data, node.parent.data - node = node.parent - - def delete(self, node): - """ - Deletes the given node. - - Parameters - ========== - - node: BinomialTreeNode - The node which is to be deleted. 
- """ - self.decrease_key(node, self.find_minimum().key - 1) - self.delete_minimum() diff --git a/lib/python3.12/site-packages/pydatastructs/trees/m_ary_trees.py b/lib/python3.12/site-packages/pydatastructs/trees/m_ary_trees.py deleted file mode 100644 index a06fda9ee..000000000 --- a/lib/python3.12/site-packages/pydatastructs/trees/m_ary_trees.py +++ /dev/null @@ -1,172 +0,0 @@ -from pydatastructs.utils import MAryTreeNode -from pydatastructs.linear_data_structures.arrays import ArrayForTrees -from pydatastructs.utils.misc_util import ( - Backend, raise_if_backend_is_not_python) - -__all__ = [ - 'MAryTree' -] - -class MAryTree(object): - """ - Abstract m-ary tree. - - Parameters - ========== - - key - Required if tree is to be instantiated with - root otherwise not needed. - root_data - Optional, the root node of the binary tree. - If not of type MAryTreeNode, it will consider - root as data and a new root node will - be created. - comp: lambda - Optional, A lambda function which will be used - for comparison of keys. Should return a - bool value. By default it implements less - than operator. - is_order_statistic: bool - Set it to True, if you want to use the - order statistic features of the tree. - max_children - Optional, specifies the maximum number of children - a node can have. Defaults to 2 in case nothing is - specified. - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/M-ary_tree - """ - - __slots__ = ['root_idx', 'max_children', 'comparator', 'tree', 'size', - 'is_order_statistic'] - - - def __new__(cls, key=None, root_data=None, comp=None, - is_order_statistic=False, max_children=2, - **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = object.__new__(cls) - if key is None and root_data is not None: - raise ValueError('Key required.') - key = None if root_data is None else key - root = MAryTreeNode(key, root_data) - root.is_root = True - obj.root_idx = 0 - obj.max_children = max_children - obj.tree, obj.size = ArrayForTrees(MAryTreeNode, [root]), 1 - obj.comparator = lambda key1, key2: key1 < key2 \ - if comp is None else comp - obj.is_order_statistic = is_order_statistic - return obj - - @classmethod - def methods(cls): - return ['__new__', '__str__'] - - def insert(self, key, data=None): - """ - Inserts data by the passed key using iterative - algorithm. - - Parameters - ========== - - key - The key for comparison. - data - The data to be inserted. - - Returns - ======= - - None - """ - raise NotImplementedError("This is an abstract method.") - - def delete(self, key, **kwargs): - """ - Deletes the data with the passed key - using iterative algorithm. - - Parameters - ========== - - key - The key of the node which is - to be deleted. - - Returns - ======= - - True - If the node is deleted successfully. - - None - If the node to be deleted doesn't exists. - - Note - ==== - - The node is deleted means that the connection to that - node are removed but the it is still in tree. - """ - raise NotImplementedError("This is an abstract method.") - - def search(self, key, **kwargs): - """ - Searches for the data in the binary search tree - using iterative algorithm. - - Parameters - ========== - - key - The key for searching. - parent: bool - If true then returns index of the - parent of the node with the passed - key. 
- By default, False - - Returns - ======= - - int - If the node with the passed key is - in the tree. - tuple - The index of the searched node and - the index of the parent of that node. - None - In all other cases. - """ - raise NotImplementedError("This is an abstract method.") - - def to_binary_tree(self): - """ - Converts an m-ary tree to a binary tree. - - Returns - ======= - - TreeNode - The root of the newly created binary tree. - """ - raise NotImplementedError("This is an abstract method.") - - - def __str__(self): - to_be_printed = ['' for i in range(self.tree._last_pos_filled + 1)] - for i in range(self.tree._last_pos_filled + 1): - if self.tree[i] is not None: - node = self.tree[i] - to_be_printed[i] = (node.key, node.data) - for j in node.children: - if j is not None: - to_be_printed[i].append(j) - return str(to_be_printed) diff --git a/lib/python3.12/site-packages/pydatastructs/trees/space_partitioning_trees.py b/lib/python3.12/site-packages/pydatastructs/trees/space_partitioning_trees.py deleted file mode 100644 index f13c1f280..000000000 --- a/lib/python3.12/site-packages/pydatastructs/trees/space_partitioning_trees.py +++ /dev/null @@ -1,242 +0,0 @@ -from pydatastructs.utils import TreeNode -from collections import deque as Queue -from pydatastructs.utils.misc_util import ( - _check_type, Backend, - raise_if_backend_is_not_python) - -__all__ = [ - 'OneDimensionalSegmentTree' -] - -class OneDimensionalSegmentTree(object): - """ - Represents one dimensional segment trees. - - Parameters - ========== - - segs: list/tuple/set - The segs should contains tuples/list/set of size 2 - denoting the start and end points of the intervals. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. 
- - Examples - ======== - - >>> from pydatastructs import OneDimensionalSegmentTree as ODST - >>> segt = ODST([(3, 8), (9, 20)]) - >>> segt.build() - >>> segt.tree[0].key - [False, 2, 3, False] - >>> len(segt.query(4)) - 1 - - Note - ==== - - All the segments are assumed to be closed intervals, - i.e., the ends are points of segments are also included in - computation. - - References - ========== - - .. [1] https://en.wikipedia.org/wiki/Segment_tree - - """ - - __slots__ = ['segments', 'tree', 'root_idx', 'cache'] - - def __new__(cls, segs, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = object.__new__(cls) - if any((not isinstance(seg, (tuple, list, set)) or len(seg) != 2) - for seg in segs): - raise ValueError('%s is invalid set of intervals'%(segs)) - for i in range(len(segs)): - segs[i] = list(segs[i]) - segs[i].sort() - obj.segments = list(segs) - obj.tree, obj.root_idx, obj.cache = [], None, False - return obj - - @classmethod - def methods(cls): - return ['build', 'query', '__str__'] - - def _union(self, i1, i2): - """ - Helper function for taking union of two - intervals. - """ - return TreeNode([i1.key[0], i1.key[1], i2.key[2], i2.key[3]], None) - - def _intersect(self, i1, i2): - """ - Helper function for finding intersection of two - intervals. - """ - if i1 is None or i2 is None: - return False - if i1.key[2] < i2.key[1] or i2.key[2] < i1.key[1]: - return False - c1, c2 = None, None - if i1.key[2] == i2.key[1]: - c1 = (i1.key[3] and i2.key[0]) - if i2.key[2] == i1.key[1]: - c2 = (i2.key[3] and i1.key[0]) - if c1 is False and c2 is False: - return False - return True - - def _contains(self, i1, i2): - """ - Helper function for checking if the first interval - is contained in second interval. 
- """ - if i1 is None or i2 is None: - return False - if i1.key[1] < i2.key[1] and i1.key[2] > i2.key[2]: - return True - if i1.key[1] == i2.key[1] and i1.key[2] > i2.key[2]: - return (i1.key[0] or not i2.key[0]) - if i1.key[1] < i2.key[1] and i1.key[2] == i2.key[2]: - return i1.key[3] or not i2.key[3] - if i1.key[1] == i2.key[1] and i1.key[2] == i2.key[2]: - return not ((not i1.key[3] and i2.key[3]) or (not i1.key[0] and i2.key[0])) - return False - - def _iterate(self, calls, I, idx): - """ - Helper function for filling the calls - stack. Used for imitating the stack based - approach used in recursion. - """ - if self.tree[idx].right is None: - rc = None - else: - rc = self.tree[self.tree[idx].right] - if self.tree[idx].left is None: - lc = None - else: - lc = self.tree[self.tree[idx].left] - if self._intersect(I, rc): - calls.append(self.tree[idx].right) - if self._intersect(I, lc): - calls.append(self.tree[idx].left) - return calls - - def build(self): - """ - Builds the segment tree from the segments, - using iterative algorithm based on queues. 
- """ - if self.cache: - return None - endpoints = [] - for segment in self.segments: - endpoints.extend(segment) - endpoints.sort() - - elem_int = Queue() - elem_int.append(TreeNode([False, endpoints[0] - 1, endpoints[0], False], None)) - i = 0 - while i < len(endpoints) - 1: - elem_int.append(TreeNode([True, endpoints[i], endpoints[i], True], None)) - elem_int.append(TreeNode([False, endpoints[i], endpoints[i+1], False], None)) - i += 1 - elem_int.append(TreeNode([True, endpoints[i], endpoints[i], True], None)) - elem_int.append(TreeNode([False, endpoints[i], endpoints[i] + 1, False], None)) - - self.tree = [] - while len(elem_int) > 1: - m = len(elem_int) - while m >= 2: - I1 = elem_int.popleft() - I2 = elem_int.popleft() - I = self._union(I1, I2) - I.left = len(self.tree) - I.right = len(self.tree) + 1 - self.tree.append(I1), self.tree.append(I2) - elem_int.append(I) - m -= 2 - if m & 1 == 1: - Il = elem_int.popleft() - elem_int.append(Il) - - Ir = elem_int.popleft() - Ir.left, Ir.right = -3, -2 - self.tree.append(Ir) - self.root_idx = -1 - - for segment in self.segments: - I = TreeNode([True, segment[0], segment[1], True], None) - calls = [self.root_idx] - while calls: - idx = calls.pop() - if self._contains(I, self.tree[idx]): - if self.tree[idx].data is None: - self.tree[idx].data = [] - self.tree[idx].data.append(I) - continue - calls = self._iterate(calls, I, idx) - self.cache = True - - def query(self, qx, init_node=None): - """ - Queries the segment tree. - - Parameters - ========== - - qx: int/float - The query point - - init_node: int - The index of the node from which the query process - is to be started. - - Returns - ======= - - intervals: set - The set of the intervals which contain the query - point. - - References - ========== - - .. 
[1] https://en.wikipedia.org/wiki/Segment_tree - """ - if not self.cache: - self.build() - if init_node is None: - init_node = self.root_idx - qn = TreeNode([True, qx, qx, True], None) - intervals = [] - calls = [init_node] - while calls: - idx = calls.pop() - if _check_type(self.tree[idx].data, list): - intervals.extend(self.tree[idx].data) - calls = self._iterate(calls, qn, idx) - return set(intervals) - - def __str__(self): - """ - Used for printing. - """ - if not self.cache: - self.build() - str_tree = [] - for seg in self.tree: - if seg.data is None: - data = None - else: - data = [str(sd) for sd in seg.data] - str_tree.append((seg.left, seg.key, data, seg.right)) - return str(str_tree) diff --git a/lib/python3.12/site-packages/pydatastructs/trees/tests/__init__.py b/lib/python3.12/site-packages/pydatastructs/trees/tests/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/lib/python3.12/site-packages/pydatastructs/trees/tests/test_binary_trees.py b/lib/python3.12/site-packages/pydatastructs/trees/tests/test_binary_trees.py deleted file mode 100644 index 826100b78..000000000 --- a/lib/python3.12/site-packages/pydatastructs/trees/tests/test_binary_trees.py +++ /dev/null @@ -1,820 +0,0 @@ -from pydatastructs.trees.binary_trees import ( - BinaryTree, BinarySearchTree, BinaryTreeTraversal, AVLTree, - ArrayForTrees, BinaryIndexedTree, SelfBalancingBinaryTree, SplayTree, CartesianTree, Treap, RedBlackTree) -from pydatastructs.utils.raises_util import raises -from pydatastructs.utils.misc_util import TreeNode -from copy import deepcopy -from pydatastructs.utils.misc_util import Backend -import random -from pydatastructs.utils._backend.cpp import _nodes - -def _test_BinarySearchTree(backend): - BST = BinarySearchTree - b = BST(8, 8, backend=backend) - b.delete(8) - b.insert(8, 8) - b.insert(3, 3) - b.insert(10, 10) - b.insert(1, 1) - b.insert(6, 6) - b.insert(4, 4) - b.insert(7, 7) - b.insert(14, 14) - b.insert(13, 13) - # Explicit check for 
the __str__ method of Binary Trees Class - assert str(b) == \ - ("[(1, 8, 8, 2), (3, 3, 3, 4), (None, 10, 10, 7), (None, 1, 1, None), " - "(5, 6, 6, 6), (None, 4, 4, None), (None, 7, 7, None), (8, 14, 14, None), " - "(None, 13, 13, None)]") - assert b.root_idx == 0 - - assert b.tree[0].left == 1 - assert b.tree[0].key == 8 - assert b.tree[0].data == 8 - assert b.tree[0].right == 2 - - trav = BinaryTreeTraversal(b, backend=backend) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [1, 3, 4, 6, 7, 8, 10, 13, 14] - assert [node.key for node in pre_order] == [8, 3, 1, 6, 4, 7, 10, 14, 13] - - assert b.search(10) == 2 - assert b.search(-1) is None - assert b.delete(13) is True - assert b.search(13) is None - assert b.delete(10) is True - assert b.search(10) is None - assert b.delete(3) is True - assert b.search(3) is None - assert b.delete(13) is None - - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [1, 4, 6, 7, 8, 14] - assert [node.key for node in pre_order] == [8, 4, 1, 6, 7, 14] - - b.delete(7) - b.delete(6) - b.delete(1) - b.delete(4) - - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [8, 14] - assert [node.key for node in pre_order] == [8, 14] - - bc = BST(1, 1, backend=backend) - assert bc.insert(1, 2) is None - - b = BST(-8, 8, backend=backend) - b.insert(-3, 3) - b.insert(-10, 10) - b.insert(-1, 1) - b.insert(-6, 6) - b.insert(-4, 4) - b.insert(-7, 7) - b.insert(-14, 14) - b.insert(-13, 13) - - b.delete(-13) - b.delete(-10) - b.delete(-3) - b.delete(-13) - assert str(b) == "[(7, -8, 8, 1), (4, -1, 1, None), '', '', (6, -6, 6, 5), (None, -4, 4, None), (None, -7, 7, None), (None, -14, 14, None)]" - - bl = BST(backend=backend) - nodes = [50, 30, 
90, 70, 100, 60, 80, 55, 20, 40, 15, 10, 16, 17, 18] - for node in nodes: - bl.insert(node, node) - - assert bl.lowest_common_ancestor(80, 55, 2) == 70 - assert bl.lowest_common_ancestor(60, 70, 2) == 70 - assert bl.lowest_common_ancestor(18, 18, 2) == 18 - assert bl.lowest_common_ancestor(40, 90, 2) == 50 - - assert bl.lowest_common_ancestor(18, 10, 2) == 15 - assert bl.lowest_common_ancestor(55, 100, 2) == 90 - assert bl.lowest_common_ancestor(16, 80, 2) == 50 - assert bl.lowest_common_ancestor(30, 55, 2) == 50 - - assert raises(ValueError, lambda: bl.lowest_common_ancestor(60, 200, 2)) - assert raises(ValueError, lambda: bl.lowest_common_ancestor(200, 60, 2)) - assert raises(ValueError, lambda: bl.lowest_common_ancestor(-3, 4, 2)) - - assert bl.lowest_common_ancestor(80, 55, 1) == 70 - assert bl.lowest_common_ancestor(60, 70, 1) == 70 - assert bl.lowest_common_ancestor(18, 18, 1) == 18 - assert bl.lowest_common_ancestor(40, 90, 1) == 50 - - assert bl.lowest_common_ancestor(18, 10, 1) == 15 - assert bl.lowest_common_ancestor(55, 100, 1) == 90 - assert bl.lowest_common_ancestor(16, 80, 1) == 50 - assert bl.lowest_common_ancestor(30, 55, 1) == 50 - - assert raises(ValueError, lambda: bl.lowest_common_ancestor(60, 200, 1)) - assert raises(ValueError, lambda: bl.lowest_common_ancestor(200, 60, 1)) - assert raises(ValueError, lambda: bl.lowest_common_ancestor(-3, 4, 1)) - -def test_BinarySearchTree(): - _test_BinarySearchTree(Backend.PYTHON) - -def test_cpp_BinarySearchTree(): - _test_BinarySearchTree(Backend.CPP) - -def _test_BinaryTreeTraversal(backend): - BST = BinarySearchTree - BTT = BinaryTreeTraversal - b = BST('F', 'F', backend=backend) - b.insert('B', 'B') - b.insert('A', 'A') - b.insert('G', 'G') - b.insert('D', 'D') - b.insert('C', 'C') - b.insert('E', 'E') - b.insert('I', 'I') - b.insert('H', 'H') - - trav = BTT(b, backend=backend) - pre = trav.depth_first_search(order='pre_order') - assert [node.key for node in pre] == ['F', 'B', 'A', 'D', 'C', 'E', 'G', 
'I', 'H'] - - ino = trav.depth_first_search() - assert [node.key for node in ino] == ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I'] - - out = trav.depth_first_search(order='out_order') - assert [node.key for node in out] == ['I', 'H', 'G', 'F', 'E', 'D', 'C', 'B', 'A'] - - post = trav.depth_first_search(order='post_order') - assert [node.key for node in post] == ['A', 'C', 'E', 'D', 'B', 'H', 'I', 'G', 'F'] - - bfs = trav.breadth_first_search() - assert [node.key for node in bfs] == ['F', 'B', 'G', 'A', 'D', 'I', 'C', 'E', 'H'] - - assert raises(NotImplementedError, lambda: trav.breadth_first_search(strategy='iddfs')) - assert raises(NotImplementedError, lambda: trav.depth_first_search(order='in_out_order')) - assert raises(TypeError, lambda: BTT(1)) - -def test_BinaryTreeTraversal(): - _test_BinaryTreeTraversal(Backend.PYTHON) - -def test_cpp_BinaryTreeTraversal(): - _test_BinaryTreeTraversal(Backend.CPP) - -def _test_AVLTree(backend): - a = AVLTree('M', 'M', backend=backend) - a.insert('N', 'N') - a.insert('O', 'O') - a.insert('L', 'L') - a.insert('K', 'K') - a.insert('Q', 'Q') - a.insert('P', 'P') - a.insert('H', 'H') - a.insert('I', 'I') - a.insert('A', 'A') - assert a.root_idx == 1 - - trav = BinaryTreeTraversal(a, backend=backend) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == ['A', 'H', 'I', 'K', 'L', 'M', 'N', 'O', 'P', 'Q'] - assert [node.key for node in pre_order] == ['N', 'I', 'H', 'A', 'L', 'K', 'M', 'P', 'O', 'Q'] - - assert [a.balance_factor(a.tree[i]) for i in range(a.tree.size) if a.tree[i] is not None] == \ - [0, -1, 0, 0, 0, 0, 0, -1, 0, 0] - a1 = AVLTree(1, 1, backend=backend) - a1.insert(2, 2) - a1.insert(3, 3) - a1.insert(4, 4) - a1.insert(5, 5) - - trav = BinaryTreeTraversal(a1, backend=backend) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in 
in_order] == [1, 2, 3, 4, 5] - assert [node.key for node in pre_order] == [2, 1, 4, 3, 5] - - a3 = AVLTree(-1, 1, backend=backend) - a3.insert(-2, 2) - a3.insert(-3, 3) - a3.insert(-4, 4) - a3.insert(-5, 5) - - trav = BinaryTreeTraversal(a3, backend=backend) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [-5, -4, -3, -2, -1] - assert [node.key for node in pre_order] == [-2, -4, -5, -3, -1] - - a2 = AVLTree(backend=backend) - a2.insert(1, 1) - a2.insert(1, 1) - - trav = BinaryTreeTraversal(a2, backend=backend) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [1] - assert [node.key for node in pre_order] == [1] - - a3 = AVLTree(backend=backend) - a3.set_tree( ArrayForTrees(TreeNode, 0, backend=backend) ) - for i in range(0,7): - a3.tree.append(TreeNode(i, i, backend=backend)) - a3.tree[0].left = 1 - a3.tree[0].right = 6 - a3.tree[1].left = 5 - a3.tree[1].right = 2 - a3.tree[2].left = 3 - a3.tree[2].right = 4 - a3._left_right_rotate(0, 1) - assert str(a3) == "[(4, 0, 0, 6), (5, 1, 1, 3), (1, 2, 2, 0), (None, 3, 3, None), (None, 4, 4, None), (None, 5, 5, None), (None, 6, 6, None)]" - - trav = BinaryTreeTraversal(a3, backend=backend) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [5, 1, 3, 2, 4, 0, 6] - assert [node.key for node in pre_order] == [2, 1, 5, 3, 0, 4, 6] - - a4 = AVLTree(backend=backend) - a4.set_tree( ArrayForTrees(TreeNode, 0, backend=backend) ) - for i in range(0,7): - a4.tree.append(TreeNode(i, i,backend=backend)) - a4.tree[0].left = 1 - a4.tree[0].right = 2 - a4.tree[2].left = 3 - a4.tree[2].right = 4 - a4.tree[3].left = 5 - a4.tree[3].right = 6 - a4._right_left_rotate(0, 2) - - trav = BinaryTreeTraversal(a4, backend=backend) - 
in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [1, 0, 5, 3, 6, 2, 4] - assert [node.key for node in pre_order] == [3,0,1,5,2,6,4] - - a5 = AVLTree(is_order_statistic=True,backend=backend) - if backend==Backend.PYTHON: - a5.set_tree( ArrayForTrees(TreeNode, [ - TreeNode(10, 10), - TreeNode(5, 5), - TreeNode(17, 17), - TreeNode(2, 2), - TreeNode(9, 9), - TreeNode(12, 12), - TreeNode(20, 20), - TreeNode(3, 3), - TreeNode(11, 11), - TreeNode(15, 15), - TreeNode(18, 18), - TreeNode(30, 30), - TreeNode(13, 13), - TreeNode(33, 33) - ]) ) - else: - a5.set_tree( ArrayForTrees(_nodes.TreeNode, [ - TreeNode(10, 10,backend=backend), - TreeNode(5, 5,backend=backend), - TreeNode(17, 17,backend=backend), - TreeNode(2, 2,backend=backend), - TreeNode(9, 9,backend=backend), - TreeNode(12, 12,backend=backend), - TreeNode(20, 20,backend=backend), - TreeNode(3, 3,backend=backend), - TreeNode(11, 11,backend=backend), - TreeNode(15, 15,backend=backend), - TreeNode(18, 18,backend=backend), - TreeNode(30, 30,backend=backend), - TreeNode(13, 13,backend=backend), - TreeNode(33, 33,backend=backend) - ],backend=backend) ) - - a5.tree[0].left, a5.tree[0].right, a5.tree[0].parent, a5.tree[0].height = \ - 1, 2, None, 4 - a5.tree[1].left, a5.tree[1].right, a5.tree[1].parent, a5.tree[1].height = \ - 3, 4, 0, 2 - a5.tree[2].left, a5.tree[2].right, a5.tree[2].parent, a5.tree[2].height = \ - 5, 6, 0, 3 - a5.tree[3].left, a5.tree[3].right, a5.tree[3].parent, a5.tree[3].height = \ - None, 7, 1, 1 - a5.tree[4].left, a5.tree[4].right, a5.tree[4].parent, a5.tree[4].height = \ - None, None, 1, 0 - a5.tree[5].left, a5.tree[5].right, a5.tree[5].parent, a5.tree[5].height = \ - 8, 9, 2, 2 - a5.tree[6].left, a5.tree[6].right, a5.tree[6].parent, a5.tree[6].height = \ - 10, 11, 2, 2 - a5.tree[7].left, a5.tree[7].right, a5.tree[7].parent, a5.tree[7].height = \ - None, None, 3, 0 - a5.tree[8].left, 
a5.tree[8].right, a5.tree[8].parent, a5.tree[8].height = \ - None, None, 5, 0 - a5.tree[9].left, a5.tree[9].right, a5.tree[9].parent, a5.tree[9].height = \ - 12, None, 5, 1 - a5.tree[10].left, a5.tree[10].right, a5.tree[10].parent, a5.tree[10].height = \ - None, None, 6, 0 - a5.tree[11].left, a5.tree[11].right, a5.tree[11].parent, a5.tree[11].height = \ - None, 13, 6, 1 - a5.tree[12].left, a5.tree[12].right, a5.tree[12].parent, a5.tree[12].height = \ - None, None, 9, 0 - a5.tree[13].left, a5.tree[13].right, a5.tree[13].parent, a5.tree[13].height = \ - None, None, 11, 0 - - # testing order statistics - a5.tree[0].size = 14 - a5.tree[1].size = 4 - a5.tree[2].size = 9 - a5.tree[3].size = 2 - a5.tree[4].size = 1 - a5.tree[5].size = 4 - a5.tree[6].size = 4 - a5.tree[7].size = 1 - a5.tree[8].size = 1 - a5.tree[9].size = 2 - a5.tree[10].size = 1 - a5.tree[11].size = 2 - a5.tree[12].size = 1 - a5.tree[13].size = 1 - assert str(a5) == "[(1, 10, 10, 2), (3, 5, 5, 4), (5, 17, 17, 6), (None, 2, 2, 7), (None, 9, 9, None), (8, 12, 12, 9), (10, 20, 20, 11), (None, 3, 3, None), (None, 11, 11, None), (12, 15, 15, None), (None, 18, 18, None), (None, 30, 30, 13), (None, 13, 13, None), (None, 33, 33, None)]" - - assert raises(ValueError, lambda: a5.select(0)) - assert raises(ValueError, lambda: a5.select(15)) - - assert a5.rank(-1) is None - def test_select_rank(expected_output): - if backend==Backend.PYTHON: - output = [] - for i in range(len(expected_output)): - output.append(a5.select(i + 1).key) - assert output == expected_output - output = [] - expected_ranks = [i + 1 for i in range(len(expected_output))] - for i in range(len(expected_output)): - output.append(a5.rank(expected_output[i])) - assert output == expected_ranks - - test_select_rank([2, 3, 5, 9, 10, 11, 12, 13, 15, 17, 18, 20, 30, 33]) - a5.delete(9) - a5.delete(13) - a5.delete(20) - assert str(a5) == "[(7, 10, 10, 5), (None, 5, 5, None), (0, 17, 17, 6), (None, 2, 2, None), '', (8, 12, 12, 9), (10, 30, 30, 13), (3, 3, 
3, 1), (None, 11, 11, None), (None, 15, 15, None), (None, 18, 18, None), '', '', (None, 33, 33, None)]" - - trav = BinaryTreeTraversal(a5, backend=backend) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [2, 3, 5, 10, 11, 12, 15, 17, 18, 30, 33] - assert [node.key for node in pre_order] == [17, 10, 3, 2, 5, 12, 11, 15, 30, 18, 33] - - test_select_rank([2, 3, 5, 10, 11, 12, 15, 17, 18, 30, 33]) - a5.delete(10) - a5.delete(17) - assert str(a5) == "[(7, 11, 11, 5), (None, 5, 5, None), (0, 18, 18, 6), (None, 2, 2, None), '', (None, 12, 12, 9), (None, 30, 30, 13), (3, 3, 3, 1), '', (None, 15, 15, None), '', '', '', (None, 33, 33, None)]" - test_select_rank([2, 3, 5, 11, 12, 15, 18, 30, 33]) - a5.delete(11) - a5.delete(30) - test_select_rank([2, 3, 5, 12, 15, 18, 33]) - a5.delete(12) - test_select_rank([2, 3, 5, 15, 18, 33]) - a5.delete(15) - test_select_rank([2, 3, 5, 18, 33]) - a5.delete(18) - test_select_rank([2, 3, 5, 33]) - a5.delete(33) - test_select_rank([2, 3, 5]) - a5.delete(5) - test_select_rank([2, 3]) - a5.delete(3) - test_select_rank([2]) - a5.delete(2) - test_select_rank([]) - assert str(a5) == "[(None, None, None, None)]" - -def test_AVLTree(): - _test_AVLTree(backend=Backend.PYTHON) -def test_cpp_AVLTree(): - _test_AVLTree(backend=Backend.CPP) - -def _test_BinaryIndexedTree(backend): - - FT = BinaryIndexedTree - - t = FT([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], backend=backend) - - assert t.get_sum(0, 2) == 6 - assert t.get_sum(0, 4) == 15 - assert t.get_sum(0, 9) == 55 - t.update(0, 100) - assert t.get_sum(0, 2) == 105 - assert t.get_sum(0, 4) == 114 - assert t.get_sum(1, 9) == 54 - -def test_BinaryIndexedTree(): - _test_BinaryIndexedTree(Backend.PYTHON) - -def test_cpp_BinaryIndexedTree(): - _test_BinaryIndexedTree(Backend.CPP) - -def _test_CartesianTree(backend): - tree = CartesianTree(backend=backend) - tree.insert(3, 1, 3) - tree.insert(1, 6, 1) - 
tree.insert(0, 9, 0) - tree.insert(5, 11, 5) - tree.insert(4, 14, 4) - tree.insert(9, 17, 9) - tree.insert(7, 22, 7) - tree.insert(6, 42, 6) - tree.insert(8, 49, 8) - tree.insert(2, 99, 2) - # Explicit check for the redefined __str__ method of Cartesian Trees Class - - trav = BinaryTreeTraversal(tree, backend=backend) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] - assert [node.key for node in pre_order] == [3, 1, 0, 2, 5, 4, 9, 7, 6, 8] - - tree.insert(1.5, 4, 1.5) - - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [0, 1, 1.5, 2, 3, 4, 5, 6, 7, 8, 9] - assert [node.key for node in pre_order] == [3, 1.5, 1, 0, 2, 5, 4, 9, 7, 6, 8] - - k = tree.search(1.5) - assert tree.tree[tree.tree[k].parent].key == 3 - tree.delete(1.5) - assert tree.root_idx == 0 - tree.tree[tree.tree[tree.root_idx].left].key == 1 - tree.delete(8) - assert tree.search(8) is None - tree.delete(7) - assert tree.search(7) is None - tree.delete(3) - assert tree.search(3) is None - assert tree.delete(18) is None - -def test_CartesianTree(): - _test_CartesianTree(backend=Backend.PYTHON) - -def test_cpp_CartesianTree(): - _test_CartesianTree(backend=Backend.CPP) - -def _test_Treap(backend): - - random.seed(0) - tree = Treap(backend=backend) - tree.insert(7, 7) - tree.insert(2, 2) - tree.insert(3, 3) - tree.insert(4, 4) - tree.insert(5, 5) - - assert isinstance(tree.tree[0].priority, float) - tree.delete(1) - assert tree.search(1) is None - assert tree.search(2) == 1 - assert tree.delete(1) is None - -def test_Treap(): - _test_Treap(Backend.PYTHON) - -def test_cpp_Treap(): - _test_Treap(Backend.CPP) - -def _test_SelfBalancingBinaryTree(backend): - """ - https://github.com/codezonediitj/pydatastructs/issues/234 - """ - tree = 
SelfBalancingBinaryTree(backend=backend) - tree.insert(5, 5) - tree.insert(5.5, 5.5) - tree.insert(4.5, 4.5) - tree.insert(4.6, 4.6) - tree.insert(4.4, 4.4) - tree.insert(4.55, 4.55) - tree.insert(4.65, 4.65) - original_tree = str(tree) - tree._right_rotate(3, 5) - - assert str(tree) == "[(2, 5, 5, 1), (None, 5.5, 5.5, None), (4, 4.5, 4.5, 5), (None, 4.6, 4.6, 6), (None, 4.4, 4.4, None), (None, 4.55, 4.55, 3), (None, 4.65, 4.65, None)]" - assert tree.tree[3].parent == 5 - assert tree.tree[2].right != 3 - assert tree.tree[tree.tree[5].parent].right == 5 - assert tree.root_idx == 0 - - trav = BinaryTreeTraversal(tree, backend=backend) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [4.4, 4.5, 4.55, 4.6, 4.65, 5, 5.5] - assert [node.key for node in pre_order] == [5, 4.5, 4.4, 4.55, 4.6, 4.65, 5.5] - - assert tree.tree[tree.tree[3].parent].right == 3 - tree._left_rotate(5, 3) - assert str(tree) == original_tree - tree.insert(4.54, 4.54) - tree.insert(4.56, 4.56) - tree._left_rotate(5, 8) - assert tree.tree[tree.tree[8].parent].left == 8 - assert str(tree) == "[(2, 5, 5, 1), (None, 5.5, 5.5, None), (4, 4.5, 4.5, 3), (8, 4.6, 4.6, 6), (None, 4.4, 4.4, None), (7, 4.55, 4.55, None), (None, 4.65, 4.65, None), (None, 4.54, 4.54, None), (5, 4.56, 4.56, None)]" - - tree._left_right_rotate(0, 2) - assert str(tree) == "[(6, 5, 5, 1), (None, 5.5, 5.5, None), (4, 4.5, 4.5, 8), (2, 4.6, 4.6, 0), (None, 4.4, 4.4, None), (7, 4.55, 4.55, None), (None, 4.65, 4.65, None), (None, 4.54, 4.54, None), (5, 4.56, 4.56, None)]" - - tree._right_left_rotate(0, 2) - assert str(tree) == "[(6, 5, 5, None), (None, 5.5, 5.5, None), (None, 4.5, 4.5, 8), (2, 4.6, 4.6, 4), (0, 4.4, 4.4, 2), (7, 4.55, 4.55, None), (None, 4.65, 4.65, None), (None, 4.54, 4.54, None), (5, 4.56, 4.56, None)]" - -def test_SelfBalancingBinaryTree(): - _test_SelfBalancingBinaryTree(Backend.PYTHON) -def 
test_cpp_SelfBalancingBinaryTree(): - _test_SelfBalancingBinaryTree(Backend.CPP) - -def _test_SplayTree(backend): - t = SplayTree(100, 100, backend=backend) - t.insert(50, 50) - t.insert(200, 200) - t.insert(40, 40) - t.insert(30, 30) - t.insert(20, 20) - t.insert(55, 55) - assert str(t) == "[(None, 100, 100, None), (None, 50, 50, None), (0, 200, 200, None), (None, 40, 40, 1), (5, 30, 30, 3), (None, 20, 20, None), (4, 55, 55, 2)]" - assert t.root_idx == 6 - - trav = BinaryTreeTraversal(t, backend=backend) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [20, 30, 40, 50, 55, 100, 200] - assert [node.key for node in pre_order] == [55, 30, 20, 40, 50, 200, 100] - - t.delete(40) - - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [20, 30, 50, 55, 100, 200] - assert [node.key for node in pre_order] == [50, 30, 20, 55, 200, 100] - - t.delete(150) - - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [20, 30, 50, 55, 100, 200] - assert [node.key for node in pre_order] == [50, 30, 20, 55, 200, 100] - - t1 = SplayTree(1000, 1000, backend=backend) - t1.insert(2000, 2000) - - trav2 = BinaryTreeTraversal(t1, backend=backend) - in_order = trav2.depth_first_search(order='in_order') - pre_order = trav2.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [1000, 2000] - assert [node.key for node in pre_order] == [2000, 1000] - - t.join(t1) - assert str(t) == "[(None, 100, 100, None), '', (6, 200, 200, 8), (4, 50, 50, None), (5, 30, 30, None), (None, 20, 20, None), (3, 55, 55, 0), (None, 1000, 1000, None), (7, 2000, 2000, None), '']" - - if backend == Backend.PYTHON: - trav3 = BinaryTreeTraversal(t, backend=backend) - in_order = 
trav3.depth_first_search(order='in_order') - pre_order = trav3.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [20, 30, 50, 55, 100, 200, 1000, 2000] - assert [node.key for node in pre_order] == [200, 55, 50, 30, 20, 100, 2000, 1000] - - s = t.split(200) - assert str(s) == "[(1, 2000, 2000, None), (None, 1000, 1000, None)]" - - trav4 = BinaryTreeTraversal(s, backend=backend) - in_order = trav4.depth_first_search(order='in_order') - pre_order = trav4.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [1000, 2000] - assert [node.key for node in pre_order] == [2000, 1000] - - if backend == Backend.PYTHON: - trav5 = BinaryTreeTraversal(t, backend=backend) - in_order = trav5.depth_first_search(order='in_order') - pre_order = trav5.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [20, 30, 50, 55, 100, 200] - assert [node.key for node in pre_order] == [200, 55, 50, 30, 20, 100] - -def test_SplayTree(): - _test_SplayTree(Backend.PYTHON) - -def test_cpp_SplayTree(): - _test_SplayTree(Backend.CPP) - -def _test_RedBlackTree(backend): - tree = RedBlackTree(backend=backend) - tree.insert(10, 10) - tree.insert(18, 18) - tree.insert(7, 7) - tree.insert(15, 15) - tree.insert(16, 16) - tree.insert(30, 30) - tree.insert(25, 25) - tree.insert(40, 40) - tree.insert(60, 60) - tree.insert(2, 2) - tree.insert(17, 17) - tree.insert(6, 6) - assert str(tree) == "[(11, 10, 10, 3), (10, 18, 18, None), (None, 7, 7, None), (None, 15, 15, None), (0, 16, 16, 6), (None, 30, 30, None), (1, 25, 25, 7), (5, 40, 40, 8), (None, 60, 60, None), (None, 2, 2, None), (None, 17, 17, None), (9, 6, 6, 2)]" - assert tree.root_idx == 4 - - trav = BinaryTreeTraversal(tree, backend=backend) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [2, 6, 7, 10, 15, 16, 17, 18, 25, 30, 40, 60] - assert [node.key for node 
in pre_order] == [16, 10, 6, 2, 7, 15, 25, 18, 17, 40, 30, 60] - - assert tree.lower_bound(0) == 2 - assert tree.lower_bound(2) == 2 - assert tree.lower_bound(3) == 6 - assert tree.lower_bound(7) == 7 - assert tree.lower_bound(25) == 25 - assert tree.lower_bound(32) == 40 - assert tree.lower_bound(41) == 60 - assert tree.lower_bound(60) == 60 - assert tree.lower_bound(61) is None - - assert tree.upper_bound(0) == 2 - assert tree.upper_bound(2) == 6 - assert tree.upper_bound(3) == 6 - assert tree.upper_bound(7) == 10 - assert tree.upper_bound(25) == 30 - assert tree.upper_bound(32) == 40 - assert tree.upper_bound(41) == 60 - assert tree.upper_bound(60) is None - assert tree.upper_bound(61) is None - - tree = RedBlackTree(backend=backend) - - assert tree.lower_bound(1) is None - assert tree.upper_bound(0) is None - - tree.insert(10) - tree.insert(20) - tree.insert(30) - tree.insert(40) - tree.insert(50) - tree.insert(60) - tree.insert(70) - tree.insert(80) - tree.insert(90) - tree.insert(100) - tree.insert(110) - tree.insert(120) - tree.insert(130) - tree.insert(140) - tree.insert(150) - tree.insert(160) - tree.insert(170) - tree.insert(180) - assert str(tree) == "[(None, 10, None, None), (0, 20, None, 2), (None, 30, None, None), (1, 40, None, 5), (None, 50, None, None), (4, 60, None, 6), (None, 70, None, None), (3, 80, None, 11), (None, 90, None, None), (8, 100, None, 10), (None, 110, None, None), (9, 120, None, 13), (None, 130, None, None), (12, 140, None, 15), (None, 150, None, None), (14, 160, None, 16), (None, 170, None, 17), (None, 180, None, None)]" - - assert tree._get_sibling(7) is None - - trav = BinaryTreeTraversal(tree, backend=backend) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [10, 20, 30, 40, 50, 60, 70, 80, 90, - 100, 110, 120, 130, 140, 150, 160, 170, 180] - assert [node.key for node in pre_order] == [80, 40, 20, 10, 30, 60, 50, 70, 120, 
100, - 90, 110, 140, 130, 160, 150, 170, 180] - - tree.delete(180) - tree.delete(130) - tree.delete(110) - tree.delete(190) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, - 120, 140, 150, 160, 170] - assert [node.key for node in pre_order] == [80, 40, 20, 10, 30, 60, 50, 70, 120, 100, - 90, 160, 140, 150, 170] - - tree.delete(170) - tree.delete(100) - tree.delete(60) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [10, 20, 30, 40, 50, 70, 80, 90, 120, 140, 150, 160] - assert [node.key for node in pre_order] == [80, 40, 20, 10, 30, 50, 70, 120, 90, 150, 140, 160] - - tree.delete(70) - tree.delete(140) - - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [10, 20, 30, 40, 50, 80, 90, 120, 150, 160] - assert [node.key for node in pre_order] == [80, 40, 20, 10, 30, 50, 120, 90, 150, 160] - - tree.delete(150) - tree.delete(120) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [10, 20, 30, 40, 50, 80, 90, 160] - assert [node.key for node in pre_order] == [40, 20, 10, 30, 80, 50, 90, 160] - - tree.delete(50) - tree.delete(80) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [10, 20, 30, 40, 90, 160] - assert [node.key for node in pre_order] == [40, 20, 10, 30, 90, 160] - - tree.delete(30) - tree.delete(20) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [10, 40, 90, 160] - assert [node.key for node 
in pre_order] == [40, 10, 90, 160] - - tree.delete(10) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [40, 90, 160] - assert [node.key for node in pre_order] == [90, 40, 160] - - tree.delete(40) - tree.delete(90) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [160] - assert [node.key for node in pre_order] == [160] - - tree.delete(160) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order if node.key is not None] == [] - assert [node.key for node in pre_order if node.key is not None] == [] - - tree = RedBlackTree(backend=backend) - tree.insert(50) - tree.insert(40) - tree.insert(30) - tree.insert(20) - tree.insert(10) - tree.insert(5) - - trav = BinaryTreeTraversal(tree, backend=backend) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [5, 10, 20, 30, 40, 50] - assert [node.key for node in pre_order] == [40, 20, 10, 5, 30, 50] - - assert tree.search(50) == 0 - assert tree.search(20) == 3 - assert tree.search(30) == 2 - tree.delete(50) - tree.delete(20) - tree.delete(30) - assert tree.search(50) is None - assert tree.search(20) is None - assert tree.search(30) is None - - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [5, 10, 40] - assert [node.key for node in pre_order] == [10, 5, 40] - - tree = RedBlackTree(backend=backend) - tree.insert(10) - tree.insert(5) - tree.insert(20) - tree.insert(15) - - trav = BinaryTreeTraversal(tree, backend=backend) - in_order = trav.depth_first_search(order='in_order') - pre_order = 
trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [5, 10, 15, 20] - assert [node.key for node in pre_order] == [10, 5, 20, 15] - - tree.delete(5) - - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [10, 15, 20] - assert [node.key for node in pre_order] == [15, 10, 20] - - tree = RedBlackTree(backend=backend) - tree.insert(10) - tree.insert(5) - tree.insert(20) - tree.insert(15) - tree.insert(2) - tree.insert(6) - - trav = BinaryTreeTraversal(tree,backend=backend) - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [2, 5, 6, 10, 15, 20] - assert [node.key for node in pre_order] == [10, 5, 2, 6, 20, 15] - - tree.delete(10) - - in_order = trav.depth_first_search(order='in_order') - pre_order = trav.depth_first_search(order='pre_order') - assert [node.key for node in in_order] == [2, 5, 6, 15, 20] - assert [node.key for node in pre_order] == [6, 5, 2, 20, 15] - -def test_RedBlackTree(): - _test_RedBlackTree(Backend.PYTHON) - -def test_cpp_RedBlackTree(): - _test_RedBlackTree(Backend.CPP) diff --git a/lib/python3.12/site-packages/pydatastructs/trees/tests/test_heaps.py b/lib/python3.12/site-packages/pydatastructs/trees/tests/test_heaps.py deleted file mode 100644 index dece2f132..000000000 --- a/lib/python3.12/site-packages/pydatastructs/trees/tests/test_heaps.py +++ /dev/null @@ -1,236 +0,0 @@ -from pydatastructs.trees.heaps import BinaryHeap, TernaryHeap, BinomialHeap, DHeap -from pydatastructs.linear_data_structures.arrays import DynamicOneDimensionalArray -from pydatastructs.miscellaneous_data_structures.binomial_trees import BinomialTree -from pydatastructs.utils.misc_util import TreeNode, BinomialTreeNode -from pydatastructs.utils.raises_util import raises -from collections import deque as Queue - -def test_BinaryHeap(): 
- - max_heap = BinaryHeap(heap_property="max") - - assert raises(IndexError, lambda: max_heap.extract()) - - max_heap.insert(100, 100) - max_heap.insert(19, 19) - max_heap.insert(36, 36) - max_heap.insert(17, 17) - max_heap.insert(3, 3) - max_heap.insert(25, 25) - max_heap.insert(1, 1) - max_heap.insert(2, 2) - max_heap.insert(7, 7) - assert str(max_heap) == \ - ("[(100, 100, [1, 2]), (19, 19, [3, 4]), " - "(36, 36, [5, 6]), (17, 17, [7, 8]), " - "(3, 3, []), (25, 25, []), (1, 1, []), " - "(2, 2, []), (7, 7, [])]") - - assert max_heap.extract().key == 100 - - expected_sorted_elements = [36, 25, 19, 17, 7, 3, 2, 1] - l = max_heap.heap[0].left - l = max_heap.heap[0].right - sorted_elements = [] - for _ in range(8): - sorted_elements.append(max_heap.extract().key) - assert expected_sorted_elements == sorted_elements - - elements = [ - TreeNode(7, 7), TreeNode(25, 25), TreeNode(100, 100), - TreeNode(1, 1), TreeNode(2, 2), TreeNode(3, 3), - TreeNode(17, 17), TreeNode(19, 19), TreeNode(36, 36) - ] - min_heap = BinaryHeap(elements=elements, heap_property="min") - assert min_heap.extract().key == 1 - - expected_sorted_elements = [2, 3, 7, 17, 19, 25, 36, 100] - sorted_elements = [min_heap.extract().key for _ in range(8)] - assert expected_sorted_elements == sorted_elements - - non_TreeNode_elements = [ - (7, 7), TreeNode(25, 25), TreeNode(100, 100), - TreeNode(1, 1), (2, 2), TreeNode(3, 3), - TreeNode(17, 17), TreeNode(19, 19), TreeNode(36, 36) - ] - assert raises(TypeError, lambda: - BinaryHeap(elements = non_TreeNode_elements, heap_property='min')) - - non_TreeNode_elements = DynamicOneDimensionalArray(int, 0) - non_TreeNode_elements.append(1) - non_TreeNode_elements.append(2) - assert raises(TypeError, lambda: - BinaryHeap(elements = non_TreeNode_elements, heap_property='min')) - - non_heapable = "[1, 2, 3]" - assert raises(ValueError, lambda: - BinaryHeap(elements = non_heapable, heap_property='min')) - -def test_TernaryHeap(): - max_heap = 
TernaryHeap(heap_property="max") - assert raises(IndexError, lambda: max_heap.extract()) - max_heap.insert(100, 100) - max_heap.insert(19, 19) - max_heap.insert(36, 36) - max_heap.insert(17, 17) - max_heap.insert(3, 3) - max_heap.insert(25, 25) - max_heap.insert(1, 1) - max_heap.insert(2, 2) - max_heap.insert(7, 7) - assert str(max_heap) == \ - ('[(100, 100, [1, 2, 3]), (25, 25, [4, 5, 6]), ' - '(36, 36, [7, 8]), (17, 17, []), ' - '(3, 3, []), (19, 19, []), (1, 1, []), ' - '(2, 2, []), (7, 7, [])]') - - assert max_heap.extract().key == 100 - - expected_sorted_elements = [36, 25, 19, 17, 7, 3, 2, 1] - sorted_elements = [] - for _ in range(8): - sorted_elements.append(max_heap.extract().key) - assert expected_sorted_elements == sorted_elements - - elements = [ - TreeNode(7, 7), TreeNode(25, 25), TreeNode(100, 100), - TreeNode(1, 1), TreeNode(2, 2), TreeNode(3, 3), - TreeNode(17, 17), TreeNode(19, 19), TreeNode(36, 36) - ] - min_heap = TernaryHeap(elements=elements, heap_property="min") - expected_extracted_element = min_heap.heap[0].key - assert min_heap.extract().key == expected_extracted_element - - expected_sorted_elements = [2, 3, 7, 17, 19, 25, 36, 100] - sorted_elements = [min_heap.extract().key for _ in range(8)] - assert expected_sorted_elements == sorted_elements - -def test_DHeap(): - assert raises(ValueError, lambda: DHeap(heap_property="none", d=4)) - max_heap = DHeap(heap_property="max", d=5) - assert raises(IndexError, lambda: max_heap.extract()) - max_heap.insert(100, 100) - max_heap.insert(19, 19) - max_heap.insert(36, 36) - max_heap.insert(17, 17) - max_heap.insert(3, 3) - max_heap.insert(25, 25) - max_heap.insert(1, 1) - max_heap = DHeap(max_heap.heap, heap_property="max", d=4) - max_heap.insert(2, 2) - max_heap.insert(7, 7) - assert str(max_heap) == \ - ('[(100, 100, [1, 2, 3, 4]), (25, 25, [5, 6, 7, 8]), ' - '(36, 36, []), (17, 17, []), (3, 3, []), (19, 19, []), ' - '(1, 1, []), (2, 2, []), (7, 7, [])]') - - assert max_heap.extract().key == 100 - 
- expected_sorted_elements = [36, 25, 19, 17, 7, 3, 2, 1] - sorted_elements = [] - for _ in range(8): - sorted_elements.append(max_heap.extract().key) - assert expected_sorted_elements == sorted_elements - - elements = [ - TreeNode(7, 7), TreeNode(25, 25), TreeNode(100, 100), - TreeNode(1, 1), TreeNode(2, 2), TreeNode(3, 3), - TreeNode(17, 17), TreeNode(19, 19), TreeNode(36, 36) - ] - min_heap = DHeap(elements=DynamicOneDimensionalArray(TreeNode, 9, elements), heap_property="min") - assert min_heap.extract().key == 1 - - expected_sorted_elements = [2, 3, 7, 17, 19, 25, 36, 100] - sorted_elements = [min_heap.extract().key for _ in range(8)] - assert expected_sorted_elements == sorted_elements - -def test_BinomialHeap(): - - # Corner cases - assert raises(TypeError, lambda: - BinomialHeap( - root_list=[BinomialTreeNode(1, 1), None]) - ) is True - tree1 = BinomialTree(BinomialTreeNode(1, 1), 0) - tree2 = BinomialTree(BinomialTreeNode(2, 2), 0) - bh = BinomialHeap(root_list=[tree1, tree2]) - assert raises(TypeError, lambda: - bh.merge_tree(BinomialTreeNode(2, 2), None)) - assert raises(TypeError, lambda: - bh.merge(None)) - - # Testing BinomialHeap.merge - nodes = [BinomialTreeNode(1, 1), # 0 - BinomialTreeNode(3, 3), # 1 - BinomialTreeNode(9, 9), # 2 - BinomialTreeNode(11, 11), # 3 - BinomialTreeNode(6, 6), # 4 - BinomialTreeNode(14, 14), # 5 - BinomialTreeNode(2, 2), # 6 - BinomialTreeNode(7, 7), # 7 - BinomialTreeNode(4, 4), # 8 - BinomialTreeNode(8, 8), # 9 - BinomialTreeNode(12, 12), # 10 - BinomialTreeNode(10, 10), # 11 - BinomialTreeNode(5, 5), # 12 - BinomialTreeNode(21, 21)] # 13 - - nodes[2].add_children(nodes[3]) - nodes[4].add_children(nodes[5]) - nodes[6].add_children(nodes[9], nodes[8], nodes[7]) - nodes[7].add_children(nodes[11], nodes[10]) - nodes[8].add_children(nodes[12]) - nodes[10].add_children(nodes[13]) - - tree11 = BinomialTree(nodes[0], 0) - tree12 = BinomialTree(nodes[2], 1) - tree13 = BinomialTree(nodes[6], 3) - tree21 = BinomialTree(nodes[1], 
0) - - heap1 = BinomialHeap(root_list=[tree11, tree12, tree13]) - heap2 = BinomialHeap(root_list=[tree21]) - - def bfs(heap): - bfs_trav = [] - for i in range(len(heap.root_list)): - layer = [] - bfs_q = Queue() - bfs_q.append(heap.root_list[i].root) - while len(bfs_q) != 0: - curr_node = bfs_q.popleft() - if curr_node is not None: - layer.append(curr_node.key) - for _i in range(curr_node.children._last_pos_filled + 1): - bfs_q.append(curr_node.children[_i]) - if layer != []: - bfs_trav.append(layer) - return bfs_trav - - heap1.merge(heap2) - expected_bfs_trav = [[1, 3, 9, 11], [2, 8, 4, 7, 5, 10, 12, 21]] - assert bfs(heap1) == expected_bfs_trav - - # Testing Binomial.find_minimum - assert heap1.find_minimum().key == 1 - - # Testing Binomial.delete_minimum - heap1.delete_minimum() - assert bfs(heap1) == [[3], [9, 11], [2, 8, 4, 7, 5, 10, 12, 21]] - assert raises(ValueError, lambda: heap1.decrease_key(nodes[3], 15)) - heap1.decrease_key(nodes[3], 0) - assert bfs(heap1) == [[3], [0, 9], [2, 8, 4, 7, 5, 10, 12, 21]] - heap1.delete(nodes[12]) - assert bfs(heap1) == [[3, 8], [0, 9, 2, 7, 4, 10, 12, 21]] - - # Testing BinomialHeap.insert - heap = BinomialHeap() - assert raises(IndexError, lambda: heap.find_minimum()) - heap.insert(1, 1) - heap.insert(3, 3) - heap.insert(6, 6) - heap.insert(9, 9) - heap.insert(14, 14) - heap.insert(11, 11) - heap.insert(2, 2) - heap.insert(7, 7) - assert bfs(heap) == [[1, 3, 6, 2, 9, 7, 11, 14]] diff --git a/lib/python3.12/site-packages/pydatastructs/trees/tests/test_m_ary_trees.py b/lib/python3.12/site-packages/pydatastructs/trees/tests/test_m_ary_trees.py deleted file mode 100644 index 6cbc84ace..000000000 --- a/lib/python3.12/site-packages/pydatastructs/trees/tests/test_m_ary_trees.py +++ /dev/null @@ -1,5 +0,0 @@ -from pydatastructs import MAryTree - -def test_MAryTree(): - m = MAryTree(1, 1) - assert str(m) == '[(1, 1)]' diff --git a/lib/python3.12/site-packages/pydatastructs/trees/tests/test_space_partitioning_tree.py 
b/lib/python3.12/site-packages/pydatastructs/trees/tests/test_space_partitioning_tree.py deleted file mode 100644 index 99f0e84cc..000000000 --- a/lib/python3.12/site-packages/pydatastructs/trees/tests/test_space_partitioning_tree.py +++ /dev/null @@ -1,20 +0,0 @@ -from pydatastructs import OneDimensionalSegmentTree -from pydatastructs.utils.raises_util import raises - -def test_OneDimensionalSegmentTree(): - ODST = OneDimensionalSegmentTree - segt = ODST([(0, 5), (1, 6), (9, 13), (1, 2), (3, 8), (9, 20)]) - assert segt.cache is False - segt2 = ODST([(1, 4)]) - assert str(segt2) == ("[(None, [False, 0, 1, False], None, None), " - "(None, [True, 1, 1, True], ['(None, [True, 1, 4, True], None, None)'], " - "None), (None, [False, 1, 4, False], None, None), (None, [True, 4, 4, True], " - "None, None), (0, [False, 0, 1, True], None, 1), (2, [False, 1, 4, True], " - "['(None, [True, 1, 4, True], None, None)'], 3), (4, [False, 0, 4, True], " - "None, 5), (None, [False, 4, 5, False], None, None), (-3, [False, 0, 5, " - "False], None, -2)]") - assert len(segt.query(1.5)) == 3 - assert segt.cache is True - assert len(segt.query(-1)) == 0 - assert len(segt.query(2.8)) == 2 - assert raises(ValueError, lambda: ODST([(1, 2, 3)])) diff --git a/lib/python3.12/site-packages/pydatastructs/utils/__init__.py b/lib/python3.12/site-packages/pydatastructs/utils/__init__.py deleted file mode 100644 index c4971be32..000000000 --- a/lib/python3.12/site-packages/pydatastructs/utils/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -__all__ = [] - -from . 
import ( - misc_util, - testing_util, -) - -from .misc_util import ( - TreeNode, - MAryTreeNode, - LinkedListNode, - BinomialTreeNode, - AdjacencyListGraphNode, - AdjacencyMatrixGraphNode, - GraphEdge, - Set, - CartesianTreeNode, - RedBlackTreeNode, - TrieNode, - SkipNode, - summation, - greatest_common_divisor, - minimum, - Backend -) -from .testing_util import test - -__all__.extend(misc_util.__all__) -__all__.extend(testing_util.__all__) diff --git a/lib/python3.12/site-packages/pydatastructs/utils/_backend/__init__.py b/lib/python3.12/site-packages/pydatastructs/utils/_backend/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/lib/python3.12/site-packages/pydatastructs/utils/misc_util.py b/lib/python3.12/site-packages/pydatastructs/utils/misc_util.py deleted file mode 100644 index 3672c58b9..000000000 --- a/lib/python3.12/site-packages/pydatastructs/utils/misc_util.py +++ /dev/null @@ -1,632 +0,0 @@ -import math, pydatastructs -from enum import Enum -from pydatastructs.utils._backend.cpp import _nodes, _graph_utils - -__all__ = [ - 'TreeNode', - 'MAryTreeNode', - 'LinkedListNode', - 'BinomialTreeNode', - 'AdjacencyListGraphNode', - 'AdjacencyMatrixGraphNode', - 'GraphEdge', - 'Set', - 'CartesianTreeNode', - 'RedBlackTreeNode', - 'TrieNode', - 'SkipNode', - 'minimum', - 'summation', - 'greatest_common_divisor', - 'Backend' -] - - -class Backend(Enum): - - PYTHON = 'Python' - CPP = 'Cpp' - LLVM = 'Llvm' - - def __str__(self): - return self.value - -def raise_if_backend_is_not_python(api, backend): - if backend != Backend.PYTHON: - raise ValueError("As of {} version, only {} backend is supported for {} API".format( - pydatastructs.__version__, str(Backend.PYTHON), api)) - -_check_type = lambda a, t: isinstance(a, t) -NoneType = type(None) - -class Node(object): - """ - Abstract class representing a node. - """ - pass - -class TreeNode(Node): - """ - Represents node in trees. 
- - Parameters - ========== - - key - Required for comparison operations. - data - Any valid data to be stored in the node. - left: int - Optional, index of the left child node. - right: int - Optional, index of the right child node. - backend: pydatastructs.Backend - The backend to be used. Available backends: Python and C++ - Optional, by default, the Python backend is used. For faster execution, use the C++ backend. - """ - - __slots__ = ['key', 'data', 'left', 'right', 'is_root', - 'height', 'parent', 'size'] - - @classmethod - def methods(cls): - return ['__new__', '__str__'] - - def __new__(cls, key, data=None, **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.CPP: - return _nodes.TreeNode(key, data, **kwargs) - obj = Node.__new__(cls) - obj.data, obj.key = data, key - obj.left, obj.right, obj.parent, obj.height, obj.size = \ - None, None, None, 0, 1 - obj.is_root = False - return obj - - def __str__(self): - """ - Used for printing. - """ - return str((self.left, self.key, self.data, self.right)) - -class CartesianTreeNode(TreeNode): - """ - Represents node in cartesian trees. - - Parameters - ========== - - key - Required for comparison operations. - data - Any valid data to be stored in the node. - priority: int - An integer value for heap property. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - """ - __slots__ = ['key', 'data', 'priority'] - - def __new__(cls, key, priority, data=None, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = TreeNode.__new__(cls, key, data) - obj.priority = priority - return obj - - def __str__(self): - """ - Used for printing. - """ - return str((self.left, self.key, self.priority, self.data, self.right)) - -class RedBlackTreeNode(TreeNode): - """ - Represents node in red-black trees. - - Parameters - ========== - - key - Required for comparison operations. 
- data - Any valid data to be stored in the node. - color - 0 for black and 1 for red. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - """ - __slots__ = ['key', 'data', 'color'] - - @classmethod - def methods(cls): - return ['__new__'] - - def __new__(cls, key, data=None, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = TreeNode.__new__(cls, key, data) - obj.color = 1 - return obj - -class BinomialTreeNode(TreeNode): - """ - Represents node in binomial trees. - - Parameters - ========== - - key - Required for comparison operations. - data - Any valid data to be stored in the node. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Note - ==== - - The following are the data members of the class: - - parent: BinomialTreeNode - A reference to the BinomialTreeNode object - which is a prent of this. - children: DynamicOneDimensionalArray - An array of references to BinomialTreeNode objects - which are children this node. - is_root: bool, by default, False - If the current node is a root of the tree then - set it to True otherwise False. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - """ - __slots__ = ['parent', 'key', 'children', 'data', 'is_root'] - - @classmethod - def methods(cls): - return ['__new__', 'add_children', '__str__'] - - def __new__(cls, key, data=None, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - from pydatastructs.linear_data_structures.arrays import DynamicOneDimensionalArray - obj = Node.__new__(cls) - obj.data, obj.key = data, key - obj.children, obj.parent, obj.is_root = ( - DynamicOneDimensionalArray(BinomialTreeNode, 0), - None, - False - ) - return obj - - def add_children(self, *children): - """ - Adds children of current node. 
- """ - for child in children: - self.children.append(child) - child.parent = self - - def __str__(self): - """ - For printing the key and data. - """ - return str((self.key, self.data)) - -class MAryTreeNode(TreeNode): - """ - Represents node in an M-ary trees. - - Parameters - ========== - - key - Required for comparison operations. - data - Any valid data to be stored in the node. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - - Note - ==== - - The following are the data members of the class: - - children: DynamicOneDimensionalArray - An array of indices which stores the children of - this node in the M-ary tree array - is_root: bool, by default, False - If the current node is a root of the tree then - set it to True otherwise False. - """ - __slots__ = ['key', 'children', 'data', 'is_root'] - - @classmethod - def methods(cls): - return ['__new__', 'add_children', '__str__'] - - def __new__(cls, key, data=None, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - from pydatastructs.linear_data_structures.arrays import DynamicOneDimensionalArray - obj = Node.__new__(cls) - obj.data = data - obj.key = key - obj.is_root = False - obj.children = DynamicOneDimensionalArray(int, 0) - return obj - - def add_children(self, *children): - """ - Adds children of current node. - """ - for child in children: - self.children.append(child) - - def __str__(self): - return str((self.key, self.data)) - - -class LinkedListNode(Node): - """ - Represents node in linked lists. - - Parameters - ========== - - key - Any valid identifier to uniquely - identify the node in the linked list. - data - Any valid data to be stored in the node. - links - List of names of attributes which should - be used as links to other nodes. - addrs - List of address of nodes to be assigned to - each of the attributes in links. - backend: pydatastructs.Backend - The backend to be used. 
- Optional, by default, the best available - backend is used. - """ - @classmethod - def methods(cls): - return ['__new__', '__str__'] - - def __new__(cls, key, data=None, links=None, addrs=None, - **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - if links is None: - links = ['next'] - if addrs is None: - addrs = [None] - obj = Node.__new__(cls) - obj.key = key - obj.data = data - for link, addr in zip(links, addrs): - obj.__setattr__(link, addr) - obj.__slots__ = ['key', 'data'] + links - return obj - - def __str__(self): - return str((self.key, self.data)) - -class SkipNode(Node): - """ - Represents node in linked lists. - - Parameters - ========== - - key - Any valid identifier to uniquely - identify the node in the skip list. - data - Any valid data to be stored in the node. - next - Reference to the node lying just forward - to the current node. - Optional, by default, None. - down - Reference to the node lying just below the - current node. - Optional, by default, None. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - """ - - __slots__ = ['key', 'data', 'next', 'down'] - - def __new__(cls, key, data=None, next=None, down=None, - **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = Node.__new__(cls) - obj.key, obj.data = key, data - obj.next, obj.down = next, down - return obj - - def __str__(self): - return str((self.key, self.data)) - -class GraphNode(Node): - """ - Abastract class for graph nodes/vertices. - """ - def __str__(self): - return str((self.name, self.data)) - -class AdjacencyListGraphNode(GraphNode): - """ - Represents nodes for adjacency list implementation - of graphs. - - Parameters - ========== - - name: str - The name of the node by which it is identified - in the graph. Must be unique. - data - The data to be stored at each graph node. 
- adjacency_list: list - Any valid iterator to initialize the adjacent - nodes of the current node. - Optional, by default, None - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - """ - @classmethod - def methods(cls): - return ['__new__', 'add_adjacent_node', - 'remove_adjacent_node'] - - def __new__(cls, name, data=None, adjacency_list=[], - **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.PYTHON: - obj = GraphNode.__new__(cls) - obj.name, obj.data = str(name), data - obj._impl = 'adjacency_list' - if len(adjacency_list) > 0: - for node in adjacency_list: - obj.__setattr__(node.name, node) - obj.adjacent = adjacency_list if len(adjacency_list) > 0 \ - else [] - return obj - else: - return _graph_utils.AdjacencyListGraphNode(name, data, adjacency_list) - - def add_adjacent_node(self, name, data=None): - """ - Adds adjacent node to the current node's - adjacency list with given name and data. - """ - if hasattr(self, name): - getattr(self, name).data = data - else: - new_node = AdjacencyListGraphNode(name, data) - self.__setattr__(new_node.name, new_node) - self.adjacent.append(new_node.name) - - def remove_adjacent_node(self, name): - """ - Removes node with given name from - adjacency list. - """ - if not hasattr(self, name): - raise ValueError("%s is not adjacent to %s"%(name, self.name)) - self.adjacent.remove(name) - delattr(self, name) - -class AdjacencyMatrixGraphNode(GraphNode): - """ - Represents nodes for adjacency matrix implementation - of graphs. - - Parameters - ========== - - name: str - The index of the node in the AdjacencyMatrix. - data - The data to be stored at each graph node. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. 
- """ - __slots__ = ['name', 'data'] - - @classmethod - def methods(cls): - return ['__new__'] - - def __new__(cls, name, data=None, - **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.PYTHON: - obj = GraphNode.__new__(cls) - obj.name, obj.data, obj.is_connected = \ - str(name), data, None - obj._impl = 'adjacency_matrix' - return obj - else: - return _graph_utils.AdjacencyMatrixGraphNode(str(name), data) - -class GraphEdge(object): - """ - Represents the concept of edges in graphs. - - Parameters - ========== - - node1: GraphNode or it's child classes - The source node of the edge. - node2: GraphNode or it's child classes - The target node of the edge. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - """ - @classmethod - def methods(cls): - return ['__new__', '__str__'] - - def __new__(cls, node1, node2, value=None, - **kwargs): - backend = kwargs.get('backend', Backend.PYTHON) - if backend == Backend.PYTHON: - obj = object.__new__(cls) - obj.source, obj.target = node1, node2 - obj.value = value - return obj - else: - return _graph_utils.GraphEdge(node1, node2, value) - - def __str__(self): - return str((self.source.name, self.target.name)) - -class Set(object): - """ - Represents a set in a forest of disjoint sets. - - Parameters - ========== - - key: Hashable python object - The key which uniquely identifies - the set. - data: Python object - The data to be stored in the set. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. 
- """ - - __slots__ = ['parent', 'size', 'key', 'data'] - - @classmethod - def methods(cls): - return ['__new__'] - - def __new__(cls, key, data=None, - **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = object.__new__(cls) - obj.key = key - obj.data = data - obj.parent, obj.size = [None]*2 - return obj - -class TrieNode(Node): - """ - Represents nodes in the trie data structure. - - Parameters - ========== - - char: The character stored in the current node. - Optional, by default None. - backend: pydatastructs.Backend - The backend to be used. - Optional, by default, the best available - backend is used. - """ - - __slots__ = ['char', '_children', 'is_terminal'] - - @classmethod - def methods(cls): - return ['__new__', 'add_child', 'get_child', 'remove_child'] - - def __new__(cls, char=None, **kwargs): - raise_if_backend_is_not_python( - cls, kwargs.get('backend', Backend.PYTHON)) - obj = Node.__new__(cls) - obj.char = char - obj._children = {} - obj.is_terminal = False - return obj - - def add_child(self, trie_node) -> None: - self._children[trie_node.char] = trie_node - - def get_child(self, char: str): - return self._children.get(char, None) - - def remove_child(self, char: str) -> None: - self._children.pop(char) - -def _comp(u, v, tcomp): - """ - Overloaded comparator for comparing - two values where any one of them can be - `None`. - """ - if u is None and v is not None: - return False - elif u is not None and v is None: - return True - elif u is None and v is None: - return False - else: - return tcomp(u, v) - -def _check_range_query_inputs(input, bounds): - start, end = input - if start >= end: - raise ValueError("Input (%d, %d) range is empty."%(start, end)) - if start < bounds[0] or end > bounds[1]: - raise IndexError("Input (%d, %d) range is out of " - "bounds of array indices (%d, %d)." 
- %(start, end, bounds[0], bounds[1])) - -def minimum(x_y): - if len(x_y) == 1: - return x_y[0] - - x, y = x_y - if x is None or y is None: - return x if y is None else y - - return min(x, y) - -def greatest_common_divisor(x_y): - if len(x_y) == 1: - return x_y[0] - - x, y = x_y - if x is None or y is None: - return x if y is None else y - - return math.gcd(x, y) - -def summation(x_y): - if len(x_y) == 1: - return x_y[0] - - x, y = x_y - if x is None or y is None: - return x if y is None else y - - return x + y diff --git a/lib/python3.12/site-packages/pydatastructs/utils/raises_util.py b/lib/python3.12/site-packages/pydatastructs/utils/raises_util.py deleted file mode 100644 index 3a324d38d..000000000 --- a/lib/python3.12/site-packages/pydatastructs/utils/raises_util.py +++ /dev/null @@ -1,17 +0,0 @@ -import pytest - -def raises(exception, code): - """ - Utility for testing exceptions. - - Parameters - ========== - - exception - A valid python exception - code: lambda - Code that causes exception - """ - with pytest.raises(exception): - code() - return True diff --git a/lib/python3.12/site-packages/pydatastructs/utils/testing_util.py b/lib/python3.12/site-packages/pydatastructs/utils/testing_util.py deleted file mode 100644 index e5c0627b5..000000000 --- a/lib/python3.12/site-packages/pydatastructs/utils/testing_util.py +++ /dev/null @@ -1,83 +0,0 @@ -import os -import pathlib -import glob -import types - -__all__ = ['test'] - - -# Root pydatastructs directory -ROOT_DIR = pathlib.Path(os.path.abspath(__file__)).parents[1] - - -SKIP_FILES = ['testing_util.py'] - -def test(submodules=None, only_benchmarks=False, - benchmarks_size=1000, **kwargs): - """ - Runs the library tests using pytest - - Parameters - ========== - - submodules: Optional, list[str] - List of submodules test to run. By default runs - all the tests - """ - try: - import pytest - except ImportError: - raise Exception("pytest must be installed. 
Use `pip install pytest` " - "to install it.") - - # set benchmarks size - os.environ["PYDATASTRUCTS_BENCHMARK_SIZE"] = str(benchmarks_size) - test_files = [] - if submodules: - if not isinstance(submodules, (list, tuple)): - submodules = [submodules] - for path in glob.glob(f'{ROOT_DIR}/**/test_*.py', recursive=True): - skip_test = False - for skip in SKIP_FILES: - if skip in path: - skip_test = True - break - if skip_test: - continue - for sub_var in submodules: - if isinstance(sub_var, types.ModuleType): - sub = sub_var.__name__.split('.')[-1] - elif isinstance(sub_var, str): - sub = sub_var - else: - raise Exception("Submodule should be of type: str or module") - if sub in path: - if not only_benchmarks: - if 'benchmarks' not in path: - test_files.append(path) - else: - if 'benchmarks' in path: - test_files.append(path) - break - else: - for path in glob.glob(f'{ROOT_DIR}/**/test_*.py', recursive=True): - skip_test = False - for skip in SKIP_FILES: - if skip in path: - skip_test = True - break - if skip_test: - continue - if not only_benchmarks: - if 'benchmarks' not in path: - test_files.append(path) - else: - if 'benchmarks' in path: - test_files.append(path) - - extra_args = [] - if kwargs.get("n", False) is not False: - extra_args.append("-n") - extra_args.append(str(kwargs["n"])) - - pytest.main(extra_args + test_files) diff --git a/lib/python3.12/site-packages/pydatastructs/utils/tests/__init__.py b/lib/python3.12/site-packages/pydatastructs/utils/tests/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/lib/python3.12/site-packages/pydatastructs/utils/tests/test_code_quality.py b/lib/python3.12/site-packages/pydatastructs/utils/tests/test_code_quality.py deleted file mode 100644 index 67afe49e8..000000000 --- a/lib/python3.12/site-packages/pydatastructs/utils/tests/test_code_quality.py +++ /dev/null @@ -1,239 +0,0 @@ -import os, re, sys, pydatastructs, inspect -from typing import Type -import pytest - -def _list_files(checker): - 
root_path = os.path.abspath( - os.path.join( - os.path.split(__file__)[0], - os.pardir, os.pardir)) - code_files = [] - for (dirpath, _, filenames) in os.walk(root_path): - for _file in filenames: - if checker(_file): - code_files.append(os.path.join(dirpath, _file)) - return code_files - -checker = lambda _file: (re.match(r".*\.py$", _file) or - re.match(r".*\.cpp$", _file) or - re.match(r".*\.hpp$", _file)) -code_files = _list_files(checker) - -def test_trailing_white_spaces(): - messages = [("The following places in your code " - "end with white spaces.")] - msg = "{}:{}" - for file_path in code_files: - file = open(file_path, "r") - line = file.readline() - line_number = 1 - while line != "": - if line.endswith(" \n") or line.endswith("\t\n") \ - or line.endswith(" ") or line.endswith("\t"): - messages.append(msg.format(file_path, line_number)) - line = file.readline() - line_number += 1 - file.close() - - if len(messages) > 1: - assert False, '\n'.join(messages) - -def test_final_new_lines(): - messages = [("The following files in your code " - "do not end with a single new line.")] - msg1 = "No new line in {}:{}" - msg2 = "More than one new line in {}:{}" - for file_path in code_files: - file = open(file_path, "r") - lines = [] - line = file.readline() - while line != "": - lines.append(line) - line = file.readline() - if lines: - if lines[-1][-1] != "\n": - messages.append(msg1.format(file_path, len(lines))) - if lines[-1] == "\n" and lines[-2][-1] == "\n": - messages.append(msg2.format(file_path, len(lines))) - file.close() - - if len(messages) > 1: - assert False, '\n'.join(messages) - -def test_comparison_True_False_None(): - messages = [("The following places in your code " - "use `!=` or `==` for comparing True/False/None." 
- "Please use `is` instead.")] - msg = "{}:{}" - checker = lambda _file: re.match(r".*\.py$", _file) - py_files = _list_files(checker) - for file_path in py_files: - if file_path.find("test_code_quality.py") == -1: - file = open(file_path, "r") - line = file.readline() - line_number = 1 - while line != "": - if ((line.find("== True") != -1) or - (line.find("== False") != -1) or - (line.find("== None") != -1) or - (line.find("!= True") != -1) or - (line.find("!= False") != -1) or - (line.find("!= None") != -1)): - messages.append(msg.format(file_path, line_number)) - line = file.readline() - line_number += 1 - file.close() - - if len(messages) > 1: - assert False, '\n'.join(messages) - -@pytest.mark.xfail -def test_reinterpret_cast(): - - def is_variable(str): - for ch in str: - if not (ch == '_' or ch.isalnum()): - return False - return True - - checker = lambda _file: (re.match(r".*\.cpp$", _file) or - re.match(r".*\.hpp$", _file)) - cpp_files = _list_files(checker) - messages = [("The following lines should use reinterpret_cast" - " to cast pointers from one type to another")] - msg = "Casting to {} at {}:{}" - for file_path in cpp_files: - file = open(file_path, "r") - line = file.readline() - line_number = 1 - while line != "": - found_open = False - between_open_close = "" - for char in line: - if char == '(': - found_open = True - elif char == ')': - if (between_open_close and - between_open_close[-1] == '*' and - is_variable(between_open_close[:-1])): - messages.append(msg.format(between_open_close[:-1], - file_path, line_number)) - between_open_close = "" - found_open = False - elif char != ' ' and found_open: - between_open_close += char - line = file.readline() - line_number += 1 - file.close() - - if len(messages) > 1: - assert False, '\n'.join(messages) - -def test_presence_of_tabs(): - messages = [("The following places in your code " - "use tabs instead of spaces.")] - msg = "{}:{}" - for file_path in code_files: - file = open(file_path, "r") - 
line_number = 1 - line = file.readline() - while line != "": - if (line.find('\t') != -1): - messages.append(msg.format(file_path, line_number)) - line = file.readline() - line_number += 1 - file.close() - - if len(messages) > 1: - assert False, '\n'.join(messages) - -def _apis(): - import pydatastructs as pyds - return [ - pyds.graphs.adjacency_list.AdjacencyList, - pyds.graphs.adjacency_matrix.AdjacencyMatrix, - pyds.DoublyLinkedList, pyds.SinglyLinkedList, - pyds.SinglyCircularLinkedList, - pyds.DoublyCircularLinkedList, - pyds.OneDimensionalArray, pyds.MultiDimensionalArray, - pyds.DynamicOneDimensionalArray, - pyds.trees.BinaryTree, pyds.BinarySearchTree, - pyds.AVLTree, pyds.SplayTree, pyds.BinaryTreeTraversal, - pyds.DHeap, pyds.BinaryHeap, pyds.TernaryHeap, pyds.BinomialHeap, - pyds.MAryTree, pyds.OneDimensionalSegmentTree, - pyds.Queue, pyds.miscellaneous_data_structures.queue.ArrayQueue, - pyds.miscellaneous_data_structures.queue.LinkedListQueue, - pyds.PriorityQueue, - pyds.miscellaneous_data_structures.queue.LinkedListPriorityQueue, - pyds.miscellaneous_data_structures.queue.BinaryHeapPriorityQueue, - pyds.miscellaneous_data_structures.queue.BinomialHeapPriorityQueue, - pyds.Stack, pyds.miscellaneous_data_structures.stack.ArrayStack, - pyds.miscellaneous_data_structures.stack.LinkedListStack, - pyds.DisjointSetForest, pyds.BinomialTree, pyds.TreeNode, pyds.MAryTreeNode, - pyds.LinkedListNode, pyds.BinomialTreeNode, pyds.AdjacencyListGraphNode, - pyds.AdjacencyMatrixGraphNode, pyds.GraphEdge, pyds.Set, pyds.BinaryIndexedTree, - pyds.CartesianTree, pyds.CartesianTreeNode, pyds.Treap, pyds.RedBlackTreeNode, pyds.RedBlackTree, - pyds.Trie, pyds.TrieNode, pyds.SkipList, pyds.RangeQueryStatic, pyds.RangeQueryDynamic, pyds.SparseTable, - pyds.miscellaneous_data_structures.segment_tree.OneDimensionalArraySegmentTree, - pyds.bubble_sort, pyds.linear_search, pyds.binary_search, pyds.jump_search, - pyds.selection_sort, pyds.insertion_sort, pyds.quick_sort, 
pyds.intro_sort] - -def test_public_api(): - pyds = pydatastructs - apis = _apis() - print("\n\nAPI Report") - print("==========") - for name in apis: - if inspect.isclass(name): - _class = name - mro = _class.__mro__ - must_methods = _class.methods() - print("\n" + str(name)) - print("Methods Implemented") - print(must_methods) - print("Parent Classes") - print(mro[1:]) - for supercls in mro: - if supercls != _class: - for method in must_methods: - if hasattr(supercls, method) and \ - getattr(supercls, method) == \ - getattr(_class, method): - assert False, ("%s class doesn't " - "have %s method implemented."%( - _class, method - )) - -def test_backend_argument_message(): - - import pydatastructs as pyds - backend_implemented = [ - pyds.OneDimensionalArray, - pyds.DynamicOneDimensionalArray, - pyds.quick_sort, - pyds.AdjacencyListGraphNode, - pyds.AdjacencyMatrixGraphNode, - pyds.GraphEdge - ] - - def call_and_raise(api, pos_args_count=0): - try: - if pos_args_count == 0: - api(backend=None) - elif pos_args_count == 1: - api(None, backend=None) - elif pos_args_count == 2: - api(None, None, backend=None) - except ValueError as value_error: - assert str(api) in value_error.args[0] - except TypeError as type_error: - max_pos_args_count = 2 - if pos_args_count <= max_pos_args_count: - call_and_raise(api, pos_args_count + 1) - else: - raise type_error - - apis = _apis() - for api in apis: - if api not in backend_implemented: - call_and_raise(api, 0) diff --git a/lib/python3.12/site-packages/pydatastructs/utils/tests/test_misc_util.py b/lib/python3.12/site-packages/pydatastructs/utils/tests/test_misc_util.py deleted file mode 100644 index 13ba2ec8e..000000000 --- a/lib/python3.12/site-packages/pydatastructs/utils/tests/test_misc_util.py +++ /dev/null @@ -1,84 +0,0 @@ -from pydatastructs.utils import (TreeNode, AdjacencyListGraphNode, AdjacencyMatrixGraphNode, - GraphEdge, BinomialTreeNode, MAryTreeNode, CartesianTreeNode, RedBlackTreeNode, SkipNode) -from 
pydatastructs.utils.raises_util import raises -from pydatastructs.utils.misc_util import Backend - -def test_cpp_TreeNode(): - n = TreeNode(1,100,backend=Backend.CPP) - assert str(n) == "(None, 1, 100, None)" - -def test_AdjacencyListGraphNode(): - g_1 = AdjacencyListGraphNode('g_1', 1) - g_2 = AdjacencyListGraphNode('g_2', 2) - g = AdjacencyListGraphNode('g', 0, adjacency_list=[g_1, g_2]) - g.add_adjacent_node('g_3', 3) - assert g.g_1.name == 'g_1' - assert g.g_2.name == 'g_2' - assert g.g_3.name == 'g_3' - g.remove_adjacent_node('g_3') - assert hasattr(g, 'g_3') is False - assert raises(ValueError, lambda: g.remove_adjacent_node('g_3')) - g.add_adjacent_node('g_1', 4) - assert g.g_1.data == 4 - assert str(g) == "('g', 0)" - - h_1 = AdjacencyListGraphNode('h_1', 1, backend = Backend.CPP) - h_2 = AdjacencyListGraphNode('h_2', 2, backend = Backend.CPP) - assert str(h_1) == "('h_1', 1)" - h = AdjacencyListGraphNode('h', 0, adjacency_list = [h_1, h_2], backend = Backend.CPP) - h.add_adjacent_node('h_3', 3) - assert h.adjacent['h_1'].name == 'h_1' - assert h.adjacent['h_2'].name == 'h_2' - assert h.adjacent['h_3'].name == 'h_3' - h.remove_adjacent_node('h_3') - assert 'h_3' not in h.adjacent - assert raises(ValueError, lambda: h.remove_adjacent_node('h_3')) - h.add_adjacent_node('h_1', 4) - assert h.adjacent['h_1'] == 4 - assert str(h) == "('h', 0)" - h_5 = AdjacencyListGraphNode('h_5', h_1, backend = Backend.CPP) - assert h_5.data == h_1 - -def test_AdjacencyMatrixGraphNode(): - g = AdjacencyMatrixGraphNode("1", 3) - g2 = AdjacencyMatrixGraphNode("1", 3, backend = Backend.CPP) - assert str(g) == "('1', 3)" - assert str(g2) == "('1', 3)" - g3 = AdjacencyListGraphNode("3", g2, backend = Backend.CPP) - assert g3.data == g2 - - -def test_GraphEdge(): - g_1 = AdjacencyListGraphNode('g_1', 1) - g_2 = AdjacencyListGraphNode('g_2', 2) - e = GraphEdge(g_1, g_2, value=2) - assert str(e) == "('g_1', 'g_2')" - - h_1 = AdjacencyListGraphNode('h_1', 1, backend = Backend.CPP) - h_2 
= AdjacencyListGraphNode('h_2', 2, backend = Backend.CPP) - e2 = GraphEdge(h_1, h_2, value = 2, backend = Backend.CPP) - assert str(e2) == "('h_1', 'h_2', 2)" - -def test_BinomialTreeNode(): - b = BinomialTreeNode(1,1) - b.add_children(*[BinomialTreeNode(i,i) for i in range(2,10)]) - assert str(b) == '(1, 1)' - assert str(b.children) == "['(2, 2)', '(3, 3)', '(4, 4)', '(5, 5)', '(6, 6)', '(7, 7)', '(8, 8)', '(9, 9)']" - -def test_MAryTreeNode(): - m = MAryTreeNode(1, 1) - m.add_children(*list(range(2, 10))) - assert str(m) == "(1, 1)" - assert str(m.children) == "['2', '3', '4', '5', '6', '7', '8', '9']" - -def test_CartesianTreeNode(): - c = CartesianTreeNode(1, 1, 1) - assert str(c) == "(None, 1, 1, 1, None)" - -def test_RedBlackTreeNode(): - c = RedBlackTreeNode(1, 1) - assert str(c) == "(None, 1, 1, None)" - -def test_SkipNode(): - c = SkipNode(1) - assert str(c) == '(1, None)' diff --git a/pydatastructs/graphs/_backend/cpp/graph.cpp b/pydatastructs/graphs/_backend/cpp/graph.cpp index d41ca60f4..67b1b4572 100644 --- a/pydatastructs/graphs/_backend/cpp/graph.cpp +++ b/pydatastructs/graphs/_backend/cpp/graph.cpp @@ -4,45 +4,64 @@ #include "AdjacencyMatrix.hpp" #include "AdjacencyListGraphNode.hpp" #include "AdjacencyMatrixGraphNode.hpp" +#include "GraphEdge.hpp" +#include "GraphNode.hpp" #include "graph_bindings.hpp" +#include "Algorithms.hpp" -#ifdef __cplusplus -extern "C" { -#endif - -PyMODINIT_FUNC PyInit__graph(void); - -#ifdef __cplusplus -} -#endif +static PyMethodDef GraphMethods[] = { + {"bfs_adjacency_list", (PyCFunction)breadth_first_search_adjacency_list, METH_VARARGS | METH_KEYWORDS, "Run BFS on adjacency list with callback"}, + {"bfs_adjacency_matrix", (PyCFunction)breadth_first_search_adjacency_matrix, METH_VARARGS | METH_KEYWORDS, "Run BFS on adjacency matrix with callback"}, + {"minimum_spanning_tree_prim_adjacency_list", (PyCFunction)minimum_spanning_tree_prim_adjacency_list, METH_VARARGS | METH_KEYWORDS, "Run Prim's algorithm on adjacency 
list"}, + {"shortest_paths_dijkstra_adjacency_list", (PyCFunction)shortest_paths_dijkstra_adjacency_list, METH_VARARGS | METH_KEYWORDS, "Dijkstra's algorithm for adjacency list graphs"}, + {NULL, NULL, 0, NULL} +}; static struct PyModuleDef graph_module = { PyModuleDef_HEAD_INIT, "_graph", "C++ module for graphs", -1, - NULL, + GraphMethods, }; PyMODINIT_FUNC PyInit__graph(void) { PyObject* m; - if (PyType_Ready(&AdjacencyListGraphType) < 0) + if (PyType_Ready(&GraphNodeType) < 0) return NULL; if (PyType_Ready(&AdjacencyListGraphNodeType) < 0) return NULL; - if (PyType_Ready(&AdjacencyMatrixGraphType) < 0) + if (PyType_Ready(&AdjacencyMatrixGraphNodeType) < 0) return NULL; - if (PyType_Ready(&AdjacencyMatrixGraphNodeType) < 0) + if (PyType_Ready(&GraphEdgeType) < 0) + return NULL; + + if (PyType_Ready(&AdjacencyListGraphType) < 0) + return NULL; + + if (PyType_Ready(&AdjacencyMatrixGraphType) < 0) return NULL; m = PyModule_Create(&graph_module); if (m == NULL) return NULL; + Py_INCREF(&GraphNodeType); + PyModule_AddObject(m, "GraphNode", (PyObject*)&GraphNodeType); + + Py_INCREF(&AdjacencyListGraphNodeType); + PyModule_AddObject(m, "AdjacencyListGraphNode", (PyObject*)&AdjacencyListGraphNodeType); + + Py_INCREF(&AdjacencyMatrixGraphNodeType); + PyModule_AddObject(m, "AdjacencyMatrixGraphNode", (PyObject*)&AdjacencyMatrixGraphNodeType); + + Py_INCREF(&GraphEdgeType); + PyModule_AddObject(m, "GraphEdge", (PyObject*)&GraphEdgeType); + Py_INCREF(&AdjacencyListGraphType); if (PyModule_AddObject(m, "AdjacencyListGraph", (PyObject*)&AdjacencyListGraphType) < 0) { Py_DECREF(&AdjacencyListGraphType); @@ -50,13 +69,6 @@ PyMODINIT_FUNC PyInit__graph(void) { return NULL; } - Py_INCREF(&AdjacencyListGraphNodeType); - if (PyModule_AddObject(m, "AdjacencyListGraphNode", (PyObject*)&AdjacencyListGraphNodeType) < 0) { - Py_DECREF(&AdjacencyListGraphNodeType); - Py_DECREF(m); - return NULL; - } - Py_INCREF(&AdjacencyMatrixGraphType); if (PyModule_AddObject(m, "AdjacencyMatrixGraph", 
(PyObject*)&AdjacencyMatrixGraphType) < 0) { Py_DECREF(&AdjacencyMatrixGraphType); diff --git a/pydatastructs/graphs/algorithms.py b/pydatastructs/graphs/algorithms.py index 9324b7278..6c2182fed 100644 --- a/pydatastructs/graphs/algorithms.py +++ b/pydatastructs/graphs/algorithms.py @@ -92,7 +92,7 @@ def breadth_first_search( return getattr(algorithms, func)( graph, source_node, operation, *args, **kwargs) else: - from pydatastructs.graphs._backend.cpp._algorithms import bfs_adjacency_list, bfs_adjacency_matrix + from pydatastructs.graphs._backend.cpp._graph import bfs_adjacency_list, bfs_adjacency_matrix if (graph._impl == "adjacency_list"): extra_args = args if args else () return bfs_adjacency_list(graph, source_node, operation, extra_args) @@ -349,7 +349,7 @@ def minimum_spanning_tree(graph, algorithm, **kwargs): %(algorithm, graph._impl)) return getattr(algorithms, func)(graph) else: - from pydatastructs.graphs._backend.cpp._algorithms import minimum_spanning_tree_prim_adjacency_list + from pydatastructs.graphs._backend.cpp._graph import minimum_spanning_tree_prim_adjacency_list if graph._impl == "adjacency_list" and algorithm == 'prim': return minimum_spanning_tree_prim_adjacency_list(graph) @@ -814,7 +814,7 @@ def shortest_paths(graph: Graph, algorithm: str, "finding shortest paths in graphs."%(algorithm)) return getattr(algorithms, func)(graph, source, target) else: - from pydatastructs.graphs._backend.cpp._algorithms import shortest_paths_dijkstra_adjacency_list + from pydatastructs.graphs._backend.cpp._graph import shortest_paths_dijkstra_adjacency_list if graph._impl == "adjacency_list" and algorithm == 'dijkstra': return shortest_paths_dijkstra_adjacency_list(graph, source, target) diff --git a/pydatastructs/graphs/meson.build b/pydatastructs/graphs/meson.build index 1b7e452b7..2cda248cc 100644 --- a/pydatastructs/graphs/meson.build +++ b/pydatastructs/graphs/meson.build @@ -20,18 +20,15 @@ py_include = include_directories('../utils/_backend/cpp') 
python.extension_module( '_graph', - '_backend/cpp/graph.cpp', + [ + '_backend/cpp/graph.cpp', + '_backend/cpp/algorithms.cpp', + '../utils/_backend/cpp/graph_utils.cpp', + ], include_directories: py_include, install: true, subdir: 'pydatastructs/graphs/_backend/cpp' ) -python.extension_module( - '_algorithms', - '_backend/cpp/algorithms.cpp', - include_directories: py_include, - install: true, - subdir: 'pydatastructs/graphs/_backend/cpp' -) subdir('tests') \ No newline at end of file diff --git a/pydatastructs/graphs/tests/test_algorithms.py b/pydatastructs/graphs/tests/test_algorithms.py index 04ebcccda..7dd2e1b78 100644 --- a/pydatastructs/graphs/tests/test_algorithms.py +++ b/pydatastructs/graphs/tests/test_algorithms.py @@ -6,7 +6,6 @@ from pydatastructs.utils.raises_util import raises from pydatastructs.utils.misc_util import AdjacencyListGraphNode, AdjacencyMatrixGraphNode from pydatastructs.graphs._backend.cpp import _graph -from pydatastructs.graphs._backend.cpp import _algorithms from pydatastructs.utils.misc_util import Backend def test_breadth_first_search(): diff --git a/pydatastructs/utils/_backend/cpp/AdjacencyListGraphNode.hpp b/pydatastructs/utils/_backend/cpp/AdjacencyListGraphNode.hpp index 4a41b0c3b..8eba3752d 100644 --- a/pydatastructs/utils/_backend/cpp/AdjacencyListGraphNode.hpp +++ b/pydatastructs/utils/_backend/cpp/AdjacencyListGraphNode.hpp @@ -79,11 +79,6 @@ static PyObject* AdjacencyListGraphNode_new(PyTypeObject* type, PyObject* args, for (Py_ssize_t i = 0; i < size; i++) { PyObject* node = PyList_GetItem(adjacency_list, i); - if (PyType_Ready(&AdjacencyListGraphNodeType) < 0) { - PyErr_SetString(PyExc_RuntimeError, "Failed to initialize AdjacencyListGraphNodeType"); - return NULL; - } - if (!PyObject_IsInstance(node, (PyObject*)&AdjacencyListGraphNodeType)) { PyErr_SetString(PyExc_TypeError, "Adjacency list must contain only AdjacencyListGraphNode instances"); return NULL; @@ -254,45 +249,5 @@ static PyMethodDef 
AdjacencyListGraphNode_methods[] = { {NULL} }; -inline PyTypeObject AdjacencyListGraphNodeType = { - /* tp_name */ PyVarObject_HEAD_INIT(NULL, 0) "AdjacencyListGraphNode", - /* tp_basicsize */ sizeof(AdjacencyListGraphNode), - /* tp_itemsize */ 0, - /* tp_dealloc */ (destructor)AdjacencyListGraphNode_dealloc, - /* tp_print */ 0, - /* tp_getattr */ 0, - /* tp_setattr */ 0, - /* tp_reserved */ 0, - /* tp_repr */ 0, - /* tp_as_number */ 0, - /* tp_as_sequence */ 0, - /* tp_as_mapping */ 0, - /* tp_hash */ 0, - /* tp_call */ 0, - /* tp_str */ (reprfunc)GraphNode_str, - /* tp_getattro */ 0, - /* tp_setattro */ 0, - /* tp_as_buffer */ 0, - /* tp_flags */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, - /* tp_doc */ "Node Data Structure for an Adjacency List Graph", - /* tp_traverse */ 0, - /* tp_clear */ 0, - /* tp_richcompare */ 0, - /* tp_weaklistoffset */ 0, - /* tp_iter */ 0, - /* tp_iternext */ 0, - /* tp_methods */ AdjacencyListGraphNode_methods, - /* tp_members */ AdjacencyListGraphNode_PyMemberDef, - /* tp_getset */ AdjacencyListGraphNode_getsetters, - /* tp_base */ &GraphNodeType, - /* tp_dict */ 0, - /* tp_descr_get */ 0, - /* tp_descr_set */ 0, - /* tp_dictoffset */ 0, - /* tp_init */ 0, - /* tp_alloc */ 0, - /* tp_new */ AdjacencyListGraphNode_new, -}; - #endif diff --git a/pydatastructs/utils/_backend/cpp/AdjacencyMatrixGraphNode.hpp b/pydatastructs/utils/_backend/cpp/AdjacencyMatrixGraphNode.hpp index f5aaf5c9f..f8c0d2148 100644 --- a/pydatastructs/utils/_backend/cpp/AdjacencyMatrixGraphNode.hpp +++ b/pydatastructs/utils/_backend/cpp/AdjacencyMatrixGraphNode.hpp @@ -28,44 +28,4 @@ static PyObject* AdjacencyMatrixGraphNode_new(PyTypeObject* type, PyObject* args return reinterpret_cast(self); } -inline PyTypeObject AdjacencyMatrixGraphNodeType = { - /* tp_name */ PyVarObject_HEAD_INIT(NULL, 0) "AdjacencyMatrixGraphNode", - /* tp_basicsize */ sizeof(AdjacencyMatrixGraphNode), - /* tp_itemsize */ 0, - /* tp_dealloc */ (destructor)AdjacencyMatrixGraphNode_dealloc, - 
/* tp_print */ 0, - /* tp_getattr */ 0, - /* tp_setattr */ 0, - /* tp_reserved */ 0, - /* tp_repr */ 0, - /* tp_as_number */ 0, - /* tp_as_sequence */ 0, - /* tp_as_mapping */ 0, - /* tp_hash */ 0, - /* tp_call */ 0, - /* tp_str */ 0, - /* tp_getattro */ 0, - /* tp_setattro */ 0, - /* tp_as_buffer */ 0, - /* tp_flags */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, - /* tp_doc */ "Node Data Structure for an Adjacency Matrix Graph", - /* tp_traverse */ 0, - /* tp_clear */ 0, - /* tp_richcompare */ 0, - /* tp_weaklistoffset */ 0, - /* tp_iter */ 0, - /* tp_iternext */ 0, - /* tp_methods */ 0, - /* tp_members */ 0, - /* tp_getset */ 0, - /* tp_base */ &GraphNodeType, - /* tp_dict */ 0, - /* tp_descr_get */ 0, - /* tp_descr_set */ 0, - /* tp_dictoffset */ 0, - /* tp_init */ 0, - /* tp_alloc */ 0, - /* tp_new */ AdjacencyMatrixGraphNode_new, -}; - #endif diff --git a/pydatastructs/utils/_backend/cpp/GraphEdge.hpp b/pydatastructs/utils/_backend/cpp/GraphEdge.hpp index 4008db17c..a1b5fa864 100644 --- a/pydatastructs/utils/_backend/cpp/GraphEdge.hpp +++ b/pydatastructs/utils/_backend/cpp/GraphEdge.hpp @@ -131,44 +131,4 @@ static PyGetSetDef GraphEdge_getsetters[] = { {NULL} }; -inline PyTypeObject GraphEdgeType = { - /* tp_name */ PyVarObject_HEAD_INIT(NULL, 0) "GraphEdge", - /* tp_basicsize */ sizeof(GraphEdge), - /* tp_itemsize */ 0, - /* tp_dealloc */ (destructor)GraphEdge_dealloc, - /* tp_print */ 0, - /* tp_getattr */ 0, - /* tp_setattr */ 0, - /* tp_reserved */ 0, - /* tp_repr */ 0, - /* tp_as_number */ 0, - /* tp_as_sequence */ 0, - /* tp_as_mapping */ 0, - /* tp_hash */ 0, - /* tp_call */ 0, - /* tp_str */ (reprfunc)GraphEdge_str, - /* tp_getattro */ 0, - /* tp_setattro */ 0, - /* tp_as_buffer */ 0, - /* tp_flags */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, - /* tp_doc */ "Data Structure for a Graph Edge", - /* tp_traverse */ 0, - /* tp_clear */ 0, - /* tp_richcompare */ 0, - /* tp_weaklistoffset */ 0, - /* tp_iter */ 0, - /* tp_iternext */ 0, - /* tp_methods */ 0, - 
/* tp_members */ 0, - /* tp_getset */ GraphEdge_getsetters, - /* tp_base */ 0, - /* tp_dict */ 0, - /* tp_descr_get */ 0, - /* tp_descr_set */ 0, - /* tp_dictoffset */ 0, - /* tp_init */ 0, - /* tp_alloc */ 0, - /* tp_new */ GraphEdge_new, -}; - #endif diff --git a/pydatastructs/utils/_backend/cpp/GraphNode.hpp b/pydatastructs/utils/_backend/cpp/GraphNode.hpp index 5ef706d52..17a0748fd 100644 --- a/pydatastructs/utils/_backend/cpp/GraphNode.hpp +++ b/pydatastructs/utils/_backend/cpp/GraphNode.hpp @@ -7,6 +7,8 @@ #include #include "Node.hpp" +extern PyTypeObject GraphNodeType; + enum class DataType { None, Int, @@ -204,44 +206,5 @@ static struct PyMemberDef GraphNode_PyMemberDef[] = { }; -static PyTypeObject GraphNodeType = { - /* tp_name */ PyVarObject_HEAD_INIT(NULL, 0) "GraphNode", - /* tp_basicsize */ sizeof(GraphNode), - /* tp_itemsize */ 0, - /* tp_dealloc */ (destructor) GraphNode_dealloc, - /* tp_print */ 0, - /* tp_getattr */ 0, - /* tp_setattr */ 0, - /* tp_reserved */ 0, - /* tp_repr */ 0, - /* tp_as_number */ 0, - /* tp_as_sequence */ 0, - /* tp_as_mapping */ 0, - /* tp_hash */ 0, - /* tp_call */ 0, - /* tp_str */ (reprfunc) GraphNode_str, - /* tp_getattro */ 0, - /* tp_setattro */ 0, - /* tp_as_buffer */ 0, - /* tp_flags */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, - /* tp_doc */ 0, - /* tp_traverse */ 0, - /* tp_clear */ 0, - /* tp_richcompare */ 0, - /* tp_weaklistoffset */ 0, - /* tp_iter */ 0, - /* tp_iternext */ 0, - /* tp_methods */ 0, - /* tp_members */ GraphNode_PyMemberDef, - /* tp_getset */ GraphNode_getsetters, - /* tp_base */ &PyBaseObject_Type, - /* tp_dict */ 0, - /* tp_descr_get */ 0, - /* tp_descr_set */ 0, - /* tp_dictoffset */ 0, - /* tp_init */ 0, - /* tp_alloc */ 0, - /* tp_new */ GraphNode_new, -}; #endif diff --git a/pydatastructs/utils/_backend/cpp/graph_utils.cpp b/pydatastructs/utils/_backend/cpp/graph_utils.cpp index d47390fc1..cf7ec1585 100644 --- a/pydatastructs/utils/_backend/cpp/graph_utils.cpp +++ 
b/pydatastructs/utils/_backend/cpp/graph_utils.cpp @@ -5,6 +5,167 @@ #include "GraphEdge.hpp" #include "graph_bindings.hpp" +PyTypeObject GraphNodeType = { + /* tp_name */ PyVarObject_HEAD_INIT(NULL, 0) "GraphNode", + /* tp_basicsize */ sizeof(GraphNode), + /* tp_itemsize */ 0, + /* tp_dealloc */ (destructor) GraphNode_dealloc, + /* tp_print */ 0, + /* tp_getattr */ 0, + /* tp_setattr */ 0, + /* tp_reserved */ 0, + /* tp_repr */ 0, + /* tp_as_number */ 0, + /* tp_as_sequence */ 0, + /* tp_as_mapping */ 0, + /* tp_hash */ 0, + /* tp_call */ 0, + /* tp_str */ (reprfunc) GraphNode_str, + /* tp_getattro */ 0, + /* tp_setattro */ 0, + /* tp_as_buffer */ 0, + /* tp_flags */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, + /* tp_doc */ 0, + /* tp_traverse */ 0, + /* tp_clear */ 0, + /* tp_richcompare */ 0, + /* tp_weaklistoffset */ 0, + /* tp_iter */ 0, + /* tp_iternext */ 0, + /* tp_methods */ 0, + /* tp_members */ GraphNode_PyMemberDef, + /* tp_getset */ GraphNode_getsetters, + /* tp_base */ &PyBaseObject_Type, + /* tp_dict */ 0, + /* tp_descr_get */ 0, + /* tp_descr_set */ 0, + /* tp_dictoffset */ 0, + /* tp_init */ 0, + /* tp_alloc */ 0, + /* tp_new */ GraphNode_new, +}; + +PyTypeObject AdjacencyListGraphNodeType = { + /* tp_name */ PyVarObject_HEAD_INIT(NULL, 0) "AdjacencyListGraphNode", + /* tp_basicsize */ sizeof(AdjacencyListGraphNode), + /* tp_itemsize */ 0, + /* tp_dealloc */ (destructor)AdjacencyListGraphNode_dealloc, + /* tp_print */ 0, + /* tp_getattr */ 0, + /* tp_setattr */ 0, + /* tp_reserved */ 0, + /* tp_repr */ 0, + /* tp_as_number */ 0, + /* tp_as_sequence */ 0, + /* tp_as_mapping */ 0, + /* tp_hash */ 0, + /* tp_call */ 0, + /* tp_str */ (reprfunc)GraphNode_str, + /* tp_getattro */ 0, + /* tp_setattro */ 0, + /* tp_as_buffer */ 0, + /* tp_flags */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, + /* tp_doc */ "Node Data Structure for an Adjacency List Graph", + /* tp_traverse */ 0, + /* tp_clear */ 0, + /* tp_richcompare */ 0, + /* tp_weaklistoffset */ 0, + /* 
tp_iter */ 0, + /* tp_iternext */ 0, + /* tp_methods */ AdjacencyListGraphNode_methods, + /* tp_members */ AdjacencyListGraphNode_PyMemberDef, + /* tp_getset */ AdjacencyListGraphNode_getsetters, + /* tp_base */ &GraphNodeType, + /* tp_dict */ 0, + /* tp_descr_get */ 0, + /* tp_descr_set */ 0, + /* tp_dictoffset */ 0, + /* tp_init */ 0, + /* tp_alloc */ 0, + /* tp_new */ AdjacencyListGraphNode_new, +}; + +PyTypeObject AdjacencyMatrixGraphNodeType = { + /* tp_name */ PyVarObject_HEAD_INIT(NULL, 0) "AdjacencyMatrixGraphNode", + /* tp_basicsize */ sizeof(AdjacencyMatrixGraphNode), + /* tp_itemsize */ 0, + /* tp_dealloc */ (destructor)AdjacencyMatrixGraphNode_dealloc, + /* tp_print */ 0, + /* tp_getattr */ 0, + /* tp_setattr */ 0, + /* tp_reserved */ 0, + /* tp_repr */ 0, + /* tp_as_number */ 0, + /* tp_as_sequence */ 0, + /* tp_as_mapping */ 0, + /* tp_hash */ 0, + /* tp_call */ 0, + /* tp_str */ 0, + /* tp_getattro */ 0, + /* tp_setattro */ 0, + /* tp_as_buffer */ 0, + /* tp_flags */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, + /* tp_doc */ "Node Data Structure for an Adjacency Matrix Graph", + /* tp_traverse */ 0, + /* tp_clear */ 0, + /* tp_richcompare */ 0, + /* tp_weaklistoffset */ 0, + /* tp_iter */ 0, + /* tp_iternext */ 0, + /* tp_methods */ 0, + /* tp_members */ 0, + /* tp_getset */ 0, + /* tp_base */ &GraphNodeType, + /* tp_dict */ 0, + /* tp_descr_get */ 0, + /* tp_descr_set */ 0, + /* tp_dictoffset */ 0, + /* tp_init */ 0, + /* tp_alloc */ 0, + /* tp_new */ AdjacencyMatrixGraphNode_new, +}; + +PyTypeObject GraphEdgeType = { + /* tp_name */ PyVarObject_HEAD_INIT(NULL, 0) "GraphEdge", + /* tp_basicsize */ sizeof(GraphEdge), + /* tp_itemsize */ 0, + /* tp_dealloc */ (destructor)GraphEdge_dealloc, + /* tp_print */ 0, + /* tp_getattr */ 0, + /* tp_setattr */ 0, + /* tp_reserved */ 0, + /* tp_repr */ 0, + /* tp_as_number */ 0, + /* tp_as_sequence */ 0, + /* tp_as_mapping */ 0, + /* tp_hash */ 0, + /* tp_call */ 0, + /* tp_str */ (reprfunc)GraphEdge_str, + /* 
tp_getattro */ 0, + /* tp_setattro */ 0, + /* tp_as_buffer */ 0, + /* tp_flags */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, + /* tp_doc */ "Data Structure for a Graph Edge", + /* tp_traverse */ 0, + /* tp_clear */ 0, + /* tp_richcompare */ 0, + /* tp_weaklistoffset */ 0, + /* tp_iter */ 0, + /* tp_iternext */ 0, + /* tp_methods */ 0, + /* tp_members */ 0, + /* tp_getset */ GraphEdge_getsetters, + /* tp_base */ 0, + /* tp_dict */ 0, + /* tp_descr_get */ 0, + /* tp_descr_set */ 0, + /* tp_dictoffset */ 0, + /* tp_init */ 0, + /* tp_alloc */ 0, + /* tp_new */ GraphEdge_new, +}; + + static struct PyModuleDef graph_utils_struct = { PyModuleDef_HEAD_INIT, "_graph_utils", diff --git a/pydatastructs/utils/meson.build b/pydatastructs/utils/meson.build index 3288115b1..71ac945f2 100644 --- a/pydatastructs/utils/meson.build +++ b/pydatastructs/utils/meson.build @@ -17,14 +17,10 @@ python.install_sources( python.extension_module( '_nodes', - '_backend/cpp/nodes.cpp', - install: true, - subdir: 'pydatastructs/utils/_backend/cpp' -) - -python.extension_module( - '_graph_utils', - '_backend/cpp/graph_utils.cpp', + [ + '_backend/cpp/nodes.cpp', + '_backend/cpp/graph_utils.cpp', # ADD THIS LINE + ], install: true, subdir: 'pydatastructs/utils/_backend/cpp' ) diff --git a/pydatastructs/utils/misc_util.py b/pydatastructs/utils/misc_util.py index 3672c58b9..529fc58fa 100644 --- a/pydatastructs/utils/misc_util.py +++ b/pydatastructs/utils/misc_util.py @@ -1,6 +1,7 @@ import math, pydatastructs from enum import Enum -from pydatastructs.utils._backend.cpp import _nodes, _graph_utils +from pydatastructs.utils._backend.cpp import _nodes + __all__ = [ 'TreeNode', @@ -411,7 +412,8 @@ def __new__(cls, name, data=None, adjacency_list=[], else [] return obj else: - return _graph_utils.AdjacencyListGraphNode(name, data, adjacency_list) + from pydatastructs.graphs._backend.cpp import _graph + return _graph.AdjacencyListGraphNode(name, data, adjacency_list) def add_adjacent_node(self, name, 
data=None): """ @@ -468,7 +470,8 @@ def __new__(cls, name, data=None, obj._impl = 'adjacency_matrix' return obj else: - return _graph_utils.AdjacencyMatrixGraphNode(str(name), data) + from pydatastructs.graphs._backend.cpp import _graph + return _graph.AdjacencyMatrixGraphNode(str(name), data) class GraphEdge(object): """ @@ -499,7 +502,8 @@ def __new__(cls, node1, node2, value=None, obj.value = value return obj else: - return _graph_utils.GraphEdge(node1, node2, value) + from pydatastructs.graphs._backend.cpp import _graph + return _graph.GraphEdge(node1, node2, value) def __str__(self): return str((self.source.name, self.target.name)) From cc1f9c7590efb04ac2beae0664c1b57b4583f20c Mon Sep 17 00:00:00 2001 From: Prerak Singh Date: Sat, 4 Oct 2025 21:08:57 +0530 Subject: [PATCH 30/47] bug fix --- .github/workflows/ci.yml | 2 +- .../_backend/cpp/algorithms/llvm_algorithms.py | 5 +---- pydatastructs/linear_data_structures/meson.build | 4 +++- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c1b995530..22c78c4aa 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -154,7 +154,7 @@ jobs: spin build -v - name: Run tests run: | - spin test -v + python -m pytest --import-mode=importlib pydatastructs - name: Build Documentation run: | diff --git a/pydatastructs/linear_data_structures/_backend/cpp/algorithms/llvm_algorithms.py b/pydatastructs/linear_data_structures/_backend/cpp/algorithms/llvm_algorithms.py index 397fec152..24b14609d 100644 --- a/pydatastructs/linear_data_structures/_backend/cpp/algorithms/llvm_algorithms.py +++ b/pydatastructs/linear_data_structures/_backend/cpp/algorithms/llvm_algorithms.py @@ -30,10 +30,7 @@ def _ensure_target_machine(): binding.initialize_all_asmprinters() target = binding.Target.from_default_triple() - _target_machine = target.create_target_machine( - opt=3, - features="+sse,+sse2,+sse3,+ssse3,+sse4.1,+sse4.2,+avx,+avx2" - ) + _target_machine = 
target.create_target_machine(opt=3) except Exception as e: raise RuntimeError(f"Failed to initialize LLVM target machine: {e}") diff --git a/pydatastructs/linear_data_structures/meson.build b/pydatastructs/linear_data_structures/meson.build index fca4004cc..59f9acc2e 100644 --- a/pydatastructs/linear_data_structures/meson.build +++ b/pydatastructs/linear_data_structures/meson.build @@ -26,7 +26,9 @@ python.extension_module( '_algorithms', '_backend/cpp/algorithms/algorithms.cpp', install: true, - subdir: 'pydatastructs/linear_data_structures/_backend/cpp' + subdir: 'pydatastructs/linear_data_structures/_backend/cpp', + cpp_args: ['-std=c++17'], + override_options: ['cpp_std=c++17'], ) subdir('tests') From e60a23493071fb2c30ad72afbd67b6239a39c8d0 Mon Sep 17 00:00:00 2001 From: Prerak Singh Date: Sat, 4 Oct 2025 21:11:04 +0530 Subject: [PATCH 31/47] bug fix --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 22c78c4aa..c1b995530 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -154,7 +154,7 @@ jobs: spin build -v - name: Run tests run: | - python -m pytest --import-mode=importlib pydatastructs + spin test -v - name: Build Documentation run: | From 3c185c5496fd3fef05bb7f8ded7d0c773f221033 Mon Sep 17 00:00:00 2001 From: Prerak Singh Date: Sat, 4 Oct 2025 21:39:42 +0530 Subject: [PATCH 32/47] bug fix --- pydatastructs/utils/meson.build | 1 - 1 file changed, 1 deletion(-) diff --git a/pydatastructs/utils/meson.build b/pydatastructs/utils/meson.build index 71ac945f2..b3c9314ed 100644 --- a/pydatastructs/utils/meson.build +++ b/pydatastructs/utils/meson.build @@ -19,7 +19,6 @@ python.extension_module( '_nodes', [ '_backend/cpp/nodes.cpp', - '_backend/cpp/graph_utils.cpp', # ADD THIS LINE ], install: true, subdir: 'pydatastructs/utils/_backend/cpp' From 1353b412fb2c793ab95dbdebe0ffd98124458d47 Mon Sep 17 00:00:00 2001 From: Prerak Singh Date: Sat, 4 
Oct 2025 23:56:42 +0530 Subject: [PATCH 33/47] bug fix --- .../_backend/cpp/algorithms/quadratic_time_sort.hpp | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/pydatastructs/linear_data_structures/_backend/cpp/algorithms/quadratic_time_sort.hpp b/pydatastructs/linear_data_structures/_backend/cpp/algorithms/quadratic_time_sort.hpp index 0e6b32d07..210382455 100644 --- a/pydatastructs/linear_data_structures/_backend/cpp/algorithms/quadratic_time_sort.hpp +++ b/pydatastructs/linear_data_structures/_backend/cpp/algorithms/quadratic_time_sort.hpp @@ -244,9 +244,15 @@ static PyObject* bubble_sort_llvm(PyObject* self, PyObject* args, PyObject* kwds PyErr_Clear(); } Py_DECREF(sys_path); + if (!mod) { + return NULL; + } PyObject* fn = PyObject_GetAttrString(mod, "get_bubble_sort_ptr"); Py_DECREF(mod); + if (!fn) { + return NULL; + } PyObject* arg = PyUnicode_FromString(dtype_str); if (!arg) { @@ -257,6 +263,9 @@ static PyObject* bubble_sort_llvm(PyObject* self, PyObject* args, PyObject* kwds PyObject* addr_obj = PyObject_CallFunctionObjArgs(fn, arg, NULL); Py_DECREF(fn); Py_DECREF(arg); + if (!addr_obj) { + return NULL; + } return addr_obj; }; From 0146fef454e078d5e2f48ee089215db5f251a664 Mon Sep 17 00:00:00 2001 From: Prerak Singh Date: Sun, 5 Oct 2025 12:46:55 +0530 Subject: [PATCH 34/47] bug fix --- pydatastructs/linear_data_structures/meson.build | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pydatastructs/linear_data_structures/meson.build b/pydatastructs/linear_data_structures/meson.build index 59f9acc2e..a797ded66 100644 --- a/pydatastructs/linear_data_structures/meson.build +++ b/pydatastructs/linear_data_structures/meson.build @@ -15,6 +15,12 @@ python.install_sources( subdir: 'pydatastructs/linear_data_structures/_backend' ) +python.install_sources( + ['_backend/cpp/algorithms/llvm_algorithms.py'], + subdir: 'pydatastructs/linear_data_structures/_backend/cpp/algorithms' +) + + python.extension_module( '_arrays', '_backend/cpp/arrays/arrays.cpp', 
From a583e0c243876597ba1ba9790c13298cb0c01db6 Mon Sep 17 00:00:00 2001 From: Prerak Singh Date: Sun, 5 Oct 2025 12:47:21 +0530 Subject: [PATCH 35/47] bug fix --- pydatastructs/linear_data_structures/meson.build | 1 - 1 file changed, 1 deletion(-) diff --git a/pydatastructs/linear_data_structures/meson.build b/pydatastructs/linear_data_structures/meson.build index a797ded66..439dd7455 100644 --- a/pydatastructs/linear_data_structures/meson.build +++ b/pydatastructs/linear_data_structures/meson.build @@ -20,7 +20,6 @@ python.install_sources( subdir: 'pydatastructs/linear_data_structures/_backend/cpp/algorithms' ) - python.extension_module( '_arrays', '_backend/cpp/arrays/arrays.cpp', From 01c71f09df488e117fb61fca1e7dc399803704a3 Mon Sep 17 00:00:00 2001 From: Prerak Singh Date: Fri, 10 Oct 2025 09:23:18 +0530 Subject: [PATCH 36/47] upgraded python versions --- .github/workflows/ci.yml | 32 +++++++++++++++++--------------- pyproject.toml | 2 +- setup.py | 2 +- 3 files changed, 19 insertions(+), 17 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c1b995530..fe6dd5149 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -7,23 +7,17 @@ on: branches: [main] jobs: - test-ubuntu-py38: - runs-on: ${{matrix.os}} + test-ubuntu-py39-coverage: + runs-on: ubuntu-latest timeout-minutes: 20 - strategy: - fail-fast: false - matrix: - os: [ubuntu-latest] - python-version: - - "3.8" steps: - uses: actions/checkout@v3 - - name: Set up Python ${{ matrix.python-version }} + - name: Set up Python 3.9 uses: actions/setup-python@v4 with: - python-version: ${{ matrix.python-version }} + python-version: "3.9" - name: Upgrade pip version run: | @@ -33,6 +27,7 @@ jobs: run: | python -m pip install -r requirements.txt python -m pip install -r docs/requirements.txt + python -m pip install spin - name: Install lcov run: | @@ -45,7 +40,7 @@ jobs: CFLAGS: "--coverage" run: | spin build -v - # coverage tests + - name: Run tests run: | spin test -v 
@@ -75,7 +70,7 @@ jobs: run: | sphinx-build -b html docs/source/ docs/build/html - test-ubuntu-py39-py310: + test-ubuntu-py39-py310-py311: runs-on: ${{matrix.os}} timeout-minutes: 20 strategy: @@ -85,6 +80,7 @@ jobs: python-version: - "3.9" - "3.10" + - "3.11" steps: - uses: actions/checkout@v3 @@ -102,6 +98,7 @@ jobs: run: | python -m pip install -r requirements.txt python -m pip install -r docs/requirements.txt + python -m pip install spin - name: Build package env: @@ -125,9 +122,9 @@ jobs: matrix: os: [macos-latest] python-version: - - "3.8" - "3.9" - "3.10" + - "3.11" steps: - uses: actions/checkout@v3 @@ -145,6 +142,7 @@ jobs: run: | python -m pip install -r requirements.txt python -m pip install -r docs/requirements.txt + python -m pip install spin - name: Build package env: @@ -152,6 +150,7 @@ jobs: CXXFLAGS: "-std=c++17" run: | spin build -v + - name: Run tests run: | spin test -v @@ -168,7 +167,9 @@ jobs: matrix: os: [windows-latest] python-version: - - "3.8" + - "3.9" + - "3.10" + - "3.11" steps: - uses: actions/checkout@v3 @@ -184,7 +185,7 @@ jobs: update-conda: true python-version: ${{ matrix.python-version }} conda-channels: anaconda, conda-forge - # - run: conda --version # This fails due to unknown reasons + - run: which python - name: Upgrade pip version @@ -195,6 +196,7 @@ jobs: run: | python -m pip install -r requirements.txt python -m pip install -r docs/requirements.txt + python -m pip install spin - name: Build package env: diff --git a/pyproject.toml b/pyproject.toml index c2ee41443..e8adc6578 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,7 @@ name = "pydatastructs" version = "1.0.1.dev0" description = "Data structures and algorithms implemented using Python and C++" readme = "README.md" -requires-python = ">=3.8" +requires-python = ">=3.9" [tool.spin] package = "pydatastructs" diff --git a/setup.py b/setup.py index bbe3faef0..615e8d89d 100644 --- a/setup.py +++ b/setup.py @@ -34,6 +34,6 @@ "Topic :: Scientific/Engineering :: 
Information Analysis", "Topic :: Software Development :: Libraries" ], - python_requires='>=3.5', + python_requires='>=3.9', ext_modules=extensions ) From 33ad75d3033f3ec4ba40bb754f35722148f8115e Mon Sep 17 00:00:00 2001 From: Prerak Singh Date: Fri, 10 Oct 2025 09:39:25 +0530 Subject: [PATCH 37/47] bug fix --- .github/workflows/ci.yml | 35 +++++++++-------------------------- 1 file changed, 9 insertions(+), 26 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index fe6dd5149..92b51b89d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -160,37 +160,23 @@ jobs: sphinx-build -b html docs/source/ docs/build/html test-windows: - runs-on: ${{matrix.os}} - timeout-minutes: 20 + runs-on: windows-latest strategy: fail-fast: false matrix: - os: [windows-latest] - python-version: - - "3.9" - - "3.10" - - "3.11" + python-version: ["3.9", "3.10", "3.11"] steps: - uses: actions/checkout@v3 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python-version }} - - - name: Setup conda - uses: s-weigand/setup-conda@v1 + - uses: actions/setup-python@v4 with: - update-conda: true python-version: ${{ matrix.python-version }} - conda-channels: anaconda, conda-forge - - run: which python + - uses: ilammy/msvc-dev-cmd@v1 - - name: Upgrade pip version - run: | - python -m pip install --upgrade pip + - name: Upgrade pip + run: python -m pip install --upgrade pip - name: Install requirements run: | @@ -201,13 +187,10 @@ jobs: - name: Build package env: CL: "/std:c++17" - run: | - spin build -v + run: spin build -v - name: Run tests - run: | - spin test -v + run: spin test -v - name: Build Documentation - run: | - sphinx-build -b html docs/source/ docs/build/html + run: sphinx-build -b html docs/source/ docs/build/html From b901ed22f282c4748fd5f69dc6db6e302fe98cbb Mon Sep 17 00:00:00 2001 From: Prerak Singh Date: Fri, 10 Oct 2025 09:44:24 +0530 Subject: [PATCH 38/47] bug 
fix --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 92b51b89d..0c6bb3610 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -186,7 +186,7 @@ jobs: - name: Build package env: - CL: "/std:c++17" + CL: "/std:c++17 /Zc:strictStrings-" run: spin build -v - name: Run tests From 421702a359db92875858dddcb8a77220c2378555 Mon Sep 17 00:00:00 2001 From: Prerak Singh Date: Fri, 10 Oct 2025 09:48:02 +0530 Subject: [PATCH 39/47] bug fix --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0c6bb3610..3a9b066d5 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -186,7 +186,7 @@ jobs: - name: Build package env: - CL: "/std:c++17 /Zc:strictStrings-" + CXXFLAGS: "/std:c++17 /Zc:strictStrings-" run: spin build -v - name: Run tests From ea3ffda8f87e532f4d3be33af2811c77d7860971 Mon Sep 17 00:00:00 2001 From: Prerak Singh Date: Fri, 10 Oct 2025 09:51:40 +0530 Subject: [PATCH 40/47] bug fix --- .github/workflows/ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3a9b066d5..1289c9a63 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -187,6 +187,7 @@ jobs: - name: Build package env: CXXFLAGS: "/std:c++17 /Zc:strictStrings-" + CL: "/std:c++17 /Zc:strictStrings- /MD" run: spin build -v - name: Run tests From 4bc3aec2af5790816612fd787a479b8e96f62337 Mon Sep 17 00:00:00 2001 From: Prerak Singh Date: Fri, 10 Oct 2025 09:55:40 +0530 Subject: [PATCH 41/47] bug fix --- .github/workflows/ci.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1289c9a63..7209d7d5a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -186,8 +186,9 @@ jobs: - name: Build package env: - CXXFLAGS: 
"/std:c++17 /Zc:strictStrings-" - CL: "/std:c++17 /Zc:strictStrings- /MD" + CFLAGS: "/MD" + CXXFLAGS: "/std:c++17 /MD /Zc:strictStrings-" + CL: "/std:c++17 /MD /Zc:strictStrings-" run: spin build -v - name: Run tests From 6727abad28caa86f9c98cdf07de4901c7786eebe Mon Sep 17 00:00:00 2001 From: Prerak Singh Date: Fri, 10 Oct 2025 10:19:31 +0530 Subject: [PATCH 42/47] bug fix --- meson.build | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/meson.build b/meson.build index 46b3a7b1d..00c1b9e98 100644 --- a/meson.build +++ b/meson.build @@ -2,6 +2,10 @@ project('pydatastructs', 'cpp', version : '1.0.1-dev', default_options : ['cpp_std=c++17']) +if host_machine.system() == 'windows' + add_project_arguments('/MD', language: 'cpp') +endif + python = import('python').find_installation(pure: false) subdir('pydatastructs') From 5a9240c5f95ebe68b770e4ddf8080df91d9169ce Mon Sep 17 00:00:00 2001 From: Prerak Singh Date: Sat, 11 Oct 2025 13:41:57 +0530 Subject: [PATCH 43/47] bug fix --- meson.build | 3 --- pydatastructs/utils/meson.build | 6 ++++++ 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/meson.build b/meson.build index 00c1b9e98..86aa0c2cb 100644 --- a/meson.build +++ b/meson.build @@ -2,9 +2,6 @@ project('pydatastructs', 'cpp', version : '1.0.1-dev', default_options : ['cpp_std=c++17']) -if host_machine.system() == 'windows' - add_project_arguments('/MD', language: 'cpp') -endif python = import('python').find_installation(pure: false) diff --git a/pydatastructs/utils/meson.build b/pydatastructs/utils/meson.build index b3c9314ed..816afa4ec 100644 --- a/pydatastructs/utils/meson.build +++ b/pydatastructs/utils/meson.build @@ -15,11 +15,17 @@ python.install_sources( subdir: 'pydatastructs/utils/_backend' ) +cpp_args = [] +if host_machine.system() == 'windows' + cpp_args += ['/MD'] +endif + python.extension_module( '_nodes', [ '_backend/cpp/nodes.cpp', ], + cpp_args: cpp_args, install: true, subdir: 'pydatastructs/utils/_backend/cpp' ) From 
30c6493bb076ae3436a4c1970fac1039e0a30f0d Mon Sep 17 00:00:00 2001 From: Prerak Singh Date: Sat, 11 Oct 2025 13:48:15 +0530 Subject: [PATCH 44/47] bug fix --- pydatastructs/utils/_backend/cpp/nodes.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/pydatastructs/utils/_backend/cpp/nodes.cpp b/pydatastructs/utils/_backend/cpp/nodes.cpp index 4e50d6966..94951285c 100644 --- a/pydatastructs/utils/_backend/cpp/nodes.cpp +++ b/pydatastructs/utils/_backend/cpp/nodes.cpp @@ -11,7 +11,6 @@ static struct PyModuleDef nodes_struct = { }; PyMODINIT_FUNC PyInit__nodes(void) { - Py_Initialize(); PyObject *nodes = PyModule_Create(&nodes_struct); if (PyType_Ready(&NodeType) < 0) { From 798b1ba11c538a74e2c39ea07bbe7f89147b00a5 Mon Sep 17 00:00:00 2001 From: Prerak Singh Date: Sat, 11 Oct 2025 13:56:51 +0530 Subject: [PATCH 45/47] bug fix --- pydatastructs/utils/_backend/cpp/utils.hpp | 35 +++++++++++++++++++--- 1 file changed, 31 insertions(+), 4 deletions(-) diff --git a/pydatastructs/utils/_backend/cpp/utils.hpp b/pydatastructs/utils/_backend/cpp/utils.hpp index 36d323b9a..5e41f3ad9 100644 --- a/pydatastructs/utils/_backend/cpp/utils.hpp +++ b/pydatastructs/utils/_backend/cpp/utils.hpp @@ -6,10 +6,37 @@ #include #include -static PyObject *PyZero = PyLong_FromLong(0); -static PyObject *PyOne = PyLong_FromLong(1); -static PyObject *PyTwo = PyLong_FromLong(2); -static PyObject *PyThree = PyLong_FromLong(3); +static PyObject* get_PyZero() { + static PyObject* PyZero = nullptr; + if (PyZero == nullptr) { + PyZero = PyLong_FromLong(0); + } + return PyZero; +} + +static PyObject* get_PyOne() { + static PyObject* PyOne = nullptr; + if (PyOne == nullptr) { + PyOne = PyLong_FromLong(1); + } + return PyOne; +} + +static PyObject* get_PyTwo() { + static PyObject* PyTwo = nullptr; + if (PyTwo == nullptr) { + PyTwo = PyLong_FromLong(2); + } + return PyTwo; +} + +static PyObject* get_PyThree() { + static PyObject* PyThree = nullptr; + if (PyThree == nullptr) { + PyThree = PyLong_FromLong(3); + 
} + return PyThree; +} static const char* _encoding = "utf-8"; static const char* _invalid_char = ""; From a2804e0ffec7fbd45520cb9a9e1eadd90eb1725f Mon Sep 17 00:00:00 2001 From: Prerak Singh Date: Sat, 11 Oct 2025 14:02:13 +0530 Subject: [PATCH 46/47] bug fix --- pydatastructs/utils/_backend/cpp/utils.hpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pydatastructs/utils/_backend/cpp/utils.hpp b/pydatastructs/utils/_backend/cpp/utils.hpp index 5e41f3ad9..511c224c6 100644 --- a/pydatastructs/utils/_backend/cpp/utils.hpp +++ b/pydatastructs/utils/_backend/cpp/utils.hpp @@ -37,6 +37,10 @@ static PyObject* get_PyThree() { } return PyThree; } +#define PyZero get_PyZero() +#define PyOne get_PyOne() +#define PyTwo get_PyTwo() +#define PyThree get_PyThree() static const char* _encoding = "utf-8"; static const char* _invalid_char = ""; From c2a711108df51c7f12313b04dc0dac7483c19c77 Mon Sep 17 00:00:00 2001 From: Prerak Singh Date: Sat, 11 Oct 2025 14:10:14 +0530 Subject: [PATCH 47/47] bug fix --- pydatastructs/graphs/tests/test_adjacency_list.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pydatastructs/graphs/tests/test_adjacency_list.py b/pydatastructs/graphs/tests/test_adjacency_list.py index 3a9cdb14f..2544082b6 100644 --- a/pydatastructs/graphs/tests/test_adjacency_list.py +++ b/pydatastructs/graphs/tests/test_adjacency_list.py @@ -62,7 +62,7 @@ def test_adjacency_list(): assert g2.num_edges() == 3 assert g2.num_vertices() == 3 neighbors = g2.neighbors('v_4') - assert neighbors == [v_6, v_5] + assert set(neighbors) == {v_6, v_5} v = AdjacencyListGraphNode('v', 4, backend = Backend.CPP) g2.add_vertex(v) g2.add_edge('v_4', 'v', 0)