diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 633444494..eb4c0c035 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -3,7 +3,7 @@
Some tips for you:
1. If this is your first time to submit PR, please read the
- [contributing guidelines](https://github.com/apache/incubator-hugegraph-doc/blob/master/contribution.md)
+ [contributing guidelines](https://github.com/apache/hugegraph-doc/blob/master/contribution.md)
2. If a PR fix/close an issue, type the message "close xxx" below (Remember to update both EN & CN doc)
@@ -15,4 +15,3 @@
- close #xxx
-
diff --git a/.github/workflows/hugo.yml b/.github/workflows/hugo.yml
index 01fbd69c8..17a1e1858 100644
--- a/.github/workflows/hugo.yml
+++ b/.github/workflows/hugo.yml
@@ -40,6 +40,9 @@ jobs:
restore-keys: |
${{ runner.os }}-hugomod-
+ - name: Check Links
+ run: bash dist/validate-links.sh
+
- name: Build Site (minify)
run: npm i && hugo --minify
diff --git a/.github/workflows/validate-release.yml b/.github/workflows/validate-release.yml
index 414602208..f9a9b9221 100644
--- a/.github/workflows/validate-release.yml
+++ b/.github/workflows/validate-release.yml
@@ -25,7 +25,7 @@ jobs:
runs-on: ${{ matrix.os }}
env:
SCRIPT_PATH: hugegraph-dist/scripts/
- URL_PREFIX: https://dist.apache.org/repos/dist/dev/incubator/hugegraph/
+ URL_PREFIX: https://dist.apache.org/repos/dist/dev/hugegraph/
USER: ${{ inputs.gpg_user }}
# TODO: parse version from the running branch name & also adapt the input version
RELEASE_VERSION: ''
@@ -70,6 +70,10 @@ jobs:
if [[ ${{ matrix.os }} =~ "ubuntu" ]]; then
sudo apt-get install -y subversion
fi
+ if ! svn ls "${URL_PREFIX}/${{ inputs.release_version }}/" >/dev/null 2>&1; then
+ echo "Release path not found: ${URL_PREFIX}/${{ inputs.release_version }}/" && exit 1
+ fi
+ echo "Using SVN prefix: ${URL_PREFIX}"
rm -rf dist/${{ inputs.release_version }}
svn co ${URL_PREFIX}/${{ inputs.release_version }} dist/${{ inputs.release_version }}
@@ -80,7 +84,7 @@ jobs:
shasum --version 1>/dev/null || exit
gpg --version 1>/dev/null || exit
- wget https://downloads.apache.org/incubator/hugegraph/KEYS || exit
+ wget https://downloads.apache.org/hugegraph/KEYS || exit
echo "Import KEYS:" && gpg --import KEYS
# TODO: how to trust all public keys in gpg list, currently only trust the first one
echo -e "5\ny\n" | gpg --batch --command-fd 0 --edit-key $USER trust
@@ -110,25 +114,25 @@ jobs:
for i in *src.tar.gz; do
echo "$i"
- # 4.1 check the directory name include "incubating"
- if [[ ! "$i" =~ "incubating" ]]; then
- echo "The package name $i should include incubating" && exit 1
+ # 4.1 package naming should follow post-graduation TLP naming
+ if [[ ! "$i" =~ ^apache-hugegraph ]]; then
+ echo "The package name $i should start with apache-hugegraph" && exit 1
+ fi
+ if [[ "$i" =~ "incubating" ]]; then
+ echo "The package name $i should not contain incubating in post-graduation releases" && exit 1
fi
tar xzvf "$i" || exit
pushd "$(basename "$i" .tar.gz)" || exit
echo "Start to check the package content: $(basename "$i" .tar.gz)"
- # 4.2 check the directory include "NOTICE" and "LICENSE" and "DISCLAIMER" file
+ # 4.2 check the directory include "NOTICE" and "LICENSE" file
if [[ ! -f "LICENSE" ]]; then
echo "The package should include LICENSE file" && exit 1
fi
if [[ ! -f "NOTICE" ]]; then
echo "The package should include NOTICE file" && exit 1
fi
- if [[ ! -f "DISCLAIMER" ]]; then
- echo "The package should include DISCLAIMER file" && exit 1
- fi
# 4.3: ensure doesn't contains ASF CATEGORY X License dependencies in LICENSE and NOTICE files
COUNT=$(grep -E "$CATEGORY_X" LICENSE NOTICE | wc -l)
@@ -191,7 +195,7 @@ jobs:
cd dist/${{ inputs.release_version }} || exit
ls -lh
- pushd ./*hugegraph-incubating*src/hugegraph-server/*hugegraph*${{ inputs.release_version }} || exit
+ pushd ./*hugegraph*${{ inputs.release_version }}*src/hugegraph-server/*hugegraph-server*${{ inputs.release_version }}* || exit
bin/init-store.sh || exit
sleep 3
bin/start-hugegraph.sh || exit
@@ -233,7 +237,7 @@ jobs:
popd || exit
popd || exit
# stop server
- pushd ./*hugegraph-incubating*src/hugegraph-server/*hugegraph*${{ inputs.release_version }} || exit
+ pushd ./*hugegraph*${{ inputs.release_version }}*src/hugegraph-server/*hugegraph-server*${{ inputs.release_version }}* || exit
bin/stop-hugegraph.sh || exit
popd || exit
@@ -253,9 +257,12 @@ jobs:
echo "$i"
- # 7.1 check the directory name include "incubating"
- if [[ ! "$i" =~ "incubating" ]]; then
- echo "The package name $i should include incubating" && exit 1
+ # 7.1 package naming should follow post-graduation TLP naming
+ if [[ ! "$i" =~ ^apache-hugegraph ]]; then
+ echo "The package name $i should start with apache-hugegraph" && exit 1
+ fi
+ if [[ "$i" =~ "incubating" ]]; then
+ echo "The package name $i should not contain incubating in post-graduation releases" && exit 1
fi
tar xzvf "$i" || exit
@@ -263,16 +270,13 @@ jobs:
ls -lh
echo "Start to check the package content: $(basename "$i" .tar.gz)"
- # 7.2 check root dir include "NOTICE"/"LICENSE"/"DISCLAIMER" & "licenses" dir
+ # 7.2 check root dir include "NOTICE"/"LICENSE" & "licenses" dir
if [[ ! -f "LICENSE" ]]; then
echo "The package should include LICENSE file" && exit 1
fi
if [[ ! -f "NOTICE" ]]; then
echo "The package should include NOTICE file" && exit 1
fi
- if [[ ! -f "DISCLAIMER" ]]; then
- echo "The package should include DISCLAIMER file" && exit 1
- fi
if [[ ! -d "licenses" ]]; then
echo "The package should include licenses dir" && exit 1
fi
@@ -302,7 +306,7 @@ jobs:
cd dist/${{ inputs.release_version }} || exit
# TODO: run pd & store
- pushd ./*hugegraph-incubating*${{ inputs.release_version }}/*hugegraph-server-incubating*${{ inputs.release_version }} || exit
+ pushd ./*hugegraph*${{ inputs.release_version }}*/*hugegraph-server*${{ inputs.release_version }}* || exit
bin/init-store.sh || exit
sleep 3
bin/start-hugegraph.sh || exit
@@ -341,7 +345,7 @@ jobs:
popd || exit
# stop server
- pushd ./*hugegraph-incubating*${{ inputs.release_version }}/*hugegraph-server-incubating*${{ inputs.release_version }} || exit
+ pushd ./*hugegraph*${{ inputs.release_version }}*/*hugegraph-server*${{ inputs.release_version }}* || exit
bin/stop-hugegraph.sh || exit
popd || exit
diff --git a/AGENTS.md b/AGENTS.md
index 9108afe52..c4a054515 100644
--- a/AGENTS.md
+++ b/AGENTS.md
@@ -1,171 +1,110 @@
-# AI Development Agent Instructions
+# AGENTS.md
This file provides guidance to AI coding assistants (Claude Code, Cursor, GitHub Copilot, etc.) when working with code in this repository.
## Project Overview
-This is the **Apache HugeGraph documentation website** repository (`hugegraph-doc`), built with Hugo static site generator using the Docsy theme. The site provides comprehensive documentation for the HugeGraph graph database system, including quickstart guides, API references, configuration guides, and contribution guidelines.
+Apache HugeGraph documentation website built with Hugo static site generator and the Docsy theme. The site is bilingual (Chinese/English) and covers the complete HugeGraph graph database ecosystem.
-The documentation is multilingual, supporting both **Chinese (cn)** and **English (en)** content.
-
-## Development Setup
-
-### Prerequisites
-
-1. **Hugo Extended** (v0.95.0 recommended, v0.102.3 used in CI)
- - Must be the "extended" version (includes SASS/SCSS support)
- - Download from: https://github.com/gohugoio/hugo/releases
- - Install location: `/usr/bin` or `/usr/local/bin`
-
-2. **Node.js and npm** (v16+ as specified in CI)
-
-### Quick Start
+## Development Commands
```bash
-# Install npm dependencies (autoprefixer, postcss, postcss-cli)
+# Install dependencies
npm install
-# Start local development server (with auto-reload)
+# Start development server (auto-reload enabled)
hugo server
-# Custom server with different ip/port
-hugo server -b http://127.0.0.1 -p 80 --bind=0.0.0.0
-
# Build production site (output to ./public)
hugo --minify
-```
-
-## Project Structure
-
-### Key Directories
-
-- **`content/`** - All documentation content in Markdown
- - `content/cn/` - Chinese (simplified) documentation
- - `content/en/` - English documentation
- - Each language has parallel structure: `docs/`, `blog/`, `community/`, `about/`
-
-- **`themes/docsy/`** - The Docsy Hugo theme (submodule or vendored)
-
-- **`static/`** - Static assets (images, files) served directly
-
-- **`assets/`** - Assets processed by Hugo pipelines (SCSS, images for processing)
-
-- **`layouts/`** - Custom Hugo template overrides for the Docsy theme
-
-- **`public/`** - Generated site output (gitignored, created by `hugo` build)
-
-- **`dist/`** - Additional distribution files
-
-### Important Files
-
-- **`config.toml`** - Main site configuration
- - Defines language settings (cn as default, en available)
- - Menu structure and navigation
- - Theme parameters and UI settings
- - Currently shows version `0.13`
-
-- **`package.json`** - Node.js dependencies for CSS processing (postcss, autoprefixer)
-- **`.editorconfig`** - Code style rules (UTF-8, LF line endings, spaces for indentation)
+# Clean build
+rm -rf public/
-- **`contribution.md`** - Contributing guide (Chinese/English mixed)
+# Production build with garbage collection
+HUGO_ENV="production" hugo --gc
-- **`maturity.md`** - Project maturity assessment documentation
+# Custom server configuration
+hugo server -b http://127.0.0.1 -p 80 --bind=0.0.0.0
+```
-## Content Organization
+## Prerequisites
-Documentation is organized into major sections:
+- **Hugo Extended** v0.95.0 recommended (v0.102.3 in CI) - must be the "extended" version for SASS/SCSS support
+- **Node.js** v16+ and npm
+- Download Hugo from: https://github.com/gohugoio/hugo/releases
-- **`quickstart/`** - Getting started guides for HugeGraph components (Server, Loader, Hubble, Tools, Computer, AI)
-- **`config/`** - Configuration documentation
-- **`clients/`** - Client API documentation (Gremlin Console, RESTful API)
-- **`guides/`** - User guides and tutorials
-- **`performance/`** - Performance benchmarks and optimization
-- **`language/`** - Query language documentation
-- **`contribution-guidelines/`** - How to contribute to HugeGraph
-- **`changelog/`** - Release notes and version history
-- **`download/`** - Download links and instructions
+## Architecture
-## Common Tasks
+```
+content/
+├── cn/ # Chinese documentation (default language)
+│ ├── docs/ # Main documentation
+│ ├── blog/ # Blog posts
+│ ├── community/
+│ └── about/
+└── en/ # English documentation (parallel structure)
+
+themes/docsy/ # Docsy theme (submodule)
+layouts/ # Custom template overrides
+assets/ # Processed assets (SCSS, images)
+static/ # Static files served directly
+config.toml # Main site configuration
+```
-### Building and Testing
+### Content Structure
-```bash
-# Build for production (with minification)
-hugo --minify
+Documentation sections in `content/{cn,en}/docs/`:
+- `quickstart/` - Getting started guides for HugeGraph components
+- `config/` - Configuration documentation
+- `clients/` - Client API documentation (Gremlin, RESTful)
+- `guides/` - User guides and tutorials
+- `performance/` - Benchmarks and optimization
+- `language/` - Query language docs
+- `contribution-guidelines/` - Contributing guides
+- `changelog/` - Release notes
+- `download/` - Download instructions
-# Clean previous build
-rm -rf public/
+## Key Configuration Files
-# Build with specific environment
-HUGO_ENV="production" hugo --gc
-```
+- `config.toml` - Site-wide settings, language config, menu structure, version (currently 0.13)
+- `package.json` - Node dependencies for CSS processing (postcss, autoprefixer, mermaid)
+- `.editorconfig` - UTF-8, LF line endings, spaces for indentation
-### Working with Content
+## Working with Content
When editing documentation:
-
1. Maintain parallel structure between `content/cn/` and `content/en/`
-2. Use Markdown format for all documentation files
-3. Include front matter in each file (title, weight, description)
-4. For translated content, ensure both Chinese and English versions are updated
-
-### Theme Customization
-
-- Global site config: `config.toml` (root directory)
-- Theme-specific config: `themes/docsy/config.toml`
-- Custom layouts: Place in `layouts/` to override theme defaults
-- Custom styles: Modify files in `assets/` directory
-
-Refer to [Docsy documentation](https://www.docsy.dev/docs/) for theme customization details.
+2. Use Markdown with Hugo front matter (title, weight, description)
+3. For bilingual changes, update both Chinese and English versions
+4. Include mermaid diagrams where appropriate (mermaid.js is available)
## Deployment
-The site uses GitHub Actions for CI/CD (`.github/workflows/hugo.yml`):
-
-1. **Triggers**: On push to `master` branch or pull requests
-2. **Build process**:
- - Checkout with submodules (for themes)
- - Setup Node v16 and Hugo v0.102.3 extended
- - Run `npm i && hugo --minify`
-3. **Deployment**: Publishes to `asf-site` branch (GitHub Pages)
-
-The deployed site is hosted as part of Apache HugeGraph's documentation infrastructure.
-
-## HugeGraph Architecture Context
-
-This documentation covers the complete HugeGraph ecosystem:
-
-- **HugeGraph-Server** - Core graph database engine with REST API
-- **HugeGraph-Store** - Distributed storage engine with integrated computation
-- **HugeGraph-PD** - Placement Driver for metadata management
-- **HugeGraph-Toolchain**:
- - Client (Java RESTful API client)
- - Loader (data import tool)
- - Hubble (web visualization platform)
- - Tools (deployment and management utilities)
-- **HugeGraph-Computer** - Distributed graph processing system (OLAP)
-- **HugeGraph-AI** - Graph neural networks and LLM/RAG components
+- **CI/CD**: GitHub Actions (`.github/workflows/hugo.yml`)
+- **Trigger**: Push to `master` branch or pull requests
+- **Build**: `npm i && hugo --minify` with Node v16 and Hugo v0.102.3 extended
+- **Deploy**: Publishes to `asf-site` branch (GitHub Pages)
+- **PR Requirements**: Include screenshots showing before/after changes
-## Git Workflow
+## HugeGraph Ecosystem Context
-- **Main branch**: `master` (protected, triggers deployment)
-- **PR requirements**: Include screenshots showing before/after changes in documentation
-- **Commit messages**: Follow Apache commit conventions
-- Always create a new branch from `master` for changes
-- Deployment to `asf-site` branch is automated via GitHub Actions
+This documentation covers:
+- **HugeGraph-Server** - Core graph database with REST API
+- **HugeGraph-Store** - Distributed storage engine
+- **HugeGraph-PD** - Placement Driver for metadata
+- **Toolchain** - Client, Loader, Hubble (web UI), Tools
+- **HugeGraph-Computer** - Distributed OLAP graph processing
+- **HugeGraph-AI** - GNN, LLM/RAG components
## Troubleshooting
-**Error: "TOCSS: failed to transform scss/main.scss"**
-- Cause: Using standard Hugo instead of Hugo Extended
-- Solution: Install Hugo Extended version
+**"TOCSS: failed to transform scss/main.scss"**
+- Install Hugo Extended (not standard Hugo)
-**Error: Module/theme not found**
-- Cause: Git submodules not initialized
-- Solution: `git submodule update --init --recursive`
+**Theme/module not found**
+- Run: `git submodule update --init --recursive`
-**Build fails in CI but works locally**
-- Check Hugo version match (CI uses v0.102.3)
-- Ensure npm dependencies are installed
-- Verify Node.js version (CI uses v16)
+**CI build fails but works locally**
+- Match Hugo version (v0.102.3) and Node.js (v16)
+- Verify npm dependencies are installed
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 000000000..031b534d9
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,208 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+========================================================================
+Apache 2.0 licenses
+========================================================================
+
+The following components are provided under the Apache License. See project link for details.
+The text of each license is the standard Apache 2.0 license.
diff --git a/NOTICE b/NOTICE
new file mode 100644
index 000000000..86e9aa7ca
--- /dev/null
+++ b/NOTICE
@@ -0,0 +1,7 @@
+Apache HugeGraph Documentation
+Copyright 2022-2026 The Apache Software Foundation
+
+This product includes software developed at
+The Apache Software Foundation (http://www.apache.org/).
+
+The initial codebase was donated to the ASF by HugeGraph Authors, copyright 2017-2021.
diff --git a/README.md b/README.md
index a80f5ba13..8f19005ba 100644
--- a/README.md
+++ b/README.md
@@ -1,79 +1,201 @@
+# Apache HugeGraph Documentation Website
+
[](https://deepwiki.com/apache/hugegraph-doc)
+[](LICENSE)
+[](https://gohugo.io/)
+
+---
+
+[中文](#中文版) | **English**
+
+This is the **source code repository** for the [HugeGraph documentation website](https://hugegraph.apache.org/docs/).
+
+For the HugeGraph database project, visit [apache/hugegraph](https://github.com/apache/hugegraph).
+
+## Quick Start
+
+Only **3 steps** to run the documentation website locally:
+
+**Prerequisites:** [Hugo Extended](https://github.com/gohugoio/hugo/releases) v0.95+ and Node.js v16+
+
+```bash
+# 1. Clone repository
+git clone https://github.com/apache/hugegraph-doc.git
+cd hugegraph-doc
+
+# 2. Install dependencies
+npm install
+
+# 3. Start development server (auto-reload)
+hugo server
+```
+
+Open http://localhost:1313 to preview.
-## Build/Test/Contribute to website
-
-Please visit the [contribution doc](./contribution.md) to get start, include theme/website description & settings~
-
-### Summary
-
-Apache HugeGraph is an easy-to-use, efficient, general-purpose open-source graph database system
-(Graph Database, [GitHub project address](https://github.com/hugegraph/hugegraph)), implementing the [Apache TinkerPop3](https://tinkerpop.apache.org) framework and fully compatible with the [Gremlin](https://tinkerpop.apache.org/gremlin.html) query language,
-With complete toolchain components, it helps users easily build applications and products based on graph databases. HugeGraph supports fast import of more than 10 billion vertices and edges, and provides millisecond-level relational query capability (OLTP).
-It also supports large-scale distributed graph computing (OLAP).
-
-Typical application scenarios of HugeGraph include deep relationship exploration, association analysis, path search, feature extraction, data clustering, community detection, knowledge graph, etc., and are applicable to business fields such as network security, telecommunication fraud, financial risk control, advertising recommendation, social network and intelligence Robots etc.
-
-### Features
-
-HugeGraph supports graph operations in online and offline environments, batch importing of data and efficient complex relationship analysis. It can seamlessly be integrated with big data platforms.
-HugeGraph supports multi-user parallel operations. Users can enter Gremlin query statements and get graph query results in time. They can also call the HugeGraph API in user programs for graph analysis or queries.
-
-This system has the following features:
-
-- Ease of use: HugeGraph supports the Gremlin graph query language and a RESTful API, providing common interfaces for graph retrieval, and peripheral tools with complete functions to easily implement various graph-based query and analysis operations.
-- Efficiency: HugeGraph has been deeply optimized in graph storage and graph computing, and provides a variety of batch import tools, which can easily complete the rapid import of tens of billions of data, and achieve millisecond-level response for graph retrieval through optimized queries. Supports simultaneous online real-time operations of thousands of users.
-- Universal: HugeGraph supports the Apache Gremlin standard graph query language and the Property Graph standard graph modeling method, and supports graph-based OLTP and OLAP schemes. Integrate Apache Hadoop and Apache Spark big data platform.
-- Scalable: supports distributed storage, multiple copies of data and horizontal expansion, built-in multiple back-end storage engines, and can easily expand the back-end storage engine through plug-ins.
-- Open: HugeGraph code is open source (Apache 2 License), customers can modify and customize independently, and selectively give back to the open source community.
-
-The functions of this system include but are not limited to:
-
-- Supports batch import of data from multiple data sources (including local files, HDFS files, MySQL databases and other data sources), and supports import of multiple file formats (including TXT, CSV, JSON and other formats)
-- With a visual operation interface, it can be used for operation, analysis and display diagrams, reducing the threshold for users to use
-- Optimized graph interface: shortest path (Shortest Path), K-step connected subgraph (K-neighbor), K-step to reach the adjacent point (K-out), personalized recommendation algorithm PersonalRank, etc.
-- Implemented based on the Apache-TinkerPop3 framework, supports Gremlin graph query language
-- Support attribute graph, attributes can be added to vertices and edges, and support rich attribute types
-- Has independent schema metadata information, has powerful graph modeling capabilities, and facilitates third-party system integration
-- Support multi-vertex ID strategy: support primary key ID, support automatic ID generation, support user-defined string ID, support user-defined digital ID
-- The attributes of edges and vertices can be indexed to support precise query, range query, and full-text search
-- The storage system adopts plug-in mode, supporting RocksDB, Cassandra, ScyllaDB, HBase, MySQL, PostgreSQL, Palo, and InMemory, etc.
-- Integrate with big data systems such as Hadoop and Spark GraphX, and support Bulk Load operations
-- Support high availability (HA), multiple copies of data, backup recovery, monitoring, etc.
-
-### Modules
-
-- [HugeGraph-Store]: HugeGraph-Store is a distributed storage engine to manage large-scale graph data by integrating storage and computation within a unified system.
-- [HugeGraph-PD]: HugeGraph-PD (Placement Driver) manages metadata and coordinates storage nodes.
-- [HugeGraph-Server](/docs/quickstart/hugegraph-server): HugeGraph-Server is the core part of the HugeGraph project, containing Core, Backend, API and other submodules;
- - Core: Implements the graph engine, connects to the Backend module downwards, and supports the API module upwards;
- - Backend: Implements the storage of graph data to the backend, supports backends including Memory, Cassandra, ScyllaDB, RocksDB, HBase, MySQL and PostgreSQL, users can choose one according to the actual situation;
- - API: Built-in REST Server, provides RESTful API to users, and is fully compatible with Gremlin queries. (Supports distributed storage and computation pushdown)
-- [HugeGraph-Toolchain](https://github.com/apache/hugegraph-toolchain): (Toolchain)
- - [HugeGraph-Client](/docs/quickstart/hugegraph-client): HugeGraph-Client provides a RESTful API client for connecting to HugeGraph-Server, currently only the Java version is implemented, users of other languages can implement it themselves;
- - [HugeGraph-Loader](/docs/quickstart/hugegraph-loader): HugeGraph-Loader is a data import tool based on HugeGraph-Client, which transforms ordinary text data into vertices and edges of the graph and inserts them into the graph database;
- - [HugeGraph-Hubble](/docs/quickstart/hugegraph-hubble): HugeGraph-Hubble is HugeGraph's Web
- visualization management platform, a one-stop visualization analysis platform, the platform covers the whole process from data modeling, to fast data import, to online and offline analysis of data, and unified management of the graph;
- - [HugeGraph-Tools](/docs/quickstart/hugegraph-tools): HugeGraph-Tools is HugeGraph's deployment and management tool, including graph management, backup/recovery, Gremlin execution and other functions.
-- [HugeGraph-Computer](/docs/quickstart/hugegraph-computer): HugeGraph-Computer is a distributed graph processing system (OLAP).
- It is an implementation of [Pregel](https://kowshik.github.io/JPregel/pregel_paper.pdf). It can run on clusters such as Kubernetes/Yarn, and supports large-scale graph computing.
-- [HugeGraph-AI](/docs/quickstart/hugegraph-ai): HugeGraph-AI is HugeGraph's independent AI
- component, providing training and inference functions of graph neural networks, LLM/Graph RAG combination/Python-Client and other related components, continuously updating.
+> **Troubleshooting:** If you see `TOCSS: failed to transform "scss/main.scss"`,
+> install Hugo **Extended** version, not the standard version.
+
+## Repository Structure
+
+```
+hugegraph-doc/
+├── content/ # 📄 Documentation content (Markdown)
+│ ├── cn/ # 🇨🇳 Chinese documentation
+│ │ ├── docs/ # Main documentation
+│ │ │ ├── quickstart/ # Quick start guides
+│ │ │ ├── config/ # Configuration docs
+│ │ │ ├── clients/ # Client docs
+│ │ │ ├── guides/ # User guides
+│ │ │ └── ...
+│ │ ├── blog/ # Blog posts
+│ │ └── community/ # Community pages
+│ └── en/ # 🇺🇸 English documentation (mirrors cn/ structure)
+│
+├── themes/docsy/ # 🎨 Docsy theme (git submodule)
+├── assets/ # 🖼️ Custom assets (fonts, images, scss)
+├── layouts/ # 📐 Hugo template overrides
+├── static/ # 📁 Static files
+├── config.toml # ⚙️ Site configuration
+└── package.json # 📦 Node.js dependencies
+```
## Contributing
-- Welcome to contribute to HugeGraph, please see [How to Contribute](https://hugegraph.apache.org/docs/contribution-guidelines/contribute/) for more information.
-- Note: It's recommended to use [GitHub Desktop](https://desktop.github.com/) to greatly simplify the PR and commit process.
-- Thank you to all the people who already contributed to HugeGraph!
+### Contribution Workflow
+
+1. **Fork** this repository
+2. Create a **new branch** from `master`
+3. Make your changes
+4. Submit a **Pull Request** with before/after screenshots
+
+### Requirements
+
+| Requirement | Description |
+|-------------|-------------|
+| **Bilingual Updates** | Update **BOTH** `content/cn/` and `content/en/` |
+| **PR Screenshots** | Include **before/after screenshots** in PR |
+| **Markdown** | Use Markdown with Hugo front matter |
+
+### Detailed Guide
+
+See [contribution.md](./contribution.md) for:
+- Platform-specific Hugo installation
+- Docsy theme customization
+- Translation tips
+
+## Commands
+
+| Command | Description |
+|---------|-------------|
+| `hugo server` | Start dev server (hot reload) |
+| `hugo --minify` | Build production to `./public/` |
+| `hugo server -p 8080` | Custom port |
+
+---
+
+## 中文版
+
+这是 [HugeGraph 官方文档网站](https://hugegraph.apache.org/docs/) 的**源代码仓库**。
+
+如果你想查找 HugeGraph 数据库本身,请访问 [apache/hugegraph](https://github.com/apache/hugegraph)。
+
+### 快速开始
+
+只需 **3 步**即可在本地启动文档网站:
+
+**前置条件:** [Hugo Extended](https://github.com/gohugoio/hugo/releases) v0.95+ 和 Node.js v16+
+
+```bash
+# 1. 克隆仓库
+git clone https://github.com/apache/hugegraph-doc.git
+cd hugegraph-doc
+
+# 2. 安装依赖
+npm install
+
+# 3. 启动开发服务器(支持热重载)
+hugo server
+```
+
+打开 http://localhost:1313 预览网站。
+
+> **常见问题:** 如果遇到 `TOCSS: failed to transform "scss/main.scss"` 错误,
+> 说明你需要安装 Hugo **Extended** 版本,而不是标准版本。
+
+### 仓库结构
+
+```
+hugegraph-doc/
+├── content/ # 📄 文档内容 (Markdown)
+│ ├── cn/ # 🇨🇳 中文文档
+│ │ ├── docs/ # 主要文档目录
+│ │ │ ├── quickstart/ # 快速开始指南
+│ │ │ ├── config/ # 配置文档
+│ │ │ ├── clients/ # 客户端文档
+│ │ │ ├── guides/ # 使用指南
+│ │ │ └── ...
+│ │ ├── blog/ # 博客文章
+│ │ └── community/ # 社区页面
+│ └── en/ # 🇺🇸 英文文档(与 cn/ 结构一致)
+│
+├── themes/docsy/ # 🎨 Docsy 主题 (git submodule)
+├── assets/ # 🖼️ 自定义资源 (fonts, images, scss)
+├── layouts/ # 📐 Hugo 模板覆盖
+├── static/ # 📁 静态文件
+├── config.toml # ⚙️ 站点配置
+└── package.json # 📦 Node.js 依赖
+```
+
+### 如何贡献
+
+#### 贡献流程
+
+1. **Fork** 本仓库
+2. 基于 `master` 创建**新分支**
+3. 修改文档内容
+4. 提交 **Pull Request**(附修改前后对比截图)
+
+#### 重要说明
+
+| 要求 | 说明 |
+|------|------|
+| **双语更新** | 修改内容时需**同时更新** `content/cn/` 和 `content/en/` |
+| **PR 截图** | 提交 PR 时需附上修改**前后对比截图** |
+| **Markdown** | 文档使用 Markdown 格式,带 Hugo front matter |
+
+#### 详细指南
+
+查看 [contribution.md](./contribution.md) 了解:
+- 各平台 Hugo 安装方法
+- Docsy 主题定制
+- 翻译技巧
+
+### 常用命令
+
+| 命令 | 说明 |
+|------|------|
+| `hugo server` | 启动开发服务器(热重载) |
+| `hugo --minify` | 构建生产版本到 `./public/` |
+| `hugo server -p 8080` | 指定端口 |
+
+---
+
+## Contact & Community
+
+- **Issues:** [GitHub Issues](https://github.com/apache/hugegraph-doc/issues)
+- **Mailing List:** [dev@hugegraph.apache.org](mailto:dev@hugegraph.apache.org) ([subscribe first](https://hugegraph.apache.org/docs/contribution-guidelines/subscribe/))
+- **Slack:** [ASF Slack](https://the-asf.slack.com/archives/C059UU2FJ23)
+
+
+
+## Contributors
-[](https://github.com/apache/incubator-hugegraph-doc/graphs/contributors)
+Thanks to all contributors to the HugeGraph documentation!
-### Contact Us
+[](https://github.com/apache/hugegraph-doc/graphs/contributors)
---
-- [GitHub Issues](https://github.com/apache/incubator-hugegraph-doc/issues): Feedback on usage issues and functional requirements (quick response)
-- Feedback Email: [dev@hugegraph.apache.org](mailto:dev@hugegraph.apache.org) ([subscriber](https://hugegraph.apache.org/docs/contribution-guidelines/subscribe/) only)
-- Security Email: [security@hugegraph.apache.org](mailto:security@hugegraph.apache.org) (Report SEC problems)
-- WeChat public account: Apache HugeGraph, welcome to scan this QR code to follow us.
+## License
-
+[Apache License 2.0](LICENSE)
diff --git a/config.toml b/config.toml
index 493c1462b..fdb79307f 100644
--- a/config.toml
+++ b/config.toml
@@ -45,7 +45,7 @@ theme = "default"
name = "GitHub"
weight = -99
pre = ""
- url = "https://github.com/apache/incubator-hugegraph"
+ url = "https://github.com/apache/hugegraph"
[[menu.main]]
name ="Download"
weight = -98
@@ -152,16 +152,16 @@ archived_version = false
# The version number for the version of the docs represented in this doc set.
# Used in the "version-banner" partial to display a version number for the
# current doc set.
-version = "0.13"
+version = "1.7"
# A link to latest version of the docs. Used in the "version-banner" partial to
# point people to the main doc site.
url_latest_version = "https://example.com"
# Repository configuration (URLs for in-page links to opening issues and suggesting changes)
-github_repo = "https://github.com/apache/incubator-hugegraph-doc"
+github_repo = "https://github.com/apache/hugegraph-doc"
# An optional link to a related project repo. For example, the sibling repository where your product code lives.
-github_project_repo = "https://github.com/apache/incubator-hugegraph"
+github_project_repo = "https://github.com/apache/hugegraph"
# Specify a value here if your content directory is not in your repo's root directory
# github_subdir = ""
@@ -235,7 +235,7 @@ enable = false
# Developer relevant links. These will show up on right side of footer and in the community page if you have one.
[[params.links.user]]
name ="Introduction"
- url = "../docs/introduction/readme/"
+ url = "../docs/introduction/"
icon = "fa fa-book"
desc = "Increase your understanding about the project"
[[params.links.user]]
diff --git a/content/cn/_index.html b/content/cn/_index.html
index cd7bf8a7f..fb07c957f 100644
--- a/content/cn/_index.html
+++ b/content/cn/_index.html
@@ -11,41 +11,54 @@
Apache
HugeGraph
-
- Incubating
}}">
Learn More
-
+ }}">
Download
- HugeGraph是一款易用、高效、通用的图数据库
- 实现了Apache TinkerPop3框架、兼容Gremlin查询语言。
+ HugeGraph 是一套全栈图系统,覆盖 }}">图数据库、}}">图计算 与 }}">图 AI。
+ 支持从数据存储、实时查询到离线分析的完整图数据处理能力,并支持 }}">Gremlin 与 }}">Cypher 查询语言。
{{< blocks/link-down color="info" >}}
{{< /blocks/cover >}}
{{% blocks/lead color="primary" %}}
-HugeGraph支持百亿以上的顶点(Vertex)和边(Edge)快速导入,毫秒级的关联查询能力,并可与Hadoop、Spark等
-大数据平台集成以进行离线分析,主要应用场景包括关联分析、欺诈检测和知识图谱等。
+HugeGraph 最高支持千亿级图数据高速导入与毫秒级实时查询,可与 Spark、Flink 等大数据平台深度集成,并可通过 }}">HugeGraph 工具链 快速完成导入、可视化与运维。
+在 AI 时代,通过与大语言模型 (LLM) 结合,为智能问答、推荐系统、风控反欺诈、知识图谱等场景提供强大的图计算能力。
{{% /blocks/lead %}}
{{< blocks/section color="dark" >}}
{{% blocks/feature icon="fa-lightbulb" title="易用" %}}
-支持Gremlin图查询语言与RESTful API,并提供图检索常用接口,具备齐全的周边工具,支持分布式存储、数据多副本及横向扩容,内置多种后端存储引擎,轻松实现各种查询、分析。
+支持 Gremlin 图查询语言与 [**RESTful API**]({{< relref "/docs/clients/restful-api" >}}),并提供图检索常用接口,具备齐全的周边工具,支持分布式存储、数据多副本及横向扩容,内置多种后端存储引擎,轻松实现各种查询、分析。
{{% /blocks/feature %}}
{{% blocks/feature icon="fa-shipping-fast" title="高效" %}}
-在图存储和图计算方面做了深度优化,提供支持多种数据源的批量导入工具,轻松完成百亿级数据快速导入,通过优化过的查询达到图检索的毫秒级响应,支持数千用户并发的在线实时操作。
+在图存储和图计算方面做了深度优化,提供 [**批量导入工具**]({{< relref "/docs/quickstart/toolchain/hugegraph-loader" >}}),轻松完成百亿级数据快速导入,通过优化过的查询达到图检索的毫秒级响应,支持数千用户并发的在线实时操作。
{{% /blocks/feature %}}
{{% blocks/feature icon="fa-exchange-alt" title="通用" %}}
-支持Apache Gremlin标准图查询语言和Property Graph标准图建模方法,支持基于图的OLTP和OLAP方案。集成Apache Hadoop及Apache Spark大数据平台,也可插件式轻松扩展后端存储引擎。
+支持 Apache Gremlin 标准图查询语言和 Property Graph 标准图建模方法,支持基于图的 OLTP 和 [**OLAP 图计算**]({{< relref "/docs/quickstart/computing/hugegraph-computer" >}}) 方案。集成 Apache Hadoop 及 Apache Spark 大数据平台,也可插件式轻松扩展后端存储引擎。
+{{% /blocks/feature %}}
+
+
+{{% blocks/feature icon="fa-brain" title="智能化" %}}
+集成 LLM 实现 [**GraphRAG 能力**]({{< relref "/docs/quickstart/hugegraph-ai" >}})、自动化知识图谱构建,内置 20+ 图机器学习算法,轻松构建 AI 驱动的图应用。
+{{% /blocks/feature %}}
+
+
+{{% blocks/feature icon="fa-expand-arrows-alt" title="可扩展" %}}
+支持水平扩容和分布式部署,从单机到 PB 级集群无缝迁移,提供 [**分布式存储引擎**]({{< relref "/docs/quickstart/hugegraph/hugegraph-hstore" >}}) 适配,满足不同规模和性能需求。
+{{% /blocks/feature %}}
+
+
+{{% blocks/feature icon="fa-puzzle-piece" title="开放生态" %}}
+遵循 Apache TinkerPop 标准,提供 [**多语言客户端**]({{< relref "/docs/quickstart/client/hugegraph-client" >}}),兼容主流大数据平台,社区活跃持续演进。
{{% /blocks/feature %}}
@@ -54,7 +67,7 @@ Apache
{{< blocks/section color="blue-deep">}}
-
第一个 Apache 图数据库项目
+首个 Apache 基金会的顶级图项目
{{< /blocks/section >}}
@@ -63,7 +76,7 @@ 第一个 Apache 图数据库项目
{{< blocks/section >}}
{{% blocks/feature icon="far fa-tools" title="使用易用的**工具链**" %}}
-可从[此](https://github.com/apache/incubator-hugegraph-toolchain)获取图数据导入工具, 可视化界面以及备份还原迁移工具, 欢迎使用
+可从[此](https://github.com/apache/hugegraph-toolchain)获取图数据导入工具, 可视化界面以及备份还原迁移工具, 欢迎使用
{{% /blocks/feature %}}
@@ -72,10 +85,10 @@ 第一个 Apache 图数据库项目
{{% /blocks/feature %}}
-{{% blocks/feature icon="fab fa-weixin" title="关注微信" url="https://twitter.com/apache-hugegraph" %}}
-关注微信公众号 "HugeGraph"
+{{% blocks/feature icon="fab fa-weixin" title="关注微信" %}}
+关注微信公众号 "HugeGraph" 获取最新动态
-(推特正在路上...)
+也可以加入 [ASF Slack 频道](https://the-asf.slack.com/archives/C059UU2FJ23)参与讨论
{{% /blocks/feature %}}
@@ -84,7 +97,7 @@ 第一个 Apache 图数据库项目
{{< blocks/section color="blue-light">}}
-
欢迎大家参与 HugeGraph 的任何贡献
+欢迎大家给 HugeGraph 添砖加瓦
{{< /blocks/section >}}
diff --git a/content/cn/blog/hugegraph-ai/agentic_graphrag.md b/content/cn/blog/hugegraph-ai/agentic_graphrag.md
index 417b8c246..a41168490 100644
--- a/content/cn/blog/hugegraph-ai/agentic_graphrag.md
+++ b/content/cn/blog/hugegraph-ai/agentic_graphrag.md
@@ -1,7 +1,7 @@
---
date: 2025-10-29
title: "Agentic GraphRAG"
-linkTitle: "Agentic GraphRAG"
+linkTitle: "Agentic GraphRAG:模块化架构实践"
---
# 项目背景
diff --git a/content/cn/community/_index.md b/content/cn/community/_index.md
index cdade1630..e1a40e16b 100644
--- a/content/cn/community/_index.md
+++ b/content/cn/community/_index.md
@@ -5,4 +5,4 @@ menu:
weight: 40
---
-
+查看 [项目成熟度](/community/maturity/) 评估。
diff --git a/content/cn/docs/SUMMARY.md b/content/cn/docs/SUMMARY.md
index 784773b26..ab5e32bf9 100644
--- a/content/cn/docs/SUMMARY.md
+++ b/content/cn/docs/SUMMARY.md
@@ -1,81 +1,77 @@
# HugeGraph Docs
-* [Download](download.md)
+* [Download](download/download)
## Quickstart
-* [Install HugeGraph-Server](quickstart/hugegraph-server.md)
-* [Load data with HugeGraph-Loader](quickstart/hugegraph-loader.md)
-* [Visual with HugeGraph-Hubble](quickstart/hugegraph-hubble.md)
-* [Develop with HugeGraph-Client](quickstart/hugegraph-client.md)
-* [Manage with HugeGraph-Tools](quickstart/hugegraph-tools.md)
-* [Analysis with HugeGraph-Computer](quickstart/hugegraph-computer.md)
-* [Display with HugeGraph-Studio](quickstart/hugegraph-studio.md)
+* [Install HugeGraph-Server](quickstart/hugegraph/hugegraph-server)
+* [Load data with HugeGraph-Loader](quickstart/toolchain/hugegraph-loader)
+* [Visual with HugeGraph-Hubble](quickstart/toolchain/hugegraph-hubble)
+* [Develop with HugeGraph-Client](quickstart/client/hugegraph-client)
+* [Manage with HugeGraph-Tools](quickstart/toolchain/hugegraph-tools)
+* [Analysis with HugeGraph-Computer](quickstart/computing/hugegraph-computer)
## Config
-* [Config Guide](config/config-guide.md)
-* [Config Options](config/config-option.md)
-* [Config Authentication](config/config-authentication.md)
-* [Config HTTPS](config/config-https.md)
-* [Config Computer](config/config-computer.md)
+* [Config Guide](config/config-guide)
+* [Config Options](config/config-option)
+* [Config Authentication](config/config-authentication)
+* [Config HTTPS](config/config-https)
+* [Config Computer](quickstart/computing/hugegraph-computer)
## API
-* [RESTful API](clients/hugegraph-api.md)
- * [Schema](clients/restful-api/schema.md)
- * [PropertyKey](clients/restful-api/propertykey.md)
- * [VertexLabel](clients/restful-api/vertexlabel.md)
- * [EdgeLabel](clients/restful-api/edgelabel.md)
- * [IndexLabel](clients/restful-api/indexlabel.md)
- * [Rebuild](clients/restful-api/rebuild.md)
- * [Vertex](clients/restful-api/vertex.md)
- * [Edge](clients/restful-api/edge.md)
- * [Traverser](clients/restful-api/traverser.md)
- * [Rank](clients/restful-api/rank.md)
- * [Variable](clients/restful-api/variable.md)
- * [Graphs](clients/restful-api/graphs.md)
- * [Task](clients/restful-api/task.md)
- * [Gremlin](clients/restful-api/gremlin.md)
- * [Cypher](clients/restful-api/cypher.md)
- * [Authentication](clients/restful-api/auth.md)
- * [Other](clients/restful-api/other.md)
-* [Java Client](clients/hugegraph-client.md)
-* [Gremlin Console](clients/gremlin-console.md)
+* [RESTful API](clients/restful-api)
+ * [Schema](clients/restful-api/schema)
+ * [PropertyKey](clients/restful-api/propertykey)
+ * [VertexLabel](clients/restful-api/vertexlabel)
+ * [EdgeLabel](clients/restful-api/edgelabel)
+ * [IndexLabel](clients/restful-api/indexlabel)
+ * [Rebuild](clients/restful-api/rebuild)
+ * [Vertex](clients/restful-api/vertex)
+ * [Edge](clients/restful-api/edge)
+ * [Traverser](clients/restful-api/traverser)
+ * [Rank](clients/restful-api/rank)
+ * [Variable](clients/restful-api/variable)
+ * [Graphs](clients/restful-api/graphs)
+ * [Task](clients/restful-api/task)
+ * [Gremlin](clients/restful-api/gremlin)
+ * [Cypher](clients/restful-api/cypher)
+ * [Authentication](clients/restful-api/auth)
+ * [Other](clients/restful-api/other)
+* [Java Client](clients/hugegraph-client)
+* [Gremlin Console](clients/gremlin-console)
## Guides
-* [Architecture Overview](guides/architectural.md)
-* [Design Concepts](guides/desgin-concept.md)
-* [Custom Plugins](guides/custom-plugin.md)
-* [Backup Restore](guides/backup-restore.md)
-* [FAQ](guides/faq.md)
+* [Architecture Overview](guides/architectural)
+* [Design Concepts](guides/desgin-concept)
+* [Custom Plugins](guides/custom-plugin)
+* [Backup Restore](guides/backup-restore)
+* [FAQ](guides/faq)
## Query Language
-* [Gremlin Query Language](language/hugegraph-gremlin.md)
-* [HugeGraph Examples](language/hugegraph-example.md)
+* [Gremlin Query Language](language/hugegraph-gremlin)
+* [HugeGraph Examples](language/hugegraph-example)
## Performance
-* [HugeGraph Benchmark Performance](performance/hugegraph-benchmark-0.5.6.md)
-* [HugeGraph API Performance-Outdated](content/cn/docs/performance/api-preformance/_index.md)
- * [v0.5.6 Stand-alone(RocksDB)](content/cn/docs/performance/api-preformance/hugegraph-api-0.5.6-RocksDB.md)
- * [v0.5.6 Cluster(Cassandra)](content/cn/docs/performance/api-preformance/hugegraph-api-0.5.6-Cassandra.md)
- * [v0.4.4](content/cn/docs/performance/api-preformance/hugegraph-api-0.4.4.md)
- * [v0.2](content/cn/docs/performance/api-preformance/hugegraph-api-0.2.md)
-* [HugeGraph-Loader Performance](performance/hugegraph-loader-performance.md)
+* [HugeGraph Benchmark Performance](performance/hugegraph-benchmark-0.5.6)
+* [HugeGraph API Performance-Outdated](performance/api-performance)
+ * [v0.5.6 Stand-alone(RocksDB)](performance/api-performance/hugegraph-api-0.5.6-rocksdb)
+ * [v0.5.6 Cluster(Cassandra)](performance/api-performance/hugegraph-api-0.5.6-cassandra)
+ * [v0.4.4](performance/api-performance/hugegraph-api-0.4.4)
+ * [v0.2](performance/api-performance/hugegraph-api-0.2)
+* [HugeGraph-Loader Performance](performance/hugegraph-loader-performance)
## ChangeLogs
-* [Release-1.3.0](changelog/hugegraph-1.3.0-release-notes.md)
-* [Release-1.2.0](changelog/hugegraph-1.2.0-release-notes.md)
-* [Release-1.0.0](changelog/hugegraph-1.0.0-release-notes.md)
-
----
-
-* [Release-0.12.0](changelog/hugegraph-0.12.0-release-notes.md)
-* [Release-0.11.2](changelog/hugegraph-0.11.2-release-notes.md)
-* [Release-0.10.4](changelog/hugegraph-0.10.4-release-notes.md)
-* [Release-0.9.2](changelog/hugegraph-0.9.2-release-notes.md)
-* [Release-0.8.0](changelog/hugegraph-0.8.0-release-notes.md)
-* [Release-0.7.4](changelog/hugegraph-0.7.4-release-notes.md)
-* [Release-0.6.1](changelog/hugegraph-0.6.1-release-notes.md)
-* [Release-0.5.6](changelog/hugegraph-0.5.6-release-notes.md)
-* [Release-0.4.4](changelog/hugegraph-0.4.4-release-notes.md)
-* [Release-0.3.3](changelog/hugegraph-0.3.3-release-notes.md)
-* [Release-0.2.4](changelog/hugegraph-0.2.4-release-notes.md)
-* [Release-0.2](changelog/hugegraph-0.2-release-notes.md)
+* [Release-1.3.0](changelog/hugegraph-1.3.0-release-notes)
+* [Release-1.2.0](changelog/hugegraph-1.2.0-release-notes)
+* [Release-1.0.0](changelog/hugegraph-1.0.0-release-notes)
+* [Release-0.12.0](changelog/hugegraph-0.12.0-release-notes)
+* [Release-0.11.2](changelog/hugegraph-0.11.2-release-notes)
+* [Release-0.10.4](changelog/hugegraph-0.10.4-release-notes)
+* [Release-0.9.2](changelog/hugegraph-0.9.2-release-notes)
+* [Release-0.8.0](changelog/hugegraph-0.8.0-release-notes)
+* [Release-0.7.4](changelog/hugegraph-0.7.4-release-notes)
+* [Release-0.6.1](changelog/hugegraph-0.6.1-release-notes)
+* [Release-0.5.6](changelog/hugegraph-0.5.6-release-notes)
+* [Release-0.4.4](changelog/hugegraph-0.4.4-release-notes)
+* [Release-0.3.3](changelog/hugegraph-0.3.3-release-notes)
+* [Release-0.2.4](changelog/hugegraph-0.2.4-release-notes)
+* [Release-0.2](changelog/hugegraph-0.2-release-notes)
diff --git a/content/cn/docs/_index.md b/content/cn/docs/_index.md
index e7db53bff..82bd55a87 100755
--- a/content/cn/docs/_index.md
+++ b/content/cn/docs/_index.md
@@ -7,4 +7,52 @@ menu:
weight: 20
---
-欢迎阅读HugeGraph文档
+## Apache HugeGraph 文档
+
+Apache HugeGraph 是一套完整的图数据库生态系统,支持 OLTP 实时查询、OLAP 离线分析和 AI 智能应用。
+
+### 按场景快速导航
+
+| 我想要... | 从这里开始 |
+|----------|-----------|
+| **运行图查询** (OLTP) | [HugeGraph Server 快速开始](quickstart/hugegraph/hugegraph-server) |
+| **大规模图计算** (OLAP) | [图计算引擎](quickstart/computing/hugegraph-computer) |
+| **构建 AI/RAG 应用** | [HugeGraph-AI](quickstart/hugegraph-ai/quick_start) |
+| **批量导入数据** | [HugeGraph Loader](quickstart/toolchain/hugegraph-loader) |
+| **可视化管理图** | [Hubble Web UI](quickstart/toolchain/hugegraph-hubble) |
+
+### 生态系统一览
+
+```
+┌─────────────────────────────────────────────────────────────────┐
+│ Apache HugeGraph 生态 │
+├─────────────────────────────────────────────────────────────────┤
+│ ┌─────────────┐ ┌─────────────┐ ┌─────────────────────────┐ │
+│ │ HugeGraph │ │ HugeGraph │ │ HugeGraph-AI │ │
+│ │ Server │ │ Computer │ │ (GraphRAG/ML/Python) │ │
+│ │ (OLTP) │ │ (OLAP) │ │ │ │
+│ └─────────────┘ └─────────────┘ └─────────────────────────┘ │
+│ │ │ │ │
+│ ┌──────┴───────────────┴────────────────────┴──────────────┐ │
+│ │ HugeGraph Toolchain │ │
+│ │ Hubble (UI) | Loader | Client (Java/Go/Python) | Tools │ │
+│ └───────────────────────────────────────────────────────────┘ │
+└─────────────────────────────────────────────────────────────────┘
+```
+
+### 核心组件
+
+- **HugeGraph Server** - 图数据库核心,REST API + Gremlin + Cypher 支持
+- **HugeGraph Toolchain** - 客户端 SDK、数据导入、可视化、运维工具
+- **HugeGraph Computer** - 分布式图计算 (Vermeer 高性能内存版 / Computer 海量存储外存版)
+- **HugeGraph-AI** - GraphRAG、知识图谱构建、20+ 图机器学习算法
+
+### 部署模式
+
+| 模式 | 适用场景 | 数据规模 |
+|-----|---------|---------|
+| **单机版** | 极速稳定、存算一体 | < 4TB |
+| **分布式** | 海量存储、存算分离 | < 1000TB |
+| **Docker** | 快速体验 | 任意 |
+
+[📖 详细介绍](introduction/)
diff --git a/content/cn/docs/changelog/hugegraph-0.10.4-release-notes.md b/content/cn/docs/changelog/hugegraph-0.10.4-release-notes.md
index c8b480383..5ecab368b 100644
--- a/content/cn/docs/changelog/hugegraph-0.10.4-release-notes.md
+++ b/content/cn/docs/changelog/hugegraph-0.10.4-release-notes.md
@@ -2,7 +2,7 @@
title: "HugeGraph 0.10 Release Notes"
linkTitle: "Release-0.10.4"
draft: true
-weight: 14
+weight: 15
---
### API & Client
diff --git a/content/cn/docs/changelog/hugegraph-0.12.0-release-notes.md b/content/cn/docs/changelog/hugegraph-0.12.0-release-notes.md
index 3735330d5..6f893d24e 100644
--- a/content/cn/docs/changelog/hugegraph-0.12.0-release-notes.md
+++ b/content/cn/docs/changelog/hugegraph-0.12.0-release-notes.md
@@ -1,7 +1,7 @@
---
title: "HugeGraph 0.12 Release Notes"
linkTitle: "Release-0.12.0"
-weight: 1
+weight: 11
---
### API & Client
diff --git a/content/cn/docs/changelog/hugegraph-0.2-release-notes.md b/content/cn/docs/changelog/hugegraph-0.2-release-notes.md
index c4b549045..be7b7ac29 100644
--- a/content/cn/docs/changelog/hugegraph-0.2-release-notes.md
+++ b/content/cn/docs/changelog/hugegraph-0.2-release-notes.md
@@ -2,7 +2,7 @@
title: "HugeGraph 0.2 Release Notes"
linkTitle: "Release-0.2.4"
draft: true
-weight: 23
+weight: 33
---
### API & Java Client
diff --git a/content/cn/docs/changelog/hugegraph-0.2.4-release-notes.md b/content/cn/docs/changelog/hugegraph-0.2.4-release-notes.md
index e826bc014..c80ef0b6e 100644
--- a/content/cn/docs/changelog/hugegraph-0.2.4-release-notes.md
+++ b/content/cn/docs/changelog/hugegraph-0.2.4-release-notes.md
@@ -2,7 +2,7 @@
title: "HugeGraph 0.2.4 Release Notes"
linkTitle: "Release-0.2.4"
draft: true
-weight: 22
+weight: 31
---
### API & Java Client
diff --git a/content/cn/docs/changelog/hugegraph-0.3.3-release-notes.md b/content/cn/docs/changelog/hugegraph-0.3.3-release-notes.md
index b114d1e2a..3bc00ee70 100644
--- a/content/cn/docs/changelog/hugegraph-0.3.3-release-notes.md
+++ b/content/cn/docs/changelog/hugegraph-0.3.3-release-notes.md
@@ -2,7 +2,7 @@
title: "HugeGraph 0.3.3 Release Notes"
linkTitle: "Release-0.3.3"
draft: true
-weight: 21
+weight: 29
---
### API & Java Client
diff --git a/content/cn/docs/changelog/hugegraph-0.4.4-release-notes.md b/content/cn/docs/changelog/hugegraph-0.4.4-release-notes.md
index 93c12089a..4f3829be0 100644
--- a/content/cn/docs/changelog/hugegraph-0.4.4-release-notes.md
+++ b/content/cn/docs/changelog/hugegraph-0.4.4-release-notes.md
@@ -2,7 +2,7 @@
title: "HugeGraph 0.4.4 Release Notes"
linkTitle: "Release-0.4.4"
draft: true
-weight: 20
+weight: 27
---
### API & Java Client
diff --git a/content/cn/docs/changelog/hugegraph-0.5.6-release-notes.md b/content/cn/docs/changelog/hugegraph-0.5.6-release-notes.md
index 0353b6c94..0fb199e4d 100644
--- a/content/cn/docs/changelog/hugegraph-0.5.6-release-notes.md
+++ b/content/cn/docs/changelog/hugegraph-0.5.6-release-notes.md
@@ -2,7 +2,7 @@
title: "HugeGraph 0.5 Release Notes"
linkTitle: "Release-0.5.6"
draft: true
-weight: 19
+weight: 25
---
### API & Java Client
diff --git a/content/cn/docs/changelog/hugegraph-0.6.1-release-notes.md b/content/cn/docs/changelog/hugegraph-0.6.1-release-notes.md
index 57dae14f3..492ef4053 100644
--- a/content/cn/docs/changelog/hugegraph-0.6.1-release-notes.md
+++ b/content/cn/docs/changelog/hugegraph-0.6.1-release-notes.md
@@ -2,7 +2,7 @@
title: "HugeGraph 0.6 Release Notes"
linkTitle: "Release-0.6.1"
draft: true
-weight: 18
+weight: 23
---
### API & Java Client
diff --git a/content/cn/docs/changelog/hugegraph-0.7.4-release-notes.md b/content/cn/docs/changelog/hugegraph-0.7.4-release-notes.md
index 115864755..4631aa004 100644
--- a/content/cn/docs/changelog/hugegraph-0.7.4-release-notes.md
+++ b/content/cn/docs/changelog/hugegraph-0.7.4-release-notes.md
@@ -2,7 +2,7 @@
title: "HugeGraph 0.7 Release Notes"
linkTitle: "Release-0.7.4"
draft: true
-weight: 17
+weight: 21
---
### API & Java Client
diff --git a/content/cn/docs/changelog/hugegraph-0.8.0-release-notes.md b/content/cn/docs/changelog/hugegraph-0.8.0-release-notes.md
index ad50701f4..f72569a00 100644
--- a/content/cn/docs/changelog/hugegraph-0.8.0-release-notes.md
+++ b/content/cn/docs/changelog/hugegraph-0.8.0-release-notes.md
@@ -2,7 +2,7 @@
title: "HugeGraph 0.8 Release Notes"
linkTitle: "Release-0.8.0"
draft: true
-weight: 16
+weight: 19
---
### API & Client
diff --git a/content/cn/docs/changelog/hugegraph-0.9.2-release-notes.md b/content/cn/docs/changelog/hugegraph-0.9.2-release-notes.md
index d6cdfa4d4..af3aaafe6 100644
--- a/content/cn/docs/changelog/hugegraph-0.9.2-release-notes.md
+++ b/content/cn/docs/changelog/hugegraph-0.9.2-release-notes.md
@@ -2,7 +2,7 @@
title: "HugeGraph 0.9 Release Notes"
linkTitle: "Release-0.9.2"
draft: true
-weight: 15
+weight: 17
---
### API & Client
diff --git a/content/cn/docs/changelog/hugegraph-1.0.0-release-notes.md b/content/cn/docs/changelog/hugegraph-1.0.0-release-notes.md
index b2a8c84d0..7340878ce 100644
--- a/content/cn/docs/changelog/hugegraph-1.0.0-release-notes.md
+++ b/content/cn/docs/changelog/hugegraph-1.0.0-release-notes.md
@@ -1,7 +1,7 @@
---
title: "HugeGraph 1.0.0 Release Notes"
linkTitle: "Release-1.0.0"
-weight: 2
+weight: 9
---
### OLTP API & Client 更新
@@ -166,8 +166,8 @@ weight: 2
更加详细的版本变更信息,可以查看各个子仓库的链接:
-- [Server Release Notes](https://github.com/apache/incubator-hugegraph/releases/tag/1.0.0)
-- [Toolchain Release Notes](https://github.com/apache/incubator-hugegraph-toolchain/releases/tag/1.0.0)
-- [Computer Release Notes](https://github.com/apache/incubator-hugegraph-computer/releases/tag/1.0.0)
-- [Commons Release Notes](https://github.com/apache/incubator-hugegraph-commons/releases/tag/1.0.0)
+- [Server Release Notes](https://github.com/apache/hugegraph/releases/tag/1.0.0)
+- [Toolchain Release Notes](https://github.com/apache/hugegraph-toolchain/releases/tag/1.0.0)
+- [Computer Release Notes](https://github.com/apache/hugegraph-computer/releases/tag/1.0.0)
+- [Commons Release Notes](https://github.com/apache/hugegraph-commons/releases/tag/1.0.0)
diff --git a/content/cn/docs/changelog/hugegraph-1.2.0-release-notes.md b/content/cn/docs/changelog/hugegraph-1.2.0-release-notes.md
index 113842c36..2b3d0af33 100644
--- a/content/cn/docs/changelog/hugegraph-1.2.0-release-notes.md
+++ b/content/cn/docs/changelog/hugegraph-1.2.0-release-notes.md
@@ -1,7 +1,7 @@
---
title: "HugeGraph 1.2.0 Release Notes"
linkTitle: "Release-1.2.0"
-weight: 3
+weight: 7
---
### Java version statement
@@ -11,206 +11,206 @@ weight: 3
1. Consider using Java 11 in hugegraph/hugegraph-toolchain/hugegraph-commons, also compatible with Java 8 now.
2. hugegraph-computer required to use Java 11, **not compatible with Java 8 now!**
-**v1.2.0 是倒数第二个兼容 Java8 的大版本**, 到 1.5.0 [PD/Store](https://github.com/apache/incubator-hugegraph/issues/2265) 正式合入 master
+**v1.2.0 是倒数第二个兼容 Java8 的大版本**, 到 1.5.0 [PD/Store](https://github.com/apache/hugegraph/issues/2265) 正式合入 master
后标志着 Java8 兼容的正式终结 (除 Client 外所有组件都将以 Java 11 作为基准,然后逐步迈向 Java17/21).
### hugegraph
#### API Changes
-- feat(api&core): in oltp apis, add statistics info and support full info about vertices and edges ([#2262](https://github.com/apache/incubator-hugegraph/pull/2262))
-- feat(api): support embedded arthas agent in hugegraph-server ([#2278](https://github.com/apache/incubator-hugegraph/pull/2278),[#2337](https://github.com/apache/incubator-hugegraph/pull/2337))
-- feat(api): support metric API Prometheus format & add statistic metric api ([#2286](https://github.com/apache/incubator-hugegraph/pull/2286))
-- feat(api-core): support label & property filtering for both edge and vertex & support kout dfs mode ([#2295](https://github.com/apache/incubator-hugegraph/pull/2295))
-- feat(api): support recording slow query log ([#2327](https://github.com/apache/incubator-hugegraph/pull/2327))
+- feat(api&core): in oltp apis, add statistics info and support full info about vertices and edges ([#2262](https://github.com/apache/hugegraph/pull/2262))
+- feat(api): support embedded arthas agent in hugegraph-server ([#2278](https://github.com/apache/hugegraph/pull/2278),[#2337](https://github.com/apache/hugegraph/pull/2337))
+- feat(api): support metric API Prometheus format & add statistic metric api ([#2286](https://github.com/apache/hugegraph/pull/2286))
+- feat(api-core): support label & property filtering for both edge and vertex & support kout dfs mode ([#2295](https://github.com/apache/hugegraph/pull/2295))
+- feat(api): support recording slow query log ([#2327](https://github.com/apache/hugegraph/pull/2327))
#### Feature Changes
-- feat: support task auto manage by server role state machine ([#2130](https://github.com/apache/incubator-hugegraph/pull/2130))
-- feat: support parallel compress snapshot ([#2136](https://github.com/apache/incubator-hugegraph/pull/2136))
-- feat: use an enhanced CypherAPI to refactor it ([#2143](https://github.com/apache/incubator-hugegraph/pull/2143))
-- feat(perf): support JMH benchmark in HG-test module ([#2238](https://github.com/apache/incubator-hugegraph/pull/2238))
-- feat: optimising adjacency edge queries ([#2242](https://github.com/apache/incubator-hugegraph/pull/2242))
-- Feat: IP white list ([#2299](https://github.com/apache/incubator-hugegraph/pull/2299))
-- feat(cassandra): adapt cassandra from 3.11.12 to 4.0.10 ([#2300](https://github.com/apache/incubator-hugegraph/pull/2300))
-- feat: support Cassandra with docker-compose in server ([#2307](https://github.com/apache/incubator-hugegraph/pull/2307))
-- feat(core): support batch+parallel edges traverse ([#2312](https://github.com/apache/incubator-hugegraph/pull/2312))
-- feat: adapt Dockerfile for new project structur ([#2344](https://github.com/apache/incubator-hugegraph/pull/2344))
-- feat(server):swagger support auth for standardAuth mode by ([#2360](https://github.com/apache/incubator-hugegraph/pull/2360))
-- feat(core): add IntMapByDynamicHash V1 implement ([#2377](https://github.com/apache/incubator-hugegraph/pull/2377))
+- feat: support task auto manage by server role state machine ([#2130](https://github.com/apache/hugegraph/pull/2130))
+- feat: support parallel compress snapshot ([#2136](https://github.com/apache/hugegraph/pull/2136))
+- feat: use an enhanced CypherAPI to refactor it ([#2143](https://github.com/apache/hugegraph/pull/2143))
+- feat(perf): support JMH benchmark in HG-test module ([#2238](https://github.com/apache/hugegraph/pull/2238))
+- feat: optimising adjacency edge queries ([#2242](https://github.com/apache/hugegraph/pull/2242))
+- Feat: IP white list ([#2299](https://github.com/apache/hugegraph/pull/2299))
+- feat(cassandra): adapt cassandra from 3.11.12 to 4.0.10 ([#2300](https://github.com/apache/hugegraph/pull/2300))
+- feat: support Cassandra with docker-compose in server ([#2307](https://github.com/apache/hugegraph/pull/2307))
+- feat(core): support batch+parallel edges traverse ([#2312](https://github.com/apache/hugegraph/pull/2312))
+- feat: adapt Dockerfile for new project structur ([#2344](https://github.com/apache/hugegraph/pull/2344))
+- feat(server):swagger support auth for standardAuth mode by ([#2360](https://github.com/apache/hugegraph/pull/2360))
+- feat(core): add IntMapByDynamicHash V1 implement ([#2377](https://github.com/apache/hugegraph/pull/2377))
#### Bug Fix
-- fix: transfer add_peer/remove_peer command to leader ([#2112](https://github.com/apache/incubator-hugegraph/pull/2112))
-- fix query dirty edges of a vertex with cache ([#2166](https://github.com/apache/incubator-hugegraph/pull/2166))
-- fix exception of vertex-drop with index ([#2181](https://github.com/apache/incubator-hugegraph/pull/2181))
-- fix: remove dup 'From' in filterExpiredResultFromFromBackend ([#2207](https://github.com/apache/incubator-hugegraph/pull/2207))
-- fix: jdbc ssl mode parameter redundant ([#2224](https://github.com/apache/incubator-hugegraph/pull/2224))
-- fix: error when start gremlin-console with sample script ([#2231](https://github.com/apache/incubator-hugegraph/pull/2231))
-- fix(core): support order by id ([#2233](https://github.com/apache/incubator-hugegraph/pull/2233))
-- fix: update ssl_mode value ([#2235](https://github.com/apache/incubator-hugegraph/pull/2235))
-- fix: optimizing ClassNotFoundException error message for MYSQL ([#2246](https://github.com/apache/incubator-hugegraph/pull/2246))
-- fix: asf invalid notification scheme 'discussions_status' ([#2247](https://github.com/apache/incubator-hugegraph/pull/2247))
-- fix: asf invalid notification scheme 'discussions_comment' ([#2250](https://github.com/apache/incubator-hugegraph/pull/2250))
-- fix: incorrect use of 'NO_LIMIT' variable ([#2253](https://github.com/apache/incubator-hugegraph/pull/2253))
-- fix(core): close flat mapper iterator after usage ([#2281](https://github.com/apache/incubator-hugegraph/pull/2281))
-- fix(dist): avoid var PRELOAD cover environmnet vars ([#2302](https://github.com/apache/incubator-hugegraph/pull/2302))
-- fix: base-ref/head-ref missed in dependency-review on master ([#2308](https://github.com/apache/incubator-hugegraph/pull/2308))
-- fix(core): handle schema Cache expandCapacity concurrent problem ([#2332](https://github.com/apache/incubator-hugegraph/pull/2332))
-- fix: in wait-storage.sh, always wait for storage with default rocksdb ([#2333](https://github.com/apache/incubator-hugegraph/pull/2333))
-- fix(api): refactor/downgrade record logic for slow log ([#2347](https://github.com/apache/incubator-hugegraph/pull/2347))
-- fix(api): clean some code for release ([#2348](https://github.com/apache/incubator-hugegraph/pull/2348))
-- fix: remove redirect-to-master from synchronous Gremlin API ([#2356](https://github.com/apache/incubator-hugegraph/pull/2356))
-- fix HBase PrefixFilter bug ([#2364](https://github.com/apache/incubator-hugegraph/pull/2364))
-- chore: fix curl failed to request https urls ([#2378](https://github.com/apache/incubator-hugegraph/pull/2378))
-- fix(api): correct the vertex id in the edge-existence api ([#2380](https://github.com/apache/incubator-hugegraph/pull/2380))
-- fix: github action build docker image failed during the release 1.2 process ([#2386](https://github.com/apache/incubator-hugegraph/pull/2386))
-- fix: TinkerPop unit test lack some lables ([#2387](https://github.com/apache/incubator-hugegraph/pull/2387))
+- fix: transfer add_peer/remove_peer command to leader ([#2112](https://github.com/apache/hugegraph/pull/2112))
+- fix query dirty edges of a vertex with cache ([#2166](https://github.com/apache/hugegraph/pull/2166))
+- fix exception of vertex-drop with index ([#2181](https://github.com/apache/hugegraph/pull/2181))
+- fix: remove dup 'From' in filterExpiredResultFromFromBackend ([#2207](https://github.com/apache/hugegraph/pull/2207))
+- fix: jdbc ssl mode parameter redundant ([#2224](https://github.com/apache/hugegraph/pull/2224))
+- fix: error when start gremlin-console with sample script ([#2231](https://github.com/apache/hugegraph/pull/2231))
+- fix(core): support order by id ([#2233](https://github.com/apache/hugegraph/pull/2233))
+- fix: update ssl_mode value ([#2235](https://github.com/apache/hugegraph/pull/2235))
+- fix: optimizing ClassNotFoundException error message for MYSQL ([#2246](https://github.com/apache/hugegraph/pull/2246))
+- fix: asf invalid notification scheme 'discussions_status' ([#2247](https://github.com/apache/hugegraph/pull/2247))
+- fix: asf invalid notification scheme 'discussions_comment' ([#2250](https://github.com/apache/hugegraph/pull/2250))
+- fix: incorrect use of 'NO_LIMIT' variable ([#2253](https://github.com/apache/hugegraph/pull/2253))
+- fix(core): close flat mapper iterator after usage ([#2281](https://github.com/apache/hugegraph/pull/2281))
+- fix(dist): avoid var PRELOAD cover environmnet vars ([#2302](https://github.com/apache/hugegraph/pull/2302))
+- fix: base-ref/head-ref missed in dependency-review on master ([#2308](https://github.com/apache/hugegraph/pull/2308))
+- fix(core): handle schema Cache expandCapacity concurrent problem ([#2332](https://github.com/apache/hugegraph/pull/2332))
+- fix: in wait-storage.sh, always wait for storage with default rocksdb ([#2333](https://github.com/apache/hugegraph/pull/2333))
+- fix(api): refactor/downgrade record logic for slow log ([#2347](https://github.com/apache/hugegraph/pull/2347))
+- fix(api): clean some code for release ([#2348](https://github.com/apache/hugegraph/pull/2348))
+- fix: remove redirect-to-master from synchronous Gremlin API ([#2356](https://github.com/apache/hugegraph/pull/2356))
+- fix HBase PrefixFilter bug ([#2364](https://github.com/apache/hugegraph/pull/2364))
+- chore: fix curl failed to request https urls ([#2378](https://github.com/apache/hugegraph/pull/2378))
+- fix(api): correct the vertex id in the edge-existence api ([#2380](https://github.com/apache/hugegraph/pull/2380))
+- fix: github action build docker image failed during the release 1.2 process ([#2386](https://github.com/apache/hugegraph/pull/2386))
+- fix: TinkerPop unit test lack some lables ([#2387](https://github.com/apache/hugegraph/pull/2387))
#### Option Changes
-- feat(dist): support pre-load test graph data in docker container ([#2241](https://github.com/apache/incubator-hugegraph/pull/2241))
+- feat(dist): support pre-load test graph data in docker container ([#2241](https://github.com/apache/hugegraph/pull/2241))
#### Other Changes
-- refact: use standard UTF-8 charset & enhance CI configs ([#2095](https://github.com/apache/incubator-hugegraph/pull/2095))
-- move validate release to hugegraph-doc ([#2109](https://github.com/apache/incubator-hugegraph/pull/2109))
-- refact: use a slim way to build docker image on latest code & support zgc ([#2118](https://github.com/apache/incubator-hugegraph/pull/2118))
-- chore: remove stage-repo in pom due to release done & update mail rule ([#2128](https://github.com/apache/incubator-hugegraph/pull/2128))
-- doc: update issue template & README file ([#2131](https://github.com/apache/incubator-hugegraph/pull/2131))
-- chore: cmn algorithm optimization ([#2134](https://github.com/apache/incubator-hugegraph/pull/2134))
-- add github token for license check comment ([#2139](https://github.com/apache/incubator-hugegraph/pull/2139))
-- chore: disable PR up-to-date in branch ([#2150](https://github.com/apache/incubator-hugegraph/pull/2150))
-- refact(core): remove lock of globalMasterInfo to optimize perf ([#2151](https://github.com/apache/incubator-hugegraph/pull/2151))
-- chore: async remove left index shouldn't effect query ([#2199](https://github.com/apache/incubator-hugegraph/pull/2199))
-- refact(rocksdb): clean & reformat some code ([#2200](https://github.com/apache/incubator-hugegraph/pull/2200))
-- refact(core): optimized batch removal of remaining indices consumed by a single consumer ([#2203](https://github.com/apache/incubator-hugegraph/pull/2203))
-- add com.janeluo.ikkanalyzer dependency to core model ([#2206](https://github.com/apache/incubator-hugegraph/pull/2206))
-- refact(core): early stop unnecessary loops in edge cache ([#2211](https://github.com/apache/incubator-hugegraph/pull/2211))
-- doc: update README & add QR code ([#2218](https://github.com/apache/incubator-hugegraph/pull/2218))
-- chore: update .asf.yaml for mail rule ([#2221](https://github.com/apache/incubator-hugegraph/pull/2221))
-- chore: improve the UI & content in README ([#2227](https://github.com/apache/incubator-hugegraph/pull/2227))
-- chore: add pr template ([#2234](https://github.com/apache/incubator-hugegraph/pull/2234))
-- doc: modify ASF and remove meaningless CLA ([#2237](https://github.com/apache/incubator-hugegraph/pull/2237))
-- chore(dist): replace wget to curl to download swagger-ui ([#2277](https://github.com/apache/incubator-hugegraph/pull/2277))
-- Update StandardStateMachineCallback.java ([#2290](https://github.com/apache/incubator-hugegraph/pull/2290))
-- doc: update README about start server with example graph ([#2315](https://github.com/apache/incubator-hugegraph/pull/2315))
-- README.md tiny improve ([#2320](https://github.com/apache/incubator-hugegraph/pull/2320))
-- doc: README.md tiny improve ([#2331](https://github.com/apache/incubator-hugegraph/pull/2331))
-- refact: adjust project structure for merge PD & Store[Breaking Change] ([#2338](https://github.com/apache/incubator-hugegraph/pull/2338))
-- chore: disable raft test in normal PR due to timeout problem ([#2349](https://github.com/apache/incubator-hugegraph/pull/2349))
-- chore(ci): add stage profile settings ([#2361](https://github.com/apache/incubator-hugegraph/pull/2361))
-- refact(api): update common 1.2 & fix jersey client code problem ([#2365](https://github.com/apache/incubator-hugegraph/pull/2365))
-- chore: move server info into GlobalMasterInfo ([#2370](https://github.com/apache/incubator-hugegraph/pull/2370))
-- chore: reset hugegraph version to 1.2.0 ([#2382](https://github.com/apache/incubator-hugegraph/pull/2382))
+- refact: use standard UTF-8 charset & enhance CI configs ([#2095](https://github.com/apache/hugegraph/pull/2095))
+- move validate release to hugegraph-doc ([#2109](https://github.com/apache/hugegraph/pull/2109))
+- refact: use a slim way to build docker image on latest code & support zgc ([#2118](https://github.com/apache/hugegraph/pull/2118))
+- chore: remove stage-repo in pom due to release done & update mail rule ([#2128](https://github.com/apache/hugegraph/pull/2128))
+- doc: update issue template & README file ([#2131](https://github.com/apache/hugegraph/pull/2131))
+- chore: cmn algorithm optimization ([#2134](https://github.com/apache/hugegraph/pull/2134))
+- add github token for license check comment ([#2139](https://github.com/apache/hugegraph/pull/2139))
+- chore: disable PR up-to-date in branch ([#2150](https://github.com/apache/hugegraph/pull/2150))
+- refact(core): remove lock of globalMasterInfo to optimize perf ([#2151](https://github.com/apache/hugegraph/pull/2151))
+- chore: async remove left index shouldn't effect query ([#2199](https://github.com/apache/hugegraph/pull/2199))
+- refact(rocksdb): clean & reformat some code ([#2200](https://github.com/apache/hugegraph/pull/2200))
+- refact(core): optimized batch removal of remaining indices consumed by a single consumer ([#2203](https://github.com/apache/hugegraph/pull/2203))
+- add com.janeluo.ikkanalyzer dependency to core model ([#2206](https://github.com/apache/hugegraph/pull/2206))
+- refact(core): early stop unnecessary loops in edge cache ([#2211](https://github.com/apache/hugegraph/pull/2211))
+- doc: update README & add QR code ([#2218](https://github.com/apache/hugegraph/pull/2218))
+- chore: update .asf.yaml for mail rule ([#2221](https://github.com/apache/hugegraph/pull/2221))
+- chore: improve the UI & content in README ([#2227](https://github.com/apache/hugegraph/pull/2227))
+- chore: add pr template ([#2234](https://github.com/apache/hugegraph/pull/2234))
+- doc: modify ASF and remove meaningless CLA ([#2237](https://github.com/apache/hugegraph/pull/2237))
+- chore(dist): replace wget to curl to download swagger-ui ([#2277](https://github.com/apache/hugegraph/pull/2277))
+- Update StandardStateMachineCallback.java ([#2290](https://github.com/apache/hugegraph/pull/2290))
+- doc: update README about start server with example graph ([#2315](https://github.com/apache/hugegraph/pull/2315))
+- README.md tiny improve ([#2320](https://github.com/apache/hugegraph/pull/2320))
+- doc: README.md tiny improve ([#2331](https://github.com/apache/hugegraph/pull/2331))
+- refact: adjust project structure for merge PD & Store[Breaking Change] ([#2338](https://github.com/apache/hugegraph/pull/2338))
+- chore: disable raft test in normal PR due to timeout problem ([#2349](https://github.com/apache/hugegraph/pull/2349))
+- chore(ci): add stage profile settings ([#2361](https://github.com/apache/hugegraph/pull/2361))
+- refact(api): update common 1.2 & fix jersey client code problem ([#2365](https://github.com/apache/hugegraph/pull/2365))
+- chore: move server info into GlobalMasterInfo ([#2370](https://github.com/apache/hugegraph/pull/2370))
+- chore: reset hugegraph version to 1.2.0 ([#2382](https://github.com/apache/hugegraph/pull/2382))
### hugegraph-computer
#### Feature Changes
-* feat: implement fast-failover for MessageRecvManager and DataClientManager ([#243](https://github.com/apache/incubator-hugegraph-computer/pull/243))
-* feat: implement parallel send data in load graph step ([#248](https://github.com/apache/incubator-hugegraph-computer/pull/248))
-* feat(k8s): init operator project & add webhook ([#259](https://github.com/apache/incubator-hugegraph-computer/pull/259), [#263](https://github.com/apache/incubator-hugegraph-computer/pull/263))
-* feat(core): support load vertex/edge snapshot ([#269](https://github.com/apache/incubator-hugegraph-computer/pull/269))
-* feat(k8s): Add MinIO as internal(default) storage ([#272](https://github.com/apache/incubator-hugegraph-computer/pull/272))
-* feat(algorithm): support random walk in computer ([#274](https://github.com/apache/incubator-hugegraph-computer/pull/274), [#280](https://github.com/apache/incubator-hugegraph-computer/pull/280))
-* feat: use 'foreground' delete policy to cancel k8s job ([#290](https://github.com/apache/incubator-hugegraph-computer/pull/290))
+* feat: implement fast-failover for MessageRecvManager and DataClientManager ([#243](https://github.com/apache/hugegraph-computer/pull/243))
+* feat: implement parallel send data in load graph step ([#248](https://github.com/apache/hugegraph-computer/pull/248))
+* feat(k8s): init operator project & add webhook ([#259](https://github.com/apache/hugegraph-computer/pull/259), [#263](https://github.com/apache/hugegraph-computer/pull/263))
+* feat(core): support load vertex/edge snapshot ([#269](https://github.com/apache/hugegraph-computer/pull/269))
+* feat(k8s): Add MinIO as internal(default) storage ([#272](https://github.com/apache/hugegraph-computer/pull/272))
+* feat(algorithm): support random walk in computer ([#274](https://github.com/apache/hugegraph-computer/pull/274), [#280](https://github.com/apache/hugegraph-computer/pull/280))
+* feat: use 'foreground' delete policy to cancel k8s job ([#290](https://github.com/apache/hugegraph-computer/pull/290))
#### Bug Fix
-* fix: superstep not take effect ([#237](https://github.com/apache/incubator-hugegraph-computer/pull/237))
-* fix(k8s): modify inconsistent apiGroups ([#270](https://github.com/apache/incubator-hugegraph-computer/pull/270))
-* fix(algorithm): record loop is not copied ([#276](https://github.com/apache/incubator-hugegraph-computer/pull/276))
-* refact(core): adaptor for common 1.2 & fix a string of possible CI problem ([#286](https://github.com/apache/incubator-hugegraph-computer/pull/286))
-* fix: remove okhttp1 due to conflicts risk ([#294](https://github.com/apache/incubator-hugegraph-computer/pull/294))
-* fix(core): io.grpc.grpc-core dependency conflic ([#296](https://github.com/apache/incubator-hugegraph-computer/pull/296))
+* fix: superstep not take effect ([#237](https://github.com/apache/hugegraph-computer/pull/237))
+* fix(k8s): modify inconsistent apiGroups ([#270](https://github.com/apache/hugegraph-computer/pull/270))
+* fix(algorithm): record loop is not copied ([#276](https://github.com/apache/hugegraph-computer/pull/276))
+* refact(core): adaptor for common 1.2 & fix a string of possible CI problem ([#286](https://github.com/apache/hugegraph-computer/pull/286))
+* fix: remove okhttp1 due to conflicts risk ([#294](https://github.com/apache/hugegraph-computer/pull/294))
+* fix(core): io.grpc.grpc-core dependency conflic ([#296](https://github.com/apache/hugegraph-computer/pull/296))
#### Option Changes
-* feat(core): isolate namespace for different input data source ([#252](https://github.com/apache/incubator-hugegraph-computer/pull/252))
-* refact(core): support auth config for computer task ([#265](https://github.com/apache/incubator-hugegraph-computer/pull/265))
+* feat(core): isolate namespace for different input data source ([#252](https://github.com/apache/hugegraph-computer/pull/252))
+* refact(core): support auth config for computer task ([#265](https://github.com/apache/hugegraph-computer/pull/265))
#### Other Changes
-* remove apache stage repo & update notification rule ([#232](https://github.com/apache/incubator-hugegraph-computer/pull/232))
-* chore: fix empty license file ([#233](https://github.com/apache/incubator-hugegraph-computer/pull/233))
-* chore: enhance mailbox settings & enable require ci ([#235](https://github.com/apache/incubator-hugegraph-computer/pull/235))
-* fix: typo errors in start-computer.sh ([#238](https://github.com/apache/incubator-hugegraph-computer/pull/238))
-* [Feature-241] Add PULL_REQUEST_TEMPLATE ([#242](https://github.com/apache/incubator-hugegraph-computer/pull/242), [#257](https://github.com/apache/incubator-hugegraph-computer/pull/257))
-* chore: change etcd url only for ci ([#245](https://github.com/apache/incubator-hugegraph-computer/pull/245))
-* doc: update readme & add QR code ([#249](https://github.com/apache/incubator-hugegraph-computer/pull/249))
-* doc(k8s): add building note for missing classes ([#254](https://github.com/apache/incubator-hugegraph-computer/pull/254))
-* chore: reduce mail to dev list ([#255](https://github.com/apache/incubator-hugegraph-computer/pull/255))
-* add: dependency-review ([#266](https://github.com/apache/incubator-hugegraph-computer/pull/266))
-* chore: correct incorrect comment ([#268](https://github.com/apache/incubator-hugegraph-computer/pull/268))
-* refactor(api): ListValue.getFirst() replaces ListValue.get(0) ([#282](https://github.com/apache/incubator-hugegraph-computer/pull/282))
-* Improve: Passing workerId to WorkerStat & Skip wait worker close if master executes failed ([#292](https://github.com/apache/incubator-hugegraph-computer/pull/292))
-* chore: add check dependencies ([#293](https://github.com/apache/incubator-hugegraph-computer/pull/293))
-* chore(license): update license for 1.2.0 ([#299](https://github.com/apache/incubator-hugegraph-computer/pull/299))
+* remove apache stage repo & update notification rule ([#232](https://github.com/apache/hugegraph-computer/pull/232))
+* chore: fix empty license file ([#233](https://github.com/apache/hugegraph-computer/pull/233))
+* chore: enhance mailbox settings & enable require ci ([#235](https://github.com/apache/hugegraph-computer/pull/235))
+* fix: typo errors in start-computer.sh ([#238](https://github.com/apache/hugegraph-computer/pull/238))
+* [Feature-241] Add PULL_REQUEST_TEMPLATE ([#242](https://github.com/apache/hugegraph-computer/pull/242), [#257](https://github.com/apache/hugegraph-computer/pull/257))
+* chore: change etcd url only for ci ([#245](https://github.com/apache/hugegraph-computer/pull/245))
+* doc: update readme & add QR code ([#249](https://github.com/apache/hugegraph-computer/pull/249))
+* doc(k8s): add building note for missing classes ([#254](https://github.com/apache/hugegraph-computer/pull/254))
+* chore: reduce mail to dev list ([#255](https://github.com/apache/hugegraph-computer/pull/255))
+* add: dependency-review ([#266](https://github.com/apache/hugegraph-computer/pull/266))
+* chore: correct incorrect comment ([#268](https://github.com/apache/hugegraph-computer/pull/268))
+* refactor(api): ListValue.getFirst() replaces ListValue.get(0) ([#282](https://github.com/apache/hugegraph-computer/pull/282))
+* Improve: Passing workerId to WorkerStat & Skip wait worker close if master executes failed ([#292](https://github.com/apache/hugegraph-computer/pull/292))
+* chore: add check dependencies ([#293](https://github.com/apache/hugegraph-computer/pull/293))
+* chore(license): update license for 1.2.0 ([#299](https://github.com/apache/hugegraph-computer/pull/299))
### hugegraph-toolchain
#### API Changes
-- feat(client): support edgeExistence api ([#544](https://github.com/apache/incubator-hugegraph-toolchain/pull/544))
-- refact(client): update tests for new OLTP traverser APIs ([#550](https://github.com/apache/incubator-hugegraph-toolchain/pull/550))
+- feat(client): support edgeExistence api ([#544](https://github.com/apache/hugegraph-toolchain/pull/544))
+- refact(client): update tests for new OLTP traverser APIs ([#550](https://github.com/apache/hugegraph-toolchain/pull/550))
#### Feature Changes
-- feat(spark): support spark-sink connector for loader ([#497](https://github.com/apache/incubator-hugegraph-toolchain/pull/497))
-- feat(loader): support kafka as datasource ([#506](https://github.com/apache/incubator-hugegraph-toolchain/pull/506))
-- feat(client): support go client for hugegraph ([#514](https://github.com/apache/incubator-hugegraph-toolchain/pull/514))
-- feat(loader): support docker for loader ([#530](https://github.com/apache/incubator-hugegraph-toolchain/pull/530))
-- feat: update common version and remove jersey code ([#538](https://github.com/apache/incubator-hugegraph-toolchain/pull/538))
+- feat(spark): support spark-sink connector for loader ([#497](https://github.com/apache/hugegraph-toolchain/pull/497))
+- feat(loader): support kafka as datasource ([#506](https://github.com/apache/hugegraph-toolchain/pull/506))
+- feat(client): support go client for hugegraph ([#514](https://github.com/apache/hugegraph-toolchain/pull/514))
+- feat(loader): support docker for loader ([#530](https://github.com/apache/hugegraph-toolchain/pull/530))
+- feat: update common version and remove jersey code ([#538](https://github.com/apache/hugegraph-toolchain/pull/538))
#### Bug Fix
-- fix: convert numbers to strings ([#465](https://github.com/apache/incubator-hugegraph-toolchain/pull/465))
-- fix: hugegraph-spark-loader shell string length limit ([#469](https://github.com/apache/incubator-hugegraph-toolchain/pull/469))
-- fix: spark loader meet Exception: Class is not registered ([#470](https://github.com/apache/incubator-hugegraph-toolchain/pull/470))
-- fix: spark loader Task not serializable ([#471](https://github.com/apache/incubator-hugegraph-toolchain/pull/471))
-- fix: spark with loader has dependency conflicts ([#480](https://github.com/apache/incubator-hugegraph-toolchain/pull/480))
-- fix: spark-loader example schema and struct mismatch ([#504](https://github.com/apache/incubator-hugegraph-toolchain/pull/504))
-- fix(loader): error log ([#499](https://github.com/apache/incubator-hugegraph-toolchain/pull/499))
-- fix: checkstyle && add suppressions.xml ([#500](https://github.com/apache/incubator-hugegraph-toolchain/pull/500))
-- fix(loader): resolve error in loader script ([#510](https://github.com/apache/incubator-hugegraph-toolchain/pull/510))
-- fix: base-ref/head-ref missed in dependency-check-ci on branch push ([#516](https://github.com/apache/incubator-hugegraph-toolchain/pull/516), [#551](https://github.com/apache/incubator-hugegraph-toolchain/pull/551))
-- fix yarn network connection on linux/arm64 arch ([#519](https://github.com/apache/incubator-hugegraph-toolchain/pull/519))
-- fix(hubble): drop-down box could not display all options ([#535](https://github.com/apache/incubator-hugegraph-toolchain/pull/535))
-- fix(hubble): build with node and yarn ([#543](https://github.com/apache/incubator-hugegraph-toolchain/pull/543))
-- fix(loader): loader options ([#548](https://github.com/apache/incubator-hugegraph-toolchain/pull/548))
-- fix(hubble): parent override children dep version ([#549](https://github.com/apache/incubator-hugegraph-toolchain/pull/549))
-- fix: exclude okhttp1 which has different groupID with okhttp3 ([#555](https://github.com/apache/incubator-hugegraph-toolchain/pull/555))
-- fix: github action build docker image failed ([#556](https://github.com/apache/incubator-hugegraph-toolchain/pull/556), [#557](https://github.com/apache/incubator-hugegraph-toolchain/pull/557))
-- fix: build error with npm not exist & tiny improve ([#558](https://github.com/apache/incubator-hugegraph-toolchain/pull/558))
+- fix: convert numbers to strings ([#465](https://github.com/apache/hugegraph-toolchain/pull/465))
+- fix: hugegraph-spark-loader shell string length limit ([#469](https://github.com/apache/hugegraph-toolchain/pull/469))
+- fix: spark loader meet Exception: Class is not registered ([#470](https://github.com/apache/hugegraph-toolchain/pull/470))
+- fix: spark loader Task not serializable ([#471](https://github.com/apache/hugegraph-toolchain/pull/471))
+- fix: spark with loader has dependency conflicts ([#480](https://github.com/apache/hugegraph-toolchain/pull/480))
+- fix: spark-loader example schema and struct mismatch ([#504](https://github.com/apache/hugegraph-toolchain/pull/504))
+- fix(loader): error log ([#499](https://github.com/apache/hugegraph-toolchain/pull/499))
+- fix: checkstyle && add suppressions.xml ([#500](https://github.com/apache/hugegraph-toolchain/pull/500))
+- fix(loader): resolve error in loader script ([#510](https://github.com/apache/hugegraph-toolchain/pull/510))
+- fix: base-ref/head-ref missed in dependency-check-ci on branch push ([#516](https://github.com/apache/hugegraph-toolchain/pull/516), [#551](https://github.com/apache/hugegraph-toolchain/pull/551))
+- fix yarn network connection on linux/arm64 arch ([#519](https://github.com/apache/hugegraph-toolchain/pull/519))
+- fix(hubble): drop-down box could not display all options ([#535](https://github.com/apache/hugegraph-toolchain/pull/535))
+- fix(hubble): build with node and yarn ([#543](https://github.com/apache/hugegraph-toolchain/pull/543))
+- fix(loader): loader options ([#548](https://github.com/apache/hugegraph-toolchain/pull/548))
+- fix(hubble): parent override children dep version ([#549](https://github.com/apache/hugegraph-toolchain/pull/549))
+- fix: exclude okhttp1 which has different groupID with okhttp3 ([#555](https://github.com/apache/hugegraph-toolchain/pull/555))
+- fix: github action build docker image failed ([#556](https://github.com/apache/hugegraph-toolchain/pull/556), [#557](https://github.com/apache/hugegraph-toolchain/pull/557))
+- fix: build error with npm not exist & tiny improve ([#558](https://github.com/apache/hugegraph-toolchain/pull/558))
#### Option Changes
-- set default data when create graph ([#447](https://github.com/apache/incubator-hugegraph-toolchain/pull/447))
+- set default data when create graph ([#447](https://github.com/apache/hugegraph-toolchain/pull/447))
#### Other Changes
-- chore: remove apache stage repo & update mail rule ([#433](https://github.com/apache/incubator-hugegraph-toolchain/pull/433), [#474](https://github.com/apache/incubator-hugegraph-toolchain/pull/474), [#479](https://github.com/apache/incubator-hugegraph-toolchain/pull/479))
-- refact: clean extra store file in all modules ([#434](https://github.com/apache/incubator-hugegraph-toolchain/pull/434))
-- chore: use fixed node.js version 16 to avoid ci problem ([#437](https://github.com/apache/incubator-hugegraph-toolchain/pull/437), [#441](https://github.com/apache/incubator-hugegraph-toolchain/pull/441))
-- chore(hubble): use latest code in Dockerfile ([#440](https://github.com/apache/incubator-hugegraph-toolchain/pull/440))
-- chore: remove maven plugin for docker build ([#443](https://github.com/apache/incubator-hugegraph-toolchain/pull/443))
-- chore: improve spark parallel ([#450](https://github.com/apache/incubator-hugegraph-toolchain/pull/450))
-- doc: fix build status badge link ([#455](https://github.com/apache/incubator-hugegraph-toolchain/pull/455))
-- chore: keep hadoop-hdfs-client and hadoop-common version consistent ([#457](https://github.com/apache/incubator-hugegraph-toolchain/pull/457))
-- doc: add basic contact info & QR code in README ([#462](https://github.com/apache/incubator-hugegraph-toolchain/pull/462), [#475](https://github.com/apache/incubator-hugegraph-toolchain/pull/475))
-- chore: disable PR up-to-date in branch ([#473](https://github.com/apache/incubator-hugegraph-toolchain/pull/473))
-- chore: auto add pr auto label by path ([#466](https://github.com/apache/incubator-hugegraph-toolchain/pull/466), [#528](https://github.com/apache/incubator-hugegraph-toolchain/pull/528))
-- chore: unify the dependencies versions of the entire project ([#478](https://github.com/apache/incubator-hugegraph-toolchain/pull/478))
-- chore(deps): bump async, semver, word-wrap, browserify-sign in hubble-fe ([#484](https://github.com/apache/incubator-hugegraph-toolchain/pull/484), [#491](https://github.com/apache/incubator-hugegraph-toolchain/pull/491), [#494](https://github.com/apache/incubator-hugegraph-toolchain/pull/494), [#529](https://github.com/apache/incubator-hugegraph-toolchain/pull/529))
-- chore: add pr template ([#498](https://github.com/apache/incubator-hugegraph-toolchain/pull/498))
-- doc(hubble): add docker-compose to start with server ([#522](https://github.com/apache/incubator-hugegraph-toolchain/pull/522))
-- chore(ci): add stage profile settings ([#536](https://github.com/apache/incubator-hugegraph-toolchain/pull/536))
-- chore(client): increase the api num as the latest server commit + 10 ([#546](https://github.com/apache/incubator-hugegraph-toolchain/pull/546))
-- chore(spark): install hugegraph from source ([#552](https://github.com/apache/incubator-hugegraph-toolchain/pull/552))
-- doc: adjust docker related desc in readme ([#559](https://github.com/apache/incubator-hugegraph-toolchain/pull/559))
-- chore(license): update license for 1.2 ([#560](https://github.com/apache/incubator-hugegraph-toolchain/pull/560), [#561](https://github.com/apache/incubator-hugegraph-toolchain/pull/561))
+- chore: remove apache stage repo & update mail rule ([#433](https://github.com/apache/hugegraph-toolchain/pull/433), [#474](https://github.com/apache/hugegraph-toolchain/pull/474), [#479](https://github.com/apache/hugegraph-toolchain/pull/479))
+- refact: clean extra store file in all modules ([#434](https://github.com/apache/hugegraph-toolchain/pull/434))
+- chore: use fixed node.js version 16 to avoid ci problem ([#437](https://github.com/apache/hugegraph-toolchain/pull/437), [#441](https://github.com/apache/hugegraph-toolchain/pull/441))
+- chore(hubble): use latest code in Dockerfile ([#440](https://github.com/apache/hugegraph-toolchain/pull/440))
+- chore: remove maven plugin for docker build ([#443](https://github.com/apache/hugegraph-toolchain/pull/443))
+- chore: improve spark parallel ([#450](https://github.com/apache/hugegraph-toolchain/pull/450))
+- doc: fix build status badge link ([#455](https://github.com/apache/hugegraph-toolchain/pull/455))
+- chore: keep hadoop-hdfs-client and hadoop-common version consistent ([#457](https://github.com/apache/hugegraph-toolchain/pull/457))
+- doc: add basic contact info & QR code in README ([#462](https://github.com/apache/hugegraph-toolchain/pull/462), [#475](https://github.com/apache/hugegraph-toolchain/pull/475))
+- chore: disable PR up-to-date in branch ([#473](https://github.com/apache/hugegraph-toolchain/pull/473))
+- chore: auto add pr auto label by path ([#466](https://github.com/apache/hugegraph-toolchain/pull/466), [#528](https://github.com/apache/hugegraph-toolchain/pull/528))
+- chore: unify the dependencies versions of the entire project ([#478](https://github.com/apache/hugegraph-toolchain/pull/478))
+- chore(deps): bump async, semver, word-wrap, browserify-sign in hubble-fe ([#484](https://github.com/apache/hugegraph-toolchain/pull/484), [#491](https://github.com/apache/hugegraph-toolchain/pull/491), [#494](https://github.com/apache/hugegraph-toolchain/pull/494), [#529](https://github.com/apache/hugegraph-toolchain/pull/529))
+- chore: add pr template ([#498](https://github.com/apache/hugegraph-toolchain/pull/498))
+- doc(hubble): add docker-compose to start with server ([#522](https://github.com/apache/hugegraph-toolchain/pull/522))
+- chore(ci): add stage profile settings ([#536](https://github.com/apache/hugegraph-toolchain/pull/536))
+- chore(client): increase the api num as the latest server commit + 10 ([#546](https://github.com/apache/hugegraph-toolchain/pull/546))
+- chore(spark): install hugegraph from source ([#552](https://github.com/apache/hugegraph-toolchain/pull/552))
+- doc: adjust docker related desc in readme ([#559](https://github.com/apache/hugegraph-toolchain/pull/559))
+- chore(license): update license for 1.2 ([#560](https://github.com/apache/hugegraph-toolchain/pull/560), [#561](https://github.com/apache/hugegraph-toolchain/pull/561))
@@ -218,27 +218,27 @@ weight: 3
#### Feature Changes
-- feat(common): replace jersey dependencies with OkHttp (Breaking Change) ([#133](https://github.com/apache/incubator-hugegraph-commons/pull/133))
+- feat(common): replace jersey dependencies with OkHttp (Breaking Change) ([#133](https://github.com/apache/hugegraph-commons/pull/133))
#### Bug Fix
-- fix(common): handle spring-boot2/jersey dependency conflicts ([#131](https://github.com/apache/incubator-hugegraph-commons/pull/131))
-- fix: Assert.assertThrows() should check result of exceptionConsumer ([#135](https://github.com/apache/incubator-hugegraph-commons/pull/135))
-- fix(common): json param convert ([#137](https://github.com/apache/incubator-hugegraph-commons/pull/137))
+- fix(common): handle spring-boot2/jersey dependency conflicts ([#131](https://github.com/apache/hugegraph-commons/pull/131))
+- fix: Assert.assertThrows() should check result of exceptionConsumer ([#135](https://github.com/apache/hugegraph-commons/pull/135))
+- fix(common): json param convert ([#137](https://github.com/apache/hugegraph-commons/pull/137))
#### Other Changes
-- refact(common): add more construction methods for convenient ([#132](https://github.com/apache/incubator-hugegraph-commons/pull/132))
-- add: dependency-review ([#134](https://github.com/apache/incubator-hugegraph-commons/pull/134))
-- refact(common): rename jsonutil to avoid conflicts with server ([#136](https://github.com/apache/incubator-hugegraph-commons/pull/136))
-- doc: update README for release ([#138](https://github.com/apache/incubator-hugegraph-commons/pull/138))
-- update licence ([#139](https://github.com/apache/incubator-hugegraph-commons/pull/139))
+- refact(common): add more construction methods for convenient ([#132](https://github.com/apache/hugegraph-commons/pull/132))
+- add: dependency-review ([#134](https://github.com/apache/hugegraph-commons/pull/134))
+- refact(common): rename jsonutil to avoid conflicts with server ([#136](https://github.com/apache/hugegraph-commons/pull/136))
+- doc: update README for release ([#138](https://github.com/apache/hugegraph-commons/pull/138))
+- update licence ([#139](https://github.com/apache/hugegraph-commons/pull/139))
### Release Details
Please check the release details in each repository:
-- [Server Release Notes](https://github.com/apache/incubator-hugegraph/releases)
-- [Toolchain Release Notes](https://github.com/apache/incubator-hugegraph-toolchain/releases)
-- [Computer Release Notes](https://github.com/apache/incubator-hugegraph-computer/releases)
-- [Commons Release Notes](https://github.com/apache/incubator-hugegraph-commons/releases)
+- [Server Release Notes](https://github.com/apache/hugegraph/releases)
+- [Toolchain Release Notes](https://github.com/apache/hugegraph-toolchain/releases)
+- [Computer Release Notes](https://github.com/apache/hugegraph-computer/releases)
+- [Commons Release Notes](https://github.com/apache/hugegraph-commons/releases)
diff --git a/content/cn/docs/changelog/hugegraph-1.3.0-release-notes.md b/content/cn/docs/changelog/hugegraph-1.3.0-release-notes.md
index 3ea786e6c..ff88da898 100644
--- a/content/cn/docs/changelog/hugegraph-1.3.0-release-notes.md
+++ b/content/cn/docs/changelog/hugegraph-1.3.0-release-notes.md
@@ -1,7 +1,7 @@
---
title: "HugeGraph 1.3.0 Release Notes"
linkTitle: "Release-1.3.0"
-weight: 4
+weight: 5
---
### 运行环境/版本说明
@@ -19,45 +19,45 @@ PS: 未来 HugeGraph 组件的版本会朝着 `Java 11 -> Java 17 -> Java 21`
#### API Changes
-* feat(api): optimize adjacent-edges query ([#2408](https://github.com/apache/incubator-hugegraph/pull/2408))
+* feat(api): optimize adjacent-edges query ([#2408](https://github.com/apache/hugegraph/pull/2408))
#### Feature Changes
-- feat: support docker use the auth when starting ([#2403](https://github.com/apache/incubator-hugegraph/pull/2403))
-- feat: added the OpenTelemetry trace support ([#2477](https://github.com/apache/incubator-hugegraph/pull/2477))
+- feat: support docker use the auth when starting ([#2403](https://github.com/apache/hugegraph/pull/2403))
+- feat: added the OpenTelemetry trace support ([#2477](https://github.com/apache/hugegraph/pull/2477))
#### Bug Fix
-- fix(core): task restore interrupt problem on restart server ([#2401](https://github.com/apache/incubator-hugegraph/pull/2401))
-- fix(server): reinitialize the progress to set up graph auth friendly ([#2411](https://github.com/apache/incubator-hugegraph/pull/2411))
-- fix(chore): remove zgc in dockerfile for ARM env ([#2421](https://github.com/apache/incubator-hugegraph/pull/2421))
-- fix(server): make CacheManager constructor private to satisfy the singleton pattern ([#2432](https://github.com/apache/incubator-hugegraph/pull/2432))
-- fix(server): unify the license headers ([#2438](https://github.com/apache/incubator-hugegraph/pull/2438))
-- fix: format and clean code in dist and example modules ([#2441](https://github.com/apache/incubator-hugegraph/pull/2441))
-- fix: format and clean code in core module ([#2440](https://github.com/apache/incubator-hugegraph/pull/2440))
-- fix: format and clean code in modules ([#2439](https://github.com/apache/incubator-hugegraph/pull/2439))
-- fix(server): clean up the code ([#2456](https://github.com/apache/incubator-hugegraph/pull/2456))
-- fix(server): remove extra blank lines ([#2459](https://github.com/apache/incubator-hugegraph/pull/2459))
-- fix(server): add tip for gremlin api NPE with an empty query ([#2467](https://github.com/apache/incubator-hugegraph/pull/2467))
-- fix(server): fix the metric name when promthus collects hugegraph metric, see issue ([#2462](https://github.com/apache/incubator-hugegraph/pull/2462))
-- fix(server): `serverStarted` error when execute gremlin example ([#2473](https://github.com/apache/incubator-hugegraph/pull/2473))
-- fix(auth): enhance the URL check ([#2422](https://github.com/apache/incubator-hugegraph/pull/2422))
+- fix(core): task restore interrupt problem on restart server ([#2401](https://github.com/apache/hugegraph/pull/2401))
+- fix(server): reinitialize the progress to set up graph auth friendly ([#2411](https://github.com/apache/hugegraph/pull/2411))
+- fix(chore): remove zgc in dockerfile for ARM env ([#2421](https://github.com/apache/hugegraph/pull/2421))
+- fix(server): make CacheManager constructor private to satisfy the singleton pattern ([#2432](https://github.com/apache/hugegraph/pull/2432))
+- fix(server): unify the license headers ([#2438](https://github.com/apache/hugegraph/pull/2438))
+- fix: format and clean code in dist and example modules ([#2441](https://github.com/apache/hugegraph/pull/2441))
+- fix: format and clean code in core module ([#2440](https://github.com/apache/hugegraph/pull/2440))
+- fix: format and clean code in modules ([#2439](https://github.com/apache/hugegraph/pull/2439))
+- fix(server): clean up the code ([#2456](https://github.com/apache/hugegraph/pull/2456))
+- fix(server): remove extra blank lines ([#2459](https://github.com/apache/hugegraph/pull/2459))
+- fix(server): add tip for gremlin api NPE with an empty query ([#2467](https://github.com/apache/hugegraph/pull/2467))
+- fix(server): fix the metric name when Prometheus collects hugegraph metric, see issue ([#2462](https://github.com/apache/hugegraph/pull/2462))
+- fix(server): `serverStarted` error when execute gremlin example ([#2473](https://github.com/apache/hugegraph/pull/2473))
+- fix(auth): enhance the URL check ([#2422](https://github.com/apache/hugegraph/pull/2422))
#### Option Changes
-* refact(server): enhance the storage path in RocksDB & clean code ([#2491](https://github.com/apache/incubator-hugegraph/pull/2491))
+* refact(server): enhance the storage path in RocksDB & clean code ([#2491](https://github.com/apache/hugegraph/pull/2491))
#### Other Changes
-- chore: add a license link ([#2398](https://github.com/apache/incubator-hugegraph/pull/2398))
-- doc: enhance NOTICE info to keep it clear ([#2409](https://github.com/apache/incubator-hugegraph/pull/2409))
-- chore(server): update swagger info for default server profile ([#2423](https://github.com/apache/incubator-hugegraph/pull/2423))
-- fix(server): unify license header for protobuf file ([#2448](https://github.com/apache/incubator-hugegraph/pull/2448))
-- chore: improve license header checker confs and pre-check header when validating ([#2445](https://github.com/apache/incubator-hugegraph/pull/2445))
-- chore: unify to call SchemaLabel.getLabelId() ([#2458](https://github.com/apache/incubator-hugegraph/pull/2458))
-- chore: refine the hg-style.xml specification ([#2457](https://github.com/apache/incubator-hugegraph/pull/2457))
-- chore: Add a newline formatting configuration and a comment for warning ([#2464](https://github.com/apache/incubator-hugegraph/pull/2464))
-- chore(server): clear context after req done ([#2470](https://github.com/apache/incubator-hugegraph/pull/2470))
+- chore: add a license link ([#2398](https://github.com/apache/hugegraph/pull/2398))
+- doc: enhance NOTICE info to keep it clear ([#2409](https://github.com/apache/hugegraph/pull/2409))
+- chore(server): update swagger info for default server profile ([#2423](https://github.com/apache/hugegraph/pull/2423))
+- fix(server): unify license header for protobuf file ([#2448](https://github.com/apache/hugegraph/pull/2448))
+- chore: improve license header checker confs and pre-check header when validating ([#2445](https://github.com/apache/hugegraph/pull/2445))
+- chore: unify to call SchemaLabel.getLabelId() ([#2458](https://github.com/apache/hugegraph/pull/2458))
+- chore: refine the hg-style.xml specification ([#2457](https://github.com/apache/hugegraph/pull/2457))
+- chore: Add a newline formatting configuration and a comment for warning ([#2464](https://github.com/apache/hugegraph/pull/2464))
+- chore(server): clear context after req done ([#2470](https://github.com/apache/hugegraph/pull/2470))
### hugegraph-toolchain
@@ -65,40 +65,40 @@ PS: 未来 HugeGraph 组件的版本会朝着 `Java 11 -> Java 17 -> Java 21`
#### Feature Changes
-* fix(loader): update shade plugin for spark loader ([#566](https://github.com/apache/incubator-hugegraph-toolchain/pull/566))
-* fix(hubble): yarn install timeout in arm64 ([#583](https://github.com/apache/incubator-hugegraph-toolchain/pull/583))
-* fix(loader): support file name with prefix for hdfs source ([#571](https://github.com/apache/incubator-hugegraph-toolchain/pull/571))
-* feat(hubble): warp the exception info in HugeClientUtil ([#589](https://github.com/apache/incubator-hugegraph-toolchain/pull/589))
+* fix(loader): update shade plugin for spark loader ([#566](https://github.com/apache/hugegraph-toolchain/pull/566))
+* fix(hubble): yarn install timeout in arm64 ([#583](https://github.com/apache/hugegraph-toolchain/pull/583))
+* fix(loader): support file name with prefix for hdfs source ([#571](https://github.com/apache/hugegraph-toolchain/pull/571))
+* feat(hubble): wrap the exception info in HugeClientUtil ([#589](https://github.com/apache/hugegraph-toolchain/pull/589))
#### Bug Fix
-* fix: concurrency issue causing file overwrite due to identical filenames ([#572](https://github.com/apache/incubator-hugegraph-toolchain/pull/572))
+* fix: concurrency issue causing file overwrite due to identical filenames ([#572](https://github.com/apache/hugegraph-toolchain/pull/572))
#### Option Changes
-* feat(client): support user defined OKHTTPClient configs ([#590](https://github.com/apache/incubator-hugegraph-toolchain/pull/590))
+* feat(client): support user defined OKHTTPClient configs ([#590](https://github.com/apache/hugegraph-toolchain/pull/590))
#### Other Changes
-* doc: update copyright date(year) in NOTICE ([#567](https://github.com/apache/incubator-hugegraph-toolchain/pull/567))
-* chore(deps): bump ip from 1.1.5 to 1.1.9 in /hugegraph-hubble/hubble-fe ([#580](https://github.com/apache/incubator-hugegraph-toolchain/pull/580))
-* refactor(hubble): enhance maven front plugin ([#568](https://github.com/apache/incubator-hugegraph-toolchain/pull/568))
-* chore(deps): bump es5-ext from 0.10.53 to 0.10.63 in /hugegraph-hubble/hubble-fe ([#582](https://github.com/apache/incubator-hugegraph-toolchain/pull/582))
-* chore(hubble): Enhance code style in hubble ([#592](https://github.com/apache/incubator-hugegraph-toolchain/pull/592))
-* chore: upgrade version to 1.3.0 ([#596](https://github.com/apache/incubator-hugegraph-toolchain/pull/596))
-* chore(ci): update profile commit id for 1.3 ([#597](https://github.com/apache/incubator-hugegraph-toolchain/pull/597))
+* doc: update copyright date(year) in NOTICE ([#567](https://github.com/apache/hugegraph-toolchain/pull/567))
+* chore(deps): bump ip from 1.1.5 to 1.1.9 in /hugegraph-hubble/hubble-fe ([#580](https://github.com/apache/hugegraph-toolchain/pull/580))
+* refactor(hubble): enhance maven front plugin ([#568](https://github.com/apache/hugegraph-toolchain/pull/568))
+* chore(deps): bump es5-ext from 0.10.53 to 0.10.63 in /hugegraph-hubble/hubble-fe ([#582](https://github.com/apache/hugegraph-toolchain/pull/582))
+* chore(hubble): Enhance code style in hubble ([#592](https://github.com/apache/hugegraph-toolchain/pull/592))
+* chore: upgrade version to 1.3.0 ([#596](https://github.com/apache/hugegraph-toolchain/pull/596))
+* chore(ci): update profile commit id for 1.3 ([#597](https://github.com/apache/hugegraph-toolchain/pull/597))
### hugegraph-commons
#### Feature Changes
-* feat: support user defined RestClientConfig/HTTPClient params ([#140](https://github.com/apache/incubator-hugegraph-commons/pull/140))
+* feat: support user defined RestClientConfig/HTTPClient params ([#140](https://github.com/apache/hugegraph-commons/pull/140))
#### Bug Fix
#### Other Changes
-* chore: disable clean flatten for deploy ([#141](https://github.com/apache/incubator-hugegraph-commons/pull/141))
+* chore: disable clean flatten for deploy ([#141](https://github.com/apache/hugegraph-commons/pull/141))
### hugegraph-ai
@@ -113,38 +113,38 @@ PS: 未来 HugeGraph 组件的版本会朝着 `Java 11 -> Java 17 -> Java 21`
#### Feature Changes
-* feat: initialize hugegraph python client ([#5](https://github.com/apache/incubator-hugegraph-ai/pull/5))
-* feat(llm): knowledge graph construction by llm ([#7](https://github.com/apache/incubator-hugegraph-ai/pull/7))
-* feat: initialize rag based on HugeGraph ([#20](https://github.com/apache/incubator-hugegraph-ai/pull/20))
-* feat(client): add variables api and test ([#24](https://github.com/apache/incubator-hugegraph-ai/pull/24))
-* feat: add llm wenxinyiyan & config util & spo_triple_extract ([#27](https://github.com/apache/incubator-hugegraph-ai/pull/27))
-* feat: add auth&metric&traverser&task api and ut ([#28](https://github.com/apache/incubator-hugegraph-ai/pull/28))
-* feat: refactor construct knowledge graph task ([#29](https://github.com/apache/incubator-hugegraph-ai/pull/29))
-* feat: Introduce gradio for creating interactive and visual demo ([#30](https://github.com/apache/incubator-hugegraph-ai/pull/30))
+* feat: initialize hugegraph python client ([#5](https://github.com/apache/hugegraph-ai/pull/5))
+* feat(llm): knowledge graph construction by llm ([#7](https://github.com/apache/hugegraph-ai/pull/7))
+* feat: initialize rag based on HugeGraph ([#20](https://github.com/apache/hugegraph-ai/pull/20))
+* feat(client): add variables api and test ([#24](https://github.com/apache/hugegraph-ai/pull/24))
+* feat: add llm wenxinyiyan & config util & spo_triple_extract ([#27](https://github.com/apache/hugegraph-ai/pull/27))
+* feat: add auth&metric&traverser&task api and ut ([#28](https://github.com/apache/hugegraph-ai/pull/28))
+* feat: refactor construct knowledge graph task ([#29](https://github.com/apache/hugegraph-ai/pull/29))
+* feat: Introduce gradio for creating interactive and visual demo ([#30](https://github.com/apache/hugegraph-ai/pull/30))
#### Bug Fix
-* fix: invalid GitHub label ([#3](https://github.com/apache/incubator-hugegraph-ai/pull/3))
-* fix: import error ([#13](https://github.com/apache/incubator-hugegraph-ai/pull/13))
-* fix: function getEdgeByPage(): the generated query url does not include the parameter page ([#15](https://github.com/apache/incubator-hugegraph-ai/pull/15))
-* fix: issue template ([#23](https://github.com/apache/incubator-hugegraph-ai/pull/23))
-* fix: base-ref/head-ref missed in dependency-check-ci on branch push ([#25](https://github.com/apache/incubator-hugegraph-ai/pull/25))
+* fix: invalid GitHub label ([#3](https://github.com/apache/hugegraph-ai/pull/3))
+* fix: import error ([#13](https://github.com/apache/hugegraph-ai/pull/13))
+* fix: function getEdgeByPage(): the generated query url does not include the parameter page ([#15](https://github.com/apache/hugegraph-ai/pull/15))
+* fix: issue template ([#23](https://github.com/apache/hugegraph-ai/pull/23))
+* fix: base-ref/head-ref missed in dependency-check-ci on branch push ([#25](https://github.com/apache/hugegraph-ai/pull/25))
#### Other Changes
-* chore: add asf.yaml and ISSUE_TEMPLATE ([#1](https://github.com/apache/incubator-hugegraph-ai/pull/1))
-* Bump urllib3 from 2.0.3 to 2.0.7 in /hugegraph-python ([#8](https://github.com/apache/incubator-hugegraph-ai/pull/8))
-* chore: create .gitignore file for py ([#9](https://github.com/apache/incubator-hugegraph-ai/pull/9))
-* refact: improve project structure & add some basic CI ([#17](https://github.com/apache/incubator-hugegraph-ai/pull/17))
-* chore: Update LICENSE and NOTICE ([#31](https://github.com/apache/incubator-hugegraph-ai/pull/31))
-* chore: add release scripts ([#33](https://github.com/apache/incubator-hugegraph-ai/pull/33))
-* chore: change file chmod 755 ([#34](https://github.com/apache/incubator-hugegraph-ai/pull/34))
+* chore: add asf.yaml and ISSUE_TEMPLATE ([#1](https://github.com/apache/hugegraph-ai/pull/1))
+* Bump urllib3 from 2.0.3 to 2.0.7 in /hugegraph-python ([#8](https://github.com/apache/hugegraph-ai/pull/8))
+* chore: create .gitignore file for py ([#9](https://github.com/apache/hugegraph-ai/pull/9))
+* refact: improve project structure & add some basic CI ([#17](https://github.com/apache/hugegraph-ai/pull/17))
+* chore: Update LICENSE and NOTICE ([#31](https://github.com/apache/hugegraph-ai/pull/31))
+* chore: add release scripts ([#33](https://github.com/apache/hugegraph-ai/pull/33))
+* chore: change file chmod 755 ([#34](https://github.com/apache/hugegraph-ai/pull/34))
### 发布细节
Please check the release details/contributor in each repository:
-- [Server Release Notes](https://github.com/apache/incubator-hugegraph/releases)
-- [Toolchain Release Notes](https://github.com/apache/incubator-hugegraph-toolchain/releases)
-- [AI Release Notes](https://github.com/apache/incubator-hugegraph-ai/releases)
-- [Commons Release Notes](https://github.com/apache/incubator-hugegraph-commons/releases)
+- [Server Release Notes](https://github.com/apache/hugegraph/releases)
+- [Toolchain Release Notes](https://github.com/apache/hugegraph-toolchain/releases)
+- [AI Release Notes](https://github.com/apache/hugegraph-ai/releases)
+- [Commons Release Notes](https://github.com/apache/hugegraph-commons/releases)
diff --git a/content/cn/docs/changelog/hugegraph-1.5.0-release-notes.md b/content/cn/docs/changelog/hugegraph-1.5.0-release-notes.md
index 016d091fa..3cf044015 100644
--- a/content/cn/docs/changelog/hugegraph-1.5.0-release-notes.md
+++ b/content/cn/docs/changelog/hugegraph-1.5.0-release-notes.md
@@ -1,7 +1,7 @@
---
title: "HugeGraph 1.5.0 Release Notes"
linkTitle: "Release-1.5.0"
-weight: 5
+weight: 3
---
> WIP: This doc is under construction, please wait for the final version (BETA)
@@ -18,145 +18,145 @@ PS: 未来 HugeGraph 组件的版本会朝着 `Java 11 -> Java 17 -> Java 21`
#### API Changes
-- **BREAKING CHANGE**: Support "parent & child" `EdgeLabel` type [#2662](https://github.com/apache/incubator-hugegraph/pull/2662)
+- **BREAKING CHANGE**: Support "parent & child" `EdgeLabel` type [#2662](https://github.com/apache/hugegraph/pull/2662)
#### Feature Changes
-- Integrate `pd-grpc`, `pd-common`, and `pd-client` [#2498](https://github.com/apache/incubator-hugegraph/pull/2498)
-- Integrate `store-grpc`, `store-common`, and `store-client` [#2476](https://github.com/apache/incubator-hugegraph/pull/2476)
-- Integrate `store-rocksdb` submodule [#2513](https://github.com/apache/incubator-hugegraph/pull/2513)
-- Integrate `pd-core` into HugeGraph [#2478](https://github.com/apache/incubator-hugegraph/pull/2478)
-- Integrate `pd-service` into HugeGraph [#2528](https://github.com/apache/incubator-hugegraph/pull/2528)
-- Integrate `pd-dist` into HugeGraph and add core tests, client tests, and REST tests for PD [#2532](https://github.com/apache/incubator-hugegraph/pull/2532)
-- Integrate `server-hstore` into HugeGraph [#2534](https://github.com/apache/incubator-hugegraph/pull/2534)
-- Integrate `store-core` submodule [#2548](https://github.com/apache/incubator-hugegraph/pull/2548)
-- Integrate `store-node` submodule [#2537](https://github.com/apache/incubator-hugegraph/pull/2537)
-- Support new backend Hstore [#2560](https://github.com/apache/incubator-hugegraph/pull/2560)
-- Support Docker deployment for PD and Store [#2573](https://github.com/apache/incubator-hugegraph/pull/2573)
-- Add a tool method `encode` [#2647](https://github.com/apache/incubator-hugegraph/pull/2647)
-- Add basic `MiniCluster` module for distributed system testing [#2615](https://github.com/apache/incubator-hugegraph/pull/2615)
-- Support disabling RocksDB auto-compaction via configuration [#2586](https://github.com/apache/incubator-hugegraph/pull/2586)
+- Integrate `pd-grpc`, `pd-common`, and `pd-client` [#2498](https://github.com/apache/hugegraph/pull/2498)
+- Integrate `store-grpc`, `store-common`, and `store-client` [#2476](https://github.com/apache/hugegraph/pull/2476)
+- Integrate `store-rocksdb` submodule [#2513](https://github.com/apache/hugegraph/pull/2513)
+- Integrate `pd-core` into HugeGraph [#2478](https://github.com/apache/hugegraph/pull/2478)
+- Integrate `pd-service` into HugeGraph [#2528](https://github.com/apache/hugegraph/pull/2528)
+- Integrate `pd-dist` into HugeGraph and add core tests, client tests, and REST tests for PD [#2532](https://github.com/apache/hugegraph/pull/2532)
+- Integrate `server-hstore` into HugeGraph [#2534](https://github.com/apache/hugegraph/pull/2534)
+- Integrate `store-core` submodule [#2548](https://github.com/apache/hugegraph/pull/2548)
+- Integrate `store-node` submodule [#2537](https://github.com/apache/hugegraph/pull/2537)
+- Support new backend Hstore [#2560](https://github.com/apache/hugegraph/pull/2560)
+- Support Docker deployment for PD and Store [#2573](https://github.com/apache/hugegraph/pull/2573)
+- Add a tool method `encode` [#2647](https://github.com/apache/hugegraph/pull/2647)
+- Add basic `MiniCluster` module for distributed system testing [#2615](https://github.com/apache/hugegraph/pull/2615)
+- Support disabling RocksDB auto-compaction via configuration [#2586](https://github.com/apache/hugegraph/pull/2586)
#### Bug Fixes
-- Switch RocksDB backend to memory when executing Gremlin examples [#2518](https://github.com/apache/incubator-hugegraph/pull/2518)
-- Avoid overriding backend config in Gremlin example scripts [#2519](https://github.com/apache/incubator-hugegraph/pull/2519)
-- Update resource references [#2522](https://github.com/apache/incubator-hugegraph/pull/2522)
-- Randomly generate default values [#2568](https://github.com/apache/incubator-hugegraph/pull/2568)
-- Update build artifact path for Docker deployment [#2590](https://github.com/apache/incubator-hugegraph/pull/2590)
-- Ensure thread safety for range attributes in PD [#2641](https://github.com/apache/incubator-hugegraph/pull/2641)
-- Correct server Docker copy source path [#2637](https://github.com/apache/incubator-hugegraph/pull/2637)
-- Fix JRaft Timer Metrics bug in Hstore [#2602](https://github.com/apache/incubator-hugegraph/pull/2602)
-- Enable JRaft MaxBodySize configuration [#2633](https://github.com/apache/incubator-hugegraph/pull/2633)
+- Switch RocksDB backend to memory when executing Gremlin examples [#2518](https://github.com/apache/hugegraph/pull/2518)
+- Avoid overriding backend config in Gremlin example scripts [#2519](https://github.com/apache/hugegraph/pull/2519)
+- Update resource references [#2522](https://github.com/apache/hugegraph/pull/2522)
+- Randomly generate default values [#2568](https://github.com/apache/hugegraph/pull/2568)
+- Update build artifact path for Docker deployment [#2590](https://github.com/apache/hugegraph/pull/2590)
+- Ensure thread safety for range attributes in PD [#2641](https://github.com/apache/hugegraph/pull/2641)
+- Correct server Docker copy source path [#2637](https://github.com/apache/hugegraph/pull/2637)
+- Fix JRaft Timer Metrics bug in Hstore [#2602](https://github.com/apache/hugegraph/pull/2602)
+- Enable JRaft MaxBodySize configuration [#2633](https://github.com/apache/hugegraph/pull/2633)
#### Option Changes
-- Mark old raft configs as deprecated [#2661](https://github.com/apache/incubator-hugegraph/pull/2661)
-- Enlarge bytes write limit and remove `big` parameter when encoding/decoding string ID length [#2622](https://github.com/apache/incubator-hugegraph/pull/2622)
+- Mark old raft configs as deprecated [#2661](https://github.com/apache/hugegraph/pull/2661)
+- Enlarge bytes write limit and remove `big` parameter when encoding/decoding string ID length [#2622](https://github.com/apache/hugegraph/pull/2622)
#### Other Changes
-- Add Swagger-UI LICENSE files [#2495](https://github.com/apache/incubator-hugegraph/pull/2495)
-- Translate CJK comments and punctuations to English across multiple modules [#2536](https://github.com/apache/incubator-hugegraph/pull/2536), [#2623](https://github.com/apache/incubator-hugegraph/pull/2625), [#2645](https://github.com/apache/incubator-hugegraph/pull/2645)
-- Introduce `install-dist` module in root [#2552](https://github.com/apache/incubator-hugegraph/pull/2552)
-- Enable up-to-date checks for UI (CI) [#2609](https://github.com/apache/incubator-hugegraph/pull/2609)
-- Minor improvements for POM properties [#2574](https://github.com/apache/incubator-hugegraph/pull/2574)
-- Migrate HugeGraph Commons [#2628](https://github.com/apache/incubator-hugegraph/pull/2628)
-- Tar source and binary packages for HugeGraph with PD-Store [#2594](https://github.com/apache/incubator-hugegraph/pull/2594)
-- Refactor: Enhance cache invalidation of the partition → leader shard in `ClientCache` [#2588](https://github.com/apache/incubator-hugegraph/pull/2588)
-- Refactor: Remove redundant properties in `LogMeta` and `PartitionMeta` [#2598](https://github.com/apache/incubator-hugegraph/pull/2598)
+- Add Swagger-UI LICENSE files [#2495](https://github.com/apache/hugegraph/pull/2495)
+- Translate CJK comments and punctuations to English across multiple modules [#2536](https://github.com/apache/hugegraph/pull/2536), [#2625](https://github.com/apache/hugegraph/pull/2625), [#2645](https://github.com/apache/hugegraph/pull/2645)
+- Introduce `install-dist` module in root [#2552](https://github.com/apache/hugegraph/pull/2552)
+- Enable up-to-date checks for UI (CI) [#2609](https://github.com/apache/hugegraph/pull/2609)
+- Minor improvements for POM properties [#2574](https://github.com/apache/hugegraph/pull/2574)
+- Migrate HugeGraph Commons [#2628](https://github.com/apache/hugegraph/pull/2628)
+- Tar source and binary packages for HugeGraph with PD-Store [#2594](https://github.com/apache/hugegraph/pull/2594)
+- Refactor: Enhance cache invalidation of the partition → leader shard in `ClientCache` [#2588](https://github.com/apache/hugegraph/pull/2588)
+- Refactor: Remove redundant properties in `LogMeta` and `PartitionMeta` [#2598](https://github.com/apache/hugegraph/pull/2598)
### hugegraph-toolchain
#### API Changes
-- Support "parent & child" `EdgeLabel` type [#624](https://github.com/apache/incubator-hugegraph-toolchain/pull/624)
+- Support "parent & child" `EdgeLabel` type [#624](https://github.com/apache/hugegraph-toolchain/pull/624)
#### Feature Changes
-- Support English interface & add a script/doc for it in Hubble [#631](https://github.com/apache/incubator-hugegraph-toolchain/pull/631)
+- Support English interface & add a script/doc for it in Hubble [#631](https://github.com/apache/hugegraph-toolchain/pull/631)
#### Bug Fixes
-- Serialize source and target label for non-father EdgeLabel [#628](https://github.com/apache/incubator-hugegraph-toolchain/pull/628)
-- Encode/decode Chinese error after building Hubble package [#627](https://github.com/apache/incubator-hugegraph-toolchain/pull/627)
-- Configure IPv4 to fix timeout of `yarn install` in Hubble [#636](https://github.com/apache/incubator-hugegraph-toolchain/pull/636)
-- Remove debugging output to speed up the frontend construction in Hubble [#638](https://github.com/apache/incubator-hugegraph-toolchain/pull/638)
+- Serialize source and target label for non-father EdgeLabel [#628](https://github.com/apache/hugegraph-toolchain/pull/628)
+- Encode/decode Chinese error after building Hubble package [#627](https://github.com/apache/hugegraph-toolchain/pull/627)
+- Configure IPv4 to fix timeout of `yarn install` in Hubble [#636](https://github.com/apache/hugegraph-toolchain/pull/636)
+- Remove debugging output to speed up the frontend construction in Hubble [#638](https://github.com/apache/hugegraph-toolchain/pull/638)
#### Other Changes
-- Bump `express` from 4.18.2 to 4.19.2 in Hubble Frontend [#598](https://github.com/apache/incubator-hugegraph-toolchain/pull/598)
-- Make IDEA support IssueNavigationLink [#600](https://github.com/apache/incubator-hugegraph-toolchain/pull/600)
-- Update `yarn.lock` for Hubble [#605](https://github.com/apache/incubator-hugegraph-toolchain/pull/605)
-- Introduce `editorconfig-maven-plugin` for verifying code style defined in `.editorconfig` [#614](https://github.com/apache/incubator-hugegraph-toolchain/pull/614)
-- Upgrade distribution version to 1.5.0 [#639](https://github.com/apache/incubator-hugegraph-toolchain/pull/639)
+- Bump `express` from 4.18.2 to 4.19.2 in Hubble Frontend [#598](https://github.com/apache/hugegraph-toolchain/pull/598)
+- Make IDEA support IssueNavigationLink [#600](https://github.com/apache/hugegraph-toolchain/pull/600)
+- Update `yarn.lock` for Hubble [#605](https://github.com/apache/hugegraph-toolchain/pull/605)
+- Introduce `editorconfig-maven-plugin` for verifying code style defined in `.editorconfig` [#614](https://github.com/apache/hugegraph-toolchain/pull/614)
+- Upgrade distribution version to 1.5.0 [#639](https://github.com/apache/hugegraph-toolchain/pull/639)
#### Documentation Changes
-- Clarify the contributing guidelines [#604](https://github.com/apache/incubator-hugegraph-toolchain/pull/604)
-- Enhance the README file for Hubble [#613](https://github.com/apache/incubator-hugegraph-toolchain/pull/613)
-- Update README style referring to the server's style [#615](https://github.com/apache/incubator-hugegraph-toolchain/pull/615)
+- Clarify the contributing guidelines [#604](https://github.com/apache/hugegraph-toolchain/pull/604)
+- Enhance the README file for Hubble [#613](https://github.com/apache/hugegraph-toolchain/pull/613)
+- Update README style referring to the server's style [#615](https://github.com/apache/hugegraph-toolchain/pull/615)
### hugegraph-ai
#### API Changes
-- Added local LLM API and version API. [#41](https://github.com/apache/incubator-hugegraph-ai/pull/41), [#44](https://github.com/apache/incubator-hugegraph-ai/pull/44)
-- Implemented new API and optimized code structure. [#63](https://github.com/apache/incubator-hugegraph-ai/pull/63)
-- Support for graphspace and refactored all APIs. [#67](https://github.com/apache/incubator-hugegraph-ai/pull/67)
+- Added local LLM API and version API. [#41](https://github.com/apache/hugegraph-ai/pull/41), [#44](https://github.com/apache/hugegraph-ai/pull/44)
+- Implemented new API and optimized code structure. [#63](https://github.com/apache/hugegraph-ai/pull/63)
+- Support for graphspace and refactored all APIs. [#67](https://github.com/apache/hugegraph-ai/pull/67)
#### Feature Changes
-- Added openai's apibase configuration and asynchronous methods in RAG web demo. [#41](https://github.com/apache/incubator-hugegraph-ai/pull/41), [#58](https://github.com/apache/incubator-hugegraph-ai/pull/58)
-- Support for multi reranker and enhanced UI. [#73](https://github.com/apache/incubator-hugegraph-ai/pull/73)
-- Node embedding, node classify, and graph classify with models based on DGL. [#83](https://github.com/apache/incubator-hugegraph-ai/pull/83)
-- Graph learning algorithm implementation (10+). [#102](https://github.com/apache/incubator-hugegraph-ai/pull/102)
-- Support for any openai-style API (standard). [#95](https://github.com/apache/incubator-hugegraph-ai/pull/95)
+- Added openai's apibase configuration and asynchronous methods in RAG web demo. [#41](https://github.com/apache/hugegraph-ai/pull/41), [#58](https://github.com/apache/hugegraph-ai/pull/58)
+- Support for multi reranker and enhanced UI. [#73](https://github.com/apache/hugegraph-ai/pull/73)
+- Node embedding, node classify, and graph classify with models based on DGL. [#83](https://github.com/apache/hugegraph-ai/pull/83)
+- Graph learning algorithm implementation (10+). [#102](https://github.com/apache/hugegraph-ai/pull/102)
+- Support for any openai-style API (standard). [#95](https://github.com/apache/hugegraph-ai/pull/95)
#### Bug Fixes
-- Fixed fusiform_similarity test in traverser for server 1.3.0. [#37](https://github.com/apache/incubator-hugegraph-ai/pull/37)
-- Avoid generating config twice and corrected e_cache type. [#56](https://github.com/apache/incubator-hugegraph-ai/pull/56), [#117](https://github.com/apache/incubator-hugegraph-ai/pull/117)
-- Fixed null value detection on vid attributes. [#115](https://github.com/apache/incubator-hugegraph-ai/pull/115)
-- Handled profile regenerate error. [#98](https://github.com/apache/incubator-hugegraph-ai/pull/98)
+- Fixed fusiform_similarity test in traverser for server 1.3.0. [#37](https://github.com/apache/hugegraph-ai/pull/37)
+- Avoid generating config twice and corrected e_cache type. [#56](https://github.com/apache/hugegraph-ai/pull/56), [#117](https://github.com/apache/hugegraph-ai/pull/117)
+- Fixed null value detection on vid attributes. [#115](https://github.com/apache/hugegraph-ai/pull/115)
+- Handled profile regenerate error. [#98](https://github.com/apache/hugegraph-ai/pull/98)
#### Option Changes
-- Added auth for fastapi and gradio. [#70](https://github.com/apache/incubator-hugegraph-ai/pull/70)
-- Support for multiple property types and importing graph from the entire doc. [#84](https://github.com/apache/incubator-hugegraph-ai/pull/84)
+- Added auth for fastapi and gradio. [#70](https://github.com/apache/hugegraph-ai/pull/70)
+- Support for multiple property types and importing graph from the entire doc. [#84](https://github.com/apache/hugegraph-ai/pull/84)
#### Other Changes
-- Reformatted documentation and updated README. [#36](https://github.com/apache/incubator-hugegraph-ai/pull/36), [#81](https://github.com/apache/incubator-hugegraph-ai/pull/81)
-- Introduced a black for code format in GitHub actions. [#47](https://github.com/apache/incubator-hugegraph-ai/pull/47)
-- Updated dependencies and environment preparations. [#45](https://github.com/apache/incubator-hugegraph-ai/pull/45), [#65](https://github.com/apache/incubator-hugegraph-ai/pull/65)
-- Enhanced user-friendly README. [#82](https://github.com/apache/incubator-hugegraph-ai/pull/82)
+- Reformatted documentation and updated README. [#36](https://github.com/apache/hugegraph-ai/pull/36), [#81](https://github.com/apache/hugegraph-ai/pull/81)
+- Introduced a black for code format in GitHub actions. [#47](https://github.com/apache/hugegraph-ai/pull/47)
+- Updated dependencies and environment preparations. [#45](https://github.com/apache/hugegraph-ai/pull/45), [#65](https://github.com/apache/hugegraph-ai/pull/65)
+- Enhanced user-friendly README. [#82](https://github.com/apache/hugegraph-ai/pull/82)
### hugegraph-computer
#### Feature Changes
-- Support Single Source Shortest Path Algorithm [#285](https://github.com/apache/incubator-hugegraph-computer/pull/285)
-- Support Output Filter [#303](https://github.com/apache/incubator-hugegraph-computer/pull/303)
+- Support Single Source Shortest Path Algorithm [#285](https://github.com/apache/hugegraph-computer/pull/285)
+- Support Output Filter [#303](https://github.com/apache/hugegraph-computer/pull/303)
#### Bug Fixes
-- Fix: base-ref/head-ref Missed in Dependency-Review on Schedule Push [#304](https://github.com/apache/incubator-hugegraph-computer/pull/304)
+- Fix: base-ref/head-ref Missed in Dependency-Review on Schedule Push [#304](https://github.com/apache/hugegraph-computer/pull/304)
#### Option Changes
-- Refactor(core): StringEncoding [#300](https://github.com/apache/incubator-hugegraph-computer/pull/300)
+- Refactor(core): StringEncoding [#300](https://github.com/apache/hugegraph-computer/pull/300)
#### Other Changes
-- Improve(algorithm): Random Walk Vertex Inactive [#301](https://github.com/apache/incubator-hugegraph-computer/pull/301)
-- Upgrade Version to 1.3.0 [#305](https://github.com/apache/incubator-hugegraph-computer/pull/305)
-- Doc(readme): Clarify the Contributing Guidelines [#306](https://github.com/apache/incubator-hugegraph-computer/pull/306)
-- Doc(readme): Add Hyperlink to Apache 2.0 [#308](https://github.com/apache/incubator-hugegraph-computer/pull/308)
-- Migrate Project to Computer Directory [#310](https://github.com/apache/incubator-hugegraph-computer/pull/310)
-- Update for Release 1.5 [#317](https://github.com/apache/incubator-hugegraph-computer/pull/317)
-- Fix Path When Exporting Source Package [#319](https://github.com/apache/incubator-hugegraph-computer/pull/319)
+- Improve(algorithm): Random Walk Vertex Inactive [#301](https://github.com/apache/hugegraph-computer/pull/301)
+- Upgrade Version to 1.3.0 [#305](https://github.com/apache/hugegraph-computer/pull/305)
+- Doc(readme): Clarify the Contributing Guidelines [#306](https://github.com/apache/hugegraph-computer/pull/306)
+- Doc(readme): Add Hyperlink to Apache 2.0 [#308](https://github.com/apache/hugegraph-computer/pull/308)
+- Migrate Project to Computer Directory [#310](https://github.com/apache/hugegraph-computer/pull/310)
+- Update for Release 1.5 [#317](https://github.com/apache/hugegraph-computer/pull/317)
+- Fix Path When Exporting Source Package [#319](https://github.com/apache/hugegraph-computer/pull/319)
### 发布细节
Please check the release details/contributor in each repository:
-- [Server Release Notes](https://github.com/apache/incubator-hugegraph/releases)
-- [Toolchain Release Notes](https://github.com/apache/incubator-hugegraph-toolchain/releases)
-- [Computer Release Notes](https://github.com/apache/incubator-hugegraph-computer/releases)
-- [AI Release Notes](https://github.com/apache/incubator-hugegraph-ai/releases)
+- [Server Release Notes](https://github.com/apache/hugegraph/releases)
+- [Toolchain Release Notes](https://github.com/apache/hugegraph-toolchain/releases)
+- [Computer Release Notes](https://github.com/apache/hugegraph-computer/releases)
+- [AI Release Notes](https://github.com/apache/hugegraph-ai/releases)
diff --git a/content/cn/docs/changelog/hugegraph-1.7.0-release-notes.md b/content/cn/docs/changelog/hugegraph-1.7.0-release-notes.md
index 1a385c335..f0292142c 100644
--- a/content/cn/docs/changelog/hugegraph-1.7.0-release-notes.md
+++ b/content/cn/docs/changelog/hugegraph-1.7.0-release-notes.md
@@ -1,7 +1,7 @@
---
title: "HugeGraph 1.7.0 Release Notes"
linkTitle: "Release-1.7.0"
-weight: 7
+weight: 1
---
> WIP: This doc is under construction, please wait for the final version (BETA)
@@ -14,244 +14,244 @@ weight: 7
#### API Changes
-- **BREAKING CHANGE**: Disable legacy backends include MySQL/PG/c*(.etc) [#2746](https://github.com/apache/incubator-hugegraph/pull/2746)
-- **BREAKING CHANGE**: Release version 1.7.0 [server + pd + store] [#2889](https://github.com/apache/incubator-hugegraph/pull/2889)
+- **BREAKING CHANGE**: Disable legacy backends include MySQL/PG/c*(.etc) [#2746](https://github.com/apache/hugegraph/pull/2746)
+- **BREAKING CHANGE**: Release version 1.7.0 [server + pd + store] [#2889](https://github.com/apache/hugegraph/pull/2889)
#### Feature Changes
-- Support MemoryManagement for graph query framework [#2649](https://github.com/apache/incubator-hugegraph/pull/2649)
-- LoginAPI support token_expire field [#2754](https://github.com/apache/incubator-hugegraph/pull/2754)
-- Add option for task role election [#2843](https://github.com/apache/incubator-hugegraph/pull/2843)
-- Optimize perf by avoid boxing long [#2861](https://github.com/apache/incubator-hugegraph/pull/2861)
-- StringId hold bytes to avoid decode/encode [#2862](https://github.com/apache/incubator-hugegraph/pull/2862)
-- Add PerfExample5 and PerfExample6 [#2860](https://github.com/apache/incubator-hugegraph/pull/2860)
-- RocksDBStore remove redundant checkOpened() call [#2863](https://github.com/apache/incubator-hugegraph/pull/2863)
-- Add path filter [#2898](https://github.com/apache/incubator-hugegraph/pull/2898)
-- Init serena memory system & add memories [#2902](https://github.com/apache/incubator-hugegraph/pull/2902)
+- Support MemoryManagement for graph query framework [#2649](https://github.com/apache/hugegraph/pull/2649)
+- LoginAPI support token_expire field [#2754](https://github.com/apache/hugegraph/pull/2754)
+- Add option for task role election [#2843](https://github.com/apache/hugegraph/pull/2843)
+- Optimize perf by avoid boxing long [#2861](https://github.com/apache/hugegraph/pull/2861)
+- StringId hold bytes to avoid decode/encode [#2862](https://github.com/apache/hugegraph/pull/2862)
+- Add PerfExample5 and PerfExample6 [#2860](https://github.com/apache/hugegraph/pull/2860)
+- RocksDBStore remove redundant checkOpened() call [#2863](https://github.com/apache/hugegraph/pull/2863)
+- Add path filter [#2898](https://github.com/apache/hugegraph/pull/2898)
+- Init serena memory system & add memories [#2902](https://github.com/apache/hugegraph/pull/2902)
#### Bug Fixes
-- Filter dynamice path(PUT/GET/DELETE) with params cause OOM [#2569](https://github.com/apache/incubator-hugegraph/pull/2569)
-- JRaft Histogram Metrics Value NaN [#2631](https://github.com/apache/incubator-hugegraph/pull/2631)
-- Update server image desc [#2702](https://github.com/apache/incubator-hugegraph/pull/2702)
-- Kneigbor-api has unmatched edge type with server [#2699](https://github.com/apache/incubator-hugegraph/pull/2699)
-- Add license for swagger-ui & reset use stage to false in ci yml [#2706](https://github.com/apache/incubator-hugegraph/pull/2706)
-- Fix build pd-store arm image [#2744](https://github.com/apache/incubator-hugegraph/pull/2744)
-- Fix graph server cache notifier mechanism [#2729](https://github.com/apache/incubator-hugegraph/pull/2729)
-- Tx leak when stopping the graph server [#2791](https://github.com/apache/incubator-hugegraph/pull/2791)
-- Ensure backend is initialized in gremlin script [#2824](https://github.com/apache/incubator-hugegraph/pull/2824)
-- Fix some potential lock & type cast issues [#2895](https://github.com/apache/incubator-hugegraph/pull/2895)
-- Fix npe in getVersion [#2897](https://github.com/apache/incubator-hugegraph/pull/2897)
-- Fix the support for graphsapi in rocksdb and add testing for graphsapi [#2900](https://github.com/apache/incubator-hugegraph/pull/2900)
-- Remove graph path in auth api path [#2899](https://github.com/apache/incubator-hugegraph/pull/2899)
-- Migrate to LTS jdk11 in all Dockerfile [#2901](https://github.com/apache/incubator-hugegraph/pull/2901)
-- Remove the judgment for java8 compatibility in the init-store [#2905](https://github.com/apache/incubator-hugegraph/pull/2905)
-- Add missing license and remove binary license.txt & fix tinkerpop ci & remove duplicate module [#2910](https://github.com/apache/incubator-hugegraph/pull/2910)
+- Filter dynamic path(PUT/GET/DELETE) with params cause OOM [#2569](https://github.com/apache/hugegraph/pull/2569)
+- JRaft Histogram Metrics Value NaN [#2631](https://github.com/apache/hugegraph/pull/2631)
+- Update server image desc [#2702](https://github.com/apache/hugegraph/pull/2702)
+- Kneighbor-api has unmatched edge type with server [#2699](https://github.com/apache/hugegraph/pull/2699)
+- Add license for swagger-ui & reset use stage to false in ci yml [#2706](https://github.com/apache/hugegraph/pull/2706)
+- Fix build pd-store arm image [#2744](https://github.com/apache/hugegraph/pull/2744)
+- Fix graph server cache notifier mechanism [#2729](https://github.com/apache/hugegraph/pull/2729)
+- Tx leak when stopping the graph server [#2791](https://github.com/apache/hugegraph/pull/2791)
+- Ensure backend is initialized in gremlin script [#2824](https://github.com/apache/hugegraph/pull/2824)
+- Fix some potential lock & type cast issues [#2895](https://github.com/apache/hugegraph/pull/2895)
+- Fix npe in getVersion [#2897](https://github.com/apache/hugegraph/pull/2897)
+- Fix the support for graphsapi in rocksdb and add testing for graphsapi [#2900](https://github.com/apache/hugegraph/pull/2900)
+- Remove graph path in auth api path [#2899](https://github.com/apache/hugegraph/pull/2899)
+- Migrate to LTS jdk11 in all Dockerfile [#2901](https://github.com/apache/hugegraph/pull/2901)
+- Remove the judgment for java8 compatibility in the init-store [#2905](https://github.com/apache/hugegraph/pull/2905)
+- Add missing license and remove binary license.txt & fix tinkerpop ci & remove duplicate module [#2910](https://github.com/apache/hugegraph/pull/2910)
#### Option Changes
-- Remove some outdated configuration [#2678](https://github.com/apache/incubator-hugegraph/pull/2678)
+- Remove some outdated configuration [#2678](https://github.com/apache/hugegraph/pull/2678)
#### Other Changes
-- Update outdated docs for release 1.5.0 [#2690](https://github.com/apache/incubator-hugegraph/pull/2690)
-- Fix licenses and remove empty files [#2692](https://github.com/apache/incubator-hugegraph/pull/2692)
-- Update repo artifacts references [#2695](https://github.com/apache/incubator-hugegraph/pull/2695)
-- Adjust release fury version [#2698](https://github.com/apache/incubator-hugegraph/pull/2698)
-- Fix the JSON license issue [#2697](https://github.com/apache/incubator-hugegraph/pull/2697)
-- Add debug info for tp test [#2688](https://github.com/apache/incubator-hugegraph/pull/2688)
-- Enhance words in README [#2734](https://github.com/apache/incubator-hugegraph/pull/2734)
-- Add collaborators in asf config [#2741](https://github.com/apache/incubator-hugegraph/pull/2741)
-- Adjust the related filters of sofa-bolt [#2735](https://github.com/apache/incubator-hugegraph/pull/2735)
-- Reopen discussion in .asf.yml config [#2751](https://github.com/apache/incubator-hugegraph/pull/2751)
-- Fix typo in README [#2806](https://github.com/apache/incubator-hugegraph/pull/2806)
-- Centralize version management in project [#2797](https://github.com/apache/incubator-hugegraph/pull/2797)
-- Update notice year [#2826](https://github.com/apache/incubator-hugegraph/pull/2826)
-- Improve maven Reproducible Builds → upgrade plugins [#2874](https://github.com/apache/incubator-hugegraph/pull/2874)
-- Enhance docker instruction with auth opened graph [#2881](https://github.com/apache/incubator-hugegraph/pull/2881)
-- Remove the package existing in java8 [#2792](https://github.com/apache/incubator-hugegraph/pull/2792)
-- Revise Docker usage instructions in README [#2882](https://github.com/apache/incubator-hugegraph/pull/2882)
-- Add DeepWiki badge to README [#2883](https://github.com/apache/incubator-hugegraph/pull/2883)
-- Update guidance for store module [#2894](https://github.com/apache/incubator-hugegraph/pull/2894)
-- Update test commands and improve documentation clarity [#2893](https://github.com/apache/incubator-hugegraph/pull/2893)
-- Bump rocksdb version from 7.2.2 to 8.10.2 [#2896](https://github.com/apache/incubator-hugegraph/pull/2896)
+- Update outdated docs for release 1.5.0 [#2690](https://github.com/apache/hugegraph/pull/2690)
+- Fix licenses and remove empty files [#2692](https://github.com/apache/hugegraph/pull/2692)
+- Update repo artifacts references [#2695](https://github.com/apache/hugegraph/pull/2695)
+- Adjust release fury version [#2698](https://github.com/apache/hugegraph/pull/2698)
+- Fix the JSON license issue [#2697](https://github.com/apache/hugegraph/pull/2697)
+- Add debug info for tp test [#2688](https://github.com/apache/hugegraph/pull/2688)
+- Enhance words in README [#2734](https://github.com/apache/hugegraph/pull/2734)
+- Add collaborators in asf config [#2741](https://github.com/apache/hugegraph/pull/2741)
+- Adjust the related filters of sofa-bolt [#2735](https://github.com/apache/hugegraph/pull/2735)
+- Reopen discussion in .asf.yml config [#2751](https://github.com/apache/hugegraph/pull/2751)
+- Fix typo in README [#2806](https://github.com/apache/hugegraph/pull/2806)
+- Centralize version management in project [#2797](https://github.com/apache/hugegraph/pull/2797)
+- Update notice year [#2826](https://github.com/apache/hugegraph/pull/2826)
+- Improve maven Reproducible Builds → upgrade plugins [#2874](https://github.com/apache/hugegraph/pull/2874)
+- Enhance docker instruction with auth opened graph [#2881](https://github.com/apache/hugegraph/pull/2881)
+- Remove the package existing in java8 [#2792](https://github.com/apache/hugegraph/pull/2792)
+- Revise Docker usage instructions in README [#2882](https://github.com/apache/hugegraph/pull/2882)
+- Add DeepWiki badge to README [#2883](https://github.com/apache/hugegraph/pull/2883)
+- Update guidance for store module [#2894](https://github.com/apache/hugegraph/pull/2894)
+- Update test commands and improve documentation clarity [#2893](https://github.com/apache/hugegraph/pull/2893)
+- Bump rocksdb version from 7.2.2 to 8.10.2 [#2896](https://github.com/apache/hugegraph/pull/2896)
### hugegraph-toolchain
#### API Changes
-- Support graphspace [#633](https://github.com/apache/incubator-hugegraph-toolchain/pull/633)
+- Support graphspace [#633](https://github.com/apache/hugegraph-toolchain/pull/633)
#### Feature Changes
-- Support jdbc date type & sync .editorconfig [#648](https://github.com/apache/incubator-hugegraph-toolchain/pull/648)
-- Add a useSSL option for mysql [#650](https://github.com/apache/incubator-hugegraph-toolchain/pull/650)
-- Patch for father sub edge [#654](https://github.com/apache/incubator-hugegraph-toolchain/pull/654)
-- Improve user experience for user script [#666](https://github.com/apache/incubator-hugegraph-toolchain/pull/666)
-- Support concurrent readers, short-id & Graphsrc [#683](https://github.com/apache/incubator-hugegraph-toolchain/pull/683)
-- Init serena onboarding & project memory files [#692](https://github.com/apache/incubator-hugegraph-toolchain/pull/692)
+- Support jdbc date type & sync .editorconfig [#648](https://github.com/apache/hugegraph-toolchain/pull/648)
+- Add a useSSL option for mysql [#650](https://github.com/apache/hugegraph-toolchain/pull/650)
+- Patch for father sub edge [#654](https://github.com/apache/hugegraph-toolchain/pull/654)
+- Improve user experience for user script [#666](https://github.com/apache/hugegraph-toolchain/pull/666)
+- Support concurrent readers, short-id & Graphsrc [#683](https://github.com/apache/hugegraph-toolchain/pull/683)
+- Init serena onboarding & project memory files [#692](https://github.com/apache/hugegraph-toolchain/pull/692)
#### Bug Fixes
-- Typo word in display [#655](https://github.com/apache/incubator-hugegraph-toolchain/pull/655)
-- Patch up missing classes and methods for hubble [#657](https://github.com/apache/incubator-hugegraph-toolchain/pull/657)
-- Adjust Client to 1.7.0 server [#689](https://github.com/apache/incubator-hugegraph-toolchain/pull/689)
-- Remove json license for release 1.7.0 [#698](https://github.com/apache/incubator-hugegraph-toolchain/pull/698)
+- Typo word in display [#655](https://github.com/apache/hugegraph-toolchain/pull/655)
+- Patch up missing classes and methods for hubble [#657](https://github.com/apache/hugegraph-toolchain/pull/657)
+- Adjust Client to 1.7.0 server [#689](https://github.com/apache/hugegraph-toolchain/pull/689)
+- Remove json license for release 1.7.0 [#698](https://github.com/apache/hugegraph-toolchain/pull/698)
#### Other Changes
-- Update hugegraph source commit id [#640](https://github.com/apache/incubator-hugegraph-toolchain/pull/640)
-- Add collaborators in asf config [#656](https://github.com/apache/incubator-hugegraph-toolchain/pull/656)
-- Update pom for version-1.7.0 [#681](https://github.com/apache/incubator-hugegraph-toolchain/pull/681)
-- Add DeepWiki badge to README [#684](https://github.com/apache/incubator-hugegraph-toolchain/pull/684)
-- Adjust APIs to compatible with 1.7.0 server [#685](https://github.com/apache/incubator-hugegraph-toolchain/pull/685)
-- Adjust LoadContext to 1.7.0 version [#687](https://github.com/apache/incubator-hugegraph-toolchain/pull/687)
-- Migrate to LTS jdk11 in all Dockerfile [#691](https://github.com/apache/incubator-hugegraph-toolchain/pull/691)
-- Update copyright year in NOTICE file [#697](https://github.com/apache/incubator-hugegraph-toolchain/pull/697)
+- Update hugegraph source commit id [#640](https://github.com/apache/hugegraph-toolchain/pull/640)
+- Add collaborators in asf config [#656](https://github.com/apache/hugegraph-toolchain/pull/656)
+- Update pom for version-1.7.0 [#681](https://github.com/apache/hugegraph-toolchain/pull/681)
+- Add DeepWiki badge to README [#684](https://github.com/apache/hugegraph-toolchain/pull/684)
+- Adjust APIs to compatible with 1.7.0 server [#685](https://github.com/apache/hugegraph-toolchain/pull/685)
+- Adjust LoadContext to 1.7.0 version [#687](https://github.com/apache/hugegraph-toolchain/pull/687)
+- Migrate to LTS jdk11 in all Dockerfile [#691](https://github.com/apache/hugegraph-toolchain/pull/691)
+- Update copyright year in NOTICE file [#697](https://github.com/apache/hugegraph-toolchain/pull/697)
### hugegraph-computer
#### Feature Changes
-- Migration Vermeer to hugegraph-computer [#316](https://github.com/apache/incubator-hugegraph-computer/pull/316)
-- Make startChan's size configurable [#328](https://github.com/apache/incubator-hugegraph-computer/pull/328)
-- Assign WorkerGroup via worker configuration [#332](https://github.com/apache/incubator-hugegraph-computer/pull/332)
-- Support task priority based scheduling [#336](https://github.com/apache/incubator-hugegraph-computer/pull/336)
-- Avoid 800k [#340](https://github.com/apache/incubator-hugegraph-computer/pull/340)
+- Migration Vermeer to hugegraph-computer [#316](https://github.com/apache/hugegraph-computer/pull/316)
+- Make startChan's size configurable [#328](https://github.com/apache/hugegraph-computer/pull/328)
+- Assign WorkerGroup via worker configuration [#332](https://github.com/apache/hugegraph-computer/pull/332)
+- Support task priority based scheduling [#336](https://github.com/apache/hugegraph-computer/pull/336)
+- Avoid 800k [#340](https://github.com/apache/hugegraph-computer/pull/340)
#### Bug Fixes
-- Fix docker file build [#341](https://github.com/apache/incubator-hugegraph-computer/pull/341)
+- Fix docker file build [#341](https://github.com/apache/hugegraph-computer/pull/341)
#### Other Changes
-- Update release version to 1.5.0 [#318](https://github.com/apache/incubator-hugegraph-computer/pull/318)
-- Update go depends module & fix headers [#321](https://github.com/apache/incubator-hugegraph-computer/pull/321)
-- Update go version to 1.23 [#322](https://github.com/apache/incubator-hugegraph-computer/pull/322)
-- Add collaborator in .asf.yaml [#323](https://github.com/apache/incubator-hugegraph-computer/pull/323)
-- Update the Go version in docker image [#333](https://github.com/apache/incubator-hugegraph-computer/pull/333)
-- Add DeepWiki badge to README [#337](https://github.com/apache/incubator-hugegraph-computer/pull/337)
-- Bump project version to 1.7.0 (RELEASE) [#338](https://github.com/apache/incubator-hugegraph-computer/pull/338)
-- Update copyright year in NOTICE file [#342](https://github.com/apache/incubator-hugegraph-computer/pull/342)
+- Update release version to 1.5.0 [#318](https://github.com/apache/hugegraph-computer/pull/318)
+- Update go depends module & fix headers [#321](https://github.com/apache/hugegraph-computer/pull/321)
+- Update go version to 1.23 [#322](https://github.com/apache/hugegraph-computer/pull/322)
+- Add collaborator in .asf.yaml [#323](https://github.com/apache/hugegraph-computer/pull/323)
+- Update the Go version in docker image [#333](https://github.com/apache/hugegraph-computer/pull/333)
+- Add DeepWiki badge to README [#337](https://github.com/apache/hugegraph-computer/pull/337)
+- Bump project version to 1.7.0 (RELEASE) [#338](https://github.com/apache/hugegraph-computer/pull/338)
+- Update copyright year in NOTICE file [#342](https://github.com/apache/hugegraph-computer/pull/342)
### hugegraph-ai
#### API Changes
-- Support choose template in api [#135](https://github.com/apache/incubator-hugegraph-ai/pull/135)
-- Add post method for paths-api [#162](https://github.com/apache/incubator-hugegraph-ai/pull/162)
-- Support switch graph in api & add some query configs [#184](https://github.com/apache/incubator-hugegraph-ai/pull/184)
-- Text2gremlin api [#258](https://github.com/apache/incubator-hugegraph-ai/pull/258)
-- Support switching prompt EN/CN [#269](https://github.com/apache/incubator-hugegraph-ai/pull/269)
-- **BREAKING CHANGE**: Update keyword extraction method [#282](https://github.com/apache/incubator-hugegraph-ai/pull/282)
+- Support choose template in api [#135](https://github.com/apache/hugegraph-ai/pull/135)
+- Add post method for paths-api [#162](https://github.com/apache/hugegraph-ai/pull/162)
+- Support switch graph in api & add some query configs [#184](https://github.com/apache/hugegraph-ai/pull/184)
+- Text2gremlin api [#258](https://github.com/apache/hugegraph-ai/pull/258)
+- Support switching prompt EN/CN [#269](https://github.com/apache/hugegraph-ai/pull/269)
+- **BREAKING CHANGE**: Update keyword extraction method [#282](https://github.com/apache/hugegraph-ai/pull/282)
#### Feature Changes
-- Added the process of text2gql in graphrag V1.0 [#105](https://github.com/apache/incubator-hugegraph-ai/pull/105)
-- Use pydantic-settings for config management [#122](https://github.com/apache/incubator-hugegraph-ai/pull/122)
-- Timely execute vid embedding & enhance some HTTP logic [#141](https://github.com/apache/incubator-hugegraph-ai/pull/141)
-- Use retry from tenacity [#143](https://github.com/apache/incubator-hugegraph-ai/pull/143)
-- Modify the summary info and enhance the request logic [#147](https://github.com/apache/incubator-hugegraph-ai/pull/147)
-- Automatic backup graph data timely [#151](https://github.com/apache/incubator-hugegraph-ai/pull/151)
-- Add a button to backup data & count together [#153](https://github.com/apache/incubator-hugegraph-ai/pull/153)
-- Extract topk_per_keyword & topk_return_results to .env [#154](https://github.com/apache/incubator-hugegraph-ai/pull/154)
-- Modify clear buttons [#156](https://github.com/apache/incubator-hugegraph-ai/pull/156)
-- Support intent recognition V1 [#159](https://github.com/apache/incubator-hugegraph-ai/pull/159)
-- Change vid embedding x:yy to yy & use multi-thread [#158](https://github.com/apache/incubator-hugegraph-ai/pull/158)
-- Support mathjax in rag query block V1 [#157](https://github.com/apache/incubator-hugegraph-ai/pull/157)
-- Use poetry to manage the dependencies [#149](https://github.com/apache/incubator-hugegraph-ai/pull/149)
-- Return schema.groovy first when backup graph data [#161](https://github.com/apache/incubator-hugegraph-ai/pull/161)
-- Merge all logs into one file [#171](https://github.com/apache/incubator-hugegraph-ai/pull/171)
-- Use uv for the CI action [#175](https://github.com/apache/incubator-hugegraph-ai/pull/175)
-- Use EN prompt for keywords extraction [#174](https://github.com/apache/incubator-hugegraph-ai/pull/174)
-- Support litellm LLM provider [#178](https://github.com/apache/incubator-hugegraph-ai/pull/178)
-- Improve graph extraction default prompt [#187](https://github.com/apache/incubator-hugegraph-ai/pull/187)
-- Replace vid by full vertexes info [#189](https://github.com/apache/incubator-hugegraph-ai/pull/189)
-- Support asynchronous streaming generation in rag block by using async_generator and asyncio.wait [#190](https://github.com/apache/incubator-hugegraph-ai/pull/190)
-- Generalize the regex extraction func [#194](https://github.com/apache/incubator-hugegraph-ai/pull/194)
-- Create quick_start.md [#196](https://github.com/apache/incubator-hugegraph-ai/pull/196)
-- Support Docker & K8s deployment way [#195](https://github.com/apache/incubator-hugegraph-ai/pull/195)
-- Multi-stage building in Dockerfile [#199](https://github.com/apache/incubator-hugegraph-ai/pull/199)
-- Support graph checking before updating vid embedding [#205](https://github.com/apache/incubator-hugegraph-ai/pull/205)
-- Disable text2gql by default [#216](https://github.com/apache/incubator-hugegraph-ai/pull/216)
-- Use 4.1-mini and 0.01 temperature by default [#214](https://github.com/apache/incubator-hugegraph-ai/pull/214)
-- Enhance the multi configs for LLM [#212](https://github.com/apache/incubator-hugegraph-ai/pull/212)
-- Textbox to Code [#217](https://github.com/apache/incubator-hugegraph-ai/pull/223)
-- Replace the IP + Port with URL [#209](https://github.com/apache/incubator-hugegraph-ai/pull/209)
-- Update gradio's version [#235](https://github.com/apache/incubator-hugegraph-ai/pull/235)
-- Use asyncio to get embeddings [#215](https://github.com/apache/incubator-hugegraph-ai/pull/215)
-- Change QPS -> RPM for timer decorator [#241](https://github.com/apache/incubator-hugegraph-ai/pull/241)
-- Support batch embedding [#238](https://github.com/apache/incubator-hugegraph-ai/pull/238)
-- Using nuitka to provide a binary/perf way for the service [#242](https://github.com/apache/incubator-hugegraph-ai/pull/242)
-- Use uv instead poetry [#226](https://github.com/apache/incubator-hugegraph-ai/pull/226)
-- Basic compatible in text2gremlin generation [#261](https://github.com/apache/incubator-hugegraph-ai/pull/261)
-- Enhance config path handling and add project root validation [#262](https://github.com/apache/incubator-hugegraph-ai/pull/262)
-- Add vermeer python client for graph computing [#263](https://github.com/apache/incubator-hugegraph-ai/pull/263)
-- Use uv in client & ml modules & adapter the CI [#257](https://github.com/apache/incubator-hugegraph-ai/pull/257)
-- Use uv to manage pkgs & update README [#272](https://github.com/apache/incubator-hugegraph-ai/pull/272)
-- Limit the deps version to handle critical init problems [#279](https://github.com/apache/incubator-hugegraph-ai/pull/279)
-- Support semi-automated prompt generation [#281](https://github.com/apache/incubator-hugegraph-ai/pull/281)
-- Support semi-automated generated graph schema [#274](https://github.com/apache/incubator-hugegraph-ai/pull/274)
-- Unify all modules with uv [#287](https://github.com/apache/incubator-hugegraph-ai/pull/287)
-- Add GitHub Actions for auto upstream sync and update SEALData subsample logic [#289](https://github.com/apache/incubator-hugegraph-ai/pull/289)
-- Add a basic LLM/AI coding instruction file [#290](https://github.com/apache/incubator-hugegraph-ai/pull/290)
-- Add rules for AI coding guideline - V1.0 [#293](https://github.com/apache/incubator-hugegraph-ai/pull/293)
-- Replace QianFan by OpenAI-compatible format [#285](https://github.com/apache/incubator-hugegraph-ai/pull/285)
-- Optimize vector index with asyncio embedding [#264](https://github.com/apache/incubator-hugegraph-ai/pull/264)
-- Refactor embedding parallelization to preserve order [#295](https://github.com/apache/incubator-hugegraph-ai/pull/295)
-- Support storing vector data for a graph instance by model type/name [#265](https://github.com/apache/incubator-hugegraph-ai/pull/265)
-- Add AGENTS.md as new document standard [#299](https://github.com/apache/incubator-hugegraph-ai/pull/299)
-- Add Fixed Workflow Execution Engine: Flow, Node, and Scheduler Architecture [#302](https://github.com/apache/incubator-hugegraph-ai/pull/302)
-- Support vector db layer V1.0 [#304](https://github.com/apache/incubator-hugegraph-ai/pull/304)
+- Added the process of text2gql in graphrag V1.0 [#105](https://github.com/apache/hugegraph-ai/pull/105)
+- Use pydantic-settings for config management [#122](https://github.com/apache/hugegraph-ai/pull/122)
+- Timely execute vid embedding & enhance some HTTP logic [#141](https://github.com/apache/hugegraph-ai/pull/141)
+- Use retry from tenacity [#143](https://github.com/apache/hugegraph-ai/pull/143)
+- Modify the summary info and enhance the request logic [#147](https://github.com/apache/hugegraph-ai/pull/147)
+- Automatic backup graph data timely [#151](https://github.com/apache/hugegraph-ai/pull/151)
+- Add a button to backup data & count together [#153](https://github.com/apache/hugegraph-ai/pull/153)
+- Extract topk_per_keyword & topk_return_results to .env [#154](https://github.com/apache/hugegraph-ai/pull/154)
+- Modify clear buttons [#156](https://github.com/apache/hugegraph-ai/pull/156)
+- Support intent recognition V1 [#159](https://github.com/apache/hugegraph-ai/pull/159)
+- Change vid embedding x:yy to yy & use multi-thread [#158](https://github.com/apache/hugegraph-ai/pull/158)
+- Support mathjax in rag query block V1 [#157](https://github.com/apache/hugegraph-ai/pull/157)
+- Use poetry to manage the dependencies [#149](https://github.com/apache/hugegraph-ai/pull/149)
+- Return schema.groovy first when backup graph data [#161](https://github.com/apache/hugegraph-ai/pull/161)
+- Merge all logs into one file [#171](https://github.com/apache/hugegraph-ai/pull/171)
+- Use uv for the CI action [#175](https://github.com/apache/hugegraph-ai/pull/175)
+- Use EN prompt for keywords extraction [#174](https://github.com/apache/hugegraph-ai/pull/174)
+- Support litellm LLM provider [#178](https://github.com/apache/hugegraph-ai/pull/178)
+- Improve graph extraction default prompt [#187](https://github.com/apache/hugegraph-ai/pull/187)
+- Replace vid by full vertexes info [#189](https://github.com/apache/hugegraph-ai/pull/189)
+- Support asynchronous streaming generation in rag block by using async_generator and asyncio.wait [#190](https://github.com/apache/hugegraph-ai/pull/190)
+- Generalize the regex extraction func [#194](https://github.com/apache/hugegraph-ai/pull/194)
+- Create quick_start.md [#196](https://github.com/apache/hugegraph-ai/pull/196)
+- Support Docker & K8s deployment way [#195](https://github.com/apache/hugegraph-ai/pull/195)
+- Multi-stage building in Dockerfile [#199](https://github.com/apache/hugegraph-ai/pull/199)
+- Support graph checking before updating vid embedding [#205](https://github.com/apache/hugegraph-ai/pull/205)
+- Disable text2gql by default [#216](https://github.com/apache/hugegraph-ai/pull/216)
+- Use 4.1-mini and 0.01 temperature by default [#214](https://github.com/apache/hugegraph-ai/pull/214)
+- Enhance the multi configs for LLM [#212](https://github.com/apache/hugegraph-ai/pull/212)
+- Textbox to Code [#223](https://github.com/apache/hugegraph-ai/pull/223)
+- Replace the IP + Port with URL [#209](https://github.com/apache/hugegraph-ai/pull/209)
+- Update gradio's version [#235](https://github.com/apache/hugegraph-ai/pull/235)
+- Use asyncio to get embeddings [#215](https://github.com/apache/hugegraph-ai/pull/215)
+- Change QPS -> RPM for timer decorator [#241](https://github.com/apache/hugegraph-ai/pull/241)
+- Support batch embedding [#238](https://github.com/apache/hugegraph-ai/pull/238)
+- Using nuitka to provide a binary/perf way for the service [#242](https://github.com/apache/hugegraph-ai/pull/242)
+- Use uv instead poetry [#226](https://github.com/apache/hugegraph-ai/pull/226)
+- Basic compatible in text2gremlin generation [#261](https://github.com/apache/hugegraph-ai/pull/261)
+- Enhance config path handling and add project root validation [#262](https://github.com/apache/hugegraph-ai/pull/262)
+- Add vermeer python client for graph computing [#263](https://github.com/apache/hugegraph-ai/pull/263)
+- Use uv in client & ml modules & adapter the CI [#257](https://github.com/apache/hugegraph-ai/pull/257)
+- Use uv to manage pkgs & update README [#272](https://github.com/apache/hugegraph-ai/pull/272)
+- Limit the deps version to handle critical init problems [#279](https://github.com/apache/hugegraph-ai/pull/279)
+- Support semi-automated prompt generation [#281](https://github.com/apache/hugegraph-ai/pull/281)
+- Support semi-automated generated graph schema [#274](https://github.com/apache/hugegraph-ai/pull/274)
+- Unify all modules with uv [#287](https://github.com/apache/hugegraph-ai/pull/287)
+- Add GitHub Actions for auto upstream sync and update SEALData subsample logic [#289](https://github.com/apache/hugegraph-ai/pull/289)
+- Add a basic LLM/AI coding instruction file [#290](https://github.com/apache/hugegraph-ai/pull/290)
+- Add rules for AI coding guideline - V1.0 [#293](https://github.com/apache/hugegraph-ai/pull/293)
+- Replace QianFan by OpenAI-compatible format [#285](https://github.com/apache/hugegraph-ai/pull/285)
+- Optimize vector index with asyncio embedding [#264](https://github.com/apache/hugegraph-ai/pull/264)
+- Refactor embedding parallelization to preserve order [#295](https://github.com/apache/hugegraph-ai/pull/295)
+- Support storing vector data for a graph instance by model type/name [#265](https://github.com/apache/hugegraph-ai/pull/265)
+- Add AGENTS.md as new document standard [#299](https://github.com/apache/hugegraph-ai/pull/299)
+- Add Fixed Workflow Execution Engine: Flow, Node, and Scheduler Architecture [#302](https://github.com/apache/hugegraph-ai/pull/302)
+- Support vector db layer V1.0 [#304](https://github.com/apache/hugegraph-ai/pull/304)
#### Bug Fixes
-- Limit the length of log & improve the format [#121](https://github.com/apache/incubator-hugegraph-ai/pull/121)
-- Pylint in ml [#125](https://github.com/apache/incubator-hugegraph-ai/pull/125)
-- Critical bug with pylint usage [#131](https://github.com/apache/incubator-hugegraph-ai/pull/131)
-- Multi vid k-neighbor query only return the data of first vid [#132](https://github.com/apache/incubator-hugegraph-ai/pull/132)
-- Replace getenv usage to settings [#133](https://github.com/apache/incubator-hugegraph-ai/pull/133)
-- Correct header writing errors [#140](https://github.com/apache/incubator-hugegraph-ai/pull/140)
-- Update prompt to fit prefix cache [#137](https://github.com/apache/incubator-hugegraph-ai/pull/137)
-- Extract_graph_data use wrong method [#145](https://github.com/apache/incubator-hugegraph-ai/pull/145)
-- Use empty str for llm config [#155](https://github.com/apache/incubator-hugegraph-ai/pull/155)
-- Update gremlin generate prompt to apply fuzzy match [#163](https://github.com/apache/incubator-hugegraph-ai/pull/163)
-- Enable fastapi auto reload function [#164](https://github.com/apache/incubator-hugegraph-ai/pull/164)
-- Fix tiny bugs & optimize reranker layout [#202](https://github.com/apache/incubator-hugegraph-ai/pull/202)
-- Enable tasks concurrency configs in Gradio [#188](https://github.com/apache/incubator-hugegraph-ai/pull/188)
-- Align regex extraction of json to json format of prompt [#211](https://github.com/apache/incubator-hugegraph-ai/pull/211)
-- Fix documentation sample code error [#219](https://github.com/apache/incubator-hugegraph-ai/pull/219)
-- Failed to remove vectors when updating vid embedding [#243](https://github.com/apache/incubator-hugegraph-ai/pull/243)
-- Skip empty chunk in LLM steaming mode [#245](https://github.com/apache/incubator-hugegraph-ai/pull/245)
-- Ollama batch embedding bug [#250](https://github.com/apache/incubator-hugegraph-ai/pull/250)
-- Fix Dockerfile to add pyproject.toml anchor file [#266](https://github.com/apache/incubator-hugegraph-ai/pull/266)
-- Add missing 'properties' in gremlin prompt formatting [#298](https://github.com/apache/incubator-hugegraph-ai/pull/298)
-- Fixed cgraph version [#305](https://github.com/apache/incubator-hugegraph-ai/pull/305)
-- Ollama embedding API usage and config param [#306](https://github.com/apache/incubator-hugegraph-ai/pull/306)
+- Limit the length of log & improve the format [#121](https://github.com/apache/hugegraph-ai/pull/121)
+- Pylint in ml [#125](https://github.com/apache/hugegraph-ai/pull/125)
+- Critical bug with pylint usage [#131](https://github.com/apache/hugegraph-ai/pull/131)
+- Multi vid k-neighbor query only return the data of first vid [#132](https://github.com/apache/hugegraph-ai/pull/132)
+- Replace getenv usage to settings [#133](https://github.com/apache/hugegraph-ai/pull/133)
+- Correct header writing errors [#140](https://github.com/apache/hugegraph-ai/pull/140)
+- Update prompt to fit prefix cache [#137](https://github.com/apache/hugegraph-ai/pull/137)
+- Extract_graph_data use wrong method [#145](https://github.com/apache/hugegraph-ai/pull/145)
+- Use empty str for llm config [#155](https://github.com/apache/hugegraph-ai/pull/155)
+- Update gremlin generate prompt to apply fuzzy match [#163](https://github.com/apache/hugegraph-ai/pull/163)
+- Enable fastapi auto reload function [#164](https://github.com/apache/hugegraph-ai/pull/164)
+- Fix tiny bugs & optimize reranker layout [#202](https://github.com/apache/hugegraph-ai/pull/202)
+- Enable tasks concurrency configs in Gradio [#188](https://github.com/apache/hugegraph-ai/pull/188)
+- Align regex extraction of json to json format of prompt [#211](https://github.com/apache/hugegraph-ai/pull/211)
+- Fix documentation sample code error [#219](https://github.com/apache/hugegraph-ai/pull/219)
+- Failed to remove vectors when updating vid embedding [#243](https://github.com/apache/hugegraph-ai/pull/243)
+- Skip empty chunk in LLM steaming mode [#245](https://github.com/apache/hugegraph-ai/pull/245)
+- Ollama batch embedding bug [#250](https://github.com/apache/hugegraph-ai/pull/250)
+- Fix Dockerfile to add pyproject.toml anchor file [#266](https://github.com/apache/hugegraph-ai/pull/266)
+- Add missing 'properties' in gremlin prompt formatting [#298](https://github.com/apache/hugegraph-ai/pull/298)
+- Fixed cgraph version [#305](https://github.com/apache/hugegraph-ai/pull/305)
+- Ollama embedding API usage and config param [#306](https://github.com/apache/hugegraph-ai/pull/306)
#### Option Changes
-- Remove enable_gql logic in api & rag block [#148](https://github.com/apache/incubator-hugegraph-ai/pull/148)
+- Remove enable_gql logic in api & rag block [#148](https://github.com/apache/hugegraph-ai/pull/148)
#### Other Changes
-- Update README for python-client/SDK [#150](https://github.com/apache/incubator-hugegraph-ai/pull/150)
-- Enable pip cache [#142](https://github.com/apache/incubator-hugegraph-ai/pull/142)
-- Enable discussion & change merge way [#201](https://github.com/apache/incubator-hugegraph-ai/pull/201)
-- Synchronization with official documentation [#273](https://github.com/apache/incubator-hugegraph-ai/pull/273)
-- Fix grammar errors [#275](https://github.com/apache/incubator-hugegraph-ai/pull/275)
-- Improve README clarity and deployment instructions [#276](https://github.com/apache/incubator-hugegraph-ai/pull/276)
-- Add docker-compose deployment and improve container networking instructions [#280](https://github.com/apache/incubator-hugegraph-ai/pull/280)
-- Update docker compose command [#283](https://github.com/apache/incubator-hugegraph-ai/pull/283)
-- Reduce third-party library log output [#244](https://github.com/apache/incubator-hugegraph-ai/pull/284)
-- Update README with improved setup instructions [#294](https://github.com/apache/incubator-hugegraph-ai/pull/294)
-- Add collaborators in asf config [#182](https://github.com/apache/incubator-hugegraph-ai/pull/182)
+- Update README for python-client/SDK [#150](https://github.com/apache/hugegraph-ai/pull/150)
+- Enable pip cache [#142](https://github.com/apache/hugegraph-ai/pull/142)
+- Enable discussion & change merge way [#201](https://github.com/apache/hugegraph-ai/pull/201)
+- Synchronization with official documentation [#273](https://github.com/apache/hugegraph-ai/pull/273)
+- Fix grammar errors [#275](https://github.com/apache/hugegraph-ai/pull/275)
+- Improve README clarity and deployment instructions [#276](https://github.com/apache/hugegraph-ai/pull/276)
+- Add docker-compose deployment and improve container networking instructions [#280](https://github.com/apache/hugegraph-ai/pull/280)
+- Update docker compose command [#283](https://github.com/apache/hugegraph-ai/pull/283)
+- Reduce third-party library log output [#284](https://github.com/apache/hugegraph-ai/pull/284)
+- Update README with improved setup instructions [#294](https://github.com/apache/hugegraph-ai/pull/294)
+- Add collaborators in asf config [#182](https://github.com/apache/hugegraph-ai/pull/182)
### 发布细节
Please check the release details/contributor in each repository:
-- [Server Release Notes](https://github.com/apache/incubator-hugegraph/releases)
-- [Toolchain Release Notes](https://github.com/apache/incubator-hugegraph-toolchain/releases)
-- [Computer Release Notes](https://github.com/apache/incubator-hugegraph-computer/releases)
-- [AI Release Notes](https://github.com/apache/incubator-hugegraph-ai/releases)
+- [Server Release Notes](https://github.com/apache/hugegraph/releases)
+- [Toolchain Release Notes](https://github.com/apache/hugegraph-toolchain/releases)
+- [Computer Release Notes](https://github.com/apache/hugegraph-computer/releases)
+- [AI Release Notes](https://github.com/apache/hugegraph-ai/releases)
diff --git a/content/cn/docs/clients/gremlin-console.md b/content/cn/docs/clients/gremlin-console.md
index 1d1103550..468bea58e 100644
--- a/content/cn/docs/clients/gremlin-console.md
+++ b/content/cn/docs/clients/gremlin-console.md
@@ -43,7 +43,7 @@ gremlin>
> 这里的 `--` 会被 getopts 解析为最后一个 option,这样后面的 options 就可以传入 Gremlin-Console 进行处理了。`-i` 代表 `Execute the specified script and leave the console open on completion`,更多的选项可以参考 Gremlin-Console 的[源代码](https://github.com/apache/tinkerpop/blob/3.5.1/gremlin-console/src/main/groovy/org/apache/tinkerpop/gremlin/console/Console.groovy#L483)。
-其中 [`example.groovy`](https://github.com/apache/incubator-hugegraph/blob/master/hugegraph-server/hugegraph-dist/src/assembly/static/scripts/example.groovy) 是 scripts 目录下的一个示例脚本,该脚本插入了一些数据,并在最后查询图中顶点和边的数量。
+其中 [`example.groovy`](https://github.com/apache/hugegraph/blob/master/hugegraph-server/hugegraph-dist/src/assembly/static/scripts/example.groovy) 是 scripts 目录下的一个示例脚本,该脚本插入了一些数据,并在最后查询图中顶点和边的数量。
此时还可以继续输入 Gremlin 语句对图进行操作:
diff --git a/content/cn/docs/clients/restful-api/_index.md b/content/cn/docs/clients/restful-api/_index.md
index afdd9d830..67097fa59 100644
--- a/content/cn/docs/clients/restful-api/_index.md
+++ b/content/cn/docs/clients/restful-api/_index.md
@@ -9,7 +9,7 @@ weight: 1
> - HugeGraph 1.7.0+ 引入了图空间功能,API 路径格式为:`/graphspaces/{graphspace}/graphs/{graph}`
> - HugeGraph 1.5.x 及之前版本使用旧路径:`/graphs/{graph}`, 以及创建/克隆图的 api 使用 text/plain 作为 Content-Type, 1.7.0 及之后使用 json
> - 默认图空间名称为 `DEFAULT`,可直接使用
-> - 旧版本 doc 参考:[HugeGraph 1.5.x RESTful API](https://github.com/apache/incubator-hugegraph-doc/tree/release-1.5.0)
+> - 旧版本 doc 参考:[HugeGraph 1.5.x RESTful API](https://github.com/apache/hugegraph-doc/tree/release-1.5.0)
除了下方的文档,你还可以通过 `localhost:8080/swagger-ui/index.html` 访问 `swagger-ui` 以查看 `RESTful API`。[示例可以参考此处](/cn/docs/quickstart/hugegraph/hugegraph-server#swaggerui-example)
diff --git a/content/cn/docs/clients/restful-api/auth.md b/content/cn/docs/clients/restful-api/auth.md
index 606b4e5c0..ffcc7b929 100644
--- a/content/cn/docs/clients/restful-api/auth.md
+++ b/content/cn/docs/clients/restful-api/auth.md
@@ -2,8 +2,13 @@
title: "Authentication API"
linkTitle: "Authentication"
weight: 16
+description: "Authentication(认证鉴权)REST 接口:管理用户、角色、权限和访问控制,实现细粒度的图数据安全机制。"
---
+> **版本变更说明**:
+> - 1.7.0+: Auth API 路径使用 GraphSpace 格式,如 `/graphspaces/DEFAULT/auth/users`,且 group/target 等 id 格式与 name 一致(如 `admin`)
+> - 1.5.x 及更早: Auth API 路径包含 graph 名称,group/target 等 id 格式类似 `-69:grant`。参考 [HugeGraph 1.5.x RESTful API](https://github.com/apache/hugegraph-doc/tree/release-1.5.0)
+
### 10.1 用户认证与权限控制
> 开启权限及相关配置请先参考 [权限配置](/cn/docs/config/config-authentication/) 文档
@@ -22,7 +27,7 @@ city: Beijing})
##### 接口说明:
用户认证与权限控制接口包括 5 类:UserAPI、GroupAPI、TargetAPI、BelongAPI、AccessAPI。
-**注意**: 1.5.0 及之前,group/target 等 id 的格式类似 -69:grant,1.7.0 及之后,id 和 name 一致,如 admin [HugeGraph 1.5.x RESTful API](https://github.com/apache/incubator-hugegraph-doc/tree/release-1.5.0)
+**注意**: 1.5.0 及之前,group/target 等 id 的格式类似 -69:grant,1.7.0 及之后,id 和 name 一致,如 admin [HugeGraph 1.5.x RESTful API](https://github.com/apache/hugegraph-doc/tree/release-1.5.0)
### 10.2 用户(User)API
用户接口包括:创建用户,删除用户,修改用户,和查询用户相关信息接口。
@@ -1044,7 +1049,7 @@ GET http://localhost:8080/graphspaces/DEFAULT/auth/accesses/S-69:all>-88>11>S-77
### 10.7 图空间管理员(Manager)API
-**重要提示**:在使用以下 API 之前,需要先创建图空间(graphspace)。请参考 [Graphspace API](../graphspace) 创建名为 `gs1` 的图空间。文档中的示例均假设已存在名为 `gs1` 的图空间
+**重要提示**:在使用以下 API 之前,需要先创建图空间(graphspace)。请参考 [Graphspace API](./graphspace) 创建名为 `gs1` 的图空间。文档中的示例均假设已存在名为 `gs1` 的图空间
1. 图空间管理员 API 用于在 graphspace 维度给用户授予/回收管理员角色,并查询当前用户或其他用户在该 graphspace 下的角色信息。角色类型可取 `SPACE`、`SPACE_MEMBER`、`ADMIN` 。
diff --git a/content/cn/docs/clients/restful-api/cypher.md b/content/cn/docs/clients/restful-api/cypher.md
index 7eddf199c..0d7154724 100644
--- a/content/cn/docs/clients/restful-api/cypher.md
+++ b/content/cn/docs/clients/restful-api/cypher.md
@@ -2,6 +2,7 @@
title: "Cypher API"
linkTitle: "Cypher"
weight: 15
+description: "Cypher(图查询语言)REST 接口:通过 HTTP 接口执行 OpenCypher 声明式图查询语言。"
---
### 9.1 Cypher
diff --git a/content/cn/docs/clients/restful-api/edge.md b/content/cn/docs/clients/restful-api/edge.md
index d17242a2b..c12ebfaf5 100644
--- a/content/cn/docs/clients/restful-api/edge.md
+++ b/content/cn/docs/clients/restful-api/edge.md
@@ -2,6 +2,7 @@
title: "Edge API"
linkTitle: "Edge"
weight: 8
+description: "Edge(边)REST 接口:创建、查询、更新和删除顶点之间的关系数据,支持批量操作和方向查询。"
---
### 2.2 Edge
diff --git a/content/cn/docs/clients/restful-api/edgelabel.md b/content/cn/docs/clients/restful-api/edgelabel.md
index 145992c22..33f1ad54a 100644
--- a/content/cn/docs/clients/restful-api/edgelabel.md
+++ b/content/cn/docs/clients/restful-api/edgelabel.md
@@ -2,6 +2,7 @@
title: "EdgeLabel API"
linkTitle: "EdgeLabel"
weight: 4
+description: "EdgeLabel(边标签)REST 接口:定义边类型、源顶点和目标顶点的关系约束,构建图的连接规则。"
---
### 1.4 EdgeLabel
@@ -310,4 +311,4 @@ DELETE http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/schema/edgelab
注:
-> 可以通过`GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/1`(其中"1"是 task_id)来查询异步任务的执行状态,更多[异步任务 RESTful API](../task)
+> 可以通过`GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/1`(其中"1"是 task_id)来查询异步任务的执行状态,更多[异步任务 RESTful API](./task)
diff --git a/content/cn/docs/clients/restful-api/graphs.md b/content/cn/docs/clients/restful-api/graphs.md
index 5229a9432..560efc610 100644
--- a/content/cn/docs/clients/restful-api/graphs.md
+++ b/content/cn/docs/clients/restful-api/graphs.md
@@ -2,6 +2,7 @@
title: "Graphs API"
linkTitle: "Graphs"
weight: 12
+description: "Graphs(图管理)REST 接口:管理图实例的生命周期,包括创建、查询、克隆、清空和删除图数据库。"
---
### 6.1 Graphs
@@ -112,19 +113,19 @@ DELETE http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/clear?confirm_
##### Method & Url
```
-POST http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph_clone?clone_graph_name=hugegraph
+POST http://localhost:8080/graphspaces/DEFAULT/graphs/cloneGraph?clone_graph_name=hugegraph
```
##### Request Body (可选)
克隆一个非鉴权模式的图(设置 `Content-Type: application/json`)
-```json
+```javascript
{
"gremlin.graph": "org.apache.hugegraph.HugeFactory",
"backend": "rocksdb",
"serializer": "binary",
- "store": "hugegraph_clone",
+ "store": "cloneGraph",
"rocksdb.data_path": "./rks-data-xx",
"rocksdb.wal_path": "./rks-data-xx"
}
@@ -144,7 +145,7 @@ POST http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph_clone?clone_grap
```javascript
{
- "name": "hugegraph_clone",
+ "name": "cloneGraph",
"backend": "rocksdb"
}
```
@@ -166,11 +167,21 @@ POST http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph-xx
##### Request Body
-创建一个非鉴权模式的图(设置 `Content-Type: application/json`)
+创建一个图(设置 `Content-Type: application/json`)
+
+**`gremlin.graph` 配置说明:**
+- 鉴权模式:`"gremlin.graph": "org.apache.hugegraph.auth.HugeFactoryAuthProxy"`(推荐)
+- 非鉴权模式:`"gremlin.graph": "org.apache.hugegraph.HugeFactory"`
+
+**注意**:
+1. 在 1.7.0 版本中,动态创建图会导致 NPE 错误。该问题已在 [PR#2912](https://github.com/apache/hugegraph/pull/2912) 中修复。当前 master 版本和 1.7.0 之前的版本不受此问题影响。
+2. 1.7.0 及之前版本,如果 backend 是 hstore,必须在请求体加上 "task.scheduler_type": "distributed"。同时请确保 HugeGraph-Server 已正确配置 PD,参见 [HStore 配置](/cn/docs/quickstart/hugegraph/hugegraph-server/#511-分布式存储hstore)。
-```json
+**RocksDB 示例:**
+
+```javascript
{
- "gremlin.graph": "org.apache.hugegraph.HugeFactory",
+ "gremlin.graph": "org.apache.hugegraph.auth.HugeFactoryAuthProxy",
"backend": "rocksdb",
"serializer": "binary",
"store": "hugegraph",
@@ -179,9 +190,20 @@ POST http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph-xx
}
```
-> Note:
-> 1. Rocksdb 存储路径不能与现有图相同(需使用不同的目录)
-> 2. 如需开启新图的权限系统,需替换设置 `gremlin.graph=org.apache.hugegraph.auth.HugeFactoryAuthProxy`
+**HStore 示例(适用于 1.7.0 及之前版本):**
+
+```javascript
+{
+ "gremlin.graph": "org.apache.hugegraph.auth.HugeFactoryAuthProxy",
+ "backend": "hstore",
+ "serializer": "binary",
+ "store": "hugegraph2",
+ "task.scheduler_type": "distributed",
+ "pd.peers": "127.0.0.1:8686"
+}
+```
+
+> Note: Rocksdb 存储路径不能与现有图相同(需使用不同的目录)
##### Response Status
@@ -216,7 +238,7 @@ POST http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph-xx
##### Method & Url
```javascript
-DELETE http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph_clone?confirm_message=I%27m%20sure%20to%20drop%20the%20graph
+DELETE http://localhost:8080/graphspaces/DEFAULT/graphs/graphA?confirm_message=I%27m%20sure%20to%20drop%20the%20graph
```
##### Response Status
@@ -486,4 +508,4 @@ PUT http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/compact
"local": "OK"
}
}
-```
+```
\ No newline at end of file
diff --git a/content/cn/docs/clients/restful-api/gremlin.md b/content/cn/docs/clients/restful-api/gremlin.md
index d2affc3ae..ab22c1cf1 100644
--- a/content/cn/docs/clients/restful-api/gremlin.md
+++ b/content/cn/docs/clients/restful-api/gremlin.md
@@ -2,6 +2,7 @@
title: "Gremlin API"
linkTitle: "Gremlin"
weight: 14
+description: "Gremlin(图查询语言)REST 接口:通过 HTTP 接口执行 Gremlin 图遍历查询语言脚本。"
---
### 8.1 Gremlin
@@ -228,7 +229,7 @@ POST http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/jobs/gremlin
注:
-> 可以通过`GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/1`(其中"1"是 task_id)来查询异步任务的执行状态,更多[异步任务 RESTful API](../task)
+> 可以通过`GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/1`(其中"1"是 task_id)来查询异步任务的执行状态,更多[异步任务 RESTful API](./task)
**查询边**
@@ -259,4 +260,4 @@ POST http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/jobs/gremlin
注:
-> 可以通过`GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/2`(其中"2"是 task_id)来查询异步任务的执行状态,更多[异步任务 RESTful API](../task)
+> 可以通过`GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/2`(其中"2"是 task_id)来查询异步任务的执行状态,更多[异步任务 RESTful API](./task)
diff --git a/content/cn/docs/clients/restful-api/indexlabel.md b/content/cn/docs/clients/restful-api/indexlabel.md
index efddfbbfb..0a51d23fd 100644
--- a/content/cn/docs/clients/restful-api/indexlabel.md
+++ b/content/cn/docs/clients/restful-api/indexlabel.md
@@ -2,6 +2,7 @@
title: "IndexLabel API"
linkTitle: "IndexLabel"
weight: 5
+description: "IndexLabel(索引标签)REST 接口:为顶点和边的属性创建索引,加速基于属性的查询和过滤操作。"
---
### 1.5 IndexLabel
@@ -173,4 +174,4 @@ DELETE http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/schema/indexla
注:
-> 可以通过`GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/1`(其中"1"是 task_id)来查询异步任务的执行状态,更多[异步任务 RESTful API](../task)
+> 可以通过`GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/1`(其中"1"是 task_id)来查询异步任务的执行状态,更多[异步任务 RESTful API](./task)
diff --git a/content/cn/docs/clients/restful-api/metrics.md b/content/cn/docs/clients/restful-api/metrics.md
index e984d2039..f89698ada 100644
--- a/content/cn/docs/clients/restful-api/metrics.md
+++ b/content/cn/docs/clients/restful-api/metrics.md
@@ -2,7 +2,7 @@
title: "Metrics API"
linkTitle: "Metrics"
weight: 17
-
+description: "Metrics(监控指标)REST 接口:获取系统运行时的性能指标、统计信息和健康状态数据。"
---
HugeGraph 提供了获取监控信息的 Metrics 接口,比如各个 Gremlin 执行时间的统计、缓存的占用大小等。Metrics
diff --git a/content/cn/docs/clients/restful-api/other.md b/content/cn/docs/clients/restful-api/other.md
index 8f394e439..0e6fd0458 100644
--- a/content/cn/docs/clients/restful-api/other.md
+++ b/content/cn/docs/clients/restful-api/other.md
@@ -2,6 +2,7 @@
title: "Other API"
linkTitle: "Other"
weight: 18
+description: "Other(其他接口)REST 接口:提供系统版本查询和 API 版本信息等辅助功能。"
---
### 11.1 Other
diff --git a/content/cn/docs/clients/restful-api/propertykey.md b/content/cn/docs/clients/restful-api/propertykey.md
index 0f008f8b8..fc04e8456 100644
--- a/content/cn/docs/clients/restful-api/propertykey.md
+++ b/content/cn/docs/clients/restful-api/propertykey.md
@@ -2,6 +2,7 @@
title: "PropertyKey API"
linkTitle: "PropertyKey"
weight: 2
+description: "PropertyKey(属性键)REST 接口:定义图中所有属性的数据类型和基数约束,是构建图模式的基础元素。"
---
### 1.2 PropertyKey
diff --git a/content/cn/docs/clients/restful-api/rank.md b/content/cn/docs/clients/restful-api/rank.md
index 780ed8298..f18fc4730 100644
--- a/content/cn/docs/clients/restful-api/rank.md
+++ b/content/cn/docs/clients/restful-api/rank.md
@@ -2,6 +2,7 @@
title: "Rank API"
linkTitle: "Rank"
weight: 10
+description: "Rank(图排序)REST 接口:执行图节点排序算法,如 PageRank、个性化 PageRank 等中心性分析。"
---
### 4.1 rank API 概述
diff --git a/content/cn/docs/clients/restful-api/rebuild.md b/content/cn/docs/clients/restful-api/rebuild.md
index 18e037448..db6281ac6 100644
--- a/content/cn/docs/clients/restful-api/rebuild.md
+++ b/content/cn/docs/clients/restful-api/rebuild.md
@@ -2,6 +2,7 @@
title: "Rebuild API"
linkTitle: "Rebuild"
weight: 6
+description: "Rebuild(重建索引)REST 接口:重建图模式的索引,确保索引数据与图数据保持一致性。"
---
### 1.6 Rebuild
@@ -30,7 +31,7 @@ PUT http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/jobs/rebuild/inde
注:
-> 可以通过`GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/1`(其中"1"是 task_id)来查询异步任务的执行状态,更多[异步任务 RESTful API](../task)
+> 可以通过`GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/1`(其中"1"是 task_id)来查询异步任务的执行状态,更多[异步任务 RESTful API](./task)
#### 1.6.2 VertexLabel 对应的全部索引重建
@@ -56,7 +57,7 @@ PUT http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/jobs/rebuild/vert
注:
-> 可以通过`GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/2`(其中"2"是 task_id)来查询异步任务的执行状态,更多[异步任务 RESTful API](../task)
+> 可以通过`GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/2`(其中"2"是 task_id)来查询异步任务的执行状态,更多[异步任务 RESTful API](./task)
#### 1.6.3 EdgeLabel 对应的全部索引重建
@@ -82,4 +83,4 @@ PUT http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/jobs/rebuild/edge
注:
-> 可以通过`GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/3`(其中"3"是 task_id)来查询异步任务的执行状态,更多[异步任务 RESTful API](../task)
+> 可以通过`GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/3`(其中"3"是 task_id)来查询异步任务的执行状态,更多[异步任务 RESTful API](./task)
diff --git a/content/cn/docs/clients/restful-api/schema.md b/content/cn/docs/clients/restful-api/schema.md
index f0e525b05..0e80bce4a 100644
--- a/content/cn/docs/clients/restful-api/schema.md
+++ b/content/cn/docs/clients/restful-api/schema.md
@@ -2,6 +2,7 @@
title: "Schema API"
linkTitle: "Schema"
weight: 1
+description: "Schema(图模式)REST 接口:查询图的完整模式定义,包括属性键、顶点标签、边标签和索引标签的统一视图。"
---
### 1.1 Schema
diff --git a/content/cn/docs/clients/restful-api/task.md b/content/cn/docs/clients/restful-api/task.md
index b91a5c5a3..92c89aebb 100644
--- a/content/cn/docs/clients/restful-api/task.md
+++ b/content/cn/docs/clients/restful-api/task.md
@@ -2,6 +2,7 @@
title: "Task API"
linkTitle: "Task"
weight: 13
+description: "Task(任务管理)REST 接口:查询和管理异步任务的执行状态,如索引重建、图遍历等长时任务。"
---
### 7.1 Task
diff --git a/content/cn/docs/clients/restful-api/traverser.md b/content/cn/docs/clients/restful-api/traverser.md
index e246ede58..3d4f210b4 100644
--- a/content/cn/docs/clients/restful-api/traverser.md
+++ b/content/cn/docs/clients/restful-api/traverser.md
@@ -2,6 +2,7 @@
title: "Traverser API"
linkTitle: "Traverser"
weight: 9
+description: "Traverser(图遍历)REST 接口:执行复杂的图算法和路径查询,包括最短路径、K近邻、相似度计算等高级分析功能。"
---
### 3.1 traverser API 概述
diff --git a/content/cn/docs/clients/restful-api/variable.md b/content/cn/docs/clients/restful-api/variable.md
index 5ca2ec0b9..b25b6e44b 100644
--- a/content/cn/docs/clients/restful-api/variable.md
+++ b/content/cn/docs/clients/restful-api/variable.md
@@ -2,6 +2,7 @@
title: "Variable API"
linkTitle: "Variable"
weight: 11
+description: "Variable(变量)REST 接口:存储和管理键值对形式的全局变量,支持图级别的配置和状态管理。"
---
### 5.1 Variables
diff --git a/content/cn/docs/clients/restful-api/vertex.md b/content/cn/docs/clients/restful-api/vertex.md
index 0df58ecce..7f1c8a254 100644
--- a/content/cn/docs/clients/restful-api/vertex.md
+++ b/content/cn/docs/clients/restful-api/vertex.md
@@ -2,6 +2,7 @@
title: "Vertex API"
linkTitle: "Vertex"
weight: 7
+description: "Vertex(顶点)REST 接口:创建、查询、更新和删除图中的顶点数据,支持批量操作和条件过滤。"
---
### 2.1 Vertex
diff --git a/content/cn/docs/clients/restful-api/vertexlabel.md b/content/cn/docs/clients/restful-api/vertexlabel.md
index 31ff5a7eb..9d15589fb 100644
--- a/content/cn/docs/clients/restful-api/vertexlabel.md
+++ b/content/cn/docs/clients/restful-api/vertexlabel.md
@@ -2,6 +2,7 @@
title: "VertexLabel API"
linkTitle: "VertexLabel"
weight: 3
+description: "VertexLabel(顶点标签)REST 接口:定义顶点类型、ID策略及关联的属性,决定顶点的结构和约束规则。"
---
### 1.3 VertexLabel
@@ -307,4 +308,4 @@ DELETE http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/schema/vertexl
注:
-> 可以通过`GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/1`(其中"1"是 task_id)来查询异步任务的执行状态,更多[异步任务 RESTful API](../task)
+> 可以通过`GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/1`(其中"1"是 task_id)来查询异步任务的执行状态,更多[异步任务 RESTful API](./task)
diff --git a/content/cn/docs/config/_index.md b/content/cn/docs/config/_index.md
index 04db80c57..ce83888d7 100644
--- a/content/cn/docs/config/_index.md
+++ b/content/cn/docs/config/_index.md
@@ -1,5 +1,12 @@
---
-title: "Config"
-linkTitle: "Config"
+title: "HugeGraph-Server 配置"
+linkTitle: "Server 配置"
weight: 4
----
\ No newline at end of file
+---
+
+本节介绍 HugeGraph-Server 的配置方法,包括:
+
+- **[Server 启动指南](config-guide)** - 了解配置文件结构和基本配置方法
+- **[Server 完整配置手册](config-option)** - 完整的配置选项列表和说明
+- **[权限配置](config-authentication)** - 用户认证和授权配置
+- **[HTTPS 配置](config-https)** - 启用 HTTPS 安全协议
\ No newline at end of file
diff --git a/content/cn/docs/config/config-authentication.md b/content/cn/docs/config/config-authentication.md
index d11f7db7d..fef920328 100644
--- a/content/cn/docs/config/config-authentication.md
+++ b/content/cn/docs/config/config-authentication.md
@@ -94,14 +94,14 @@ gremlin.graph=org.apache.hugegraph.auth.HugeFactoryAuthProxy
在鉴权配置完成后,需在首次执行 `init-store.sh` 时命令行中输入 `admin` 密码 (非 docker 部署模式下)
如果基于 docker 镜像部署或者已经初始化 HugeGraph 并需要转换为鉴权模式,需要删除相关图数据并重新启动 HugeGraph, 若图已有业务数据,暂时**无法直接转换**鉴权模式 (hugegraph 版本 <= 1.2.0)
-> 对于该功能的改进已经在最新版本发布 (Docker latest 可用),可参考 [PR 2411](https://github.com/apache/incubator-hugegraph/pull/2411), 此时可无缝切换。
+> 对于该功能的改进已经在最新版本发布 (Docker latest 可用),可参考 [PR 2411](https://github.com/apache/hugegraph/pull/2411), 此时可无缝切换。
```bash
# stop the hugeGraph firstly
bin/stop-hugegraph.sh
# delete the store data (here we use the default path for rocksdb)
-# Note: no need to delete data in the latest code (fixed in https://github.com/apache/incubator-hugegraph/pull/2411)
+# Note: no need to delete data in the latest code (fixed in https://github.com/apache/hugegraph/pull/2411)
rm -rf rocksdb-data/
# init store again
diff --git a/content/cn/docs/config/config-computer.md b/content/cn/docs/config/config-computer.md
deleted file mode 100644
index 0b270c9e8..000000000
--- a/content/cn/docs/config/config-computer.md
+++ /dev/null
@@ -1,177 +0,0 @@
----
-title: "HugeGraph-Computer 配置"
-linkTitle: "图计算 Computer 配置"
-weight: 5
----
-
-### Computer Config Options
-
-| config option | default value | description |
-|-----------------------------------------|-------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| algorithm.message_class | org.apache.hugegraph.computer.core.config.Null | The class of message passed when compute vertex. |
-| algorithm.params_class | org.apache.hugegraph.computer.core.config.Null | The class used to transfer algorithms' parameters before algorithm been run. |
-| algorithm.result_class | org.apache.hugegraph.computer.core.config.Null | The class of vertex's value, the instance is used to store computation result for the vertex. |
-| allocator.max_vertices_per_thread | 10000 | Maximum number of vertices per thread processed in each memory allocator |
-| bsp.etcd_endpoints | http://localhost:2379 | The end points to access etcd. |
-| bsp.log_interval | 30000 | The log interval(in ms) to print the log while waiting bsp event. |
-| bsp.max_super_step | 10 | The max super step of the algorithm. |
-| bsp.register_timeout | 300000 | The max timeout to wait for master and works to register. |
-| bsp.wait_master_timeout | 86400000 | The max timeout(in ms) to wait for master bsp event. |
-| bsp.wait_workers_timeout | 86400000 | The max timeout to wait for workers bsp event. |
-| hgkv.max_data_block_size | 65536 | The max byte size of hgkv-file data block. |
-| hgkv.max_file_size | 2147483648 | The max number of bytes in each hgkv-file. |
-| hgkv.max_merge_files | 10 | The max number of files to merge at one time. |
-| hgkv.temp_file_dir | /tmp/hgkv | This folder is used to store temporary files, temporary files will be generated during the file merging process. |
-| hugegraph.name | hugegraph | The graph name to load data and write results back. |
-| hugegraph.url | http://127.0.0.1:8080 | The hugegraph url to load data and write results back. |
-| input.edge_direction | OUT | The data of the edge in which direction is loaded, when the value is BOTH, the edges in both OUT and IN direction will be loaded. |
-| input.edge_freq | MULTIPLE | The frequency of edges can exist between a pair of vertices, allowed values: [SINGLE, SINGLE_PER_LABEL, MULTIPLE]. SINGLE means that only one edge can exist between a pair of vertices, use sourceId + targetId to identify it; SINGLE_PER_LABEL means that each edge label can exist one edge between a pair of vertices, use sourceId + edgelabel + targetId to identify it; MULTIPLE means that many edge can exist between a pair of vertices, use sourceId + edgelabel + sortValues + targetId to identify it. |
-| input.filter_class | org.apache.hugegraph.computer.core.input.filter.DefaultInputFilter | The class to create input-filter object, input-filter is used to Filter vertex edges according to user needs. |
-| input.loader_schema_path || The schema path of loader input, only takes effect when the input.source_type=loader is enabled |
-| input.loader_struct_path || The struct path of loader input, only takes effect when the input.source_type=loader is enabled |
-| input.max_edges_in_one_vertex | 200 | The maximum number of adjacent edges allowed to be attached to a vertex, the adjacent edges will be stored and transferred together as a batch unit. |
-| input.source_type | hugegraph-server | The source type to load input data, allowed values: ['hugegraph-server', 'hugegraph-loader'], the 'hugegraph-loader' means use hugegraph-loader load data from HDFS or file, if use 'hugegraph-loader' load data then please config 'input.loader_struct_path' and 'input.loader_schema_path'. |
-| input.split_fetch_timeout | 300 | The timeout in seconds to fetch input splits |
-| input.split_max_splits | 10000000 | The maximum number of input splits |
-| input.split_page_size | 500 | The page size for streamed load input split data |
-| input.split_size | 1048576 | The input split size in bytes |
-| job.id | local_0001 | The job id on Yarn cluster or K8s cluster. |
-| job.partitions_count | 1 | The partitions count for computing one graph algorithm job. |
-| job.partitions_thread_nums | 4 | The number of threads for partition parallel compute. |
-| job.workers_count | 1 | The workers count for computing one graph algorithm job. |
-| master.computation_class | org.apache.hugegraph.computer.core.master.DefaultMasterComputation | Master-computation is computation that can determine whether to continue next superstep. It runs at the end of each superstep on master. |
-| output.batch_size | 500 | The batch size of output |
-| output.batch_threads | 1 | The threads number used to batch output |
-| output.hdfs_core_site_path || The hdfs core site path. |
-| output.hdfs_delimiter | , | The delimiter of hdfs output. |
-| output.hdfs_kerberos_enable | false | Is Kerberos authentication enabled for Hdfs. |
-| output.hdfs_kerberos_keytab || The Hdfs's key tab file for kerberos authentication. |
-| output.hdfs_kerberos_principal || The Hdfs's principal for kerberos authentication. |
-| output.hdfs_krb5_conf | /etc/krb5.conf | Kerberos configuration file. |
-| output.hdfs_merge_partitions | true | Whether merge output files of multiple partitions. |
-| output.hdfs_path_prefix | /hugegraph-computer/results | The directory of hdfs output result. |
-| output.hdfs_replication | 3 | The replication number of hdfs. |
-| output.hdfs_site_path || The hdfs site path. |
-| output.hdfs_url | hdfs://127.0.0.1:9000 | The hdfs url of output. |
-| output.hdfs_user | hadoop | The hdfs user of output. |
-| output.output_class | org.apache.hugegraph.computer.core.output.LogOutput | The class to output the computation result of each vertex. Be called after iteration computation. |
-| output.result_name | value | The value is assigned dynamically by #name() of instance created by WORKER_COMPUTATION_CLASS. |
-| output.result_write_type | OLAP_COMMON | The result write-type to output to hugegraph, allowed values are: [OLAP_COMMON, OLAP_SECONDARY, OLAP_RANGE]. |
-| output.retry_interval | 10 | The retry interval when output failed |
-| output.retry_times | 3 | The retry times when output failed |
-| output.single_threads | 1 | The threads number used to single output |
-| output.thread_pool_shutdown_timeout | 60 | The timeout seconds of output threads pool shutdown |
-| output.with_adjacent_edges | false | Output the adjacent edges of the vertex or not |
-| output.with_edge_properties | false | Output the properties of the edge or not |
-| output.with_vertex_properties | false | Output the properties of the vertex or not |
-| sort.thread_nums | 4 | The number of threads performing internal sorting. |
-| transport.client_connect_timeout | 3000 | The timeout(in ms) of client connect to server. |
-| transport.client_threads | 4 | The number of transport threads for client. |
-| transport.close_timeout | 10000 | The timeout(in ms) of close server or close client. |
-| transport.finish_session_timeout | 0 | The timeout(in ms) to finish session, 0 means using (transport.sync_request_timeout * transport.max_pending_requests). |
-| transport.heartbeat_interval | 20000 | The minimum interval(in ms) between heartbeats on client side. |
-| transport.io_mode | AUTO | The network IO Mode, either 'NIO', 'EPOLL', 'AUTO', the 'AUTO' means selecting the property mode automatically. |
-| transport.max_pending_requests | 8 | The max number of client unreceived ack, it will trigger the sending unavailable if the number of unreceived ack >= max_pending_requests. |
-| transport.max_syn_backlog | 511 | The capacity of SYN queue on server side, 0 means using system default value. |
-| transport.max_timeout_heartbeat_count | 120 | The maximum times of timeout heartbeat on client side, if the number of timeouts waiting for heartbeat response continuously > max_heartbeat_timeouts the channel will be closed from client side. |
-| transport.min_ack_interval | 200 | The minimum interval(in ms) of server reply ack. |
-| transport.min_pending_requests | 6 | The minimum number of client unreceived ack, it will trigger the sending available if the number of unreceived ack < min_pending_requests. |
-| transport.network_retries | 3 | The number of retry attempts for network communication,if network unstable. |
-| transport.provider_class | org.apache.hugegraph.computer.core.network.netty.NettyTransportProvider | The transport provider, currently only supports Netty. |
-| transport.receive_buffer_size | 0 | The size of socket receive-buffer in bytes, 0 means using system default value. |
-| transport.recv_file_mode | true | Whether enable receive buffer-file mode, it will receive buffer write file from socket by zero-copy if enable. |
-| transport.send_buffer_size | 0 | The size of socket send-buffer in bytes, 0 means using system default value. |
-| transport.server_host | 127.0.0.1 | The server hostname or ip to listen on to transfer data. |
-| transport.server_idle_timeout | 360000 | The max timeout(in ms) of server idle. |
-| transport.server_port | 0 | The server port to listen on to transfer data. The system will assign a random port if it's set to 0. |
-| transport.server_threads | 4 | The number of transport threads for server. |
-| transport.sync_request_timeout | 10000 | The timeout(in ms) to wait response after sending sync-request. |
-| transport.tcp_keep_alive | true | Whether enable TCP keep-alive. |
-| transport.transport_epoll_lt | false | Whether enable EPOLL level-trigger. |
-| transport.write_buffer_high_mark | 67108864 | The high water mark for write buffer in bytes, it will trigger the sending unavailable if the number of queued bytes > write_buffer_high_mark. |
-| transport.write_buffer_low_mark | 33554432 | The low water mark for write buffer in bytes, it will trigger the sending available if the number of queued bytes < write_buffer_low_mark.org.apache.hugegraph.config.OptionChecker$$Lambda$97/0x00000008001c8440@776a6d9b |
-| transport.write_socket_timeout | 3000 | The timeout(in ms) to write data to socket buffer. |
-| valuefile.max_segment_size | 1073741824 | The max number of bytes in each segment of value-file. |
-| worker.combiner_class | org.apache.hugegraph.computer.core.config.Null | Combiner can combine messages into one value for a vertex, for example page-rank algorithm can combine messages of a vertex to a sum value. |
-| worker.computation_class | org.apache.hugegraph.computer.core.config.Null | The class to create worker-computation object, worker-computation is used to compute each vertex in each superstep. |
-| worker.data_dirs | [jobs] | The directories separated by ',' that received vertices and messages can persist into. |
-| worker.edge_properties_combiner_class | org.apache.hugegraph.computer.core.combiner.OverwritePropertiesCombiner | The combiner can combine several properties of the same edge into one properties at inputstep. |
-| worker.partitioner | org.apache.hugegraph.computer.core.graph.partition.HashPartitioner | The partitioner that decides which partition a vertex should be in, and which worker a partition should be in. |
-| worker.received_buffers_bytes_limit | 104857600 | The limit bytes of buffers of received data, the total size of all buffers can't excess this limit. If received buffers reach this limit, they will be merged into a file. |
-| worker.vertex_properties_combiner_class | org.apache.hugegraph.computer.core.combiner.OverwritePropertiesCombiner | The combiner can combine several properties of the same vertex into one properties at inputstep. |
-| worker.wait_finish_messages_timeout | 86400000 | The max timeout(in ms) message-handler wait for finish-message of all workers. |
-| worker.wait_sort_timeout | 600000 | The max timeout(in ms) message-handler wait for sort-thread to sort one batch of buffers. |
-| worker.write_buffer_capacity | 52428800 | The initial size of write buffer that used to store vertex or message. |
-| worker.write_buffer_threshold | 52428800 | The threshold of write buffer, exceeding it will trigger sorting, the write buffer is used to store vertex or message. |
-
-### K8s Operator Config Options
-
-> NOTE: Option needs to be converted through environment variable settings, e.g. k8s.internal_etcd_url => INTERNAL_ETCD_URL
-
-| config option | default value | description |
-|------------------------------|---------------------------|---------------------------------------------------------------------------------------------------------------------------------|
-| k8s.auto_destroy_pod | true | Whether to automatically destroy all pods when the job is completed or failed. |
-| k8s.close_reconciler_timeout | 120 | The max timeout(in ms) to close reconciler. |
-| k8s.internal_etcd_url | http://127.0.0.1:2379 | The internal etcd url for operator system. |
-| k8s.max_reconcile_retry | 3 | The max retry times of reconcile. |
-| k8s.probe_backlog | 50 | The maximum backlog for serving health probes. |
-| k8s.probe_port | 9892 | The value is the port that the controller bind to for serving health probes. |
-| k8s.ready_check_internal | 1000 | The time interval(ms) of check ready. |
-| k8s.ready_timeout | 30000 | The max timeout(in ms) of check ready. |
-| k8s.reconciler_count | 10 | The max number of reconciler thread. |
-| k8s.resync_period | 600000 | The minimum frequency at which watched resources are reconciled. |
-| k8s.timezone | Asia/Shanghai | The timezone of computer job and operator. |
-| k8s.watch_namespace | hugegraph-computer-system | The value is watch custom resources in the namespace, ignore other namespaces, the '*' means is all namespaces will be watched. |
-
-### HugeGraph-Computer CRD
-
-> CRD: https://github.com/apache/hugegraph-computer/blob/master/computer-k8s-operator/manifest/hugegraph-computer-crd.v1.yaml
-
-| spec | default value | description | required |
-|-----------------|-------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|
-| algorithmName | | The name of algorithm. | true |
-| jobId | | The job id. | true |
-| image | | The image of algorithm. | true |
-| computerConf | | The map of computer config options. | true |
-| workerInstances | | The number of worker instances, it will instead the 'job.workers_count' option. | true |
-| pullPolicy | Always | The pull-policy of image, detail please refer to: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy | false |
-| pullSecrets | | The pull-secrets of Image, detail please refer to: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod | false |
-| masterCpu | | The cpu limit of master, the unit can be 'm' or without unit detail please refer to:[https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu) | false |
-| workerCpu | | The cpu limit of worker, the unit can be 'm' or without unit detail please refer to:[https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu) | false |
-| masterMemory | | The memory limit of master, the unit can be one of Ei、Pi、Ti、Gi、Mi、Ki detail please refer to:[https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory) | false |
-| workerMemory | | The memory limit of worker, the unit can be one of Ei、Pi、Ti、Gi、Mi、Ki detail please refer to:[https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory) | false |
-| log4jXml | | The content of log4j.xml for computer job. | false |
-| jarFile | | The jar path of computer algorithm. | false |
-| remoteJarUri | | The remote jar uri of computer algorithm, it will overlay algorithm image. | false |
-| jvmOptions | | The java startup parameters of computer job. | false |
-| envVars | | please refer to: https://kubernetes.io/docs/tasks/inject-data-application/define-interdependent-environment-variables/ | false |
-| envFrom | | please refer to: https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/ | false |
-| masterCommand | bin/start-computer.sh | The run command of master, equivalent to 'Entrypoint' field of Docker. | false |
-| masterArgs | ["-r master", "-d k8s"] | The run args of master, equivalent to 'Cmd' field of Docker. | false |
-| workerCommand | bin/start-computer.sh | The run command of worker, equivalent to 'Entrypoint' field of Docker. | false |
-| workerArgs | ["-r worker", "-d k8s"] | The run args of worker, equivalent to 'Cmd' field of Docker. | false |
-| volumes | | Please refer to: https://kubernetes.io/docs/concepts/storage/volumes/ | false |
-| volumeMounts | | Please refer to: https://kubernetes.io/docs/concepts/storage/volumes/ | false |
-| secretPaths | | The map of k8s-secret name and mount path. | false |
-| configMapPaths | | The map of k8s-configmap name and mount path. | false |
-| podTemplateSpec | | Please refer to: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-template-v1/#PodTemplateSpec | false |
-| securityContext | | Please refer to: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ | false |
-
-### KubeDriver Config Options
-
-| config option | default value | description |
-|----------------------------------|------------------------------------------|-----------------------------------------------------------|
-| k8s.build_image_bash_path || The path of command used to build image. |
-| k8s.enable_internal_algorithm | true | Whether enable internal algorithm. |
-| k8s.framework_image_url | hugegraph/hugegraph-computer:latest | The image url of computer framework. |
-| k8s.image_repository_password || The password for login image repository. |
-| k8s.image_repository_registry || The address for login image repository. |
-| k8s.image_repository_url | hugegraph/hugegraph-computer | The url of image repository. |
-| k8s.image_repository_username || The username for login image repository. |
-| k8s.internal_algorithm | [pageRank] | The name list of all internal algorithm. |
-| k8s.internal_algorithm_image_url | hugegraph/hugegraph-computer:latest | The image url of internal algorithm. |
-| k8s.jar_file_dir | /cache/jars/ | The directory where the algorithm jar to upload location. |
-| k8s.kube_config | ~/.kube/config | The path of k8s config file. |
-| k8s.log4j_xml_path || The log4j.xml path for computer job. |
-| k8s.namespace | hugegraph-computer-system | The namespace of hugegraph-computer system. |
-| k8s.pull_secret_names | [] | The names of pull-secret for pulling image. |
diff --git a/content/cn/docs/config/config-guide.md b/content/cn/docs/config/config-guide.md
index efb92c2c1..fed7ba856 100644
--- a/content/cn/docs/config/config-guide.md
+++ b/content/cn/docs/config/config-guide.md
@@ -1,6 +1,6 @@
---
-title: "HugeGraph 配置"
-linkTitle: "参数配置"
+title: "Server 启动指南"
+linkTitle: "Server 启动指南"
weight: 1
---
@@ -138,7 +138,7 @@ ssl: {
- graphs:GremlinServer 启动时需要打开的图,该项是一个 map 结构,key 是图的名字,value 是该图的配置文件路径;
- channelizer:GremlinServer 与客户端有两种通信方式,分别是 WebSocket 和 HTTP(默认)。如果选择 WebSocket,
-用户可以通过 [Gremlin-Console](/clients/gremlin-console.html) 快速体验 HugeGraph 的特性,但是不支持大规模数据导入,
+用户可以通过 [Gremlin-Console](../clients/gremlin-console) 快速体验 HugeGraph 的特性,但是不支持大规模数据导入,
推荐使用 HTTP 的通信方式,HugeGraph 的外围组件都是基于 HTTP 实现的;
默认 GremlinServer 是服务在 localhost:8182,如果需要修改,配置 host、port 即可
@@ -173,8 +173,7 @@ arthas.ip=127.0.0.1
arthas.disabled_commands=jad
# authentication configs
-# choose 'org.apache.hugegraph.auth.StandardAuthenticator' or
-# 'org.apache.hugegraph.auth.ConfigAuthenticator'
+# choose 'org.apache.hugegraph.auth.StandardAuthenticator' or a custom implementation
#auth.authenticator=
# for StandardAuthenticator mode
@@ -182,10 +181,6 @@ arthas.disabled_commands=jad
# auth client config
#auth.remote_url=127.0.0.1:8899,127.0.0.1:8898,127.0.0.1:8897
-# for ConfigAuthenticator mode
-#auth.admin_token=
-#auth.user_tokens=[]
-
# TODO: Deprecated & removed later (useless from version 1.5.0)
# rpc server configs for multi graph-servers or raft-servers
#rpc.server_host=127.0.0.1
diff --git a/content/cn/docs/config/config-option.md b/content/cn/docs/config/config-option.md
index 274dec6de..7719ac4b7 100644
--- a/content/cn/docs/config/config-option.md
+++ b/content/cn/docs/config/config-option.md
@@ -1,6 +1,6 @@
---
-title: "HugeGraph 配置项"
-linkTitle: "配置项列表"
+title: "Server 完整配置手册"
+linkTitle: "Server 完整配置手册"
weight: 2
---
@@ -37,23 +37,31 @@ weight: 2
| gremlinserver.url | http://127.0.0.1:8182 | The url of gremlin server. |
| gremlinserver.max_route | 8 | The max route number for gremlin server. |
| gremlinserver.timeout | 30 | The timeout in seconds of waiting for gremlin server. |
-| batch.max_edges_per_batch | 500 | The maximum number of edges submitted per batch. |
-| batch.max_vertices_per_batch | 500 | The maximum number of vertices submitted per batch. |
-| batch.max_write_ratio | 50 | The maximum thread ratio for batch writing, only take effect if the batch.max_write_threads is 0. |
+| batch.max_edges_per_batch | 2500 | The maximum number of edges submitted per batch. |
+| batch.max_vertices_per_batch | 2500 | The maximum number of vertices submitted per batch. |
+| batch.max_write_ratio | 70 | The maximum thread ratio for batch writing, only take effect if the batch.max_write_threads is 0. |
| batch.max_write_threads | 0 | The maximum threads for batch writing, if the value is 0, the actual value will be set to batch.max_write_ratio * restserver.max_worker_threads. |
-| auth.authenticator | | The class path of authenticator implementation. e.g., org.apache.hugegraph.auth.StandardAuthenticator, or org.apache.hugegraph.auth.ConfigAuthenticator. |
-| auth.admin_token | 162f7848-0b6d-4faf-b557-3a0797869c55 | Token for administrator operations, only for org.apache.hugegraph.auth.ConfigAuthenticator. |
+| auth.authenticator | | The class path of authenticator implementation. e.g., org.apache.hugegraph.auth.StandardAuthenticator, or a custom implementation. |
| auth.graph_store | hugegraph | The name of graph used to store authentication information, like users, only for org.apache.hugegraph.auth.StandardAuthenticator. |
-| auth.user_tokens | [hugegraph:9fd95c9c-711b-415b-b85f-d4df46ba5c31] | The map of user tokens with name and password, only for org.apache.hugegraph.auth.ConfigAuthenticator. |
| auth.audit_log_rate | 1000.0 | The max rate of audit log output per user, default value is 1000 records per second. |
| auth.cache_capacity | 10240 | The max cache capacity of each auth cache item. |
| auth.cache_expire | 600 | The expiration time in seconds of vertex cache. |
| auth.remote_url | | If the address is empty, it provide auth service, otherwise it is auth client and also provide auth service through rpc forwarding. The remote url can be set to multiple addresses, which are concat by ','. |
| auth.token_expire | 86400 | The expiration time in seconds after token created |
| auth.token_secret | FXQXbJtbCLxODc6tGci732pkH1cyf8Qg | Secret key of HS256 algorithm. |
-| exception.allow_trace | false | Whether to allow exception trace stack. |
-| memory_monitor.threshold | 0.85 | The threshold of JVM(in-heap) memory usage monitoring , 1 means disabling this function. |
+| exception.allow_trace | true | Whether to allow exception trace stack. |
+| memory_monitor.threshold | 0.85 | The threshold of JVM(in-heap) memory usage monitoring, 1 means disabling this function. |
| memory_monitor.period | 2000 | The period in ms of JVM(in-heap) memory usage monitoring. |
+| log.slow_query_threshold | 1000 | Slow query log threshold in milliseconds, 0 means disabled. |
+
+### PD/Meta 配置项 (分布式模式)
+
+对应配置文件`rest-server.properties`
+
+| config option | default value | description |
+|------------------|------------------------|--------------------------------------------|
+| pd.peers | 127.0.0.1:8686 | PD server addresses (comma separated). |
+| meta.endpoints | http://127.0.0.1:2379 | Meta service endpoints. |
### 基本配置项
@@ -62,7 +70,7 @@ weight: 2
| config option | default value | description |
|---------------------------------------|----------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| gremlin.graph | org.apache.hugegraph.HugeFactory | Gremlin entrance to create graph. |
-| backend | rocksdb | The data store type, available values are [memory, rocksdb, cassandra, scylladb, hbase, mysql]. |
+| backend | rocksdb | The data store type. For version 1.7.0+: [memory, rocksdb, hstore, hbase]. Note: cassandra, scylladb, mysql, postgresql were removed in 1.7.0 (use <= 1.5.x for legacy backends). |
| serializer | binary | The serializer for backend store, available values are [text, binary, cassandra, hbase, mysql]. |
| store | hugegraph | The database name like Cassandra Keyspace. |
| store.connection_detect_interval | 600 | The interval in seconds for detecting connections, if the idle time of a connection exceeds this value, detect it and reconnect if needed before using, value 0 means detecting every time. |
@@ -131,51 +139,6 @@ weight: 2
| raft.rpc_buf_high_water_mark | 20971520 | The ChannelOutboundBuffer's high water mark of netty, only when buffer size exceed this size, the method ChannelOutboundBuffer.isWritable() will return false, it means that the downstream pressure is too great to process the request or network is very congestion, upstream needs to limit rate at this time. |
| raft.read_strategy | ReadOnlyLeaseBased | The linearizability of read strategy. |
-### RPC server 配置
-
-| config option | default value | description |
-|-----------------------------|-----------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| rpc.client_connect_timeout | 20 | The timeout(in seconds) of rpc client connect to rpc server. |
-| rpc.client_load_balancer | consistentHash | The rpc client uses a load-balancing algorithm to access multiple rpc servers in one cluster. Default value is 'consistentHash', means forwarding by request parameters. |
-| rpc.client_read_timeout | 40 | The timeout(in seconds) of rpc client read from rpc server. |
-| rpc.client_reconnect_period | 10 | The period(in seconds) of rpc client reconnect to rpc server. |
-| rpc.client_retries | 3 | Failed retry number of rpc client calls to rpc server. |
-| rpc.config_order | 999 | Sofa rpc configuration file loading order, the larger the more later loading. |
-| rpc.logger_impl | com.alipay.sofa.rpc.log.SLF4JLoggerImpl | Sofa rpc log implementation class. |
-| rpc.protocol | bolt | Rpc communication protocol, client and server need to be specified the same value. |
-| rpc.remote_url | | The remote urls of rpc peers, it can be set to multiple addresses, which are concat by ',', empty value means not enabled. |
-| rpc.server_adaptive_port | false | Whether the bound port is adaptive, if it's enabled, when the port is in use, automatically +1 to detect the next available port. Note that this process is not atomic, so there may still be port conflicts. |
-| rpc.server_host | | The hosts/ips bound by rpc server to provide services, empty value means not enabled. |
-| rpc.server_port | 8090 | The port bound by rpc server to provide services. |
-| rpc.server_timeout | 30 | The timeout(in seconds) of rpc server execution. |
-
-### Cassandra 后端配置项
-
-| config option | default value | description |
-|--------------------------------|----------------|------------------------------------------------------------------------------------------------------------------------------------------------|
-| backend | | Must be set to `cassandra`. |
-| serializer | | Must be set to `cassandra`. |
-| cassandra.host | localhost | The seeds hostname or ip address of cassandra cluster. |
-| cassandra.port | 9042 | The seeds port address of cassandra cluster. |
-| cassandra.connect_timeout | 5 | The cassandra driver connect server timeout(seconds). |
-| cassandra.read_timeout | 20 | The cassandra driver read from server timeout(seconds). |
-| cassandra.keyspace.strategy | SimpleStrategy | The replication strategy of keyspace, valid value is SimpleStrategy or NetworkTopologyStrategy. |
-| cassandra.keyspace.replication | [3] | The keyspace replication factor of SimpleStrategy, like '[3]'.Or replicas in each datacenter of NetworkTopologyStrategy, like '[dc1:2,dc2:1]'. |
-| cassandra.username | | The username to use to login to cassandra cluster. |
-| cassandra.password | | The password corresponding to cassandra.username. |
-| cassandra.compression_type | none | The compression algorithm of cassandra transport: none/snappy/lz4. |
-| cassandra.jmx_port=7199 | 7199 | The port of JMX API service for cassandra. |
-| cassandra.aggregation_timeout | 43200 | The timeout in seconds of waiting for aggregation. |
-
-### ScyllaDB 后端配置项
-
-| config option | default value | description |
-|---------------|---------------|----------------------------|
-| backend | | Must be set to `scylladb`. |
-| serializer | | Must be set to `scylladb`. |
-
-其它与 Cassandra 后端一致。
-
### RocksDB 后端配置项
| config option | default value | description |
@@ -232,7 +195,55 @@ weight: 2
| rocksdb.level0_stop_writes_trigger | 36 | Hard limit on number of level-0 files for stopping writes. |
| rocksdb.soft_pending_compaction_bytes_limit | 68719476736 | The soft limit to impose on pending compaction in bytes. |
-### HBase 后端配置项
+
+### K8s 配置项 (可选)
+
+对应配置文件`rest-server.properties`
+
+| config option | default value | description |
+|------------------|-------------------------------|------------------------------------------|
+| server.use_k8s | false | Whether to enable K8s multi-tenancy mode. |
+| k8s.namespace | hugegraph-computer-system | K8s namespace for compute jobs. |
+| k8s.kubeconfig | | Path to kubeconfig file. |
+
+
+
+
+### Arthas 诊断配置项 (可选)
+
+对应配置文件`rest-server.properties`
+
+| config option | default value | description |
+|--------------------|---------------|-----------------------|
+| arthas.telnetPort | 8562 | Arthas telnet port. |
+| arthas.httpPort | 8561 | Arthas HTTP port. |
+| arthas.ip | 0.0.0.0 | Arthas bind IP. |
+
+
+
+
+### RPC Server 配置
+
+| config option | default value | description |
+|-----------------------------|-----------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| rpc.client_connect_timeout | 20 | The timeout(in seconds) of rpc client connect to rpc server. |
+| rpc.client_load_balancer | consistentHash | The rpc client uses a load-balancing algorithm to access multiple rpc servers in one cluster. Default value is 'consistentHash', means forwarding by request parameters. |
+| rpc.client_read_timeout | 40 | The timeout(in seconds) of rpc client read from rpc server. |
+| rpc.client_reconnect_period | 10 | The period(in seconds) of rpc client reconnect to rpc server. |
+| rpc.client_retries | 3 | Failed retry number of rpc client calls to rpc server. |
+| rpc.config_order | 999 | Sofa rpc configuration file loading order, the larger the more later loading. |
+| rpc.logger_impl | com.alipay.sofa.rpc.log.SLF4JLoggerImpl | Sofa rpc log implementation class. |
+| rpc.protocol | bolt | Rpc communication protocol, client and server need to be specified the same value. |
+| rpc.remote_url | | The remote urls of rpc peers, it can be set to multiple addresses, which are concat by ',', empty value means not enabled. |
+| rpc.server_adaptive_port | false | Whether the bound port is adaptive, if it's enabled, when the port is in use, automatically +1 to detect the next available port. Note that this process is not atomic, so there may still be port conflicts. |
+| rpc.server_host | | The hosts/ips bound by rpc server to provide services, empty value means not enabled. |
+| rpc.server_port | 8090 | The port bound by rpc server to provide services. |
+| rpc.server_timeout | 30 | The timeout(in seconds) of rpc server execution. |
+
+
+
+
+### HBase 后端配置项
| config option | default value | description |
|---------------------------|--------------------------------|--------------------------------------------------------------------------|
@@ -253,7 +264,50 @@ weight: 2
| hbase.vertex_partitions | 10 | The number of partitions of the HBase vertex table. |
| hbase.edge_partitions | 30 | The number of partitions of the HBase edge table. |
-### MySQL & PostgreSQL 后端配置项
+
+
+
+---
+
+## ≤ 1.5 版本配置 (Legacy)
+
+以下后端存储在 1.7.0+ 版本中不再支持,仅在 1.5.x 及更早版本中可用:
+
+
+### Cassandra 后端配置项
+
+| config option | default value | description |
+|--------------------------------|----------------|------------------------------------------------------------------------------------------------------------------------------------------------|
+| backend | | Must be set to `cassandra`. |
+| serializer | | Must be set to `cassandra`. |
+| cassandra.host | localhost | The seeds hostname or ip address of cassandra cluster. |
+| cassandra.port | 9042 | The seeds port address of cassandra cluster. |
+| cassandra.connect_timeout | 5 | The cassandra driver connect server timeout(seconds). |
+| cassandra.read_timeout | 20 | The cassandra driver read from server timeout(seconds). |
+| cassandra.keyspace.strategy | SimpleStrategy | The replication strategy of keyspace, valid value is SimpleStrategy or NetworkTopologyStrategy. |
+| cassandra.keyspace.replication | [3]            | The keyspace replication factor of SimpleStrategy, like '[3]'. Or replicas in each datacenter of NetworkTopologyStrategy, like '[dc1:2,dc2:1]'. |
+| cassandra.username | | The username to use to login to cassandra cluster. |
+| cassandra.password | | The password corresponding to cassandra.username. |
+| cassandra.compression_type | none | The compression algorithm of cassandra transport: none/snappy/lz4. |
+| cassandra.jmx_port            | 7199           | The port of JMX API service for cassandra. |
+| cassandra.aggregation_timeout | 43200 | The timeout in seconds of waiting for aggregation. |
+
+
+
+
+### ScyllaDB 后端配置项
+
+| config option | default value | description |
+|---------------|---------------|----------------------------|
+| backend | | Must be set to `scylladb`. |
+| serializer | | Must be set to `scylladb`. |
+
+其它与 Cassandra 后端一致。
+
+
+
+
+### MySQL & PostgreSQL 后端配置项
| config option | default value | description |
|----------------------------------|-----------------------------|-------------------------------------------------------------------------------------|
@@ -269,7 +323,10 @@ weight: 2
| jdbc.storage_engine | InnoDB | The storage engine of backend store database, like InnoDB/MyISAM/RocksDB for MySQL. |
| jdbc.postgresql.connect_database | template1 | The database used to connect when init store, drop store or check store exist. |
-### PostgreSQL 后端配置项
+
+
+
+### PostgreSQL 后端配置项
| config option | default value | description |
|---------------|---------------|------------------------------|
@@ -281,3 +338,6 @@ weight: 2
> PostgreSQL 后端的 driver 和 url 应该设置为:
> - `jdbc.driver=org.postgresql.Driver`
> - `jdbc.url=jdbc:postgresql://localhost:5432/`
+
+
+
diff --git a/content/cn/docs/contribution-guidelines/committer-guidelines.md b/content/cn/docs/contribution-guidelines/committer-guidelines.md
index d32d93850..f77129418 100644
--- a/content/cn/docs/contribution-guidelines/committer-guidelines.md
+++ b/content/cn/docs/contribution-guidelines/committer-guidelines.md
@@ -9,7 +9,7 @@ weight: 5
# 候选人要求
1. 候选人应遵守 [Apache Code of Conduct](https://www.apache.org/foundation/policies/conduct.html)
-2. PMC 成员将通过搜索[邮件列表](https://lists.apache.org/list?dev@hugegraph.apache.org)、[issues](https://github.com/apache/hugegraph/issues)、[PRs](https://github.com/apache/incubator-hugegraph/pulls)、[官网文档](https://hugegraph.apache.org/docs)等方式,了解候选人如何与他人互动,以及他们所做的贡献
+2. PMC 成员将通过搜索[邮件列表](https://lists.apache.org/list?dev@hugegraph.apache.org)、[issues](https://github.com/apache/hugegraph/issues)、[PRs](https://github.com/apache/hugegraph/pulls)、[官网文档](https://hugegraph.apache.org/docs)等方式,了解候选人如何与他人互动,以及他们所做的贡献
3. 以下是在评估候选人是否适合成为 Committer 时需要考虑的一些要点:
1. 与社区成员合作的能力
2. 担任导师的能力
@@ -24,7 +24,7 @@ weight: 5
## 发起社区邮件讨论 (DISCUSS)
-任何 HugeGraph 的 (P)PMC 成员都可以发起投票讨论,在发现社区贡献者任何有价值的贡献并取得候选人本人同意后,可以在 private@hugegraph.apache.org 发起讨论。
+任何 HugeGraph 的 PMC 成员都可以发起投票讨论,在发现社区贡献者任何有价值的贡献并取得候选人本人同意后,可以在 private@hugegraph.apache.org 发起讨论。
讨论邮件里提议者要把候选人的贡献说清楚,并最好给出确认贡献的 URL 等信息,便于大家讨论分析。
下面是 HugeGraph 对应的邮件模板:(仅供参考)
@@ -73,33 +73,33 @@ Welcome everyone to share opinions~
Thanks!
```
-对于讨论邮件中贡献链接,可以使用 [GitHub Search](https://github.com/search) 的统计功能,按需输入如下对应关键词查询即可,可以在此基础上添加新的 repo 如 `repo:apache/incubator-hugegraph-computer`,特别注意调整**时间范围** (下面是一个模板参考,请自行调整参数):
+对于讨论邮件中贡献链接,可以使用 [GitHub Search](https://github.com/search) 的统计功能,按需输入如下对应关键词查询即可,可以在此基础上添加新的 repo 如 `repo:apache/hugegraph-computer`,特别注意调整**时间范围** (下面是一个模板参考,请自行调整参数):
- PR 提交次数
- - `is:pr author:xxx repo:apache/incubator-hugegraph repo:apache/incubator-hugegraph-doc created:>2023-06-01 updated:<2023-12-25`
+ - `is:pr author:xxx repo:apache/hugegraph repo:apache/hugegraph-doc created:>2023-06-01 updated:<2023-12-25`
- 代码提交/修改行数
- - https://github.com/apache/incubator-hugegraph/graphs/contributors?from=2023-06-01&to=2023-12-25&type=c
- - https://github.com/apache/incubator-hugegraph-doc/graphs/contributors?from=2023-06-01&to=2023-12-25&type=c
+ - https://github.com/apache/hugegraph/graphs/contributors?from=2023-06-01&to=2023-12-25&type=c
+ - https://github.com/apache/hugegraph-doc/graphs/contributors?from=2023-06-01&to=2023-12-25&type=c
- PR 提交关联 Issue 次数
- - `linked:issue involves:xxx repo:apache/incubator-hugegraph repo:apache/incubator-hugegraph-doc created:>2023-06-01 updated:<2023-12-25`
+ - `linked:issue involves:xxx repo:apache/hugegraph repo:apache/hugegraph-doc created:>2023-06-01 updated:<2023-12-25`
- PR Review 个数
- - `type:pr reviewed-by:xxx repo:apache/incubator-hugegraph repo:apache/incubator-hugegraph-doc created:>2023-06-01 updated:<2023-12-25`
+ - `type:pr reviewed-by:xxx repo:apache/hugegraph repo:apache/hugegraph-doc created:>2023-06-01 updated:<2023-12-25`
- PR Review 行数
- 合并次数
- - `type:pr author:xxx repo:apache/incubator-hugegraph repo:apache/incubator-hugegraph-doc created:>2023-06-01 updated:<2023-12-25`
+ - `type:pr author:xxx repo:apache/hugegraph repo:apache/hugegraph-doc created:>2023-06-01 updated:<2023-12-25`
- 有效合并行数
- - https://github.com/apache/incubator-hugegraph/graphs/contributors?from=2023-06-01&to=2023-12-25&type=c
- - https://github.com/apache/incubator-hugegraph-doc/graphs/contributors?from=2023-06-01&to=2023-12-25&type=c
+ - https://github.com/apache/hugegraph/graphs/contributors?from=2023-06-01&to=2023-12-25&type=c
+ - https://github.com/apache/hugegraph-doc/graphs/contributors?from=2023-06-01&to=2023-12-25&type=c
- Issue 提交数
- - `type:issue author:xxx repo:apache/incubator-hugegraph repo:apache/incubator-hugegraph-doc created:>2023-06-01 updated:<2023-12-25`
+ - `type:issue author:xxx repo:apache/hugegraph repo:apache/hugegraph-doc created:>2023-06-01 updated:<2023-12-25`
- Issue 修复数
- 在 Issue 提交数的基础上选取状态为 closed 的 Issues
- Issue 参与数
- - `type:issue involves:xxx repo:apache/incubator-hugegraph repo:apache/incubator-hugegraph-doc created:>2023-06-01 updated:<2023-12-25`
+ - `type:issue involves:xxx repo:apache/hugegraph repo:apache/hugegraph-doc created:>2023-06-01 updated:<2023-12-25`
- 评论 Issue 数
- - `type:issue commenter:xxx repo:apache/incubator-hugegraph repo:apache/incubator-hugegraph-doc created:>2023-06-01 updated:<2023-12-25`
+ - `type:issue commenter:xxx repo:apache/hugegraph repo:apache/hugegraph-doc created:>2023-06-01 updated:<2023-12-25`
- 评论 PR 数
- - `type:pr commenter:xxx repo:apache/incubator-hugegraph repo:apache/incubator-hugegraph-doc created:>2023-06-01 updated:<2023-12-25`
+ - `type:pr commenter:xxx repo:apache/hugegraph repo:apache/hugegraph-doc created:>2023-06-01 updated:<2023-12-25`
Mailing Lists 的参与则可使用 https://lists.apache.org/list?dev@hugegraph.apache.org:lte=10M:xxx 查询。
@@ -133,7 +133,7 @@ Please vote accordingly:
Thanks!
```
-然后 (P)PMC 成员回复 +1 或 -1 的邮件回复表达意见,一般来说至少需要 ≥3 票 +1 才能结束投票。
+然后 PMC 成员回复 +1 或 -1 的邮件回复表达意见,一般来说至少需要 ≥3 票 +1 才能结束投票。
## 宣布投票结果 (RESULT)
@@ -169,7 +169,7 @@ Subject: Invitation to become HugeGraph committer: xxx
Hello xxx,
-The HugeGraph Project Management Committee (PPMC)
+The HugeGraph Project Management Committee (PMC)
hereby offers you committer privileges to the project.
These privileges are offered on the understanding that you'll use them
reasonably and with common sense. We like to work on trust
@@ -214,7 +214,7 @@ establishing you as a committer.
With the expectation of your acceptance, welcome!
-The Apache HugeGraph(incubating) PPMC
+The Apache HugeGraph PMC
```
## 候选人接受邀请 (ACCEPT)
@@ -226,13 +226,13 @@ To: [ Sender's Email ]
Cc: private@hugegraph.apache.org
Subject: Re: Invitation to become HugeGraph committer: xxx
-Hello Apache HugeGraph(incubating) PPMC,
+Hello Apache HugeGraph PMC,
I accept the invitation.
Thanks to the Apache HugeGraph Community for recognizing my work, I
will continue to actively participate in the work of the Apache
-HugeGraph(incubating).
+HugeGraph.
Next, I will follow the instructions to complete the next steps:
Signing and submitting iCLA and registering Apache ID.
@@ -258,7 +258,7 @@ xxx
5. **Country:** 所在国家英文
6. **E-mail**: 邮箱地址,建议与上述邮件中使用的邮箱相同
7. **(optional) preferred Apache id(s)**: 选择一个 [Apache committer](http://people.apache.org/committer-index.html) 页面不存在的 **SVN ID**
- 8. **(optional) notify project**:Apache HugeGraph(incubating)
+ 8. **(optional) notify project**:Apache HugeGraph
9. **签名:务必使用 PDF 工具手写**
10. **Date:** 格式 xxxx-xx-xx
3. 签署完之后将 `icla.pdf` 重命名为 `姓名拼音-icla.pdf`
@@ -270,7 +270,7 @@ Subject: ICLA Information
Hello everyone:
-I have accepted the Apache HugeGraph(incubating) PPMC invitation to
+I have accepted the Apache HugeGraph PMC invitation to
become a HugeGraph committer, the attachment is my ICLA information.
(Optional) My GitHub account is https://github.com/xxx. Thanks!
@@ -302,7 +302,7 @@ for more information about roles at Apache.
2. 配置个人信息 https://whimsy.apache.org/roster/committer/xxx
3. 关联 GitHub 账号 https://gitbox.apache.org/boxer
1. 这一步需要配置 GitHub 双重身份验证 (2FA)
-4. **负责提名的 PMC 成员需通过 [Roster](https://whimsy.apache.org/roster/ppmc/hugegraph) 页面,将新的 Committer 添加到官方提交者列表中** (**重要**, 否则仓库权限不生效)
+4. **负责提名的 PMC 成员需通过 [Roster](https://whimsy.apache.org/roster/pmc/hugegraph) 页面,将新的 Committer 添加到官方提交者列表中** (**重要**, 否则仓库权限不生效)
1. 在这一步后,候选人即新的 Committer 才拥有对 GitHub HugeGraph 仓库的写权限
5. (可选) 新的 Committer 可以使用 Apache 账号[申请](https://www.jetbrains.com/shop/eform/apache)免费使用 Jetbrains 的全系列产品
@@ -315,7 +315,7 @@ for more information about roles at Apache.
To: dev@hugegraph.apache.org
Subject: [ANNOUNCE] New Committer: xxx
-Hi everyone, The PPMC for Apache HugeGraph(incubating) has invited xxx to
+Hi everyone, The PMC for Apache HugeGraph has invited xxx to
become a Committer and we are pleased to announce that he/she has accepted.
xxx is being active in the HugeGraph community & dedicated to ... modules,
@@ -327,32 +327,19 @@ Welcome xxx, and please enjoy your community journey~
Thanks!
-The Apache HugeGraph PPMC
+The Apache HugeGraph PMC
```
-## 更新 clutch status 信息
+## 更新治理信息入口
-负责提名的 PMC 成员需下载 clutch status 信息, 并进行更新, 生效后可在 [clutch](https://incubator.apache.org/clutch/hugegraph.html) 和 [projects](https://incubator.apache.org/projects/hugegraph.html) 页面查询。流程参考如下:
+Apache HugeGraph 已于 2026 年 1 月毕业,治理信息不再通过 Incubator clutch 页面维护,而是通过 ASF committee/project 数据维护。
-```text
-# 1. 下载 clutch status 信息
-svn co https://svn.apache.org/repos/asf/incubator/public/trunk/content/projects/
-
-# 2. 修改编辑 (注意下面内容仅供参考)
-cd projects
-vim hugegraph.xml
-
-
- News
-
- - YYYY-MM-DD New Committer: xxx
- - ...
-
-
-
-# 3. 提交
-svn commit -m "update news for hugegraph"
-```
+请优先检查:
+
+- [HugeGraph 的 ASF Committee 页面](https://projects.apache.org/committee.html?hugegraph)
+- [PMC roster 页面](https://whimsy.apache.org/roster/pmc/hugegraph)
+
+若信息未自动同步,请按照 ASF 官方流程联系 Apache Community Development 或 ASF Infra 协助处理。
# 参考
diff --git a/content/cn/docs/contribution-guidelines/contribute.md b/content/cn/docs/contribution-guidelines/contribute.md
index 05b6a598a..5f4407c17 100644
--- a/content/cn/docs/contribution-guidelines/contribute.md
+++ b/content/cn/docs/contribution-guidelines/contribute.md
@@ -22,7 +22,7 @@ Before submitting the code, we need to do some preparation:
1. Sign up or login to GitHub: [https://github.com](https://github.com)
-2. Fork HugeGraph repo from GitHub: [https://github.com/apache/incubator-hugegraph/fork](https://github.com/apache/hugegraph/fork)
+2. Fork HugeGraph repo from GitHub: [https://github.com/apache/hugegraph/fork](https://github.com/apache/hugegraph/fork)
3. Clone code from fork repo to local: [https://github.com/${GITHUB_USER_NAME}/hugegraph](https://github.com/${GITHUB_USER_NAME}/hugegraph)
@@ -46,7 +46,7 @@ Before submitting the code, we need to do some preparation:
## 2. Create an Issue on GitHub
-If you encounter bugs or have any questions, please go to [GitHub Issues](https://github.com/apache/incubator-hugegraph/issues) to report them and feel free to [create an issue](https://github.com/apache/hugegraph/issues/new).
+If you encounter bugs or have any questions, please go to [GitHub Issues](https://github.com/apache/hugegraph/issues) to report them and feel free to [create an issue](https://github.com/apache/hugegraph/issues/new).
## 3. Make changes of code locally
@@ -74,15 +74,15 @@ vim hugegraph-core/src/main/java/org/apache/hugegraph/HugeFactory.java
# run test locally (optional)
mvn test -Pcore-test,memory
```
-Note: In order to be consistent with the code style easily, if you use [IDEA](https://www.jetbrains.com/idea/) as your IDE, you can directly [import](https://www.jetbrains.com/help/idea/configuring-code-style.html) our code style [configuration file](./hugegraph-style.xml).
+Note: In order to be consistent with the code style easily, if you use IDEA as your IDE, you can import our code style configuration file.
##### 3.2.1 添加第三方依赖
如果我们要在 `HugeGraph` 项目中添加新的第三方依赖, 我们需要做下面的几件事情:
-1. 找到第三方依赖的仓库,将依赖的 `license` 文件放到 [./hugegraph-dist/release-docs/licenses/](https://github.com/apache/incubator-hugegraph/tree/master/hugegraph-server/hugegraph-dist/release-docs/licenses) 路径下。
-2. 在[./hugegraph-dist/release-docs/LICENSE](https://github.com/apache/incubator-hugegraph/blob/master/hugegraph-server/hugegraph-dist/release-docs/LICENSE) 中声明该依赖的 `LICENSE` 信息。
-3. 找到仓库里的 NOTICE 文件,将其追加到 [./hugegraph-dist/release-docs/NOTICE](https://github.com/apache/incubator-hugegraph/blob/master/hugegraph-server/hugegraph-dist/release-docs/NOTICE) 文件后面(如果没有NOTICE文件则跳过这一步)。
-4. 本地执行[./hugegraph-dist/scripts/dependency/regenerate_known_dependencies.sh](https://github.com/apache/incubator-hugegraph/blob/master/hugegraph-server/hugegraph-dist/scripts/dependency/regenerate_known_dependencies.sh) 脚本来更新依赖列表[known-dependencies.txt](https://github.com/apache/incubator-hugegraph/blob/master/hugegraph-server/hugegraph-dist/scripts/dependency/known-dependencies.txt) (或者手动更新)。
+1. 找到第三方依赖的仓库,将依赖的 `license` 文件放到 [./hugegraph-dist/release-docs/licenses/](https://github.com/apache/hugegraph/tree/master/hugegraph-server/hugegraph-dist/release-docs/licenses) 路径下。
+2. 在[./hugegraph-dist/release-docs/LICENSE](https://github.com/apache/hugegraph/blob/master/hugegraph-server/hugegraph-dist/release-docs/LICENSE) 中声明该依赖的 `LICENSE` 信息。
+3. 找到仓库里的 NOTICE 文件,将其追加到 [./hugegraph-dist/release-docs/NOTICE](https://github.com/apache/hugegraph/blob/master/hugegraph-server/hugegraph-dist/release-docs/NOTICE) 文件后面(如果没有NOTICE文件则跳过这一步)。
+4. 本地执行[./hugegraph-dist/scripts/dependency/regenerate_known_dependencies.sh](https://github.com/apache/hugegraph/blob/master/hugegraph-server/hugegraph-dist/scripts/dependency/regenerate_known_dependencies.sh) 脚本来更新依赖列表[known-dependencies.txt](https://github.com/apache/hugegraph/blob/master/hugegraph-server/hugegraph-dist/scripts/dependency/known-dependencies.txt) (或者手动更新)。
**例如**:在项目中引入了第三方新依赖 -> `ant-1.9.1.jar`
- 项目源码位于:https://github.com/apache/ant/tree/rel/1.9.1
diff --git a/content/cn/docs/contribution-guidelines/hugegraph-server-idea-setup.md b/content/cn/docs/contribution-guidelines/hugegraph-server-idea-setup.md
index 502350ca8..8561312e5 100644
--- a/content/cn/docs/contribution-guidelines/hugegraph-server-idea-setup.md
+++ b/content/cn/docs/contribution-guidelines/hugegraph-server-idea-setup.md
@@ -4,11 +4,11 @@ linkTitle: "在 IDEA 中配置 Server 开发环境"
weight: 4
---
-> 注意:下述配置仅供参考,基于[这个版本](https://github.com/apache/incubator-hugegraph/commit/a946ad1de4e8f922251a5241ffc957c33379677f),在 Linux 和 macOS 平台下进行了测试。
+> 注意:下述配置仅供参考,基于[这个版本](https://github.com/apache/hugegraph/commit/a946ad1de4e8f922251a5241ffc957c33379677f),在 Linux 和 macOS 平台下进行了测试。
### 背景
-在 [Quick Start](/docs/quickstart/hugegraph-server/) 部分已经介绍了使用**脚本**启停 HugeGraph-Server 的流程。下面以 Linux 平台为例,
+在 [Quick Start](/docs/quickstart/hugegraph/hugegraph-server/) 部分已经介绍了使用**脚本**启停 HugeGraph-Server 的流程。下面以 Linux 平台为例,
介绍使用 **IntelliJ IDEA** 运行与调试 HugeGraph-Server 的流程。
本地启动的核心与**脚本启动**是一样的:
@@ -17,7 +17,7 @@ weight: 4
2. 启动 HugeGraph-Server,执行 `HugeGraphServer` 类加载初始化的图信息启动
在执行下述流程之前,请确保已经克隆了 HugeGraph 的源代码,并且已经配置了 Java 11 环境 & 可以参考这个
-[配置文档](https://github.com/apache/incubator-hugegraph/wiki/The-style-config-for-HugeGraph-in-IDEA)
+[配置文档](https://github.com/apache/hugegraph/wiki/The-style-config-for-HugeGraph-in-IDEA)
```bash
git clone https://github.com/apache/hugegraph.git
@@ -57,7 +57,7 @@ rocksdb.wal_path=.
- LD_LIBRARY_PATH=/path/to/your/library:$LD_LIBRARY_PATH
- LD_PRELOAD=libjemalloc.so:librocksdbjni-linux64.so
-> 若在 **Java 11** 环境下为 HugeGraph-Server 配置了**用户认证** (authenticator),需要参考二进制包的脚本[配置](https://github.com/apache/incubator-hugegraph/blob/master/hugegraph-server/hugegraph-dist/src/assembly/static/bin/init-store.sh#L52),添加下述 **VM options**:
+> 若在 **Java 11** 环境下为 HugeGraph-Server 配置了**用户认证** (authenticator),需要参考二进制包的脚本[配置](https://github.com/apache/hugegraph/blob/master/hugegraph-server/hugegraph-dist/src/assembly/static/bin/init-store.sh#L52),添加下述 **VM options**:
>
> ```bash
> --add-exports=java.base/jdk.internal.reflect=ALL-UNNAMED
@@ -93,7 +93,7 @@ rocksdb.wal_path=.
- 将 `Main class` 设置为 `org.apache.hugegraph.dist.HugeGraphServer`
- 设置运行参数为 `conf/gremlin-server.yaml conf/rest-server.properties`,同样地,这里的路径是相对于工作路径的,需要将工作路径设置为 `path-to-your-directory`
-> 类似的,若在 **Java 11** 环境下为 HugeGraph-Server 配置了**用户认证** (authenticator),同样需要参考二进制包的脚本[配置](https://github.com/apache/incubator-hugegraph/blob/master/hugegraph-server/hugegraph-dist/src/assembly/static/bin/hugegraph-server.sh#L124),添加下述 **VM options**:
+> 类似的,若在 **Java 11** 环境下为 HugeGraph-Server 配置了**用户认证** (authenticator),同样需要参考二进制包的脚本[配置](https://github.com/apache/hugegraph/blob/master/hugegraph-server/hugegraph-dist/src/assembly/static/bin/hugegraph-server.sh#L124),添加下述 **VM options**:
>
> ```bash
> --add-exports=java.base/jdk.internal.reflect=ALL-UNNAMED --add-modules=jdk.unsupported --add-exports=java.base/sun.nio.ch=ALL-UNNAMED
@@ -167,8 +167,8 @@ curl "http://localhost:8080/graphs/hugegraph/graph/vertices" | gunzip
##### 参考
-1. [HugeGraph-Server Quick Start](/docs/quickstart/hugegraph-server/)
+1. [HugeGraph-Server Quick Start](/docs/quickstart/hugegraph/hugegraph-server/)
2. [hugegraph-server 本地调试文档 (Win/Unix)](https://gist.github.com/imbajin/1661450f000cd62a67e46d4f1abfe82c)
3. ["package sun.misc does not exist" compilation error](https://youtrack.jetbrains.com/issue/IDEA-180033)
4. [Cannot compile: java: package sun.misc does not exist](https://youtrack.jetbrains.com/issue/IDEA-201168)
-5. [The code-style config for HugeGraph in IDEA](https://github.com/apache/incubator-hugegraph/wiki/The-style-config-for-HugeGraph-in-IDEA)
+5. [The code-style config for HugeGraph in IDEA](https://github.com/apache/hugegraph/wiki/The-style-config-for-HugeGraph-in-IDEA)
diff --git a/content/cn/docs/contribution-guidelines/validate-release.md b/content/cn/docs/contribution-guidelines/validate-release.md
index 7794a4ba2..7e5032158 100644
--- a/content/cn/docs/contribution-guidelines/validate-release.md
+++ b/content/cn/docs/contribution-guidelines/validate-release.md
@@ -6,11 +6,15 @@ weight: 3
> Note: 这篇文档会持续更新。
> 你需要使用 Java11 验证测试 (如果希望测试功能/运行时),从 1.5.0 版本开始 (除 client 外) 不再支持 Java8
+>
+> 毕业说明:Apache HugeGraph 已于 2026 年 1 月毕业。正式发版投票现由 HugeGraph 社区内部完成(`dev@hugegraph.apache.org` 上的 PMC binding 投票),不再需要 Incubator `general@incubator.apache.org` 审批。
## 验证阶段
当内部的临时发布和打包工作完成后,其他的社区开发者 (尤其是 PMC)
-需要参与到[验证环节](https://cwiki.apache.org/confluence/display/INCUBATOR/Incubator+Release+Checklist)
+需要按 ASF 发版规范参与验证,可参考:
+- [ASF 发布策略](https://www.apache.org/legal/release-policy.html)
+- [Incubator 检查清单(历史参考)](https://cwiki.apache.org/confluence/display/INCUBATOR/Incubator+Release+Checklist)
确保某个人发布版本的"正确性 + 完整性", 这里需要**每个人**都尽量参与,然后后序**邮件回复**的时候说明自己
**已检查**了哪些项。(下面是核心项)
@@ -43,9 +47,9 @@ sudo apt-get install wget -y
brew install wget
# 4. 下载 hugegraph-svn 目录 (版本号注意填写此次验证版本)
-svn co https://dist.apache.org/repos/dist/dev/incubator/hugegraph/1.x.x/
+svn co https://dist.apache.org/repos/dist/dev/hugegraph/1.x.x/
# (注) 如果出现 svn 下载某个文件速度很慢的情况, 可以考虑 wget 单个文件下载, 如下 (或考虑使用 VPN / 代理)
-wget https://dist.apache.org/repos/dist/dev/incubator/hugegraph/1.x.x/apache-hugegraph-toolchain-incubating-1.x.x.tar.gz
+wget https://dist.apache.org/repos/dist/dev/hugegraph/1.x.x/apache-hugegraph-toolchain-1.x.x.tar.gz
```
#### 2. 检查 hash 值
@@ -65,7 +69,7 @@ for i in *.tar.gz; do echo $i; shasum -a 512 --check $i.sha512; done
```bash
# 1. 下载项目可信赖公钥到本地 (首次需要) & 导入
-curl https://downloads.apache.org/incubator/hugegraph/KEYS > KEYS
+curl https://downloads.apache.org/hugegraph/KEYS > KEYS
gpg --import KEYS
# 导入后可以看到如下输出, 这代表导入了 x 个用户公钥
@@ -111,8 +115,8 @@ for i in *.tar.gz; do echo $i; gpg --verify $i.asc $i ; done
解压 `*hugegraph*src.tar.gz`后,进行如下检查:
-1. 文件夹都带有 `incubating`, 且不存在**空的**文件/文件夹
-2. 存在 `LICENSE` + `NOTICE` + 存在 `DISCLAIMER` 文件并且内容正常
+1. 包名/目录名应符合当前发版命名(历史版本可能仍包含 `incubating`),且不存在**空的**文件/文件夹
+2. 存在 `LICENSE` + `NOTICE` 且内容正常;历史 incubating 制品需检查 `DISCLAIMER`
3. **不存在** 缺乏 License 的二进制文件
4. 源码文件都包含标准 `ASF License` 头 (这个用插件跑一下为主)
5. 检查每个父 / 子模块的 `pom.xml` 版本号是否一致 (且符合期望)
@@ -133,8 +137,8 @@ mvn clean package -DskipTests -Dcheckstyle.skip=true -P stage
解压 `xxx-hugegraph.tar.gz`后,进行如下检查:
-1. 文件夹都带有 `incubating`
-2. 存在 `LICENSE` + `NOTICE` 文件并且内容正常
+1. 包名/目录名应符合当前发版命名(历史版本可能仍包含 `incubating`)
+2. 存在 `LICENSE` + `NOTICE` 且内容正常(历史 incubating 制品需检查 `DISCLAIMER`)
3. 服务启动
```bash
diff --git a/content/cn/docs/download/download.md b/content/cn/docs/download/download.md
index 48977f8a5..831c22faa 100644
--- a/content/cn/docs/download/download.md
+++ b/content/cn/docs/download/download.md
@@ -1,5 +1,5 @@
---
-title: "下载 Apache HugeGraph (Incubating)"
+title: "下载 Apache HugeGraph"
linkTitle: "Download"
weight: 2
---
@@ -7,15 +7,13 @@ weight: 2
> 指南:
>
> - 推荐使用最新版本的 HugeGraph 软件包, 运行时环境请选择 Java11
-> - 验证下载版本, 请使用相应的哈希 (SHA512)、签名和 [项目签名验证 KEYS](https://downloads.apache.org/incubator/hugegraph/KEYS)
+> - 验证下载版本, 请使用相应的哈希 (SHA512)、签名和 [项目签名验证 KEYS](https://downloads.apache.org/hugegraph/KEYS)
> - 检查哈希 (SHA512)、签名的说明在 [版本验证](/docs/contribution-guidelines/validate-release/) 页面, 也可参考 [ASF 验证说明](https://www.apache.org/dyn/closer.cgi#verify)
-
-> 注: HugeGraph 所有组件版本号已保持一致, `client/loader/hubble/common` 等 maven 仓库版本号同理, 依赖引用可参考 [maven 示例](https://github.com/apache/incubator-hugegraph-toolchain#maven-dependencies)
+> - 注: HugeGraph 所有组件版本号已保持一致, `client/loader/hubble/common` 等 maven 仓库版本号同理, 依赖引用可参考 [maven 示例](https://github.com/apache/hugegraph-toolchain#maven-dependencies)
+> - 兼容说明: HugeGraph 于 2026 年 1 月毕业后,下载路径已从 `/incubator/hugegraph` 迁移到 `/hugegraph`。历史版本的发布文件名可能仍包含 `-incubating-`。
### 最新版本 1.7.0
-> 注: 从版本 `1.5.0` 开始,需要 Java11 运行时环境
-
- Release Date: 2025-11-28
- [Release Notes](/docs/changelog/hugegraph-1.7.0-release-notes/)
@@ -23,15 +21,15 @@ weight: 2
| Server | Toolchain |
|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| [[Binary](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.7.0/apache-hugegraph-incubating-1.7.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.7.0/apache-hugegraph-incubating-1.7.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.7.0/apache-hugegraph-incubating-1.7.0.tar.gz.sha512)] | [[Binary](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.7.0/apache-hugegraph-toolchain-incubating-1.7.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.7.0/apache-hugegraph-toolchain-incubating-1.7.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.7.0/apache-hugegraph-toolchain-incubating-1.7.0.tar.gz.sha512)] |
+| [[Binary](https://www.apache.org/dyn/closer.lua/hugegraph/1.7.0/apache-hugegraph-incubating-1.7.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.7.0/apache-hugegraph-incubating-1.7.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.7.0/apache-hugegraph-incubating-1.7.0.tar.gz.sha512)] | [[Binary](https://www.apache.org/dyn/closer.lua/hugegraph/1.7.0/apache-hugegraph-toolchain-incubating-1.7.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.7.0/apache-hugegraph-toolchain-incubating-1.7.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.7.0/apache-hugegraph-toolchain-incubating-1.7.0.tar.gz.sha512)] |
#### 源码包
-Please refer to [build from source](/docs/quickstart/hugegraph-server/).
+请参考[从源码构建文档](/docs/quickstart/hugegraph/hugegraph-server/)。
| Server | Toolchain | AI | Computer |
|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.7.0/apache-hugegraph-incubating-1.7.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.7.0/apache-hugegraph-incubating-1.7.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.7.0/apache-hugegraph-incubating-1.7.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.7.0/apache-hugegraph-toolchain-incubating-1.7.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.7.0/apache-hugegraph-toolchain-incubating-1.7.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.7.0/apache-hugegraph-toolchain-incubating-1.7.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.7.0/apache-hugegraph-ai-incubating-1.7.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.7.0/apache-hugegraph-ai-incubating-1.7.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.7.0/apache-hugegraph-ai-incubating-1.7.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.7.0/apache-hugegraph-computer-incubating-1.7.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.7.0/apache-hugegraph-computer-incubating-1.7.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.7.0/apache-hugegraph-computer-incubating-1.7.0-src.tar.gz.sha512)] |
+| [[Source](https://www.apache.org/dyn/closer.lua/hugegraph/1.7.0/apache-hugegraph-incubating-1.7.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.7.0/apache-hugegraph-incubating-1.7.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.7.0/apache-hugegraph-incubating-1.7.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/hugegraph/1.7.0/apache-hugegraph-toolchain-incubating-1.7.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.7.0/apache-hugegraph-toolchain-incubating-1.7.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.7.0/apache-hugegraph-toolchain-incubating-1.7.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/hugegraph/1.7.0/apache-hugegraph-ai-incubating-1.7.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.7.0/apache-hugegraph-ai-incubating-1.7.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.7.0/apache-hugegraph-ai-incubating-1.7.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/hugegraph/1.7.0/apache-hugegraph-computer-incubating-1.7.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.7.0/apache-hugegraph-computer-incubating-1.7.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.7.0/apache-hugegraph-computer-incubating-1.7.0-src.tar.gz.sha512)] |
---
@@ -41,6 +39,7 @@ Please refer to [build from source](/docs/quickstart/hugegraph-server/).
>
> 1. 请大家尽早迁移到最新 Release 版本上, 社区将不再维护 `1.0.0` 前的旧版本 (非 ASF 版本)
> 2. `1.3.0` 是最后一个兼容 Java8 的主版本, 请尽早使用/迁移运行时为 Java11 (低版本 Java 有潜在更多的 SEC 风险和性能影响)
+> 3. 从版本 `1.5.0` 开始,需要 Java11 运行时环境
#### 1.5.0
@@ -51,15 +50,15 @@ Please refer to [build from source](/docs/quickstart/hugegraph-server/).
| Server | Toolchain |
|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| [[Binary](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.5.0/apache-hugegraph-incubating-1.5.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-incubating-1.5.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-incubating-1.5.0.tar.gz.sha512)] | [[Binary](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0.tar.gz.sha512)] |
+| [[Binary](https://www.apache.org/dyn/closer.lua/hugegraph/1.5.0/apache-hugegraph-incubating-1.5.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.5.0/apache-hugegraph-incubating-1.5.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.5.0/apache-hugegraph-incubating-1.5.0.tar.gz.sha512)] | [[Binary](https://www.apache.org/dyn/closer.lua/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0.tar.gz.sha512)] |
##### 源码包
-Please refer to [build from source](/docs/quickstart/hugegraph-server/).
+请参考[从源码构建文档](/docs/quickstart/hugegraph/hugegraph-server/)。
| Server | Toolchain | AI | Computer |
|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.5.0/apache-hugegraph-incubating-1.5.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-incubating-1.5.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-incubating-1.5.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.5.0/apache-hugegraph-ai-incubating-1.5.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-ai-incubating-1.5.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-ai-incubating-1.5.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.5.0/apache-hugegraph-computer-incubating-1.5.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-computer-incubating-1.5.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-computer-incubating-1.5.0-src.tar.gz.sha512)] |
+| [[Source](https://www.apache.org/dyn/closer.lua/hugegraph/1.5.0/apache-hugegraph-incubating-1.5.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.5.0/apache-hugegraph-incubating-1.5.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.5.0/apache-hugegraph-incubating-1.5.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/hugegraph/1.5.0/apache-hugegraph-ai-incubating-1.5.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.5.0/apache-hugegraph-ai-incubating-1.5.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.5.0/apache-hugegraph-ai-incubating-1.5.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/hugegraph/1.5.0/apache-hugegraph-computer-incubating-1.5.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.5.0/apache-hugegraph-computer-incubating-1.5.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.5.0/apache-hugegraph-computer-incubating-1.5.0-src.tar.gz.sha512)] |
#### 1.3.0
@@ -70,15 +69,15 @@ Please refer to [build from source](/docs/quickstart/hugegraph-server/).
| Server | Toolchain |
|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| [[Binary](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.3.0/apache-hugegraph-incubating-1.3.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.3.0/apache-hugegraph-incubating-1.3.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.3.0/apache-hugegraph-incubating-1.3.0.tar.gz.sha512)] | [[Binary](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.3.0/apache-hugegraph-toolchain-incubating-1.3.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.3.0/apache-hugegraph-toolchain-incubating-1.3.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.3.0/apache-hugegraph-toolchain-incubating-1.3.0.tar.gz.sha512)] |
+| [[Binary](https://www.apache.org/dyn/closer.lua/hugegraph/1.3.0/apache-hugegraph-incubating-1.3.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.3.0/apache-hugegraph-incubating-1.3.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.3.0/apache-hugegraph-incubating-1.3.0.tar.gz.sha512)] | [[Binary](https://www.apache.org/dyn/closer.lua/hugegraph/1.3.0/apache-hugegraph-toolchain-incubating-1.3.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.3.0/apache-hugegraph-toolchain-incubating-1.3.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.3.0/apache-hugegraph-toolchain-incubating-1.3.0.tar.gz.sha512)] |
##### 源码包
-Please refer to [build from source](/docs/quickstart/hugegraph-server/).
+请参考[从源码构建文档](/docs/quickstart/hugegraph/hugegraph-server/)。
| Server | Toolchain | AI | Common |
|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.3.0/apache-hugegraph-incubating-1.3.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.3.0/apache-hugegraph-incubating-1.3.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.3.0/apache-hugegraph-incubating-1.3.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.3.0/apache-hugegraph-toolchain-incubating-1.3.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.3.0/apache-hugegraph-toolchain-incubating-1.3.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.3.0/apache-hugegraph-toolchain-incubating-1.3.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.3.0/apache-hugegraph-ai-incubating-1.3.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.3.0/apache-hugegraph-ai-incubating-1.3.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.3.0/apache-hugegraph-ai-incubating-1.3.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.3.0/apache-hugegraph-commons-incubating-1.3.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.3.0/apache-hugegraph-commons-incubating-1.3.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.3.0/apache-hugegraph-commons-incubating-1.3.0-src.tar.gz.sha512)] |
+| [[Source](https://www.apache.org/dyn/closer.lua/hugegraph/1.3.0/apache-hugegraph-incubating-1.3.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.3.0/apache-hugegraph-incubating-1.3.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.3.0/apache-hugegraph-incubating-1.3.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/hugegraph/1.3.0/apache-hugegraph-toolchain-incubating-1.3.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.3.0/apache-hugegraph-toolchain-incubating-1.3.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.3.0/apache-hugegraph-toolchain-incubating-1.3.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/hugegraph/1.3.0/apache-hugegraph-ai-incubating-1.3.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.3.0/apache-hugegraph-ai-incubating-1.3.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.3.0/apache-hugegraph-ai-incubating-1.3.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/hugegraph/1.3.0/apache-hugegraph-commons-incubating-1.3.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.3.0/apache-hugegraph-commons-incubating-1.3.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.3.0/apache-hugegraph-commons-incubating-1.3.0-src.tar.gz.sha512)] |
#### 1.2.0
@@ -90,13 +89,13 @@ Please refer to [build from source](/docs/quickstart/hugegraph-server/).
| Server | Toolchain |
|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| [[Binary](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.2.0/apache-hugegraph-incubating-1.2.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.2.0/apache-hugegraph-incubating-1.2.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.2.0/apache-hugegraph-incubating-1.2.0.tar.gz.sha512)] | [[Binary](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.2.0/apache-hugegraph-toolchain-incubating-1.2.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.2.0/apache-hugegraph-toolchain-incubating-1.2.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.2.0/apache-hugegraph-toolchain-incubating-1.2.0.tar.gz.sha512)] |
+| [[Binary](https://www.apache.org/dyn/closer.lua/hugegraph/1.2.0/apache-hugegraph-incubating-1.2.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.2.0/apache-hugegraph-incubating-1.2.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.2.0/apache-hugegraph-incubating-1.2.0.tar.gz.sha512)] | [[Binary](https://www.apache.org/dyn/closer.lua/hugegraph/1.2.0/apache-hugegraph-toolchain-incubating-1.2.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.2.0/apache-hugegraph-toolchain-incubating-1.2.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.2.0/apache-hugegraph-toolchain-incubating-1.2.0.tar.gz.sha512)] |
##### 源码包
| Server | Toolchain | Computer | Common |
|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.2.0/apache-hugegraph-incubating-1.2.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.2.0/apache-hugegraph-incubating-1.2.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.2.0/apache-hugegraph-incubating-1.2.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.2.0/apache-hugegraph-toolchain-incubating-1.2.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.2.0/apache-hugegraph-toolchain-incubating-1.2.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.2.0/apache-hugegraph-toolchain-incubating-1.2.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.2.0/apache-hugegraph-computer-incubating-1.2.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.2.0/apache-hugegraph-computer-incubating-1.2.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.2.0/apache-hugegraph-computer-incubating-1.2.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.2.0/apache-hugegraph-commons-incubating-1.2.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.2.0/apache-hugegraph-commons-incubating-1.2.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.2.0/apache-hugegraph-commons-incubating-1.2.0-src.tar.gz.sha512)] |
+| [[Source](https://www.apache.org/dyn/closer.lua/hugegraph/1.2.0/apache-hugegraph-incubating-1.2.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.2.0/apache-hugegraph-incubating-1.2.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.2.0/apache-hugegraph-incubating-1.2.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/hugegraph/1.2.0/apache-hugegraph-toolchain-incubating-1.2.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.2.0/apache-hugegraph-toolchain-incubating-1.2.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.2.0/apache-hugegraph-toolchain-incubating-1.2.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/hugegraph/1.2.0/apache-hugegraph-computer-incubating-1.2.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.2.0/apache-hugegraph-computer-incubating-1.2.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.2.0/apache-hugegraph-computer-incubating-1.2.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/hugegraph/1.2.0/apache-hugegraph-commons-incubating-1.2.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.2.0/apache-hugegraph-commons-incubating-1.2.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.2.0/apache-hugegraph-commons-incubating-1.2.0-src.tar.gz.sha512)] |
#### 1.0.0
@@ -107,16 +106,10 @@ Please refer to [build from source](/docs/quickstart/hugegraph-server/).
| Server | Toolchain | Computer |
|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| [[Binary](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.0.0/apache-hugegraph-incubating-1.0.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-incubating-1.0.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-incubating-1.0.0.tar.gz.sha512)] | [[Binary](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.0.0/apache-hugegraph-toolchain-incubating-1.0.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-toolchain-incubating-1.0.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-toolchain-incubating-1.0.0.tar.gz.sha512)] | [[Binary](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.0.0/apache-hugegraph-computer-incubating-1.0.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-computer-incubating-1.0.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-computer-incubating-1.0.0.tar.gz.sha512)] |
+| [[Binary](https://www.apache.org/dyn/closer.lua/hugegraph/1.0.0/apache-hugegraph-incubating-1.0.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.0.0/apache-hugegraph-incubating-1.0.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.0.0/apache-hugegraph-incubating-1.0.0.tar.gz.sha512)] | [[Binary](https://www.apache.org/dyn/closer.lua/hugegraph/1.0.0/apache-hugegraph-toolchain-incubating-1.0.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.0.0/apache-hugegraph-toolchain-incubating-1.0.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.0.0/apache-hugegraph-toolchain-incubating-1.0.0.tar.gz.sha512)] | [[Binary](https://www.apache.org/dyn/closer.lua/hugegraph/1.0.0/apache-hugegraph-computer-incubating-1.0.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.0.0/apache-hugegraph-computer-incubating-1.0.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.0.0/apache-hugegraph-computer-incubating-1.0.0.tar.gz.sha512)] |
##### 源码包
| Server | Toolchain | Computer | Common |
|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.0.0/apache-hugegraph-incubating-1.0.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-incubating-1.0.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-incubating-1.0.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.0.0/apache-hugegraph-toolchain-incubating-1.0.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-toolchain-incubating-1.0.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-toolchain-incubating-1.0.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.0.0/apache-hugegraph-computer-incubating-1.0.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-computer-incubating-1.0.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-computer-incubating-1.0.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.0.0/apache-hugegraph-commons-incubating-1.0.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-commons-incubating-1.0.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-commons-incubating-1.0.0-src.tar.gz.sha512)] |
-
----
-
- 旧版本 (非 ASF 版本)
-由于 ASF 规则要求, 不能直接在当前页面存放非 ASF 发行包, 对于 1.0.0 前旧版本 (非 ASF 版本) 的下载说明, 请跳转至 https://github.com/apache/incubator-hugegraph-doc/wiki/Apache-HugeGraph-(Incubating)-Old-Versions-Download
-
+| [[Source](https://www.apache.org/dyn/closer.lua/hugegraph/1.0.0/apache-hugegraph-incubating-1.0.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.0.0/apache-hugegraph-incubating-1.0.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.0.0/apache-hugegraph-incubating-1.0.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/hugegraph/1.0.0/apache-hugegraph-toolchain-incubating-1.0.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.0.0/apache-hugegraph-toolchain-incubating-1.0.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.0.0/apache-hugegraph-toolchain-incubating-1.0.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/hugegraph/1.0.0/apache-hugegraph-computer-incubating-1.0.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.0.0/apache-hugegraph-computer-incubating-1.0.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.0.0/apache-hugegraph-computer-incubating-1.0.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/hugegraph/1.0.0/apache-hugegraph-commons-incubating-1.0.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.0.0/apache-hugegraph-commons-incubating-1.0.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.0.0/apache-hugegraph-commons-incubating-1.0.0-src.tar.gz.sha512)] |
diff --git a/content/cn/docs/guides/faq.md b/content/cn/docs/guides/faq.md
index d658fdddc..0fb08eea6 100644
--- a/content/cn/docs/guides/faq.md
+++ b/content/cn/docs/guides/faq.md
@@ -4,9 +4,13 @@ linkTitle: "FAQ"
weight: 6
---
-- 如何选择后端存储? 选 RocksDB 还是 Cassandra 还是 Hbase 还是 Mysql?
+- 如何选择后端存储? 选 RocksDB 还是分布式存储?
- 根据你的具体需要来判断, 一般单机或数据量 < 100 亿推荐 RocksDB, 其他推荐使用分布式存储的后端集群
+ HugeGraph 支持多种部署模式,根据数据规模和场景选择:
+ - **单机模式**:Server + RocksDB,适合开发测试和中小规模数据(< 4TB)
+ - **分布式模式**:HugeGraph-PD + HugeGraph-Store (HStore),支持水平扩展和高可用(< 1000TB 数据规模),适合生产环境和大规模图数据应用
+
+ 注:Cassandra、HBase、MySQL 等后端仅在 HugeGraph <= 1.5 版本中可用,官方后续不再单独维护
- 启动服务时提示:`xxx (core dumped) xxx`
diff --git a/content/cn/docs/guides/security.md b/content/cn/docs/guides/security.md
index a1bdf20c5..9a0a77dc9 100644
--- a/content/cn/docs/guides/security.md
+++ b/content/cn/docs/guides/security.md
@@ -29,6 +29,8 @@ weight: 7
- [CVE-2024-27348](https://www.cve.org/CVERecord?id=CVE-2024-27348): HugeGraph-Server - Command execution in gremlin
- [CVE-2024-27349](https://www.cve.org/CVERecord?id=CVE-2024-27349): HugeGraph-Server - Bypass whitelist in Auth mode
+- [CVE-2024-43441](https://www.cve.org/CVERecord?id=CVE-2024-43441): HugeGraph-Server - Fixed JWT Token (Secret)
+- [CVE-2025-26866](https://www.cve.org/CVERecord?id=CVE-2025-26866): HugeGraph-Server - RAFT and deserialization vulnerability
### [HugeGraph-Toolchain](https://github.com/apache/hugegraph-toolchain) 仓库 (Hubble/Loader/Client/Tools/..)
diff --git a/content/cn/docs/guides/toolchain-local-test.md b/content/cn/docs/guides/toolchain-local-test.md
index b2d4e29fb..0aae55fe4 100644
--- a/content/cn/docs/guides/toolchain-local-test.md
+++ b/content/cn/docs/guides/toolchain-local-test.md
@@ -352,7 +352,8 @@ python -m pip install -r hubble-dist/assembly/travis/requirements.txt
```bash
mvn package -Dmaven.test.skip=true
# 可选:启动验证
-cd apache-hugegraph-hubble-incubating-*/bin
+# 兼容历史(-incubating-)与毕业后 TLP 命名
+cd apache-hugegraph-hubble*/bin
./start-hubble.sh -d && sleep 10
curl http://localhost:8088/api/health
./stop-hubble.sh
diff --git a/content/cn/docs/introduction/README.md b/content/cn/docs/introduction/_index.md
similarity index 69%
rename from content/cn/docs/introduction/README.md
rename to content/cn/docs/introduction/_index.md
index c228893dc..076c81c3d 100644
--- a/content/cn/docs/introduction/README.md
+++ b/content/cn/docs/introduction/_index.md
@@ -2,12 +2,16 @@
title: "Introduction with HugeGraph"
linkTitle: "Introduction"
weight: 1
+aliases:
+ - /cn/docs/introduction/readme/
+ - /cn/docs/introduction/README/
---
### Summary
Apache HugeGraph 是一款易用、高效、通用的开源图数据库系统(Graph Database,[GitHub 项目地址](https://github.com/apache/hugegraph)),
实现了[Apache TinkerPop3](https://tinkerpop.apache.org)框架及完全兼容[Gremlin](https://tinkerpop.apache.org/gremlin.html)查询语言,
+同时支持 [Cypher](https://opencypher.org/) 查询语言(OpenCypher 标准),
具备完善的工具链组件,助力用户轻松构建基于图数据库之上的应用和产品。HugeGraph 支持百亿以上的顶点和边快速导入,并提供毫秒级的关联关系查询能力(OLTP),
并支持大规模分布式图分析(OLAP)。
@@ -19,17 +23,43 @@ HugeGraph 典型应用场景包括深度关系探索、关联分析、路径搜
### Features
HugeGraph 支持在线及离线环境下的图操作,支持批量导入数据,支持高效的复杂关联关系分析,并且能够与大数据平台无缝集成。
-HugeGraph 支持多用户并行操作,用户可输入 Gremlin 查询语句,并及时得到图查询结果,也可在用户程序中调用 HugeGraph API 进行图分析或查询。
+HugeGraph 支持多用户并行操作,用户可输入 Gremlin/Cypher 查询语句,并及时得到图查询结果,也可在用户程序中调用 HugeGraph API 进行图分析或查询。
-本系统具备如下特点:
+本系统具备如下特点:
-- 易用:HugeGraph 支持 Gremlin 图查询语言与 RESTful API,同时提供图检索常用接口,具备功能齐全的周边工具,轻松实现基于图的各种查询分析运算。
+- 易用:HugeGraph 支持 Gremlin/Cypher 图查询语言与 RESTful API,同时提供图检索常用接口,具备功能齐全的周边工具,轻松实现基于图的各种查询分析运算。
- 高效:HugeGraph 在图存储和图计算方面做了深度优化,提供多种批量导入工具,轻松完成百亿级数据快速导入,通过优化过的查询达到图检索的毫秒级响应。支持数千用户并发的在线实时操作。
- 通用:HugeGraph 支持 Apache Gremlin 标准图查询语言和 Property Graph 标准图建模方法,支持基于图的 OLTP 和 OLAP 方案。集成 Apache Hadoop 及 Apache Spark 大数据平台。
- 可扩展:支持分布式存储、数据多副本及横向扩容,内置多种后端存储引擎,也可插件式轻松扩展后端存储引擎。
- 开放:HugeGraph 代码开源(Apache 2 License),客户可自主修改定制,选择性回馈开源社区。
-本系统的功能包括但不限于:
+### 部署模式
+
+HugeGraph 支持多种部署模式,满足不同规模和场景的需求:
+
+**单机模式 (Standalone)**
+- Server + RocksDB 后端存储
+- 适合开发测试和中小规模数据(< 4TB)
+- Docker 快速启动: `docker run hugegraph/hugegraph`
+- 详见 [Server 快速开始](/cn/docs/quickstart/hugegraph/hugegraph-server)
+
+**分布式模式 (Distributed)**
+- HugeGraph-PD: 元数据管理和集群调度
+- HugeGraph-Store (HStore): 分布式存储引擎
+- 支持水平扩展和高可用(< 1000TB 数据规模)
+- 适合生产环境和大规模图数据应用
+
+### 快速入门指南
+
+| 使用场景 | 推荐路径 |
+|---------|---------|
+| 快速体验 | [Docker 部署](/cn/docs/quickstart/hugegraph/hugegraph-server#docker) |
+| 构建 OLTP 应用 | Server → REST API / Gremlin / Cypher |
+| 图分析 (OLAP) | [Vermeer](/cn/docs/quickstart/computing/hugegraph-computer) (推荐) 或 Computer |
+| 构建 AI 应用 | [HugeGraph-AI](/cn/docs/quickstart/hugegraph-ai) (GraphRAG/知识图谱) |
+| 批量导入数据 | [Loader](/cn/docs/quickstart/toolchain/hugegraph-loader) + [Hubble](/cn/docs/quickstart/toolchain/hugegraph-hubble) |
+
+### 功能特性
- 支持从多数据源批量导入数据 (包括本地文件、HDFS 文件、MySQL 数据库等数据源),支持多种文件格式导入 (包括 TXT、CSV、JSON 等格式)
- 具备可视化操作界面,可用于操作、分析及展示图,降低用户使用门槛
@@ -50,20 +80,20 @@ HugeGraph 支持多用户并行操作,用户可输入 Gremlin 查询语句,
- Backend:实现将图数据存储到后端,支持的后端包括:Memory、Cassandra、ScyllaDB、RocksDB、HBase、MySQL 及 PostgreSQL,用户根据实际情况选择一种即可;
- API:内置 REST Server,向用户提供 RESTful API,同时完全兼容 Gremlin 查询。(支持分布式存储和计算下推)
- [HugeGraph-Toolchain](https://github.com/apache/hugegraph-toolchain): (工具链)
- - [HugeGraph-Client](/cn/docs/quickstart/client/hugegraph-client):HugeGraph-Client 提供了 RESTful API 的客户端,用于连接 HugeGraph-Server,目前仅实现 Java 版,其他语言用户可自行实现;
+ - [HugeGraph-Client](/cn/docs/quickstart/client/hugegraph-client):HugeGraph-Client 提供了 RESTful API 的客户端,用于连接 HugeGraph-Server,支持 Java/Python/Go 多语言版本;
- [HugeGraph-Loader](/cn/docs/quickstart/toolchain/hugegraph-loader):HugeGraph-Loader 是基于 HugeGraph-Client 的数据导入工具,将普通文本数据转化为图形的顶点和边并插入图形数据库中;
- - [HugeGraph-Hubble](/cn/docs/quickstart/toolchain/hugegraph-hubble):HugeGraph-Hubble 是 HugeGraph 的 Web
+ - [HugeGraph-Hubble](/cn/docs/quickstart/toolchain/hugegraph-hubble):HugeGraph-Hubble 是 HugeGraph 的 Web
可视化管理平台,一站式可视化分析平台,平台涵盖了从数据建模,到数据快速导入,再到数据的在线、离线分析、以及图的统一管理的全过程;
- [HugeGraph-Tools](/cn/docs/quickstart/toolchain/hugegraph-tools):HugeGraph-Tools 是 HugeGraph 的部署和管理工具,包括管理图、备份/恢复、Gremlin 执行等功能。
-- [HugeGraph-Computer](/cn/docs/quickstart/computing/hugegraph-computer):HugeGraph-Computer 是分布式图处理系统 (OLAP).
- 它是 [Pregel](https://kowshik.github.io/JPregel/pregel_paper.pdf) 的一个实现。它可以运行在 Kubernetes/Yarn
- 等集群上,支持超大规模图计算。
-- [HugeGraph-AI](/cn/docs/quickstart/hugegraph-ai):HugeGraph-AI 是 HugeGraph 独立的 AI
- 组件,提供了图神经网络的训练和推理功能,LLM/Graph RAG 结合/Python-Client 等相关组件,持续更新 ing。
+- [HugeGraph-Computer](/cn/docs/quickstart/computing/hugegraph-computer):HugeGraph-Computer 是分布式图处理系统 (OLAP)。
+ 它是 [Pregel](https://kowshik.github.io/JPregel/pregel_paper.pdf) 的一个实现。它可以运行在 Kubernetes/Yarn
+ 等集群上,支持超大规模图计算。同时提供 Vermeer 轻量级图计算引擎,适合快速开始和中小规模图分析。
+- [HugeGraph-AI](/cn/docs/quickstart/hugegraph-ai):HugeGraph-AI 是 HugeGraph 独立的 AI
+ 组件,提供 LLM/GraphRAG 智能问答、自动化知识图谱构建、图神经网络训练/推理、Python-Client 等功能,内置 20+ 图机器学习算法,持续更新中。
### Contact Us
-- [GitHub Issues](https://github.com/apache/incubator-hugegraph/issues): 使用途中出现问题或提供功能性建议,可通过此反馈 (推荐)
+- [GitHub Issues](https://github.com/apache/hugegraph/issues): 使用途中出现问题或提供功能性建议,可通过此反馈 (推荐)
- 邮件反馈:[dev@hugegraph.apache.org](mailto:dev@hugegraph.apache.org) ([邮箱订阅方式](https://hugegraph.apache.org/docs/contribution-guidelines/subscribe/))
- SEC 反馈: [security@hugegraph.apache.org](mailto:security@hugegraph.apache.org) (报告安全相关问题)
- 微信公众号:Apache HugeGraph, 欢迎扫描下方二维码加入我们!
diff --git a/content/cn/docs/performance/api-preformance/_index.md b/content/cn/docs/performance/api-performance/_index.md
similarity index 77%
rename from content/cn/docs/performance/api-preformance/_index.md
rename to content/cn/docs/performance/api-performance/_index.md
index 94c00c1b1..7fc78749a 100644
--- a/content/cn/docs/performance/api-preformance/_index.md
+++ b/content/cn/docs/performance/api-performance/_index.md
@@ -12,8 +12,8 @@ HugeGraph API性能测试主要测试HugeGraph-Server对RESTful API请求的并
HugeGraph的每个发布版本的RESTful API的性能测试情况可以参考:
-- [v0.5.6 stand-alone](/docs/performance/api-preformance/hugegraph-api-0.5.6-rocksdb/)
-- [v0.5.6 cluster](/docs/performance/api-preformance/hugegraph-api-0.5.6-cassandra/)
+- [v0.5.6 stand-alone](/docs/performance/api-performance/hugegraph-api-0.5.6-rocksdb/)
+- [v0.5.6 cluster](/docs/performance/api-performance/hugegraph-api-0.5.6-cassandra/)
> 即将更新,敬请期待!
diff --git a/content/cn/docs/performance/api-preformance/hugegraph-api-0.2.md b/content/cn/docs/performance/api-performance/hugegraph-api-0.2.md
similarity index 100%
rename from content/cn/docs/performance/api-preformance/hugegraph-api-0.2.md
rename to content/cn/docs/performance/api-performance/hugegraph-api-0.2.md
diff --git a/content/cn/docs/performance/api-preformance/hugegraph-api-0.4.4.md b/content/cn/docs/performance/api-performance/hugegraph-api-0.4.4.md
similarity index 100%
rename from content/cn/docs/performance/api-preformance/hugegraph-api-0.4.4.md
rename to content/cn/docs/performance/api-performance/hugegraph-api-0.4.4.md
diff --git a/content/cn/docs/performance/api-preformance/hugegraph-api-0.5.6-Cassandra.md b/content/cn/docs/performance/api-performance/hugegraph-api-0.5.6-cassandra.md
similarity index 100%
rename from content/cn/docs/performance/api-preformance/hugegraph-api-0.5.6-Cassandra.md
rename to content/cn/docs/performance/api-performance/hugegraph-api-0.5.6-cassandra.md
diff --git a/content/cn/docs/performance/api-preformance/hugegraph-api-0.5.6-RocksDB.md b/content/cn/docs/performance/api-performance/hugegraph-api-0.5.6-rocksdb.md
similarity index 100%
rename from content/cn/docs/performance/api-preformance/hugegraph-api-0.5.6-RocksDB.md
rename to content/cn/docs/performance/api-performance/hugegraph-api-0.5.6-rocksdb.md
diff --git a/content/cn/docs/quickstart/client/hugegraph-client-go.md b/content/cn/docs/quickstart/client/hugegraph-client-go.md
index 9778cf52e..11c7eb5bb 100644
--- a/content/cn/docs/quickstart/client/hugegraph-client-go.md
+++ b/content/cn/docs/quickstart/client/hugegraph-client-go.md
@@ -13,7 +13,7 @@ weight: 3
## 安装教程
```shell
-go get github.com/apache/incubator-hugegraph-toolchain/hugegraph-client-go
+go get github.com/apache/hugegraph-toolchain/hugegraph-client-go
```
## 已实现 API
@@ -34,8 +34,8 @@ import (
"log"
"os"
- "github.com/apache/incubator-hugegraph-toolchain/hugegraph-client-go"
- "github.com/apache/incubator-hugegraph-toolchain/hugegraph-client-go/hgtransport"
+ "github.com/apache/hugegraph-toolchain/hugegraph-client-go"
+ "github.com/apache/hugegraph-toolchain/hugegraph-client-go/hgtransport"
)
func main() {
@@ -73,8 +73,8 @@ import (
"log"
"os"
- "github.com/apache/incubator-hugegraph-toolchain/hugegraph-client-go"
- "github.com/apache/incubator-hugegraph-toolchain/hugegraph-client-go/hgtransport"
+ "github.com/apache/hugegraph-toolchain/hugegraph-client-go"
+ "github.com/apache/hugegraph-toolchain/hugegraph-client-go/hgtransport"
)
// initClient 初始化并返回一个 HugeGraph 客户端实例
diff --git a/content/cn/docs/quickstart/client/hugegraph-client-python.md b/content/cn/docs/quickstart/client/hugegraph-client-python.md
index 7ed2e6ad9..d904a6e9f 100644
--- a/content/cn/docs/quickstart/client/hugegraph-client-python.md
+++ b/content/cn/docs/quickstart/client/hugegraph-client-python.md
@@ -25,8 +25,8 @@ uv pip install hugegraph-python # 注意:可能不是最新版本,建议从
要从源码安装,请克隆仓库并安装所需的依赖项:
```bash
-git clone https://github.com/apache/incubator-hugegraph-ai.git
-cd incubator-hugegraph-ai/hugegraph-python-client
+git clone https://github.com/apache/hugegraph-ai.git
+cd hugegraph-ai/hugegraph-python-client
# 普通安装
uv pip install .
diff --git a/content/cn/docs/quickstart/client/hugegraph-client.md b/content/cn/docs/quickstart/client/hugegraph-client.md
index 9322aabfa..994d722b8 100644
--- a/content/cn/docs/quickstart/client/hugegraph-client.md
+++ b/content/cn/docs/quickstart/client/hugegraph-client.md
@@ -7,11 +7,11 @@ weight: 1
### 1 HugeGraph-Client 概述
[HugeGraph-Client](https://github.com/apache/hugegraph-toolchain) 向 HugeGraph-Server 发出 HTTP 请求,获取并解析 Server 的执行结果。
-提供了 Java/Go/[Python](https://github.com/apache/incubator-hugegraph-ai/tree/main/hugegraph-python-client) 版,
+提供了 Java/Go/[Python](https://github.com/apache/hugegraph-ai/tree/main/hugegraph-python-client) 版,
用户可以使用 [Client-API](/cn/docs/clients/hugegraph-client) 编写代码操作 HugeGraph,比如元数据和图数据的增删改查,或者执行 gremlin 语句等。
后文主要是 Java 使用示例 (其他语言 SDK 可参考对应 `README` 页面)
-> 现在已经支持基于 Go/Python 语言的 HugeGraph [Client SDK](https://github.com/apache/incubator-hugegraph-toolchain/blob/master/hugegraph-client-go/README.md) (version >=1.2.0)
+> 现在已经支持基于 Go/Python 语言的 HugeGraph [Client SDK](https://github.com/apache/hugegraph-toolchain/blob/master/hugegraph-client-go/README.md) (version >=1.2.0)
### 2 环境要求
@@ -357,7 +357,7 @@ public class BatchExample {
#### 4.4 运行 Example
运行 Example 之前需要启动 Server,
-启动过程见[HugeGraph-Server Quick Start](/cn/docs/quickstart/hugegraph-server)
+启动过程见[HugeGraph-Server Quick Start](/cn/docs/quickstart/hugegraph/hugegraph-server)
#### 4.5 详细 API 说明
diff --git a/content/cn/docs/quickstart/computing/_index.md b/content/cn/docs/quickstart/computing/_index.md
index 8777af8c9..bbce7c1c1 100644
--- a/content/cn/docs/quickstart/computing/_index.md
+++ b/content/cn/docs/quickstart/computing/_index.md
@@ -4,8 +4,8 @@ linkTitle: "HugeGraph Computing (OLAP)"
weight: 4
---
-## 🚀 最佳实践:优先使用 DeepWiki 智能文档
+> DeepWiki 提供实时更新的项目文档,内容更全面准确,适合快速了解项目最新情况。
+>
+> 📖 [https://deepwiki.com/apache/hugegraph-computer](https://deepwiki.com/apache/hugegraph-computer)
-> 为解决静态文档可能过时的问题,我们提供了 **实时更新、内容更全面** 的 DeepWiki。它相当于一个拥有项目最新知识的专家,非常适合**所有开发者**在开始项目前阅读和咨询。
-
-**👉 强烈推荐访问并对话:**[**incubator-hugegraph-computer**](https://deepwiki.com/apache/incubator-hugegraph-computer)
\ No newline at end of file
+**GitHub 访问:** [https://github.com/apache/hugegraph-computer](https://github.com/apache/hugegraph-computer)
\ No newline at end of file
diff --git a/content/cn/docs/quickstart/computing/hugegraph-computer-config.md b/content/cn/docs/quickstart/computing/hugegraph-computer-config.md
new file mode 100644
index 000000000..783446d73
--- /dev/null
+++ b/content/cn/docs/quickstart/computing/hugegraph-computer-config.md
@@ -0,0 +1,448 @@
+---
+title: "HugeGraph-Computer 配置参考"
+linkTitle: "Computer 配置参考"
+weight: 3
+---
+
+### Computer 配置选项
+
+> **默认值说明:**
+> - 以下配置项显示的是**代码默认值**(定义在 `ComputerOptions.java` 中)
+> - 当**打包配置文件**(`conf/computer.properties` 分发包中)指定了不同的值时,会以 `值 (打包: 值)` 的形式标注
+> - 示例:`300000 (打包: 100000)` 表示代码默认值为 300000,但分发包默认值为 100000
+> - 对于生产环境部署,除非明确覆盖,否则打包默认值优先生效
+
+---
+
+### 1. 基础配置
+
+HugeGraph-Computer 核心作业设置。
+
+| 配置项 | 默认值 | 说明 |
+|--------|--------|------|
+| hugegraph.url | http://127.0.0.1:8080 | HugeGraph 服务器 URL,用于加载数据和写回结果。 |
+| hugegraph.name | hugegraph | 图名称,用于加载数据和写回结果。 |
+| hugegraph.username | "" (空) | HugeGraph 认证用户名(如果未启用认证则留空)。 |
+| hugegraph.password | "" (空) | HugeGraph 认证密码(如果未启用认证则留空)。 |
+| job.id | local_0001 (打包: local_001) | YARN 集群或 K8s 集群上的作业标识符。 |
+| job.namespace | "" (空) | 作业命名空间,可以分隔不同的数据源。🔒 **由系统管理 - 不要手动修改**。 |
+| job.workers_count | 1 | 执行一个图算法作业的 Worker 数量。🔒 **在 K8s 中由系统管理 - 不要手动修改**。 |
+| job.partitions_count | 1 | 执行一个图算法作业的分区数量。 |
+| job.partitions_thread_nums | 4 | 分区并行计算的线程数量。 |
+
+---
+
+### 2. 算法配置
+
+计算逻辑的算法特定配置。
+
+| 配置项 | 默认值 | 说明 |
+|--------|--------|------|
+| algorithm.params_class | org.apache.hugegraph.computer.core.config.Null | ⚠️ **必填** 在算法运行前用于传递算法参数的类。 |
+| algorithm.result_class | org.apache.hugegraph.computer.core.config.Null | 顶点值的类,用于存储顶点的计算结果。 |
+| algorithm.message_class | org.apache.hugegraph.computer.core.config.Null | 计算顶点时传递的消息类。 |
+
+---
+
+### 3. 输入配置
+
+从 HugeGraph 或其他数据源加载输入数据的配置。
+
+#### 3.1 输入源
+
+| 配置项 | 默认值 | 说明 |
+|--------|--------|------|
+| input.source_type | hugegraph-server | 加载输入数据的源类型,允许值:['hugegraph-server', 'hugegraph-loader']。'hugegraph-loader' 表示使用 hugegraph-loader 从 HDFS 或文件加载数据。如果使用 'hugegraph-loader',请配置 'input.loader_struct_path' 和 'input.loader_schema_path'。 |
+| input.loader_struct_path | "" (空) | Loader 输入的结构路径,仅在 input.source_type=hugegraph-loader 时生效。 |
+| input.loader_schema_path | "" (空) | Loader 输入的 schema 路径,仅在 input.source_type=hugegraph-loader 时生效。 |
+
+#### 3.2 输入分片
+
+| 配置项 | 默认值 | 说明 |
+|--------|--------|------|
+| input.split_size | 1048576 (1 MB) | 输入分片大小(字节)。 |
+| input.split_max_splits | 10000000 | 最大输入分片数量。 |
+| input.split_page_size | 500 | 流式加载输入分片数据的页面大小。 |
+| input.split_fetch_timeout | 300 | 获取输入分片的超时时间(秒)。 |
+
+#### 3.3 输入处理
+
+| 配置项 | 默认值 | 说明 |
+|--------|--------|------|
+| input.filter_class | org.apache.hugegraph.computer.core.input.filter.DefaultInputFilter | 创建输入过滤器对象的类。输入过滤器用于根据用户需求过滤顶点边。 |
+| input.edge_direction | OUT | 要加载的边的方向,允许值:[OUT, IN, BOTH]。当值为 BOTH 时,将加载 OUT 和 IN 两个方向的边。 |
+| input.edge_freq | MULTIPLE | 一对顶点之间可以存在的边的频率,允许值:[SINGLE, SINGLE_PER_LABEL, MULTIPLE]。SINGLE 表示一对顶点之间只能存在一条边(通过 sourceId + targetId 标识);SINGLE_PER_LABEL 表示每个边标签在一对顶点之间可以有一条边(通过 sourceId + edgeLabel + targetId 标识);MULTIPLE 表示一对顶点之间可以存在多条边(通过 sourceId + edgeLabel + sortValues + targetId 标识)。 |
+| input.max_edges_in_one_vertex | 200 | 允许附加到一个顶点的最大邻接边数量。邻接边将作为一个批处理单元一起存储和传输。 |
+
+#### 3.4 输入性能
+
+| 配置项 | 默认值 | 说明 |
+|--------|--------|------|
+| input.send_thread_nums | 4 | 并行发送顶点或边的线程数量。 |
+
+---
+
+### 4. 快照与存储配置
+
+HugeGraph-Computer 支持快照功能,可将顶点/边分区保存到本地存储或 MinIO 对象存储,用于断点恢复或加速重复计算。
+
+#### 4.1 基础快照配置
+
+| 配置项 | 默认值 | 说明 |
+|--------|--------|------|
+| snapshot.write | false | 是否写入输入顶点/边分区的快照。 |
+| snapshot.load | false | 是否从顶点/边分区的快照加载。 |
+| snapshot.name | "" (空) | 用户自定义的快照名称,用于区分不同的快照。 |
+
+#### 4.2 MinIO 集成(可选)
+
+MinIO 可用作 K8s 部署中快照的分布式对象存储后端。
+
+| 配置项 | 默认值 | 说明 |
+|--------|--------|------|
+| snapshot.minio_endpoint | "" (空) | MinIO 服务端点(例如 `http://minio:9000`)。使用 MinIO 时必填。 |
+| snapshot.minio_access_key | minioadmin | MinIO 认证访问密钥。 |
+| snapshot.minio_secret_key | minioadmin | MinIO 认证私有密钥(Secret Key)。 |
+| snapshot.minio_bucket_name | "" (空) | 用于存储快照数据的 MinIO 存储桶名称。 |
+
+**使用场景:**
+- **断点恢复**:作业失败后从快照恢复,避免重新加载数据
+- **重复计算**:多次运行同一算法时从快照加载数据以加速启动
+- **A/B 测试**:保存同一数据集的多个快照版本,测试不同的算法参数
+
+**示例:本地快照**(在 `computer.properties` 中):
+```properties
+snapshot.write=true
+snapshot.name=pagerank-snapshot-20260201
+```
+
+**示例:MinIO 快照**(在 K8s CRD `computerConf` 中):
+```yaml
+computerConf:
+ snapshot.write: "true"
+ snapshot.name: "pagerank-snapshot-v1"
+ snapshot.minio_endpoint: "http://minio:9000"
+ snapshot.minio_access_key: "my-access-key"
+ snapshot.minio_secret_key: "my-secret-key"
+ snapshot.minio_bucket_name: "hugegraph-snapshots"
+```
+
+---
+
+### 5. Worker 与 Master 配置
+
+Worker 和 Master 计算逻辑的配置。
+
+#### 5.1 Master 配置
+
+| 配置项 | 默认值 | 说明 |
+|--------|--------|------|
+| master.computation_class | org.apache.hugegraph.computer.core.master.DefaultMasterComputation | Master 计算是可以决定是否继续下一个超步的计算。它在每个超步结束时在 master 上运行。 |
+
+#### 5.2 Worker 计算
+
+| 配置项 | 默认值 | 说明 |
+|--------|--------|------|
+| worker.computation_class | org.apache.hugegraph.computer.core.config.Null | 创建 worker 计算对象的类。Worker 计算用于在每个超步中计算每个顶点。 |
+| worker.combiner_class | org.apache.hugegraph.computer.core.config.Null | Combiner 可以将消息组合为一个顶点的一个值。例如,PageRank 算法可以将一个顶点的消息组合为一个求和值。 |
+| worker.partitioner | org.apache.hugegraph.computer.core.graph.partition.HashPartitioner | 分区器,决定顶点应该在哪个分区中,以及分区应该在哪个 worker 中。 |
+
+#### 5.3 Worker 组合器
+
+| 配置项 | 默认值 | 说明 |
+|--------|--------|------|
+| worker.vertex_properties_combiner_class | org.apache.hugegraph.computer.core.combiner.OverwritePropertiesCombiner | 组合器可以在输入步骤将同一顶点的多个属性组合为一个属性。 |
+| worker.edge_properties_combiner_class | org.apache.hugegraph.computer.core.combiner.OverwritePropertiesCombiner | 组合器可以在输入步骤将同一边的多个属性组合为一个属性。 |
+
+#### 5.4 Worker 缓冲区
+
+| 配置项 | 默认值 | 说明 |
+|--------|--------|------|
+| worker.received_buffers_bytes_limit | 104857600 (100 MB) | 接收数据缓冲区的限制字节数。所有缓冲区的总大小不能超过此限制。如果接收缓冲区达到此限制,它们将被合并到文件中(溢出到磁盘)。 |
+| worker.write_buffer_capacity | 52428800 (50 MB) | 用于存储顶点或消息的写缓冲区的初始大小。 |
+| worker.write_buffer_threshold | 52428800 (50 MB) | 写缓冲区的阈值。超过它将触发排序。写缓冲区用于存储顶点或消息。 |
+
+#### 5.5 Worker 数据与超时
+
+| 配置项 | 默认值 | 说明 |
+|--------|--------|------|
+| worker.data_dirs | [jobs] | 用逗号分隔的目录,接收的顶点和消息可以持久化到其中。 |
+| worker.wait_sort_timeout | 600000 (10 分钟) | 消息处理程序等待排序线程对一批缓冲区进行排序的最大超时时间(毫秒)。 |
+| worker.wait_finish_messages_timeout | 86400000 (24 小时) | 消息处理程序等待所有 worker 完成消息的最大超时时间(毫秒)。 |
+
+---
+
+### 6. I/O 与输出配置
+
+输出计算结果的配置。
+
+#### 6.1 输出类与结果
+
+| 配置项 | 默认值 | 说明 |
+|--------|--------|------|
+| output.output_class | org.apache.hugegraph.computer.core.output.LogOutput | 输出每个顶点计算结果的类。在迭代计算后调用。 |
+| output.result_name | value | 该值由 WORKER_COMPUTATION_CLASS 创建的实例的 #name() 动态分配。 |
+| output.result_write_type | OLAP_COMMON | 输出到 HugeGraph 的结果写入类型,允许值:[OLAP_COMMON, OLAP_SECONDARY, OLAP_RANGE]。 |
+
+#### 6.2 输出行为
+
+| 配置项 | 默认值 | 说明 |
+|--------|--------|------|
+| output.with_adjacent_edges | false | 是否输出顶点的邻接边。 |
+| output.with_vertex_properties | false | 是否输出顶点的属性。 |
+| output.with_edge_properties | false | 是否输出边的属性。 |
+
+#### 6.3 批量输出
+
+| 配置项 | 默认值 | 说明 |
+|--------|--------|------|
+| output.batch_size | 500 | 输出的批处理大小。 |
+| output.batch_threads | 1 | 用于批量输出的线程数量。 |
+| output.single_threads | 1 | 用于单个输出的线程数量。 |
+
+#### 6.4 HDFS 输出
+
+| 配置项 | 默认值 | 说明 |
+|--------|--------|------|
+| output.hdfs_url | hdfs://127.0.0.1:9000 | 输出的 HDFS URL。 |
+| output.hdfs_user | hadoop | 输出的 HDFS 用户。 |
+| output.hdfs_path_prefix | /hugegraph-computer/results | HDFS 输出结果的目录。 |
+| output.hdfs_delimiter | , (逗号) | HDFS 输出的分隔符。 |
+| output.hdfs_merge_partitions | true | 是否合并多个分区的输出文件。 |
+| output.hdfs_replication | 3 | HDFS 的副本数。 |
+| output.hdfs_core_site_path | "" (空) | HDFS core site 路径。 |
+| output.hdfs_site_path | "" (空) | HDFS site 路径。 |
+| output.hdfs_kerberos_enable | false | 是否为 HDFS 启用 Kerberos 认证。 |
+| output.hdfs_kerberos_principal | "" (空) | HDFS 的 Kerberos 认证 principal。 |
+| output.hdfs_kerberos_keytab | "" (空) | HDFS 的 Kerberos 认证 keytab 文件。 |
+| output.hdfs_krb5_conf | /etc/krb5.conf | Kerberos 配置文件路径。 |
+
+#### 6.5 重试与超时
+
+| 配置项 | 默认值 | 说明 |
+|--------|--------|------|
+| output.retry_times | 3 | 输出失败时的重试次数。 |
+| output.retry_interval | 10 | 输出失败时的重试间隔(秒)。 |
+| output.thread_pool_shutdown_timeout | 60 | 输出线程池关闭的超时时间(秒)。 |
+
+---
+
+### 7. 网络与传输配置
+
+Worker 和 Master 之间网络通信的配置。
+
+#### 7.1 服务器配置
+
+| 配置项 | 默认值 | 说明 |
+|--------|--------|------|
+| transport.server_host | 127.0.0.1 | 🔒 **由系统管理** 监听传输数据的服务器主机名或 IP。不要手动修改。 |
+| transport.server_port | 0 | 🔒 **由系统管理** 监听传输数据的服务器端口。如果设置为 0,系统将分配一个随机端口。不要手动修改。 |
+| transport.server_threads | 4 | 服务器传输线程的数量。 |
+
+#### 7.2 客户端配置
+
+| 配置项 | 默认值 | 说明 |
+|--------|--------|------|
+| transport.client_threads | 4 | 客户端传输线程的数量。 |
+| transport.client_connect_timeout | 3000 | 客户端连接到服务器的超时时间(毫秒)。 |
+
+#### 7.3 协议配置
+
+| 配置项 | 默认值 | 说明 |
+|--------|--------|------|
+| transport.provider_class | org.apache.hugegraph.computer.core.network.netty.NettyTransportProvider | 传输提供程序,目前仅支持 Netty。 |
+| transport.io_mode | AUTO | 网络 IO 模式,允许值:[NIO, EPOLL, AUTO]。AUTO 表示自动选择适当的模式。 |
+| transport.tcp_keep_alive | true | 是否启用 TCP keep-alive。 |
+| transport.transport_epoll_lt | false | 是否启用 EPOLL 水平触发(仅在 io_mode=EPOLL 时有效)。 |
+
+#### 7.4 缓冲区配置
+
+| 配置项 | 默认值 | 说明 |
+|--------|--------|------|
+| transport.send_buffer_size | 0 | Socket 发送缓冲区大小(字节)。0 表示使用系统默认值。 |
+| transport.receive_buffer_size | 0 | Socket 接收缓冲区大小(字节)。0 表示使用系统默认值。 |
+| transport.write_buffer_high_mark | 67108864 (64 MB) | 写缓冲区的高水位标记(字节)。如果排队字节数 > write_buffer_high_mark,将触发发送不可用。 |
+| transport.write_buffer_low_mark | 33554432 (32 MB) | 写缓冲区的低水位标记(字节)。如果排队字节数 < write_buffer_low_mark,将触发发送可用。 |
+
+#### 7.5 流量控制
+
+| 配置项 | 默认值 | 说明 |
+|--------|--------|------|
+| transport.max_pending_requests | 8 | 客户端未接收 ACK 的最大数量。如果未接收 ACK 的数量 >= max_pending_requests,将触发发送不可用。 |
+| transport.min_pending_requests | 6 | 客户端未接收 ACK 的最小数量。如果未接收 ACK 的数量 < min_pending_requests,将触发发送可用。 |
+| transport.min_ack_interval | 200 | 服务器回复 ACK 的最小间隔(毫秒)。 |
+
+#### 7.6 超时配置
+
+| 配置项 | 默认值 | 说明 |
+|--------|--------|------|
+| transport.close_timeout | 10000 | 关闭服务器或关闭客户端的超时时间(毫秒)。 |
+| transport.sync_request_timeout | 10000 | 发送同步请求后等待响应的超时时间(毫秒)。 |
+| transport.finish_session_timeout | 0 | 完成会话的超时时间(毫秒)。0 表示使用 (transport.sync_request_timeout × transport.max_pending_requests)。 |
+| transport.write_socket_timeout | 3000 | 将数据写入 socket 缓冲区的超时时间(毫秒)。 |
+| transport.server_idle_timeout | 360000 (6 分钟) | 服务器空闲的最大超时时间(毫秒)。 |
+
+#### 7.7 心跳配置
+
+| 配置项 | 默认值 | 说明 |
+|--------|--------|------|
+| transport.heartbeat_interval | 20000 (20 秒) | 客户端心跳之间的最小间隔(毫秒)。 |
+| transport.max_timeout_heartbeat_count | 120 | 客户端超时心跳的最大次数。如果连续等待心跳响应超时的次数 > max_timeout_heartbeat_count,通道将从客户端关闭。 |
+
+#### 7.8 高级网络设置
+
+| 配置项 | 默认值 | 说明 |
+|--------|--------|------|
+| transport.max_syn_backlog | 511 | 服务器端 SYN 队列的容量。0 表示使用系统默认值。 |
+| transport.recv_file_mode | true | 是否启用接收缓冲文件模式。如果启用,将使用零拷贝从 socket 接收缓冲区并写入文件。**注意**:需要操作系统支持零拷贝(例如 Linux sendfile/splice)。 |
+| transport.network_retries | 3 | 网络通信不稳定时的重试次数。 |
+
+---
+
+### 8. 存储与持久化配置
+
+HGKV(HugeGraph Key-Value)存储引擎和值文件的配置。
+
+#### 8.1 HGKV 配置
+
+| 配置项 | 默认值 | 说明 |
+|--------|--------|------|
+| hgkv.max_file_size | 2147483648 (2 GB) | 每个 HGKV 文件的最大字节数。 |
+| hgkv.max_data_block_size | 65536 (64 KB) | HGKV 文件数据块的最大字节大小。 |
+| hgkv.max_merge_files | 10 | 一次合并的最大文件数。 |
+| hgkv.temp_file_dir | /tmp/hgkv | 此文件夹用于在文件合并过程中存储临时文件。 |
+
+#### 8.2 值文件配置
+
+| 配置项 | 默认值 | 说明 |
+|--------|--------|------|
+| valuefile.max_segment_size | 1073741824 (1 GB) | 值文件每个段的最大字节数。 |
+
+---
+
+### 9. BSP 与协调配置
+
+批量同步并行(BSP)协议和 etcd 协调的配置。
+
+| 配置项 | 默认值 | 说明 |
+|--------|--------|------|
+| bsp.etcd_endpoints | http://localhost:2379 | 🔒 **在 K8s 中由系统管理** 访问 etcd 的端点。对于多个端点,使用逗号分隔列表:`http://host1:port1,http://host2:port2`。不要在 K8s 部署中手动修改。 |
+| bsp.max_super_step | 10 (打包: 2) | 算法的最大超步数。 |
+| bsp.register_timeout | 300000 (打包: 100000) | 等待 master 和 worker 注册的最大超时时间(毫秒)。 |
+| bsp.wait_workers_timeout | 86400000 (24 小时) | 等待 worker BSP 事件的最大超时时间(毫秒)。 |
+| bsp.wait_master_timeout | 86400000 (24 小时) | 等待 master BSP 事件的最大超时时间(毫秒)。 |
+| bsp.log_interval | 30000 (30 秒) | 等待 BSP 事件时打印日志的日志间隔(毫秒)。 |
+
+---
+
+### 10. 性能调优配置
+
+性能优化的配置。
+
+| 配置项 | 默认值 | 说明 |
+|--------|--------|------|
+| allocator.max_vertices_per_thread | 10000 | 每个内存分配器中每个线程处理的最大顶点数。 |
+| sort.thread_nums | 4 | 执行内部排序的线程数量。 |
+
+---
+
+### 11. 系统管理配置
+
+⚠️ **由系统管理的配置项 - 禁止用户手动修改。**
+
+以下配置项由 K8s Operator、Driver 或运行时系统自动管理。手动修改将导致集群通信失败或作业调度错误。
+
+| 配置项 | 管理者 | 说明 |
+|--------|--------|------|
+| bsp.etcd_endpoints | K8s Operator | 自动设置为 operator 的 etcd 服务地址 |
+| transport.server_host | 运行时 | 自动设置为 pod/容器主机名 |
+| transport.server_port | 运行时 | 自动分配随机端口 |
+| job.namespace | K8s Operator | 自动设置为作业命名空间 |
+| job.id | K8s Operator | 自动从 CRD 设置为作业 ID |
+| job.workers_count | K8s Operator | 自动从 CRD `workerInstances` 设置 |
+| rpc.server_host | 运行时 | RPC 服务器主机名(系统管理) |
+| rpc.server_port | 运行时 | RPC 服务器端口(系统管理) |
+| rpc.remote_url | 运行时 | RPC 远程 URL(系统管理) |
+
+**为什么禁止修改:**
+- **BSP/RPC 配置**:必须与实际部署的 etcd/RPC 服务匹配。手动覆盖会破坏协调。
+- **作业配置**:必须与 K8s CRD 规范匹配。不匹配会导致 worker 数量错误。
+- **传输配置**:必须使用实际的 pod 主机名/端口。手动值会阻止 worker 间通信。
+
+---
+
+### K8s Operator 配置选项
+
+> 注意:选项需要通过环境变量设置进行转换,例如 k8s.internal_etcd_url => INTERNAL_ETCD_URL
+
+| 配置项 | 默认值 | 说明 |
+|--------|--------|------|
+| k8s.auto_destroy_pod | true | 作业完成或失败时是否自动销毁所有 pod。 |
+| k8s.close_reconciler_timeout | 120 | 关闭 reconciler 的最大超时时间(毫秒)。 |
+| k8s.internal_etcd_url | http://127.0.0.1:2379 | operator 系统的内部 etcd URL。 |
+| k8s.max_reconcile_retry | 3 | reconcile 的最大重试次数。 |
+| k8s.probe_backlog | 50 | 服务健康探针的最大积压。 |
+| k8s.probe_port | 9892 | controller 绑定的用于服务健康探针的端口。 |
+| k8s.ready_check_internal | 1000 | 检查就绪的时间间隔(毫秒)。 |
+| k8s.ready_timeout | 30000 | 检查就绪的最大超时时间(毫秒)。 |
+| k8s.reconciler_count | 10 | reconciler 线程的最大数量。 |
+| k8s.resync_period | 600000 | 被监视资源进行 reconcile 的最小频率。 |
+| k8s.timezone | Asia/Shanghai | computer 作业和 operator 的时区。 |
+| k8s.watch_namespace | hugegraph-computer-system | 监视自定义资源的命名空间。使用 '*' 监视所有命名空间。 |
+
+---
+
+### HugeGraph-Computer CRD
+
+> CRD: https://github.com/apache/hugegraph-computer/blob/master/computer-k8s-operator/manifest/hugegraph-computer-crd.v1.yaml
+
+| 字段 | 默认值 | 说明 | 必填 |
+|------|--------|------|------|
+| algorithmName | | 算法名称。 | true |
+| jobId | | 作业 ID。 | true |
+| image | | 算法镜像。 | true |
+| computerConf | | computer 配置选项的映射。 | true |
+| workerInstances | | worker 实例数量,将覆盖 'job.workers_count' 选项。 | true |
+| pullPolicy | Always | 镜像拉取策略,详情请参考:https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy | false |
+| pullSecrets | | 镜像拉取密钥,详情请参考:https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod | false |
+| masterCpu | | master 的 CPU 限制,单位可以是 'm' 或无单位,详情请参考:[https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu) | false |
+| workerCpu | | worker 的 CPU 限制,单位可以是 'm' 或无单位,详情请参考:[https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu) | false |
+| masterMemory | | master 的内存限制,单位可以是 Ei、Pi、Ti、Gi、Mi、Ki 之一,详情请参考:[https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory) | false |
+| workerMemory | | worker 的内存限制,单位可以是 Ei、Pi、Ti、Gi、Mi、Ki 之一,详情请参考:[https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory) | false |
+| log4jXml | | computer 作业的 log4j.xml 内容。 | false |
+| jarFile | | computer 算法的 jar 路径。 | false |
+| remoteJarUri | | computer 算法的远程 jar URI,将覆盖算法镜像。 | false |
+| jvmOptions | | computer 作业的 Java 启动参数。 | false |
+| envVars | | 请参考:https://kubernetes.io/docs/tasks/inject-data-application/define-interdependent-environment-variables/ | false |
+| envFrom | | 请参考:https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/ | false |
+| masterCommand | bin/start-computer.sh | master 的运行命令,等同于 Docker 的 'Entrypoint' 字段。 | false |
+| masterArgs | ["-r master", "-d k8s"] | master 的运行参数,等同于 Docker 的 'Cmd' 字段。 | false |
+| workerCommand | bin/start-computer.sh | worker 的运行命令,等同于 Docker 的 'Entrypoint' 字段。 | false |
+| workerArgs | ["-r worker", "-d k8s"] | worker 的运行参数,等同于 Docker 的 'Cmd' 字段。 | false |
+| volumes | | 请参考:https://kubernetes.io/docs/concepts/storage/volumes/ | false |
+| volumeMounts | | 请参考:https://kubernetes.io/docs/concepts/storage/volumes/ | false |
+| secretPaths | | k8s-secret 名称和挂载路径的映射。 | false |
+| configMapPaths | | k8s-configmap 名称和挂载路径的映射。 | false |
+| podTemplateSpec | | 请参考:https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-template-v1/#PodTemplateSpec | false |
+| securityContext | | 请参考:https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ | false |
+
+---
+
+### KubeDriver 配置选项
+
+| 配置项 | 默认值 | 说明 |
+|--------|--------|------|
+| k8s.build_image_bash_path | | 用于构建镜像的命令路径。 |
+| k8s.enable_internal_algorithm | true | 是否启用内部算法。 |
+| k8s.framework_image_url | hugegraph/hugegraph-computer:latest | computer 框架的镜像 URL。 |
+| k8s.image_repository_password | | 登录镜像仓库的密码。 |
+| k8s.image_repository_registry | | 登录镜像仓库的地址。 |
+| k8s.image_repository_url | hugegraph/hugegraph-computer | 镜像仓库的 URL。 |
+| k8s.image_repository_username | | 登录镜像仓库的用户名。 |
+| k8s.internal_algorithm | [pageRank] | 所有内部算法的名称列表。**注意**:算法名称在这里使用驼峰命名法(例如 `pageRank`),但算法实现返回下划线命名法(例如 `page_rank`)。 |
+| k8s.internal_algorithm_image_url | hugegraph/hugegraph-computer:latest | 内部算法的镜像 URL。 |
+| k8s.jar_file_dir | /cache/jars/ | 算法 jar 将上传到的目录。 |
+| k8s.kube_config | ~/.kube/config | k8s 配置文件的路径。 |
+| k8s.log4j_xml_path | | computer 作业的 log4j.xml 路径。 |
+| k8s.namespace | hugegraph-computer-system | hugegraph-computer 系统的命名空间。 |
+| k8s.pull_secret_names | [] | 拉取镜像的 pull-secret 名称。 |
diff --git a/content/cn/docs/quickstart/computing/hugegraph-computer.md b/content/cn/docs/quickstart/computing/hugegraph-computer.md
index 4c9dd7237..c401ff844 100644
--- a/content/cn/docs/quickstart/computing/hugegraph-computer.md
+++ b/content/cn/docs/quickstart/computing/hugegraph-computer.md
@@ -6,7 +6,7 @@ weight: 2
## 1 HugeGraph-Computer 概述
-[`HugeGraph-Computer`](https://github.com/apache/incubator-hugegraph-computer) 是分布式图处理系统 (OLAP). 它是 [Pregel](https://kowshik.github.io/JPregel/pregel_paper.pdf)的一个实现。它可以运行在 Kubernetes(K8s)/Yarn 上。(它侧重可支持百亿~千亿的图数据量下进行图计算, 会使用磁盘进行排序和加速, 这是它和 Vermeer 相对最大的区别之一)
+[`HugeGraph-Computer`](https://github.com/apache/hugegraph-computer) 是分布式图处理系统 (OLAP). 它是 [Pregel](https://kowshik.github.io/JPregel/pregel_paper.pdf)的一个实现。它可以运行在 Kubernetes(K8s)/Yarn 上。(它侧重可支持百亿~千亿的图数据量下进行图计算, 会使用磁盘进行排序和加速, 这是它和 Vermeer 相对最大的区别之一)
### 特性
@@ -44,7 +44,7 @@ weight: 2
下载最新版本的 HugeGraph-Computer release 包:
```bash
-wget https://downloads.apache.org/incubator/hugegraph/${version}/apache-hugegraph-computer-incubating-${version}.tar.gz
+wget https://downloads.apache.org/hugegraph/${version}/apache-hugegraph-computer-incubating-${version}.tar.gz
tar zxvf apache-hugegraph-computer-incubating-${version}.tar.gz -C hugegraph-computer
```
@@ -65,7 +65,7 @@ mvn clean package -DskipTests
#### 3.1.3 启动 master 节点
-> 您可以使用 `-c` 参数指定配置文件,更多 computer 配置请看:[Computer Config Options](/docs/config/config-computer#computer-config-options)
+> 您可以使用 `-c` 参数指定配置文件,更多 computer 配置请看:[Computer Config Options](/docs/quickstart/computing/hugegraph-computer-config#computer-config-options)
```bash
cd hugegraph-computer
@@ -137,9 +137,9 @@ hugegraph-computer-operator-etcd-28lm67jxk5 1/1 Runnin
#### 3.2.5 提交作业
-> 更多 computer crd spec 请看:[Computer CRD](/docs/config/config-computer#hugegraph-computer-crd)
+> 更多 computer crd spec 请看:[Computer CRD](/docs/quickstart/computing/hugegraph-computer-config#hugegraph-computer-crd)
>
-> 更多 Computer 配置请看:[Computer Config Options](/docs/config/config-computer#computer-config-options)
+> 更多 Computer 配置请看:[Computer Config Options](/docs/quickstart/computing/hugegraph-computer-config#computer-config-options)
```yaml
cat < 为解决静态文档可能过时的问题,我们提供了 **实时更新、内容更全面** 的 DeepWiki。它相当于一个拥有项目最新知识的专家,非常适合**所有开发者**在开始项目前阅读和咨询。
-
-**👉 强烈推荐访问并对话:**[**incubator-hugegraph-ai**](https://deepwiki.com/apache/incubator-hugegraph-ai)
+> DeepWiki 提供实时更新的项目文档,内容更全面准确,适合快速了解项目最新情况。
+>
+> 📖 [https://deepwiki.com/apache/hugegraph-ai](https://deepwiki.com/apache/hugegraph-ai)
`hugegraph-ai` 整合了 [HugeGraph](https://github.com/apache/hugegraph) 与人工智能功能,为开发者构建 AI 驱动的图应用提供全面支持。
## ✨ 核心功能
- **GraphRAG**:利用图增强检索构建智能问答系统
+- **Text2Gremlin**:自然语言到图查询的转换,支持 REST API
- **知识图谱构建**:使用大语言模型从文本自动构建图谱
-- **图机器学习**:集成 20 多种图学习算法(GCN、GAT、GraphSAGE 等)
+- **图机器学习**:集成 21 种图学习算法(GCN、GAT、GraphSAGE 等)
- **Python 客户端**:易于使用的 HugeGraph Python 操作接口
- **AI 智能体**:提供智能图分析与推理能力
+### 🎉 v1.5.0 新特性
+
+- **Text2Gremlin REST API**:通过 REST 端点将自然语言查询转换为 Gremlin 命令
+- **多模型向量支持**:每个图实例可以使用独立的嵌入模型
+- **双语提示支持**:支持英文和中文提示词切换(EN/CN)
+- **半自动 Schema 生成**:从文本数据智能推断 Schema
+- **半自动 Prompt 生成**:上下文感知的提示词模板
+- **增强的 Reranker 支持**:集成 Cohere 和 SiliconFlow 重排序器
+- **LiteLLM 多供应商支持**:统一接口支持 OpenAI、Anthropic、Gemini 等
+
## 🚀 快速开始
> [!NOTE]
-> 如需完整的部署指南和详细示例,请参阅 [hugegraph-llm/README.md](https://github.com/apache/incubator-hugegraph-ai/blob/main/hugegraph-llm/README.md)。
+> 如需完整的部署指南和详细示例,请参阅 [hugegraph-llm/README.md](https://github.com/apache/hugegraph-ai/blob/main/hugegraph-llm/README.md)。
### 环境要求
-- Python 3.9+(建议 hugegraph-llm 使用 3.10+)
-- [uv](https://docs.astral.sh/uv/)(推荐的包管理器)
-- HugeGraph Server 1.3+(建议 1.5+)
+- Python 3.10+(hugegraph-llm 必需)
+- [uv](https://docs.astral.sh/uv/) 0.7+(推荐的包管理器)
+- HugeGraph Server 1.5+(必需)
- Docker(可选,用于容器化部署)
### 方案一:Docker 部署(推荐)
```bash
# 克隆仓库
-git clone https://github.com/apache/incubator-hugegraph-ai.git
-cd incubator-hugegraph-ai
+git clone https://github.com/apache/hugegraph-ai.git
+cd hugegraph-ai
# 设置环境并启动服务
cp docker/env.template docker/.env
@@ -59,8 +68,8 @@ docker-compose -f docker-compose-network.yml up -d
docker run -itd --name=server -p 8080:8080 hugegraph/hugegraph
# 2. 克隆并设置项目
-git clone https://github.com/apache/incubator-hugegraph-ai.git
-cd incubator-hugegraph-ai/hugegraph-llm
+git clone https://github.com/apache/hugegraph-ai.git
+cd hugegraph-ai/hugegraph-llm
# 3. 安装依赖
uv venv && source .venv/bin/activate
@@ -115,21 +124,23 @@ from pyhugegraph.client import PyHugeClient
## 📦 模块
-### [hugegraph-llm](https://github.com/apache/incubator-hugegraph-ai/tree/main/hugegraph-llm) [](https://deepwiki.com/apache/incubator-hugegraph-ai)
+### [hugegraph-llm](https://github.com/apache/hugegraph-ai/tree/main/hugegraph-llm) [](https://deepwiki.com/apache/hugegraph-ai)
用于图应用的大语言模型集成:
- **GraphRAG**:基于图数据的检索增强生成
- **知识图谱构建**:从文本自动构建知识图谱
- **自然语言接口**:使用自然语言查询图
- **AI 智能体**:智能图分析与推理
-### [hugegraph-ml](https://github.com/apache/incubator-hugegraph-ai/tree/main/hugegraph-ml)
-包含 20+ 算法的图机器学习:
-- **节点分类**:GCN、GAT、GraphSAGE、APPNP 等
-- **图分类**:DiffPool、P-GNN 等
-- **图嵌入**:DeepWalk、Node2Vec、GRACE 等
-- **链接预测**:SEAL、GATNE 等
+### [hugegraph-ml](https://github.com/apache/hugegraph-ai/tree/main/hugegraph-ml)
+包含 21 种算法的图机器学习:
+- **节点分类**:GCN、GAT、GraphSAGE、APPNP、AGNN、ARMA、DAGNN、DeeperGCN、GRAND、JKNet、Cluster-GCN
+- **图分类**:DiffPool、GIN
+- **图嵌入**:DGI、BGRL、GRACE
+- **链接预测**:SEAL、P-GNN、GATNE
+- **欺诈检测**:CARE-GNN、BGNN
+- **后处理**:C&S(Correct & Smooth)
-### [hugegraph-python-client](https://github.com/apache/incubator-hugegraph-ai/tree/main/hugegraph-python-client)
+### [hugegraph-python-client](https://github.com/apache/hugegraph-ai/tree/main/hugegraph-python-client)
用于 HugeGraph 操作的 Python 客户端:
- **Schema 管理**:定义顶点/边标签和属性
- **CRUD 操作**:创建、读取、更新、删除图数据
@@ -139,8 +150,8 @@ from pyhugegraph.client import PyHugeClient
## 📚 了解更多
- [项目主页](https://hugegraph.apache.org/docs/quickstart/hugegraph-ai/)
-- [LLM 快速入门指南](https://github.com/apache/incubator-hugegraph-ai/blob/main/hugegraph-llm/quick_start.md)
-- [DeepWiki AI 文档](https://deepwiki.com/apache/incubator-hugegraph-ai)
+- [LLM 快速入门指南](https://github.com/apache/hugegraph-ai/blob/main/hugegraph-llm/quick_start.md)
+- [DeepWiki AI 文档](https://deepwiki.com/apache/hugegraph-ai)
## 🔗 相关项目
@@ -157,16 +168,16 @@ from pyhugegraph.client import PyHugeClient
- 提交 PR 前运行 `./style/code_format_and_analysis.sh`
- 报告错误前检查现有问题
-[](https://github.com/apache/incubator-hugegraph-ai/graphs/contributors)
+[](https://github.com/apache/hugegraph-ai/graphs/contributors)
## 📄 许可证
-hugegraph-ai 采用 [Apache 2.0 许可证](https://github.com/apache/incubator-hugegraph-ai/blob/main/LICENSE)。
+hugegraph-ai 采用 [Apache 2.0 许可证](https://github.com/apache/hugegraph-ai/blob/main/LICENSE)。
## 📞 联系我们
-- **GitHub Issues**:[报告错误或请求功能](https://github.com/apache/incubator-hugegraph-ai/issues)(响应最快)
+- **GitHub Issues**:[报告错误或请求功能](https://github.com/apache/hugegraph-ai/issues)(响应最快)
- **电子邮件**:[dev@hugegraph.apache.org](mailto:dev@hugegraph.apache.org)([需要订阅](https://hugegraph.apache.org/docs/contribution-guidelines/subscribe/))
-- **微信**:关注“Apache HugeGraph”官方公众号
+- **微信**:关注 "Apache HugeGraph" 微信公众号
diff --git a/content/cn/docs/quickstart/hugegraph-ai/config-reference.md b/content/cn/docs/quickstart/hugegraph-ai/config-reference.md
new file mode 100644
index 000000000..4172ae12e
--- /dev/null
+++ b/content/cn/docs/quickstart/hugegraph-ai/config-reference.md
@@ -0,0 +1,396 @@
+---
+title: "配置参考"
+linkTitle: "配置参考"
+weight: 4
+---
+
+本文档提供 HugeGraph-LLM 所有配置选项的完整参考。
+
+## 配置文件
+
+- **环境文件**:`.env`(从模板创建或自动生成)
+- **提示词配置**:`src/hugegraph_llm/resources/demo/config_prompt.yaml`
+
+> [!TIP]
+> 运行 `python -m hugegraph_llm.config.generate --update` 可自动生成或更新带有默认值的配置文件。
+
+## 环境变量概览
+
+### 1. 语言和模型类型选择
+
+```bash
+# 提示词语言(影响系统提示词和生成文本)
+LANGUAGE=EN # 选项: EN | CN
+
+# 不同任务的 LLM 类型
+CHAT_LLM_TYPE=openai # 对话/RAG: openai | litellm | ollama/local
+EXTRACT_LLM_TYPE=openai # 实体抽取: openai | litellm | ollama/local
+TEXT2GQL_LLM_TYPE=openai # 文本转 Gremlin: openai | litellm | ollama/local
+
+# 嵌入模型类型
+EMBEDDING_TYPE=openai # 选项: openai | litellm | ollama/local
+
+# Reranker 类型(可选)
+RERANKER_TYPE= # 选项: cohere | siliconflow | (留空表示无)
+```
+
+### 2. OpenAI 配置
+
+每个 LLM 任务(chat、extract、text2gql)都有独立配置:
+
+#### 2.1 Chat LLM(RAG 答案生成)
+
+```bash
+OPENAI_CHAT_API_BASE=https://api.openai.com/v1
+OPENAI_CHAT_API_KEY=sk-your-api-key-here
+OPENAI_CHAT_LANGUAGE_MODEL=gpt-4o-mini
+OPENAI_CHAT_TOKENS=8192 # 对话响应的最大 tokens
+```
+
+#### 2.2 Extract LLM(实体和关系抽取)
+
+```bash
+OPENAI_EXTRACT_API_BASE=https://api.openai.com/v1
+OPENAI_EXTRACT_API_KEY=sk-your-api-key-here
+OPENAI_EXTRACT_LANGUAGE_MODEL=gpt-4o-mini
+OPENAI_EXTRACT_TOKENS=1024 # 抽取任务的最大 tokens
+```
+
+#### 2.3 Text2GQL LLM(自然语言转 Gremlin)
+
+```bash
+OPENAI_TEXT2GQL_API_BASE=https://api.openai.com/v1
+OPENAI_TEXT2GQL_API_KEY=sk-your-api-key-here
+OPENAI_TEXT2GQL_LANGUAGE_MODEL=gpt-4o-mini
+OPENAI_TEXT2GQL_TOKENS=4096 # 查询生成的最大 tokens
+```
+
+#### 2.4 嵌入模型
+
+```bash
+OPENAI_EMBEDDING_API_BASE=https://api.openai.com/v1
+OPENAI_EMBEDDING_API_KEY=sk-your-api-key-here
+OPENAI_EMBEDDING_MODEL=text-embedding-3-small
+```
+
+> [!NOTE]
+> 您可以为每个任务使用不同的 API 密钥/端点,以优化成本或使用专用模型。
+
+### 3. LiteLLM 配置(多供应商支持)
+
+LiteLLM 支持统一访问 100 多个 LLM 供应商(OpenAI、Anthropic、Google、Azure 等)。
+
+#### 3.1 Chat LLM
+
+```bash
+LITELLM_CHAT_API_BASE=http://localhost:4000 # LiteLLM 代理 URL
+LITELLM_CHAT_API_KEY=sk-litellm-key # LiteLLM API 密钥
+LITELLM_CHAT_LANGUAGE_MODEL=anthropic/claude-3-5-sonnet-20241022
+LITELLM_CHAT_TOKENS=8192
+```
+
+#### 3.2 Extract LLM
+
+```bash
+LITELLM_EXTRACT_API_BASE=http://localhost:4000
+LITELLM_EXTRACT_API_KEY=sk-litellm-key
+LITELLM_EXTRACT_LANGUAGE_MODEL=openai/gpt-4o-mini
+LITELLM_EXTRACT_TOKENS=256
+```
+
+#### 3.3 Text2GQL LLM
+
+```bash
+LITELLM_TEXT2GQL_API_BASE=http://localhost:4000
+LITELLM_TEXT2GQL_API_KEY=sk-litellm-key
+LITELLM_TEXT2GQL_LANGUAGE_MODEL=openai/gpt-4o-mini
+LITELLM_TEXT2GQL_TOKENS=4096
+```
+
+#### 3.4 嵌入模型
+
+```bash
+LITELLM_EMBEDDING_API_BASE=http://localhost:4000
+LITELLM_EMBEDDING_API_KEY=sk-litellm-key
+LITELLM_EMBEDDING_MODEL=openai/text-embedding-3-small
+```
+
+**模型格式**: `供应商/模型名称`
+
+示例:
+- `openai/gpt-4o-mini`
+- `anthropic/claude-3-5-sonnet-20241022`
+- `google/gemini-2.0-flash-exp`
+- `azure/gpt-4`
+
+完整列表请参阅 [LiteLLM Providers](https://docs.litellm.ai/docs/providers)。
+
+### 4. Ollama 配置(本地部署)
+
+使用 Ollama 运行本地 LLM,确保隐私和成本控制。
+
+#### 4.1 Chat LLM
+
+```bash
+OLLAMA_CHAT_HOST=127.0.0.1
+OLLAMA_CHAT_PORT=11434
+OLLAMA_CHAT_LANGUAGE_MODEL=llama3.1:8b
+```
+
+#### 4.2 Extract LLM
+
+```bash
+OLLAMA_EXTRACT_HOST=127.0.0.1
+OLLAMA_EXTRACT_PORT=11434
+OLLAMA_EXTRACT_LANGUAGE_MODEL=llama3.1:8b
+```
+
+#### 4.3 Text2GQL LLM
+
+```bash
+OLLAMA_TEXT2GQL_HOST=127.0.0.1
+OLLAMA_TEXT2GQL_PORT=11434
+OLLAMA_TEXT2GQL_LANGUAGE_MODEL=qwen2.5-coder:7b
+```
+
+#### 4.4 嵌入模型
+
+```bash
+OLLAMA_EMBEDDING_HOST=127.0.0.1
+OLLAMA_EMBEDDING_PORT=11434
+OLLAMA_EMBEDDING_MODEL=nomic-embed-text
+```
+
+> [!TIP]
+> 下载模型:`ollama pull llama3.1:8b` 或 `ollama pull qwen2.5-coder:7b`
+
+### 5. Reranker 配置
+
+Reranker 通过根据相关性重新排序检索结果来提高 RAG 准确性。
+
+#### 5.1 Cohere Reranker
+
+```bash
+RERANKER_TYPE=cohere
+COHERE_BASE_URL=https://api.cohere.com/v1/rerank
+RERANKER_API_KEY=your-cohere-api-key
+RERANKER_MODEL=rerank-english-v3.0
+```
+
+可用模型:
+- `rerank-english-v3.0`(英文)
+- `rerank-multilingual-v3.0`(100+ 种语言)
+
+#### 5.2 SiliconFlow Reranker
+
+```bash
+RERANKER_TYPE=siliconflow
+RERANKER_API_KEY=your-siliconflow-api-key
+RERANKER_MODEL=BAAI/bge-reranker-v2-m3
+```
+
+### 6. HugeGraph 连接
+
+配置与 HugeGraph 服务器实例的连接。
+
+```bash
+# 服务器连接
+GRAPH_IP=127.0.0.1
+GRAPH_PORT=8080
+GRAPH_NAME=hugegraph # 图实例名称
+GRAPH_USER=admin # 用户名
+GRAPH_PWD=admin-password # 密码
+GRAPH_SPACE= # 图空间(可选,用于多租户)
+```
+
+### 7. 查询参数
+
+控制图遍历行为和结果限制。
+
+```bash
+# 图遍历限制
+MAX_GRAPH_PATH=10 # 图查询的最大路径深度
+MAX_GRAPH_ITEMS=30 # 从图中检索的最大项数
+EDGE_LIMIT_PRE_LABEL=8 # 每个标签类型的最大边数
+
+# 属性过滤
+LIMIT_PROPERTY=False # 限制结果中的属性(True/False)
+```
+
+### 8. 向量搜索配置
+
+配置向量相似性搜索参数。
+
+```bash
+# 向量搜索阈值
+VECTOR_DIS_THRESHOLD=0.9 # 最小余弦相似度(0-1,越高越严格)
+TOPK_PER_KEYWORD=1 # 每个提取关键词的 Top-K 结果
+```
+
+### 9. Rerank 配置
+
+```bash
+# Rerank 结果限制
+TOPK_RETURN_RESULTS=20 # 重排序后的 top 结果数
+```
+
+## 配置优先级
+
+系统按以下顺序加载配置(后面的来源覆盖前面的):
+
+1. **默认值**(在 `*_config.py` 文件中)
+2. **环境变量**(来自 `.env` 文件)
+3. **运行时更新**(通过 Web UI 或 API 调用)
+
+## 配置示例
+
+### 最小配置(OpenAI)
+
+```bash
+# 语言
+LANGUAGE=EN
+
+# LLM 类型
+CHAT_LLM_TYPE=openai
+EXTRACT_LLM_TYPE=openai
+TEXT2GQL_LLM_TYPE=openai
+EMBEDDING_TYPE=openai
+
+# OpenAI 凭据(所有任务共用一个密钥)
+OPENAI_API_BASE=https://api.openai.com/v1
+OPENAI_API_KEY=sk-your-api-key-here
+OPENAI_LANGUAGE_MODEL=gpt-4o-mini
+OPENAI_EMBEDDING_MODEL=text-embedding-3-small
+
+# HugeGraph 连接
+GRAPH_IP=127.0.0.1
+GRAPH_PORT=8080
+GRAPH_NAME=hugegraph
+GRAPH_USER=admin
+GRAPH_PWD=admin
+```
+
+### 生产环境配置(LiteLLM + Reranker)
+
+```bash
+# 双语支持
+LANGUAGE=EN
+
+# 灵活使用 LiteLLM
+CHAT_LLM_TYPE=litellm
+EXTRACT_LLM_TYPE=litellm
+TEXT2GQL_LLM_TYPE=litellm
+EMBEDDING_TYPE=litellm
+
+# LiteLLM 代理
+LITELLM_CHAT_API_BASE=http://localhost:4000
+LITELLM_CHAT_API_KEY=sk-litellm-master-key
+LITELLM_CHAT_LANGUAGE_MODEL=anthropic/claude-3-5-sonnet-20241022
+LITELLM_CHAT_TOKENS=8192
+
+LITELLM_EXTRACT_API_BASE=http://localhost:4000
+LITELLM_EXTRACT_API_KEY=sk-litellm-master-key
+LITELLM_EXTRACT_LANGUAGE_MODEL=openai/gpt-4o-mini
+LITELLM_EXTRACT_TOKENS=256
+
+LITELLM_TEXT2GQL_API_BASE=http://localhost:4000
+LITELLM_TEXT2GQL_API_KEY=sk-litellm-master-key
+LITELLM_TEXT2GQL_LANGUAGE_MODEL=openai/gpt-4o-mini
+LITELLM_TEXT2GQL_TOKENS=4096
+
+LITELLM_EMBEDDING_API_BASE=http://localhost:4000
+LITELLM_EMBEDDING_API_KEY=sk-litellm-master-key
+LITELLM_EMBEDDING_MODEL=openai/text-embedding-3-small
+
+# Cohere Reranker 提高准确性
+RERANKER_TYPE=cohere
+COHERE_BASE_URL=https://api.cohere.com/v1/rerank
+RERANKER_API_KEY=your-cohere-key
+RERANKER_MODEL=rerank-multilingual-v3.0
+
+# 带认证的 HugeGraph
+GRAPH_IP=prod-hugegraph.example.com
+GRAPH_PORT=8080
+GRAPH_NAME=production_graph
+GRAPH_USER=rag_user
+GRAPH_PWD=secure-password
+GRAPH_SPACE=prod_space
+
+# 优化的查询参数
+MAX_GRAPH_PATH=15
+MAX_GRAPH_ITEMS=50
+VECTOR_DIS_THRESHOLD=0.85
+TOPK_RETURN_RESULTS=30
+```
+
+### 本地/离线配置(Ollama)
+
+```bash
+# 语言
+LANGUAGE=EN
+
+# 全部通过 Ollama 使用本地模型
+CHAT_LLM_TYPE=ollama/local
+EXTRACT_LLM_TYPE=ollama/local
+TEXT2GQL_LLM_TYPE=ollama/local
+EMBEDDING_TYPE=ollama/local
+
+# Ollama 端点
+OLLAMA_CHAT_HOST=127.0.0.1
+OLLAMA_CHAT_PORT=11434
+OLLAMA_CHAT_LANGUAGE_MODEL=llama3.1:8b
+
+OLLAMA_EXTRACT_HOST=127.0.0.1
+OLLAMA_EXTRACT_PORT=11434
+OLLAMA_EXTRACT_LANGUAGE_MODEL=llama3.1:8b
+
+OLLAMA_TEXT2GQL_HOST=127.0.0.1
+OLLAMA_TEXT2GQL_PORT=11434
+OLLAMA_TEXT2GQL_LANGUAGE_MODEL=qwen2.5-coder:7b
+
+OLLAMA_EMBEDDING_HOST=127.0.0.1
+OLLAMA_EMBEDDING_PORT=11434
+OLLAMA_EMBEDDING_MODEL=nomic-embed-text
+
+# 离线环境不使用 reranker
+RERANKER_TYPE=
+
+# 本地 HugeGraph
+GRAPH_IP=127.0.0.1
+GRAPH_PORT=8080
+GRAPH_NAME=hugegraph
+GRAPH_USER=admin
+GRAPH_PWD=admin
+```
+
+## 配置验证
+
+修改 `.env` 后,验证配置:
+
+1. **通过 Web UI**:访问 `http://localhost:8001` 并检查设置面板
+2. **通过 Python**:
+```python
+from hugegraph_llm.config import settings
+print(settings.llm_config)
+print(settings.hugegraph_config)
+```
+3. **通过 REST API**:
+```bash
+curl http://localhost:8001/config
+```
+
+## 故障排除
+
+| 问题 | 解决方案 |
+|------|---------|
+| "API key not found" | 检查 `.env` 中的 `*_API_KEY` 是否正确设置 |
+| "Connection refused" | 验证 `GRAPH_IP` 和 `GRAPH_PORT` 是否正确 |
+| "Model not found" | 对于 Ollama:运行 `ollama pull <模型名称>` |
+| "Rate limit exceeded" | 减少 `MAX_GRAPH_ITEMS` 或使用不同的 API 密钥 |
+| "Embedding dimension mismatch" | 删除现有向量并使用正确模型重建 |
+
+## 另见
+
+- [HugeGraph-LLM 概述](./hugegraph-llm.md)
+- [REST API 参考](./rest-api.md)
+- [快速入门指南](./quick_start.md)
diff --git a/content/cn/docs/quickstart/hugegraph-ai/hugegraph-llm.md b/content/cn/docs/quickstart/hugegraph-ai/hugegraph-llm.md
index b353a8fba..d376b3de1 100644
--- a/content/cn/docs/quickstart/hugegraph-ai/hugegraph-llm.md
+++ b/content/cn/docs/quickstart/hugegraph-ai/hugegraph-llm.md
@@ -4,11 +4,11 @@ linkTitle: "HugeGraph-LLM"
weight: 1
---
-> 本文为中文翻译版本,内容基于英文版进行,我们欢迎您随时提出修改建议。我们推荐您阅读 [AI 仓库 README](https://github.com/apache/incubator-hugegraph-ai/tree/main/hugegraph-llm#readme) 以获取最新信息,官网会定期同步更新。
+> 本文为中文翻译版本,内容基于英文版进行,我们欢迎您随时提出修改建议。我们推荐您阅读 [AI 仓库 README](https://github.com/apache/hugegraph-ai/tree/main/hugegraph-llm#readme) 以获取最新信息,官网会定期同步更新。
> **连接图数据库与大语言模型的桥梁**
-> AI 总结项目文档:[](https://deepwiki.com/apache/incubator-hugegraph-ai)
+> AI 总结项目文档:[](https://deepwiki.com/apache/hugegraph-ai)
## 🎯 概述
@@ -19,7 +19,7 @@ HugeGraph-LLM 是一个功能强大的工具包,它融合了图数据库和大
- 🗣️ **自然语言查询**:通过自然语言(Gremlin/Cypher)操作图数据库。
- 🔍 **图增强 RAG**:借助知识图谱提升问答准确性(GraphRAG 和 Graph Agent)。
-更多源码文档,请访问我们的 [DeepWiki](https://deepwiki.com/apache/incubator-hugegraph-ai) 页面(推荐)。
+更多源码文档,请访问我们的 [DeepWiki](https://deepwiki.com/apache/hugegraph-ai) 页面(推荐)。
## 📋 环境要求
@@ -90,8 +90,8 @@ docker run -itd --name=server -p 8080:8080 hugegraph/hugegraph
curl -LsSf https://astral.sh/uv/install.sh | sh
# 3. 克隆并设置项目
-git clone https://github.com/apache/incubator-hugegraph-ai.git
-cd incubator-hugegraph-ai/hugegraph-llm
+git clone https://github.com/apache/hugegraph-ai.git
+cd hugegraph-ai/hugegraph-llm
# 4. 创建虚拟环境并安装依赖
uv venv && source .venv/bin/activate
@@ -116,7 +116,7 @@ python -m hugegraph_llm.config.generate --update
```
> [!TIP]
-> 查看我们的[快速入门指南](https://github.com/apache/incubator-hugegraph-ai/blob/main/hugegraph-llm/quick_start.md)获取详细用法示例和查询逻辑解释。
+> 查看我们的[快速入门指南](https://github.com/apache/hugegraph-ai/blob/main/hugegraph-llm/quick_start.md)获取详细用法示例和查询逻辑解释。
## 💡 用法示例
@@ -131,7 +131,7 @@ python -m hugegraph_llm.config.generate --update
- **文件**:上传 TXT 或 DOCX 文件(支持多选)
**Schema 配置:**
-- **自定义 Schema**:遵循我们[模板](https://github.com/apache/incubator-hugegraph-ai/blob/aff3bbe25fa91c3414947a196131be812c20ef11/hugegraph-llm/src/hugegraph_llm/config/config_data.py#L125)的 JSON 格式
+- **自定义 Schema**:遵循我们[模板](https://github.com/apache/hugegraph-ai/blob/aff3bbe25fa91c3414947a196131be812c20ef11/hugegraph-llm/src/hugegraph_llm/config/config_data.py#L125)的 JSON 格式
- **HugeGraph Schema**:使用现有图实例的 Schema(例如,“hugegraph”)

@@ -214,7 +214,7 @@ graph TD
## 🔧 配置
-运行演示后,将自动生成配置文件:
+运行演示后,将自动生成配置文件:
- **环境**:`hugegraph-llm/.env`
- **提示**:`hugegraph-llm/src/hugegraph_llm/resources/demo/config_prompt.yaml`
@@ -222,7 +222,80 @@ graph TD
> [!NOTE]
> 使用 Web 界面时,配置更改会自动保存。对于手动更改,刷新页面即可加载更新。
-**LLM 提供商支持**:本项目使用 [LiteLLM](https://docs.litellm.ai/docs/providers) 实现多提供商 LLM 支持。
+### LLM 提供商配置
+
+本项目使用 [LiteLLM](https://docs.litellm.ai/docs/providers) 实现多提供商 LLM 支持,可统一访问 OpenAI、Anthropic、Google、Cohere 以及 100 多个其他提供商。
+
+#### 方案一:直接 LLM 连接(OpenAI、Ollama)
+
+```bash
+# .env 配置
+chat_llm_type=openai # 或 ollama/local
+openai_api_key=sk-xxx
+openai_api_base=https://api.openai.com/v1
+openai_language_model=gpt-4o-mini
+openai_max_tokens=4096
+```
+
+#### 方案二:LiteLLM 多提供商支持
+
+LiteLLM 作为多个 LLM 提供商的统一代理:
+
+```bash
+# .env 配置
+chat_llm_type=litellm
+extract_llm_type=litellm
+text2gql_llm_type=litellm
+
+# LiteLLM 设置
+litellm_api_base=http://localhost:4000 # LiteLLM 代理服务器
+litellm_api_key=sk-1234 # LiteLLM API 密钥
+
+# 模型选择(提供商/模型格式)
+litellm_language_model=anthropic/claude-3-5-sonnet-20241022
+litellm_max_tokens=4096
+```
+
+**支持的提供商**:OpenAI、Anthropic、Google(Gemini)、Azure、Cohere、Bedrock、Vertex AI、Hugging Face 等。
+
+完整提供商列表和配置详情,请访问 [LiteLLM Providers](https://docs.litellm.ai/docs/providers)。
+
+### Reranker 配置
+
+Reranker 通过重新排序检索结果来提高 RAG 准确性。支持的提供商:
+
+```bash
+# Cohere Reranker
+reranker_type=cohere
+cohere_api_key=your-cohere-key
+cohere_rerank_model=rerank-english-v3.0
+
+# SiliconFlow Reranker
+reranker_type=siliconflow
+siliconflow_api_key=your-siliconflow-key
+siliconflow_rerank_model=BAAI/bge-reranker-v2-m3
+```
+
+### Text2Gremlin 配置
+
+将自然语言转换为 Gremlin 查询:
+
+```python
+from hugegraph_llm.operators.graph_rag_task import Text2GremlinPipeline
+
+# 初始化工作流
+text2gremlin = Text2GremlinPipeline()
+
+# 生成 Gremlin 查询
+result = (
+ text2gremlin
+ .query_to_gremlin(query="查找所有由 Francis Ford Coppola 执导的电影")
+ .execute_gremlin_query()
+ .run()
+)
+```
+
+**REST API 端点**:有关 HTTP 端点详情,请参阅 [REST API 文档](./rest-api.md)。
## 📚 其他资源
diff --git a/content/cn/docs/quickstart/hugegraph-ai/hugegraph-ml.md b/content/cn/docs/quickstart/hugegraph-ai/hugegraph-ml.md
new file mode 100644
index 000000000..baf0481f0
--- /dev/null
+++ b/content/cn/docs/quickstart/hugegraph-ai/hugegraph-ml.md
@@ -0,0 +1,289 @@
+---
+title: "HugeGraph-ML"
+linkTitle: "HugeGraph-ML"
+weight: 2
+---
+
+HugeGraph-ML 将 HugeGraph 与流行的图学习库集成,支持直接在图数据上进行端到端的机器学习工作流。
+
+## 概述
+
+`hugegraph-ml` 提供了统一接口,用于将图神经网络和机器学习算法应用于存储在 HugeGraph 中的数据。它通过无缝转换 HugeGraph 数据到主流 ML 框架兼容格式,消除了复杂的数据导出/导入流程。
+
+### 核心功能
+
+- **直接 HugeGraph 集成**:无需手动导出即可直接从 HugeGraph 查询图数据
+- **21 种算法实现**:全面覆盖节点分类、图分类、嵌入和链接预测
+- **DGL 后端**:利用深度图库(DGL)进行高效训练
+- **端到端工作流**:从数据加载到模型训练和评估
+- **模块化任务**:可复用的常见 ML 场景任务抽象
+
+## 环境要求
+
+- **Python**:3.9+(独立模块)
+- **HugeGraph Server**:1.0+(推荐:1.5+)
+- **UV 包管理器**:0.7+(用于依赖管理)
+
+## 安装
+
+### 1. 启动 HugeGraph Server
+
+```bash
+# 方案一:Docker(推荐)
+docker run -itd --name=hugegraph -p 8080:8080 hugegraph/hugegraph
+
+# 方案二:二进制包
+# 参见 https://hugegraph.apache.org/docs/download/download/
+```
+
+### 2. 克隆并设置
+
+```bash
+git clone https://github.com/apache/hugegraph-ai.git
+cd hugegraph-ai/hugegraph-ml
+```
+
+### 3. 安装依赖
+
+```bash
+# uv sync 自动创建 .venv 并安装所有依赖
+uv sync
+
+# 激活虚拟环境
+source .venv/bin/activate
+```
+
+### 4. 导航到源代码目录
+
+```bash
+cd ./src
+```
+
+> [!NOTE]
+> 所有示例均假定您在已激活的虚拟环境中。
+
+## 已实现算法
+
+HugeGraph-ML 目前实现了跨多个类别的 **21 种图机器学习算法**:
+
+### 节点分类(11 种算法)
+
+基于网络结构和特征预测图节点的标签。
+
+| 算法 | 论文 | 描述 |
+|-----|------|------|
+| **GCN** | [Kipf & Welling, 2017](https://arxiv.org/abs/1609.02907) | 图卷积网络 |
+| **GAT** | [Veličković et al., 2018](https://arxiv.org/abs/1710.10903) | 图注意力网络 |
+| **GraphSAGE** | [Hamilton et al., 2017](https://arxiv.org/abs/1706.02216) | 归纳式表示学习 |
+| **APPNP** | [Klicpera et al., 2019](https://arxiv.org/abs/1810.05997) | 个性化 PageRank 传播 |
+| **AGNN** | [Thekumparampil et al., 2018](https://arxiv.org/abs/1803.03735) | 基于注意力的 GNN |
+| **ARMA** | [Bianchi et al., 2019](https://arxiv.org/abs/1901.01343) | 自回归移动平均滤波器 |
+| **DAGNN** | [Liu et al., 2020](https://arxiv.org/abs/2007.09296) | 深度自适应图神经网络 |
+| **DeeperGCN** | [Li et al., 2020](https://arxiv.org/abs/2006.07739) | 非常深的 GCN 架构 |
+| **GRAND** | [Feng et al., 2020](https://arxiv.org/abs/2005.11079) | 图随机神经网络 |
+| **JKNet** | [Xu et al., 2018](https://arxiv.org/abs/1806.03536) | 跳跃知识网络 |
+| **Cluster-GCN** | [Chiang et al., 2019](https://arxiv.org/abs/1905.07953) | 通过聚类实现可扩展 GCN 训练 |
+
+### 图分类(2 种算法)
+
+基于结构和节点特征对整个图进行分类。
+
+| 算法 | 论文 | 描述 |
+|-----|------|------|
+| **DiffPool** | [Ying et al., 2018](https://arxiv.org/abs/1806.08804) | 可微分图池化 |
+| **GIN** | [Xu et al., 2019](https://arxiv.org/abs/1810.00826) | 图同构网络 |
+
+### 图嵌入(3 种算法)
+
+学习用于下游任务的无监督节点表示。
+
+| 算法 | 论文 | 描述 |
+|-----|------|------|
+| **DGI** | [Veličković et al., 2019](https://arxiv.org/abs/1809.10341) | 深度图信息最大化(对比学习) |
+| **BGRL** | [Thakoor et al., 2021](https://arxiv.org/abs/2102.06514) | 自举图表示学习 |
+| **GRACE** | [Zhu et al., 2020](https://arxiv.org/abs/2006.04131) | 图对比学习 |
+
+### 链接预测(3 种算法)
+
+预测图中缺失或未来的连接。
+
+| 算法 | 论文 | 描述 |
+|-----|------|------|
+| **SEAL** | [Zhang & Chen, 2018](https://arxiv.org/abs/1802.09691) | 子图提取和标注 |
+| **P-GNN** | [You et al., 2019](http://proceedings.mlr.press/v97/you19b/you19b.pdf) | 位置感知 GNN |
+| **GATNE** | [Cen et al., 2019](https://arxiv.org/abs/1905.01669) | 属性多重异构网络嵌入 |
+
+### 欺诈检测(2 种算法)
+
+检测图中的异常节点(例如欺诈账户)。
+
+| 算法 | 论文 | 描述 |
+|-----|------|------|
+| **CARE-GNN** | [Dou et al., 2020](https://arxiv.org/abs/2008.08692) | 抗伪装 GNN |
+| **BGNN** | [Ivanov & Prokhorenkova, 2021](https://arxiv.org/abs/2101.08543) | 结合梯度提升的图神经网络(Boost then Convolve) |
+
+### 后处理(1 种算法)
+
+通过标签传播改进预测。
+
+| 算法 | 论文 | 描述 |
+|-----|------|------|
+| **C&S** | [Huang et al., 2020](https://arxiv.org/abs/2010.13993) | 校正与平滑(预测优化) |
+
+## 使用示例
+
+### 示例 1:使用 DGI 进行节点嵌入
+
+使用深度图信息最大化(DGI)在 Cora 数据集上进行无监督节点嵌入。
+
+#### 步骤 1:导入数据集(如需)
+
+```python
+from hugegraph_ml.utils.dgl2hugegraph_utils import import_graph_from_dgl
+
+# 从 DGL 导入 Cora 数据集到 HugeGraph
+import_graph_from_dgl("cora")
+```
+
+#### 步骤 2:转换图数据
+
+```python
+from hugegraph_ml.data.hugegraph2dgl import HugeGraph2DGL
+
+# 将 HugeGraph 数据转换为 DGL 格式
+hg2d = HugeGraph2DGL()
+graph = hg2d.convert_graph(vertex_label="CORA_vertex", edge_label="CORA_edge")
+```
+
+#### 步骤 3:初始化模型
+
+```python
+from hugegraph_ml.models.dgi import DGI
+
+# 创建 DGI 模型
+model = DGI(n_in_feats=graph.ndata["feat"].shape[1])
+```
+
+#### 步骤 4:训练并生成嵌入
+
+```python
+from hugegraph_ml.tasks.node_embed import NodeEmbed
+
+# 训练模型并生成节点嵌入
+node_embed_task = NodeEmbed(graph=graph, model=model)
+embedded_graph = node_embed_task.train_and_embed(
+ add_self_loop=True,
+ n_epochs=300,
+ patience=30
+)
+```
+
+#### 步骤 5:下游任务(节点分类)
+
+```python
+from hugegraph_ml.models.mlp import MLPClassifier
+from hugegraph_ml.tasks.node_classify import NodeClassify
+
+# 使用嵌入进行节点分类
+model = MLPClassifier(
+ n_in_feat=embedded_graph.ndata["feat"].shape[1],
+ n_out_feat=embedded_graph.ndata["label"].unique().shape[0]
+)
+node_clf_task = NodeClassify(graph=embedded_graph, model=model)
+node_clf_task.train(lr=1e-3, n_epochs=400, patience=40)
+print(node_clf_task.evaluate())
+```
+
+**预期输出:**
+```python
+{'accuracy': 0.82, 'loss': 0.5714246034622192}
+```
+
+**完整示例**:参见 [dgi_example.py](https://github.com/apache/hugegraph-ai/blob/main/hugegraph-ml/src/hugegraph_ml/examples/dgi_example.py)
+
+### 示例 2:使用 GRAND 进行节点分类
+
+使用 GRAND 模型直接对节点进行分类(无需单独的嵌入步骤)。
+
+```python
+from hugegraph_ml.data.hugegraph2dgl import HugeGraph2DGL
+from hugegraph_ml.models.grand import GRAND
+from hugegraph_ml.tasks.node_classify import NodeClassify
+
+# 加载图
+hg2d = HugeGraph2DGL()
+graph = hg2d.convert_graph(vertex_label="CORA_vertex", edge_label="CORA_edge")
+
+# 初始化 GRAND 模型
+model = GRAND(
+ n_in_feats=graph.ndata["feat"].shape[1],
+ n_out_feats=graph.ndata["label"].unique().shape[0]
+)
+
+# 训练和评估
+node_clf_task = NodeClassify(graph=graph, model=model)
+node_clf_task.train(lr=1e-2, n_epochs=1500, patience=100)
+print(node_clf_task.evaluate())
+```
+
+**完整示例**:参见 [grand_example.py](https://github.com/apache/hugegraph-ai/blob/main/hugegraph-ml/src/hugegraph_ml/examples/grand_example.py)
+
+## 核心组件
+
+### HugeGraph2DGL 转换器
+
+无缝将 HugeGraph 数据转换为 DGL 图格式:
+
+```python
+from hugegraph_ml.data.hugegraph2dgl import HugeGraph2DGL
+
+hg2d = HugeGraph2DGL()
+graph = hg2d.convert_graph(
+ vertex_label="person", # 要提取的顶点标签
+ edge_label="knows", # 要提取的边标签
+ directed=False # 图的方向性
+)
+```
+
+### 任务抽象
+
+用于常见 ML 工作流的可复用任务对象:
+
+| 任务 | 类 | 用途 |
+|-----|-----|------|
+| 节点嵌入 | `NodeEmbed` | 生成无监督节点嵌入 |
+| 节点分类 | `NodeClassify` | 预测节点标签 |
+| 图分类 | `GraphClassify` | 预测图级标签 |
+| 链接预测 | `LinkPredict` | 预测缺失边 |
+
+## 最佳实践
+
+1. **从小数据集开始**:在扩展之前先在小图(例如 Cora、Citeseer)上测试您的流程
+2. **使用早停**:设置 `patience` 参数以避免过拟合
+3. **调整超参数**:根据数据集大小调整学习率、隐藏维度和周期数
+4. **监控 GPU 内存**:大图可能需要批量训练(例如 Cluster-GCN)
+5. **验证 Schema**:确保顶点/边标签与您的 HugeGraph schema 匹配
+
+## 故障排除
+
+| 问题 | 解决方案 |
+|-----|---------|
+| 连接 HugeGraph "Connection refused" | 验证服务器是否在 8080 端口运行 |
+| CUDA 内存不足 | 减少批大小或使用仅 CPU 模式 |
+| 模型收敛问题 | 尝试不同的学习率(1e-2、1e-3、1e-4) |
+| DGL 的 ImportError | 运行 `uv sync` 重新安装依赖 |
+
+## 贡献
+
+添加新算法:
+
+1. 在 `src/hugegraph_ml/models/your_model.py` 创建模型文件
+2. 继承基础模型类并实现 `forward()` 方法
+3. 在 `src/hugegraph_ml/examples/` 添加示例脚本
+4. 更新此文档并添加算法详情
+
+## 另见
+
+- [HugeGraph-AI 概述](../_index.md) - 完整 AI 生态系统
+- [HugeGraph-LLM](./hugegraph-llm.md) - RAG 和知识图谱构建
+- [GitHub 仓库](https://github.com/apache/hugegraph-ai/tree/main/hugegraph-ml) - 源代码和示例
diff --git a/content/cn/docs/quickstart/hugegraph-ai/quick_start.md b/content/cn/docs/quickstart/hugegraph-ai/quick_start.md
index 6d8d22f90..da148f7e7 100644
--- a/content/cn/docs/quickstart/hugegraph-ai/quick_start.md
+++ b/content/cn/docs/quickstart/hugegraph-ai/quick_start.md
@@ -190,3 +190,63 @@ graph TD;
# 5. 图工具
输入 Gremlin 查询以执行相应操作。
+
+# 6. 语言切换 (v1.5.0+)
+
+HugeGraph-LLM 支持双语提示词,以提高跨语言的准确性。
+
+### 在英文和中文之间切换
+
+系统语言影响:
+- **系统提示词**:LLM 使用的内部提示词
+- **关键词提取**:特定语言的提取逻辑
+- **答案生成**:响应格式和风格
+
+#### 配置方法一:环境变量
+
+编辑您的 `.env` 文件:
+
+```bash
+# 英文提示词(默认)
+LANGUAGE=EN
+
+# 中文提示词
+LANGUAGE=CN
+```
+
+更改语言设置后重启服务。
+
+#### 配置方法二:Web UI(动态)
+
+如果您的部署中可用,使用 Web UI 中的设置面板切换语言,无需重启:
+
+1. 导航到**设置**或**配置**选项卡
+2. 选择**语言**:`EN` 或 `CN`
+3. 点击**保存** - 更改立即生效
+
+#### 特定语言的行为
+
+| 语言 | 关键词提取 | 答案风格 | 使用场景 |
+|-----|-----------|---------|---------|
+| `EN` | 英文 NLP 模型 | 专业、简洁 | 国际用户、英文文档 |
+| `CN` | 中文 NLP 模型 | 自然的中文表达 | 中文用户、中文文档 |
+
+> [!TIP]
+> 将 `LANGUAGE` 设置与您的主要文档语言匹配,以获得最佳 RAG 准确性。
+
+### REST API 语言覆盖
+
+使用 REST API 时,您可以为每个请求指定自定义提示词,以覆盖默认语言设置:
+
+```bash
+curl -X POST http://localhost:8001/rag \
+ -H "Content-Type: application/json" \
+ -d '{
+ "query": "告诉我关于阿尔·帕西诺的信息",
+ "graph_only": true,
+ "keywords_extract_prompt": "请从以下文本中提取关键实体...",
+ "answer_prompt": "请根据以下上下文回答问题..."
+ }'
+```
+
+完整参数详情请参阅 [REST API 参考](./rest-api.md)。
diff --git a/content/cn/docs/quickstart/hugegraph-ai/rest-api.md b/content/cn/docs/quickstart/hugegraph-ai/rest-api.md
new file mode 100644
index 000000000..349ff4c06
--- /dev/null
+++ b/content/cn/docs/quickstart/hugegraph-ai/rest-api.md
@@ -0,0 +1,428 @@
+---
+title: "REST API 参考"
+linkTitle: "REST API"
+weight: 5
+---
+
+HugeGraph-LLM 提供 REST API 端点,用于将 RAG 和 Text2Gremlin 功能集成到您的应用程序中。
+
+## 基础 URL
+
+```
+http://localhost:8001
+```
+
+启动服务时更改主机/端口:
+```bash
+python -m hugegraph_llm.demo.rag_demo.app --host 127.0.0.1 --port 8001
+```
+
+## 认证
+
+目前 API 支持可选的基于令牌的认证:
+
+```bash
+# 在 .env 中启用认证
+ENABLE_LOGIN=true
+USER_TOKEN=your-user-token
+ADMIN_TOKEN=your-admin-token
+```
+
+在请求头中传递令牌:
+```bash
+Authorization: Bearer <your-token>
+```
+
+---
+
+## RAG 端点
+
+### 1. 完整 RAG 查询
+
+**POST** `/rag`
+
+执行完整的 RAG 工作流,包括关键词提取、图检索、向量搜索、重排序和答案生成。
+
+#### 请求体
+
+```json
+{
+ "query": "给我讲讲阿尔·帕西诺的电影",
+ "raw_answer": false,
+ "vector_only": false,
+ "graph_only": true,
+ "graph_vector_answer": false,
+ "graph_ratio": 0.5,
+ "rerank_method": "cohere",
+ "near_neighbor_first": false,
+ "gremlin_tmpl_num": 5,
+ "max_graph_items": 30,
+ "topk_return_results": 20,
+ "vector_dis_threshold": 0.9,
+ "topk_per_keyword": 1,
+ "custom_priority_info": "",
+ "answer_prompt": "",
+ "keywords_extract_prompt": "",
+ "gremlin_prompt": "",
+ "client_config": {
+ "url": "127.0.0.1:8080",
+ "graph": "hugegraph",
+ "user": "admin",
+ "pwd": "admin",
+ "gs": ""
+ }
+}
+```
+
+**参数说明:**
+
+| 字段 | 类型 | 必需 | 默认值 | 描述 |
+|-----|------|------|-------|------|
+| `query` | string | 是 | - | 用户的自然语言问题 |
+| `raw_answer` | boolean | 否 | false | 返回 LLM 答案而不检索 |
+| `vector_only` | boolean | 否 | false | 仅使用向量搜索(无图) |
+| `graph_only` | boolean | 否 | false | 仅使用图检索(无向量) |
+| `graph_vector_answer` | boolean | 否 | false | 结合图和向量结果 |
+| `graph_ratio` | float | 否 | 0.5 | 图与向量结果的比例(0-1) |
+| `rerank_method` | string | 否 | "" | 重排序器:"cohere"、"siliconflow"、"" |
+| `near_neighbor_first` | boolean | 否 | false | 优先选择直接邻居 |
+| `gremlin_tmpl_num` | integer | 否 | 5 | 尝试的 Gremlin 模板数量 |
+| `max_graph_items` | integer | 否 | 30 | 图检索的最大项数 |
+| `topk_return_results` | integer | 否 | 20 | 重排序后的 Top-K |
+| `vector_dis_threshold` | float | 否 | 0.9 | 向量相似度阈值(0-1) |
+| `topk_per_keyword` | integer | 否 | 1 | 每个关键词的 Top-K 向量 |
+| `custom_priority_info` | string | 否 | "" | 要优先考虑的自定义上下文 |
+| `answer_prompt` | string | 否 | "" | 自定义答案生成提示词 |
+| `keywords_extract_prompt` | string | 否 | "" | 自定义关键词提取提示词 |
+| `gremlin_prompt` | string | 否 | "" | 自定义 Gremlin 生成提示词 |
+| `client_config` | object | 否 | null | 覆盖图连接设置 |
+
+#### 响应
+
+```json
+{
+ "query": "给我讲讲阿尔·帕西诺的电影",
+ "graph_only": {
+ "answer": "阿尔·帕西诺主演了《教父》(1972 年),由弗朗西斯·福特·科波拉执导...",
+ "context": ["《教父》是 1972 年的犯罪电影...", "..."],
+ "graph_paths": ["..."],
+ "keywords": ["阿尔·帕西诺", "电影"]
+ }
+}
+```
+
+#### 示例(curl)
+
+```bash
+curl -X POST http://localhost:8001/rag \
+ -H "Content-Type: application/json" \
+ -d '{
+ "query": "给我讲讲阿尔·帕西诺",
+ "graph_only": true,
+ "max_graph_items": 30
+ }'
+```
+
+### 2. 仅图检索
+
+**POST** `/rag/graph`
+
+检索图上下文而不生成答案。用于调试或自定义处理。
+
+#### 请求体
+
+```json
+{
+ "query": "阿尔·帕西诺的电影",
+ "max_graph_items": 30,
+ "topk_return_results": 20,
+ "vector_dis_threshold": 0.9,
+ "topk_per_keyword": 1,
+ "gremlin_tmpl_num": 5,
+ "rerank_method": "cohere",
+ "near_neighbor_first": false,
+ "custom_priority_info": "",
+ "gremlin_prompt": "",
+ "get_vertex_only": false,
+ "client_config": {
+ "url": "127.0.0.1:8080",
+ "graph": "hugegraph",
+ "user": "admin",
+ "pwd": "admin",
+ "gs": ""
+ }
+}
+```
+
+**额外参数:**
+
+| 字段 | 类型 | 默认值 | 描述 |
+|-----|------|-------|------|
+| `get_vertex_only` | boolean | false | 仅返回顶点 ID,不返回完整详情 |
+
+#### 响应
+
+```json
+{
+ "graph_recall": {
+ "query": "阿尔·帕西诺的电影",
+ "keywords": ["阿尔·帕西诺", "电影"],
+ "match_vids": ["1:阿尔·帕西诺", "2:教父"],
+ "graph_result_flag": true,
+ "gremlin": "g.V('1:阿尔·帕西诺').outE().inV().limit(30)",
+ "graph_result": [
+ {"id": "1:阿尔·帕西诺", "label": "person", "properties": {"name": "阿尔·帕西诺"}},
+ {"id": "2:教父", "label": "movie", "properties": {"title": "教父"}}
+ ],
+ "vertex_degree_list": [5, 12]
+ }
+}
+```
+
+#### 示例(curl)
+
+```bash
+curl -X POST http://localhost:8001/rag/graph \
+ -H "Content-Type: application/json" \
+ -d '{
+ "query": "阿尔·帕西诺",
+ "max_graph_items": 30,
+ "get_vertex_only": false
+ }'
+```
+
+---
+
+## Text2Gremlin 端点
+
+### 3. 自然语言转 Gremlin
+
+**POST** `/text2gremlin`
+
+将自然语言查询转换为可执行的 Gremlin 命令。
+
+#### 请求体
+
+```json
+{
+ "query": "查找所有由弗朗西斯·福特·科波拉执导的电影",
+ "example_num": 5,
+ "gremlin_prompt": "",
+ "output_types": ["GREMLIN", "RESULT"],
+ "client_config": {
+ "url": "127.0.0.1:8080",
+ "graph": "hugegraph",
+ "user": "admin",
+ "pwd": "admin",
+ "gs": ""
+ }
+}
+```
+
+**参数说明:**
+
+| 字段 | 类型 | 必需 | 默认值 | 描述 |
+|-----|------|------|-------|------|
+| `query` | string | 是 | - | 自然语言查询 |
+| `example_num` | integer | 否 | 5 | 使用的示例模板数量 |
+| `gremlin_prompt` | string | 否 | "" | Gremlin 生成的自定义提示词 |
+| `output_types` | array | 否 | null | 输出类型:["GREMLIN", "RESULT", "CYPHER"] |
+| `client_config` | object | 否 | null | 图连接覆盖 |
+
+**输出类型:**
+- `GREMLIN`:生成的 Gremlin 查询
+- `RESULT`:图的执行结果
+- `CYPHER`:Cypher 查询(如果请求)
+
+#### 响应
+
+```json
+{
+ "gremlin": "g.V().has('person','name','弗朗西斯·福特·科波拉').out('directed').hasLabel('movie').values('title')",
+ "result": [
+ "教父",
+ "教父 2",
+ "现代启示录"
+ ]
+}
+```
+
+#### 示例(curl)
+
+```bash
+curl -X POST http://localhost:8001/text2gremlin \
+ -H "Content-Type: application/json" \
+ -d '{
+ "query": "查找所有由弗朗西斯·福特·科波拉执导的电影",
+ "output_types": ["GREMLIN", "RESULT"]
+ }'
+```
+
+---
+
+## 配置端点
+
+### 4. 更新图连接
+
+**POST** `/config/graph`
+
+动态更新 HugeGraph 连接设置。
+
+#### 请求体
+
+```json
+{
+ "url": "127.0.0.1:8080",
+ "name": "hugegraph",
+ "user": "admin",
+ "pwd": "admin",
+ "gs": ""
+}
+```
+
+#### 响应
+
+```json
+{
+ "status_code": 201,
+ "message": "图配置更新成功"
+}
+```
+
+### 5. 更新 LLM 配置
+
+**POST** `/config/llm`
+
+运行时更新聊天/提取 LLM 设置。
+
+#### 请求体(OpenAI)
+
+```json
+{
+ "llm_type": "openai",
+ "api_key": "sk-your-api-key",
+ "api_base": "https://api.openai.com/v1",
+ "language_model": "gpt-4o-mini",
+ "max_tokens": 4096
+}
+```
+
+#### 请求体(Ollama)
+
+```json
+{
+ "llm_type": "ollama/local",
+ "host": "127.0.0.1",
+ "port": 11434,
+ "language_model": "llama3.1:8b"
+}
+```
+
+### 6. 更新嵌入配置
+
+**POST** `/config/embedding`
+
+更新嵌入模型设置。
+
+#### 请求体
+
+```json
+{
+ "llm_type": "openai",
+ "api_key": "sk-your-api-key",
+ "api_base": "https://api.openai.com/v1",
+ "language_model": "text-embedding-3-small"
+}
+```
+
+### 7. 更新 Reranker 配置
+
+**POST** `/config/rerank`
+
+配置重排序器设置。
+
+#### 请求体(Cohere)
+
+```json
+{
+ "reranker_type": "cohere",
+ "api_key": "your-cohere-key",
+ "reranker_model": "rerank-multilingual-v3.0",
+ "cohere_base_url": "https://api.cohere.com/v1/rerank"
+}
+```
+
+#### 请求体(SiliconFlow)
+
+```json
+{
+ "reranker_type": "siliconflow",
+ "api_key": "your-siliconflow-key",
+ "reranker_model": "BAAI/bge-reranker-v2-m3"
+}
+```
+
+---
+
+## 错误响应
+
+所有端点返回标准 HTTP 状态码:
+
+| 代码 | 含义 |
+|-----|------|
+| 200 | 成功 |
+| 201 | 已创建(配置已更新) |
+| 400 | 错误请求(无效参数) |
+| 500 | 内部服务器错误 |
+| 501 | 未实现 |
+
+错误响应格式:
+```json
+{
+ "detail": "描述错误的消息"
+}
+```
+
+---
+
+## Python 客户端示例
+
+```python
+import requests
+
+BASE_URL = "http://localhost:8001"
+
+# 1. 配置图连接
+graph_config = {
+ "url": "127.0.0.1:8080",
+ "name": "hugegraph",
+ "user": "admin",
+ "pwd": "admin"
+}
+requests.post(f"{BASE_URL}/config/graph", json=graph_config)
+
+# 2. 执行 RAG 查询
+rag_request = {
+ "query": "给我讲讲阿尔·帕西诺",
+ "graph_only": True,
+ "max_graph_items": 30
+}
+response = requests.post(f"{BASE_URL}/rag", json=rag_request)
+print(response.json())
+
+# 3. 从自然语言生成 Gremlin
+text2gql_request = {
+ "query": "查找所有与阿尔·帕西诺合作的导演",
+ "output_types": ["GREMLIN", "RESULT"]
+}
+response = requests.post(f"{BASE_URL}/text2gremlin", json=text2gql_request)
+print(response.json())
+```
+
+---
+
+## 另见
+
+- [配置参考](./config-reference.md) - 完整的 .env 配置指南
+- [HugeGraph-LLM 概述](./hugegraph-llm.md) - 架构和功能
+- [快速入门指南](./quick_start.md) - Web UI 入门
diff --git a/content/cn/docs/quickstart/hugegraph-spark.md b/content/cn/docs/quickstart/hugegraph-spark.md
index 7caaeffed..b66cfe203 100644
--- a/content/cn/docs/quickstart/hugegraph-spark.md
+++ b/content/cn/docs/quickstart/hugegraph-spark.md
@@ -13,7 +13,7 @@ HugeGraph-Spark 是一个连接 HugeGraph 和 Spark GraphX 的工具,能够读
### 2 环境依赖
-在使用 HugeGraph-Spark 前,需要依赖 HugeGraph Server 服务,下载和启动 Server 请参考 [HugeGraph-Server Quick Start](/docs/quickstart/hugegraph-server)。另外,由于 HugeGraph-Spark 需要使用 Spark GraphX,所以还需要下载 spark,本文的示例使用的是 apache-spark-2.1.1。
+在使用 HugeGraph-Spark 前,需要依赖 HugeGraph Server 服务,下载和启动 Server 请参考 [HugeGraph-Server Quick Start](/docs/quickstart/hugegraph/hugegraph-server)。另外,由于 HugeGraph-Spark 需要使用 Spark GraphX,所以还需要下载 spark,本文的示例使用的是 apache-spark-2.1.1。
```
wget https://archive.apache.org/dist/spark/spark-2.1.1/spark-2.1.1-bin-hadoop2.7.tgz
diff --git a/content/cn/docs/quickstart/hugegraph-studio.md b/content/cn/docs/quickstart/hugegraph-studio.md
index ab7661bb1..ac7c820c4 100644
--- a/content/cn/docs/quickstart/hugegraph-studio.md
+++ b/content/cn/docs/quickstart/hugegraph-studio.md
@@ -17,7 +17,7 @@ HugeGraph-Studio是HugeGraph的前端展示工具,是基于Web的图形化IDE
- 图数据的展示
- 图数据的分析
-> 注意:HugeGraph-Studio需要依赖HugeGraph-Server,在安装和使用HugeGraph-Studio之前,请通过jps命令检查HugeGraphServer服务是否已经启动,如果没有启动,请参考[HugeGraph-Server安装配置](/docs/quickstart/hugegraph-server)启动HugeGraphServer。
+> 注意:HugeGraph-Studio需要依赖HugeGraph-Server,在安装和使用HugeGraph-Studio之前,请通过jps命令检查HugeGraphServer服务是否已经启动,如果没有启动,请参考[HugeGraph-Server安装配置](/docs/quickstart/hugegraph/hugegraph-server)启动HugeGraphServer。
### 2 安装和运行HugeGraph-Studio
@@ -145,7 +145,7 @@ graph.schema().propertyKey("price").asInt().ifNotExist().create()
**在这里有几点需要说明**
1、上述语句是`groovy`语言形式(类似但不是`java`)的`gremlin`语句,这些`gremlin`语句会被发送到`HugeGraphServer`上执行。
-关于`gremlin`本身可以参考[Gremlin Query Language](/language/hugegraph-gremlin.md)或[Tinkerpop官网](http://tinkerpop.apache.org/);
+关于`gremlin`本身可以参考[Gremlin Query Language](../language/hugegraph-gremlin)或[Tinkerpop官网](http://tinkerpop.apache.org/);
2、上述语句是通过`graph.schema()`获取到`SchemaManager`对象后操作元数据,通过`gremlin`语句操作Schema可参考文档[HugeGraph-Client](/docs/clients/hugegraph-client),
需要注意的是`HugeGraph-Client`是`java`语法,大体上与`gremlin`风格是一致的,具体的差异见文档`HugeGraph-Client`中的说明。
diff --git a/content/cn/docs/quickstart/hugegraph/_index.md b/content/cn/docs/quickstart/hugegraph/_index.md
index f64d0adcf..a7d5fa164 100644
--- a/content/cn/docs/quickstart/hugegraph/_index.md
+++ b/content/cn/docs/quickstart/hugegraph/_index.md
@@ -4,8 +4,8 @@ linkTitle: "HugeGraph (OLTP)"
weight: 1
---
-## 🚀 最佳实践:优先使用 DeepWiki 智能文档
+> DeepWiki 提供实时更新的项目文档,内容更全面准确,适合快速了解项目最新情况。
+>
+> 📖 [https://deepwiki.com/apache/hugegraph](https://deepwiki.com/apache/hugegraph)
-> 为解决静态文档可能过时的问题,我们提供了 **实时更新、内容更全面** 的 DeepWiki。它相当于一个拥有项目最新知识的专家,非常适合**所有开发者**在开始项目前阅读和咨询。
-
-**👉 强烈推荐访问并对话:**[**incubator-hugegraph**](https://deepwiki.com/apache/incubator-hugegraph)
\ No newline at end of file
+**GitHub 访问:** [https://github.com/apache/hugegraph](https://github.com/apache/hugegraph)
\ No newline at end of file
diff --git a/content/cn/docs/quickstart/hugegraph/hugegraph-hstore.md b/content/cn/docs/quickstart/hugegraph/hugegraph-hstore.md
index 0e6bbb3d5..50d28d579 100644
--- a/content/cn/docs/quickstart/hugegraph/hugegraph-hstore.md
+++ b/content/cn/docs/quickstart/hugegraph/hugegraph-hstore.md
@@ -30,7 +30,7 @@ HugeGraph-Store 是 HugeGraph 分布式版本的存储节点组件,负责实
```bash
# 用最新版本号替换 {version},例如 1.5.0
-wget https://downloads.apache.org/incubator/hugegraph/{version}/apache-hugegraph-incubating-{version}.tar.gz
+wget https://downloads.apache.org/hugegraph/{version}/apache-hugegraph-incubating-{version}.tar.gz
tar zxf apache-hugegraph-incubating-{version}.tar.gz
cd apache-hugegraph-incubating-{version}/apache-hugegraph-hstore-incubating-{version}
```
diff --git a/content/cn/docs/quickstart/hugegraph/hugegraph-pd.md b/content/cn/docs/quickstart/hugegraph/hugegraph-pd.md
index 023acebed..46d70c32f 100644
--- a/content/cn/docs/quickstart/hugegraph/hugegraph-pd.md
+++ b/content/cn/docs/quickstart/hugegraph/hugegraph-pd.md
@@ -29,7 +29,7 @@ HugeGraph-PD (Placement Driver) 是 HugeGraph 分布式版本的元数据管理
```bash
# 用最新版本号替换 {version},例如 1.5.0
-wget https://downloads.apache.org/incubator/hugegraph/{version}/apache-hugegraph-incubating-{version}.tar.gz
+wget https://downloads.apache.org/hugegraph/{version}/apache-hugegraph-incubating-{version}.tar.gz
tar zxf apache-hugegraph-incubating-{version}.tar.gz
cd apache-hugegraph-incubating-{version}/apache-hugegraph-pd-incubating-{version}
```
diff --git a/content/cn/docs/quickstart/hugegraph/hugegraph-server.md b/content/cn/docs/quickstart/hugegraph/hugegraph-server.md
index a0606b94d..5b0610e16 100644
--- a/content/cn/docs/quickstart/hugegraph/hugegraph-server.md
+++ b/content/cn/docs/quickstart/hugegraph/hugegraph-server.md
@@ -8,7 +8,9 @@ weight: 1
HugeGraph-Server 是 HugeGraph 项目的核心部分,包含 graph-core、backend、API 等子模块。
-Core 模块是 Tinkerpop 接口的实现,Backend 模块用于管理数据存储,目前支持的后端包括:Memory、Cassandra、ScyllaDB 以及 RocksDB,API 模块提供 HTTP Server,将 Client 的 HTTP 请求转化为对 Core 的调用。
+Core 模块是 Tinkerpop 接口的实现,Backend 模块用于管理数据存储,1.7.0+ 版本支持的后端包括:RocksDB(单机默认)、HStore(分布式)、HBase 和 Memory。API 模块提供 HTTP Server,将 Client 的 HTTP 请求转化为对 Core 的调用。
+
+> ⚠️ **重要变更**: 从 1.7.0 版本开始,MySQL、PostgreSQL、Cassandra、ScyllaDB 等遗留后端已被移除。如需使用这些后端,请使用 1.5.x 或更早版本。
> 文档中会出现 `HugeGraph-Server` 及 `HugeGraphServer` 这两种写法,其他组件也类似。
> 这两种写法含义上并明显差异,可以这么区分:`HugeGraph-Server` 表示服务端相关组件代码,`HugeGraphServer` 表示服务进程。
@@ -37,14 +39,14 @@ Core 模块是 Tinkerpop 接口的实现,Backend 模块用于管理数据存
#### 3.1 使用 Docker 容器 (便于**测试**)
-可参考 [Docker 部署方式](https://github.com/apache/incubator-hugegraph/blob/master/hugegraph-server/hugegraph-dist/docker/README.md)。
+可参考 [Docker 部署方式](https://github.com/apache/hugegraph/blob/master/hugegraph-server/hugegraph-dist/docker/README.md)。
-我们可以使用 `docker run -itd --name=server -p 8080:8080 -e PASSWORD=xxx hugegraph/hugegraph:1.5.0` 去快速启动一个内置了 `RocksDB` 的 `Hugegraph server`.
+我们可以使用 `docker run -itd --name=server -p 8080:8080 -e PASSWORD=xxx hugegraph/hugegraph:1.7.0` 去快速启动一个内置了 `RocksDB` 的 `Hugegraph server`.
可选项:
1. 可以使用 `docker exec -it server bash` 进入容器完成一些操作
-2. 可以使用 `docker run -itd --name=server -p 8080:8080 -e PRELOAD="true" hugegraph/hugegraph:1.5.0` 在启动的时候预加载一个**内置的**样例图。可以通过 `RESTful API` 进行验证。具体步骤可以参考 [5.1.9](#519-%E5%90%AF%E5%8A%A8-server-%E7%9A%84%E6%97%B6%E5%80%99%E5%88%9B%E5%BB%BA%E7%A4%BA%E4%BE%8B%E5%9B%BE)
+2. 可以使用 `docker run -itd --name=server -p 8080:8080 -e PRELOAD="true" hugegraph/hugegraph:1.7.0` 在启动的时候预加载一个**内置的**样例图。可以通过 `RESTful API` 进行验证。具体步骤可以参考 [5.1.9](#519-%E5%90%AF%E5%8A%A8-server-%E7%9A%84%E6%97%B6%E5%80%99%E5%88%9B%E5%BB%BA%E7%A4%BA%E4%BE%8B%E5%9B%BE)
3. 可以使用 `-e PASSWORD=xxx` 设置是否开启鉴权模式以及 admin 的密码,具体步骤可以参考 [Config Authentication](/cn/docs/config/config-authentication#使用-docker-时开启鉴权模式)
如果使用 docker desktop,则可以按照如下的方式设置可选项:
@@ -59,7 +61,7 @@ Core 模块是 Tinkerpop 接口的实现,Backend 模块用于管理数据存
version: '3'
services:
server:
- image: hugegraph/hugegraph:1.5.0
+ image: hugegraph/hugegraph:1.7.0
container_name: server
environment:
- PASSWORD=xxx
@@ -74,13 +76,13 @@ services:
>
> 1. hugegraph 的 docker 镜像是一个便捷版本,用于快速启动 hugegraph,并不是**官方发布物料包方式**。你可以从 [ASF Release Distribution Policy](https://infra.apache.org/release-distribution.html#dockerhub) 中得到更多细节。
>
-> 2. 推荐使用 `release tag` (如 `1.5.0/1.x.0`) 以获取稳定版。使用 `latest` tag 可以使用开发中的最新功能。
+> 2. 推荐使用 `release tag` (如 `1.7.0/1.x.0`) 以获取稳定版。使用 `latest` tag 可以使用开发中的最新功能。
#### 3.2 下载 tar 包
```bash
-# use the latest version, here is 1.5.0 for example
-wget https://downloads.apache.org/incubator/hugegraph/{version}/apache-hugegraph-incubating-{version}.tar.gz
+# use the latest version, here is 1.7.0 for example
+wget https://downloads.apache.org/hugegraph/{version}/apache-hugegraph-incubating-{version}.tar.gz
tar zxf *hugegraph*.tar.gz
```
@@ -138,11 +140,11 @@ mvn package -DskipTests
HugeGraph-Tools 提供了一键部署的命令行工具,用户可以使用该工具快速地一键下载、解压、配置并启动 HugeGraph-Server 和 HugeGraph-Hubble,最新的 HugeGraph-Toolchain 中已经包含所有的这些工具,直接下载它解压就有工具包集合了
```bash
-# download toolchain package, it includes loader + tool + hubble, please check the latest version (here is 1.5.0)
-wget https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0.tar.gz
+# download toolchain package, it includes loader + tool + hubble, please check the latest version (here is 1.7.0)
+wget https://downloads.apache.org/hugegraph/1.7.0/apache-hugegraph-toolchain-incubating-1.7.0.tar.gz
tar zxf *hugegraph-*.tar.gz
# enter the tool's package
-cd *hugegraph*/*tool*
+cd *hugegraph*/*tool*
```
> 注:`${version}` 为版本号,最新版本号可参考 [Download 页面](/docs/download/download),或直接从 Download 页面点击链接下载
@@ -200,10 +202,31 @@ task.scheduler_type=distributed
pd.peers=127.0.0.1:8686,127.0.0.1:8687,127.0.0.1:8688
```
+```properties
+# 简单示例(带鉴权)
+gremlin.graph=org.apache.hugegraph.auth.HugeFactoryAuthProxy
+
+# 指定存储 hstore(必须)
+backend=hstore
+serializer=binary
+store=hugegraph
+
+# 指定任务调度器(1.7.0及之前,hstore 存储必须)
+task.scheduler_type=distributed
+
+# pd config
+pd.peers=127.0.0.1:8686
+```
+
2. 修改 HugeGraph-Server 的 `rest-server.properties` 配置:
```properties
usePD=true
+# 注意,1.7.0 必须在 rest-server.properties 配置 pd.peers
+pd.peers=127.0.0.1:8686,127.0.0.1:8687,127.0.0.1:8688
+
+# 若需要 auth
+# auth.authenticator=org.apache.hugegraph.auth.StandardAuthenticator
```
如果配置多个 HugeGraph-Server 节点,需要为每个节点修改 `rest-server.properties` 配置文件,例如:
@@ -213,6 +236,7 @@ usePD=true
usePD=true
restserver.url=http://127.0.0.1:8081
gremlinserver.url=http://127.0.0.1:8181
+pd.peers=127.0.0.1:8686
rpc.server_host=127.0.0.1
rpc.server_port=8091
@@ -226,6 +250,7 @@ server.role=master
usePD=true
restserver.url=http://127.0.0.1:8082
gremlinserver.url=http://127.0.0.1:8182
+pd.peers=127.0.0.1:8686
rpc.server_host=127.0.0.1
rpc.server_port=8092
@@ -366,6 +391,8 @@ Connecting to HugeGraphServer (http://127.0.0.1:8080/graphs)....OK
##### 5.1.4 MySQL
+> ⚠️ **已废弃**: 此后端从 HugeGraph 1.7.0 版本开始已移除。如需使用,请参考 1.5.x 版本文档。
+
点击展开/折叠 MySQL 配置及启动方法
@@ -410,6 +437,8 @@ Connecting to HugeGraphServer (http://127.0.0.1:8080/graphs)....OK
##### 5.1.5 Cassandra
+> ⚠️ **已废弃**: 此后端从 HugeGraph 1.7.0 版本开始已移除。如需使用,请参考 1.5.x 版本文档。
+
点击展开/折叠 Cassandra 配置及启动方法
@@ -495,6 +524,8 @@ Connecting to HugeGraphServer (http://127.0.0.1:8080/graphs)....OK
##### 5.1.7 ScyllaDB
+> ⚠️ **已废弃**: 此后端从 HugeGraph 1.7.0 版本开始已移除。如需使用,请参考 1.5.x 版本文档。
+
点击展开/折叠 ScyllaDB 配置及启动方法
@@ -563,12 +594,14 @@ Connecting to HugeGraphServer (http://127.0.0.1:8080/graphs)......OK
##### 5.2.1 使用 Cassandra 作为后端
+> ⚠️ **已废弃**: Cassandra 后端从 HugeGraph 1.7.0 版本开始已移除。如需使用,请参考 1.5.x 版本文档。
+
点击展开/折叠 Cassandra 配置及启动方法
在使用 Docker 的时候,我们可以使用 Cassandra 作为后端存储。我们更加推荐直接使用 docker-compose 来对于 server 以及 Cassandra 进行统一管理
-样例的 `docker-compose.yml` 可以在 [github](https://github.com/apache/incubator-hugegraph/blob/master/hugegraph-server/hugegraph-dist/docker/example/docker-compose-cassandra.yml) 中获取,使用 `docker-compose up -d` 启动。(如果使用 cassandra 4.0 版本作为后端存储,则需要大约两个分钟初始化,请耐心等待)
+样例的 `docker-compose.yml` 可以在 [github](https://github.com/apache/hugegraph/blob/master/hugegraph-server/hugegraph-dist/docker/example/docker-compose-cassandra.yml) 中获取,使用 `docker-compose up -d` 启动。(如果使用 cassandra 4.0 版本作为后端存储,则需要大约两个分钟初始化,请耐心等待)
```yaml
version: "3"
@@ -631,17 +664,17 @@ volumes:
1. 使用`docker run`
- 使用 `docker run -itd --name=server -p 8080:8080 -e PRELOAD=true hugegraph/hugegraph:1.5.0`
+ 使用 `docker run -itd --name=server -p 8080:8080 -e PRELOAD=true hugegraph/hugegraph:1.7.0`
2. 使用`docker-compose`
- 创建`docker-compose.yml`,具体文件如下,在环境变量中设置 PRELOAD=true。其中,[`example.groovy`](https://github.com/apache/incubator-hugegraph/blob/master/hugegraph-server/hugegraph-dist/src/assembly/static/scripts/example.groovy) 是一个预定义的脚本,用于预加载样例数据。如果有需要,可以通过挂载新的 `example.groovy` 脚本改变预加载的数据。
+ 创建`docker-compose.yml`,具体文件如下,在环境变量中设置 PRELOAD=true。其中,[`example.groovy`](https://github.com/apache/hugegraph/blob/master/hugegraph-server/hugegraph-dist/src/assembly/static/scripts/example.groovy) 是一个预定义的脚本,用于预加载样例数据。如果有需要,可以通过挂载新的 `example.groovy` 脚本改变预加载的数据。
```yaml
version: '3'
services:
server:
- image: hugegraph/hugegraph:1.5.0
+ image: hugegraph/hugegraph:1.7.0
container_name: server
environment:
- PRELOAD=true
diff --git a/content/cn/docs/quickstart/toolchain/_index.md b/content/cn/docs/quickstart/toolchain/_index.md
index 776d935b2..6b318fa74 100644
--- a/content/cn/docs/quickstart/toolchain/_index.md
+++ b/content/cn/docs/quickstart/toolchain/_index.md
@@ -6,8 +6,8 @@ weight: 2
> **测试指南**:如需在本地运行工具链测试,请参考 [HugeGraph 工具链本地测试指南](/cn/docs/guides/toolchain-local-test)
-## 🚀 最佳实践:优先使用 DeepWiki 智能文档
+> DeepWiki 提供实时更新的项目文档,内容更全面准确,适合快速了解项目最新情况。
+>
+> 📖 [https://deepwiki.com/apache/hugegraph-toolchain](https://deepwiki.com/apache/hugegraph-toolchain)
-> 为解决静态文档可能过时的问题,我们提供了 **实时更新、内容更全面** 的 DeepWiki。它相当于一个拥有项目最新知识的专家,非常适合**所有开发者**在开始项目前阅读和咨询。
-
-**👉 强烈推荐访问并对话:**[**incubator-hugegraph-toolchain**](https://deepwiki.com/apache/incubator-hugegraph-toolchain)
+**GitHub 访问:** [https://github.com/apache/hugegraph-toolchain](https://github.com/apache/hugegraph-toolchain)
diff --git a/content/cn/docs/quickstart/toolchain/hugegraph-hubble.md b/content/cn/docs/quickstart/toolchain/hugegraph-hubble.md
index 2167c0a72..51adbc0bf 100644
--- a/content/cn/docs/quickstart/toolchain/hugegraph-hubble.md
+++ b/content/cn/docs/quickstart/toolchain/hugegraph-hubble.md
@@ -90,7 +90,7 @@ services:
`hubble`项目在`toolchain`项目中,首先下载`toolchain`的 tar 包
```bash
-wget https://downloads.apache.org/incubator/hugegraph/{version}/apache-hugegraph-toolchain-incubating-{version}.tar.gz
+wget https://downloads.apache.org/hugegraph/{version}/apache-hugegraph-toolchain-incubating-{version}.tar.gz
tar -xvf apache-hugegraph-toolchain-incubating-{version}.tar.gz
cd apache-hugegraph-toolchain-incubating-{version}.tar.gz/apache-hugegraph-hubble-incubating-{version}
```
@@ -551,3 +551,26 @@ Hubble 上暂未提供可视化的 OLAP 算法执行,可调用 RESTful API 进
+
+
+### 5 配置说明
+
+HugeGraph-Hubble 可以通过 `conf/hugegraph-hubble.properties` 文件进行配置。
+
+#### 5.1 服务器配置
+
+| 配置项 | 默认值 | 说明 |
+|--------|--------|------|
+| `hubble.host` | `0.0.0.0` | Hubble 服务绑定的地址 |
+| `hubble.port` | `8088` | Hubble 服务监听的端口 |
+
+#### 5.2 Gremlin 查询限制
+
+这些设置控制查询结果限制,防止内存问题:
+
+| 配置项 | 默认值 | 说明 |
+|--------|--------|------|
+| `gremlin.suffix_limit` | `250` | 查询后缀最大长度 |
+| `gremlin.vertex_degree_limit` | `100` | 显示的最大顶点度数 |
+| `gremlin.edges_total_limit` | `500` | 返回的最大边数 |
+| `gremlin.batch_query_ids` | `100` | ID 批量查询大小 |
diff --git a/content/cn/docs/quickstart/toolchain/hugegraph-loader.md b/content/cn/docs/quickstart/toolchain/hugegraph-loader.md
index 9b088e4cb..622fb8fd1 100644
--- a/content/cn/docs/quickstart/toolchain/hugegraph-loader.md
+++ b/content/cn/docs/quickstart/toolchain/hugegraph-loader.md
@@ -77,7 +77,7 @@ services:
下载最新版本的 `HugeGraph-Toolchain` Release 包,里面包含了 `loader + tool + hubble` 全套工具,如果你已经下载,可跳过重复步骤
```bash
-wget https://downloads.apache.org/incubator/hugegraph/{version}/apache-hugegraph-toolchain-incubating-{version}.tar.gz
+wget https://downloads.apache.org/hugegraph/{version}/apache-hugegraph-toolchain-incubating-{version}.tar.gz
tar zxf *hugegraph*.tar.gz
```
@@ -90,7 +90,7 @@ tar zxf *hugegraph*.tar.gz
git clone https://github.com/apache/hugegraph-toolchain.git
# 2. get from direct url (please choose the **latest release** version)
-wget https://downloads.apache.org/incubator/hugegraph/{version}/apache-hugegraph-toolchain-incubating-{version}-src.tar.gz
+wget https://downloads.apache.org/hugegraph/{version}/apache-hugegraph-toolchain-incubating-{version}-src.tar.gz
```
@@ -807,47 +807,71 @@ schema: 必填
##### 3.4.1 参数说明
-| 参数 | 默认值 | 是否必传 | 描述信息 |
-|---------------------------|-----------|------|-------------------------------------------------------------------|
-| `-f` 或 `--file` | | Y | 配置脚本的路径 |
-| `-g` 或 `--graph` | | Y | 图名称 |
-| `-gs` 或 `--graphspace` | DEFAULT | | 图空间 |
-| `-s` 或 `--schema` | | Y | schema 文件路径 |
-| `-h` 或 `--host` 或 `-i` | localhost | | HugeGraphServer 的地址 |
-| `-p` 或 `--port` | 8080 | | HugeGraphServer 的端口号 |
-| `--username` | null | | 当 HugeGraphServer 开启了权限认证时,当前图的 username |
-| `--password` | null | | 当 HugeGraphServer 开启了权限认证时,当前图的 password |
-| `--create-graph` | false | | 是否在图不存在时自动创建 |
-| `--token` | null | | 当 HugeGraphServer 开启了权限认证时,当前图的 token |
-| `--protocol` | http | | 向服务端发请求的协议,可选 http 或 https |
-| `--pd-peers` | | | PD 服务节点地址 |
-| `--pd-token` | | | 访问 PD 服务的 token |
-| `--meta-endpoints` | | | 元信息存储服务地址 |
-| `--direct` | false | | 是否直连 HugeGraph-Store |
-| `--route-type` | NODE_PORT | | 路由选择方式(可选值:NODE_PORT / DDS / BOTH) |
-| `--cluster` | hg | | 集群名 |
-| `--trust-store-file` | | | 请求协议为 https 时,客户端的证书文件路径 |
-| `--trust-store-password` | | | 请求协议为 https 时,客户端证书密码 |
-| `--clear-all-data` | false | | 导入数据前是否清除服务端的原有数据 |
-| `--clear-timeout` | 240 | | 导入数据前清除服务端的原有数据的超时时间 |
-| `--incremental-mode` | false | | 是否使用断点续导模式,仅输入源为 FILE 和 HDFS 支持该模式,启用该模式能从上一次导入停止的地方开始导入 |
-| `--failure-mode` | false | | 失败模式为 true 时,会导入之前失败了的数据,一般来说失败数据文件需要在人工更正编辑好后,再次进行导入 |
-| `--batch-insert-threads` | CPUs | | 批量插入线程池大小 (CPUs 是当前 OS 可用**逻辑核**个数) |
-| `--single-insert-threads` | 8 | | 单条插入线程池的大小 |
-| `--max-conn` | 4 * CPUs | | HugeClient 与 HugeGraphServer 的最大 HTTP 连接数,**调整线程**的时候建议同时调整此项 |
-| `--max-conn-per-route` | 2 * CPUs | | HugeClient 与 HugeGraphServer 每个路由的最大 HTTP 连接数,**调整线程**的时候建议同时调整此项 |
-| `--batch-size` | 500 | | 导入数据时每个批次包含的数据条数 |
-| `--max-parse-errors` | 1 | | 最多允许多少行数据解析错误,达到该值则程序退出 |
-| `--max-insert-errors` | 500 | | 最多允许多少行数据插入错误,达到该值则程序退出 |
-| `--timeout` | 60 | | 插入结果返回的超时时间(秒) |
-| `--shutdown-timeout` | 10 | | 多线程停止的等待时间(秒) |
-| `--retry-times` | 0 | | 发生特定异常时的重试次数 |
-| `--retry-interval` | 10 | | 重试之前的间隔时间(秒) |
-| `--check-vertex` | false | | 插入边时是否检查边所连接的顶点是否存在 |
-| `--print-progress` | true | | 是否在控制台实时打印导入条数 |
-| `--dry-run` | false | | 打开该模式,只解析不导入,通常用于测试 |
-| `--help` | false | | 打印帮助信息 |
-
+| 参数 | 默认值 | 是否必传 | 描述信息 |
+|-----------------------------------------|-------------|------|-------------------------------------------------------------------|
+| `-f` 或 `--file` | | Y | 配置脚本的路径 |
+| `-g` 或 `--graph` | | Y | 图名称 |
+| `--graphspace` | DEFAULT | | 图空间 |
+| `-s` 或 `--schema` | | Y | schema 文件路径 |
+| `-h` 或 `--host` 或 `-i` | localhost | | HugeGraphServer 的地址 |
+| `-p` 或 `--port` | 8080 | | HugeGraphServer 的端口号 |
+| `--username` | null | | 当 HugeGraphServer 开启了权限认证时,当前图的 username |
+| `--password` | null | | 当 HugeGraphServer 开启了权限认证时,当前图的 password |
+| `--create-graph` | false | | 是否在图不存在时自动创建 |
+| `--token` | null | | 当 HugeGraphServer 开启了权限认证时,当前图的 token |
+| `--protocol` | http | | 向服务端发请求的协议,可选 http 或 https |
+| `--pd-peers` | | | PD 服务节点地址 |
+| `--pd-token` | | | 访问 PD 服务的 token |
+| `--meta-endpoints` | | | 元信息存储服务地址 |
+| `--direct` | false | | 是否直连 HugeGraph-Store |
+| `--route-type` | NODE_PORT | | 路由选择方式(可选值:NODE_PORT / DDS / BOTH) |
+| `--cluster` | hg | | 集群名 |
+| `--trust-store-file` | | | 请求协议为 https 时,客户端的证书文件路径 |
+| `--trust-store-password` | | | 请求协议为 https 时,客户端证书密码 |
+| `--clear-all-data` | false | | 导入数据前是否清除服务端的原有数据 |
+| `--clear-timeout` | 240 | | 导入数据前清除服务端的原有数据的超时时间 |
+| `--incremental-mode` | false | | 是否使用断点续导模式,仅输入源为 FILE 和 HDFS 支持该模式,启用该模式能从上一次导入停止的地方开始导入 |
+| `--failure-mode` | false | | 失败模式为 true 时,会导入之前失败了的数据,一般来说失败数据文件需要在人工更正编辑好后,再次进行导入 |
+| `--batch-insert-threads` | CPUs | | 批量插入线程池大小 (CPUs 是当前 OS 可用**逻辑核**个数) |
+| `--single-insert-threads` | 8 | | 单条插入线程池的大小 |
+| `--max-conn` | 4 * CPUs | | HugeClient 与 HugeGraphServer 的最大 HTTP 连接数,**调整线程**的时候建议同时调整此项 |
+| `--max-conn-per-route` | 2 * CPUs | | HugeClient 与 HugeGraphServer 每个路由的最大 HTTP 连接数,**调整线程**的时候建议同时调整此项 |
+| `--batch-size` | 500 | | 导入数据时每个批次包含的数据条数 |
+| `--max-parse-errors` | 1 | | 最多允许多少行数据解析错误,达到该值则程序退出 |
+| `--max-insert-errors` | 500 | | 最多允许多少行数据插入错误,达到该值则程序退出 |
+| `--timeout` | 60 | | 插入结果返回的超时时间(秒) |
+| `--shutdown-timeout` | 10 | | 多线程停止的等待时间(秒) |
+| `--retry-times` | 0 | | 发生特定异常时的重试次数 |
+| `--retry-interval` | 10 | | 重试之前的间隔时间(秒) |
+| `--check-vertex` | false | | 插入边时是否检查边所连接的顶点是否存在 |
+| `--print-progress` | true | | 是否在控制台实时打印导入条数 |
+| `--dry-run` | false | | 打开该模式,只解析不导入,通常用于测试 |
+| `--help` 或 `-help` | false | | 打印帮助信息 |
+| `--parser-threads` 或 `--parallel-count` | max(2, CPUs) | | 并行读取数据文件最大线程数 |
+| `--start-file` | 0 | | 用于部分(分片)导入的起始文件索引 |
+| `--end-file` | -1 | | 用于部分导入的截止文件索引 |
+| `--scatter-sources` | false | | 分散(并行)读取多个数据源以优化 I/O 性能 |
+| `--cdc-flush-interval` | 30000 | | Flink CDC 的数据刷新间隔 |
+| `--cdc-sink-parallelism` | 1 | | Flink CDC 写入端(Sink)的并行度 |
+| `--max-read-errors` | 1 | | 程序退出前允许的最大读取错误行数 |
+| `--max-read-lines` | -1L | | 最大读取行数限制;一旦达到此行数,导入任务将停止 |
+| `--test-mode` | false | | 是否开启测试模式 |
+| `--use-prefilter` | false | | 是否预先过滤顶点 |
+| `--short-id` | [] | | 将自定义 ID 映射为更短的 ID |
+| `--vertex-edge-limit` | -1L | | 单个顶点的最大边数限制 |
+| `--sink-type` | true | | 是否输出至不同的存储 |
+| `--vertex-partitions` | 64 | | HBase 顶点表的预分区数量 |
+| `--edge-partitions` | 64 | | HBase 边表的预分区数量 |
+| `--vertex-table-name` | | | HBase 顶点表名称 |
+| `--edge-table-name` | | | HBase 边表名称 |
+| `--hbase-zk-quorum` | | | HBase Zookeeper 集群地址 |
+| `--hbase-zk-port` | | | HBase Zookeeper 端口号 |
+| `--hbase-zk-parent` | | | HBase Zookeeper 根路径 |
+| `--restore` | false | | 将图模式设置为恢复模式 (RESTORING) |
+| `--backend` | hstore | | 自动创建图(如果不存在)时的后端存储类型 |
+| `--serializer` | binary | | 自动创建图(如果不存在)时的序列化器类型 |
+| `--scheduler-type` | distributed | | 自动创建图(如果不存在)时的任务调度器类型 |
+| `--batch-failure-fallback` | true | | 批量插入失败时是否回退至单条插入模式 |
##### 3.4.2 断点续导模式
通常情况下,Loader 任务都需要较长时间执行,如果因为某些原因导致导入中断进程退出,而下次希望能从中断的点继续导,这就是使用断点续导的场景。
@@ -891,7 +915,7 @@ bin/hugegraph-loader -g {GRAPH_NAME} -f ${INPUT_DESC_FILE} -s ${SCHEMA_FILE} -h
### 4 完整示例
-下面给出的是 hugegraph-loader 包中 example 目录下的例子。([GitHub 地址](https://github.com/apache/incubator-hugegraph-toolchain/tree/master/hugegraph-loader/assembly/static/example/file))
+下面给出的是 hugegraph-loader 包中 example 目录下的例子。([GitHub 地址](https://github.com/apache/hugegraph-toolchain/tree/master/hugegraph-loader/assembly/static/example/file))
#### 4.1 准备数据
diff --git a/content/cn/docs/quickstart/toolchain/hugegraph-spark-connector.md b/content/cn/docs/quickstart/toolchain/hugegraph-spark-connector.md
new file mode 100644
index 000000000..13dec291b
--- /dev/null
+++ b/content/cn/docs/quickstart/toolchain/hugegraph-spark-connector.md
@@ -0,0 +1,182 @@
+---
+title: "HugeGraph-Spark-Connector Quick Start"
+linkTitle: "使用 Spark Connector 读写图数据"
+weight: 4
+---
+
+### 1 HugeGraph-Spark-Connector 概述
+
+HugeGraph-Spark-Connector 是一个用于在 Spark 中以标准格式读写 HugeGraph 数据的连接器应用程序。
+
+### 2 环境要求
+
+- Java 8+
+- Maven 3.6+
+- Spark 3.x
+- Scala 2.12
+
+### 3 编译
+
+#### 3.1 不执行测试的编译
+
+```bash
+mvn clean package -DskipTests
+```
+
+#### 3.2 执行默认测试的编译
+
+```bash
+mvn clean package
+```
+
+### 4 使用方法
+
+首先在你的 pom.xml 中添加依赖:
+
+```xml
+<dependency>
+    <groupId>org.apache.hugegraph</groupId>
+    <artifactId>hugegraph-spark-connector</artifactId>
+    <version>${revision}</version>
+</dependency>
+```
+
+#### 4.1 Schema 定义示例
+
+假设我们有一个图,其 schema 定义如下:
+
+```groovy
+schema.propertyKey("name").asText().ifNotExist().create()
+schema.propertyKey("age").asInt().ifNotExist().create()
+schema.propertyKey("city").asText().ifNotExist().create()
+schema.propertyKey("weight").asDouble().ifNotExist().create()
+schema.propertyKey("lang").asText().ifNotExist().create()
+schema.propertyKey("date").asText().ifNotExist().create()
+schema.propertyKey("price").asDouble().ifNotExist().create()
+
+schema.vertexLabel("person")
+ .properties("name", "age", "city")
+ .useCustomizeStringId()
+ .nullableKeys("age", "city")
+ .ifNotExist()
+ .create()
+
+schema.vertexLabel("software")
+ .properties("name", "lang", "price")
+ .primaryKeys("name")
+ .ifNotExist()
+ .create()
+
+schema.edgeLabel("knows")
+ .sourceLabel("person")
+ .targetLabel("person")
+ .properties("date", "weight")
+ .ifNotExist()
+ .create()
+
+schema.edgeLabel("created")
+ .sourceLabel("person")
+ .targetLabel("software")
+ .properties("date", "weight")
+ .ifNotExist()
+ .create()
+```
+
+#### 4.2 写入顶点数据(Scala)
+
+```scala
+val df = sparkSession.createDataFrame(Seq(
+ Tuple3("marko", 29, "Beijing"),
+ Tuple3("vadas", 27, "HongKong"),
+ Tuple3("Josh", 32, "Beijing"),
+ Tuple3("peter", 35, "ShangHai"),
+ Tuple3("li,nary", 26, "Wu,han"),
+ Tuple3("Bob", 18, "HangZhou"),
+)).toDF("name", "age", "city")
+
+df.show()
+
+df.write
+ .format("org.apache.hugegraph.spark.connector.DataSource")
+ .option("host", "127.0.0.1")
+ .option("port", "8080")
+ .option("graph", "hugegraph")
+ .option("data-type", "vertex")
+ .option("label", "person")
+ .option("id", "name")
+ .option("batch-size", 2)
+ .mode(SaveMode.Overwrite)
+ .save()
+```
+
+#### 4.3 写入边数据(Scala)
+
+```scala
+val df = sparkSession.createDataFrame(Seq(
+ Tuple4("marko", "vadas", "20160110", 0.5),
+ Tuple4("peter", "Josh", "20230801", 1.0),
+ Tuple4("peter", "li,nary", "20130220", 2.0)
+)).toDF("source", "target", "date", "weight")
+
+df.show()
+
+df.write
+ .format("org.apache.hugegraph.spark.connector.DataSource")
+ .option("host", "127.0.0.1")
+ .option("port", "8080")
+ .option("graph", "hugegraph")
+ .option("data-type", "edge")
+ .option("label", "knows")
+ .option("source-name", "source")
+ .option("target-name", "target")
+ .option("batch-size", 2)
+ .mode(SaveMode.Overwrite)
+ .save()
+```
+
+### 5 配置参数
+
+#### 5.1 客户端配置
+
+客户端配置项用于设置 hugegraph-client。
+
+| 参数 | 默认值 | 说明 |
+|----------------------|------------|-------------------------------------------------------|
+| `host` | `localhost` | HugeGraphServer 的地址 |
+| `port` | `8080` | HugeGraphServer 的端口 |
+| `graph` | `hugegraph` | 图空间名称 |
+| `protocol` | `http` | 向服务器发送请求的协议,可选 `http` 或 `https` |
+| `username` | `null` | 当 HugeGraphServer 开启权限认证时,当前图的用户名 |
+| `token` | `null` | 当 HugeGraphServer 开启权限认证时,当前图的 token |
+| `timeout` | `60` | 插入结果返回的超时时间(秒) |
+| `max-conn` | `CPUS * 4` | HugeClient 与 HugeGraphServer 之间的最大 HTTP 连接数 |
+| `max-conn-per-route` | `CPUS * 2` | HugeClient 与 HugeGraphServer 之间每个路由的最大 HTTP 连接数 |
+| `trust-store-file` | `null` | 当请求协议为 https 时,客户端的证书文件路径 |
+| `trust-store-token` | `null` | 当请求协议为 https 时,客户端的证书密码 |
+
+#### 5.2 图数据配置
+
+图数据配置用于设置图空间相关的选项。
+
+| 参数 | 默认值 | 说明 |
+|-------------------|-------|----------------------------------------------------------------------------------------------------------------------------------------------------|
+| `data-type` | | 图数据类型,必须是 `vertex` 或 `edge` |
+| `label` | | 要导入的顶点/边数据所属的标签 |
+| `id` | | 指定某一列作为顶点的 id 列。当顶点 id 策略为 CUSTOMIZE 时,必填;当 id 策略为 PRIMARY_KEY 时,必须为空 |
+| `source-name` | | 选择输入源的某些列作为源顶点的 id 列。当源顶点的 id 策略为 CUSTOMIZE 时,必须指定某一列作为顶点的 id 列;当源顶点的 id 策略为 PRIMARY_KEY 时,必须指定一列或多列用于拼接生成顶点的 id,即无论使用哪种 id 策略,此项都是必填的 |
+| `target-name` | | 指定某些列作为目标顶点的 id 列,与 source-name 类似 |
+| `selected-fields` | | 选择某些列进行插入,其他未选择的列不插入,不能与 ignored-fields 同时存在 |
+| `ignored-fields` | | 忽略某些列使其不参与插入,不能与 selected-fields 同时存在 |
+| `batch-size` | `500` | 导入数据时每批数据的条目数 |
+
+#### 5.3 通用配置
+
+通用配置包含一些常用的配置项。
+
+| 参数 | 默认值 | 说明 |
+|-------------|-----|-------------------------------------------------------------------|
+| `delimiter` | `,` | `source-name`、`target-name`、`selected-fields` 或 `ignored-fields` 的分隔符 |
+
+### 6 许可证
+
+与 HugeGraph 一样,hugegraph-spark-connector 也采用 Apache 2.0 许可证。
diff --git a/content/cn/docs/quickstart/toolchain/hugegraph-tools.md b/content/cn/docs/quickstart/toolchain/hugegraph-tools.md
index cd2414ed9..39e4a47b4 100644
--- a/content/cn/docs/quickstart/toolchain/hugegraph-tools.md
+++ b/content/cn/docs/quickstart/toolchain/hugegraph-tools.md
@@ -22,7 +22,7 @@ HugeGraph-Tools 是 HugeGraph 的自动化部署、管理和备份/还原组件
下载最新版本的 HugeGraph-Toolchain 包, 然后进入 tools 子目录
```bash
-wget https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-toolchain-incubating-1.0.0.tar.gz
+wget https://downloads.apache.org/hugegraph/1.0.0/apache-hugegraph-toolchain-incubating-1.0.0.tar.gz
tar zxf *hugegraph*.tar.gz
```
@@ -36,7 +36,7 @@ tar zxf *hugegraph*.tar.gz
git clone https://github.com/apache/hugegraph-toolchain.git
# 2. get from direct (e.g. here is 1.0.0, please choose the latest version)
-wget https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-toolchain-incubating-1.0.0-src.tar.gz
+wget https://downloads.apache.org/hugegraph/1.0.0/apache-hugegraph-toolchain-incubating-1.0.0-src.tar.gz
```
编译生成 tar 包:
@@ -55,10 +55,11 @@ mvn package -DskipTests
解压后,进入 hugegraph-tools 目录,可以使用`bin/hugegraph`或者`bin/hugegraph help`来查看 usage 信息。主要分为:
-- 图管理类,graph-mode-set、graph-mode-get、graph-list、graph-get 和 graph-clear
+- 图管理类,graph-mode-set、graph-mode-get、graph-list、graph-get、graph-clear、graph-create、graph-clone 和 graph-drop
- 异步任务管理类,task-list、task-get、task-delete、task-cancel 和 task-clear
- Gremlin类,gremlin-execute 和 gremlin-schedule
- 备份/恢复类,backup、restore、migrate、schedule-backup 和 dump
+- 认证数据备份/恢复类,auth-backup 和 auth-restore
- 安装部署类,deploy、clear、start-all 和 stop-all
```bash
@@ -105,7 +106,7 @@ Usage: hugegraph [options] [command] [command options]
#export HUGEGRAPH_TRUST_STORE_PASSWORD=
```
-##### 3.3 图管理类,graph-mode-set、graph-mode-get、graph-list、graph-get和graph-clear
+##### 3.3 图管理类,graph-mode-set、graph-mode-get、graph-list、graph-get、graph-clear、graph-create、graph-clone和graph-drop
- graph-mode-set,设置图的 restore mode
- --graph-mode 或者 -m,必填项,指定将要设置的模式,合法值包括 [NONE, RESTORING, MERGING, LOADING]
@@ -114,6 +115,14 @@ Usage: hugegraph [options] [command] [command options]
- graph-get,获取某个图及其存储后端类型
- graph-clear,清除某个图的全部 schema 和 data
- --confirm-message 或者 -c,必填项,删除确认信息,需要手动输入,二次确认防止误删,"I'm sure to delete all data",包括双引号
+- graph-create,使用配置文件创建新图
+ - --name 或者 -n,选填项,新图的名称,默认为 hugegraph
+ - --file 或者 -f,必填项,图配置文件的路径
+- graph-clone,克隆已存在的图
+ - --name 或者 -n,选填项,新克隆图的名称,默认为 hugegraph
+ - --clone-graph-name,选填项,要克隆的源图名称,默认为 hugegraph
+- graph-drop,删除图(不同于 graph-clear,这会完全删除图)
+ - --confirm-message 或者 -c,必填项,确认消息 "I'm sure to drop the graph",包括双引号
> 当需要把备份的图原样恢复到一个新的图中的时候,需要先将图模式设置为 RESTORING 模式;当需要将备份的图合并到已存在的图中时,需要先将图模式设置为 MERGING 模式。
@@ -159,6 +168,7 @@ Usage: hugegraph [options] [command] [command options]
- --huge-types 或者 -t,要备份的数据类型,逗号分隔,可选值为 'all' 或者 一个或多个 [vertex,edge,vertex_label,edge_label,property_key,index_label] 的组合,'all' 代表全部6种类型,即顶点、边和所有schema
- --log 或者 -l,指定日志目录,默认为当前目录
- --retry,指定失败重试次数,默认为 3
+ - --thread-num 或者 -T,使用的线程数,默认为 Math.min(10, Math.max(4, CPUs / 2))
- --split-size 或者 -s,指定在备份时对顶点或者边分块的大小,默认为 1048576
- -D,用 -Dkey=value 的模式指定动态参数,用来备份数据到 HDFS 时,指定 HDFS 的配置项,例如:-Dfs.default.name=hdfs://localhost:9000
- restore,将 JSON 格式存储的 schema 或者 data 恢复到一个新图中(RESTORING 模式)或者合并到已存在的图中(MERGING 模式)
@@ -167,6 +177,7 @@ Usage: hugegraph [options] [command] [command options]
- --huge-types 或者 -t,要恢复的数据类型,逗号分隔,可选值为 'all' 或者 一个或多个 [vertex,edge,vertex_label,edge_label,property_key,index_label] 的组合,'all' 代表全部6种类型,即顶点、边和所有schema
- --log 或者 -l,指定日志目录,默认为当前目录
- --retry,指定失败重试次数,默认为 3
+ - --thread-num 或者 -T,使用的线程数,默认为 Math.min(10, Math.max(4, CPUs / 2))
- -D,用 -Dkey=value 的模式指定动态参数,用来从 HDFS 恢复图时,指定 HDFS 的配置项,例如:-Dfs.default.name=hdfs://localhost:9000
> 只有当 --format 为 json 执行 backup 时,才可以使用 restore 命令恢复
- migrate, 将当前连接的图迁移至另一个 HugeGraphServer 中
@@ -198,9 +209,28 @@ Usage: hugegraph [options] [command] [command options]
- --log 或者 -l,指定日志目录,默认为当前目录
- --retry,指定失败重试次数,默认为 3
- --split-size 或者 -s,指定在备份时对顶点或者边分块的大小,默认为 1048576
- - -D,用 -Dkey=value 的模式指定动态参数,用来备份数据到 HDFS 时,指定 HDFS 的配置项,例如:-Dfs.default.name=hdfs://localhost:9000
+ - -D,用 -Dkey=value 的模式指定动态参数,用来备份数据到 HDFS 时,指定 HDFS 的配置项,例如:-Dfs.default.name=hdfs://localhost:9000
+
+##### 3.7 认证数据备份/恢复类
+
+- auth-backup,备份认证数据到指定目录
+ - --types 或者 -t,要备份的认证数据类型,逗号分隔,可选值为 'all' 或者一个或多个 [user, group, target, belong, access] 的组合,'all' 代表全部5种类型
+ - --directory 或者 -d,备份数据存储目录,默认为当前目录
+ - --log 或者 -l,指定日志目录,默认为当前目录
+ - --retry,指定失败重试次数,默认为 3
+ - --thread-num 或者 -T,使用的线程数,默认为 Math.min(10, Math.max(4, CPUs / 2))
+ - -D,用 -Dkey=value 的模式指定动态参数,用来备份数据到 HDFS 时,指定 HDFS 的配置项,例如:-Dfs.default.name=hdfs://localhost:9000
+- auth-restore,从指定目录恢复认证数据
+ - --types 或者 -t,要恢复的认证数据类型,逗号分隔,可选值为 'all' 或者一个或多个 [user, group, target, belong, access] 的组合,'all' 代表全部5种类型
+ - --directory 或者 -d,备份数据存储目录,默认为当前目录
+ - --log 或者 -l,指定日志目录,默认为当前目录
+ - --retry,指定失败重试次数,默认为 3
+ - --thread-num 或者 -T,使用的线程数,默认为 Math.min(10, Math.max(4, CPUs / 2))
+ - --strategy,冲突处理策略,可选值为 [stop, ignore],默认为 stop。stop 表示遇到冲突时停止恢复,ignore 表示忽略冲突继续恢复
+ - --init-password,恢复用户时设置的初始密码,恢复用户数据时必填
+ - -D,用 -Dkey=value 的模式指定动态参数,用来从 HDFS 恢复数据时,指定 HDFS 的配置项,例如:-Dfs.default.name=hdfs://localhost:9000
-##### 3.7 安装部署类
+##### 3.8 安装部署类
- deploy,一键下载、安装和启动 HugeGraph-Server 和 HugeGraph-Studio
- -v,必填项,指明安装的 HugeGraph-Server 和 HugeGraph-Studio 的版本号,最新的是 0.9
@@ -215,7 +245,7 @@ Usage: hugegraph [options] [command] [command options]
> deploy命令中有可选参数 -u,提供时会使用指定的下载地址替代默认下载地址下载 tar 包,并且将地址写入`~/hugegraph-download-url-prefix`文件中;之后如果不指定地址时,会优先从`~/hugegraph-download-url-prefix`指定的地址下载 tar 包;如果 -u 和`~/hugegraph-download-url-prefix`都没有时,会从默认下载地址进行下载
-##### 3.8 具体命令参数
+##### 3.9 具体命令参数
各子命令的具体参数如下:
@@ -524,7 +554,7 @@ Usage: hugegraph [options] [command] [command options]
```
-##### 3.9 具体命令示例
+##### 3.10 具体命令示例
###### 1. gremlin语句
diff --git a/content/en/_index.html b/content/en/_index.html
index 089beb0b6..c4ecb2f90 100644
--- a/content/en/_index.html
+++ b/content/en/_index.html
@@ -10,45 +10,53 @@
Apache
HugeGraph
-
- Incubating
}}">
Learn More
-
+ }}">
Download
- HugeGraph is a convenient, efficient, and adaptable graph database
- compatible with the Apache TinkerPop3 framework and the Gremlin query language.
+ HugeGraph is a full-stack graph system covering }}">graph database, }}">graph computing, and }}">graph AI.
+ It provides complete graph data processing capabilities from storage and real-time querying to offline analysis, and supports both }}">Gremlin and }}">Cypher query languages.
{{< blocks/link-down color="info" >}}
{{< /blocks/cover >}}
{{% blocks/lead color="primary" %}}
-HugeGraph supports fast import performance in the case of more than 10 billion Vertices and Edges
- Graph, millisecond-level OLTP query capability, and large-scale distributed
- graph processing (OLAP). The main scenarios of HugeGraph include
- correlation search, fraud detection, and knowledge graph.
-
+HugeGraph supports high-speed import and millisecond-level real-time queries for up to hundreds of billions of graph data, with deep integration with big data platforms like Spark and Flink, and can use the }}">HugeGraph toolchain for data import, visualization, and operations.
+In the AI era, combined with Large Language Models (LLMs), it provides powerful graph computing capabilities for intelligent Q&A, recommendation systems, fraud detection, and knowledge graph applications.
{{% /blocks/lead %}}
{{< blocks/section color="dark" >}}
{{% blocks/feature icon="fa-lightbulb" title="Convenient" %}}
-Not only supports Gremlin graph query language and RESTful API but also provides commonly used graph algorithm APIs. To help users easily implement various queries and analyses, HugeGraph has a full range of accessory tools, such as supporting distributed storage, data replication, scaling horizontally, and supports many built-in backends of storage engines.
-
-
+Supports Gremlin graph query language and [**RESTful API**]({{< relref "/docs/clients/restful-api" >}}), and provides commonly used graph retrieval interfaces. It includes a complete set of supporting tools with distributed storage, data replication, horizontal scaling, and multiple built-in backend storage engines for efficient query and analysis workflows.
{{% /blocks/feature %}}
{{% blocks/feature icon="fa-shipping-fast" title="Efficient" %}}
-Has been deeply optimized in graph storage and graph computation. It provides multiple batch import tools that can easily complete the fast-import of tens of billions of data, achieves millisecond-level response for graph retrieval through ameliorated queries, and supports concurrent online and real-time operations for thousands of users.
+Deeply optimized for graph storage and graph computing, it provides [**batch import tools**]({{< relref "/docs/quickstart/toolchain/hugegraph-loader" >}}) to efficiently ingest massive data, achieves millisecond-level graph retrieval latency with optimized queries, and supports concurrent online operations for thousands of users.
{{% /blocks/feature %}}
{{% blocks/feature icon="fa-exchange-alt" title="Adaptable" %}}
-Adapts to the Apache Gremlin standard graph query language and the Property Graph standard modeling method, and both support graph-based OLTP and OLAP schemes. Furthermore, HugeGraph can be integrated with Hadoop and Spark's big data platforms, and easily extend the back-end storage engine through plug-ins.
+Supports the Apache Gremlin standard graph query language and the Property Graph modeling method, with both OLTP and [**OLAP graph computing**]({{< relref "/docs/quickstart/computing/hugegraph-computer" >}}) scenarios. It can be integrated with Hadoop and Spark and can extend backend storage engines through plug-ins.
+{{% /blocks/feature %}}
+
+
+{{% blocks/feature icon="fa-brain" title="AI-Ready" %}}
+Integrates LLM with [**GraphRAG capabilities**]({{< relref "/docs/quickstart/hugegraph-ai" >}}), automated knowledge graph construction, and 20+ built-in graph machine learning algorithms to build AI-driven graph applications.
+{{% /blocks/feature %}}
+
+
+{{% blocks/feature icon="fa-expand-arrows-alt" title="Scalable" %}}
+Supports horizontal scaling and distributed deployment, seamlessly migrating from standalone to PB-level clusters, and provides [**distributed storage engine**]({{< relref "/docs/quickstart/hugegraph/hugegraph-hstore" >}}) options for different scale and performance requirements.
+{{% /blocks/feature %}}
+
+
+{{% blocks/feature icon="fa-puzzle-piece" title="Open Ecosystem" %}}
+Adheres to Apache TinkerPop standards, provides [**multi-language clients**]({{< relref "/docs/quickstart/client/hugegraph-client" >}}), is compatible with mainstream big data platforms, and is backed by an active and evolving community.
{{% /blocks/feature %}}
@@ -57,7 +65,7 @@ Apache
{{< blocks/section color="blue-deep">}}
-
The first graph database project in Apache
+The First Apache Foundation Top-Level Graph Project
{{< /blocks/section >}}
@@ -66,19 +74,19 @@ The first graph database project in Apache
{{< blocks/section >}}
{{% blocks/feature icon="far fa-tools" title="Get The **Toolchain**" %}}
-[It](https://github.com/apache/incubator-hugegraph-toolchain) includes graph loader & dashboard & backup tools
+[It](https://github.com/apache/hugegraph-toolchain) includes graph loader & dashboard & backup tools
{{% /blocks/feature %}}
-{{% blocks/feature icon="fab fa-github" title="Efficient" url="https://github.com/apache/incubator-hugegraph" %}}
-We do a [Pull Request](https://github.com/apache/incubator-hugegraph/pulls) contributions workflow on **GitHub**. New users are always welcome!
+{{% blocks/feature icon="fab fa-github" title="Efficient" url="https://github.com/apache/hugegraph" %}}
+We do a [Pull Request](https://github.com/apache/hugegraph/pulls) contributions workflow on **GitHub**. New users are always welcome!
{{% /blocks/feature %}}
-{{% blocks/feature icon="fab fa-weixin" title="Follow us on Wechat!" url="https://twitter.com/apache-hugegraph" %}}
-Follow the official account "HugeGraph" to get the latest news
+{{% blocks/feature icon="fab fa-slack" title="Join us on Slack!" url="https://the-asf.slack.com/archives/C059UU2FJ23" %}}
+Join the [ASF Slack channel](https://the-asf.slack.com/archives/C059UU2FJ23) for community discussions
-PS: twitter account it's on the way
+You can also follow the WeChat account "HugeGraph" for updates
{{% /blocks/feature %}}
diff --git a/content/en/blog/hugegraph-ai/agentic_graphrag.md b/content/en/blog/hugegraph-ai/agentic_graphrag.md
index ac1112871..9b92437d3 100644
--- a/content/en/blog/hugegraph-ai/agentic_graphrag.md
+++ b/content/en/blog/hugegraph-ai/agentic_graphrag.md
@@ -1,7 +1,7 @@
---
date: 2025-10-29
title: "Agentic GraphRAG"
-linkTitle: "Agentic GraphRAG"
+linkTitle: "Agentic GraphRAG: A Modular Architecture Practice"
---
# Project Background
diff --git a/content/en/blog/hugegraph/toplingdb/toplingdb-quick-start.md b/content/en/blog/hugegraph/toplingdb/toplingdb-quick-start.md
index 3393a26cd..599be5000 100644
--- a/content/en/blog/hugegraph/toplingdb/toplingdb-quick-start.md
+++ b/content/en/blog/hugegraph/toplingdb/toplingdb-quick-start.md
@@ -136,5 +136,5 @@ Caused by: org.rocksdb.RocksDBException: While lock file: rocksdb-data/data/m/LO
## Related Documentation
- [ToplingDB YAML Configuration Explained](/blog/2025/09/30/toplingdb-yaml-configuration-file/) – Understand each parameter in the config file
-- [HugeGraph Configuration Guide](/docs/config/config-option/) – Reference for core HugeGraph settings
+- [HugeGraph Configuration Guide](/docs/config/config-option) – Reference for core HugeGraph settings
- [ToplingDB GitHub Repository](https://github.com/topling/toplingdb) – Official docs and latest updates
diff --git a/content/en/community/_index.md b/content/en/community/_index.md
index cdade1630..1ae39a248 100644
--- a/content/en/community/_index.md
+++ b/content/en/community/_index.md
@@ -5,4 +5,4 @@ menu:
weight: 40
---
-
+Visit the [Project Maturity]({{< relref "maturity" >}}) assessment.
diff --git a/maturity.md b/content/en/community/maturity.md
similarity index 76%
rename from maturity.md
rename to content/en/community/maturity.md
index 6f0d7e3c2..5ff25f46e 100644
--- a/maturity.md
+++ b/content/en/community/maturity.md
@@ -1,4 +1,10 @@
-# Maturity Assessment for Apache HugeGraph (incubating)
+---
+title: Maturity
+description: Apache HugeGraph maturity assessment
+weight: 50
+---
+
+# Maturity Assessment for Apache HugeGraph
The goals of this maturity model are to describe how Apache projects operate in a concise and high-level way, and to provide a basic framework that projects may choose to use to evaluate themselves.
@@ -14,37 +20,37 @@ The following table is filled according to the [Apache Maturity Model](https://c
### CODE
-| **ID** | **Description** | **Status** |
-| -------- | ----- | ---------- |
+| **ID** | **Description** | **Status** |
+| -------- | --------------- | ---------- |
| **CD10** | The project produces Open Source software for distribution to the public, at no charge. | **YES** The project source code is licensed under the `Apache License 2.0`. |
-| **CD20** | Anyone can easily discover and access the project's code.. | **YES** The [official website](https://hugegraph.apache.org/) includes a link to the [GitHub repository](https://github.com/apache/hugegraph). |
+| **CD20** | Anyone can easily discover and access the project's code. | **YES** The [official website](https://hugegraph.apache.org/) includes a link to the [GitHub repository](https://github.com/apache/hugegraph). |
| **CD30** | Anyone using standard, widely-available tools, can build the code in a reproducible way. | **YES** Apache HugeGraph provides a [Quick Start](https://hugegraph.apache.org/docs/quickstart/hugegraph/hugegraph-server/) document that explains how to compile the source code. |
-| **CD40** | The full history of the project's code is available via a source code control system, in a way that allows anyone to recreate any released version. _ | **YES** The project uses Git, and anyone can view the full history of the project via commit logs and tags for each release. |
+| **CD40** | The full history of the project's code is available via a source code control system, in a way that allows anyone to recreate any released version. | **YES** The project uses Git, and anyone can view the full history of the project via commit logs and tags for each release. |
| **CD50** | The source code control system establishes the provenance of each line of code in a reliable way, based on strong authentication of the committer. When third parties contribute code, commit messages provide reliable information about the code provenance. | **YES** The project uses GitHub managed by Apache Infra, ensuring provenance of each line of code to a committer. Third-party contributions are accepted via pull requests in accordance with the [Contribution Guidelines](https://hugegraph.apache.org/docs/contribution-guidelines/).|
### LICENSE
-| **ID** | **Description** | **Status** |
+| **ID** | **Description** | **Status** |
| -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------- |
| **LC10** | The Apache License, version 2.0, covers the released code. | **YES** The [LICENSE](https://github.com/apache/hugegraph/blob/master/LICENSE) file is present in the source repository. All source files contain the APLv2 header, which is checked by the Apache Rat plugin during builds. |
-| **LC20** | Libraries that are mandatory dependencies of the project's code do not create more restrictions than the Apache License does. _ | **YES** All dependencies have been checked and none of them create more restrictions than the Apache License does. |
-| **LC30** | The libraries mentioned in LC20 are available as Open Source software. _ | **YES** Dependencies are available in public Maven repositories. |
+| **LC20** | Libraries that are mandatory dependencies of the project's code do not create more restrictions than the Apache License does. | **YES** All dependencies have been checked and none of them create more restrictions than the Apache License does. |
+| **LC30** | The libraries mentioned in LC20 are available as Open Source software. | **YES** Dependencies are available in public Maven repositories. |
| **LC40** | Committers are bound by an Individual Contributor Agreement (the "Apache iCLA") that defines which code they may commit and how they need to identify code that is not their own. | **YES** All committers have a signed iCLA on file with the ASF. |
| **LC50** | The project clearly defines and documents the copyright ownership of everything that the project produces. | **YES** This is documented via copyright notices in the source files and the NOTICE file. |
### Releases
-| **ID** | **Description** _ | **Status** |
+| **ID** | **Description** | **Status** |
| -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------- |
-| **RE10** | Releases consist of source code, distributed using standard and open archive formats that are expected to stay readable in the long term. | **YES** Source releases are distributed via [dist.apache.org](https://dist.apache.org/repos/dist/release/incubator/hugegraph/) and linked from the website's [download page](https://hugegraph.apache.org/docs/download/download/). |
+| **RE10** | Releases consist of source code, distributed using standard and open archive formats that are expected to stay readable in the long term. | **YES** Source releases are distributed via [dist.apache.org](https://dist.apache.org/repos/dist/release/hugegraph/) and linked from the website's [download page](https://hugegraph.apache.org/docs/download/download/). |
| **RE20** | The project's PMC (Project Management Committee, see CS10) approves each software release in order to make the release an act of the Foundation. | **YES** All releases are voted on by the PMC on the dev@hugegraph.apache.org mailing list. |
-| **RE30** | Releases are signed and/or distributed along with digests that anyone can reliably use to validate the downloaded archives. | **YES** All releases are signed by the release manager and distributed with checksums. The [KEYS](https://downloads.apache.org/incubator/hugegraph/KEYS) file is available for verification. |
+| **RE30** | Releases are signed and/or distributed along with digests that anyone can reliably use to validate the downloaded archives. | **YES** All releases are signed by the release manager and distributed with checksums. The [KEYS](https://downloads.apache.org/hugegraph/KEYS) file is available for verification. |
| **RE40** | The project can distribute convenience binaries alongside source code, but they are not Apache Releases, they are provided with no guarantee. | **YES** The project provides convenience binaries, but only the source code archive is an official Apache release. |
-| **RE50** | The project documents a repeatable release process so that someone new to the project can independently generate the complete set of artifacts required for a release. | **YES** The project documents its release process in the [How to Release](https://github.com/apache/incubator-hugegraph/wiki/ASF-Release-Guidance-V2.0) guide. |
+| **RE50** | The project documents a repeatable release process so that someone new to the project can independently generate the complete set of artifacts required for a release. | **YES** The project documents its release process in the [How to Release](https://github.com/apache/hugegraph/wiki/ASF-Release-Guidance-V2.0) guide. |
### Quality
-| **ID** | **Description** | **Status** |
+| **ID** | **Description** | **Status** |
| -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------- |
| **QU10** | The project is open and honest about the quality of its code. Various levels of quality and maturity for various modules are natural and acceptable as long as they are clearly communicated. | **YES** Users are encouraged to [report issues](https://github.com/apache/hugegraph/issues) on GitHub, and all discussions are public. |
| **QU20** | The project puts a very high priority on producing secure software. | **YES** All reported security issues are treated with high priority. |
@@ -54,7 +60,7 @@ The following table is filled according to the [Apache Maturity Model](https://c
### Community
-| **ID** | **Description** | **Status** |
+| **ID** | **Description** | **Status** |
| -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------- |
| **CO10** | The project has a well-known homepage that points to all the information required to operate according to this maturity model. | **YES** The [website](https://hugegraph.apache.org/) serves as the central point for project information. |
| **CO20** | The community welcomes contributions from anyone who acts in good faith and in a respectful manner, and who adds value to the project. | **YES** Apache HugeGraph has a [Contribution Guidelines](https://hugegraph.apache.org/docs/contribution-guidelines/) page and welcomes all valuable contributions. |
@@ -66,17 +72,17 @@ The following table is filled according to the [Apache Maturity Model](https://c
### Consensus
-| **ID** | **Description** | **Status** |
+| **ID** | **Description** | **Status** |
| -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------- |
-| **CS10** | The project maintains a public list of its contributors who have decision power. The project's PMC (Project Management Committee) consists of those contributors. | **YES** The website maintains a public list of all [PMC members and committers](https://incubator.apache.org/clutch/hugegraph.html). |
+| **CS10** | The project maintains a public list of its contributors who have decision power. The project's PMC (Project Management Committee) consists of those contributors. | **YES** Project governance information is publicly available via ASF committee and roster pages, such as [committee info](https://projects.apache.org/committee.html?hugegraph). |
| **CS20** | Decisions require a consensus among PMC members and are documented on the project's main communications channel. The PMC takes community opinions into account, but the PMC has the final word. | **YES** All decisions are made by votes on the dev@hugegraph.apache.org mailing list, requiring at least three +1 votes from PMC members and no vetos. |
| **CS30** | The project uses documented voting rules to build consensus when discussion is not sufficient. | **YES** The project uses the standard ASF voting rules. |
-| **CS40** |In Apache projects, vetoes are only valid for code commits. The person exercising the veto must justify it with a technical explanation, as per the Apache voting rules defined in CS30. | **YES** The HugeGraph community follows this principle. |
+| **CS40** | In Apache projects, vetoes are only valid for code commits. The person exercising the veto must justify it with a technical explanation, as per the Apache voting rules defined in CS30. | **YES** The HugeGraph community follows this principle. |
| **CS50** | All "important" discussions happen asynchronously in written form on the project's main communications channel. Offline, face-to-face or private discussions that affect the project are also documented on that channel. | **YES** All important discussions and decisions are documented on the public mailing list for transparency and accessibility. |
### Independence
-| **ID** | **Description** | **Status** |
+| **ID** | **Description** | **Status** |
| -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------- |
| **IN10** | The project is independent from any corporate or organizational influence. | **YES** The PMC members and committers of Apache HugeGraph are from 10+ different companies/college/institution, ensuring no single entity controls the project. |
| **IN20** | Contributors act as themselves, not as representatives of a corporation or organization. | **YES** Contributions are made by individuals on behalf of the project and community, not their employers. |
diff --git a/content/en/docs/SUMMARY.md b/content/en/docs/SUMMARY.md
index 0e004bfa4..44321bcfe 100644
--- a/content/en/docs/SUMMARY.md
+++ b/content/en/docs/SUMMARY.md
@@ -1,69 +1,66 @@
# HugeGraph Docs
-* [Download](download.md)
+* [Download](download/download)
## Quickstart
-* [Install HugeGraph-Server](quickstart/hugegraph-server.md)
-* [Load data with HugeGraph-Loader](quickstart/hugegraph-loader.md)
-* [Visual with HugeGraph-Hubble](quickstart/hugegraph-hubble.md)
-* [Develop with HugeGraph-Client](quickstart/hugegraph-client.md)
-* [Manage with HugeGraph-Tools](quickstart/hugegraph-tools.md)
-* [Analysis with HugeGraph-Computer](quickstart/hugegraph-computer.md)
-* [Display with HugeGraph-Studio](quickstart/hugegraph-studio.md)
+* [Install HugeGraph-Server](quickstart/hugegraph/hugegraph-server)
+* [Load data with HugeGraph-Loader](quickstart/toolchain/hugegraph-loader)
+* [Visual with HugeGraph-Hubble](quickstart/toolchain/hugegraph-hubble)
+* [Develop with HugeGraph-Client](quickstart/client/hugegraph-client)
+* [Manage with HugeGraph-Tools](quickstart/toolchain/hugegraph-tools)
+* [Analysis with HugeGraph-Computer](quickstart/computing/hugegraph-computer)
## Config
-* [Config Guide](config/config-guide.md)
-* [Config Options](config/config-option.md)
-* [Config Authentication](config/config-authentication.md)
-* [Config HTTPS](config/config-https.md)
-* [Config Computer](config/config-computer.md)
-
+* [Config Guide](config/config-guide)
+* [Config Options](config/config-option)
+* [Config Authentication](config/config-authentication)
+* [Config HTTPS](config/config-https)
+* [Config Computer](quickstart/computing/hugegraph-computer)
## API
-* [RESTful API](clients/hugegraph-api.md)
- * [Schema](clients/restful-api/schema.md)
- * [PropertyKey](clients/restful-api/propertykey.md)
- * [VertexLabel](clients/restful-api/vertexlabel.md)
- * [EdgeLabel](clients/restful-api/edgelabel.md)
- * [IndexLabel](clients/restful-api/indexlabel.md)
- * [Rebuild](clients/restful-api/rebuild.md)
- * [Vertex](clients/restful-api/vertex.md)
- * [Edge](clients/restful-api/edge.md)
- * [Traverser](clients/restful-api/traverser.md)
- * [Rank](clients/restful-api/rank.md)
- * [Variable](clients/restful-api/variable.md)
- * [Graphs](clients/restful-api/graphs.md)
- * [Task](clients/restful-api/task.md)
- * [Gremlin](clients/restful-api/gremlin.md)
- * [Cypher](clients/restful-api/cypher.md)
- * [Authentication](clients/restful-api/auth.md)
- * [Other](clients/restful-api/other.md)
-* [Java Client](clients/hugegraph-client.md)
-* [Gremlin Console](clients/gremlin-console.md)
+* [RESTful API](clients/restful-api)
+ * [Schema](clients/restful-api/schema)
+ * [PropertyKey](clients/restful-api/propertykey)
+ * [VertexLabel](clients/restful-api/vertexlabel)
+ * [EdgeLabel](clients/restful-api/edgelabel)
+ * [IndexLabel](clients/restful-api/indexlabel)
+ * [Rebuild](clients/restful-api/rebuild)
+ * [Vertex](clients/restful-api/vertex)
+ * [Edge](clients/restful-api/edge)
+ * [Traverser](clients/restful-api/traverser)
+ * [Rank](clients/restful-api/rank)
+ * [Variable](clients/restful-api/variable)
+ * [Graphs](clients/restful-api/graphs)
+ * [Task](clients/restful-api/task)
+ * [Gremlin](clients/restful-api/gremlin)
+ * [Cypher](clients/restful-api/cypher)
+ * [Authentication](clients/restful-api/auth)
+ * [Other](clients/restful-api/other)
+* [Java Client](clients/hugegraph-client)
+* [Gremlin Console](clients/gremlin-console)
## Guides
-* [Architecture Overview](guides/architectural.md)
-* [Design Concepts](guides/desgin-concept.md)
-* [Custom Plugins](guides/custom-plugin.md)
-* [Backup Restore](guides/backup-restore.md)
-* [FAQ](guides/faq.md)
+* [Architecture Overview](guides/architectural)
+* [Design Concepts](guides/desgin-concept)
+* [Custom Plugins](guides/custom-plugin)
+* [Backup Restore](guides/backup-restore)
+* [FAQ](guides/faq)
## Query Language
-* [Gremlin Query Language](language/hugegraph-gremlin.md)
-* [HugeGraph Examples](language/hugegraph-example.md)
+* [Gremlin Query Language](language/hugegraph-gremlin)
+* [HugeGraph Examples](language/hugegraph-example)
## Performance
-* [HugeGraph Benchmark Performance](performance/hugegraph-benchmark-0.5.6.md)
-* [HugeGraph API Performance—Outdated](content/cn/docs/performance/api-preformance/_index.md)
- * [v0.5.6 Stand-alone(RocksDB)](content/cn/docs/performance/api-preformance/hugegraph-api-0.5.6-RocksDB.md)
- * [v0.5.6 Cluster(Cassandra)](content/cn/docs/performance/api-preformance/hugegraph-api-0.5.6-Cassandra.md)
- * [v0.4.4](content/cn/docs/performance/api-preformance/hugegraph-api-0.4.4.md)
- * [v0.2](content/cn/docs/performance/api-preformance/hugegraph-api-0.2.md)
-* [HugeGraph-Loader Performance](performance/hugegraph-loader-performance.md)
+* [HugeGraph Benchmark Performance](performance/hugegraph-benchmark-0.5.6)
+* [HugeGraph API Performance—Outdated](performance/api-performance)
+ * [v0.5.6 Stand-alone(RocksDB)](performance/api-performance/hugegraph-api-0.5.6-rocksdb)
+ * [v0.5.6 Cluster(Cassandra)](performance/api-performance/hugegraph-api-0.5.6-cassandra)
+ * [v0.4.4](performance/api-performance/hugegraph-api-0.4.4)
+ * [v0.2](performance/api-performance/hugegraph-api-0.2)
+* [HugeGraph-Loader Performance](performance/hugegraph-loader-performance)
## ChangeLogs
-* [Release-1.3.0](changelog/hugegraph-1.3.0-release-notes.md)
-* [Release-1.2.0](changelog/hugegraph-1.2.0-release-notes.md)
-* [Release-1.0.0](changelog/hugegraph-1.0.0-release-notes.md)
-* [Release-0.12.0](changelog/hugegraph-0.12.0-release-notes.md)
-
+* [Release-1.3.0](changelog/hugegraph-1.3.0-release-notes)
+* [Release-1.2.0](changelog/hugegraph-1.2.0-release-notes)
+* [Release-1.0.0](changelog/hugegraph-1.0.0-release-notes)
+* [Release-0.12.0](changelog/hugegraph-0.12.0-release-notes)
diff --git a/content/en/docs/_index.md b/content/en/docs/_index.md
index bdfa1b9ac..764881c01 100755
--- a/content/en/docs/_index.md
+++ b/content/en/docs/_index.md
@@ -7,4 +7,52 @@ menu:
weight: 20
---
-Welcome to HugeGraph docs
+## Apache HugeGraph Documentation
+
+Apache HugeGraph is a complete graph database ecosystem, supporting OLTP real-time queries, OLAP offline analysis, and AI intelligent applications.
+
+### Quick Navigation by Scenario
+
+| I want to... | Start here |
+|----------|-----------|
+| **Run graph queries** (OLTP) | [HugeGraph Server Quickstart](quickstart/hugegraph/hugegraph-server) |
+| **Large-scale graph computing** (OLAP) | [Graph Computing Engine](quickstart/computing/hugegraph-computer) |
+| **Build AI/RAG applications** | [HugeGraph-AI](quickstart/hugegraph-ai/quick_start) |
+| **Batch import data** | [HugeGraph Loader](quickstart/toolchain/hugegraph-loader) |
+| **Visualize and manage graphs** | [Hubble Web UI](quickstart/toolchain/hugegraph-hubble) |
+
+### Ecosystem Overview
+
+```
+┌─────────────────────────────────────────────────────────────────┐
+│ Apache HugeGraph Ecosystem │
+├─────────────────────────────────────────────────────────────────┤
+│ ┌─────────────┐ ┌─────────────┐ ┌─────────────────────────┐ │
+│ │ HugeGraph │ │ HugeGraph │ │ HugeGraph-AI │ │
+│ │ Server │ │ Computer │ │ (GraphRAG/ML/Python) │ │
+│ │ (OLTP) │ │ (OLAP) │ │ │ │
+│ └─────────────┘ └─────────────┘ └─────────────────────────┘ │
+│ │ │ │ │
+│ ┌──────┴───────────────┴────────────────────┴──────────────┐ │
+│ │ HugeGraph Toolchain │ │
+│ │ Hubble (UI) | Loader | Client (Java/Go/Python) | Tools │ │
+│ └───────────────────────────────────────────────────────────┘ │
+└─────────────────────────────────────────────────────────────────┘
+```
+
+### Core Components
+
+- **HugeGraph Server** - Core graph database with REST API + Gremlin + Cypher support
+- **HugeGraph Toolchain** - Client SDKs, data import, visualization, and operational tools
+- **HugeGraph Computer** - Distributed graph computing (Vermeer high-performance in-memory / Computer massive external storage)
+- **HugeGraph-AI** - GraphRAG, knowledge graph construction, 20+ graph ML algorithms
+
+### Deployment Modes
+
+| Mode | Use Case | Data Scale |
+|-----|---------|---------|
+| **Standalone** | High-speed stable, compute-storage integrated | < 4TB |
+| **Distributed** | Massive storage, compute-storage separated | < 1000TB |
+| **Docker** | Quick start | Any |
+
+[📖 Detailed Introduction](introduction/)
diff --git a/content/en/docs/changelog/hugegraph-0.12.0-release-notes.md b/content/en/docs/changelog/hugegraph-0.12.0-release-notes.md
index c093dc916..92e7928c1 100644
--- a/content/en/docs/changelog/hugegraph-0.12.0-release-notes.md
+++ b/content/en/docs/changelog/hugegraph-0.12.0-release-notes.md
@@ -2,7 +2,7 @@
title: "HugeGraph 0.12 Release Notes"
linkTitle: "Release-0.12.0"
draft: true
-weight: 1
+weight: 11
---
### API & Client
diff --git a/content/en/docs/changelog/hugegraph-1.0.0-release-notes.md b/content/en/docs/changelog/hugegraph-1.0.0-release-notes.md
index ece52feb1..16e5949ab 100644
--- a/content/en/docs/changelog/hugegraph-1.0.0-release-notes.md
+++ b/content/en/docs/changelog/hugegraph-1.0.0-release-notes.md
@@ -1,7 +1,7 @@
---
title: "HugeGraph 1.0.0 Release Notes"
linkTitle: "Release-1.0.0"
-weight: 2
+weight: 9
---
### OLTP API & Client Changes
@@ -169,7 +169,7 @@ weight: 2
Please check the release details in each repository:
-- [Server Release Notes](https://github.com/apache/incubator-hugegraph/releases/tag/1.0.0)
-- [Toolchain Release Notes](https://github.com/apache/incubator-hugegraph-toolchain/releases/tag/1.0.0)
-- [Computer Release Notes](https://github.com/apache/incubator-hugegraph-computer/releases/tag/1.0.0)
-- [Commons Release Notes](https://github.com/apache/incubator-hugegraph-commons/releases/tag/1.0.0)
+- [Server Release Notes](https://github.com/apache/hugegraph/releases/tag/1.0.0)
+- [Toolchain Release Notes](https://github.com/apache/hugegraph-toolchain/releases/tag/1.0.0)
+- [Computer Release Notes](https://github.com/apache/hugegraph-computer/releases/tag/1.0.0)
+- [Commons Release Notes](https://github.com/apache/hugegraph-commons/releases/tag/1.0.0)
diff --git a/content/en/docs/changelog/hugegraph-1.2.0-release-notes.md b/content/en/docs/changelog/hugegraph-1.2.0-release-notes.md
index 813674f4b..bc130fe66 100644
--- a/content/en/docs/changelog/hugegraph-1.2.0-release-notes.md
+++ b/content/en/docs/changelog/hugegraph-1.2.0-release-notes.md
@@ -1,7 +1,7 @@
---
title: "HugeGraph 1.2.0 Release Notes"
linkTitle: "Release-1.2.0"
-weight: 3
+weight: 7
---
### Java version statement
@@ -11,205 +11,205 @@ weight: 3
1. hugegraph, hugegraph-toolchain, hugegraph-commons consider use Java 11, also compatible with Java 8 now.
2. hugegraph-computer required to use Java 11, **not compatible with Java 8 now!**
-**v1.2.0 may be the last major version compatible with Java 8**, compatibility with Java 8 will totally end in v1.5 when [PD/Store](https://github.com/apache/incubator-hugegraph/issues/2265) merged into master branch (Except for the `java-client`).
+**v1.2.0 may be the last major version compatible with Java 8**, compatibility with Java 8 will totally end in v1.5 when [PD/Store](https://github.com/apache/hugegraph/issues/2265) merged into master branch (Except for the `java-client`).
### hugegraph
#### API Changes
-- feat(api&core): in oltp apis, add statistics info and support full info about vertices and edges ([#2262](https://github.com/apache/incubator-hugegraph/pull/2262))
-- feat(api): support embedded arthas agent in hugegraph-server ([#2278](https://github.com/apache/incubator-hugegraph/pull/2278),[#2337](https://github.com/apache/incubator-hugegraph/pull/2337))
-- feat(api): support metric API Prometheus format & add statistic metric api ([#2286](https://github.com/apache/incubator-hugegraph/pull/2286))
-- feat(api-core): support label & property filtering for both edge and vertex & support kout dfs mode ([#2295](https://github.com/apache/incubator-hugegraph/pull/2295))
-- feat(api): support recording slow query log ([#2327](https://github.com/apache/incubator-hugegraph/pull/2327))
+- feat(api&core): in oltp apis, add statistics info and support full info about vertices and edges ([#2262](https://github.com/apache/hugegraph/pull/2262))
+- feat(api): support embedded arthas agent in hugegraph-server ([#2278](https://github.com/apache/hugegraph/pull/2278),[#2337](https://github.com/apache/hugegraph/pull/2337))
+- feat(api): support metric API Prometheus format & add statistic metric api ([#2286](https://github.com/apache/hugegraph/pull/2286))
+- feat(api-core): support label & property filtering for both edge and vertex & support kout dfs mode ([#2295](https://github.com/apache/hugegraph/pull/2295))
+- feat(api): support recording slow query log ([#2327](https://github.com/apache/hugegraph/pull/2327))
#### Feature Changes
-- feat: support task auto manage by server role state machine ([#2130](https://github.com/apache/incubator-hugegraph/pull/2130))
-- feat: support parallel compress snapshot ([#2136](https://github.com/apache/incubator-hugegraph/pull/2136))
-- feat: use an enhanced CypherAPI to refactor it ([#2143](https://github.com/apache/incubator-hugegraph/pull/2143))
-- feat(perf): support JMH benchmark in HG-test module ([#2238](https://github.com/apache/incubator-hugegraph/pull/2238))
-- feat: optimising adjacency edge queries ([#2242](https://github.com/apache/incubator-hugegraph/pull/2242))
-- Feat: IP white list ([#2299](https://github.com/apache/incubator-hugegraph/pull/2299))
-- feat(cassandra): adapt cassandra from 3.11.12 to 4.0.10 ([#2300](https://github.com/apache/incubator-hugegraph/pull/2300))
-- feat: support Cassandra with docker-compose in server ([#2307](https://github.com/apache/incubator-hugegraph/pull/2307))
-- feat(core): support batch+parallel edges traverse ([#2312](https://github.com/apache/incubator-hugegraph/pull/2312))
-- feat: adapt Dockerfile for new project structur ([#2344](https://github.com/apache/incubator-hugegraph/pull/2344))
-- feat(server):swagger support auth for standardAuth mode by ([#2360](https://github.com/apache/incubator-hugegraph/pull/2360))
-- feat(core): add IntMapByDynamicHash V1 implement ([#2377](https://github.com/apache/incubator-hugegraph/pull/2377))
+- feat: support task auto manage by server role state machine ([#2130](https://github.com/apache/hugegraph/pull/2130))
+- feat: support parallel compress snapshot ([#2136](https://github.com/apache/hugegraph/pull/2136))
+- feat: use an enhanced CypherAPI to refactor it ([#2143](https://github.com/apache/hugegraph/pull/2143))
+- feat(perf): support JMH benchmark in HG-test module ([#2238](https://github.com/apache/hugegraph/pull/2238))
+- feat: optimising adjacency edge queries ([#2242](https://github.com/apache/hugegraph/pull/2242))
+- Feat: IP white list ([#2299](https://github.com/apache/hugegraph/pull/2299))
+- feat(cassandra): adapt cassandra from 3.11.12 to 4.0.10 ([#2300](https://github.com/apache/hugegraph/pull/2300))
+- feat: support Cassandra with docker-compose in server ([#2307](https://github.com/apache/hugegraph/pull/2307))
+- feat(core): support batch+parallel edges traverse ([#2312](https://github.com/apache/hugegraph/pull/2312))
+- feat: adapt Dockerfile for new project structure ([#2344](https://github.com/apache/hugegraph/pull/2344))
+- feat(server):swagger support auth for standardAuth mode by ([#2360](https://github.com/apache/hugegraph/pull/2360))
+- feat(core): add IntMapByDynamicHash V1 implement ([#2377](https://github.com/apache/hugegraph/pull/2377))
#### Bug Fix
-- fix: transfer add_peer/remove_peer command to leader ([#2112](https://github.com/apache/incubator-hugegraph/pull/2112))
-- fix query dirty edges of a vertex with cache ([#2166](https://github.com/apache/incubator-hugegraph/pull/2166))
-- fix exception of vertex-drop with index ([#2181](https://github.com/apache/incubator-hugegraph/pull/2181))
-- fix: remove dup 'From' in filterExpiredResultFromFromBackend ([#2207](https://github.com/apache/incubator-hugegraph/pull/2207))
-- fix: jdbc ssl mode parameter redundant ([#2224](https://github.com/apache/incubator-hugegraph/pull/2224))
-- fix: error when start gremlin-console with sample script ([#2231](https://github.com/apache/incubator-hugegraph/pull/2231))
-- fix(core): support order by id ([#2233](https://github.com/apache/incubator-hugegraph/pull/2233))
-- fix: update ssl_mode value ([#2235](https://github.com/apache/incubator-hugegraph/pull/2235))
-- fix: optimizing ClassNotFoundException error message for MYSQL ([#2246](https://github.com/apache/incubator-hugegraph/pull/2246))
-- fix: asf invalid notification scheme 'discussions_status' ([#2247](https://github.com/apache/incubator-hugegraph/pull/2247))
-- fix: asf invalid notification scheme 'discussions_comment' ([#2250](https://github.com/apache/incubator-hugegraph/pull/2250))
-- fix: incorrect use of 'NO_LIMIT' variable ([#2253](https://github.com/apache/incubator-hugegraph/pull/2253))
-- fix(core): close flat mapper iterator after usage ([#2281](https://github.com/apache/incubator-hugegraph/pull/2281))
-- fix(dist): avoid var PRELOAD cover environmnet vars ([#2302](https://github.com/apache/incubator-hugegraph/pull/2302))
-- fix: base-ref/head-ref missed in dependency-review on master ([#2308](https://github.com/apache/incubator-hugegraph/pull/2308))
-- fix(core): handle schema Cache expandCapacity concurrent problem ([#2332](https://github.com/apache/incubator-hugegraph/pull/2332))
-- fix: in wait-storage.sh, always wait for storage with default rocksdb ([#2333](https://github.com/apache/incubator-hugegraph/pull/2333))
-- fix(api): refactor/downgrade record logic for slow log ([#2347](https://github.com/apache/incubator-hugegraph/pull/2347))
-- fix(api): clean some code for release ([#2348](https://github.com/apache/incubator-hugegraph/pull/2348))
-- fix: remove redirect-to-master from synchronous Gremlin API ([#2356](https://github.com/apache/incubator-hugegraph/pull/2356))
-- fix HBase PrefixFilter bug ([#2364](https://github.com/apache/incubator-hugegraph/pull/2364))
-- chore: fix curl failed to request https urls ([#2378](https://github.com/apache/incubator-hugegraph/pull/2378))
-- fix(api): correct the vertex id in the edge-existence api ([#2380](https://github.com/apache/incubator-hugegraph/pull/2380))
-- fix: github action build docker image failed during the release 1.2 process ([#2386](https://github.com/apache/incubator-hugegraph/pull/2386))
-- fix: TinkerPop unit test lack some lables ([#2387](https://github.com/apache/incubator-hugegraph/pull/2387))
+- fix: transfer add_peer/remove_peer command to leader ([#2112](https://github.com/apache/hugegraph/pull/2112))
+- fix query dirty edges of a vertex with cache ([#2166](https://github.com/apache/hugegraph/pull/2166))
+- fix exception of vertex-drop with index ([#2181](https://github.com/apache/hugegraph/pull/2181))
+- fix: remove dup 'From' in filterExpiredResultFromFromBackend ([#2207](https://github.com/apache/hugegraph/pull/2207))
+- fix: jdbc ssl mode parameter redundant ([#2224](https://github.com/apache/hugegraph/pull/2224))
+- fix: error when start gremlin-console with sample script ([#2231](https://github.com/apache/hugegraph/pull/2231))
+- fix(core): support order by id ([#2233](https://github.com/apache/hugegraph/pull/2233))
+- fix: update ssl_mode value ([#2235](https://github.com/apache/hugegraph/pull/2235))
+- fix: optimizing ClassNotFoundException error message for MYSQL ([#2246](https://github.com/apache/hugegraph/pull/2246))
+- fix: asf invalid notification scheme 'discussions_status' ([#2247](https://github.com/apache/hugegraph/pull/2247))
+- fix: asf invalid notification scheme 'discussions_comment' ([#2250](https://github.com/apache/hugegraph/pull/2250))
+- fix: incorrect use of 'NO_LIMIT' variable ([#2253](https://github.com/apache/hugegraph/pull/2253))
+- fix(core): close flat mapper iterator after usage ([#2281](https://github.com/apache/hugegraph/pull/2281))
+- fix(dist): avoid var PRELOAD cover environment vars ([#2302](https://github.com/apache/hugegraph/pull/2302))
+- fix: base-ref/head-ref missed in dependency-review on master ([#2308](https://github.com/apache/hugegraph/pull/2308))
+- fix(core): handle schema Cache expandCapacity concurrent problem ([#2332](https://github.com/apache/hugegraph/pull/2332))
+- fix: in wait-storage.sh, always wait for storage with default rocksdb ([#2333](https://github.com/apache/hugegraph/pull/2333))
+- fix(api): refactor/downgrade record logic for slow log ([#2347](https://github.com/apache/hugegraph/pull/2347))
+- fix(api): clean some code for release ([#2348](https://github.com/apache/hugegraph/pull/2348))
+- fix: remove redirect-to-master from synchronous Gremlin API ([#2356](https://github.com/apache/hugegraph/pull/2356))
+- fix HBase PrefixFilter bug ([#2364](https://github.com/apache/hugegraph/pull/2364))
+- chore: fix curl failed to request https urls ([#2378](https://github.com/apache/hugegraph/pull/2378))
+- fix(api): correct the vertex id in the edge-existence api ([#2380](https://github.com/apache/hugegraph/pull/2380))
+- fix: github action build docker image failed during the release 1.2 process ([#2386](https://github.com/apache/hugegraph/pull/2386))
+- fix: TinkerPop unit test lack some labels ([#2387](https://github.com/apache/hugegraph/pull/2387))
#### Option Changes
-- feat(dist): support pre-load test graph data in docker container ([#2241](https://github.com/apache/incubator-hugegraph/pull/2241))
+- feat(dist): support pre-load test graph data in docker container ([#2241](https://github.com/apache/hugegraph/pull/2241))
#### Other Changes
-- refact: use standard UTF-8 charset & enhance CI configs ([#2095](https://github.com/apache/incubator-hugegraph/pull/2095))
-- move validate release to hugegraph-doc ([#2109](https://github.com/apache/incubator-hugegraph/pull/2109))
-- refact: use a slim way to build docker image on latest code & support zgc ([#2118](https://github.com/apache/incubator-hugegraph/pull/2118))
-- chore: remove stage-repo in pom due to release done & update mail rule ([#2128](https://github.com/apache/incubator-hugegraph/pull/2128))
-- doc: update issue template & README file ([#2131](https://github.com/apache/incubator-hugegraph/pull/2131))
-- chore: cmn algorithm optimization ([#2134](https://github.com/apache/incubator-hugegraph/pull/2134))
-- add github token for license check comment ([#2139](https://github.com/apache/incubator-hugegraph/pull/2139))
-- chore: disable PR up-to-date in branch ([#2150](https://github.com/apache/incubator-hugegraph/pull/2150))
-- refact(core): remove lock of globalMasterInfo to optimize perf ([#2151](https://github.com/apache/incubator-hugegraph/pull/2151))
-- chore: async remove left index shouldn't effect query ([#2199](https://github.com/apache/incubator-hugegraph/pull/2199))
-- refact(rocksdb): clean & reformat some code ([#2200](https://github.com/apache/incubator-hugegraph/pull/2200))
-- refact(core): optimized batch removal of remaining indices consumed by a single consumer ([#2203](https://github.com/apache/incubator-hugegraph/pull/2203))
-- add com.janeluo.ikkanalyzer dependency to core model ([#2206](https://github.com/apache/incubator-hugegraph/pull/2206))
-- refact(core): early stop unnecessary loops in edge cache ([#2211](https://github.com/apache/incubator-hugegraph/pull/2211))
-- doc: update README & add QR code ([#2218](https://github.com/apache/incubator-hugegraph/pull/2218))
-- chore: update .asf.yaml for mail rule ([#2221](https://github.com/apache/incubator-hugegraph/pull/2221))
-- chore: improve the UI & content in README ([#2227](https://github.com/apache/incubator-hugegraph/pull/2227))
-- chore: add pr template ([#2234](https://github.com/apache/incubator-hugegraph/pull/2234))
-- doc: modify ASF and remove meaningless CLA ([#2237](https://github.com/apache/incubator-hugegraph/pull/2237))
-- chore(dist): replace wget to curl to download swagger-ui ([#2277](https://github.com/apache/incubator-hugegraph/pull/2277))
-- Update StandardStateMachineCallback.java ([#2290](https://github.com/apache/incubator-hugegraph/pull/2290))
-- doc: update README about start server with example graph ([#2315](https://github.com/apache/incubator-hugegraph/pull/2315))
-- README.md tiny improve ([#2320](https://github.com/apache/incubator-hugegraph/pull/2320))
-- doc: README.md tiny improve ([#2331](https://github.com/apache/incubator-hugegraph/pull/2331))
-- refact: adjust project structure for merge PD & Store[Breaking Change] ([#2338](https://github.com/apache/incubator-hugegraph/pull/2338))
-- chore: disable raft test in normal PR due to timeout problem ([#2349](https://github.com/apache/incubator-hugegraph/pull/2349))
-- chore(ci): add stage profile settings ([#2361](https://github.com/apache/incubator-hugegraph/pull/2361))
-- refact(api): update common 1.2 & fix jersey client code problem ([#2365](https://github.com/apache/incubator-hugegraph/pull/2365))
-- chore: move server info into GlobalMasterInfo ([#2370](https://github.com/apache/incubator-hugegraph/pull/2370))
-- chore: reset hugegraph version to 1.2.0 ([#2382](https://github.com/apache/incubator-hugegraph/pull/2382))
+- refact: use standard UTF-8 charset & enhance CI configs ([#2095](https://github.com/apache/hugegraph/pull/2095))
+- move validate release to hugegraph-doc ([#2109](https://github.com/apache/hugegraph/pull/2109))
+- refact: use a slim way to build docker image on latest code & support zgc ([#2118](https://github.com/apache/hugegraph/pull/2118))
+- chore: remove stage-repo in pom due to release done & update mail rule ([#2128](https://github.com/apache/hugegraph/pull/2128))
+- doc: update issue template & README file ([#2131](https://github.com/apache/hugegraph/pull/2131))
+- chore: cmn algorithm optimization ([#2134](https://github.com/apache/hugegraph/pull/2134))
+- add github token for license check comment ([#2139](https://github.com/apache/hugegraph/pull/2139))
+- chore: disable PR up-to-date in branch ([#2150](https://github.com/apache/hugegraph/pull/2150))
+- refact(core): remove lock of globalMasterInfo to optimize perf ([#2151](https://github.com/apache/hugegraph/pull/2151))
+- chore: async remove left index shouldn't effect query ([#2199](https://github.com/apache/hugegraph/pull/2199))
+- refact(rocksdb): clean & reformat some code ([#2200](https://github.com/apache/hugegraph/pull/2200))
+- refact(core): optimized batch removal of remaining indices consumed by a single consumer ([#2203](https://github.com/apache/hugegraph/pull/2203))
+- add com.janeluo.ikkanalyzer dependency to core model ([#2206](https://github.com/apache/hugegraph/pull/2206))
+- refact(core): early stop unnecessary loops in edge cache ([#2211](https://github.com/apache/hugegraph/pull/2211))
+- doc: update README & add QR code ([#2218](https://github.com/apache/hugegraph/pull/2218))
+- chore: update .asf.yaml for mail rule ([#2221](https://github.com/apache/hugegraph/pull/2221))
+- chore: improve the UI & content in README ([#2227](https://github.com/apache/hugegraph/pull/2227))
+- chore: add pr template ([#2234](https://github.com/apache/hugegraph/pull/2234))
+- doc: modify ASF and remove meaningless CLA ([#2237](https://github.com/apache/hugegraph/pull/2237))
+- chore(dist): replace wget to curl to download swagger-ui ([#2277](https://github.com/apache/hugegraph/pull/2277))
+- Update StandardStateMachineCallback.java ([#2290](https://github.com/apache/hugegraph/pull/2290))
+- doc: update README about start server with example graph ([#2315](https://github.com/apache/hugegraph/pull/2315))
+- README.md tiny improve ([#2320](https://github.com/apache/hugegraph/pull/2320))
+- doc: README.md tiny improve ([#2331](https://github.com/apache/hugegraph/pull/2331))
+- refact: adjust project structure for merge PD & Store[Breaking Change] ([#2338](https://github.com/apache/hugegraph/pull/2338))
+- chore: disable raft test in normal PR due to timeout problem ([#2349](https://github.com/apache/hugegraph/pull/2349))
+- chore(ci): add stage profile settings ([#2361](https://github.com/apache/hugegraph/pull/2361))
+- refact(api): update common 1.2 & fix jersey client code problem ([#2365](https://github.com/apache/hugegraph/pull/2365))
+- chore: move server info into GlobalMasterInfo ([#2370](https://github.com/apache/hugegraph/pull/2370))
+- chore: reset hugegraph version to 1.2.0 ([#2382](https://github.com/apache/hugegraph/pull/2382))
### hugegraph-computer
#### Feature Changes
-* feat: implement fast-failover for MessageRecvManager and DataClientManager ([#243](https://github.com/apache/incubator-hugegraph-computer/pull/243))
-* feat: implement parallel send data in load graph step ([#248](https://github.com/apache/incubator-hugegraph-computer/pull/248))
-* feat(k8s): init operator project & add webhook ([#259](https://github.com/apache/incubator-hugegraph-computer/pull/259), [#263](https://github.com/apache/incubator-hugegraph-computer/pull/263))
-* feat(core): support load vertex/edge snapshot ([#269](https://github.com/apache/incubator-hugegraph-computer/pull/269))
-* feat(k8s): Add MinIO as internal(default) storage ([#272](https://github.com/apache/incubator-hugegraph-computer/pull/272))
-* feat(algorithm): support random walk in computer ([#274](https://github.com/apache/incubator-hugegraph-computer/pull/274), [#280](https://github.com/apache/incubator-hugegraph-computer/pull/280))
-* feat: use 'foreground' delete policy to cancel k8s job ([#290](https://github.com/apache/incubator-hugegraph-computer/pull/290))
+* feat: implement fast-failover for MessageRecvManager and DataClientManager ([#243](https://github.com/apache/hugegraph-computer/pull/243))
+* feat: implement parallel send data in load graph step ([#248](https://github.com/apache/hugegraph-computer/pull/248))
+* feat(k8s): init operator project & add webhook ([#259](https://github.com/apache/hugegraph-computer/pull/259), [#263](https://github.com/apache/hugegraph-computer/pull/263))
+* feat(core): support load vertex/edge snapshot ([#269](https://github.com/apache/hugegraph-computer/pull/269))
+* feat(k8s): Add MinIO as internal(default) storage ([#272](https://github.com/apache/hugegraph-computer/pull/272))
+* feat(algorithm): support random walk in computer ([#274](https://github.com/apache/hugegraph-computer/pull/274), [#280](https://github.com/apache/hugegraph-computer/pull/280))
+* feat: use 'foreground' delete policy to cancel k8s job ([#290](https://github.com/apache/hugegraph-computer/pull/290))
#### Bug Fix
-* fix: superstep not take effect ([#237](https://github.com/apache/incubator-hugegraph-computer/pull/237))
-* fix(k8s): modify inconsistent apiGroups ([#270](https://github.com/apache/incubator-hugegraph-computer/pull/270))
-* fix(algorithm): record loop is not copied ([#276](https://github.com/apache/incubator-hugegraph-computer/pull/276))
-* refact(core): adaptor for common 1.2 & fix a string of possible CI problem ([#286](https://github.com/apache/incubator-hugegraph-computer/pull/286))
-* fix: remove okhttp1 due to conflicts risk ([#294](https://github.com/apache/incubator-hugegraph-computer/pull/294))
-* fix(core): io.grpc.grpc-core dependency conflic ([#296](https://github.com/apache/incubator-hugegraph-computer/pull/296))
+* fix: superstep not take effect ([#237](https://github.com/apache/hugegraph-computer/pull/237))
+* fix(k8s): modify inconsistent apiGroups ([#270](https://github.com/apache/hugegraph-computer/pull/270))
+* fix(algorithm): record loop is not copied ([#276](https://github.com/apache/hugegraph-computer/pull/276))
+* refact(core): adaptor for common 1.2 & fix a string of possible CI problem ([#286](https://github.com/apache/hugegraph-computer/pull/286))
+* fix: remove okhttp1 due to conflicts risk ([#294](https://github.com/apache/hugegraph-computer/pull/294))
+* fix(core): io.grpc.grpc-core dependency conflict ([#296](https://github.com/apache/hugegraph-computer/pull/296))
#### Option Changes
-* feat(core): isolate namespace for different input data source ([#252](https://github.com/apache/incubator-hugegraph-computer/pull/252))
-* refact(core): support auth config for computer task ([#265](https://github.com/apache/incubator-hugegraph-computer/pull/265))
+* feat(core): isolate namespace for different input data source ([#252](https://github.com/apache/hugegraph-computer/pull/252))
+* refact(core): support auth config for computer task ([#265](https://github.com/apache/hugegraph-computer/pull/265))
#### Other Changes
-* remove apache stage repo & update notification rule ([#232](https://github.com/apache/incubator-hugegraph-computer/pull/232))
-* chore: fix empty license file ([#233](https://github.com/apache/incubator-hugegraph-computer/pull/233))
-* chore: enhance mailbox settings & enable require ci ([#235](https://github.com/apache/incubator-hugegraph-computer/pull/235))
-* fix: typo errors in start-computer.sh ([#238](https://github.com/apache/incubator-hugegraph-computer/pull/238))
-* [Feature-241] Add PULL_REQUEST_TEMPLATE ([#242](https://github.com/apache/incubator-hugegraph-computer/pull/242), [#257](https://github.com/apache/incubator-hugegraph-computer/pull/257))
-* chore: change etcd url only for ci ([#245](https://github.com/apache/incubator-hugegraph-computer/pull/245))
-* doc: update readme & add QR code ([#249](https://github.com/apache/incubator-hugegraph-computer/pull/249))
-* doc(k8s): add building note for missing classes ([#254](https://github.com/apache/incubator-hugegraph-computer/pull/254))
-* chore: reduce mail to dev list ([#255](https://github.com/apache/incubator-hugegraph-computer/pull/255))
-* add: dependency-review ([#266](https://github.com/apache/incubator-hugegraph-computer/pull/266))
-* chore: correct incorrect comment ([#268](https://github.com/apache/incubator-hugegraph-computer/pull/268))
-* refactor(api): ListValue.getFirst() replaces ListValue.get(0) ([#282](https://github.com/apache/incubator-hugegraph-computer/pull/282))
-* Improve: Passing workerId to WorkerStat & Skip wait worker close if master executes failed ([#292](https://github.com/apache/incubator-hugegraph-computer/pull/292))
-* chore: add check dependencies ([#293](https://github.com/apache/incubator-hugegraph-computer/pull/293))
-* chore(license): update license for 1.2.0 ([#299](https://github.com/apache/incubator-hugegraph-computer/pull/299))
+* remove apache stage repo & update notification rule ([#232](https://github.com/apache/hugegraph-computer/pull/232))
+* chore: fix empty license file ([#233](https://github.com/apache/hugegraph-computer/pull/233))
+* chore: enhance mailbox settings & enable require ci ([#235](https://github.com/apache/hugegraph-computer/pull/235))
+* fix: typo errors in start-computer.sh ([#238](https://github.com/apache/hugegraph-computer/pull/238))
+* [Feature-241] Add PULL_REQUEST_TEMPLATE ([#242](https://github.com/apache/hugegraph-computer/pull/242), [#257](https://github.com/apache/hugegraph-computer/pull/257))
+* chore: change etcd url only for ci ([#245](https://github.com/apache/hugegraph-computer/pull/245))
+* doc: update readme & add QR code ([#249](https://github.com/apache/hugegraph-computer/pull/249))
+* doc(k8s): add building note for missing classes ([#254](https://github.com/apache/hugegraph-computer/pull/254))
+* chore: reduce mail to dev list ([#255](https://github.com/apache/hugegraph-computer/pull/255))
+* add: dependency-review ([#266](https://github.com/apache/hugegraph-computer/pull/266))
+* chore: correct incorrect comment ([#268](https://github.com/apache/hugegraph-computer/pull/268))
+* refactor(api): ListValue.getFirst() replaces ListValue.get(0) ([#282](https://github.com/apache/hugegraph-computer/pull/282))
+* Improve: Passing workerId to WorkerStat & Skip wait worker close if master executes failed ([#292](https://github.com/apache/hugegraph-computer/pull/292))
+* chore: add check dependencies ([#293](https://github.com/apache/hugegraph-computer/pull/293))
+* chore(license): update license for 1.2.0 ([#299](https://github.com/apache/hugegraph-computer/pull/299))
### hugegraph-toolchain
#### API Changes
-- feat(client): support edgeExistence api ([#544](https://github.com/apache/incubator-hugegraph-toolchain/pull/544))
-- refact(client): update tests for new OLTP traverser APIs ([#550](https://github.com/apache/incubator-hugegraph-toolchain/pull/550))
+- feat(client): support edgeExistence api ([#544](https://github.com/apache/hugegraph-toolchain/pull/544))
+- refact(client): update tests for new OLTP traverser APIs ([#550](https://github.com/apache/hugegraph-toolchain/pull/550))
#### Feature Changes
-- feat(spark): support spark-sink connector for loader ([#497](https://github.com/apache/incubator-hugegraph-toolchain/pull/497))
-- feat(loader): support kafka as datasource ([#506](https://github.com/apache/incubator-hugegraph-toolchain/pull/506))
-- feat(client): support go client for hugegraph ([#514](https://github.com/apache/incubator-hugegraph-toolchain/pull/514))
-- feat(loader): support docker for loader ([#530](https://github.com/apache/incubator-hugegraph-toolchain/pull/530))
-- feat: update common version and remove jersey code ([#538](https://github.com/apache/incubator-hugegraph-toolchain/pull/538))
+- feat(spark): support spark-sink connector for loader ([#497](https://github.com/apache/hugegraph-toolchain/pull/497))
+- feat(loader): support kafka as datasource ([#506](https://github.com/apache/hugegraph-toolchain/pull/506))
+- feat(client): support go client for hugegraph ([#514](https://github.com/apache/hugegraph-toolchain/pull/514))
+- feat(loader): support docker for loader ([#530](https://github.com/apache/hugegraph-toolchain/pull/530))
+- feat: update common version and remove jersey code ([#538](https://github.com/apache/hugegraph-toolchain/pull/538))
#### Bug Fix
-- fix: convert numbers to strings ([#465](https://github.com/apache/incubator-hugegraph-toolchain/pull/465))
-- fix: hugegraph-spark-loader shell string length limit ([#469](https://github.com/apache/incubator-hugegraph-toolchain/pull/469))
-- fix: spark loader meet Exception: Class is not registered ([#470](https://github.com/apache/incubator-hugegraph-toolchain/pull/470))
-- fix: spark loader Task not serializable ([#471](https://github.com/apache/incubator-hugegraph-toolchain/pull/471))
-- fix: spark with loader has dependency conflicts ([#480](https://github.com/apache/incubator-hugegraph-toolchain/pull/480))
-- fix: spark-loader example schema and struct mismatch ([#504](https://github.com/apache/incubator-hugegraph-toolchain/pull/504))
-- fix(loader): error log ([#499](https://github.com/apache/incubator-hugegraph-toolchain/pull/499))
-- fix: checkstyle && add suppressions.xml ([#500](https://github.com/apache/incubator-hugegraph-toolchain/pull/500))
-- fix(loader): resolve error in loader script ([#510](https://github.com/apache/incubator-hugegraph-toolchain/pull/510))
-- fix: base-ref/head-ref missed in dependency-check-ci on branch push ([#516](https://github.com/apache/incubator-hugegraph-toolchain/pull/516), [#551](https://github.com/apache/incubator-hugegraph-toolchain/pull/551))
-- fix yarn network connection on linux/arm64 arch ([#519](https://github.com/apache/incubator-hugegraph-toolchain/pull/519))
-- fix(hubble): drop-down box could not display all options ([#535](https://github.com/apache/incubator-hugegraph-toolchain/pull/535))
-- fix(hubble): build with node and yarn ([#543](https://github.com/apache/incubator-hugegraph-toolchain/pull/543))
-- fix(loader): loader options ([#548](https://github.com/apache/incubator-hugegraph-toolchain/pull/548))
-- fix(hubble): parent override children dep version ([#549](https://github.com/apache/incubator-hugegraph-toolchain/pull/549))
-- fix: exclude okhttp1 which has different groupID with okhttp3 ([#555](https://github.com/apache/incubator-hugegraph-toolchain/pull/555))
-- fix: github action build docker image failed ([#556](https://github.com/apache/incubator-hugegraph-toolchain/pull/556), [#557](https://github.com/apache/incubator-hugegraph-toolchain/pull/557))
-- fix: build error with npm not exist & tiny improve ([#558](https://github.com/apache/incubator-hugegraph-toolchain/pull/558))
+- fix: convert numbers to strings ([#465](https://github.com/apache/hugegraph-toolchain/pull/465))
+- fix: hugegraph-spark-loader shell string length limit ([#469](https://github.com/apache/hugegraph-toolchain/pull/469))
+- fix: spark loader meet Exception: Class is not registered ([#470](https://github.com/apache/hugegraph-toolchain/pull/470))
+- fix: spark loader Task not serializable ([#471](https://github.com/apache/hugegraph-toolchain/pull/471))
+- fix: spark with loader has dependency conflicts ([#480](https://github.com/apache/hugegraph-toolchain/pull/480))
+- fix: spark-loader example schema and struct mismatch ([#504](https://github.com/apache/hugegraph-toolchain/pull/504))
+- fix(loader): error log ([#499](https://github.com/apache/hugegraph-toolchain/pull/499))
+- fix: checkstyle && add suppressions.xml ([#500](https://github.com/apache/hugegraph-toolchain/pull/500))
+- fix(loader): resolve error in loader script ([#510](https://github.com/apache/hugegraph-toolchain/pull/510))
+- fix: base-ref/head-ref missed in dependency-check-ci on branch push ([#516](https://github.com/apache/hugegraph-toolchain/pull/516), [#551](https://github.com/apache/hugegraph-toolchain/pull/551))
+- fix yarn network connection on linux/arm64 arch ([#519](https://github.com/apache/hugegraph-toolchain/pull/519))
+- fix(hubble): drop-down box could not display all options ([#535](https://github.com/apache/hugegraph-toolchain/pull/535))
+- fix(hubble): build with node and yarn ([#543](https://github.com/apache/hugegraph-toolchain/pull/543))
+- fix(loader): loader options ([#548](https://github.com/apache/hugegraph-toolchain/pull/548))
+- fix(hubble): parent override children dep version ([#549](https://github.com/apache/hugegraph-toolchain/pull/549))
+- fix: exclude okhttp1 which has different groupID with okhttp3 ([#555](https://github.com/apache/hugegraph-toolchain/pull/555))
+- fix: github action build docker image failed ([#556](https://github.com/apache/hugegraph-toolchain/pull/556), [#557](https://github.com/apache/hugegraph-toolchain/pull/557))
+- fix: build error with npm not exist & tiny improve ([#558](https://github.com/apache/hugegraph-toolchain/pull/558))
#### Option Changes
-- set default data when create graph ([#447](https://github.com/apache/incubator-hugegraph-toolchain/pull/447))
+- set default data when create graph ([#447](https://github.com/apache/hugegraph-toolchain/pull/447))
#### Other Changes
-- chore: remove apache stage repo & update mail rule ([#433](https://github.com/apache/incubator-hugegraph-toolchain/pull/433), [#474](https://github.com/apache/incubator-hugegraph-toolchain/pull/474), [#479](https://github.com/apache/incubator-hugegraph-toolchain/pull/479))
-- refact: clean extra store file in all modules ([#434](https://github.com/apache/incubator-hugegraph-toolchain/pull/434))
-- chore: use fixed node.js version 16 to avoid ci problem ([#437](https://github.com/apache/incubator-hugegraph-toolchain/pull/437), [#441](https://github.com/apache/incubator-hugegraph-toolchain/pull/441))
-- chore(hubble): use latest code in Dockerfile ([#440](https://github.com/apache/incubator-hugegraph-toolchain/pull/440))
-- chore: remove maven plugin for docker build ([#443](https://github.com/apache/incubator-hugegraph-toolchain/pull/443))
-- chore: improve spark parallel ([#450](https://github.com/apache/incubator-hugegraph-toolchain/pull/450))
-- doc: fix build status badge link ([#455](https://github.com/apache/incubator-hugegraph-toolchain/pull/455))
-- chore: keep hadoop-hdfs-client and hadoop-common version consistent ([#457](https://github.com/apache/incubator-hugegraph-toolchain/pull/457))
-- doc: add basic contact info & QR code in README ([#462](https://github.com/apache/incubator-hugegraph-toolchain/pull/462), [#475](https://github.com/apache/incubator-hugegraph-toolchain/pull/475))
-- chore: disable PR up-to-date in branch ([#473](https://github.com/apache/incubator-hugegraph-toolchain/pull/473))
-- chore: auto add pr auto label by path ([#466](https://github.com/apache/incubator-hugegraph-toolchain/pull/466), [#528](https://github.com/apache/incubator-hugegraph-toolchain/pull/528))
-- chore: unify the dependencies versions of the entire project ([#478](https://github.com/apache/incubator-hugegraph-toolchain/pull/478))
-- chore(deps): bump async, semver, word-wrap, browserify-sign in hubble-fe ([#484](https://github.com/apache/incubator-hugegraph-toolchain/pull/484), [#491](https://github.com/apache/incubator-hugegraph-toolchain/pull/491), [#494](https://github.com/apache/incubator-hugegraph-toolchain/pull/494), [#529](https://github.com/apache/incubator-hugegraph-toolchain/pull/529))
-- chore: add pr template ([#498](https://github.com/apache/incubator-hugegraph-toolchain/pull/498))
-- doc(hubble): add docker-compose to start with server ([#522](https://github.com/apache/incubator-hugegraph-toolchain/pull/522))
-- chore(ci): add stage profile settings ([#536](https://github.com/apache/incubator-hugegraph-toolchain/pull/536))
-- chore(client): increase the api num as the latest server commit + 10 ([#546](https://github.com/apache/incubator-hugegraph-toolchain/pull/546))
-- chore(spark): install hugegraph from source ([#552](https://github.com/apache/incubator-hugegraph-toolchain/pull/552))
-- doc: adjust docker related desc in readme ([#559](https://github.com/apache/incubator-hugegraph-toolchain/pull/559))
-- chore(license): update license for 1.2 ([#560](https://github.com/apache/incubator-hugegraph-toolchain/pull/560), [#561](https://github.com/apache/incubator-hugegraph-toolchain/pull/561))
+- chore: remove apache stage repo & update mail rule ([#433](https://github.com/apache/hugegraph-toolchain/pull/433), [#474](https://github.com/apache/hugegraph-toolchain/pull/474), [#479](https://github.com/apache/hugegraph-toolchain/pull/479))
+- refact: clean extra store file in all modules ([#434](https://github.com/apache/hugegraph-toolchain/pull/434))
+- chore: use fixed node.js version 16 to avoid ci problem ([#437](https://github.com/apache/hugegraph-toolchain/pull/437), [#441](https://github.com/apache/hugegraph-toolchain/pull/441))
+- chore(hubble): use latest code in Dockerfile ([#440](https://github.com/apache/hugegraph-toolchain/pull/440))
+- chore: remove maven plugin for docker build ([#443](https://github.com/apache/hugegraph-toolchain/pull/443))
+- chore: improve spark parallel ([#450](https://github.com/apache/hugegraph-toolchain/pull/450))
+- doc: fix build status badge link ([#455](https://github.com/apache/hugegraph-toolchain/pull/455))
+- chore: keep hadoop-hdfs-client and hadoop-common version consistent ([#457](https://github.com/apache/hugegraph-toolchain/pull/457))
+- doc: add basic contact info & QR code in README ([#462](https://github.com/apache/hugegraph-toolchain/pull/462), [#475](https://github.com/apache/hugegraph-toolchain/pull/475))
+- chore: disable PR up-to-date in branch ([#473](https://github.com/apache/hugegraph-toolchain/pull/473))
+- chore: auto add pr auto label by path ([#466](https://github.com/apache/hugegraph-toolchain/pull/466), [#528](https://github.com/apache/hugegraph-toolchain/pull/528))
+- chore: unify the dependencies versions of the entire project ([#478](https://github.com/apache/hugegraph-toolchain/pull/478))
+- chore(deps): bump async, semver, word-wrap, browserify-sign in hubble-fe ([#484](https://github.com/apache/hugegraph-toolchain/pull/484), [#491](https://github.com/apache/hugegraph-toolchain/pull/491), [#494](https://github.com/apache/hugegraph-toolchain/pull/494), [#529](https://github.com/apache/hugegraph-toolchain/pull/529))
+- chore: add pr template ([#498](https://github.com/apache/hugegraph-toolchain/pull/498))
+- doc(hubble): add docker-compose to start with server ([#522](https://github.com/apache/hugegraph-toolchain/pull/522))
+- chore(ci): add stage profile settings ([#536](https://github.com/apache/hugegraph-toolchain/pull/536))
+- chore(client): increase the api num as the latest server commit + 10 ([#546](https://github.com/apache/hugegraph-toolchain/pull/546))
+- chore(spark): install hugegraph from source ([#552](https://github.com/apache/hugegraph-toolchain/pull/552))
+- doc: adjust docker related desc in readme ([#559](https://github.com/apache/hugegraph-toolchain/pull/559))
+- chore(license): update license for 1.2 ([#560](https://github.com/apache/hugegraph-toolchain/pull/560), [#561](https://github.com/apache/hugegraph-toolchain/pull/561))
@@ -217,27 +217,27 @@ weight: 3
#### Feature Changes
-- feat(common): replace jersey dependencies with OkHttp (Breaking Change) ([#133](https://github.com/apache/incubator-hugegraph-commons/pull/133))
+- feat(common): replace jersey dependencies with OkHttp (Breaking Change) ([#133](https://github.com/apache/hugegraph-commons/pull/133))
#### Bug Fix
-- fix(common): handle spring-boot2/jersey dependency conflicts ([#131](https://github.com/apache/incubator-hugegraph-commons/pull/131))
-- fix: Assert.assertThrows() should check result of exceptionConsumer ([#135](https://github.com/apache/incubator-hugegraph-commons/pull/135))
-- fix(common): json param convert ([#137](https://github.com/apache/incubator-hugegraph-commons/pull/137))
+- fix(common): handle spring-boot2/jersey dependency conflicts ([#131](https://github.com/apache/hugegraph-commons/pull/131))
+- fix: Assert.assertThrows() should check result of exceptionConsumer ([#135](https://github.com/apache/hugegraph-commons/pull/135))
+- fix(common): json param convert ([#137](https://github.com/apache/hugegraph-commons/pull/137))
#### Other Changes
-- refact(common): add more construction methods for convenient ([#132](https://github.com/apache/incubator-hugegraph-commons/pull/132))
-- add: dependency-review ([#134](https://github.com/apache/incubator-hugegraph-commons/pull/134))
-- refact(common): rename jsonutil to avoid conflicts with server ([#136](https://github.com/apache/incubator-hugegraph-commons/pull/136))
-- doc: update README for release ([#138](https://github.com/apache/incubator-hugegraph-commons/pull/138))
-- update licence ([#139](https://github.com/apache/incubator-hugegraph-commons/pull/139))
+- refact(common): add more construction methods for convenience ([#132](https://github.com/apache/hugegraph-commons/pull/132))
+- add: dependency-review ([#134](https://github.com/apache/hugegraph-commons/pull/134))
+- refact(common): rename jsonutil to avoid conflicts with server ([#136](https://github.com/apache/hugegraph-commons/pull/136))
+- doc: update README for release ([#138](https://github.com/apache/hugegraph-commons/pull/138))
+- update licence ([#139](https://github.com/apache/hugegraph-commons/pull/139))
### Release Details
Please check the release details in each repository:
-- [Server Release Notes](https://github.com/apache/incubator-hugegraph/releases)
-- [Toolchain Release Notes](https://github.com/apache/incubator-hugegraph-toolchain/releases)
-- [Computer Release Notes](https://github.com/apache/incubator-hugegraph-computer/releases)
-- [Commons Release Notes](https://github.com/apache/incubator-hugegraph-commons/releases)
+- [Server Release Notes](https://github.com/apache/hugegraph/releases)
+- [Toolchain Release Notes](https://github.com/apache/hugegraph-toolchain/releases)
+- [Computer Release Notes](https://github.com/apache/hugegraph-computer/releases)
+- [Commons Release Notes](https://github.com/apache/hugegraph-commons/releases)
diff --git a/content/en/docs/changelog/hugegraph-1.3.0-release-notes.md b/content/en/docs/changelog/hugegraph-1.3.0-release-notes.md
index 869273b19..9d4b2ee73 100644
--- a/content/en/docs/changelog/hugegraph-1.3.0-release-notes.md
+++ b/content/en/docs/changelog/hugegraph-1.3.0-release-notes.md
@@ -1,7 +1,7 @@
---
title: "HugeGraph 1.3.0 Release Notes"
linkTitle: "Release-1.3.0"
-weight: 4
+weight: 5
---
### Operating Environment / Version Description
@@ -23,45 +23,45 @@ PS: In the future, we will gradually upgrade the java version from `Java 11 -> J
#### API Changes
-* feat(api): optimize adjacent-edges query ([#2408](https://github.com/apache/incubator-hugegraph/pull/2408))
+* feat(api): optimize adjacent-edges query ([#2408](https://github.com/apache/hugegraph/pull/2408))
#### Feature Changes
-- feat: support docker use the auth when starting ([#2403](https://github.com/apache/incubator-hugegraph/pull/2403))
-- feat: added the OpenTelemetry trace support ([#2477](https://github.com/apache/incubator-hugegraph/pull/2477))
+- feat: support docker use the auth when starting ([#2403](https://github.com/apache/hugegraph/pull/2403))
+- feat: added the OpenTelemetry trace support ([#2477](https://github.com/apache/hugegraph/pull/2477))
#### Bug Fix
-- fix(core): task restore interrupt problem on restart server ([#2401](https://github.com/apache/incubator-hugegraph/pull/2401))
-- fix(server): reinitialize the progress to set up graph auth friendly ([#2411](https://github.com/apache/incubator-hugegraph/pull/2411))
-- fix(chore): remove zgc in dockerfile for ARM env ([#2421](https://github.com/apache/incubator-hugegraph/pull/2421))
-- fix(server): make CacheManager constructor private to satisfy the singleton pattern ([#2432](https://github.com/apache/incubator-hugegraph/pull/2432))
-- fix(server): unify the license headers ([#2438](https://github.com/apache/incubator-hugegraph/pull/2438))
-- fix: format and clean code in dist and example modules ([#2441](https://github.com/apache/incubator-hugegraph/pull/2441))
-- fix: format and clean code in core module ([#2440](https://github.com/apache/incubator-hugegraph/pull/2440))
-- fix: format and clean code in modules ([#2439](https://github.com/apache/incubator-hugegraph/pull/2439))
-- fix(server): clean up the code ([#2456](https://github.com/apache/incubator-hugegraph/pull/2456))
-- fix(server): remove extra blank lines ([#2459](https://github.com/apache/incubator-hugegraph/pull/2459))
-- fix(server): add tip for gremlin api NPE with an empty query ([#2467](https://github.com/apache/incubator-hugegraph/pull/2467))
-- fix(server): fix the metric name when promthus collects hugegraph metric, see issue ([#2462](https://github.com/apache/incubator-hugegraph/pull/2462))
-- fix(server): `serverStarted` error when execute gremlin example ([#2473](https://github.com/apache/incubator-hugegraph/pull/2473))
-- fix(auth): enhance the URL check ([#2422](https://github.com/apache/incubator-hugegraph/pull/2422))
+- fix(core): task restore interrupt problem on restart server ([#2401](https://github.com/apache/hugegraph/pull/2401))
+- fix(server): reinitialize the progress to set up graph auth friendly ([#2411](https://github.com/apache/hugegraph/pull/2411))
+- fix(chore): remove zgc in dockerfile for ARM env ([#2421](https://github.com/apache/hugegraph/pull/2421))
+- fix(server): make CacheManager constructor private to satisfy the singleton pattern ([#2432](https://github.com/apache/hugegraph/pull/2432))
+- fix(server): unify the license headers ([#2438](https://github.com/apache/hugegraph/pull/2438))
+- fix: format and clean code in dist and example modules ([#2441](https://github.com/apache/hugegraph/pull/2441))
+- fix: format and clean code in core module ([#2440](https://github.com/apache/hugegraph/pull/2440))
+- fix: format and clean code in modules ([#2439](https://github.com/apache/hugegraph/pull/2439))
+- fix(server): clean up the code ([#2456](https://github.com/apache/hugegraph/pull/2456))
+- fix(server): remove extra blank lines ([#2459](https://github.com/apache/hugegraph/pull/2459))
+- fix(server): add tip for gremlin api NPE with an empty query ([#2467](https://github.com/apache/hugegraph/pull/2467))
+- fix(server): fix the metric name when Prometheus collects hugegraph metric, see issue ([#2462](https://github.com/apache/hugegraph/pull/2462))
+- fix(server): `serverStarted` error when execute gremlin example ([#2473](https://github.com/apache/hugegraph/pull/2473))
+- fix(auth): enhance the URL check ([#2422](https://github.com/apache/hugegraph/pull/2422))
#### Option Changes
-* refact(server): enhance the storage path in RocksDB & clean code ([#2491](https://github.com/apache/incubator-hugegraph/pull/2491))
+* refact(server): enhance the storage path in RocksDB & clean code ([#2491](https://github.com/apache/hugegraph/pull/2491))
#### Other Changes
-- chore: add a license link ([#2398](https://github.com/apache/incubator-hugegraph/pull/2398))
-- doc: enhance NOTICE info to keep it clear ([#2409](https://github.com/apache/incubator-hugegraph/pull/2409))
-- chore(server): update swagger info for default server profile ([#2423](https://github.com/apache/incubator-hugegraph/pull/2423))
-- fix(server): unify license header for protobuf file ([#2448](https://github.com/apache/incubator-hugegraph/pull/2448))
-- chore: improve license header checker confs and pre-check header when validating ([#2445](https://github.com/apache/incubator-hugegraph/pull/2445))
-- chore: unify to call SchemaLabel.getLabelId() ([#2458](https://github.com/apache/incubator-hugegraph/pull/2458))
-- chore: refine the hg-style.xml specification ([#2457](https://github.com/apache/incubator-hugegraph/pull/2457))
-- chore: Add a newline formatting configuration and a comment for warning ([#2464](https://github.com/apache/incubator-hugegraph/pull/2464))
-- chore(server): clear context after req done ([#2470](https://github.com/apache/incubator-hugegraph/pull/2470))
+- chore: add a license link ([#2398](https://github.com/apache/hugegraph/pull/2398))
+- doc: enhance NOTICE info to keep it clear ([#2409](https://github.com/apache/hugegraph/pull/2409))
+- chore(server): update swagger info for default server profile ([#2423](https://github.com/apache/hugegraph/pull/2423))
+- fix(server): unify license header for protobuf file ([#2448](https://github.com/apache/hugegraph/pull/2448))
+- chore: improve license header checker confs and pre-check header when validating ([#2445](https://github.com/apache/hugegraph/pull/2445))
+- chore: unify to call SchemaLabel.getLabelId() ([#2458](https://github.com/apache/hugegraph/pull/2458))
+- chore: refine the hg-style.xml specification ([#2457](https://github.com/apache/hugegraph/pull/2457))
+- chore: Add a newline formatting configuration and a comment for warning ([#2464](https://github.com/apache/hugegraph/pull/2464))
+- chore(server): clear context after req done ([#2470](https://github.com/apache/hugegraph/pull/2470))
### hugegraph-toolchain
@@ -69,40 +69,40 @@ PS: In the future, we will gradually upgrade the java version from `Java 11 -> J
#### Feature Changes
-* fix(loader): update shade plugin for spark loader ([#566](https://github.com/apache/incubator-hugegraph-toolchain/pull/566))
-* fix(hubble): yarn install timeout in arm64 ([#583](https://github.com/apache/incubator-hugegraph-toolchain/pull/583))
-* fix(loader): support file name with prefix for hdfs source ([#571](https://github.com/apache/incubator-hugegraph-toolchain/pull/571))
-* feat(hubble): warp the exception info in HugeClientUtil ([#589](https://github.com/apache/incubator-hugegraph-toolchain/pull/589))
+* fix(loader): update shade plugin for spark loader ([#566](https://github.com/apache/hugegraph-toolchain/pull/566))
+* fix(hubble): yarn install timeout in arm64 ([#583](https://github.com/apache/hugegraph-toolchain/pull/583))
+* fix(loader): support file name with prefix for hdfs source ([#571](https://github.com/apache/hugegraph-toolchain/pull/571))
+* feat(hubble): wrap the exception info in HugeClientUtil ([#589](https://github.com/apache/hugegraph-toolchain/pull/589))
#### Bug Fix
-* fix: concurrency issue causing file overwrite due to identical filenames ([#572](https://github.com/apache/incubator-hugegraph-toolchain/pull/572))
+* fix: concurrency issue causing file overwrite due to identical filenames ([#572](https://github.com/apache/hugegraph-toolchain/pull/572))
#### Option Changes
-* feat(client): support user defined OKHTTPClient configs ([#590](https://github.com/apache/incubator-hugegraph-toolchain/pull/590))
+* feat(client): support user defined OKHTTPClient configs ([#590](https://github.com/apache/hugegraph-toolchain/pull/590))
#### Other Changes
-* doc: update copyright date(year) in NOTICE ([#567](https://github.com/apache/incubator-hugegraph-toolchain/pull/567))
-* chore(deps): bump ip from 1.1.5 to 1.1.9 in /hugegraph-hubble/hubble-fe ([#580](https://github.com/apache/incubator-hugegraph-toolchain/pull/580))
-* refactor(hubble): enhance maven front plugin ([#568](https://github.com/apache/incubator-hugegraph-toolchain/pull/568))
-* chore(deps): bump es5-ext from 0.10.53 to 0.10.63 in /hugegraph-hubble/hubble-fe ([#582](https://github.com/apache/incubator-hugegraph-toolchain/pull/582))
-* chore(hubble): Enhance code style in hubble ([#592](https://github.com/apache/incubator-hugegraph-toolchain/pull/592))
-* chore: upgrade version to 1.3.0 ([#596](https://github.com/apache/incubator-hugegraph-toolchain/pull/596))
-* chore(ci): update profile commit id for 1.3 ([#597](https://github.com/apache/incubator-hugegraph-toolchain/pull/597))
+* doc: update copyright date(year) in NOTICE ([#567](https://github.com/apache/hugegraph-toolchain/pull/567))
+* chore(deps): bump ip from 1.1.5 to 1.1.9 in /hugegraph-hubble/hubble-fe ([#580](https://github.com/apache/hugegraph-toolchain/pull/580))
+* refactor(hubble): enhance maven front plugin ([#568](https://github.com/apache/hugegraph-toolchain/pull/568))
+* chore(deps): bump es5-ext from 0.10.53 to 0.10.63 in /hugegraph-hubble/hubble-fe ([#582](https://github.com/apache/hugegraph-toolchain/pull/582))
+* chore(hubble): Enhance code style in hubble ([#592](https://github.com/apache/hugegraph-toolchain/pull/592))
+* chore: upgrade version to 1.3.0 ([#596](https://github.com/apache/hugegraph-toolchain/pull/596))
+* chore(ci): update profile commit id for 1.3 ([#597](https://github.com/apache/hugegraph-toolchain/pull/597))
### hugegraph-commons
#### Feature Changes
-* feat: support user defined RestClientConfig/HTTPClient params ([#140](https://github.com/apache/incubator-hugegraph-commons/pull/140))
+* feat: support user defined RestClientConfig/HTTPClient params ([#140](https://github.com/apache/hugegraph-commons/pull/140))
#### Bug Fix
#### Other Changes
-* chore: disable clean flatten for deploy ([#141](https://github.com/apache/incubator-hugegraph-commons/pull/141))
+* chore: disable clean flatten for deploy ([#141](https://github.com/apache/hugegraph-commons/pull/141))
### hugegraph-ai
@@ -123,38 +123,38 @@ and the addition of basic CI further enhance the project's robustness and develo
#### Feature Changes
-* feat: initialize hugegraph python client ([#5](https://github.com/apache/incubator-hugegraph-ai/pull/5))
-* feat(llm): knowledge graph construction by llm ([#7](https://github.com/apache/incubator-hugegraph-ai/pull/7))
-* feat: initialize rag based on HugeGraph ([#20](https://github.com/apache/incubator-hugegraph-ai/pull/20))
-* feat(client): add variables api and test ([#24](https://github.com/apache/incubator-hugegraph-ai/pull/24))
-* feat: add llm wenxinyiyan & config util & spo_triple_extract ([#27](https://github.com/apache/incubator-hugegraph-ai/pull/27))
-* feat: add auth&metric&traverser&task api and ut ([#28](https://github.com/apache/incubator-hugegraph-ai/pull/28))
-* feat: refactor construct knowledge graph task ([#29](https://github.com/apache/incubator-hugegraph-ai/pull/29))
-* feat: Introduce gradio for creating interactive and visual demo ([#30](https://github.com/apache/incubator-hugegraph-ai/pull/30))
+* feat: initialize hugegraph python client ([#5](https://github.com/apache/hugegraph-ai/pull/5))
+* feat(llm): knowledge graph construction by llm ([#7](https://github.com/apache/hugegraph-ai/pull/7))
+* feat: initialize rag based on HugeGraph ([#20](https://github.com/apache/hugegraph-ai/pull/20))
+* feat(client): add variables api and test ([#24](https://github.com/apache/hugegraph-ai/pull/24))
+* feat: add llm wenxinyiyan & config util & spo_triple_extract ([#27](https://github.com/apache/hugegraph-ai/pull/27))
+* feat: add auth&metric&traverser&task api and ut ([#28](https://github.com/apache/hugegraph-ai/pull/28))
+* feat: refactor construct knowledge graph task ([#29](https://github.com/apache/hugegraph-ai/pull/29))
+* feat: Introduce gradio for creating interactive and visual demo ([#30](https://github.com/apache/hugegraph-ai/pull/30))
#### Bug Fix
-* fix: invalid GitHub label ([#3](https://github.com/apache/incubator-hugegraph-ai/pull/3))
-* fix: import error ([#13](https://github.com/apache/incubator-hugegraph-ai/pull/13))
-* fix: function getEdgeByPage(): the generated query url does not include the parameter page ([#15](https://github.com/apache/incubator-hugegraph-ai/pull/15))
-* fix: issue template ([#23](https://github.com/apache/incubator-hugegraph-ai/pull/23))
-* fix: base-ref/head-ref missed in dependency-check-ci on branch push ([#25](https://github.com/apache/incubator-hugegraph-ai/pull/25))
+* fix: invalid GitHub label ([#3](https://github.com/apache/hugegraph-ai/pull/3))
+* fix: import error ([#13](https://github.com/apache/hugegraph-ai/pull/13))
+* fix: function getEdgeByPage(): the generated query url does not include the parameter page ([#15](https://github.com/apache/hugegraph-ai/pull/15))
+* fix: issue template ([#23](https://github.com/apache/hugegraph-ai/pull/23))
+* fix: base-ref/head-ref missed in dependency-check-ci on branch push ([#25](https://github.com/apache/hugegraph-ai/pull/25))
#### Other Changes
-* chore: add asf.yaml and ISSUE_TEMPLATE ([#1](https://github.com/apache/incubator-hugegraph-ai/pull/1))
-* Bump urllib3 from 2.0.3 to 2.0.7 in /hugegraph-python ([#8](https://github.com/apache/incubator-hugegraph-ai/pull/8))
-* chore: create .gitignore file for py ([#9](https://github.com/apache/incubator-hugegraph-ai/pull/9))
-* refact: improve project structure & add some basic CI ([#17](https://github.com/apache/incubator-hugegraph-ai/pull/17))
-* chore: Update LICENSE and NOTICE ([#31](https://github.com/apache/incubator-hugegraph-ai/pull/31))
-* chore: add release scripts ([#33](https://github.com/apache/incubator-hugegraph-ai/pull/33))
-* chore: change file chmod 755 ([#34](https://github.com/apache/incubator-hugegraph-ai/pull/34))
+* chore: add asf.yaml and ISSUE_TEMPLATE ([#1](https://github.com/apache/hugegraph-ai/pull/1))
+* Bump urllib3 from 2.0.3 to 2.0.7 in /hugegraph-python ([#8](https://github.com/apache/hugegraph-ai/pull/8))
+* chore: create .gitignore file for py ([#9](https://github.com/apache/hugegraph-ai/pull/9))
+* refact: improve project structure & add some basic CI ([#17](https://github.com/apache/hugegraph-ai/pull/17))
+* chore: Update LICENSE and NOTICE ([#31](https://github.com/apache/hugegraph-ai/pull/31))
+* chore: add release scripts ([#33](https://github.com/apache/hugegraph-ai/pull/33))
+* chore: change file chmod 755 ([#34](https://github.com/apache/hugegraph-ai/pull/34))
### Release Details
Please check the release details/contributor in each repository:
-- [Server Release Notes](https://github.com/apache/incubator-hugegraph/releases)
-- [Toolchain Release Notes](https://github.com/apache/incubator-hugegraph-toolchain/releases)
-- [AI Release Notes](https://github.com/apache/incubator-hugegraph-ai/releases)
-- [Commons Release Notes](https://github.com/apache/incubator-hugegraph-commons/releases)
+- [Server Release Notes](https://github.com/apache/hugegraph/releases)
+- [Toolchain Release Notes](https://github.com/apache/hugegraph-toolchain/releases)
+- [AI Release Notes](https://github.com/apache/hugegraph-ai/releases)
+- [Commons Release Notes](https://github.com/apache/hugegraph-commons/releases)
diff --git a/content/en/docs/changelog/hugegraph-1.5.0-release-notes.md b/content/en/docs/changelog/hugegraph-1.5.0-release-notes.md
index 7ed0dd595..41a6a7634 100644
--- a/content/en/docs/changelog/hugegraph-1.5.0-release-notes.md
+++ b/content/en/docs/changelog/hugegraph-1.5.0-release-notes.md
@@ -1,7 +1,7 @@
---
title: "HugeGraph 1.5.0 Release Notes"
linkTitle: "Release-1.5.0"
-weight: 5
+weight: 3
---
> WIP: This doc is under construction, please wait for the final version (BETA)
@@ -18,145 +18,145 @@ PS: In the future, HugeGraph components will evolve through versions of `Java 11
#### API Changes
-- **BREAKING CHANGE**: Support "parent & child" `EdgeLabel` type [#2662](https://github.com/apache/incubator-hugegraph/pull/2662)
+- **BREAKING CHANGE**: Support "parent & child" `EdgeLabel` type [#2662](https://github.com/apache/hugegraph/pull/2662)
#### Feature Changes
-- Integrate `pd-grpc`, `pd-common`, and `pd-client` [#2498](https://github.com/apache/incubator-hugegraph/pull/2498)
-- Integrate `store-grpc`, `store-common`, and `store-client` [#2476](https://github.com/apache/incubator-hugegraph/pull/2476)
-- Integrate `store-rocksdb` submodule [#2513](https://github.com/apache/incubator-hugegraph/pull/2513)
-- Integrate `pd-core` into HugeGraph [#2478](https://github.com/apache/incubator-hugegraph/pull/2478)
-- Integrate `pd-service` into HugeGraph [#2528](https://github.com/apache/incubator-hugegraph/pull/2528)
-- Integrate `pd-dist` into HugeGraph and add core tests, client tests, and REST tests for PD [#2532](https://github.com/apache/incubator-hugegraph/pull/2532)
-- Integrate `server-hstore` into HugeGraph [#2534](https://github.com/apache/incubator-hugegraph/pull/2534)
-- Integrate `store-core` submodule [#2548](https://github.com/apache/incubator-hugegraph/pull/2548)
-- Integrate `store-node` submodule [#2537](https://github.com/apache/incubator-hugegraph/pull/2537)
-- Support new backend Hstore [#2560](https://github.com/apache/incubator-hugegraph/pull/2560)
-- Support Docker deployment for PD and Store [#2573](https://github.com/apache/incubator-hugegraph/pull/2573)
-- Add a tool method `encode` [#2647](https://github.com/apache/incubator-hugegraph/pull/2647)
-- Add basic `MiniCluster` module for distributed system testing [#2615](https://github.com/apache/incubator-hugegraph/pull/2615)
-- Support disabling RocksDB auto-compaction via configuration [#2586](https://github.com/apache/incubator-hugegraph/pull/2586)
+- Integrate `pd-grpc`, `pd-common`, and `pd-client` [#2498](https://github.com/apache/hugegraph/pull/2498)
+- Integrate `store-grpc`, `store-common`, and `store-client` [#2476](https://github.com/apache/hugegraph/pull/2476)
+- Integrate `store-rocksdb` submodule [#2513](https://github.com/apache/hugegraph/pull/2513)
+- Integrate `pd-core` into HugeGraph [#2478](https://github.com/apache/hugegraph/pull/2478)
+- Integrate `pd-service` into HugeGraph [#2528](https://github.com/apache/hugegraph/pull/2528)
+- Integrate `pd-dist` into HugeGraph and add core tests, client tests, and REST tests for PD [#2532](https://github.com/apache/hugegraph/pull/2532)
+- Integrate `server-hstore` into HugeGraph [#2534](https://github.com/apache/hugegraph/pull/2534)
+- Integrate `store-core` submodule [#2548](https://github.com/apache/hugegraph/pull/2548)
+- Integrate `store-node` submodule [#2537](https://github.com/apache/hugegraph/pull/2537)
+- Support new backend Hstore [#2560](https://github.com/apache/hugegraph/pull/2560)
+- Support Docker deployment for PD and Store [#2573](https://github.com/apache/hugegraph/pull/2573)
+- Add a tool method `encode` [#2647](https://github.com/apache/hugegraph/pull/2647)
+- Add basic `MiniCluster` module for distributed system testing [#2615](https://github.com/apache/hugegraph/pull/2615)
+- Support disabling RocksDB auto-compaction via configuration [#2586](https://github.com/apache/hugegraph/pull/2586)
#### Bug Fixes
-- Switch RocksDB backend to memory when executing Gremlin examples [#2518](https://github.com/apache/incubator-hugegraph/pull/2518)
-- Avoid overriding backend config in Gremlin example scripts [#2519](https://github.com/apache/incubator-hugegraph/pull/2519)
-- Update resource references [#2522](https://github.com/apache/incubator-hugegraph/pull/2522)
-- Randomly generate default values [#2568](https://github.com/apache/incubator-hugegraph/pull/2568)
-- Update build artifact path for Docker deployment [#2590](https://github.com/apache/incubator-hugegraph/pull/2590)
-- Ensure thread safety for range attributes in PD [#2641](https://github.com/apache/incubator-hugegraph/pull/2641)
-- Correct server Docker copy source path [#2637](https://github.com/apache/incubator-hugegraph/pull/2637)
-- Fix JRaft Timer Metrics bug in Hstore [#2602](https://github.com/apache/incubator-hugegraph/pull/2602)
-- Enable JRaft MaxBodySize configuration [#2633](https://github.com/apache/incubator-hugegraph/pull/2633)
+- Switch RocksDB backend to memory when executing Gremlin examples [#2518](https://github.com/apache/hugegraph/pull/2518)
+- Avoid overriding backend config in Gremlin example scripts [#2519](https://github.com/apache/hugegraph/pull/2519)
+- Update resource references [#2522](https://github.com/apache/hugegraph/pull/2522)
+- Randomly generate default values [#2568](https://github.com/apache/hugegraph/pull/2568)
+- Update build artifact path for Docker deployment [#2590](https://github.com/apache/hugegraph/pull/2590)
+- Ensure thread safety for range attributes in PD [#2641](https://github.com/apache/hugegraph/pull/2641)
+- Correct server Docker copy source path [#2637](https://github.com/apache/hugegraph/pull/2637)
+- Fix JRaft Timer Metrics bug in Hstore [#2602](https://github.com/apache/hugegraph/pull/2602)
+- Enable JRaft MaxBodySize configuration [#2633](https://github.com/apache/hugegraph/pull/2633)
#### Option Changes
-- Mark old raft configs as deprecated [#2661](https://github.com/apache/incubator-hugegraph/pull/2661)
-- Enlarge bytes write limit and remove `big` parameter when encoding/decoding string ID length [#2622](https://github.com/apache/incubator-hugegraph/pull/2622)
+- Mark old raft configs as deprecated [#2661](https://github.com/apache/hugegraph/pull/2661)
+- Enlarge bytes write limit and remove `big` parameter when encoding/decoding string ID length [#2622](https://github.com/apache/hugegraph/pull/2622)
#### Other Changes
-- Add Swagger-UI LICENSE files [#2495](https://github.com/apache/incubator-hugegraph/pull/2495)
-- Translate CJK comments and punctuations to English across multiple modules [#2536](https://github.com/apache/incubator-hugegraph/pull/2536), [#2623](https://github.com/apache/incubator-hugegraph/pull/2625), [#2645](https://github.com/apache/incubator-hugegraph/pull/2645)
-- Introduce `install-dist` module in root [#2552](https://github.com/apache/incubator-hugegraph/pull/2552)
-- Enable up-to-date checks for UI (CI) [#2609](https://github.com/apache/incubator-hugegraph/pull/2609)
-- Minor improvements for POM properties [#2574](https://github.com/apache/incubator-hugegraph/pull/2574)
-- Migrate HugeGraph Commons [#2628](https://github.com/apache/incubator-hugegraph/pull/2628)
-- Tar source and binary packages for HugeGraph with PD-Store [#2594](https://github.com/apache/incubator-hugegraph/pull/2594)
-- Refactor: Enhance cache invalidation of the partition → leader shard in `ClientCache` [#2588](https://github.com/apache/incubator-hugegraph/pull/2588)
-- Refactor: Remove redundant properties in `LogMeta` and `PartitionMeta` [#2598](https://github.com/apache/incubator-hugegraph/pull/2598)
+- Add Swagger-UI LICENSE files [#2495](https://github.com/apache/hugegraph/pull/2495)
+- Translate CJK comments and punctuations to English across multiple modules [#2536](https://github.com/apache/hugegraph/pull/2536), [#2625](https://github.com/apache/hugegraph/pull/2625), [#2645](https://github.com/apache/hugegraph/pull/2645)
+- Introduce `install-dist` module in root [#2552](https://github.com/apache/hugegraph/pull/2552)
+- Enable up-to-date checks for UI (CI) [#2609](https://github.com/apache/hugegraph/pull/2609)
+- Minor improvements for POM properties [#2574](https://github.com/apache/hugegraph/pull/2574)
+- Migrate HugeGraph Commons [#2628](https://github.com/apache/hugegraph/pull/2628)
+- Tar source and binary packages for HugeGraph with PD-Store [#2594](https://github.com/apache/hugegraph/pull/2594)
+- Refactor: Enhance cache invalidation of the partition → leader shard in `ClientCache` [#2588](https://github.com/apache/hugegraph/pull/2588)
+- Refactor: Remove redundant properties in `LogMeta` and `PartitionMeta` [#2598](https://github.com/apache/hugegraph/pull/2598)
### hugegraph-toolchain
#### API Changes
-- Support "parent & child" `EdgeLabel` type [#624](https://github.com/apache/incubator-hugegraph-toolchain/pull/624)
+- Support "parent & child" `EdgeLabel` type [#624](https://github.com/apache/hugegraph-toolchain/pull/624)
#### Feature Changes
-- Support English interface & add a script/doc for it in Hubble [#631](https://github.com/apache/incubator-hugegraph-toolchain/pull/631)
+- Support English interface & add a script/doc for it in Hubble [#631](https://github.com/apache/hugegraph-toolchain/pull/631)
#### Bug Fixes
-- Serialize source and target label for non-father EdgeLabel [#628](https://github.com/apache/incubator-hugegraph-toolchain/pull/628)
-- Encode/decode Chinese error after building Hubble package [#627](https://github.com/apache/incubator-hugegraph-toolchain/pull/627)
-- Configure IPv4 to fix timeout of `yarn install` in Hubble [#636](https://github.com/apache/incubator-hugegraph-toolchain/pull/636)
-- Remove debugging output to speed up the frontend construction in Hubble [#638](https://github.com/apache/incubator-hugegraph-toolchain/pull/638)
+- Serialize source and target label for non-father EdgeLabel [#628](https://github.com/apache/hugegraph-toolchain/pull/628)
+- Encode/decode Chinese error after building Hubble package [#627](https://github.com/apache/hugegraph-toolchain/pull/627)
+- Configure IPv4 to fix timeout of `yarn install` in Hubble [#636](https://github.com/apache/hugegraph-toolchain/pull/636)
+- Remove debugging output to speed up the frontend construction in Hubble [#638](https://github.com/apache/hugegraph-toolchain/pull/638)
#### Other Changes
-- Bump `express` from 4.18.2 to 4.19.2 in Hubble Frontend [#598](https://github.com/apache/incubator-hugegraph-toolchain/pull/598)
-- Make IDEA support IssueNavigationLink [#600](https://github.com/apache/incubator-hugegraph-toolchain/pull/600)
-- Update `yarn.lock` for Hubble [#605](https://github.com/apache/incubator-hugegraph-toolchain/pull/605)
-- Introduce `editorconfig-maven-plugin` for verifying code style defined in `.editorconfig` [#614](https://github.com/apache/incubator-hugegraph-toolchain/pull/614)
-- Upgrade distribution version to 1.5.0 [#639](https://github.com/apache/incubator-hugegraph-toolchain/pull/639)
+- Bump `express` from 4.18.2 to 4.19.2 in Hubble Frontend [#598](https://github.com/apache/hugegraph-toolchain/pull/598)
+- Make IDEA support IssueNavigationLink [#600](https://github.com/apache/hugegraph-toolchain/pull/600)
+- Update `yarn.lock` for Hubble [#605](https://github.com/apache/hugegraph-toolchain/pull/605)
+- Introduce `editorconfig-maven-plugin` for verifying code style defined in `.editorconfig` [#614](https://github.com/apache/hugegraph-toolchain/pull/614)
+- Upgrade distribution version to 1.5.0 [#639](https://github.com/apache/hugegraph-toolchain/pull/639)
#### Documentation Changes
-- Clarify the contributing guidelines [#604](https://github.com/apache/incubator-hugegraph-toolchain/pull/604)
-- Enhance the README file for Hubble [#613](https://github.com/apache/incubator-hugegraph-toolchain/pull/613)
-- Update README style referring to the server's style [#615](https://github.com/apache/incubator-hugegraph-toolchain/pull/615)
+- Clarify the contributing guidelines [#604](https://github.com/apache/hugegraph-toolchain/pull/604)
+- Enhance the README file for Hubble [#613](https://github.com/apache/hugegraph-toolchain/pull/613)
+- Update README style referring to the server's style [#615](https://github.com/apache/hugegraph-toolchain/pull/615)
### hugegraph-ai
#### API Changes
-- Added local LLM API and version API. [#41](https://github.com/apache/incubator-hugegraph-ai/pull/41), [#44](https://github.com/apache/incubator-hugegraph-ai/pull/44)
-- Implemented new API and optimized code structure. [#63](https://github.com/apache/incubator-hugegraph-ai/pull/63)
-- Support for graphspace and refactored all APIs. [#67](https://github.com/apache/incubator-hugegraph-ai/pull/67)
+- Added local LLM API and version API. [#41](https://github.com/apache/hugegraph-ai/pull/41), [#44](https://github.com/apache/hugegraph-ai/pull/44)
+- Implemented new API and optimized code structure. [#63](https://github.com/apache/hugegraph-ai/pull/63)
+- Support for graphspace and refactored all APIs. [#67](https://github.com/apache/hugegraph-ai/pull/67)
#### Feature Changes
-- Added openai's apibase configuration and asynchronous methods in RAG web demo. [#41](https://github.com/apache/incubator-hugegraph-ai/pull/41), [#58](https://github.com/apache/incubator-hugegraph-ai/pull/58)
-- Support for multi reranker and enhanced UI. [#73](https://github.com/apache/incubator-hugegraph-ai/pull/73)
-- Node embedding, node classify, and graph classify with models based on DGL. [#83](https://github.com/apache/incubator-hugegraph-ai/pull/83)
-- Graph learning algorithm implementation (10+). [#102](https://github.com/apache/incubator-hugegraph-ai/pull/102)
-- Support for any openai-style API (standard). [#95](https://github.com/apache/incubator-hugegraph-ai/pull/95)
+- Added openai's apibase configuration and asynchronous methods in RAG web demo. [#41](https://github.com/apache/hugegraph-ai/pull/41), [#58](https://github.com/apache/hugegraph-ai/pull/58)
+- Support for multi reranker and enhanced UI. [#73](https://github.com/apache/hugegraph-ai/pull/73)
+- Node embedding, node classify, and graph classify with models based on DGL. [#83](https://github.com/apache/hugegraph-ai/pull/83)
+- Graph learning algorithm implementation (10+). [#102](https://github.com/apache/hugegraph-ai/pull/102)
+- Support for any openai-style API (standard). [#95](https://github.com/apache/hugegraph-ai/pull/95)
#### Bug Fixes
-- Fixed fusiform_similarity test in traverser for server 1.3.0. [#37](https://github.com/apache/incubator-hugegraph-ai/pull/37)
-- Avoid generating config twice and corrected e_cache type. [#56](https://github.com/apache/incubator-hugegraph-ai/pull/56), [#117](https://github.com/apache/incubator-hugegraph-ai/pull/117)
-- Fixed null value detection on vid attributes. [#115](https://github.com/apache/incubator-hugegraph-ai/pull/115)
-- Handled profile regenerate error. [#98](https://github.com/apache/incubator-hugegraph-ai/pull/98)
+- Fixed fusiform_similarity test in traverser for server 1.3.0. [#37](https://github.com/apache/hugegraph-ai/pull/37)
+- Avoid generating config twice and corrected e_cache type. [#56](https://github.com/apache/hugegraph-ai/pull/56), [#117](https://github.com/apache/hugegraph-ai/pull/117)
+- Fixed null value detection on vid attributes. [#115](https://github.com/apache/hugegraph-ai/pull/115)
+- Handled profile regenerate error. [#98](https://github.com/apache/hugegraph-ai/pull/98)
#### Option Changes
-- Added auth for fastapi and gradio. [#70](https://github.com/apache/incubator-hugegraph-ai/pull/70)
-- Support for multiple property types and importing graph from the entire doc. [#84](https://github.com/apache/incubator-hugegraph-ai/pull/84)
+- Added auth for fastapi and gradio. [#70](https://github.com/apache/hugegraph-ai/pull/70)
+- Support for multiple property types and importing graph from the entire doc. [#84](https://github.com/apache/hugegraph-ai/pull/84)
#### Other Changes
-- Reformatted documentation and updated README. [#36](https://github.com/apache/incubator-hugegraph-ai/pull/36), [#81](https://github.com/apache/incubator-hugegraph-ai/pull/81)
-- Introduced a black for code format in GitHub actions. [#47](https://github.com/apache/incubator-hugegraph-ai/pull/47)
-- Updated dependencies and environment preparations. [#45](https://github.com/apache/incubator-hugegraph-ai/pull/45), [#65](https://github.com/apache/incubator-hugegraph-ai/pull/65)
-- Enhanced user-friendly README. [#82](https://github.com/apache/incubator-hugegraph-ai/pull/82)
+- Reformatted documentation and updated README. [#36](https://github.com/apache/hugegraph-ai/pull/36), [#81](https://github.com/apache/hugegraph-ai/pull/81)
+- Introduced a black for code format in GitHub actions. [#47](https://github.com/apache/hugegraph-ai/pull/47)
+- Updated dependencies and environment preparations. [#45](https://github.com/apache/hugegraph-ai/pull/45), [#65](https://github.com/apache/hugegraph-ai/pull/65)
+- Enhanced user-friendly README. [#82](https://github.com/apache/hugegraph-ai/pull/82)
### hugegraph-computer
#### Feature Changes
-- Support Single Source Shortest Path Algorithm [#285](https://github.com/apache/incubator-hugegraph-computer/pull/285)
-- Support Output Filter [#303](https://github.com/apache/incubator-hugegraph-computer/pull/303)
+- Support Single Source Shortest Path Algorithm [#285](https://github.com/apache/hugegraph-computer/pull/285)
+- Support Output Filter [#303](https://github.com/apache/hugegraph-computer/pull/303)
#### Bug Fixes
-- Fix: base-ref/head-ref Missed in Dependency-Review on Schedule Push [#304](https://github.com/apache/incubator-hugegraph-computer/pull/304)
+- Fix: base-ref/head-ref Missed in Dependency-Review on Schedule Push [#304](https://github.com/apache/hugegraph-computer/pull/304)
#### Option Changes
-- Refactor(core): StringEncoding [#300](https://github.com/apache/incubator-hugegraph-computer/pull/300)
+- Refactor(core): StringEncoding [#300](https://github.com/apache/hugegraph-computer/pull/300)
#### Other Changes
-- Improve(algorithm): Random Walk Vertex Inactive [#301](https://github.com/apache/incubator-hugegraph-computer/pull/301)
-- Upgrade Version to 1.3.0 [#305](https://github.com/apache/incubator-hugegraph-computer/pull/305)
-- Doc(readme): Clarify the Contributing Guidelines [#306](https://github.com/apache/incubator-hugegraph-computer/pull/306)
-- Doc(readme): Add Hyperlink to Apache 2.0 [#308](https://github.com/apache/incubator-hugegraph-computer/pull/308)
-- Migrate Project to Computer Directory [#310](https://github.com/apache/incubator-hugegraph-computer/pull/310)
-- Update for Release 1.5 [#317](https://github.com/apache/incubator-hugegraph-computer/pull/317)
-- Fix Path When Exporting Source Package [#319](https://github.com/apache/incubator-hugegraph-computer/pull/319)
+- Improve(algorithm): Random Walk Vertex Inactive [#301](https://github.com/apache/hugegraph-computer/pull/301)
+- Upgrade Version to 1.3.0 [#305](https://github.com/apache/hugegraph-computer/pull/305)
+- Doc(readme): Clarify the Contributing Guidelines [#306](https://github.com/apache/hugegraph-computer/pull/306)
+- Doc(readme): Add Hyperlink to Apache 2.0 [#308](https://github.com/apache/hugegraph-computer/pull/308)
+- Migrate Project to Computer Directory [#310](https://github.com/apache/hugegraph-computer/pull/310)
+- Update for Release 1.5 [#317](https://github.com/apache/hugegraph-computer/pull/317)
+- Fix Path When Exporting Source Package [#319](https://github.com/apache/hugegraph-computer/pull/319)
### Release Details
Please check the release details/contributor in each repository:
-- [Server Release Notes](https://github.com/apache/incubator-hugegraph/releases)
-- [Toolchain Release Notes](https://github.com/apache/incubator-hugegraph-toolchain/releases)
-- [Computer Release Notes](https://github.com/apache/incubator-hugegraph-computer/releases)
-- [AI Release Notes](https://github.com/apache/incubator-hugegraph-ai/releases)
+- [Server Release Notes](https://github.com/apache/hugegraph/releases)
+- [Toolchain Release Notes](https://github.com/apache/hugegraph-toolchain/releases)
+- [Computer Release Notes](https://github.com/apache/hugegraph-computer/releases)
+- [AI Release Notes](https://github.com/apache/hugegraph-ai/releases)
diff --git a/content/en/docs/changelog/hugegraph-1.7.0-release-notes.md b/content/en/docs/changelog/hugegraph-1.7.0-release-notes.md
index 874730079..d86da02d8 100644
--- a/content/en/docs/changelog/hugegraph-1.7.0-release-notes.md
+++ b/content/en/docs/changelog/hugegraph-1.7.0-release-notes.md
@@ -1,7 +1,7 @@
---
title: "HugeGraph 1.7.0 Release Notes"
linkTitle: "Release-1.7.0"
-weight: 7
+weight: 1
---
> WIP: This doc is under construction, please wait for the final version (BETA)
@@ -14,244 +14,244 @@ For **1.7.0** version `hugegraph`, related components only support Java11.
#### API Changes
-- **BREAKING CHANGE**: Disable legacy backends include MySQL/PG/c*(.etc) [#2746](https://github.com/apache/incubator-hugegraph/pull/2746)
-- **BREAKING CHANGE**: Release version 1.7.0 [server + pd + store] [#2889](https://github.com/apache/incubator-hugegraph/pull/2889)
+- **BREAKING CHANGE**: Disable legacy backends include MySQL/PG/c*(.etc) [#2746](https://github.com/apache/hugegraph/pull/2746)
+- **BREAKING CHANGE**: Release version 1.7.0 [server + pd + store] [#2889](https://github.com/apache/hugegraph/pull/2889)
#### Feature Changes
-- Support MemoryManagement for graph query framework [#2649](https://github.com/apache/incubator-hugegraph/pull/2649)
-- LoginAPI support token_expire field [#2754](https://github.com/apache/incubator-hugegraph/pull/2754)
-- Add option for task role election [#2843](https://github.com/apache/incubator-hugegraph/pull/2843)
-- Optimize perf by avoid boxing long [#2861](https://github.com/apache/incubator-hugegraph/pull/2861)
-- StringId hold bytes to avoid decode/encode [#2862](https://github.com/apache/incubator-hugegraph/pull/2862)
-- Add PerfExample5 and PerfExample6 [#2860](https://github.com/apache/incubator-hugegraph/pull/2860)
-- RocksDBStore remove redundant checkOpened() call [#2863](https://github.com/apache/incubator-hugegraph/pull/2863)
-- Add path filter [#2898](https://github.com/apache/incubator-hugegraph/pull/2898)
-- Init serena memory system & add memories [#2902](https://github.com/apache/incubator-hugegraph/pull/2902)
+- Support MemoryManagement for graph query framework [#2649](https://github.com/apache/hugegraph/pull/2649)
+- LoginAPI support token_expire field [#2754](https://github.com/apache/hugegraph/pull/2754)
+- Add option for task role election [#2843](https://github.com/apache/hugegraph/pull/2843)
+- Optimize perf by avoid boxing long [#2861](https://github.com/apache/hugegraph/pull/2861)
+- StringId hold bytes to avoid decode/encode [#2862](https://github.com/apache/hugegraph/pull/2862)
+- Add PerfExample5 and PerfExample6 [#2860](https://github.com/apache/hugegraph/pull/2860)
+- RocksDBStore remove redundant checkOpened() call [#2863](https://github.com/apache/hugegraph/pull/2863)
+- Add path filter [#2898](https://github.com/apache/hugegraph/pull/2898)
+- Init serena memory system & add memories [#2902](https://github.com/apache/hugegraph/pull/2902)
#### Bug Fixes
-- Filter dynamice path(PUT/GET/DELETE) with params cause OOM [#2569](https://github.com/apache/incubator-hugegraph/pull/2569)
-- JRaft Histogram Metrics Value NaN [#2631](https://github.com/apache/incubator-hugegraph/pull/2631)
-- Update server image desc [#2702](https://github.com/apache/incubator-hugegraph/pull/2702)
-- Kneigbor-api has unmatched edge type with server [#2699](https://github.com/apache/incubator-hugegraph/pull/2699)
-- Add license for swagger-ui & reset use stage to false in ci yml [#2706](https://github.com/apache/incubator-hugegraph/pull/2706)
-- Fix build pd-store arm image [#2744](https://github.com/apache/incubator-hugegraph/pull/2744)
-- Fix graph server cache notifier mechanism [#2729](https://github.com/apache/incubator-hugegraph/pull/2729)
-- Tx leak when stopping the graph server [#2791](https://github.com/apache/incubator-hugegraph/pull/2791)
-- Ensure backend is initialized in gremlin script [#2824](https://github.com/apache/incubator-hugegraph/pull/2824)
-- Fix some potential lock & type cast issues [#2895](https://github.com/apache/incubator-hugegraph/pull/2895)
-- Fix npe in getVersion [#2897](https://github.com/apache/incubator-hugegraph/pull/2897)
-- Fix the support for graphsapi in rocksdb and add testing for graphsapi [#2900](https://github.com/apache/incubator-hugegraph/pull/2900)
-- Remove graph path in auth api path [#2899](https://github.com/apache/incubator-hugegraph/pull/2899)
-- Migrate to LTS jdk11 in all Dockerfile [#2901](https://github.com/apache/incubator-hugegraph/pull/2901)
-- Remove the judgment for java8 compatibility in the init-store [#2905](https://github.com/apache/incubator-hugegraph/pull/2905)
-- Add missing license and remove binary license.txt & fix tinkerpop ci & remove duplicate module [#2910](https://github.com/apache/incubator-hugegraph/pull/2910)
+- Filter dynamic path(PUT/GET/DELETE) with params cause OOM [#2569](https://github.com/apache/hugegraph/pull/2569)
+- JRaft Histogram Metrics Value NaN [#2631](https://github.com/apache/hugegraph/pull/2631)
+- Update server image desc [#2702](https://github.com/apache/hugegraph/pull/2702)
+- Kneighbor-api has unmatched edge type with server [#2699](https://github.com/apache/hugegraph/pull/2699)
+- Add license for swagger-ui & reset use stage to false in ci yml [#2706](https://github.com/apache/hugegraph/pull/2706)
+- Fix build pd-store arm image [#2744](https://github.com/apache/hugegraph/pull/2744)
+- Fix graph server cache notifier mechanism [#2729](https://github.com/apache/hugegraph/pull/2729)
+- Tx leak when stopping the graph server [#2791](https://github.com/apache/hugegraph/pull/2791)
+- Ensure backend is initialized in gremlin script [#2824](https://github.com/apache/hugegraph/pull/2824)
+- Fix some potential lock & type cast issues [#2895](https://github.com/apache/hugegraph/pull/2895)
+- Fix npe in getVersion [#2897](https://github.com/apache/hugegraph/pull/2897)
+- Fix the support for graphsapi in rocksdb and add testing for graphsapi [#2900](https://github.com/apache/hugegraph/pull/2900)
+- Remove graph path in auth api path [#2899](https://github.com/apache/hugegraph/pull/2899)
+- Migrate to LTS jdk11 in all Dockerfile [#2901](https://github.com/apache/hugegraph/pull/2901)
+- Remove the judgment for java8 compatibility in the init-store [#2905](https://github.com/apache/hugegraph/pull/2905)
+- Add missing license and remove binary license.txt & fix tinkerpop ci & remove duplicate module [#2910](https://github.com/apache/hugegraph/pull/2910)
#### Option Changes
-- Remove some outdated configuration [#2678](https://github.com/apache/incubator-hugegraph/pull/2678)
+- Remove some outdated configuration [#2678](https://github.com/apache/hugegraph/pull/2678)
#### Other Changes
-- Update outdated docs for release 1.5.0 [#2690](https://github.com/apache/incubator-hugegraph/pull/2690)
-- Fix licenses and remove empty files [#2692](https://github.com/apache/incubator-hugegraph/pull/2692)
-- Update repo artifacts references [#2695](https://github.com/apache/incubator-hugegraph/pull/2695)
-- Adjust release fury version [#2698](https://github.com/apache/incubator-hugegraph/pull/2698)
-- Fix the JSON license issue [#2697](https://github.com/apache/incubator-hugegraph/pull/2697)
-- Add debug info for tp test [#2688](https://github.com/apache/incubator-hugegraph/pull/2688)
-- Enhance words in README [#2734](https://github.com/apache/incubator-hugegraph/pull/2734)
-- Add collaborators in asf config [#2741](https://github.com/apache/incubator-hugegraph/pull/2741)
-- Adjust the related filters of sofa-bolt [#2735](https://github.com/apache/incubator-hugegraph/pull/2735)
-- Reopen discussion in .asf.yml config [#2751](https://github.com/apache/incubator-hugegraph/pull/2751)
-- Fix typo in README [#2806](https://github.com/apache/incubator-hugegraph/pull/2806)
-- Centralize version management in project [#2797](https://github.com/apache/incubator-hugegraph/pull/2797)
-- Update notice year [#2826](https://github.com/apache/incubator-hugegraph/pull/2826)
-- Improve maven Reproducible Builds → upgrade plugins [#2874](https://github.com/apache/incubator-hugegraph/pull/2874)
-- Enhance docker instruction with auth opened graph [#2881](https://github.com/apache/incubator-hugegraph/pull/2881)
-- Remove the package existing in java8 [#2792](https://github.com/apache/incubator-hugegraph/pull/2792)
-- Revise Docker usage instructions in README [#2882](https://github.com/apache/incubator-hugegraph/pull/2882)
-- Add DeepWiki badge to README [#2883](https://github.com/apache/incubator-hugegraph/pull/2883)
-- Update guidance for store module [#2894](https://github.com/apache/incubator-hugegraph/pull/2894)
-- Update test commands and improve documentation clarity [#2893](https://github.com/apache/incubator-hugegraph/pull/2893)
-- Bump rocksdb version from 7.2.2 to 8.10.2 [#2896](https://github.com/apache/incubator-hugegraph/pull/2896)
+- Update outdated docs for release 1.5.0 [#2690](https://github.com/apache/hugegraph/pull/2690)
+- Fix licenses and remove empty files [#2692](https://github.com/apache/hugegraph/pull/2692)
+- Update repo artifacts references [#2695](https://github.com/apache/hugegraph/pull/2695)
+- Adjust release fury version [#2698](https://github.com/apache/hugegraph/pull/2698)
+- Fix the JSON license issue [#2697](https://github.com/apache/hugegraph/pull/2697)
+- Add debug info for tp test [#2688](https://github.com/apache/hugegraph/pull/2688)
+- Enhance words in README [#2734](https://github.com/apache/hugegraph/pull/2734)
+- Add collaborators in asf config [#2741](https://github.com/apache/hugegraph/pull/2741)
+- Adjust the related filters of sofa-bolt [#2735](https://github.com/apache/hugegraph/pull/2735)
+- Reopen discussion in .asf.yml config [#2751](https://github.com/apache/hugegraph/pull/2751)
+- Fix typo in README [#2806](https://github.com/apache/hugegraph/pull/2806)
+- Centralize version management in project [#2797](https://github.com/apache/hugegraph/pull/2797)
+- Update notice year [#2826](https://github.com/apache/hugegraph/pull/2826)
+- Improve maven Reproducible Builds → upgrade plugins [#2874](https://github.com/apache/hugegraph/pull/2874)
+- Enhance docker instruction with auth opened graph [#2881](https://github.com/apache/hugegraph/pull/2881)
+- Remove the package existing in java8 [#2792](https://github.com/apache/hugegraph/pull/2792)
+- Revise Docker usage instructions in README [#2882](https://github.com/apache/hugegraph/pull/2882)
+- Add DeepWiki badge to README [#2883](https://github.com/apache/hugegraph/pull/2883)
+- Update guidance for store module [#2894](https://github.com/apache/hugegraph/pull/2894)
+- Update test commands and improve documentation clarity [#2893](https://github.com/apache/hugegraph/pull/2893)
+- Bump rocksdb version from 7.2.2 to 8.10.2 [#2896](https://github.com/apache/hugegraph/pull/2896)
### hugegraph-toolchain
#### API Changes
-- Support graphspace [#633](https://github.com/apache/incubator-hugegraph-toolchain/pull/633)
+- Support graphspace [#633](https://github.com/apache/hugegraph-toolchain/pull/633)
#### Feature Changes
-- Support jdbc date type & sync .editorconfig [#648](https://github.com/apache/incubator-hugegraph-toolchain/pull/648)
-- Add a useSSL option for mysql [#650](https://github.com/apache/incubator-hugegraph-toolchain/pull/650)
-- Patch for father sub edge [#654](https://github.com/apache/incubator-hugegraph-toolchain/pull/654)
-- Improve user experience for user script [#666](https://github.com/apache/incubator-hugegraph-toolchain/pull/666)
-- Support concurrent readers, short-id & Graphsrc [#683](https://github.com/apache/incubator-hugegraph-toolchain/pull/683)
-- Init serena onboarding & project memory files [#692](https://github.com/apache/incubator-hugegraph-toolchain/pull/692)
+- Support jdbc date type & sync .editorconfig [#648](https://github.com/apache/hugegraph-toolchain/pull/648)
+- Add a useSSL option for mysql [#650](https://github.com/apache/hugegraph-toolchain/pull/650)
+- Patch for father sub edge [#654](https://github.com/apache/hugegraph-toolchain/pull/654)
+- Improve user experience for user script [#666](https://github.com/apache/hugegraph-toolchain/pull/666)
+- Support concurrent readers, short-id & Graphsrc [#683](https://github.com/apache/hugegraph-toolchain/pull/683)
+- Init serena onboarding & project memory files [#692](https://github.com/apache/hugegraph-toolchain/pull/692)
#### Bug Fixes
-- Typo word in display [#655](https://github.com/apache/incubator-hugegraph-toolchain/pull/655)
-- Patch up missing classes and methods for hubble [#657](https://github.com/apache/incubator-hugegraph-toolchain/pull/657)
-- Adjust Client to 1.7.0 server [#689](https://github.com/apache/incubator-hugegraph-toolchain/pull/689)
-- Remove json license for release 1.7.0 [#698](https://github.com/apache/incubator-hugegraph-toolchain/pull/698)
+- Typo word in display [#655](https://github.com/apache/hugegraph-toolchain/pull/655)
+- Patch up missing classes and methods for hubble [#657](https://github.com/apache/hugegraph-toolchain/pull/657)
+- Adjust Client to 1.7.0 server [#689](https://github.com/apache/hugegraph-toolchain/pull/689)
+- Remove json license for release 1.7.0 [#698](https://github.com/apache/hugegraph-toolchain/pull/698)
#### Other Changes
-- Update hugegraph source commit id [#640](https://github.com/apache/incubator-hugegraph-toolchain/pull/640)
-- Add collaborators in asf config [#656](https://github.com/apache/incubator-hugegraph-toolchain/pull/656)
-- Update pom for version-1.7.0 [#681](https://github.com/apache/incubator-hugegraph-toolchain/pull/681)
-- Add DeepWiki badge to README [#684](https://github.com/apache/incubator-hugegraph-toolchain/pull/684)
-- Adjust APIs to compatible with 1.7.0 server [#685](https://github.com/apache/incubator-hugegraph-toolchain/pull/685)
-- Adjust LoadContext to 1.7.0 version [#687](https://github.com/apache/incubator-hugegraph-toolchain/pull/687)
-- Migrate to LTS jdk11 in all Dockerfile [#691](https://github.com/apache/incubator-hugegraph-toolchain/pull/691)
-- Update copyright year in NOTICE file [#697](https://github.com/apache/incubator-hugegraph-toolchain/pull/697)
+- Update hugegraph source commit id [#640](https://github.com/apache/hugegraph-toolchain/pull/640)
+- Add collaborators in asf config [#656](https://github.com/apache/hugegraph-toolchain/pull/656)
+- Update pom for version-1.7.0 [#681](https://github.com/apache/hugegraph-toolchain/pull/681)
+- Add DeepWiki badge to README [#684](https://github.com/apache/hugegraph-toolchain/pull/684)
+- Adjust APIs to be compatible with 1.7.0 server [#685](https://github.com/apache/hugegraph-toolchain/pull/685)
+- Adjust LoadContext to 1.7.0 version [#687](https://github.com/apache/hugegraph-toolchain/pull/687)
+- Migrate to LTS jdk11 in all Dockerfile [#691](https://github.com/apache/hugegraph-toolchain/pull/691)
+- Update copyright year in NOTICE file [#697](https://github.com/apache/hugegraph-toolchain/pull/697)
### hugegraph-computer
#### Feature Changes
-- Migration Vermeer to hugegraph-computer [#316](https://github.com/apache/incubator-hugegraph-computer/pull/316)
-- Make startChan's size configurable [#328](https://github.com/apache/incubator-hugegraph-computer/pull/328)
-- Assign WorkerGroup via worker configuration [#332](https://github.com/apache/incubator-hugegraph-computer/pull/332)
-- Support task priority based scheduling [#336](https://github.com/apache/incubator-hugegraph-computer/pull/336)
-- Avoid 800k [#340](https://github.com/apache/incubator-hugegraph-computer/pull/340)
+- Migrate Vermeer to hugegraph-computer [#316](https://github.com/apache/hugegraph-computer/pull/316)
+- Make startChan's size configurable [#328](https://github.com/apache/hugegraph-computer/pull/328)
+- Assign WorkerGroup via worker configuration [#332](https://github.com/apache/hugegraph-computer/pull/332)
+- Support task priority based scheduling [#336](https://github.com/apache/hugegraph-computer/pull/336)
+- Avoid 800k [#340](https://github.com/apache/hugegraph-computer/pull/340)
#### Bug Fixes
-- Fix docker file build [#341](https://github.com/apache/incubator-hugegraph-computer/pull/341)
+- Fix docker file build [#341](https://github.com/apache/hugegraph-computer/pull/341)
#### Other Changes
-- Update release version to 1.5.0 [#318](https://github.com/apache/incubator-hugegraph-computer/pull/318)
-- Update go depends module & fix headers [#321](https://github.com/apache/incubator-hugegraph-computer/pull/321)
-- Update go version to 1.23 [#322](https://github.com/apache/incubator-hugegraph-computer/pull/322)
-- Add collaborator in .asf.yaml [#323](https://github.com/apache/incubator-hugegraph-computer/pull/323)
-- Update the Go version in docker image [#333](https://github.com/apache/incubator-hugegraph-computer/pull/333)
-- Add DeepWiki badge to README [#337](https://github.com/apache/incubator-hugegraph-computer/pull/337)
-- Bump project version to 1.7.0 (RELEASE) [#338](https://github.com/apache/incubator-hugegraph-computer/pull/338)
-- Update copyright year in NOTICE file [#342](https://github.com/apache/incubator-hugegraph-computer/pull/342)
+- Update release version to 1.5.0 [#318](https://github.com/apache/hugegraph-computer/pull/318)
+- Update go depends module & fix headers [#321](https://github.com/apache/hugegraph-computer/pull/321)
+- Update go version to 1.23 [#322](https://github.com/apache/hugegraph-computer/pull/322)
+- Add collaborator in .asf.yaml [#323](https://github.com/apache/hugegraph-computer/pull/323)
+- Update the Go version in docker image [#333](https://github.com/apache/hugegraph-computer/pull/333)
+- Add DeepWiki badge to README [#337](https://github.com/apache/hugegraph-computer/pull/337)
+- Bump project version to 1.7.0 (RELEASE) [#338](https://github.com/apache/hugegraph-computer/pull/338)
+- Update copyright year in NOTICE file [#342](https://github.com/apache/hugegraph-computer/pull/342)
### hugegraph-ai
#### API Changes
-- Support choose template in api [#135](https://github.com/apache/incubator-hugegraph-ai/pull/135)
-- Add post method for paths-api [#162](https://github.com/apache/incubator-hugegraph-ai/pull/162)
-- Support switch graph in api & add some query configs [#184](https://github.com/apache/incubator-hugegraph-ai/pull/184)
-- Text2gremlin api [#258](https://github.com/apache/incubator-hugegraph-ai/pull/258)
-- Support switching prompt EN/CN [#269](https://github.com/apache/incubator-hugegraph-ai/pull/269)
-- **BREAKING CHANGE**: Update keyword extraction method [#282](https://github.com/apache/incubator-hugegraph-ai/pull/282)
+- Support choose template in api [#135](https://github.com/apache/hugegraph-ai/pull/135)
+- Add post method for paths-api [#162](https://github.com/apache/hugegraph-ai/pull/162)
+- Support switch graph in api & add some query configs [#184](https://github.com/apache/hugegraph-ai/pull/184)
+- Text2gremlin api [#258](https://github.com/apache/hugegraph-ai/pull/258)
+- Support switching prompt EN/CN [#269](https://github.com/apache/hugegraph-ai/pull/269)
+- **BREAKING CHANGE**: Update keyword extraction method [#282](https://github.com/apache/hugegraph-ai/pull/282)
#### Feature Changes
-- Added the process of text2gql in graphrag V1.0 [#105](https://github.com/apache/incubator-hugegraph-ai/pull/105)
-- Use pydantic-settings for config management [#122](https://github.com/apache/incubator-hugegraph-ai/pull/122)
-- Timely execute vid embedding & enhance some HTTP logic [#141](https://github.com/apache/incubator-hugegraph-ai/pull/141)
-- Use retry from tenacity [#143](https://github.com/apache/incubator-hugegraph-ai/pull/143)
-- Modify the summary info and enhance the request logic [#147](https://github.com/apache/incubator-hugegraph-ai/pull/147)
-- Automatic backup graph data timely [#151](https://github.com/apache/incubator-hugegraph-ai/pull/151)
-- Add a button to backup data & count together [#153](https://github.com/apache/incubator-hugegraph-ai/pull/153)
-- Extract topk_per_keyword & topk_return_results to .env [#154](https://github.com/apache/incubator-hugegraph-ai/pull/154)
-- Modify clear buttons [#156](https://github.com/apache/incubator-hugegraph-ai/pull/156)
-- Support intent recognition V1 [#159](https://github.com/apache/incubator-hugegraph-ai/pull/159)
-- Change vid embedding x:yy to yy & use multi-thread [#158](https://github.com/apache/incubator-hugegraph-ai/pull/158)
-- Support mathjax in rag query block V1 [#157](https://github.com/apache/incubator-hugegraph-ai/pull/157)
-- Use poetry to manage the dependencies [#149](https://github.com/apache/incubator-hugegraph-ai/pull/149)
-- Return schema.groovy first when backup graph data [#161](https://github.com/apache/incubator-hugegraph-ai/pull/161)
-- Merge all logs into one file [#171](https://github.com/apache/incubator-hugegraph-ai/pull/171)
-- Use uv for the CI action [#175](https://github.com/apache/incubator-hugegraph-ai/pull/175)
-- Use EN prompt for keywords extraction [#174](https://github.com/apache/incubator-hugegraph-ai/pull/174)
-- Support litellm LLM provider [#178](https://github.com/apache/incubator-hugegraph-ai/pull/178)
-- Improve graph extraction default prompt [#187](https://github.com/apache/incubator-hugegraph-ai/pull/187)
-- Replace vid by full vertexes info [#189](https://github.com/apache/incubator-hugegraph-ai/pull/189)
-- Support asynchronous streaming generation in rag block by using async_generator and asyncio.wait [#190](https://github.com/apache/incubator-hugegraph-ai/pull/190)
-- Generalize the regex extraction func [#194](https://github.com/apache/incubator-hugegraph-ai/pull/194)
-- Create quick_start.md [#196](https://github.com/apache/incubator-hugegraph-ai/pull/196)
-- Support Docker & K8s deployment way [#195](https://github.com/apache/incubator-hugegraph-ai/pull/195)
-- Multi-stage building in Dockerfile [#199](https://github.com/apache/incubator-hugegraph-ai/pull/199)
-- Support graph checking before updating vid embedding [#205](https://github.com/apache/incubator-hugegraph-ai/pull/205)
-- Disable text2gql by default [#216](https://github.com/apache/incubator-hugegraph-ai/pull/216)
-- Use 4.1-mini and 0.01 temperature by default [#214](https://github.com/apache/incubator-hugegraph-ai/pull/214)
-- Enhance the multi configs for LLM [#212](https://github.com/apache/incubator-hugegraph-ai/pull/212)
-- Textbox to Code [#217](https://github.com/apache/incubator-hugegraph-ai/pull/223)
-- Replace the IP + Port with URL [#209](https://github.com/apache/incubator-hugegraph-ai/pull/209)
-- Update gradio's version [#235](https://github.com/apache/incubator-hugegraph-ai/pull/235)
-- Use asyncio to get embeddings [#215](https://github.com/apache/incubator-hugegraph-ai/pull/215)
-- Change QPS -> RPM for timer decorator [#241](https://github.com/apache/incubator-hugegraph-ai/pull/241)
-- Support batch embedding [#238](https://github.com/apache/incubator-hugegraph-ai/pull/238)
-- Using nuitka to provide a binary/perf way for the service [#242](https://github.com/apache/incubator-hugegraph-ai/pull/242)
-- Use uv instead poetry [#226](https://github.com/apache/incubator-hugegraph-ai/pull/226)
-- Basic compatible in text2gremlin generation [#261](https://github.com/apache/incubator-hugegraph-ai/pull/261)
-- Enhance config path handling and add project root validation [#262](https://github.com/apache/incubator-hugegraph-ai/pull/262)
-- Add vermeer python client for graph computing [#263](https://github.com/apache/incubator-hugegraph-ai/pull/263)
-- Use uv in client & ml modules & adapter the CI [#257](https://github.com/apache/incubator-hugegraph-ai/pull/257)
-- Use uv to manage pkgs & update README [#272](https://github.com/apache/incubator-hugegraph-ai/pull/272)
-- Limit the deps version to handle critical init problems [#279](https://github.com/apache/incubator-hugegraph-ai/pull/279)
-- Support semi-automated prompt generation [#281](https://github.com/apache/incubator-hugegraph-ai/pull/281)
-- Support semi-automated generated graph schema [#274](https://github.com/apache/incubator-hugegraph-ai/pull/274)
-- Unify all modules with uv [#287](https://github.com/apache/incubator-hugegraph-ai/pull/287)
-- Add GitHub Actions for auto upstream sync and update SEALData subsample logic [#289](https://github.com/apache/incubator-hugegraph-ai/pull/289)
-- Add a basic LLM/AI coding instruction file [#290](https://github.com/apache/incubator-hugegraph-ai/pull/290)
-- Add rules for AI coding guideline - V1.0 [#293](https://github.com/apache/incubator-hugegraph-ai/pull/293)
-- Replace QianFan by OpenAI-compatible format [#285](https://github.com/apache/incubator-hugegraph-ai/pull/285)
-- Optimize vector index with asyncio embedding [#264](https://github.com/apache/incubator-hugegraph-ai/pull/264)
-- Refactor embedding parallelization to preserve order [#295](https://github.com/apache/incubator-hugegraph-ai/pull/295)
-- Support storing vector data for a graph instance by model type/name [#265](https://github.com/apache/incubator-hugegraph-ai/pull/265)
-- Add AGENTS.md as new document standard [#299](https://github.com/apache/incubator-hugegraph-ai/pull/299)
-- Add Fixed Workflow Execution Engine: Flow, Node, and Scheduler Architecture [#302](https://github.com/apache/incubator-hugegraph-ai/pull/302)
-- Support vector db layer V1.0 [#304](https://github.com/apache/incubator-hugegraph-ai/pull/304)
+- Added the process of text2gql in graphrag V1.0 [#105](https://github.com/apache/hugegraph-ai/pull/105)
+- Use pydantic-settings for config management [#122](https://github.com/apache/hugegraph-ai/pull/122)
+- Timely execute vid embedding & enhance some HTTP logic [#141](https://github.com/apache/hugegraph-ai/pull/141)
+- Use retry from tenacity [#143](https://github.com/apache/hugegraph-ai/pull/143)
+- Modify the summary info and enhance the request logic [#147](https://github.com/apache/hugegraph-ai/pull/147)
+- Automatic backup graph data timely [#151](https://github.com/apache/hugegraph-ai/pull/151)
+- Add a button to backup data & count together [#153](https://github.com/apache/hugegraph-ai/pull/153)
+- Extract topk_per_keyword & topk_return_results to .env [#154](https://github.com/apache/hugegraph-ai/pull/154)
+- Modify clear buttons [#156](https://github.com/apache/hugegraph-ai/pull/156)
+- Support intent recognition V1 [#159](https://github.com/apache/hugegraph-ai/pull/159)
+- Change vid embedding x:yy to yy & use multi-thread [#158](https://github.com/apache/hugegraph-ai/pull/158)
+- Support mathjax in rag query block V1 [#157](https://github.com/apache/hugegraph-ai/pull/157)
+- Use poetry to manage the dependencies [#149](https://github.com/apache/hugegraph-ai/pull/149)
+- Return schema.groovy first when backup graph data [#161](https://github.com/apache/hugegraph-ai/pull/161)
+- Merge all logs into one file [#171](https://github.com/apache/hugegraph-ai/pull/171)
+- Use uv for the CI action [#175](https://github.com/apache/hugegraph-ai/pull/175)
+- Use EN prompt for keywords extraction [#174](https://github.com/apache/hugegraph-ai/pull/174)
+- Support litellm LLM provider [#178](https://github.com/apache/hugegraph-ai/pull/178)
+- Improve graph extraction default prompt [#187](https://github.com/apache/hugegraph-ai/pull/187)
+- Replace vid by full vertexes info [#189](https://github.com/apache/hugegraph-ai/pull/189)
+- Support asynchronous streaming generation in rag block by using async_generator and asyncio.wait [#190](https://github.com/apache/hugegraph-ai/pull/190)
+- Generalize the regex extraction func [#194](https://github.com/apache/hugegraph-ai/pull/194)
+- Create quick_start.md [#196](https://github.com/apache/hugegraph-ai/pull/196)
+- Support Docker & K8s deployment way [#195](https://github.com/apache/hugegraph-ai/pull/195)
+- Multi-stage building in Dockerfile [#199](https://github.com/apache/hugegraph-ai/pull/199)
+- Support graph checking before updating vid embedding [#205](https://github.com/apache/hugegraph-ai/pull/205)
+- Disable text2gql by default [#216](https://github.com/apache/hugegraph-ai/pull/216)
+- Use 4.1-mini and 0.01 temperature by default [#214](https://github.com/apache/hugegraph-ai/pull/214)
+- Enhance the multi configs for LLM [#212](https://github.com/apache/hugegraph-ai/pull/212)
+- Textbox to Code [#223](https://github.com/apache/hugegraph-ai/pull/223)
+- Replace the IP + Port with URL [#209](https://github.com/apache/hugegraph-ai/pull/209)
+- Update gradio's version [#235](https://github.com/apache/hugegraph-ai/pull/235)
+- Use asyncio to get embeddings [#215](https://github.com/apache/hugegraph-ai/pull/215)
+- Change QPS -> RPM for timer decorator [#241](https://github.com/apache/hugegraph-ai/pull/241)
+- Support batch embedding [#238](https://github.com/apache/hugegraph-ai/pull/238)
+- Using nuitka to provide a binary/perf way for the service [#242](https://github.com/apache/hugegraph-ai/pull/242)
+- Use uv instead of poetry [#226](https://github.com/apache/hugegraph-ai/pull/226)
+- Basic compatible in text2gremlin generation [#261](https://github.com/apache/hugegraph-ai/pull/261)
+- Enhance config path handling and add project root validation [#262](https://github.com/apache/hugegraph-ai/pull/262)
+- Add vermeer python client for graph computing [#263](https://github.com/apache/hugegraph-ai/pull/263)
+- Use uv in client & ml modules & adapter the CI [#257](https://github.com/apache/hugegraph-ai/pull/257)
+- Use uv to manage pkgs & update README [#272](https://github.com/apache/hugegraph-ai/pull/272)
+- Limit the deps version to handle critical init problems [#279](https://github.com/apache/hugegraph-ai/pull/279)
+- Support semi-automated prompt generation [#281](https://github.com/apache/hugegraph-ai/pull/281)
+- Support semi-automated generated graph schema [#274](https://github.com/apache/hugegraph-ai/pull/274)
+- Unify all modules with uv [#287](https://github.com/apache/hugegraph-ai/pull/287)
+- Add GitHub Actions for auto upstream sync and update SEALData subsample logic [#289](https://github.com/apache/hugegraph-ai/pull/289)
+- Add a basic LLM/AI coding instruction file [#290](https://github.com/apache/hugegraph-ai/pull/290)
+- Add rules for AI coding guideline - V1.0 [#293](https://github.com/apache/hugegraph-ai/pull/293)
+- Replace QianFan by OpenAI-compatible format [#285](https://github.com/apache/hugegraph-ai/pull/285)
+- Optimize vector index with asyncio embedding [#264](https://github.com/apache/hugegraph-ai/pull/264)
+- Refactor embedding parallelization to preserve order [#295](https://github.com/apache/hugegraph-ai/pull/295)
+- Support storing vector data for a graph instance by model type/name [#265](https://github.com/apache/hugegraph-ai/pull/265)
+- Add AGENTS.md as new document standard [#299](https://github.com/apache/hugegraph-ai/pull/299)
+- Add Fixed Workflow Execution Engine: Flow, Node, and Scheduler Architecture [#302](https://github.com/apache/hugegraph-ai/pull/302)
+- Support vector db layer V1.0 [#304](https://github.com/apache/hugegraph-ai/pull/304)
#### Bug Fixes
-- Limit the length of log & improve the format [#121](https://github.com/apache/incubator-hugegraph-ai/pull/121)
-- Pylint in ml [#125](https://github.com/apache/incubator-hugegraph-ai/pull/125)
-- Critical bug with pylint usage [#131](https://github.com/apache/incubator-hugegraph-ai/pull/131)
-- Multi vid k-neighbor query only return the data of first vid [#132](https://github.com/apache/incubator-hugegraph-ai/pull/132)
-- Replace getenv usage to settings [#133](https://github.com/apache/incubator-hugegraph-ai/pull/133)
-- Correct header writing errors [#140](https://github.com/apache/incubator-hugegraph-ai/pull/140)
-- Update prompt to fit prefix cache [#137](https://github.com/apache/incubator-hugegraph-ai/pull/137)
-- Extract_graph_data use wrong method [#145](https://github.com/apache/incubator-hugegraph-ai/pull/145)
-- Use empty str for llm config [#155](https://github.com/apache/incubator-hugegraph-ai/pull/155)
-- Update gremlin generate prompt to apply fuzzy match [#163](https://github.com/apache/incubator-hugegraph-ai/pull/163)
-- Enable fastapi auto reload function [#164](https://github.com/apache/incubator-hugegraph-ai/pull/164)
-- Fix tiny bugs & optimize reranker layout [#202](https://github.com/apache/incubator-hugegraph-ai/pull/202)
-- Enable tasks concurrency configs in Gradio [#188](https://github.com/apache/incubator-hugegraph-ai/pull/188)
-- Align regex extraction of json to json format of prompt [#211](https://github.com/apache/incubator-hugegraph-ai/pull/211)
-- Fix documentation sample code error [#219](https://github.com/apache/incubator-hugegraph-ai/pull/219)
-- Failed to remove vectors when updating vid embedding [#243](https://github.com/apache/incubator-hugegraph-ai/pull/243)
-- Skip empty chunk in LLM steaming mode [#245](https://github.com/apache/incubator-hugegraph-ai/pull/245)
-- Ollama batch embedding bug [#250](https://github.com/apache/incubator-hugegraph-ai/pull/250)
-- Fix Dockerfile to add pyproject.toml anchor file [#266](https://github.com/apache/incubator-hugegraph-ai/pull/266)
-- Add missing 'properties' in gremlin prompt formatting [#298](https://github.com/apache/incubator-hugegraph-ai/pull/298)
-- Fixed cgraph version [#305](https://github.com/apache/incubator-hugegraph-ai/pull/305)
-- Ollama embedding API usage and config param [#306](https://github.com/apache/incubator-hugegraph-ai/pull/306)
+- Limit the length of log & improve the format [#121](https://github.com/apache/hugegraph-ai/pull/121)
+- Pylint in ml [#125](https://github.com/apache/hugegraph-ai/pull/125)
+- Critical bug with pylint usage [#131](https://github.com/apache/hugegraph-ai/pull/131)
+- Multi vid k-neighbor query only return the data of first vid [#132](https://github.com/apache/hugegraph-ai/pull/132)
+- Replace getenv usage to settings [#133](https://github.com/apache/hugegraph-ai/pull/133)
+- Correct header writing errors [#140](https://github.com/apache/hugegraph-ai/pull/140)
+- Update prompt to fit prefix cache [#137](https://github.com/apache/hugegraph-ai/pull/137)
+- Extract_graph_data use wrong method [#145](https://github.com/apache/hugegraph-ai/pull/145)
+- Use empty str for llm config [#155](https://github.com/apache/hugegraph-ai/pull/155)
+- Update gremlin generate prompt to apply fuzzy match [#163](https://github.com/apache/hugegraph-ai/pull/163)
+- Enable fastapi auto reload function [#164](https://github.com/apache/hugegraph-ai/pull/164)
+- Fix tiny bugs & optimize reranker layout [#202](https://github.com/apache/hugegraph-ai/pull/202)
+- Enable tasks concurrency configs in Gradio [#188](https://github.com/apache/hugegraph-ai/pull/188)
+- Align regex extraction of json to json format of prompt [#211](https://github.com/apache/hugegraph-ai/pull/211)
+- Fix documentation sample code error [#219](https://github.com/apache/hugegraph-ai/pull/219)
+- Failed to remove vectors when updating vid embedding [#243](https://github.com/apache/hugegraph-ai/pull/243)
+- Skip empty chunk in LLM streaming mode [#245](https://github.com/apache/hugegraph-ai/pull/245)
+- Ollama batch embedding bug [#250](https://github.com/apache/hugegraph-ai/pull/250)
+- Fix Dockerfile to add pyproject.toml anchor file [#266](https://github.com/apache/hugegraph-ai/pull/266)
+- Add missing 'properties' in gremlin prompt formatting [#298](https://github.com/apache/hugegraph-ai/pull/298)
+- Fixed cgraph version [#305](https://github.com/apache/hugegraph-ai/pull/305)
+- Ollama embedding API usage and config param [#306](https://github.com/apache/hugegraph-ai/pull/306)
#### Option Changes
-- Remove enable_gql logic in api & rag block [#148](https://github.com/apache/incubator-hugegraph-ai/pull/148)
+- Remove enable_gql logic in api & rag block [#148](https://github.com/apache/hugegraph-ai/pull/148)
#### Other Changes
-- Update README for python-client/SDK [#150](https://github.com/apache/incubator-hugegraph-ai/pull/150)
-- Enable pip cache [#142](https://github.com/apache/incubator-hugegraph-ai/pull/142)
-- Enable discussion & change merge way [#201](https://github.com/apache/incubator-hugegraph-ai/pull/201)
-- Synchronization with official documentation [#273](https://github.com/apache/incubator-hugegraph-ai/pull/273)
-- Fix grammar errors [#275](https://github.com/apache/incubator-hugegraph-ai/pull/275)
-- Improve README clarity and deployment instructions [#276](https://github.com/apache/incubator-hugegraph-ai/pull/276)
-- Add docker-compose deployment and improve container networking instructions [#280](https://github.com/apache/incubator-hugegraph-ai/pull/280)
-- Update docker compose command [#283](https://github.com/apache/incubator-hugegraph-ai/pull/283)
-- Reduce third-party library log output [#244](https://github.com/apache/incubator-hugegraph-ai/pull/284)
-- Update README with improved setup instructions [#294](https://github.com/apache/incubator-hugegraph-ai/pull/294)
-- Add collaborators in asf config [#182](https://github.com/apache/incubator-hugegraph-ai/pull/182)
+- Update README for python-client/SDK [#150](https://github.com/apache/hugegraph-ai/pull/150)
+- Enable pip cache [#142](https://github.com/apache/hugegraph-ai/pull/142)
+- Enable discussion & change merge way [#201](https://github.com/apache/hugegraph-ai/pull/201)
+- Synchronization with official documentation [#273](https://github.com/apache/hugegraph-ai/pull/273)
+- Fix grammar errors [#275](https://github.com/apache/hugegraph-ai/pull/275)
+- Improve README clarity and deployment instructions [#276](https://github.com/apache/hugegraph-ai/pull/276)
+- Add docker-compose deployment and improve container networking instructions [#280](https://github.com/apache/hugegraph-ai/pull/280)
+- Update docker compose command [#283](https://github.com/apache/hugegraph-ai/pull/283)
+- Reduce third-party library log output [#284](https://github.com/apache/hugegraph-ai/pull/284)
+- Update README with improved setup instructions [#294](https://github.com/apache/hugegraph-ai/pull/294)
+- Add collaborators in asf config [#182](https://github.com/apache/hugegraph-ai/pull/182)
### Release Details
Please check the release details/contributor in each repository:
-- [Server Release Notes](https://github.com/apache/incubator-hugegraph/releases)
-- [Toolchain Release Notes](https://github.com/apache/incubator-hugegraph-toolchain/releases)
-- [Computer Release Notes](https://github.com/apache/incubator-hugegraph-computer/releases)
-- [AI Release Notes](https://github.com/apache/incubator-hugegraph-ai/releases)
+- [Server Release Notes](https://github.com/apache/hugegraph/releases)
+- [Toolchain Release Notes](https://github.com/apache/hugegraph-toolchain/releases)
+- [Computer Release Notes](https://github.com/apache/hugegraph-computer/releases)
+- [AI Release Notes](https://github.com/apache/hugegraph-ai/releases)
diff --git a/content/en/docs/clients/gremlin-console.md b/content/en/docs/clients/gremlin-console.md
index a1de36d40..d13a09229 100644
--- a/content/en/docs/clients/gremlin-console.md
+++ b/content/en/docs/clients/gremlin-console.md
@@ -43,7 +43,7 @@ gremlin>
> The `--` here will be parsed by getopts as the last option, allowing the subsequent options to be passed to Gremlin-Console for processing. `-i` represents `Execute the specified script and leave the console open on completion`. For more options, you can refer to the [source code](https://github.com/apache/tinkerpop/blob/3.5.1/gremlin-console/src/main/groovy/org/apache/tinkerpop/gremlin/console/Console.groovy#L483) of Gremlin-Console.
-[`example.groovy`](https://github.com/apache/incubator-hugegraph/blob/master/hugegraph-server/hugegraph-dist/src/assembly/static/scripts/example.groovy) is an example script under the `scripts` directory. This script inserts some data and queries the number of vertices and edges in the graph at the end.
+[`example.groovy`](https://github.com/apache/hugegraph/blob/master/hugegraph-server/hugegraph-dist/src/assembly/static/scripts/example.groovy) is an example script under the `scripts` directory. This script inserts some data and queries the number of vertices and edges in the graph at the end.
You can continue to enter Gremlin statements to operate on the graph:
diff --git a/content/en/docs/clients/restful-api/_index.md b/content/en/docs/clients/restful-api/_index.md
index 355b4cbc3..c5158247e 100644
--- a/content/en/docs/clients/restful-api/_index.md
+++ b/content/en/docs/clients/restful-api/_index.md
@@ -9,7 +9,7 @@ weight: 1
> - HugeGraph 1.7.0+ introduces graphspaces, and REST paths follow `/graphspaces/{graphspace}/graphs/{graph}`.
> - HugeGraph 1.5.x and earlier still rely on the legacy `/graphs/{graph}` path, and the create/clone graph APIs require `Content-Type: text/plain`; 1.7.0+ expects JSON bodies.
> - The default graphspace name is `DEFAULT`, which you can use directly if you do not need multi-tenant isolation.
-> - **Note**: Before version 1.5.0, the format of ids such as group/target was similar to -69:grant. After version 1.7.0, the id and name were consistent, such as admin [HugeGraph 1.5.x RESTful API](https://github.com/apache/incubator-hugegraph-doc/tree/release-1.5.0)
+> - **Note**: Before version 1.5.0, the format of ids such as group/target was similar to -69:grant. After version 1.7.0, the id and name were consistent, such as admin [HugeGraph 1.5.x RESTful API](https://github.com/apache/hugegraph-doc/tree/release-1.5.0)
Besides the documentation below, you can also open `swagger-ui` at `localhost:8080/swagger-ui/index.html` to explore the RESTful API. [Here is an example](/docs/quickstart/hugegraph/hugegraph-server#swaggerui-example)
diff --git a/content/en/docs/clients/restful-api/auth.md b/content/en/docs/clients/restful-api/auth.md
index e90b84089..a3af6339b 100644
--- a/content/en/docs/clients/restful-api/auth.md
+++ b/content/en/docs/clients/restful-api/auth.md
@@ -2,8 +2,13 @@
title: "Authentication API"
linkTitle: "Authentication"
weight: 16
+description: "Authentication REST API: Manage users, roles, permissions, and access control to implement fine-grained graph data security."
---
+> **Version Change Notice**:
+> - 1.7.0+: Auth API paths use GraphSpace format, such as `/graphspaces/DEFAULT/auth/users`, and group/target IDs match their names (e.g., `admin`)
+> - 1.5.x and earlier: Auth API paths include graph name, and group/target IDs use format like `-69:grant`. See [HugeGraph 1.5.x RESTful API](https://github.com/apache/hugegraph-doc/tree/release-1.5.0)
+
### 10.1 User Authentication and Access Control
> To enable authentication and related configurations, please refer to the [Authentication Configuration](/docs/config/config-authentication/) documentation.
@@ -17,7 +22,7 @@ Description: User 'boss' has read permission for people in the 'graph1' graph fr
##### Interface Description:
The user authentication and access control interface includes 5 categories: UserAPI, GroupAPI, TargetAPI, BelongAPI, AccessAPI.
-**Note** Before 1.5.0, the format of ids such as group/target was similar to -69:grant. After 1.7.0, the id and name were consistent. Such as admin [HugeGraph 1.5 x RESTful API](https://github.com/apache/incubator-hugegraph-doc/tree/release-1.5.0)
+**Note** Before 1.5.0, the format of ids such as group/target was similar to -69:grant. After 1.7.0, the id and name were consistent. Such as admin [HugeGraph 1.5 x RESTful API](https://github.com/apache/hugegraph-doc/tree/release-1.5.0)
### 10.2 User (User) API
The user interface includes APIs for creating users, deleting users, modifying users, and querying user-related information.
@@ -1044,7 +1049,7 @@ GET http://localhost:8080/graphspaces/DEFAULT/auth/accesses/S-69:all>-88>11>S-77
### 10.7 Graphspace Manager (Manager) API
-> **Note**: Before using the following APIs, you need to create a graphspace first. For example, create a graphspace named `gs1` via the [Graphspace API](../graphspace). The examples below assume that `gs1` already exists.
+> **Note**: Before using the following APIs, you need to create a graphspace first. For example, create a graphspace named `gs1` via the [Graphspace API](./graphspace). The examples below assume that `gs1` already exists.
1. The graphspace manager API is used to grant/revoke manager roles for users at the graphspace level, and to query the roles of the current user or other users in a graphspace. Supported role types include `SPACE`, `SPACE_MEMBER`, and `ADMIN`.
diff --git a/content/en/docs/clients/restful-api/cypher.md b/content/en/docs/clients/restful-api/cypher.md
index ba120e2c7..4d4a5b940 100644
--- a/content/en/docs/clients/restful-api/cypher.md
+++ b/content/en/docs/clients/restful-api/cypher.md
@@ -2,6 +2,7 @@
title: "Cypher API"
linkTitle: "Cypher"
weight: 15
+description: "Cypher REST API: Execute OpenCypher declarative graph query language via HTTP interface."
---
### 9.1 Cypher
diff --git a/content/en/docs/clients/restful-api/edge.md b/content/en/docs/clients/restful-api/edge.md
index aaff63967..0e28fbe70 100644
--- a/content/en/docs/clients/restful-api/edge.md
+++ b/content/en/docs/clients/restful-api/edge.md
@@ -2,6 +2,7 @@
title: "Edge API"
linkTitle: "Edge"
weight: 8
+description: "Edge REST API: Create, query, update, and delete relationship data between vertices with support for batch operations and directional queries."
---
### 2.2 Edge
diff --git a/content/en/docs/clients/restful-api/edgelabel.md b/content/en/docs/clients/restful-api/edgelabel.md
index 4906c687f..a452ec5e9 100644
--- a/content/en/docs/clients/restful-api/edgelabel.md
+++ b/content/en/docs/clients/restful-api/edgelabel.md
@@ -2,6 +2,7 @@
title: "EdgeLabel API"
linkTitle: "EdgeLabel"
weight: 4
+description: "EdgeLabel REST API: Define edge types and relationship constraints between source and target vertices to construct graph connection rules."
---
### 1.4 EdgeLabel
@@ -310,4 +311,4 @@ DELETE http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/schema/edgelab
Note:
-> You can query the execution status of an asynchronous task by using `GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/1` (where "1" is the task_id). For more information, refer to the [Asynchronous Task RESTful API](../task).
+> You can query the execution status of an asynchronous task by using `GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/1` (where "1" is the task_id). For more information, refer to the [Asynchronous Task RESTful API](./task).
diff --git a/content/en/docs/clients/restful-api/graphs.md b/content/en/docs/clients/restful-api/graphs.md
index 913d8ae2a..269c48843 100644
--- a/content/en/docs/clients/restful-api/graphs.md
+++ b/content/en/docs/clients/restful-api/graphs.md
@@ -2,6 +2,7 @@
title: "Graphs API"
linkTitle: "Graphs"
weight: 12
+description: "Graphs REST API: Manage graph instance lifecycle including creating, querying, cloning, clearing, and deleting graph databases."
---
### 6.1 Graphs
@@ -112,25 +113,25 @@ DELETE http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/clear?confirm_
##### Method & Url
```
-POST http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph_clone?clone_graph_name=hugegraph
+POST http://localhost:8080/graphspaces/DEFAULT/graphs/cloneGraph?clone_graph_name=hugegraph
```
##### Request Body [Optional]
Clone a `non-auth` mode graph (set `Content-Type: application/json`)
-```json
+```javascript
{
"gremlin.graph": "org.apache.hugegraph.HugeFactory",
"backend": "rocksdb",
"serializer": "binary",
- "store": "hugegraph",
+ "store": "cloneGraph",
"rocksdb.data_path": "./rks-data-xx",
"rocksdb.wal_path": "./rks-data-xx"
}
```
-> Note:
+> Note:
> 1. The data/wal_path can't be the same as the existing graph (use separate directories)
> 2. Replace "gremlin.graph=org.apache.hugegraph.auth.HugeFactoryAuthProxy" to enable auth mode
@@ -144,8 +145,8 @@ Clone a `non-auth` mode graph (set `Content-Type: application/json`)
```javascript
{
- "name": "hugegraph_clone",
- "backend": "rocksdb"
+ "name": "cloneGraph",
+ "backend": "rocksdb"
}
```
@@ -166,11 +167,21 @@ POST http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph2
##### Request Body
-Create a non-auth graph (set `Content-Type: application/json`)
+Create a graph (set `Content-Type: application/json`)
+
+**`gremlin.graph` Configuration:**
+- Auth mode: `"gremlin.graph": "org.apache.hugegraph.auth.HugeFactoryAuthProxy"` (Recommended)
+- Non-auth mode: `"gremlin.graph": "org.apache.hugegraph.HugeFactory"`
+
+**Note**!!
+1. In version 1.7.0, dynamic graph creation would cause a NPE. This issue has been fixed in [PR#2912](https://github.com/apache/hugegraph/pull/2912). The current master version and versions after 1.7.0 do not have this problem.
+2. For version 1.7.0 and earlier, if the backend is hstore, you must add "task.scheduler_type": "distributed" in the request body. Also ensure HugeGraph-Server is properly configured with PD, see [HStore Configuration](/docs/quickstart/hugegraph/hugegraph-server/#511-distributed-storage-hstore).
+
+**RocksDB Example:**
-```json
+```javascript
{
- "gremlin.graph": "org.apache.hugegraph.HugeFactory",
+ "gremlin.graph": "org.apache.hugegraph.auth.HugeFactoryAuthProxy",
"backend": "rocksdb",
"serializer": "binary",
"store": "hugegraph2",
@@ -179,9 +190,20 @@ Create a non-auth graph (set `Content-Type: application/json`)
}
```
-> Note:
-> 1. The data/wal_path can't be the same as the existing graph (use separate directories)
-> 2. Replace "gremlin.graph=org.apache.hugegraph.auth.HugeFactoryAuthProxy" to enable auth mode
+**HStore Example (for version 1.7.0 and earlier):**
+
+```javascript
+{
+ "gremlin.graph": "org.apache.hugegraph.auth.HugeFactoryAuthProxy",
+ "backend": "hstore",
+ "serializer": "binary",
+ "store": "hugegraph2",
+ "task.scheduler_type": "distributed",
+ "pd.peers": "127.0.0.1:8686"
+}
+```
+
+> Note: The data/wal_path can't be the same as the existing graph (use separate directories)
##### Response Status
@@ -216,7 +238,7 @@ Since deleting a graph is a dangerous operation, we have added parameters for co
##### Method & Url
```
-DELETE http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph_clone?confirm_message=I%27m%20sure%20to%20drop%20the%20graph
+DELETE http://localhost:8080/graphspaces/DEFAULT/graphs/graphA?confirm_message=I%27m%20sure%20to%20drop%20the%20graph
```
##### Response Status
diff --git a/content/en/docs/clients/restful-api/graphspace.md b/content/en/docs/clients/restful-api/graphspace.md
index 15eb1a91b..d38d056d4 100644
--- a/content/en/docs/clients/restful-api/graphspace.md
+++ b/content/en/docs/clients/restful-api/graphspace.md
@@ -2,6 +2,7 @@
title: "Graphspace API"
linkTitle: "Graphspace"
weight: 1
+description: "Graphspace REST API: Multi-tenancy and resource isolation for creating, viewing, updating, and deleting graph spaces with prerequisites and constraints."
---
### 2.0 Graphspace
diff --git a/content/en/docs/clients/restful-api/gremlin.md b/content/en/docs/clients/restful-api/gremlin.md
index f78ad082c..ccd72686c 100644
--- a/content/en/docs/clients/restful-api/gremlin.md
+++ b/content/en/docs/clients/restful-api/gremlin.md
@@ -2,6 +2,7 @@
title: "Gremlin API"
linkTitle: "Gremlin"
weight: 14
+description: "Gremlin REST API: Execute Gremlin graph traversal language scripts via HTTP interface."
---
### 8.1 Gremlin
@@ -224,7 +225,7 @@ Note:
Note:
-> You can query the execution status of an asynchronous task by using `GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/1` (where "1" is the task_id). For more information, refer to the [Asynchronous Task RESTful API](../task).
+> You can query the execution status of an asynchronous task by using `GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/1` (where "1" is the task_id). For more information, refer to the [Asynchronous Task RESTful API](./task).
**Querying edges**
@@ -255,4 +256,4 @@ Note:
Note:
-> You can query the execution status of an asynchronous task by using `GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/2` (where "2" is the task_id). For more information, refer to the [Asynchronous Task RESTful API](../task).
+> You can query the execution status of an asynchronous task by using `GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/2` (where "2" is the task_id). For more information, refer to the [Asynchronous Task RESTful API](./task).
diff --git a/content/en/docs/clients/restful-api/indexlabel.md b/content/en/docs/clients/restful-api/indexlabel.md
index 74320d37d..d56c156a6 100644
--- a/content/en/docs/clients/restful-api/indexlabel.md
+++ b/content/en/docs/clients/restful-api/indexlabel.md
@@ -2,6 +2,7 @@
title: "IndexLabel API"
linkTitle: "IndexLabel"
weight: 5
+description: "IndexLabel REST API: Create indexes on vertex and edge properties to accelerate property-based queries and filtering operations."
---
### 1.5 IndexLabel
@@ -173,4 +174,4 @@ DELETE http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/schema/indexla
Note:
-> You can query the execution status of an asynchronous task by using `GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/1` (where "1" is the task_id). For more information, refer to the [Asynchronous Task RESTful API](../task).
+> You can query the execution status of an asynchronous task by using `GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/1` (where "1" is the task_id). For more information, refer to the [Asynchronous Task RESTful API](./task).
diff --git a/content/en/docs/clients/restful-api/metrics.md b/content/en/docs/clients/restful-api/metrics.md
index 16255b248..c0e74058e 100644
--- a/content/en/docs/clients/restful-api/metrics.md
+++ b/content/en/docs/clients/restful-api/metrics.md
@@ -2,7 +2,7 @@
title: "Metrics API"
linkTitle: "Metrics"
weight: 17
-
+description: "Metrics REST API: Retrieve runtime performance metrics, statistics, and health status data of the system."
---
diff --git a/content/en/docs/clients/restful-api/other.md b/content/en/docs/clients/restful-api/other.md
index ed5135388..23b27d4b5 100644
--- a/content/en/docs/clients/restful-api/other.md
+++ b/content/en/docs/clients/restful-api/other.md
@@ -2,6 +2,7 @@
title: "Other API"
linkTitle: "Other"
weight: 18
+description: "Other REST API: Provide auxiliary functions such as system version query and API version information."
---
### 11.1 Other
diff --git a/content/en/docs/clients/restful-api/propertykey.md b/content/en/docs/clients/restful-api/propertykey.md
index 90c76414c..ec7888bff 100644
--- a/content/en/docs/clients/restful-api/propertykey.md
+++ b/content/en/docs/clients/restful-api/propertykey.md
@@ -2,6 +2,7 @@
title: "PropertyKey API"
linkTitle: "PropertyKey"
weight: 2
+description: "PropertyKey REST API: Define data types and cardinality constraints for all properties in the graph, serving as fundamental schema elements."
---
### 1.2 PropertyKey
diff --git a/content/en/docs/clients/restful-api/rank.md b/content/en/docs/clients/restful-api/rank.md
index e1dd71a4c..9e335292c 100644
--- a/content/en/docs/clients/restful-api/rank.md
+++ b/content/en/docs/clients/restful-api/rank.md
@@ -2,6 +2,7 @@
title: "Rank API"
linkTitle: "Rank"
weight: 10
+description: "Rank REST API: Execute graph node ranking algorithms such as PageRank and Personalized PageRank for centrality analysis."
---
### 4.1 Rank API overview
diff --git a/content/en/docs/clients/restful-api/rebuild.md b/content/en/docs/clients/restful-api/rebuild.md
index b2dbaf6f3..ef76d6e46 100644
--- a/content/en/docs/clients/restful-api/rebuild.md
+++ b/content/en/docs/clients/restful-api/rebuild.md
@@ -2,6 +2,7 @@
title: "Rebuild API"
linkTitle: "Rebuild"
weight: 6
+description: "Rebuild REST API: Rebuild graph schema indexes to ensure consistency between index data and graph data."
---
### 1.6 Rebuild
@@ -29,7 +30,7 @@ PUT http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/jobs/rebuild/inde
```
Note:
-> You can get the asynchronous job status by `GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/${task_id}` (the task_id here should be 1). See More [AsyncJob RESTfull API](../task)
+> You can get the asynchronous job status by `GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/${task_id}` (the task_id here should be 1). See more [AsyncJob RESTful API](./task)
#### 1.6.2 Rebulid all Indexs of VertexLabel
@@ -55,7 +56,7 @@ PUT http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/jobs/rebuild/vert
Note:
-> You can get the asynchronous job status by `GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/${task_id}` (the task_id here should be 2). See More [AsyncJob RESTfull API](../task)
+> You can get the asynchronous job status by `GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/${task_id}` (the task_id here should be 2). See more [AsyncJob RESTful API](./task)
#### 1.6.3 Rebulid all Indexs of EdgeLabel
@@ -81,4 +82,4 @@ PUT http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/jobs/rebuild/edge
Note:
-> You can get the asynchronous job status by `GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/${task_id}` (the task_id here should be 3). See More [AsyncJob RESTfull API](../task)
\ No newline at end of file
+> You can get the asynchronous job status by `GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/${task_id}` (the task_id here should be 3). See more [AsyncJob RESTful API](./task)
\ No newline at end of file
diff --git a/content/en/docs/clients/restful-api/schema.md b/content/en/docs/clients/restful-api/schema.md
index 6364cd3e4..82a9fdc6f 100644
--- a/content/en/docs/clients/restful-api/schema.md
+++ b/content/en/docs/clients/restful-api/schema.md
@@ -2,6 +2,7 @@
title: "Schema API"
linkTitle: "Schema"
weight: 1
+description: "Schema REST API: Query the complete schema definition of a graph, including property keys, vertex labels, edge labels, and index labels."
---
### 1.1 Schema
diff --git a/content/en/docs/clients/restful-api/task.md b/content/en/docs/clients/restful-api/task.md
index 18f87d560..ef5097014 100644
--- a/content/en/docs/clients/restful-api/task.md
+++ b/content/en/docs/clients/restful-api/task.md
@@ -2,6 +2,7 @@
title: "Task API"
linkTitle: "Task"
weight: 13
+description: "Task REST API: Query and manage asynchronous task execution status for long-running operations like index rebuilding and graph traversals."
---
### 7.1 Task
diff --git a/content/en/docs/clients/restful-api/traverser.md b/content/en/docs/clients/restful-api/traverser.md
index 681166132..1f7a1e49f 100644
--- a/content/en/docs/clients/restful-api/traverser.md
+++ b/content/en/docs/clients/restful-api/traverser.md
@@ -2,6 +2,7 @@
title: "Traverser API"
linkTitle: "Traverser"
weight: 9
+description: "Traverser REST API: Execute complex graph algorithms and path queries including shortest path, k-neighbors, similarity computation, and advanced analytics."
---
### 3.1 Overview of Traverser API
diff --git a/content/en/docs/clients/restful-api/variable.md b/content/en/docs/clients/restful-api/variable.md
index 151498771..ad1141670 100644
--- a/content/en/docs/clients/restful-api/variable.md
+++ b/content/en/docs/clients/restful-api/variable.md
@@ -2,6 +2,7 @@
title: "Variable API"
linkTitle: "Variable"
weight: 11
+description: "Variable REST API: Store and manage key-value pairs as global variables for graph-level configuration and state management."
---
### 5.1 Variables
diff --git a/content/en/docs/clients/restful-api/vertex.md b/content/en/docs/clients/restful-api/vertex.md
index d016cb146..ab401f5da 100644
--- a/content/en/docs/clients/restful-api/vertex.md
+++ b/content/en/docs/clients/restful-api/vertex.md
@@ -2,6 +2,7 @@
title: "Vertex API"
linkTitle: "Vertex"
weight: 7
+description: "Vertex REST API: Create, query, update, and delete vertex data in the graph with support for batch operations and conditional filtering."
---
### 2.1 Vertex
diff --git a/content/en/docs/clients/restful-api/vertexlabel.md b/content/en/docs/clients/restful-api/vertexlabel.md
index 241497098..6e6388a7d 100644
--- a/content/en/docs/clients/restful-api/vertexlabel.md
+++ b/content/en/docs/clients/restful-api/vertexlabel.md
@@ -2,6 +2,7 @@
title: "VertexLabel API"
linkTitle: "VertexLabel"
weight: 3
+description: "VertexLabel REST API: Define vertex types, ID strategies, and associated properties that determine vertex structure and constraints."
---
### 1.3 VertexLabel
@@ -307,4 +308,4 @@ DELETE http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/schema/vertexl
Note:
-> You can use `GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/1` (where "1" is the task_id) to query the execution status of the asynchronous task. For more information, refer to the [Asynchronous Task RESTful API](../task).
+> You can use `GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/1` (where "1" is the task_id) to query the execution status of the asynchronous task. For more information, refer to the [Asynchronous Task RESTful API](./task).
diff --git a/content/en/docs/config/_index.md b/content/en/docs/config/_index.md
index 04db80c57..b79b5af96 100644
--- a/content/en/docs/config/_index.md
+++ b/content/en/docs/config/_index.md
@@ -1,5 +1,12 @@
---
-title: "Config"
-linkTitle: "Config"
+title: "HugeGraph-Server Configuration"
+linkTitle: "Server Config"
weight: 4
----
\ No newline at end of file
+---
+
+This section covers HugeGraph-Server configuration, including:
+
+- **[Server Startup Guide](config-guide)** - Understand config file structure and basic setup
+- **[Server Complete Configuration Manual](config-option)** - Complete list of configuration options
+- **[Authentication Config](config-authentication)** - User authentication and authorization
+- **[HTTPS Config](config-https)** - Enable HTTPS secure protocol
\ No newline at end of file
diff --git a/content/en/docs/config/config-authentication.md b/content/en/docs/config/config-authentication.md
index 4ebde6303..e00c8a417 100644
--- a/content/en/docs/config/config-authentication.md
+++ b/content/en/docs/config/config-authentication.md
@@ -101,14 +101,14 @@ If deployed based on Docker image or if HugeGraph has already been initialized a
relevant graph data needs to be deleted and HugeGraph needs to be restarted. If there is already business data in the diagram,
it is temporarily **not possible** to directly convert the authentication mode (version<=1.2.0)
-> Improvements for this feature have been included in the latest release (available in the latest docker image), please refer to [PR 2411](https://github.com/apache/incubator-hugegraph/pull/2411). Seamless switching is now available.
+> Improvements for this feature have been included in the latest release (available in the latest docker image), please refer to [PR 2411](https://github.com/apache/hugegraph/pull/2411). Seamless switching is now available.
```bash
# stop the hugeGraph firstly
bin/stop-hugegraph.sh
# delete the store data (here we use the default path for rocksdb)
-# there is no need to delete in the latest version (fixed in https://github.com/apache/incubator-hugegraph/pull/2411)
+# there is no need to delete in the latest version (fixed in https://github.com/apache/hugegraph/pull/2411)
rm -rf rocksdb-data/
# init store again
diff --git a/content/en/docs/config/config-computer.md b/content/en/docs/config/config-computer.md
deleted file mode 100644
index 08f804c10..000000000
--- a/content/en/docs/config/config-computer.md
+++ /dev/null
@@ -1,177 +0,0 @@
----
-title: "HugeGraph-Computer Config"
-linkTitle: "Config Computer"
-weight: 5
----
-
-### Computer Config Options
-
-| config option | default value | description |
-|-----------------------------------------|-------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| algorithm.message_class | org.apache.hugegraph.computer.core.config.Null | The class of message passed when compute vertex. |
-| algorithm.params_class | org.apache.hugegraph.computer.core.config.Null | The class used to transfer algorithms' parameters before algorithm been run. |
-| algorithm.result_class | org.apache.hugegraph.computer.core.config.Null | The class of vertex's value, the instance is used to store computation result for the vertex. |
-| allocator.max_vertices_per_thread | 10000 | Maximum number of vertices per thread processed in each memory allocator |
-| bsp.etcd_endpoints | http://localhost:2379 | The end points to access etcd. |
-| bsp.log_interval | 30000 | The log interval(in ms) to print the log while waiting bsp event. |
-| bsp.max_super_step | 10 | The max super step of the algorithm. |
-| bsp.register_timeout | 300000 | The max timeout to wait for master and works to register. |
-| bsp.wait_master_timeout | 86400000 | The max timeout(in ms) to wait for master bsp event. |
-| bsp.wait_workers_timeout | 86400000 | The max timeout to wait for workers bsp event. |
-| hgkv.max_data_block_size | 65536 | The max byte size of hgkv-file data block. |
-| hgkv.max_file_size | 2147483648 | The max number of bytes in each hgkv-file. |
-| hgkv.max_merge_files | 10 | The max number of files to merge at one time. |
-| hgkv.temp_file_dir | /tmp/hgkv | This folder is used to store temporary files, temporary files will be generated during the file merging process. |
-| hugegraph.name | hugegraph | The graph name to load data and write results back. |
-| hugegraph.url | http://127.0.0.1:8080 | The hugegraph url to load data and write results back. |
-| input.edge_direction | OUT | The data of the edge in which direction is loaded, when the value is BOTH, the edges in both OUT and IN direction will be loaded. |
-| input.edge_freq | MULTIPLE | The frequency of edges can exist between a pair of vertices, allowed values: [SINGLE, SINGLE_PER_LABEL, MULTIPLE]. SINGLE means that only one edge can exist between a pair of vertices, use sourceId + targetId to identify it; SINGLE_PER_LABEL means that each edge label can exist one edge between a pair of vertices, use sourceId + edgelabel + targetId to identify it; MULTIPLE means that many edge can exist between a pair of vertices, use sourceId + edgelabel + sortValues + targetId to identify it. |
-| input.filter_class | org.apache.hugegraph.computer.core.input.filter.DefaultInputFilter | The class to create input-filter object, input-filter is used to Filter vertex edges according to user needs. |
-| input.loader_schema_path || The schema path of loader input, only takes effect when the input.source_type=loader is enabled |
-| input.loader_struct_path || The struct path of loader input, only takes effect when the input.source_type=loader is enabled |
-| input.max_edges_in_one_vertex | 200 | The maximum number of adjacent edges allowed to be attached to a vertex, the adjacent edges will be stored and transferred together as a batch unit. |
-| input.source_type | hugegraph-server | The source type to load input data, allowed values: ['hugegraph-server', 'hugegraph-loader'], the 'hugegraph-loader' means use hugegraph-loader load data from HDFS or file, if use 'hugegraph-loader' load data then please config 'input.loader_struct_path' and 'input.loader_schema_path'. |
-| input.split_fetch_timeout | 300 | The timeout in seconds to fetch input splits |
-| input.split_max_splits | 10000000 | The maximum number of input splits |
-| input.split_page_size | 500 | The page size for streamed load input split data |
-| input.split_size | 1048576 | The input split size in bytes |
-| job.id | local_0001 | The job id on Yarn cluster or K8s cluster. |
-| job.partitions_count | 1 | The partitions count for computing one graph algorithm job. |
-| job.partitions_thread_nums | 4 | The number of threads for partition parallel compute. |
-| job.workers_count | 1 | The workers count for computing one graph algorithm job. |
-| master.computation_class | org.apache.hugegraph.computer.core.master.DefaultMasterComputation | Master-computation is computation that can determine whether to continue next superstep. It runs at the end of each superstep on master. |
-| output.batch_size | 500 | The batch size of output |
-| output.batch_threads | 1 | The threads number used to batch output |
-| output.hdfs_core_site_path || The hdfs core site path. |
-| output.hdfs_delimiter | , | The delimiter of hdfs output. |
-| output.hdfs_kerberos_enable | false | Is Kerberos authentication enabled for Hdfs. |
-| output.hdfs_kerberos_keytab || The Hdfs's key tab file for kerberos authentication. |
-| output.hdfs_kerberos_principal || The Hdfs's principal for kerberos authentication. |
-| output.hdfs_krb5_conf | /etc/krb5.conf | Kerberos configuration file. |
-| output.hdfs_merge_partitions | true | Whether merge output files of multiple partitions. |
-| output.hdfs_path_prefix | /hugegraph-computer/results | The directory of hdfs output result. |
-| output.hdfs_replication | 3 | The replication number of hdfs. |
-| output.hdfs_site_path || The hdfs site path. |
-| output.hdfs_url | hdfs://127.0.0.1:9000 | The hdfs url of output. |
-| output.hdfs_user | hadoop | The hdfs user of output. |
-| output.output_class | org.apache.hugegraph.computer.core.output.LogOutput | The class to output the computation result of each vertex. Be called after iteration computation. |
-| output.result_name | value | The value is assigned dynamically by #name() of instance created by WORKER_COMPUTATION_CLASS. |
-| output.result_write_type | OLAP_COMMON | The result write-type to output to hugegraph, allowed values are: [OLAP_COMMON, OLAP_SECONDARY, OLAP_RANGE]. |
-| output.retry_interval | 10 | The retry interval when output failed |
-| output.retry_times | 3 | The retry times when output failed |
-| output.single_threads | 1 | The threads number used to single output |
-| output.thread_pool_shutdown_timeout | 60 | The timeout seconds of output threads pool shutdown |
-| output.with_adjacent_edges | false | Output the adjacent edges of the vertex or not |
-| output.with_edge_properties | false | Output the properties of the edge or not |
-| output.with_vertex_properties | false | Output the properties of the vertex or not |
-| sort.thread_nums | 4 | The number of threads performing internal sorting. |
-| transport.client_connect_timeout | 3000 | The timeout(in ms) of client connect to server. |
-| transport.client_threads | 4 | The number of transport threads for client. |
-| transport.close_timeout | 10000 | The timeout(in ms) of close server or close client. |
-| transport.finish_session_timeout | 0 | The timeout(in ms) to finish session, 0 means using (transport.sync_request_timeout * transport.max_pending_requests). |
-| transport.heartbeat_interval | 20000 | The minimum interval(in ms) between heartbeats on client side. |
-| transport.io_mode | AUTO | The network IO Mode, either 'NIO', 'EPOLL', 'AUTO', the 'AUTO' means selecting the property mode automatically. |
-| transport.max_pending_requests | 8 | The max number of client unreceived ack, it will trigger the sending unavailable if the number of unreceived ack >= max_pending_requests. |
-| transport.max_syn_backlog | 511 | The capacity of SYN queue on server side, 0 means using system default value. |
-| transport.max_timeout_heartbeat_count | 120 | The maximum times of timeout heartbeat on client side, if the number of timeouts waiting for heartbeat response continuously > max_heartbeat_timeouts the channel will be closed from client side. |
-| transport.min_ack_interval | 200 | The minimum interval(in ms) of server reply ack. |
-| transport.min_pending_requests | 6 | The minimum number of client unreceived ack, it will trigger the sending available if the number of unreceived ack < min_pending_requests. |
-| transport.network_retries | 3 | The number of retry attempts for network communication,if network unstable. |
-| transport.provider_class | org.apache.hugegraph.computer.core.network.netty.NettyTransportProvider | The transport provider, currently only supports Netty. |
-| transport.receive_buffer_size | 0 | The size of socket receive-buffer in bytes, 0 means using system default value. |
-| transport.recv_file_mode | true | Whether enable receive buffer-file mode, it will receive buffer write file from socket by zero-copy if enable. |
-| transport.send_buffer_size | 0 | The size of socket send-buffer in bytes, 0 means using system default value. |
-| transport.server_host | 127.0.0.1 | The server hostname or ip to listen on to transfer data. |
-| transport.server_idle_timeout | 360000 | The max timeout(in ms) of server idle. |
-| transport.server_port | 0 | The server port to listen on to transfer data. The system will assign a random port if it's set to 0. |
-| transport.server_threads | 4 | The number of transport threads for server. |
-| transport.sync_request_timeout | 10000 | The timeout(in ms) to wait response after sending sync-request. |
-| transport.tcp_keep_alive | true | Whether enable TCP keep-alive. |
-| transport.transport_epoll_lt | false | Whether enable EPOLL level-trigger. |
-| transport.write_buffer_high_mark | 67108864 | The high water mark for write buffer in bytes, it will trigger the sending unavailable if the number of queued bytes > write_buffer_high_mark. |
-| transport.write_buffer_low_mark | 33554432 | The low water mark for write buffer in bytes, it will trigger the sending available if the number of queued bytes < write_buffer_low_mark.org.apache.hugegraph.config.OptionChecker$$Lambda$97/0x00000008001c8440@776a6d9b |
-| transport.write_socket_timeout | 3000 | The timeout(in ms) to write data to socket buffer. |
-| valuefile.max_segment_size | 1073741824 | The max number of bytes in each segment of value-file. |
-| worker.combiner_class | org.apache.hugegraph.computer.core.config.Null | Combiner can combine messages into one value for a vertex, for example page-rank algorithm can combine messages of a vertex to a sum value. |
-| worker.computation_class | org.apache.hugegraph.computer.core.config.Null | The class to create worker-computation object, worker-computation is used to compute each vertex in each superstep. |
-| worker.data_dirs | [jobs] | The directories separated by ',' that received vertices and messages can persist into. |
-| worker.edge_properties_combiner_class | org.apache.hugegraph.computer.core.combiner.OverwritePropertiesCombiner | The combiner can combine several properties of the same edge into one properties at inputstep. |
-| worker.partitioner | org.apache.hugegraph.computer.core.graph.partition.HashPartitioner | The partitioner that decides which partition a vertex should be in, and which worker a partition should be in. |
-| worker.received_buffers_bytes_limit | 104857600 | The limit bytes of buffers of received data, the total size of all buffers can't excess this limit. If received buffers reach this limit, they will be merged into a file. |
-| worker.vertex_properties_combiner_class | org.apache.hugegraph.computer.core.combiner.OverwritePropertiesCombiner | The combiner can combine several properties of the same vertex into one properties at inputstep. |
-| worker.wait_finish_messages_timeout | 86400000 | The max timeout(in ms) message-handler wait for finish-message of all workers. |
-| worker.wait_sort_timeout | 600000 | The max timeout(in ms) message-handler wait for sort-thread to sort one batch of buffers. |
-| worker.write_buffer_capacity | 52428800 | The initial size of write buffer that used to store vertex or message. |
-| worker.write_buffer_threshold | 52428800 | The threshold of write buffer, exceeding it will trigger sorting, the write buffer is used to store vertex or message. |
-
-### K8s Operator Config Options
-
-> NOTE: Option needs to be converted through environment variable settings, e.g. k8s.internal_etcd_url => INTERNAL_ETCD_URL
-
-| config option | default value | description |
-|------------------------------|---------------------------|---------------------------------------------------------------------------------------------------------------------------------|
-| k8s.auto_destroy_pod | true | Whether to automatically destroy all pods when the job is completed or failed. |
-| k8s.close_reconciler_timeout | 120 | The max timeout(in ms) to close reconciler. |
-| k8s.internal_etcd_url | http://127.0.0.1:2379 | The internal etcd url for operator system. |
-| k8s.max_reconcile_retry | 3 | The max retry times of reconcile. |
-| k8s.probe_backlog | 50 | The maximum backlog for serving health probes. |
-| k8s.probe_port | 9892 | The value is the port that the controller bind to for serving health probes. |
-| k8s.ready_check_internal | 1000 | The time interval(ms) of check ready. |
-| k8s.ready_timeout | 30000 | The max timeout(in ms) of check ready. |
-| k8s.reconciler_count | 10 | The max number of reconciler thread. |
-| k8s.resync_period | 600000 | The minimum frequency at which watched resources are reconciled. |
-| k8s.timezone | Asia/Shanghai | The timezone of computer job and operator. |
-| k8s.watch_namespace | hugegraph-computer-system | The value is watch custom resources in the namespace, ignore other namespaces, the '*' means is all namespaces will be watched. |
-
-### HugeGraph-Computer CRD
-
-> CRD: https://github.com/apache/hugegraph-computer/blob/master/computer-k8s-operator/manifest/hugegraph-computer-crd.v1.yaml
-
-| spec | default value | description | required |
-|-----------------|-------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|
-| algorithmName | | The name of algorithm. | true |
-| jobId | | The job id. | true |
-| image | | The image of algorithm. | true |
-| computerConf | | The map of computer config options. | true |
-| workerInstances | | The number of worker instances, it will instead the 'job.workers_count' option. | true |
-| pullPolicy | Always | The pull-policy of image, detail please refer to: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy | false |
-| pullSecrets | | The pull-secrets of Image, detail please refer to: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod | false |
-| masterCpu | | The cpu limit of master, the unit can be 'm' or without unit detail please refer to:[https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu) | false |
-| workerCpu | | The cpu limit of worker, the unit can be 'm' or without unit detail please refer to:[https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu) | false |
-| masterMemory | | The memory limit of master, the unit can be one of Ei、Pi、Ti、Gi、Mi、Ki detail please refer to:[https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory) | false |
-| workerMemory | | The memory limit of worker, the unit can be one of Ei、Pi、Ti、Gi、Mi、Ki detail please refer to:[https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory) | false |
-| log4jXml | | The content of log4j.xml for computer job. | false |
-| jarFile | | The jar path of computer algorithm. | false |
-| remoteJarUri | | The remote jar uri of computer algorithm, it will overlay algorithm image. | false |
-| jvmOptions | | The java startup parameters of computer job. | false |
-| envVars | | please refer to: https://kubernetes.io/docs/tasks/inject-data-application/define-interdependent-environment-variables/ | false |
-| envFrom | | please refer to: https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/ | false |
-| masterCommand | bin/start-computer.sh | The run command of master, equivalent to 'Entrypoint' field of Docker. | false |
-| masterArgs | ["-r master", "-d k8s"] | The run args of master, equivalent to 'Cmd' field of Docker. | false |
-| workerCommand | bin/start-computer.sh | The run command of worker, equivalent to 'Entrypoint' field of Docker. | false |
-| workerArgs | ["-r worker", "-d k8s"] | The run args of worker, equivalent to 'Cmd' field of Docker. | false |
-| volumes | | Please refer to: https://kubernetes.io/docs/concepts/storage/volumes/ | false |
-| volumeMounts | | Please refer to: https://kubernetes.io/docs/concepts/storage/volumes/ | false |
-| secretPaths | | The map of k8s-secret name and mount path. | false |
-| configMapPaths | | The map of k8s-configmap name and mount path. | false |
-| podTemplateSpec | | Please refer to: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-template-v1/#PodTemplateSpec | false |
-| securityContext | | Please refer to: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ | false |
-
-### KubeDriver Config Options
-
-| config option | default value | description |
-|----------------------------------|------------------------------------------|-----------------------------------------------------------|
-| k8s.build_image_bash_path || The path of command used to build image. |
-| k8s.enable_internal_algorithm | true | Whether enable internal algorithm. |
-| k8s.framework_image_url | hugegraph/hugegraph-computer:latest | The image url of computer framework. |
-| k8s.image_repository_password || The password for login image repository. |
-| k8s.image_repository_registry || The address for login image repository. |
-| k8s.image_repository_url | hugegraph/hugegraph-computer | The url of image repository. |
-| k8s.image_repository_username || The username for login image repository. |
-| k8s.internal_algorithm | [pageRank] | The name list of all internal algorithm. |
-| k8s.internal_algorithm_image_url | hugegraph/hugegraph-computer:latest | The image url of internal algorithm. |
-| k8s.jar_file_dir | /cache/jars/ | The directory where the algorithm jar to upload location. |
-| k8s.kube_config | ~/.kube/config | The path of k8s config file. |
-| k8s.log4j_xml_path || The log4j.xml path for computer job. |
-| k8s.namespace | hugegraph-computer-system | The namespace of hugegraph-computer system. |
-| k8s.pull_secret_names | [] | The names of pull-secret for pulling image. |
diff --git a/content/en/docs/config/config-guide.md b/content/en/docs/config/config-guide.md
index 979c49332..8d5c3699b 100644
--- a/content/en/docs/config/config-guide.md
+++ b/content/en/docs/config/config-guide.md
@@ -1,6 +1,6 @@
---
-title: "HugeGraph configuration"
-linkTitle: "Config Guide"
+title: "Server Startup Guide"
+linkTitle: "Server Startup Guide"
weight: 1
---
@@ -137,7 +137,7 @@ ssl: {
There are many configuration options mentioned above, but for now, let's focus on the following options: `channelizer` and `graphs`.
- `graphs`: This option specifies the graphs that need to be opened when the GremlinServer starts. It is a map structure where the key is the name of the graph and the value is the configuration file path for that graph.
-- `channelizer`: The GremlinServer supports two communication modes with clients: WebSocket and HTTP (default). If WebSocket is chosen, users can quickly experience the features of HugeGraph using [Gremlin-Console](/clients/gremlin-console.html), but it does not support importing large-scale data. It is recommended to use HTTP for communication, as all peripheral components of HugeGraph are implemented based on HTTP.
+- `channelizer`: The GremlinServer supports two communication modes with clients: WebSocket and HTTP (default). If WebSocket is chosen, users can quickly experience the features of HugeGraph using [Gremlin-Console](../clients/gremlin-console), but it does not support importing large-scale data. It is recommended to use HTTP for communication, as all peripheral components of HugeGraph are implemented based on HTTP.
By default, the GremlinServer serves at `localhost:8182`. If you need to modify it, configure the `host` and `port` settings.
@@ -171,8 +171,7 @@ arthas.ip=127.0.0.1
arthas.disabled_commands=jad
# authentication configs
-# choose 'org.apache.hugegraph.auth.StandardAuthenticator' or
-# 'org.apache.hugegraph.auth.ConfigAuthenticator'
+# choose 'org.apache.hugegraph.auth.StandardAuthenticator' or a custom implementation
#auth.authenticator=
# for StandardAuthenticator mode
@@ -180,10 +179,6 @@ arthas.disabled_commands=jad
# auth client config
#auth.remote_url=127.0.0.1:8899,127.0.0.1:8898,127.0.0.1:8897
-# for ConfigAuthenticator mode
-#auth.admin_token=
-#auth.user_tokens=[]
-
# TODO: Deprecated & removed later (useless from version 1.5.0)
# rpc server configs for multi graph-servers or raft-servers
#rpc.server_host=127.0.0.1
diff --git a/content/en/docs/config/config-option.md b/content/en/docs/config/config-option.md
index c018ef293..e6a074e28 100644
--- a/content/en/docs/config/config-option.md
+++ b/content/en/docs/config/config-option.md
@@ -1,6 +1,6 @@
---
-title: "HugeGraph Config Options"
-linkTitle: "Config Options"
+title: "Server Complete Configuration Manual"
+linkTitle: "Server Complete Configuration Manual"
weight: 2
---
@@ -37,23 +37,31 @@ Corresponding configuration file `rest-server.properties`
| gremlinserver.url | http://127.0.0.1:8182 | The url of gremlin server. |
| gremlinserver.max_route | 8 | The max route number for gremlin server. |
| gremlinserver.timeout | 30 | The timeout in seconds of waiting for gremlin server. |
-| batch.max_edges_per_batch | 500 | The maximum number of edges submitted per batch. |
-| batch.max_vertices_per_batch | 500 | The maximum number of vertices submitted per batch. |
-| batch.max_write_ratio | 50 | The maximum thread ratio for batch writing, only take effect if the batch.max_write_threads is 0. |
+| batch.max_edges_per_batch | 2500 | The maximum number of edges submitted per batch. |
+| batch.max_vertices_per_batch | 2500 | The maximum number of vertices submitted per batch. |
+| batch.max_write_ratio | 70 | The maximum thread ratio for batch writing, only take effect if the batch.max_write_threads is 0. |
| batch.max_write_threads | 0 | The maximum threads for batch writing, if the value is 0, the actual value will be set to batch.max_write_ratio * restserver.max_worker_threads. |
-| auth.authenticator | | The class path of authenticator implementation. e.g., org.apache.hugegraph.auth.StandardAuthenticator, or org.apache.hugegraph.auth.ConfigAuthenticator. |
-| auth.admin_token | 162f7848-0b6d-4faf-b557-3a0797869c55 | Token for administrator operations, only for org.apache.hugegraph.auth.ConfigAuthenticator. |
+| auth.authenticator | | The class path of authenticator implementation. e.g., org.apache.hugegraph.auth.StandardAuthenticator, or a custom implementation. |
| auth.graph_store | hugegraph | The name of graph used to store authentication information, like users, only for org.apache.hugegraph.auth.StandardAuthenticator. |
-| auth.user_tokens | [hugegraph:9fd95c9c-711b-415b-b85f-d4df46ba5c31] | The map of user tokens with name and password, only for org.apache.hugegraph.auth.ConfigAuthenticator. |
| auth.audit_log_rate | 1000.0 | The max rate of audit log output per user, default value is 1000 records per second. |
| auth.cache_capacity | 10240 | The max cache capacity of each auth cache item. |
| auth.cache_expire | 600 | The expiration time in seconds of vertex cache. |
| auth.remote_url | | If the address is empty, it provide auth service, otherwise it is auth client and also provide auth service through rpc forwarding. The remote url can be set to multiple addresses, which are concat by ','. |
| auth.token_expire | 86400 | The expiration time in seconds after token created |
| auth.token_secret | FXQXbJtbCLxODc6tGci732pkH1cyf8Qg | Secret key of HS256 algorithm. |
-| exception.allow_trace | false | Whether to allow exception trace stack. |
-| memory_monitor.threshold | 0.85 | The threshold of JVM(in-heap) memory usage monitoring , 1 means disabling this function. |
+| exception.allow_trace | true | Whether to allow exception trace stack. |
+| memory_monitor.threshold | 0.85 | The threshold of JVM(in-heap) memory usage monitoring, 1 means disabling this function. |
| memory_monitor.period | 2000 | The period in ms of JVM(in-heap) memory usage monitoring. |
+| log.slow_query_threshold | 1000 | Slow query log threshold in milliseconds, 0 means disabled. |
+
+### PD/Meta Config Options (Distributed Mode)
+
+Corresponding configuration file `rest-server.properties`
+
+| config option | default value | description |
+|------------------|------------------------|--------------------------------------------|
+| pd.peers | 127.0.0.1:8686 | PD server addresses (comma separated). |
+| meta.endpoints | http://127.0.0.1:2379 | Meta service endpoints. |
### Basic Config Options
@@ -62,7 +70,7 @@ Basic Config Options and Backend Config Options correspond to configuration file
| config option | default value | description |
|---------------------------------------|----------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| gremlin.graph | org.apache.hugegraph.HugeFactory | Gremlin entrance to create graph. |
-| backend | rocksdb | The data store type, available values are [memory, rocksdb, cassandra, scylladb, hbase, mysql]. |
+| backend | rocksdb | The data store type. For version 1.7.0+: [memory, rocksdb, hstore, hbase]. Note: cassandra, scylladb, mysql, postgresql were removed in 1.7.0 (use <= 1.5.x for legacy backends). |
| serializer | binary | The serializer for backend store, available values are [text, binary, cassandra, hbase, mysql]. |
| store | hugegraph | The database name like Cassandra Keyspace. |
| store.connection_detect_interval | 600 | The interval in seconds for detecting connections, if the idle time of a connection exceeds this value, detect it and reconnect if needed before using, value 0 means detecting every time. |
@@ -131,51 +139,6 @@ Basic Config Options and Backend Config Options correspond to configuration file
| raft.rpc_buf_high_water_mark | 20971520 | The ChannelOutboundBuffer's high water mark of netty, only when buffer size exceed this size, the method ChannelOutboundBuffer.isWritable() will return false, it means that the downstream pressure is too great to process the request or network is very congestion, upstream needs to limit rate at this time. |
| raft.read_strategy | ReadOnlyLeaseBased | The linearizability of read strategy. |
-### RPC server Config Options
-
-| config option | default value | description |
-|-----------------------------|-----------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| rpc.client_connect_timeout | 20 | The timeout(in seconds) of rpc client connect to rpc server. |
-| rpc.client_load_balancer | consistentHash | The rpc client uses a load-balancing algorithm to access multiple rpc servers in one cluster. Default value is 'consistentHash', means forwarding by request parameters. |
-| rpc.client_read_timeout | 40 | The timeout(in seconds) of rpc client read from rpc server. |
-| rpc.client_reconnect_period | 10 | The period(in seconds) of rpc client reconnect to rpc server. |
-| rpc.client_retries | 3 | Failed retry number of rpc client calls to rpc server. |
-| rpc.config_order | 999 | Sofa rpc configuration file loading order, the larger the more later loading. |
-| rpc.logger_impl | com.alipay.sofa.rpc.log.SLF4JLoggerImpl | Sofa rpc log implementation class. |
-| rpc.protocol | bolt | Rpc communication protocol, client and server need to be specified the same value. |
-| rpc.remote_url | | The remote urls of rpc peers, it can be set to multiple addresses, which are concat by ',', empty value means not enabled. |
-| rpc.server_adaptive_port | false | Whether the bound port is adaptive, if it's enabled, when the port is in use, automatically +1 to detect the next available port. Note that this process is not atomic, so there may still be port conflicts. |
-| rpc.server_host | | The hosts/ips bound by rpc server to provide services, empty value means not enabled. |
-| rpc.server_port | 8090 | The port bound by rpc server to provide services. |
-| rpc.server_timeout | 30 | The timeout(in seconds) of rpc server execution. |
-
-### Cassandra Backend Config Options
-
-| config option | default value | description |
-|--------------------------------|----------------|------------------------------------------------------------------------------------------------------------------------------------------------|
-| backend | | Must be set to `cassandra`. |
-| serializer | | Must be set to `cassandra`. |
-| cassandra.host | localhost | The seeds hostname or ip address of cassandra cluster. |
-| cassandra.port | 9042 | The seeds port address of cassandra cluster. |
-| cassandra.connect_timeout | 5 | The cassandra driver connect server timeout(seconds). |
-| cassandra.read_timeout | 20 | The cassandra driver read from server timeout(seconds). |
-| cassandra.keyspace.strategy | SimpleStrategy | The replication strategy of keyspace, valid value is SimpleStrategy or NetworkTopologyStrategy. |
-| cassandra.keyspace.replication | [3] | The keyspace replication factor of SimpleStrategy, like '[3]'.Or replicas in each datacenter of NetworkTopologyStrategy, like '[dc1:2,dc2:1]'. |
-| cassandra.username | | The username to use to login to cassandra cluster. |
-| cassandra.password | | The password corresponding to cassandra.username. |
-| cassandra.compression_type | none | The compression algorithm of cassandra transport: none/snappy/lz4. |
-| cassandra.jmx_port=7199 | 7199 | The port of JMX API service for cassandra. |
-| cassandra.aggregation_timeout | 43200 | The timeout in seconds of waiting for aggregation. |
-
-### ScyllaDB Backend Config Options
-
-| config option | default value | description |
-|---------------|---------------|----------------------------|
-| backend | | Must be set to `scylladb`. |
-| serializer | | Must be set to `scylladb`. |
-
-Other options are consistent with the Cassandra backend.
-
### RocksDB Backend Config Options
| config option | default value | description |
@@ -232,7 +195,34 @@ Other options are consistent with the Cassandra backend.
| rocksdb.level0_stop_writes_trigger | 36 | Hard limit on number of level-0 files for stopping writes. |
| rocksdb.soft_pending_compaction_bytes_limit | 68719476736 | The soft limit to impose on pending compaction in bytes. |
-### HBase Backend Config Options
+
+### K8s Config Options (Optional)
+
+Corresponding configuration file `rest-server.properties`
+
+| config option | default value | description |
+|------------------|-------------------------------|------------------------------------------|
+| server.use_k8s | false | Whether to enable K8s multi-tenancy mode. |
+| k8s.namespace | hugegraph-computer-system | K8s namespace for compute jobs. |
+| k8s.kubeconfig | | Path to kubeconfig file. |
+
+
+
+
+### Arthas Diagnostic Config Options (Optional)
+
+Corresponding configuration file `rest-server.properties`
+
+| config option | default value | description |
+|--------------------|---------------|-----------------------|
+| arthas.telnetPort | 8562 | Arthas telnet port. |
+| arthas.httpPort | 8561 | Arthas HTTP port. |
+| arthas.ip | 0.0.0.0 | Arthas bind IP. |
+
+
+
+
+### HBase Backend Config Options
| config option | default value | description |
|---------------------------|--------------------------------|--------------------------------------------------------------------------|
@@ -253,7 +243,50 @@ Other options are consistent with the Cassandra backend.
| hbase.vertex_partitions | 10 | The number of partitions of the HBase vertex table. |
| hbase.edge_partitions | 30 | The number of partitions of the HBase edge table. |
-### MySQL & PostgreSQL Backend Config Options
+
+
+
+---
+
+## ≤ 1.5 Version Config (Legacy)
+
+The following backend stores are no longer supported in version 1.7.0+ and are only available in version 1.5.x and earlier:
+
+
+### Cassandra Backend Config Options
+
+| config option | default value | description |
+|--------------------------------|----------------|------------------------------------------------------------------------------------------------------------------------------------------------|
+| backend | | Must be set to `cassandra`. |
+| serializer | | Must be set to `cassandra`. |
+| cassandra.host | localhost | The seeds hostname or ip address of cassandra cluster. |
+| cassandra.port | 9042 | The seeds port address of cassandra cluster. |
+| cassandra.connect_timeout | 5 | The cassandra driver connect server timeout(seconds). |
+| cassandra.read_timeout | 20 | The cassandra driver read from server timeout(seconds). |
+| cassandra.keyspace.strategy | SimpleStrategy | The replication strategy of keyspace, valid value is SimpleStrategy or NetworkTopologyStrategy. |
+| cassandra.keyspace.replication | [3] | The keyspace replication factor of SimpleStrategy, like '[3]'. Or replicas in each datacenter of NetworkTopologyStrategy, like '[dc1:2,dc2:1]'. |
+| cassandra.username | | The username to use to login to cassandra cluster. |
+| cassandra.password | | The password corresponding to cassandra.username. |
+| cassandra.compression_type | none | The compression algorithm of cassandra transport: none/snappy/lz4. |
+| cassandra.jmx_port | 7199 | The port of JMX API service for cassandra. |
+| cassandra.aggregation_timeout | 43200 | The timeout in seconds of waiting for aggregation. |
+
+
+
+
+### ScyllaDB Backend Config Options
+
+| config option | default value | description |
+|---------------|---------------|----------------------------|
+| backend | | Must be set to `scylladb`. |
+| serializer | | Must be set to `scylladb`. |
+
+Other options are consistent with the Cassandra backend.
+
+
+
+
+### MySQL & PostgreSQL Backend Config Options
| config option | default value | description |
|----------------------------------|-----------------------------|-------------------------------------------------------------------------------------|
@@ -269,7 +302,10 @@ Other options are consistent with the Cassandra backend.
| jdbc.storage_engine | InnoDB | The storage engine of backend store database, like InnoDB/MyISAM/RocksDB for MySQL. |
| jdbc.postgresql.connect_database | template1 | The database used to connect when init store, drop store or check store exist. |
-### PostgreSQL Backend Config Options
+
+
+
+### PostgreSQL Backend Config Options
| config option | default value | description |
|---------------|---------------|------------------------------|
@@ -281,3 +317,6 @@ Other options are consistent with the MySQL backend.
> The driver and url of the PostgreSQL backend should be set to:
> - `jdbc.driver=org.postgresql.Driver`
> - `jdbc.url=jdbc:postgresql://localhost:5432/`
+
+
+
diff --git a/content/en/docs/contribution-guidelines/committer-guidelines.md b/content/en/docs/contribution-guidelines/committer-guidelines.md
index abb653528..945064e17 100644
--- a/content/en/docs/contribution-guidelines/committer-guidelines.md
+++ b/content/en/docs/contribution-guidelines/committer-guidelines.md
@@ -9,7 +9,7 @@ weight: 5
# Candidate Requirements
1. Candidates must adhere to the [Apache Code of Conduct](https://www.apache.org/foundation/policies/conduct.html).
-2. PMC members will assess candidates' interactions with others and contributions through [mailing lists](https://lists.apache.org/list?dev@hugegraph.apache.org), [issues](https://github.com/apache/hugegraph/issues), [pull requests](https://github.com/apache/incubator-hugegraph/pulls), and [official documentation](https://hugegraph.apache.org/docs).
+2. PMC members will assess candidates' interactions with others and contributions through [mailing lists](https://lists.apache.org/list?dev@hugegraph.apache.org), [issues](https://github.com/apache/hugegraph/issues), [pull requests](https://github.com/apache/hugegraph/pulls), and [official documentation](https://hugegraph.apache.org/docs).
3. Considerations for evaluating candidates as potential Committers include:
1. Ability to collaborate with community members
2. Mentorship capabilities
@@ -23,7 +23,7 @@ weight: 5
## Initiate Community Discussion (DISCUSS)
-Any (P)PMC member of HugeGraph can initiate a voting discussion. After identifying valuable contributions from a community contributor and obtaining the candidate's consent, a discussion can be initiated via private@hugegraph.apache.org.
+Any PMC member of HugeGraph can initiate a voting discussion. After identifying valuable contributions from a community contributor and obtaining the candidate's consent, a discussion can be initiated via private@hugegraph.apache.org.
The initiator of the discussion should clearly state the candidate's contributions in the discussion email and provide URLs or other information for confirming the contributions, facilitating discussion and analysis.
Below is a template for HugeGraph emails: (For reference only)
@@ -72,32 +72,32 @@ Welcome everyone to share opinions~
Thanks!
```
-For contribution links in discussion emails, you can use the statistical feature of [GitHub Search](https://github.com/search) by entering corresponding keywords as needed. You can also adjust parameters and add new repositories such as `repo:apache/incubator-hugegraph-computer`. Pay special attention to adjusting the **time range** (below is a template reference, please adjust the parameters accordingly):
+For contribution links in discussion emails, you can use the statistical feature of [GitHub Search](https://github.com/search) by entering corresponding keywords as needed. You can also adjust parameters and add new repositories such as `repo:apache/hugegraph-computer`. Pay special attention to adjusting the **time range** (below is a template reference, please adjust the parameters accordingly):
- Number of PR submissions
- - `is:pr author:xxx repo:apache/incubator-hugegraph repo:apache/incubator-hugegraph-doc created:>2023-06-01 updated:<2023-12-25`
+ - `is:pr author:xxx repo:apache/hugegraph repo:apache/hugegraph-doc created:>2023-06-01 updated:<2023-12-25`
- Lines of code submissions/changes
- - https://github.com/apache/incubator-hugegraph/graphs/contributors?from=2023-06-01&to=2023-12-25&type=c
- - https://github.com/apache/incubator-hugegraph-doc/graphs/contributors?from=2023-06-01&to=2023-12-25&type=c
+ - https://github.com/apache/hugegraph/graphs/contributors?from=2023-06-01&to=2023-12-25&type=c
+ - https://github.com/apache/hugegraph-doc/graphs/contributors?from=2023-06-01&to=2023-12-25&type=c
- Number of PR submissions associated with issues
- - `linked:issue involves:xxx repo:apache/incubator-hugegraph repo:apache/incubator-hugegraph-doc created:>2023-06-01 updated:<2023-12-25`
+ - `linked:issue involves:xxx repo:apache/hugegraph repo:apache/hugegraph-doc created:>2023-06-01 updated:<2023-12-25`
- Number of PR reviews
- - `type:pr reviewed-by:xxx repo:apache/incubator-hugegraph repo:apache/incubator-hugegraph-doc created:>2023-06-01 updated:<2023-12-25`
+ - `type:pr reviewed-by:xxx repo:apache/hugegraph repo:apache/hugegraph-doc created:>2023-06-01 updated:<2023-12-25`
- Number of merge commits
- - `type:pr author:xxx repo:apache/incubator-hugegraph repo:apache/incubator-hugegraph-doc created:>2023-06-01 updated:<2023-12-25`
+ - `type:pr author:xxx repo:apache/hugegraph repo:apache/hugegraph-doc created:>2023-06-01 updated:<2023-12-25`
- Effective lines merged
- - https://github.com/apache/incubator-hugegraph/graphs/contributors?from=2023-06-01&to=2023-12-25&type=c
- - https://github.com/apache/incubator-hugegraph-doc/graphs/contributors?from=2023-06-01&to=2023-12-25&type=c
+ - https://github.com/apache/hugegraph/graphs/contributors?from=2023-06-01&to=2023-12-25&type=c
+ - https://github.com/apache/hugegraph-doc/graphs/contributors?from=2023-06-01&to=2023-12-25&type=c
- Number of issue submissions
- - `type:issue author:xxx repo:apache/incubator-hugegraph repo:apache/incubator-hugegraph-doc created:>2023-06-01 updated:<2023-12-25`
+ - `type:issue author:xxx repo:apache/hugegraph repo:apache/hugegraph-doc created:>2023-06-01 updated:<2023-12-25`
- Number of issue fixes
- Based on the number of issue submissions, select those with a closed status.
- Number of issue participations
- - `type:issue involves:xxx repo:apache/incubator-hugegraph repo:apache/incubator-hugegraph-doc created:>2023-06-01 updated:<2023-12-25`
+ - `type:issue involves:xxx repo:apache/hugegraph repo:apache/hugegraph-doc created:>2023-06-01 updated:<2023-12-25`
- Number of issue comments
- - `type:issue commenter:xxx repo:apache/incubator-hugegraph repo:apache/incubator-hugegraph-doc created:>2023-06-01 updated:<2023-12-25`
+ - `type:issue commenter:xxx repo:apache/hugegraph repo:apache/hugegraph-doc created:>2023-06-01 updated:<2023-12-25`
- Number of PR comments
- - `type:pr commenter:xxx repo:apache/incubator-hugegraph repo:apache/incubator-hugegraph-doc created:>2023-06-01 updated:<2023-12-25`
+ - `type:pr commenter:xxx repo:apache/hugegraph repo:apache/hugegraph-doc created:>2023-06-01 updated:<2023-12-25`
For participation in mailing lists, you can use https://lists.apache.org/list?dev@hugegraph.apache.org:lte=10M:xxx.
@@ -131,7 +131,7 @@ Please vote accordingly:
Thanks!
```
-Then, (P)PMC members reply to the email with +1 or -1 to express their opinions. Generally, at least 3 votes of +1 are needed to conclude the vote.
+Then, PMC members reply to the email with +1 or -1 to express their opinions. Generally, at least 3 votes of +1 are needed to conclude the vote.
## Announcement of Voting Results (RESULT)
@@ -167,7 +167,7 @@ Subject: Invitation to become HugeGraph committer: xxx
Hello xxx,
-The HugeGraph Project Management Committee (PPMC)
+The HugeGraph Project Management Committee (PMC)
hereby offers you committer privileges to the project.
These privileges are offered on the understanding that you'll use them
reasonably and with common sense. We like to work on trust
@@ -212,7 +212,7 @@ establishing you as a committer.
With the expectation of your acceptance, welcome!
-The Apache HugeGraph(incubating) PPMC
+The Apache HugeGraph PMC
```
## Candidate Accepts Invitation (ACCEPT)
@@ -224,13 +224,13 @@ To: [ Sender's Email ]
Cc: private@hugegraph.apache.org
Subject: Re: Invitation to become HugeGraph committer: xxx
-Hello Apache HugeGraph(incubating) PPMC,
+Hello Apache HugeGraph PMC,
I accept the invitation.
Thanks to the Apache HugeGraph Community for recognizing my work, I
will continue to actively participate in the work of the Apache
-HugeGraph(incubating).
+HugeGraph.
Next, I will follow the instructions to complete the next steps:
Signing and submitting iCLA and registering Apache ID.
@@ -256,7 +256,7 @@ Once the invitation is accepted, the candidate needs to complete the following t
5. **Country:** Country of residence in English
6. **E-mail**: Email address, preferably the same as the one used in the invitation email
7. **(optional) preferred Apache id(s)**: Choose an SVN ID that is not listed on the [Apache committer](http://people.apache.org/committer-index.html) page
- 8. **(optional) notify project**: Apache HugeGraph(incubating)
+ 8. **(optional) notify project**: Apache HugeGraph
9. **Signature: Must be handwritten using a PDF tool**
10. **Date:** Format as xxxx-xx-xx
3. After signing, rename `icla.pdf` to `name-pinyin-icla.pdf`
@@ -268,7 +268,7 @@ Subject: ICLA Information
Hello everyone:
-I have accepted the Apache HugeGraph(incubating) PPMC invitation to
+I have accepted the Apache HugeGraph PMC invitation to
become a HugeGraph committer, the attachment is my ICLA information.
(Optional) My GitHub account is https://github.com/xxx. Thanks!
@@ -300,7 +300,7 @@ After the record is completed, the candidate will receive an email from root@apa
2. Configure personal information at https://whimsy.apache.org/roster/committer/xxx.
3. Associate GitHub account at https://gitbox.apache.org/boxer.
- This step requires configuring GitHub Two-Factor Authentication (2FA).
-4. **The nominating PMC member must add the new Committer to the official list of committers via the [Roster](https://whimsy.apache.org/roster/ppmc/hugegraph) page.** (**Important**, otherwise repository permissions will not take effect).
+4. **The nominating PMC member must add the new Committer to the official list of committers via the [Roster](https://whimsy.apache.org/roster/pmc/hugegraph) page.** (**Important**, otherwise repository permissions will not take effect).
- After this step, the candidate becomes a new Committer and gains write access to the GitHub HugeGraph repository.
5. (Optional) The new Committer can apply for free use of JetBrains' full range of products with their Apache account [here](https://www.jetbrains.com/shop/eform/apache).
@@ -313,7 +313,7 @@ After the candidate completes the above steps, they will officially become a Com
To: dev@hugegraph.apache.org
Subject: [ANNOUNCE] New Committer: xxx
-Hi everyone, The PPMC for Apache HugeGraph(incubating) has invited xxx to
+Hi everyone, The PMC for Apache HugeGraph has invited xxx to
become a Committer and we are pleased to announce that he/she has accepted.
xxx is being active in the HugeGraph community & dedicated to ... modules,
@@ -325,32 +325,19 @@ Welcome xxx, and please enjoy your community journey~
Thanks!
-The Apache HugeGraph PPMC
+The Apache HugeGraph PMC
```
-## Update clutch status information
+## Update Governance Information
-PMC members responsible for nominations need to download clutch status information and update it. Once effective, it can be viewed on the [clutch](https://incubator.apache.org/clutch/hugegraph.html) and [projects](https://incubator.apache.org/projects/hugegraph.html) pages. The process is as follows:
+Since Apache HugeGraph graduated in January 2026, governance information is maintained in ASF committee/project data rather than Incubator clutch pages.
-```text
-# 1. Download clutch status information
-svn co https://svn.apache.org/repos/asf/incubator/public/trunk/content/projects/
-
-# 2. Modify and edit (note the following is for reference only)
-cd projects
-vim hugegraph.xml
-
-
- News
-
- - YYYY-MM-DD New Committer: xxx
- - ...
-
-
-
-# 3. Commit
-svn commit -m "update news for hugegraph"
-```
+Please check:
+
+- [ASF Committee data for HugeGraph](https://projects.apache.org/committee.html?hugegraph)
+- [PMC roster page](https://whimsy.apache.org/roster/pmc/hugegraph)
+
+If an update is required but does not appear automatically, coordinate with Apache Community Development or ASF Infra according to the official process.
# References
diff --git a/content/en/docs/contribution-guidelines/contribute.md b/content/en/docs/contribution-guidelines/contribute.md
index e8741b1f4..8f3e96973 100644
--- a/content/en/docs/contribution-guidelines/contribute.md
+++ b/content/en/docs/contribution-guidelines/contribute.md
@@ -20,7 +20,7 @@ Before submitting the code, we need to do some preparation:
1. Sign up or login to GitHub: [https://github.com](https://github.com)
-2. Fork HugeGraph repo from GitHub: [https://github.com/apache/incubator-hugegraph/fork](https://github.com/apache/hugegraph/fork)
+2. Fork HugeGraph repo from GitHub: [https://github.com/apache/hugegraph/fork](https://github.com/apache/hugegraph/fork)
3. Clone code from fork repo to local: [https://github.com/${GITHUB_USER_NAME}/hugegraph](https://github.com/${GITHUB_USER_NAME}/hugegraph)
@@ -44,7 +44,7 @@ Before submitting the code, we need to do some preparation:
## 2. Create an Issue on GitHub
-If you encounter bugs or have any questions, please go to [GitHub Issues](https://github.com/apache/incubator-hugegraph/issues) to report them and feel free to [create an issue](https://github.com/apache/hugegraph/issues/new).
+If you encounter bugs or have any questions, please go to [GitHub Issues](https://github.com/apache/hugegraph/issues) to report them and feel free to [create an issue](https://github.com/apache/hugegraph/issues/new).
## 3. Make changes of code locally
@@ -72,14 +72,14 @@ vim hugegraph-core/src/main/java/org/apache/hugegraph/HugeFactory.java
# run test locally (optional)
mvn test -Pcore-test,memory
```
-Note: In order to be consistent with the code style easily, if you use [IDEA](https://www.jetbrains.com/idea/) as your IDE, you can directly [import](https://www.jetbrains.com/help/idea/configuring-code-style.html) our code style [configuration file](./hugegraph-style.xml).
+Note: To keep a consistent code style, if you use IDEA as your IDE, you can import our code style configuration file.
##### 3.2.1 Check licenses
If we want to add new third-party dependencies to the `HugeGraph` project, we need to do the following things:
-1. Find the third-party dependent repository, put the dependent `license` file into [./hugegraph-dist/release-docs/licenses/](https://github.com/apache/incubator-hugegraph/tree/master/hugegraph-server/hugegraph-dist/release-docs/licenses) path.
-2. Declare the dependency in [./hugegraph-dist/release-docs/LICENSE](https://github.com/apache/incubator-hugegraph/blob/master/hugegraph-server/hugegraph-dist/release-docs/LICENSE) `LICENSE` information.
-3. Find the NOTICE file in the repository and append it to [./hugegraph-dist/release-docs/NOTICE](https://github.com/apache/incubator-hugegraph/blob/master/hugegraph-server/hugegraph-dist/release-docs/NOTICE) file (skip this step if there is no NOTICE file).
-4. Execute locally [./hugegraph-dist/scripts/dependency/regenerate_known_dependencies.sh](https://github.com/apache/incubator-hugegraph/blob/master/hugegraph-server/hugegraph-dist/scripts/dependency/regenerate_known_dependencies.sh) to update the dependency list [known-dependencies.txt](https://github.com/apache/incubator-hugegraph/blob/master/hugegraph-server/hugegraph-dist/scripts/dependency/known-dependencies.txt) (or manually update) .
+1. Find the third-party dependent repository, put the dependent `license` file into [./hugegraph-dist/release-docs/licenses/](https://github.com/apache/hugegraph/tree/master/hugegraph-server/hugegraph-dist/release-docs/licenses) path.
+2. Declare the dependency in [./hugegraph-dist/release-docs/LICENSE](https://github.com/apache/hugegraph/blob/master/hugegraph-server/hugegraph-dist/release-docs/LICENSE) `LICENSE` information.
+3. Find the NOTICE file in the repository and append it to [./hugegraph-dist/release-docs/NOTICE](https://github.com/apache/hugegraph/blob/master/hugegraph-server/hugegraph-dist/release-docs/NOTICE) file (skip this step if there is no NOTICE file).
+4. Execute locally [./hugegraph-dist/scripts/dependency/regenerate_known_dependencies.sh](https://github.com/apache/hugegraph/blob/master/hugegraph-server/hugegraph-dist/scripts/dependency/regenerate_known_dependencies.sh) to update the dependency list [known-dependencies.txt](https://github.com/apache/hugegraph/blob/master/hugegraph-server/hugegraph-dist/scripts/dependency/known-dependencies.txt) (or manually update).
**Example**: A new third-party dependency is introduced into the project -> `ant-1.9.1.jar`
- The project source code is located at: https://github.com/apache/ant/tree/rel/1.9.1
diff --git a/content/en/docs/contribution-guidelines/hugegraph-server-idea-setup.md b/content/en/docs/contribution-guidelines/hugegraph-server-idea-setup.md
index 698af8519..72c5a9cc2 100644
--- a/content/en/docs/contribution-guidelines/hugegraph-server-idea-setup.md
+++ b/content/en/docs/contribution-guidelines/hugegraph-server-idea-setup.md
@@ -4,7 +4,7 @@ linkTitle: "Setup Server in IDEA"
weight: 4
---
-> NOTE: The following configuration is for reference purposes only, and has been tested on Linux and macOS platforms based on [this version](https://github.com/apache/incubator-hugegraph/commit/a946ad1de4e8f922251a5241ffc957c33379677f).
+> NOTE: The following configuration is for reference purposes only, and has been tested on Linux and macOS platforms based on [this version](https://github.com/apache/hugegraph/commit/a946ad1de4e8f922251a5241ffc957c33379677f).
### Background
@@ -17,7 +17,7 @@ The core steps for local startup are the same as starting with **scripts**:
Before proceeding with the following process, make sure that you have cloned the source code of HugeGraph
and have configured the development environment, such as `Java 11` & you could config your local environment
-with this [config-doc](https://github.com/apache/incubator-hugegraph/wiki/The-style-config-for-HugeGraph-in-IDEA)
+with this [config-doc](https://github.com/apache/hugegraph/wiki/The-style-config-for-HugeGraph-in-IDEA)
```bash
git clone https://github.com/apache/hugegraph.git
@@ -57,7 +57,7 @@ Next, open the `Run/Debug Configurations` panel in IntelliJ IDEA and create a ne
- LD_LIBRARY_PATH=/path/to/your/library:$LD_LIBRARY_PATH
- LD_PRELOAD=libjemalloc.so:librocksdbjni-linux64.so
-> If **user authentication** (authenticator) is configured for HugeGraph-Server in the **Java 11** environment, you need to refer to the script [configuration](https://github.com/apache/incubator-hugegraph/blob/master/hugegraph-server/hugegraph-dist/src/assembly/static/bin/init-store.sh#L52) in the binary package and add the following **VM options**:
+> If **user authentication** (authenticator) is configured for HugeGraph-Server in the **Java 11** environment, you need to refer to the script [configuration](https://github.com/apache/hugegraph/blob/master/hugegraph-server/hugegraph-dist/src/assembly/static/bin/init-store.sh#L52) in the binary package and add the following **VM options**:
>
> ```bash
> --add-exports=java.base/jdk.internal.reflect=ALL-UNNAMED
@@ -93,7 +93,7 @@ Similarly, open the `Run/Debug Configurations` panel in IntelliJ IDEA and create
- Set the `Main class` to `org.apache.hugegraph.dist.HugeGraphServer`.
- Set the program arguments to `conf/gremlin-server.yaml conf/rest-server.properties`. Similarly, note that the path here is relative to the working directory, so make sure to set the working directory to `path-to-your-directory`.
-> Similarly, if **user authentication** (authenticator) is configured for HugeGraph-Server in the **Java 11** environment, you need to refer to the script [configuration](https://github.com/apache/incubator-hugegraph/blob/master/hugegraph-server/hugegraph-dist/src/assembly/static/bin/hugegraph-server.sh#L124) in the binary package and add the following **VM options**:
+> Similarly, if **user authentication** (authenticator) is configured for HugeGraph-Server in the **Java 11** environment, you need to refer to the script [configuration](https://github.com/apache/hugegraph/blob/master/hugegraph-server/hugegraph-dist/src/assembly/static/bin/hugegraph-server.sh#L124) in the binary package and add the following **VM options**:
>
> ```bash
> --add-exports=java.base/jdk.internal.reflect=ALL-UNNAMED --add-modules=jdk.unsupported --add-exports=java.base/sun.nio.ch=ALL-UNNAMED
@@ -165,8 +165,8 @@ This is because Log4j2 uses asynchronous loggers. You can refer to the [official
### References
-1. [HugeGraph-Server Quick Start](/docs/quickstart/hugegraph/hugegraph-server/)
+1. [HugeGraph-Server Quick Start](/docs/quickstart/hugegraph/hugegraph-server): Instructions for setting up HugeGraph-Server.
2. [Local Debugging Guide for HugeGraph Server (Win/Unix)](https://gist.github.com/imbajin/1661450f000cd62a67e46d4f1abfe82c)
3. ["package sun.misc does not exist" compilation error](https://youtrack.jetbrains.com/issue/IDEA-180033)
4. [Cannot compile: java: package sun.misc does not exist](https://youtrack.jetbrains.com/issue/IDEA-201168)
-5. [The code-style config for HugeGraph in IDEA](https://github.com/apache/incubator-hugegraph/wiki/The-style-config-for-HugeGraph-in-IDEA)
+5. [The code-style config for HugeGraph in IDEA](https://github.com/apache/hugegraph/wiki/The-style-config-for-HugeGraph-in-IDEA)
diff --git a/content/en/docs/contribution-guidelines/validate-release.md b/content/en/docs/contribution-guidelines/validate-release.md
index 83613d13c..ff6d03034 100644
--- a/content/en/docs/contribution-guidelines/validate-release.md
+++ b/content/en/docs/contribution-guidelines/validate-release.md
@@ -6,11 +6,15 @@ weight: 3
> Note: this doc will be updated continuously.
> You need to use Java11 in runtime verification, we will drop Java8 support from version 1.5.0 (And currently doesn't support Java17)
+>
+> Graduation note: Apache HugeGraph graduated in January 2026. Official release voting is now completed within the HugeGraph community (PMC binding votes on `dev@hugegraph.apache.org`), and no longer requires Incubator `general@incubator.apache.org` approval.
## Verification
When the internal temporary release and packaging work is completed, other community developers (
-especially PMC) need to participate in the [verification link](https://cwiki.apache.org/confluence/display/INCUBATOR/Incubator+Release+Checklist)
+especially PMC) need to participate in verification based on ASF release policy and checklist references:
+- [ASF release policy](https://www.apache.org/legal/release-policy.html)
+- [Incubator checklist (historical reference)](https://cwiki.apache.org/confluence/display/INCUBATOR/Incubator+Release+Checklist)
To ensure the "correctness + completeness" of someone's published version, here requires **everyone
** to participate as much as possible, and then explain which items you have **checked** in the
subsequent **email reply**.(The following are the core items)
@@ -46,10 +50,10 @@ brew install wget
# 4. Download the hugegraph-svn directory
# For version number, pay attention to fill in the verification version
-svn co https://dist.apache.org/repos/dist/dev/incubator/hugegraph/1.x.x/
+svn co https://dist.apache.org/repos/dist/dev/hugegraph/1.x.x/
# (Note) If svn downloads a file very slowly,
# you can consider wget to download a single file, as follows (or consider using a proxy)
-wget https://dist.apache.org/repos/dist/dev/incubator/hugegraph/1.x.x/apache-hugegraph-toolchain-incubating-1.x.x.tar.gz
+wget https://dist.apache.org/repos/dist/dev/hugegraph/1.x.x/apache-hugegraph-toolchain-1.x.x.tar.gz
```
#### 2. check hash value
@@ -74,7 +78,7 @@ Related commands:
```bash
# 1. Download project trusted public key to local (required for the first time) & import
-curl https://downloads.apache.org/incubator/hugegraph/KEYS > KEYS
+curl https://downloads.apache.org/hugegraph/KEYS > KEYS
gpg --import KEYS
# After importing, you can see the following output, which means that x user public keys have been imported
@@ -123,8 +127,8 @@ the official [Wiki](https://cwiki.apache.org/confluence/display/INCUBATOR/Incuba
After decompressing `*hugegraph*src.tar.gz`, Do the following checks:
-1. folders with `incubating`, and no **empty** files/folders
-2. `LICENSE` + `NOTICE` + `DISCLAIM` file exists and the content is normal
+1. package/folder naming should match the release line (historical releases may still contain `incubating`), and no **empty** files/folders
+2. `LICENSE` + `NOTICE` exist and the content is normal; `DISCLAIMER` is required for historical incubating artifacts
3. **does not exist** binaries (without LICENSE)
4. The source code files all contain the standard `ASF License` header (this could be done with
the `Maven-MAT` plugin)
@@ -144,8 +148,8 @@ mvn clean package -P stage -DskipTests -Dcheckstyle.skip=true
After decompressing `xxx-hugegraph.tar.gz`, perform the following checks:
-1. folders with `incubating`
-2. `LICENSE` and `NOTICE` file exists and the content is normal
+1. package/folder naming should match the release line (historical releases may still contain `incubating`)
+2. `LICENSE` and `NOTICE` files exist and their content is normal (`DISCLAIMER` applies to historical incubating artifacts)
3. start server
```bash
diff --git a/content/en/docs/download/download.md b/content/en/docs/download/download.md
index d5205c570..a23907c5c 100644
--- a/content/en/docs/download/download.md
+++ b/content/en/docs/download/download.md
@@ -1,5 +1,5 @@
---
-title: "Download Apache HugeGraph (Incubating)"
+title: "Download Apache HugeGraph"
linkTitle: "Download"
weight: 2
---
@@ -8,108 +8,100 @@ weight: 2
> Instructions:
>
> - It is recommended to use the latest version of the HugeGraph software package. Please select Java11 for the runtime environment.
-> - To verify downloads, use the corresponding hash (SHA512), signature, and [Project Signature Verification KEYS](https://downloads.apache.org/incubator/hugegraph/KEYS).
-> - Instructions for checking hash (SHA512) and signatures are on the [Validate Release](/docs/contribution-guidelines/validate-release/) page, and you can also refer to [ASF official instructions](https://www.apache.org/dyn/closer.cgi#verify).
-
-> Note: The version numbers of all components of HugeGraph have been kept consistent, and the version numbers of Maven repositories such as `client/loader/hubble/common` are the same. You can refer to these for dependency references [maven example](https://github.com/apache/incubator-hugegraph-toolchain#maven-dependencies).
+> - To verify downloads, use the corresponding hash (SHA512), signature, and [Project Signature Verification KEYS](https://downloads.apache.org/hugegraph/KEYS).
+> - Instructions for checking hash (SHA512) and signatures are on the [Validate Release](/docs/contribution-guidelines/validate-release) page, and you can also refer to [ASF official instructions](https://www.apache.org/dyn/closer.cgi#verify).
+> - Note: The version numbers of all components of HugeGraph have been kept consistent, and the version numbers of Maven repositories such as `client/loader/hubble/common` are the same. You can refer to this [maven example](https://github.com/apache/hugegraph-toolchain#maven-dependencies) for dependency references.
+> - Compatibility note: after HugeGraph graduated in January 2026, download paths moved from `/incubator/hugegraph` to `/hugegraph`. Historical release file names may still include `-incubating-`.
### Latest Version 1.7.0
-> Note: Starting from version `1.5.0`, a Java11 runtime environment is required.
-
- Release Date: 2025-11-28
-- [Release Notes](/docs/changelog/hugegraph-1.7.0-release-notes/)
+- [Release Notes](/docs/changelog/hugegraph-1.7.0-release-notes)
#### Binary Packages
| Server | Toolchain |
|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| [[Binary](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.7.0/apache-hugegraph-incubating-1.7.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.7.0/apache-hugegraph-incubating-1.7.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.7.0/apache-hugegraph-incubating-1.7.0.tar.gz.sha512)] | [[Binary](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.7.0/apache-hugegraph-toolchain-incubating-1.7.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.7.0/apache-hugegraph-toolchain-incubating-1.7.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.7.0/apache-hugegraph-toolchain-incubating-1.7.0.tar.gz.sha512)] |
+| [[Binary](https://www.apache.org/dyn/closer.lua/hugegraph/1.7.0/apache-hugegraph-incubating-1.7.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.7.0/apache-hugegraph-incubating-1.7.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.7.0/apache-hugegraph-incubating-1.7.0.tar.gz.sha512)] | [[Binary](https://www.apache.org/dyn/closer.lua/hugegraph/1.7.0/apache-hugegraph-toolchain-incubating-1.7.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.7.0/apache-hugegraph-toolchain-incubating-1.7.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.7.0/apache-hugegraph-toolchain-incubating-1.7.0.tar.gz.sha512)] |
#### Source Packages
-Please refer to [build from source](/docs/quickstart/hugegraph-server/).
+Please refer to [build from source](/docs/quickstart/hugegraph/hugegraph-server).
| Server | Toolchain | AI | Computer |
|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.7.0/apache-hugegraph-incubating-1.7.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.7.0/apache-hugegraph-incubating-1.7.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.7.0/apache-hugegraph-incubating-1.7.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.7.0/apache-hugegraph-toolchain-incubating-1.7.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.7.0/apache-hugegraph-toolchain-incubating-1.7.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.7.0/apache-hugegraph-toolchain-incubating-1.7.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.7.0/apache-hugegraph-ai-incubating-1.7.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.7.0/apache-hugegraph-ai-incubating-1.7.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.7.0/apache-hugegraph-ai-incubating-1.7.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.7.0/apache-hugegraph-computer-incubating-1.7.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.7.0/apache-hugegraph-computer-incubating-1.7.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.7.0/apache-hugegraph-computer-incubating-1.7.0-src.tar.gz.sha512)] |
+| [[Source](https://www.apache.org/dyn/closer.lua/hugegraph/1.7.0/apache-hugegraph-incubating-1.7.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.7.0/apache-hugegraph-incubating-1.7.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.7.0/apache-hugegraph-incubating-1.7.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/hugegraph/1.7.0/apache-hugegraph-toolchain-incubating-1.7.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.7.0/apache-hugegraph-toolchain-incubating-1.7.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.7.0/apache-hugegraph-toolchain-incubating-1.7.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/hugegraph/1.7.0/apache-hugegraph-ai-incubating-1.7.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.7.0/apache-hugegraph-ai-incubating-1.7.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.7.0/apache-hugegraph-ai-incubating-1.7.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/hugegraph/1.7.0/apache-hugegraph-computer-incubating-1.7.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.7.0/apache-hugegraph-computer-incubating-1.7.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.7.0/apache-hugegraph-computer-incubating-1.7.0-src.tar.gz.sha512)] |
---
### Archived Versions
-> Note: `1.3.0` is the last major version compatible with Java8, please switch to or migrate to Java11 as soon as possible (lower versions of Java have potentially more SEC risks and performance impacts).
+> Note: `1.3.0` is the last major version compatible with Java8, please switch to or migrate to Java11 as soon as possible (lower versions of Java have potentially more SEC risks and performance impacts). Starting from version `1.5.0`, a Java11 runtime environment is required.
#### 1.5.0
- Release Date: 2024-12-10
-- [Release Notes](/docs/changelog/hugegraph-1.5.0-release-notes/)
+- [Release Notes](/docs/changelog/hugegraph-1.5.0-release-notes)
##### Binary Packages
| Server | Toolchain |
|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| [[Binary](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.5.0/apache-hugegraph-incubating-1.5.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-incubating-1.5.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-incubating-1.5.0.tar.gz.sha512)] | [[Binary](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0.tar.gz.sha512)] |
+| [[Binary](https://www.apache.org/dyn/closer.lua/hugegraph/1.5.0/apache-hugegraph-incubating-1.5.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.5.0/apache-hugegraph-incubating-1.5.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.5.0/apache-hugegraph-incubating-1.5.0.tar.gz.sha512)] | [[Binary](https://www.apache.org/dyn/closer.lua/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0.tar.gz.sha512)] |
##### Source Packages
| Server | Toolchain | AI | Computer |
-|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.5.0/apache-hugegraph-incubating-1.5.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-incubating-1.5.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-incubating-1.5.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.5.0/apache-hugegraph-ai-incubating-1.5.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-ai-incubating-1.5.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-ai-incubating-1.5.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.5.0/apache-hugegraph-computer-incubating-1.5.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-computer-incubating-1.5.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-computer-incubating-1.5.0-src.tar.gz.sha512)] |
+|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| [[Source](https://www.apache.org/dyn/closer.lua/hugegraph/1.5.0/apache-hugegraph-incubating-1.5.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.5.0/apache-hugegraph-incubating-1.5.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.5.0/apache-hugegraph-incubating-1.5.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/hugegraph/1.5.0/apache-hugegraph-ai-incubating-1.5.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.5.0/apache-hugegraph-ai-incubating-1.5.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.5.0/apache-hugegraph-ai-incubating-1.5.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/hugegraph/1.5.0/apache-hugegraph-computer-incubating-1.5.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.5.0/apache-hugegraph-computer-incubating-1.5.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.5.0/apache-hugegraph-computer-incubating-1.5.0-src.tar.gz.sha512)] |
#### 1.3.0
- Release Date: 2024-04-01
-- [Release Notes](/docs/changelog/hugegraph-1.3.0-release-notes/)
+- [Release Notes](/docs/changelog/hugegraph-1.3.0-release-notes)
##### Binary Packages
| Server | Toolchain |
|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| [[Binary](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.3.0/apache-hugegraph-incubating-1.3.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.3.0/apache-hugegraph-incubating-1.3.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.3.0/apache-hugegraph-incubating-1.3.0.tar.gz.sha512)] | [[Binary](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.3.0/apache-hugegraph-toolchain-incubating-1.3.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.3.0/apache-hugegraph-toolchain-incubating-1.3.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.3.0/apache-hugegraph-toolchain-incubating-1.3.0.tar.gz.sha512)] |
+| [[Binary](https://www.apache.org/dyn/closer.lua/hugegraph/1.3.0/apache-hugegraph-incubating-1.3.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.3.0/apache-hugegraph-incubating-1.3.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.3.0/apache-hugegraph-incubating-1.3.0.tar.gz.sha512)] | [[Binary](https://www.apache.org/dyn/closer.lua/hugegraph/1.3.0/apache-hugegraph-toolchain-incubating-1.3.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.3.0/apache-hugegraph-toolchain-incubating-1.3.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.3.0/apache-hugegraph-toolchain-incubating-1.3.0.tar.gz.sha512)] |
##### Source Packages
| Server | Toolchain | AI | Common |
|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.3.0/apache-hugegraph-incubating-1.3.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.3.0/apache-hugegraph-incubating-1.3.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.3.0/apache-hugegraph-incubating-1.3.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.3.0/apache-hugegraph-toolchain-incubating-1.3.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.3.0/apache-hugegraph-toolchain-incubating-1.3.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.3.0/apache-hugegraph-toolchain-incubating-1.3.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.3.0/apache-hugegraph-ai-incubating-1.3.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.3.0/apache-hugegraph-ai-incubating-1.3.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.3.0/apache-hugegraph-ai-incubating-1.3.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.3.0/apache-hugegraph-commons-incubating-1.3.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.3.0/apache-hugegraph-commons-incubating-1.3.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.3.0/apache-hugegraph-commons-incubating-1.3.0-src.tar.gz.sha512)] |
+| [[Source](https://www.apache.org/dyn/closer.lua/hugegraph/1.3.0/apache-hugegraph-incubating-1.3.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.3.0/apache-hugegraph-incubating-1.3.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.3.0/apache-hugegraph-incubating-1.3.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/hugegraph/1.3.0/apache-hugegraph-toolchain-incubating-1.3.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.3.0/apache-hugegraph-toolchain-incubating-1.3.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.3.0/apache-hugegraph-toolchain-incubating-1.3.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/hugegraph/1.3.0/apache-hugegraph-ai-incubating-1.3.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.3.0/apache-hugegraph-ai-incubating-1.3.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.3.0/apache-hugegraph-ai-incubating-1.3.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/hugegraph/1.3.0/apache-hugegraph-commons-incubating-1.3.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.3.0/apache-hugegraph-commons-incubating-1.3.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.3.0/apache-hugegraph-commons-incubating-1.3.0-src.tar.gz.sha512)] |
#### 1.2.0
- Release Date: 2023-12-28
-- [Release Notes](/docs/changelog/hugegraph-1.2.0-release-notes/)
+- [Release Notes](/docs/changelog/hugegraph-1.2.0-release-notes)
##### Binary Packages
| Server | Toolchain |
|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| [[Binary](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.2.0/apache-hugegraph-incubating-1.2.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.2.0/apache-hugegraph-incubating-1.2.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.2.0/apache-hugegraph-incubating-1.2.0.tar.gz.sha512)] | [[Binary](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.2.0/apache-hugegraph-toolchain-incubating-1.2.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.2.0/apache-hugegraph-toolchain-incubating-1.2.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.2.0/apache-hugegraph-toolchain-incubating-1.2.0.tar.gz.sha512)] |
+| [[Binary](https://www.apache.org/dyn/closer.lua/hugegraph/1.2.0/apache-hugegraph-incubating-1.2.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.2.0/apache-hugegraph-incubating-1.2.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.2.0/apache-hugegraph-incubating-1.2.0.tar.gz.sha512)] | [[Binary](https://www.apache.org/dyn/closer.lua/hugegraph/1.2.0/apache-hugegraph-toolchain-incubating-1.2.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.2.0/apache-hugegraph-toolchain-incubating-1.2.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.2.0/apache-hugegraph-toolchain-incubating-1.2.0.tar.gz.sha512)] |
##### Source Packages
| Server | Toolchain | Computer | Common |
|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.2.0/apache-hugegraph-incubating-1.2.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.2.0/apache-hugegraph-incubating-1.2.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.2.0/apache-hugegraph-incubating-1.2.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.2.0/apache-hugegraph-toolchain-incubating-1.2.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.2.0/apache-hugegraph-toolchain-incubating-1.2.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.2.0/apache-hugegraph-toolchain-incubating-1.2.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.2.0/apache-hugegraph-computer-incubating-1.2.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.2.0/apache-hugegraph-computer-incubating-1.2.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.2.0/apache-hugegraph-computer-incubating-1.2.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.2.0/apache-hugegraph-commons-incubating-1.2.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.2.0/apache-hugegraph-commons-incubating-1.2.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.2.0/apache-hugegraph-commons-incubating-1.2.0-src.tar.gz.sha512)] |
+| [[Source](https://www.apache.org/dyn/closer.lua/hugegraph/1.2.0/apache-hugegraph-incubating-1.2.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.2.0/apache-hugegraph-incubating-1.2.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.2.0/apache-hugegraph-incubating-1.2.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/hugegraph/1.2.0/apache-hugegraph-toolchain-incubating-1.2.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.2.0/apache-hugegraph-toolchain-incubating-1.2.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.2.0/apache-hugegraph-toolchain-incubating-1.2.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/hugegraph/1.2.0/apache-hugegraph-computer-incubating-1.2.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.2.0/apache-hugegraph-computer-incubating-1.2.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.2.0/apache-hugegraph-computer-incubating-1.2.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/hugegraph/1.2.0/apache-hugegraph-commons-incubating-1.2.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.2.0/apache-hugegraph-commons-incubating-1.2.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.2.0/apache-hugegraph-commons-incubating-1.2.0-src.tar.gz.sha512)] |
#### 1.0.0
- Release Date: 2023-02-22
-- [Release Notes](/docs/changelog/hugegraph-1.0.0-release-notes/)
+- [Release Notes](/docs/changelog/hugegraph-1.0.0-release-notes)
##### Binary Packages
| Server | Toolchain | Computer |
|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| [[Binary](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.0.0/apache-hugegraph-incubating-1.0.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-incubating-1.0.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-incubating-1.0.0.tar.gz.sha512)] | [[Binary](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.0.0/apache-hugegraph-toolchain-incubating-1.0.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-toolchain-incubating-1.0.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-toolchain-incubating-1.0.0.tar.gz.sha512)] | [[Binary](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.0.0/apache-hugegraph-computer-incubating-1.0.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-computer-incubating-1.0.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-computer-incubating-1.0.0.tar.gz.sha512)] |
+| [[Binary](https://www.apache.org/dyn/closer.lua/hugegraph/1.0.0/apache-hugegraph-incubating-1.0.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.0.0/apache-hugegraph-incubating-1.0.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.0.0/apache-hugegraph-incubating-1.0.0.tar.gz.sha512)] | [[Binary](https://www.apache.org/dyn/closer.lua/hugegraph/1.0.0/apache-hugegraph-toolchain-incubating-1.0.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.0.0/apache-hugegraph-toolchain-incubating-1.0.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.0.0/apache-hugegraph-toolchain-incubating-1.0.0.tar.gz.sha512)] | [[Binary](https://www.apache.org/dyn/closer.lua/hugegraph/1.0.0/apache-hugegraph-computer-incubating-1.0.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.0.0/apache-hugegraph-computer-incubating-1.0.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.0.0/apache-hugegraph-computer-incubating-1.0.0.tar.gz.sha512)] |
##### Source Packages
| Server | Toolchain | Computer | Common |
|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.0.0/apache-hugegraph-incubating-1.0.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-incubating-1.0.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-incubating-1.0.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.0.0/apache-hugegraph-toolchain-incubating-1.0.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-toolchain-incubating-1.0.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-toolchain-incubating-1.0.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.0.0/apache-hugegraph-computer-incubating-1.0.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-computer-incubating-1.0.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-computer-incubating-1.0.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.0.0/apache-hugegraph-commons-incubating-1.0.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-commons-incubating-1.0.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-commons-incubating-1.0.0-src.tar.gz.sha512)] |
-
----
-
- Old Versions (Non-ASF Versions)
-Due to ASF rules, non-ASF distribution packages cannot be hosted directly on this page. For download instructions for old versions before 1.0.0 (non-ASF versions), please jump to https://github.com/apache/incubator-hugegraph-doc/wiki/Apache-HugeGraph-(Incubating)-Old-Versions-Download
-
+| [[Source](https://www.apache.org/dyn/closer.lua/hugegraph/1.0.0/apache-hugegraph-incubating-1.0.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.0.0/apache-hugegraph-incubating-1.0.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.0.0/apache-hugegraph-incubating-1.0.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/hugegraph/1.0.0/apache-hugegraph-toolchain-incubating-1.0.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.0.0/apache-hugegraph-toolchain-incubating-1.0.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.0.0/apache-hugegraph-toolchain-incubating-1.0.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/hugegraph/1.0.0/apache-hugegraph-computer-incubating-1.0.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.0.0/apache-hugegraph-computer-incubating-1.0.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.0.0/apache-hugegraph-computer-incubating-1.0.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/hugegraph/1.0.0/apache-hugegraph-commons-incubating-1.0.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/hugegraph/1.0.0/apache-hugegraph-commons-incubating-1.0.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/hugegraph/1.0.0/apache-hugegraph-commons-incubating-1.0.0-src.tar.gz.sha512)] |
diff --git a/content/en/docs/guides/architectural.md b/content/en/docs/guides/architectural.md
index 6890b23f7..2b03f920d 100644
--- a/content/en/docs/guides/architectural.md
+++ b/content/en/docs/guides/architectural.md
@@ -22,7 +22,7 @@ HugeGraph consists of three layers of functionality: the application layer, the
- [Tools](/docs/quickstart/toolchain/hugegraph-tools): Command-line tools for deploying, managing, and backing up/restoring data in HugeGraph.
- [Computer](/docs/quickstart/computing/hugegraph-computer): A distributed graph processing system (OLAP), which is an implementation of [Pregel](https://kowshik.github.io/JPregel/pregel_paper.pdf) and can run on Kubernetes.
- [Client](/docs/quickstart/client/hugegraph-client): A HugeGraph client written in Java. Users can use the Client to write Java code to operate HugeGraph. Python, Go, C++ and other language support will be provided in the future as needed.
-- [Graph Engine Layer](/docs/quickstart/hugegraph/hugegraph-server/):
+- [Graph Engine Layer](/docs/quickstart/hugegraph/hugegraph-server):
- REST Server: Provides a RESTful API for querying graph/schema information, supports the [Gremlin](https://tinkerpop.apache.org/gremlin.html) and [Cypher](https://en.wikipedia.org/wiki/Cypher) query languages, and offers APIs for service monitoring and operations.
- Graph Engine: Supports both OLTP and OLAP graph computation types, with OLTP implementing the [Apache TinkerPop3](https://tinkerpop.apache.org) framework.
- Backend Interface: Implements the storage of graph data to the backend.
diff --git a/content/en/docs/guides/faq.md b/content/en/docs/guides/faq.md
index 5dd41caf8..2586164e5 100644
--- a/content/en/docs/guides/faq.md
+++ b/content/en/docs/guides/faq.md
@@ -4,9 +4,13 @@ linkTitle: "FAQ"
weight: 6
---
-- How to choose the back-end storage? Choose RocksDB, Cassandra, ScyllaDB, Hbase or Mysql?
+- How to choose the back-end storage? RocksDB or distributed storage?
- The choice of backend storage depends on specific needs. For installations on a single machine (node) with data volumes under 10 billion records, RocksDB is generally recommended. However, if a distributed backend is needed for scaling across multiple nodes, other options should be considered. ScyllaDB, designed as a drop-in replacement for Cassandra, offers protocol compatibility and better hardware utilization, often requiring less infrastructure. HBase, on the other hand, requires a Hadoop ecosystem to function effectively. Finally, while MySQL supports horizontal scaling, managing it in a distributed setup can be challenging.
+ HugeGraph supports multiple deployment modes. Choose based on your data scale and scenario:
+ - **Standalone Mode**: Server + RocksDB, suitable for development/testing and small to medium-scale data (< 4TB)
+ - **Distributed Mode**: HugeGraph-PD + HugeGraph-Store (HStore), supports horizontal scaling and high availability (< 1000TB data scale), suitable for production environments and large-scale graph data applications
+
+ Note: Cassandra, HBase, MySQL and other backends are only available in HugeGraph versions <= 1.5 and are no longer maintained by the official team.
- Prompt when starting the service: `xxx (core dumped) xxx`
diff --git a/content/en/docs/guides/security.md b/content/en/docs/guides/security.md
index 5fde71379..4ce211020 100644
--- a/content/en/docs/guides/security.md
+++ b/content/en/docs/guides/security.md
@@ -29,6 +29,8 @@ The general process for handling security vulnerabilities is as follows:
- [CVE-2024-27348](https://www.cve.org/CVERecord?id=CVE-2024-27348): HugeGraph-Server - Command execution in gremlin
- [CVE-2024-27349](https://www.cve.org/CVERecord?id=CVE-2024-27349): HugeGraph-Server - Bypass whitelist in Auth mode
+- [CVE-2024-43441](https://www.cve.org/CVERecord?id=CVE-2024-43441): HugeGraph-Server - Fixed JWT Token (Secret)
+- [CVE-2025-26866](https://www.cve.org/CVERecord?id=CVE-2025-26866): HugeGraph-Server - RAFT and deserialization vulnerability
### HugeGraph-Toolchain project (Hubble/Loader/Client/Tools/..)
diff --git a/content/en/docs/guides/toolchain-local-test.md b/content/en/docs/guides/toolchain-local-test.md
index df4661825..95f6ebe04 100644
--- a/content/en/docs/guides/toolchain-local-test.md
+++ b/content/en/docs/guides/toolchain-local-test.md
@@ -353,7 +353,8 @@ python -m pip install -r hubble-dist/assembly/travis/requirements.txt
```bash
mvn package -Dmaven.test.skip=true
# Optional: Start and verify
-cd apache-hugegraph-hubble-incubating-*/bin
+# Compatible with both historical (-incubating-) and TLP package naming
+cd apache-hugegraph-hubble*/bin
./start-hubble.sh -d && sleep 10
curl http://localhost:8088/api/health
./stop-hubble.sh
diff --git a/content/en/docs/introduction/README.md b/content/en/docs/introduction/_index.md
similarity index 67%
rename from content/en/docs/introduction/README.md
rename to content/en/docs/introduction/_index.md
index 541fde789..51a172364 100644
--- a/content/en/docs/introduction/README.md
+++ b/content/en/docs/introduction/_index.md
@@ -2,13 +2,17 @@
title: "Introduction with HugeGraph"
linkTitle: "Introduction"
weight: 1
+aliases:
+ - /docs/introduction/readme/
+ - /docs/introduction/README/
---
### Summary
Apache HugeGraph is an easy-to-use, efficient, general-purpose open-source graph database system
-(Graph Database, [GitHub project address](https://github.com/hugegraph/hugegraph)), implementing the [Apache TinkerPop3](https://tinkerpop.apache.org) framework and fully compatible with the [Gremlin](https://tinkerpop.apache.org/gremlin.html) query language,
-With complete toolchain components, it helps users easily build applications and products based on graph databases. HugeGraph supports fast import of more than 10 billion vertices and edges, and provides millisecond-level relational query capability (OLTP).
+(Graph Database, [GitHub project address](https://github.com/apache/hugegraph)), implementing the [Apache TinkerPop3](https://tinkerpop.apache.org) framework and fully compatible with the [Gremlin](https://tinkerpop.apache.org/gremlin.html) query language,
+while also supporting the [Cypher](https://opencypher.org/) query language (OpenCypher standard).
+With complete toolchain components, it helps users easily build applications and products based on graph databases. HugeGraph supports fast import of more than 10 billion vertices and edges, and provides millisecond-level relational query capability (OLTP).
It also supports large-scale distributed graph computing (OLAP).
Typical application scenarios of HugeGraph include deep relationship exploration, association analysis, path search, feature extraction, data clustering, community detection, knowledge graph, etc., and are applicable to business fields such as network security, telecommunication fraud, financial risk control, advertising recommendation, social network, and intelligence Robots, etc.
@@ -16,17 +20,43 @@ Typical application scenarios of HugeGraph include deep relationship exploration
### Features
HugeGraph supports graph operations in online and offline environments, batch importing of data and efficient complex relationship analysis. It can seamlessly be integrated with big data platforms.
-HugeGraph supports multi-user parallel operations. Users can enter Gremlin query statements and get graph query results in time. They can also call the HugeGraph API in user programs for graph analysis or queries.
+HugeGraph supports multi-user parallel operations. Users can enter Gremlin/Cypher query statements and get graph query results in time. They can also call the HugeGraph API in user programs for graph analysis or queries.
-This system has the following features:
+This system has the following features:
-- Ease of use: HugeGraph supports the Gremlin graph query language and a RESTful API, providing common interfaces for graph retrieval, and peripheral tools with complete functions to easily implement various graph-based query and analysis operations.
+- Ease of use: HugeGraph supports the Gremlin/Cypher graph query languages and a RESTful API, providing common interfaces for graph retrieval, and peripheral tools with complete functions to easily implement various graph-based query and analysis operations.
- Efficiency: HugeGraph has been deeply optimized in graph storage and graph computing, and provides a variety of batch import tools, which can easily complete the rapid import of tens of billions of data, and achieve millisecond-level response for graph retrieval through optimized queries. Supports simultaneous online real-time operations of thousands of users.
- Universal: HugeGraph supports the Apache Gremlin standard graph query language and the Property Graph standard graph modeling method, and supports graph-based OLTP and OLAP schemes. Integrate Apache Hadoop and Apache Spark big data platforms.
- Scalable: supports distributed storage, multiple copies of data, and horizontal expansion, built-in multiple back-end storage engines, and can easily expand the back-end storage engine through plug-ins.
- Open: HugeGraph code is open source (Apache 2 License), customers can modify and customize independently, and selectively give back to the open-source community.
-The functions of this system include but are not limited to:
+### Deployment Modes
+
+HugeGraph supports multiple deployment modes to meet different scales and scenarios:
+
+**Standalone Mode**
+- Server + RocksDB backend storage
+- Suitable for development, testing, and small-to-medium scale data (< 4TB)
+- Docker quick start: `docker run hugegraph/hugegraph`
+- See [Server Quickstart](/docs/quickstart/hugegraph/hugegraph-server)
+
+**Distributed Mode**
+- HugeGraph-PD: Metadata management and cluster scheduling
+- HugeGraph-Store (HStore): Distributed storage engine
+- Supports horizontal scaling and high availability (< 1000TB data scale)
+- Suitable for production environments and large-scale graph data applications
+
+### Quick Start Guide
+
+| Use Case | Recommended Path |
+|---------|---------|
+| Quick experience | [Docker deployment](/docs/quickstart/hugegraph/hugegraph-server#docker) |
+| Build OLTP applications | Server → REST API / Gremlin / Cypher |
+| Graph analysis (OLAP) | [Vermeer](/docs/quickstart/computing/hugegraph-computer) (recommended) or Computer |
+| Build AI applications | [HugeGraph-AI](/docs/quickstart/hugegraph-ai) (GraphRAG/Knowledge Graph) |
+| Batch data import | [Loader](/docs/quickstart/toolchain/hugegraph-loader) + [Hubble](/docs/quickstart/toolchain/hugegraph-hubble) |
+
+### System Functions
- Supports batch import of data from multiple data sources (including local files, HDFS files, MySQL databases, and other data sources), and supports import of multiple file formats (including TXT, CSV, JSON, and other formats)
- With a visual operation interface, it can be used for operation, analysis, and display diagrams, reducing the threshold for users to use
@@ -44,24 +74,24 @@ The functions of this system include but are not limited to:
- [HugeGraph-Store]: HugeGraph-Store is a distributed storage engine to manage large-scale graph data by integrating storage and computation within a unified system.
- [HugeGraph-PD]: HugeGraph-PD (Placement Driver) manages metadata and coordinates storage nodes.
-- [HugeGraph-Server](/docs/quickstart/hugegraph-server): HugeGraph-Server is the core part of the HugeGraph project, containing Core, Backend, API and other submodules;
+- [HugeGraph-Server](/docs/quickstart/hugegraph/hugegraph-server): HugeGraph-Server is the core part of the HugeGraph project, containing Core, Backend, API and other submodules;
- Core: Implements the graph engine, connects to the Backend module downwards, and supports the API module upwards;
- Backend: Implements the storage of graph data to the backend, supports backends including Memory, Cassandra, ScyllaDB, RocksDB, HBase, MySQL and PostgreSQL, users can choose one according to the actual situation;
- API: Built-in REST Server provides RESTful API to users and is fully compatible with Gremlin queries. (Supports distributed storage and computation pushdown)
- [HugeGraph-Toolchain](https://github.com/apache/hugegraph-toolchain): (Toolchain)
- - [HugeGraph-Client](/docs/quickstart/client/hugegraph-client): HugeGraph-Client provides a RESTful API client for connecting to HugeGraph-Server, currently only the Java version is implemented, users of other languages can implement it themselves;
+ - [HugeGraph-Client](/docs/quickstart/client/hugegraph-client): HugeGraph-Client provides a RESTful API client for connecting to HugeGraph-Server, supporting Java/Python/Go multi-language versions;
- [HugeGraph-Loader](/docs/quickstart/toolchain/hugegraph-loader): HugeGraph-Loader is a data import tool based on HugeGraph-Client, which transforms ordinary text data into vertices and edges of the graph and inserts them into the graph database;
- - [HugeGraph-Hubble](/docs/quickstart/toolchain/hugegraph-hubble): HugeGraph-Hubble is HugeGraph's Web
+ - [HugeGraph-Hubble](/docs/quickstart/toolchain/hugegraph-hubble): HugeGraph-Hubble is HugeGraph's Web
visualization management platform, a one-stop visualization analysis platform, the platform covers the whole process from data modeling, to fast data import, to online and offline analysis of data, and unified management of the graph;
- [HugeGraph-Tools](/docs/quickstart/toolchain/hugegraph-tools): HugeGraph-Tools is HugeGraph's deployment and management tool, including graph management, backup/recovery, Gremlin execution and other functions.
-- [HugeGraph-Computer](/docs/quickstart/computing/hugegraph-computer): HugeGraph-Computer is a distributed graph processing system (OLAP).
- It is an implementation of [Pregel](https://kowshik.github.io/JPregel/pregel_paper.pdf). It can run on clusters such as Kubernetes/Yarn, and supports large-scale graph computing.
-- [HugeGraph-AI](/docs/quickstart/hugegraph-ai): HugeGraph-AI is HugeGraph's independent AI
- component, providing training and inference functions of graph neural networks, LLM/Graph RAG combination/Python-Client and other related components, continuously updating.
+- [HugeGraph-Computer](/docs/quickstart/computing/hugegraph-computer): HugeGraph-Computer is a distributed graph processing system (OLAP).
+ It is an implementation of [Pregel](https://kowshik.github.io/JPregel/pregel_paper.pdf). It can run on clusters such as Kubernetes/Yarn, and supports large-scale graph computing. It also provides the Vermeer lightweight graph computing engine, suitable for quick start and small-to-medium scale graph analysis.
+- [HugeGraph-AI](/docs/quickstart/hugegraph-ai): HugeGraph-AI is HugeGraph's independent AI
+ component, providing LLM/GraphRAG intelligent Q&A, automated knowledge graph construction, graph neural network training/inference, Python-Client and other features, with 20+ built-in graph machine learning algorithms; it is continuously updated.
### Contact Us
-- [GitHub Issues](https://github.com/apache/incubator-hugegraph/issues): Feedback on usage issues and functional requirements (quick response)
+- [GitHub Issues](https://github.com/apache/hugegraph/issues): Feedback on usage issues and functional requirements (quick response)
- Feedback Email: [dev@hugegraph.apache.org](mailto:dev@hugegraph.apache.org) ([subscriber](https://hugegraph.apache.org/docs/contribution-guidelines/subscribe/) only)
- Security Email: [security@hugegraph.apache.org](mailto:security@hugegraph.apache.org) (Report SEC problems)
- WeChat public account: Apache HugeGraph, welcome to scan this QR code to follow us.
diff --git a/content/en/docs/performance/api-preformance/_index.md b/content/en/docs/performance/api-performance/_index.md
similarity index 70%
rename from content/en/docs/performance/api-preformance/_index.md
rename to content/en/docs/performance/api-performance/_index.md
index d018a136f..5d08db000 100644
--- a/content/en/docs/performance/api-preformance/_index.md
+++ b/content/en/docs/performance/api-performance/_index.md
@@ -12,8 +12,7 @@ The HugeGraph API performance test mainly tests HugeGraph-Server's ability to co
For the performance test of the RESTful API of each release version of HugeGraph, please refer to:
-- [v0.5.6 stand-alone](/docs/performance/api-preformance/hugegraph-api-0.5.6-rocksdb/)
-- [v0.5.6 cluster](/docs/performance/api-preformance/hugegraph-api-0.5.6-cassandra/)
+- [HugeGraph API-0.5.6-RocksDB](/docs/performance/api-performance/hugegraph-api-0.5.6-rocksdb)
+- [HugeGraph API-0.5.6-Cassandra](/docs/performance/api-performance/hugegraph-api-0.5.6-cassandra)
> Updates coming soon, stay tuned!
-
diff --git a/content/en/docs/performance/api-preformance/hugegraph-api-0.2.md b/content/en/docs/performance/api-performance/hugegraph-api-0.2.md
similarity index 100%
rename from content/en/docs/performance/api-preformance/hugegraph-api-0.2.md
rename to content/en/docs/performance/api-performance/hugegraph-api-0.2.md
diff --git a/content/en/docs/performance/api-preformance/hugegraph-api-0.4.4.md b/content/en/docs/performance/api-performance/hugegraph-api-0.4.4.md
similarity index 100%
rename from content/en/docs/performance/api-preformance/hugegraph-api-0.4.4.md
rename to content/en/docs/performance/api-performance/hugegraph-api-0.4.4.md
diff --git a/content/en/docs/performance/api-preformance/hugegraph-api-0.5.6-Cassandra.md b/content/en/docs/performance/api-performance/hugegraph-api-0.5.6-cassandra.md
similarity index 100%
rename from content/en/docs/performance/api-preformance/hugegraph-api-0.5.6-Cassandra.md
rename to content/en/docs/performance/api-performance/hugegraph-api-0.5.6-cassandra.md
diff --git a/content/en/docs/performance/api-preformance/hugegraph-api-0.5.6-RocksDB.md b/content/en/docs/performance/api-performance/hugegraph-api-0.5.6-rocksdb.md
similarity index 100%
rename from content/en/docs/performance/api-preformance/hugegraph-api-0.5.6-RocksDB.md
rename to content/en/docs/performance/api-performance/hugegraph-api-0.5.6-rocksdb.md
diff --git a/content/en/docs/performance/hugegraph-loader-performance.md b/content/en/docs/performance/hugegraph-loader-performance.md
index 77b55f3ff..686ce50a3 100644
--- a/content/en/docs/performance/hugegraph-loader-performance.md
+++ b/content/en/docs/performance/hugegraph-loader-performance.md
@@ -14,7 +14,7 @@ weight: 3
## Use Cases
When the number of graph data to be batch inserted (including vertices and edges) is at the billion level or below,
-or the total data size is less than TB, the [HugeGraph-Loader](/docs/quickstart/hugegraph-loader) tool can be used to continuously and quickly import
+or the total data size is less than TB, the [HugeGraph-Loader](/docs/quickstart/toolchain/hugegraph-loader) tool can be used to continuously and quickly import
graph data.
## Performance
diff --git a/content/en/docs/quickstart/client/hugegraph-client-go.md b/content/en/docs/quickstart/client/hugegraph-client-go.md
index b412ba27d..c26f04c24 100644
--- a/content/en/docs/quickstart/client/hugegraph-client-go.md
+++ b/content/en/docs/quickstart/client/hugegraph-client-go.md
@@ -13,7 +13,7 @@ A HugeGraph Client SDK tool based on the Go language.
## Installation Tutorial
```shell
-go get github.com/apache/incubator-hugegraph-toolchain/hugegraph-client-go
+go get github.com/apache/hugegraph-toolchain/hugegraph-client-go
```
## Implemented APIs
@@ -34,8 +34,8 @@ import (
"log"
"os"
- "github.com/apache/incubator-hugegraph-toolchain/hugegraph-client-go"
- "github.com/apache/incubator-hugegraph-toolchain/hugegraph-client-go/hgtransport"
+ "github.com/apache/hugegraph-toolchain/hugegraph-client-go"
+ "github.com/apache/hugegraph-toolchain/hugegraph-client-go/hgtransport"
)
func main() {
@@ -73,8 +73,8 @@ import (
"log"
"os"
- "github.com/apache/incubator-hugegraph-toolchain/hugegraph-client-go"
- "github.com/apache/incubator-hugegraph-toolchain/hugegraph-client-go/hgtransport"
+ "github.com/apache/hugegraph-toolchain/hugegraph-client-go"
+ "github.com/apache/hugegraph-toolchain/hugegraph-client-go/hgtransport"
)
// initClient initializes and returns a HugeGraph client instance
diff --git a/content/en/docs/quickstart/client/hugegraph-client-python.md b/content/en/docs/quickstart/client/hugegraph-client-python.md
index 13310563f..c468a30e2 100644
--- a/content/en/docs/quickstart/client/hugegraph-client-python.md
+++ b/content/en/docs/quickstart/client/hugegraph-client-python.md
@@ -25,8 +25,8 @@ uv pip install hugegraph-python # Note: may not the latest version, recommend to
To install from the source, clone the repository and install the required dependencies:
```bash
-git clone https://github.com/apache/incubator-hugegraph-ai.git
-cd incubator-hugegraph-ai/hugegraph-python-client
+git clone https://github.com/apache/hugegraph-ai.git
+cd hugegraph-ai/hugegraph-python-client
# Normal install
uv pip install .
@@ -185,5 +185,5 @@ Thank you to all the people who already contributed to `hugegraph-python-client`
## Contact Us
-* [GitHub Issues](https://github.com/apache/incubator-hugegraph-ai/issues): Feedback on usage issues and functional requirements (quick response)
+* [GitHub Issues](https://github.com/apache/hugegraph-ai/issues): Feedback on usage issues and functional requirements (quick response)
diff --git a/content/en/docs/quickstart/client/hugegraph-client.md b/content/en/docs/quickstart/client/hugegraph-client.md
index 91ac7865e..9eb5a2af7 100644
--- a/content/en/docs/quickstart/client/hugegraph-client.md
+++ b/content/en/docs/quickstart/client/hugegraph-client.md
@@ -7,10 +7,10 @@ weight: 1
### 1 Overview Of Hugegraph
[HugeGraph-Client](https://github.com/apache/hugegraph-toolchain) sends HTTP request to HugeGraph-Server to get and parse the execution result of Server.
-We support HugeGraph-Client for Java/Go/[Python](https://github.com/apache/incubator-hugegraph-ai/tree/main/hugegraph-python-client) language.
+We support HugeGraph-Client for Java/Go/[Python](https://github.com/apache/hugegraph-ai/tree/main/hugegraph-python-client) language.
You can use [Client-API](/docs/clients/hugegraph-client) to write code to operate HugeGraph, such as adding, deleting, modifying, and querying schema and graph data, or executing gremlin statements.
-> [HugeGraph client SDK tool based on Go language](https://github.com/apache/incubator-hugegraph-toolchain/blob/master/hugegraph-client-go/README.en.md) (version >=1.2.0)
+> [HugeGraph client SDK tool based on Go language](https://github.com/apache/hugegraph-toolchain/blob/master/hugegraph-client-go/README.en.md) (version >=1.2.0)
### 2 What You Need
diff --git a/content/en/docs/quickstart/computing/_index.md b/content/en/docs/quickstart/computing/_index.md
index 5ec200bb5..80bbd5a7b 100644
--- a/content/en/docs/quickstart/computing/_index.md
+++ b/content/en/docs/quickstart/computing/_index.md
@@ -4,8 +4,8 @@ linkTitle: "HugeGraph Computing (OLAP)"
weight: 4
---
-## 🚀 Best practice: Prioritize using DeepWiki intelligent documents
+> DeepWiki provides real-time updated project documentation with more comprehensive and accurate content, suitable for quickly understanding the latest project information.
+>
+> 📖 [https://deepwiki.com/apache/hugegraph-computer](https://deepwiki.com/apache/hugegraph-computer)
-> To address the issue of outdated static documents, we provide DeepWiki with **real-time updates and more comprehensive content**. It is equivalent to an expert with the latest knowledge of the project, which is very suitable for **all developers** to read and consult before starting the project.
-
-**👉 Strongly recommend visiting and having a conversation with:** [**incubator-hugegraph-computer**](https://deepwiki.com/apache/incubator-hugegraph-computer)
\ No newline at end of file
+**GitHub Access:** [https://github.com/apache/hugegraph-computer](https://github.com/apache/hugegraph-computer)
\ No newline at end of file
diff --git a/content/en/docs/quickstart/computing/hugegraph-computer-config.md b/content/en/docs/quickstart/computing/hugegraph-computer-config.md
new file mode 100644
index 000000000..42afdb2f1
--- /dev/null
+++ b/content/en/docs/quickstart/computing/hugegraph-computer-config.md
@@ -0,0 +1,448 @@
+---
+title: "HugeGraph-Computer Configuration Reference"
+linkTitle: "Computer Config Reference"
+weight: 3
+---
+
+### Computer Config Options
+
+> **Default Value Notes:**
+> - Configuration items listed below show the **code default values** (defined in `ComputerOptions.java`)
+> - When the **packaged configuration file** (`conf/computer.properties` in the distribution) specifies a different value, it's noted as: `value (packaged: value)`
+> - Example: `300000 (packaged: 100000)` means the code default is 300000, but the distributed package defaults to 100000
+> - For production deployments, the packaged defaults take precedence unless you explicitly override them
+
+---
+
+### 1. Basic Configuration
+
+Core job settings for HugeGraph-Computer.
+
+| config option | default value | description |
+|---------------|---------------|-------------|
+| hugegraph.url | http://127.0.0.1:8080 | The HugeGraph server URL to load data and write results back. |
+| hugegraph.name | hugegraph | The graph name to load data and write results back. |
+| hugegraph.username | "" (empty) | The username for HugeGraph authentication (leave empty if authentication is disabled). |
+| hugegraph.password | "" (empty) | The password for HugeGraph authentication (leave empty if authentication is disabled). |
+| job.id | local_0001 (packaged: local_001) | The job identifier on YARN cluster or K8s cluster. |
+| job.namespace | "" (empty) | The job namespace that can separate different data sources. 🔒 **Managed by system - do not modify manually**. |
+| job.workers_count | 1 | The number of workers for computing one graph algorithm job. 🔒 **Managed by system - do not modify manually in K8s**. |
+| job.partitions_count | 1 | The number of partitions for computing one graph algorithm job. |
+| job.partitions_thread_nums | 4 | The number of threads for partition parallel compute. |
+
+---
+
+### 2. Algorithm Configuration
+
+Algorithm-specific configuration for computation logic.
+
+| config option | default value | description |
+|---------------|---------------|-------------|
+| algorithm.params_class | org.apache.hugegraph.computer.core.config.Null | ⚠️ **REQUIRED** The class used to transfer algorithm parameters before the algorithm is run. |
+| algorithm.result_class | org.apache.hugegraph.computer.core.config.Null | The class of vertex's value, used to store the computation result for the vertex. |
+| algorithm.message_class | org.apache.hugegraph.computer.core.config.Null | The class of message passed when computing a vertex. |
+
+---
+
+### 3. Input Configuration
+
+Configuration for loading input data from HugeGraph or other sources.
+
+#### 3.1 Input Source
+
+| config option | default value | description |
+|---------------|---------------|-------------|
+| input.source_type | hugegraph-server | The source type to load input data, allowed values: ['hugegraph-server', 'hugegraph-loader']. The 'hugegraph-loader' means use hugegraph-loader to load data from HDFS or file. If using 'hugegraph-loader', please configure 'input.loader_struct_path' and 'input.loader_schema_path'. |
+| input.loader_struct_path | "" (empty) | The struct path of loader input, only takes effect when input.source_type=hugegraph-loader is enabled. |
+| input.loader_schema_path | "" (empty) | The schema path of loader input, only takes effect when input.source_type=hugegraph-loader is enabled. |
+
+#### 3.2 Input Splits
+
+| config option | default value | description |
+|---------------|---------------|-------------|
+| input.split_size | 1048576 (1 MB) | The input split size in bytes. |
+| input.split_max_splits | 10000000 | The maximum number of input splits. |
+| input.split_page_size | 500 | The page size for streamed load input split data. |
+| input.split_fetch_timeout | 300 | The timeout in seconds to fetch input splits. |
+
+#### 3.3 Input Processing
+
+| config option | default value | description |
+|---------------|---------------|-------------|
+| input.filter_class | org.apache.hugegraph.computer.core.input.filter.DefaultInputFilter | The class to create input-filter object. Input-filter is used to filter vertex edges according to user needs. |
+| input.edge_direction | OUT | The direction of edges to load, allowed values: [OUT, IN, BOTH]. When the value is BOTH, edges in both OUT and IN directions will be loaded. |
+| input.edge_freq | MULTIPLE | The frequency of edges that can exist between a pair of vertices, allowed values: [SINGLE, SINGLE_PER_LABEL, MULTIPLE]. SINGLE means only one edge can exist between a pair of vertices (identified by sourceId + targetId); SINGLE_PER_LABEL means each edge label can have one edge between a pair of vertices (identified by sourceId + edgeLabel + targetId); MULTIPLE means many edges can exist between a pair of vertices (identified by sourceId + edgeLabel + sortValues + targetId). |
+| input.max_edges_in_one_vertex | 200 | The maximum number of adjacent edges allowed to be attached to a vertex. The adjacent edges will be stored and transferred together as a batch unit. |
+
+#### 3.4 Input Performance
+
+| config option | default value | description |
+|---------------|---------------|-------------|
+| input.send_thread_nums | 4 | The number of threads for parallel sending of vertices or edges. |
+
+---
+
+### 4. Snapshot & Storage Configuration
+
+HugeGraph-Computer supports snapshot functionality to save vertex/edge partitions to local storage or MinIO object storage, enabling checkpoint recovery or accelerating repeated computations.
+
+#### 4.1 Basic Snapshot Configuration
+
+| config option | default value | description |
+|---------------|---------------|-------------|
+| snapshot.write | false | Whether to write snapshots of input vertex/edge partitions. |
+| snapshot.load | false | Whether to load from snapshots of vertex/edge partitions. |
+| snapshot.name | "" (empty) | User-defined snapshot name to distinguish different snapshots. |
+
+#### 4.2 MinIO Integration (Optional)
+
+MinIO can be used as a distributed object storage backend for snapshots in K8s deployments.
+
+| config option | default value | description |
+|---------------|---------------|-------------|
+| snapshot.minio_endpoint | "" (empty) | MinIO service endpoint (e.g., `http://minio:9000`). Required when using MinIO. |
+| snapshot.minio_access_key | minioadmin | MinIO access key for authentication. |
+| snapshot.minio_secret_key | minioadmin | MinIO secret key for authentication. |
+| snapshot.minio_bucket_name | "" (empty) | MinIO bucket name for storing snapshot data. |
+
+**Usage Scenarios:**
+- **Checkpoint Recovery**: Resume from snapshots after job failures, avoiding data reloading
+- **Repeated Computations**: Load data from snapshots when running the same algorithm multiple times
+- **A/B Testing**: Save multiple snapshot versions of the same dataset to test different algorithm parameters
+
+**Example: Local Snapshot** (in `computer.properties`):
+```properties
+snapshot.write=true
+snapshot.name=pagerank-snapshot-20260201
+```
+
+**Example: MinIO Snapshot** (in K8s CRD `computerConf`):
+```yaml
+computerConf:
+ snapshot.write: "true"
+ snapshot.name: "pagerank-snapshot-v1"
+ snapshot.minio_endpoint: "http://minio:9000"
+ snapshot.minio_access_key: "my-access-key"
+ snapshot.minio_secret_key: "my-secret-key"
+ snapshot.minio_bucket_name: "hugegraph-snapshots"
+```
+
+---
+
+### 5. Worker & Master Configuration
+
+Configuration for worker and master computation logic.
+
+#### 5.1 Master Configuration
+
+| config option | default value | description |
+|---------------|---------------|-------------|
+| master.computation_class | org.apache.hugegraph.computer.core.master.DefaultMasterComputation | Master-computation is computation that can determine whether to continue to the next superstep. It runs at the end of each superstep on the master. |
+
+#### 5.2 Worker Computation
+
+| config option | default value | description |
+|---------------|---------------|-------------|
+| worker.computation_class | org.apache.hugegraph.computer.core.config.Null | The class to create worker-computation object. Worker-computation is used to compute each vertex in each superstep. |
+| worker.combiner_class | org.apache.hugegraph.computer.core.config.Null | Combiner can combine messages into one value for a vertex. For example, PageRank algorithm can combine messages of a vertex to a sum value. |
+| worker.partitioner | org.apache.hugegraph.computer.core.graph.partition.HashPartitioner | The partitioner that decides which partition a vertex should be in, and which worker a partition should be in. |
+
+#### 5.3 Worker Combiners
+
+| config option | default value | description |
+|---------------|---------------|-------------|
+| worker.vertex_properties_combiner_class | org.apache.hugegraph.computer.core.combiner.OverwritePropertiesCombiner | The combiner can combine several properties of the same vertex into one properties at input step. |
+| worker.edge_properties_combiner_class | org.apache.hugegraph.computer.core.combiner.OverwritePropertiesCombiner | The combiner can combine several properties of the same edge into one properties at input step. |
+
+#### 5.4 Worker Buffers
+
+| config option | default value | description |
+|---------------|---------------|-------------|
+| worker.received_buffers_bytes_limit | 104857600 (100 MB) | The limit bytes of buffers of received data. The total size of all buffers can't exceed this limit. If received buffers reach this limit, they will be merged into a file (spill to disk). |
+| worker.write_buffer_capacity | 52428800 (50 MB) | The initial size of write buffer that used to store vertex or message. |
+| worker.write_buffer_threshold | 52428800 (50 MB) | The threshold of write buffer. Exceeding it will trigger sorting. The write buffer is used to store vertex or message. |
+
+#### 5.5 Worker Data & Timeouts
+
+| config option | default value | description |
+|---------------|---------------|-------------|
+| worker.data_dirs | [jobs] | The directories separated by ',' that received vertices and messages can persist into. |
+| worker.wait_sort_timeout | 600000 (10 minutes) | The max timeout (in ms) for message-handler to wait for sort-thread to sort one batch of buffers. |
+| worker.wait_finish_messages_timeout | 86400000 (24 hours) | The max timeout (in ms) for message-handler to wait for finish-message of all workers. |
+
+---
+
+### 6. I/O & Output Configuration
+
+Configuration for output computation results.
+
+#### 6.1 Output Class & Result
+
+| config option | default value | description |
+|---------------|---------------|-------------|
+| output.output_class | org.apache.hugegraph.computer.core.output.LogOutput | The class to output the computation result of each vertex. Called after iteration computation. |
+| output.result_name | value | The value is assigned dynamically by #name() of instance created by WORKER_COMPUTATION_CLASS. |
+| output.result_write_type | OLAP_COMMON | The result write-type to output to HugeGraph, allowed values: [OLAP_COMMON, OLAP_SECONDARY, OLAP_RANGE]. |
+
+#### 6.2 Output Behavior
+
+| config option | default value | description |
+|---------------|---------------|-------------|
+| output.with_adjacent_edges | false | Whether to output the adjacent edges of the vertex. |
+| output.with_vertex_properties | false | Whether to output the properties of the vertex. |
+| output.with_edge_properties | false | Whether to output the properties of the edge. |
+
+#### 6.3 Batch Output
+
+| config option | default value | description |
+|---------------|---------------|-------------|
+| output.batch_size | 500 | The batch size of output. |
+| output.batch_threads | 1 | The number of threads used for batch output. |
+| output.single_threads | 1 | The number of threads used for single output. |
+
+#### 6.4 HDFS Output
+
+| config option | default value | description |
+|---------------|---------------|-------------|
+| output.hdfs_url | hdfs://127.0.0.1:9000 | The HDFS URL for output. |
+| output.hdfs_user | hadoop | The HDFS user for output. |
+| output.hdfs_path_prefix | /hugegraph-computer/results | The directory of HDFS output results. |
+| output.hdfs_delimiter | , (comma) | The delimiter of HDFS output. |
+| output.hdfs_merge_partitions | true | Whether to merge output files of multiple partitions. |
+| output.hdfs_replication | 3 | The replication number of HDFS. |
+| output.hdfs_core_site_path | "" (empty) | The HDFS core site path. |
+| output.hdfs_site_path | "" (empty) | The HDFS site path. |
+| output.hdfs_kerberos_enable | false | Whether Kerberos authentication is enabled for HDFS. |
+| output.hdfs_kerberos_principal | "" (empty) | The HDFS principal for Kerberos authentication. |
+| output.hdfs_kerberos_keytab | "" (empty) | The HDFS keytab file for Kerberos authentication. |
+| output.hdfs_krb5_conf | /etc/krb5.conf | Kerberos configuration file path. |
+
+#### 6.5 Retry & Timeout
+
+| config option | default value | description |
+|---------------|---------------|-------------|
+| output.retry_times | 3 | The retry times when output fails. |
+| output.retry_interval | 10 | The retry interval (in seconds) when output fails. |
+| output.thread_pool_shutdown_timeout | 60 | The timeout (in seconds) of output thread pool shutdown. |
+
+---
+
+### 7. Network & Transport Configuration
+
+Configuration for network communication between workers and master.
+
+#### 7.1 Server Configuration
+
+| config option | default value | description |
+|---------------|---------------|-------------|
+| transport.server_host | 127.0.0.1 | 🔒 **Managed by system** The server hostname or IP to listen on to transfer data. Do not modify manually. |
+| transport.server_port | 0 | 🔒 **Managed by system** The server port to listen on to transfer data. The system will assign a random port if set to 0. Do not modify manually. |
+| transport.server_threads | 4 | The number of transport threads for server. |
+
+#### 7.2 Client Configuration
+
+| config option | default value | description |
+|---------------|---------------|-------------|
+| transport.client_threads | 4 | The number of transport threads for client. |
+| transport.client_connect_timeout | 3000 | The timeout (in ms) of client connect to server. |
+
+#### 7.3 Protocol Configuration
+
+| config option | default value | description |
+|---------------|---------------|-------------|
+| transport.provider_class | org.apache.hugegraph.computer.core.network.netty.NettyTransportProvider | The transport provider, currently only supports Netty. |
+| transport.io_mode | AUTO | The network IO mode, allowed values: [NIO, EPOLL, AUTO]. AUTO means selecting the appropriate mode automatically. |
+| transport.tcp_keep_alive | true | Whether to enable TCP keep-alive. |
+| transport.transport_epoll_lt | false | Whether to enable EPOLL level-trigger (only effective when io_mode=EPOLL). |
+
+#### 7.4 Buffer Configuration
+
+| config option | default value | description |
+|---------------|---------------|-------------|
+| transport.send_buffer_size | 0 | The size of socket send-buffer in bytes. 0 means using system default value. |
+| transport.receive_buffer_size | 0 | The size of socket receive-buffer in bytes. 0 means using system default value. |
+| transport.write_buffer_high_mark | 67108864 (64 MB) | The high water mark for write buffer in bytes. It will trigger sending unavailable if the number of queued bytes > write_buffer_high_mark. |
+| transport.write_buffer_low_mark | 33554432 (32 MB) | The low water mark for write buffer in bytes. It will trigger sending available if the number of queued bytes < write_buffer_low_mark. |
+
+#### 7.5 Flow Control
+
+| config option | default value | description |
+|---------------|---------------|-------------|
+| transport.max_pending_requests | 8 | The max number of client unreceived ACKs. It will trigger sending unavailable if the number of unreceived ACKs >= max_pending_requests. |
+| transport.min_pending_requests | 6 | The minimum number of client unreceived ACKs. It will trigger sending available if the number of unreceived ACKs < min_pending_requests. |
+| transport.min_ack_interval | 200 | The minimum interval (in ms) of server reply ACK. |
+
+#### 7.6 Timeouts
+
+| config option | default value | description |
+|---------------|---------------|-------------|
+| transport.close_timeout | 10000 | The timeout (in ms) of close server or close client. |
+| transport.sync_request_timeout | 10000 | The timeout (in ms) to wait for response after sending sync-request. |
+| transport.finish_session_timeout | 0 | The timeout (in ms) to finish session. 0 means using (transport.sync_request_timeout × transport.max_pending_requests). |
+| transport.write_socket_timeout | 3000 | The timeout (in ms) to write data to socket buffer. |
+| transport.server_idle_timeout | 360000 (6 minutes) | The max timeout (in ms) of server idle. |
+
+#### 7.7 Heartbeat
+
+| config option | default value | description |
+|---------------|---------------|-------------|
+| transport.heartbeat_interval | 20000 (20 seconds) | The minimum interval (in ms) between heartbeats on client side. |
+| transport.max_timeout_heartbeat_count | 120 | The maximum times of timeout heartbeat on client side. If the number of timeouts waiting for heartbeat response continuously > max_timeout_heartbeat_count, the channel will be closed from client side. |
+
+#### 7.8 Advanced Network Settings
+
+| config option | default value | description |
+|---------------|---------------|-------------|
+| transport.max_syn_backlog | 511 | The capacity of SYN queue on server side. 0 means using system default value. |
+| transport.recv_file_mode | true | Whether to enable receive buffer-file mode. It will receive buffer and write to file from socket using zero-copy if enabled. **Note**: Requires OS support for zero-copy (e.g., Linux sendfile/splice). |
+| transport.network_retries | 3 | The number of retry attempts for network communication if network is unstable. |
+
+---
+
+### 8. Storage & Persistence Configuration
+
+Configuration for HGKV (HugeGraph Key-Value) storage engine and value files.
+
+#### 8.1 HGKV Configuration
+
+| config option | default value | description |
+|---------------|---------------|-------------|
+| hgkv.max_file_size | 2147483648 (2 GB) | The max number of bytes in each HGKV file. |
+| hgkv.max_data_block_size | 65536 (64 KB) | The max byte size of HGKV file data block. |
+| hgkv.max_merge_files | 10 | The max number of files to merge at one time. |
+| hgkv.temp_file_dir | /tmp/hgkv | This folder is used to store temporary files during the file merging process. |
+
+#### 8.2 Value File Configuration
+
+| config option | default value | description |
+|---------------|---------------|-------------|
+| valuefile.max_segment_size | 1073741824 (1 GB) | The max number of bytes in each segment of value-file. |
+
+---
+
+### 9. BSP & Coordination Configuration
+
+Configuration for Bulk Synchronous Parallel (BSP) protocol and etcd coordination.
+
+| config option | default value | description |
+|---------------|---------------|-------------|
+| bsp.etcd_endpoints | http://localhost:2379 | 🔒 **Managed by system in K8s** The endpoints to access etcd. For multiple endpoints, use comma-separated list: `http://host1:port1,http://host2:port2`. Do not modify manually in K8s deployments. |
+| bsp.max_super_step | 10 (packaged: 2) | The max super step of the algorithm. |
+| bsp.register_timeout | 300000 (packaged: 100000) | The max timeout (in ms) to wait for master and workers to register. |
+| bsp.wait_workers_timeout | 86400000 (24 hours) | The max timeout (in ms) to wait for workers BSP event. |
+| bsp.wait_master_timeout | 86400000 (24 hours) | The max timeout (in ms) to wait for master BSP event. |
+| bsp.log_interval | 30000 (30 seconds) | The log interval (in ms) to print the log while waiting for BSP event. |
+
+---
+
+### 10. Performance Tuning Configuration
+
+Configuration for performance optimization.
+
+| config option | default value | description |
+|---------------|---------------|-------------|
+| allocator.max_vertices_per_thread | 10000 | Maximum number of vertices per thread processed in each memory allocator. |
+| sort.thread_nums | 4 | The number of threads performing internal sorting. |
+
+---
+
+### 11. System Administration Configuration
+
+⚠️ **Configuration items managed by the system - users are prohibited from modifying these manually.**
+
+The following configuration items are automatically managed by the K8s Operator, Driver, or runtime system. Manual modification will cause cluster communication failures or job scheduling errors.
+
+| config option | managed by | description |
+|---------------|------------|-------------|
+| bsp.etcd_endpoints | K8s Operator | Automatically set to operator's etcd service address |
+| transport.server_host | Runtime | Automatically set to pod/container hostname |
+| transport.server_port | Runtime | Automatically assigned random port |
+| job.namespace | K8s Operator | Automatically set to job namespace |
+| job.id | K8s Operator | Automatically set to job ID from CRD |
+| job.workers_count | K8s Operator | Automatically set from CRD `workerInstances` |
+| rpc.server_host | Runtime | RPC server hostname (system-managed) |
+| rpc.server_port | Runtime | RPC server port (system-managed) |
+| rpc.remote_url | Runtime | RPC remote URL (system-managed) |
+
+**Why These Are Forbidden:**
+- **BSP/RPC Configuration**: Must match the actual deployed etcd/RPC services. Manual overrides break coordination.
+- **Job Configuration**: Must match K8s CRD specifications. Mismatches cause worker count errors.
+- **Transport Configuration**: Must use actual pod hostnames/ports. Manual values prevent inter-worker communication.
+
+---
+
+### K8s Operator Config Options
+
+> NOTE: Option needs to be converted through environment variable settings, e.g. k8s.internal_etcd_url => INTERNAL_ETCD_URL
+
+| config option | default value | description |
+|------------------------------|---------------------------|----------------------------------------------------------------------------------------------------------------------------------|
+| k8s.auto_destroy_pod | true | Whether to automatically destroy all pods when the job is completed or failed. |
+| k8s.close_reconciler_timeout | 120 | The max timeout (in ms) to close reconciler. |
+| k8s.internal_etcd_url | http://127.0.0.1:2379 | The internal etcd URL for operator system. |
+| k8s.max_reconcile_retry | 3 | The max retry times of reconcile. |
+| k8s.probe_backlog | 50 | The maximum backlog for serving health probes. |
+| k8s.probe_port | 9892 | The port that the controller binds to for serving health probes. |
+| k8s.ready_check_internal | 1000 | The time interval (ms) of check ready. |
+| k8s.ready_timeout | 30000 | The max timeout (in ms) of check ready. |
+| k8s.reconciler_count | 10 | The max number of reconciler threads. |
+| k8s.resync_period | 600000 | The minimum frequency at which watched resources are reconciled. |
+| k8s.timezone | Asia/Shanghai | The timezone of computer job and operator. |
+| k8s.watch_namespace | hugegraph-computer-system | The namespace to watch custom resources in. Use '*' to watch all namespaces. |
+
+---
+
+### HugeGraph-Computer CRD
+
+> CRD: https://github.com/apache/hugegraph-computer/blob/master/computer-k8s-operator/manifest/hugegraph-computer-crd.v1.yaml
+
+| spec | default value | description | required |
+|-----------------|-------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|
+| algorithmName | | The name of algorithm. | true |
+| jobId | | The job id. | true |
+| image | | The image of algorithm. | true |
+| computerConf | | The map of computer config options. | true |
+| workerInstances | | The number of worker instances, it will override the 'job.workers_count' option. | true |
+| pullPolicy | Always | The pull-policy of image, detail please refer to: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy | false |
+| pullSecrets | | The pull-secrets of Image, detail please refer to: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod | false |
+| masterCpu | | The cpu limit of master, the unit can be 'm' or unitless; for details please refer to: [https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu) | false |
+| workerCpu | | The cpu limit of worker, the unit can be 'm' or unitless; for details please refer to: [https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu) | false |
+| masterMemory | | The memory limit of master, the unit can be one of Ei, Pi, Ti, Gi, Mi, Ki; for details please refer to: [https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory) | false |
+| workerMemory | | The memory limit of worker, the unit can be one of Ei, Pi, Ti, Gi, Mi, Ki; for details please refer to: [https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory) | false |
+| log4jXml | | The content of log4j.xml for computer job. | false |
+| jarFile | | The jar path of computer algorithm. | false |
+| remoteJarUri | | The remote jar uri of computer algorithm, it will overlay algorithm image. | false |
+| jvmOptions | | The java startup parameters of computer job. | false |
+| envVars | | please refer to: https://kubernetes.io/docs/tasks/inject-data-application/define-interdependent-environment-variables/ | false |
+| envFrom | | please refer to: https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/ | false |
+| masterCommand | bin/start-computer.sh | The run command of master, equivalent to 'Entrypoint' field of Docker. | false |
+| masterArgs | ["-r master", "-d k8s"] | The run args of master, equivalent to 'Cmd' field of Docker. | false |
+| workerCommand | bin/start-computer.sh | The run command of worker, equivalent to 'Entrypoint' field of Docker. | false |
+| workerArgs | ["-r worker", "-d k8s"] | The run args of worker, equivalent to 'Cmd' field of Docker. | false |
+| volumes | | Please refer to: https://kubernetes.io/docs/concepts/storage/volumes/ | false |
+| volumeMounts | | Please refer to: https://kubernetes.io/docs/concepts/storage/volumes/ | false |
+| secretPaths | | The map of k8s-secret name and mount path. | false |
+| configMapPaths | | The map of k8s-configmap name and mount path. | false |
+| podTemplateSpec | | Please refer to: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-template-v1/#PodTemplateSpec | false |
+| securityContext | | Please refer to: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ | false |
+
+---
+
+### KubeDriver Config Options
+
+| config option | default value | description |
+|----------------------------------|------------------------------------------|-----------------------------------------------------------|
+| k8s.build_image_bash_path | | The path of command used to build image. |
+| k8s.enable_internal_algorithm | true | Whether enable internal algorithm. |
+| k8s.framework_image_url | hugegraph/hugegraph-computer:latest | The image url of computer framework. |
+| k8s.image_repository_password | | The password for login image repository. |
+| k8s.image_repository_registry | | The address for login image repository. |
+| k8s.image_repository_url | hugegraph/hugegraph-computer | The url of image repository. |
+| k8s.image_repository_username | | The username for login image repository. |
+| k8s.internal_algorithm | [pageRank] | The name list of all internal algorithm. **Note**: Algorithm names use camelCase here (e.g., `pageRank`), but algorithm implementations return underscore_case (e.g., `page_rank`). |
+| k8s.internal_algorithm_image_url | hugegraph/hugegraph-computer:latest | The image url of internal algorithm. |
+| k8s.jar_file_dir | /cache/jars/ | The directory where the algorithm jar will be uploaded. |
+| k8s.kube_config | ~/.kube/config | The path of k8s config file. |
+| k8s.log4j_xml_path | | The log4j.xml path for computer job. |
+| k8s.namespace | hugegraph-computer-system | The namespace of hugegraph-computer system. |
+| k8s.pull_secret_names | [] | The names of pull-secret for pulling image. |
diff --git a/content/en/docs/quickstart/computing/hugegraph-computer.md b/content/en/docs/quickstart/computing/hugegraph-computer.md
index 26924c610..92b84d454 100644
--- a/content/en/docs/quickstart/computing/hugegraph-computer.md
+++ b/content/en/docs/quickstart/computing/hugegraph-computer.md
@@ -6,7 +6,7 @@ weight: 2
## 1 HugeGraph-Computer Overview
-The [`HugeGraph-Computer`](https://github.com/apache/incubator-hugegraph-computer) is a distributed graph processing system for HugeGraph (OLAP). It is an implementation of [Pregel](https://kowshik.github.io/JPregel/pregel_paper.pdf). It runs on a Kubernetes(K8s) framework.(It focuses on supporting graph data volumes of hundreds of billions to trillions, using disk for sorting and acceleration, which is one of the biggest differences from Vermeer)
+The [`HugeGraph-Computer`](https://github.com/apache/hugegraph-computer) is a distributed graph processing system for HugeGraph (OLAP). It is an implementation of [Pregel](https://kowshik.github.io/JPregel/pregel_paper.pdf). It runs on a Kubernetes (K8s) framework. (It focuses on supporting graph data volumes of hundreds of billions to trillions, using disk for sorting and acceleration, which is one of the biggest differences from Vermeer)
### Features
@@ -44,7 +44,7 @@ There are two ways to get HugeGraph-Computer:
Download the latest version of the HugeGraph-Computer release package:
```bash
-wget https://downloads.apache.org/incubator/hugegraph/${version}/apache-hugegraph-computer-incubating-${version}.tar.gz
+wget https://downloads.apache.org/hugegraph/${version}/apache-hugegraph-computer-incubating-${version}.tar.gz
tar zxvf apache-hugegraph-computer-incubating-${version}.tar.gz -C hugegraph-computer
```
@@ -63,24 +63,53 @@ cd hugegraph-computer
mvn clean package -DskipTests
```
-#### 3.1.3 Start master node
+#### 3.1.3 Configure computer.properties
-> You can use `-c` parameter specify the configuration file, more computer config please see:[Computer Config Options](/docs/config/config-computer#computer-config-options)
+Edit `conf/computer.properties` to configure the connection to HugeGraph-Server and etcd:
+
+```properties
+# Job configuration
+job.id=local_pagerank_001
+job.partitions_count=4
+
+# HugeGraph connection (✅ Correct configuration keys)
+hugegraph.url=http://localhost:8080
+hugegraph.name=hugegraph
+# If authentication is enabled on HugeGraph-Server
+hugegraph.username=
+hugegraph.password=
+
+# BSP coordination (✅ Correct key: bsp.etcd_endpoints)
+bsp.etcd_endpoints=http://localhost:2379
+bsp.max_super_step=10
+
+# Algorithm parameters (⚠️ Required)
+algorithm.params_class=org.apache.hugegraph.computer.algorithm.centrality.pagerank.PageRankParams
+```
+
+> **Important Configuration Notes:**
+> - Use `bsp.etcd_endpoints` (NOT `bsp.etcd.url`) for etcd connection
+> - `algorithm.params_class` is required for all algorithms
+> - For multiple etcd endpoints, use comma-separated list: `http://host1:2379,http://host2:2379`
+
+#### 3.1.4 Start master node
+
+> You can use the `-c` parameter to specify the configuration file; for more computer config options, please see: [Computer Config Options](/docs/quickstart/computing/hugegraph-computer-config#computer-config-options)
```bash
cd hugegraph-computer
bin/start-computer.sh -d local -r master
```
-#### 3.1.4 Start worker node
+#### 3.1.5 Start worker node
```bash
bin/start-computer.sh -d local -r worker
```
-#### 3.1.5 Query algorithm results
+#### 3.1.6 Query algorithm results
-3.1.5.1 Enable `OLAP` index query for server
+3.1.6.1 Enable `OLAP` index query for server
If the OLAP index is not enabled, it needs to be enabled. More reference: [modify-graphs-read-mode](/docs/clients/restful-api/graphs/#634-modify-graphs-read-mode-this-operation-requires-administrator-privileges)
@@ -90,12 +119,14 @@ PUT http://localhost:8080/graphs/hugegraph/graph_read_mode
"ALL"
```
-3.1.5.2 Query `page_rank` property value:
+3.1.6.2 Query `page_rank` property value:
```bash
curl "http://localhost:8080/graphs/hugegraph/graph/vertices?page&limit=3" | gunzip
```
+---
+
### 3.2 Run PageRank algorithm in Kubernetes
> To run an algorithm with HugeGraph-Computer, you need to deploy HugeGraph-Server first
@@ -137,9 +168,11 @@ hugegraph-computer-operator-etcd-28lm67jxk5 1/1 Runnin
#### 3.2.5 Submit a job
-> More computer crd please see: [Computer CRD](/docs/config/config-computer#hugegraph-computer-crd)
+> More computer crd please see: [Computer CRD](/docs/quickstart/computing/hugegraph-computer-config#hugegraph-computer-crd)
>
-> More computer config please see: [Computer Config Options](/docs/config/config-computer#computer-config-options)
+> More computer config please see: [Computer Config Options](/docs/quickstart/computing/hugegraph-computer-config#computer-config-options)
+
+**Basic Example:**
```yaml
cat < To address the issue of outdated static documents, we provide DeepWiki with **real-time updates and more comprehensive content**. It is equivalent to an expert with the latest knowledge of the project, which is very suitable for **all developers** to read and consult before starting the project.
-
-**👉 Strongly recommend visiting and having a conversation with:** [**incubator-hugegraph-ai**](https://deepwiki.com/apache/incubator-hugegraph-ai)
+> DeepWiki provides continuously updated project documentation with more comprehensive and accurate content, making it well-suited for quickly understanding the latest state of the project.
+>
+> 📖 [https://deepwiki.com/apache/hugegraph-ai](https://deepwiki.com/apache/hugegraph-ai)
`hugegraph-ai` integrates [HugeGraph](https://github.com/apache/hugegraph) with artificial intelligence capabilities, providing comprehensive support for developers to build AI-powered graph applications.
## ✨ Key Features
- **GraphRAG**: Build intelligent question-answering systems with graph-enhanced retrieval
+- **Text2Gremlin**: Natural language to graph query conversion with REST API
- **Knowledge Graph Construction**: Automated graph building from text using LLMs
-- **Graph ML**: Integration with 20+ graph learning algorithms (GCN, GAT, GraphSAGE, etc.)
+- **Graph ML**: Integration with 21 graph learning algorithms (GCN, GAT, GraphSAGE, etc.)
- **Python Client**: Easy-to-use Python interface for HugeGraph operations
- **AI Agents**: Intelligent graph analysis and reasoning capabilities
+### 🎉 What's New in v1.5.0
+
+- **Text2Gremlin REST API**: Convert natural language queries to Gremlin commands via REST endpoints
+- **Multi-Model Vector Support**: Each graph instance can use independent embedding models
+- **Bilingual Prompt Support**: Switch between English and Chinese prompts (EN/CN)
+- **Semi-Automatic Schema Generation**: Intelligent schema inference from text data
+- **Semi-Automatic Prompt Generation**: Context-aware prompt templates
+- **Enhanced Reranker Support**: Integration with Cohere and SiliconFlow rerankers
+- **LiteLLM Multi-Provider Support**: Unified interface for OpenAI, Anthropic, Gemini, and more
+
## 🚀 Quick Start
> [!NOTE]
-> For a complete deployment guide and detailed examples, please refer to [hugegraph-llm/README.md](https://github.com/apache/incubator-hugegraph-ai/blob/main/hugegraph-llm/README.md)
+> For a complete deployment guide and detailed examples, please refer to [hugegraph-llm/README.md](https://github.com/apache/hugegraph-ai/blob/main/hugegraph-llm/README.md)
### Prerequisites
-- Python 3.9+ (3.10+ recommended for hugegraph-llm)
-- [uv](https://docs.astral.sh/uv/) (recommended package manager)
-- HugeGraph Server 1.3+ (1.5+ recommended)
+- Python 3.10+ (required for hugegraph-llm)
+- [uv](https://docs.astral.sh/uv/) 0.7+ (recommended package manager)
+- HugeGraph Server 1.5+ (required)
- Docker (optional, for containerized deployment)
### Option 1: Docker Deployment (Recommended)
```bash
# Clone the repository
-git clone https://github.com/apache/incubator-hugegraph-ai.git
-cd incubator-hugegraph-ai
+git clone https://github.com/apache/hugegraph-ai.git
+cd hugegraph-ai
# Set up environment and start services
cp docker/env.template docker/.env
@@ -59,8 +68,8 @@ docker-compose -f docker-compose-network.yml up -d
docker run -itd --name=server -p 8080:8080 hugegraph/hugegraph
# 2. Clone and set up the project
-git clone https://github.com/apache/incubator-hugegraph-ai.git
-cd incubator-hugegraph-ai/hugegraph-llm
+git clone https://github.com/apache/hugegraph-ai.git
+cd hugegraph-ai/hugegraph-llm
# 3. Install dependencies
uv venv && source .venv/bin/activate
@@ -115,21 +124,23 @@ from pyhugegraph.client import PyHugeClient
## 📦 Modules
-### [hugegraph-llm](https://github.com/apache/incubator-hugegraph-ai/tree/main/hugegraph-llm) [](https://deepwiki.com/apache/incubator-hugegraph-ai)
+### [hugegraph-llm](https://github.com/apache/hugegraph-ai/tree/main/hugegraph-llm) [](https://deepwiki.com/apache/hugegraph-ai)
Large language model integration for graph applications:
- **GraphRAG**: Retrieval-augmented generation with graph data
- **Knowledge Graph Construction**: Build KGs from text automatically
- **Natural Language Interface**: Query graphs using natural language
- **AI Agents**: Intelligent graph analysis and reasoning
-### [hugegraph-ml](https://github.com/apache/incubator-hugegraph-ai/tree/main/hugegraph-ml)
-Graph machine learning with 20+ implemented algorithms:
-- **Node Classification**: GCN, GAT, GraphSAGE, APPNP, etc.
-- **Graph Classification**: DiffPool, P-GNN, etc.
-- **Graph Embedding**: DeepWalk, Node2Vec, GRACE, etc.
-- **Link Prediction**: SEAL, GATNE, etc.
+### [hugegraph-ml](https://github.com/apache/hugegraph-ai/tree/main/hugegraph-ml)
+Graph machine learning with 21 implemented algorithms:
+- **Node Classification**: GCN, GAT, GraphSAGE, APPNP, AGNN, ARMA, DAGNN, DeeperGCN, GRAND, JKNet, Cluster-GCN
+- **Graph Classification**: DiffPool, GIN
+- **Graph Embedding**: DGI, BGRL, GRACE
+- **Link Prediction**: SEAL, P-GNN, GATNE
+- **Fraud Detection**: CARE-GNN, BGNN
+- **Post-Processing**: C&S (Correct & Smooth)
-### [hugegraph-python-client](https://github.com/apache/incubator-hugegraph-ai/tree/main/hugegraph-python-client)
+### [hugegraph-python-client](https://github.com/apache/hugegraph-ai/tree/main/hugegraph-python-client)
Python client for HugeGraph operations:
- **Schema Management**: Define vertex/edge labels and properties
- **CRUD Operations**: Create, read, update, delete graph data
@@ -139,8 +150,8 @@ Python client for HugeGraph operations:
## 📚 Learn More
- [Project Homepage](https://hugegraph.apache.org/docs/quickstart/hugegraph-ai/)
-- [LLM Quick Start Guide](https://github.com/apache/incubator-hugegraph-ai/blob/main/hugegraph-llm/quick_start.md)
-- [DeepWiki AI Documentation](https://deepwiki.com/apache/incubator-hugegraph-ai)
+- [LLM Quick Start Guide](https://github.com/apache/hugegraph-ai/blob/main/hugegraph-llm/quick_start.md)
+- [DeepWiki AI Documentation](https://deepwiki.com/apache/hugegraph-ai)
## 🔗 Related Projects
@@ -157,16 +168,16 @@ We welcome contributions! Please see our [contribution guidelines](https://hugeg
- Run `./style/code_format_and_analysis.sh` before submitting PRs
- Check existing issues before reporting bugs
-[](https://github.com/apache/incubator-hugegraph-ai/graphs/contributors)
+[](https://github.com/apache/hugegraph-ai/graphs/contributors)
## 📄 License
-hugegraph-ai is licensed under [Apache 2.0 License](https://github.com/apache/incubator-hugegraph-ai/blob/main/LICENSE).
+hugegraph-ai is licensed under [Apache 2.0 License](https://github.com/apache/hugegraph-ai/blob/main/LICENSE).
## 📞 Contact Us
-- **GitHub Issues**: [Report bugs or request features](https://github.com/apache/incubator-hugegraph-ai/issues) (fastest response)
+- **GitHub Issues**: [Report bugs or request features](https://github.com/apache/hugegraph-ai/issues) (fastest response)
- **Email**: [dev@hugegraph.apache.org](mailto:dev@hugegraph.apache.org) ([subscription required](https://hugegraph.apache.org/docs/contribution-guidelines/subscribe/))
-- **WeChat**: Follow "Apache HugeGraph" official account
+- **WeChat**: Follow "Apache HugeGraph" on WeChat
diff --git a/content/en/docs/quickstart/hugegraph-ai/config-reference.md b/content/en/docs/quickstart/hugegraph-ai/config-reference.md
new file mode 100644
index 000000000..502a1d568
--- /dev/null
+++ b/content/en/docs/quickstart/hugegraph-ai/config-reference.md
@@ -0,0 +1,396 @@
+---
+title: "Configuration Reference"
+linkTitle: "Configuration Reference"
+weight: 4
+---
+
+This document provides a comprehensive reference for all configuration options in HugeGraph-LLM.
+
+## Configuration Files
+
+- **Environment File**: `.env` (created from template or auto-generated)
+- **Prompt Configuration**: `src/hugegraph_llm/resources/demo/config_prompt.yaml`
+
+> [!TIP]
+> Run `python -m hugegraph_llm.config.generate --update` to auto-generate or update configuration files with defaults.
+
+## Environment Variables Overview
+
+### 1. Language and Model Type Selection
+
+```bash
+# Prompt language (affects system prompts and generated text)
+LANGUAGE=EN # Options: EN | CN
+
+# LLM Type for different tasks
+CHAT_LLM_TYPE=openai # Chat/RAG: openai | litellm | ollama/local
+EXTRACT_LLM_TYPE=openai # Entity extraction: openai | litellm | ollama/local
+TEXT2GQL_LLM_TYPE=openai # Text2Gremlin: openai | litellm | ollama/local
+
+# Embedding type
+EMBEDDING_TYPE=openai # Options: openai | litellm | ollama/local
+
+# Reranker type (optional)
+RERANKER_TYPE= # Options: cohere | siliconflow | (empty for none)
+```
+
+### 2. OpenAI Configuration
+
+Each LLM task (chat, extract, text2gql) has independent configuration:
+
+#### 2.1 Chat LLM (RAG Answer Generation)
+
+```bash
+OPENAI_CHAT_API_BASE=https://api.openai.com/v1
+OPENAI_CHAT_API_KEY=sk-your-api-key-here
+OPENAI_CHAT_LANGUAGE_MODEL=gpt-4o-mini
+OPENAI_CHAT_TOKENS=8192 # Max tokens for chat responses
+```
+
+#### 2.2 Extract LLM (Entity & Relation Extraction)
+
+```bash
+OPENAI_EXTRACT_API_BASE=https://api.openai.com/v1
+OPENAI_EXTRACT_API_KEY=sk-your-api-key-here
+OPENAI_EXTRACT_LANGUAGE_MODEL=gpt-4o-mini
+OPENAI_EXTRACT_TOKENS=1024 # Max tokens for extraction
+```
+
+#### 2.3 Text2GQL LLM (Natural Language to Gremlin)
+
+```bash
+OPENAI_TEXT2GQL_API_BASE=https://api.openai.com/v1
+OPENAI_TEXT2GQL_API_KEY=sk-your-api-key-here
+OPENAI_TEXT2GQL_LANGUAGE_MODEL=gpt-4o-mini
+OPENAI_TEXT2GQL_TOKENS=4096 # Max tokens for query generation
+```
+
+#### 2.4 Embedding Model
+
+```bash
+OPENAI_EMBEDDING_API_BASE=https://api.openai.com/v1
+OPENAI_EMBEDDING_API_KEY=sk-your-api-key-here
+OPENAI_EMBEDDING_MODEL=text-embedding-3-small
+```
+
+> [!NOTE]
+> You can use different API keys/endpoints for each task to optimize costs or use specialized models.
+
+### 3. LiteLLM Configuration (Multi-Provider Support)
+
+LiteLLM enables unified access to 100+ LLM providers (OpenAI, Anthropic, Google, Azure, etc.).
+
+#### 3.1 Chat LLM
+
+```bash
+LITELLM_CHAT_API_BASE=http://localhost:4000 # LiteLLM proxy URL
+LITELLM_CHAT_API_KEY=sk-litellm-key # LiteLLM API key
+LITELLM_CHAT_LANGUAGE_MODEL=anthropic/claude-3-5-sonnet-20241022
+LITELLM_CHAT_TOKENS=8192
+```
+
+#### 3.2 Extract LLM
+
+```bash
+LITELLM_EXTRACT_API_BASE=http://localhost:4000
+LITELLM_EXTRACT_API_KEY=sk-litellm-key
+LITELLM_EXTRACT_LANGUAGE_MODEL=openai/gpt-4o-mini
+LITELLM_EXTRACT_TOKENS=256
+```
+
+#### 3.3 Text2GQL LLM
+
+```bash
+LITELLM_TEXT2GQL_API_BASE=http://localhost:4000
+LITELLM_TEXT2GQL_API_KEY=sk-litellm-key
+LITELLM_TEXT2GQL_LANGUAGE_MODEL=openai/gpt-4o-mini
+LITELLM_TEXT2GQL_TOKENS=4096
+```
+
+#### 3.4 Embedding
+
+```bash
+LITELLM_EMBEDDING_API_BASE=http://localhost:4000
+LITELLM_EMBEDDING_API_KEY=sk-litellm-key
+LITELLM_EMBEDDING_MODEL=openai/text-embedding-3-small
+```
+
+**Model Format**: `provider/model-name`
+
+Examples:
+- `openai/gpt-4o-mini`
+- `anthropic/claude-3-5-sonnet-20241022`
+- `google/gemini-2.0-flash-exp`
+- `azure/gpt-4`
+
+See [LiteLLM Providers](https://docs.litellm.ai/docs/providers) for the complete list.
+
+### 4. Ollama Configuration (Local Deployment)
+
+Run local LLMs with Ollama for privacy and cost control.
+
+#### 4.1 Chat LLM
+
+```bash
+OLLAMA_CHAT_HOST=127.0.0.1
+OLLAMA_CHAT_PORT=11434
+OLLAMA_CHAT_LANGUAGE_MODEL=llama3.1:8b
+```
+
+#### 4.2 Extract LLM
+
+```bash
+OLLAMA_EXTRACT_HOST=127.0.0.1
+OLLAMA_EXTRACT_PORT=11434
+OLLAMA_EXTRACT_LANGUAGE_MODEL=llama3.1:8b
+```
+
+#### 4.3 Text2GQL LLM
+
+```bash
+OLLAMA_TEXT2GQL_HOST=127.0.0.1
+OLLAMA_TEXT2GQL_PORT=11434
+OLLAMA_TEXT2GQL_LANGUAGE_MODEL=qwen2.5-coder:7b
+```
+
+#### 4.4 Embedding
+
+```bash
+OLLAMA_EMBEDDING_HOST=127.0.0.1
+OLLAMA_EMBEDDING_PORT=11434
+OLLAMA_EMBEDDING_MODEL=nomic-embed-text
+```
+
+> [!TIP]
+> Download models: `ollama pull llama3.1:8b` or `ollama pull qwen2.5-coder:7b`
+
+### 5. Reranker Configuration
+
+Rerankers improve RAG accuracy by reordering retrieved results based on relevance.
+
+#### 5.1 Cohere Reranker
+
+```bash
+RERANKER_TYPE=cohere
+COHERE_BASE_URL=https://api.cohere.com/v1/rerank
+RERANKER_API_KEY=your-cohere-api-key
+RERANKER_MODEL=rerank-english-v3.0
+```
+
+Available models:
+- `rerank-english-v3.0` (English)
+- `rerank-multilingual-v3.0` (100+ languages)
+
+#### 5.2 SiliconFlow Reranker
+
+```bash
+RERANKER_TYPE=siliconflow
+RERANKER_API_KEY=your-siliconflow-api-key
+RERANKER_MODEL=BAAI/bge-reranker-v2-m3
+```
+
+### 6. HugeGraph Connection
+
+Configure connection to your HugeGraph server instance.
+
+```bash
+# Server connection
+GRAPH_IP=127.0.0.1
+GRAPH_PORT=8080
+GRAPH_NAME=hugegraph # Graph instance name
+GRAPH_USER=admin # Username
+GRAPH_PWD=admin-password # Password
+GRAPH_SPACE= # Graph space (optional, for multi-tenancy)
+```
+
+### 7. Query Parameters
+
+Control graph traversal behavior and result limits.
+
+```bash
+# Graph traversal limits
+MAX_GRAPH_PATH=10 # Max path depth for graph queries
+MAX_GRAPH_ITEMS=30 # Max items to retrieve from graph
+EDGE_LIMIT_PRE_LABEL=8 # Max edges per label type
+
+# Property filtering
+LIMIT_PROPERTY=False # Limit properties in results (True/False)
+```
+
+### 8. Vector Search Configuration
+
+Configure vector similarity search parameters.
+
+```bash
+# Vector search thresholds
+VECTOR_DIS_THRESHOLD=0.9 # Min cosine similarity (0-1, higher = stricter)
+TOPK_PER_KEYWORD=1 # Top-K results per extracted keyword
+```
+
+### 9. Rerank Configuration
+
+```bash
+# Rerank result limits
+TOPK_RETURN_RESULTS=20 # Number of top results after reranking
+```
+
+## Configuration Priority
+
+The system loads configuration in the following order (later sources override earlier ones):
+
+1. **Default Values** (in `*_config.py` files)
+2. **Environment Variables** (from `.env` file)
+3. **Runtime Updates** (via Web UI or API calls)
+
+## Example Configurations
+
+### Minimal Setup (OpenAI)
+
+```bash
+# Language
+LANGUAGE=EN
+
+# LLM Types
+CHAT_LLM_TYPE=openai
+EXTRACT_LLM_TYPE=openai
+TEXT2GQL_LLM_TYPE=openai
+EMBEDDING_TYPE=openai
+
+# OpenAI Credentials (single key for all tasks)
+OPENAI_API_BASE=https://api.openai.com/v1
+OPENAI_API_KEY=sk-your-api-key-here
+OPENAI_LANGUAGE_MODEL=gpt-4o-mini
+OPENAI_EMBEDDING_MODEL=text-embedding-3-small
+
+# HugeGraph Connection
+GRAPH_IP=127.0.0.1
+GRAPH_PORT=8080
+GRAPH_NAME=hugegraph
+GRAPH_USER=admin
+GRAPH_PWD=admin
+```
+
+### Production Setup (LiteLLM + Reranker)
+
+```bash
+# Bilingual support
+LANGUAGE=EN
+
+# LiteLLM for flexibility
+CHAT_LLM_TYPE=litellm
+EXTRACT_LLM_TYPE=litellm
+TEXT2GQL_LLM_TYPE=litellm
+EMBEDDING_TYPE=litellm
+
+# LiteLLM Proxy
+LITELLM_CHAT_API_BASE=http://localhost:4000
+LITELLM_CHAT_API_KEY=sk-litellm-master-key
+LITELLM_CHAT_LANGUAGE_MODEL=anthropic/claude-3-5-sonnet-20241022
+LITELLM_CHAT_TOKENS=8192
+
+LITELLM_EXTRACT_API_BASE=http://localhost:4000
+LITELLM_EXTRACT_API_KEY=sk-litellm-master-key
+LITELLM_EXTRACT_LANGUAGE_MODEL=openai/gpt-4o-mini
+LITELLM_EXTRACT_TOKENS=256
+
+LITELLM_TEXT2GQL_API_BASE=http://localhost:4000
+LITELLM_TEXT2GQL_API_KEY=sk-litellm-master-key
+LITELLM_TEXT2GQL_LANGUAGE_MODEL=openai/gpt-4o-mini
+LITELLM_TEXT2GQL_TOKENS=4096
+
+LITELLM_EMBEDDING_API_BASE=http://localhost:4000
+LITELLM_EMBEDDING_API_KEY=sk-litellm-master-key
+LITELLM_EMBEDDING_MODEL=openai/text-embedding-3-small
+
+# Cohere Reranker for better accuracy
+RERANKER_TYPE=cohere
+COHERE_BASE_URL=https://api.cohere.com/v1/rerank
+RERANKER_API_KEY=your-cohere-key
+RERANKER_MODEL=rerank-multilingual-v3.0
+
+# HugeGraph with authentication
+GRAPH_IP=prod-hugegraph.example.com
+GRAPH_PORT=8080
+GRAPH_NAME=production_graph
+GRAPH_USER=rag_user
+GRAPH_PWD=secure-password
+GRAPH_SPACE=prod_space
+
+# Optimized query parameters
+MAX_GRAPH_PATH=15
+MAX_GRAPH_ITEMS=50
+VECTOR_DIS_THRESHOLD=0.85
+TOPK_RETURN_RESULTS=30
+```
+
+### Local/Offline Setup (Ollama)
+
+```bash
+# Language
+LANGUAGE=EN
+
+# All local models via Ollama
+CHAT_LLM_TYPE=ollama/local
+EXTRACT_LLM_TYPE=ollama/local
+TEXT2GQL_LLM_TYPE=ollama/local
+EMBEDDING_TYPE=ollama/local
+
+# Ollama endpoints
+OLLAMA_CHAT_HOST=127.0.0.1
+OLLAMA_CHAT_PORT=11434
+OLLAMA_CHAT_LANGUAGE_MODEL=llama3.1:8b
+
+OLLAMA_EXTRACT_HOST=127.0.0.1
+OLLAMA_EXTRACT_PORT=11434
+OLLAMA_EXTRACT_LANGUAGE_MODEL=llama3.1:8b
+
+OLLAMA_TEXT2GQL_HOST=127.0.0.1
+OLLAMA_TEXT2GQL_PORT=11434
+OLLAMA_TEXT2GQL_LANGUAGE_MODEL=qwen2.5-coder:7b
+
+OLLAMA_EMBEDDING_HOST=127.0.0.1
+OLLAMA_EMBEDDING_PORT=11434
+OLLAMA_EMBEDDING_MODEL=nomic-embed-text
+
+# No reranker for offline setup
+RERANKER_TYPE=
+
+# Local HugeGraph
+GRAPH_IP=127.0.0.1
+GRAPH_PORT=8080
+GRAPH_NAME=hugegraph
+GRAPH_USER=admin
+GRAPH_PWD=admin
+```
+
+## Configuration Validation
+
+After modifying `.env`, verify your configuration:
+
+1. **Via Web UI**: Visit `http://localhost:8001` and check the settings panel
+2. **Via Python**:
+```python
+from hugegraph_llm.config import settings
+print(settings.llm_config)
+print(settings.hugegraph_config)
+```
+3. **Via REST API**:
+```bash
+curl http://localhost:8001/config
+```
+
+## Troubleshooting
+
+| Issue | Solution |
+|-------|----------|
+| "API key not found" | Check `*_API_KEY` is set correctly in `.env` |
+| "Connection refused" | Verify `GRAPH_IP` and `GRAPH_PORT` are correct |
+| "Model not found" | For Ollama: run `ollama pull <model-name>` |
+| "Rate limit exceeded" | Reduce `MAX_GRAPH_ITEMS` or use different API keys |
+| "Embedding dimension mismatch" | Delete existing vectors and rebuild with correct model |
+
+## See Also
+
+- [HugeGraph-LLM Overview](./hugegraph-llm.md)
+- [REST API Reference](./rest-api.md)
+- [Quick Start Guide](./quick_start.md)
diff --git a/content/en/docs/quickstart/hugegraph-ai/hugegraph-llm.md b/content/en/docs/quickstart/hugegraph-ai/hugegraph-llm.md
index b64b1fa7d..509bb98be 100644
--- a/content/en/docs/quickstart/hugegraph-ai/hugegraph-llm.md
+++ b/content/en/docs/quickstart/hugegraph-ai/hugegraph-llm.md
@@ -4,11 +4,11 @@ linkTitle: "HugeGraph-LLM"
weight: 1
---
-> Please refer to the AI repository [README](https://github.com/apache/incubator-hugegraph-ai/tree/main/hugegraph-llm#readme) for the most up-to-date documentation, and the official website **regularly** is updated and synchronized.
+> Please refer to the AI repository [README](https://github.com/apache/hugegraph-ai/tree/main/hugegraph-llm#readme) for the most up-to-date documentation; the official website is updated and synchronized **regularly**.
> **Bridge the gap between Graph Databases and Large Language Models**
-> AI summarizes the project documentation: [](https://deepwiki.com/apache/incubator-hugegraph-ai)
+> AI summarizes the project documentation: [](https://deepwiki.com/apache/hugegraph-ai)
## 🎯 Overview
@@ -21,7 +21,7 @@ It enables seamless integration between HugeGraph and LLMs for building intellig
- 🗣️ **Natural Language Querying** - Operate graph databases using natural language (Gremlin/Cypher)
- 🔍 **Graph-Enhanced RAG** - Leverage knowledge graphs to improve answer accuracy (GraphRAG & Graph Agent)
-For detailed source code doc, visit our [DeepWiki](https://deepwiki.com/apache/incubator-hugegraph-ai) page. (Recommended)
+For detailed source code doc, visit our [DeepWiki](https://deepwiki.com/apache/hugegraph-ai) page. (Recommended)
## 📋 Prerequisites
@@ -92,8 +92,8 @@ docker run -itd --name=server -p 8080:8080 hugegraph/hugegraph
curl -LsSf https://astral.sh/uv/install.sh | sh
# 3. Clone and setup project
-git clone https://github.com/apache/incubator-hugegraph-ai.git
-cd incubator-hugegraph-ai/hugegraph-llm
+git clone https://github.com/apache/hugegraph-ai.git
+cd hugegraph-ai/hugegraph-llm
# 4. Create virtual environment and install dependencies
uv venv && source .venv/bin/activate
@@ -118,7 +118,7 @@ python -m hugegraph_llm.config.generate --update
```
> [!TIP]
-> Check our [Quick Start Guide](https://github.com/apache/incubator-hugegraph-ai/blob/main/hugegraph-llm/quick_start.md) for detailed usage examples and query logic explanations.
+> Check our [Quick Start Guide](https://github.com/apache/hugegraph-ai/blob/main/hugegraph-llm/quick_start.md) for detailed usage examples and query logic explanations.
## 💡 Usage Examples
@@ -133,7 +133,7 @@ Use the Gradio interface for visual knowledge graph building:
- **Files**: Upload TXT or DOCX files (multiple selection supported)
**Schema Configuration:**
-- **Custom Schema**: JSON format following our [template](https://github.com/apache/incubator-hugegraph-ai/blob/aff3bbe25fa91c3414947a196131be812c20ef11/hugegraph-llm/src/hugegraph_llm/config/config_data.py#L125)
+- **Custom Schema**: JSON format following our [template](https://github.com/apache/hugegraph-ai/blob/aff3bbe25fa91c3414947a196131be812c20ef11/hugegraph-llm/src/hugegraph_llm/config/config_data.py#L125)
- **HugeGraph Schema**: Use existing graph instance schema (e.g., "hugegraph")

@@ -224,7 +224,80 @@ After running the demo, configuration files are automatically generated:
> [!NOTE]
> Configuration changes are automatically saved when using the web interface. For manual changes, simply refresh the page to load updates.
-**LLM Provider Support**: This project uses [LiteLLM](https://docs.litellm.ai/docs/providers) for multi-provider LLM support.
+### LLM Provider Configuration
+
+This project uses [LiteLLM](https://docs.litellm.ai/docs/providers) for multi-provider LLM support, enabling unified access to OpenAI, Anthropic, Google, Cohere, and 100+ other providers.
+
+#### Option 1: Direct LLM Connection (OpenAI, Ollama)
+
+```bash
+# .env configuration
+chat_llm_type=openai # or ollama/local
+openai_api_key=sk-xxx
+openai_api_base=https://api.openai.com/v1
+openai_language_model=gpt-4o-mini
+openai_max_tokens=4096
+```
+
+#### Option 2: LiteLLM Multi-Provider Support
+
+LiteLLM acts as a unified proxy for multiple LLM providers:
+
+```bash
+# .env configuration
+chat_llm_type=litellm
+extract_llm_type=litellm
+text2gql_llm_type=litellm
+
+# LiteLLM settings
+litellm_api_base=http://localhost:4000 # LiteLLM proxy server
+litellm_api_key=sk-1234 # LiteLLM API key
+
+# Model selection (provider/model format)
+litellm_language_model=anthropic/claude-3-5-sonnet-20241022
+litellm_max_tokens=4096
+```
+
+**Supported Providers**: OpenAI, Anthropic, Google (Gemini), Azure, Cohere, Bedrock, Vertex AI, Hugging Face, and more.
+
+For full provider list and configuration details, visit [LiteLLM Providers](https://docs.litellm.ai/docs/providers).
+
+### Reranker Configuration
+
+Rerankers improve RAG accuracy by reordering retrieved results. Supported providers:
+
+```bash
+# Cohere Reranker
+reranker_type=cohere
+cohere_api_key=your-cohere-key
+cohere_rerank_model=rerank-english-v3.0
+
+# SiliconFlow Reranker
+reranker_type=siliconflow
+siliconflow_api_key=your-siliconflow-key
+siliconflow_rerank_model=BAAI/bge-reranker-v2-m3
+```
+
+### Text2Gremlin Configuration
+
+Convert natural language to Gremlin queries:
+
+```python
+from hugegraph_llm.operators.graph_rag_task import Text2GremlinPipeline
+
+# Initialize pipeline
+text2gremlin = Text2GremlinPipeline()
+
+# Generate Gremlin query
+result = (
+ text2gremlin
+ .query_to_gremlin(query="Find all movies directed by Francis Ford Coppola")
+ .execute_gremlin_query()
+ .run()
+)
+```
+
+**REST API Endpoint**: See the [REST API documentation](./rest-api.md) for HTTP endpoint details.
## 📚 Additional Resources
diff --git a/content/en/docs/quickstart/hugegraph-ai/hugegraph-ml.md b/content/en/docs/quickstart/hugegraph-ai/hugegraph-ml.md
new file mode 100644
index 000000000..86aae8d24
--- /dev/null
+++ b/content/en/docs/quickstart/hugegraph-ai/hugegraph-ml.md
@@ -0,0 +1,289 @@
+---
+title: "HugeGraph-ML"
+linkTitle: "HugeGraph-ML"
+weight: 2
+---
+
+HugeGraph-ML integrates HugeGraph with popular graph learning libraries, enabling end-to-end machine learning workflows directly on graph data.
+
+## Overview
+
+`hugegraph-ml` provides a unified interface for applying graph neural networks and machine learning algorithms to data stored in HugeGraph. It eliminates the need for complex data export/import pipelines by seamlessly converting HugeGraph data to formats compatible with leading ML frameworks.
+
+### Key Features
+
+- **Direct HugeGraph Integration**: Query graph data directly from HugeGraph without manual exports
+- **21 Implemented Algorithms**: Comprehensive coverage of node classification, graph classification, embedding, and link prediction
+- **DGL Backend**: Leverages Deep Graph Library (DGL) for efficient training
+- **End-to-End Workflows**: From data loading to model training and evaluation
+- **Modular Tasks**: Reusable task abstractions for common ML scenarios
+
+## Prerequisites
+
+- **Python**: 3.9+ (standalone module)
+- **HugeGraph Server**: 1.0+ (recommended: 1.5+)
+- **UV Package Manager**: 0.7+ (for dependency management)
+
+## Installation
+
+### 1. Start HugeGraph Server
+
+```bash
+# Option 1: Docker (recommended)
+docker run -itd --name=hugegraph -p 8080:8080 hugegraph/hugegraph
+
+# Option 2: Binary packages
+# See https://hugegraph.apache.org/docs/download/download/
+```
+
+### 2. Clone and Setup
+
+```bash
+git clone https://github.com/apache/hugegraph-ai.git
+cd hugegraph-ai/hugegraph-ml
+```
+
+### 3. Install Dependencies
+
+```bash
+# uv sync automatically creates .venv and installs all dependencies
+uv sync
+
+# Activate virtual environment
+source .venv/bin/activate
+```
+
+### 4. Navigate to Source Directory
+
+```bash
+cd ./src
+```
+
+> [!NOTE]
+> All examples assume you're in the activated virtual environment.
+
+## Implemented Algorithms
+
+HugeGraph-ML currently implements **21 graph machine learning algorithms** across multiple categories:
+
+### Node Classification (11 algorithms)
+
+Predict labels for graph nodes based on network structure and features.
+
+| Algorithm | Paper | Description |
+|-----------|-------|-------------|
+| **GCN** | [Kipf & Welling, 2017](https://arxiv.org/abs/1609.02907) | Graph Convolutional Networks |
+| **GAT** | [Veličković et al., 2018](https://arxiv.org/abs/1710.10903) | Graph Attention Networks |
+| **GraphSAGE** | [Hamilton et al., 2017](https://arxiv.org/abs/1706.02216) | Inductive representation learning |
+| **APPNP** | [Klicpera et al., 2019](https://arxiv.org/abs/1810.05997) | Personalized PageRank propagation |
+| **AGNN** | [Thekumparampil et al., 2018](https://arxiv.org/abs/1803.03735) | Attention-based GNN |
+| **ARMA** | [Bianchi et al., 2019](https://arxiv.org/abs/1901.01343) | Autoregressive moving average filters |
+| **DAGNN** | [Liu et al., 2020](https://arxiv.org/abs/2007.09296) | Deep adaptive graph neural networks |
+| **DeeperGCN** | [Li et al., 2020](https://arxiv.org/abs/2006.07739) | Very deep GCN architectures |
+| **GRAND** | [Feng et al., 2020](https://arxiv.org/abs/2005.11079) | Graph random neural networks |
+| **JKNet** | [Xu et al., 2018](https://arxiv.org/abs/1806.03536) | Jumping knowledge networks |
+| **Cluster-GCN** | [Chiang et al., 2019](https://arxiv.org/abs/1905.07953) | Scalable GCN training via clustering |
+
+### Graph Classification (2 algorithms)
+
+Classify entire graphs based on their structure and node features.
+
+| Algorithm | Paper | Description |
+|-----------|-------|-------------|
+| **DiffPool** | [Ying et al., 2018](https://arxiv.org/abs/1806.08804) | Differentiable graph pooling |
+| **GIN** | [Xu et al., 2019](https://arxiv.org/abs/1810.00826) | Graph isomorphism networks |
+
+### Graph Embedding (3 algorithms)
+
+Learn unsupervised node representations for downstream tasks.
+
+| Algorithm | Paper | Description |
+|-----------|-------|-------------|
+| **DGI** | [Veličković et al., 2019](https://arxiv.org/abs/1809.10341) | Deep graph infomax (contrastive learning) |
+| **BGRL** | [Thakoor et al., 2021](https://arxiv.org/abs/2102.06514) | Bootstrapped graph representation learning |
+| **GRACE** | [Zhu et al., 2020](https://arxiv.org/abs/2006.04131) | Graph contrastive learning |
+
+### Link Prediction (3 algorithms)
+
+Predict missing or future connections in graphs.
+
+| Algorithm | Paper | Description |
+|-----------|-------|-------------|
+| **SEAL** | [Zhang & Chen, 2018](https://arxiv.org/abs/1802.09691) | Subgraph extraction and labeling |
+| **P-GNN** | [You et al., 2019](http://proceedings.mlr.press/v97/you19b/you19b.pdf) | Position-aware GNN |
+| **GATNE** | [Cen et al., 2019](https://arxiv.org/abs/1905.01669) | Attributed multiplex heterogeneous network embedding |
+
+### Fraud Detection (2 algorithms)
+
+Detect anomalous nodes in graphs (e.g., fraudulent accounts).
+
+| Algorithm | Paper | Description |
+|-----------|-------|-------------|
+| **CARE-GNN** | [Dou et al., 2020](https://arxiv.org/abs/2008.08692) | Camouflage-resistant GNN |
+| **BGNN** | [Ivanov & Prokhorenkova, 2021](https://arxiv.org/abs/2101.08543) | Boosted GNN (gradient boosting combined with GNN) |
+
+### Post-Processing (1 algorithm)
+
+Improve predictions via label propagation.
+
+| Algorithm | Paper | Description |
+|-----------|-------|-------------|
+| **C&S** | [Huang et al., 2020](https://arxiv.org/abs/2010.13993) | Correct & Smooth (prediction refinement) |
+
+## Usage Examples
+
+### Example 1: Node Embedding with DGI
+
+Perform unsupervised node embedding on the Cora dataset using Deep Graph Infomax (DGI).
+
+#### Step 1: Import Dataset (if needed)
+
+```python
+from hugegraph_ml.utils.dgl2hugegraph_utils import import_graph_from_dgl
+
+# Import Cora dataset from DGL to HugeGraph
+import_graph_from_dgl("cora")
+```
+
+#### Step 2: Convert Graph Data
+
+```python
+from hugegraph_ml.data.hugegraph2dgl import HugeGraph2DGL
+
+# Convert HugeGraph data to DGL format
+hg2d = HugeGraph2DGL()
+graph = hg2d.convert_graph(vertex_label="CORA_vertex", edge_label="CORA_edge")
+```
+
+#### Step 3: Initialize Model
+
+```python
+from hugegraph_ml.models.dgi import DGI
+
+# Create DGI model
+model = DGI(n_in_feats=graph.ndata["feat"].shape[1])
+```
+
+#### Step 4: Train and Generate Embeddings
+
+```python
+from hugegraph_ml.tasks.node_embed import NodeEmbed
+
+# Train model and generate node embeddings
+node_embed_task = NodeEmbed(graph=graph, model=model)
+embedded_graph = node_embed_task.train_and_embed(
+ add_self_loop=True,
+ n_epochs=300,
+ patience=30
+)
+```
+
+#### Step 5: Downstream Task (Node Classification)
+
+```python
+from hugegraph_ml.models.mlp import MLPClassifier
+from hugegraph_ml.tasks.node_classify import NodeClassify
+
+# Use embeddings for node classification
+model = MLPClassifier(
+ n_in_feat=embedded_graph.ndata["feat"].shape[1],
+ n_out_feat=embedded_graph.ndata["label"].unique().shape[0]
+)
+node_clf_task = NodeClassify(graph=embedded_graph, model=model)
+node_clf_task.train(lr=1e-3, n_epochs=400, patience=40)
+print(node_clf_task.evaluate())
+```
+
+**Expected Output:**
+```python
+{'accuracy': 0.82, 'loss': 0.5714246034622192}
+```
+
+**Full Example**: See [dgi_example.py](https://github.com/apache/hugegraph-ai/blob/main/hugegraph-ml/src/hugegraph_ml/examples/dgi_example.py)
+
+### Example 2: Node Classification with GRAND
+
+Directly classify nodes using the GRAND model (no separate embedding step needed).
+
+```python
+from hugegraph_ml.data.hugegraph2dgl import HugeGraph2DGL
+from hugegraph_ml.models.grand import GRAND
+from hugegraph_ml.tasks.node_classify import NodeClassify
+
+# Load graph
+hg2d = HugeGraph2DGL()
+graph = hg2d.convert_graph(vertex_label="CORA_vertex", edge_label="CORA_edge")
+
+# Initialize GRAND model
+model = GRAND(
+ n_in_feats=graph.ndata["feat"].shape[1],
+ n_out_feats=graph.ndata["label"].unique().shape[0]
+)
+
+# Train and evaluate
+node_clf_task = NodeClassify(graph=graph, model=model)
+node_clf_task.train(lr=1e-2, n_epochs=1500, patience=100)
+print(node_clf_task.evaluate())
+```
+
+**Full Example**: See [grand_example.py](https://github.com/apache/hugegraph-ai/blob/main/hugegraph-ml/src/hugegraph_ml/examples/grand_example.py)
+
+## Core Components
+
+### HugeGraph2DGL Converter
+
+Seamlessly converts HugeGraph data to DGL graph format:
+
+```python
+from hugegraph_ml.data.hugegraph2dgl import HugeGraph2DGL
+
+hg2d = HugeGraph2DGL()
+graph = hg2d.convert_graph(
+ vertex_label="person", # Vertex label to extract
+ edge_label="knows", # Edge label to extract
+ directed=False # Graph directionality
+)
+```
+
+### Task Abstractions
+
+Reusable task objects for common ML workflows:
+
+| Task | Class | Purpose |
+|------|-------|---------|
+| Node Embedding | `NodeEmbed` | Generate unsupervised node embeddings |
+| Node Classification | `NodeClassify` | Predict node labels |
+| Graph Classification | `GraphClassify` | Predict graph-level labels |
+| Link Prediction | `LinkPredict` | Predict missing edges |
+
+## Best Practices
+
+1. **Start with Small Datasets**: Test your pipeline on small graphs (e.g., Cora, Citeseer) before scaling
+2. **Use Early Stopping**: Set `patience` parameter to avoid overfitting
+3. **Tune Hyperparameters**: Adjust learning rate, hidden dimensions, and epochs based on dataset size
+4. **Monitor GPU Memory**: Large graphs may require batch training (e.g., Cluster-GCN)
+5. **Validate Schema**: Ensure vertex/edge labels match your HugeGraph schema
+
+## Troubleshooting
+
+| Issue | Solution |
+|-------|----------|
+| "Connection refused" to HugeGraph | Verify server is running on port 8080 |
+| CUDA out of memory | Reduce batch size or use CPU-only mode |
+| Model convergence issues | Try different learning rates (1e-2, 1e-3, 1e-4) |
+| ImportError for DGL | Run `uv sync` to reinstall dependencies |
+
+## Contributing
+
+To add a new algorithm:
+
+1. Create model file in `src/hugegraph_ml/models/your_model.py`
+2. Inherit from base model class and implement `forward()` method
+3. Add example script in `src/hugegraph_ml/examples/`
+4. Update this documentation with algorithm details
+
+## See Also
+
+- [HugeGraph-AI Overview](../_index.md) - Full AI ecosystem
+- [HugeGraph-LLM](./hugegraph-llm.md) - RAG and knowledge graph construction
+- [GitHub Repository](https://github.com/apache/hugegraph-ai/tree/main/hugegraph-ml) - Source code and examples
diff --git a/content/en/docs/quickstart/hugegraph-ai/quick_start.md b/content/en/docs/quickstart/hugegraph-ai/quick_start.md
index 58e367787..04852db52 100644
--- a/content/en/docs/quickstart/hugegraph-ai/quick_start.md
+++ b/content/en/docs/quickstart/hugegraph-ai/quick_start.md
@@ -207,3 +207,63 @@ graph TD;
# 5. Graph Tools
Input Gremlin queries to execute corresponding operations.
+
+# 6. Language Switching (v1.5.0+)
+
+HugeGraph-LLM supports bilingual prompts for improved accuracy across languages.
+
+### Switching Between English and Chinese
+
+The system language affects:
+- **System prompts**: Internal prompts used by the LLM
+- **Keyword extraction**: Language-specific extraction logic
+- **Answer generation**: Response formatting and style
+
+#### Configuration Method 1: Environment Variable
+
+Edit your `.env` file:
+
+```bash
+# English prompts (default)
+LANGUAGE=EN
+
+# Chinese prompts
+LANGUAGE=CN
+```
+
+Restart the service after changing the language setting.
+
+#### Configuration Method 2: Web UI (Dynamic)
+
+If available in your deployment, use the settings panel in the Web UI to switch languages without restarting:
+
+1. Navigate to the **Settings** or **Configuration** tab
+2. Select **Language**: `EN` or `CN`
+3. Click **Save** - changes apply immediately
+
+#### Language-Specific Behavior
+
+| Language | Keyword Extraction | Answer Style | Use Case |
+|----------|-------------------|--------------|----------|
+| `EN` | English NLP models | Professional, concise | International users, English documents |
+| `CN` | Chinese NLP models | Natural Chinese phrasing | Chinese users, Chinese documents |
+
+> [!TIP]
+> Match the `LANGUAGE` setting to your primary document language for best RAG accuracy.
+
+### REST API Language Override
+
+When using the REST API, you can specify custom prompts per request to override the default language setting:
+
+```bash
+curl -X POST http://localhost:8001/rag \
+ -H "Content-Type: application/json" \
+ -d '{
+ "query": "告诉我关于阿尔·帕西诺的信息",
+ "graph_only": true,
+ "keywords_extract_prompt": "请从以下文本中提取关键实体...",
+ "answer_prompt": "请根据以下上下文回答问题..."
+ }'
+```
+
+See the [REST API Reference](./rest-api.md) for complete parameter details.
diff --git a/content/en/docs/quickstart/hugegraph-ai/rest-api.md b/content/en/docs/quickstart/hugegraph-ai/rest-api.md
new file mode 100644
index 000000000..484afdac8
--- /dev/null
+++ b/content/en/docs/quickstart/hugegraph-ai/rest-api.md
@@ -0,0 +1,428 @@
+---
+title: "REST API Reference"
+linkTitle: "REST API"
+weight: 5
+---
+
+HugeGraph-LLM provides REST API endpoints for integrating RAG and Text2Gremlin capabilities into your applications.
+
+## Base URL
+
+```
+http://localhost:8001
+```
+
+Change host/port as configured when starting the service:
+```bash
+python -m hugegraph_llm.demo.rag_demo.app --host 127.0.0.1 --port 8001
+```
+
+## Authentication
+
+Currently, the API supports optional token-based authentication:
+
+```bash
+# Enable authentication in .env
+ENABLE_LOGIN=true
+USER_TOKEN=your-user-token
+ADMIN_TOKEN=your-admin-token
+```
+
+Pass tokens in request headers:
+```bash
+Authorization: Bearer <your-token>
+```
+
+---
+
+## RAG Endpoints
+
+### 1. Complete RAG Query
+
+**POST** `/rag`
+
+Execute a full RAG pipeline including keyword extraction, graph retrieval, vector search, reranking, and answer generation.
+
+#### Request Body
+
+```json
+{
+ "query": "Tell me about Al Pacino's movies",
+ "raw_answer": false,
+ "vector_only": false,
+ "graph_only": true,
+ "graph_vector_answer": false,
+ "graph_ratio": 0.5,
+ "rerank_method": "cohere",
+ "near_neighbor_first": false,
+ "gremlin_tmpl_num": 5,
+ "max_graph_items": 30,
+ "topk_return_results": 20,
+ "vector_dis_threshold": 0.9,
+ "topk_per_keyword": 1,
+ "custom_priority_info": "",
+ "answer_prompt": "",
+ "keywords_extract_prompt": "",
+ "gremlin_prompt": "",
+ "client_config": {
+ "url": "127.0.0.1:8080",
+ "graph": "hugegraph",
+ "user": "admin",
+ "pwd": "admin",
+ "gs": ""
+ }
+}
+```
+
+**Parameters:**
+
+| Field | Type | Required | Default | Description |
+|-------|------|----------|---------|-------------|
+| `query` | string | Yes | - | User's natural language question |
+| `raw_answer` | boolean | No | false | Return LLM answer without retrieval |
+| `vector_only` | boolean | No | false | Use only vector search (no graph) |
+| `graph_only` | boolean | No | false | Use only graph retrieval (no vector) |
+| `graph_vector_answer` | boolean | No | false | Combine graph and vector results |
+| `graph_ratio` | float | No | 0.5 | Ratio of graph vs vector results (0-1) |
+| `rerank_method` | string | No | "" | Reranker: "cohere", "siliconflow", "" |
+| `near_neighbor_first` | boolean | No | false | Prioritize direct neighbors |
+| `gremlin_tmpl_num` | integer | No | 5 | Number of Gremlin templates to try |
+| `max_graph_items` | integer | No | 30 | Max items from graph retrieval |
+| `topk_return_results` | integer | No | 20 | Top-K after reranking |
+| `vector_dis_threshold` | float | No | 0.9 | Vector similarity threshold (0-1) |
+| `topk_per_keyword` | integer | No | 1 | Top-K vectors per keyword |
+| `custom_priority_info` | string | No | "" | Custom context to prioritize |
+| `answer_prompt` | string | No | "" | Custom answer generation prompt |
+| `keywords_extract_prompt` | string | No | "" | Custom keyword extraction prompt |
+| `gremlin_prompt` | string | No | "" | Custom Gremlin generation prompt |
+| `client_config` | object | No | null | Override graph connection settings |
+
+#### Response
+
+```json
+{
+ "query": "Tell me about Al Pacino's movies",
+ "graph_only": {
+ "answer": "Al Pacino starred in The Godfather (1972), directed by Francis Ford Coppola...",
+ "context": ["The Godfather is a 1972 crime film...", "..."],
+ "graph_paths": ["..."],
+ "keywords": ["Al Pacino", "movies"]
+ }
+}
+```
+
+#### Example (curl)
+
+```bash
+curl -X POST http://localhost:8001/rag \
+ -H "Content-Type: application/json" \
+ -d '{
+ "query": "Tell me about Al Pacino",
+ "graph_only": true,
+ "max_graph_items": 30
+ }'
+```
+
+### 2. Graph Retrieval Only
+
+**POST** `/rag/graph`
+
+Retrieve graph context without generating an answer. Useful for debugging or custom processing.
+
+#### Request Body
+
+```json
+{
+ "query": "Al Pacino movies",
+ "max_graph_items": 30,
+ "topk_return_results": 20,
+ "vector_dis_threshold": 0.9,
+ "topk_per_keyword": 1,
+ "gremlin_tmpl_num": 5,
+ "rerank_method": "cohere",
+ "near_neighbor_first": false,
+ "custom_priority_info": "",
+ "gremlin_prompt": "",
+ "get_vertex_only": false,
+ "client_config": {
+ "url": "127.0.0.1:8080",
+ "graph": "hugegraph",
+ "user": "admin",
+ "pwd": "admin",
+ "gs": ""
+ }
+}
+```
+
+**Additional Parameter:**
+
+| Field | Type | Default | Description |
+|-------|------|---------|-------------|
+| `get_vertex_only` | boolean | false | Return only vertex IDs without full details |
+
+#### Response
+
+```json
+{
+ "graph_recall": {
+ "query": "Al Pacino movies",
+ "keywords": ["Al Pacino", "movies"],
+ "match_vids": ["1:Al Pacino", "2:The Godfather"],
+ "graph_result_flag": true,
+ "gremlin": "g.V('1:Al Pacino').outE().inV().limit(30)",
+ "graph_result": [
+ {"id": "1:Al Pacino", "label": "person", "properties": {"name": "Al Pacino"}},
+ {"id": "2:The Godfather", "label": "movie", "properties": {"title": "The Godfather"}}
+ ],
+ "vertex_degree_list": [5, 12]
+ }
+}
+```
+
+#### Example (curl)
+
+```bash
+curl -X POST http://localhost:8001/rag/graph \
+ -H "Content-Type: application/json" \
+ -d '{
+ "query": "Al Pacino",
+ "max_graph_items": 30,
+ "get_vertex_only": false
+ }'
+```
+
+---
+
+## Text2Gremlin Endpoint
+
+### 3. Natural Language to Gremlin
+
+**POST** `/text2gremlin`
+
+Convert natural language queries to executable Gremlin commands.
+
+#### Request Body
+
+```json
+{
+ "query": "Find all movies directed by Francis Ford Coppola",
+ "example_num": 5,
+ "gremlin_prompt": "",
+ "output_types": ["GREMLIN", "RESULT"],
+ "client_config": {
+ "url": "127.0.0.1:8080",
+ "graph": "hugegraph",
+ "user": "admin",
+ "pwd": "admin",
+ "gs": ""
+ }
+}
+```
+
+**Parameters:**
+
+| Field | Type | Required | Default | Description |
+|-------|------|----------|---------|-------------|
+| `query` | string | Yes | - | Natural language query |
+| `example_num` | integer | No | 5 | Number of example templates to use |
+| `gremlin_prompt` | string | No | "" | Custom prompt for Gremlin generation |
+| `output_types` | array | No | null | Output types: ["GREMLIN", "RESULT", "CYPHER"] |
+| `client_config` | object | No | null | Graph connection override |
+
+**Output Types:**
+- `GREMLIN`: Generated Gremlin query
+- `RESULT`: Execution result from graph
+- `CYPHER`: Cypher query (if requested)
+
+#### Response
+
+```json
+{
+ "gremlin": "g.V().has('person','name','Francis Ford Coppola').out('directed').hasLabel('movie').values('title')",
+ "result": [
+ "The Godfather",
+ "The Godfather Part II",
+ "Apocalypse Now"
+ ]
+}
+```
+
+#### Example (curl)
+
+```bash
+curl -X POST http://localhost:8001/text2gremlin \
+ -H "Content-Type: application/json" \
+ -d '{
+ "query": "Find all movies directed by Francis Ford Coppola",
+ "output_types": ["GREMLIN", "RESULT"]
+ }'
+```
+
+---
+
+## Configuration Endpoints
+
+### 4. Update Graph Connection
+
+**POST** `/config/graph`
+
+Dynamically update HugeGraph connection settings.
+
+#### Request Body
+
+```json
+{
+ "url": "127.0.0.1:8080",
+ "name": "hugegraph",
+ "user": "admin",
+ "pwd": "admin",
+ "gs": ""
+}
+```
+
+#### Response
+
+```json
+{
+ "status_code": 201,
+ "message": "Graph configuration updated successfully"
+}
+```
+
+### 5. Update LLM Configuration
+
+**POST** `/config/llm`
+
+Update chat/extract LLM settings at runtime.
+
+#### Request Body (OpenAI)
+
+```json
+{
+ "llm_type": "openai",
+ "api_key": "sk-your-api-key",
+ "api_base": "https://api.openai.com/v1",
+ "language_model": "gpt-4o-mini",
+ "max_tokens": 4096
+}
+```
+
+#### Request Body (Ollama)
+
+```json
+{
+ "llm_type": "ollama/local",
+ "host": "127.0.0.1",
+ "port": 11434,
+ "language_model": "llama3.1:8b"
+}
+```
+
+### 6. Update Embedding Configuration
+
+**POST** `/config/embedding`
+
+Update embedding model settings.
+
+#### Request Body
+
+```json
+{
+ "llm_type": "openai",
+ "api_key": "sk-your-api-key",
+ "api_base": "https://api.openai.com/v1",
+ "language_model": "text-embedding-3-small"
+}
+```
+
+### 7. Update Reranker Configuration
+
+**POST** `/config/rerank`
+
+Configure reranker settings.
+
+#### Request Body (Cohere)
+
+```json
+{
+ "reranker_type": "cohere",
+ "api_key": "your-cohere-key",
+ "reranker_model": "rerank-multilingual-v3.0",
+ "cohere_base_url": "https://api.cohere.com/v1/rerank"
+}
+```
+
+#### Request Body (SiliconFlow)
+
+```json
+{
+ "reranker_type": "siliconflow",
+ "api_key": "your-siliconflow-key",
+ "reranker_model": "BAAI/bge-reranker-v2-m3"
+}
+```
+
+---
+
+## Error Responses
+
+All endpoints return standard HTTP status codes:
+
+| Code | Meaning |
+|------|---------|
+| 200 | Success |
+| 201 | Created (config updated) |
+| 400 | Bad Request (invalid parameters) |
+| 500 | Internal Server Error |
+| 501 | Not Implemented |
+
+Error response format:
+```json
+{
+ "detail": "Error message describing what went wrong"
+}
+```
+
+---
+
+## Python Client Example
+
+```python
+import requests
+
+BASE_URL = "http://localhost:8001"
+
+# 1. Configure graph connection
+graph_config = {
+ "url": "127.0.0.1:8080",
+ "name": "hugegraph",
+ "user": "admin",
+ "pwd": "admin"
+}
+requests.post(f"{BASE_URL}/config/graph", json=graph_config)
+
+# 2. Execute RAG query
+rag_request = {
+ "query": "Tell me about Al Pacino",
+ "graph_only": True,
+ "max_graph_items": 30
+}
+response = requests.post(f"{BASE_URL}/rag", json=rag_request)
+print(response.json())
+
+# 3. Generate Gremlin from natural language
+text2gql_request = {
+ "query": "Find all directors who worked with Al Pacino",
+ "output_types": ["GREMLIN", "RESULT"]
+}
+response = requests.post(f"{BASE_URL}/text2gremlin", json=text2gql_request)
+print(response.json())
+```
+
+---
+
+## See Also
+
+- [Configuration Reference](./config-reference.md) - Complete .env configuration guide
+- [HugeGraph-LLM Overview](./hugegraph-llm.md) - Architecture and features
+- [Quick Start Guide](./quick_start.md) - Getting started with the Web UI
diff --git a/content/en/docs/quickstart/hugegraph-spark.md b/content/en/docs/quickstart/hugegraph-spark.md
index 2184e093b..92a287e23 100644
--- a/content/en/docs/quickstart/hugegraph-spark.md
+++ b/content/en/docs/quickstart/hugegraph-spark.md
@@ -11,7 +11,7 @@ HugeGraph-Spark 是一个连接 HugeGraph 和 Spark GraphX 的工具,能够读
### 2 环境依赖
-在使用 HugeGraph-Spark 前,需要依赖 HugeGraph Server 服务,下载和启动 Server 请参考 [HugeGraph-Server Quick Start](/docs/quickstart/hugegraph-server)。另外,由于 HugeGraph-Spark 需要使用 Spark GraphX,所以还需要下载 spark,本文的示例使用的是 apache-spark-2.1.1。
+Before using HugeGraph-Spark, you need a running HugeGraph-Server. Please refer to [HugeGraph-Server Quick Start](/docs/quickstart/hugegraph/hugegraph-server) for downloading and starting the server. Additionally, since HugeGraph-Spark relies on Spark GraphX, you also need to download Spark. This example uses apache-spark-2.1.1.
```
wget https://archive.apache.org/dist/spark/spark-2.1.1/spark-2.1.1-bin-hadoop2.7.tgz
diff --git a/content/en/docs/quickstart/hugegraph-studio.md b/content/en/docs/quickstart/hugegraph-studio.md
index e6503a0fc..864b5589f 100644
--- a/content/en/docs/quickstart/hugegraph-studio.md
+++ b/content/en/docs/quickstart/hugegraph-studio.md
@@ -145,7 +145,7 @@ graph.schema().propertyKey("price").asInt().ifNotExist().create()
**在这里有几点需要说明**
1、上述语句是`groovy`语言形式(类似但不是`java`)的`gremlin`语句,这些`gremlin`语句会被发送到`HugeGraphServer`上执行。
-关于`gremlin`本身可以参考[Gremlin Query Language](/language/hugegraph-gremlin.md)或[Tinkerpop官网](http://tinkerpop.apache.org/);
+关于`gremlin`本身可以参考[Gremlin Query Language](../language/hugegraph-gremlin)或[Tinkerpop官网](http://tinkerpop.apache.org/);
2、上述语句是通过`graph.schema()`获取到`SchemaManager`对象后操作元数据,通过`gremlin`语句操作Schema可参考文档[HugeGraph-Client](/docs/clients/hugegraph-client),
需要注意的是`HugeGraph-Client`是`java`语法,大体上与`gremlin`风格是一致的,具体的差异见文档`HugeGraph-Client`中的说明。
diff --git a/content/en/docs/quickstart/hugegraph/_index.md b/content/en/docs/quickstart/hugegraph/_index.md
index e35040e3f..26bffacbc 100644
--- a/content/en/docs/quickstart/hugegraph/_index.md
+++ b/content/en/docs/quickstart/hugegraph/_index.md
@@ -4,8 +4,8 @@ linkTitle: "HugeGraph (OLTP)"
weight: 1
---
-## 🚀 Best practice: Prioritize using DeepWiki intelligent documents
+> DeepWiki provides continuously updated project documentation that is more comprehensive and accurate, making it well suited for quickly getting up to speed with the latest state of the project.
+>
+> 📖 [https://deepwiki.com/apache/hugegraph](https://deepwiki.com/apache/hugegraph)
-> To address the issue of outdated static documents, we provide DeepWiki with **real-time updates and more comprehensive content**. It is equivalent to an expert with the latest knowledge of the project, which is very suitable for **all developers** to read and consult before starting the project.
-
-**👉 Strongly recommend visiting and having a conversation with:** [**incubator-hugegraph**](https://deepwiki.com/apache/incubator-hugegraph)
+**GitHub Access:** [https://github.com/apache/hugegraph](https://github.com/apache/hugegraph)
diff --git a/content/en/docs/quickstart/hugegraph/hugegraph-hstore.md b/content/en/docs/quickstart/hugegraph/hugegraph-hstore.md
index a61d9f348..2d82a48d5 100644
--- a/content/en/docs/quickstart/hugegraph/hugegraph-hstore.md
+++ b/content/en/docs/quickstart/hugegraph/hugegraph-hstore.md
@@ -30,7 +30,7 @@ Download the latest version of HugeGraph-Store from the Apache HugeGraph officia
```bash
# Replace {version} with the latest version number, e.g., 1.5.0
-wget https://downloads.apache.org/incubator/hugegraph/{version}/apache-hugegraph-incubating-{version}.tar.gz
+wget https://downloads.apache.org/hugegraph/{version}/apache-hugegraph-incubating-{version}.tar.gz
tar zxf apache-hugegraph-incubating-{version}.tar.gz
cd apache-hugegraph-incubating-{version}/apache-hugegraph-hstore-incubating-{version}
```
diff --git a/content/en/docs/quickstart/hugegraph/hugegraph-pd.md b/content/en/docs/quickstart/hugegraph/hugegraph-pd.md
index 7a520a6d9..c8b6cf401 100644
--- a/content/en/docs/quickstart/hugegraph/hugegraph-pd.md
+++ b/content/en/docs/quickstart/hugegraph/hugegraph-pd.md
@@ -29,7 +29,7 @@ Download the latest version of HugeGraph-PD from the Apache HugeGraph official d
```bash
# Replace {version} with the latest version number, e.g., 1.5.0
-wget https://downloads.apache.org/incubator/hugegraph/{version}/apache-hugegraph-incubating-{version}.tar.gz
+wget https://downloads.apache.org/hugegraph/{version}/apache-hugegraph-incubating-{version}.tar.gz
tar zxf apache-hugegraph-incubating-{version}.tar.gz
cd apache-hugegraph-incubating-{version}/apache-hugegraph-pd-incubating-{version}
```
diff --git a/content/en/docs/quickstart/hugegraph/hugegraph-server.md b/content/en/docs/quickstart/hugegraph/hugegraph-server.md
index 226238ae2..ee70e6d2d 100644
--- a/content/en/docs/quickstart/hugegraph/hugegraph-server.md
+++ b/content/en/docs/quickstart/hugegraph/hugegraph-server.md
@@ -8,7 +8,9 @@ weight: 1
`HugeGraph-Server` is the core part of the HugeGraph Project, contains submodules such as graph-core, backend, API.
-The Core Module is an implementation of the Tinkerpop interface; The Backend module is used to save the graph data to the data store, currently supported backends include: Memory, Cassandra, ScyllaDB, RocksDB; The API Module provides HTTP Server, which converts Client's HTTP request into a call to Core Module.
+The Core Module is an implementation of the Tinkerpop interface; The Backend module is used to save the graph data to the data store. For version 1.7.0+, supported backends include: RocksDB (standalone default), HStore (distributed), HBase, and Memory. The API Module provides HTTP Server, which converts Client's HTTP request into a call to Core Module.
+
+> ⚠️ **Important Change**: Starting from version 1.7.0, legacy backends such as MySQL, PostgreSQL, Cassandra, and ScyllaDB have been removed. If you need to use these backends, please use version 1.5.x or earlier.
> There will be two spellings HugeGraph-Server and HugeGraphServer in the document, and other
> modules are similar. There is no big difference in the meaning of these two ways,
@@ -40,13 +42,13 @@ There are four ways to deploy HugeGraph-Server components:
#### 3.1 Use Docker container (Convenient for Test/Dev)
-You can refer to the [Docker deployment guide](https://github.com/apache/incubator-hugegraph/blob/master/hugegraph-server/hugegraph-dist/docker/README.md).
+You can refer to the [Docker deployment guide](https://github.com/apache/hugegraph/blob/master/hugegraph-server/hugegraph-dist/docker/README.md).
-We can use `docker run -itd --name=server -p 8080:8080 -e PASSWORD=xxx hugegraph/hugegraph:1.5.0` to quickly start a `HugeGraph Server` with a built-in `RocksDB` backend.
+We can use `docker run -itd --name=server -p 8080:8080 -e PASSWORD=xxx hugegraph/hugegraph:1.7.0` to quickly start a `HugeGraph Server` with a built-in `RocksDB` backend.
-Optional:
+Optional:
1. use `docker exec -it graph bash` to enter the container to do some operations.
-2. use `docker run -itd --name=graph -p 8080:8080 -e PRELOAD="true" hugegraph/hugegraph:1.5.0` to start with a **built-in** example graph. We can use `RESTful API` to verify the result. The detailed step can refer to [5.1.8](#518-create-an-example-graph-when-startup)
+2. use `docker run -itd --name=graph -p 8080:8080 -e PRELOAD="true" hugegraph/hugegraph:1.7.0` to start with a **built-in** example graph. We can use `RESTful API` to verify the result. The detailed step can refer to [5.1.8](#518-create-an-example-graph-when-startup)
3. use `-e PASSWORD=xxx` to enable auth mode and set the password for admin. You can find more details from [Config Authentication](/docs/config/config-authentication#use-docker-to-enable-authentication-mode)
If you use docker desktop, you can set the option like:
@@ -60,7 +62,7 @@ Also, if we want to manage the other Hugegraph related instances in one file, we
version: '3'
services:
server:
- image: hugegraph/hugegraph:1.5.0
+ image: hugegraph/hugegraph:1.7.0
container_name: server
environment:
- PASSWORD=xxx
@@ -75,19 +77,19 @@ services:
>
> 1. The docker image of the hugegraph is a convenient release to start it quickly, but not **official distribution** artifacts. You can find more details from [ASF Release Distribution Policy](https://infra.apache.org/release-distribution.html#dockerhub).
>
-> 2. Recommend to use `release tag` (like `1.5.0`/`1.x.0`) for the stable version. Use `latest` tag to experience the newest functions in development.
+> 2. Recommend to use `release tag` (like `1.7.0`/`1.x.0`) for the stable version. Use `latest` tag to experience the newest functions in development.
#### 3.2 Download the binary tar tarball
You could download the binary tarball from the download page of the ASF site like this:
```bash
-# use the latest version, here is 1.5.0 for example
-wget https://downloads.apache.org/incubator/hugegraph/{version}/apache-hugegraph-incubating-{version}.tar.gz
+# use the latest version, here is 1.7.0 for example
+wget https://downloads.apache.org/hugegraph/{version}/apache-hugegraph-incubating-{version}.tar.gz
tar zxf *hugegraph*.tar.gz
# (Optional) verify the integrity with SHA512 (recommended)
shasum -a 512 apache-hugegraph-incubating-{version}.tar.gz
-curl https://downloads.apache.org/incubator/hugegraph/{version}/apache-hugegraph-incubating-{version}.tar.gz.sha512
+curl https://downloads.apache.org/hugegraph/{version}/apache-hugegraph-incubating-{version}.tar.gz.sha512
```
#### 3.3 Source code compilation
@@ -100,12 +102,12 @@ Download HugeGraph **source code** in either of the following 2 ways (so as the
```bash
# Way 1. download release package from the ASF site
-wget https://downloads.apache.org/incubator/hugegraph/{version}/apache-hugegraph-incubating-src-{version}.tar.gz
+wget https://downloads.apache.org/hugegraph/{version}/apache-hugegraph-incubating-src-{version}.tar.gz
tar zxf *hugegraph*.tar.gz
# (Optional) verify the integrity with SHA512 (recommended)
shasum -a 512 apache-hugegraph-incubating-src-{version}.tar.gz
-curl https://downloads.apache.org/incubator/hugegraph/{version}/apache-hugegraph-incubating-{version}-src.tar.gz.sha512
+curl https://downloads.apache.org/hugegraph/{version}/apache-hugegraph-incubating-{version}-src.tar.gz.sha512
# Way2 : clone the latest code by git way (e.g GitHub)
git clone https://github.com/apache/hugegraph.git
@@ -156,8 +158,8 @@ Of course, you should download the tarball of `HugeGraph-Toolchain` first.
```bash
# download toolchain binary package, it includes loader + tool + hubble
-# please check the latest version (e.g. here is 1.5.0)
-wget https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0.tar.gz
+# please check the latest version (e.g. here is 1.7.0)
+wget https://downloads.apache.org/hugegraph/1.7.0/apache-hugegraph-toolchain-incubating-1.7.0.tar.gz
tar zxf *hugegraph-*.tar.gz
# enter the tool's package
@@ -216,10 +218,31 @@ task.scheduler_type=distributed
pd.peers=127.0.0.1:8686,127.0.0.1:8687,127.0.0.1:8688
```
+```properties
+# Simple example (with authentication)
+gremlin.graph=org.apache.hugegraph.auth.HugeFactoryAuthProxy
+
+# Specify storage backend hstore
+backend=hstore
+serializer=binary
+store=hugegraph
+
+# Specify the task scheduler (the distributed scheduler is required when using the hstore backend in versions 1.7.0 and earlier)
+task.scheduler_type=distributed
+
+# pd config
+pd.peers=127.0.0.1:8686
+```
+
Then enable PD discovery in `rest-server.properties` (required for every HugeGraph-Server node):
```properties
usePD=true
+
+# Note: this configuration is required in version 1.7.0
+pd.peers=127.0.0.1:8686,127.0.0.1:8687,127.0.0.1:8688
+# If auth is needed
+# auth.authenticator=org.apache.hugegraph.auth.StandardAuthenticator
```
If configuring multiple HugeGraph-Server nodes, you need to modify the `rest-server.properties` configuration file for each node, for example:
@@ -229,6 +252,7 @@ Node 1 (Master node):
usePD=true
restserver.url=http://127.0.0.1:8081
gremlinserver.url=http://127.0.0.1:8181
+pd.peers=127.0.0.1:8686
rpc.server_host=127.0.0.1
rpc.server_port=8091
@@ -242,6 +266,7 @@ Node 2 (Worker node):
usePD=true
restserver.url=http://127.0.0.1:8082
gremlinserver.url=http://127.0.0.1:8182
+pd.peers=127.0.0.1:8686
rpc.server_host=127.0.0.1
rpc.server_port=8092
@@ -363,6 +388,8 @@ Connecting to HugeGraphServer (http://127.0.0.1:8080/graphs)....OK
##### 5.1.4 Cassandra
+> ⚠️ **Deprecated**: This backend has been removed starting from HugeGraph 1.7.0. If you need to use it, please refer to version 1.5.x documentation.
+
Click to expand/collapse Cassandra configuration and startup methods
@@ -423,6 +450,8 @@ Connecting to HugeGraphServer (http://127.0.0.1:8080/graphs)....OK
##### 5.1.5 ScyllaDB
+> ⚠️ **Deprecated**: This backend has been removed starting from HugeGraph 1.7.0. If you need to use it, please refer to version 1.5.x documentation.
+
Click to expand/collapse ScyllaDB configuration and startup methods
@@ -509,6 +538,8 @@ Connecting to HugeGraphServer (http://127.0.0.1:8080/graphs)....OK
##### 5.1.7 MySQL
+> ⚠️ **Deprecated**: This backend has been removed starting from HugeGraph 1.7.0. If you need to use it, please refer to version 1.5.x documentation.
+
Click to expand/collapse MySQL configuration and startup methods
@@ -579,12 +610,14 @@ In [3.1 Use Docker container](#31-use-docker-container-convenient-for-testdev),
##### 5.2.1 Uses Cassandra as storage
+> ⚠️ **Deprecated**: Cassandra backend has been removed starting from HugeGraph 1.7.0. If you need to use it, please refer to version 1.5.x documentation.
+
Click to expand/collapse Cassandra configuration and startup methods
When using Docker, we can use Cassandra as the backend storage. We highly recommend using docker-compose directly to manage both the server and Cassandra.
-The sample `docker-compose.yml` can be obtained on [GitHub](https://github.com/apache/incubator-hugegraph/blob/master/hugegraph-server/hugegraph-dist/docker/example/docker-compose-cassandra.yml), and you can start it with `docker-compose up -d`. (If using Cassandra 4.0 as the backend storage, it takes approximately two minutes to initialize. Please be patient.)
+The sample `docker-compose.yml` can be obtained on [GitHub](https://github.com/apache/hugegraph/blob/master/hugegraph-server/hugegraph-dist/docker/example/docker-compose-cassandra.yml), and you can start it with `docker-compose up -d`. (If using Cassandra 4.0 as the backend storage, it takes approximately two minutes to initialize. Please be patient.)
```yaml
version: "3"
@@ -647,17 +680,17 @@ Set the environment variable `PRELOAD=true` when starting Docker to load data du
1. Use `docker run`
- Use `docker run -itd --name=server -p 8080:8080 -e PRELOAD=true hugegraph/hugegraph:1.5.0`
+ Use `docker run -itd --name=server -p 8080:8080 -e PRELOAD=true hugegraph/hugegraph:1.7.0`
2. Use `docker-compose`
- Create `docker-compose.yml` as following. We should set the environment variable `PRELOAD=true`. [`example.groovy`](https://github.com/apache/incubator-hugegraph/blob/master/hugegraph-server/hugegraph-dist/src/assembly/static/scripts/example.groovy) is a predefined script to preload the sample data. If needed, we can mount a new `example.groovy` to change the preload data.
+ Create `docker-compose.yml` as following. We should set the environment variable `PRELOAD=true`. [`example.groovy`](https://github.com/apache/hugegraph/blob/master/hugegraph-server/hugegraph-dist/src/assembly/static/scripts/example.groovy) is a predefined script to preload the sample data. If needed, we can mount a new `example.groovy` to change the preload data.
```yaml
version: '3'
services:
server:
- image: hugegraph/hugegraph:1.5.0
+ image: hugegraph/hugegraph:1.7.0
container_name: server
environment:
- PRELOAD=true
diff --git a/content/en/docs/quickstart/toolchain/_index.md b/content/en/docs/quickstart/toolchain/_index.md
index 5c7508230..c2030ea39 100644
--- a/content/en/docs/quickstart/toolchain/_index.md
+++ b/content/en/docs/quickstart/toolchain/_index.md
@@ -6,8 +6,8 @@ weight: 2
> **Testing Guide**: For running toolchain tests locally, please refer to [HugeGraph Toolchain Local Testing Guide](/docs/guides/toolchain-local-test)
-## 🚀 Best practice: Prioritize using DeepWiki intelligent documents
+> DeepWiki provides real-time updated project documentation with more comprehensive and accurate content, suitable for quickly understanding the latest project information.
+>
+> 📖 [https://deepwiki.com/apache/hugegraph-toolchain](https://deepwiki.com/apache/hugegraph-toolchain)
-> To address the issue of outdated static documents, we provide DeepWiki with **real-time updates and more comprehensive content**. It is equivalent to an expert with the latest knowledge of the project, which is very suitable for **all developers** to read and consult before starting the project.
-
-**👉 Strongly recommend visiting and having a conversation with:** [**incubator-hugegraph-toolchain**](https://deepwiki.com/apache/incubator-hugegraph-toolchain)
\ No newline at end of file
+**GitHub Access:** [https://github.com/apache/hugegraph-toolchain](https://github.com/apache/hugegraph-toolchain)
\ No newline at end of file
diff --git a/content/en/docs/quickstart/toolchain/hugegraph-hubble.md b/content/en/docs/quickstart/toolchain/hugegraph-hubble.md
index d73403f0c..2746ea771 100644
--- a/content/en/docs/quickstart/toolchain/hugegraph-hubble.md
+++ b/content/en/docs/quickstart/toolchain/hugegraph-hubble.md
@@ -101,7 +101,7 @@ services:
`hubble` is in the `toolchain` project. First, download the binary tar tarball
```bash
-wget https://downloads.apache.org/incubator/hugegraph/{version}/apache-hugegraph-toolchain-incubating-{version}.tar.gz
+wget https://downloads.apache.org/hugegraph/{version}/apache-hugegraph-toolchain-incubating-{version}.tar.gz
tar -xvf apache-hugegraph-toolchain-incubating-{version}.tar.gz
cd apache-hugegraph-toolchain-incubating-{version}.tar.gz/apache-hugegraph-hubble-incubating-{version}
```
@@ -557,3 +557,26 @@ There is no visual OLAP algorithm execution on Hubble. You can call the RESTful
+
+### 5 Configuration
+
+HugeGraph-Hubble can be configured through the `conf/hugegraph-hubble.properties` file.
+
+#### 5.1 Server Configuration
+
+| Configuration Item | Default Value | Description |
+|-------------------|---------------|-------------|
+| `hubble.host` | `0.0.0.0` | The address that Hubble service binds to |
+| `hubble.port` | `8088` | The port that Hubble service listens on |
+
+#### 5.2 Gremlin Query Limits
+
+These settings control query result limits to prevent memory issues:
+
+| Configuration Item | Default Value | Description |
+|-------------------|---------------|-------------|
+| `gremlin.suffix_limit` | `250` | Maximum query suffix length |
+| `gremlin.vertex_degree_limit` | `100` | Maximum vertex degree to display |
+| `gremlin.edges_total_limit` | `500` | Maximum number of edges returned |
+| `gremlin.batch_query_ids` | `100` | ID batch query size |
+
diff --git a/content/en/docs/quickstart/toolchain/hugegraph-loader.md b/content/en/docs/quickstart/toolchain/hugegraph-loader.md
index 6d14d05ae..a079db847 100644
--- a/content/en/docs/quickstart/toolchain/hugegraph-loader.md
+++ b/content/en/docs/quickstart/toolchain/hugegraph-loader.md
@@ -65,7 +65,7 @@ The specific data loading process can be referenced under [4.5 User Docker to lo
Download the latest version of the HugeGraph-Toolchain release package:
```bash
-wget https://downloads.apache.org/incubator/hugegraph/{version}/apache-hugegraph-toolchain-incubating-{version}.tar.gz
+wget https://downloads.apache.org/hugegraph/{version}/apache-hugegraph-toolchain-incubating-{version}.tar.gz
tar zxf *hugegraph*.tar.gz
```
@@ -78,7 +78,7 @@ Clone the latest version of HugeGraph-Loader source package:
git clone https://github.com/apache/hugegraph-toolchain.git
# 2. get from direct (e.g. here is 1.0.0, please choose the latest version)
-wget https://downloads.apache.org/incubator/hugegraph/{version}/apache-hugegraph-toolchain-incubating-{version}-src.tar.gz
+wget https://downloads.apache.org/hugegraph/{version}/apache-hugegraph-toolchain-incubating-{version}-src.tar.gz
```
@@ -794,47 +794,71 @@ The import process is controlled by commands submitted by the user, and the user
##### 3.4.1 Parameter description
-| Parameter | Default value | Required or not | Description |
-|---------------------------|---------------|-----------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `-f` or `--file` | | Y | Path to configure script |
-| `-g` or `--graph` | | Y | Graph name |
-| `-gs` or `--graphspace` | DEFAULT | | Graph space name |
-| `-s` or `--schema` | | Y | Schema file path |
-| `-h` or `--host` or `-i` | localhost | | Address of HugeGraphServer |
-| `-p` or `--port` | 8080 | | Port number of HugeGraphServer |
-| `--username` | null | | When HugeGraphServer enables permission authentication, the username of the current graph |
-| `--password` | null | | When HugeGraphServer enables permission authentication, the password of the current graph |
-| `--create-graph` | false | | Whether to automatically create the graph if it does not exist |
-| `--token` | null | | When HugeGraphServer has enabled authorization authentication, the token of the current graph |
-| `--protocol` | http | | Protocol for sending requests to the server, optional http or https |
-| `--pd-peers` | | | PD service node addresses |
-| `--pd-token` | | | Token for accessing PD service |
-| `--meta-endpoints` | | | Meta information storage service addresses |
-| `--direct` | false | | Whether to directly connect to HugeGraph-Store |
-| `--route-type` | NODE_PORT | | Route selection method (optional values: NODE_PORT / DDS / BOTH) |
-| `--cluster` | hg | | Cluster name |
-| `--trust-store-file` | | | When the request protocol is https, the client's certificate file path |
-| `--trust-store-password` | | | When the request protocol is https, the client certificate password |
-| `--clear-all-data` | false | | Whether to clear the original data on the server before importing data |
-| `--clear-timeout` | 240 | | Timeout for clearing the original data on the server before importing data |
-| `--incremental-mode` | false | | Whether to use the breakpoint resume mode; only input sources FILE and HDFS support this mode. Enabling this mode allows starting the import from where the last import stopped |
-| `--failure-mode` | false | | When failure mode is true, previously failed data will be imported. Generally, the failed data file needs to be manually corrected and edited before re-importing |
-| `--batch-insert-threads` | CPUs | | Batch insert thread pool size (CPUs is the number of **logical cores** available to the current OS) |
-| `--single-insert-threads` | 8 | | Size of single insert thread pool |
-| `--max-conn` | 4 * CPUs | | The maximum number of HTTP connections between HugeClient and HugeGraphServer; it is recommended to adjust this when **adjusting threads** |
-| `--max-conn-per-route` | 2 * CPUs | | The maximum number of HTTP connections for each route between HugeClient and HugeGraphServer; it is recommended to adjust this item when **adjusting threads** |
-| `--batch-size` | 500 | | The number of data items in each batch when importing data |
-| `--max-parse-errors` | 1 | | The maximum number of data parsing errors allowed (per line); the program exits when this value is reached |
-| `--max-insert-errors` | 500 | | The maximum number of data insertion errors allowed (per row); the program exits when this value is reached |
-| `--timeout` | 60 | | Timeout (seconds) for insert result return |
-| `--shutdown-timeout` | 10 | | Waiting time for multithreading to stop (seconds) |
-| `--retry-times` | 0 | | Number of retries when a specific exception occurs |
-| `--retry-interval` | 10 | | Interval before retry (seconds) |
-| `--check-vertex` | false | | Whether to check if the vertices connected by the edge exist when inserting the edge |
-| `--print-progress` | true | | Whether to print the number of imported items in real time on the console |
-| `--dry-run` | false | | Enable this mode to only parse data without importing; usually used for testing |
-| `--help` | false | | Print help information |
-
+| Parameter | Default value | Required or not | Description |
+|------------------------------------------|---------------|-----------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `-f` or `--file` | | Y | Path to configure script |
+| `-g` or `--graph` | | Y | Graph name |
+| `--graphspace` | DEFAULT | | Graph space name |
+| `-s` or `--schema` | | Y | Schema file path |
+| `-h` or `--host` or `-i` | localhost | | Address of HugeGraphServer |
+| `-p` or `--port` | 8080 | | Port number of HugeGraphServer |
+| `--username` | null | | When HugeGraphServer enables permission authentication, the username of the current graph |
+| `--password` | null | | When HugeGraphServer enables permission authentication, the password of the current graph |
+| `--create-graph` | false | | Whether to automatically create the graph if it does not exist |
+| `--token` | null | | When HugeGraphServer has enabled authorization authentication, the token of the current graph |
+| `--protocol` | http | | Protocol for sending requests to the server, optional http or https |
+| `--pd-peers` | | | PD service node addresses |
+| `--pd-token` | | | Token for accessing PD service |
+| `--meta-endpoints` | | | Meta information storage service addresses |
+| `--direct` | false | | Whether to directly connect to HugeGraph-Store |
+| `--route-type` | NODE_PORT | | Route selection method (optional values: NODE_PORT / DDS / BOTH) |
+| `--cluster` | hg | | Cluster name |
+| `--trust-store-file` | | | When the request protocol is https, the client's certificate file path |
+| `--trust-store-password` | | | When the request protocol is https, the client certificate password |
+| `--clear-all-data` | false | | Whether to clear the original data on the server before importing data |
+| `--clear-timeout` | 240 | | Timeout for clearing the original data on the server before importing data |
+| `--incremental-mode` | false | | Whether to use the breakpoint resume mode; only input sources FILE and HDFS support this mode. Enabling this mode allows starting the import from where the last import stopped |
+| `--failure-mode` | false | | When failure mode is true, previously failed data will be imported. Generally, the failed data file needs to be manually corrected and edited before re-importing |
+| `--batch-insert-threads` | CPUs | | Batch insert thread pool size (CPUs is the number of **logical cores** available to the current OS) |
+| `--single-insert-threads` | 8 | | Size of single insert thread pool |
+| `--max-conn` | 4 * CPUs | | The maximum number of HTTP connections between HugeClient and HugeGraphServer; it is recommended to adjust this when **adjusting threads** |
+| `--max-conn-per-route` | 2 * CPUs | | The maximum number of HTTP connections for each route between HugeClient and HugeGraphServer; it is recommended to adjust this item when **adjusting threads** |
+| `--batch-size` | 500 | | The number of data items in each batch when importing data |
+| `--max-parse-errors` | 1 | | The maximum number of data parsing errors allowed (per line); the program exits when this value is reached |
+| `--max-insert-errors` | 500 | | The maximum number of data insertion errors allowed (per row); the program exits when this value is reached |
+| `--timeout` | 60 | | Timeout (seconds) for insert result return |
+| `--shutdown-timeout` | 10 | | Waiting time for multithreading to stop (seconds) |
+| `--retry-times` | 0 | | Number of retries when a specific exception occurs |
+| `--retry-interval` | 10 | | Interval before retry (seconds) |
+| `--check-vertex` | false | | Whether to check if the vertices connected by the edge exist when inserting the edge |
+| `--print-progress` | true | | Whether to print the number of imported items in real time on the console |
+| `--dry-run` | false | | Enable this mode to only parse data without importing; usually used for testing |
+| `--help` or `-help` | false | | Print help information |
+| `--parser-threads` or `--parallel-count` | max(2, CPUs) | | Parallel read pipelines for data files |
+| `--start-file` | 0 | | Start file index for partial loading |
+| `--end-file` | -1 | | End file index for partial loading |
+| `--scatter-sources` | false | | Scatter multiple sources for I/O optimization |
+| `--cdc-flush-interval` | 30000 | | The flush interval for Flink CDC |
+| `--cdc-sink-parallelism` | 1 | | The sink parallelism for Flink CDC |
+| `--max-read-errors` | 1 | | The maximum number of read error lines before exiting |
+| `--max-read-lines` | -1L | | The maximum number of read lines, task stops when reached |
+| `--test-mode` | false | | Whether the loader works in test mode |
+| `--use-prefilter` | false | | Whether to filter vertex in advance |
+| `--short-id` | | | Mapping customized ID to shorter ID |
+| `--vertex-edge-limit` | -1L | | The maximum number of vertex's edges |
+| `--sink-type` | true | | Sink to different storage type switch |
+| `--vertex-partitions` | 64 | | The number of partitions of the HBase vertex table |
+| `--edge-partitions` | 64 | | The number of partitions of the HBase edge table |
+| `--vertex-table-name` | | | HBase vertex table name |
+| `--edge-table-name` | | | HBase edge table name |
+| `--hbase-zk-quorum` | | | HBase ZooKeeper quorum |
+| `--hbase-zk-port` | | | HBase ZooKeeper port |
+| `--hbase-zk-parent` | | | HBase ZooKeeper parent |
+| `--restore` | false | | Set graph mode to RESTORING |
+| `--backend` | hstore | | The backend store type when creating graph if not exists |
+| `--serializer` | binary | | The serializer type when creating graph if not exists |
+| `--scheduler-type` | distributed | | The task scheduler type when creating graph if not exists |
+| `--batch-failure-fallback` | true | | Whether to fallback to single insert when batch insert fails |
##### 3.4.2 Breakpoint Continuation Mode
Usually, the Loader task takes a long time to execute. If the import interrupt process exits for some reason, and next time you want to continue the import from the interrupted point, this is the scenario of using breakpoint continuation.
diff --git a/content/en/docs/quickstart/toolchain/hugegraph-spark-connector.md b/content/en/docs/quickstart/toolchain/hugegraph-spark-connector.md
new file mode 100644
index 000000000..fb7494efa
--- /dev/null
+++ b/content/en/docs/quickstart/toolchain/hugegraph-spark-connector.md
@@ -0,0 +1,182 @@
+---
+title: "HugeGraph-Spark-Connector Quick Start"
+linkTitle: "Read/Write Graph Data with Spark Connector"
+weight: 4
+---
+
+### 1 HugeGraph-Spark-Connector Overview
+
+HugeGraph-Spark-Connector is a Spark connector application for reading and writing HugeGraph data in Spark standard format.
+
+### 2 Environment Requirements
+
+- Java 8+
+- Maven 3.6+
+- Spark 3.x
+- Scala 2.12
+
+### 3 Building
+
+#### 3.1 Build without executing tests
+
+```bash
+mvn clean package -DskipTests
+```
+
+#### 3.2 Build with default tests
+
+```bash
+mvn clean package
+```
+
+### 4 Usage
+
+First add the dependency in your pom.xml:
+
+```xml
+<dependency>
+    <groupId>org.apache.hugegraph</groupId>
+    <artifactId>hugegraph-spark-connector</artifactId>
+    <version>${revision}</version>
+</dependency>
+```
+
+#### 4.1 Schema Definition Example
+
+If we have a graph, the schema is defined as follows:
+
+```groovy
+schema.propertyKey("name").asText().ifNotExist().create()
+schema.propertyKey("age").asInt().ifNotExist().create()
+schema.propertyKey("city").asText().ifNotExist().create()
+schema.propertyKey("weight").asDouble().ifNotExist().create()
+schema.propertyKey("lang").asText().ifNotExist().create()
+schema.propertyKey("date").asText().ifNotExist().create()
+schema.propertyKey("price").asDouble().ifNotExist().create()
+
+schema.vertexLabel("person")
+ .properties("name", "age", "city")
+ .useCustomizeStringId()
+ .nullableKeys("age", "city")
+ .ifNotExist()
+ .create()
+
+schema.vertexLabel("software")
+ .properties("name", "lang", "price")
+ .primaryKeys("name")
+ .ifNotExist()
+ .create()
+
+schema.edgeLabel("knows")
+ .sourceLabel("person")
+ .targetLabel("person")
+ .properties("date", "weight")
+ .ifNotExist()
+ .create()
+
+schema.edgeLabel("created")
+ .sourceLabel("person")
+ .targetLabel("software")
+ .properties("date", "weight")
+ .ifNotExist()
+ .create()
+```
+
+#### 4.2 Vertex Sink (Scala)
+
+```scala
+val df = sparkSession.createDataFrame(Seq(
+ Tuple3("marko", 29, "Beijing"),
+ Tuple3("vadas", 27, "HongKong"),
+ Tuple3("Josh", 32, "Beijing"),
+ Tuple3("peter", 35, "ShangHai"),
+ Tuple3("li,nary", 26, "Wu,han"),
+ Tuple3("Bob", 18, "HangZhou"),
+)).toDF("name", "age", "city")
+
+df.show()
+
+df.write
+ .format("org.apache.hugegraph.spark.connector.DataSource")
+ .option("host", "127.0.0.1")
+ .option("port", "8080")
+ .option("graph", "hugegraph")
+ .option("data-type", "vertex")
+ .option("label", "person")
+ .option("id", "name")
+ .option("batch-size", 2)
+ .mode(SaveMode.Overwrite)
+ .save()
+```
+
+#### 4.3 Edge Sink (Scala)
+
+```scala
+val df = sparkSession.createDataFrame(Seq(
+ Tuple4("marko", "vadas", "20160110", 0.5),
+ Tuple4("peter", "Josh", "20230801", 1.0),
+ Tuple4("peter", "li,nary", "20130220", 2.0)
+)).toDF("source", "target", "date", "weight")
+
+df.show()
+
+df.write
+ .format("org.apache.hugegraph.spark.connector.DataSource")
+ .option("host", "127.0.0.1")
+ .option("port", "8080")
+ .option("graph", "hugegraph")
+ .option("data-type", "edge")
+ .option("label", "knows")
+ .option("source-name", "source")
+ .option("target-name", "target")
+ .option("batch-size", 2)
+ .mode(SaveMode.Overwrite)
+ .save()
+```
+
+### 5 Configuration Parameters
+
+#### 5.1 Client Configs
+
+Client Configs are used to configure hugegraph-client.
+
+| Parameter | Default Value | Description |
+|----------------------|---------------|----------------------------------------------------------------------------------------------|
+| `host` | `localhost` | Address of HugeGraphServer |
+| `port` | `8080` | Port of HugeGraphServer |
+| `graph` | `hugegraph` | Graph name |
+| `protocol` | `http` | Protocol for sending requests to the server, optional `http` or `https` |
+| `username` | `null` | Username of the current graph when HugeGraphServer enables permission authentication |
+| `token` | `null` | Token of the current graph when HugeGraphServer has enabled authorization authentication |
+| `timeout` | `60` | Timeout (seconds) for inserting results to return |
+| `max-conn` | `CPUS * 4` | The maximum number of HTTP connections between HugeClient and HugeGraphServer |
+| `max-conn-per-route` | `CPUS * 2` | The maximum number of HTTP connections for each route between HugeClient and HugeGraphServer |
+| `trust-store-file` | `null` | The client's certificate file path when the request protocol is https |
+| `trust-store-token` | `null` | The client's certificate password when the request protocol is https |
+
+#### 5.2 Graph Data Configs
+
+Graph Data Configs are used to set graph space configuration.
+
+| Parameter | Default Value | Description |
+|-------------------|---------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `data-type` | | Graph data type, must be `vertex` or `edge` |
+| `label` | | Label to which the vertex/edge data to be imported belongs |
+| `id` | | Specify a column as the id column of the vertex. When the vertex id policy is CUSTOMIZE, it is required; when the id policy is PRIMARY_KEY, it must be empty |
+| `source-name` | | Select certain columns of the input source as the id column of source vertex. When the id policy of the source vertex is CUSTOMIZE, a certain column must be specified as the id column of the vertex; when the id policy of the source vertex is PRIMARY_KEY, one or more columns must be specified for splicing the id of the generated vertex, that is, no matter which id strategy is used, this item is required |
+| `target-name` | | Specify certain columns as the id columns of target vertex, similar to source-name |
+| `selected-fields` | | Select some columns to insert, other unselected ones are not inserted, cannot exist at the same time as ignored-fields |
+| `ignored-fields` | | Ignore some columns so that they do not participate in insertion, cannot exist at the same time as selected-fields |
+| `batch-size` | `500` | The number of data items in each batch when importing data |
+
+#### 5.3 Common Configs
+
+Common Configs contains some common configurations.
+
+| Parameter | Default Value | Description |
+|-------------|---------------|---------------------------------------------------------------------------------|
+| `delimiter` | `,` | Separator of `source-name`, `target-name`, `selected-fields` or `ignored-fields` |
+
+### 6 License
+
+The same as HugeGraph, hugegraph-spark-connector is also licensed under Apache 2.0 License.
diff --git a/content/en/docs/quickstart/toolchain/hugegraph-tools.md b/content/en/docs/quickstart/toolchain/hugegraph-tools.md
index c55199f8e..3ebb3543e 100644
--- a/content/en/docs/quickstart/toolchain/hugegraph-tools.md
+++ b/content/en/docs/quickstart/toolchain/hugegraph-tools.md
@@ -22,7 +22,7 @@ There are two ways to get HugeGraph-Tools:
Download the latest version of the HugeGraph-Toolchain package:
```bash
-wget https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-toolchain-incubating-1.0.0.tar.gz
+wget https://downloads.apache.org/hugegraph/1.0.0/apache-hugegraph-toolchain-incubating-1.0.0.tar.gz
tar zxf *hugegraph*.tar.gz
```
@@ -36,7 +36,7 @@ Download the latest version of the HugeGraph-Tools source package:
git clone https://github.com/apache/hugegraph-toolchain.git
# 2. get from direct (e.g. here is 1.0.0, please choose the latest version)
-wget https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-toolchain-incubating-1.0.0-src.tar.gz
+wget https://downloads.apache.org/hugegraph/1.0.0/apache-hugegraph-toolchain-incubating-1.0.0-src.tar.gz
```
Compile and generate tar package:
@@ -55,11 +55,12 @@ Generate tar package hugegraph-tools-${version}.tar.gz
After decompression, enter the hugegraph-tools directory, you can use `bin/hugegraph` or `bin/hugegraph help` to view the usage information. mainly divided:
-- Graph management Type,graph-mode-set、graph-mode-get、graph-list、graph-get and graph-clear
-- Asynchronous task management Type,task-list、task-get、task-delete、task-cancel and task-clear
-- Gremlin Type,gremlin-execute and gremlin-schedule
-- Backup/Restore Type,backup、restore、migrate、schedule-backup and dump
-- Install the deployment Type,deploy、clear、start-all and stop-all
+- Graph management type, graph-mode-set, graph-mode-get, graph-list, graph-get, graph-clear, graph-create, graph-clone and graph-drop
+- Asynchronous task management type, task-list, task-get, task-delete, task-cancel and task-clear
+- Gremlin type, gremlin-execute and gremlin-schedule
+- Backup/Restore type, backup, restore, migrate, schedule-backup and dump
+- Authentication data backup/restore type, auth-backup and auth-restore
+- Install deployment type, deploy, clear, start-all and stop-all
```bash
Usage: hugegraph [options] [command] [command options]
@@ -105,15 +106,23 @@ Another way is to set the environment variable in the bin/hugegraph script:
#export HUGEGRAPH_TRUST_STORE_PASSWORD=
```
-##### 3.3 Graph Management Type,graph-mode-set、graph-mode-get、graph-list、graph-get and graph-clear
+##### 3.3 Graph Management Type, graph-mode-set, graph-mode-get, graph-list, graph-get, graph-clear, graph-create, graph-clone and graph-drop
-- graph-mode-set,set graph restore mode
+- graph-mode-set, set graph restore mode
- --graph-mode or -m, required, specifies the mode to be set, legal values include [NONE, RESTORING, MERGING, LOADING]
-- graph-mode-get,get graph restore mode
-- graph-list,list all graphs in a HugeGraph-Server
-- graph-get,get a graph and its storage backend type
-- graph-clear,clear all schema and data of a graph
- - --confirm-message Or -c, required, delete confirmation information, manual input is required, double confirmation to prevent accidental deletion, "I'm sure to delete all data", including double quotes
+- graph-mode-get, get graph restore mode
+- graph-list, list all graphs in a HugeGraph-Server
+- graph-get, get a graph and its storage backend type
+- graph-clear, clear all schema and data of a graph
+ - --confirm-message or -c, required, delete confirmation information, manual input is required, double confirmation to prevent accidental deletion, "I'm sure to delete all data", including double quotes
+- graph-create, create a new graph with configuration file
+ - --name or -n, optional, the name of the new graph, default is hugegraph
+ - --file or -f, required, the path to the graph configuration file
+- graph-clone, clone an existing graph
+ - --name or -n, optional, the name of the cloned graph, default is hugegraph
+ - --clone-graph-name, optional, the name of the source graph to clone from, default is hugegraph
+- graph-drop, drop a graph (different from graph-clear, this completely removes the graph)
+ - --confirm-message or -c, required, confirmation message "I'm sure to drop the graph", including double quotes
> When you need to restore the backup graph to a new graph, you need to set the graph mode to RESTORING mode; when you need to merge the backup graph into an existing graph, you need to first set the graph mode to MERGING model.
@@ -159,6 +168,7 @@ Another way is to set the environment variable in the bin/hugegraph script:
- --huge-types or -t, the data types to be backed up, separated by commas, the optional value is 'all' or a combination of one or more [vertex, edge, vertex_label, edge_label, property_key, index_label], 'all' Represents all 6 types, namely vertices, edges and all schemas
- --log or -l, specify the log directory, the default is the current directory
- --retry, specify the number of failed retries, the default is 3
+ - --thread-num or -T, the number of threads to use, default is Math.min(10, Math.max(4, CPUs / 2))
- --split-size or -s, specifies the size of splitting vertices or edges when backing up, the default is 1048576
- -D, use the mode of -Dkey=value to specify dynamic parameters, and specify HDFS configuration items when backing up data to HDFS, for example: -Dfs.default.name=hdfs://localhost:9000
- restore, restore schema or data stored in JSON format to a new graph (RESTORING mode) or merge into an existing graph (MERGING mode)
@@ -167,6 +177,7 @@ Another way is to set the environment variable in the bin/hugegraph script:
- --huge-types or -t, data types to restore, separated by commas, optional value is 'all' or a combination of one or more [vertex, edge, vertex_label, edge_label, property_key, index_label], 'all' Represents all 6 types, namely vertices, edges and all schemas
- --log or -l, specify the log directory, the default is the current directory
- --retry, specify the number of failed retries, the default is 3
+ - --thread-num or -T, the number of threads to use, default is Math.min(10, Math.max(4, CPUs / 2))
- -D, use the mode of -Dkey=value to specify dynamic parameters, which are used to specify HDFS configuration items when restoring graphs from HDFS, for example: -Dfs.default.name=hdfs://localhost:9000
> restore command can be used only if --format is executed as backup for json
- migrate, migrate the currently connected graph to another HugeGraphServer
@@ -200,7 +211,26 @@ Another way is to set the environment variable in the bin/hugegraph script:
- --split-size or -s, specifies the size of splitting vertices or edges when backing up, the default is 1048576
- -D, use the mode of -Dkey=value to specify dynamic parameters, and specify HDFS configuration items when backing up data to HDFS, for example: -Dfs.default.name=hdfs://localhost:9000
-##### 3.7 Install the deployment type
+##### 3.7 Authentication data backup/restore type
+
+- auth-backup, backup authentication data to a specified directory
+ - --types or -t, types of authentication data to back up, separated by commas, optional value is 'all' or a combination of one or more [user, group, target, belong, access], 'all' represents all 5 types
+ - --directory or -d, directory to store backup data, defaults to current directory
+ - --log or -l, specify the log directory, the default is the current directory
+ - --retry, specify the number of failed retries, the default is 3
+ - --thread-num or -T, the number of threads to use, default is Math.min(10, Math.max(4, CPUs / 2))
+ - -D, use the mode of -Dkey=value to specify dynamic parameters, and specify HDFS configuration items when backing up data to HDFS, for example: -Dfs.default.name=hdfs://localhost:9000
+- auth-restore, restore authentication data from a specified directory
+ - --types or -t, types of authentication data to restore, separated by commas, optional value is 'all' or a combination of one or more [user, group, target, belong, access], 'all' represents all 5 types
+ - --directory or -d, directory where backup data is stored, defaults to current directory
+ - --log or -l, specify the log directory, the default is the current directory
+ - --retry, specify the number of failed retries, the default is 3
+ - --thread-num or -T, the number of threads to use, default is Math.min(10, Math.max(4, CPUs / 2))
+ - --strategy, conflict handling strategy, optional values are [stop, ignore], default is stop. stop means stop restoring when encountering conflicts, ignore means ignore conflicts and continue restoring
+ - --init-password, initial password to set when restoring users, required when restoring user data
+ - -D, use the mode of -Dkey=value to specify dynamic parameters, which are used to specify HDFS configuration items when restoring data from HDFS, for example: -Dfs.default.name=hdfs://localhost:9000
+
+##### 3.8 Install the deployment type
- deploy, one-click download, install and start HugeGraph-Server and HugeGraph-Studio
- -v, required, specifies the version number of HugeGraph-Server and HugeGraph-Studio installed, the latest is 0.9
@@ -215,7 +245,7 @@ Another way is to set the environment variable in the bin/hugegraph script:
> There is an optional parameter -u in the deploy command. When provided, the specified download address will be used instead of the default download address to download the tar package, and the address will be written into the `~/hugegraph-download-url-prefix` file; if no address is specified later When -u and `~/hugegraph-download-url-prefix` are not specified, the tar package will be downloaded from the address specified by `~/hugegraph-download-url-prefix`; if there is neither -u nor `~/hugegraph-download-url-prefix`, it will be downloaded from the default download address
-##### 3.8 Specific command parameters
+##### 3.9 Specific command parameters
The specific parameters of each subcommand are as follows:
@@ -524,7 +554,7 @@ Usage: hugegraph [options] [command] [command options]
```
-##### 3.9 Specific command example
+##### 3.10 Specific command example
###### 1. gremlin statement
diff --git a/contribution.md b/contribution.md
index 9b59ab136..a0e662aad 100644
--- a/contribution.md
+++ b/contribution.md
@@ -1,10 +1,25 @@
-# How to help us (如何参与)
+# Contribution Guide - Detailed Reference
+
+> **快速开始请看 [README.md](./README.md)**,这里是详细的参考文档。
+
+## PR 检查清单
+
+提交 Pull Request 前请确认:
+
+- [ ] 本地构建并验证了修改效果
+- [ ] 同时更新了中文 (`content/cn/`) 和英文 (`content/en/`) 版本
+- [ ] PR 描述中包含修改前后的截图对比
+- [ ] 如有相关 Issue,已在 PR 中关联
+
+---
+
+## How to help us (如何参与)
1. 在本地 3 步快速构建官网环境,启动起来看下目前效果 (Auto reload)
2. 先 fork 仓库,然后基于 `master` 创建一个**新的**分支,修改完成后提交 PR ✅ (请在 PR 内**截图**对比一下修改**前后**的效果 & 简要说明,感谢)
3. 新增/修改网站/文档 (提供**中/英文**页面翻译,基本为 `markdown` 格式)
-Refer: 不熟悉 **github-pr** 流程的同学, 可参考[贡献流程](https://github.com/apache/incubator-hugegraph/blob/master/CONTRIBUTING.md)文档, 推荐使用 [github desktop](https://desktop.github.com/) 应用, 会简单方便许多~
+Refer: 不熟悉 **github-pr** 流程的同学, 可参考[贡献流程](https://github.com/apache/hugegraph/blob/master/CONTRIBUTING.md)文档, 推荐使用 [github desktop](https://desktop.github.com/) 应用, 会简单方便许多~
**PS:** 可以参考其他官网的[源码](https://www.docsy.dev/docs/examples), 方便快速了解 docsy 主题结构.
diff --git a/dist/README.md b/dist/README.md
index 0ccdb8ce4..1258e2a70 100644
--- a/dist/README.md
+++ b/dist/README.md
@@ -1,10 +1,10 @@
# Apache HugeGraph 发版验证脚本
-Apache HugeGraph (Incubating) 发布包的自动化验证脚本。
+Apache HugeGraph 发布包的自动化验证脚本。
## 概述
-`validate-release.sh` 脚本对 Apache HugeGraph 发布包进行全面验证,自动执行 [Apache 发布政策](https://www.apache.org/legal/release-policy.html) 和 [孵化器发布检查清单](https://cwiki.apache.org/confluence/display/INCUBATOR/Incubator+Release+Checklist) 要求的大部分检查。
+`validate-release.sh` 脚本对 Apache HugeGraph 发布包进行全面验证,自动执行 [Apache 发布政策](https://www.apache.org/legal/release-policy.html) 的核心要求。
## 功能特性
@@ -12,7 +12,7 @@ Apache HugeGraph (Incubating) 发布包的自动化验证脚本。
- ✅ **SHA512 和 GPG 签名验证** - 确保包的完整性和真实性
- ✅ **许可证合规性验证** - 检查禁止的 ASF Category X 和需要文档化的 Category B 许可证
- ✅ **详细的许可证错误报告** - 对 Category X 违规显示文件路径、许可证名称和上下文
-- ✅ **包内容验证** - 验证必需文件(LICENSE、NOTICE、DISCLAIMER)
+- ✅ **包内容验证** - 强校验 `LICENSE`、`NOTICE`
- ✅ **ASF 许可证头检查** - 验证所有源文件中的许可证头,支持第三方代码文档化
+- ✅ **版本一致性验证** - 验证 Maven `<revision>` 属性与预期发布版本匹配
- ✅ **多语言项目支持** - 自动跳过 Python 项目(hugegraph-ai)的 Maven 版本检查
@@ -69,6 +69,7 @@ Apache HugeGraph (Incubating) 发布包的自动化验证脚本。
# 非交互模式(用于 CI/CD)
./validate-release.sh --non-interactive 1.7.0 pengjunzhi
+
```
### 命令行选项
@@ -77,6 +78,14 @@ Apache HugeGraph (Incubating) 发布包的自动化验证脚本。
- `--version`, `-v` - 显示脚本版本并退出
- `--non-interactive` - 无提示运行(用于 CI/CD 管道)
+### `deploy-release.sh`(本地快速拉起)
+
+该脚本用于从 `downloads.apache.org/hugegraph` 下载对应版本并快速启动 server/hubble。
+
+```bash
+./deploy-release.sh 1.7.0
+```
+
## 验证步骤
脚本执行以下 9 个验证步骤:
@@ -86,8 +95,8 @@ Apache HugeGraph (Incubating) 发布包的自动化验证脚本。
3. **导入并信任 GPG 密钥** - 导入 KEYS 文件并信任所有公钥
4. **验证 SHA512 和 GPG 签名** - 验证所有包的校验和和签名
5. **验证源码包** - 对源码包进行全面检查:
- - 包命名(包含 "incubating")
- - 必需文件(LICENSE、NOTICE、DISCLAIMER)
+ - 包命名(必须符合 `apache-hugegraph-*`)
+ - 必需文件(`LICENSE`、`NOTICE`)
- 许可证合规性(禁止 Category X,记录 Category B)
- 详细的许可证违规报告(文件路径、许可证名称、上下文)
- 无空文件或目录
@@ -110,7 +119,7 @@ Apache HugeGraph (Incubating) 发布包的自动化验证脚本。
```
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
- Apache HugeGraph Release Validation v2.0.0
+ Apache HugeGraph Release Validation v2.2.0
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Version: 1.7.0
@@ -287,7 +296,7 @@ export PATH=$JAVA_HOME/bin:$PATH
```bash
# 手动下载并导入 KEYS
-curl https://downloads.apache.org/incubator/hugegraph/KEYS > KEYS
+curl https://downloads.apache.org/hugegraph/KEYS > KEYS
gpg --import KEYS
# 信任特定密钥
@@ -341,7 +350,6 @@ grep "Step \[5/9\]" logs/validate-*.log
## 参考文档
- [Apache 发布政策](https://www.apache.org/legal/release-policy.html)
-- [孵化器发布检查清单](https://cwiki.apache.org/confluence/display/INCUBATOR/Incubator+Release+Checklist)
- [Apache 许可证分类](https://www.apache.org/legal/resolved.html)
- [HugeGraph 验证发布指南](../content/cn/docs/contribution-guidelines/validate-release.md)
@@ -349,5 +357,5 @@ grep "Step \[5/9\]" logs/validate-*.log
如果发现问题或有改进建议,请:
-1. 查看现有问题:https://github.com/apache/incubator-hugegraph-doc/issues
+1. 查看现有问题:https://github.com/apache/hugegraph-doc/issues
2. 提交新问题或 pull request
diff --git a/dist/deploy-release.sh b/dist/deploy-release.sh
index b3de9a0b2..e49162ec0 100755
--- a/dist/deploy-release.sh
+++ b/dist/deploy-release.sh
@@ -15,35 +15,126 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# Download hugegraph-server and hugegraph-toolcahain, then start them:
-# hugegraph-server and hugegraph-hubble
-
-set -e
-
-RELEASE_VERSION=$1 # like 1.2.0
-RELEASE_VERSION=${RELEASE_VERSION:?"Please input the release version, like 1.2.0"}
-
-DOWNLOAD_URL_PREFIX="https://downloads.apache.org/incubator/hugegraph"
-SERVER_TAR="apache-hugegraph-incubating-${RELEASE_VERSION}.tar.gz"
-TOOLCHAIN_TAR="apache-hugegraph-toolchain-incubating-${RELEASE_VERSION}.tar.gz"
-
-echo "download hugegraph tars from $DOWNLOAD_URL_PREFIX..."
-if [[ ! -f "${SERVER_TAR}" ]]; then
- wget "${DOWNLOAD_URL_PREFIX}/${RELEASE_VERSION}/${SERVER_TAR}"
- tar -xzvf "${SERVER_TAR}"
-fi
-if [[ ! -f "${TOOLCHAIN_TAR}" ]]; then
- wget "${DOWNLOAD_URL_PREFIX}/${RELEASE_VERSION}/${TOOLCHAIN_TAR}"
- tar -xzvf ${TOOLCHAIN_TAR}
-fi
-
-echo "start hugegraph-server..."
-cd ./*hugegraph-incubating*${RELEASE_VERSION}
-bin/init-store.sh
-sleep 3
-bin/start-hugegraph.sh
-cd ..
-
-echo "start hugegraph-hubble..."
-cd ./*toolchain*${RELEASE_VERSION}/*hubble*${RELEASE_VERSION}
-bin/start-hubble.sh
+set -euo pipefail
+
+DOWNLOAD_URL_PREFIX="https://downloads.apache.org/hugegraph"
+
+usage() {
+    cat <<USAGE
+Usage: $(basename "$0") <release-version>
+Examples:
+ $(basename "$0") 1.7.0
+USAGE
+}
+
+log() {
+ echo "[$(date '+%Y-%m-%d %H:%M:%S')] $*"
+}
+
+download_tarball() {
+ local tar_name=$1
+ local url=""
+ if [[ -f "$tar_name" ]]; then
+ log "Reuse local tarball: $tar_name"
+ return 0
+ fi
+
+ url="${DOWNLOAD_URL_PREFIX}/${RELEASE_VERSION}/${tar_name}"
+ if ! wget --spider -q "$url"; then
+ echo "Cannot find tarball: $url" >&2
+ return 1
+ fi
+
+ log "Download $url"
+ wget "$url"
+}
+
+extract_tar_if_needed() {
+ local tar_name=$1
+ local top_dir
+    top_dir=$(tar -tzf "$tar_name" | head -n1 | cut -d'/' -f1) || true
+ if [[ -n "$top_dir" && -d "$top_dir" ]]; then
+ log "Skip extract, directory already exists: $top_dir"
+ else
+ log "Extract $tar_name"
+ tar -xzvf "$tar_name"
+ fi
+}
+
+find_server_dir() {
+ find . -maxdepth 3 -type d -name "*hugegraph-server*${RELEASE_VERSION}*" | head -n1
+}
+
+find_hubble_dir() {
+ find . -maxdepth 4 -type d -name "*hubble*${RELEASE_VERSION}*" | head -n1
+}
+
+parse_args() {
+ while [[ $# -gt 0 ]]; do
+ case "$1" in
+ --help|-h)
+ usage
+ exit 0
+ ;;
+ --*)
+ echo "Unknown option: $1" >&2
+ usage
+ exit 1
+ ;;
+ *)
+ break
+ ;;
+ esac
+ done
+
+ RELEASE_VERSION=${1:-}
+ if [[ -z "$RELEASE_VERSION" ]]; then
+ echo "Please provide release version, e.g. 1.7.0" >&2
+ usage
+ exit 1
+ fi
+}
+
+main() {
+ parse_args "$@"
+ log "Release version: $RELEASE_VERSION"
+ log "Download prefix: $DOWNLOAD_URL_PREFIX"
+
+ local server_tar="apache-hugegraph-${RELEASE_VERSION}.tar.gz"
+ local toolchain_tar="apache-hugegraph-toolchain-${RELEASE_VERSION}.tar.gz"
+
+ download_tarball "$server_tar"
+ download_tarball "$toolchain_tar"
+
+ extract_tar_if_needed "${server_tar}"
+ extract_tar_if_needed "${toolchain_tar}"
+
+ local server_dir
+ server_dir=$(find_server_dir)
+ if [[ -z "$server_dir" ]]; then
+ echo "Cannot find hugegraph-server directory for version $RELEASE_VERSION" >&2
+ exit 1
+ fi
+
+ local hubble_dir
+ hubble_dir=$(find_hubble_dir)
+ if [[ -z "$hubble_dir" ]]; then
+ echo "Cannot find hugegraph-hubble directory for version $RELEASE_VERSION" >&2
+ exit 1
+ fi
+
+ log "Start hugegraph-server from $server_dir"
+ pushd "$server_dir" >/dev/null
+ bin/init-store.sh
+ sleep 3
+ bin/start-hugegraph.sh
+ popd >/dev/null
+
+ log "Start hugegraph-hubble from $hubble_dir"
+ pushd "$hubble_dir" >/dev/null
+ bin/start-hubble.sh
+ popd >/dev/null
+}
+
+main "$@"
diff --git a/dist/validate-links.sh b/dist/validate-links.sh
new file mode 100755
index 000000000..23e9069da
--- /dev/null
+++ b/dist/validate-links.sh
@@ -0,0 +1,289 @@
+#!/bin/bash
+set -o errexit
+set -o pipefail
+
+CONTENT_DIR="content"
+EXIT_CODE=0
+
+VERBOSE="${VERBOSE:-0}"
+
+log_verbose() {
+ if [[ "$VERBOSE" == "1" ]]; then
+ echo "Info: $*"
+ fi
+}
+
+ASSET_EXTENSIONS_REGEX='png|jpg|jpeg|svg|gif|webp|avif|ico|xml|yaml|yml|json|css|js|pdf|zip|tar\.gz|woff|woff2|ttf|eot|mp4|webm'
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" || exit 1
+REPO_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" || exit 1
+CONTENT_ROOT="$(cd "$REPO_ROOT/$CONTENT_DIR" && pwd)" || exit 1
+
+if [[ ! -d "$CONTENT_ROOT" ]]; then
+ echo "Error: content directory not found. Run from repository root."
+ exit 1
+fi
+
+normalize_link() {
+ local link="$1"
+
+ # Decode common URL-encoded characters explicitly
+ link="${link//%20/ }" # space
+ link="${link//%23/#}" # hash
+ link="${link//%2F/\/}" # forward slash
+
+ # Generic percent-decoding for remaining cases
+ link="${link//%/\\x}"
+ link="$(printf '%b' "$link")"
+
+ link="${link%%#*}"
+ link="${link%%\?*}"
+
+ if [[ "$link" != "/" ]]; then
+ link="${link%/}"
+ fi
+
+ printf "%s" "$link"
+}
+
+canonicalize_path() {
+ local path="$1"
+ local result=()
+ local part
+ local parts
+
+ # Bash 3.2 compatible: use here-string
+ IFS='/' read -r -a parts <<< "$path"
+
+ for part in "${parts[@]}"; do
+ if [[ -z "$part" || "$part" == "." ]]; then
+ continue
+ elif [[ "$part" == ".." ]]; then
+ # Bash 3.2 compatible: calculate last index instead of using -1
+ if [[ ${#result[@]} -gt 0 ]]; then
+ local last_idx=$((${#result[@]} - 1))
+ unset "result[$last_idx]"
+ fi
+ else
+ result+=("$part")
+ fi
+ done
+
+ if [[ ${#result[@]} -eq 0 ]]; then
+ printf "/"
+ else
+ ( IFS='/'; printf "/%s" "${result[*]}" )
+ fi
+}
+
+resolve_real_path() {
+ local path="$1"
+
+ if command -v python3 >/dev/null 2>&1; then
+ # Use Python to compute realpath which resolves symlinks AND normalizes paths
+ # Python's os.path.realpath is tolerant of non-existent final targets
+ python3 - <<'PY' "$path"
+import os
+import sys
+p = sys.argv[1]
+print(os.path.realpath(p))
+PY
+ else
+ # Fallback: Normalize without symlink resolution if Python3 unavailable
+ # Note: This won't resolve symlinks, only normalize .. and . components
+ canonicalize_path "$path"
+ fi
+}
+
+check_internal_link() {
+ local link="$1"
+ local file="$2"
+ local line_no="$3"
+ local clean_link
+ local target_path
+ local location
+
+ clean_link="$(normalize_link "$link")"
+
+ [[ -z "$clean_link" || "$clean_link" == "#" ]] && return 0
+
+ if [[ "$clean_link" == "{{"* ]]; then
+ log_verbose "Skipping Hugo shortcode link: $link ($file:$line_no)"
+ return 0
+ fi
+
+ local clean_lower
+ clean_lower="$(printf "%s" "$clean_link" | tr '[:upper:]' '[:lower:]')"
+
+ if [[ "$clean_lower" == http://* || "$clean_lower" == https://* || "$clean_lower" == "//"* ]]; then
+ log_verbose "Skipping external link: $link ($file:$line_no)"
+ return 0
+ fi
+
+ case "$clean_lower" in
+ mailto:*|tel:*|javascript:*|data:*)
+ return 0
+ ;;
+ esac
+
+ if [[ "$clean_link" == /docs/* ]]; then
+ target_path="$CONTENT_ROOT/en${clean_link}"
+
+ elif [[ "$clean_link" == /cn/docs/* ]]; then
+ target_path="$CONTENT_ROOT${clean_link}"
+
+ elif [[ "$clean_link" == /community/* ]]; then
+ target_path="$CONTENT_ROOT/en${clean_link}"
+
+ elif [[ "$clean_link" == /blog/* || "$clean_link" == /cn/blog/* ]]; then
+ # Blog URLs are permalink-based and don't map 1:1 to content file paths.
+ # Skip deterministic filesystem validation for these routes.
+ log_verbose "Skipping permalink-based blog link: $link ($file:$line_no)"
+ return 0
+
+ elif [[ "$clean_link" == /language/* ]]; then
+ target_path="$CONTENT_ROOT/en${clean_link}"
+
+ elif [[ "$clean_link" == /clients/* ]]; then
+ target_path="$REPO_ROOT/static${clean_link}"
+
+ elif [[ "$clean_link" == /* ]]; then
+ location="$file"
+ [[ -n "$line_no" ]] && location="$file:$line_no"
+
+ echo "Error: Unsupported absolute internal path (cannot validate deterministically)"
+ echo " File: $location"
+ echo " Link: $link"
+
+ EXIT_CODE=1
+ return
+
+ else
+ local file_dir
+ file_dir="$(cd "$(dirname "$file")" && pwd)"
+ target_path="$file_dir/$clean_link"
+ fi
+
+ target_path="$(canonicalize_path "$target_path")"
+ target_path="$(resolve_real_path "$target_path")"
+
+ case "$target_path" in
+ "$CONTENT_ROOT"/*) ;;
+ "$REPO_ROOT/static"/*) ;;
+ *)
+ location="$file"
+ [[ -n "$line_no" ]] && location="$file:$line_no"
+ echo "Error: Link resolves outside content directory"
+ echo " File: $location"
+ echo " Link: $link"
+ EXIT_CODE=1
+ return
+ ;;
+ esac
+
+ if [[ "$clean_lower" =~ \.(${ASSET_EXTENSIONS_REGEX})$ ]]; then
+ if [[ -f "$target_path" ]]; then
+ return 0
+ else
+ location="$file"
+ [[ -n "$line_no" ]] && location="$file:$line_no"
+ echo "Error: Broken link"
+ echo " File: $location"
+ echo " Link: $link"
+ echo " Target: $target_path"
+ EXIT_CODE=1
+ return
+ fi
+ fi
+
+ if [[ -f "$target_path" || -f "$target_path.md" || -f "$target_path/_index.md" || -f "$target_path/README.md" ]]; then
+ return 0
+ fi
+
+ location="$file"
+ [[ -n "$line_no" ]] && location="$file:$line_no"
+
+ echo "Error: Broken link"
+ echo " File: $location"
+ echo " Link: $link"
+ echo " Target: $target_path"
+ EXIT_CODE=1
+}
+
+echo "Starting link validation..."
+
+while IFS= read -r FILE; do
+
+ CODE_LINES=""
+ in_fence=false
+ line_no=0
+
+ while IFS= read -r line || [[ -n "$line" ]]; do
+ ((++line_no))
+ # NOTE:
+ # Code fence detection is heuristic and does not validate proper pairing.
+ # The logic simply toggles state when encountering ``` or ~~~ markers.
+ # If a Markdown file contains an unclosed fence or mismatched fence types,
+ # all subsequent lines may be treated as code and skipped from validation.
+ # This behavior is intentional to keep the validator lightweight and
+ # avoids implementing a full Markdown parser. Such cases require manual review.
+ if [[ "$line" =~ ^[[:space:]]*(\`\`\`|~~~) ]]; then
+ # NOTE:
+ # Code fence detection assumes fences are properly paired.
+ # If a Markdown file contains an unclosed or mismatched fence,
+ # subsequent content may be treated as code and skipped.
+ # This script does not attempt full Markdown validation.
+
+ if $in_fence; then
+ in_fence=false
+ else
+ in_fence=true
+ fi
+ CODE_LINES="$CODE_LINES $line_no "
+ continue
+ fi
+
+ if $in_fence; then
+ CODE_LINES="$CODE_LINES $line_no "
+ continue
+ fi
+
+ # NOTE:
+ # Inline code detection is heuristic and intentionally simplistic.
+ # The logic assumes backticks are properly paired within a single line
+ # after removing escaped backticks. Malformed Markdown, complex inline
+ # constructs, or unusual escaping patterns may cause false positives
+ # or false negatives. This validator does not implement a full Markdown
+ # parser and therefore cannot guarantee perfect inline code detection.
+ escaped_line="${line//\\\`/}"
+ only_ticks="${escaped_line//[^\`]/}"
+ inline_count=${#only_ticks}
+ if (( inline_count % 2 == 1 )); then
+ CODE_LINES="$CODE_LINES $line_no "
+ fi
+
+ done < "$FILE"
+
+ while read -r MATCH || [[ -n "$MATCH" ]]; do
+ [[ -z "$MATCH" ]] && continue
+
+ LINE_NO="${MATCH%%:*}"
+ LINK_PART="${MATCH#*:}"
+
+ [[ "$CODE_LINES" == *" $LINE_NO "* ]] && continue
+
+ LINK="${LINK_PART#*](}"
+ LINK="${LINK%)}"
+
+ check_internal_link "$LINK" "$FILE" "$LINE_NO"
+ done < <(grep -n -oE '\]\([^)]+\)' "$FILE" || true)
+
+ unset CODE_LINES
+done < <(find "$CONTENT_ROOT" -type f -name "*.md" 2>/dev/null || true)
+
+if [[ $EXIT_CODE -eq 0 ]]; then
+ echo "Link validation passed!"
+else
+ echo "Link validation failed!"
+fi
+
+exit $EXIT_CODE
diff --git a/dist/validate-release.sh b/dist/validate-release.sh
index cf39fdc6f..f0f415069 100755
--- a/dist/validate-release.sh
+++ b/dist/validate-release.sh
@@ -3,7 +3,7 @@
# Apache HugeGraph Release Validation Script
################################################################################
#
-# This script validates Apache HugeGraph (Incubating) release packages:
+# This script validates Apache HugeGraph release packages:
# 1. Check package integrity (SHA512, GPG signatures)
# 2. Validate package names and required files
# 3. Check license compliance (ASF categories)
@@ -42,12 +42,12 @@ set -o nounset
# Configuration Constants
################################################################################
-readonly SCRIPT_VERSION="2.0.0"
+readonly SCRIPT_VERSION="2.2.0"
readonly SCRIPT_NAME=$(basename "$0")
# URLs
-readonly SVN_URL_PREFIX="https://dist.apache.org/repos/dist/dev/incubator/hugegraph"
-readonly KEYS_URL="https://downloads.apache.org/incubator/hugegraph/KEYS"
+readonly SVN_URL_PREFIX="https://dist.apache.org/repos/dist/dev/hugegraph"
+readonly KEYS_URL="https://downloads.apache.org/hugegraph/KEYS"
# Validation Rules
readonly MAX_FILE_SIZE="800k"
@@ -96,6 +96,7 @@ HUBBLE_STARTED=0
# Script execution time tracking
SCRIPT_START_TIME=0
+ENABLE_CLEANUP=0
################################################################################
# Helper Functions - Output & Logging
@@ -142,7 +143,7 @@ Examples:
${SCRIPT_NAME} --non-interactive 1.7.0 pengjunzhi
For more information, visit:
- https://github.com/apache/incubator-hugegraph-doc/tree/master/dist
+ https://github.com/apache/hugegraph-doc/tree/master/dist
EOF
}
@@ -265,6 +266,7 @@ setup_logging() {
local log_dir="${WORK_DIR}/logs"
mkdir -p "$log_dir"
LOG_FILE="$log_dir/validate-${RELEASE_VERSION}-$(date +%Y%m%d-%H%M%S).log"
+ ENABLE_CLEANUP=1
info "Logging to: ${LOG_FILE}"
log "INIT" "Starting validation for HugeGraph ${RELEASE_VERSION}"
@@ -348,6 +350,12 @@ find_package_dir() {
echo "$found"
}
+find_package_dir_silent() {
+ local pattern=$1
+ local base_dir=${2:-"${DIST_DIR}"}
+ find "$base_dir" -maxdepth 3 -type d -path "$pattern" 2>/dev/null | head -n1
+}
+
################################################################################
# Helper Functions - GPG & Signatures
################################################################################
@@ -399,12 +407,22 @@ import_and_trust_gpg_keys() {
# Validation Functions - Package Checks
################################################################################
-check_incubating_name() {
+check_package_name() {
local package=$1
TOTAL_CHECKS=$((TOTAL_CHECKS + 1))
- if [[ ! "$package" =~ "incubating" ]]; then
- collect_error "Package name '$package' should include 'incubating'"
+ if [[ "$package" != apache-hugegraph* ]]; then
+ collect_error "Package name '$package' should start with 'apache-hugegraph'"
+ return 1
+ fi
+
+ if [[ "$package" != *"${RELEASE_VERSION}"* ]]; then
+ collect_error "Package name '$package' does not include release version '${RELEASE_VERSION}'"
+ return 1
+ fi
+
+ if [[ "$package" =~ incubating ]]; then
+ collect_error "Package '$package' should not contain 'incubating' for post-graduation releases"
return 1
fi
@@ -414,7 +432,6 @@ check_incubating_name() {
check_required_files() {
local package=$1
- local require_disclaimer=${2:-true}
local has_error=0
if [[ ! -f "LICENSE" ]]; then
@@ -431,13 +448,6 @@ check_required_files() {
mark_check_passed
fi
- if [[ "$require_disclaimer" == "true" ]] && [[ ! -f "DISCLAIMER" ]]; then
- collect_error "Package '$package' missing DISCLAIMER file"
- has_error=1
- else
- mark_check_passed
- fi
-
return $has_error
}
@@ -812,8 +822,8 @@ validate_source_package() {
pushd "$package_dir" > /dev/null
# Run all checks
- check_incubating_name "$package_file"
- check_required_files "$package_file" true
+ check_package_name "$package_file"
+ check_required_files "$package_file"
check_license_categories "$package_file" "LICENSE NOTICE"
check_empty_files_and_dirs "$package_file"
check_file_sizes "$package_file" "$MAX_FILE_SIZE"
@@ -876,8 +886,8 @@ validate_binary_package() {
pushd "$package_dir" > /dev/null
# Run checks
- check_incubating_name "$package_file"
- check_required_files "$package_file" true
+ check_package_name "$package_file"
+ check_required_files "$package_file"
# Binary packages should have licenses directory
TOTAL_CHECKS=$((TOTAL_CHECKS + 1))
@@ -905,12 +915,16 @@ validate_binary_package() {
cleanup() {
local exit_code=$?
+ if [[ $ENABLE_CLEANUP -eq 0 ]]; then
+ return "$exit_code"
+ fi
+
log "CLEANUP" "Starting cleanup (exit code: $exit_code)"
# Stop running services
if [[ $SERVER_STARTED -eq 1 ]]; then
info "Stopping HugeGraph server..."
- local server_dir=$(find_package_dir "*hugegraph-incubating*src/hugegraph-server/*hugegraph*${RELEASE_VERSION}" 2>/dev/null || echo "")
+ local server_dir=$(find_package_dir_silent "*hugegraph*${RELEASE_VERSION}*src/hugegraph-server/*hugegraph-server*${RELEASE_VERSION}*")
if [[ -n "$server_dir" ]] && [[ -d "$server_dir" ]]; then
pushd "$server_dir" > /dev/null 2>&1
bin/stop-hugegraph.sh || true
@@ -1088,6 +1102,10 @@ main() {
ls -lh "${DIST_DIR}"
else
# Download from SVN
+ if ! svn ls "${SVN_URL_PREFIX}/${RELEASE_VERSION}" &>/dev/null; then
+ collect_error "Release version '${RELEASE_VERSION}' not found in TLP dist path: ${SVN_URL_PREFIX}/${RELEASE_VERSION}"
+ exit 1
+ fi
DIST_DIR="${WORK_DIR}/dist/${RELEASE_VERSION}"
info "Downloading from SVN to: ${DIST_DIR}"
@@ -1171,7 +1189,7 @@ main() {
####################################################
print_step 6 9 "Test Compiled Server Package"
- local server_dir=$(find_package_dir "*hugegraph-incubating*src/hugegraph-server/*hugegraph*${RELEASE_VERSION}")
+ local server_dir=$(find_package_dir "*hugegraph*${RELEASE_VERSION}*src/hugegraph-server/*hugegraph-server*${RELEASE_VERSION}*")
if [[ -n "$server_dir" ]]; then
info "Starting HugeGraph server from: $server_dir"
pushd "$server_dir" > /dev/null
@@ -1297,7 +1315,7 @@ main() {
print_step 9 9 "Test Binary Server & Toolchain"
# Test binary server
- local bin_server_dir=$(find_package_dir "*hugegraph-incubating*${RELEASE_VERSION}/*hugegraph-server-incubating*${RELEASE_VERSION}")
+ local bin_server_dir=$(find_package_dir "*hugegraph*${RELEASE_VERSION}/*hugegraph-server*${RELEASE_VERSION}*")
if [[ -n "$bin_server_dir" ]]; then
info "Testing binary server package..."
pushd "$bin_server_dir" > /dev/null
diff --git a/package.json b/package.json
index 960eee9bf..4b5628153 100644
--- a/package.json
+++ b/package.json
@@ -20,5 +20,8 @@
"autoprefixer": "^10.4.0",
"postcss": "^8.3.7",
"postcss-cli": "^9.0.2"
+ },
+ "dependencies": {
+ "mermaid": "^10.9.5"
}
}
diff --git a/static/client-go/index.html b/static/client-go/index.html
index 792935cd7..d72348fb1 100644
--- a/static/client-go/index.html
+++ b/static/client-go/index.html
@@ -1,7 +1,7 @@
-
-
+
+
diff --git a/themes/docsy/i18n/en.toml b/themes/docsy/i18n/en.toml
index 403a111e9..ee6e8ab90 100644
--- a/themes/docsy/i18n/en.toml
+++ b/themes/docsy/i18n/en.toml
@@ -36,7 +36,7 @@ other = "By"
[post_created]
other = "Created"
[post_last_mod]
-other = "Last modified"
+other = "Page last updated"
[post_edit_this]
other = "Edit this page"
[post_view_this]
diff --git a/themes/docsy/i18n/zh-cn.toml b/themes/docsy/i18n/zh-cn.toml
index cff701ff2..dfa3f6b1a 100644
--- a/themes/docsy/i18n/zh-cn.toml
+++ b/themes/docsy/i18n/zh-cn.toml
@@ -34,7 +34,7 @@ other = "By"
[post_created]
other = "创建"
[post_last_mod]
-other = "最后修改"
+other = "本页最后更新于"
[post_edit_this]
other = "编辑此页"
[post_create_child_page]
diff --git a/themes/docsy/i18n/zh.toml b/themes/docsy/i18n/zh.toml
index cff701ff2..dfa3f6b1a 100644
--- a/themes/docsy/i18n/zh.toml
+++ b/themes/docsy/i18n/zh.toml
@@ -34,7 +34,7 @@ other = "By"
[post_created]
other = "创建"
[post_last_mod]
-other = "最后修改"
+other = "本页最后更新于"
[post_edit_this]
other = "编辑此页"
[post_create_child_page]
diff --git a/themes/docsy/layouts/_default/single.html b/themes/docsy/layouts/_default/single.html
index b2e137af6..5417bebe1 100644
--- a/themes/docsy/layouts/_default/single.html
+++ b/themes/docsy/layouts/_default/single.html
@@ -1,5 +1,19 @@
{{ define "main" }}
-{{ with .Content }}
-{{ . }}
+
+
+
+
+
+ {{ with .Content }}{{ . }}{{ end }}
+ {{ partial "page-meta-lastmod.html" . }}
+
+
+
+ {{ partial "toc.html" . }}
+
+
{{ end }}
-{{ end }}
\ No newline at end of file
diff --git a/themes/docsy/layouts/partials/footer.html b/themes/docsy/layouts/partials/footer.html
index 82a8461a0..5c9a44641 100644
--- a/themes/docsy/layouts/partials/footer.html
+++ b/themes/docsy/layouts/partials/footer.html
@@ -6,25 +6,23 @@
-
Apache HugeGraph is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by the Apache Incubator. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF.
-
Copyright © {{ now.Year}} The Apache Software Foundation, Licensed under the Apache License Version 2.0
Apache, the names of Apache projects, and the feather logo are either registered trademarks or trademarks of the Apache Software Foundation in the United States and/or other countries.
+
Copyright © {{ now.Year}} The Apache Software Foundation, Licensed under the Apache License Version 2.0
+
Apache, the names of Apache projects, and the feather logo are either registered trademarks or trademarks of the Apache Software Foundation in the United States and/or other countries.
diff --git a/themes/docsy/layouts/partials/hooks/head-end.html b/themes/docsy/layouts/partials/hooks/head-end.html
index 1f89b7316..fcf73f0d4 100644
--- a/themes/docsy/layouts/partials/hooks/head-end.html
+++ b/themes/docsy/layouts/partials/hooks/head-end.html
@@ -3,7 +3,6 @@
{{ end }}
{{ if .Site.Params.mermaid.enable }}
-