diff --git a/.serena/project.yml b/.serena/project.yml
index d4fd762ad4..16ccb11693 100644
--- a/.serena/project.yml
+++ b/.serena/project.yml
@@ -103,23 +103,3 @@ default_modes:
# fixed set of tools to use as the base tool set (if non-empty), replacing Serena's default set of tools.
# This cannot be combined with non-empty excluded_tools or included_optional_tools.
fixed_tools: []
-
-# override of the corresponding setting in serena_config.yml, see the documentation there.
-# If null or missing, the value from the global config is used.
-symbol_info_budget:
-
-# The language backend to use for this project.
-# If not set, the global setting from serena_config.yml is used.
-# Valid values: LSP, JetBrains
-# Note: the backend is fixed at startup. If a project with a different backend
-# is activated post-init, an error will be returned.
-language_backend:
-
-# list of regex patterns which, when matched, mark a memory entry as read‑only.
-# Extends the list from the global configuration, merging the two lists.
-read_only_memory_patterns: []
-
-# line ending convention to use when writing source files.
-# Possible values: unset (use global setting), "lf", "crlf", or "native" (platform default)
-# This does not affect Serena's own files (e.g. memories and configuration files), which always use native line endings.
-line_ending:
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 7056afd978..f06c8beafa 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -19,13 +19,13 @@ Before submitting the code, we need to do some preparation:
1. Sign up or login to GitHub: [https://github.com](https://github.com)
-2. Fork HugeGraph repo from GitHub: [https://github.com/apache/hugegraph/fork](https://github.com/apache/hugegraph/fork)
+2. Fork HugeGraph repo from GitHub: [https://github.com/apache/incubator-hugegraph/fork](https://github.com/apache/incubator-hugegraph/fork)
-3. Clone code from fork repo to local: [https://github.com/${GITHUB_USER_NAME}/hugegraph](https://github.com/${GITHUB_USER_NAME}/hugegraph)
+3. Clone code from fork repo to local: [https://github.com/${GITHUB_USER_NAME}/incubator-hugegraph](https://github.com/${GITHUB_USER_NAME}/incubator-hugegraph)
```shell
# clone code from remote to local repo
- git clone https://github.com/${GITHUB_USER_NAME}/hugegraph.git hugegraph
+ git clone https://github.com/${GITHUB_USER_NAME}/incubator-hugegraph.git hugegraph
```
4. Configure local HugeGraph repo
@@ -34,7 +34,7 @@ Before submitting the code, we need to do some preparation:
cd hugegraph
# add upstream to synchronize the latest code
- git remote add hugegraph https://github.com/apache/hugegraph
+ git remote add hugegraph https://github.com/apache/incubator-hugegraph
# set name and email to push code to github
git config user.name "{full-name}" # like "Jermy Li"
@@ -43,7 +43,7 @@ Before submitting the code, we need to do some preparation:
## 2. Create an Issue on GitHub
-If you encounter bugs or have any questions, please go to [GitHub Issues](https://github.com/apache/hugegraph/issues) to report them and feel free to [create an issue](https://github.com/apache/hugegraph/issues/new).
+If you encounter bugs or have any questions, please go to [GitHub Issues](https://github.com/apache/incubator-hugegraph/issues) to report them and feel free to [create an issue](https://github.com/apache/incubator-hugegraph/issues/new).
## 3. Make changes of code locally
@@ -75,10 +75,10 @@ Note: Code style is defined by the `.editorconfig` file at the repository root.
##### 3.2.1 Check licenses
If we want to add new third-party dependencies to the `HugeGraph` project, we need to do the following things:
-1. Find the third-party dependent repository, put the dependent `license` file into [./install-dist/release-docs/licenses/](https://github.com/apache/hugegraph/tree/master/install-dist/release-docs/licenses) path.
-2. Declare the dependency in [./install-dist/release-docs/LICENSE](https://github.com/apache/hugegraph/blob/master/install-dist/release-docs/LICENSE) `LICENSE` information.
-3. Find the NOTICE file in the repository and append it to [./install-dist/release-docs/NOTICE](https://github.com/apache/hugegraph/blob/master/install-dist/release-docs/NOTICE) file (skip this step if there is no NOTICE file).
-4. Execute locally [./install-dist/scripts/dependency/regenerate_known_dependencies.sh](https://github.com/apache/hugegraph/blob/master/install-dist/scripts/dependency/regenerate_known_dependencies.sh) to update the dependency list [known-dependencies.txt](https://github.com/apache/hugegraph/blob/master/install-dist/scripts/dependency/known-dependencies.txt) (or manually update).
+1. Find the third-party dependent repository, put the dependent `license` file into [./hugegraph-dist/release-docs/licenses/](https://github.com/apache/incubator-hugegraph/tree/master/hugegraph-dist/release-docs/licenses) path.
+2. Declare the dependency in [./install-dist/release-docs/LICENSE](https://github.com/apache/incubator-hugegraph/blob/master/install-dist/release-docs/LICENSE) `LICENSE` information.
+3. Find the NOTICE file in the repository and append it to [./install-dist/release-docs/NOTICE](https://github.com/apache/incubator-hugegraph/blob/master/install-dist/release-docs/NOTICE) file (skip this step if there is no NOTICE file).
+4. Execute locally [./install-dist/scripts/dependency/regenerate_known_dependencies.sh](https://github.com/apache/incubator-hugegraph/blob/master/install-dist/scripts/dependency/regenerate_known_dependencies.sh) to update the dependency list [known-dependencies.txt](https://github.com/apache/incubator-hugegraph/blob/master/install-dist/scripts/dependency/known-dependencies.txt) (or manually update).
**Example**: A new third-party dependency is introduced into the project -> `ant-1.9.1.jar`
- The project source code is located at: https://github.com/apache/ant/tree/rel/1.9.1
diff --git a/DISCLAIMER b/DISCLAIMER
new file mode 100644
index 0000000000..be718eef3b
--- /dev/null
+++ b/DISCLAIMER
@@ -0,0 +1,7 @@
+Apache HugeGraph (incubating) is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by the Apache Incubator PMC.
+
+Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications,
+and decision making process have stabilized in a manner consistent with other successful ASF projects.
+
+While incubation status is not necessarily a reflection of the completeness or stability of the code,
+it does indicate that the project has yet to be fully endorsed by the ASF.
diff --git a/NOTICE b/NOTICE
index 8e48b813b8..aa6764af84 100644
--- a/NOTICE
+++ b/NOTICE
@@ -1,5 +1,5 @@
-Apache HugeGraph
-Copyright 2022-2026 The Apache Software Foundation
+Apache HugeGraph (incubating)
+Copyright 2022-2025 The Apache Software Foundation
This product includes software developed at
The Apache Software Foundation (http://www.apache.org/).
diff --git a/README.md b/README.md
index eba5d980ee..c027cda43f 100644
--- a/README.md
+++ b/README.md
@@ -7,8 +7,8 @@
[](https://www.apache.org/licenses/LICENSE-2.0.html)
-[](https://github.com/apache/hugegraph/actions/workflows/ci.yml)
-[](https://github.com/apache/hugegraph/actions/workflows/licence-checker.yml)
+[](https://github.com/apache/incubator-hugegraph/actions/workflows/ci.yml)
+[](https://github.com/apache/incubator-hugegraph/actions/workflows/licence-checker.yml)
[](https://github.com/apache/hugegraph/releases)
[](https://deepwiki.com/apache/hugegraph)
@@ -48,7 +48,7 @@ Complete **HugeGraph** ecosystem components:
2. **[hugegraph-computer](https://github.com/apache/hugegraph-computer)** - Integrated **graph computing** system
-3. **[hugegraph-ai](https://github.com/apache/hugegraph-ai)** - **Graph AI/LLM/Knowledge Graph** integration
+3. **[hugegraph-ai](https://github.com/apache/incubator-hugegraph-ai)** - **Graph AI/LLM/Knowledge Graph** integration
4. **[hugegraph-website](https://github.com/apache/hugegraph-doc)** - **Documentation & website** repository
@@ -223,17 +223,9 @@ Download pre-built packages from the [Download Page](https://hugegraph.apache.or
```bash
# Download and extract
-# For historical 1.7.0 and earlier releases, use the archive URL and
-# set PACKAGE=apache-hugegraph-incubating-{version} instead.
-BASE_URL="https://downloads.apache.org/hugegraph/{version}"
-PACKAGE="apache-hugegraph-{version}"
-# Historical alternative:
-# BASE_URL="https://archive.apache.org/dist/incubator/hugegraph/{version}"
-# PACKAGE="apache-hugegraph-incubating-{version}"
-
-wget ${BASE_URL}/${PACKAGE}.tar.gz
-tar -xzf ${PACKAGE}.tar.gz
-cd ${PACKAGE}
+wget https://downloads.apache.org/incubator/hugegraph/{version}/apache-hugegraph-incubating-{version}.tar.gz
+tar -xzf apache-hugegraph-incubating-{version}.tar.gz
+cd apache-hugegraph-incubating-{version}
# Initialize backend storage
bin/init-store.sh
@@ -371,7 +363,7 @@ Welcome to contribute to HugeGraph!
Thank you to all the contributors who have helped make HugeGraph better!
-[](https://github.com/apache/hugegraph/graphs/contributors)
+[](https://github.com/apache/incubator-hugegraph/graphs/contributors)
## License
diff --git a/docker/configs/application-pd0.yml b/docker/configs/application-pd0.yml
new file mode 100644
index 0000000000..6531cbafb2
--- /dev/null
+++ b/docker/configs/application-pd0.yml
@@ -0,0 +1,63 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+spring:
+ application:
+ name: hugegraph-pd
+
+management:
+ metrics:
+ export:
+ prometheus:
+ enabled: true
+ endpoints:
+ web:
+ exposure:
+ include: "*"
+
+logging:
+ config: 'file:./conf/log4j2.xml'
+license:
+ verify-path: ./conf/verify-license.json
+ license-path: ./conf/hugegraph.license
+grpc:
+ port: 8686
+ host: 127.0.0.1
+
+server:
+ port: 8620
+
+pd:
+ data-path: ./pd_data
+ patrol-interval: 1800
+ initial-store-count: 3
+ initial-store-list: 127.0.0.1:8500,127.0.0.1:8501,127.0.0.1:8502
+
+raft:
+ address: 127.0.0.1:8610
+ peers-list: 127.0.0.1:8610,127.0.0.1:8611,127.0.0.1:8612
+
+store:
+ max-down-time: 172800
+ monitor_data_enabled: true
+ monitor_data_interval: 1 minute
+ monitor_data_retention: 1 day
+ initial-store-count: 1
+
+partition:
+ default-shard-count: 1
+ store-max-shard-count: 12
diff --git a/docker/configs/application-pd1.yml b/docker/configs/application-pd1.yml
new file mode 100644
index 0000000000..0cf9f54297
--- /dev/null
+++ b/docker/configs/application-pd1.yml
@@ -0,0 +1,63 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+spring:
+ application:
+ name: hugegraph-pd
+
+management:
+ metrics:
+ export:
+ prometheus:
+ enabled: true
+ endpoints:
+ web:
+ exposure:
+ include: "*"
+
+logging:
+ config: 'file:./conf/log4j2.xml'
+license:
+ verify-path: ./conf/verify-license.json
+ license-path: ./conf/hugegraph.license
+grpc:
+ port: 8687
+ host: 127.0.0.1
+
+server:
+ port: 8621
+
+pd:
+ data-path: ./pd_data
+ patrol-interval: 1800
+ initial-store-count: 3
+ initial-store-list: 127.0.0.1:8500,127.0.0.1:8501,127.0.0.1:8502
+
+raft:
+ address: 127.0.0.1:8611
+ peers-list: 127.0.0.1:8610,127.0.0.1:8611,127.0.0.1:8612
+
+store:
+ max-down-time: 172800
+ monitor_data_enabled: true
+ monitor_data_interval: 1 minute
+ monitor_data_retention: 1 day
+ initial-store-count: 1
+
+partition:
+ default-shard-count: 1
+ store-max-shard-count: 12
diff --git a/docker/configs/application-pd2.yml b/docker/configs/application-pd2.yml
new file mode 100644
index 0000000000..a0d2c79ea3
--- /dev/null
+++ b/docker/configs/application-pd2.yml
@@ -0,0 +1,63 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+spring:
+ application:
+ name: hugegraph-pd
+
+management:
+ metrics:
+ export:
+ prometheus:
+ enabled: true
+ endpoints:
+ web:
+ exposure:
+ include: "*"
+
+logging:
+ config: 'file:./conf/log4j2.xml'
+license:
+ verify-path: ./conf/verify-license.json
+ license-path: ./conf/hugegraph.license
+grpc:
+ port: 8688
+ host: 127.0.0.1
+
+server:
+ port: 8622
+
+pd:
+ data-path: ./pd_data
+ patrol-interval: 1800
+ initial-store-count: 3
+ initial-store-list: 127.0.0.1:8500,127.0.0.1:8501,127.0.0.1:8502
+
+raft:
+ address: 127.0.0.1:8612
+ peers-list: 127.0.0.1:8610,127.0.0.1:8611,127.0.0.1:8612
+
+store:
+ max-down-time: 172800
+ monitor_data_enabled: true
+ monitor_data_interval: 1 minute
+ monitor_data_retention: 1 day
+ initial-store-count: 1
+
+partition:
+ default-shard-count: 1
+ store-max-shard-count: 12
diff --git a/docker/configs/application-store0.yml b/docker/configs/application-store0.yml
new file mode 100644
index 0000000000..d093f1bfbd
--- /dev/null
+++ b/docker/configs/application-store0.yml
@@ -0,0 +1,57 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+pdserver:
+ address: 127.0.0.1:8686,127.0.0.1:8687,127.0.0.1:8688
+
+management:
+ metrics:
+ export:
+ prometheus:
+ enabled: true
+ endpoints:
+ web:
+ exposure:
+ include: "*"
+
+grpc:
+ host: 127.0.0.1
+ port: 8500
+ netty-server:
+ max-inbound-message-size: 1000MB
+raft:
+ disruptorBufferSize: 1024
+ address: 127.0.0.1:8510
+ max-log-file-size: 600000000000
+ snapshotInterval: 1800
+server:
+ port: 8520
+
+app:
+ data-path: ./storage
+
+spring:
+ application:
+ name: store-node-grpc-server
+ profiles:
+ active: default
+ include: pd
+
+logging:
+ config: 'file:./conf/log4j2.xml'
+ level:
+ root: info
diff --git a/docker/configs/application-store1.yml b/docker/configs/application-store1.yml
new file mode 100644
index 0000000000..0aeba62cf6
--- /dev/null
+++ b/docker/configs/application-store1.yml
@@ -0,0 +1,57 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+pdserver:
+ address: 127.0.0.1:8686,127.0.0.1:8687,127.0.0.1:8688
+
+management:
+ metrics:
+ export:
+ prometheus:
+ enabled: true
+ endpoints:
+ web:
+ exposure:
+ include: "*"
+
+grpc:
+ host: 127.0.0.1
+ port: 8501
+ netty-server:
+ max-inbound-message-size: 1000MB
+raft:
+ disruptorBufferSize: 1024
+ address: 127.0.0.1:8511
+ max-log-file-size: 600000000000
+ snapshotInterval: 1800
+server:
+ port: 8521
+
+app:
+ data-path: ./storage
+
+spring:
+ application:
+ name: store-node-grpc-server
+ profiles:
+ active: default
+ include: pd
+
+logging:
+ config: 'file:./conf/log4j2.xml'
+ level:
+ root: info
diff --git a/docker/configs/application-store2.yml b/docker/configs/application-store2.yml
new file mode 100644
index 0000000000..e18dc62a3c
--- /dev/null
+++ b/docker/configs/application-store2.yml
@@ -0,0 +1,57 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+pdserver:
+ address: 127.0.0.1:8686,127.0.0.1:8687,127.0.0.1:8688
+
+management:
+ metrics:
+ export:
+ prometheus:
+ enabled: true
+ endpoints:
+ web:
+ exposure:
+ include: "*"
+
+grpc:
+ host: 127.0.0.1
+ port: 8502
+ netty-server:
+ max-inbound-message-size: 1000MB
+raft:
+ disruptorBufferSize: 1024
+ address: 127.0.0.1:8512
+ max-log-file-size: 600000000000
+ snapshotInterval: 1800
+server:
+ port: 8522
+
+app:
+ data-path: ./storage
+
+spring:
+ application:
+ name: store-node-grpc-server
+ profiles:
+ active: default
+ include: pd
+
+logging:
+ config: 'file:./conf/log4j2.xml'
+ level:
+ root: info
diff --git a/hugegraph-server/hugegraph-dist/src/assembly/static/bin/wait-partition.sh b/docker/configs/server1-conf/gremlin-driver-settings.yaml
old mode 100755
new mode 100644
similarity index 62%
rename from hugegraph-server/hugegraph-dist/src/assembly/static/bin/wait-partition.sh
rename to docker/configs/server1-conf/gremlin-driver-settings.yaml
index d14bc90244..2f60ff8379
--- a/hugegraph-server/hugegraph-dist/src/assembly/static/bin/wait-partition.sh
+++ b/docker/configs/server1-conf/gremlin-driver-settings.yaml
@@ -1,4 +1,3 @@
-#!/bin/bash
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
@@ -7,7 +6,7 @@
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
@@ -15,20 +14,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-set -euo pipefail
-
-: "${STORE_REST:?STORE_REST not set}"
-
-timeout "${WAIT_PARTITION_TIMEOUT_S:-120}s" bash -c '
-until curl -fsS "http://${STORE_REST}" 2>/dev/null | \
- grep -q "\"partitionCount\":[1-9]"
-do
- echo "Waiting for partition assignment..."
- sleep 5
-done
-'
-
-echo "Partitions detected:"
-URL="http://${STORE_REST}/v1/partitions"
-echo "$URL"
-curl -v "$URL"
+hosts: [localhost]
+port: 8181
+serializer: {
+ className: org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerV1d0,
+ config: {
+ serializeResultToString: false,
+ ioRegistries: [org.apache.hugegraph.io.HugeGraphIoRegistry]
+ }
+}
diff --git a/docker/configs/server1-conf/gremlin-server.yaml b/docker/configs/server1-conf/gremlin-server.yaml
new file mode 100644
index 0000000000..df73386b26
--- /dev/null
+++ b/docker/configs/server1-conf/gremlin-server.yaml
@@ -0,0 +1,127 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# host and port of gremlin server, need to be consistent with host and port in rest-server.properties
+host: 127.0.0.1
+port: 8181
+
+# timeout in ms of gremlin query
+evaluationTimeout: 30000
+
+channelizer: org.apache.tinkerpop.gremlin.server.channel.WsAndHttpChannelizer
+# don't set graph at here, this happens after support for dynamically adding graph
+graphs: {
+}
+scriptEngines: {
+ gremlin-groovy: {
+ staticImports: [
+ org.opencypher.gremlin.process.traversal.CustomPredicates.*',
+ org.opencypher.gremlin.traversal.CustomFunctions.*
+ ],
+ plugins: {
+ org.apache.hugegraph.plugin.HugeGraphGremlinPlugin: {},
+ org.apache.tinkerpop.gremlin.server.jsr223.GremlinServerGremlinPlugin: {},
+ org.apache.tinkerpop.gremlin.jsr223.ImportGremlinPlugin: {
+ classImports: [
+ java.lang.Math,
+ org.apache.hugegraph.backend.id.IdGenerator,
+ org.apache.hugegraph.type.define.Directions,
+ org.apache.hugegraph.type.define.NodeRole,
+ org.apache.hugegraph.masterelection.GlobalMasterInfo,
+ org.apache.hugegraph.util.DateUtil,
+ org.apache.hugegraph.traversal.algorithm.CollectionPathsTraverser,
+ org.apache.hugegraph.traversal.algorithm.CountTraverser,
+ org.apache.hugegraph.traversal.algorithm.CustomizedCrosspointsTraverser,
+ org.apache.hugegraph.traversal.algorithm.CustomizePathsTraverser,
+ org.apache.hugegraph.traversal.algorithm.FusiformSimilarityTraverser,
+ org.apache.hugegraph.traversal.algorithm.HugeTraverser,
+ org.apache.hugegraph.traversal.algorithm.JaccardSimilarTraverser,
+ org.apache.hugegraph.traversal.algorithm.KneighborTraverser,
+ org.apache.hugegraph.traversal.algorithm.KoutTraverser,
+ org.apache.hugegraph.traversal.algorithm.MultiNodeShortestPathTraverser,
+ org.apache.hugegraph.traversal.algorithm.NeighborRankTraverser,
+ org.apache.hugegraph.traversal.algorithm.PathsTraverser,
+ org.apache.hugegraph.traversal.algorithm.PersonalRankTraverser,
+ org.apache.hugegraph.traversal.algorithm.SameNeighborTraverser,
+ org.apache.hugegraph.traversal.algorithm.ShortestPathTraverser,
+ org.apache.hugegraph.traversal.algorithm.SingleSourceShortestPathTraverser,
+ org.apache.hugegraph.traversal.algorithm.SubGraphTraverser,
+ org.apache.hugegraph.traversal.algorithm.TemplatePathsTraverser,
+ org.apache.hugegraph.traversal.algorithm.steps.EdgeStep,
+ org.apache.hugegraph.traversal.algorithm.steps.RepeatEdgeStep,
+ org.apache.hugegraph.traversal.algorithm.steps.WeightedEdgeStep,
+ org.apache.hugegraph.traversal.optimize.ConditionP,
+ org.apache.hugegraph.traversal.optimize.Text,
+ org.apache.hugegraph.traversal.optimize.TraversalUtil,
+ org.opencypher.gremlin.traversal.CustomFunctions,
+ org.opencypher.gremlin.traversal.CustomPredicate
+ ],
+ methodImports: [
+ java.lang.Math#*,
+ org.opencypher.gremlin.traversal.CustomPredicate#*,
+ org.opencypher.gremlin.traversal.CustomFunctions#*
+ ]
+ },
+ org.apache.tinkerpop.gremlin.jsr223.ScriptFileGremlinPlugin: {
+ files: [scripts/empty-sample.groovy]
+ }
+ }
+ }
+}
+serializers:
+ - {className: org.apache.tinkerpop.gremlin.driver.ser.GraphBinaryMessageSerializerV1,
+ config: {
+ serializeResultToString: false,
+ ioRegistries: [org.apache.hugegraph.io.HugeGraphIoRegistry]
+ }
+ }
+ - {className: org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerV1d0,
+ config: {
+ serializeResultToString: false,
+ ioRegistries: [org.apache.hugegraph.io.HugeGraphIoRegistry]
+ }
+ }
+ - {className: org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerV2d0,
+ config: {
+ serializeResultToString: false,
+ ioRegistries: [org.apache.hugegraph.io.HugeGraphIoRegistry]
+ }
+ }
+ - {className: org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerV3d0,
+ config: {
+ serializeResultToString: false,
+ ioRegistries: [org.apache.hugegraph.io.HugeGraphIoRegistry]
+ }
+ }
+metrics: {
+ consoleReporter: {enabled: false, interval: 180000},
+ csvReporter: {enabled: false, interval: 180000, fileName: ./metrics/gremlin-server-metrics.csv},
+ jmxReporter: {enabled: false},
+ slf4jReporter: {enabled: false, interval: 180000},
+ gangliaReporter: {enabled: false, interval: 180000, addressingMode: MULTICAST},
+ graphiteReporter: {enabled: false, interval: 180000}
+}
+maxInitialLineLength: 4096
+maxHeaderSize: 8192
+maxChunkSize: 8192
+maxContentLength: 65536
+maxAccumulationBufferComponents: 1024
+resultIterationBatchSize: 64
+writeBufferLowWaterMark: 32768
+writeBufferHighWaterMark: 65536
+ssl: {
+ enabled: false
+}
diff --git a/docker/configs/server1-conf/log4j2.xml b/docker/configs/server1-conf/log4j2.xml
new file mode 100644
index 0000000000..f1dd7e8395
--- /dev/null
+++ b/docker/configs/server1-conf/log4j2.xml
@@ -0,0 +1,144 @@
+
+
+
+
+
+ logs
+ hugegraph-server
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/docker/configs/server1-conf/remote-objects.yaml b/docker/configs/server1-conf/remote-objects.yaml
new file mode 100644
index 0000000000..94ebc99190
--- /dev/null
+++ b/docker/configs/server1-conf/remote-objects.yaml
@@ -0,0 +1,30 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+hosts: [localhost]
+port: 8181
+serializer: {
+ className: org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerV1d0,
+ config: {
+ serializeResultToString: false,
+ # The duplication of HugeGraphIoRegistry is meant to fix a bug in the
+ # 'org.apache.tinkerpop.gremlin.driver.Settings:from(Configuration)' method.
+ ioRegistries: [
+ org.apache.hugegraph.io.HugeGraphIoRegistry,
+ org.apache.hugegraph.io.HugeGraphIoRegistry
+ ]
+ }
+}
diff --git a/docker/configs/server1-conf/remote.yaml b/docker/configs/server1-conf/remote.yaml
new file mode 100644
index 0000000000..2f60ff8379
--- /dev/null
+++ b/docker/configs/server1-conf/remote.yaml
@@ -0,0 +1,25 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+hosts: [localhost]
+port: 8181
+serializer: {
+ className: org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerV1d0,
+ config: {
+ serializeResultToString: false,
+ ioRegistries: [org.apache.hugegraph.io.HugeGraphIoRegistry]
+ }
+}
diff --git a/docker/configs/server1-conf/rest-server.properties b/docker/configs/server1-conf/rest-server.properties
new file mode 100644
index 0000000000..fce537bb1c
--- /dev/null
+++ b/docker/configs/server1-conf/rest-server.properties
@@ -0,0 +1,29 @@
+# bind url
+restserver.url=127.0.0.1:8081
+# gremlin server url, need to be consistent with host and port in gremlin-server.yaml
+gremlinserver.url=127.0.0.1:8181
+
+graphs=./conf/graphs
+
+# configuration of arthas
+arthas.telnet_port=8562
+arthas.http_port=8561
+arthas.ip=127.0.0.1
+arthas.disabled_commands=jad
+
+# authentication configs
+# choose 'org.apache.hugegraph.auth.StandardAuthenticator' or a custom implementation
+#auth.authenticator=
+# for admin password; by default it is 'pa' and takes effect upon the first startup
+#auth.admin_pa=pa
+
+# rpc server configs for multi graph-servers or raft-servers
+rpc.server_host=127.0.0.1
+rpc.server_port=8091
+
+# lightweight load balancing (beta)
+server.id=server-1
+server.role=master
+
+# slow query log
+log.slow_query_threshold=1000
diff --git a/docker/configs/server2-conf/gremlin-driver-settings.yaml b/docker/configs/server2-conf/gremlin-driver-settings.yaml
new file mode 100644
index 0000000000..55f38ab97d
--- /dev/null
+++ b/docker/configs/server2-conf/gremlin-driver-settings.yaml
@@ -0,0 +1,25 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+hosts: [localhost]
+port: 8182
+serializer: {
+ className: org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerV1d0,
+ config: {
+ serializeResultToString: false,
+ ioRegistries: [org.apache.hugegraph.io.HugeGraphIoRegistry]
+ }
+}
diff --git a/docker/configs/server2-conf/gremlin-server.yaml b/docker/configs/server2-conf/gremlin-server.yaml
new file mode 100644
index 0000000000..048dded559
--- /dev/null
+++ b/docker/configs/server2-conf/gremlin-server.yaml
@@ -0,0 +1,127 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# host and port of gremlin server, need to be consistent with host and port in rest-server.properties
+host: 127.0.0.1
+port: 8182
+
+# timeout in ms of gremlin query
+evaluationTimeout: 30000
+
+channelizer: org.apache.tinkerpop.gremlin.server.channel.WsAndHttpChannelizer
+# don't set graphs here; graphs are added dynamically now that dynamic graph adding is supported
+graphs: {
+}
+scriptEngines: {
+ gremlin-groovy: {
+ staticImports: [
+      org.opencypher.gremlin.process.traversal.CustomPredicates.*,
+ org.opencypher.gremlin.traversal.CustomFunctions.*
+ ],
+ plugins: {
+ org.apache.hugegraph.plugin.HugeGraphGremlinPlugin: {},
+ org.apache.tinkerpop.gremlin.server.jsr223.GremlinServerGremlinPlugin: {},
+ org.apache.tinkerpop.gremlin.jsr223.ImportGremlinPlugin: {
+ classImports: [
+ java.lang.Math,
+ org.apache.hugegraph.backend.id.IdGenerator,
+ org.apache.hugegraph.type.define.Directions,
+ org.apache.hugegraph.type.define.NodeRole,
+ org.apache.hugegraph.masterelection.GlobalMasterInfo,
+ org.apache.hugegraph.util.DateUtil,
+ org.apache.hugegraph.traversal.algorithm.CollectionPathsTraverser,
+ org.apache.hugegraph.traversal.algorithm.CountTraverser,
+ org.apache.hugegraph.traversal.algorithm.CustomizedCrosspointsTraverser,
+ org.apache.hugegraph.traversal.algorithm.CustomizePathsTraverser,
+ org.apache.hugegraph.traversal.algorithm.FusiformSimilarityTraverser,
+ org.apache.hugegraph.traversal.algorithm.HugeTraverser,
+ org.apache.hugegraph.traversal.algorithm.JaccardSimilarTraverser,
+ org.apache.hugegraph.traversal.algorithm.KneighborTraverser,
+ org.apache.hugegraph.traversal.algorithm.KoutTraverser,
+ org.apache.hugegraph.traversal.algorithm.MultiNodeShortestPathTraverser,
+ org.apache.hugegraph.traversal.algorithm.NeighborRankTraverser,
+ org.apache.hugegraph.traversal.algorithm.PathsTraverser,
+ org.apache.hugegraph.traversal.algorithm.PersonalRankTraverser,
+ org.apache.hugegraph.traversal.algorithm.SameNeighborTraverser,
+ org.apache.hugegraph.traversal.algorithm.ShortestPathTraverser,
+ org.apache.hugegraph.traversal.algorithm.SingleSourceShortestPathTraverser,
+ org.apache.hugegraph.traversal.algorithm.SubGraphTraverser,
+ org.apache.hugegraph.traversal.algorithm.TemplatePathsTraverser,
+ org.apache.hugegraph.traversal.algorithm.steps.EdgeStep,
+ org.apache.hugegraph.traversal.algorithm.steps.RepeatEdgeStep,
+ org.apache.hugegraph.traversal.algorithm.steps.WeightedEdgeStep,
+ org.apache.hugegraph.traversal.optimize.ConditionP,
+ org.apache.hugegraph.traversal.optimize.Text,
+ org.apache.hugegraph.traversal.optimize.TraversalUtil,
+ org.opencypher.gremlin.traversal.CustomFunctions,
+ org.opencypher.gremlin.traversal.CustomPredicate
+ ],
+ methodImports: [
+ java.lang.Math#*,
+ org.opencypher.gremlin.traversal.CustomPredicate#*,
+ org.opencypher.gremlin.traversal.CustomFunctions#*
+ ]
+ },
+ org.apache.tinkerpop.gremlin.jsr223.ScriptFileGremlinPlugin: {
+ files: [scripts/empty-sample.groovy]
+ }
+ }
+ }
+}
+serializers:
+ - {className: org.apache.tinkerpop.gremlin.driver.ser.GraphBinaryMessageSerializerV1,
+ config: {
+ serializeResultToString: false,
+ ioRegistries: [org.apache.hugegraph.io.HugeGraphIoRegistry]
+ }
+ }
+ - {className: org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerV1d0,
+ config: {
+ serializeResultToString: false,
+ ioRegistries: [org.apache.hugegraph.io.HugeGraphIoRegistry]
+ }
+ }
+ - {className: org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerV2d0,
+ config: {
+ serializeResultToString: false,
+ ioRegistries: [org.apache.hugegraph.io.HugeGraphIoRegistry]
+ }
+ }
+ - {className: org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerV3d0,
+ config: {
+ serializeResultToString: false,
+ ioRegistries: [org.apache.hugegraph.io.HugeGraphIoRegistry]
+ }
+ }
+metrics: {
+ consoleReporter: {enabled: false, interval: 180000},
+ csvReporter: {enabled: false, interval: 180000, fileName: ./metrics/gremlin-server-metrics.csv},
+ jmxReporter: {enabled: false},
+ slf4jReporter: {enabled: false, interval: 180000},
+ gangliaReporter: {enabled: false, interval: 180000, addressingMode: MULTICAST},
+ graphiteReporter: {enabled: false, interval: 180000}
+}
+maxInitialLineLength: 4096
+maxHeaderSize: 8192
+maxChunkSize: 8192
+maxContentLength: 65536
+maxAccumulationBufferComponents: 1024
+resultIterationBatchSize: 64
+writeBufferLowWaterMark: 32768
+writeBufferHighWaterMark: 65536
+ssl: {
+ enabled: false
+}
diff --git a/docker/configs/server2-conf/log4j2.xml b/docker/configs/server2-conf/log4j2.xml
new file mode 100644
index 0000000000..f1dd7e8395
--- /dev/null
+++ b/docker/configs/server2-conf/log4j2.xml
@@ -0,0 +1,144 @@
+
+
+
+
+
+ logs
+ hugegraph-server
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/docker/configs/server2-conf/remote-objects.yaml b/docker/configs/server2-conf/remote-objects.yaml
new file mode 100644
index 0000000000..39679d8c30
--- /dev/null
+++ b/docker/configs/server2-conf/remote-objects.yaml
@@ -0,0 +1,30 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+hosts: [localhost]
+port: 8182
+serializer: {
+ className: org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerV1d0,
+ config: {
+ serializeResultToString: false,
+ # The duplication of HugeGraphIoRegistry is meant to fix a bug in the
+ # 'org.apache.tinkerpop.gremlin.driver.Settings:from(Configuration)' method.
+ ioRegistries: [
+ org.apache.hugegraph.io.HugeGraphIoRegistry,
+ org.apache.hugegraph.io.HugeGraphIoRegistry
+ ]
+ }
+}
diff --git a/docker/configs/server2-conf/remote.yaml b/docker/configs/server2-conf/remote.yaml
new file mode 100644
index 0000000000..55f38ab97d
--- /dev/null
+++ b/docker/configs/server2-conf/remote.yaml
@@ -0,0 +1,25 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+hosts: [localhost]
+port: 8182
+serializer: {
+ className: org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerV1d0,
+ config: {
+ serializeResultToString: false,
+ ioRegistries: [org.apache.hugegraph.io.HugeGraphIoRegistry]
+ }
+}
diff --git a/docker/configs/server2-conf/rest-server.properties b/docker/configs/server2-conf/rest-server.properties
new file mode 100644
index 0000000000..0e296b17b4
--- /dev/null
+++ b/docker/configs/server2-conf/rest-server.properties
@@ -0,0 +1,27 @@
+# bind url
+restserver.url=127.0.0.1:8082
+# gremlin server url, needs to be consistent with the host and port in gremlin-server.yaml
+gremlinserver.url=127.0.0.1:8182
+
+graphs=./conf/graphs
+
+# configuration of arthas
+arthas.telnet_port=8572
+arthas.http_port=8571
+arthas.ip=127.0.0.1
+arthas.disabled_commands=jad
+
+# authentication configs
+# choose 'org.apache.hugegraph.auth.StandardAuthenticator' or a custom implementation
+#auth.authenticator=
+# the admin password; by default it is 'pa' and takes effect upon the first startup
+#auth.admin_pa=pa
+
+# rpc server configs for multi graph-servers or raft-servers
+rpc.server_host=127.0.0.1
+rpc.server_port=8092
+#rpc.server_timeout=30
+
+# lightweight load balancing (beta)
+server.id=server-2
+server.role=worker
diff --git a/docker/configs/server3-conf/gremlin-driver-settings.yaml b/docker/configs/server3-conf/gremlin-driver-settings.yaml
new file mode 100644
index 0000000000..00ef046699
--- /dev/null
+++ b/docker/configs/server3-conf/gremlin-driver-settings.yaml
@@ -0,0 +1,25 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+hosts: [localhost]
+port: 8183
+serializer: {
+ className: org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerV1d0,
+ config: {
+ serializeResultToString: false,
+ ioRegistries: [org.apache.hugegraph.io.HugeGraphIoRegistry]
+ }
+}
diff --git a/docker/configs/server3-conf/gremlin-server.yaml b/docker/configs/server3-conf/gremlin-server.yaml
new file mode 100644
index 0000000000..e153926bc9
--- /dev/null
+++ b/docker/configs/server3-conf/gremlin-server.yaml
@@ -0,0 +1,127 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# host and port of gremlin server, need to be consistent with host and port in rest-server.properties
+host: 127.0.0.1
+port: 8183
+
+# timeout in ms of gremlin query
+evaluationTimeout: 30000
+
+channelizer: org.apache.tinkerpop.gremlin.server.channel.WsAndHttpChannelizer
+# don't set graphs here; graphs are added dynamically now that dynamic graph adding is supported
+graphs: {
+}
+scriptEngines: {
+ gremlin-groovy: {
+ staticImports: [
+      org.opencypher.gremlin.process.traversal.CustomPredicates.*,
+ org.opencypher.gremlin.traversal.CustomFunctions.*
+ ],
+ plugins: {
+ org.apache.hugegraph.plugin.HugeGraphGremlinPlugin: {},
+ org.apache.tinkerpop.gremlin.server.jsr223.GremlinServerGremlinPlugin: {},
+ org.apache.tinkerpop.gremlin.jsr223.ImportGremlinPlugin: {
+ classImports: [
+ java.lang.Math,
+ org.apache.hugegraph.backend.id.IdGenerator,
+ org.apache.hugegraph.type.define.Directions,
+ org.apache.hugegraph.type.define.NodeRole,
+ org.apache.hugegraph.masterelection.GlobalMasterInfo,
+ org.apache.hugegraph.util.DateUtil,
+ org.apache.hugegraph.traversal.algorithm.CollectionPathsTraverser,
+ org.apache.hugegraph.traversal.algorithm.CountTraverser,
+ org.apache.hugegraph.traversal.algorithm.CustomizedCrosspointsTraverser,
+ org.apache.hugegraph.traversal.algorithm.CustomizePathsTraverser,
+ org.apache.hugegraph.traversal.algorithm.FusiformSimilarityTraverser,
+ org.apache.hugegraph.traversal.algorithm.HugeTraverser,
+ org.apache.hugegraph.traversal.algorithm.JaccardSimilarTraverser,
+ org.apache.hugegraph.traversal.algorithm.KneighborTraverser,
+ org.apache.hugegraph.traversal.algorithm.KoutTraverser,
+ org.apache.hugegraph.traversal.algorithm.MultiNodeShortestPathTraverser,
+ org.apache.hugegraph.traversal.algorithm.NeighborRankTraverser,
+ org.apache.hugegraph.traversal.algorithm.PathsTraverser,
+ org.apache.hugegraph.traversal.algorithm.PersonalRankTraverser,
+ org.apache.hugegraph.traversal.algorithm.SameNeighborTraverser,
+ org.apache.hugegraph.traversal.algorithm.ShortestPathTraverser,
+ org.apache.hugegraph.traversal.algorithm.SingleSourceShortestPathTraverser,
+ org.apache.hugegraph.traversal.algorithm.SubGraphTraverser,
+ org.apache.hugegraph.traversal.algorithm.TemplatePathsTraverser,
+ org.apache.hugegraph.traversal.algorithm.steps.EdgeStep,
+ org.apache.hugegraph.traversal.algorithm.steps.RepeatEdgeStep,
+ org.apache.hugegraph.traversal.algorithm.steps.WeightedEdgeStep,
+ org.apache.hugegraph.traversal.optimize.ConditionP,
+ org.apache.hugegraph.traversal.optimize.Text,
+ org.apache.hugegraph.traversal.optimize.TraversalUtil,
+ org.opencypher.gremlin.traversal.CustomFunctions,
+ org.opencypher.gremlin.traversal.CustomPredicate
+ ],
+ methodImports: [
+ java.lang.Math#*,
+ org.opencypher.gremlin.traversal.CustomPredicate#*,
+ org.opencypher.gremlin.traversal.CustomFunctions#*
+ ]
+ },
+ org.apache.tinkerpop.gremlin.jsr223.ScriptFileGremlinPlugin: {
+ files: [scripts/empty-sample.groovy]
+ }
+ }
+ }
+}
+serializers:
+ - {className: org.apache.tinkerpop.gremlin.driver.ser.GraphBinaryMessageSerializerV1,
+ config: {
+ serializeResultToString: false,
+ ioRegistries: [org.apache.hugegraph.io.HugeGraphIoRegistry]
+ }
+ }
+ - {className: org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerV1d0,
+ config: {
+ serializeResultToString: false,
+ ioRegistries: [org.apache.hugegraph.io.HugeGraphIoRegistry]
+ }
+ }
+ - {className: org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerV2d0,
+ config: {
+ serializeResultToString: false,
+ ioRegistries: [org.apache.hugegraph.io.HugeGraphIoRegistry]
+ }
+ }
+ - {className: org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerV3d0,
+ config: {
+ serializeResultToString: false,
+ ioRegistries: [org.apache.hugegraph.io.HugeGraphIoRegistry]
+ }
+ }
+metrics: {
+ consoleReporter: {enabled: false, interval: 180000},
+ csvReporter: {enabled: false, interval: 180000, fileName: ./metrics/gremlin-server-metrics.csv},
+ jmxReporter: {enabled: false},
+ slf4jReporter: {enabled: false, interval: 180000},
+ gangliaReporter: {enabled: false, interval: 180000, addressingMode: MULTICAST},
+ graphiteReporter: {enabled: false, interval: 180000}
+}
+maxInitialLineLength: 4096
+maxHeaderSize: 8192
+maxChunkSize: 8192
+maxContentLength: 65536
+maxAccumulationBufferComponents: 1024
+resultIterationBatchSize: 64
+writeBufferLowWaterMark: 32768
+writeBufferHighWaterMark: 65536
+ssl: {
+ enabled: false
+}
diff --git a/docker/configs/server3-conf/log4j2.xml b/docker/configs/server3-conf/log4j2.xml
new file mode 100644
index 0000000000..f1dd7e8395
--- /dev/null
+++ b/docker/configs/server3-conf/log4j2.xml
@@ -0,0 +1,144 @@
+
+
+
+
+
+ logs
+ hugegraph-server
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/docker/configs/server3-conf/remote-objects.yaml b/docker/configs/server3-conf/remote-objects.yaml
new file mode 100644
index 0000000000..ce99fcb2f6
--- /dev/null
+++ b/docker/configs/server3-conf/remote-objects.yaml
@@ -0,0 +1,30 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+hosts: [localhost]
+port: 8183
+serializer: {
+ className: org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerV1d0,
+ config: {
+ serializeResultToString: false,
+ # The duplication of HugeGraphIoRegistry is meant to fix a bug in the
+ # 'org.apache.tinkerpop.gremlin.driver.Settings:from(Configuration)' method.
+ ioRegistries: [
+ org.apache.hugegraph.io.HugeGraphIoRegistry,
+ org.apache.hugegraph.io.HugeGraphIoRegistry
+ ]
+ }
+}
diff --git a/docker/configs/server3-conf/remote.yaml b/docker/configs/server3-conf/remote.yaml
new file mode 100644
index 0000000000..00ef046699
--- /dev/null
+++ b/docker/configs/server3-conf/remote.yaml
@@ -0,0 +1,25 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+hosts: [localhost]
+port: 8183
+serializer: {
+ className: org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerV1d0,
+ config: {
+ serializeResultToString: false,
+ ioRegistries: [org.apache.hugegraph.io.HugeGraphIoRegistry]
+ }
+}
diff --git a/docker/configs/server3-conf/rest-server.properties b/docker/configs/server3-conf/rest-server.properties
new file mode 100644
index 0000000000..f628dc61b4
--- /dev/null
+++ b/docker/configs/server3-conf/rest-server.properties
@@ -0,0 +1,26 @@
+# bind url
+restserver.url=127.0.0.1:8083
+# gremlin server url, needs to be consistent with the host and port in gremlin-server.yaml
+gremlinserver.url=127.0.0.1:8183
+
+graphs=./conf/graphs
+
+# configuration of arthas
+arthas.telnet_port=8582
+arthas.http_port=8581
+arthas.ip=127.0.0.1
+arthas.disabled_commands=jad
+
+# authentication configs
+# choose 'org.apache.hugegraph.auth.StandardAuthenticator' or a custom implementation
+#auth.authenticator=
+# the admin password; by default it is 'pa' and takes effect upon the first startup
+#auth.admin_pa=pa
+
+# rpc server configs for multi graph-servers or raft-servers
+rpc.server_host=127.0.0.1
+rpc.server_port=8093
+
+# lightweight load balancing (beta)
+server.id=server-3
+server.role=worker
diff --git a/docker/docker-compose-3pd-3store-3server.yml b/docker/docker-compose-3pd-3store-3server.yml
index 26610db01f..f704c1c0f6 100644
--- a/docker/docker-compose-3pd-3store-3server.yml
+++ b/docker/docker-compose-3pd-3store-3server.yml
@@ -15,210 +15,166 @@
# limitations under the License.
#
-name: hugegraph-3x3
-
-networks:
- hg-net:
- driver: bridge
-
-volumes:
- hg-pd0-data:
- hg-pd1-data:
- hg-pd2-data:
- hg-store0-data:
- hg-store1-data:
- hg-store2-data:
-
-# ── Shared service defaults ──────────────────────────────────────────
-# TODO: remove volume mounts below once images are published with new entrypoints
-x-pd-common: &pd-common
- image: hugegraph/pd:${HUGEGRAPH_VERSION:-latest}
- pull_policy: missing
- restart: unless-stopped
- networks: [hg-net]
- entrypoint: ["/hugegraph-pd/docker-entrypoint.sh"]
- healthcheck:
- test: ["CMD-SHELL", "curl -fsS http://localhost:8620/v1/health >/dev/null || exit 1"]
- interval: 15s
- timeout: 10s
- retries: 30
- start_period: 120s
-
-x-store-common: &store-common
- image: hugegraph/store:${HUGEGRAPH_VERSION:-latest}
- pull_policy: missing
- restart: unless-stopped
- networks: [hg-net]
- depends_on:
- pd0: { condition: service_healthy }
- pd1: { condition: service_healthy }
- pd2: { condition: service_healthy }
- entrypoint: ["/hugegraph-store/docker-entrypoint.sh"]
- healthcheck:
- test: ["CMD-SHELL", "curl -fsS http://localhost:8520/v1/health >/dev/null || exit 1"]
- interval: 15s
- timeout: 15s
- retries: 40
- start_period: 120s
-
-x-server-common: &server-common
- image: hugegraph/server:${HUGEGRAPH_VERSION:-latest}
- pull_policy: missing
- restart: unless-stopped
- networks: [hg-net]
- depends_on:
- store0: { condition: service_healthy }
- store1: { condition: service_healthy }
- store2: { condition: service_healthy }
- entrypoint: ["/hugegraph-server/docker-entrypoint.sh"]
- environment:
- STORE_REST: store0:8520
- HG_SERVER_BACKEND: hstore
- HG_SERVER_PD_PEERS: pd0:8686,pd1:8686,pd2:8686
- healthcheck:
- test: ["CMD-SHELL", "curl -fsS http://localhost:8080/versions >/dev/null || exit 1"]
- interval: 10s
- timeout: 5s
- retries: 30
- start_period: 60s
-
-# ── Services ──────────────────────────────────────────────────────────
+# TODO: reuse the same config files for containers of the same type
+# Users can adjust the number of nodes and the ports themselves
+version: "3"
services:
- # --- PD cluster (3 nodes) ---
pd0:
- <<: *pd-common
- container_name: hg-pd0
+ image: hugegraph/pd
+ container_name: pd0
hostname: pd0
- networks: [ hg-net ]
- environment:
- HG_PD_GRPC_HOST: pd0
- HG_PD_GRPC_PORT: "8686"
- HG_PD_REST_PORT: "8620"
- HG_PD_RAFT_ADDRESS: pd0:8610
- HG_PD_RAFT_PEERS_LIST: pd0:8610,pd1:8610,pd2:8610
- HG_PD_INITIAL_STORE_LIST: store0:8500,store1:8500,store2:8500
- HG_PD_DATA_PATH: /hugegraph-pd/pd_data
- HG_PD_INITIAL_STORE_COUNT: 3
- ports: ["8620:8620", "8686:8686"]
+ network_mode: host
+ healthcheck:
+ test: ["CMD", "curl", "-f", "http://localhost:8620"]
+ interval: 10s
+ timeout: 5s
+ retries: 3
volumes:
- - hg-pd0-data:/hugegraph-pd/pd_data
- - ../hugegraph-pd/hg-pd-dist/docker/docker-entrypoint.sh:/hugegraph-pd/docker-entrypoint.sh
+ - ./configs/application-pd0.yml:/hugegraph-pd/conf/application.yml
pd1:
- <<: *pd-common
- container_name: hg-pd1
+ image: hugegraph/pd
+ container_name: pd1
hostname: pd1
- networks: [ hg-net ]
- environment:
- HG_PD_GRPC_HOST: pd1
- HG_PD_GRPC_PORT: "8686"
- HG_PD_REST_PORT: "8620"
- HG_PD_RAFT_ADDRESS: pd1:8610
- HG_PD_RAFT_PEERS_LIST: pd0:8610,pd1:8610,pd2:8610
- HG_PD_INITIAL_STORE_LIST: store0:8500,store1:8500,store2:8500
- HG_PD_DATA_PATH: /hugegraph-pd/pd_data
- HG_PD_INITIAL_STORE_COUNT: 3
- ports: ["8621:8620", "8687:8686"]
+ network_mode: host
+ healthcheck:
+ test: ["CMD", "curl", "-f", "http://localhost:8621"]
+ interval: 10s
+ timeout: 5s
+ retries: 3
volumes:
- - hg-pd1-data:/hugegraph-pd/pd_data
- - ../hugegraph-pd/hg-pd-dist/docker/docker-entrypoint.sh:/hugegraph-pd/docker-entrypoint.sh
+ - ./configs/application-pd1.yml:/hugegraph-pd/conf/application.yml
pd2:
- <<: *pd-common
- container_name: hg-pd2
+ image: hugegraph/pd
+ container_name: pd2
hostname: pd2
- networks: [ hg-net ]
- environment:
- HG_PD_GRPC_HOST: pd2
- HG_PD_GRPC_PORT: "8686"
- HG_PD_REST_PORT: "8620"
- HG_PD_RAFT_ADDRESS: pd2:8610
- HG_PD_RAFT_PEERS_LIST: pd0:8610,pd1:8610,pd2:8610
- HG_PD_INITIAL_STORE_LIST: store0:8500,store1:8500,store2:8500
- HG_PD_DATA_PATH: /hugegraph-pd/pd_data
- HG_PD_INITIAL_STORE_COUNT: 3
- ports: ["8622:8620", "8688:8686"]
+ network_mode: host
+ healthcheck:
+ test: ["CMD", "curl", "-f", "http://localhost:8622"]
+ interval: 10s
+ timeout: 5s
+ retries: 3
volumes:
- - hg-pd2-data:/hugegraph-pd/pd_data
- - ../hugegraph-pd/hg-pd-dist/docker/docker-entrypoint.sh:/hugegraph-pd/docker-entrypoint.sh
+ - ./configs/application-pd2.yml:/hugegraph-pd/conf/application.yml
- # --- Store cluster (3 nodes) ---
store0:
- <<: *store-common
- container_name: hg-store0
+ image: hugegraph/store
+ container_name: store0
hostname: store0
- environment:
- HG_STORE_PD_ADDRESS: pd0:8686,pd1:8686,pd2:8686
- HG_STORE_GRPC_HOST: store0
- HG_STORE_GRPC_PORT: "8500"
- HG_STORE_REST_PORT: "8520"
- HG_STORE_RAFT_ADDRESS: store0:8510
- HG_STORE_DATA_PATH: /hugegraph-store/storage
- ports: ["8500:8500", "8510:8510", "8520:8520"]
+ network_mode: host
+ depends_on:
+ pd0:
+ condition: service_healthy
+ pd1:
+ condition: service_healthy
+ pd2:
+ condition: service_healthy
+ healthcheck:
+ test: ["CMD", "curl", "-f", "http://localhost:8520"]
+ interval: 10s
+ timeout: 5s
+ retries: 3
volumes:
- - hg-store0-data:/hugegraph-store/storage
- - ../hugegraph-store/hg-store-dist/docker/docker-entrypoint.sh:/hugegraph-store/docker-entrypoint.sh
+ - ./configs/application-store0.yml:/hugegraph-store/conf/application.yml
store1:
- <<: *store-common
- container_name: hg-store1
+ image: hugegraph/store
+ container_name: store1
hostname: store1
- environment:
- HG_STORE_PD_ADDRESS: pd0:8686,pd1:8686,pd2:8686
- HG_STORE_GRPC_HOST: store1
- HG_STORE_GRPC_PORT: "8500"
- HG_STORE_REST_PORT: "8520"
- HG_STORE_RAFT_ADDRESS: store1:8510
- HG_STORE_DATA_PATH: /hugegraph-store/storage
- ports: ["8501:8500", "8511:8510", "8521:8520"]
+ network_mode: host
+ depends_on:
+ pd0:
+ condition: service_healthy
+ pd1:
+ condition: service_healthy
+ pd2:
+ condition: service_healthy
+ healthcheck:
+ test: ["CMD", "curl", "-f", "http://localhost:8521"]
+ interval: 10s
+ timeout: 5s
+ retries: 3
volumes:
- - hg-store1-data:/hugegraph-store/storage
- - ../hugegraph-store/hg-store-dist/docker/docker-entrypoint.sh:/hugegraph-store/docker-entrypoint.sh
+ - ./configs/application-store1.yml:/hugegraph-store/conf/application.yml
store2:
- <<: *store-common
- container_name: hg-store2
+ image: hugegraph/store
+ container_name: store2
hostname: store2
- environment:
- HG_STORE_PD_ADDRESS: pd0:8686,pd1:8686,pd2:8686
- HG_STORE_GRPC_HOST: store2
- HG_STORE_GRPC_PORT: "8500"
- HG_STORE_REST_PORT: "8520"
- HG_STORE_RAFT_ADDRESS: store2:8510
- HG_STORE_DATA_PATH: /hugegraph-store/storage
- ports: ["8502:8500", "8512:8510", "8522:8520"]
- volumes:
- - hg-store2-data:/hugegraph-store/storage
- - ../hugegraph-store/hg-store-dist/docker/docker-entrypoint.sh:/hugegraph-store/docker-entrypoint.sh
-
- # --- Server cluster (3 nodes) ---
- server0:
- <<: *server-common
- container_name: hg-server0
- hostname: server0
- ports: ["8080:8080"]
+ network_mode: host
+ depends_on:
+ pd0:
+ condition: service_healthy
+ pd1:
+ condition: service_healthy
+ pd2:
+ condition: service_healthy
+ healthcheck:
+ test: ["CMD", "curl", "-f", "http://localhost:8522"]
+ interval: 10s
+ timeout: 5s
+ retries: 3
volumes:
- - ../hugegraph-server/hugegraph-dist/docker/docker-entrypoint.sh:/hugegraph-server/docker-entrypoint.sh
- - ../hugegraph-server/hugegraph-dist/src/assembly/static/bin/wait-storage.sh:/hugegraph-server/bin/wait-storage.sh
- - ../hugegraph-server/hugegraph-dist/src/assembly/static/bin/wait-partition.sh:/hugegraph-server/bin/wait-partition.sh
+ - ./configs/application-store2.yml:/hugegraph-store/conf/application.yml
server1:
- <<: *server-common
- container_name: hg-server1
+ image: hugegraph/server
+ container_name: server1
hostname: server1
- ports: ["8081:8080"]
+ network_mode: host
+ depends_on:
+ store0:
+ condition: service_healthy
+ store1:
+ condition: service_healthy
+ store2:
+ condition: service_healthy
+ healthcheck:
+ test: ["CMD", "curl", "-f", "http://localhost:8081"]
+ interval: 10s
+ timeout: 5s
+ retries: 3
volumes:
- - ../hugegraph-server/hugegraph-dist/docker/docker-entrypoint.sh:/hugegraph-server/docker-entrypoint.sh
- - ../hugegraph-server/hugegraph-dist/src/assembly/static/bin/wait-storage.sh:/hugegraph-server/bin/wait-storage.sh
- - ../hugegraph-server/hugegraph-dist/src/assembly/static/bin/wait-partition.sh:/hugegraph-server/bin/wait-partition.sh
+ - ./configs/server1-conf:/hugegraph-server/conf
server2:
- <<: *server-common
- container_name: hg-server2
+ image: hugegraph/server
+ container_name: server2
hostname: server2
- ports: ["8082:8080"]
+ network_mode: host
+ depends_on:
+ store0:
+ condition: service_healthy
+ store1:
+ condition: service_healthy
+ store2:
+ condition: service_healthy
+ healthcheck:
+ test: ["CMD", "curl", "-f", "http://localhost:8082"]
+ interval: 10s
+ timeout: 5s
+ retries: 3
+ volumes:
+ - ./configs/server2-conf:/hugegraph-server/conf
+
+ server3:
+ image: hugegraph/server
+ container_name: server3
+ hostname: server3
+ network_mode: host
+ depends_on:
+ store0:
+ condition: service_healthy
+ store1:
+ condition: service_healthy
+ store2:
+ condition: service_healthy
+ healthcheck:
+ test: ["CMD", "curl", "-f", "http://localhost:8083"]
+ interval: 10s
+ timeout: 5s
+ retries: 3
volumes:
- - ../hugegraph-server/hugegraph-dist/docker/docker-entrypoint.sh:/hugegraph-server/docker-entrypoint.sh
- - ../hugegraph-server/hugegraph-dist/src/assembly/static/bin/wait-storage.sh:/hugegraph-server/bin/wait-storage.sh
- - ../hugegraph-server/hugegraph-dist/src/assembly/static/bin/wait-partition.sh:/hugegraph-server/bin/wait-partition.sh
+ - ./configs/server3-conf:/hugegraph-server/conf
diff --git a/docker/docker-compose.dev.yml b/docker/docker-compose.dev.yml
deleted file mode 100644
index aa0736a38b..0000000000
--- a/docker/docker-compose.dev.yml
+++ /dev/null
@@ -1,106 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-name: hugegraph-single
-
-networks:
- hg-net:
- driver: bridge
-
-volumes:
- hg-pd-data:
- hg-store-data:
-
-services:
- pd:
- build:
- context: ..
- dockerfile: hugegraph-pd/Dockerfile
- container_name: hg-pd
- hostname: pd
- restart: unless-stopped
- networks: [hg-net]
- environment:
- HG_PD_GRPC_HOST: pd
- HG_PD_GRPC_PORT: "8686"
- HG_PD_REST_PORT: "8620"
- HG_PD_RAFT_ADDRESS: pd:8610
- HG_PD_RAFT_PEERS_LIST: pd:8610
- HG_PD_INITIAL_STORE_LIST: store:8500
- HG_PD_DATA_PATH: /hugegraph-pd/pd_data
- ports:
- - "8620:8620"
- volumes:
- - hg-pd-data:/hugegraph-pd/pd_data
- healthcheck:
- test: ["CMD-SHELL", "curl -fsS http://localhost:8620/v1/health >/dev/null || exit 1"]
- interval: 10s
- timeout: 5s
- retries: 12
- start_period: 20s
-
- store:
- build:
- context: ..
- dockerfile: hugegraph-store/Dockerfile
- container_name: hg-store
- hostname: store
- restart: unless-stopped
- networks: [hg-net]
- depends_on:
- pd:
- condition: service_healthy
- environment:
- HG_STORE_PD_ADDRESS: pd:8686
- HG_STORE_GRPC_HOST: store
- HG_STORE_GRPC_PORT: "8500"
- HG_STORE_REST_PORT: "8520"
- HG_STORE_RAFT_ADDRESS: store:8510
- HG_STORE_DATA_PATH: /hugegraph-store/storage
- ports:
- - "8520:8520"
- volumes:
- - hg-store-data:/hugegraph-store/storage
- healthcheck:
- test: ["CMD-SHELL", "curl -fsS http://localhost:8520/v1/health >/dev/null || exit 1"]
- interval: 10s
- timeout: 10s
- retries: 30
- start_period: 30s
-
- server:
- build:
- context: ..
- dockerfile: hugegraph-server/Dockerfile-hstore
- container_name: hg-server
- hostname: server
- restart: unless-stopped
- networks: [hg-net]
- depends_on:
- store:
- condition: service_healthy
- environment:
- HG_SERVER_BACKEND: hstore
- HG_SERVER_PD_PEERS: pd:8686
- ports:
- - "8080:8080"
- healthcheck:
- test: ["CMD-SHELL", "curl -fsS http://localhost:8080/versions >/dev/null || exit 1"]
- interval: 10s
- timeout: 5s
- retries: 30
- start_period: 60s
diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml
index d3700daf96..0c90c1e451 100644
--- a/docker/docker-compose.yml
+++ b/docker/docker-compose.yml
@@ -14,119 +14,45 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-# TODO: remove volume mounts below once images are published with new entrypoints
-name: hugegraph-single
-networks:
- hg-net:
- driver: bridge
-
-volumes:
- hg-pd-data:
- hg-store-data:
+version: "3"
services:
-
pd:
- image: hugegraph/pd:${HUGEGRAPH_VERSION:-latest}
- pull_policy: always
- container_name: hg-pd
+ image: hugegraph/pd
+ container_name: pd
hostname: pd
- restart: unless-stopped
- networks: [hg-net]
-
- entrypoint: ["/hugegraph-pd/docker-entrypoint.sh"]
-
- environment:
- HG_PD_GRPC_HOST: pd
- HG_PD_GRPC_PORT: "8686"
- HG_PD_REST_PORT: "8620"
- HG_PD_RAFT_ADDRESS: pd:8610
- HG_PD_RAFT_PEERS_LIST: pd:8610
- HG_PD_INITIAL_STORE_LIST: store:8500
- HG_PD_DATA_PATH: /hugegraph-pd/pd_data
-
- ports:
- - "8620:8620"
-
- volumes:
- - hg-pd-data:/hugegraph-pd/pd_data
- - ../hugegraph-pd/hg-pd-dist/docker/docker-entrypoint.sh:/hugegraph-pd/docker-entrypoint.sh
-
+ network_mode: host
healthcheck:
- test: ["CMD-SHELL", "curl -fsS http://localhost:8620/v1/health >/dev/null || exit 1"]
+ test: ["CMD", "curl", "-f", "http://localhost:8620"]
interval: 10s
timeout: 5s
- retries: 12
- start_period: 30s
-
+ retries: 3
store:
- image: hugegraph/store:${HUGEGRAPH_VERSION:-latest}
- pull_policy: always
- container_name: hg-store
+ image: hugegraph/store
+ container_name: store
hostname: store
- restart: unless-stopped
- networks: [hg-net]
-
- entrypoint: ["/hugegraph-store/docker-entrypoint.sh"]
-
+ network_mode: host
depends_on:
pd:
condition: service_healthy
-
- environment:
- HG_STORE_PD_ADDRESS: pd:8686
- HG_STORE_GRPC_HOST: store
- HG_STORE_GRPC_PORT: "8500"
- HG_STORE_REST_PORT: "8520"
- HG_STORE_RAFT_ADDRESS: store:8510
- HG_STORE_DATA_PATH: /hugegraph-store/storage
-
- ports:
- - "8520:8520"
-
- volumes:
- - hg-store-data:/hugegraph-store/storage
- - ../hugegraph-store/hg-store-dist/docker/docker-entrypoint.sh:/hugegraph-store/docker-entrypoint.sh
-
healthcheck:
- test: ["CMD-SHELL", "curl -fsS http://localhost:8520/v1/health >/dev/null || exit 1"]
+ test: ["CMD", "curl", "-f", "http://localhost:8520"]
interval: 10s
- timeout: 10s
- retries: 30
- start_period: 60s
-
+ timeout: 5s
+ retries: 3
server:
- image: hugegraph/server:${HUGEGRAPH_VERSION:-latest}
- pull_policy: always
- container_name: hg-server
+ image: hugegraph/server
+ container_name: server
hostname: server
- restart: unless-stopped
- networks: [hg-net]
-
- entrypoint: ["/hugegraph-server/docker-entrypoint.sh"]
-
+ network_mode: host
depends_on:
store:
condition: service_healthy
-
- environment:
- HG_SERVER_BACKEND: hstore
- HG_SERVER_PD_PEERS: pd:8686
-
- ports:
- - "8080:8080"
-
- volumes:
- - ../hugegraph-server/hugegraph-dist/docker/docker-entrypoint.sh:/hugegraph-server/docker-entrypoint.sh
- - ../hugegraph-server/hugegraph-dist/src/assembly/static/bin/wait-storage.sh:/hugegraph-server/bin/wait-storage.sh
- - ../hugegraph-server/hugegraph-dist/src/assembly/static/bin/wait-partition.sh:/hugegraph-server/bin/wait-partition.sh
-
healthcheck:
- test: ["CMD-SHELL", "curl -fsS http://localhost:8080/versions >/dev/null || exit 1"]
+ test: ["CMD", "curl", "-f", "http://localhost:8080"]
interval: 10s
timeout: 5s
- retries: 30
- start_period: 60s
+ retries: 3
diff --git a/hugegraph-cluster-test/hugegraph-clustertest-dist/src/assembly/static/conf/hugegraph.properties.template b/hugegraph-cluster-test/hugegraph-clustertest-dist/src/assembly/static/conf/hugegraph.properties.template
index 005031fe60..f97e365748 100644
--- a/hugegraph-cluster-test/hugegraph-clustertest-dist/src/assembly/static/conf/hugegraph.properties.template
+++ b/hugegraph-cluster-test/hugegraph-clustertest-dist/src/assembly/static/conf/hugegraph.properties.template
@@ -45,7 +45,6 @@ store=hugegraph
pd.peers=$PD_PEERS_LIST$
# task config
-task.scheduler_type=local
task.schedule_period=10
task.retry=0
task.wait_timeout=10
diff --git a/hugegraph-cluster-test/hugegraph-clustertest-minicluster/src/main/java/org/apache/hugegraph/ct/base/ClusterConstant.java b/hugegraph-cluster-test/hugegraph-clustertest-minicluster/src/main/java/org/apache/hugegraph/ct/base/ClusterConstant.java
index 730bbc53ed..9120c0cf92 100644
--- a/hugegraph-cluster-test/hugegraph-clustertest-minicluster/src/main/java/org/apache/hugegraph/ct/base/ClusterConstant.java
+++ b/hugegraph-cluster-test/hugegraph-clustertest-minicluster/src/main/java/org/apache/hugegraph/ct/base/ClusterConstant.java
@@ -33,12 +33,12 @@ public class ClusterConstant {
public static final String PLUGINS_DIR = "plugins";
public static final String BIN_DIR = "bin";
public static final String CONF_DIR = "conf";
- public static final String PD_PACKAGE_PREFIX = "apache-hugegraph-pd";
+ public static final String PD_PACKAGE_PREFIX = "apache-hugegraph-pd-incubating";
public static final String PD_JAR_PREFIX = "hg-pd-service";
- public static final String STORE_PACKAGE_PREFIX = "apache-hugegraph-store";
+ public static final String STORE_PACKAGE_PREFIX = "apache-hugegraph-store-incubating";
public static final String STORE_JAR_PREFIX = "hg-store-node";
- public static final String SERVER_PACKAGE_PREFIX = "apache-hugegraph-server";
- public static final String CT_PACKAGE_PREFIX = "apache-hugegraph-ct";
+ public static final String SERVER_PACKAGE_PREFIX = "apache-hugegraph-server-incubating";
+ public static final String CT_PACKAGE_PREFIX = "apache-hugegraph-ct-incubating";
public static final String APPLICATION_FILE = "application.yml";
public static final String SERVER_PROPERTIES = "rest-server.properties";
public static final String HUGEGRAPH_PROPERTIES = "graphs/hugegraph.properties";
diff --git a/hugegraph-cluster-test/hugegraph-clustertest-test/src/main/java/org/apache/hugegraph/MultiClusterTest/BaseMultiClusterTest.java b/hugegraph-cluster-test/hugegraph-clustertest-test/src/main/java/org/apache/hugegraph/MultiClusterTest/BaseMultiClusterTest.java
index 9e90933026..af640b3a94 100644
--- a/hugegraph-cluster-test/hugegraph-clustertest-test/src/main/java/org/apache/hugegraph/MultiClusterTest/BaseMultiClusterTest.java
+++ b/hugegraph-cluster-test/hugegraph-clustertest-test/src/main/java/org/apache/hugegraph/MultiClusterTest/BaseMultiClusterTest.java
@@ -38,7 +38,7 @@
* MultiNode Test generate the cluster env with 3 pd node + 3 store node + 3 server node.
* Or you can set different num of nodes by using env = new MultiNodeEnv(pdNum, storeNum, serverNum)
* All nodes are deployed in ports generated randomly, the application of nodes are stored
- * in /apache-hugegraph-ct-1.7.0, you can visit each node with rest api.
+ * in /apache-hugegraph-ct-incubating-1.7.0, you can visit each node with rest api.
*/
public class BaseMultiClusterTest {
diff --git a/hugegraph-cluster-test/hugegraph-clustertest-test/src/main/java/org/apache/hugegraph/SimpleClusterTest/BaseSimpleTest.java b/hugegraph-cluster-test/hugegraph-clustertest-test/src/main/java/org/apache/hugegraph/SimpleClusterTest/BaseSimpleTest.java
index f0f0c33461..849b4b835f 100644
--- a/hugegraph-cluster-test/hugegraph-clustertest-test/src/main/java/org/apache/hugegraph/SimpleClusterTest/BaseSimpleTest.java
+++ b/hugegraph-cluster-test/hugegraph-clustertest-test/src/main/java/org/apache/hugegraph/SimpleClusterTest/BaseSimpleTest.java
@@ -45,7 +45,7 @@
/**
* Simple Test generate the cluster env with 1 pd node + 1 store node + 1 server node.
* All nodes are deployed in ports generated randomly; The application of nodes is stored
- * in /apache-hugegraph-ct-1.7.0, you can visit each node with rest api.
+ * in /apache-hugegraph-ct-incubating-1.7.0, you can visit each node with rest api.
*/
public class BaseSimpleTest {
diff --git a/hugegraph-cluster-test/pom.xml b/hugegraph-cluster-test/pom.xml
index cd54ac0ffe..ecb47b7970 100644
--- a/hugegraph-cluster-test/pom.xml
+++ b/hugegraph-cluster-test/pom.xml
@@ -42,7 +42,7 @@
1111UTF-8
- apache-${release.name}-ct-${project.version}
+ apache-${release.name}-ct-incubating-${project.version}
diff --git a/hugegraph-commons/README.md b/hugegraph-commons/README.md
index d8cbcbc24a..7162e93137 100644
--- a/hugegraph-commons/README.md
+++ b/hugegraph-commons/README.md
@@ -3,8 +3,8 @@
[](https://www.apache.org/licenses/LICENSE-2.0.html)
[](https://codecov.io/gh/hugegraph/hugegraph-common)
[](https://mvnrepository.com/artifact/org.apache.hugegraph/hugegraph-common)
-[](https://github.com/apache/hugegraph-commons/actions/workflows/codeql-analysis.yml)
-[](https://github.com/apache/hugegraph-commons/actions/workflows/ci.yml)
+[](https://github.com/apache/incubator-hugegraph-commons/actions/workflows/codeql-analysis.yml)
+[](https://github.com/apache/incubator-hugegraph-commons/actions/workflows/ci.yml)
hugegraph-commons is a common module for [HugeGraph](https://github.com/apache/hugegraph) and its peripheral components.
@@ -49,7 +49,7 @@ And here are links of other repositories:
- Note: It's recommended to use [GitHub Desktop](https://desktop.github.com/) to greatly simplify the PR and commit process.
- Thank you to all the people who already contributed to HugeGraph!
-[](https://github.com/apache/hugegraph-commons/graphs/contributors)
+[](https://github.com/apache/incubator-hugegraph-commons/graphs/contributors)
## Licence
@@ -59,8 +59,8 @@ Same as HugeGraph, hugegraph-commons are also licensed under [Apache 2.0](./LICE
---
- - [GitHub Issues](https://github.com/apache/hugegraph-commons/issues): Feedback on usage issues and functional requirements (quick response)
+ - [GitHub Issues](https://github.com/apache/incubator-hugegraph-commons/issues): Feedback on usage issues and functional requirements (quick response)
- Feedback Email: [dev@hugegraph.apache.org](mailto:dev@hugegraph.apache.org) ([subscriber](https://hugegraph.apache.org/docs/contribution-guidelines/subscribe/) only)
- WeChat public account: Apache HugeGraph, welcome to scan this QR code to follow us.
-
+
diff --git a/hugegraph-commons/hugegraph-common/pom.xml b/hugegraph-commons/hugegraph-common/pom.xml
index 14f7cc217c..a57bcf59cd 100644
--- a/hugegraph-commons/hugegraph-common/pom.xml
+++ b/hugegraph-commons/hugegraph-common/pom.xml
@@ -28,7 +28,7 @@
hugegraph-common${project.artifactId}
- https://github.com/apache/hugegraph-commons/tree/master/hugegraph-common
+ https://github.com/apache/incubator-hugegraph-commons/tree/master/hugegraph-common
hugegraph-common is a common module for HugeGraph and its peripheral components.
hugegraph-common encapsulates locks, configurations, events, iterators, rest and some
diff --git a/hugegraph-commons/hugegraph-common/src/test/java/org/apache/hugegraph/unit/rest/RestClientTest.java b/hugegraph-commons/hugegraph-common/src/test/java/org/apache/hugegraph/unit/rest/RestClientTest.java
index 93a69dd8ec..712aea7ab2 100644
--- a/hugegraph-commons/hugegraph-common/src/test/java/org/apache/hugegraph/unit/rest/RestClientTest.java
+++ b/hugegraph-commons/hugegraph-common/src/test/java/org/apache/hugegraph/unit/rest/RestClientTest.java
@@ -112,7 +112,7 @@ public void testPostWithTokenAndAllParams() {
@Test
public void testPostHttpsWithAllParams() {
- String url = "https://github.com/apache/hugegraph-doc/" +
+ String url = "https://github.com/apache/incubator-hugegraph-doc/" +
"raw/master/dist/commons/cacerts.jks";
String trustStoreFile = "src/test/resources/cacerts.jks";
BaseUnitTest.downloadFileByUrl(url, trustStoreFile);
@@ -129,7 +129,7 @@ public void testPostHttpsWithAllParams() {
@Test
public void testPostHttpsWithTokenAndAllParams() {
- String url = "https://github.com/apache/hugegraph-doc/" +
+ String url = "https://github.com/apache/incubator-hugegraph-doc/" +
"raw/master/dist/commons/cacerts.jks";
String trustStoreFile = "src/test/resources/cacerts.jks";
BaseUnitTest.downloadFileByUrl(url, trustStoreFile);
diff --git a/hugegraph-commons/pom.xml b/hugegraph-commons/pom.xml
index b9e780bd32..59d12b99ad 100644
--- a/hugegraph-commons/pom.xml
+++ b/hugegraph-commons/pom.xml
@@ -50,7 +50,7 @@
- Apache HugeGraph
+ Apache Hugegraph(Incubating)dev-subscribe@hugegraph.apache.orghttps://hugegraph.apache.org/
@@ -61,7 +61,7 @@
Developer Listdev-subscribe@hugegraph.apache.orgdev-unsubscribe@hugegraph.apache.org
- dev@hugegraph.apache.org
+ dev@hugegraph.incubator.apache.orgCommits List
diff --git a/hugegraph-pd/AGENTS.md b/hugegraph-pd/AGENTS.md
index c9ba2bcfa0..0b501bf640 100644
--- a/hugegraph-pd/AGENTS.md
+++ b/hugegraph-pd/AGENTS.md
@@ -110,7 +110,7 @@ mvn clean install
# Build distribution package only
mvn clean package -pl hg-pd-dist -am -DskipTests
-# Output: hugegraph-pd/apache-hugegraph-pd-.tar.gz
+# Output: hg-pd-dist/target/apache-hugegraph-pd-incubating-.tar.gz
```
### Running Tests
@@ -165,7 +165,7 @@ mvn clean
After building, extract the tarball:
```
-apache-hugegraph-pd-/
+apache-hugegraph-pd-incubating-/
├── bin/
│ ├── start-hugegraph-pd.sh # Start PD server
│ ├── stop-hugegraph-pd.sh # Stop PD server
@@ -183,7 +183,7 @@ apache-hugegraph-pd-/
### Starting PD
```bash
-cd apache-hugegraph-pd-/
+cd apache-hugegraph-pd-incubating-/
bin/start-hugegraph-pd.sh
# With custom GC options
diff --git a/hugegraph-pd/Dockerfile b/hugegraph-pd/Dockerfile
index 812e05e7d9..c30cc3dfe2 100644
--- a/hugegraph-pd/Dockerfile
+++ b/hugegraph-pd/Dockerfile
@@ -30,7 +30,7 @@ RUN mvn package $MAVEN_ARGS -e -B -ntp -Dmaven.test.skip=true -Dmaven.javadoc.sk
# Note: ZGC (The Z Garbage Collector) is only supported on ARM-Mac with java > 13
FROM eclipse-temurin:11-jre-jammy
-COPY --from=build /pkg/hugegraph-pd/apache-hugegraph-pd-*/ /hugegraph-pd/
+COPY --from=build /pkg/hugegraph-pd/apache-hugegraph-pd-incubating-*/ /hugegraph-pd/
LABEL maintainer="HugeGraph Docker Maintainers "
# TODO: use g1gc or zgc as default
diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftEngine.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftEngine.java
index 2b08de7d4e..e70ac92340 100644
--- a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftEngine.java
+++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftEngine.java
@@ -23,12 +23,9 @@
import java.util.HashSet;
import java.util.List;
import java.util.Objects;
-import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Collectors;
@@ -43,6 +40,7 @@
import com.alipay.sofa.jraft.JRaftUtils;
import com.alipay.sofa.jraft.Node;
import com.alipay.sofa.jraft.RaftGroupService;
+import com.alipay.sofa.jraft.ReplicatorGroup;
import com.alipay.sofa.jraft.Status;
import com.alipay.sofa.jraft.conf.Configuration;
import com.alipay.sofa.jraft.core.Replicator;
@@ -50,11 +48,13 @@
import com.alipay.sofa.jraft.entity.Task;
import com.alipay.sofa.jraft.error.RaftError;
import com.alipay.sofa.jraft.option.NodeOptions;
+import com.alipay.sofa.jraft.option.RaftOptions;
import com.alipay.sofa.jraft.option.RpcOptions;
import com.alipay.sofa.jraft.rpc.RaftRpcServerFactory;
import com.alipay.sofa.jraft.rpc.RpcServer;
import com.alipay.sofa.jraft.rpc.impl.BoltRpcServer;
import com.alipay.sofa.jraft.util.Endpoint;
+import com.alipay.sofa.jraft.util.ThreadId;
import com.alipay.sofa.jraft.util.internal.ThrowUtil;
import io.netty.channel.ChannelHandler;
@@ -86,12 +86,8 @@ public synchronized boolean init(PDConfig.Raft config) {
}
this.config = config;
- // Wire configured rpc timeout into RaftRpcClient so the Bolt transport
- // timeout and the future.get() caller timeout in getLeaderGrpcAddress() are consistent.
raftRpcClient = new RaftRpcClient();
- RpcOptions rpcOptions = new RpcOptions();
- rpcOptions.setRpcDefaultTimeout(config.getRpcTimeout());
- raftRpcClient.init(rpcOptions);
+ raftRpcClient.init(new RpcOptions());
String raftPath = config.getDataPath() + "/" + groupId;
new File(raftPath).mkdirs();
@@ -123,7 +119,10 @@ public synchronized boolean init(PDConfig.Raft config) {
nodeOptions.setRpcConnectTimeoutMs(config.getRpcTimeout());
nodeOptions.setRpcDefaultTimeout(config.getRpcTimeout());
nodeOptions.setRpcInstallSnapshotTimeout(config.getRpcTimeout());
- // TODO: tune RaftOptions for PD (see hugegraph-store PartitionEngine for reference)
+ // Set the raft configuration
+ RaftOptions raftOptions = nodeOptions.getRaftOptions();
+
+ nodeOptions.setEnableMetrics(true);
final PeerId serverId = JRaftUtils.getPeerId(config.getAddress());
@@ -229,7 +228,7 @@ public PeerId getLeader() {
}
/**
- * Send a message to the leader to get the grpc address.
+ * Send a message to the leader to get the grpc address;
*/
public String getLeaderGrpcAddress() throws ExecutionException, InterruptedException {
if (isLeader()) {
@@ -237,49 +236,11 @@ public String getLeaderGrpcAddress() throws ExecutionException, InterruptedExcep
}
if (raftNode.getLeaderId() == null) {
- waitingForLeader(config.getRpcTimeout());
- }
-
- // Cache leader to avoid repeated getLeaderId() calls and guard against
- // waitingForLeader() returning without a leader being elected.
- PeerId leader = raftNode.getLeaderId();
- if (leader == null) {
- throw new ExecutionException(new IllegalStateException("Leader is not ready"));
- }
-
- RaftRpcProcessor.GetMemberResponse response = null;
- try {
- // TODO: a more complete fix would need a source of truth for the leader's
- // actual grpcAddress rather than deriving it from the local node's port config.
- response = raftRpcClient
- .getGrpcAddress(leader.getEndpoint().toString())
- .get(config.getRpcTimeout(), TimeUnit.MILLISECONDS);
- if (response != null && response.getGrpcAddress() != null) {
- return response.getGrpcAddress();
- }
- if (response == null) {
- log.warn("Leader RPC response is null for {}, falling back to derived address",
- leader);
- } else {
- log.warn("Leader gRPC address field is null in RPC response for {}, "
- + "falling back to derived address", leader);
- }
- } catch (TimeoutException e) {
- log.warn("Timed out resolving leader gRPC address for {}, falling back to derived "
- + "address", leader);
- } catch (ExecutionException e) {
- Throwable cause = e.getCause() != null ? e.getCause() : e;
- log.warn("Failed to resolve leader gRPC address for {}, falling back to derived "
- + "address", leader, cause);
+ waitingForLeader(10000);
}
- // Best-effort fallback: derive from leader raft endpoint IP + local gRPC port.
- // WARNING: this may be incorrect in clusters where PD nodes use different grpc.port
- // values, a proper fix requires a cluster-wide source of truth for gRPC addresses.
- String derived = leader.getEndpoint().getIp() + ":" + config.getGrpcPort();
- log.info("Using derived leader gRPC address {} - may be incorrect if nodes use different ports",
- derived);
- return derived;
+ return raftRpcClient.getGrpcAddress(raftNode.getLeaderId().getEndpoint().toString()).get()
+ .getGrpcAddress();
}
/**
@@ -352,55 +313,23 @@ public List getMembers() throws ExecutionException, InterruptedEx
public Status changePeerList(String peerList) {
AtomicReference result = new AtomicReference<>();
- Configuration newPeers = new Configuration();
try {
String[] peers = peerList.split(",", -1);
if ((peers.length & 1) != 1) {
throw new PDException(-1, "the number of peer list must be odd.");
}
+ Configuration newPeers = new Configuration();
newPeers.parse(peerList);
CountDownLatch latch = new CountDownLatch(1);
this.raftNode.changePeers(newPeers, status -> {
- result.compareAndSet(null, status);
- if (status != null && status.isOk()) {
- IpAuthHandler handler = IpAuthHandler.getInstance();
- if (handler != null) {
- Set newIps = newPeers.getPeers()
- .stream()
- .map(PeerId::getIp)
- .collect(Collectors.toSet());
- handler.refresh(newIps);
- log.info("IpAuthHandler refreshed after peer list change to: {}",
- peerList);
- } else {
- log.warn("IpAuthHandler not initialized, skipping refresh for "
- + "peer list: {}", peerList);
- }
- }
+ result.set(status);
latch.countDown();
});
- boolean completed = latch.await(3L * config.getRpcTimeout(), TimeUnit.MILLISECONDS);
- if (!completed && result.get() == null) {
- Status timeoutStatus = new Status(RaftError.EINTERNAL,
- "changePeerList timed out after %d ms",
- 3L * config.getRpcTimeout());
- if (!result.compareAndSet(null, timeoutStatus)) {
- timeoutStatus = null;
- }
- if (timeoutStatus != null) {
- log.error("changePeerList to {} timed out after {} ms",
- peerList, 3L * config.getRpcTimeout());
- }
- }
- } catch (InterruptedException e) {
- Thread.currentThread().interrupt();
- result.set(new Status(RaftError.EINTERNAL, "changePeerList interrupted"));
- log.error("changePeerList to {} was interrupted", peerList, e);
+ latch.await();
} catch (Exception e) {
log.error("failed to changePeerList to {},{}", peerList, e);
result.set(new Status(-1, e.getMessage()));
}
-
return result.get();
}
@@ -415,8 +344,7 @@ public PeerId waitingForLeader(long timeOut) {
long start = System.currentTimeMillis();
while ((System.currentTimeMillis() - start < timeOut) && (leader == null)) {
try {
- long remaining = timeOut - (System.currentTimeMillis() - start);
- this.wait(Math.min(1000, Math.max(0, remaining)));
+ this.wait(1000);
} catch (InterruptedException e) {
log.error("Raft wait for leader exception", e);
}
@@ -424,6 +352,7 @@ public PeerId waitingForLeader(long timeOut) {
}
return leader;
}
+
}
public Node getRaftNode() {
@@ -437,8 +366,7 @@ private boolean peerEquals(PeerId p1, PeerId p2) {
if (p1 == null || p2 == null) {
return false;
}
- return Objects.equals(p1.getIp(), p2.getIp()) &&
- Objects.equals(p1.getPort(), p2.getPort());
+ return Objects.equals(p1.getIp(), p2.getIp()) && Objects.equals(p1.getPort(), p2.getPort());
}
private Replicator.State getReplicatorState(PeerId peerId) {
diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/auth/IpAuthHandler.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/auth/IpAuthHandler.java
index bdccb6dd7f..2ac384541d 100644
--- a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/auth/IpAuthHandler.java
+++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/auth/IpAuthHandler.java
@@ -17,11 +17,8 @@
package org.apache.hugegraph.pd.raft.auth;
-import java.net.InetAddress;
import java.net.InetSocketAddress;
-import java.net.UnknownHostException;
import java.util.Collections;
-import java.util.HashSet;
import java.util.Set;
import io.netty.channel.ChannelDuplexHandler;
@@ -33,11 +30,11 @@
@ChannelHandler.Sharable
public class IpAuthHandler extends ChannelDuplexHandler {
- private volatile Set resolvedIps;
+ private final Set allowedIps;
private static volatile IpAuthHandler instance;
private IpAuthHandler(Set allowedIps) {
- this.resolvedIps = resolveAll(allowedIps);
+ this.allowedIps = Collections.unmodifiableSet(allowedIps);
}
public static IpAuthHandler getInstance(Set allowedIps) {
@@ -51,25 +48,6 @@ public static IpAuthHandler getInstance(Set allowedIps) {
return instance;
}
- /**
- * Returns the existing singleton instance, or null if not yet initialized.
- * Should only be called after getInstance(Set) has been called during startup.
- */
- public static IpAuthHandler getInstance() {
- return instance;
- }
-
- /**
- * Refreshes the resolved IP allowlist from a new set of hostnames or IPs.
- * Should be called when the Raft peer list changes via RaftEngine#changePeerList().
- * Note: DNS-only changes (e.g. container restart with new IP, same hostname)
- * are not automatically detected and still require a process restart.
- */
- public void refresh(Set newAllowedIps) {
- this.resolvedIps = resolveAll(newAllowedIps);
- log.info("IpAuthHandler allowlist refreshed, resolved {} entries", resolvedIps.size());
- }
-
@Override
public void channelActive(ChannelHandlerContext ctx) throws Exception {
String clientIp = getClientIp(ctx);
@@ -87,25 +65,7 @@ private static String getClientIp(ChannelHandlerContext ctx) {
}
private boolean isIpAllowed(String ip) {
- Set resolved = this.resolvedIps;
- // Empty allowlist means no restriction is configured — allow all
- return resolved.isEmpty() || resolved.contains(ip);
- }
-
- private static Set resolveAll(Set entries) {
- Set result = new HashSet<>(entries);
-
- for (String entry : entries) {
- try {
- for (InetAddress addr : InetAddress.getAllByName(entry)) {
- result.add(addr.getHostAddress());
- }
- } catch (UnknownHostException e) {
- log.warn("Could not resolve allowlist entry '{}': {}", entry, e.getMessage());
- }
- }
-
- return Collections.unmodifiableSet(result);
+ return allowedIps.isEmpty() || allowedIps.contains(ip);
}
@Override
diff --git a/hugegraph-pd/hg-pd-dist/docker/docker-entrypoint.sh b/hugegraph-pd/hg-pd-dist/docker/docker-entrypoint.sh
old mode 100755
new mode 100644
index d1ae5c3c3a..fd894d5518
--- a/hugegraph-pd/hg-pd-dist/docker/docker-entrypoint.sh
+++ b/hugegraph-pd/hg-pd-dist/docker/docker-entrypoint.sh
@@ -15,72 +15,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-set -euo pipefail
-log() { echo "[hugegraph-pd-entrypoint] $*"; }
+# start hugegraph pd
+./bin/start-hugegraph-pd.sh -j "$JAVA_OPTS"
-require_env() {
- local name="$1"
- if [[ -z "${!name:-}" ]]; then
- echo "ERROR: missing required env '${name}'" >&2; exit 2
- fi
-}
-
-json_escape() {
- local s="$1"
- s=${s//\\/\\\\}; s=${s//\"/\\\"}; s=${s//$'\n'/}
- printf "%s" "$s"
-}
-
-migrate_env() {
- local old_name="$1" new_name="$2"
-
- if [[ -n "${!old_name:-}" && -z "${!new_name:-}" ]]; then
- log "WARN: deprecated env '${old_name}' detected; mapping to '${new_name}'"
- export "${new_name}=${!old_name}"
- fi
-}
-
-migrate_env "GRPC_HOST" "HG_PD_GRPC_HOST"
-migrate_env "RAFT_ADDRESS" "HG_PD_RAFT_ADDRESS"
-migrate_env "RAFT_PEERS" "HG_PD_RAFT_PEERS_LIST"
-migrate_env "PD_INITIAL_STORE_LIST" "HG_PD_INITIAL_STORE_LIST"
-
-# ── Required vars ─────────────────────────────────────────────────────
-require_env "HG_PD_GRPC_HOST"
-require_env "HG_PD_RAFT_ADDRESS"
-require_env "HG_PD_RAFT_PEERS_LIST"
-require_env "HG_PD_INITIAL_STORE_LIST"
-
-: "${HG_PD_GRPC_PORT:=8686}"
-: "${HG_PD_REST_PORT:=8620}"
-: "${HG_PD_DATA_PATH:=/hugegraph-pd/pd_data}"
-: "${HG_PD_INITIAL_STORE_COUNT:=1}"
-
-SPRING_APPLICATION_JSON="$(cat < {
if (status.isOk()) {
log.info("updatePdRaft, change peers success");
- // Refresh IpAuthHandler so newly added peers are not blocked
- IpAuthHandler handler = IpAuthHandler.getInstance();
- if (handler != null) {
- Set newIps = new HashSet<>();
- config.getPeers().forEach(p -> newIps.add(p.getIp()));
- config.getLearners().forEach(p -> newIps.add(p.getIp()));
- handler.refresh(newIps);
- log.info("IpAuthHandler refreshed after updatePdRaft peer change");
- } else {
- log.warn("IpAuthHandler not initialized, skipping refresh");
- }
} else {
log.error("changePeers status: {}, msg:{}, code: {}, raft error:{}",
status, status.getErrorMsg(), status.getCode(),
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/interceptor/Authentication.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/interceptor/Authentication.java
index 48bcf38683..83901bca1a 100644
--- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/interceptor/Authentication.java
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/interceptor/Authentication.java
@@ -77,8 +77,6 @@ protected T authenticate(String authority, String token, Function
}
String name = info.substring(0, delim);
- // TODO: password validation is skipped — only service name is checked against
- // innerModules. Full credential validation should be added as part of the auth refactor.
//String pwd = info.substring(delim + 1);
if (innerModules.contains(name)) {
return call.get();
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/grpc/GRpcServerConfig.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/grpc/GRpcServerConfig.java
index 2b1103739b..fce6d2379d 100644
--- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/grpc/GRpcServerConfig.java
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/grpc/GRpcServerConfig.java
@@ -40,8 +40,6 @@ public void configure(ServerBuilder> serverBuilder) {
HgExecutorUtil.createExecutor(EXECUTOR_NAME, poolGrpc.getCore(), poolGrpc.getMax(),
poolGrpc.getQueue()));
serverBuilder.maxInboundMessageSize(MAX_INBOUND_MESSAGE_SIZE);
- // TODO: GrpcAuthentication is instantiated as a Spring bean but never registered
- // here — add serverBuilder.intercept(grpcAuthentication) once auth is refactored.
}
}
diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/PDCoreSuiteTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/PDCoreSuiteTest.java
index 87d1500bcb..5098645128 100644
--- a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/PDCoreSuiteTest.java
+++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/PDCoreSuiteTest.java
@@ -19,9 +19,6 @@
import org.apache.hugegraph.pd.core.meta.MetadataKeyHelperTest;
import org.apache.hugegraph.pd.core.store.HgKVStoreImplTest;
-import org.apache.hugegraph.pd.raft.IpAuthHandlerTest;
-import org.apache.hugegraph.pd.raft.RaftEngineIpAuthIntegrationTest;
-import org.apache.hugegraph.pd.raft.RaftEngineLeaderAddressTest;
import org.junit.runner.RunWith;
import org.junit.runners.Suite;
@@ -39,9 +36,6 @@
StoreMonitorDataServiceTest.class,
StoreServiceTest.class,
TaskScheduleServiceTest.class,
- IpAuthHandlerTest.class,
- RaftEngineIpAuthIntegrationTest.class,
- RaftEngineLeaderAddressTest.class,
// StoreNodeServiceTest.class,
})
@Slf4j
diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/raft/IpAuthHandlerTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/raft/IpAuthHandlerTest.java
deleted file mode 100644
index 31647b6d39..0000000000
--- a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/raft/IpAuthHandlerTest.java
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hugegraph.pd.raft;
-
-import java.net.InetAddress;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Set;
-
-import org.apache.hugegraph.pd.raft.auth.IpAuthHandler;
-import org.apache.hugegraph.testutil.Whitebox;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-public class IpAuthHandlerTest {
-
- @Before
- public void setUp() {
- // Must reset BEFORE each test — earlier suite classes (e.g. ConfigServiceTest)
- // initialize RaftEngine which creates the IpAuthHandler singleton with their
- // own peer IPs. Without this reset, our getInstance() calls return the stale
- // singleton and ignore the allowlist passed by the test.
- Whitebox.setInternalState(IpAuthHandler.class, "instance", null);
- }
-
- @After
- public void tearDown() {
- // Must reset AFTER each test — prevents our test singleton from leaking
- // into later suite classes that also depend on IpAuthHandler state.
- Whitebox.setInternalState(IpAuthHandler.class, "instance", null);
- }
-
- private boolean isIpAllowed(IpAuthHandler handler, String ip) {
- return Whitebox.invoke(IpAuthHandler.class,
- new Class[]{String.class},
- "isIpAllowed", handler, ip);
- }
-
- @Test
- public void testHostnameResolvesToIp() throws Exception {
- // "localhost" should resolve to one or more IPs via InetAddress.getAllByName()
- // This verifies the core fix: hostname allowlists match numeric remote addresses
- // Using dynamic resolution avoids hardcoding "127.0.0.1" which may not be
- // returned on IPv6-only or custom resolver environments
- IpAuthHandler handler = IpAuthHandler.getInstance(
- Collections.singleton("localhost"));
- InetAddress[] addresses = InetAddress.getAllByName("localhost");
- // All resolved addresses should be allowed — resolveAll() adds every address
- // returned by getAllByName() so none should be blocked
- Assert.assertTrue("Expected at least one resolved address",
- addresses.length > 0);
- for (InetAddress address : addresses) {
- Assert.assertTrue(
- "Expected " + address.getHostAddress() + " to be allowed",
- isIpAllowed(handler, address.getHostAddress()));
- }
- }
-
- @Test
- public void testUnresolvableHostnameDoesNotCrash() {
- // Should log a warning and skip — no exception thrown during construction
- // Uses .invalid TLD which is RFC-2606 reserved and guaranteed to never resolve
- IpAuthHandler handler = IpAuthHandler.getInstance(
- Collections.singleton("nonexistent.invalid"));
- // Handler was still created successfully despite bad hostname
- Assert.assertNotNull(handler);
- // Unresolvable entry is skipped so no IPs should be allowed
- Assert.assertFalse(isIpAllowed(handler, "127.0.0.1"));
- Assert.assertFalse(isIpAllowed(handler, "192.168.0.1"));
- }
-
- @Test
- public void testRefreshUpdatesResolvedIps() {
- // Start with 127.0.0.1
- IpAuthHandler handler = IpAuthHandler.getInstance(
- Collections.singleton("127.0.0.1"));
- Assert.assertTrue(isIpAllowed(handler, "127.0.0.1"));
-
- // Refresh with a different IP — verifies refresh() swaps the set correctly
- Set newIps = new HashSet<>();
- newIps.add("192.168.0.1");
- handler.refresh(newIps);
-
- // Old IP should no longer be allowed
- Assert.assertFalse(isIpAllowed(handler, "127.0.0.1"));
- // New IP should now be allowed
- Assert.assertTrue(isIpAllowed(handler, "192.168.0.1"));
- }
-
- @Test
- public void testEmptyAllowlistAllowsAll() {
- // Empty allowlist = no restriction configured = allow all connections
- // This is intentional fallback behavior and must be explicitly tested
- // because it is a security-relevant boundary
- IpAuthHandler handler = IpAuthHandler.getInstance(
- Collections.emptySet());
- Assert.assertTrue(isIpAllowed(handler, "1.2.3.4"));
- Assert.assertTrue(isIpAllowed(handler, "192.168.99.99"));
- }
-
- @Test
- public void testGetInstanceReturnsSingletonIgnoresNewAllowlist() {
- // First call creates the singleton with 127.0.0.1
- IpAuthHandler first = IpAuthHandler.getInstance(
- Collections.singleton("127.0.0.1"));
- // Second call with a different set must return the same instance
- // and must NOT reinitialize or override the existing allowlist
- IpAuthHandler second = IpAuthHandler.getInstance(
- Collections.singleton("192.168.0.1"));
- Assert.assertSame(first, second);
- // Original allowlist still in effect
- Assert.assertTrue(isIpAllowed(second, "127.0.0.1"));
- // New set was ignored — 192.168.0.1 should not be allowed
- Assert.assertFalse(isIpAllowed(second, "192.168.0.1"));
- }
-}
diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/raft/RaftEngineIpAuthIntegrationTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/raft/RaftEngineIpAuthIntegrationTest.java
deleted file mode 100644
index 1f9857df0f..0000000000
--- a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/raft/RaftEngineIpAuthIntegrationTest.java
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hugegraph.pd.raft;
-
-import java.util.Collections;
-
-import org.apache.hugegraph.pd.raft.auth.IpAuthHandler;
-import org.apache.hugegraph.testutil.Whitebox;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import com.alipay.sofa.jraft.Closure;
-import com.alipay.sofa.jraft.Node;
-import com.alipay.sofa.jraft.Status;
-import com.alipay.sofa.jraft.conf.Configuration;
-import com.alipay.sofa.jraft.error.RaftError;
-
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.doAnswer;
-import static org.mockito.Mockito.mock;
-
-public class RaftEngineIpAuthIntegrationTest {
-
- private Node originalRaftNode;
-
- @Before
- public void setUp() {
- // Save original raftNode so we can restore it after the test
- originalRaftNode = RaftEngine.getInstance().getRaftNode();
- // Reset IpAuthHandler singleton for a clean state
- Whitebox.setInternalState(IpAuthHandler.class, "instance", null);
- }
-
- @After
- public void tearDown() {
- // Restore original raftNode
- Whitebox.setInternalState(RaftEngine.getInstance(), "raftNode", originalRaftNode);
- // Reset IpAuthHandler singleton
- Whitebox.setInternalState(IpAuthHandler.class, "instance", null);
- }
-
- @Test
- public void testChangePeerListRefreshesIpAuthHandler() throws Exception {
- // Initialize IpAuthHandler with an old IP
- IpAuthHandler handler = IpAuthHandler.getInstance(
- Collections.singleton("10.0.0.1"));
- Assert.assertTrue(invokeIsIpAllowed(handler, "10.0.0.1"));
- Assert.assertFalse(invokeIsIpAllowed(handler, "127.0.0.1"));
-
- // Mock Node to fire the changePeers callback synchronously with Status.OK()
- // This simulates a successful peer change without a real Raft cluster
-
- // Important: fire the closure synchronously or changePeerList() will
- // block on latch.await(...) until the configured timeout elapses
- Node mockNode = mock(Node.class);
- doAnswer(invocation -> {
- Closure closure = invocation.getArgument(1);
- closure.run(Status.OK());
- return null;
- }).when(mockNode).changePeers(any(Configuration.class), any(Closure.class));
-
- // Inject mock node into RaftEngine
- Whitebox.setInternalState(RaftEngine.getInstance(), "raftNode", mockNode);
-
- // Call changePeerList with new peer — must be odd count
- RaftEngine.getInstance().changePeerList("127.0.0.1:8610");
-
- // Verify IpAuthHandler was refreshed with the new peer IP
- Assert.assertTrue(invokeIsIpAllowed(handler, "127.0.0.1"));
- // Old IP should no longer be allowed
- Assert.assertFalse(invokeIsIpAllowed(handler, "10.0.0.1"));
- }
-
- @Test
- public void testChangePeerListDoesNotRefreshOnFailure() throws Exception {
- // Initialize IpAuthHandler with original IP
- IpAuthHandler handler = IpAuthHandler.getInstance(
- Collections.singleton("10.0.0.1"));
- Assert.assertTrue(invokeIsIpAllowed(handler, "10.0.0.1"));
-
- // Mock Node to fire callback with a failed status
- // Simulates a failed peer change — handler should NOT be refreshed
-
- // Important: fire the closure synchronously or changePeerList() will
- // block on latch.await(...) until the configured timeout elapses
- Node mockNode = mock(Node.class);
- doAnswer(invocation -> {
- Closure closure = invocation.getArgument(1);
- closure.run(new Status(RaftError.EINTERNAL, "simulated failure"));
- return null;
- }).when(mockNode).changePeers(any(Configuration.class), any(Closure.class));
-
- Whitebox.setInternalState(RaftEngine.getInstance(), "raftNode", mockNode);
-
- RaftEngine.getInstance().changePeerList("127.0.0.1:8610");
-
- // Handler should NOT be refreshed — old IP still allowed
- Assert.assertTrue(invokeIsIpAllowed(handler, "10.0.0.1"));
- Assert.assertFalse(invokeIsIpAllowed(handler, "127.0.0.1"));
- }
-
- private boolean invokeIsIpAllowed(IpAuthHandler handler, String ip) {
- return Whitebox.invoke(IpAuthHandler.class,
- new Class[]{String.class},
- "isIpAllowed", handler, ip);
- }
-}
diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/raft/RaftEngineLeaderAddressTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/raft/RaftEngineLeaderAddressTest.java
deleted file mode 100644
index 420b106a27..0000000000
--- a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/raft/RaftEngineLeaderAddressTest.java
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hugegraph.pd.raft;
-
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-import org.apache.hugegraph.pd.config.PDConfig;
-import org.apache.hugegraph.testutil.Whitebox;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import com.alipay.sofa.jraft.Node;
-import com.alipay.sofa.jraft.entity.PeerId;
-import com.alipay.sofa.jraft.util.Endpoint;
-
-import static org.mockito.ArgumentMatchers.anyLong;
-import static org.mockito.ArgumentMatchers.anyString;
-import static org.mockito.ArgumentMatchers.eq;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-public class RaftEngineLeaderAddressTest {
-
- private static final String LEADER_IP = "10.0.0.1";
- private static final int GRPC_PORT = 8686;
- private static final String LEADER_GRPC_ADDRESS = "10.0.0.1:8686";
-
- private Node originalRaftNode;
- private RaftRpcClient originalRaftRpcClient;
- private PDConfig.Raft originalConfig;
-
- private Node mockNode;
- private RaftRpcClient mockRpcClient;
- private PDConfig.Raft mockConfig;
- private PeerId mockLeader;
-
- @Before
- public void setUp() {
- RaftEngine engine = RaftEngine.getInstance();
-
- // Save originals
- originalRaftNode = engine.getRaftNode();
- originalRaftRpcClient = Whitebox.getInternalState(engine, "raftRpcClient");
- originalConfig = Whitebox.getInternalState(engine, "config");
-
- // Build mock leader PeerId with real Endpoint
- mockLeader = mock(PeerId.class);
- Endpoint endpoint = new Endpoint(LEADER_IP, 8610);
- when(mockLeader.getEndpoint()).thenReturn(endpoint);
-
- // Build mock Node that reports itself as follower with a known leader
- mockNode = mock(Node.class);
- when(mockNode.isLeader(true)).thenReturn(false);
- when(mockNode.getLeaderId()).thenReturn(mockLeader);
-
- // Build mock config
- // Use a short default timeout (100ms); specific tests may override getRpcTimeout()
- mockConfig = mock(PDConfig.Raft.class);
- when(mockConfig.getGrpcAddress()).thenReturn("127.0.0.1:" + GRPC_PORT);
- when(mockConfig.getGrpcPort()).thenReturn(GRPC_PORT);
- when(mockConfig.getRpcTimeout()).thenReturn(100);
-
- // Build mock RpcClient
- mockRpcClient = mock(RaftRpcClient.class);
-
- // Inject mocks
- Whitebox.setInternalState(engine, "raftNode", mockNode);
- Whitebox.setInternalState(engine, "raftRpcClient", mockRpcClient);
- Whitebox.setInternalState(engine, "config", mockConfig);
- }
-
- @After
- public void tearDown() {
- RaftEngine engine = RaftEngine.getInstance();
- Whitebox.setInternalState(engine, "raftNode", originalRaftNode);
- Whitebox.setInternalState(engine, "raftRpcClient", originalRaftRpcClient);
- Whitebox.setInternalState(engine, "config", originalConfig);
- }
-
- @Test
- public void testSuccessReturnsGrpcAddress() throws Exception {
- // RPC succeeds and returns a valid gRPC address
- RaftRpcProcessor.GetMemberResponse response =
- mock(RaftRpcProcessor.GetMemberResponse.class);
- when(response.getGrpcAddress()).thenReturn(LEADER_GRPC_ADDRESS);
-
- CompletableFuture future =
- CompletableFuture.completedFuture(response);
- when(mockRpcClient.getGrpcAddress(anyString())).thenReturn(future);
-
- String result = RaftEngine.getInstance().getLeaderGrpcAddress();
- Assert.assertEquals(LEADER_GRPC_ADDRESS, result);
- }
-
- @Test
- public void testTimeoutFallsBackToDerivedAddress() throws Exception {
- // RPC times out — should fall back to leaderIp:grpcPort
- CompletableFuture future =
- mock(CompletableFuture.class);
- when(future.get(anyLong(), eq(TimeUnit.MILLISECONDS)))
- .thenThrow(new TimeoutException("simulated timeout"));
- when(mockRpcClient.getGrpcAddress(anyString())).thenReturn(future);
-
- String result = RaftEngine.getInstance().getLeaderGrpcAddress();
- Assert.assertEquals(LEADER_IP + ":" + GRPC_PORT, result);
- }
-
- @Test
- public void testRpcExceptionFallsBackToDerivedAddress() throws Exception {
- // RPC throws ExecutionException — should fall back to leaderIp:grpcPort
- CompletableFuture future =
- mock(CompletableFuture.class);
- when(future.get(anyLong(), eq(TimeUnit.MILLISECONDS)))
- .thenThrow(new ExecutionException("simulated rpc failure",
- new RuntimeException("bolt error")));
- when(mockRpcClient.getGrpcAddress(anyString())).thenReturn(future);
-
- String result = RaftEngine.getInstance().getLeaderGrpcAddress();
- Assert.assertEquals(LEADER_IP + ":" + GRPC_PORT, result);
- }
-
- @Test
- public void testNullResponseFallsBackToDerivedAddress() throws Exception {
- // RPC returns null response — should fall back to leaderIp:grpcPort
- CompletableFuture future =
- CompletableFuture.completedFuture(null);
- when(mockRpcClient.getGrpcAddress(anyString())).thenReturn(future);
-
- String result = RaftEngine.getInstance().getLeaderGrpcAddress();
- Assert.assertEquals(LEADER_IP + ":" + GRPC_PORT, result);
- }
-
- @Test
- public void testNullGrpcAddressInResponseFallsBackToDerivedAddress() throws Exception {
- // RPC returns a response but grpcAddress field is null — should fall back
- RaftRpcProcessor.GetMemberResponse response =
- mock(RaftRpcProcessor.GetMemberResponse.class);
- when(response.getGrpcAddress()).thenReturn(null);
-
- CompletableFuture future =
- CompletableFuture.completedFuture(response);
- when(mockRpcClient.getGrpcAddress(anyString())).thenReturn(future);
-
- String result = RaftEngine.getInstance().getLeaderGrpcAddress();
- Assert.assertEquals(LEADER_IP + ":" + GRPC_PORT, result);
- }
-
- @Test
- public void testNullLeaderAfterWaitThrowsExecutionException() throws Exception {
- // Use 0ms timeout so waitingForLeader(0) skips the wait loop and returns immediately
- when(mockConfig.getRpcTimeout()).thenReturn(0);
- // Leader is still null after waitingForLeader() — should throw ExecutionException
- when(mockNode.getLeaderId()).thenReturn(null);
-
- try {
- RaftEngine.getInstance().getLeaderGrpcAddress();
- Assert.fail("Expected ExecutionException");
- } catch (ExecutionException e) {
- Assert.assertTrue(e.getCause() instanceof IllegalStateException);
- Assert.assertEquals("Leader is not ready", e.getCause().getMessage());
- }
- }
-}
diff --git a/hugegraph-pd/pom.xml b/hugegraph-pd/pom.xml
index ceb8af33b2..4af7896bb2 100644
--- a/hugegraph-pd/pom.xml
+++ b/hugegraph-pd/pom.xml
@@ -44,7 +44,7 @@
2.17.0
- apache-${release.name}-pd-${project.version}
+ apache-${release.name}-pd-incubating-${project.version}3.12.04.13.2
diff --git a/hugegraph-server/Dockerfile b/hugegraph-server/Dockerfile
index f7613f8485..c9df67dc3f 100644
--- a/hugegraph-server/Dockerfile
+++ b/hugegraph-server/Dockerfile
@@ -30,7 +30,7 @@ RUN mvn package $MAVEN_ARGS -e -B -ntp -Dmaven.test.skip=true -Dmaven.javadoc.sk
# Note: ZGC (The Z Garbage Collector) is only supported on ARM-Mac with java > 13
FROM eclipse-temurin:11-jre-jammy
-COPY --from=build /pkg/hugegraph-server/apache-hugegraph-server-*/ /hugegraph-server/
+COPY --from=build /pkg/hugegraph-server/apache-hugegraph-server-incubating-*/ /hugegraph-server/
LABEL maintainer="HugeGraph Docker Maintainers "
# TODO: use g1gc or zgc as default
diff --git a/hugegraph-server/Dockerfile-hstore b/hugegraph-server/Dockerfile-hstore
index 2c6e4b110f..8f7017b6d2 100644
--- a/hugegraph-server/Dockerfile-hstore
+++ b/hugegraph-server/Dockerfile-hstore
@@ -30,7 +30,7 @@ RUN mvn package $MAVEN_ARGS -e -B -ntp -DskipTests -Dmaven.javadoc.skip=true &&
# Note: ZGC (The Z Garbage Collector) is only supported on ARM-Mac with java > 13
FROM eclipse-temurin:11-jre-jammy
-COPY --from=build /pkg/hugegraph-server/apache-hugegraph-server-*/ /hugegraph-server/
+COPY --from=build /pkg/hugegraph-server/apache-hugegraph-server-incubating-*/ /hugegraph-server/
# remove hugegraph.properties and rename hstore.properties.template for default hstore backend
RUN cd /hugegraph-server/conf/graphs \
&& rm hugegraph.properties && mv hstore.properties.template hugegraph.properties
@@ -62,7 +62,7 @@ RUN set -x \
# 2. Init docker script
COPY hugegraph-server/hugegraph-dist/docker/scripts/remote-connect.groovy ./scripts
-#COPY hugegraph-server/hugegraph-dist/docker/scripts/detect-storage.groovy ./scripts
+COPY hugegraph-server/hugegraph-dist/docker/scripts/detect-storage.groovy ./scripts
COPY hugegraph-server/hugegraph-dist/docker/docker-entrypoint.sh .
RUN chmod 755 ./docker-entrypoint.sh
diff --git a/hugegraph-server/hugegraph-api/src/main/java/org/apache/hugegraph/api/API.java b/hugegraph-server/hugegraph-api/src/main/java/org/apache/hugegraph/api/API.java
index 3220cf6b02..c476864711 100644
--- a/hugegraph-server/hugegraph-api/src/main/java/org/apache/hugegraph/api/API.java
+++ b/hugegraph-server/hugegraph-api/src/main/java/org/apache/hugegraph/api/API.java
@@ -86,8 +86,6 @@ public class API {
MetricsUtil.registerMeter(API.class, "expected-error");
private static final Meter unknownErrorMeter =
MetricsUtil.registerMeter(API.class, "unknown-error");
- private static final String STANDALONE_ERROR =
- "GraphSpace management is not supported in standalone mode";
public static HugeGraph graph(GraphManager manager, String graphSpace,
String graph) {
@@ -243,20 +241,6 @@ public static boolean checkAndParseAction(String action) {
}
}
- /**
- * Ensures the graph manager is available and PD mode is enabled.
- *
- * @param manager the graph manager of current request
- * @throws IllegalArgumentException if the graph manager is null
- * @throws HugeException if PD mode is disabled
- */
- protected static void ensurePdModeEnabled(GraphManager manager) {
- E.checkArgumentNotNull(manager, "Graph manager can't be null");
- if (!manager.isPDEnabled()) {
- throw new HugeException(STANDALONE_ERROR);
- }
- }
-
public static boolean hasAdminPerm(GraphManager manager, String user) {
return manager.authManager().isAdminManager(user);
}
diff --git a/hugegraph-server/hugegraph-api/src/main/java/org/apache/hugegraph/api/auth/AccessAPI.java b/hugegraph-server/hugegraph-api/src/main/java/org/apache/hugegraph/api/auth/AccessAPI.java
index 35b05eedb1..8fc8f04442 100644
--- a/hugegraph-server/hugegraph-api/src/main/java/org/apache/hugegraph/api/auth/AccessAPI.java
+++ b/hugegraph-server/hugegraph-api/src/main/java/org/apache/hugegraph/api/auth/AccessAPI.java
@@ -35,8 +35,6 @@
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonProperty;
-import io.swagger.v3.oas.annotations.Parameter;
-import io.swagger.v3.oas.annotations.media.Schema;
import io.swagger.v3.oas.annotations.tags.Tag;
import jakarta.inject.Singleton;
import jakarta.ws.rs.Consumes;
@@ -64,7 +62,6 @@ public class AccessAPI extends API {
@Consumes(APPLICATION_JSON)
@Produces(APPLICATION_JSON_WITH_CHARSET)
public String create(@Context GraphManager manager,
- @Parameter(description = "The graph space name")
@PathParam("graphspace") String graphSpace,
JsonAccess jsonAccess) {
LOG.debug("GraphSpace [{}] create access: {}", graphSpace, jsonAccess);
@@ -81,9 +78,7 @@ public String create(@Context GraphManager manager,
@Consumes(APPLICATION_JSON)
@Produces(APPLICATION_JSON_WITH_CHARSET)
public String update(@Context GraphManager manager,
- @Parameter(description = "The graph space name")
@PathParam("graphspace") String graphSpace,
- @Parameter(description = "The access id")
@PathParam("id") String id,
JsonAccess jsonAccess) {
LOG.debug("GraphSpace [{}] update access: {}", graphSpace, jsonAccess);
@@ -104,13 +99,9 @@ public String update(@Context GraphManager manager,
@Timed
@Produces(APPLICATION_JSON_WITH_CHARSET)
public String list(@Context GraphManager manager,
- @Parameter(description = "The graph space name")
@PathParam("graphspace") String graphSpace,
- @Parameter(description = "The group id to filter by")
@QueryParam("group") String group,
- @Parameter(description = "The target id to filter by")
@QueryParam("target") String target,
- @Parameter(description = "The limit of results to return")
@QueryParam("limit") @DefaultValue("100") long limit) {
LOG.debug("GraphSpace [{}] list accesses by group {} or target {}",
graphSpace, group, target);
@@ -135,9 +126,7 @@ public String list(@Context GraphManager manager,
@Path("{id}")
@Produces(APPLICATION_JSON_WITH_CHARSET)
public String get(@Context GraphManager manager,
- @Parameter(description = "The graph space name")
@PathParam("graphspace") String graphSpace,
- @Parameter(description = "The access id")
@PathParam("id") String id) {
LOG.debug("GraphSpace [{}] get access: {}", graphSpace, id);
@@ -150,9 +139,7 @@ public String get(@Context GraphManager manager,
@Path("{id}")
@Consumes(APPLICATION_JSON)
public void delete(@Context GraphManager manager,
- @Parameter(description = "The graph space name")
@PathParam("graphspace") String graphSpace,
- @Parameter(description = "The access id")
@PathParam("id") String id) {
LOG.debug("GraphSpace [{}] delete access: {}", graphSpace, id);
@@ -168,16 +155,12 @@ public void delete(@Context GraphManager manager,
private static class JsonAccess implements Checkable {
@JsonProperty("group")
- @Schema(description = "The group id", required = true)
private String group;
@JsonProperty("target")
- @Schema(description = "The target id", required = true)
private String target;
@JsonProperty("access_permission")
- @Schema(description = "The access permission", required = true)
private HugePermission permission;
@JsonProperty("access_description")
- @Schema(description = "The access description")
private String description;
public HugeAccess build(HugeAccess access) {
diff --git a/hugegraph-server/hugegraph-api/src/main/java/org/apache/hugegraph/api/auth/BelongAPI.java b/hugegraph-server/hugegraph-api/src/main/java/org/apache/hugegraph/api/auth/BelongAPI.java
index 09af7f51c9..1064802e29 100644
--- a/hugegraph-server/hugegraph-api/src/main/java/org/apache/hugegraph/api/auth/BelongAPI.java
+++ b/hugegraph-server/hugegraph-api/src/main/java/org/apache/hugegraph/api/auth/BelongAPI.java
@@ -34,8 +34,6 @@
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonProperty;
-import io.swagger.v3.oas.annotations.Parameter;
-import io.swagger.v3.oas.annotations.media.Schema;
import io.swagger.v3.oas.annotations.tags.Tag;
import jakarta.inject.Singleton;
import jakarta.ws.rs.Consumes;
@@ -63,7 +61,6 @@ public class BelongAPI extends API {
@Consumes(APPLICATION_JSON)
@Produces(APPLICATION_JSON_WITH_CHARSET)
public String create(@Context GraphManager manager,
- @Parameter(description = "The graph space name")
@PathParam("graphspace") String graphSpace,
JsonBelong jsonBelong) {
LOG.debug("GraphSpace [{}] create belong: {}", graphSpace, jsonBelong);
@@ -80,9 +77,7 @@ public String create(@Context GraphManager manager,
@Consumes(APPLICATION_JSON)
@Produces(APPLICATION_JSON_WITH_CHARSET)
public String update(@Context GraphManager manager,
- @Parameter(description = "The graph space name")
@PathParam("graphspace") String graphSpace,
- @Parameter(description = "The belong id")
@PathParam("id") String id,
JsonBelong jsonBelong) {
LOG.debug("GraphSpace [{}] update belong: {}", graphSpace, jsonBelong);
@@ -103,13 +98,9 @@ public String update(@Context GraphManager manager,
@Timed
@Produces(APPLICATION_JSON_WITH_CHARSET)
public String list(@Context GraphManager manager,
- @Parameter(description = "The graph space name")
@PathParam("graphspace") String graphSpace,
- @Parameter(description = "The user id to filter by")
@QueryParam("user") String user,
- @Parameter(description = "The group id to filter by")
@QueryParam("group") String group,
- @Parameter(description = "The limit of results to return")
@QueryParam("limit") @DefaultValue("100") long limit) {
LOG.debug("GraphSpace [{}] list belongs by user {} or group {}",
graphSpace, user, group);
@@ -134,9 +125,7 @@ public String list(@Context GraphManager manager,
@Path("{id}")
@Produces(APPLICATION_JSON_WITH_CHARSET)
public String get(@Context GraphManager manager,
- @Parameter(description = "The graph space name")
@PathParam("graphspace") String graphSpace,
- @Parameter(description = "The belong id")
@PathParam("id") String id) {
LOG.debug("GraphSpace [{}] get belong: {}", graphSpace, id);
@@ -149,9 +138,7 @@ public String get(@Context GraphManager manager,
@Path("{id}")
@Consumes(APPLICATION_JSON)
public void delete(@Context GraphManager manager,
- @Parameter(description = "The graph space name")
@PathParam("graphspace") String graphSpace,
- @Parameter(description = "The belong id")
@PathParam("id") String id) {
LOG.debug("GraphSpace [{}] delete belong: {}", graphSpace, id);
@@ -167,13 +154,10 @@ public void delete(@Context GraphManager manager,
private static class JsonBelong implements Checkable {
@JsonProperty("user")
- @Schema(description = "The user id", required = true)
private String user;
@JsonProperty("group")
- @Schema(description = "The group id", required = true)
private String group;
@JsonProperty("belong_description")
- @Schema(description = "The belong description")
private String description;
public HugeBelong build(HugeBelong belong) {
diff --git a/hugegraph-server/hugegraph-api/src/main/java/org/apache/hugegraph/api/auth/GroupAPI.java b/hugegraph-server/hugegraph-api/src/main/java/org/apache/hugegraph/api/auth/GroupAPI.java
index ae13beb4a6..2786ef0b6d 100644
--- a/hugegraph-server/hugegraph-api/src/main/java/org/apache/hugegraph/api/auth/GroupAPI.java
+++ b/hugegraph-server/hugegraph-api/src/main/java/org/apache/hugegraph/api/auth/GroupAPI.java
@@ -34,8 +34,6 @@
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonProperty;
-import io.swagger.v3.oas.annotations.Parameter;
-import io.swagger.v3.oas.annotations.media.Schema;
import io.swagger.v3.oas.annotations.tags.Tag;
import jakarta.annotation.security.RolesAllowed;
import jakarta.inject.Singleton;
@@ -81,7 +79,6 @@ public String create(@Context GraphManager manager,
@Produces(APPLICATION_JSON_WITH_CHARSET)
@RolesAllowed({"admin"})
public String update(@Context GraphManager manager,
- @Parameter(description = "The group id")
@PathParam("id") String id,
JsonGroup jsonGroup) {
LOG.debug("update group: {}", jsonGroup);
@@ -103,7 +100,6 @@ public String update(@Context GraphManager manager,
@Produces(APPLICATION_JSON_WITH_CHARSET)
@RolesAllowed({"admin"})
public String list(@Context GraphManager manager,
- @Parameter(description = "The limit of results to return")
@QueryParam("limit") @DefaultValue("100") long limit) {
LOG.debug("list groups");
@@ -117,7 +113,6 @@ public String list(@Context GraphManager manager,
@Produces(APPLICATION_JSON_WITH_CHARSET)
@RolesAllowed({"admin"})
public String get(@Context GraphManager manager,
- @Parameter(description = "The group id")
@PathParam("id") String id) {
LOG.debug("get group: {}", id);
@@ -131,7 +126,6 @@ public String get(@Context GraphManager manager,
@Consumes(APPLICATION_JSON)
@RolesAllowed({"admin"})
public void delete(@Context GraphManager manager,
- @Parameter(description = "The group id")
@PathParam("id") String id) {
LOG.debug("delete group: {}", id);
@@ -147,10 +141,8 @@ public void delete(@Context GraphManager manager,
private static class JsonGroup implements Checkable {
@JsonProperty("group_name")
- @Schema(description = "The group name", required = true)
private String name;
@JsonProperty("group_description")
- @Schema(description = "The group description")
private String description;
public HugeGroup build(HugeGroup group) {
diff --git a/hugegraph-server/hugegraph-api/src/main/java/org/apache/hugegraph/api/auth/LoginAPI.java b/hugegraph-server/hugegraph-api/src/main/java/org/apache/hugegraph/api/auth/LoginAPI.java
index faf62c4064..7086b77af2 100644
--- a/hugegraph-server/hugegraph-api/src/main/java/org/apache/hugegraph/api/auth/LoginAPI.java
+++ b/hugegraph-server/hugegraph-api/src/main/java/org/apache/hugegraph/api/auth/LoginAPI.java
@@ -35,7 +35,6 @@
import com.fasterxml.jackson.annotation.JsonProperty;
import com.google.common.collect.ImmutableMap;
-import io.swagger.v3.oas.annotations.media.Schema;
import io.swagger.v3.oas.annotations.tags.Tag;
import jakarta.inject.Singleton;
import jakarta.ws.rs.BadRequestException;
@@ -126,13 +125,10 @@ public String verifyToken(@Context GraphManager manager,
private static class JsonLogin implements Checkable {
@JsonProperty("user_name")
- @Schema(description = "The user name")
private String name;
@JsonProperty("user_password")
- @Schema(description = "The user password")
private String password;
@JsonProperty("token_expire")
- @Schema(description = "Token expiration time in seconds")
private long expire;
@Override
diff --git a/hugegraph-server/hugegraph-api/src/main/java/org/apache/hugegraph/api/auth/ManagerAPI.java b/hugegraph-server/hugegraph-api/src/main/java/org/apache/hugegraph/api/auth/ManagerAPI.java
index 071e4b8a66..80b91d2731 100644
--- a/hugegraph-server/hugegraph-api/src/main/java/org/apache/hugegraph/api/auth/ManagerAPI.java
+++ b/hugegraph-server/hugegraph-api/src/main/java/org/apache/hugegraph/api/auth/ManagerAPI.java
@@ -37,8 +37,6 @@
import com.fasterxml.jackson.annotation.JsonProperty;
import com.google.common.collect.ImmutableMap;
-import io.swagger.v3.oas.annotations.Parameter;
-import io.swagger.v3.oas.annotations.media.Schema;
import io.swagger.v3.oas.annotations.tags.Tag;
import jakarta.inject.Singleton;
import jakarta.ws.rs.Consumes;
@@ -64,11 +62,9 @@ public class ManagerAPI extends API {
@Consumes(APPLICATION_JSON)
@Produces(APPLICATION_JSON_WITH_CHARSET)
public String createManager(@Context GraphManager manager,
- @Parameter(description = "The graph space name")
@PathParam("graphspace") String graphSpace,
JsonManager jsonManager) {
LOG.debug("Create manager: {}", jsonManager);
- ensurePdModeEnabled(manager);
String user = jsonManager.user;
HugePermission type = jsonManager.type;
// graphSpace now comes from @PathParam instead of JsonManager
@@ -117,14 +113,10 @@ public String createManager(@Context GraphManager manager,
@Timed
@Consumes(APPLICATION_JSON)
public void delete(@Context GraphManager manager,
- @Parameter(description = "The graph space name")
@PathParam("graphspace") String graphSpace,
- @Parameter(description = "The user name")
@QueryParam("user") String user,
- @Parameter(description = "The manager type: SPACE, SPACE_MEMBER, or ADMIN")
@QueryParam("type") HugePermission type) {
LOG.debug("Delete graph manager: {} {} {}", user, type, graphSpace);
- ensurePdModeEnabled(manager);
E.checkArgument(!"admin".equals(user) ||
type != HugePermission.ADMIN,
"User 'admin' can't be removed from ADMIN");
@@ -165,12 +157,10 @@ public void delete(@Context GraphManager manager,
@Timed
@Consumes(APPLICATION_JSON)
public String list(@Context GraphManager manager,
- @Parameter(description = "The graph space name")
@PathParam("graphspace") String graphSpace,
- @Parameter(description = "The manager type: SPACE, SPACE_MEMBER or ADMIN")
@QueryParam("type") HugePermission type) {
LOG.debug("list graph manager: {} {}", type, graphSpace);
- ensurePdModeEnabled(manager);
+
AuthManager authManager = manager.authManager();
validType(type);
List adminManagers;
@@ -197,13 +187,10 @@ public String list(@Context GraphManager manager,
@Path("check")
@Consumes(APPLICATION_JSON)
public String checkRole(@Context GraphManager manager,
- @Parameter(description = "The graph space name")
@PathParam("graphspace") String graphSpace,
- @Parameter(description = "The manager type: " +
- "SPACE, SPACE_MEMBER, or ADMIN")
@QueryParam("type") HugePermission type) {
LOG.debug("check if current user is graph manager: {} {}", type, graphSpace);
- ensurePdModeEnabled(manager);
+
validType(type);
AuthManager authManager = manager.authManager();
String user = HugeGraphAuthProxy.username();
@@ -232,12 +219,9 @@ public String checkRole(@Context GraphManager manager,
@Path("role")
@Consumes(APPLICATION_JSON)
public String getRolesInGs(@Context GraphManager manager,
- @Parameter(description = "The graph space name")
@PathParam("graphspace") String graphSpace,
- @Parameter(description = "The user name") @QueryParam("user")
- String user) {
+ @QueryParam("user") String user) {
LOG.debug("get user [{}]'s role in graph space [{}]", user, graphSpace);
- ensurePdModeEnabled(manager);
AuthManager authManager = manager.authManager();
List result = new ArrayList<>();
validGraphSpace(manager, graphSpace);
@@ -280,10 +264,8 @@ private void validGraphSpace(GraphManager manager, String graphSpace) {
private static class JsonManager implements Checkable {
@JsonProperty("user")
- @Schema(description = "The user or group name", required = true)
private String user;
@JsonProperty("type")
- @Schema(description = "The manager type: SPACE, SPACE_MEMBER, or ADMIN", required = true)
private HugePermission type;
@Override
diff --git a/hugegraph-server/hugegraph-api/src/main/java/org/apache/hugegraph/api/auth/ProjectAPI.java b/hugegraph-server/hugegraph-api/src/main/java/org/apache/hugegraph/api/auth/ProjectAPI.java
index 4380093ba0..229903c137 100644
--- a/hugegraph-server/hugegraph-api/src/main/java/org/apache/hugegraph/api/auth/ProjectAPI.java
+++ b/hugegraph-server/hugegraph-api/src/main/java/org/apache/hugegraph/api/auth/ProjectAPI.java
@@ -39,8 +39,6 @@
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonProperty;
-import io.swagger.v3.oas.annotations.Parameter;
-import io.swagger.v3.oas.annotations.media.Schema;
import io.swagger.v3.oas.annotations.tags.Tag;
import jakarta.inject.Singleton;
import jakarta.ws.rs.Consumes;
@@ -70,7 +68,6 @@ public class ProjectAPI extends API {
@Consumes(APPLICATION_JSON)
@Produces(APPLICATION_JSON_WITH_CHARSET)
public String create(@Context GraphManager manager,
- @Parameter(description = "The graph space name")
@PathParam("graphspace") String graphSpace,
JsonProject jsonProject) {
LOG.debug("GraphSpace [{}] create project: {}", graphSpace, jsonProject);
@@ -92,15 +89,8 @@ public String create(@Context GraphManager manager,
@Consumes(APPLICATION_JSON)
@Produces(APPLICATION_JSON_WITH_CHARSET)
public String update(@Context GraphManager manager,
- @Parameter(description = "The graph space name")
@PathParam("graphspace") String graphSpace,
- @Parameter(description = "The project id")
@PathParam("id") String id,
- @Parameter(
- description = "The action to perform: " +
- "add_graph, remove_graph, " +
- "or empty for description " +
- "update")
@QueryParam("action") String action,
JsonProject jsonProject) {
LOG.debug("GraphSpace [{}] update {} project: {}", graphSpace, action,
@@ -136,9 +126,7 @@ public String update(@Context GraphManager manager,
@Timed
@Produces(APPLICATION_JSON_WITH_CHARSET)
public String list(@Context GraphManager manager,
- @Parameter(description = "The graph space name")
@PathParam("graphspace") String graphSpace,
- @Parameter(description = "The limit of results to return")
@QueryParam("limit") @DefaultValue("100") long limit) {
LOG.debug("GraphSpace [{}] list project", graphSpace);
@@ -152,9 +140,7 @@ public String list(@Context GraphManager manager,
@Path("{id}")
@Produces(APPLICATION_JSON_WITH_CHARSET)
public String get(@Context GraphManager manager,
- @Parameter(description = "The graph space name")
@PathParam("graphspace") String graphSpace,
- @Parameter(description = "The project id")
@PathParam("id") String id) {
LOG.debug("GraphSpace [{}] get project: {}", graphSpace, id);
@@ -172,9 +158,7 @@ public String get(@Context GraphManager manager,
@Path("{id}")
@Consumes(APPLICATION_JSON)
public void delete(@Context GraphManager manager,
- @Parameter(description = "The graph space name")
@PathParam("graphspace") String graphSpace,
- @Parameter(description = "The project id")
@PathParam("id") String id) {
LOG.debug("GraphSpace [{}] delete project: {}", graphSpace, id);
@@ -200,13 +184,10 @@ public static boolean isRemoveGraph(String action) {
private static class JsonProject implements Checkable {
@JsonProperty("project_name")
- @Schema(description = "The project name", required = true)
private String name;
@JsonProperty("project_graphs")
- @Schema(description = "Set of graph names associated with the project")
private Set graphs;
@JsonProperty("project_description")
- @Schema(description = "The project description")
private String description;
public HugeProject build() {
diff --git a/hugegraph-server/hugegraph-api/src/main/java/org/apache/hugegraph/api/auth/TargetAPI.java b/hugegraph-server/hugegraph-api/src/main/java/org/apache/hugegraph/api/auth/TargetAPI.java
index 7f673048dc..d59023f871 100644
--- a/hugegraph-server/hugegraph-api/src/main/java/org/apache/hugegraph/api/auth/TargetAPI.java
+++ b/hugegraph-server/hugegraph-api/src/main/java/org/apache/hugegraph/api/auth/TargetAPI.java
@@ -35,8 +35,6 @@
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonProperty;
-import io.swagger.v3.oas.annotations.Parameter;
-import io.swagger.v3.oas.annotations.media.Schema;
import io.swagger.v3.oas.annotations.tags.Tag;
import jakarta.inject.Singleton;
import jakarta.ws.rs.Consumes;
@@ -64,7 +62,6 @@ public class TargetAPI extends API {
@Consumes(APPLICATION_JSON)
@Produces(APPLICATION_JSON_WITH_CHARSET)
public String create(@Context GraphManager manager,
- @Parameter(description = "The graph space name")
@PathParam("graphspace") String graphSpace,
JsonTarget jsonTarget) {
LOG.debug("GraphSpace [{}] create target: {}", graphSpace, jsonTarget);
@@ -81,9 +78,7 @@ public String create(@Context GraphManager manager,
@Consumes(APPLICATION_JSON)
@Produces(APPLICATION_JSON_WITH_CHARSET)
public String update(@Context GraphManager manager,
- @Parameter(description = "The graph space name")
@PathParam("graphspace") String graphSpace,
- @Parameter(description = "The target id")
@PathParam("id") String id,
JsonTarget jsonTarget) {
LOG.debug("GraphSpace [{}] update target: {}", graphSpace, jsonTarget);
@@ -104,9 +99,7 @@ public String update(@Context GraphManager manager,
@Timed
@Produces(APPLICATION_JSON_WITH_CHARSET)
public String list(@Context GraphManager manager,
- @Parameter(description = "The graph space name")
@PathParam("graphspace") String graphSpace,
- @Parameter(description = "The limit of results to return")
@QueryParam("limit") @DefaultValue("100") long limit) {
LOG.debug("GraphSpace [{}] list targets", graphSpace);
@@ -119,9 +112,7 @@ public String list(@Context GraphManager manager,
@Path("{id}")
@Produces(APPLICATION_JSON_WITH_CHARSET)
public String get(@Context GraphManager manager,
- @Parameter(description = "The graph space name")
@PathParam("graphspace") String graphSpace,
- @Parameter(description = "The target id")
@PathParam("id") String id) {
LOG.debug("GraphSpace [{}] get target: {}", graphSpace, id);
@@ -134,9 +125,7 @@ public String get(@Context GraphManager manager,
@Path("{id}")
@Consumes(APPLICATION_JSON)
public void delete(@Context GraphManager manager,
- @Parameter(description = "The graph space name")
@PathParam("graphspace") String graphSpace,
- @Parameter(description = "The target id")
@PathParam("id") String id) {
LOG.debug("GraphSpace [{}] delete target: {}", graphSpace, id);
@@ -152,16 +141,12 @@ public void delete(@Context GraphManager manager,
private static class JsonTarget implements Checkable {
@JsonProperty("target_name")
- @Schema(description = "The target name", required = true)
private String name;
@JsonProperty("target_graph")
- @Schema(description = "The target graph name", required = true)
private String graph;
@JsonProperty("target_url")
- @Schema(description = "The target URL", required = true)
private String url;
@JsonProperty("target_resources") // error when List
- @Schema(description = "The target resources")
private List