
Commit

Merge branch 'master' into annotation
jackwener authored Feb 28, 2022
2 parents d5be5f6 + 9bb4709 commit a581ba0
Showing 339 changed files with 4,226 additions and 1,944 deletions.
3 changes: 1 addition & 2 deletions .github/workflows/nightly.yml
@@ -82,8 +82,7 @@ jobs:
with:
context: .
file: ./docker/Dockerfile.${{ matrix.service }}
- # platforms: linux/amd64,linux/arm64
- platforms: linux/amd64
+ platforms: linux/amd64,linux/arm64
tags: |
vesoft/nebula-${{ matrix.service }}:nightly
push: true
3 changes: 1 addition & 2 deletions .github/workflows/rc.yml
@@ -99,8 +99,7 @@ jobs:
with:
context: .
file: ./docker/Dockerfile.${{ matrix.service }}
- # platforms: linux/amd64,linux/arm64
- platforms: linux/amd64
+ platforms: linux/amd64,linux/arm64
tags: |
${{ secrets.HARBOR_REGISTRY }}/vesoft/nebula-${{ matrix.service }}:${{ steps.tagname.outputs.tag }}
${{ secrets.HARBOR_REGISTRY }}/vesoft/nebula-${{ matrix.service }}:${{ steps.tagname.outputs.majorver }}
2 changes: 1 addition & 1 deletion .github/workflows/release.yml
@@ -61,7 +61,7 @@ jobs:
env:
FROM_IMAGE: docker://${{ secrets.HARBOR_REGISTRY }}/vesoft/nebula-${{ matrix.service }}
TO_IMAGE: docker://docker.io/vesoft/nebula-${{ matrix.service }}
- CMD: docker run --rm -ti quay.io/containers/skopeo:v1.4.1 copy -a --src-creds ${{ secrets.HARBOR_USERNAME }}:${{ secrets.HARBOR_PASSWORD }} --dest-creds ${{ secrets.DOCKER_USERNAME }}:${{ secrets.DOCKER_PASSWORD }}
+ CMD: docker run --rm -i quay.io/containers/skopeo:v1.4.1 copy -a --src-creds ${{ secrets.HARBOR_USERNAME }}:${{ secrets.HARBOR_PASSWORD }} --dest-creds ${{ secrets.DOCKER_USERNAME }}:${{ secrets.DOCKER_PASSWORD }}
run: |
${{ env.CMD }} ${{ env.FROM_IMAGE }}:${{ steps.tagname.outputs.tag }} ${{ env.TO_IMAGE }}:${{ steps.tagname.outputs.tag }}
${{ env.CMD }} ${{ env.FROM_IMAGE }}:${{ steps.tagname.outputs.tag }} ${{ env.TO_IMAGE }}:${{ steps.tagname.outputs.majorver }}
2 changes: 0 additions & 2 deletions conf/nebula-graphd.conf.default
@@ -67,8 +67,6 @@
--ws_ip=0.0.0.0
# HTTP service port
--ws_http_port=19669
- # HTTP2 service port
- --ws_h2_port=19670
# storage client timeout
--storage_client_timeout_ms=60000
# Port to listen on Meta with HTTP protocol, it corresponds to ws_http_port in metad's configuration file
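For context: entries such as --ws_http_port in these conf files are gflags flags that the daemon reads at startup, which is why the dropped --ws_h2_port lines can simply disappear along with the HTTP2 web service. A minimal sketch of that mechanism, assuming standard gflags usage (illustrative only, not nebula's actual definition site):

    // Illustrative only: how a conf entry like --ws_http_port=19669 maps to a C++ gflags flag.
    #include <gflags/gflags.h>
    #include <iostream>

    // DEFINE_* declares the flag and its default; a --ws_http_port=... argument
    // (on the command line or via a flag file) overrides the default at startup.
    DEFINE_int32(ws_http_port, 19669, "Port for the HTTP web service");

    int main(int argc, char* argv[]) {
      gflags::ParseCommandLineFlags(&argc, &argv, true);
      std::cout << "HTTP service will listen on port " << FLAGS_ws_http_port << "\n";
      return 0;
    }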
2 changes: 0 additions & 2 deletions conf/nebula-graphd.conf.production
@@ -66,8 +66,6 @@
--ws_ip=0.0.0.0
# HTTP service port
--ws_http_port=19669
- # HTTP2 service port
- --ws_h2_port=19670
# storage client timeout
--storage_client_timeout_ms=60000
# Port to listen on Meta with HTTP protocol, it corresponds to ws_http_port in metad's configuration file
2 changes: 0 additions & 2 deletions conf/nebula-metad.conf.default
@@ -36,8 +36,6 @@
--ws_ip=0.0.0.0
# HTTP service port
--ws_http_port=19559
- # HTTP2 service port
- --ws_h2_port=19560
# Port to listen on Storage with HTTP protocol, it corresponds to ws_http_port in storage's configuration file
--ws_storage_http_port=19779

2 changes: 0 additions & 2 deletions conf/nebula-metad.conf.production
@@ -36,8 +36,6 @@
--ws_ip=0.0.0.0
# HTTP service port
--ws_http_port=19559
- # HTTP2 service port
- --ws_h2_port=19560
# Port to listen on Storage with HTTP protocol, it corresponds to ws_http_port in storage's configuration file
--ws_storage_http_port=19779

4 changes: 0 additions & 4 deletions conf/nebula-standalone.conf.default
@@ -68,16 +68,12 @@
--ws_ip=0.0.0.0
# HTTP service port
--ws_http_port=19669
- # HTTP2 service port
- --ws_h2_port=19670
# storage client timeout
--storage_client_timeout_ms=60000
# Port to listen on Meta with HTTP protocol, it corresponds to ws_http_port in metad's configuration file
--ws_meta_http_port=19559
# HTTP service port
--ws_storage_http_port=19779
- # HTTP2 service port
- --ws_storage_h2_port=19780
# heartbeat with meta service
--heartbeat_interval_secs=10

2 changes: 0 additions & 2 deletions conf/nebula-storaged-listener.conf.production
@@ -37,8 +37,6 @@
--ws_ip=192.168.2.4
# HTTP service port
--ws_http_port=19789
- # HTTP2 service port
- --ws_h2_port=19790
# heartbeat with meta service
--heartbeat_interval_secs=10

6 changes: 2 additions & 4 deletions conf/nebula-storaged.conf.default
@@ -38,8 +38,6 @@
--ws_ip=0.0.0.0
# HTTP service port
--ws_http_port=19779
- # HTTP2 service port
- --ws_h2_port=19780
# heartbeat with meta service
--heartbeat_interval_secs=10

@@ -100,8 +98,8 @@
############## Key-Value separation ##############
# Whether or not to enable BlobDB (RocksDB key-value separation support)
--rocksdb_enable_kv_separation=false
- # RocksDB key value separation threshold. Values at or above this threshold will be written to blob files during flush or compaction.
- --rocksdb_kv_separation_threshold=0
+ # RocksDB key value separation threshold in bytes. Values at or above this threshold will be written to blob files during flush or compaction.
+ --rocksdb_kv_separation_threshold=100
# Compression algorithm for blobs, options: no,snappy,lz4,lz4hc,zlib,bzip2,zstd
--rocksdb_blob_compression=lz4
# Whether to garbage collect blobs during compaction
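The key-value separation block above corresponds to RocksDB's integrated BlobDB options. The exact wiring inside nebula-storaged is not part of this diff, so the mapping below is only a sketch of how such flags would translate to rocksdb::Options; when separation is enabled, a threshold of 0 would send every value to blob files, which is presumably why the default was raised to 100 bytes.

    // Rough, illustrative mapping of the kv-separation flags onto RocksDB options.
    #include <cstdint>
    #include <rocksdb/options.h>

    rocksdb::Options makeBlobOptions(bool enableKvSeparation, uint64_t separationThresholdBytes) {
      rocksdb::Options options;
      // --rocksdb_enable_kv_separation -> enable_blob_files
      options.enable_blob_files = enableKvSeparation;
      // --rocksdb_kv_separation_threshold=100 -> values of at least 100 bytes go to blob files
      options.min_blob_size = separationThresholdBytes;
      // --rocksdb_blob_compression=lz4
      options.blob_compression_type = rocksdb::kLZ4Compression;
      // "Whether to garbage collect blobs during compaction"
      options.enable_blob_garbage_collection = true;
      return options;
    }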
9 changes: 5 additions & 4 deletions conf/nebula-storaged.conf.production
@@ -38,8 +38,6 @@
--ws_ip=0.0.0.0
# HTTP service port
--ws_http_port=19779
- # HTTP2 service port
- --ws_h2_port=19780
# heartbeat with meta service
--heartbeat_interval_secs=10

@@ -64,6 +62,9 @@
# The default block cache size used in BlockBasedTable. (MB)
# recommend: 1/3 of all memory
--rocksdb_block_cache=4096
+ # Disable page cache to better control memory used by rocksdb.
+ # Caution: Make sure to allocate enough block cache if disabling page cache!
+ --disable_page_cache=false

# Compression algorithm, options: no,snappy,lz4,lz4hc,zlib,bzip2,zstd
# For the sake of binary compatibility, the default value is snappy.
@@ -106,8 +107,8 @@
############## Key-Value separation ##############
# Whether or not to enable BlobDB (RocksDB key-value separation support)
--rocksdb_enable_kv_separation=false
- # RocksDB key value separation threshold. Values at or above this threshold will be written to blob files during flush or compaction.
- --rocksdb_kv_separation_threshold=0
+ # RocksDB key value separation threshold in bytes. Values at or above this threshold will be written to blob files during flush or compaction.
+ --rocksdb_kv_separation_threshold=100
# Compression algorithm for blobs, options: no,snappy,lz4,lz4hc,zlib,bzip2,zstd
--rocksdb_blob_compression=lz4
# Whether to garbage collect blobs during compaction
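The new --disable_page_cache flag and the block-cache warning describe the usual trade-off: if RocksDB bypasses the OS page cache, its own block cache becomes the only read cache, so it must be sized generously. How nebula-storaged implements the flag is not visible in this diff; one common way to get that behaviour from RocksDB is direct I/O, sketched here purely as an assumption for illustration:

    // Assumption for illustration: "disable page cache" realised via RocksDB direct I/O,
    // with an explicitly sized block cache since it then caches all reads.
    #include <rocksdb/cache.h>
    #include <rocksdb/options.h>
    #include <rocksdb/table.h>

    rocksdb::Options makeDirectIoOptions(bool disablePageCache, size_t blockCacheMB) {
      rocksdb::Options options;
      if (disablePageCache) {
        options.use_direct_reads = true;
        options.use_direct_io_for_flush_and_compaction = true;
      }
      rocksdb::BlockBasedTableOptions tableOpts;
      tableOpts.block_cache = rocksdb::NewLRUCache(blockCacheMB << 20);  // MB -> bytes
      options.table_factory.reset(rocksdb::NewBlockBasedTableFactory(tableOpts));
      return options;
    }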
28 changes: 14 additions & 14 deletions src/clients/meta/MetaClient.cpp
@@ -18,7 +18,6 @@
#include "clients/meta/stats/MetaClientStats.h"
#include "common/base/Base.h"
#include "common/base/MurmurHash2.h"
#include "common/base/Status.h"
#include "common/conf/Configuration.h"
#include "common/http/HttpClient.h"
#include "common/meta/NebulaSchemaProvider.h"
@@ -30,7 +29,6 @@
#include "webservice/Common.h"

DECLARE_int32(ws_meta_http_port);
- DECLARE_int32(ws_meta_h2_port);

DEFINE_uint32(expired_time_factor, 5, "The factor of expired time based on heart beat interval");
DEFINE_int32(heartbeat_interval_secs, 10, "Heartbeat interval in seconds");
@@ -402,7 +400,6 @@ bool MetaClient::loadData() {

TagSchemas MetaClient::buildTagSchemas(std::vector<cpp2::TagItem> tagItemVec) {
TagSchemas tagSchemas;
- TagID lastTagId = -1;
for (auto& tagIt : tagItemVec) {
// meta will return the different version from new to old
auto schema = std::make_shared<NebulaSchemaProvider>(tagIt.get_version());
@@ -411,20 +408,21 @@ TagSchemas MetaClient::buildTagSchemas(std::vector<cpp2::TagItem> tagItemVec) {
}
// handle schema property
schema->setProp(tagIt.get_schema().get_schema_prop());
- if (tagIt.get_tag_id() != lastTagId) {
- // init schema vector, since schema version is zero-based, need to add one
- tagSchemas[tagIt.get_tag_id()].resize(schema->getVersion() + 1);
- lastTagId = tagIt.get_tag_id();
+ auto& schemas = tagSchemas[tagIt.get_tag_id()];
+ // Because of the byte order of schema version in meta is not same as numerical order, we have
+ // to check schema version
+ if (schemas.size() <= static_cast<size_t>(schema->getVersion())) {
+ // since schema version is zero-based, need to add one
+ schemas.resize(schema->getVersion() + 1);
}
- tagSchemas[tagIt.get_tag_id()][schema->getVersion()] = std::move(schema);
+ schemas[schema->getVersion()] = std::move(schema);
}
return tagSchemas;
}

EdgeSchemas MetaClient::buildEdgeSchemas(std::vector<cpp2::EdgeItem> edgeItemVec) {
EdgeSchemas edgeSchemas;
std::unordered_set<std::pair<GraphSpaceID, EdgeType>> edges;
- EdgeType lastEdgeType = -1;
for (auto& edgeIt : edgeItemVec) {
// meta will return the different version from new to old
auto schema = std::make_shared<NebulaSchemaProvider>(edgeIt.get_version());
@@ -433,12 +431,14 @@ EdgeSchemas MetaClient::buildEdgeSchemas(std::vector<cpp2::EdgeItem> edgeItemVec
}
// handle shcem property
schema->setProp(edgeIt.get_schema().get_schema_prop());
- if (edgeIt.get_edge_type() != lastEdgeType) {
- // init schema vector, since schema version is zero-based, need to add one
- edgeSchemas[edgeIt.get_edge_type()].resize(schema->getVersion() + 1);
- lastEdgeType = edgeIt.get_edge_type();
+ auto& schemas = edgeSchemas[edgeIt.get_edge_type()];
+ // Because of the byte order of schema version in meta is not same as numerical order, we have
+ // to check schema version
+ if (schemas.size() <= static_cast<size_t>(schema->getVersion())) {
+ // since schema version is zero-based, need to add one
+ schemas.resize(schema->getVersion() + 1);
}
- edgeSchemas[edgeIt.get_edge_type()][schema->getVersion()] = std::move(schema);
+ schemas[schema->getVersion()] = std::move(schema);
}
return edgeSchemas;
}
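The rewritten buildTagSchemas/buildEdgeSchemas keeps one vector of schemas per tag or edge type, indexed by schema version and grown on demand, instead of tracking lastTagId/lastEdgeType; this works even when versions do not arrive in numerical order. A condensed standalone sketch of that bookkeeping, with simplified stand-in types:

    // Stand-in types; the real code uses NebulaSchemaProvider and meta's cpp2::TagItem.
    #include <cstdint>
    #include <memory>
    #include <unordered_map>
    #include <vector>

    struct Schema {
      int64_t version;
    };

    using TagID = int32_t;
    using TagSchemaMap = std::unordered_map<TagID, std::vector<std::shared_ptr<Schema>>>;

    void addSchema(TagSchemaMap& tagSchemas, TagID tagId, std::shared_ptr<Schema> schema) {
      auto& schemas = tagSchemas[tagId];
      // Versions are zero-based, so a schema with version v needs v + 1 slots.
      if (schemas.size() <= static_cast<size_t>(schema->version)) {
        schemas.resize(schema->version + 1);
      }
      // Slots for versions not seen yet stay nullptr.
      schemas[schema->version] = std::move(schema);
    }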
3 changes: 1 addition & 2 deletions src/clients/meta/MetaClient.h
@@ -19,7 +19,6 @@

#include "common/base/Base.h"
#include "common/base/ObjectPool.h"
#include "common/base/Status.h"
#include "common/base/StatusOr.h"
#include "common/meta/Common.h"
#include "common/meta/GflagsManager.h"
@@ -221,8 +220,8 @@ class MetaClient {
FRIEND_TEST(MetaClientTest, RetryUntilLimitTest);
FRIEND_TEST(MetaClientTest, RocksdbOptionsTest);
FRIEND_TEST(MetaClientTest, VerifyClientTest);
- friend class KillQueryMetaWrapper;
FRIEND_TEST(ChainAddEdgesTest, AddEdgesLocalTest);
+ friend class KillQueryMetaWrapper;
friend class storage::MetaClientTestUpdater;

public:
2 changes: 0 additions & 2 deletions src/clients/storage/GeneralStorageClient.h
@@ -6,8 +6,6 @@
#ifndef CLIENTS_STORAGE_GENERALSTORAGECLIENT_H_
#define CLIENTS_STORAGE_GENERALSTORAGECLIENT_H_

- #include <gtest/gtest_prod.h>
-
#include "clients/meta/MetaClient.h"
#include "clients/storage/StorageClientBase.h"
#include "common/base/Base.h"
2 changes: 0 additions & 2 deletions src/clients/storage/InternalStorageClient.h
@@ -6,8 +6,6 @@
#ifndef CLIENTS_STORAGE_INTERNALSTORAGEClient_H_
#define CLIENTS_STORAGE_INTERNALSTORAGEClient_H_

- #include <gtest/gtest_prod.h>
-
#include "clients/storage/StorageClientBase.h"
#include "common/base/Base.h"
#include "common/base/ErrorOr.h"
2 changes: 0 additions & 2 deletions src/clients/storage/StorageClient.h
@@ -6,8 +6,6 @@
#ifndef CLIENTS_STORAGE_STORAGECLIENT_H
#define CLIENTS_STORAGE_STORAGECLIENT_H

- #include <gtest/gtest_prod.h>
-
#include "clients/storage/StorageClientBase.h"
#include "common/base/Base.h"
#include "common/thrift/ThriftClientManager.h"
2 changes: 0 additions & 2 deletions src/common/CMakeLists.txt
@@ -7,15 +7,13 @@ nebula_add_subdirectory(time)
nebula_add_subdirectory(network)
nebula_add_subdirectory(thrift)
nebula_add_subdirectory(fs)
- nebula_add_subdirectory(concurrent)
nebula_add_subdirectory(thread)
nebula_add_subdirectory(process)
nebula_add_subdirectory(hdfs)
nebula_add_subdirectory(http)
nebula_add_subdirectory(stats)
nebula_add_subdirectory(charset)
nebula_add_subdirectory(algorithm)
- nebula_add_subdirectory(encryption)
nebula_add_subdirectory(datatypes)
nebula_add_subdirectory(conf)
nebula_add_subdirectory(meta)
1 change: 0 additions & 1 deletion src/common/base/CMakeLists.txt
@@ -14,7 +14,6 @@ nebula_add_library(
SanitizerOptions.cpp
SignalHandler.cpp
SlowOpTracker.cpp
- StringValue.cpp
${gdb_debug_script}
)

3 changes: 2 additions & 1 deletion src/common/base/ObjectPool.h
@@ -8,6 +8,7 @@

#include <folly/SpinLock.h>

+ #include <boost/core/noncopyable.hpp>
#include <functional>
#include <list>
#include <type_traits>
@@ -21,7 +22,7 @@ class Expression;

typedef std::lock_guard<folly::SpinLock> SLGuard;

- class ObjectPool final : private cpp::NonCopyable, private cpp::NonMovable {
+ class ObjectPool final : private boost::noncopyable, private cpp::NonMovable {
public:
ObjectPool() {}
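The functional change here is swapping the in-house cpp::NonCopyable base for boost::noncopyable; both are meant to make the class non-copyable through private inheritance. A toy illustration (not the real ObjectPool):

    // Privately inheriting boost::noncopyable removes the copy constructor and
    // copy assignment, so instances can be created but never copied.
    #include <boost/core/noncopyable.hpp>

    class Holder final : private boost::noncopyable {
     public:
      Holder() = default;
    };

    int main() {
      Holder a;
      // Holder b = a;  // would not compile: copy construction is disallowed
      return 0;
    }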
