Commit
chore(store): translate CJK comments to English for store-dist, store-grpc, store-node, store-rocksdb, store-test (#2645)

Co-authored-by: Peng Junzhi <[email protected]>
VGalaxies and Pengzna authored Aug 23, 2024
1 parent f88fad4 commit 0b24aca
Showing 51 changed files with 258 additions and 258 deletions.
@@ -75,12 +75,12 @@ export FILE_LIMITN=1024
function check_evn_limit() {
local limit_check=$(ulimit -n)
if [[ ${limit_check} != "unlimited" && ${limit_check} -lt ${FILE_LIMITN} ]]; then
echo -e "${BASH_SOURCE[0]##*/}:${LINENO}:\E[1;32m ulimit -n 可以打开的最大文件描述符数太少,需要(${FILE_LIMITN})!! \E[0m"
echo -e "${BASH_SOURCE[0]##*/}:${LINENO}:\E[1;32m ulimit -n: the maximum number of open file descriptors is too low, need (${FILE_LIMITN})!! \E[0m"
return 1
fi
limit_check=$(ulimit -u)
if [[ ${limit_check} != "unlimited" && ${limit_check} -lt ${PROC_LIMITN} ]]; then
echo -e "${BASH_SOURCE[0]##*/}:${LINENO}:\E[1;32m ulimit -u 用户最大可用的进程数太少,需要(${PROC_LIMITN})!! \E[0m"
echo -e "${BASH_SOURCE[0]##*/}:${LINENO}:\E[1;32m ulimit -u: the maximum number of user processes is too low, need (${PROC_LIMITN})!! \E[0m"
return 2
fi
return 0
@@ -26,9 +26,9 @@ management:
include: "*"

rocksdb:
# rocksdb 使用的总内存大小,达到该值强制写盘
# rocksdb total memory usage, force flush to disk when reaching this value
total_memory_size: 32000000000
# rocksdb 使用的 memtable 大小
# memtable size used by rocksdb
write_buffer_size: 32000000
# 对于每个 rocksdb 来说,memtable 个数达到该值进行写盘
# For each rocksdb instance, flush to disk when the number of memtables reaches this value
min_write_buffer_number_to_merge: 16
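
As a rough illustration of what these knobs control, here is a minimal sketch of how comparable limits could be set through the RocksDB Java API (org.rocksdb). It is not the project's actual wiring, and the WriteBufferManager/LRUCache sizing is an assumption.

```java
import org.rocksdb.LRUCache;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.WriteBufferManager;

public class RocksOptionsSketch {

    public static Options build() {
        RocksDB.loadLibrary();
        long totalMemorySize = 32_000_000_000L; // rocksdb.total_memory_size
        long writeBufferSize = 32_000_000L;     // rocksdb.write_buffer_size (per memtable)
        int minWriteBufferNumberToMerge = 16;   // rocksdb.min_write_buffer_number_to_merge

        Options options = new Options().setCreateIfMissing(true);
        // Size of a single memtable; it is scheduled for flushing once this many bytes are buffered.
        options.setWriteBufferSize(writeBufferSize);
        // Number of memtables to accumulate before they are merged and written to disk.
        options.setMinWriteBufferNumberToMerge(minWriteBufferNumberToMerge);
        // Global cap on memtable memory; reaching it forces flushing to disk.
        options.setWriteBufferManager(
                new WriteBufferManager(totalMemorySize, new LRUCache(totalMemorySize / 4)));
        return options;
    }
}
```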
@@ -16,7 +16,7 @@
#

pdserver:
# pd 服务地址,多个 pd 地址用逗号分割
# PD service address, multiple PD addresses separated by commas
address: localhost:8686

management:
@@ -30,24 +30,24 @@ management:
include: "*"

grpc:
# grpc 的服务地址
# grpc service address
host: 127.0.0.1
port: 8500
netty-server:
max-inbound-message-size: 1000MB
raft:
# raft 缓存队列大小
# raft cache queue size
disruptorBufferSize: 1024
address: 127.0.0.1:8510
max-log-file-size: 600000000000
# 快照生成时间间隔,单位秒
# Snapshot generation interval, in seconds
snapshotInterval: 1800
server:
# rest 服务地址
# rest service address
port: 8520

app:
# 存储路径,支持多个路径,逗号分割
# Storage path; multiple comma-separated paths are supported
data-path: ./storage
#raft-path: ./storage

14 changes: 7 additions & 7 deletions hugegraph-store/hg-store-grpc/pom.xml
@@ -91,23 +91,23 @@
<pluginArtifact>
io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier}
</pluginArtifact>
<!--默认值-->
<!-- Default value -->
<protoSourceRoot>${project.basedir}/src/main/proto</protoSourceRoot>
<!--默认值-->
<!-- Default value -->
<!--<outputDirectory>${project.build.directory}/generated-sources/protobuf/java</outputDirectory>-->
<outputDirectory>${project.basedir}/src/main/java</outputDirectory>
<!--设置是否在生成java文件之前清空outputDirectory的文件,默认值为true,设置为false时也会覆盖同名文件-->
<!-- Whether to clear the files in outputDirectory before generating Java files; the default is true. Even when set to false, files with the same name will be overwritten. -->
<clearOutputDirectory>false</clearOutputDirectory>
<!--更多配置信息可以查看https://www.xolstice.org/protobuf-maven-plugin/compile-mojo.html-->
<!-- More configuration information can be found at https://www.xolstice.org/protobuf-maven-plugin/compile-mojo.html -->
</configuration>
<executions>
<execution>
<!--在执行mvn compile的时候会执行以下操作-->
<!-- When executing mvn compile, the following operations will be performed -->
<phase>generate-sources</phase>
<goals>
<!--生成OuterClass类-->
<!-- Generate OuterClass class -->
<goal>compile</goal>
<!--生成Grpc类-->
<!-- Generate Grpc classes -->
<goal>compile-custom</goal>
</goals>
</execution>
26 changes: 13 additions & 13 deletions hugegraph-store/hg-store-grpc/src/main/proto/graphpb.proto
@@ -30,20 +30,20 @@ message ScanPartitionRequest{
SCAN_VERTEX = 1;
SCAN_EDGE = 2;
}
// 请求参数
// Request parameters
message Request{
ScanType scan_type = 1;
string graph_name = 2;
uint32 partition_id = 3;
uint32 start_code = 4;
uint32 end_code = 5;
// 过滤条件
// Filter conditions
string condition = 6;
string table = 7;
int64 limit = 8;
int32 boundary = 9;
bytes position = 10;
// 返回条件
// Return condition
repeated int64 properties = 11;
}

@@ -54,14 +54,14 @@ message ScanPartitionRequest{
RequestHeader header = 1;
oneof request {
Request scan_request = 2;
// 每消费一个数据包,通知服务端一次,返回消息序号
// Notify the server once for each data packet consumed, returning the message sequence number
Reply reply_request = 4;
}
}

message ScanResponse{
ResponseHeader header = 1;
// 消息序号
// Message Sequence Number
int32 seq_no = 2;
repeated Vertex vertex = 3;
repeated Edge edge = 4;
@@ -74,19 +74,19 @@ message Property{
}

message Vertex{
int64 label = 1; // 点类型
Variant id = 2; // 点ID
repeated Property properties = 3; //点属性
int64 label = 1; // Vertex type
Variant id = 2; // Vertex ID
repeated Property properties = 3; // Vertex properties
}

message Edge{
int64 label = 1; // 边类型
int64 label = 1; // Edge type
int64 sourceLabel = 2;
int64 targetLabel = 3;
Variant source_id = 4; // 源点ID
Variant target_id = 5; // 目标点ID
Variant source_id = 4; // Source vertex ID
Variant target_id = 5; // Target vertex ID

repeated Property properties = 6; //边属性
repeated Property properties = 6; // Edge properties
}

message Variant {
@@ -116,7 +116,7 @@ enum VariantType {


message RequestHeader {
// 发送者 ID.
// Sender ID.
uint64 sender_id = 2;
}
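
With the protobuf-maven-plugin configuration shown above for hg-store-grpc, mvn compile turns these messages into Java builders. Below is a hedged sketch of building a vertex-scan request; the generated package and outer class depend on java_package/java_outer_classname options not visible in this diff, so the unqualified names (including ScanType being nested in ScanPartitionRequest) are assumptions.

```java
// Assumed generated types from graphpb.proto (package / outer class omitted on purpose).
ScanPartitionRequest.Request scan = ScanPartitionRequest.Request.newBuilder()
        .setScanType(ScanPartitionRequest.ScanType.SCAN_VERTEX)
        .setGraphName("hugegraph")   // illustrative graph name
        .setPartitionId(0)
        .setLimit(1000)
        .build();

ScanPartitionRequest request = ScanPartitionRequest.newBuilder()
        .setScanRequest(scan)        // fills the oneof; setReplyRequest(...) would acknowledge consumed packets
        .build();
```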

@@ -82,10 +82,10 @@ enum ScanMethod {
}

enum ScanOrderType{
// 批量接口下,返回顺序的要求
ORDER_NONE = 0; // 允许无序
ORDER_WITHIN_VERTEX = 1; // 一个点内的边不会被打断,单不同点之间为无序
ORDER_STRICT = 2; // 保证原始的输入点顺序
// Ordering requirements for results returned by the batch interface
ORDER_NONE = 0; // No ordering guarantee
ORDER_WITHIN_VERTEX = 1; // Edges of the same vertex are not interleaved, but there is no ordering between different vertices
ORDER_STRICT = 2; // Preserve the original input vertex order
}

enum OpType {
@@ -122,11 +122,11 @@ message PartitionLeader {

enum PartitionFaultType{
PARTITION_FAULT_TYPE_UNKNOWN = 0;
// 当前不是Leader,返回Leader所在store
// Currently not the Leader, return the store where the Leader is located.
PARTITION_FAULT_TYPE_NOT_LEADER = 1;
// 等待Leader超时,可能raft group创建失败
// Timed out waiting for the Leader; the raft group may have failed to be created
PARTITION_FAULT_TYPE_WAIT_LEADER_TIMEOUT = 2;
// 分区不属于本机
// Partition does not belong to this machine
PARTITION_FAULT_TYPE_NOT_LOCAL = 3;

}
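
A caller typically branches on these fault types when a request fails. The sketch below is a hypothetical client-side reaction: the enum constants follow protobuf's generated naming, while the helper methods are placeholders, not this project's API.

```java
public class PartitionFaultHandlerSketch {

    // Hypothetical reaction to a partition fault reported by a store.
    void handle(PartitionFaultType faultType) {
        switch (faultType) {
            case PARTITION_FAULT_TYPE_NOT_LEADER:
                refreshLeaderAndRetry();      // the response carries the store holding the Leader
                break;
            case PARTITION_FAULT_TYPE_WAIT_LEADER_TIMEOUT:
                backOffAndRetry();            // the raft group may still be forming
                break;
            case PARTITION_FAULT_TYPE_NOT_LOCAL:
                refreshPartitionRouting();    // the partition-to-store mapping is stale
                break;
            default:
                throw new IllegalStateException("Unexpected fault: " + faultType);
        }
    }

    private void refreshLeaderAndRetry() { /* placeholder */ }

    private void backOffAndRetry() { /* placeholder */ }

    private void refreshPartitionRouting() { /* placeholder */ }
}
```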
@@ -49,14 +49,14 @@ public class AppConfig {
@Value("${server.port}")
private int restPort;

//内置pd模式,用于单机部署
// Built-in pd mode, for standalone deployment
@Value("${app.data-path: store}")
private String dataPath;

@Value("${app.raft-path:}")
private String raftPath;

//内置pd模式,用于单机部署
// Built-in pd mode, for standalone deployment
@Value("${app.fake-pd: false}")
private boolean fakePd;
@Autowired
@@ -97,7 +97,7 @@ public void init() {
if (raft.getDisruptorBufferSize() == 0) {
int size = (int) (totalMemory / 1000 / 1000 / 1000);
size = (int) Math.pow(2, Math.round(Math.log(size) / Math.log(2))) * 32;
raft.setDisruptorBufferSize(size); // 每32M增加一个buffer
raft.setDisruptorBufferSize(size); // Increase one buffer every 32M
}
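
The formula above rounds the machine's memory (in GB) to the nearest power of two and allots 32 ring-buffer slots per GB, i.e. roughly one slot per 32M. A standalone sketch reproducing the calculation (not the project's class):

```java
public class DisruptorSizeSketch {

    static int disruptorBufferSize(long totalMemoryBytes) {
        int sizeGb = (int) (totalMemoryBytes / 1000 / 1000 / 1000);
        // Round the GB count to the nearest power of two, then scale by 32 slots per GB.
        return (int) Math.pow(2, Math.round(Math.log(sizeGb) / Math.log(2))) * 32;
    }

    public static void main(String[] args) {
        System.out.println(disruptorBufferSize(32_000_000_000L)); // 32 GB -> 1024 (the yaml default shown earlier)
        System.out.println(disruptorBufferSize(64_000_000_000L)); // 64 GB -> 2048
    }
}
```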

if (!rocksdb.containsKey("write_buffer_size") ||
@@ -213,7 +213,7 @@ public class FakePdConfig {
@Value("${fake-pd.store-list:''}")
private String storeList;
@Value("${fake-pd.peers-list:''}")
private String peersList; //fakePd模式下,raft集群初始配置
private String peersList; // In fakePd mode, the initial raft cluster configuration
@Value("${fake-pd.partition-count:3}")
private int partitionCount;
@Value("${fake-pd.shard-count:3}")
@@ -43,7 +43,7 @@ public void run() {
doSomethingForShutdown();

try {
mainThread.join(); //当收到停止信号时,等待mainThread的执行完成
mainThread.join(); // Wait for mainThread to finish when a stop signal is received.
} catch (InterruptedException ignored) {
}
System.out.println("Shut down complete.");
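
This is the standard JVM shutdown-hook pattern: the hook thread blocks on mainThread.join() so cleanup only completes after the main thread has exited. A minimal self-contained sketch of the same idiom (class name and messages are illustrative):

```java
public class ShutdownHookSketch {

    public static void main(String[] args) throws InterruptedException {
        final Thread mainThread = Thread.currentThread();

        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
            System.out.println("Stop signal received, shutting down...");
            try {
                mainThread.join();   // wait for the main thread to finish its work
            } catch (InterruptedException ignored) {
            }
            System.out.println("Shut down complete.");
        }));

        Thread.sleep(1_000);         // stands in for the service's main loop
    }
}
```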
@@ -39,7 +39,7 @@ public static void main(String[] args) {
}

public static void start() {
// 设置solt用到的日志位置
// Set the log location for the slot usage
String logPath = System.getProperty("logging.path");
if (StringUtils.isBlank(logPath)) {
System.setProperty("logging.path", "logs");
@@ -34,7 +34,7 @@
import lombok.extern.slf4j.Slf4j;

/**
* 仅用于测试
* For testing only
*/
@RestController
@Slf4j
@@ -75,7 +75,7 @@ public String deleteRaftNode(@PathVariable(value = "groupId") int groupId) {
nodeService.getStoreEngine().destroyPartitionEngine(groupId, graphs);
return "OK";
} else {
return "未找到分区";
return "Partition not found";
}

}
@@ -98,7 +98,7 @@ public Map<String, Object> getPartitions(
String graphName = partitionEntry.getKey();
Partition pt = partitionEntry.getValue();
PartitionInfo partition = new PartitionInfo(pt);
// 此处为了打开所有的图,metric只返回已打开的图
// This call is made to open all graphs, since the metric only returns graphs that are already open
businessHandler.getLatestSequenceNumber(graphName, pt.getId());
partition.setMetric(
businessHandler.getPartitionMetric(graphName, pt.getId(), accurate));
@@ -142,7 +142,7 @@ public Raft getPartition(@PathVariable(value = "id") int id) {
}

/**
* 打印分区的所有key
* Print all keys in the partition
*/
@GetMapping(value = "/partition/dump/{id}", produces = MediaType.APPLICATION_JSON_VALUE)
public Map<String, Object> dumpPartition(@PathVariable(value = "id") int id) throws
@@ -171,7 +171,7 @@ public Map<String, Object> dumpPartition(@PathVariable(value = "id") int id) thr
}

/**
* 打印分区的所有key
* Print all keys in the partition
*/
@GetMapping(value = "/partition/clean/{id}", produces = MediaType.APPLICATION_JSON_VALUE)
public Map<String, Object> cleanPartition(@PathVariable(value = "id") int id) throws
@@ -196,7 +196,7 @@ public Map<String, Object> arthasstart(
ArthasAgent.attach(configMap);
// DashResponse retPose = new DashResponse();
List<String> ret = new ArrayList<>();
ret.add("Arthas 启动成功");
ret.add("Arthas started successfully");
return okMap("arthasstart", ret);
}
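
ArthasAgent.attach(configMap) comes from Arthas' agent-attach API and pulls the diagnostics agent into the running JVM. Below is a hypothetical standalone attach sketch; the property keys are common Arthas settings and are assumptions here, not taken from this endpoint's configMap.

```java
import java.util.HashMap;
import java.util.Map;

import com.taobao.arthas.agent.attach.ArthasAgent;

public class ArthasAttachSketch {

    public static void main(String[] args) {
        Map<String, String> configMap = new HashMap<>();
        configMap.put("arthas.telnetPort", "3658"); // assumed common setting
        configMap.put("arthas.httpPort", "8563");   // assumed common setting
        ArthasAgent.attach(configMap);              // attach the Arthas agent to this JVM
        System.out.println("Arthas started successfully");
    }
}
```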

@@ -44,7 +44,7 @@
import lombok.extern.slf4j.Slf4j;

/**
* 批量处理的grpc回调封装类
* Batch processing grpc callback wrapper class
*
* @param <V>
*/
@@ -95,7 +95,7 @@ public void run(Status status) {
}

/**
* 不使用计数器latch
* Not using counter latch
*
* @return
*/
@@ -158,13 +158,13 @@ public String getErrorMsg() {
}

/**
* 等待raft执行结束,返回结果给grpc
* Wait for the raft execution to complete and return the result to grpc
*/
public void waitFinish(StreamObserver<V> observer, Function<List<V>, V> ok, long timeout) {
try {
countDownLatch.await(timeout, TimeUnit.MILLISECONDS);

if (errorStatus.isEmpty()) { // 没有错误时,合并结果
if (errorStatus.isEmpty()) { // No error, merge results
observer.onNext(ok.apply(results));
} else {
observer.onNext((V) FeedbackRes.newBuilder()
@@ -186,7 +186,7 @@ public void waitFinish(StreamObserver<V> observer, Function<List<V>, V> ok, long
}

/**
* 从多个结果中选择一个错误的结果返回,如果没有错误,返回第一个
* Select an error result from the list of results; if there are no errors, return the first one.
*/
public FeedbackRes selectError(List<FeedbackRes> results) {
if (!CollectionUtils.isEmpty(results)) {
@@ -36,7 +36,7 @@ abstract class GrpcClosure<V> implements RaftClosure {
private V result;

/**
* 设置输出结果给raftClosure,对于Follower来说,raftClosure为空
* Set the output result on raftClosure; for a Follower, raftClosure is null.
*/
public static <V> void setResult(RaftClosure raftClosure, V result) {
GrpcClosure closure = (GrpcClosure) raftClosure;
@@ -183,7 +183,7 @@ public boolean invoke(int partId, byte[] request, RaftClosure response) throws
invoke(partId, methodId, CleanReq.parseFrom(input), response);
break;
default:
return false; // 未处理
return false; // Unhandled
}
} catch (IOException e) {
throw new HgStoreException(e.getMessage(), e);
@@ -214,7 +214,7 @@ public boolean invoke(int partId, byte methodId, Object req, RaftClosure respons
hgStoreSession.doClean(partId, (CleanReq) req, response);
break;
default:
return false; // 未处理
return false; // Unhandled
}
return true;
}