Add formatting #277

Open · wants to merge 3 commits into master
44 changes: 44 additions & 0 deletions .clang-format
@@ -0,0 +1,44 @@
AccessModifierOffset: -1
AlignAfterOpenBracket: Align
AlignConsecutiveAssignments: Consecutive
AlignConsecutiveDeclarations: Consecutive
AlignEscapedNewlines: DontAlign
AlignOperands: AlignAfterOperator
AlignTrailingComments: true
AllowAllParametersOfDeclarationOnNextLine: true
AllowShortCaseLabelsOnASingleLine: false
AllowShortEnumsOnASingleLine: false
AllowShortIfStatementsOnASingleLine: false
AlwaysBreakTemplateDeclarations: Yes
BasedOnStyle: WebKit
BitFieldColonSpacing: After
BinPackParameters: false
BreakBeforeBinaryOperators: NonAssignment
BreakBeforeBraces: Custom
BraceWrapping:
  AfterFunction: false
  AfterClass: false
  AfterControlStatement: true
  BeforeElse: true
BreakBeforeTernaryOperators: true
BreakConstructorInitializers: AfterColon
BreakStringLiterals: false
ColumnLimit: 100
ContinuationIndentWidth: 2
Cpp11BracedListStyle: true
IndentGotoLabels: false
IndentPPDirectives: BeforeHash
IndentWidth: 4
MaxEmptyLinesToKeep: 2
NamespaceIndentation: None
PackConstructorInitializers: Never
ReflowComments: false
SortIncludes: false
SortUsingDeclarations: false
SpaceAfterCStyleCast: true
SpaceAfterTemplateKeyword: false
SpaceBeforeCaseColon: true
SpaceBeforeCpp11BracedList: false
SpaceBeforeInheritanceColon: false
SpaceInEmptyBlock: false
SpacesBeforeTrailingComments: 2
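
For illustration, here is a hypothetical C++ fragment (not part of this diff, and the whitespace is only approximate) laid out the way the options above direct clang-format to behave: 4-space indentation, braces on their own line after control statements, else on its own line, a space after C-style casts, and two spaces before trailing comments.

#include <algorithm>

int clamp_scaled(double v, int scale) {
    int lo = -scale;  // consecutive assignments kept aligned
    int hi = scale;

    if (v < 0.0)
    {
        return std::max(lo, (int) (v * scale));  // SpaceAfterCStyleCast: true
    }
    else
    {
        return std::min(hi, (int) (v * scale));
    }
}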
6 changes: 6 additions & 0 deletions Makefile
@@ -0,0 +1,6 @@
SRCS = training_data_loader.cpp
HEADERS = lib/nnue_training_data_formats.h lib/nnue_training_data_stream.h lib/rng.h

format:
	black .
	clang-format -i $(SRCS) $(HEADERS) -style=file
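
With this target, running make format applies black to the repository's Python sources and runs clang-format in place on the listed C++ source and header files, picking up the .clang-format file above via -style=file.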
5 changes: 4 additions & 1 deletion README.md
@@ -85,4 +85,7 @@ This script runs in a loop, and will monitor the directory for new checkpoints.
* syzygy - http://www.talkchess.com/forum3/viewtopic.php?f=7&t=75506
* https://github.com/DanielUranga/TensorFlowNNUE
* https://hxim.github.io/Stockfish-Evaluation-Guide/
* dkappe - Suggesting ranger (https://github.com/lessw2020/Ranger-Deep-Learning-Optimizer)
* dkappe - Suggesting ranger (https://github.com/lessw2020/Ranger-Deep-Learning-Optimizer)

[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black)
[![Code style: clang-format](https://img.shields.io/badge/code%20style-clang%20format-000000.svg)](https://github.com/llvm/llvm-project)
152 changes: 115 additions & 37 deletions cross_check_eval.py
@@ -7,24 +7,53 @@
import chess
from model import NNUE


def read_model(nnue_path, feature_set):
with open(nnue_path, 'rb') as f:
with open(nnue_path, "rb") as f:
reader = serialize.NNUEReader(f, feature_set)
return reader.model


def make_fen_batch_provider(data_path, batch_size):
return nnue_dataset.FenBatchProvider(data_path, True, 1, batch_size, False, 10)

def eval_model_batch(model, batch):
us, them, white_indices, white_values, black_indices, black_values, outcome, score, psqt_indices, layer_stack_indices = batch.contents.get_tensors('cuda')

evals = [v.item() for v in model.forward(us, them, white_indices, white_values, black_indices, black_values, psqt_indices, layer_stack_indices) * 600.0]
def eval_model_batch(model, batch):
(
us,
them,
white_indices,
white_values,
black_indices,
black_values,
outcome,
score,
psqt_indices,
layer_stack_indices,
) = batch.contents.get_tensors("cuda")

evals = [
v.item()
for v in model.forward(
us,
them,
white_indices,
white_values,
black_indices,
black_values,
psqt_indices,
layer_stack_indices,
)
* 600.0
]
for i in range(len(evals)):
if them[i] > 0.5:
evals[i] = -evals[i]
return evals

re_nnue_eval = re.compile(r'NNUE evaluation:?\s*?([-+]?\d*?\.\d*)')

re_nnue_eval = re.compile(r"NNUE evaluation:?\s*?([-+]?\d*?\.\d*)")


def compute_basic_eval_stats(evals):
min_engine_eval = min(evals)
@@ -34,39 +63,78 @@ def compute_basic_eval_stats(evals):

return min_engine_eval, max_engine_eval, avg_engine_eval, avg_abs_engine_eval


def compute_correlation(engine_evals, model_evals):
if len(engine_evals) != len(model_evals):
raise Exception("number of engine evals doesn't match the number of model evals")

min_engine_eval, max_engine_eval, avg_engine_eval, avg_abs_engine_eval = compute_basic_eval_stats(engine_evals)
min_model_eval, max_model_eval, avg_model_eval, avg_abs_model_eval = compute_basic_eval_stats(model_evals)

print('Min engine/model eval: {} / {}'.format(min_engine_eval, min_model_eval))
print('Max engine/model eval: {} / {}'.format(max_engine_eval, max_model_eval))
print('Avg engine/model eval: {} / {}'.format(avg_engine_eval, avg_model_eval))
print('Avg abs engine/model eval: {} / {}'.format(avg_abs_engine_eval, avg_abs_model_eval))

relative_model_error = sum(abs(model - engine) / (abs(engine)+0.001) for model, engine in zip(model_evals, engine_evals)) / len(engine_evals)
relative_engine_error = sum(abs(model - engine) / (abs(model)+0.001) for model, engine in zip(model_evals, engine_evals)) / len(engine_evals)
min_diff = min(abs(model - engine) for model, engine in zip(model_evals, engine_evals))
max_diff = max(abs(model - engine) for model, engine in zip(model_evals, engine_evals))
print('Relative engine error: {}'.format(relative_engine_error))
print('Relative model error: {}'.format(relative_model_error))
print('Avg abs difference: {}'.format(sum(abs(model - engine) for model, engine in zip(model_evals, engine_evals)) / len(engine_evals)))
print('Min difference: {}'.format(min_diff))
print('Max difference: {}'.format(max_diff))
raise Exception(
"number of engine evals doesn't match the number of model evals"
)

(
min_engine_eval,
max_engine_eval,
avg_engine_eval,
avg_abs_engine_eval,
) = compute_basic_eval_stats(engine_evals)
(
min_model_eval,
max_model_eval,
avg_model_eval,
avg_abs_model_eval,
) = compute_basic_eval_stats(model_evals)

print("Min engine/model eval: {} / {}".format(min_engine_eval, min_model_eval))
print("Max engine/model eval: {} / {}".format(max_engine_eval, max_model_eval))
print("Avg engine/model eval: {} / {}".format(avg_engine_eval, avg_model_eval))
print(
"Avg abs engine/model eval: {} / {}".format(
avg_abs_engine_eval, avg_abs_model_eval
)
)

relative_model_error = sum(
abs(model - engine) / (abs(engine) + 0.001)
for model, engine in zip(model_evals, engine_evals)
) / len(engine_evals)
relative_engine_error = sum(
abs(model - engine) / (abs(model) + 0.001)
for model, engine in zip(model_evals, engine_evals)
) / len(engine_evals)
min_diff = min(
abs(model - engine) for model, engine in zip(model_evals, engine_evals)
)
max_diff = max(
abs(model - engine) for model, engine in zip(model_evals, engine_evals)
)
print("Relative engine error: {}".format(relative_engine_error))
print("Relative model error: {}".format(relative_model_error))
print(
"Avg abs difference: {}".format(
sum(abs(model - engine) for model, engine in zip(model_evals, engine_evals))
/ len(engine_evals)
)
)
print("Min difference: {}".format(min_diff))
print("Max difference: {}".format(max_diff))


def eval_engine_batch(engine_path, net_path, fens):
engine = subprocess.Popen([engine_path], stdin=subprocess.PIPE, stdout=subprocess.PIPE, universal_newlines=True)
parts = ['uci', 'setoption name EvalFile value {}'.format(net_path)]
engine = subprocess.Popen(
[engine_path],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True,
)
parts = ["uci", "setoption name EvalFile value {}".format(net_path)]
for fen in fens:
parts.append('position fen {}'.format(fen))
parts.append('eval')
parts.append('quit')
query = '\n'.join(parts)
parts.append("position fen {}".format(fen))
parts.append("eval")
parts.append("quit")
query = "\n".join(parts)
out = engine.communicate(input=query)[0]
evals = re.findall(re_nnue_eval, out)
return [int(float(v)*208) for v in evals]
return [int(float(v) * 208) for v in evals]


def filter_fens(fens):
# We don't want fens where a king is in check, as these cannot be evaluated by the engine.
@@ -77,13 +145,20 @@ def filter_fens(fens):
filtered_fens.append(fen)
return filtered_fens


def main():
parser = argparse.ArgumentParser(description="")
parser.add_argument("--net", type=str, help="path to a .nnue net")
parser.add_argument("--engine", type=str, help="path to stockfish")
parser.add_argument("--data", type=str, help="path to a .bin or .binpack dataset")
parser.add_argument("--checkpoint", type=str, help="Optional checkpoint (used instead of nnue for local eval)")
parser.add_argument("--count", type=int, default=100, help="number of datapoints to process")
parser.add_argument(
"--checkpoint",
type=str,
help="Optional checkpoint (used instead of nnue for local eval)",
)
parser.add_argument(
"--count", type=int, default=100, help="number of datapoints to process"
)
features.add_argparse_args(parser)
args = parser.parse_args()

@@ -102,20 +177,23 @@ def main():
engine_evals = []

done = 0
print('Processed {} positions.'.format(done))
print("Processed {} positions.".format(done))
while done < args.count:
fens = filter_fens(next(fen_batch_provider))

b = nnue_dataset.make_sparse_batch_from_fens(feature_set, fens, [0] * len(fens), [1] * len(fens), [0] * len(fens))
b = nnue_dataset.make_sparse_batch_from_fens(
feature_set, fens, [0] * len(fens), [1] * len(fens), [0] * len(fens)
)
model_evals += eval_model_batch(model, b)
nnue_dataset.destroy_sparse_batch(b)

engine_evals += eval_engine_batch(args.engine, args.net, fens)

done += len(fens)
print('Processed {} positions.'.format(done))
print("Processed {} positions.".format(done))

compute_correlation(engine_evals, model_evals)

if __name__ == '__main__':

if __name__ == "__main__":
main()
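
For reference, a cross-check over 100 positions could be started with an invocation along these lines (the paths are placeholders, and any feature-set arguments registered by features.add_argparse_args are omitted):

python cross_check_eval.py --net nn.nnue --engine ./stockfish --data training.binpack --count 100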