diff --git a/README.md b/README.md
index 7072e17..9012595 100644
--- a/README.md
+++ b/README.md
@@ -1,7 +1,9 @@
-### Usefull commands to make charts and tables from validator-node
+## Useful commands to make charts and tables from validator-node
 > [Please refer to ton-validation](https://github.com/everstake/ton-validation)
+##### WARNING! Highly experimental. Use at your own risk!
+
 #### Get rewards in GRAMs using grep and jq
 `cat db.json | jq -r '._default' | jq '[.[]]' | grep "reward" | grep -v '"reward": -1,' | awk 'BEGIN{FS=":"} {print ($2/1000000000) }'`
@@ -29,7 +31,7 @@ Then you can convert db.json to csv and import to Libreoffice Calc
 Thanks [SO](https://stackoverflow.com/questions/32960857/how-to-convert-arbitrary-simple-json-to-csv-using-jq/34282594#34282594) for answers!
-### Parse logs from validating node
+#### Parse logs from the validating node to get your blocks
 Let next command run in background with *nohup* or *tmux* to collect logs while your node will validate
 `tail -F /TON/dir/with/logs/* | grep --line-buffered "new Block created" >> blocks.log`
@@ -49,3 +51,53 @@ You`ll get these records in blocks.log
 [ 3][t 2][1573576970.334105492][collator.cpp:3695][!collate(-1,8000000000000000):1195359] new Block created
 [ 3][t 4][1573576972.634427786][collator.cpp:3695][!collate(-1,8000000000000000):1195360] new Block created
 ```
+
+Then parse them with awk and write the result to another file, e.g. *blocks_parsed.log*
+
+`cat blocks.log | awk '{print $3}' | awk -F '[' '{print $2}' | awk -F '.' '{print $1}' > blocks_parsed.log`
+
+
+You'll get these records
+```
+1573636135
+1573636146
+1573636148
+1573636149
+1573636163
+```
+
+
+## How to use db.json and blocks_parsed.log to make charts:
+
+#### Installation steps:
+```
+#set the env variable in your .bashrc using export
+export BETTER_EXCEPTIONS=1
+sudo apt install python3-pip
+sudo apt install python3-venv
+python3 -m venv env
+source env/bin/activate  #if you use bash; other shells have their own activate scripts
+#After that your prompt will change
+pip install -r requirements.txt
+#Work with your data
+#To exit the venv, run
+deactivate
+```
+#### How to run:
+
+***python count_parse.py db.json blocks_parsed.log chart.html***
+
+`Usage: count_parse.py [SWITCHES] db block out_html [validators_elected_for=65536]`
+
+You will get output in the console plus an HTML file containing a chart with some basic info.
+
+![output](img/console-output.png)
+
+![chart](img/chart.png)
+
+#### Notes
+
+The chart shows how many new blocks the validator created per validation period, plus the reward for that period.
+The tooltip and annotation show the election_id in numeric and human-readable datetime formats.
+
+Please keep in mind that the data may be inconsistent and the algorithm may contain errors!
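+
+For reference, count_parse.py only relies on a handful of fields in each db.json record
+(TinyDB's `_default` table): from a record with `"success": true` it reads `election_time`,
+and from the matching reward record (any record whose `reward` is not -1) it reads `id`, `time` and `reward`.
+Roughly like this (field names are taken from count_parse.py; the values are made up for illustration):
+```
+{"id": 101, "success": true, "election_time": 1573560000, "reward": -1}
+{"id": 102, "reward": 2515000000000, "time": 1573640000}
+```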
\ No newline at end of file
diff --git a/count_parse.py b/count_parse.py
new file mode 100755
index 0000000..77a76e7
--- /dev/null
+++ b/count_parse.py
@@ -0,0 +1,100 @@
+#!/usr/bin/env python
+import os, sys, re, time
+from plumbum import local, cli, FG, BG, TF, TEE, ProcessExecutionError, colors
+from plumbum.cmd import echo
+from contextlib import contextmanager
+from tinydb import TinyDB, Query, where
+import pendulum
+from loguru import logger
+from mako.template import Template
+
+class MyApp(cli.Application):
+    def main(self, db : cli.ExistingFile, block : cli.ExistingFile, out_html : cli.NonexistentPath, validators_elected_for = 65536):
+        try:
+            # Read the parsed block timestamps and open the TinyDB database
+            with open(block, 'r') as infile:
+                data = infile.read()
+                blocks = data.splitlines()
+            template = Template(filename='template.txt')
+
+            db = TinyDB(db)
+        except Exception as error:
+            logger.opt(exception=True).debug('Failed on open files')
+
+        query = Query()
+        start = 0 # set to another number if you want to skip some records from start
+        query_res = db.search(((query['success'] == True) | (query['reward'] != -1)) & (query['id'] >= start))
+        work=[]
+        t = 0
+        count=0
+        count_reward = 0
+        try:
+            # Bind every successful election record to the first reward record
+            # that arrives after the validation period has ended
+            for i in range(len(query_res)):
+                if (query_res[i]['success'] == True ):
+                    count+=1
+                    #print(count,query_res[i])
+                    t=i
+                    while (t+1 < len(query_res)):
+                        t+=1
+                        if (query_res[t]['reward'] != -1):
+                            if ( (query_res[t]['time'] - query_res[i]['election_time']) < (int(validators_elected_for) + 500) ): # empirically found margin: when 'validators_elected_for' was 4000, 4500 worked
+                                continue
+                            z = {'id_reward': query_res[t]['id'], 'election_time': query_res[i]['election_time'], 'reward_time': query_res[t]['time'], 'reward': query_res[t]['reward'], 'blocks': 0}
+                            work.append(z)
+                            count_reward+=1
+                            #print(count_reward, z)
+                            break
+        except Exception as error:
+            logger.opt(exception=True).debug('Failed on binding')
+
+        # cat db.json | jq -r '._default' | jq '[.[]]' | grep "reward" | grep -v '"reward": -1,' | awk 'BEGIN{FS=":"} {print ($2/1000000000) }' | wc -l
+        logger.info("Count of parsed reward records = {}, success records = {}, please check it with cat | grep", count_reward, count)
+
+        try:
+            # Remove consecutive records that carry the same reward, keeping the later one
+            i = 0
+            while (i+1 < len(work)):
+                #print(i,work[i])
+                if ((work[i]['reward'] == work[i+1]['reward'])):
+                    logger.info("Duplicates found, deleting first occurrence")
+                    #print(i,work[i])
+                    del(work[i])
+                else:
+                    i += 1
+        except Exception as error:
+            logger.opt(exception=True).debug('Failed on deleting duplicates')
+
+        try:
+            # Count how many of the collected block timestamps fall into each validation period
+            for i in range(len(work)):
+                a = work[i]['election_time']
+                b = a + int(validators_elected_for)
+                sum = 0
+                for y in blocks:
+                    d = int(y)
+                    if (d >= a and d <= b):
+                        sum+=1
+                if (sum != 0) :
+                    work[i]['blocks'] = sum
+                    #print(work[i]['blocks'])
+
+            print("------------------------------------")
+            for i in range(len(work)):
+                print(work[i]) # Resulting array
+            print("------------------------------------")
+            text=[]
+            sep=","
+            for i in range(len(work)):
+                #if ((work[i]['blocks'] != 0)): # Maybe you can get rewards without validating any blocks
+                dt = pendulum.from_timestamp(work[i]['election_time'])
+                pr=dt.format('YYYY,MM,DD HH:mm:ss')
+                #pr=dt.to_datetime_string()
+                #pr=dt.to_iso8601_string()
+                #print(f"[new Date(\"{pr}\"), {work[i]['reward'] / 1000000000}, \"{work[i]['election_time']}\", {work[i]['blocks']}]")
+                text.append(f"[new Date(\"{pr}\"), {work[i]['reward'] / 1000000000}, \"{work[i]['election_time']}\", {work[i]['blocks']}]")
+            FilledTemplate = template.render(Variable=(sep.join(text)))
+            FileName = out_html
+            f = open(FileName, "w+")
+            f.write(FilledTemplate)
+            f.close()
+
+        except Exception as error:
+            logger.opt(exception=True).debug('Failed on last iter')
+
+if __name__ == "__main__":
+    MyApp.run()
\ No newline at end of file
diff --git a/img/chart.png b/img/chart.png
new file mode 100644
index 0000000..40f8180
Binary files /dev/null and b/img/chart.png differ
diff --git a/img/console-output.png b/img/console-output.png
new file mode 100644
index 0000000..5b1851e
Binary files /dev/null and b/img/console-output.png differ
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..302668d
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,11 @@
+better-exceptions==0.2.2
+loguru==0.3.2
+Mako==1.1.0
+MarkupSafe==1.1.1
+pendulum==2.0.5
+pep8==1.7.1
+plumbum==1.6.8
+python-dateutil==2.8.1
+pytzdata==2019.3
+six==1.13.0
+tinydb==3.15.1
diff --git a/template.txt b/template.txt
new file mode 100644
index 0000000..8f0568c
--- /dev/null
+++ b/template.txt
@@ -0,0 +1,78 @@
+
+
+
+
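
count_parse.py logs how many reward and success records it parsed and asks you to cross-check the numbers with `cat | grep`. Below is a minimal standalone sketch of the same check with TinyDB; it is not part of the diff above, and it assumes db.json sits in the current directory with the field names described earlier.

```
#!/usr/bin/env python3
# Minimal sketch: cross-check the counts that count_parse.py logs.
# Assumes db.json is in the current directory and uses the field names
# described above; this file is illustrative and not part of the repo.
from tinydb import TinyDB, Query

db = TinyDB("db.json")
q = Query()

# Mirrors the README pipeline: grep "reward" | grep -v '"reward": -1,'
rewarded = db.search(q.reward != -1)
# Elections the validator actually got into
successful = db.search(q.success == True)

print("reward records :", len(rewarded))
print("success records:", len(successful))
# Same division by 1000000000 as the README's awk command, i.e. convert to GRAMs
print("total reward, GRAM:", sum(r["reward"] for r in rewarded) / 1000000000)
```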