-
Notifications
You must be signed in to change notification settings - Fork 3
/
values.production.template.yaml
986 lines (952 loc) · 34 KB
/
values.production.template.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
osm-seed:
  # ====================================================================================================
  # ====================================================================================================
  # ==================================Global Configurations=============================================
  # ====================================================================================================
  # ====================================================================================================
  environment: production
  # cloudProvider is the provider where you are going to deploy osm-seed; it can be: aws, gcp, minikube
  cloudProvider: aws
  # ====================================================================================================
  # AWS: in case you are using cloudProvider=aws set the variables below. We assume the nodes have a policy with access to S3.
  # ====================================================================================================
  AWS_S3_BUCKET: {{PRODUCTION_S3_BUCKET}}
  # AWS SSL ARN
  AWS_SSL_ARN: {{AWS_SSL_ARN}}
  # Specify serviceType.
  #
  # serviceType can be one of three values: 'NodePort', 'ClusterIP' or 'LoadBalancer'
  # Use `NodePort` for local testing on minikube.
  #
  # The recommended setting is `ClusterIP`, and then following the instructions to
  # point a DNS record to the cluster IP address. This will set up the ingress rules
  # for all services as subdomains and configure SSL using Let's Encrypt.
  #
  # If you specify `LoadBalancer` as the service type, and you also specify
  # an `AWS_SSL_ARN` that is a wildcard certificate, that will be configured
  # as the SSL certificate for your services. Else, you will need to configure
  # SSL separately.
  serviceType: ClusterIP
  createClusterIssuer: true
  # Domain that is pointed to the clusterIP
  # You will need to create an A record like *.osmseed.example.com pointed to the ClusterIP
  # Then, the cluster configuration will set up services at their respective subdomains:
  # - web.osmseed.example.com
  # - overpass.osmseed.example.com
  # - nominatim.osmseed.example.com
  # - etc.
  domain: openhistoricalmap.org
  # ====================================================================================================
  # Configuration for Let's Encrypt setup
  # ====================================================================================================
  # Admin Email address used when generating Let's Encrypt certificates.
  # You will be notified of expirations, etc. on this email address.
  adminEmail: [email protected]
# ====================================================================================================
# Variables for osm-seed database
# ====================================================================================================
db:
enabled: true
# For node selector you should create the node with a label "nodegroup_type"
nodeSelector:
enabled: true
label_key: nodegroup_type
label_value: api_db_xlarge
env:
POSTGRES_DB: {{PRODUCTION_DB}}
POSTGRES_USER: {{PRODUCTION_DB_USER}}
POSTGRES_PASSWORD: {{PRODUCTION_DB_PASSWORD}}
LOG_STATEMENT: "none"
persistenceDisk:
enabled: true
accessMode: ReadWriteOnce
mountPath: /var/lib/postgresql/data
subPath: postgresql-db
# In case cloudProvider: aws
AWS_ElasticBlockStore_volumeID : vol-08b85b3e370f2d690
AWS_ElasticBlockStore_size: 600Gi
resources:
enabled: true
requests:
memory: "13Gi"
cpu: "2800m"
limits:
memory: "14Gi"
cpu: "3800m"
sharedMemorySize: 2Gi
postgresqlConfig:
enabled: true
values: |
listen_addresses = '*'
max_connections = 200
shared_buffers = 4GB
work_mem = 20MB
maintenance_work_mem = 512MB
dynamic_shared_memory_type = posix
effective_io_concurrency = 200
max_wal_size = 1GB
min_wal_size = 256MB
random_page_cost = 1.0
effective_cache_size = 8GB
log_min_duration_statement = 3000
log_connections = on
log_disconnections = on
log_duration = off
log_lock_waits = on
log_statement = 'none'
log_timezone = 'Etc/UTC'
datestyle = 'iso, mdy'
timezone = 'Etc/UTC'
lc_messages = 'en_US.utf8'
lc_monetary = 'en_US.utf8'
lc_numeric = 'en_US.utf8'
lc_time = 'en_US.utf8'
default_text_search_config = 'pg_catalog.english'
# Parallelism settings
max_parallel_workers_per_gather = 2
max_parallel_workers = 4
max_worker_processes = 4
parallel_tuple_cost = 0.05
parallel_setup_cost = 500
min_parallel_table_scan_size = 2MB
min_parallel_index_scan_size = 256kB
session_preload_libraries = 'auto_explain'
auto_explain.log_min_duration = '3s'
# ====================================================================================================
# Variables for osm-seed website
# ====================================================================================================
web:
enabled: true
replicaCount: 2
# Set staticIp, if you are using cloudProvider=gcp
staticIp: c
serviceAnnotations:
service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "300"
ingressDomain: www.openhistoricalmap.org
nodeSelector:
enabled: true
label_key: nodegroup_type
label_value: web_large
env:
MAILER_ADDRESS: {{MAILER_ADDRESS}}
MAILER_DOMAIN: openhistoricalmap.org
MAILER_USERNAME: {{MAILER_USERNAME}}
MAILER_PASSWORD: {{MAILER_PASSWORD}}
OSM_id_key: {{PRODUCTION_ID_APPLICATION}}
OAUTH_CLIENT_ID: {{PRODUCTION_OAUTH_CLIENT_ID}}
OAUTH_KEY: {{PRODUCTION_OAUTH_KEY}}
MAILER_FROM: [email protected]
NOMINATIM_URL: nominatim-api.openhistoricalmap.org
OVERPASS_URL: overpass-api.openhistoricalmap.org
NEW_RELIC_LICENSE_KEY: 'none'
NEW_RELIC_APP_NAME: 'none'
ORGANIZATION_NAME: OpenHistoricalMap
WEBSITE_STATUS: 'online'
resources:
enabled: true
requests:
memory: "2Gi"
cpu: "1000m"
limits:
memory: "3Gi"
cpu: "1500m"
autoscaling:
enabled: true
minReplicas: 2
maxReplicas: 6
cpuUtilization: 80
sharedMemorySize: 512Mi
livenessProbeExec: true
# ====================================================================================================
# Variables for memcached. Memcached is used to store session cookies
# ====================================================================================================
memcached:
enabled: true
nodeSelector:
enabled: true
label_key: nodegroup_type
label_value: web_large
resources:
enabled: false
requests:
memory: "8Gi"
cpu: "2"
limits:
memory: "8Gi"
cpu: "2"
# ====================================================================================================
# Variables for osm-seed for osmosis, this configuration os to get the planet dump files from apidb
# ====================================================================================================
planetDump:
enabled: true
schedule: '0 0 * * *'
nodeSelector:
enabled: true
label_key: nodegroup_type
label_value: job
env:
OVERWRITE_PLANET_FILE: false
resources:
enabled: false
requests:
memory: "4Gi"
cpu: "2"
limits:
memory: "8Gi"
cpu: "4"
# ====================================================================================================
# Variables for full-history container
# ====================================================================================================
fullHistory:
enabled: true
schedule: '0 0 * * 0'
nodeSelector:
enabled: true
label_key: nodegroup_type
label_value: job
env:
OVERWRITE_FHISTORY_FILE: false
resources:
enabled: false
requests:
memory: "4Gi"
cpu: "2"
limits:
memory: "8Gi"
cpu: "4"
# ====================================================================================================
# Variables for replication-job, Configuration to create the replication files by, minute, hour, or day
# ====================================================================================================
replicationJob:
enabled: true
nodeSelector:
enabled: true
label_key: nodegroup_type
label_value: web_large
env:
ENABLE_SEND_SLACK_MESSAGE: "true"
SLACK_WEBHOOK_URL: {{OHM_SLACK_WEBHOOK_URL}}
resources:
enabled: false
requests:
memory: "20Gi"
cpu: "8"
limits:
memory: "24Gi"
cpu: "10"
# ====================================================================================================
# Variables for osm-seed to pupulate the apidb
# ====================================================================================================
populateApidb:
enabled: false
env:
URL_FILE_TO_IMPORT: 'https://storage.googleapis.com/osm-seed/osm-processor/history-latest-to-import-output.pbf'
resources:
enabled: false
requests:
memory: "1Gi"
cpu: "2"
limits:
memory: "2Gi"
cpu: "2.5"
# ====================================================================================================
# Variables to start a pod to process osm files
# ====================================================================================================
osmProcessor:
enabled: false
env:
URL_FILE_TO_PROCESS: 'https://storage.googleapis.com/osm-seed/planet/full-history/history-latest-to-import.pbf'
OSM_FILE_ACTION: simple_pbf
resources:
enabled: false
requests:
memory: "14Gi"
cpu: "4"
limits:
memory: "16Gi"
cpu: "4"
# ====================================================================================================
# Variables for restoring the DB
# ====================================================================================================
dbBackupRestore:
cronjobs:
- name: web-db
enabled: true
schedule: '0 0 * * *'
env:
# backup/restore
DB_ACTION: backup
# Naming backup files
SET_DATE_AT_NAME: true
BACKUP_CLOUD_FOLDER: database/web-api-db
BACKUP_CLOUD_FILE: ohm-api-web-db
AWS_S3_BUCKET: {{PRODUCTION_DB_BACKUP_S3_BUCKET}}
# Clean up backups options
CLEANUP_BACKUPS: true
RETENTION_DAYS: '30'
resources:
enabled: false
nodeSelector:
enabled: true
label_key: nodegroup_type
label_value: job
- name: tm-db
enabled: true
schedule: '0 1 * * *'
env:
# backup/restore
DB_ACTION: backup
# Naming backup files
SET_DATE_AT_NAME: true
BACKUP_CLOUD_FOLDER: database/tm-db
BACKUP_CLOUD_FILE: ohm-tm-db
AWS_S3_BUCKET: {{PRODUCTION_DB_BACKUP_S3_BUCKET}}
resources:
enabled: false
requests:
memory: '300Mi'
cpu: '0.5'
limits:
memory: '400Mi'
cpu: '0.6'
nodeSelector:
enabled: true
label_key: nodegroup_type
label_value: job
- name: osmcha-db
enabled: false
schedule: '0 0 * * *'
env:
# backup/restore
DB_ACTION: backup
# Naming backup files
SET_DATE_AT_NAME: 'true'
BACKUP_CLOUD_FOLDER: database/osmcha-db
BACKUP_CLOUD_FILE: osmseed-osmcha-db
AWS_S3_BUCKET: {{PRODUCTION_DB_BACKUP_S3_BUCKET}}
# Clean up backups options
CLEANUP_BACKUPS: true
RETENTION_DAYS: '30'
resources:
enabled: false
nodeSelector:
enabled: true
label_key: nodegroup_type
label_value: job
# ====================================================================================================
# Variables for tiler-db
# ====================================================================================================
tilerDb:
enabled: true
useExternalHost: # When we are using useExternalHost.enabled= true other variables are giong to be disable ans use the external host config
enabled: true
nodeSelector:
enabled: true
label_key: nodegroup_type
label_value: api_db
env:
POSTGRES_HOST: {{PRODUCTION_TILER_DB_HOST}}
POSTGRES_DB: tiler_osm_prod_v4
POSTGRES_USER: postgres
POSTGRES_PASSWORD: {{PRODUCTION_TILER_DB_PASSWORD}}
POSTGRES_PORT: 5432
sharedMemorySize: 2Gi
persistenceDisk:
enabled: true
accessMode: ReadWriteOnce
mountPath: /var/lib/postgresql/data
subPath: postgresql-d
# In case cloudProvider: aws
AWS_ElasticBlockStore_volumeID: vol-07b5a7a8e85a6caee
AWS_ElasticBlockStore_size: 200Gi
resources:
enabled: true
requests:
memory: "29Gi"
cpu: "7500m"
limits:
memory: "29Gi"
cpu: "7600m"
postgresqlConfig:
enabled: true
values: |
listen_addresses = '*'
max_connections = 100
shared_buffers = 10GB
work_mem = 256MB
maintenance_work_mem = 2GB
dynamic_shared_memory_type = posix
effective_io_concurrency = 300
max_wal_size = 4GB
min_wal_size = 512MB
random_page_cost = 1.0
effective_cache_size = 24GB
log_min_duration_statement = 15000
log_connections = on
log_disconnections = on
log_duration = off
log_lock_waits = on
log_statement = 'none'
log_timezone = 'Etc/UTC'
datestyle = 'iso, mdy'
timezone = 'Etc/UTC'
lc_messages = 'en_US.utf8'
lc_monetary = 'en_US.utf8'
lc_numeric = 'en_US.utf8'
lc_time = 'en_US.utf8'
default_text_search_config = 'pg_catalog.english'
# Parallelism settings
max_parallel_workers_per_gather = 8
max_parallel_workers = 16
max_worker_processes = 16
parallel_tuple_cost = 0.05
parallel_setup_cost = 500
min_parallel_table_scan_size = 8MB
min_parallel_index_scan_size = 512kB
# Enable auto_explain and pg_stat_statements
shared_preload_libraries = 'auto_explain'
auto_explain.log_min_duration = '10s'
# Timeout settings
tcp_keepalives_idle = 300
tcp_keepalives_interval = 60
tcp_keepalives_count = 10
# Disable join options for routes
enable_mergejoin = true
enable_hashjoin = true
# Timeout settings for queries
statement_timeout = '600s'
lock_timeout = '60s'
idle_in_transaction_session_timeout = '300s'
# pg_stat_statements settings
pg_stat_statements.max = 15000
pg_stat_statements.track = none
# ====================================================================================================
# Variables for tiler-imposm
# ====================================================================================================
tilerImposm:
enabled: true
nodeSelector:
enabled: true
label_key: nodegroup_type
label_value: web_large
env:
TILER_IMPORT_FROM: osm
TILER_IMPORT_PBF_URL: https://s3.amazonaws.com/planet.openhistoricalmap.org/planet/planet-241214_1202.osm.pbf
REPLICATION_URL: http://s3.amazonaws.com/planet.openhistoricalmap.org/replication/minute/
SEQUENCE_NUMBER: '1677612'
OVERWRITE_STATE: false
UPLOAD_EXPIRED_FILES: true
IMPORT_NATURAL_EARTH: true
IMPORT_OSM_LAND: true
persistenceDisk:
enabled: false
accessMode: ReadWriteOnce
mountPath: /mnt/data
# In case cloudProvider: aws
AWS_ElasticBlockStore_volumeID: vol-05d06ac388569461f
AWS_ElasticBlockStore_size: 50Gi
resources:
enabled: true
requests:
memory: "2Gi"
cpu: "1"
limits:
memory: "2Gi"
cpu: "1"
# ====================================================================================================
# Variables for tiler-server
# ====================================================================================================
tilerServer:
enabled: true
nodeSelector:
enabled: true
label_key: nodegroup_type
label_value: web_large
replicaCount: 1
commad: './start.sh'
serviceAnnotations:
service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "300"
ingressDomain: vtiles.openhistoricalmap.org
env:
TILER_SERVER_PORT: 9090
TILER_CACHE_TYPE: s3
TILER_CACHE_BASEPATH: /mnt/data
TILER_CACHE_MAX_ZOOM: 22
# in case s3
TILER_CACHE_REGION: us-east-1
TILER_CACHE_BUCKET: tiler-cache-production
TILER_CACHE_AWS_ACCESS_KEY_ID: {{PRODUCTION_TILER_CACHE_AWS_ACCESS_KEY_ID}}
TILER_CACHE_AWS_SECRET_ACCESS_KEY: {{PRODUCTION_TILER_CACHE_AWS_SECRET_ACCESS_KEY}}
EXECUTE_REINDEX: false
EXECUTE_VACUUM_ANALYZE: false
# In case you use TILER_CACHE_TYPE: file with persistenceDisk
persistenceDisk:
enabled: false
accessMode: ReadWriteOnce
mountPath: /mnt/data
# In case cloudProvider: aws
# AWS_ElasticBlockStore_volumeID : {{PRODUCTION_TILER_SERVER_EBS}}
# AWS_ElasticBlockStore_size: 100Gi
resources:
enabled: false
requests:
memory: "2Gi"
cpu: "1"
limits:
memory: "4Gi"
cpu: "2"
autoscaling:
enabled: true
minReplicas: 1
maxReplicas: 3
cpuUtilization: 60
# ====================================================================================================
# Variables for tiler-server cache cleaner, only avaliable in case the TILER_CACHE_TYPE = s3
# ====================================================================================================
tilerServerCacheCleaner:
enabled: false
nodeSelector:
enabled: true
label_key: nodegroup_type
label_value: web
replicaCount: 1
command: './cache_cleaner.sh'
resources:
enabled: true
requests:
memory: "1Gi"
cpu: "2"
limits:
memory: "2Gi"
cpu: "4"
env:
KILL_PROCESS: manually
MAX_NUM_PS: 5
PROCESS_NAME: tegola
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 1
cpuUtilization: 90
# ====================================================================================================
# Variables for Tasking Manager DB
# ====================================================================================================
tmDb:
enabled: true
image:
name: "postgis/postgis"
tag: "11-2.5"
nodeSelector:
enabled: true
label_key: nodegroup_type
label_value: web_large
env:
POSTGRES_DB: tm
POSTGRES_PASSWORD: {{PRODUCTION_TM_DB_PASSWORD}}
POSTGRES_USER: postgres
persistenceDisk:
enabled: true
accessMode: ReadWriteOnce
mountPath: /var/lib/postgresql/data
subPath: postgresql-d
AWS_ElasticBlockStore_volumeID: vol-03a2f95687a51a531
AWS_ElasticBlockStore_size: 20Gi
resources:
enabled: false
requests:
memory: "1Gi"
cpu: "2"
limits:
memory: "2Gi"
cpu: "2"
# ====================================================================================================
# Variables for Tasking Manager API
# ====================================================================================================
tmApi:
enabled: true
nodeSelector:
enabled: true
label_key: nodegroup_type
label_value: web_large
replicaCount: 2
serviceAnnotations:
service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: '300'
ingressDomain: tm-api.openhistoricalmap.org
env:
TM_ORG_NAME: OpenHistoricalMap
TM_ORG_CODE: OHM
TM_ORG_URL: www.openhistoricalmap.org
TM_ORG_PRIVACY_POLICY_URL: www.openhistoricalmap.org/copyright
TM_ORG_GITHUB: github.com/openhistoricalmap
OSM_SERVER_URL: https://www.openhistoricalmap.org
OSM_NOMINATIM_SERVER_URL: https://nominatim-api.openhistoricalmap.org
OSM_REGISTER_URL: https://www.openhistoricalmap.org/user/new
ID_EDITOR_URL: https://www.openhistoricalmap.org/edit?editor=id
POTLATCH2_EDITOR_URL: https://www.openhistoricalmap.org/edit?editor=potlatch2
TM_SECRET: {{PRODUCTION_TM_API_SECRET}}
TM_CONSUMER_KEY: {{PRODUCTION_TM_API_CONSUMER_KEY}}
TM_CONSUMER_SECRET: {{PRODUCTION_TM_API_CONSUMER_SECRET}}
TM_EMAIL_FROM_ADDRESS: [email protected]
TM_EMAIL_CONTACT_ADDRESS: [email protected]
TM_SMTP_HOST: email-smtp.us-east-1.amazonaws.com
TM_SMTP_PORT: 25
TM_SMTP_USER: {{MAILER_USERNAME}}
TM_SMTP_PASSWORD: {{MAILER_PASSWORD}}
TM_DEFAULT_LOCALE: en
TM_APP_API_URL: https://tm-api.openhistoricalmap.org
TM_APP_BASE_URL: https://tasks.openhistoricalmap.org
TM_IMPORT_MAX_FILESIZE: 3000000
TM_MAX_AOI_AREA: 15000
TM_APP_API_VERSION: v2
# The following environment variables are for future versions of TM
TM_CLIENT_ID: {{PRODUCTION_TM_CLIENT_ID}}
TM_CLIENT_SECRET: {{PRODUCTION_TM_CLIENT_SECRET}}
TM_DEFAULT_CHANGESET_COMMENT: production
TM_REDIRECT_URI: https://tm-api.staging.openhistoricalmap.org/authorized
TM_SCOPE: 'read_prefs write_api'
# Add extra info
TM_ORG_FB: https://www.facebook.com/openhistoricalmap
TM_ORG_INSTAGRAM: https://www.openhistoricalmap.org
TM_ORG_TWITTER: https://x.com/OpenHistMap
TM_ORG_YOUTUBE: https://www.youtube.com/playlist?list=PLOi35w6_Hpx_CYdYBUpPeuiJ1djn5-wIx
resources:
enabled: false
requests:
memory: 1Gi
cpu: '2'
limits:
memory: 2Gi
cpu: '2'
# ====================================================================================================
# Variables for nominatim api
# ====================================================================================================
nominatimApi:
enabled: true
serviceAnnotations:
service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "300"
ingressDomain: nominatim-api.openhistoricalmap.org
replicaCount: 1
env:
PBF_URL: http://s3.amazonaws.com/planet.openhistoricalmap.org/planet/planet-230727_1030.osm.pbf
REPLICATION_URL: http://planet.openhistoricalmap.org.s3.amazonaws.com/replication/minute
REPLICATION_UPDATE_INTERVAL: 60
REPLICATION_RECHECK_INTERVAL: 30
FREEZE: false
IMPORT_WIKIPEDIA: false
IMPORT_US_POSTCODES: false
IMPORT_GB_POSTCODES: false
IMPORT_TIGER_ADDRESSES: false
THREADS: 8
NOMINATIM_PASSWORD: {{PRODUCTION_NOMINATIM_PG_PASSWORD}}
PGDATA: /var/lib/postgresql/14/main
NOMINATIM_ADDRESS_LEVEL_CONFIG_URL: https://raw.githubusercontent.com/OpenHistoricalMap/nominatim-ui/master/address-levels.json
UPDATE_MODE: continuous
OSMSEED_WEB_API_DOMAIN: www.openhistoricalmap.org
resources:
enabled: false
requests:
memory: '1Gi'
cpu: '2'
limits:
memory: '2Gi'
cpu: '2'
persistenceDisk:
enabled: true
accessMode: ReadWriteOnce
mountPath: /var/lib/postgresql/14/main
subPath: nominatim-pgdata
# Minikube
localVolumeHostPath: /mnt/nominatim-db-data
localVolumeSize: 20Gi
# AWS
AWS_ElasticBlockStore_volumeID: vol-0c9fb16e2365c4f36
AWS_ElasticBlockStore_size: 100Gi
# GCP
GCP_gcePersistentDisk_pdName: osmseed-disk-nominatim_db-v1
GCP_gcePersistentDisk_size: 50Gi
nodeSelector:
enabled: true
label_key: nodegroup_type
label_value: web_large
# ====================================================================================================
# Variables for overpass-api
# ====================================================================================================
overpassApi:
enabled: true
nodeSelector:
enabled: true
label_key: nodegroup_type
label_value: web_large
serviceAnnotations:
service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "300"
ingressDomain: overpass-api.openhistoricalmap.org
env:
OVERPASS_META: 'attic'
OVERPASS_MODE: 'init'
OVERPASS_PLANET_URL: https://s3.amazonaws.com/planet.openhistoricalmap.org/planet/planet-240715_0003.osm.pbf
OVERPASS_DIFF_URL: http://s3.amazonaws.com/planet.openhistoricalmap.org/replication/minute
OVERPASS_RULES_LOAD: '10'
OVERPASS_PLANET_PREPROCESS: 'mv /db/planet.osm.bz2 /db/planet.osm.pbf && osmium cat -o /db/planet.osm.bz2 /db/planet.osm.pbf && rm /db/planet.osm.pbf'
OVERPASS_REPLICATION_SEQUENCE_NUMBER: '1484000'
OVERPASS_ALLOW_DUPLICATE_QUERIES: 'yes'
persistenceDisk:
enabled: true
accessMode: ReadWriteOnce
AWS_ElasticBlockStore_volumeID: vol-01d9060df5d14a903
AWS_ElasticBlockStore_size: 100Gi
resources:
enabled: false
requests:
memory: '1Gi'
cpu: '2'
limits:
memory: '2Gi'
cpu: '2'
# ====================================================================================================
# Variables for taginfo
# ====================================================================================================
taginfo:
enabled: true
serviceAnnotations:
service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "300"
ingressDomain: taginfo.openhistoricalmap.org
env:
URL_PLANET_FILE_STATE: https://s3.amazonaws.com/planet.openhistoricalmap.org/planet/state.txt
URL_HISTORY_PLANET_FILE_STATE: https://s3.amazonaws.com/planet.openhistoricalmap.org/planet/full-history/state.txt
URL_PLANET_FILE: 'none'
URL_HISTORY_PLANET_FILE: 'none'
TIME_UPDATE_INTERVAL: 7d
OVERWRITE_CONFIG_URL: https://raw.githubusercontent.com/OpenHistoricalMap/ohm-deploy/main/images/taginfo/taginfo-config-production.json
TAGINFO_PROJECT_REPO: https://github.com/OpenHistoricalMap/taginfo-projects.git
DOWNLOAD_DB: 'languages wiki'
CREATE_DB: 'db projects chronology'
ENVIRONMENT: production
AWS_S3_BUCKET: taginfo
INTERVAL_DOWNLOAD_DATA: 7d
resources:
enabled: false
requests:
memory: '1Gi'
cpu: '2'
limits:
memory: '2Gi'
cpu: '2'
nodeSelector:
enabled: true
label_key: nodegroup_type
label_value: web_large
cronjob:
enabled: true
schedule: "0 2 */3 * *"
nodeSelector:
enabled: true
label_key: nodegroup_type
label_value: job_xlarge
resources:
enabled: true
requests:
memory: "13Gi"
cpu: "3600m"
limits:
memory: "14Gi"
cpu: "3800m"
# ====================================================================================================
# Variables for osm-simple-metrics
# ====================================================================================================
osmSimpleMetrics:
enabled: true
schedule: '0 2 * * *'
resources:
enabled: false
requests:
memory: '1Gi'
cpu: '2'
limits:
memory: '2Gi'
cpu: '2'
nodeSelector:
enabled: true
label_key: nodegroup_type
label_value: web_large
# ====================================================================================================
# Variables for replication nomitoring task
# ====================================================================================================
monitoringReplication:
enabled: true
schedule: '*/30 * * * *'
env:
CREATE_MISSING_FILES: "empty"
REPLICATION_SEQUENCE_NUMBER: "000000"
resources:
enabled: false
requests:
memory: '1Gi'
cpu: '2'
limits:
memory: '2Gi'
cpu: '2'
nodeSelector:
enabled: true
label_key: nodegroup_type
label_value: web_large
# ====================================================================================================
# Variables for changeset-replication-job, Configuration to create the replication files by, minute, hour, or day
# ====================================================================================================
changesetReplicationJob:
enabled: true
resources:
enabled: false
requests:
memory: '20Gi'
cpu: '8'
limits:
memory: '24Gi'
cpu: '10'
nodeSelector:
enabled: true
label_key: nodegroup_type
label_value: web_large
# ====================================================================================================
# Variables for osmcha web
# ====================================================================================================
osmchaWeb:
enabled: true
# ====================================================================================================
# Variables for osmcha Api
# ====================================================================================================
osmchaApi:
enabled: true
image:
name: "ghcr.io/openhistoricalmap/osmcha-django"
tag: "a1bcea85dc1f7c27566c20bafe7fff7aaa1e38a4"
ingressDomain: osmcha.openhistoricalmap.org
env:
DJANGO_SETTINGS_MODULE: "config.settings.production"
OSMCHA_FRONTEND_VERSION: "v0.86.0-production"
DJANGO_SECRET_KEY: {{PRODUCTION_OSMCHA_DJANGO_SECRET_KEY}}
OAUTH_OSM_KEY: {{PRODUCTION_OSMCHA_API_CONSUMER_KEY}}
OAUTH_OSM_SECRET: {{PRODUCTION_OSMCHA_API_CONSUMER_SECRET}}
DJANGO_SECURE_SSL_REDIRECT: "False"
OSM_SERVER_URL: https://www.openhistoricalmap.org
OAUTH_REDIRECT_URI: https://osmcha.openhistoricalmap.org/oauth-landing.html
OSM_PLANET_BASE_URL: https://s3.amazonaws.com/planet.openhistoricalmap.org/replication/changesets/
## frontend
OSMCHA_URL: https://osmcha.openhistoricalmap.org
OSMCHA_API_URL: www.openhistoricalmap.org
REACT_APP_OSM_URL: https://www.openhistoricalmap.org
REACT_APP_OSM_API: https://www.openhistoricalmap.org/api/0.6
REACT_APP_OVERPASS_BASE: //overpass-api.openhistoricalmap.org/api/interpreter
REACT_APP_ENABLE_REAL_CHANGESETS: 0
REACT_APP_MAPBOX_ACCESS_TOKEN: {{PRODUCTION_OSMCHA_REACT_APP_MAPBOX_ACCESS_TOKEN}}
resources:
enabled: false
requests:
memory: "512Mi"
cpu: "1"
limits:
memory: "512Mi"
cpu: "1"
nodeSelector:
enabled: true
label_key: nodegroup_type
label_value: web_large
# ====================================================================================================
# Variables for osmcha DB
# ====================================================================================================
osmchaDb:
enabled: true
image:
name: "developmentseed/osmseed-osmcha-db"
tag: "0.1.0-n767.h0090e97"
env:
POSTGRES_DB: osmcha
POSTGRES_USER: postgres
POSTGRES_PASSWORD: {{PRODUCTION_OSMCHA_PG_PASSWORD}}
resources:
enabled: false
requests:
memory: "20Gi"
cpu: "8"
limits:
memory: "24Gi"
cpu: "10"
persistenceDisk:
enabled: false
accessMode: ReadWriteOnce
mountPath: /var/lib/postgresql/data
AWS_ElasticBlockStore_volumeID: vol-065901d9a34a6fbf9
AWS_ElasticBlockStore_size: 100Gi
nodeSelector:
enabled: true
label_key: nodegroup_type
label_value: web_large
# ====================================================================================================
# Planet files server
# ====================================================================================================
planetFiles:
enabled: false
# ====================================================================================================
# Tiles cache SQS processor
# ====================================================================================================
# NOTE(review): the nesting of tilerCachePurge/tilerCacheSeed under 'ohm' was reconstructed
# from a source with stripped indentation — verify against the chart's values schema.
ohm:
  tilerCache:
    enabled: true
  tilerCachePurge:
    enabled: true
    env:
      REGION_NAME: us-east-1
      NAMESPACE: default # Namespace to run the job
      DOCKER_IMAGE: ghcr.io/openhistoricalmap/tiler-server:0.0.1-0.dev.git.1964.h8703c77 # TODO, this should be automatically updated from tiler server image
      SQS_QUEUE_URL: {{PRODUCTION_SQS_QUEUE_URL}}
      NODEGROUP_TYPE: web_large # Nodegroup type to run the purge and seed job
      # Maximum number of active jobs in the high concurrency queue
      MAX_ACTIVE_JOBS: 10
      DELETE_OLD_JOBS_AGE: 3600 # 1 hour
      ## Execute purging
      EXECUTE_PURGE: true
      PURGE_CONCURRENCY: 128
      PURGE_MIN_ZOOM: 3
      PURGE_MAX_ZOOM: 12 # Purging zooms 13-20 takes hours to complete; tiles for the higher zooms are removed directly from s3 instead
      ## Execute seeding
      EXECUTE_SEED: true
      SEED_CONCURRENCY: 128
      SEED_MIN_ZOOM: 0
      SEED_MAX_ZOOM: 12
      ## Remove tiles from s3 for these zoom levels
      ZOOM_LEVELS_TO_DELETE: 13,14,15,16,17,18,19,20
      S3_BUCKET_CACHE_TILER: tiler-cache-production
      S3_BUCKET_PATH_FILES: mnt/data/osm
    resources:
      enabled: false
      requests:
        memory: '20Gi'
        cpu: '8'
      limits:
        memory: '24Gi'
        cpu: '10'
    nodeSelector:
      enabled: true
      label_key: nodegroup_type
      label_value: web_large
  # Tiler seed: by default it is going to seed tiles for zoom levels 0-6
  tilerCacheSeed:
    enabled: true
    schedule: '0 */3 * * *'
    env:
      GEOJSON_URL: https://osmseed-dev.s3.us-east-1.amazonaws.com/tiler/usa-eu.geojson
      ZOOM_LEVELS: '8,9,10'
      CONCURRENCY: 256
      S3_BUCKET: osmseed-dev
      OUTPUT_FILE: /logs/tiler_benchmark.log
    resources:
      enabled: false
      requests:
        memory: '20Gi'
        cpu: '8'
      limits:
        memory: '24Gi'
        cpu: '10'
    nodeSelector:
      enabled: true
      label_key: nodegroup_type
      label_value: web_large