From bace0ab55077d9f5cd37bd9d6638c4acb16334a8 Mon Sep 17 00:00:00 2001 From: shrugs Date: Mon, 20 Jan 2025 13:09:18 -0600 Subject: [PATCH] Subgraph-compatible Autogenerated API (#26) * wip: custom graphql schema loaded * wip: basic collection queries working * wip: literally encode ids instead of eq conditions * fix: store labelhash in domains * fix: resolvedAddress is a relationship not an address * fix: lowercase resolver address in resolverId * fix: (in)correctly mark root as migrated and include exception in NewResolver to match subgraph behavior * fix: default null array fields for backwards compat * docs: add documentation to graphql.ts * fix: remove unnecesary context injection * fix: coinType, texts array default, makeResolverId arg order * fix: handle dangling makeResolverId refactor * fix: index all resolver-looking events not just NewResolver * docs: add v2 notes, add comment about root node isMigrated * fix: use enum for OrderDirection to match subgraph * feat: implement scalar Entity_orderBy enums * docs: add rest of graphql autogen api spec * docs: update backfill time estimation * feat: host subgraph-compatible api at /subgraph instead of root * fix: handle subgraph case where version is changed but domain doesn't exist * chore: better comment on the other domain expected error * docs: document which api features are explicitly not supported * chore: remove subgraph-api todo from readme * chore: update package name to ensnode * docs: tidy v2 notes, move _nocase to potential todo * fix: use upsert in Resolver#handleVersionChanged * docs: update backfill time note in README * fix: 13 hours for mainnet backfill * deps: ponder to latest * docs: note about CLUSTER * fix: correctly nullify texts and cointypes on version changed * fix: simplify expiryDate logic o * docs: update comments post feedback --------- Co-authored-by: Tomasz Kopacki --- .gitignore | 2 - README.md | 18 +- docs/GRAPHQL.md | 15 + docs/V2.md | 4 + package.json | 11 +- pnpm-lock.yaml | 
160 +++++- ponder.schema.ts | 13 +- src/api/graphql.ts | 747 +++++++++++++++++++++++++ src/api/index.ts | 11 +- src/api/middleware.ts | 85 +++ src/handlers/NameWrapper.ts | 5 +- src/handlers/Registrar.ts | 4 +- src/handlers/Registry.ts | 20 +- src/handlers/Resolver.ts | 55 +- src/lib/ids.ts | 4 +- src/plugins/base.eth/ponder.config.ts | 26 +- src/plugins/eth/handlers/Registry.ts | 22 +- src/plugins/eth/handlers/Resolver.ts | 30 - src/plugins/eth/ponder.config.ts | 49 +- src/plugins/linea.eth/ponder.config.ts | 22 +- 20 files changed, 1171 insertions(+), 132 deletions(-) create mode 100644 src/api/graphql.ts create mode 100644 src/api/middleware.ts diff --git a/.gitignore b/.gitignore index bb9753d..f0c7e11 100644 --- a/.gitignore +++ b/.gitignore @@ -16,5 +16,3 @@ yarn-error.log* # Ponder /generated/ /.ponder/ - -checkpoints/ diff --git a/README.md b/README.md index e99bc96..b0287c8 100644 --- a/README.md +++ b/README.md @@ -2,9 +2,9 @@ > a multichain ENS indexer, powered by Ponder -estimated backfill time @ 50rps = 24-36 hours on M1 Macbook (~10x speedup) +estimated mainnet-only backfill time @ <=500rps = **~13 hours** on M1 Macbook (>10x speedup vs subgraph) -### goals +## goals > an optimized, multichain ens indexer that the community loves and integrates @@ -23,26 +23,24 @@ estimated backfill time @ 50rps = 24-36 hours on M1 Macbook (~10x speedup) - (possible) continued backwards compatibility with subgraph - support indexing subset of data, i.e. 
only domains under parent node -#### next up +## next up +- [ ] `_nocase` case-insensitive where filters + - not used interally but ensjs does technically expose this as an available filter to users - [ ] confirm all the schema relations are configured correctly - [ ] integrate rainbow tables for label healing - load the table dump into pglite (or just postgres) & query synchronously to match existing behavior - https://github.com/graphprotocol/ens-rainbow -- [ ] subgraph graphql implementation within ponder - - [ ] implement subgraph-style pagination api - - [ ] support the well-known queries in `GRAPHQL.md` - - [ ] support collection queries as well, to power `snapshot-eq` - [ ] CI/CD with indexing? - - more recent endlbock for gut checks + - integrate index to recent block (10m?) and validate with `snapshot-eq` b4 passing - [ ] better understand reverse resolution & how that pertains to L2 primary names and impacts the future schema, etc -### notes +## notes - eth registry is ERC721, has many controllers (), no knowledge of pricing — delegated to registrar controllers - eth old registry & new registry migration due to security issue, new then fallback to old, therefore ignore all old evens on domains that have been seen by new registry -### ENSIP Ideas +## ENSIP Ideas - unable to automatically identify subname registries via onchain event, CCIP standard dosn't include any info about data source, so we'll need to encode manually for now - ENSIP - shared interface for subdomain registrars diff --git a/docs/GRAPHQL.md b/docs/GRAPHQL.md index 4b828be..a57a696 100644 --- a/docs/GRAPHQL.md +++ b/docs/GRAPHQL.md @@ -1,5 +1,20 @@ # graphql info/spec +## backwards-compatibility notes + +the following features of the subgraph graphql api are explicitly unsupported, because they are not used by ensjs or ens-app-v3 + +- [fulltext search queries](https://thegraph.com/docs/en/subgraphs/querying/graphql-api/#fulltext-search-queries) +- [1-level-nested Entity `_orderBy` 
param](https://thegraph.com/docs/en/subgraphs/querying/graphql-api/#example-for-nested-entity-sorting) +- [subgraph `_Meta_` object](https://thegraph.com/docs/en/subgraphs/querying/graphql-api/#subgraph-metadata) (ponder's `_meta` is available) +- [time travel queries](https://thegraph.com/docs/en/subgraphs/querying/graphql-api/#time-travel-queries) +- [_change_block filtering](https://thegraph.com/docs/en/subgraphs/querying/graphql-api/#example-for-block-filtering) + +## notes + +- offset queries slow? `CLUSTER` your table so scans are faster + - `CLUSTER public.domains USING domains_pkey;` + ## goals 1. ponder indexer 1:1 equivalency of results as compared to subgraph diff --git a/docs/V2.md b/docs/V2.md index 0c039ab..8494d3b 100644 --- a/docs/V2.md +++ b/docs/V2.md @@ -41,6 +41,8 @@ the 'empty' domains should be handled more accurately, depending on how importan - removes need to increment during domain creation - new impl likely needs to exclude 'empty' domains (see registry notes for context) +domain createdAt should not update on re-registration, should be original createdAt + various resources use both null and zeroAddress to indicate emptiness, this is horrible and creates numerous checks like [this](https://github.com/ensdomains/ensjs/blob/main/packages/ensjs/src/functions/subgraph/getNamesForAddress.ts#L255) where they check for `!== NULL && !== zeroAddress` wrappedOwnerId should not be materialized onto domain, should just be resolved through wrappedDomain.owner @@ -75,6 +77,8 @@ any resolver that implements the CCIP Read standard will have to have its record in the subgraph implementation, resolver handlers must upsert resolvers because people can set records etc for a node that has not (yet) specified this resolver as active, meaning the create in `Registry:NewResolver` has yet to fire. 
in the ideal scenario, this lookup is keyed only by `(chainId, address)` and we can use pure updates instead of an upsert +v1: resolvers are discovered by tracking all emissions of events that look like resolver spec events. very inefficient, as a resolver is only relevant to the protocol once it's been set as a resolver. for v2, we could use factory like normal, starting to track events on `Registry#NewResolver` and then backfilling the events (using the same handlers) with ponder's cached viem client. not sure if this is more or less intensive than just parsing every event ever + ### registrar the subgraph implements all of the BaseRegistrar, EthRegistrarController, and EthRegistrarControllerOld logic together diff --git a/package.json b/package.json index 2cda7f4..2c264da 100644 --- a/package.json +++ b/package.json @@ -1,5 +1,5 @@ { - "name": "ens-multichain", + "name": "ensnode", "version": "0.0.1", "private": true, "type": "module", @@ -13,6 +13,15 @@ }, "dependencies": { "@ensdomains/ensjs": "^4.0.2", + "@escape.tech/graphql-armor-max-aliases": "^2.6.0", + "@escape.tech/graphql-armor-max-depth": "^2.4.0", + "@escape.tech/graphql-armor-max-tokens": "^2.5.0", + "change-case": "^5.4.4", + "dataloader": "^2.2.3", + "drizzle-orm": "^0.38.3", + "graphql": "^16.10.0", + "graphql-scalars": "^1.24.0", + "graphql-yoga": "^5.10.9", "hono": "^4.6.14", "ponder": "^0.8.26", "ts-deepmerge": "^7.0.2", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 62c531e..ae22318 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -14,12 +14,39 @@ importers: '@ensdomains/ensjs': specifier: ^4.0.2 version: 4.0.2(typescript@5.7.2)(viem@2.21.57(typescript@5.7.2)) + '@escape.tech/graphql-armor-max-aliases': + specifier: ^2.6.0 + version: 2.6.0 + '@escape.tech/graphql-armor-max-depth': + specifier: ^2.4.0 + version: 2.4.0 + '@escape.tech/graphql-armor-max-tokens': + specifier: ^2.5.0 + version: 2.5.0 + change-case: + specifier: ^5.4.4 + version: 5.4.4 + dataloader: + specifier: ^2.2.3 + 
version: 2.2.3 + drizzle-orm: + specifier: ^0.38.3 + version: 0.38.3(@electric-sql/pglite@0.2.13)(@opentelemetry/api@1.9.0)(@types/pg@8.11.10)(kysely@0.26.3)(pg@8.13.1)(react@18.3.1) + graphql: + specifier: ^16.10.0 + version: 16.10.0 + graphql-scalars: + specifier: ^1.24.0 + version: 1.24.0(graphql@16.10.0) + graphql-yoga: + specifier: ^5.10.9 + version: 5.10.9(graphql@16.10.0) hono: specifier: ^4.6.14 version: 4.6.14 ponder: specifier: ^0.8.26 - version: 0.8.26(@opentelemetry/api@1.9.0)(@types/node@20.17.10)(@types/pg@8.11.10)(hono@4.6.14)(typescript@5.7.2)(viem@2.21.57(typescript@5.7.2)) + version: 0.8.28(@opentelemetry/api@1.9.0)(@types/node@20.17.10)(@types/pg@8.11.10)(hono@4.6.14)(typescript@5.7.2)(viem@2.21.57(typescript@5.7.2)) ts-deepmerge: specifier: ^7.0.2 version: 7.0.2 @@ -680,6 +707,9 @@ packages: resolution: {integrity: sha512-zgVZuo2WcZgfUEmsn6eO3kINexW8RAE4maiQ8QNs8CtpPCSyMiYsULR3HQYkm3w8FIA3SberyMJMSldGsW+U3w==} engines: {node: ^12.17.0 || ^14.13 || >=16.0.0} + change-case@5.4.4: + resolution: {integrity: sha512-HRQyTk2/YPEkt9TnUPbOpr64Uw3KOicFWPVBb+xiHvd6eBx/qPr9xqfBFDT8P2vWsvvz4jbEkfDe71W3VyNu2w==} + chokidar@3.6.0: resolution: {integrity: sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==} engines: {node: '>= 8.10.0'} @@ -895,6 +925,98 @@ packages: sqlite3: optional: true + drizzle-orm@0.38.3: + resolution: {integrity: sha512-w41Y+PquMpSff/QDRGdItG0/aWca+/J3Sda9PPGkTxBtjWQvgU1jxlFBXdjog5tYvTu58uvi3PwR1NuCx0KeZg==} + peerDependencies: + '@aws-sdk/client-rds-data': '>=3' + '@cloudflare/workers-types': '>=4' + '@electric-sql/pglite': '>=0.2.0' + '@libsql/client': '>=0.10.0' + '@libsql/client-wasm': '>=0.10.0' + '@neondatabase/serverless': '>=0.10.0' + '@op-engineering/op-sqlite': '>=2' + '@opentelemetry/api': ^1.4.1 + '@planetscale/database': '>=1' + '@prisma/client': '*' + '@tidbcloud/serverless': '*' + '@types/better-sqlite3': '*' + '@types/pg': '*' + '@types/react': '>=18' + '@types/sql.js': '*' + 
'@vercel/postgres': '>=0.8.0' + '@xata.io/client': '*' + better-sqlite3: '>=7' + bun-types: '*' + expo-sqlite: '>=14.0.0' + knex: '*' + kysely: '*' + mysql2: '>=2' + pg: '>=8' + postgres: '>=3' + prisma: '*' + react: '>=18' + sql.js: '>=1' + sqlite3: '>=5' + peerDependenciesMeta: + '@aws-sdk/client-rds-data': + optional: true + '@cloudflare/workers-types': + optional: true + '@electric-sql/pglite': + optional: true + '@libsql/client': + optional: true + '@libsql/client-wasm': + optional: true + '@neondatabase/serverless': + optional: true + '@op-engineering/op-sqlite': + optional: true + '@opentelemetry/api': + optional: true + '@planetscale/database': + optional: true + '@prisma/client': + optional: true + '@tidbcloud/serverless': + optional: true + '@types/better-sqlite3': + optional: true + '@types/pg': + optional: true + '@types/react': + optional: true + '@types/sql.js': + optional: true + '@vercel/postgres': + optional: true + '@xata.io/client': + optional: true + better-sqlite3: + optional: true + bun-types: + optional: true + expo-sqlite: + optional: true + knex: + optional: true + kysely: + optional: true + mysql2: + optional: true + pg: + optional: true + postgres: + optional: true + prisma: + optional: true + react: + optional: true + sql.js: + optional: true + sqlite3: + optional: true + dset@3.1.4: resolution: {integrity: sha512-2QF/g9/zTaPDc3BjNcVTGoBbXBgYfMTTceLaYcFJ/W9kggFUkhxD/hMEeuLKbugyef9SqAx8cpgwlIP/jinUTA==} engines: {node: '>=4'} @@ -1048,8 +1170,14 @@ packages: peerDependencies: graphql: 14 - 16 - graphql-yoga@5.10.8: - resolution: {integrity: sha512-a3qJOd7t/sWp6yQ0n+M/7KmhGRm6ulSeY7WTdyb/pPCZGccPW9iLz4O2k0DPsF50k8VHJLS2VSlnZOeqkR2mOg==} + graphql-scalars@1.24.0: + resolution: {integrity: sha512-olbFN39m0XsHHESACUdd7jWU/lGxMMS1B7NZ8XqpqhKZrjBxzeGYAnQ4Ax//huYds771wb7gCznA+65QDuUa+g==} + engines: {node: '>=10'} + peerDependencies: + graphql: ^0.8.0 || ^0.9.0 || ^0.10.0 || ^0.11.0 || ^0.12.0 || ^0.13.0 || ^14.0.0 || ^15.0.0 || ^16.0.0 + + 
graphql-yoga@5.10.9: + resolution: {integrity: sha512-g/DIVijSLt/ghzJkyaByapVhVIfqziqDNfVH0vNvjEnKpt7iBM/XV0SqdI06YvwSrnlFGheFeG8hevrwthwA+g==} engines: {node: '>=18.0.0'} peerDependencies: graphql: ^15.2.0 || ^16.0.0 @@ -1455,8 +1583,8 @@ packages: resolution: {integrity: sha512-ip4qdzjkAyDDZklUaZkcRFb2iA118H9SgRh8yzTkSQK8HilsOJF7rSY8HoW5+I0M46AZgX/pxbprf2vvzQCE0Q==} hasBin: true - ponder@0.8.26: - resolution: {integrity: sha512-K1fAaJK8eGdAnur+X6TGW/3y7hJc+3HSB5KJMFYiUNReExgFG9kxDT6+La/myv8IHm3SE2BB7BAWAXmaUYy2Cg==} + ponder@0.8.28: + resolution: {integrity: sha512-L5lJzMAUEu10fVImBYVea8r62GTlmAO2CJ59/RizR1dtzgWYVK96vFrq3ZEhBS6mrA2L0huTKIpVzC2jxouXVg==} engines: {node: '>=18.14'} hasBin: true peerDependencies: @@ -2465,6 +2593,8 @@ snapshots: chalk@5.4.1: {} + change-case@5.4.4: {} + chokidar@3.6.0: dependencies: anymatch: 3.1.3 @@ -2595,6 +2725,15 @@ snapshots: pg: 8.13.1 react: 18.3.1 + drizzle-orm@0.38.3(@electric-sql/pglite@0.2.13)(@opentelemetry/api@1.9.0)(@types/pg@8.11.10)(kysely@0.26.3)(pg@8.13.1)(react@18.3.1): + optionalDependencies: + '@electric-sql/pglite': 0.2.13 + '@opentelemetry/api': 1.9.0 + '@types/pg': 8.11.10 + kysely: 0.26.3 + pg: 8.13.1 + react: 18.3.1 + dset@3.1.4: {} eastasianwidth@0.2.0: {} @@ -2779,7 +2918,12 @@ snapshots: transitivePeerDependencies: - encoding - graphql-yoga@5.10.8(graphql@16.10.0): + graphql-scalars@1.24.0(graphql@16.10.0): + dependencies: + graphql: 16.10.0 + tslib: 2.8.1 + + graphql-yoga@5.10.9(graphql@16.10.0): dependencies: '@envelop/core': 5.0.2 '@graphql-tools/executor': 1.3.10(graphql@16.10.0) @@ -3164,7 +3308,7 @@ snapshots: sonic-boom: 3.8.1 thread-stream: 2.7.0 - ponder@0.8.26(@opentelemetry/api@1.9.0)(@types/node@20.17.10)(@types/pg@8.11.10)(hono@4.6.14)(typescript@5.7.2)(viem@2.21.57(typescript@5.7.2)): + ponder@0.8.28(@opentelemetry/api@1.9.0)(@types/node@20.17.10)(@types/pg@8.11.10)(hono@4.6.14)(typescript@5.7.2)(viem@2.21.57(typescript@5.7.2)): dependencies: '@babel/code-frame': 7.26.2 
'@commander-js/extra-typings': 12.1.0(commander@12.1.0) @@ -3183,7 +3327,7 @@ snapshots: drizzle-orm: 0.36.4(@electric-sql/pglite@0.2.13)(@opentelemetry/api@1.9.0)(@types/pg@8.11.10)(kysely@0.26.3)(pg@8.13.1)(react@18.3.1) glob: 10.4.5 graphql: 16.10.0 - graphql-yoga: 5.10.8(graphql@16.10.0) + graphql-yoga: 5.10.9(graphql@16.10.0) hono: 4.6.14 http-terminator: 3.2.0 ink: 4.4.1(react@18.3.1) diff --git a/ponder.schema.ts b/ponder.schema.ts index 87b8935..e38f2d0 100644 --- a/ponder.schema.ts +++ b/ponder.schema.ts @@ -17,7 +17,7 @@ export const domain = onchainTable("domains", (t) => ({ subdomainCount: t.integer("subdomain_count").notNull().default(0), // Address logged from current resolver, if any - resolvedAddress: t.hex("resolved_address"), + resolvedAddressId: t.hex("resolved_address_id"), // The resolver that controls the domain's settings resolverId: t.text(), @@ -45,7 +45,10 @@ export const domain = onchainTable("domains", (t) => ({ })); export const domainRelations = relations(domain, ({ one, many }) => ({ - // has one owner + resolvedAddress: one(account, { + fields: [domain.resolvedAddressId], + references: [account.id], + }), owner: one(account, { fields: [domain.ownerId], references: [account.id], @@ -105,9 +108,11 @@ export const resolver = onchainTable("resolvers", (t) => ({ // The content hash for this resolver, in binary format contentHash: t.text("content_hash"), // The set of observed text record keys for this resolver - texts: t.text().array().notNull().default([]), + // NOTE: we avoid .notNull.default([]) to match subgraph behavior + texts: t.text().array(), // The set of observed SLIP-44 coin types for this resolver - coinTypes: t.bigint("coin_types").array().notNull().default([]), + // NOTE: we avoid .notNull.default([]) to match subgraph behavior + coinTypes: t.bigint("coin_types").array(), // TODO: has many events })); diff --git a/src/api/graphql.ts b/src/api/graphql.ts new file mode 100644 index 0000000..68c8f94 --- /dev/null +++ 
b/src/api/graphql.ts @@ -0,0 +1,747 @@ +/** + * This is an autogenerated graphql schema, initially based on ponder's, designed to mimic + * the subgraph graphql api for queries we've deemed relevant (see docs). + * + * 1. inlines some ponder internal types + * 2. removes ponder's encoded id params in favor of literal ids + * 3. implement subgraph's simpler pagination style with first & skip w/out Page types + * 4. PascalCase entity names + */ + +// here we inline the following types from this original import +// import type { Drizzle, OnchainTable, Schema } from "ponder"; +import type { NodePgDatabase } from "drizzle-orm/node-postgres"; +import type { PgliteDatabase } from "drizzle-orm/pglite"; + +export type Drizzle = + | NodePgDatabase + | PgliteDatabase; + +export type Schema = { [name: string]: unknown }; + +export const onchain = Symbol.for("ponder:onchain"); + +export type OnchainTable< + T extends TableConfig & { + extra: PgTableExtraConfig | undefined; + } = TableConfig & { extra: PgTableExtraConfig | undefined }, +> = PgTable & { + [Key in keyof T["columns"]]: T["columns"][Key]; +} & { [onchain]: true } & { + enableRLS: () => Omit, "enableRLS">; +}; + +import { pascalCase } from "change-case"; +import DataLoader from "dataloader"; +import { + type Column, + Many, + One, + type SQL, + type TableRelationalConfig, + and, + arrayContained, + arrayContains, + asc, + createTableRelationsHelpers, + desc, + eq, + extractTablesRelationalConfig, + getTableColumns, + gt, + gte, + inArray, + is, + like, + lt, + lte, + ne, + not, + notInArray, + notLike, + or, +} from "drizzle-orm"; +import { + type PgEnum, + PgEnumColumn, + PgInteger, + PgSerial, + PgTable, + PgTableExtraConfig, + TableConfig, + isPgEnum, +} from "drizzle-orm/pg-core"; +import { + GraphQLBoolean, + GraphQLEnumType, + type GraphQLEnumValueConfigMap, + type GraphQLFieldConfig, + type GraphQLFieldConfigMap, + GraphQLFloat, + type GraphQLInputFieldConfigMap, + GraphQLInputObjectType, + type 
GraphQLInputType, + GraphQLInt, + GraphQLList, + GraphQLNonNull, + GraphQLObjectType, + type GraphQLOutputType, + GraphQLScalarType, + GraphQLSchema, + GraphQLString, +} from "graphql"; +import { GraphQLJSON } from "graphql-scalars"; + +type Parent = Record; +type Context = { + getDataLoader: ReturnType; + metadataStore: any; // NOTE: type metadataStore as any for now + drizzle: Drizzle<{ [key: string]: OnchainTable }>; +}; + +type PluralArgs = { + where?: { [key: string]: number | string }; + first?: number; + skip?: number; + orderBy?: string; + orderDirection?: "asc" | "desc"; +}; + +const DEFAULT_LIMIT = 50 as const; +const MAX_LIMIT = 1000 as const; + +const OrderDirectionEnum = new GraphQLEnumType({ + name: "OrderDirection", + values: { + asc: { value: "asc" }, + desc: { value: "desc" }, + }, +}); + +export function buildGraphQLSchema(schema: Schema): GraphQLSchema { + const tablesConfig = extractTablesRelationalConfig(schema, createTableRelationsHelpers); + + const tables = Object.values(tablesConfig.tables) as TableRelationalConfig[]; + + const enums = Object.entries(schema).filter((el): el is [string, PgEnum<[string, ...string[]]>] => + isPgEnum(el[1]), + ); + const enumTypes: Record = {}; + for (const [enumTsName, enumObject] of enums) { + // Note that this is keyed by enumName (the SQL name) because that's what is + // available on the PgEnumColumn type. See `columnToGraphQLCore` for context. + enumTypes[enumObject.enumName] = new GraphQLEnumType({ + name: enumTsName, + values: enumObject.enumValues.reduce( + (acc: Record, cur) => ({ ...acc, [cur]: {} }), + {}, + ), + }); + } + + // construct Entity_orderBy enums + const entityOrderByEnums: Record = {}; + for (const table of tables) { + // Scalar fields + const values = Object.keys(table.columns).reduce( + (acc, columnName) => ({ + ...acc, + [columnName]: { value: columnName }, + }), + {}, + ); + + // TODO: relationships i.e. 
parent__labelName iff necessary + + entityOrderByEnums[table.tsName] = new GraphQLEnumType({ + name: `${pascalCase(table.tsName)}_orderBy`, + values, + }); + } + + const entityFilterTypes: Record = {}; + for (const table of tables) { + const filterType = new GraphQLInputObjectType({ + name: `${table.tsName}Filter`, + fields: () => { + const filterFields: GraphQLInputFieldConfigMap = { + // Logical operators + AND: { type: new GraphQLList(filterType) }, + OR: { type: new GraphQLList(filterType) }, + }; + + for (const [columnName, column] of Object.entries(table.columns)) { + const type = columnToGraphQLCore(column, enumTypes); + + // List fields => universal, plural + if (type instanceof GraphQLList) { + const baseType = innerType(type); + + conditionSuffixes.universal.forEach((suffix) => { + filterFields[`${columnName}${suffix}`] = { + type: new GraphQLList(baseType), + }; + }); + + conditionSuffixes.plural.forEach((suffix) => { + filterFields[`${columnName}${suffix}`] = { type: baseType }; + }); + } + + // JSON => no filters. + // Boolean => universal and singular only. 
+ // All other scalar => universal, singular, numeric OR string depending on type + if (type instanceof GraphQLScalarType || type instanceof GraphQLEnumType) { + if (type.name === "JSON") continue; + + conditionSuffixes.universal.forEach((suffix) => { + filterFields[`${columnName}${suffix}`] = { + type, + }; + }); + + conditionSuffixes.singular.forEach((suffix) => { + filterFields[`${columnName}${suffix}`] = { + type: new GraphQLList(type), + }; + }); + + if (["String", "ID"].includes(type.name)) { + conditionSuffixes.string.forEach((suffix) => { + filterFields[`${columnName}${suffix}`] = { + type: type, + }; + }); + } + + if (["Int", "Float", "BigInt"].includes(type.name)) { + conditionSuffixes.numeric.forEach((suffix) => { + filterFields[`${columnName}${suffix}`] = { + type: type, + }; + }); + } + } + } + + return filterFields; + }, + }); + entityFilterTypes[table.tsName] = filterType; + } + + const entityTypes: Record> = {}; + const entityPageTypes: Record = {}; + + for (const table of tables) { + entityTypes[table.tsName] = new GraphQLObjectType({ + name: pascalCase(table.tsName), // NOTE: PascalCase to match subgraph + fields: () => { + const fieldConfigMap: GraphQLFieldConfigMap = {}; + + // Scalar fields + for (const [columnName, column] of Object.entries(table.columns)) { + const type = columnToGraphQLCore(column, enumTypes); + fieldConfigMap[columnName] = { + type: column.notNull ? 
new GraphQLNonNull(type) : type, + }; + } + + // Relations + const relations = Object.entries(table.relations); + for (const [relationName, relation] of relations) { + const referencedTable = tables.find( + (table) => table.dbName === relation.referencedTableName, + ); + if (!referencedTable) + throw new Error( + `Internal error: Referenced table "${relation.referencedTableName}" not found`, + ); + + const referencedEntityType = entityTypes[referencedTable.tsName]; + const referencedEntityPageType = entityPageTypes[referencedTable.tsName]; + const referencedEntityFilterType = entityFilterTypes[referencedTable.tsName]; + if ( + referencedEntityType === undefined || + referencedEntityPageType === undefined || + referencedEntityFilterType === undefined + ) + throw new Error( + `Internal error: Referenced entity types not found for table "${referencedTable.tsName}" `, + ); + + if (is(relation, One)) { + const fields = relation.config?.fields ?? []; + const references = relation.config?.references ?? []; + + if (fields.length !== references.length) { + throw new Error( + "Internal error: Fields and references arrays must be the same length", + ); + } + + fieldConfigMap[relationName] = { + // Note: There is a `relation.isNullable` field here but it appears + // to be internal / incorrect. Until we have support for foriegn + // key constraints, all `one` relations must be nullable. 
+ type: referencedEntityType, + resolve: (parent, _args, context) => { + const loader = context.getDataLoader({ + table: referencedTable, + }); + + const rowFragment: Record = {}; + for (let i = 0; i < references.length; i++) { + const referenceColumn = references[i]!; + const fieldColumn = fields[i]!; + + const fieldColumnTsName = getColumnTsName(fieldColumn); + const referenceColumnTsName = getColumnTsName(referenceColumn); + + rowFragment[referenceColumnTsName] = parent[fieldColumnTsName]; + } + + const encodedId = rowFragment.id as string; + if (!encodedId) return null; + + return loader.load(encodedId); + }, + }; + } else if (is(relation, Many)) { + // Search the relations of the referenced table for the corresponding `one` relation. + // If "relationName" is not provided, use the first `one` relation that references this table. + const oneRelation = Object.values(referencedTable.relations).find( + (relation) => + relation.relationName === relationName || + (is(relation, One) && relation.referencedTableName === table.dbName), + ) as One | undefined; + if (!oneRelation) + throw new Error( + `Internal error: Relation "${relationName}" not found in table "${referencedTable.tsName}"`, + ); + + const fields = oneRelation.config?.fields ?? []; + const references = oneRelation.config?.references ?? 
[]; + + const referencedEntityOrderByType = entityOrderByEnums[referencedTable.tsName]; + if (!referencedEntityOrderByType) + throw new Error(`Entity_orderBy Enum not found for ${referencedTable.tsName}`); + + fieldConfigMap[relationName] = { + type: referencedEntityPageType, + args: { + where: { type: referencedEntityFilterType }, + orderBy: { type: referencedEntityOrderByType }, + orderDirection: { type: OrderDirectionEnum }, + first: { type: GraphQLInt }, + skip: { type: GraphQLInt }, + }, + resolve: (parent, args: PluralArgs, context, info) => { + const relationalConditions = []; + for (let i = 0; i < references.length; i++) { + const column = fields[i]!; + const value = parent[references[i]!.name]; + relationalConditions.push(eq(column, value)); + } + + return executePluralQuery( + referencedTable, + context.drizzle, + args, + relationalConditions, + ); + }, + }; + } else { + throw new Error( + `Internal error: Relation "${relationName}" is unsupported, expected One or Many`, + ); + } + } + + return fieldConfigMap; + }, + }); + + entityPageTypes[table.tsName] = new GraphQLNonNull( + new GraphQLList(new GraphQLNonNull(entityTypes[table.tsName]!)), + ); + } + + const queryFields: Record> = {}; + for (const table of tables) { + const entityType = entityTypes[table.tsName]!; + const entityPageType = entityPageTypes[table.tsName]!; + const entityFilterType = entityFilterTypes[table.tsName]!; + + const singularFieldName = table.tsName.charAt(0).toLowerCase() + table.tsName.slice(1); + const pluralFieldName = `${singularFieldName}s`; + + queryFields[singularFieldName] = { + type: entityType, + // Find the primary key columns and GraphQL core types and include them + // as arguments to the singular query type. 
+ args: Object.fromEntries( + table.primaryKey.map((column) => [ + getColumnTsName(column), + { + type: new GraphQLNonNull(columnToGraphQLCore(column, enumTypes) as GraphQLInputType), + }, + ]), + ), + resolve: async (_parent, args, context) => { + const loader = context.getDataLoader({ table }); + + // The `args` object here should be a valid `where` argument that + // uses the `eq` shorthand for each primary key column. + const encodedId = args.id as string; + + return loader.load(encodedId); + }, + }; + + const entityOrderByType = entityOrderByEnums[table.tsName]; + if (!entityOrderByType) throw new Error(`Entity_orderBy Enum not found for ${table.tsName}`); + + queryFields[pluralFieldName] = { + type: entityPageType, + args: { + where: { type: entityFilterType }, + orderBy: { type: entityOrderByType }, + orderDirection: { type: OrderDirectionEnum }, + first: { type: GraphQLInt }, + skip: { type: GraphQLInt }, + }, + resolve: async (_parent, args: PluralArgs, context, info) => { + return executePluralQuery(table, context.drizzle, args); + }, + }; + } + + queryFields._meta = { + type: GraphQLMeta, + resolve: async (_source, _args, context) => { + const status = await context.metadataStore.getStatus(); + return { status }; + }, + }; + + return new GraphQLSchema({ + // Include these here so they are listed first in the printed schema. 
+ types: [GraphQLJSON, GraphQLBigInt, GraphQLPageInfo, GraphQLMeta], + query: new GraphQLObjectType({ + name: "Query", + fields: queryFields, + }), + }); +} + +const GraphQLPageInfo = new GraphQLObjectType({ + name: "PageInfo", + fields: { + hasNextPage: { type: new GraphQLNonNull(GraphQLBoolean) }, + hasPreviousPage: { type: new GraphQLNonNull(GraphQLBoolean) }, + startCursor: { type: GraphQLString }, + endCursor: { type: GraphQLString }, + }, +}); + +const GraphQLBigInt = new GraphQLScalarType({ + name: "BigInt", + serialize: (value) => String(value), + parseValue: (value) => BigInt(value as any), + parseLiteral: (value) => { + if (value.kind === "StringValue") { + return BigInt(value.value); + } else { + throw new Error( + `Invalid value kind provided for field of type BigInt: ${value.kind}. Expected: StringValue`, + ); + } + }, +}); + +const GraphQLMeta = new GraphQLObjectType({ + name: "Meta", + fields: { status: { type: GraphQLJSON } }, +}); + +const columnToGraphQLCore = ( + column: Column, + enumTypes: Record, +): GraphQLOutputType => { + if (column.columnType === "PgEvmBigint") { + return GraphQLBigInt; + } + + if (column instanceof PgEnumColumn) { + if (column.enum === undefined) { + throw new Error( + `Internal error: Expected enum column "${getColumnTsName(column)}" to have an "enum" property`, + ); + } + const enumType = enumTypes[column.enum.enumName]; + if (enumType === undefined) { + throw new Error( + `Internal error: Expected to find a GraphQL enum named "${column.enum.enumName}"`, + ); + } + + return enumType; + } + + switch (column.dataType) { + case "boolean": + return GraphQLBoolean; + case "json": + return GraphQLJSON; + case "date": + return GraphQLString; + case "string": + return GraphQLString; + case "bigint": + return GraphQLString; + case "number": + return is(column, PgInteger) || is(column, PgSerial) ? 
GraphQLInt : GraphQLFloat; + case "buffer": + return new GraphQLList(new GraphQLNonNull(GraphQLInt)); + case "array": { + if (column.columnType === "PgVector") { + return new GraphQLList(new GraphQLNonNull(GraphQLFloat)); + } + + if (column.columnType === "PgGeometry") { + return new GraphQLList(new GraphQLNonNull(GraphQLFloat)); + } + + const innerType = columnToGraphQLCore((column as any).baseColumn, enumTypes); + + return new GraphQLList(new GraphQLNonNull(innerType)); + } + default: + throw new Error(`Type ${column.dataType} is not implemented`); + } +}; + +const innerType = (type: GraphQLOutputType): GraphQLScalarType | GraphQLEnumType => { + if (type instanceof GraphQLScalarType || type instanceof GraphQLEnumType) return type; + if (type instanceof GraphQLList || type instanceof GraphQLNonNull) return innerType(type.ofType); + throw new Error(`Type ${type.toString()} is not implemented`); +}; + +async function executePluralQuery( + table: TableRelationalConfig, + drizzle: Drizzle<{ [key: string]: OnchainTable }>, + args: PluralArgs, + extraConditions: (SQL | undefined)[] = [], +) { + const rawTable = drizzle._.fullSchema[table.tsName]; + const baseQuery = drizzle.query[table.tsName]; + if (rawTable === undefined || baseQuery === undefined) + throw new Error(`Internal error: Table "${table.tsName}" not found in RQB`); + + const limit = args.first ?? DEFAULT_LIMIT; + if (limit > MAX_LIMIT) { + throw new Error(`Invalid limit. Got ${limit}, expected <=${MAX_LIMIT}.`); + } + + const skip = args.skip ?? 0; + + const orderBySchema = buildOrderBySchema(table, args); + const orderBy = orderBySchema.map(([columnName, direction]) => { + const column = table.columns[columnName]; + if (column === undefined) { + throw new Error(`Unknown column "${columnName}" used in orderBy argument`); + } + return direction === "asc" ? 
asc(column) : desc(column); + }); + + const whereConditions = buildWhereConditions(args.where, table.columns); + + const rows = await baseQuery.findMany({ + where: and(...whereConditions, ...extraConditions), + orderBy, + limit, + offset: skip, + }); + + return rows; +} + +const conditionSuffixes = { + universal: ["", "_not"], + singular: ["_in", "_not_in"], + plural: ["_has", "_not_has"], + numeric: ["_gt", "_lt", "_gte", "_lte"], + string: [ + "_contains", + "_not_contains", + "_starts_with", + "_ends_with", + "_not_starts_with", + "_not_ends_with", + ], +} as const; + +const conditionSuffixesByLengthDesc = Object.values(conditionSuffixes) + .flat() + .sort((a, b) => b.length - a.length); + +function buildWhereConditions( + where: Record | undefined, + columns: Record, +): (SQL | undefined)[] { + const conditions: (SQL | undefined)[] = []; + + if (where === undefined) return conditions; + + for (const [whereKey, rawValue] of Object.entries(where)) { + // Handle the `AND` and `OR` operators + if (whereKey === "AND" || whereKey === "OR") { + if (!Array.isArray(rawValue)) { + throw new Error( + `Invalid query: Expected an array for the ${whereKey} operator. Got: ${rawValue}`, + ); + } + + const nestedConditions = rawValue.flatMap((subWhere) => + buildWhereConditions(subWhere, columns), + ); + + if (nestedConditions.length > 0) { + conditions.push(whereKey === "AND" ? and(...nestedConditions) : or(...nestedConditions)); + } + continue; + } + + // Search for a valid filter suffix, traversing the list from longest to shortest + // to avoid ambiguity between cases like `_not_in` and `_in`. + const conditionSuffix = conditionSuffixesByLengthDesc.find((s) => whereKey.endsWith(s)); + if (conditionSuffix === undefined) { + throw new Error(`Invariant violation: Condition suffix not found for where key ${whereKey}`); + } + + // Remove the condition suffix and use the remaining string as the column name. 
+ const columnName = whereKey.slice(0, whereKey.length - conditionSuffix.length); + + // Validate that the column name is present in the table. + const column = columns[columnName]; + if (column === undefined) { + throw new Error(`Invalid query: Where clause contains unknown column ${columnName}`); + } + + switch (conditionSuffix) { + case "": + if (column.columnType === "PgArray") { + conditions.push(and(arrayContains(column, rawValue), arrayContained(column, rawValue))); + } else { + conditions.push(eq(column, rawValue)); + } + break; + case "_not": + if (column.columnType === "PgArray") { + conditions.push( + not(and(arrayContains(column, rawValue), arrayContained(column, rawValue))!), + ); + } else { + conditions.push(ne(column, rawValue)); + } + break; + case "_in": + conditions.push(inArray(column, rawValue)); + break; + case "_not_in": + conditions.push(notInArray(column, rawValue)); + break; + case "_has": + conditions.push(arrayContains(column, [rawValue])); + break; + case "_not_has": + conditions.push(not(arrayContains(column, [rawValue]))); + break; + case "_gt": + conditions.push(gt(column, rawValue)); + break; + case "_lt": + conditions.push(lt(column, rawValue)); + break; + case "_gte": + conditions.push(gte(column, rawValue)); + break; + case "_lte": + conditions.push(lte(column, rawValue)); + break; + case "_contains": + conditions.push(like(column, `%${rawValue}%`)); + break; + case "_not_contains": + conditions.push(notLike(column, `%${rawValue}%`)); + break; + case "_starts_with": + conditions.push(like(column, `${rawValue}%`)); + break; + case "_ends_with": + conditions.push(like(column, `%${rawValue}`)); + break; + case "_not_starts_with": + conditions.push(notLike(column, `${rawValue}%`)); + break; + case "_not_ends_with": + conditions.push(notLike(column, `%${rawValue}`)); + break; + default: + throw new Error(`Invalid Condition Suffix ${conditionSuffix}`); + } + } + + return conditions; +} + +function buildOrderBySchema(table: 
TableRelationalConfig, args: PluralArgs) { + // If the user-provided order by does not include the ALL of the ID columns, + // add any missing ID columns to the end of the order by clause (asc). + // This ensures a consistent sort order to unblock cursor pagination. + const userDirection = args.orderDirection ?? "asc"; + const userColumns: [string, "asc" | "desc"][] = + args.orderBy !== undefined ? [[args.orderBy, userDirection]] : []; + const pkColumns = table.primaryKey.map((column) => [getColumnTsName(column), userDirection]); + const missingPkColumns = pkColumns.filter( + (pkColumn) => !userColumns.some((userColumn) => userColumn[0] === pkColumn[0]), + ) as [string, "asc" | "desc"][]; + return [...userColumns, ...missingPkColumns]; +} + +export function buildDataLoaderCache({ drizzle }: { drizzle: Drizzle }) { + const dataLoaderMap = new Map | undefined>(); + return ({ table }: { table: TableRelationalConfig }) => { + const baseQuery = (drizzle as Drizzle<{ [key: string]: OnchainTable }>).query[table.tsName]; + if (baseQuery === undefined) + throw new Error(`Internal error: Unknown table "${table.tsName}" in data loader cache`); + + let dataLoader = dataLoaderMap.get(table); + if (dataLoader === undefined) { + dataLoader = new DataLoader( + async (ids) => { + // NOTE: use literal ids against id column + const idConditions = ids.map((id) => eq(table.columns["id"]!, id)); + + const rows = await baseQuery.findMany({ + where: or(...idConditions), + limit: ids.length, + }); + + return ids.map((id) => rows.find((row) => row.id === id)); + }, + { maxBatchSize: 1_000 }, + ); + dataLoaderMap.set(table, dataLoader); + } + + return dataLoader; + }; +} + +function getColumnTsName(column: Column) { + const tableColumns = getTableColumns(column.table); + return Object.entries(tableColumns).find(([_, c]) => c.name === column.name)![0]; +} diff --git a/src/api/index.ts b/src/api/index.ts index c1af160..5fcbea8 100644 --- a/src/api/index.ts +++ b/src/api/index.ts @@ -1,5 +1,10 
@@ import { ponder } from "ponder:registry"; -import { graphql } from "ponder"; -ponder.use("/graphql", graphql()); -ponder.use("/", graphql()); +import { graphql as ponderGraphQL } from "ponder"; +import { graphql as subgraphGraphQL } from "./middleware"; + +// use ponder middleware at root +ponder.use("/", ponderGraphQL()); + +// use our custom graphql middleware at /subgraph +ponder.use("/subgraph", subgraphGraphQL()); diff --git a/src/api/middleware.ts b/src/api/middleware.ts new file mode 100644 index 0000000..14e3281 --- /dev/null +++ b/src/api/middleware.ts @@ -0,0 +1,85 @@ +/** + * This is ponder's graphql/middleware.ts, copied to fix module realm errors. + * The following changes were made: + * 1. removed ponder's GraphiQL, enabled graphql-yoga's GraphiQL. + * 2. builds our custom schema instead of the one provided in hono context + * https://github.com/ponder-sh/ponder/blob/0a5645ca8dec327b0c21da432ee00810edeb087c/packages/core/src/graphql/middleware.ts + */ + +import { maxAliasesPlugin } from "@escape.tech/graphql-armor-max-aliases"; +import { maxDepthPlugin } from "@escape.tech/graphql-armor-max-depth"; +import { maxTokensPlugin } from "@escape.tech/graphql-armor-max-tokens"; +import { type YogaServerInstance, createYoga } from "graphql-yoga"; +import { createMiddleware } from "hono/factory"; +import { buildDataLoaderCache } from "./graphql"; + +import { default as schema } from "ponder:schema"; + +import { buildGraphQLSchema } from "./graphql"; + +/** + * Middleware for GraphQL with an interactive web view. 
+ * + * - Docs: https://ponder.sh/docs/query/api-functions#register-graphql-middleware + * + * @example + * import { ponder } from "ponder:registry"; + * import { graphql } from "ponder"; + * + * ponder.use("/graphql", graphql()); + * + */ +export const graphql = ( + { + maxOperationTokens = 1000, + maxOperationDepth = 100, + maxOperationAliases = 30, + }: { + maxOperationTokens?: number; + maxOperationDepth?: number; + maxOperationAliases?: number; + } = { + // Default limits are from Apollo: + // https://www.apollographql.com/blog/prevent-graph-misuse-with-operation-size-and-complexity-limit + maxOperationTokens: 1000, + maxOperationDepth: 100, + maxOperationAliases: 30, + }, +) => { + let yoga: YogaServerInstance | undefined = undefined; + + return createMiddleware(async (c) => { + if (yoga === undefined) { + const metadataStore = c.get("metadataStore"); + const graphqlSchema = buildGraphQLSchema(schema); + const drizzle = c.get("db"); + + yoga = createYoga({ + schema: graphqlSchema, + context: () => { + const getDataLoader = buildDataLoaderCache({ drizzle }); + return { drizzle, metadataStore, getDataLoader }; + }, + graphqlEndpoint: c.req.path, + maskedErrors: process.env.NODE_ENV === "production", + logging: false, + graphiql: true, // NOTE: enable graphql-yoga's default graphiql + parserAndValidationCache: false, + plugins: [ + maxTokensPlugin({ n: maxOperationTokens }), + maxDepthPlugin({ n: maxOperationDepth, ignoreIntrospection: false }), + maxAliasesPlugin({ n: maxOperationAliases, allowList: [] }), + ], + }); + } + + const response = await yoga.handle(c.req.raw); + // TODO: Figure out why Yoga is returning 500 status codes for GraphQL errors.
+ // @ts-expect-error + response.status = 200; + // @ts-expect-error + response.statusText = "OK"; + + return response; + }); +}; diff --git a/src/handlers/NameWrapper.ts b/src/handlers/NameWrapper.ts index bd1191a..61b25b6 100644 --- a/src/handlers/NameWrapper.ts +++ b/src/handlers/NameWrapper.ts @@ -127,10 +127,9 @@ export const makeNameWrapperHandlers = (ownedName: `${string}eth`) => { await upsertAccount(context, owner); await context.db.update(schema.domain, { id: node }).set((domain) => ({ - // null expiry date if the domain is not a direct child of .eth // https://github.com/ensdomains/ens-subgraph/blob/master/src/nameWrapper.ts#L123 - ...(domain.expiryDate && domain.parentId !== ownedSubnameNode && { expiryDate: null }), - ownerId: owner, + // null expiry date if the domain is not a direct child of .eth + expiryDate: domain.parentId !== ownedSubnameNode ? null : domain.expiryDate, wrappedOwnerId: null, })); diff --git a/src/handlers/Registrar.ts b/src/handlers/Registrar.ts index eb3057b..b21ff29 100644 --- a/src/handlers/Registrar.ts +++ b/src/handlers/Registrar.ts @@ -18,7 +18,9 @@ export const makeRegistrarHandlers = (ownedName: `${string}eth`) => { const node = makeSubnodeNamehash(ownedSubnameNode, label); const domain = await context.db.find(schema.domain, { id: node }); - if (!domain) throw new Error("domain expected"); + + // encode the runtime assertion here https://github.com/ensdomains/ens-subgraph/blob/master/src/ethRegistrar.ts#L101 + if (!domain) throw new Error("domain expected in setNamePreimage but not found"); if (domain.labelName !== name) { await context.db diff --git a/src/handlers/Registry.ts b/src/handlers/Registry.ts index c1dc146..710a341 100644 --- a/src/handlers/Registry.ts +++ b/src/handlers/Registry.ts @@ -40,7 +40,12 @@ export async function setupRootNode({ context }: { context: Context }) { id: ROOT_NODE, ownerId: zeroAddress, createdAt: 0n, - isMigrated: false, + // NOTE: we initialize the root node as migrated because: + // 1. 
this matches subgraph's existing behavior, despite the root node not technically being + // migrated until the new registry is deployed and + // 2. other plugins (base, linea) don't have the concept of migration but defaulting to true + // is a reasonable behavior + isMigrated: true, }) // only insert the domain entity into the database if it doesn't already exist .onConflictDoNothing(); @@ -129,6 +134,7 @@ export const handleNewOwner = ownerId: owner, parentId: node, createdAt: event.block.timestamp, + labelhash: event.args.label, isMigrated, }); @@ -189,13 +195,15 @@ export async function handleNewResolver({ // if zeroing out a domain's resolver, remove the reference instead of tracking a zeroAddress Resolver // NOTE: old resolver resources are kept for event logs if (event.args.resolver === zeroAddress) { - await context.db.update(schema.domain, { id: node }).set({ resolverId: null }); + await context.db + .update(schema.domain, { id: node }) + .set({ resolverId: null, resolvedAddressId: null }); // garbage collect newly 'empty' domain iff necessary await recursivelyRemoveEmptyDomainFromParentSubdomainCount(context, node); } else { // otherwise upsert the resolver - const resolverId = makeResolverId(node, resolverAddress); + const resolverId = makeResolverId(resolverAddress, node); const resolver = await context.db .insert(schema.resolver) @@ -204,12 +212,14 @@ export async function handleNewResolver({ domainId: event.args.node, address: event.args.resolver, }) - .onConflictDoNothing(); + .onConflictDoUpdate({}); // noop update to return the existing record // update the domain to point to it, and denormalize the eth addr + // NOTE: this implements the logic as documented here + // https://github.com/ensdomains/ens-subgraph/blob/master/src/ensRegistry.ts#L193 await context.db .update(schema.domain, { id: node }) - .set({ resolverId, resolvedAddress: resolver?.addrId }); + .set({ resolverId, resolvedAddressId: resolver.addrId }); } // TODO: log DomainEvent diff 
--git a/src/handlers/Resolver.ts b/src/handlers/Resolver.ts index d77167a..bda3eac 100644 --- a/src/handlers/Resolver.ts +++ b/src/handlers/Resolver.ts @@ -6,6 +6,12 @@ import { upsertAccount, upsertResolver } from "../lib/db-helpers"; import { hasNullByte, uniq } from "../lib/helpers"; import { makeResolverId } from "../lib/ids"; +// NOTE: both subgraph and this indexer use upserts in this file because a 'Resolver' is _any_ +// contract on the chain that emits an event with this signature, which may or may not actually be +// a contract intended for use with ENS as a Resolver. because of this, each event could be the +// first event the indexer has seen for this contract (and its Resolver id) and therefore need not +// assume a Resolver entity already exists + export async function handleAddrChanged({ context, event, @@ -19,7 +25,7 @@ export async function handleAddrChanged({ const { a: address, node } = event.args; await upsertAccount(context, address); - const id = makeResolverId(node, event.log.address); + const id = makeResolverId(event.log.address, node); await upsertResolver(context, { id, domainId: node, @@ -27,10 +33,10 @@ addrId: address, }); - // materialize the resolved add to the domain iff this resolver is active + // materialize the resolved addr to the domain iff this resolver is active const domain = await context.db.find(schema.domain, { id: node }); if (domain?.resolverId === id) { - await context.db.update(schema.domain, { id: node }).set({ resolvedAddress: address }); + await context.db.update(schema.domain, { id: node }).set({ resolvedAddressId: address }); } // TODO: log ResolverEvent @@ -49,7 +55,7 @@ export async function handleAddressChanged({ const { node, coinType, newAddress } = event.args; await upsertAccount(context, newAddress); - const id = makeResolverId(node, event.log.address); + const id = makeResolverId(event.log.address, node); const resolver = await upsertResolver(context, { id, 
domainId: node, @@ -59,7 +65,7 @@ export async function handleAddressChanged({ // upsert the new coinType await context.db .update(schema.resolver, { id }) - .set({ coinTypes: uniq([...resolver.coinTypes, coinType]) }); + .set({ coinTypes: uniq([...(resolver.coinTypes ?? []), coinType]) }); // TODO: log ResolverEvent } @@ -77,7 +83,7 @@ export async function handleNameChanged({ const { node, name } = event.args; if (hasNullByte(name)) return; - const id = makeResolverId(node, event.log.address); + const id = makeResolverId(event.log.address, node); await upsertResolver(context, { id, domainId: node, @@ -98,7 +104,7 @@ export async function handleABIChanged({ }; }) { const { node } = event.args; - const id = makeResolverId(node, event.log.address); + const id = makeResolverId(event.log.address, node); const resolver = await upsertResolver(context, { id, domainId: node, @@ -119,7 +125,7 @@ export async function handlePubkeyChanged({ }; }) { const { node } = event.args; - const id = makeResolverId(node, event.log.address); + const id = makeResolverId(event.log.address, node); const resolver = await upsertResolver(context, { id, domainId: node, @@ -140,7 +146,7 @@ export async function handleTextChanged({ }; }) { const { node, key } = event.args; - const id = makeResolverId(node, event.log.address); + const id = makeResolverId(event.log.address, node); const resolver = await upsertResolver(context, { id, domainId: node, @@ -148,7 +154,9 @@ export async function handleTextChanged({ }); // upsert new key - await context.db.update(schema.resolver, { id }).set({ texts: uniq([...resolver.texts, key]) }); + await context.db + .update(schema.resolver, { id }) + .set({ texts: uniq([...(resolver.texts ?? 
[]), key]) }); // TODO: log ResolverEvent } @@ -164,7 +172,7 @@ export async function handleContenthashChanged({ }; }) { const { node, hash } = event.args; - const id = makeResolverId(node, event.log.address); + const id = makeResolverId(event.log.address, node); await upsertResolver(context, { id, domainId: node, @@ -190,7 +198,7 @@ export async function handleInterfaceChanged({ }; }) { const { node } = event.args; - const id = makeResolverId(node, event.log.address); + const id = makeResolverId(event.log.address, node); await upsertResolver(context, { id, domainId: node, @@ -216,7 +224,7 @@ export async function handleAuthorisationChanged({ }; }) { const { node } = event.args; - const id = makeResolverId(node, event.log.address); + const id = makeResolverId(event.log.address, node); await upsertResolver(context, { id, domainId: node, @@ -241,21 +249,24 @@ export async function handleVersionChanged({ }) { // a version change nulls out the resolver const { node } = event.args; - const id = makeResolverId(node, event.log.address); + const id = makeResolverId(event.log.address, node); const domain = await context.db.find(schema.domain, { id: node }); - if (!domain) throw new Error("domain expected"); - // materialize the Domain's resolvedAddress field - if (domain.resolverId === id) { - await context.db.update(schema.domain, { id: node }).set({ resolvedAddress: null }); + // materialize the Domain's resolvedAddress field iff exists and is set to this Resolver + if (domain && domain.resolverId === id) { + await context.db.update(schema.domain, { id: node }).set({ resolvedAddressId: null }); } - // clear out the resolver's info - await context.db.update(schema.resolver, { id }).set({ + await upsertResolver(context, { + id, + domainId: node, + address: event.log.address, + + // clear out the resolver's info addrId: null, contentHash: null, - coinTypes: [], - texts: [], + coinTypes: null, + texts: null, }); // TODO: log ResolverEvent diff --git a/src/lib/ids.ts 
b/src/lib/ids.ts index 4e21edf..9d9e763 100644 --- a/src/lib/ids.ts +++ b/src/lib/ids.ts @@ -1,7 +1,9 @@ import type { Event } from "ponder:registry"; import type { Address, Hex } from "viem"; -export const makeResolverId = (node: Hex, address: Address) => [address, node].join("-"); +// NOTE: subgraph uses lowercase address here, viem provides us checksummed, so we lowercase it +export const makeResolverId = (address: Address, node: Hex) => + [address.toLowerCase(), node].join("-"); // https://github.com/ensdomains/ens-subgraph/blob/master/src/utils.ts#L5 // produces `blocknumber-logIndex` or `blocknumber-logindex-transferindex` diff --git a/src/plugins/base.eth/ponder.config.ts b/src/plugins/base.eth/ponder.config.ts index aadd96e..50fe3df 100644 --- a/src/plugins/base.eth/ponder.config.ts +++ b/src/plugins/base.eth/ponder.config.ts @@ -1,5 +1,5 @@ import { type ContractConfig, createConfig, factory } from "ponder"; -import { http, getAbiItem } from "viem"; +import { http } from "viem"; import { base } from "viem/chains"; import { blockConfig, rpcEndpointUrl, rpcMaxRequestsPerSecond } from "../../lib/helpers"; @@ -37,12 +37,24 @@ export const config = createConfig({ [pluginNamespace("Resolver")]: { network: "base", abi: L2Resolver, - address: factory({ - address: "0xb94704422c2a1e396835a571837aa5ae53285a95", - event: getAbiItem({ abi: Registry, name: "NewResolver" }), - parameter: "resolver", - }), - ...blockConfig(START_BLOCK, 17575714, END_BLOCK), + // NOTE: this indexes every event ever emitted that looks like this + filter: { + event: [ + "AddrChanged", + "AddressChanged", + "NameChanged", + "ABIChanged", + "PubkeyChanged", + "TextChanged", + "ContenthashChanged", + "InterfaceChanged", + "VersionChanged", + "DNSRecordChanged", + "DNSRecordDeleted", + "DNSZonehashChanged", + ], + }, + ...blockConfig(START_BLOCK, 17571480, END_BLOCK), }, [pluginNamespace("BaseRegistrar")]: { network: "base", diff --git a/src/plugins/eth/handlers/Registry.ts 
b/src/plugins/eth/handlers/Registry.ts index 39b5b51..9c75cdd 100644 --- a/src/plugins/eth/handlers/Registry.ts +++ b/src/plugins/eth/handlers/Registry.ts @@ -8,7 +8,7 @@ import { handleTransfer, setupRootNode, } from "../../../handlers/Registry"; -import { makeSubnodeNamehash } from "../../../lib/subname-helpers"; +import { ROOT_NODE, makeSubnodeNamehash } from "../../../lib/subname-helpers"; import { pluginNamespace } from "../ponder.config"; // a domain is migrated iff it exists and isMigrated is set to true, otherwise it is not @@ -30,15 +30,13 @@ export default function () { }); ponder.on(pluginNamespace("RegistryOld:NewResolver"), async ({ context, event }) => { - // NOTE: the subgraph makes an exception for the root node here - // but i don't know that that's necessary, as in ponder our root node starts out - // unmigrated and once the NewOwner event is emitted by the new registry, - // the root will be considered migrated - // https://github.com/ensdomains/ens-subgraph/blob/master/src/ensRegistry.ts#L246 - - // otherwise, only handle iff not migrated const isMigrated = await isDomainMigrated(context, event.args.node); - if (isMigrated) return; + const isRootNode = event.args.node === ROOT_NODE; + + // inverted logic of https://github.com/ensdomains/ens-subgraph/blob/master/src/ensRegistry.ts#L246 + // NOTE: the subgraph must include an exception here for the root node because it starts out + // isMigrated: true, but we definitely still want to handle NewResolver events for it. + if (isMigrated && !isRootNode) return; return handleNewResolver({ context, event }); }); @@ -49,6 +47,12 @@ export default function () { }); ponder.on(pluginNamespace("RegistryOld:Transfer"), async ({ context, event }) => { + // NOTE: this logic derived from the subgraph introduces a bug for queries with a blockheight + // below 9380380, when the new Registry was deployed, as it implicitly ignores Transfer events + // of the ROOT_NODE. 
as a result, the root node's owner is always zeroAddress until the new + Registry events are picked up. for backwards compatibility this behavior is re-implemented + here. + const isMigrated = await isDomainMigrated(context, event.args.node); if (isMigrated) return; return handleTransfer({ context, event }); diff --git a/src/plugins/eth/handlers/Resolver.ts b/src/plugins/eth/handlers/Resolver.ts index e7e0046..8fec2e1 100644 --- a/src/plugins/eth/handlers/Resolver.ts +++ b/src/plugins/eth/handlers/Resolver.ts @@ -17,36 +17,6 @@ import { import { pluginNamespace } from "../ponder.config"; export default function () { - // Old registry handlers - ponder.on(pluginNamespace("OldRegistryResolvers:AddrChanged"), handleAddrChanged); - ponder.on(pluginNamespace("OldRegistryResolvers:AddressChanged"), handleAddressChanged); - ponder.on(pluginNamespace("OldRegistryResolvers:NameChanged"), handleNameChanged); - ponder.on(pluginNamespace("OldRegistryResolvers:ABIChanged"), handleABIChanged); - ponder.on(pluginNamespace("OldRegistryResolvers:PubkeyChanged"), handlePubkeyChanged); - ponder.on( - pluginNamespace( - "OldRegistryResolvers:TextChanged(bytes32 indexed node, string indexed indexedKey, string key)", - ), - handleTextChanged, - ); - ponder.on( - pluginNamespace( - "OldRegistryResolvers:TextChanged(bytes32 indexed node, string indexed indexedKey, string key, string value)", - ), - handleTextChanged, - ); - ponder.on(pluginNamespace("OldRegistryResolvers:ContenthashChanged"), handleContenthashChanged); - ponder.on(pluginNamespace("OldRegistryResolvers:InterfaceChanged"), handleInterfaceChanged); - ponder.on( - pluginNamespace("OldRegistryResolvers:AuthorisationChanged"), - handleAuthorisationChanged, - ); - ponder.on(pluginNamespace("OldRegistryResolvers:VersionChanged"), handleVersionChanged); - ponder.on(pluginNamespace("OldRegistryResolvers:DNSRecordChanged"), handleDNSRecordChanged); - ponder.on(pluginNamespace("OldRegistryResolvers:DNSRecordDeleted"), 
handleDNSRecordDeleted); - ponder.on(pluginNamespace("OldRegistryResolvers:DNSZonehashChanged"), handleDNSZonehashChanged); - - // New registry handlers ponder.on(pluginNamespace("Resolver:AddrChanged"), handleAddrChanged); ponder.on(pluginNamespace("Resolver:AddressChanged"), handleAddressChanged); ponder.on(pluginNamespace("Resolver:NameChanged"), handleNameChanged); diff --git a/src/plugins/eth/ponder.config.ts b/src/plugins/eth/ponder.config.ts index 98134ce..5904523 100644 --- a/src/plugins/eth/ponder.config.ts +++ b/src/plugins/eth/ponder.config.ts @@ -1,5 +1,5 @@ -import { ContractConfig, createConfig, factory, mergeAbis } from "ponder"; -import { http, getAbiItem } from "viem"; +import { ContractConfig, createConfig, mergeAbis } from "ponder"; +import { http } from "viem"; import { mainnet } from "viem/chains"; import { blockConfig, rpcEndpointUrl, rpcMaxRequestsPerSecond } from "../../lib/helpers"; @@ -21,7 +21,10 @@ export const pluginNamespace = createPluginNamespace(ownedName); // constrain indexing between the following start/end blocks // https://ponder.sh/0_6/docs/contracts-and-networks#block-range const START_BLOCK: ContractConfig["startBlock"] = undefined; -const END_BLOCK: ContractConfig["endBlock"] = undefined; +const END_BLOCK: ContractConfig["endBlock"] = 21_000_000; + +const REGISTRY_OLD_ADDRESS = "0x314159265dd8dbb310642f98f50c066173c1259b"; +const REGISTRY_ADDRESS = "0x00000000000C2E074eC69A0dFb2997BA6C7d2e1e"; export const config = createConfig({ networks: { @@ -35,34 +38,38 @@ export const config = createConfig({ [pluginNamespace("RegistryOld")]: { network: "mainnet", abi: Registry, - address: "0x314159265dd8dbb310642f98f50c066173c1259b", + address: REGISTRY_OLD_ADDRESS, ...blockConfig(START_BLOCK, 3327417, END_BLOCK), }, [pluginNamespace("Registry")]: { network: "mainnet", abi: Registry, - address: "0x00000000000C2E074eC69A0dFb2997BA6C7d2e1e", - ...blockConfig(START_BLOCK, 9380380, END_BLOCK), - }, - 
[pluginNamespace("OldRegistryResolvers")]: { - network: "mainnet", - abi: RESOLVER_ABI, - address: factory({ - address: "0x314159265dd8dbb310642f98f50c066173c1259b", - event: getAbiItem({ abi: Registry, name: "NewResolver" }), - parameter: "resolver", - }), + address: REGISTRY_ADDRESS, ...blockConfig(START_BLOCK, 9380380, END_BLOCK), }, [pluginNamespace("Resolver")]: { network: "mainnet", abi: RESOLVER_ABI, - address: factory({ - address: "0x00000000000C2E074eC69A0dFb2997BA6C7d2e1e", - event: getAbiItem({ abi: Registry, name: "NewResolver" }), - parameter: "resolver", - }), - ...blockConfig(START_BLOCK, 9380380, END_BLOCK), + // NOTE: this indexes every event ever emitted that looks like this + filter: { + event: [ + "AddrChanged", + "AddressChanged", + "NameChanged", + "ABIChanged", + "PubkeyChanged", + "TextChanged(bytes32 indexed node, string indexed indexedKey, string key)", + "TextChanged(bytes32 indexed node, string indexed indexedKey, string key, string value)", + "ContenthashChanged", + "InterfaceChanged", + "AuthorisationChanged", + "VersionChanged", + "DNSRecordChanged", + "DNSRecordDeleted", + "DNSZonehashChanged", + ], + }, + ...blockConfig(START_BLOCK, 3327417, END_BLOCK), }, [pluginNamespace("BaseRegistrar")]: { network: "mainnet", diff --git a/src/plugins/linea.eth/ponder.config.ts b/src/plugins/linea.eth/ponder.config.ts index 17fdb41..4362400 100644 --- a/src/plugins/linea.eth/ponder.config.ts +++ b/src/plugins/linea.eth/ponder.config.ts @@ -37,11 +37,23 @@ export const config = createConfig({ [pluginNamespace("Resolver")]: { network: "linea", abi: Resolver, - address: factory({ - address: "0x50130b669B28C339991d8676FA73CF122a121267", - event: getAbiItem({ abi: Registry, name: "NewResolver" }), - parameter: "resolver", - }), + // NOTE: this indexes every event ever emitted that looks like this + filter: { + event: [ + "AddrChanged", + "AddressChanged", + "NameChanged", + "ABIChanged", + "PubkeyChanged", + "TextChanged", + "ContenthashChanged", + 
"InterfaceChanged", + "VersionChanged", + "DNSRecordChanged", + "DNSRecordDeleted", + "DNSZonehashChanged", + ], + }, ...blockConfig(START_BLOCK, 6682888, END_BLOCK), }, [pluginNamespace("BaseRegistrar")]: {