From 6320042c525e12cefd13b814047a5235cdde9a30 Mon Sep 17 00:00:00 2001 From: shrugs Date: Mon, 13 Jan 2025 13:04:45 -0600 Subject: [PATCH 01/27] wip: custom graphql schema loaded --- src/api/graphql.ts | 953 ++++++++++++++++++++++++++++++++++++++++++ src/api/index.ts | 14 +- src/api/middleware.ts | 79 ++++ 3 files changed, 1044 insertions(+), 2 deletions(-) create mode 100644 src/api/graphql.ts create mode 100644 src/api/middleware.ts diff --git a/src/api/graphql.ts b/src/api/graphql.ts new file mode 100644 index 0000000..7a7e275 --- /dev/null +++ b/src/api/graphql.ts @@ -0,0 +1,953 @@ +/** + * This is an autogenerated graphql schema, initially based on ponder's, designed to mimic + * the subgraph graphql api for queries we've deemed relevant (see docs). + */ + +// here we inline the following types from this original import +// import type { Drizzle, OnchainTable, Schema } from "ponder"; +import type { NodePgDatabase } from "drizzle-orm/node-postgres"; +import type { PgliteDatabase } from "drizzle-orm/pglite"; + +export type Drizzle = + | NodePgDatabase + | PgliteDatabase; + +export type Schema = { [name: string]: unknown }; + +export const onchain = Symbol.for("ponder:onchain"); + +export type OnchainTable< + T extends TableConfig & { + extra: PgTableExtraConfig | undefined; + } = TableConfig & { extra: PgTableExtraConfig | undefined }, +> = PgTable & { + [Key in keyof T["columns"]]: T["columns"][Key]; +} & { [onchain]: true } & { + enableRLS: () => Omit, "enableRLS">; +}; + +import DataLoader from "dataloader"; +import { + type Column, + Many, + One, + type SQL, + type TableRelationalConfig, + and, + arrayContained, + arrayContains, + asc, + count, + createTableRelationsHelpers, + desc, + eq, + extractTablesRelationalConfig, + getTableColumns, + gt, + gte, + inArray, + is, + like, + lt, + lte, + ne, + not, + notInArray, + notLike, + or, +} from "drizzle-orm"; +import { + type PgEnum, + PgEnumColumn, + PgInteger, + PgSerial, + PgTable, + PgTableExtraConfig, + 
TableConfig, + isPgEnum, +} from "drizzle-orm/pg-core"; +import { + GraphQLBoolean, + GraphQLEnumType, + type GraphQLFieldConfig, + type GraphQLFieldConfigMap, + GraphQLFloat, + type GraphQLInputFieldConfigMap, + GraphQLInputObjectType, + type GraphQLInputType, + GraphQLInt, + GraphQLList, + GraphQLNonNull, + GraphQLObjectType, + type GraphQLOutputType, + type GraphQLResolveInfo, + GraphQLScalarType, + GraphQLSchema, + GraphQLString, +} from "graphql"; +import { GraphQLJSON } from "graphql-scalars"; + +type Parent = Record; +type Context = { + getDataLoader: ReturnType; + metadataStore: MetadataStore; + drizzle: Drizzle<{ [key: string]: OnchainTable }>; +}; + +type PluralArgs = { + where?: { [key: string]: number | string }; + after?: string; + before?: string; + limit?: number; + orderBy?: string; + orderDirection?: "asc" | "desc"; +}; + +const DEFAULT_LIMIT = 50 as const; +const MAX_LIMIT = 1000 as const; + +export function buildGraphQLSchema(schema: Schema): GraphQLSchema { + const tablesConfig = extractTablesRelationalConfig(schema, createTableRelationsHelpers); + + const tables = Object.values(tablesConfig.tables) as TableRelationalConfig[]; + + const enums = Object.entries(schema).filter((el): el is [string, PgEnum<[string, ...string[]]>] => + isPgEnum(el[1]), + ); + const enumTypes: Record = {}; + for (const [enumTsName, enumObject] of enums) { + // Note that this is keyed by enumName (the SQL name) because that's what is + // available on the PgEnumColumn type. See `columnToGraphQLCore` for context. 
+ enumTypes[enumObject.enumName] = new GraphQLEnumType({ + name: enumTsName, + values: enumObject.enumValues.reduce( + (acc: Record, cur) => ({ ...acc, [cur]: {} }), + {}, + ), + }); + } + + const entityFilterTypes: Record = {}; + for (const table of tables) { + const filterType = new GraphQLInputObjectType({ + name: `${table.tsName}Filter`, + fields: () => { + const filterFields: GraphQLInputFieldConfigMap = { + // Logical operators + AND: { type: new GraphQLList(filterType) }, + OR: { type: new GraphQLList(filterType) }, + }; + + for (const [columnName, column] of Object.entries(table.columns)) { + const type = columnToGraphQLCore(column, enumTypes); + + // List fields => universal, plural + if (type instanceof GraphQLList) { + const baseType = innerType(type); + + conditionSuffixes.universal.forEach((suffix) => { + filterFields[`${columnName}${suffix}`] = { + type: new GraphQLList(baseType), + }; + }); + + conditionSuffixes.plural.forEach((suffix) => { + filterFields[`${columnName}${suffix}`] = { type: baseType }; + }); + } + + // JSON => no filters. + // Boolean => universal and singular only. 
+ // All other scalar => universal, singular, numeric OR string depending on type + if (type instanceof GraphQLScalarType || type instanceof GraphQLEnumType) { + if (type.name === "JSON") continue; + + conditionSuffixes.universal.forEach((suffix) => { + filterFields[`${columnName}${suffix}`] = { + type, + }; + }); + + conditionSuffixes.singular.forEach((suffix) => { + filterFields[`${columnName}${suffix}`] = { + type: new GraphQLList(type), + }; + }); + + if (["String", "ID"].includes(type.name)) { + conditionSuffixes.string.forEach((suffix) => { + filterFields[`${columnName}${suffix}`] = { + type: type, + }; + }); + } + + if (["Int", "Float", "BigInt"].includes(type.name)) { + conditionSuffixes.numeric.forEach((suffix) => { + filterFields[`${columnName}${suffix}`] = { + type: type, + }; + }); + } + } + } + + return filterFields; + }, + }); + entityFilterTypes[table.tsName] = filterType; + } + + const entityTypes: Record> = {}; + const entityPageTypes: Record = {}; + + for (const table of tables) { + entityTypes[table.tsName] = new GraphQLObjectType({ + name: table.tsName, + fields: () => { + const fieldConfigMap: GraphQLFieldConfigMap = {}; + + // Scalar fields + for (const [columnName, column] of Object.entries(table.columns)) { + const type = columnToGraphQLCore(column, enumTypes); + fieldConfigMap[columnName] = { + type: column.notNull ? 
new GraphQLNonNull(type) : type, + }; + } + + // Relations + const relations = Object.entries(table.relations); + for (const [relationName, relation] of relations) { + const referencedTable = tables.find( + (table) => table.dbName === relation.referencedTableName, + ); + if (!referencedTable) + throw new Error( + `Internal error: Referenced table "${relation.referencedTableName}" not found`, + ); + + const referencedEntityType = entityTypes[referencedTable.tsName]; + const referencedEntityPageType = entityPageTypes[referencedTable.tsName]; + const referencedEntityFilterType = entityFilterTypes[referencedTable.tsName]; + if ( + referencedEntityType === undefined || + referencedEntityPageType === undefined || + referencedEntityFilterType === undefined + ) + throw new Error( + `Internal error: Referenced entity types not found for table "${referencedTable.tsName}" `, + ); + + if (is(relation, One)) { + const fields = relation.config?.fields ?? []; + const references = relation.config?.references ?? []; + + if (fields.length !== references.length) { + throw new Error( + "Internal error: Fields and references arrays must be the same length", + ); + } + + fieldConfigMap[relationName] = { + // Note: There is a `relation.isNullable` field here but it appears + // to be internal / incorrect. Until we have support for foriegn + // key constraints, all `one` relations must be nullable. 
+ type: referencedEntityType, + resolve: (parent, _args, context) => { + const loader = context.getDataLoader({ + table: referencedTable, + }); + + const rowFragment: Record = {}; + for (let i = 0; i < references.length; i++) { + const referenceColumn = references[i]!; + const fieldColumn = fields[i]!; + + const fieldColumnTsName = getColumnTsName(fieldColumn); + const referenceColumnTsName = getColumnTsName(referenceColumn); + + rowFragment[referenceColumnTsName] = parent[fieldColumnTsName]; + } + const encodedId = encodeRowFragment(rowFragment); + + return loader.load(encodedId); + }, + }; + } else if (is(relation, Many)) { + // Search the relations of the referenced table for the corresponding `one` relation. + // If "relationName" is not provided, use the first `one` relation that references this table. + const oneRelation = Object.values(referencedTable.relations).find( + (relation) => + relation.relationName === relationName || + (is(relation, One) && relation.referencedTableName === table.dbName), + ) as One | undefined; + if (!oneRelation) + throw new Error( + `Internal error: Relation "${relationName}" not found in table "${referencedTable.tsName}"`, + ); + + const fields = oneRelation.config?.fields ?? []; + const references = oneRelation.config?.references ?? 
[]; + + fieldConfigMap[relationName] = { + type: referencedEntityPageType, + args: { + where: { type: referencedEntityFilterType }, + orderBy: { type: GraphQLString }, + orderDirection: { type: GraphQLString }, + before: { type: GraphQLString }, + after: { type: GraphQLString }, + limit: { type: GraphQLInt }, + }, + resolve: (parent, args: PluralArgs, context, info) => { + const relationalConditions = []; + for (let i = 0; i < references.length; i++) { + const column = fields[i]!; + const value = parent[references[i]!.name]; + relationalConditions.push(eq(column, value)); + } + + const includeTotalCount = selectionIncludesField(info, "totalCount"); + + return executePluralQuery( + referencedTable, + context.drizzle, + args, + includeTotalCount, + relationalConditions, + ); + }, + }; + } else { + throw new Error( + `Internal error: Relation "${relationName}" is unsupported, expected One or Many`, + ); + } + } + + return fieldConfigMap; + }, + }); + + entityPageTypes[table.tsName] = new GraphQLObjectType({ + name: `${table.tsName}Page`, + fields: () => ({ + items: { + type: new GraphQLNonNull(new GraphQLList(new GraphQLNonNull(entityTypes[table.tsName]!))), + }, + pageInfo: { type: new GraphQLNonNull(GraphQLPageInfo) }, + totalCount: { type: new GraphQLNonNull(GraphQLInt) }, + }), + }); + } + + const queryFields: Record> = {}; + for (const table of tables) { + const entityType = entityTypes[table.tsName]!; + const entityPageType = entityPageTypes[table.tsName]!; + const entityFilterType = entityFilterTypes[table.tsName]!; + + const singularFieldName = table.tsName.charAt(0).toLowerCase() + table.tsName.slice(1); + const pluralFieldName = `${singularFieldName}s`; + + queryFields[singularFieldName] = { + type: entityType, + // Find the primary key columns and GraphQL core types and include them + // as arguments to the singular query type. 
+ args: Object.fromEntries( + table.primaryKey.map((column) => [ + getColumnTsName(column), + { + type: new GraphQLNonNull(columnToGraphQLCore(column, enumTypes) as GraphQLInputType), + }, + ]), + ), + resolve: async (_parent, args, context) => { + const loader = context.getDataLoader({ table }); + + // The `args` object here should be a valid `where` argument that + // uses the `eq` shorthand for each primary key column. + const encodedId = encodeRowFragment(args); + + return loader.load(encodedId); + }, + }; + + queryFields[pluralFieldName] = { + type: new GraphQLNonNull(entityPageType), + args: { + where: { type: entityFilterType }, + orderBy: { type: GraphQLString }, + orderDirection: { type: GraphQLString }, + before: { type: GraphQLString }, + after: { type: GraphQLString }, + limit: { type: GraphQLInt }, + }, + resolve: async (_parent, args: PluralArgs, context, info) => { + const includeTotalCount = selectionIncludesField(info, "totalCount"); + + return executePluralQuery(table, context.drizzle, args, includeTotalCount); + }, + }; + } + + queryFields._meta = { + type: GraphQLMeta, + resolve: async (_source, _args, context) => { + const status = await context.metadataStore.getStatus(); + return { status }; + }, + }; + + return new GraphQLSchema({ + // Include these here so they are listed first in the printed schema. 
+ types: [GraphQLJSON, GraphQLBigInt, GraphQLPageInfo, GraphQLMeta], + query: new GraphQLObjectType({ + name: "Query", + fields: queryFields, + }), + }); +} + +const GraphQLPageInfo = new GraphQLObjectType({ + name: "PageInfo", + fields: { + hasNextPage: { type: new GraphQLNonNull(GraphQLBoolean) }, + hasPreviousPage: { type: new GraphQLNonNull(GraphQLBoolean) }, + startCursor: { type: GraphQLString }, + endCursor: { type: GraphQLString }, + }, +}); + +const GraphQLBigInt = new GraphQLScalarType({ + name: "BigInt", + serialize: (value) => String(value), + parseValue: (value) => BigInt(value as any), + parseLiteral: (value) => { + if (value.kind === "StringValue") { + return BigInt(value.value); + } else { + throw new Error( + `Invalid value kind provided for field of type BigInt: ${value.kind}. Expected: StringValue`, + ); + } + }, +}); + +const GraphQLMeta = new GraphQLObjectType({ + name: "Meta", + fields: { status: { type: GraphQLJSON } }, +}); + +const columnToGraphQLCore = ( + column: Column, + enumTypes: Record, +): GraphQLOutputType => { + if (column.columnType === "PgEvmBigint") { + return GraphQLBigInt; + } + + if (column instanceof PgEnumColumn) { + if (column.enum === undefined) { + throw new Error( + `Internal error: Expected enum column "${getColumnTsName(column)}" to have an "enum" property`, + ); + } + const enumType = enumTypes[column.enum.enumName]; + if (enumType === undefined) { + throw new Error( + `Internal error: Expected to find a GraphQL enum named "${column.enum.enumName}"`, + ); + } + + return enumType; + } + + switch (column.dataType) { + case "boolean": + return GraphQLBoolean; + case "json": + return GraphQLJSON; + case "date": + return GraphQLString; + case "string": + return GraphQLString; + case "bigint": + return GraphQLString; + case "number": + return is(column, PgInteger) || is(column, PgSerial) ? 
GraphQLInt : GraphQLFloat; + case "buffer": + return new GraphQLList(new GraphQLNonNull(GraphQLInt)); + case "array": { + if (column.columnType === "PgVector") { + return new GraphQLList(new GraphQLNonNull(GraphQLFloat)); + } + + if (column.columnType === "PgGeometry") { + return new GraphQLList(new GraphQLNonNull(GraphQLFloat)); + } + + const innerType = columnToGraphQLCore((column as any).baseColumn, enumTypes); + + return new GraphQLList(new GraphQLNonNull(innerType)); + } + default: + throw new Error(`Type ${column.dataType} is not implemented`); + } +}; + +const innerType = (type: GraphQLOutputType): GraphQLScalarType | GraphQLEnumType => { + if (type instanceof GraphQLScalarType || type instanceof GraphQLEnumType) return type; + if (type instanceof GraphQLList || type instanceof GraphQLNonNull) return innerType(type.ofType); + throw new Error(`Type ${type.toString()} is not implemented`); +}; + +async function executePluralQuery( + table: TableRelationalConfig, + drizzle: Drizzle<{ [key: string]: OnchainTable }>, + args: PluralArgs, + includeTotalCount: boolean, + extraConditions: (SQL | undefined)[] = [], +) { + const rawTable = drizzle._.fullSchema[table.tsName]; + const baseQuery = drizzle.query[table.tsName]; + if (rawTable === undefined || baseQuery === undefined) + throw new Error(`Internal error: Table "${table.tsName}" not found in RQB`); + + const limit = args.limit ?? DEFAULT_LIMIT; + if (limit > MAX_LIMIT) { + throw new Error(`Invalid limit. Got ${limit}, expected <=${MAX_LIMIT}.`); + } + + const orderBySchema = buildOrderBySchema(table, args); + const orderBy = orderBySchema.map(([columnName, direction]) => { + const column = table.columns[columnName]; + if (column === undefined) { + throw new Error(`Unknown column "${columnName}" used in orderBy argument`); + } + return direction === "asc" ? 
asc(column) : desc(column); + }); + const orderByReversed = orderBySchema.map(([columnName, direction]) => { + const column = table.columns[columnName]; + if (column === undefined) { + throw new Error(`Unknown column "${columnName}" used in orderBy argument`); + } + return direction === "asc" ? desc(column) : asc(column); + }); + + const whereConditions = buildWhereConditions(args.where, table.columns); + + const after = args.after ?? null; + const before = args.before ?? null; + + if (after !== null && before !== null) { + throw new Error("Cannot specify both before and after cursors."); + } + + let startCursor = null; + let endCursor = null; + let hasPreviousPage = false; + let hasNextPage = false; + + const totalCountPromise = includeTotalCount + ? drizzle + .select({ count: count() }) + .from(rawTable) + .where(and(...whereConditions, ...extraConditions)) + .then((rows) => rows[0]?.count ?? null) + : Promise.resolve(null); + + // Neither cursors are specified, apply the order conditions and execute. + if (after === null && before === null) { + const [rows, totalCount] = await Promise.all([ + baseQuery.findMany({ + where: and(...whereConditions, ...extraConditions), + orderBy, + limit: limit + 1, + }), + totalCountPromise, + ]); + + if (rows.length === limit + 1) { + rows.pop(); + hasNextPage = true; + } + + startCursor = rows.length > 0 ? encodeCursor(orderBySchema, rows[0]!) : null; + endCursor = rows.length > 0 ? encodeCursor(orderBySchema, rows[rows.length - 1]!) : null; + + return { + items: rows, + totalCount, + pageInfo: { hasNextPage, hasPreviousPage, startCursor, endCursor }, + }; + } + + if (after !== null) { + // User specified an 'after' cursor. 
+ const cursorObject = decodeCursor(after); + const cursorCondition = buildCursorCondition(table, orderBySchema, "after", cursorObject); + + const [rows, totalCount] = await Promise.all([ + baseQuery.findMany({ + where: and(...whereConditions, cursorCondition, ...extraConditions), + orderBy, + limit: limit + 2, + }), + totalCountPromise, + ]); + + if (rows.length === 0) { + return { + items: rows, + totalCount, + pageInfo: { hasNextPage, hasPreviousPage, startCursor, endCursor }, + }; + } + + // If the cursor of the first returned record equals the `after` cursor, + // `hasPreviousPage` is true. Remove that record. + if (encodeCursor(orderBySchema, rows[0]!) === after) { + rows.shift(); + hasPreviousPage = true; + } else { + // Otherwise, remove the last record. + rows.pop(); + } + + // Now if the length of the records is still equal to limit + 1, + // there is a next page. + if (rows.length === limit + 1) { + rows.pop(); + hasNextPage = true; + } + + // Now calculate the cursors. + startCursor = rows.length > 0 ? encodeCursor(orderBySchema, rows[0]!) : null; + endCursor = rows.length > 0 ? encodeCursor(orderBySchema, rows[rows.length - 1]!) : null; + + return { + items: rows, + totalCount, + pageInfo: { hasNextPage, hasPreviousPage, startCursor, endCursor }, + }; + } + + // User specified a 'before' cursor. + const cursorObject = decodeCursor(before!); + const cursorCondition = buildCursorCondition(table, orderBySchema, "before", cursorObject); + + // Reverse the order by conditions to get the previous page, + // then reverse the results back to the original order. 
+ const [rows, totalCount] = await Promise.all([ + baseQuery + .findMany({ + where: and(...whereConditions, cursorCondition, ...extraConditions), + orderBy: orderByReversed, + limit: limit + 2, + }) + .then((rows) => rows.reverse()), + totalCountPromise, + ]); + + if (rows.length === 0) { + return { + items: rows, + totalCount, + pageInfo: { hasNextPage, hasPreviousPage, startCursor, endCursor }, + }; + } + + // If the cursor of the last returned record equals the `before` cursor, + // `hasNextPage` is true. Remove that record. + if (encodeCursor(orderBySchema, rows[rows.length - 1]!) === before) { + rows.pop(); + hasNextPage = true; + } else { + // Otherwise, remove the first record. + rows.shift(); + } + + // Now if the length of the records is equal to limit + 1, we know + // there is a previous page. + if (rows.length === limit + 1) { + rows.shift(); + hasPreviousPage = true; + } + + // Now calculate the cursors. + startCursor = rows.length > 0 ? encodeCursor(orderBySchema, rows[0]!) : null; + endCursor = rows.length > 0 ? encodeCursor(orderBySchema, rows[rows.length - 1]!) 
: null; + + return { + items: rows, + totalCount, + pageInfo: { hasNextPage, hasPreviousPage, startCursor, endCursor }, + }; +} + +const conditionSuffixes = { + universal: ["", "_not"], + singular: ["_in", "_not_in"], + plural: ["_has", "_not_has"], + numeric: ["_gt", "_lt", "_gte", "_lte"], + string: [ + "_contains", + "_not_contains", + "_starts_with", + "_ends_with", + "_not_starts_with", + "_not_ends_with", + ], +} as const; + +const conditionSuffixesByLengthDesc = Object.values(conditionSuffixes) + .flat() + .sort((a, b) => b.length - a.length); + +function buildWhereConditions( + where: Record | undefined, + columns: Record, +): (SQL | undefined)[] { + const conditions: (SQL | undefined)[] = []; + + if (where === undefined) return conditions; + + for (const [whereKey, rawValue] of Object.entries(where)) { + // Handle the `AND` and `OR` operators + if (whereKey === "AND" || whereKey === "OR") { + if (!Array.isArray(rawValue)) { + throw new Error( + `Invalid query: Expected an array for the ${whereKey} operator. Got: ${rawValue}`, + ); + } + + const nestedConditions = rawValue.flatMap((subWhere) => + buildWhereConditions(subWhere, columns), + ); + + if (nestedConditions.length > 0) { + conditions.push(whereKey === "AND" ? and(...nestedConditions) : or(...nestedConditions)); + } + continue; + } + + // Search for a valid filter suffix, traversing the list from longest to shortest + // to avoid ambiguity between cases like `_not_in` and `_in`. + const conditionSuffix = conditionSuffixesByLengthDesc.find((s) => whereKey.endsWith(s)); + if (conditionSuffix === undefined) { + throw new Error(`Invariant violation: Condition suffix not found for where key ${whereKey}`); + } + + // Remove the condition suffix and use the remaining string as the column name. + const columnName = whereKey.slice(0, whereKey.length - conditionSuffix.length); + + // Validate that the column name is present in the table. 
+ const column = columns[columnName]; + if (column === undefined) { + throw new Error(`Invalid query: Where clause contains unknown column ${columnName}`); + } + + switch (conditionSuffix) { + case "": + if (column.columnType === "PgArray") { + conditions.push(and(arrayContains(column, rawValue), arrayContained(column, rawValue))); + } else { + conditions.push(eq(column, rawValue)); + } + break; + case "_not": + if (column.columnType === "PgArray") { + conditions.push( + not(and(arrayContains(column, rawValue), arrayContained(column, rawValue))!), + ); + } else { + conditions.push(ne(column, rawValue)); + } + break; + case "_in": + conditions.push(inArray(column, rawValue)); + break; + case "_not_in": + conditions.push(notInArray(column, rawValue)); + break; + case "_has": + conditions.push(arrayContains(column, [rawValue])); + break; + case "_not_has": + conditions.push(not(arrayContains(column, [rawValue]))); + break; + case "_gt": + conditions.push(gt(column, rawValue)); + break; + case "_lt": + conditions.push(lt(column, rawValue)); + break; + case "_gte": + conditions.push(gte(column, rawValue)); + break; + case "_lte": + conditions.push(lte(column, rawValue)); + break; + case "_contains": + conditions.push(like(column, `%${rawValue}%`)); + break; + case "_not_contains": + conditions.push(notLike(column, `%${rawValue}%`)); + break; + case "_starts_with": + conditions.push(like(column, `${rawValue}%`)); + break; + case "_ends_with": + conditions.push(like(column, `%${rawValue}`)); + break; + case "_not_starts_with": + conditions.push(notLike(column, `${rawValue}%`)); + break; + case "_not_ends_with": + conditions.push(notLike(column, `%${rawValue}`)); + break; + default: + never(conditionSuffix); + } + } + + return conditions; +} + +function buildOrderBySchema(table: TableRelationalConfig, args: PluralArgs) { + // If the user-provided order by does not include the ALL of the ID columns, + // add any missing ID columns to the end of the order by clause (asc). 
+ // This ensures a consistent sort order to unblock cursor pagination. + const userDirection = args.orderDirection ?? "asc"; + const userColumns: [string, "asc" | "desc"][] = + args.orderBy !== undefined ? [[args.orderBy, userDirection]] : []; + const pkColumns = table.primaryKey.map((column) => [getColumnTsName(column), userDirection]); + const missingPkColumns = pkColumns.filter( + (pkColumn) => !userColumns.some((userColumn) => userColumn[0] === pkColumn[0]), + ) as [string, "asc" | "desc"][]; + return [...userColumns, ...missingPkColumns]; +} + +function encodeCursor( + orderBySchema: [string, "asc" | "desc"][], + row: { [k: string]: unknown }, +): string { + const cursorObject = Object.fromEntries( + orderBySchema.map(([columnName, _]) => [columnName, row[columnName]]), + ); + return encodeRowFragment(cursorObject); +} +function decodeCursor(cursor: string): { [k: string]: unknown } { + return decodeRowFragment(cursor); +} + +function encodeRowFragment(rowFragment: { [k: string]: unknown }): string { + return Buffer.from(serialize(rowFragment)).toString("base64"); +} +function decodeRowFragment(encodedRowFragment: string): { + [k: string]: unknown; +} { + return deserialize(Buffer.from(encodedRowFragment, "base64").toString()); +} + +function buildCursorCondition( + table: TableRelationalConfig, + orderBySchema: [string, "asc" | "desc"][], + direction: "after" | "before", + cursorObject: { [k: string]: unknown }, +): SQL | undefined { + const cursorColumns = orderBySchema.map(([columnName, orderDirection]) => { + const column = table.columns[columnName]; + if (column === undefined) + throw new Error(`Unknown column "${columnName}" used in orderBy argument`); + + const value = cursorObject[columnName]; + + let comparator: typeof gt | typeof lt; + let comparatorOrEquals: typeof gte | typeof lte; + if (direction === "after") { + [comparator, comparatorOrEquals] = orderDirection === "asc" ? 
[gt, gte] : [lt, lte]; + } else { + [comparator, comparatorOrEquals] = orderDirection === "asc" ? [lt, lte] : [gt, gte]; + } + + return { column, value, comparator, comparatorOrEquals }; + }); + + const buildCondition = (index: number): SQL | undefined => { + if (index === cursorColumns.length - 1) { + const { column, value, comparatorOrEquals } = cursorColumns[index]!; + return comparatorOrEquals(column, value); + } + + const currentColumn = cursorColumns[index]!; + const nextCondition = buildCondition(index + 1); + + return or( + currentColumn.comparator(currentColumn.column, currentColumn.value), + and(eq(currentColumn.column, currentColumn.value), nextCondition), + ); + }; + + return buildCondition(0); +} + +export function buildDataLoaderCache({ drizzle }: { drizzle: Drizzle }) { + const dataLoaderMap = new Map | undefined>(); + return ({ table }: { table: TableRelationalConfig }) => { + const baseQuery = (drizzle as Drizzle<{ [key: string]: OnchainTable }>).query[table.tsName]; + if (baseQuery === undefined) + throw new Error(`Internal error: Unknown table "${table.tsName}" in data loader cache`); + + let dataLoader = dataLoaderMap.get(table); + if (dataLoader === undefined) { + dataLoader = new DataLoader( + async (encodedIds) => { + const decodedRowFragments = encodedIds.map(decodeRowFragment); + + // The decoded row fragments should be valid `where` objects + // which use the `eq` object shorthand for each primary key column. 
+ const idConditions = decodedRowFragments.map((decodedRowFragment) => + and(...buildWhereConditions(decodedRowFragment, table.columns)), + ); + + const rows = await baseQuery.findMany({ + where: or(...idConditions), + limit: encodedIds.length, + }); + + return decodedRowFragments.map((decodedRowFragment) => { + return rows.find((row) => + Object.entries(decodedRowFragment).every(([col, val]) => row[col] === val), + ); + }); + }, + { maxBatchSize: 1_000 }, + ); + dataLoaderMap.set(table, dataLoader); + } + + return dataLoader; + }; +} + +function getColumnTsName(column: Column) { + const tableColumns = getTableColumns(column.table); + return Object.entries(tableColumns).find(([_, c]) => c.name === column.name)![0]; +} + +/** + * Returns `true` if the query includes a specific field. + * Does not consider nested selections; only works one "layer" deep. + */ +function selectionIncludesField(info: GraphQLResolveInfo, fieldName: string): boolean { + for (const fieldNode of info.fieldNodes) { + for (const selection of fieldNode.selectionSet?.selections ?? 
[]) { + if (selection.kind === "Field" && selection.name.value === fieldName) { + return true; + } + } + } + return false; +} diff --git a/src/api/index.ts b/src/api/index.ts index c1af160..985c2d9 100644 --- a/src/api/index.ts +++ b/src/api/index.ts @@ -1,5 +1,15 @@ import { ponder } from "ponder:registry"; -import { graphql } from "ponder"; +import { default as schema } from "ponder:schema"; -ponder.use("/graphql", graphql()); +import { createMiddleware } from "hono/factory"; +import { buildGraphQLSchema } from "./graphql"; +import { graphql } from "./middleware"; + +// inject our custom schema into the hono context, to be used in middleware.ts +const overrideGraphqlSchemaMiddleware = createMiddleware(async (c, next) => { + c.set("graphqlSchema", buildGraphQLSchema(schema)); + return await next(); +}); + +ponder.use(overrideGraphqlSchemaMiddleware); ponder.use("/", graphql()); diff --git a/src/api/middleware.ts b/src/api/middleware.ts new file mode 100644 index 0000000..4a15a8e --- /dev/null +++ b/src/api/middleware.ts @@ -0,0 +1,79 @@ +/** + * This is ponder's graphql/middleware.ts, copied to fix module realm errors. + * The only other change is enabling graphql-yoga's GraphiQL. + * https://github.com/ponder-sh/ponder/blob/0a5645ca8dec327b0c21da432ee00810edeb087c/packages/core/src/graphql/middleware.ts + */ + +import { maxAliasesPlugin } from "@escape.tech/graphql-armor-max-aliases"; +import { maxDepthPlugin } from "@escape.tech/graphql-armor-max-depth"; +import { maxTokensPlugin } from "@escape.tech/graphql-armor-max-tokens"; +import { type YogaServerInstance, createYoga } from "graphql-yoga"; +import { createMiddleware } from "hono/factory"; +import { buildDataLoaderCache } from "./graphql"; + +/** + * Middleware for GraphQL with an interactive web view. 
+ * + * - Docs: https://ponder.sh/docs/query/api-functions#register-graphql-middleware + * + * @example + * import { ponder } from "ponder:registry"; + * import { graphql } from "ponder"; + * + * ponder.use("/graphql", graphql()); + * + */ +export const graphql = ( + { + maxOperationTokens = 1000, + maxOperationDepth = 100, + maxOperationAliases = 30, + }: { + maxOperationTokens?: number; + maxOperationDepth?: number; + maxOperationAliases?: number; + } = { + // Default limits are from Apollo: + // https://www.apollographql.com/blog/prevent-graph-misuse-with-operation-size-and-complexity-limit + maxOperationTokens: 1000, + maxOperationDepth: 100, + maxOperationAliases: 30, + }, +) => { + let yoga: YogaServerInstance | undefined = undefined; + + return createMiddleware(async (c) => { + if (yoga === undefined) { + const metadataStore = c.get("metadataStore"); + const graphqlSchema = c.get("graphqlSchema"); + const drizzle = c.get("db"); + + yoga = createYoga({ + schema: graphqlSchema, + context: () => { + const getDataLoader = buildDataLoaderCache({ drizzle }); + return { drizzle, metadataStore, getDataLoader }; + }, + graphqlEndpoint: c.req.path, + maskedErrors: process.env.NODE_ENV === "production", + logging: false, + graphiql: true, // NOTE: enable graph-yoga's default graphiql + parserAndValidationCache: false, + plugins: [ + maxTokensPlugin({ n: maxOperationTokens }), + maxDepthPlugin({ n: maxOperationDepth, ignoreIntrospection: false }), + maxAliasesPlugin({ n: maxOperationAliases, allowList: [] }), + ], + }); + } + + const response = await yoga.handle(c.req.raw); + // TODO: Figure out why Yoga is returning 500 status codes for GraphQL errors. 
+ // @ts-expect-error + response.status = 200; + // @ts-expect-error + response.statusText = "OK"; + + return response; + }); +}; From ba2e4db9a6fc525520c055b24ab24554876322a9 Mon Sep 17 00:00:00 2001 From: shrugs Date: Mon, 13 Jan 2025 13:34:37 -0600 Subject: [PATCH 02/27] wip: basic collection queries working --- package.json | 11 +- pnpm-lock.yaml | 162 ++++++++++++++++++++++++++-- src/api/graphql.ts | 260 +++++---------------------------------------- 3 files changed, 190 insertions(+), 243 deletions(-) diff --git a/package.json b/package.json index b0ddcdf..052b4b7 100644 --- a/package.json +++ b/package.json @@ -13,8 +13,17 @@ }, "dependencies": { "@ensdomains/ensjs": "^4.0.2", + "@escape.tech/graphql-armor-max-aliases": "^2.6.0", + "@escape.tech/graphql-armor-max-depth": "^2.4.0", + "@escape.tech/graphql-armor-max-tokens": "^2.5.0", + "change-case": "^5.4.4", + "dataloader": "^2.2.3", + "drizzle-orm": "^0.38.3", + "graphql": "^16.10.0", + "graphql-scalars": "^1.24.0", + "graphql-yoga": "^5.10.9", "hono": "^4.6.14", - "ponder": "^0.8.17", + "ponder": "^0.8.24", "viem": "^2.21.57" }, "devDependencies": { diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index d291439..9bb18f7 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -14,12 +14,39 @@ importers: '@ensdomains/ensjs': specifier: ^4.0.2 version: 4.0.2(typescript@5.7.2)(viem@2.21.57(typescript@5.7.2)) + '@escape.tech/graphql-armor-max-aliases': + specifier: ^2.6.0 + version: 2.6.0 + '@escape.tech/graphql-armor-max-depth': + specifier: ^2.4.0 + version: 2.4.0 + '@escape.tech/graphql-armor-max-tokens': + specifier: ^2.5.0 + version: 2.5.0 + change-case: + specifier: ^5.4.4 + version: 5.4.4 + dataloader: + specifier: ^2.2.3 + version: 2.2.3 + drizzle-orm: + specifier: ^0.38.3 + version: 0.38.3(@electric-sql/pglite@0.2.13)(@opentelemetry/api@1.9.0)(@types/pg@8.11.10)(kysely@0.26.3)(pg@8.13.1)(react@18.3.1) + graphql: + specifier: ^16.10.0 + version: 16.10.0 + graphql-scalars: + specifier: ^1.24.0 + version: 
1.24.0(graphql@16.10.0) + graphql-yoga: + specifier: ^5.10.9 + version: 5.10.9(graphql@16.10.0) hono: specifier: ^4.6.14 version: 4.6.14 ponder: - specifier: ^0.8.17 - version: 0.8.17(@opentelemetry/api@1.9.0)(@types/node@20.17.10)(@types/pg@8.11.10)(hono@4.6.14)(typescript@5.7.2)(viem@2.21.57(typescript@5.7.2)) + specifier: ^0.8.24 + version: 0.8.24(@opentelemetry/api@1.9.0)(@types/node@20.17.10)(@types/pg@8.11.10)(hono@4.6.14)(typescript@5.7.2)(viem@2.21.57(typescript@5.7.2)) viem: specifier: ^2.21.57 version: 2.21.57(typescript@5.7.2) @@ -677,6 +704,9 @@ packages: resolution: {integrity: sha512-zgVZuo2WcZgfUEmsn6eO3kINexW8RAE4maiQ8QNs8CtpPCSyMiYsULR3HQYkm3w8FIA3SberyMJMSldGsW+U3w==} engines: {node: ^12.17.0 || ^14.13 || >=16.0.0} + change-case@5.4.4: + resolution: {integrity: sha512-HRQyTk2/YPEkt9TnUPbOpr64Uw3KOicFWPVBb+xiHvd6eBx/qPr9xqfBFDT8P2vWsvvz4jbEkfDe71W3VyNu2w==} + chokidar@3.6.0: resolution: {integrity: sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==} engines: {node: '>= 8.10.0'} @@ -892,6 +922,98 @@ packages: sqlite3: optional: true + drizzle-orm@0.38.3: + resolution: {integrity: sha512-w41Y+PquMpSff/QDRGdItG0/aWca+/J3Sda9PPGkTxBtjWQvgU1jxlFBXdjog5tYvTu58uvi3PwR1NuCx0KeZg==} + peerDependencies: + '@aws-sdk/client-rds-data': '>=3' + '@cloudflare/workers-types': '>=4' + '@electric-sql/pglite': '>=0.2.0' + '@libsql/client': '>=0.10.0' + '@libsql/client-wasm': '>=0.10.0' + '@neondatabase/serverless': '>=0.10.0' + '@op-engineering/op-sqlite': '>=2' + '@opentelemetry/api': ^1.4.1 + '@planetscale/database': '>=1' + '@prisma/client': '*' + '@tidbcloud/serverless': '*' + '@types/better-sqlite3': '*' + '@types/pg': '*' + '@types/react': '>=18' + '@types/sql.js': '*' + '@vercel/postgres': '>=0.8.0' + '@xata.io/client': '*' + better-sqlite3: '>=7' + bun-types: '*' + expo-sqlite: '>=14.0.0' + knex: '*' + kysely: '*' + mysql2: '>=2' + pg: '>=8' + postgres: '>=3' + prisma: '*' + react: '>=18' + sql.js: '>=1' + sqlite3: 
'>=5' + peerDependenciesMeta: + '@aws-sdk/client-rds-data': + optional: true + '@cloudflare/workers-types': + optional: true + '@electric-sql/pglite': + optional: true + '@libsql/client': + optional: true + '@libsql/client-wasm': + optional: true + '@neondatabase/serverless': + optional: true + '@op-engineering/op-sqlite': + optional: true + '@opentelemetry/api': + optional: true + '@planetscale/database': + optional: true + '@prisma/client': + optional: true + '@tidbcloud/serverless': + optional: true + '@types/better-sqlite3': + optional: true + '@types/pg': + optional: true + '@types/react': + optional: true + '@types/sql.js': + optional: true + '@vercel/postgres': + optional: true + '@xata.io/client': + optional: true + better-sqlite3: + optional: true + bun-types: + optional: true + expo-sqlite: + optional: true + knex: + optional: true + kysely: + optional: true + mysql2: + optional: true + pg: + optional: true + postgres: + optional: true + prisma: + optional: true + react: + optional: true + sql.js: + optional: true + sqlite3: + optional: true + dset@3.1.4: resolution: {integrity: sha512-2QF/g9/zTaPDc3BjNcVTGoBbXBgYfMTTceLaYcFJ/W9kggFUkhxD/hMEeuLKbugyef9SqAx8cpgwlIP/jinUTA==} engines: {node: '>=4'} @@ -1045,8 +1167,14 @@ packages: peerDependencies: graphql: 14 - 16 - graphql-yoga@5.10.8: - resolution: {integrity: sha512-a3qJOd7t/sWp6yQ0n+M/7KmhGRm6ulSeY7WTdyb/pPCZGccPW9iLz4O2k0DPsF50k8VHJLS2VSlnZOeqkR2mOg==} + graphql-scalars@1.24.0: + resolution: {integrity: sha512-olbFN39m0XsHHESACUdd7jWU/lGxMMS1B7NZ8XqpqhKZrjBxzeGYAnQ4Ax//huYds771wb7gCznA+65QDuUa+g==} + engines: {node: '>=10'} + peerDependencies: + graphql: ^0.8.0 || ^0.9.0 || ^0.10.0 || ^0.11.0 || ^0.12.0 || ^0.13.0 || ^14.0.0 || ^15.0.0 || ^16.0.0 + + graphql-yoga@5.10.9: + resolution: {integrity: sha512-g/DIVijSLt/ghzJkyaByapVhVIfqziqDNfVH0vNvjEnKpt7iBM/XV0SqdI06YvwSrnlFGheFeG8hevrwthwA+g==} engines: {node: '>=18.0.0'} peerDependencies: graphql: ^15.2.0 || ^16.0.0 @@ -1452,8 +1580,8 @@ packages: 
resolution: {integrity: sha512-ip4qdzjkAyDDZklUaZkcRFb2iA118H9SgRh8yzTkSQK8HilsOJF7rSY8HoW5+I0M46AZgX/pxbprf2vvzQCE0Q==} hasBin: true - ponder@0.8.17: - resolution: {integrity: sha512-p0gvs0CJpdJ6sf5OOQYXaIfmIeUVoTMkCbPVAJ1jK1O2m62ZnTlxpnGrPp5ZAWYxdlCSQQCpZpNhdsYGejGK+g==} + ponder@0.8.24: + resolution: {integrity: sha512-WMj9FmlY+A2Wb07rHbhekai9Z/JsCFz31+7+Zfjg5I933LbV3FeWYy/q277A4h7ai9o/yrVBfkL8kbUmO40Y7g==} engines: {node: '>=18.14'} hasBin: true peerDependencies: @@ -2458,6 +2586,8 @@ snapshots: chalk@5.4.1: {} + change-case@5.4.4: {} + chokidar@3.6.0: dependencies: anymatch: 3.1.3 @@ -2588,6 +2718,15 @@ snapshots: pg: 8.13.1 react: 18.3.1 + drizzle-orm@0.38.3(@electric-sql/pglite@0.2.13)(@opentelemetry/api@1.9.0)(@types/pg@8.11.10)(kysely@0.26.3)(pg@8.13.1)(react@18.3.1): + optionalDependencies: + '@electric-sql/pglite': 0.2.13 + '@opentelemetry/api': 1.9.0 + '@types/pg': 8.11.10 + kysely: 0.26.3 + pg: 8.13.1 + react: 18.3.1 + dset@3.1.4: {} eastasianwidth@0.2.0: {} @@ -2772,7 +2911,12 @@ snapshots: transitivePeerDependencies: - encoding - graphql-yoga@5.10.8(graphql@16.10.0): + graphql-scalars@1.24.0(graphql@16.10.0): + dependencies: + graphql: 16.10.0 + tslib: 2.8.1 + + graphql-yoga@5.10.9(graphql@16.10.0): dependencies: '@envelop/core': 5.0.2 '@graphql-tools/executor': 1.3.10(graphql@16.10.0) @@ -3157,7 +3301,7 @@ snapshots: sonic-boom: 3.8.1 thread-stream: 2.7.0 - ponder@0.8.17(@opentelemetry/api@1.9.0)(@types/node@20.17.10)(@types/pg@8.11.10)(hono@4.6.14)(typescript@5.7.2)(viem@2.21.57(typescript@5.7.2)): + ponder@0.8.24(@opentelemetry/api@1.9.0)(@types/node@20.17.10)(@types/pg@8.11.10)(hono@4.6.14)(typescript@5.7.2)(viem@2.21.57(typescript@5.7.2)): dependencies: '@babel/code-frame': 7.26.2 '@commander-js/extra-typings': 12.1.0(commander@12.1.0) @@ -3176,7 +3320,7 @@ snapshots: drizzle-orm: 0.36.4(@electric-sql/pglite@0.2.13)(@opentelemetry/api@1.9.0)(@types/pg@8.11.10)(kysely@0.26.3)(pg@8.13.1)(react@18.3.1) glob: 10.4.5 graphql: 16.10.0 - graphql-yoga: 
5.10.8(graphql@16.10.0) + graphql-yoga: 5.10.9(graphql@16.10.0) hono: 4.6.14 http-terminator: 3.2.0 ink: 4.4.1(react@18.3.1) diff --git a/src/api/graphql.ts b/src/api/graphql.ts index 7a7e275..5a20ab4 100644 --- a/src/api/graphql.ts +++ b/src/api/graphql.ts @@ -26,6 +26,7 @@ export type OnchainTable< enableRLS: () => Omit, "enableRLS">; }; +import { pascalCase } from "change-case"; import DataLoader from "dataloader"; import { type Column, @@ -90,15 +91,14 @@ import { GraphQLJSON } from "graphql-scalars"; type Parent = Record; type Context = { getDataLoader: ReturnType; - metadataStore: MetadataStore; + metadataStore: any; // NOTE: type metadataStore as any for now drizzle: Drizzle<{ [key: string]: OnchainTable }>; }; type PluralArgs = { where?: { [key: string]: number | string }; - after?: string; - before?: string; - limit?: number; + first?: number; + skip?: number; orderBy?: string; orderDirection?: "asc" | "desc"; }; @@ -199,11 +199,11 @@ export function buildGraphQLSchema(schema: Schema): GraphQLSchema { } const entityTypes: Record> = {}; - const entityPageTypes: Record = {}; + const entityPageTypes: Record = {}; for (const table of tables) { entityTypes[table.tsName] = new GraphQLObjectType({ - name: table.tsName, + name: pascalCase(table.tsName), // NOTE: PascalCase to match subgraph fields: () => { const fieldConfigMap: GraphQLFieldConfigMap = {}; @@ -295,9 +295,8 @@ export function buildGraphQLSchema(schema: Schema): GraphQLSchema { where: { type: referencedEntityFilterType }, orderBy: { type: GraphQLString }, orderDirection: { type: GraphQLString }, - before: { type: GraphQLString }, - after: { type: GraphQLString }, - limit: { type: GraphQLInt }, + first: { type: GraphQLInt }, + skip: { type: GraphQLInt }, }, resolve: (parent, args: PluralArgs, context, info) => { const relationalConditions = []; @@ -307,13 +306,10 @@ export function buildGraphQLSchema(schema: Schema): GraphQLSchema { relationalConditions.push(eq(column, value)); } - const 
includeTotalCount = selectionIncludesField(info, "totalCount"); - return executePluralQuery( referencedTable, context.drizzle, args, - includeTotalCount, relationalConditions, ); }, @@ -329,16 +325,9 @@ export function buildGraphQLSchema(schema: Schema): GraphQLSchema { }, }); - entityPageTypes[table.tsName] = new GraphQLObjectType({ - name: `${table.tsName}Page`, - fields: () => ({ - items: { - type: new GraphQLNonNull(new GraphQLList(new GraphQLNonNull(entityTypes[table.tsName]!))), - }, - pageInfo: { type: new GraphQLNonNull(GraphQLPageInfo) }, - totalCount: { type: new GraphQLNonNull(GraphQLInt) }, - }), - }); + entityPageTypes[table.tsName] = new GraphQLNonNull( + new GraphQLList(new GraphQLNonNull(entityTypes[table.tsName]!)), + ); } const queryFields: Record> = {}; @@ -374,19 +363,16 @@ export function buildGraphQLSchema(schema: Schema): GraphQLSchema { }; queryFields[pluralFieldName] = { - type: new GraphQLNonNull(entityPageType), + type: entityPageType, args: { where: { type: entityFilterType }, orderBy: { type: GraphQLString }, orderDirection: { type: GraphQLString }, - before: { type: GraphQLString }, - after: { type: GraphQLString }, - limit: { type: GraphQLInt }, + first: { type: GraphQLInt }, + skip: { type: GraphQLInt }, }, resolve: async (_parent, args: PluralArgs, context, info) => { - const includeTotalCount = selectionIncludesField(info, "totalCount"); - - return executePluralQuery(table, context.drizzle, args, includeTotalCount); + return executePluralQuery(table, context.drizzle, args); }, }; } @@ -506,7 +492,6 @@ async function executePluralQuery( table: TableRelationalConfig, drizzle: Drizzle<{ [key: string]: OnchainTable }>, args: PluralArgs, - includeTotalCount: boolean, extraConditions: (SQL | undefined)[] = [], ) { const rawTable = drizzle._.fullSchema[table.tsName]; @@ -514,11 +499,13 @@ async function executePluralQuery( if (rawTable === undefined || baseQuery === undefined) throw new Error(`Internal error: Table "${table.tsName}" not 
found in RQB`); - const limit = args.limit ?? DEFAULT_LIMIT; + const limit = args.first ?? DEFAULT_LIMIT; if (limit > MAX_LIMIT) { throw new Error(`Invalid limit. Got ${limit}, expected <=${MAX_LIMIT}.`); } + const skip = args.skip ?? 0; + const orderBySchema = buildOrderBySchema(table, args); const orderBy = orderBySchema.map(([columnName, direction]) => { const column = table.columns[columnName]; @@ -537,153 +524,14 @@ async function executePluralQuery( const whereConditions = buildWhereConditions(args.where, table.columns); - const after = args.after ?? null; - const before = args.before ?? null; - - if (after !== null && before !== null) { - throw new Error("Cannot specify both before and after cursors."); - } - - let startCursor = null; - let endCursor = null; - let hasPreviousPage = false; - let hasNextPage = false; - - const totalCountPromise = includeTotalCount - ? drizzle - .select({ count: count() }) - .from(rawTable) - .where(and(...whereConditions, ...extraConditions)) - .then((rows) => rows[0]?.count ?? null) - : Promise.resolve(null); - - // Neither cursors are specified, apply the order conditions and execute. - if (after === null && before === null) { - const [rows, totalCount] = await Promise.all([ - baseQuery.findMany({ - where: and(...whereConditions, ...extraConditions), - orderBy, - limit: limit + 1, - }), - totalCountPromise, - ]); - - if (rows.length === limit + 1) { - rows.pop(); - hasNextPage = true; - } - - startCursor = rows.length > 0 ? encodeCursor(orderBySchema, rows[0]!) : null; - endCursor = rows.length > 0 ? encodeCursor(orderBySchema, rows[rows.length - 1]!) : null; - - return { - items: rows, - totalCount, - pageInfo: { hasNextPage, hasPreviousPage, startCursor, endCursor }, - }; - } - - if (after !== null) { - // User specified an 'after' cursor. 
- const cursorObject = decodeCursor(after); - const cursorCondition = buildCursorCondition(table, orderBySchema, "after", cursorObject); - - const [rows, totalCount] = await Promise.all([ - baseQuery.findMany({ - where: and(...whereConditions, cursorCondition, ...extraConditions), - orderBy, - limit: limit + 2, - }), - totalCountPromise, - ]); - - if (rows.length === 0) { - return { - items: rows, - totalCount, - pageInfo: { hasNextPage, hasPreviousPage, startCursor, endCursor }, - }; - } - - // If the cursor of the first returned record equals the `after` cursor, - // `hasPreviousPage` is true. Remove that record. - if (encodeCursor(orderBySchema, rows[0]!) === after) { - rows.shift(); - hasPreviousPage = true; - } else { - // Otherwise, remove the last record. - rows.pop(); - } - - // Now if the length of the records is still equal to limit + 1, - // there is a next page. - if (rows.length === limit + 1) { - rows.pop(); - hasNextPage = true; - } - - // Now calculate the cursors. - startCursor = rows.length > 0 ? encodeCursor(orderBySchema, rows[0]!) : null; - endCursor = rows.length > 0 ? encodeCursor(orderBySchema, rows[rows.length - 1]!) : null; - - return { - items: rows, - totalCount, - pageInfo: { hasNextPage, hasPreviousPage, startCursor, endCursor }, - }; - } - - // User specified a 'before' cursor. - const cursorObject = decodeCursor(before!); - const cursorCondition = buildCursorCondition(table, orderBySchema, "before", cursorObject); - - // Reverse the order by conditions to get the previous page, - // then reverse the results back to the original order. 
- const [rows, totalCount] = await Promise.all([ - baseQuery - .findMany({ - where: and(...whereConditions, cursorCondition, ...extraConditions), - orderBy: orderByReversed, - limit: limit + 2, - }) - .then((rows) => rows.reverse()), - totalCountPromise, - ]); - - if (rows.length === 0) { - return { - items: rows, - totalCount, - pageInfo: { hasNextPage, hasPreviousPage, startCursor, endCursor }, - }; - } - - // If the cursor of the last returned record equals the `before` cursor, - // `hasNextPage` is true. Remove that record. - if (encodeCursor(orderBySchema, rows[rows.length - 1]!) === before) { - rows.pop(); - hasNextPage = true; - } else { - // Otherwise, remove the first record. - rows.shift(); - } - - // Now if the length of the records is equal to limit + 1, we know - // there is a previous page. - if (rows.length === limit + 1) { - rows.shift(); - hasPreviousPage = true; - } - - // Now calculate the cursors. - startCursor = rows.length > 0 ? encodeCursor(orderBySchema, rows[0]!) : null; - endCursor = rows.length > 0 ? encodeCursor(orderBySchema, rows[rows.length - 1]!) 
: null; + const rows = await baseQuery.findMany({ + where: and(...whereConditions, ...extraConditions), + orderBy, + limit, + offset: skip, + }); - return { - items: rows, - totalCount, - pageInfo: { hasNextPage, hasPreviousPage, startCursor, endCursor }, - }; + return rows; } const conditionSuffixes = { @@ -808,7 +656,7 @@ function buildWhereConditions( conditions.push(notLike(column, `%${rawValue}`)); break; default: - never(conditionSuffix); + throw new Error(`Invalid Condition Suffix ${conditionSuffix}`); } } @@ -829,70 +677,16 @@ function buildOrderBySchema(table: TableRelationalConfig, args: PluralArgs) { return [...userColumns, ...missingPkColumns]; } -function encodeCursor( - orderBySchema: [string, "asc" | "desc"][], - row: { [k: string]: unknown }, -): string { - const cursorObject = Object.fromEntries( - orderBySchema.map(([columnName, _]) => [columnName, row[columnName]]), - ); - return encodeRowFragment(cursorObject); -} -function decodeCursor(cursor: string): { [k: string]: unknown } { - return decodeRowFragment(cursor); -} - function encodeRowFragment(rowFragment: { [k: string]: unknown }): string { return Buffer.from(serialize(rowFragment)).toString("base64"); } + function decodeRowFragment(encodedRowFragment: string): { [k: string]: unknown; } { return deserialize(Buffer.from(encodedRowFragment, "base64").toString()); } -function buildCursorCondition( - table: TableRelationalConfig, - orderBySchema: [string, "asc" | "desc"][], - direction: "after" | "before", - cursorObject: { [k: string]: unknown }, -): SQL | undefined { - const cursorColumns = orderBySchema.map(([columnName, orderDirection]) => { - const column = table.columns[columnName]; - if (column === undefined) - throw new Error(`Unknown column "${columnName}" used in orderBy argument`); - - const value = cursorObject[columnName]; - - let comparator: typeof gt | typeof lt; - let comparatorOrEquals: typeof gte | typeof lte; - if (direction === "after") { - [comparator, comparatorOrEquals] = 
orderDirection === "asc" ? [gt, gte] : [lt, lte]; - } else { - [comparator, comparatorOrEquals] = orderDirection === "asc" ? [lt, lte] : [gt, gte]; - } - - return { column, value, comparator, comparatorOrEquals }; - }); - - const buildCondition = (index: number): SQL | undefined => { - if (index === cursorColumns.length - 1) { - const { column, value, comparatorOrEquals } = cursorColumns[index]!; - return comparatorOrEquals(column, value); - } - - const currentColumn = cursorColumns[index]!; - const nextCondition = buildCondition(index + 1); - - return or( - currentColumn.comparator(currentColumn.column, currentColumn.value), - and(eq(currentColumn.column, currentColumn.value), nextCondition), - ); - }; - - return buildCondition(0); -} - export function buildDataLoaderCache({ drizzle }: { drizzle: Drizzle }) { const dataLoaderMap = new Map | undefined>(); return ({ table }: { table: TableRelationalConfig }) => { From 439846ee9de19d917639491089359af193217667 Mon Sep 17 00:00:00 2001 From: shrugs Date: Mon, 13 Jan 2025 14:15:34 -0600 Subject: [PATCH 03/27] wip: literally encode ids instead of eq conditions --- src/api/graphql.ts | 50 ++++++-------------------------- src/plugins/eth/ponder.config.ts | 2 +- 2 files changed, 10 insertions(+), 42 deletions(-) diff --git a/src/api/graphql.ts b/src/api/graphql.ts index 5a20ab4..a641d62 100644 --- a/src/api/graphql.ts +++ b/src/api/graphql.ts @@ -268,7 +268,9 @@ export function buildGraphQLSchema(schema: Schema): GraphQLSchema { rowFragment[referenceColumnTsName] = parent[fieldColumnTsName]; } - const encodedId = encodeRowFragment(rowFragment); + + const encodedId = rowFragment.id as string; + if (!encodedId) return null; return loader.load(encodedId); }, @@ -356,7 +358,7 @@ export function buildGraphQLSchema(schema: Schema): GraphQLSchema { // The `args` object here should be a valid `where` argument that // uses the `eq` shorthand for each primary key column. 
- const encodedId = encodeRowFragment(args); + const encodedId = args.id as string; return loader.load(encodedId); }, @@ -677,16 +679,6 @@ function buildOrderBySchema(table: TableRelationalConfig, args: PluralArgs) { return [...userColumns, ...missingPkColumns]; } -function encodeRowFragment(rowFragment: { [k: string]: unknown }): string { - return Buffer.from(serialize(rowFragment)).toString("base64"); -} - -function decodeRowFragment(encodedRowFragment: string): { - [k: string]: unknown; -} { - return deserialize(Buffer.from(encodedRowFragment, "base64").toString()); -} - export function buildDataLoaderCache({ drizzle }: { drizzle: Drizzle }) { const dataLoaderMap = new Map | undefined>(); return ({ table }: { table: TableRelationalConfig }) => { @@ -697,25 +689,16 @@ export function buildDataLoaderCache({ drizzle }: { drizzle: Drizzle }) let dataLoader = dataLoaderMap.get(table); if (dataLoader === undefined) { dataLoader = new DataLoader( - async (encodedIds) => { - const decodedRowFragments = encodedIds.map(decodeRowFragment); - - // The decoded row fragments should be valid `where` objects - // which use the `eq` object shorthand for each primary key column. 
- const idConditions = decodedRowFragments.map((decodedRowFragment) => - and(...buildWhereConditions(decodedRowFragment, table.columns)), - ); + async (ids) => { + // NOTE: use literal ids against id column + const idConditions = ids.map((id) => eq(table.columns["id"]!, id)); const rows = await baseQuery.findMany({ where: or(...idConditions), - limit: encodedIds.length, + limit: ids.length, }); - return decodedRowFragments.map((decodedRowFragment) => { - return rows.find((row) => - Object.entries(decodedRowFragment).every(([col, val]) => row[col] === val), - ); - }); + return ids.map((id) => rows.find((row) => row.id === id)); }, { maxBatchSize: 1_000 }, ); @@ -730,18 +713,3 @@ function getColumnTsName(column: Column) { const tableColumns = getTableColumns(column.table); return Object.entries(tableColumns).find(([_, c]) => c.name === column.name)![0]; } - -/** - * Returns `true` if the query includes a specific field. - * Does not consider nested selections; only works one "layer" deep. - */ -function selectionIncludesField(info: GraphQLResolveInfo, fieldName: string): boolean { - for (const fieldNode of info.fieldNodes) { - for (const selection of fieldNode.selectionSet?.selections ?? 
[]) { - if (selection.kind === "Field" && selection.name.value === fieldName) { - return true; - } - } - } - return false; -} diff --git a/src/plugins/eth/ponder.config.ts b/src/plugins/eth/ponder.config.ts index 0c0c490..83100e0 100644 --- a/src/plugins/eth/ponder.config.ts +++ b/src/plugins/eth/ponder.config.ts @@ -21,7 +21,7 @@ export const pluginNamespace = createPluginNamespace(ownedName); // constrain the ponder indexing between the following start/end blocks // https://ponder.sh/0_6/docs/contracts-and-networks#block-range const START_BLOCK: ContractConfig["startBlock"] = undefined; -const END_BLOCK: ContractConfig["endBlock"] = undefined; +const END_BLOCK: ContractConfig["endBlock"] = 4_000_000; export const config = createConfig({ networks: { From be24d63aa2c59f2cc5f0bd6776724c81ef56f37a Mon Sep 17 00:00:00 2001 From: shrugs Date: Mon, 13 Jan 2025 14:17:07 -0600 Subject: [PATCH 04/27] fix: store labelhash in domains --- src/handlers/Registry.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/src/handlers/Registry.ts b/src/handlers/Registry.ts index 0179b8d..8222f16 100644 --- a/src/handlers/Registry.ts +++ b/src/handlers/Registry.ts @@ -106,6 +106,7 @@ export const handleNewOwner = ownerId: owner, parentId: node, createdAt: event.block.timestamp, + labelhash: event.args.label, isMigrated, }); From cb5c2bd2a7c815aaa3f477acc673c870d51ee477 Mon Sep 17 00:00:00 2001 From: shrugs Date: Mon, 13 Jan 2025 16:21:18 -0600 Subject: [PATCH 05/27] fix: resolvedAddress is a relationship not an address --- ponder.schema.ts | 7 +++++-- src/handlers/Registry.ts | 2 +- src/handlers/Resolver.ts | 6 +++--- 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/ponder.schema.ts b/ponder.schema.ts index 87b8935..eae5373 100644 --- a/ponder.schema.ts +++ b/ponder.schema.ts @@ -17,7 +17,7 @@ export const domain = onchainTable("domains", (t) => ({ subdomainCount: t.integer("subdomain_count").notNull().default(0), // Address logged from current resolver, if any - 
resolvedAddress: t.hex("resolved_address"), + resolvedAddressId: t.hex("resolved_address_id"), // The resolver that controls the domain's settings resolverId: t.text(), @@ -45,7 +45,10 @@ export const domain = onchainTable("domains", (t) => ({ })); export const domainRelations = relations(domain, ({ one, many }) => ({ - // has one owner + resolvedAddress: one(account, { + fields: [domain.resolvedAddressId], + references: [account.id], + }), owner: one(account, { fields: [domain.ownerId], references: [account.id], diff --git a/src/handlers/Registry.ts b/src/handlers/Registry.ts index 8222f16..c6046ea 100644 --- a/src/handlers/Registry.ts +++ b/src/handlers/Registry.ts @@ -187,7 +187,7 @@ export async function handleNewResolver({ // update the domain to point to it, and denormalize the eth addr await context.db .update(schema.domain, { id: node }) - .set({ resolverId, resolvedAddress: resolver?.addrId }); + .set({ resolverId, resolvedAddressId: resolver?.addrId }); } // TODO: log DomainEvent diff --git a/src/handlers/Resolver.ts b/src/handlers/Resolver.ts index a70605d..390face 100644 --- a/src/handlers/Resolver.ts +++ b/src/handlers/Resolver.ts @@ -27,10 +27,10 @@ export async function handleAddrChanged({ addrId: address, }); - // materialize the resolved add to the domain iff this resolver is active + // materialize the resolved addr to the domain iff this resolver is active const domain = await context.db.find(schema.domain, { id: node }); if (domain?.resolverId === id) { - await context.db.update(schema.domain, { id: node }).set({ resolvedAddress: address }); + await context.db.update(schema.domain, { id: node }).set({ resolvedAddressId: address }); } // TODO: log ResolverEvent @@ -247,7 +247,7 @@ export async function handleVersionChanged({ // materialize the Domain's resolvedAddress field if (domain.resolverId === id) { - await context.db.update(schema.domain, { id: node }).set({ resolvedAddress: null }); + await context.db.update(schema.domain, { id: node 
}).set({ resolvedAddressId: null }); } // clear out the resolver's info From 8e49374d8210371fa720993a87fd6bca46f25ca5 Mon Sep 17 00:00:00 2001 From: shrugs Date: Mon, 13 Jan 2025 17:22:32 -0600 Subject: [PATCH 06/27] fix: lowercase resolver address in resolverId --- src/lib/ids.ts | 4 +++- src/plugins/eth/ponder.config.ts | 12 ++++++++---- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/src/lib/ids.ts b/src/lib/ids.ts index 4e21edf..71f1fb8 100644 --- a/src/lib/ids.ts +++ b/src/lib/ids.ts @@ -1,7 +1,9 @@ import type { Event } from "ponder:registry"; import type { Address, Hex } from "viem"; -export const makeResolverId = (node: Hex, address: Address) => [address, node].join("-"); +// NOTE: subgraph uses lowercase address here, viem provides us checksummed, so we lowercase it +export const makeResolverId = (node: Hex, address: Address) => + [address.toLowerCase(), node].join("-"); // https://github.com/ensdomains/ens-subgraph/blob/master/src/utils.ts#L5 // produces `blocknumber-logIndex` or `blocknumber-logindex-transferindex` diff --git a/src/plugins/eth/ponder.config.ts b/src/plugins/eth/ponder.config.ts index 83100e0..36a9a9f 100644 --- a/src/plugins/eth/ponder.config.ts +++ b/src/plugins/eth/ponder.config.ts @@ -23,31 +23,35 @@ export const pluginNamespace = createPluginNamespace(ownedName); const START_BLOCK: ContractConfig["startBlock"] = undefined; const END_BLOCK: ContractConfig["endBlock"] = 4_000_000; +const REGISTRY_OLD_ADDRESS = "0x314159265dd8dbb310642f98f50c066173c1259b"; +const REGISTRY_ADDRESS = "0x00000000000C2E074eC69A0dFb2997BA6C7d2e1e"; + export const config = createConfig({ networks: { mainnet: { chainId: mainnet.id, transport: http(process.env[`RPC_URL_${mainnet.id}`]), + maxRequestsPerSecond: 500, }, }, contracts: { [pluginNamespace("RegistryOld")]: { network: "mainnet", abi: Registry, - address: "0x314159265dd8dbb310642f98f50c066173c1259b", + address: REGISTRY_OLD_ADDRESS, ...blockConfig(START_BLOCK, 3327417, END_BLOCK), }, 
[pluginNamespace("Registry")]: { network: "mainnet", abi: Registry, - address: "0x00000000000C2E074eC69A0dFb2997BA6C7d2e1e", + address: REGISTRY_ADDRESS, ...blockConfig(START_BLOCK, 9380380, END_BLOCK), }, [pluginNamespace("OldRegistryResolvers")]: { network: "mainnet", abi: RESOLVER_ABI, address: factory({ - address: "0x314159265dd8dbb310642f98f50c066173c1259b", + address: REGISTRY_OLD_ADDRESS, event: getAbiItem({ abi: Registry, name: "NewResolver" }), parameter: "resolver", }), @@ -57,7 +61,7 @@ export const config = createConfig({ network: "mainnet", abi: RESOLVER_ABI, address: factory({ - address: "0x00000000000C2E074eC69A0dFb2997BA6C7d2e1e", + address: REGISTRY_ADDRESS, event: getAbiItem({ abi: Registry, name: "NewResolver" }), parameter: "resolver", }), From 78cd07fe5b69bcd2c18cb7e2a84362ef94108cb0 Mon Sep 17 00:00:00 2001 From: shrugs Date: Mon, 13 Jan 2025 17:55:48 -0600 Subject: [PATCH 07/27] fix: (in)correctly mark root as migrated and include exception in NewResolver to match subgraph behavior --- src/handlers/Registry.ts | 2 +- src/plugins/eth/handlers/Registry.ts | 22 +++++++++++++--------- 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/src/handlers/Registry.ts b/src/handlers/Registry.ts index c6046ea..92d506a 100644 --- a/src/handlers/Registry.ts +++ b/src/handlers/Registry.ts @@ -19,7 +19,7 @@ export async function setupRootNode({ context }: { context: Context }) { id: ROOT_NODE, ownerId: zeroAddress, createdAt: 0n, - isMigrated: false, + isMigrated: true, }); } diff --git a/src/plugins/eth/handlers/Registry.ts b/src/plugins/eth/handlers/Registry.ts index 39b5b51..9c75cdd 100644 --- a/src/plugins/eth/handlers/Registry.ts +++ b/src/plugins/eth/handlers/Registry.ts @@ -8,7 +8,7 @@ import { handleTransfer, setupRootNode, } from "../../../handlers/Registry"; -import { makeSubnodeNamehash } from "../../../lib/subname-helpers"; +import { ROOT_NODE, makeSubnodeNamehash } from "../../../lib/subname-helpers"; import { pluginNamespace } from 
"../ponder.config";
 
 // a domain is migrated iff it exists and isMigrated is set to true, otherwise it is not
@@ -30,15 +30,13 @@ export default function () {
   });
 
   ponder.on(pluginNamespace("RegistryOld:NewResolver"), async ({ context, event }) => {
-    // NOTE: the subgraph makes an exception for the root node here
-    // but i don't know that that's necessary, as in ponder our root node starts out
-    // unmigrated and once the NewOwner event is emitted by the new registry,
-    // the root will be considered migrated
-    // https://github.com/ensdomains/ens-subgraph/blob/master/src/ensRegistry.ts#L246
-
-    // otherwise, only handle iff not migrated
     const isMigrated = await isDomainMigrated(context, event.args.node);
-    if (isMigrated) return;
+    const isRootNode = event.args.node === ROOT_NODE;
+
+    // inverted logic of https://github.com/ensdomains/ens-subgraph/blob/master/src/ensRegistry.ts#L246
+    // NOTE: the subgraph must include an exception here for the root node because it starts out
+    // isMigrated: true, but we definitely still want to handle NewResolver events for it.
+    if (isMigrated && !isRootNode) return;
 
     return handleNewResolver({ context, event });
   });
@@ -49,6 +47,12 @@ export default function () {
   });
 
   ponder.on(pluginNamespace("RegistryOld:Transfer"), async ({ context, event }) => {
+    // NOTE: this logic derived from the subgraph introduces a bug for queries with a blockheight
+    // below 9380380, when the new Registry was deployed, as it implicitly ignores Transfer events
+    // of the ROOT_NODE. as a result, the root node's owner is always zeroAddress until the new
+    // Registry events are picked up. for backwards compatibility this behavior is re-implemented
+    // here. 
+ const isMigrated = await isDomainMigrated(context, event.args.node); if (isMigrated) return; return handleTransfer({ context, event }); From bf99f3c13d071f7d7bdb8be4cb68383fcea5f940 Mon Sep 17 00:00:00 2001 From: shrugs Date: Mon, 13 Jan 2025 18:46:08 -0600 Subject: [PATCH 08/27] fix: default null array fields for backwards compat --- ponder.schema.ts | 6 ++++-- src/api/graphql.ts | 1 - 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/ponder.schema.ts b/ponder.schema.ts index eae5373..e38f2d0 100644 --- a/ponder.schema.ts +++ b/ponder.schema.ts @@ -108,9 +108,11 @@ export const resolver = onchainTable("resolvers", (t) => ({ // The content hash for this resolver, in binary format contentHash: t.text("content_hash"), // The set of observed text record keys for this resolver - texts: t.text().array().notNull().default([]), + // NOTE: we avoid .notNull.default([]) to match subgraph behavior + texts: t.text().array(), // The set of observed SLIP-44 coin types for this resolver - coinTypes: t.bigint("coin_types").array().notNull().default([]), + // NOTE: we avoid .notNull.default([]) to match subgraph behavior + coinTypes: t.bigint("coin_types").array(), // TODO: has many events })); diff --git a/src/api/graphql.ts b/src/api/graphql.ts index a641d62..41d73d5 100644 --- a/src/api/graphql.ts +++ b/src/api/graphql.ts @@ -81,7 +81,6 @@ import { GraphQLNonNull, GraphQLObjectType, type GraphQLOutputType, - type GraphQLResolveInfo, GraphQLScalarType, GraphQLSchema, GraphQLString, From b71c8e097071a85d30379ac6b4476dd9fda395f4 Mon Sep 17 00:00:00 2001 From: shrugs Date: Mon, 13 Jan 2025 18:48:48 -0600 Subject: [PATCH 09/27] docs: add documentation to graphql.ts --- src/api/graphql.ts | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/api/graphql.ts b/src/api/graphql.ts index 41d73d5..f5c9d13 100644 --- a/src/api/graphql.ts +++ b/src/api/graphql.ts @@ -1,6 +1,11 @@ /** * This is an autogenerated graphql schema, initially based on ponder's, 
designed to mimic * the subgraph graphql api for queries we've deemed relevant (see docs). + * + * 1. inlines some ponder internal types + * 2. removes ponder's encoded id params in favor of literal ids + * 3. implement subgraph's simpler pagination style with first & skip w/out Page types + * 4. PascalCase entity names */ // here we inline the following types from this original import @@ -38,7 +43,6 @@ import { arrayContained, arrayContains, asc, - count, createTableRelationsHelpers, desc, eq, From ca7f638dc2598cc89ebd1893441ca15fece2b25f Mon Sep 17 00:00:00 2001 From: shrugs Date: Mon, 13 Jan 2025 18:51:26 -0600 Subject: [PATCH 10/27] fix: remove unnecesary context injection --- src/api/index.ts | 11 +---------- src/api/middleware.ts | 10 ++++++++-- 2 files changed, 9 insertions(+), 12 deletions(-) diff --git a/src/api/index.ts b/src/api/index.ts index 985c2d9..d0708d5 100644 --- a/src/api/index.ts +++ b/src/api/index.ts @@ -1,15 +1,6 @@ import { ponder } from "ponder:registry"; -import { default as schema } from "ponder:schema"; -import { createMiddleware } from "hono/factory"; -import { buildGraphQLSchema } from "./graphql"; import { graphql } from "./middleware"; -// inject our custom schema into the hono context, to be used in middleware.ts -const overrideGraphqlSchemaMiddleware = createMiddleware(async (c, next) => { - c.set("graphqlSchema", buildGraphQLSchema(schema)); - return await next(); -}); - -ponder.use(overrideGraphqlSchemaMiddleware); +// use our custom graphql middleware ponder.use("/", graphql()); diff --git a/src/api/middleware.ts b/src/api/middleware.ts index 4a15a8e..14e3281 100644 --- a/src/api/middleware.ts +++ b/src/api/middleware.ts @@ -1,6 +1,8 @@ /** * This is ponder's graphql/middleware.ts, copied to fix module realm errors. - * The only other change is enabling graphql-yoga's GraphiQL. + * The following changes were made: + * 1. removed ponder's GraphiQL, enabled graphql-yoga's GraphiQL. + * 2. 
builds our custom schema instead of the one provided in hono context * https://github.com/ponder-sh/ponder/blob/0a5645ca8dec327b0c21da432ee00810edeb087c/packages/core/src/graphql/middleware.ts */ @@ -11,6 +13,10 @@ import { type YogaServerInstance, createYoga } from "graphql-yoga"; import { createMiddleware } from "hono/factory"; import { buildDataLoaderCache } from "./graphql"; +import { default as schema } from "ponder:schema"; + +import { buildGraphQLSchema } from "./graphql"; + /** * Middleware for GraphQL with an interactive web view. * @@ -45,7 +51,7 @@ export const graphql = ( return createMiddleware(async (c) => { if (yoga === undefined) { const metadataStore = c.get("metadataStore"); - const graphqlSchema = c.get("graphqlSchema"); + const graphqlSchema = buildGraphQLSchema(schema); const drizzle = c.get("db"); yoga = createYoga({ From 356a45f63caf20b71dca8f302fd441198ac7f45a Mon Sep 17 00:00:00 2001 From: shrugs Date: Mon, 13 Jan 2025 19:05:20 -0600 Subject: [PATCH 11/27] fix: coinType, texts array default, makeResolverId arg order --- src/handlers/Resolver.ts | 26 ++++++++++++++------------ src/lib/ids.ts | 2 +- 2 files changed, 15 insertions(+), 13 deletions(-) diff --git a/src/handlers/Resolver.ts b/src/handlers/Resolver.ts index 390face..0475b7d 100644 --- a/src/handlers/Resolver.ts +++ b/src/handlers/Resolver.ts @@ -19,7 +19,7 @@ export async function handleAddrChanged({ const { a: address, node } = event.args; await upsertAccount(context, address); - const id = makeResolverId(node, event.log.address); + const id = makeResolverId(event.log.address, node); await upsertResolver(context, { id, domainId: node, @@ -49,7 +49,7 @@ export async function handleAddressChanged({ const { node, coinType, newAddress } = event.args; await upsertAccount(context, newAddress); - const id = makeResolverId(node, event.log.address); + const id = makeResolverId(event.log.address, node); const resolver = await upsertResolver(context, { id, domainId: node, @@ -59,7 +59,7 @@ 
export async function handleAddressChanged({ // upsert the new coinType await context.db .update(schema.resolver, { id }) - .set({ coinTypes: uniq([...resolver.coinTypes, coinType]) }); + .set({ coinTypes: uniq([...(resolver.coinTypes ?? []), coinType]) }); // TODO: log ResolverEvent } @@ -77,7 +77,7 @@ export async function handleNameChanged({ const { node, name } = event.args; if (hasNullByte(name)) return; - const id = makeResolverId(node, event.log.address); + const id = makeResolverId(event.log.address, node); await upsertResolver(context, { id, domainId: node, @@ -98,7 +98,7 @@ export async function handleABIChanged({ }; }) { const { node } = event.args; - const id = makeResolverId(node, event.log.address); + const id = makeResolverId(event.log.address, node); const resolver = await upsertResolver(context, { id, domainId: node, @@ -119,7 +119,7 @@ export async function handlePubkeyChanged({ }; }) { const { node } = event.args; - const id = makeResolverId(node, event.log.address); + const id = makeResolverId(event.log.address, node); const resolver = await upsertResolver(context, { id, domainId: node, @@ -140,7 +140,7 @@ export async function handleTextChanged({ }; }) { const { node, key } = event.args; - const id = makeResolverId(node, event.log.address); + const id = makeResolverId(event.log.address, node); const resolver = await upsertResolver(context, { id, domainId: node, @@ -148,7 +148,9 @@ export async function handleTextChanged({ }); // upsert new key - await context.db.update(schema.resolver, { id }).set({ texts: uniq([...resolver.texts, key]) }); + await context.db + .update(schema.resolver, { id }) + .set({ texts: uniq([...(resolver.texts ?? 
[]), key]) }); // TODO: log ResolverEvent } @@ -164,7 +166,7 @@ export async function handleContenthashChanged({ }; }) { const { node, hash } = event.args; - const id = makeResolverId(node, event.log.address); + const id = makeResolverId(event.log.address, node); await upsertResolver(context, { id, domainId: node, @@ -190,7 +192,7 @@ export async function handleInterfaceChanged({ }; }) { const { node } = event.args; - const id = makeResolverId(node, event.log.address); + const id = makeResolverId(event.log.address, node); await upsertResolver(context, { id, domainId: node, @@ -216,7 +218,7 @@ export async function handleAuthorisationChanged({ }; }) { const { node } = event.args; - const id = makeResolverId(node, event.log.address); + const id = makeResolverId(event.log.address, node); await upsertResolver(context, { id, domainId: node, @@ -241,7 +243,7 @@ export async function handleVersionChanged({ }) { // a version change nulls out the resolver const { node } = event.args; - const id = makeResolverId(node, event.log.address); + const id = makeResolverId(event.log.address, node); const domain = await context.db.find(schema.domain, { id: node }); if (!domain) throw new Error("domain expected"); diff --git a/src/lib/ids.ts b/src/lib/ids.ts index 71f1fb8..9d9e763 100644 --- a/src/lib/ids.ts +++ b/src/lib/ids.ts @@ -2,7 +2,7 @@ import type { Event } from "ponder:registry"; import type { Address, Hex } from "viem"; // NOTE: subgraph uses lowercase address here, viem provides us checksummed, so we lowercase it -export const makeResolverId = (node: Hex, address: Address) => +export const makeResolverId = (address: Address, node: Hex) => [address.toLowerCase(), node].join("-"); // https://github.com/ensdomains/ens-subgraph/blob/master/src/utils.ts#L5 From 3d37215bc28f0433a4329d9c8d2c8cc2fdee3fa3 Mon Sep 17 00:00:00 2001 From: shrugs Date: Mon, 13 Jan 2025 19:20:50 -0600 Subject: [PATCH 12/27] fix: handle dangling makeResolverId refactor --- src/handlers/Registry.ts | 2 +- 
1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/handlers/Registry.ts b/src/handlers/Registry.ts index 92d506a..36d893d 100644 --- a/src/handlers/Registry.ts +++ b/src/handlers/Registry.ts @@ -173,7 +173,7 @@ export async function handleNewResolver({ await recursivelyRemoveEmptyDomainFromParentSubdomainCount(context, node); } else { // otherwise upsert the resolver - const resolverId = makeResolverId(node, resolverAddress); + const resolverId = makeResolverId(resolverAddress, node); const resolver = await context.db .insert(schema.resolver) From 6c515029986e881ac9e0af086b6b1659ad3e567e Mon Sep 17 00:00:00 2001 From: shrugs Date: Mon, 13 Jan 2025 20:12:46 -0600 Subject: [PATCH 13/27] fix: index all resolver-looking events not just NewResolver --- src/handlers/Registry.ts | 10 +++++-- src/plugins/base.eth/ponder.config.ts | 24 ++++++++++++---- src/plugins/eth/handlers/Resolver.ts | 30 ------------------- src/plugins/eth/ponder.config.ts | 40 ++++++++++++++------------ src/plugins/linea.eth/ponder.config.ts | 22 ++++++++++---- 5 files changed, 64 insertions(+), 62 deletions(-) diff --git a/src/handlers/Registry.ts b/src/handlers/Registry.ts index 36d893d..742cb08 100644 --- a/src/handlers/Registry.ts +++ b/src/handlers/Registry.ts @@ -167,7 +167,9 @@ export async function handleNewResolver({ // if zeroing out a domain's resolver, remove the reference instead of tracking a zeroAddress Resolver // NOTE: old resolver resources are kept for event logs if (event.args.resolver === zeroAddress) { - await context.db.update(schema.domain, { id: node }).set({ resolverId: null }); + await context.db + .update(schema.domain, { id: node }) + .set({ resolverId: null, resolvedAddressId: null }); // garbage collect newly 'empty' domain iff necessary await recursivelyRemoveEmptyDomainFromParentSubdomainCount(context, node); @@ -182,12 +184,14 @@ export async function handleNewResolver({ domainId: event.args.node, address: event.args.resolver, }) - .onConflictDoNothing(); 
+ .onConflictDoUpdate({}); // noop update to return the existing record // update the domain to point to it, and denormalize the eth addr + // NOTE: this implements the logic as documented here + // https://github.com/ensdomains/ens-subgraph/blob/master/src/ensRegistry.ts#L193 await context.db .update(schema.domain, { id: node }) - .set({ resolverId, resolvedAddressId: resolver?.addrId }); + .set({ resolverId, resolvedAddressId: resolver.addrId }); } // TODO: log DomainEvent diff --git a/src/plugins/base.eth/ponder.config.ts b/src/plugins/base.eth/ponder.config.ts index 6340a2c..4a15e27 100644 --- a/src/plugins/base.eth/ponder.config.ts +++ b/src/plugins/base.eth/ponder.config.ts @@ -30,12 +30,24 @@ export const config = createConfig({ [pluginNamespace("Resolver")]: { network: "base", abi: L2Resolver, - address: factory({ - address: "0xb94704422c2a1e396835a571837aa5ae53285a95", - event: getAbiItem({ abi: Registry, name: "NewResolver" }), - parameter: "resolver", - }), - startBlock: 17575714, + // NOTE: this indexes every event ever emitted that looks like this + filter: { + event: [ + "AddrChanged", + "AddressChanged", + "NameChanged", + "ABIChanged", + "PubkeyChanged", + "TextChanged", + "ContenthashChanged", + "InterfaceChanged", + "VersionChanged", + "DNSRecordChanged", + "DNSRecordDeleted", + "DNSZonehashChanged", + ], + }, + startBlock: 17571480, }, [pluginNamespace("BaseRegistrar")]: { network: "base", diff --git a/src/plugins/eth/handlers/Resolver.ts b/src/plugins/eth/handlers/Resolver.ts index e7e0046..8fec2e1 100644 --- a/src/plugins/eth/handlers/Resolver.ts +++ b/src/plugins/eth/handlers/Resolver.ts @@ -17,36 +17,6 @@ import { import { pluginNamespace } from "../ponder.config"; export default function () { - // Old registry handlers - ponder.on(pluginNamespace("OldRegistryResolvers:AddrChanged"), handleAddrChanged); - ponder.on(pluginNamespace("OldRegistryResolvers:AddressChanged"), handleAddressChanged); - 
ponder.on(pluginNamespace("OldRegistryResolvers:NameChanged"), handleNameChanged); - ponder.on(pluginNamespace("OldRegistryResolvers:ABIChanged"), handleABIChanged); - ponder.on(pluginNamespace("OldRegistryResolvers:PubkeyChanged"), handlePubkeyChanged); - ponder.on( - pluginNamespace( - "OldRegistryResolvers:TextChanged(bytes32 indexed node, string indexed indexedKey, string key)", - ), - handleTextChanged, - ); - ponder.on( - pluginNamespace( - "OldRegistryResolvers:TextChanged(bytes32 indexed node, string indexed indexedKey, string key, string value)", - ), - handleTextChanged, - ); - ponder.on(pluginNamespace("OldRegistryResolvers:ContenthashChanged"), handleContenthashChanged); - ponder.on(pluginNamespace("OldRegistryResolvers:InterfaceChanged"), handleInterfaceChanged); - ponder.on( - pluginNamespace("OldRegistryResolvers:AuthorisationChanged"), - handleAuthorisationChanged, - ); - ponder.on(pluginNamespace("OldRegistryResolvers:VersionChanged"), handleVersionChanged); - ponder.on(pluginNamespace("OldRegistryResolvers:DNSRecordChanged"), handleDNSRecordChanged); - ponder.on(pluginNamespace("OldRegistryResolvers:DNSRecordDeleted"), handleDNSRecordDeleted); - ponder.on(pluginNamespace("OldRegistryResolvers:DNSZonehashChanged"), handleDNSZonehashChanged); - - // New registry handlers ponder.on(pluginNamespace("Resolver:AddrChanged"), handleAddrChanged); ponder.on(pluginNamespace("Resolver:AddressChanged"), handleAddressChanged); ponder.on(pluginNamespace("Resolver:NameChanged"), handleNameChanged); diff --git a/src/plugins/eth/ponder.config.ts b/src/plugins/eth/ponder.config.ts index 36a9a9f..f676796 100644 --- a/src/plugins/eth/ponder.config.ts +++ b/src/plugins/eth/ponder.config.ts @@ -1,5 +1,5 @@ -import { ContractConfig, createConfig, factory, mergeAbis } from "ponder"; -import { http, getAbiItem } from "viem"; +import { ContractConfig, createConfig, mergeAbis } from "ponder"; +import { http } from "viem"; import { mainnet } from "viem/chains"; import { 
blockConfig } from "../../lib/helpers"; @@ -47,25 +47,29 @@ export const config = createConfig({ address: REGISTRY_ADDRESS, ...blockConfig(START_BLOCK, 9380380, END_BLOCK), }, - [pluginNamespace("OldRegistryResolvers")]: { - network: "mainnet", - abi: RESOLVER_ABI, - address: factory({ - address: REGISTRY_OLD_ADDRESS, - event: getAbiItem({ abi: Registry, name: "NewResolver" }), - parameter: "resolver", - }), - ...blockConfig(START_BLOCK, 9380380, END_BLOCK), - }, [pluginNamespace("Resolver")]: { network: "mainnet", abi: RESOLVER_ABI, - address: factory({ - address: REGISTRY_ADDRESS, - event: getAbiItem({ abi: Registry, name: "NewResolver" }), - parameter: "resolver", - }), - ...blockConfig(START_BLOCK, 9380380, END_BLOCK), + // NOTE: this indexes every event ever emitted that looks like this + filter: { + event: [ + "AddrChanged", + "AddressChanged", + "NameChanged", + "ABIChanged", + "PubkeyChanged", + "TextChanged(bytes32 indexed node, string indexed indexedKey, string key)", + "TextChanged(bytes32 indexed node, string indexed indexedKey, string key, string value)", + "ContenthashChanged", + "InterfaceChanged", + "AuthorisationChanged", + "VersionChanged", + "DNSRecordChanged", + "DNSRecordDeleted", + "DNSZonehashChanged", + ], + }, + ...blockConfig(START_BLOCK, 3327417, END_BLOCK), }, [pluginNamespace("BaseRegistrar")]: { network: "mainnet", diff --git a/src/plugins/linea.eth/ponder.config.ts b/src/plugins/linea.eth/ponder.config.ts index 0dcaa8a..77acf2a 100644 --- a/src/plugins/linea.eth/ponder.config.ts +++ b/src/plugins/linea.eth/ponder.config.ts @@ -36,11 +36,23 @@ export const config = createConfig({ [pluginNamespace("Resolver")]: { network: "linea", abi: Resolver, - address: factory({ - address: "0x50130b669B28C339991d8676FA73CF122a121267", - event: getAbiItem({ abi: Registry, name: "NewResolver" }), - parameter: "resolver", - }), + // NOTE: this indexes every event ever emitted that looks like this + filter: { + event: [ + "AddrChanged", + 
"AddressChanged", + "NameChanged", + "ABIChanged", + "PubkeyChanged", + "TextChanged", + "ContenthashChanged", + "InterfaceChanged", + "VersionChanged", + "DNSRecordChanged", + "DNSRecordDeleted", + "DNSZonehashChanged", + ], + }, ...blockConfig(START_BLOCK, 6682888, END_BLOCK), }, [pluginNamespace("BaseRegistrar")]: { From 592da2c63e01bbc00fb93acfa7cff6fbf568ca4d Mon Sep 17 00:00:00 2001 From: shrugs Date: Tue, 14 Jan 2025 11:28:42 -0600 Subject: [PATCH 14/27] docs: add v2 notes, add comment about root node isMigrated --- docs/V2.md | 2 ++ src/handlers/Registry.ts | 5 +++++ 2 files changed, 7 insertions(+) diff --git a/docs/V2.md b/docs/V2.md index 0c039ab..3d9544a 100644 --- a/docs/V2.md +++ b/docs/V2.md @@ -75,6 +75,8 @@ any resolver that implements the CCIP Read standard will have to have its record in the subgraph implementation, resolver handlers must upsert resolvers because people can set records etc for a node that has not (yet) specified this resolver as active, meaning the create in `Registry:NewResolver` has yet to fire. in the ideal scenario, this lookup is keyed only by `(chainId, address)` and we can use pure updates instead of an upsert +v1: resolvers are discovered by tracking all emissions of events that look like resolver spec events. very inefficient, as a resolver is only relevant to the protocol once it's been set as a resolver. for v2, we could use factory like normal, starting to track events on `Registry#NewResolver` and then backfilling the events (using the same handlers) with ponder's cached viem client. 
not sure if this is more or less intensive than just parsing every event ever + ### registrar the subgraph implements all of the BaseRegistrar, EthRegistrarController, and EthRegistrarControllerOld logic together diff --git a/src/handlers/Registry.ts b/src/handlers/Registry.ts index 742cb08..c3d93fb 100644 --- a/src/handlers/Registry.ts +++ b/src/handlers/Registry.ts @@ -19,6 +19,11 @@ export async function setupRootNode({ context }: { context: Context }) { id: ROOT_NODE, ownerId: zeroAddress, createdAt: 0n, + // NOTE: we initialize the root node as migrated because: + // 1. this matches subgraph's existing behavior, despite the root node not technically being + // migrated until the new registry is deployed and + // 2. other plugins (base, linea) don't have the concept of migration but defaulting to true + // is a reasonable behavior isMigrated: true, }); } From 2eac2088148945295ee9fb2a92068c2bb9461212 Mon Sep 17 00:00:00 2001 From: shrugs Date: Tue, 14 Jan 2025 16:08:45 -0600 Subject: [PATCH 15/27] fix: use enum for OrderDirection to match subgraph --- src/api/graphql.ts | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/src/api/graphql.ts b/src/api/graphql.ts index f5c9d13..1fda9fe 100644 --- a/src/api/graphql.ts +++ b/src/api/graphql.ts @@ -109,6 +109,14 @@ type PluralArgs = { const DEFAULT_LIMIT = 50 as const; const MAX_LIMIT = 1000 as const; +const OrderDirectionEnum = new GraphQLEnumType({ + name: "OrderDirection", + values: { + asc: { value: "asc" }, + desc: { value: "desc" }, + }, +}); + export function buildGraphQLSchema(schema: Schema): GraphQLSchema { const tablesConfig = extractTablesRelationalConfig(schema, createTableRelationsHelpers); @@ -299,7 +307,7 @@ export function buildGraphQLSchema(schema: Schema): GraphQLSchema { args: { where: { type: referencedEntityFilterType }, orderBy: { type: GraphQLString }, - orderDirection: { type: GraphQLString }, + orderDirection: { type: OrderDirectionEnum }, first: { type: GraphQLInt 
}, skip: { type: GraphQLInt }, }, @@ -372,7 +380,7 @@ export function buildGraphQLSchema(schema: Schema): GraphQLSchema { args: { where: { type: entityFilterType }, orderBy: { type: GraphQLString }, - orderDirection: { type: GraphQLString }, + orderDirection: { type: OrderDirectionEnum }, first: { type: GraphQLInt }, skip: { type: GraphQLInt }, }, From d67d0b8cb5f6cd87bda06620002c7e59a2897640 Mon Sep 17 00:00:00 2001 From: shrugs Date: Tue, 14 Jan 2025 16:44:36 -0600 Subject: [PATCH 16/27] feat: implement scalar Entity_orderBy enums --- src/api/graphql.ts | 39 ++++++++++++++++++++++++++++++--------- 1 file changed, 30 insertions(+), 9 deletions(-) diff --git a/src/api/graphql.ts b/src/api/graphql.ts index 1fda9fe..68c8f94 100644 --- a/src/api/graphql.ts +++ b/src/api/graphql.ts @@ -74,6 +74,7 @@ import { import { GraphQLBoolean, GraphQLEnumType, + type GraphQLEnumValueConfigMap, type GraphQLFieldConfig, type GraphQLFieldConfigMap, GraphQLFloat, @@ -138,6 +139,26 @@ export function buildGraphQLSchema(schema: Schema): GraphQLSchema { }); } + // construct Entity_orderBy enums + const entityOrderByEnums: Record = {}; + for (const table of tables) { + // Scalar fields + const values = Object.keys(table.columns).reduce( + (acc, columnName) => ({ + ...acc, + [columnName]: { value: columnName }, + }), + {}, + ); + + // TODO: relationships i.e. parent__labelName iff necessary + + entityOrderByEnums[table.tsName] = new GraphQLEnumType({ + name: `${pascalCase(table.tsName)}_orderBy`, + values, + }); + } + const entityFilterTypes: Record = {}; for (const table of tables) { const filterType = new GraphQLInputObjectType({ @@ -302,11 +323,15 @@ export function buildGraphQLSchema(schema: Schema): GraphQLSchema { const fields = oneRelation.config?.fields ?? []; const references = oneRelation.config?.references ?? 
[]; + const referencedEntityOrderByType = entityOrderByEnums[referencedTable.tsName]; + if (!referencedEntityOrderByType) + throw new Error(`Entity_orderBy Enum not found for ${referencedTable.tsName}`); + fieldConfigMap[relationName] = { type: referencedEntityPageType, args: { where: { type: referencedEntityFilterType }, - orderBy: { type: GraphQLString }, + orderBy: { type: referencedEntityOrderByType }, orderDirection: { type: OrderDirectionEnum }, first: { type: GraphQLInt }, skip: { type: GraphQLInt }, @@ -375,11 +400,14 @@ export function buildGraphQLSchema(schema: Schema): GraphQLSchema { }, }; + const entityOrderByType = entityOrderByEnums[table.tsName]; + if (!entityOrderByType) throw new Error(`Entity_orderBy Enum not found for ${table.tsName}`); + queryFields[pluralFieldName] = { type: entityPageType, args: { where: { type: entityFilterType }, - orderBy: { type: GraphQLString }, + orderBy: { type: entityOrderByType }, orderDirection: { type: OrderDirectionEnum }, first: { type: GraphQLInt }, skip: { type: GraphQLInt }, @@ -527,13 +555,6 @@ async function executePluralQuery( } return direction === "asc" ? asc(column) : desc(column); }); - const orderByReversed = orderBySchema.map(([columnName, direction]) => { - const column = table.columns[columnName]; - if (column === undefined) { - throw new Error(`Unknown column "${columnName}" used in orderBy argument`); - } - return direction === "asc" ? 
desc(column) : asc(column); - }); const whereConditions = buildWhereConditions(args.where, table.columns); From ba27584260bf6855b41803ae899c180086b6ca71 Mon Sep 17 00:00:00 2001 From: shrugs Date: Wed, 15 Jan 2025 10:40:10 -0600 Subject: [PATCH 17/27] docs: add rest of graphql autogen api spec --- README.md | 13 +++++++++---- src/plugins/eth/ponder.config.ts | 2 +- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 2364136..4187963 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ estimated backfill time @ 50rps = 24-36 hours on M1 Macbook (~10x speedup) -### goals +## goals > an optimized, multichain ens indexer that the community loves and integrates @@ -23,8 +23,13 @@ estimated backfill time @ 50rps = 24-36 hours on M1 Macbook (~10x speedup) - (possible) continued backwards compatibility with subgraph - support indexing subset of data, i.e. only domains under parent node -#### next up +## next up +- [ ] `where` filter logic [supported suffixes](https://thegraph.com/docs/en/subgraphs/querying/graphql-api/#all-filters) +- [ ] [fulltext search queries](https://thegraph.com/docs/en/subgraphs/querying/graphql-api/#fulltext-search-queries) +- [ ] `Entity_orderBy` 1-level deep + - `sort by one-level deep String or ID types on @entity and @derivedFrom fields` +- [ ] subgraph [Meta](https://thegraph.com/docs/en/subgraphs/querying/graphql-api/#subgraph-metadata) object - [ ] confirm all the schema relations are configured correctly - [ ] integrate rainbow tables for label healing - load the table dump into pglite (or just postgres) & query synchronously to match existing behavior @@ -37,12 +42,12 @@ estimated backfill time @ 50rps = 24-36 hours on M1 Macbook (~10x speedup) - more recent endlbock for gut checks - [ ] better understand reverse resolution & how that pertains to L2 primary names and impacts the future schema, etc -### notes +## notes - eth registry is ERC721, has many controllers (), no knowledge of pricing — 
delegated to registrar controllers - eth old registry & new registry migration due to security issue, new then fallback to old, therefore ignore all old evens on domains that have been seen by new registry -### ENSIP Ideas +## ENSIP Ideas - unable to automatically identify subname registries via onchain event, CCIP standard dosn't include any info about data source, so we'll need to encode manually for now - ENSIP - shared interface for subdomain registrars diff --git a/src/plugins/eth/ponder.config.ts b/src/plugins/eth/ponder.config.ts index f676796..4e5914f 100644 --- a/src/plugins/eth/ponder.config.ts +++ b/src/plugins/eth/ponder.config.ts @@ -21,7 +21,7 @@ export const pluginNamespace = createPluginNamespace(ownedName); // constrain the ponder indexing between the following start/end blocks // https://ponder.sh/0_6/docs/contracts-and-networks#block-range const START_BLOCK: ContractConfig["startBlock"] = undefined; -const END_BLOCK: ContractConfig["endBlock"] = 4_000_000; +const END_BLOCK: ContractConfig["endBlock"] = 21_000_000; const REGISTRY_OLD_ADDRESS = "0x314159265dd8dbb310642f98f50c066173c1259b"; const REGISTRY_ADDRESS = "0x00000000000C2E074eC69A0dFb2997BA6C7d2e1e"; From 8ee2353e4a9d6b8e481e726aa1f373ba80527de3 Mon Sep 17 00:00:00 2001 From: shrugs Date: Wed, 15 Jan 2025 10:46:32 -0600 Subject: [PATCH 18/27] docs: update backfill time estimation --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 4187963..24b3751 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ > powered by ponder -estimated backfill time @ 50rps = 24-36 hours on M1 Macbook (~10x speedup) +estimated mainnet backfill time @ <=500rps = ~12 hours on M1 Macbook (~14x-28x speedup vs subgraph) ## goals From d2e0eab640a127520775a7e796e097367b36caaa Mon Sep 17 00:00:00 2001 From: shrugs Date: Wed, 15 Jan 2025 11:02:28 -0600 Subject: [PATCH 19/27] feat: host subgraph-compatible api at /subgraph instead of root --- src/api/index.ts | 10 
+++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/api/index.ts b/src/api/index.ts index d0708d5..5fcbea8 100644 --- a/src/api/index.ts +++ b/src/api/index.ts @@ -1,6 +1,10 @@ import { ponder } from "ponder:registry"; -import { graphql } from "./middleware"; +import { graphql as ponderGraphQL } from "ponder"; +import { graphql as subgraphGraphQL } from "./middleware"; -// use our custom graphql middleware -ponder.use("/", graphql()); +// use ponder middleware at root +ponder.use("/", ponderGraphQL()); + +// use our custom graphql middleware at /subgraph +ponder.use("/subgraph", subgraphGraphQL()); From 5608ed0e18d38b3926a7df8ae061562c5af0165e Mon Sep 17 00:00:00 2001 From: shrugs Date: Wed, 15 Jan 2025 11:05:27 -0600 Subject: [PATCH 20/27] fix: handle subgraph case where version is changed but domain doesn't exist --- src/handlers/Resolver.ts | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/handlers/Resolver.ts b/src/handlers/Resolver.ts index 0475b7d..25b49c6 100644 --- a/src/handlers/Resolver.ts +++ b/src/handlers/Resolver.ts @@ -245,10 +245,9 @@ export async function handleVersionChanged({ const { node } = event.args; const id = makeResolverId(event.log.address, node); const domain = await context.db.find(schema.domain, { id: node }); - if (!domain) throw new Error("domain expected"); - // materialize the Domain's resolvedAddress field - if (domain.resolverId === id) { + // materialize the Domain's resolvedAddress field iff exists + if (domain && domain.resolverId === id) { await context.db.update(schema.domain, { id: node }).set({ resolvedAddressId: null }); } From 7990a175f6d6d525c3bdc98a5da6adaed362a01a Mon Sep 17 00:00:00 2001 From: shrugs Date: Wed, 15 Jan 2025 11:06:20 -0600 Subject: [PATCH 21/27] chore: better comment on the other domain expected error --- src/handlers/Registrar.ts | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/handlers/Registrar.ts b/src/handlers/Registrar.ts 
index 90c6057..9fc68ee 100644 --- a/src/handlers/Registrar.ts +++ b/src/handlers/Registrar.ts @@ -18,7 +18,9 @@ export const makeRegistrarHandlers = (ownedName: `${string}eth`) => { const node = makeSubnodeNamehash(ownedSubnameNode, label); const domain = await context.db.find(schema.domain, { id: node }); - if (!domain) throw new Error("domain expected"); + + // encode the runtime assertion here https://github.com/ensdomains/ens-subgraph/blob/master/src/ethRegistrar.ts#L101 + if (!domain) throw new Error("domain expected in setNamePreimage but not found"); if (domain.labelName !== name) { await context.db From 53c91fa37122af6e10e3941fee5970c8219ffc86 Mon Sep 17 00:00:00 2001 From: shrugs Date: Wed, 15 Jan 2025 11:22:55 -0600 Subject: [PATCH 22/27] docs: document which api features are explicitly not supported --- README.md | 5 ----- docs/GRAPHQL.md | 9 +++++++++ 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 24b3751..9328f0f 100644 --- a/README.md +++ b/README.md @@ -25,11 +25,6 @@ estimated mainnet backfill time @ <=500rps = ~12 hours on M1 Macbook (~14x-28x s ## next up -- [ ] `where` filter logic [supported suffixes](https://thegraph.com/docs/en/subgraphs/querying/graphql-api/#all-filters) -- [ ] [fulltext search queries](https://thegraph.com/docs/en/subgraphs/querying/graphql-api/#fulltext-search-queries) -- [ ] `Entity_orderBy` 1-level deep - - `sort by one-level deep String or ID types on @entity and @derivedFrom fields` -- [ ] subgraph [Meta](https://thegraph.com/docs/en/subgraphs/querying/graphql-api/#subgraph-metadata) object - [ ] confirm all the schema relations are configured correctly - [ ] integrate rainbow tables for label healing - load the table dump into pglite (or just postgres) & query synchronously to match existing behavior diff --git a/docs/GRAPHQL.md b/docs/GRAPHQL.md index 4b828be..ac5fb27 100644 --- a/docs/GRAPHQL.md +++ b/docs/GRAPHQL.md @@ -1,5 +1,14 @@ # graphql info/spec +## 
backwards-compatibility notes + +the following features of the subgraph graphql api are explicitly unsupported, because they are not used by ensjs or ens-app-v3 + +- `_nocase` case-insensitive where filters +- [fulltext search queries](https://thegraph.com/docs/en/subgraphs/querying/graphql-api/#fulltext-search-queries) +- 1-level-nested Entity `_orderBy` param +- subgraph `_Meta_` object (ponder's `_meta` is available) + ## goals 1. ponder indexer 1:1 equivalency of results as compared to subgraph From 22ace7c804bdc82ddebab3a8f8ff5a669d114580 Mon Sep 17 00:00:00 2001 From: shrugs Date: Wed, 15 Jan 2025 11:23:20 -0600 Subject: [PATCH 23/27] chore: remove subgraph-api todo from readme --- README.md | 4 ---- 1 file changed, 4 deletions(-) diff --git a/README.md b/README.md index 9328f0f..a5f31f8 100644 --- a/README.md +++ b/README.md @@ -29,10 +29,6 @@ estimated mainnet backfill time @ <=500rps = ~12 hours on M1 Macbook (~14x-28x s - [ ] integrate rainbow tables for label healing - load the table dump into pglite (or just postgres) & query synchronously to match existing behavior - https://github.com/graphprotocol/ens-rainbow -- [ ] subgraph graphql implementation within ponder - - [ ] implement subgraph-style pagination api - - [ ] support the well-known queries in `GRAPHQL.md` - - [ ] support collection queries as well, to power `snapshot-eq` - [ ] CI/CD with indexing? 
- more recent endlbock for gut checks - [ ] better understand reverse resolution & how that pertains to L2 primary names and impacts the future schema, etc From d7edae7ff0dc01d3d44cd9dfe2f742ea808407aa Mon Sep 17 00:00:00 2001 From: shrugs Date: Thu, 16 Jan 2025 16:19:24 -0600 Subject: [PATCH 24/27] chore: update package name to ensnode --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 052b4b7..8f9feb3 100644 --- a/package.json +++ b/package.json @@ -1,5 +1,5 @@ { - "name": "ens-multichain", + "name": "ensnode", "version": "0.0.1", "private": true, "type": "module", From b33099bb50b42dc7ba2b575abc5f36674d95d33d Mon Sep 17 00:00:00 2001 From: shrugs Date: Thu, 16 Jan 2025 16:21:26 -0600 Subject: [PATCH 25/27] docs: tidy v2 notes, move _nocase to potential todo --- README.md | 4 +++- docs/GRAPHQL.md | 1 - docs/V2.md | 2 ++ 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index a5f31f8..1534c94 100644 --- a/README.md +++ b/README.md @@ -25,12 +25,14 @@ estimated mainnet backfill time @ <=500rps = ~12 hours on M1 Macbook (~14x-28x s ## next up +- [ ] `_nocase` case-insensitive where filters + - not used interally but ensjs does technically expose this as an available filter to users - [ ] confirm all the schema relations are configured correctly - [ ] integrate rainbow tables for label healing - load the table dump into pglite (or just postgres) & query synchronously to match existing behavior - https://github.com/graphprotocol/ens-rainbow - [ ] CI/CD with indexing? - - more recent endlbock for gut checks + - integrate index to recent block (10m?) 
and validate with `snapshot-eq` before passing
hasNullByte, uniq } from "../lib/helpers"; import { makeResolverId } from "../lib/ids"; import { upsertAccount, upsertResolver } from "../lib/upserts"; +// NOTE: both subgraph and this indexer us upserts in this file because a 'Resolver' is _any_ +// contract on the chain that emits an event with this signature, which may or may not actually be +// an ENS-specific Resolver. because of this each even could theoretically be the first event the +// indexer has seen for a given Resolver id and therefore needs to use an upsert and not assume +// anything else about this Resolver's state + export async function handleAddrChanged({ context, event, @@ -246,13 +252,17 @@ export async function handleVersionChanged({ const id = makeResolverId(event.log.address, node); const domain = await context.db.find(schema.domain, { id: node }); - // materialize the Domain's resolvedAddress field iff exists + // materialize the Domain's resolvedAddress field iff exists and is set to this Resolver if (domain && domain.resolverId === id) { await context.db.update(schema.domain, { id: node }).set({ resolvedAddressId: null }); } - // clear out the resolver's info - await context.db.update(schema.resolver, { id }).set({ + await upsertResolver(context, { + id, + domainId: node, + address: event.log.address, + + // clear out the resolver's info addrId: null, contentHash: null, coinTypes: [], From 10066a58d2f59766c1343bd7b7763ba1172f4287 Mon Sep 17 00:00:00 2001 From: shrugs Date: Thu, 16 Jan 2025 19:55:15 -0600 Subject: [PATCH 27/27] wip: event support --- README.md | 2 + ponder.schema.ts | 401 +++++++++++++++++++++++++------ src/api/graphql.ts | 183 +++++++++++++- src/handlers/Registry.ts | 17 +- src/lib/ids.ts | 2 +- src/plugins/eth/ponder.config.ts | 2 +- 6 files changed, 528 insertions(+), 79 deletions(-) diff --git a/README.md b/README.md index 1534c94..63f15d4 100644 --- a/README.md +++ b/README.md @@ -25,6 +25,8 @@ estimated mainnet backfill time @ <=500rps = ~12 hours on M1 Macbook 
(~14x-28x s ## next up +- [ ] implement events +- [ ] implement polymorphic events resolver on each relevant entity - [ ] `_nocase` case-insensitive where filters - not used interally but ensjs does technically expose this as an available filter to users - [ ] confirm all the schema relations are configured correctly diff --git a/ponder.schema.ts b/ponder.schema.ts index e38f2d0..6e6c38e 100644 --- a/ponder.schema.ts +++ b/ponder.schema.ts @@ -1,6 +1,10 @@ import { onchainTable, relations } from "ponder"; import type { Address } from "viem"; +/** + * Domain + */ + export const domain = onchainTable("domains", (t) => ({ // The namehash of the name id: t.hex().primaryKey(), @@ -39,62 +43,49 @@ export const domain = onchainTable("domains", (t) => ({ // The expiry date for the domain, from either the registration, or the wrapped domain if PCC is burned expiryDate: t.bigint("expiry_date"), - - // "The events associated with the domain" - // events: [DomainEvent!]! @derivedFrom(field: "domain") })); export const domainRelations = relations(domain, ({ one, many }) => ({ - resolvedAddress: one(account, { - fields: [domain.resolvedAddressId], - references: [account.id], - }), - owner: one(account, { - fields: [domain.ownerId], - references: [account.id], - }), - parent: one(domain, { - fields: [domain.parentId], - references: [domain.id], - }), - resolver: one(resolver, { - fields: [domain.resolverId], - references: [resolver.id], - }), + resolvedAddress: one(account, { fields: [domain.resolvedAddressId], references: [account.id] }), + owner: one(account, { fields: [domain.ownerId], references: [account.id] }), + parent: one(domain, { fields: [domain.parentId], references: [domain.id] }), + resolver: one(resolver, { fields: [domain.resolverId], references: [resolver.id] }), subdomains: many(domain, { relationName: "parent" }), - registrant: one(account, { - fields: [domain.registrantId], - references: [account.id], - }), - wrappedOwner: one(account, { - fields: 
[domain.wrappedOwnerId], - references: [account.id], - }), - - // The wrapped domain associated with the domain - wrappedDomain: one(wrappedDomain, { - fields: [domain.id], - references: [wrappedDomain.domainId], - }), + registrant: one(account, { fields: [domain.registrantId], references: [account.id] }), + wrappedOwner: one(account, { fields: [domain.wrappedOwnerId], references: [account.id] }), + wrappedDomain: one(wrappedDomain, { fields: [domain.id], references: [wrappedDomain.domainId] }), + registration: one(registration, { fields: [domain.id], references: [registration.domainId] }), - // The registration associated with the domain - registration: one(registration, { - fields: [domain.id], - references: [registration.domainId], - }), + // event relations + transfers: many(transfer), + newOwners: many(newOwner), + newResolvers: many(newResolver), + newTTLs: many(newTTL), + wrappedTransfers: many(wrappedTransfer), + nameWrappeds: many(nameWrapped), + nameUnwrappeds: many(nameUnwrapped), + fusesSets: many(fusesSet), + expiryExtendeds: many(expiryExtended), })); +/** + * Account + */ + export const account = onchainTable("accounts", (t) => ({ id: t.hex().primaryKey(), })); export const accountRelations = relations(account, ({ many }) => ({ - // account has many domains domains: many(domain), - // TODO: has many wrapped domains - // TODO: has many registrations + wrappedDomains: many(wrappedDomain), + registrations: many(registration), })); +/** + * Resolver + */ + export const resolver = onchainTable("resolvers", (t) => ({ // The unique identifier for this resolver, which is a concatenation of the domain namehash and the resolver address id: t.text().primaryKey(), @@ -113,21 +104,29 @@ export const resolver = onchainTable("resolvers", (t) => ({ // The set of observed SLIP-44 coin types for this resolver // NOTE: we avoid .notNull.default([]) to match subgraph behavior coinTypes: t.bigint("coin_types").array(), - - // TODO: has many events })); -export const 
resolverRelations = relations(resolver, ({ one }) => ({ - addr: one(account, { - fields: [resolver.addrId], - references: [account.id], - }), - domain: one(domain, { - fields: [resolver.domainId], - references: [domain.id], - }), +export const resolverRelations = relations(resolver, ({ one, many }) => ({ + addr: one(account, { fields: [resolver.addrId], references: [account.id] }), + domain: one(domain, { fields: [resolver.domainId], references: [domain.id] }), + + // event relations + addrChangeds: many(addrChanged), + multicoinAddrChangeds: many(multicoinAddrChanged), + nameChangeds: many(nameChanged), + abiChangeds: many(abiChanged), + pubkeyChangeds: many(pubkeyChanged), + textChangeds: many(textChanged), + contenthashChangeds: many(contenthashChanged), + interfaceChangeds: many(interfaceChanged), + authorisationChangeds: many(authorisationChanged), + versionChangeds: many(versionChanged), })); +/** + * Registration + */ + export const registration = onchainTable("registrations", (t) => ({ // The unique identifier of the registration id: t.hex().primaryKey(), @@ -143,22 +142,22 @@ export const registration = onchainTable("registrations", (t) => ({ registrantId: t.hex("registrant_id").notNull(), // The human-readable label name associated with the domain registration labelName: t.text(), - - // The events associated with the domain registration - // TODO: events })); -export const registrationRelations = relations(registration, ({ one }) => ({ - domain: one(domain, { - fields: [registration.domainId], - references: [domain.id], - }), - registrant: one(account, { - fields: [registration.registrantId], - references: [account.id], - }), +export const registrationRelations = relations(registration, ({ one, many }) => ({ + domain: one(domain, { fields: [registration.domainId], references: [domain.id] }), + registrant: one(account, { fields: [registration.registrantId], references: [account.id] }), + + // event relations + nameRegistereds: many(nameRegistered), + 
nameReneweds: many(nameRenewed), + nameTransferreds: many(nameTransferred), })); +/** + * Wrapped Domain + */ + export const wrappedDomain = onchainTable("wrapped_domains", (t) => ({ // The unique identifier for each instance of the WrappedDomain entity id: t.hex().primaryKey(), @@ -175,12 +174,274 @@ export const wrappedDomain = onchainTable("wrapped_domains", (t) => ({ })); export const wrappedDomainRelations = relations(wrappedDomain, ({ one }) => ({ - domain: one(domain, { - fields: [wrappedDomain.domainId], - references: [domain.id], + domain: one(domain, { fields: [wrappedDomain.domainId], references: [domain.id] }), + owner: one(account, { fields: [wrappedDomain.ownerId], references: [account.id] }), +})); + +/** + * Events + */ + +const domainEvent = (t: any) => ({ + id: t.text().primaryKey(), + domainId: t.hex("domain_id").notNull(), + blockNumber: t.integer("block_number").notNull(), + transactionID: t.hex("transaction_id").notNull(), +}); + +// Domain Event Entities + +export const transfer = onchainTable("transfers", (t) => ({ + ...domainEvent(t), + ownerId: t.hex("owner_id").notNull(), +})); + +export const newOwner = onchainTable("new_owners", (t) => ({ + ...domainEvent(t), + ownerId: t.hex("owner_id").notNull(), + parentDomainId: t.hex("parent_domain_id").notNull(), +})); + +export const newResolver = onchainTable("new_resolvers", (t) => ({ + ...domainEvent(t), + resolverId: t.text("resolver_id").notNull(), +})); + +export const newTTL = onchainTable("new_ttls", (t) => ({ + ...domainEvent(t), + ttl: t.bigint().notNull(), +})); + +export const wrappedTransfer = onchainTable("wrapped_transfers", (t) => ({ + ...domainEvent(t), + ownerId: t.hex("owner_id").notNull(), +})); + +export const nameWrapped = onchainTable("name_wrapped", (t) => ({ + ...domainEvent(t), + name: t.text(), + fuses: t.integer().notNull(), + ownerId: t.hex("owner_id").notNull(), + expiryDate: t.bigint("expiry_date").notNull(), +})); + +export const nameUnwrapped = 
onchainTable("name_unwrapped", (t) => ({ + ...domainEvent(t), + ownerId: t.hex("owner_id").notNull(), +})); + +export const fusesSet = onchainTable("fuses_set", (t) => ({ + ...domainEvent(t), + fuses: t.integer().notNull(), +})); + +export const expiryExtended = onchainTable("expiry_extended", (t) => ({ + ...domainEvent(t), + expiryDate: t.bigint("expiry_date").notNull(), +})); + +// Registration Event Entities + +export const nameRegistered = onchainTable("name_registered", (t) => ({ + ...domainEvent(t), + registrationId: t.hex("registration_id").notNull(), + registrantId: t.hex("registrant_id").notNull(), + expiryDate: t.bigint("expiry_date").notNull(), +})); + +export const nameRenewed = onchainTable("name_renewed", (t) => ({ + ...domainEvent(t), + registrationId: t.hex("registration_id").notNull(), + expiryDate: t.bigint("expiry_date").notNull(), +})); + +export const nameTransferred = onchainTable("name_transferred", (t) => ({ + ...domainEvent(t), + registrationId: t.hex("registration_id").notNull(), + newOwnerId: t.hex("new_owner_id").notNull(), +})); + +// Resolver Event Entities + +export const addrChanged = onchainTable("addr_changed", (t) => ({ + ...domainEvent(t), + resolverId: t.text("resolver_id").notNull(), + addrId: t.hex("addr_id").notNull(), +})); + +export const multicoinAddrChanged = onchainTable("multicoin_addr_changed", (t) => ({ + ...domainEvent(t), + resolverId: t.text("resolver_id").notNull(), + coinType: t.bigint("coin_type").notNull(), + addr: t.hex().notNull(), +})); + +export const nameChanged = onchainTable("name_changed", (t) => ({ + ...domainEvent(t), + resolverId: t.text("resolver_id").notNull(), + name: t.text().notNull(), +})); + +export const abiChanged = onchainTable("abi_changed", (t) => ({ + ...domainEvent(t), + resolverId: t.text("resolver_id").notNull(), + contentType: t.bigint("content_type").notNull(), +})); + +export const pubkeyChanged = onchainTable("pubkey_changed", (t) => ({ + ...domainEvent(t), + resolverId: 
t.text("resolver_id").notNull(), + x: t.hex().notNull(), + y: t.hex().notNull(), +})); + +export const textChanged = onchainTable("text_changed", (t) => ({ + ...domainEvent(t), + resolverId: t.text("resolver_id").notNull(), + key: t.text().notNull(), + value: t.text(), +})); + +export const contenthashChanged = onchainTable("contenthash_changed", (t) => ({ + ...domainEvent(t), + resolverId: t.text("resolver_id").notNull(), + hash: t.hex().notNull(), +})); + +export const interfaceChanged = onchainTable("interface_changed", (t) => ({ + ...domainEvent(t), + resolverId: t.text("resolver_id").notNull(), + interfaceID: t.hex("interface_id").notNull(), + implementer: t.hex().notNull(), +})); + +export const authorisationChanged = onchainTable("authorisation_changed", (t) => ({ + ...domainEvent(t), + resolverId: t.text("resolver_id").notNull(), + owner: t.hex().notNull(), + target: t.hex().notNull(), + isAuthorized: t.boolean("is_authorized").notNull(), +})); + +export const versionChanged = onchainTable("version_changed", (t) => ({ + ...domainEvent(t), + resolverId: t.text("resolver_id").notNull(), + version: t.bigint().notNull(), +})); + +/** + * Event Relations + */ + +// Domain Event Relations + +export const transferRelations = relations(transfer, ({ one }) => ({ + domain: one(domain, { fields: [transfer.domainId], references: [domain.id] }), + owner: one(account, { fields: [transfer.ownerId], references: [account.id] }), +})); + +export const newOwnerRelations = relations(newOwner, ({ one }) => ({ + domain: one(domain, { fields: [newOwner.domainId], references: [domain.id] }), + owner: one(account, { fields: [newOwner.ownerId], references: [account.id] }), + parentDomain: one(domain, { fields: [newOwner.parentDomainId], references: [domain.id] }), +})); + +export const newResolverRelations = relations(newResolver, ({ one }) => ({ + domain: one(domain, { fields: [newResolver.domainId], references: [domain.id] }), + resolver: one(resolver, { fields: 
[newResolver.resolverId], references: [resolver.id] }), +})); + +export const newTTLRelations = relations(newTTL, ({ one }) => ({ + domain: one(domain, { fields: [newTTL.domainId], references: [domain.id] }), +})); + +export const wrappedTransferRelations = relations(wrappedTransfer, ({ one }) => ({ + domain: one(domain, { fields: [wrappedTransfer.domainId], references: [domain.id] }), + owner: one(account, { fields: [wrappedTransfer.ownerId], references: [account.id] }), +})); + +export const nameWrappedRelations = relations(nameWrapped, ({ one }) => ({ + domain: one(domain, { fields: [nameWrapped.domainId], references: [domain.id] }), + owner: one(account, { fields: [nameWrapped.ownerId], references: [account.id] }), +})); + +export const nameUnwrappedRelations = relations(nameUnwrapped, ({ one }) => ({ + domain: one(domain, { fields: [nameUnwrapped.domainId], references: [domain.id] }), + owner: one(account, { fields: [nameUnwrapped.ownerId], references: [account.id] }), +})); + +export const fusesSetRelations = relations(fusesSet, ({ one }) => ({ + domain: one(domain, { fields: [fusesSet.domainId], references: [domain.id] }), +})); + +export const expiryExtendedRelations = relations(expiryExtended, ({ one }) => ({ + domain: one(domain, { fields: [expiryExtended.domainId], references: [domain.id] }), +})); + +// Registration Event Relations + +export const nameRegisteredRelations = relations(nameRegistered, ({ one }) => ({ + registration: one(registration, { + fields: [nameRegistered.registrationId], + references: [registration.id], + }), + registrant: one(account, { fields: [nameRegistered.registrantId], references: [account.id] }), +})); + +export const nameRenewedRelations = relations(nameRenewed, ({ one }) => ({ + registration: one(registration, { + fields: [nameRenewed.registrationId], + references: [registration.id], }), - owner: one(account, { - fields: [wrappedDomain.ownerId], - references: [account.id], +})); + +export const nameTransferredRelations = 
relations(nameTransferred, ({ one }) => ({ + registration: one(registration, { + fields: [nameTransferred.registrationId], + references: [registration.id], }), + newOwner: one(account, { fields: [nameTransferred.newOwnerId], references: [account.id] }), +})); + +// Resolver Event Relations + +export const addrChangedRelations = relations(addrChanged, ({ one }) => ({ + resolver: one(resolver, { fields: [addrChanged.resolverId], references: [resolver.id] }), + addr: one(account, { fields: [addrChanged.addrId], references: [account.id] }), +})); + +export const multicoinAddrChangedRelations = relations(multicoinAddrChanged, ({ one }) => ({ + resolver: one(resolver, { fields: [multicoinAddrChanged.resolverId], references: [resolver.id] }), +})); + +export const nameChangedRelations = relations(nameChanged, ({ one }) => ({ + resolver: one(resolver, { fields: [nameChanged.resolverId], references: [resolver.id] }), +})); + +export const abiChangedRelations = relations(abiChanged, ({ one }) => ({ + resolver: one(resolver, { fields: [abiChanged.resolverId], references: [resolver.id] }), +})); + +export const pubkeyChangedRelations = relations(pubkeyChanged, ({ one }) => ({ + resolver: one(resolver, { fields: [pubkeyChanged.resolverId], references: [resolver.id] }), +})); + +export const textChangedRelations = relations(textChanged, ({ one }) => ({ + resolver: one(resolver, { fields: [textChanged.resolverId], references: [resolver.id] }), +})); + +export const contenthashChangedRelations = relations(contenthashChanged, ({ one }) => ({ + resolver: one(resolver, { fields: [contenthashChanged.resolverId], references: [resolver.id] }), +})); + +export const interfaceChangedRelations = relations(interfaceChanged, ({ one }) => ({ + resolver: one(resolver, { fields: [interfaceChanged.resolverId], references: [resolver.id] }), +})); + +export const authorisationChangedRelations = relations(authorisationChanged, ({ one }) => ({ + resolver: one(resolver, { fields: 
[authorisationChanged.resolverId], references: [resolver.id] }), +})); + +export const versionChangedRelations = relations(versionChanged, ({ one }) => ({ + resolver: one(resolver, { fields: [versionChanged.resolverId], references: [resolver.id] }), })); diff --git a/src/api/graphql.ts b/src/api/graphql.ts index 68c8f94..c43ed4f 100644 --- a/src/api/graphql.ts +++ b/src/api/graphql.ts @@ -6,6 +6,7 @@ * 2. removes ponder's encoded id params in favor of literal ids * 3. implement subgraph's simpler pagination style with first & skip w/out Page types * 4. PascalCase entity names + * 5. Hardcoded Polymorphic Event Types */ // here we inline the following types from this original import @@ -70,6 +71,8 @@ import { PgTableExtraConfig, TableConfig, isPgEnum, + union, + unionAll, } from "drizzle-orm/pg-core"; import { GraphQLBoolean, @@ -82,6 +85,7 @@ import { GraphQLInputObjectType, type GraphQLInputType, GraphQLInt, + GraphQLInterfaceType, GraphQLList, GraphQLNonNull, GraphQLObjectType, @@ -89,6 +93,7 @@ import { GraphQLScalarType, GraphQLSchema, GraphQLString, + GraphQLUnionType, } from "graphql"; import { GraphQLJSON } from "graphql-scalars"; @@ -118,6 +123,64 @@ const OrderDirectionEnum = new GraphQLEnumType({ }, }); +/** + * Polymorphic Event TsNames + */ + +const DomainEventTsNames = [ + "transfer", + "newOwner", + "newResolver", + "newTTL", + "wrappedTransfer", + "nameWrapped", + "nameUnwrapped", + "fusesSet", + "expiryExtended", +]; + +const RegistrationEventTsNames = ["nameRegistered", "nameRenewed", "nameTransferred"]; + +const ResolverEventTsNames = [ + "addrChanged", + "multicoinAddrChanged", + "nameChanged", + "abiChanged", + "pubkeyChanged", + "textChanged", + "contenthashChanged", + "interfaceChanged", + "authorisationChanged", + "versionChanged", +]; + +const DomainEvent = new GraphQLInterfaceType({ + name: "DomainEvent", + fields: { + id: { type: new GraphQLNonNull(GraphQLString) }, + blockNumber: { type: new GraphQLNonNull(GraphQLInt) }, + transactionID: 
{ type: new GraphQLNonNull(GraphQLString) }, + }, +}); + +const RegistrationEvent = new GraphQLInterfaceType({ + name: "RegistrationEvent", + fields: { + id: { type: new GraphQLNonNull(GraphQLString) }, + blockNumber: { type: new GraphQLNonNull(GraphQLInt) }, + transactionID: { type: new GraphQLNonNull(GraphQLString) }, + }, +}); + +const ResolverEvent = new GraphQLInterfaceType({ + name: "ResolverEvent", + fields: { + id: { type: new GraphQLNonNull(GraphQLString) }, + blockNumber: { type: new GraphQLNonNull(GraphQLInt) }, + transactionID: { type: new GraphQLNonNull(GraphQLString) }, + }, +}); + export function buildGraphQLSchema(schema: Schema): GraphQLSchema { const tablesConfig = extractTablesRelationalConfig(schema, createTableRelationsHelpers); @@ -154,7 +217,7 @@ export function buildGraphQLSchema(schema: Schema): GraphQLSchema { // TODO: relationships i.e. parent__labelName iff necessary entityOrderByEnums[table.tsName] = new GraphQLEnumType({ - name: `${pascalCase(table.tsName)}_orderBy`, + name: `${getSubgraphEntityName(table.tsName)}_orderBy`, values, }); } @@ -162,7 +225,7 @@ export function buildGraphQLSchema(schema: Schema): GraphQLSchema { const entityFilterTypes: Record = {}; for (const table of tables) { const filterType = new GraphQLInputObjectType({ - name: `${table.tsName}Filter`, + name: `${table.tsName}_filter`, fields: () => { const filterFields: GraphQLInputFieldConfigMap = { // Logical operators @@ -235,7 +298,13 @@ export function buildGraphQLSchema(schema: Schema): GraphQLSchema { for (const table of tables) { entityTypes[table.tsName] = new GraphQLObjectType({ - name: pascalCase(table.tsName), // NOTE: PascalCase to match subgraph + name: getSubgraphEntityName(table.tsName), + // polymorphic event interface logic + interfaces: [ + ...(DomainEventTsNames.includes(table.tsName) ? [DomainEvent] : []), + ...(RegistrationEventTsNames.includes(table.tsName) ? [RegistrationEvent] : []), + ...(ResolverEventTsNames.includes(table.tsName) ? 
[ResolverEvent] : []), + ], fields: () => { const fieldConfigMap: GraphQLFieldConfigMap = {}; @@ -418,6 +487,104 @@ export function buildGraphQLSchema(schema: Schema): GraphQLSchema { }; } + /** + * Polymorphic Event Logic + * + * Not super happy with how this is implemented but it gets the job done. + * + */ + + entityTypes["domain"]!.getFields().events = { + name: "events", + type: new GraphQLNonNull(new GraphQLList(new GraphQLNonNull(DomainEvent))), + args: [], + description: "The events associated with the domain", + deprecationReason: undefined, + extensions: {}, + astNode: undefined, + resolve: async (parent, _args, { drizzle }) => { + const eventTables = tables.filter((t) => DomainEventTsNames.includes(t.tsName)); + const results = await Promise.all( + eventTables.map((t) => + drizzle.query[t.tsName]!.findMany({ + where: eq(t.columns["domainId"]!, parent.id), + }), + ), + ); + + return results + .flatMap((events, i) => + events.map((event) => ({ + ...event, + __typename: getSubgraphEntityName(eventTables[i]!.tsName), + })), + ) + .sort(sortByBlockNumber); + }, + }; + + entityTypes["registration"]!.getFields().events = { + name: "events", + type: new GraphQLNonNull(new GraphQLList(new GraphQLNonNull(RegistrationEvent))), + args: [], + description: "The events associated with the registration", + deprecationReason: undefined, + extensions: {}, + astNode: undefined, + resolve: async (parent, _args, { drizzle }) => { + const eventTables = tables.filter((t) => RegistrationEventTsNames.includes(t.tsName)); + const results = await Promise.all( + eventTables.map((t) => + drizzle.query[t.tsName]!.findMany({ + where: eq(t.columns["registrationId"]!, parent.id), + }), + ), + ); + + return results + .flatMap((events, i) => + events.map((event) => ({ + ...event, + __typename: getSubgraphEntityName(eventTables[i]!.tsName), + })), + ) + .sort(sortByBlockNumber); + }, + }; + + entityTypes["resolver"]!.getFields().events = { + name: "events", + type: new GraphQLNonNull(new 
GraphQLList(new GraphQLNonNull(ResolverEvent))), + args: [], + description: "The events associated with the resolver", + deprecationReason: undefined, + extensions: {}, + astNode: undefined, + resolve: async (parent, _args, { drizzle }) => { + const eventTables = tables.filter((t) => ResolverEventTsNames.includes(t.tsName)); + const results = await Promise.all( + eventTables.map((t) => + drizzle.query[t.tsName]!.findMany({ + where: eq(t.columns["resolverId"]!, parent.id), + }), + ), + ); + + return results + .flatMap((events, i) => + events.map((event) => ({ + ...event, + __typename: getSubgraphEntityName(eventTables[i]!.tsName), + })), + ) + .sort(sortByBlockNumber); + }, + }; + + /** + * ok back to ponder's regularly scheduled programming + */ + queryFields._meta = { type: GraphQLMeta, resolve: async (_source, _args, context) => { @@ -745,3 +912,13 @@ function getColumnTsName(column: Column) { const tableColumns = getTableColumns(column.table); return Object.entries(tableColumns).find(([_, c]) => c.name === column.name)![0]; } + +function getSubgraphEntityName(tsName: string) { + if (tsName === "newTTL") return "NewTTL"; + // if (tsName === "contentHashChanged") return "ContenthashChanged"; + return pascalCase(tsName); +} + +function sortByBlockNumber(a: { blockNumber: number }, b: { blockNumber: number }) { + return a.blockNumber - b.blockNumber; +} diff --git a/src/handlers/Registry.ts b/src/handlers/Registry.ts index c3d93fb..270f672 100644 --- a/src/handlers/Registry.ts +++ b/src/handlers/Registry.ts @@ -1,9 +1,9 @@ -import { Context } from "ponder:registry"; +import { Context, Event } from "ponder:registry"; import schema from "ponder:schema"; import { encodeLabelhash } from "@ensdomains/ensjs/utils"; import { Block } from "ponder"; import { type Hex, zeroAddress } from "viem"; -import { makeResolverId } from "../lib/ids"; +import { makeEventId, makeResolverId } from "../lib/ids"; import { ROOT_NODE, makeSubnodeNamehash } from "../lib/subname-helpers"; 
import { upsertAccount } from "../lib/upserts"; @@ -87,9 +87,8 @@ export const handleNewOwner = event, }: { context: Context; - event: { + event: Omit & { args: { node: Hex; label: Hex; owner: Hex }; - block: Block; }; }) => { const { label, node, owner } = event.args; @@ -137,6 +136,16 @@ export const handleNewOwner = if (owner === zeroAddress) { await recursivelyRemoveEmptyDomainFromParentSubdomainCount(context, domain.id); } + + // DomainEvent + await context.db.insert(schema.newOwner).values({ + id: makeEventId(event), + blockNumber: event.block.number, + transactionID: event.transaction.hash, + parentDomainId: node, + domainId: subnode, + ownerId: owner, + }); }; export async function handleNewTTL({ diff --git a/src/lib/ids.ts b/src/lib/ids.ts index 9d9e763..d74678d 100644 --- a/src/lib/ids.ts +++ b/src/lib/ids.ts @@ -7,7 +7,7 @@ export const makeResolverId = (address: Address, node: Hex) => // https://github.com/ensdomains/ens-subgraph/blob/master/src/utils.ts#L5 // produces `blocknumber-logIndex` or `blocknumber-logindex-transferindex` -export const makeEventId = (event: Event, transferIndex?: number) => +export const makeEventId = (event: Pick, transferIndex?: number) => [event.block.number.toString(), event.log.logIndex.toString(), transferIndex?.toString()] .filter(Boolean) .join("-"); diff --git a/src/plugins/eth/ponder.config.ts b/src/plugins/eth/ponder.config.ts index 4e5914f..f676796 100644 --- a/src/plugins/eth/ponder.config.ts +++ b/src/plugins/eth/ponder.config.ts @@ -21,7 +21,7 @@ export const pluginNamespace = createPluginNamespace(ownedName); // constrain the ponder indexing between the following start/end blocks // https://ponder.sh/0_6/docs/contracts-and-networks#block-range const START_BLOCK: ContractConfig["startBlock"] = undefined; -const END_BLOCK: ContractConfig["endBlock"] = 21_000_000; +const END_BLOCK: ContractConfig["endBlock"] = 4_000_000; const REGISTRY_OLD_ADDRESS = "0x314159265dd8dbb310642f98f50c066173c1259b"; const 
REGISTRY_ADDRESS = "0x00000000000C2E074eC69A0dFb2997BA6C7d2e1e";