diff --git a/.changeset/five-cows-sneeze.md b/.changeset/five-cows-sneeze.md new file mode 100644 index 000000000..334971457 --- /dev/null +++ b/.changeset/five-cows-sneeze.md @@ -0,0 +1,6 @@ +--- +"ensapi": minor +"@ensnode/ensnode-sdk": minor +--- + +Introduces ENS Analytics API for tracking and analyzing referral metrics. Adds `/ensanalytics/aggregated-referrers` endpoint with pagination support to retrieve aggregated referrer metrics and contribution percentages. diff --git a/.changeset/lemon-flies-sin.md b/.changeset/lemon-flies-sin.md new file mode 100644 index 000000000..9013e8f6e --- /dev/null +++ b/.changeset/lemon-flies-sin.md @@ -0,0 +1,5 @@ +--- +"@ensnode/ensnode-sdk": minor +--- + +Added `staleWhileRevalidate` function for Stale-While-Revalidate caching pattern. diff --git a/.changeset/olive-carrots-remain.md b/.changeset/olive-carrots-remain.md new file mode 100644 index 000000000..3515af074 --- /dev/null +++ b/.changeset/olive-carrots-remain.md @@ -0,0 +1,5 @@ +--- +"@ensnode/ensnode-sdk": patch +--- + +Migrated cache implementation to use `UnixTimestamp` and `Duration` types for better type safety and consistency. diff --git a/.changeset/small-apes-cheer.md b/.changeset/small-apes-cheer.md new file mode 100644 index 000000000..01d166e66 --- /dev/null +++ b/.changeset/small-apes-cheer.md @@ -0,0 +1,5 @@ +--- +"@ensnode/ensnode-sdk": minor +--- + +Added ENS Analytics module with types, serialization/deserialization functions, and Zod validation schemas for `PaginatedAggregatedReferrersResponse`. This includes support for aggregated referrer metrics with contribution percentages and pagination. 
diff --git a/apps/ensapi/package.json b/apps/ensapi/package.json index 8a355b7f6..a74250fb9 100644 --- a/apps/ensapi/package.json +++ b/apps/ensapi/package.json @@ -25,6 +25,7 @@ "@ensnode/ensnode-schema": "workspace:*", "@ensnode/ensnode-sdk": "workspace:*", "@ensnode/ponder-subgraph": "workspace:*", + "@namehash/ens-referrals": "workspace:*", "@hono/node-server": "^1.19.5", "@hono/otel": "^0.2.2", "@hono/zod-validator": "^0.7.2", diff --git a/apps/ensapi/src/handlers/ensanalytics-api.ts b/apps/ensapi/src/handlers/ensanalytics-api.ts new file mode 100644 index 000000000..eea11b85b --- /dev/null +++ b/apps/ensapi/src/handlers/ensanalytics-api.ts @@ -0,0 +1,152 @@ +import { z } from "zod/v4"; + +import { + type AggregatedReferrerMetrics, + type AggregatedReferrerMetricsContribution, + ITEMS_PER_PAGE_DEFAULT, + ITEMS_PER_PAGE_MAX, + type PaginatedAggregatedReferrersRequest, + type PaginatedAggregatedReferrersResponse, + PaginatedAggregatedReferrersResponseCodes, + serializePaginatedAggregatedReferrersResponse, +} from "@ensnode/ensnode-sdk"; + +import { errorResponse } from "@/lib/handlers/error-response"; +import { validate } from "@/lib/handlers/validate"; +import { factory } from "@/lib/hono-factory"; +import { islice } from "@/lib/itertools"; +import logger from "@/lib/logger"; +import { aggregatedReferrerSnapshotCacheMiddleware } from "@/middleware/aggregated-referrer-snapshot-cache.middleware"; + +const app = factory.createApp(); + +// Apply aggregated referrer snapshot cache middleware to all routes in this handler +app.use(aggregatedReferrerSnapshotCacheMiddleware); + +// Pagination query parameters schema (mirrors PaginatedAggregatedReferrersRequest) +const paginationQuerySchema = z.object({ + page: z.optional(z.coerce.number().int().min(1, "Page must be a positive integer")).default(1), + itemsPerPage: z + .optional( + z.coerce + .number() + .int() + .min(1, "Items per page must be at least 1") + .max(ITEMS_PER_PAGE_MAX, `Items per page must not exceed 
${ITEMS_PER_PAGE_MAX}`), + ) + .default(ITEMS_PER_PAGE_DEFAULT), +}) satisfies z.ZodType<PaginatedAggregatedReferrersRequest>; + +/** + * Converts an AggregatedReferrerMetrics object to AggregatedReferrerMetricsContribution + * by calculating contribution percentages based on grand totals. + * + * @param referrer - The referrer metrics to convert + * @param grandTotalReferrals - The sum of all referrals across all referrers + * @param grandTotalIncrementalDuration - The sum of all incremental duration across all referrers + * @returns The referrer metrics with contribution percentages + */ +function calculateContribution( + referrer: AggregatedReferrerMetrics, + grandTotalReferrals: number, + grandTotalIncrementalDuration: number, +): AggregatedReferrerMetricsContribution { + return { + ...referrer, + totalReferralsContribution: + grandTotalReferrals > 0 ? referrer.totalReferrals / grandTotalReferrals : 0, + totalIncrementalDurationContribution: + grandTotalIncrementalDuration > 0 + ? referrer.totalIncrementalDuration / grandTotalIncrementalDuration + : 0, + }; +} + +// Get all aggregated referrers with pagination +app.get("/aggregated-referrers", validate("query", paginationQuerySchema), async (c) => { + try { + const aggregatedReferrerSnapshotCache = c.var.aggregatedReferrerSnapshotCache; + + // Check if cache failed to load + if (aggregatedReferrerSnapshotCache === null) { + return c.json( + serializePaginatedAggregatedReferrersResponse({ + responseCode: PaginatedAggregatedReferrersResponseCodes.Error, + error: "Internal Server Error", + errorMessage: "Failed to load aggregated referrer data.", + } satisfies PaginatedAggregatedReferrersResponse), + 500, + ); + } + + const { page, itemsPerPage } = c.req.valid("query"); + + const totalAggregatedReferrers = aggregatedReferrerSnapshotCache.referrers.size; + + // Calculate total pages + const totalPages = Math.ceil(totalAggregatedReferrers / itemsPerPage); + + // Check if requested page exceeds available pages + if (totalAggregatedReferrers > 0) { + 
const pageValidationSchema = z + .number() + .max(totalPages, `Page ${page} exceeds total pages ${totalPages}`); + + const pageValidation = pageValidationSchema.safeParse(page); + if (!pageValidation.success) { + return errorResponse(c, pageValidation.error); + } + } + + // Use iterator slice to extract paginated results + const startIndex = (page - 1) * itemsPerPage; + const endIndex = startIndex + itemsPerPage; + const paginatedReferrers = islice( + aggregatedReferrerSnapshotCache.referrers.values(), + startIndex, + endIndex, + ); + + // Convert AggregatedReferrerMetrics to AggregatedReferrerMetricsContribution + const referrersWithContribution = Array.from(paginatedReferrers).map((referrer) => + calculateContribution( + referrer, + aggregatedReferrerSnapshotCache.grandTotalReferrals, + aggregatedReferrerSnapshotCache.grandTotalIncrementalDuration, + ), + ); + + return c.json( + serializePaginatedAggregatedReferrersResponse({ + responseCode: PaginatedAggregatedReferrersResponseCodes.Ok, + data: { + referrers: referrersWithContribution, + total: totalAggregatedReferrers, + paginationParams: { + page, + itemsPerPage, + }, + hasNext: endIndex < totalAggregatedReferrers, + hasPrev: page > 1, + updatedAt: aggregatedReferrerSnapshotCache.updatedAt, + }, + } satisfies PaginatedAggregatedReferrersResponse), + ); + } catch (error) { + logger.error({ error }, "Error in /ensanalytics/aggregated-referrers endpoint"); + const errorMessage = + error instanceof Error + ? 
error.message + : "An unexpected error occurred while processing your request"; + return c.json( + serializePaginatedAggregatedReferrersResponse({ + responseCode: PaginatedAggregatedReferrersResponseCodes.Error, + error: "Internal server error", + errorMessage, + } satisfies PaginatedAggregatedReferrersResponse), + 500, + ); + } +}); + +export default app; diff --git a/apps/ensapi/src/index.ts b/apps/ensapi/src/index.ts index 28abf3e1d..1bc2adc6d 100644 --- a/apps/ensapi/src/index.ts +++ b/apps/ensapi/src/index.ts @@ -12,8 +12,10 @@ import { errorResponse } from "@/lib/handlers/error-response"; import { factory } from "@/lib/hono-factory"; import logger from "@/lib/logger"; import { sdk } from "@/lib/tracing/instrumentation"; +import { fetcher as referrersCacheFetcher } from "@/middleware/aggregated-referrer-snapshot-cache.middleware"; import { indexingStatusMiddleware } from "@/middleware/indexing-status.middleware"; +import ensanalyticsApi from "./handlers/ensanalytics-api"; import ensNodeApi from "./handlers/ensnode-api"; import subgraphApi from "./handlers/subgraph-api"; @@ -41,6 +43,9 @@ app.route("/api", ensNodeApi); // use Subgraph GraphQL API at /subgraph app.route("/subgraph", subgraphApi); +// use ENSAnalytics API at /ensanalytics +app.route("/ensanalytics", ensanalyticsApi); + // will automatically 500 if config is not available due to ensIndexerPublicConfigMiddleware app.get("/health", async (c) => { return c.json({ ok: true }); @@ -68,6 +73,16 @@ const server = serve( // self-healthcheck to connect to ENSIndexer & warm Indexing Status / Can Accelerate cache await app.request("/health"); + + // warm start ENSAnalytics aggregated referrer snapshot cache + logger.info("Warming up ENSAnalytics aggregated referrer snapshot cache..."); + const cache = await referrersCacheFetcher(); + if (cache) { + logger.info(`ENSAnalytics cache warmed up with ${cache.referrers.size} referrers`); + } else { + logger.error("Failed to warm up ENSAnalytics cache - no cached 
data available yet"); + // Don't exit - let the service run without pre-warmed analytics + } }, ); diff --git a/apps/ensapi/src/lib/ensanalytics/database.ts b/apps/ensapi/src/lib/ensanalytics/database.ts new file mode 100644 index 000000000..5da5b9561 --- /dev/null +++ b/apps/ensapi/src/lib/ensanalytics/database.ts @@ -0,0 +1,120 @@ +import { getUnixTime } from "date-fns"; +import { and, count, desc, eq, gte, isNotNull, lte, ne, sql, sum } from "drizzle-orm"; +import { zeroAddress } from "viem"; + +import * as schema from "@ensnode/ensnode-schema"; +import { + type AccountId, + deserializeDuration, + serializeAccountId, + type UnixTimestamp, +} from "@ensnode/ensnode-sdk"; + +import { db } from "@/lib/db"; +import type { AggregatedReferrerSnapshot } from "@/lib/ensanalytics/types"; +import { ireduce } from "@/lib/itertools"; +import logger from "@/lib/logger"; + +/** + * Fetches all referrers with 1 or more qualified referrals from the `registrar_actions` table + * and builds an `AggregatedReferrerSnapshot`. 
+ * + * Step 1: Filter for "qualified" referrals where: + * - timestamp is between startDate and endDate + * - decodedReferrer is not null and not the zero address + * - subregistryId matches the provided subregistryId + * + * Step 2: Group by decodedReferrer and calculate: + * - Sum total incrementalDuration for each decodedReferrer + * - Count of qualified referrals for each decodedReferrer + * + * Step 3: Sort by sum total incrementalDuration from highest to lowest + * + * Step 4: Calculate grand totals and build the snapshot object + * + * @param startDate - The start date (Unix timestamp, inclusive) for filtering registrar actions + * @param endDate - The end date (Unix timestamp, inclusive) for filtering registrar actions + * @param subregistryId - The account ID of the subregistry to filter by + * @returns `AggregatedReferrerSnapshot` containing all referrers with at least one qualified referral, grand totals, and updatedAt timestamp + * @throws Error if startDate > endDate (invalid date range) + * @throws Error if the database query fails + */ +export async function getAggregatedReferrerSnapshot( + startDate: UnixTimestamp, + endDate: UnixTimestamp, + subregistryId: AccountId, +): Promise<AggregatedReferrerSnapshot> { + if (startDate > endDate) { + throw new Error( + `Invalid date range: startDate (${startDate}) must be less than or equal to endDate (${endDate})`, + ); + } + + try { + const updatedAt = getUnixTime(new Date()); + + const result = await db + .select({ + referrer: schema.registrarActions.decodedReferrer, + totalReferrals: count().as("total_referrals"), + totalIncrementalDuration: sum(schema.registrarActions.incrementalDuration).as( + "total_incremental_duration", + ), + }) + .from(schema.registrarActions) + .where( + and( + // Filter by timestamp range + gte(schema.registrarActions.timestamp, BigInt(startDate)), + lte(schema.registrarActions.timestamp, BigInt(endDate)), + // Filter by decodedReferrer not null + isNotNull(schema.registrarActions.decodedReferrer), + // 
Filter by decodedReferrer not zero address + ne(schema.registrarActions.decodedReferrer, zeroAddress), + // Filter by subregistryId matching the provided subregistryId + eq(schema.registrarActions.subregistryId, serializeAccountId(subregistryId)), + ), + ) + .groupBy(schema.registrarActions.decodedReferrer) + .orderBy(desc(sql`total_incremental_duration`)); + + // Transform the result to an ordered map (preserves SQL sort order) + const referrers = new Map( + result.map((row) => { + // biome-ignore lint/style/noNonNullAssertion: referrer is guaranteed to be non-null due to isNotNull filter in WHERE clause + const address = row.referrer!; + const metrics = { + referrer: address, + totalReferrals: row.totalReferrals, + // biome-ignore lint/style/noNonNullAssertion: totalIncrementalDuration is guaranteed to be non-null as it is the sum of non-null bigint values + totalIncrementalDuration: deserializeDuration(row.totalIncrementalDuration!), + }; + return [address, metrics]; + }), + ); + + // Calculate grand totals across all referrers + const grandTotalReferrals = ireduce( + referrers.values(), + (sum, metrics) => sum + metrics.totalReferrals, + 0, + ); + const grandTotalIncrementalDuration = ireduce( + referrers.values(), + (sum, metrics) => sum + metrics.totalIncrementalDuration, + 0, + ); + + // Build and return the complete snapshot + return { + referrers, + updatedAt, + grandTotalReferrals, + grandTotalIncrementalDuration, + }; + } catch (error) { + const errorMessage = error instanceof Error ? 
error.message : "Unknown error"; + logger.error({ error }, "Failed to fetch aggregated referrer snapshot from database"); + throw new Error(`Failed to fetch aggregated referrer snapshot: ${errorMessage}`); + } +} diff --git a/apps/ensapi/src/lib/ensanalytics/types.ts b/apps/ensapi/src/lib/ensanalytics/types.ts new file mode 100644 index 000000000..c9bf9a663 --- /dev/null +++ b/apps/ensapi/src/lib/ensanalytics/types.ts @@ -0,0 +1,32 @@ +import type { Address } from "viem"; + +import type { AggregatedReferrerMetrics, Duration, UnixTimestamp } from "@ensnode/ensnode-sdk"; + +/** + * Represents a snapshot of aggregated metrics for all referrers with 1 or more qualifying referrals as of `updatedAt`. + */ +export interface AggregatedReferrerSnapshot { + /** + * Ordered map containing `AggregatedReferrerMetrics` for all referrers with 1 or more qualifying referrals as of `updatedAt`. + * @invariant Map entries are ordered by `totalIncrementalDuration` (descending). + * @invariant Map may be empty if there are no referrers with 1 or more qualifying referrals as of `updatedAt`. + * @invariant If an `Address` is not a key in this map then that `Address` had 0 qualifying referrals as of `updatedAt`. + * @invariant Each `Address` key in this map is unique. + */ + referrers: Map<Address, AggregatedReferrerMetrics>; + + /** Unix timestamp identifying when this `AggregatedReferrerSnapshot` was generated. */ + updatedAt: UnixTimestamp; + + /** + * @invariant The sum of `totalReferrals` across all `referrers`. + * @invariant Guaranteed to be a non-negative integer (>= 0) + */ + grandTotalReferrals: number; + + /** + * @invariant The sum of `totalIncrementalDuration` across all `referrers`. 
+ * @invariant Guaranteed to be a non-negative integer (>= 0), measured in seconds + */ + grandTotalIncrementalDuration: Duration; +} diff --git a/apps/ensapi/src/lib/hono-factory.ts b/apps/ensapi/src/lib/hono-factory.ts index e85482b81..607e9f2fd 100644 --- a/apps/ensapi/src/lib/hono-factory.ts +++ b/apps/ensapi/src/lib/hono-factory.ts @@ -1,9 +1,13 @@ import { createFactory } from "hono/factory"; +import type { AggregatedReferrerSnapshotCacheVariables } from "@/middleware/aggregated-referrer-snapshot-cache.middleware"; import type { CanAccelerateVariables } from "@/middleware/can-accelerate.middleware"; import type { IndexingStatusVariables } from "@/middleware/indexing-status.middleware"; import type { IsRealtimeVariables } from "@/middleware/is-realtime.middleware"; export const factory = createFactory<{ - Variables: IndexingStatusVariables & IsRealtimeVariables & CanAccelerateVariables; + Variables: IndexingStatusVariables & + IsRealtimeVariables & + CanAccelerateVariables & + AggregatedReferrerSnapshotCacheVariables; }>(); diff --git a/apps/ensapi/src/lib/itertools.ts b/apps/ensapi/src/lib/itertools.ts new file mode 100644 index 000000000..3e27f5584 --- /dev/null +++ b/apps/ensapi/src/lib/itertools.ts @@ -0,0 +1,50 @@ +/** + * Utility functions for working with iterables, inspired by Python's itertools. + */ + +/** + * Reduces an iterable to a single value using a reducer function. + * Similar to Array.reduce but works directly on iterables without creating intermediate arrays. 
+ * + * @param iterable - The iterable to reduce + * @param reducer - Function that combines accumulator with each value + * @param initial - Initial value for the accumulator + * @returns The final accumulated value + * + * @example + * const sum = ireduce(map.values(), (acc, val) => acc + val.count, 0); + */ +export function ireduce<T, R>( + iterable: Iterable<T>, + reducer: (accumulator: R, value: T) => R, + initial: R, +): R { + let accumulator = initial; + for (const value of iterable) { + accumulator = reducer(accumulator, value); + } + return accumulator; +} + +/** + * Returns an iterable containing elements from the iterable from start to stop (exclusive). + * Similar to Python's itertools.islice. + * + * @param iterable - The iterable to slice + * @param start - Starting index (inclusive) + * @param stop - Stopping index (exclusive) + * @returns Iterable containing sliced elements + * + * @example + * const page = islice(map.values(), 0, 25); // First 25 items + * const page2 = islice(map.values(), 25, 50); // Next 25 items + * const array = Array.from(islice(map.values(), 0, 25)); // Convert to array if needed + */ +export function* islice<T>(iterable: Iterable<T>, start: number, stop: number): Iterable<T> { + let index = 0; + for (const item of iterable) { + if (index >= stop) break; + if (index >= start) yield item; + index++; + } +} diff --git a/apps/ensapi/src/middleware/aggregated-referrer-snapshot-cache.middleware.ts b/apps/ensapi/src/middleware/aggregated-referrer-snapshot-cache.middleware.ts new file mode 100644 index 000000000..923c270d1 --- /dev/null +++ b/apps/ensapi/src/middleware/aggregated-referrer-snapshot-cache.middleware.ts @@ -0,0 +1,61 @@ +import config from "@/config"; + +import { + ENS_HOLIDAY_AWARDS_END_DATE, + ENS_HOLIDAY_AWARDS_START_DATE, +} from "@namehash/ens-referrals"; + +import { + type Duration, + getEthnamesSubregistryId, + staleWhileRevalidate, +} from "@ensnode/ensnode-sdk"; + +import { getAggregatedReferrerSnapshot } from 
"@/lib/ensanalytics/database"; +import { factory } from "@/lib/hono-factory"; +import logger from "@/lib/logger"; + +const TTL: Duration = 5 * 60; // 5 minutes + +export const fetcher = staleWhileRevalidate(async () => { + logger.info("Building aggregated referrer snapshot..."); + const subregistryId = getEthnamesSubregistryId(config.namespace); + + try { + const result = await getAggregatedReferrerSnapshot( + ENS_HOLIDAY_AWARDS_START_DATE, + ENS_HOLIDAY_AWARDS_END_DATE, + subregistryId, + ); + logger.info("Successfully built aggregated referrer snapshot"); + return result; + } catch (error) { + logger.error({ error }, "Failed to build aggregated referrer snapshot"); + throw error; + } +}, TTL); + +export type AggregatedReferrerSnapshotCacheVariables = { + aggregatedReferrerSnapshotCache: Awaited<ReturnType<typeof fetcher>>; +}; + +/** + * Middleware that fetches and caches aggregated referrer snapshot data using Stale-While-Revalidate (SWR) caching. + * + * This middleware uses the SWR caching strategy to serve cached data immediately (even if stale) while + * asynchronously revalidating in the background. This provides: + * - Sub-millisecond response times (after first fetch) + * - Always available data (serves stale data during revalidation) + * - Automatic background updates every TTL (5 minutes) + * + * Retrieves all referrers with at least one qualified referral from the database and caches them. + * Sets the `aggregatedReferrerSnapshotCache` variable on the context for use by other middleware and handlers. + * + * @see {@link staleWhileRevalidate} for detailed documentation on the SWR caching strategy and error handling. 
+ */ +export const aggregatedReferrerSnapshotCacheMiddleware = factory.createMiddleware( + async (c, next) => { + c.set("aggregatedReferrerSnapshotCache", await fetcher()); + await next(); + }, +); diff --git a/apps/ensapi/src/middleware/indexing-status.middleware.ts b/apps/ensapi/src/middleware/indexing-status.middleware.ts index 7bdb12cfe..4f7d4555b 100644 --- a/apps/ensapi/src/middleware/indexing-status.middleware.ts +++ b/apps/ensapi/src/middleware/indexing-status.middleware.ts @@ -3,17 +3,17 @@ import config from "@/config"; import pMemoize from "p-memoize"; import pReflect from "p-reflect"; -import { ENSNodeClient, TtlCache } from "@ensnode/ensnode-sdk"; +import { type Duration, ENSNodeClient, TtlCache } from "@ensnode/ensnode-sdk"; import { factory } from "@/lib/hono-factory"; const client = new ENSNodeClient({ url: config.ensIndexerUrl }); -const TTL_MS = 5_000; // 5 seconds +const TTL: Duration = 5; // 5 seconds -// memoizes the reflected indexing-status-with-retries promise across TTL_MS +// memoizes the reflected indexing-status-with-retries promise across TTL const fetcher = pMemoize(async () => pReflect(client.indexingStatus()), { - cache: new TtlCache(TTL_MS), + cache: new TtlCache(TTL), }); export type IndexingStatusVariables = { @@ -24,7 +24,7 @@ export type IndexingStatusVariables = { * Middleware that fetches and caches ENSIndexer indexing status. * * Retrieves the current indexing status from the configured ENSIndexer instance - * and caches it for TTL_MS duration to avoid excessive API calls. Sets the + * and caches it for TTL duration to avoid excessive API calls. Sets the * `indexingStatus` variable on the context for use by other middleware and handlers. 
*/ export const indexingStatusMiddleware = factory.createMiddleware(async (c, next) => { diff --git a/packages/ens-referrals/src/constants.ts b/packages/ens-referrals/src/constants.ts new file mode 100644 index 000000000..84c7f91ae --- /dev/null +++ b/packages/ens-referrals/src/constants.ts @@ -0,0 +1,22 @@ +/** + * Unix timestamp value + * + * Represents the number of seconds that have elapsed + * since January 1, 1970 (midnight UTC/GMT). + * + * Guaranteed to be an integer. May be zero or negative to represent a time at or + * before Jan 1, 1970. + */ +export type UnixTimestamp = number; + +/** + * Start date for the ENS Holiday Awards referral program. + * December 1, 2025 at 00:00:00 UTC + */ +export const ENS_HOLIDAY_AWARDS_START_DATE: UnixTimestamp = 1764547200; + +/** + * End date for the ENS Holiday Awards referral program. + * December 31, 2025 at 23:59:59 UTC + */ +export const ENS_HOLIDAY_AWARDS_END_DATE: UnixTimestamp = 1767225599; diff --git a/packages/ens-referrals/src/index.ts b/packages/ens-referrals/src/index.ts index 4303b8510..242ab0aa2 100644 --- a/packages/ens-referrals/src/index.ts +++ b/packages/ens-referrals/src/index.ts @@ -1 +1,2 @@ +export * from "./constants"; export * from "./referrer"; diff --git a/packages/ensnode-sdk/package.json b/packages/ensnode-sdk/package.json index 490942d10..5877f2728 100644 --- a/packages/ensnode-sdk/package.json +++ b/packages/ensnode-sdk/package.json @@ -57,6 +57,7 @@ "@ensnode/datasources": "workspace:*", "@namehash/ens-referrals": "workspace:*", "caip": "catalog:", + "date-fns": "catalog:", "zod": "catalog:" } } diff --git a/packages/ensnode-sdk/src/client.ts b/packages/ensnode-sdk/src/client.ts index 2309fd4ef..18a1a10c7 100644 --- a/packages/ensnode-sdk/src/client.ts +++ b/packages/ensnode-sdk/src/client.ts @@ -23,6 +23,12 @@ import type { ResolveRecordsResponse, } from "./api/types"; import { ClientError } from "./client-error"; +import { + deserializePaginatedAggregatedReferrersResponse, + type 
PaginatedAggregatedReferrersRequest, + type PaginatedAggregatedReferrersResponse, + type SerializedPaginatedAggregatedReferrersResponse, +} from "./ensanalytics"; import { deserializeENSApiPublicConfig, type SerializedENSApiPublicConfig } from "./ensapi"; import type { ResolverRecordsSelection } from "./resolution"; @@ -44,6 +50,7 @@ export interface ClientOptions { * * Provides access to the following ENSNode APIs: * - Resolution API + * - ENSAnalytics API * - 🚧 Configuration API * - 🚧 Indexing Status API * @@ -356,6 +363,67 @@ export class ENSNodeClient { return deserializeIndexingStatusResponse(responseData as SerializedIndexingStatusResponse); } + /** + * Fetch Paginated Aggregated Referrers + * + * Retrieves a paginated list of aggregated referrer metrics with contribution percentages. + * Each referrer's contribution is calculated as a percentage of the grand totals across all referrers. + * + * @param request - Pagination parameters + * @param request.page - The page number to retrieve (1-indexed, default: 1) + * @param request.itemsPerPage - Number of items per page (default: 25, max: 100) + * @returns {PaginatedAggregatedReferrersResponse} + * + * @throws if the ENSNode request fails + * @throws if the ENSNode API returns an error response + * @throws if the ENSNode response breaks required invariants + * + * @example + * ```typescript + * // Get first page with default page size (25 items) + * const response = await client.getAggregatedReferrers(); + * if (response.responseCode === 'ok') { + * console.log(response.data.referrers); + * console.log(`Page ${response.data.paginationParams.page} of ${Math.ceil(response.data.total / response.data.paginationParams.itemsPerPage)}`); + * } + * ``` + * + * @example + * ```typescript + * // Get second page with 50 items per page + * const response = await client.getAggregatedReferrers({ page: 2, itemsPerPage: 50 }); + * ``` + */ + async getAggregatedReferrers( + request?: PaginatedAggregatedReferrersRequest, + ): 
Promise<PaginatedAggregatedReferrersResponse> { + const url = new URL(`/api/ensanalytics/aggregated-referrers`, this.options.url); + + if (request?.page) url.searchParams.set("page", request.page.toString()); + if (request?.itemsPerPage) + url.searchParams.set("itemsPerPage", request.itemsPerPage.toString()); + + const response = await fetch(url); + + // ENSNode API should always allow parsing a response as JSON object. + // If for some reason it's not the case, throw an error. + let responseData: unknown; + try { + responseData = await response.json(); + } catch { + throw new Error("Malformed response data: invalid JSON"); + } + + // The API can return errors with 500 status, but they're still in the + // PaginatedAggregatedReferrersResponse format with responseCode: 'error' + // So we don't need to check response.ok here, just deserialize and let + // the caller handle the responseCode + + return deserializePaginatedAggregatedReferrersResponse( + responseData as SerializedPaginatedAggregatedReferrersResponse, + ); + } + + /** + * Fetch ENSNode Registrar Actions + * diff --git a/packages/ensnode-sdk/src/ensanalytics/deserialize.ts b/packages/ensnode-sdk/src/ensanalytics/deserialize.ts new file mode 100644 index 000000000..da90bb6c0 --- /dev/null +++ b/packages/ensnode-sdk/src/ensanalytics/deserialize.ts @@ -0,0 +1,29 @@ +import { prettifyError } from "zod/v4"; + +import type { SerializedPaginatedAggregatedReferrersResponse } from "./serialized-types"; +import type { PaginatedAggregatedReferrersResponse } from "./types"; +import { makePaginatedAggregatedReferrersResponseSchema } from "./zod-schemas"; + +/** + * Deserialize a {@link PaginatedAggregatedReferrersResponse} object. + * + * Note: While the serialized and deserialized types are identical (all fields + * are primitives), this function performs critical validation using Zod schemas + * to enforce invariants on the data. This ensures data integrity when receiving + * responses from the API. 
+ */ +export function deserializePaginatedAggregatedReferrersResponse( + maybeResponse: SerializedPaginatedAggregatedReferrersResponse, + valueLabel?: string, +): PaginatedAggregatedReferrersResponse { + const schema = makePaginatedAggregatedReferrersResponseSchema(valueLabel); + const parsed = schema.safeParse(maybeResponse); + + if (parsed.error) { + throw new Error( + `Cannot deserialize PaginatedAggregatedReferrersResponse:\n${prettifyError(parsed.error)}\n`, + ); + } + + return parsed.data; +} diff --git a/packages/ensnode-sdk/src/ensanalytics/index.ts b/packages/ensnode-sdk/src/ensanalytics/index.ts new file mode 100644 index 000000000..c80c44518 --- /dev/null +++ b/packages/ensnode-sdk/src/ensanalytics/index.ts @@ -0,0 +1,4 @@ +export * from "./deserialize"; +export * from "./serialize"; +export * from "./serialized-types"; +export * from "./types"; diff --git a/packages/ensnode-sdk/src/ensanalytics/serialize.ts b/packages/ensnode-sdk/src/ensanalytics/serialize.ts new file mode 100644 index 000000000..1aaca0261 --- /dev/null +++ b/packages/ensnode-sdk/src/ensanalytics/serialize.ts @@ -0,0 +1,25 @@ +import type { SerializedPaginatedAggregatedReferrersResponse } from "./serialized-types"; +import { + type PaginatedAggregatedReferrersResponse, + PaginatedAggregatedReferrersResponseCodes, +} from "./types"; + +/** + * Serialize a {@link PaginatedAggregatedReferrersResponse} object. + * + * Note: Since all fields in PaginatedAggregatedReferrersResponse are already + * serializable primitives, this function performs an identity transformation. + * It exists to maintain consistency with the serialization pattern used + * throughout the codebase. 
+ */ +export function serializePaginatedAggregatedReferrersResponse( + response: PaginatedAggregatedReferrersResponse, +): SerializedPaginatedAggregatedReferrersResponse { + switch (response.responseCode) { + case PaginatedAggregatedReferrersResponseCodes.Ok: + return response; + + case PaginatedAggregatedReferrersResponseCodes.Error: + return response; + } +} diff --git a/packages/ensnode-sdk/src/ensanalytics/serialized-types.ts b/packages/ensnode-sdk/src/ensanalytics/serialized-types.ts new file mode 100644 index 000000000..4cc6bbaf6 --- /dev/null +++ b/packages/ensnode-sdk/src/ensanalytics/serialized-types.ts @@ -0,0 +1,28 @@ +import type { + PaginatedAggregatedReferrersResponse, + PaginatedAggregatedReferrersResponseError, + PaginatedAggregatedReferrersResponseOk, +} from "./types"; + +/** + * Serialized representation of {@link PaginatedAggregatedReferrersResponseError}. + * + * Note: All fields are already serializable, so this type is identical to the source type. + */ +export type SerializedPaginatedAggregatedReferrersResponseError = + PaginatedAggregatedReferrersResponseError; + +/** + * Serialized representation of {@link PaginatedAggregatedReferrersResponseOk}. + * + * Note: All fields are already serializable, so this type is identical to the source type. + */ +export type SerializedPaginatedAggregatedReferrersResponseOk = + PaginatedAggregatedReferrersResponseOk; + +/** + * Serialized representation of {@link PaginatedAggregatedReferrersResponse}. 
+ */ +export type SerializedPaginatedAggregatedReferrersResponse = + | SerializedPaginatedAggregatedReferrersResponseOk + | SerializedPaginatedAggregatedReferrersResponseError; diff --git a/packages/ensnode-sdk/src/ensanalytics/types.ts b/packages/ensnode-sdk/src/ensanalytics/types.ts new file mode 100644 index 000000000..bd0e82e6e --- /dev/null +++ b/packages/ensnode-sdk/src/ensanalytics/types.ts @@ -0,0 +1,166 @@ +import type { Address } from "viem"; + +import type { Duration, UnixTimestamp } from "../shared"; + +/** + * The default number of items per page for paginated aggregated referrer queries. + */ +export const ITEMS_PER_PAGE_DEFAULT = 25; + +/** + * The maximum number of items per page for paginated aggregated referrer queries. + */ +export const ITEMS_PER_PAGE_MAX = 100; + +/** + * Represents the aggregated metrics for a single referrer. + */ +export interface AggregatedReferrerMetrics { + /** The Ethereum address of the referrer */ + referrer: Address; + + /** + * The total number of qualified referrals made by this referrer + * @invariant Guaranteed to be a positive integer (> 0) + */ + totalReferrals: number; + + /** + * The total incremental duration (in seconds) of all referrals made by this referrer + * @invariant Guaranteed to be a non-negative integer (>= 0), measured in seconds + */ + totalIncrementalDuration: Duration; +} + +/** + * Represents the aggregated metrics for a single referrer with contribution percentages. + * Extends {@link AggregatedReferrerMetrics} with additional fields that show the referrer's + * contribution as a percentage of the grand totals. + */ +export interface AggregatedReferrerMetricsContribution extends AggregatedReferrerMetrics { + /** + * The referrer's contribution to the grand total referrals as a decimal between 0 and 1 (inclusive). 
+ * Calculated as: totalReferrals / grandTotalReferrals + * @invariant 0 <= totalReferralsContribution <= 1 + */ + totalReferralsContribution: number; + + /** + * The referrer's contribution to the grand total incremental duration as a decimal between 0 and 1 (inclusive). + * Calculated as: totalIncrementalDuration / grandTotalIncrementalDuration + * @invariant 0 <= totalIncrementalDurationContribution <= 1 + */ + totalIncrementalDurationContribution: number; +} + +/** + * Base pagination parameters for paginated queries. + */ +export interface PaginationParams { + /** + * Requested page number (1-indexed) + * @invariant Must be a positive integer (>= 1) + * @default 1 + */ + page?: number; + + /** + * Maximum number of items per page + * @invariant Must be a positive integer (>= 1) and less than or equal to {@link ITEMS_PER_PAGE_MAX} + * @default {@link ITEMS_PER_PAGE_DEFAULT} + */ + itemsPerPage?: number; +} + +/** + * Request parameters for paginated aggregated referrers query. + */ +export interface PaginatedAggregatedReferrersRequest extends PaginationParams {} + +/** + * Paginated aggregated referrers data with metadata. + */ +export interface PaginatedAggregatedReferrers { + /** + * Array of aggregated referrers for the current page with contribution percentages + * @invariant Array may be empty for the first page if there are no qualified referrers. 
+ */ + referrers: AggregatedReferrerMetricsContribution[]; + + /** + * Total number of aggregated referrers across all pages + * @invariant Guaranteed to be a non-negative integer (>= 0) + */ + total: number; + + /** + * Pagination parameters + * @invariant Stores the pagination parameters from the request + */ + paginationParams: PaginationParams; + + /** + * Indicates whether there is a next page available + * @invariant true if and only if (page * itemsPerPage < total) + */ + hasNext: boolean; + + /** + * Indicates whether there is a previous page available + * @invariant true if and only if (page > 1) + */ + hasPrev: boolean; + + /** Unix timestamp of when the aggregated referrers data was last updated */ + updatedAt: UnixTimestamp; +} + +/** + * A status code for paginated aggregated referrers API responses. + */ +export const PaginatedAggregatedReferrersResponseCodes = { + /** + * Represents that the aggregated referrers data is available. + * @note The response may contain an empty array for the first page if there are no qualified referrers. + * When the array is empty, total will be 0, page will be 1, and both hasNext and hasPrev will be false. + */ + Ok: "ok", + + /** + * Represents that the aggregated referrers data is not available. + */ + Error: "error", +} as const; + +/** + * The derived string union of possible {@link PaginatedAggregatedReferrersResponseCodes}. + */ +export type PaginatedAggregatedReferrersResponseCode = + (typeof PaginatedAggregatedReferrersResponseCodes)[keyof typeof PaginatedAggregatedReferrersResponseCodes]; + +/** + * A paginated aggregated referrers response when the data is available. + */ +export type PaginatedAggregatedReferrersResponseOk = { + responseCode: typeof PaginatedAggregatedReferrersResponseCodes.Ok; + data: PaginatedAggregatedReferrers; +}; + +/** + * A paginated aggregated referrers response when the data is not available. 
+ */ +export type PaginatedAggregatedReferrersResponseError = { + responseCode: typeof PaginatedAggregatedReferrersResponseCodes.Error; + error: string; + errorMessage: string; +}; + +/** + * A paginated aggregated referrers API response. + * + * Use the `responseCode` field to determine the specific type interpretation + * at runtime. + */ +export type PaginatedAggregatedReferrersResponse = + | PaginatedAggregatedReferrersResponseOk + | PaginatedAggregatedReferrersResponseError; diff --git a/packages/ensnode-sdk/src/ensanalytics/zod-schemas.ts b/packages/ensnode-sdk/src/ensanalytics/zod-schemas.ts new file mode 100644 index 000000000..5a1e153f0 --- /dev/null +++ b/packages/ensnode-sdk/src/ensanalytics/zod-schemas.ts @@ -0,0 +1,142 @@ +/** + * All zod schemas we define must remain internal implementation details. + * We want the freedom to move away from zod in the future without impacting + * any users of the ensnode-sdk package. + * + * The only way to share Zod schemas is to re-export them from + * `./src/internal.ts` file. 
+ */ +import z from "zod/v4"; + +import { + makeDurationSchema, + makeLowercaseAddressSchema, + makeNonNegativeIntegerSchema, + makePositiveIntegerSchema, + makeUnixTimestampSchema, +} from "../shared/zod-schemas"; +import { + ITEMS_PER_PAGE_DEFAULT, + ITEMS_PER_PAGE_MAX, + PaginatedAggregatedReferrersResponseCodes, +} from "./types"; + +/** + * Schema for AggregatedReferrerMetrics + */ +export const makeAggregatedReferrerMetricsSchema = ( + valueLabel: string = "AggregatedReferrerMetrics", +) => + z.object({ + referrer: makeLowercaseAddressSchema(`${valueLabel}.referrer`), + totalReferrals: makePositiveIntegerSchema(`${valueLabel}.totalReferrals`), + totalIncrementalDuration: makeDurationSchema(`${valueLabel}.totalIncrementalDuration`), + }); + +/** + * Schema for AggregatedReferrerMetricsContribution + */ +export const makeAggregatedReferrerMetricsContributionSchema = ( + valueLabel: string = "AggregatedReferrerMetricsContribution", +) => + makeAggregatedReferrerMetricsSchema(valueLabel).extend({ + totalReferralsContribution: z + .number({ + error: `${valueLabel}.totalReferralsContribution must be a number`, + }) + .min(0, `${valueLabel}.totalReferralsContribution must be >= 0`) + .max(1, `${valueLabel}.totalReferralsContribution must be <= 1`), + totalIncrementalDurationContribution: z + .number({ + error: `${valueLabel}.totalIncrementalDurationContribution must be a number`, + }) + .min(0, `${valueLabel}.totalIncrementalDurationContribution must be >= 0`) + .max(1, `${valueLabel}.totalIncrementalDurationContribution must be <= 1`), + }); + +/** + * Schema for PaginationParams + */ +export const makePaginationParamsSchema = (valueLabel: string = "PaginationParams") => + z.object({ + page: makePositiveIntegerSchema(`${valueLabel}.page`).default(1), + itemsPerPage: makePositiveIntegerSchema(`${valueLabel}.itemsPerPage`) + .max(ITEMS_PER_PAGE_MAX, `${valueLabel}.itemsPerPage must not exceed ${ITEMS_PER_PAGE_MAX}`) + .default(ITEMS_PER_PAGE_DEFAULT), + }); + +/** + 
* Schema for PaginatedAggregatedReferrers + */ +export const makePaginatedAggregatedReferrersSchema = ( + valueLabel: string = "PaginatedAggregatedReferrers", +) => + z + .object({ + referrers: z.array( + makeAggregatedReferrerMetricsContributionSchema(`${valueLabel}.referrers[item]`), + ), + total: makeNonNegativeIntegerSchema(`${valueLabel}.total`), + paginationParams: makePaginationParamsSchema(`${valueLabel}.paginationParams`), + hasNext: z.boolean(), + hasPrev: z.boolean(), + updatedAt: makeUnixTimestampSchema(`${valueLabel}.updatedAt`), + }) + .check((ctx) => { + const { paginationParams, hasNext, hasPrev, total } = ctx.value; + + // Validate hasPrev + const expectedHasPrev = paginationParams.page > 1; + if (hasPrev !== expectedHasPrev) { + ctx.issues.push({ + code: "custom", + message: `${valueLabel}.hasPrev must be ${expectedHasPrev} when page is ${paginationParams.page}`, + input: ctx.value, + }); + } + + // Validate hasNext + const endIndex = paginationParams.page * paginationParams.itemsPerPage; + const expectedHasNext = endIndex < total; + if (hasNext !== expectedHasNext) { + ctx.issues.push({ + code: "custom", + message: `${valueLabel}.hasNext must be ${expectedHasNext} when page=${paginationParams.page}, itemsPerPage=${paginationParams.itemsPerPage}, total=${total}`, + input: ctx.value, + }); + } + }); + +/** + * Schema for {@link PaginatedAggregatedReferrersResponseOk} + */ +export const makePaginatedAggregatedReferrersResponseOkSchema = ( + valueLabel: string = "PaginatedAggregatedReferrersResponse", +) => + z.object({ + responseCode: z.literal(PaginatedAggregatedReferrersResponseCodes.Ok), + data: makePaginatedAggregatedReferrersSchema(`${valueLabel}.data`), + }); + +/** + * Schema for {@link PaginatedAggregatedReferrersResponseError} + */ +export const makePaginatedAggregatedReferrersResponseErrorSchema = ( + _valueLabel: string = "PaginatedAggregatedReferrersResponse", +) => + z.object({ + responseCode: 
z.literal(PaginatedAggregatedReferrersResponseCodes.Error), + error: z.string(), + errorMessage: z.string(), + }); + +/** + * Schema for {@link PaginatedAggregatedReferrersResponse} + */ +export const makePaginatedAggregatedReferrersResponseSchema = ( + valueLabel: string = "PaginatedAggregatedReferrersResponse", +) => + z.union([ + makePaginatedAggregatedReferrersResponseOkSchema(valueLabel), + makePaginatedAggregatedReferrersResponseErrorSchema(valueLabel), + ]); diff --git a/packages/ensnode-sdk/src/index.ts b/packages/ensnode-sdk/src/index.ts index aba4fcb95..7e43a3bb8 100644 --- a/packages/ensnode-sdk/src/index.ts +++ b/packages/ensnode-sdk/src/index.ts @@ -2,6 +2,7 @@ export * from "./api"; export { type ClientOptions, ENSNodeClient } from "./client"; export * from "./client-error"; export * from "./ens"; +export * from "./ensanalytics"; export * from "./ensapi"; export * from "./ensindexer"; export * from "./ensrainbow"; diff --git a/packages/ensnode-sdk/src/registrars/ethnames-subregistry.ts b/packages/ensnode-sdk/src/registrars/ethnames-subregistry.ts new file mode 100644 index 000000000..4a2a920da --- /dev/null +++ b/packages/ensnode-sdk/src/registrars/ethnames-subregistry.ts @@ -0,0 +1,28 @@ +import { DatasourceNames, type ENSNamespaceId, maybeGetDatasource } from "@ensnode/datasources"; + +import type { AccountId } from "../shared"; + +/** + * Gets the SubregistryId (an AccountId) of the Ethnames Subregistry contract (this is the + * "BaseRegistrar" contract for direct subnames of .eth) for the provided namespace. + * + * @param namespace The ENS namespace to get the Ethnames Subregistry ID for + * @returns The AccountId for the Ethnames Subregistry contract for the provided namespace. + * @throws Error if the contract is not found for the given namespace. 
+ */ +export function getEthnamesSubregistryId(namespace: ENSNamespaceId): AccountId { + const datasource = maybeGetDatasource(namespace, DatasourceNames.ENSRoot); + if (!datasource) { + throw new Error(`Datasource not found for ${namespace} ${DatasourceNames.ENSRoot}`); + } + + const address = datasource.contracts.BaseRegistrar?.address; + if (address === undefined || Array.isArray(address)) { + throw new Error(`BaseRegistrar contract not found or has multiple addresses for ${namespace}`); + } + + return { + chainId: datasource.chain.id, + address, + }; +} diff --git a/packages/ensnode-sdk/src/registrars/index.ts b/packages/ensnode-sdk/src/registrars/index.ts index d59bd32c9..319172509 100644 --- a/packages/ensnode-sdk/src/registrars/index.ts +++ b/packages/ensnode-sdk/src/registrars/index.ts @@ -1,3 +1,4 @@ +export * from "./ethnames-subregistry"; export * from "./registrar-action"; export * from "./registration-lifecycle"; export * from "./subregistry"; diff --git a/packages/ensnode-sdk/src/shared/cache.test.ts b/packages/ensnode-sdk/src/shared/cache.test.ts index ef02b5bcb..17d721dc7 100644 --- a/packages/ensnode-sdk/src/shared/cache.test.ts +++ b/packages/ensnode-sdk/src/shared/cache.test.ts @@ -1,6 +1,6 @@ import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; -import { LruCache, TtlCache } from "./cache"; +import { LruCache, staleWhileRevalidate, TtlCache } from "./cache"; describe("LruCache", () => { it("throws Error if capacity is not an integer", () => { @@ -87,22 +87,8 @@ describe("TtlCache", () => { vi.useRealTimers(); }); - it("throws Error if ttlMs is not a positive integer", () => { - expect(() => { - new TtlCache(0); - }).toThrow(); - - expect(() => { - new TtlCache(-1); - }).toThrow(); - - expect(() => { - new TtlCache(1.5); - }).toThrow(); - }); - it("stores and retrieves values within TTL", () => { - const ttl = new TtlCache(1000); + const ttl = new TtlCache(1); // 1 second ttl.set("key1", "value1"); 
expect(ttl.get("key1")).toBe("value1"); @@ -110,44 +96,44 @@ describe("TtlCache", () => { }); it("expires values after TTL", () => { - const ttl = new TtlCache(1000); + const ttl = new TtlCache(1); // 1 second ttl.set("key1", "value1"); expect(ttl.get("key1")).toBe("value1"); - vi.advanceTimersByTime(1001); + vi.advanceTimersByTime(1001); // Advance by 1001ms (1 second + 1ms) expect(ttl.get("key1")).toBeUndefined(); expect(ttl.size).toBe(0); }); it("has method returns true for existing non-expired values", () => { - const ttl = new TtlCache(1000); + const ttl = new TtlCache(1); // 1 second ttl.set("key1", "value1"); expect(ttl.has("key1")).toBe(true); }); it("has method returns false for non-existent keys", () => { - const ttl = new TtlCache(1000); + const ttl = new TtlCache(1); // 1 second expect(ttl.has("nonexistent")).toBe(false); }); it("has method returns false for expired values", () => { - const ttl = new TtlCache(1000); + const ttl = new TtlCache(1); // 1 second ttl.set("key1", "value1"); expect(ttl.has("key1")).toBe(true); - vi.advanceTimersByTime(1001); + vi.advanceTimersByTime(1001); // Advance by 1001ms (1 second + 1ms) expect(ttl.has("key1")).toBe(false); expect(ttl.size).toBe(0); }); it("delete method removes values and returns true if key existed", () => { - const ttl = new TtlCache(1000); + const ttl = new TtlCache(1); // 1 second ttl.set("key1", "value1"); expect(ttl.has("key1")).toBe(true); @@ -160,14 +146,14 @@ describe("TtlCache", () => { }); it("delete method returns false if key does not exist", () => { - const ttl = new TtlCache(1000); + const ttl = new TtlCache(1); // 1 second const deleted = ttl.delete("nonexistent"); expect(deleted).toBe(false); }); it("clears all cached values", () => { - const ttl = new TtlCache(1000); + const ttl = new TtlCache(1); // 1 second ttl.set("key1", "value1"); ttl.set("key2", "value2"); @@ -180,22 +166,22 @@ describe("TtlCache", () => { }); it("capacity returns MAX_SAFE_INTEGER", () => { - const ttl = new 
TtlCache(1000); + const ttl = new TtlCache(1); // 1 second expect(ttl.capacity).toBe(Number.MAX_SAFE_INTEGER); }); it("automatically cleans up expired entries on size access", () => { - const ttl = new TtlCache(1000); + const ttl = new TtlCache(2); // 2 seconds ttl.set("key1", "value1"); ttl.set("key2", "value2"); expect(ttl.size).toBe(2); - vi.advanceTimersByTime(500); + vi.advanceTimersByTime(1000); // Advance by 1000ms (1 second) ttl.set("key3", "value3"); expect(ttl.size).toBe(3); - vi.advanceTimersByTime(600); + vi.advanceTimersByTime(1100); // Advance by 1100ms (1.1 seconds) - total 2.1 seconds expect(ttl.size).toBe(1); expect(ttl.get("key1")).toBeUndefined(); @@ -204,21 +190,262 @@ describe("TtlCache", () => { }); it("refreshes TTL on each set operation", () => { - const ttl = new TtlCache(1000); + const ttl = new TtlCache(2); // 2 seconds ttl.set("key1", "value1"); - vi.advanceTimersByTime(500); + vi.advanceTimersByTime(1000); // Advance by 1000ms (1 second) ttl.set("key1", "value1-updated"); - vi.advanceTimersByTime(600); + vi.advanceTimersByTime(1000); // Advance by 1000ms (1 second) - total 2 seconds from first set, 1 second from second set expect(ttl.get("key1")).toBe("value1-updated"); expect(ttl.has("key1")).toBe(true); - vi.advanceTimersByTime(500); + vi.advanceTimersByTime(1100); // Advance by 1100ms (1.1 seconds) - total 2.1 seconds from second set expect(ttl.get("key1")).toBeUndefined(); expect(ttl.has("key1")).toBe(false); }); }); + +describe("staleWhileRevalidate", () => { + beforeEach(() => { + vi.useFakeTimers(); + }); + + afterEach(() => { + vi.useRealTimers(); + }); + + it("fetches data on first call", async () => { + const fn = vi.fn(async () => "value1"); + const cached = staleWhileRevalidate(fn, 1); // 1 second + + const result = await cached(); + + expect(result).toBe("value1"); + expect(fn).toHaveBeenCalledTimes(1); + }); + + it("returns cached data within TTL without refetching", async () => { + const fn = vi.fn(async () => "value1"); 
+ const cached = staleWhileRevalidate(fn, 2); // 2 seconds + + await cached(); + vi.advanceTimersByTime(1000); // Advance by 1000ms (1 second) + const result = await cached(); + + expect(result).toBe("value1"); + expect(fn).toHaveBeenCalledTimes(1); + }); + + it("returns stale data immediately after TTL expires", async () => { + const fn = vi.fn(async () => "value1"); + const cached = staleWhileRevalidate(fn, 2); // 2 seconds + + await cached(); + vi.advanceTimersByTime(3000); // Advance by 3000ms (3 seconds) - stale after >2 seconds + const result = await cached(); + + expect(result).toBe("value1"); + }); + + it("triggers background revalidation after TTL expires", async () => { + let value = "value1"; + const fn = vi.fn(async () => value); + const cached = staleWhileRevalidate(fn, 2); // 2 seconds + + await cached(); + expect(fn).toHaveBeenCalledTimes(1); + + vi.advanceTimersByTime(3000); // Advance by 3000ms (3 seconds) - stale after >2 seconds + value = "value2"; + + // This should return stale data but trigger revalidation + const result1 = await cached(); + expect(result1).toBe("value1"); + expect(fn).toHaveBeenCalledTimes(2); + + // Wait for revalidation to complete + await vi.runAllTimersAsync(); + + // Next call should have fresh data + const result2 = await cached(); + expect(result2).toBe("value2"); + }); + + it("does not trigger multiple revalidations concurrently", async () => { + let resolveRevalidation: () => void; + const revalidationPromise = new Promise((resolve) => { + resolveRevalidation = () => resolve("value2"); + }); + + let callCount = 0; + const fn = vi.fn(async () => { + callCount++; + if (callCount === 1) return "value1"; + return revalidationPromise; + }); + + const cached = staleWhileRevalidate(fn, 2); // 2 seconds + + await cached(); + vi.advanceTimersByTime(3000); // Advance by 3000ms (3 seconds) - stale after >2 seconds + + // Multiple calls after stale should not trigger multiple revalidations + const promise1 = cached(); + const 
promise2 = cached(); + const promise3 = cached(); + + const results = await Promise.all([promise1, promise2, promise3]); + + // All should return stale value + expect(results).toEqual(["value1", "value1", "value1"]); + + // Should only call fn twice: once for initial, once for revalidation + expect(fn).toHaveBeenCalledTimes(2); + + // Complete revalidation + resolveRevalidation!(); + await vi.runAllTimersAsync(); + }); + + it("serves stale data while revalidation is in progress", async () => { + let resolveRevalidation: (value: string) => void; + const revalidationPromise = new Promise((resolve) => { + resolveRevalidation = resolve; + }); + + let callCount = 0; + const fn = vi.fn(async () => { + callCount++; + if (callCount === 1) return "value1"; + return revalidationPromise; + }); + + const cached = staleWhileRevalidate(fn, 2); // 2 seconds + + await cached(); + vi.advanceTimersByTime(3000); // Advance by 3000ms (3 seconds) - stale after >2 seconds + + // First call after TTL triggers revalidation + const result1 = await cached(); + expect(result1).toBe("value1"); + + // Additional calls while revalidating should still return stale + const result2 = await cached(); + const result3 = await cached(); + + expect(result2).toBe("value1"); + expect(result3).toBe("value1"); + expect(fn).toHaveBeenCalledTimes(2); + + // Complete revalidation + resolveRevalidation!("value2"); + await vi.runAllTimersAsync(); + + // Now should have fresh data + const result4 = await cached(); + expect(result4).toBe("value2"); + }); + + it("handles revalidation errors gracefully by keeping stale data", async () => { + let shouldError = false; + const fn = vi.fn(async () => { + if (shouldError) { + throw new Error("Revalidation failed"); + } + return "value1"; + }); + + const cached = staleWhileRevalidate(fn, 2); // 2 seconds + + await cached(); + vi.advanceTimersByTime(3000); // Advance by 3000ms (3 seconds) - stale after >2 seconds + + shouldError = true; + + // Should return stale data 
even though revalidation will fail + const result1 = await cached(); + expect(result1).toBe("value1"); + + // Wait for failed revalidation + await vi.runAllTimersAsync(); + + // Should still serve stale data + const result2 = await cached(); + expect(result2).toBe("value1"); + + // Should have attempted revalidation twice (once for each call after stale) + expect(fn).toHaveBeenCalledTimes(3); + }); + + it("allows retry after failed revalidation", async () => { + let shouldError = true; + const fn = vi.fn(async () => { + if (shouldError) { + throw new Error("Revalidation failed"); + } + return "value2"; + }); + + const cached = staleWhileRevalidate(fn, 2); // 2 seconds + + // Initial fetch + shouldError = false; + await cached(); + + vi.advanceTimersByTime(3000); // Advance by 3000ms (3 seconds) - stale after >2 seconds + shouldError = true; + + // First revalidation attempt fails + await cached(); + await vi.runAllTimersAsync(); + + // Subsequent call should retry revalidation + shouldError = false; + await cached(); + await vi.runAllTimersAsync(); + + // Should now have fresh data + const result = await cached(); + expect(result).toBe("value2"); + }); + + it("returns null when initial fetch fails with no cache", async () => { + const fn = vi.fn(async () => { + throw new Error("Initial fetch failed"); + }); + + const cached = staleWhileRevalidate(fn, 1); // 1 second + + // Initial fetch should fail and return null + const result = await cached(); + expect(result).toBeNull(); + expect(fn).toHaveBeenCalledTimes(1); + }); + + it("succeeds on retry after initial fetch failure", async () => { + let shouldError = true; + const fn = vi.fn(async () => { + if (shouldError) { + throw new Error("Initial fetch failed"); + } + return "value1"; + }); + + const cached = staleWhileRevalidate(fn, 1); // 1 second + + // Initial fetch fails and returns null + const result1 = await cached(); + expect(result1).toBeNull(); + expect(fn).toHaveBeenCalledTimes(1); + + // Retry should 
succeed + shouldError = false; + const result2 = await cached(); + expect(result2).toBe("value1"); + expect(fn).toHaveBeenCalledTimes(2); + }); +}); diff --git a/packages/ensnode-sdk/src/shared/cache.ts b/packages/ensnode-sdk/src/shared/cache.ts index c0c404f86..0f482e405 100644 --- a/packages/ensnode-sdk/src/shared/cache.ts +++ b/packages/ensnode-sdk/src/shared/cache.ts @@ -1,3 +1,8 @@ +import { getUnixTime } from "date-fns"; + +import { addDuration, durationBetween } from "./datetime"; +import type { Duration, UnixTimestamp } from "./types"; + /** * Cache that maps from string -> ValueType. */ @@ -102,7 +107,7 @@ export class LruCache implements Cache { value: T; - expiresAt: number; + expiresAt: UnixTimestamp; } /** @@ -112,26 +117,19 @@ interface CacheEntry { */ export class TtlCache implements Cache { private readonly _cache = new Map>(); - private readonly _ttlMs: number; + private readonly _ttl: Duration; /** * Create a new TTL cache with the given TTL. * - * @param ttlMs Time-to-live in milliseconds. Items expire after this duration. - * @throws Error if ttlMs is not positive. + * @param ttl Time-to-live duration in seconds. Items expire after this duration. 
*/ - public constructor(ttlMs: number) { - if (!Number.isInteger(ttlMs) || ttlMs <= 0) { - throw new Error( - `TtlCache requires ttlMs to be a positive integer but a ttlMs of ${ttlMs} was requested.`, - ); - } - - this._ttlMs = ttlMs; + public constructor(ttl: Duration) { + this._ttl = ttl; } private _cleanup(): void { - const now = Date.now(); + const now = getUnixTime(new Date()); for (const [key, entry] of this._cache.entries()) { if (entry.expiresAt <= now) { this._cache.delete(key); @@ -142,7 +140,7 @@ export class TtlCache implements Cache implements Cache implements Cache implements Cache { + /** + * The cached value of type T + */ + value: T; + + /** + * Unix timestamp indicating when the value was last successfully updated + */ + updatedAt: UnixTimestamp; + + /** + * Optional promise of the in-progress revalidation attempt. + * If undefined, no revalidation attempt is in-progress. + * If defined, a revalidation attempt is already in-progress. + * Used to enforce no concurrent revalidation attempts. + */ + revalidating?: Promise; +} + +/** + * Stale-While-Revalidate (SWR) cache wrapper for async functions. + * + * This caching strategy serves cached data immediately (even if stale) while + * asynchronously revalidating the cache in the background. This provides: + * - Sub-millisecond response times (after first fetch) + * - Always available data (serves stale data during revalidation) + * - Automatic background updates + * + * Error Handling: + * - If the function throws an error and a cached value exists, the stale cached value is returned. + * - If the function throws an error and no cached value exists, null is returned. + * - Background revalidation errors are handled gracefully and don't interrupt serving stale data. 
+ * + * @example + * ```typescript + * const fetchExpensiveData = async () => { + * const response = await fetch('/api/data'); + * return response.json(); + * }; + * + * const cachedFetch = staleWhileRevalidate(fetchExpensiveData, 60); // 60 second TTL + * + * // First call: fetches data (slow) + * const data1 = await cachedFetch(); + * + * // Within TTL: returns cached data (fast) + * const data2 = await cachedFetch(); + * + * // After TTL: returns stale data immediately, revalidates in background + * const data3 = await cachedFetch(); // Still fast! + * ``` + * + * @param fn The async function to wrap with SWR caching + * @param ttl Time-to-live duration in seconds. After this duration, data is considered stale + * @returns A cached version of the function with SWR semantics. Returns null if fn throws an error and no cached value is available. + * + * @link https://web.dev/stale-while-revalidate/ + * @link https://datatracker.ietf.org/doc/html/rfc5861 + */ +export function staleWhileRevalidate( + fn: () => Promise, + ttl: Duration, +): () => Promise { + let cache: SWRCache | null = null; + let initialBuild: Promise | null = null; + + return async (): Promise => { + // No cache, attempt to successfully build the first cache (any number of attempts to build the cache may have been attempted and failed previously) + if (!cache) { + // If initial build already in progress, wait for it + if (initialBuild) { + return initialBuild; + } + + // Start initial build + initialBuild = fn() + .then((value) => { + cache = { value, updatedAt: getUnixTime(new Date()) }; + initialBuild = null; + return value; + }) + .catch((_error) => { + initialBuild = null; + // No cached value available, return null + return null; + }); + + return initialBuild; + } + + const isStale = durationBetween(cache.updatedAt, getUnixTime(new Date())) > ttl; + + // Fresh cache, return immediately + if (!isStale) return cache.value; + + // Stale cache, but revalidation already in progress + if 
(cache.revalidating) return cache.value; + + // Stale cache, kick off revalidation in background + const revalidationPromise = fn() + .then((value) => { + cache = { value, updatedAt: getUnixTime(new Date()) }; + return value; + }) + .catch(() => { + // On error, clear revalidating flag so next request can retry + // Keep serving stale data, swallow error (background revalidation) + if (cache) { + cache.revalidating = undefined; + } + // Don't re-throw - this is background revalidation + // Caller can add their own error handling in the wrapped function + return cache?.value as T; + }); + + cache.revalidating = revalidationPromise; + + // Return stale value immediately + return cache.value; + }; +} diff --git a/packages/ensnode-sdk/src/shared/datetime.test.ts b/packages/ensnode-sdk/src/shared/datetime.test.ts index cc444b0a1..e2d904397 100644 --- a/packages/ensnode-sdk/src/shared/datetime.test.ts +++ b/packages/ensnode-sdk/src/shared/datetime.test.ts @@ -1,6 +1,6 @@ import { describe, expect, it } from "vitest"; -import { durationBetween } from "./datetime"; +import { addDuration, durationBetween } from "./datetime"; describe("datetime", () => { describe("durationBetween()", () => { @@ -14,4 +14,17 @@ describe("datetime", () => { ); }); }); + + describe("addDuration()", () => { + it("adds duration to timestamp", () => { + expect(addDuration(1234, 100)).toEqual(1334); + expect(addDuration(1000, 500)).toEqual(1500); + }); + it("handles zero duration", () => { + expect(addDuration(1234, 0)).toEqual(1234); + }); + it("handles large duration values", () => { + expect(addDuration(1000000, 999999)).toEqual(1999999); + }); + }); }); diff --git a/packages/ensnode-sdk/src/shared/datetime.ts b/packages/ensnode-sdk/src/shared/datetime.ts index 870b05af2..ad95f3b43 100644 --- a/packages/ensnode-sdk/src/shared/datetime.ts +++ b/packages/ensnode-sdk/src/shared/datetime.ts @@ -1,4 +1,4 @@ -import { deserializeDuration } from "./deserialize"; +import { deserializeDuration, 
deserializeUnixTimestamp } from "./deserialize"; import type { Duration, UnixTimestamp } from "./types"; /** @@ -7,3 +7,10 @@ import type { Duration, UnixTimestamp } from "./types"; export function durationBetween(start: UnixTimestamp, end: UnixTimestamp): Duration { return deserializeDuration(end - start, "Duration"); } + +/** + * Add a duration to a timestamp. + */ +export function addDuration(timestamp: UnixTimestamp, duration: Duration): UnixTimestamp { + return deserializeUnixTimestamp(timestamp + duration, "UnixTimestamp"); +} diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 16d359e6d..96dbf6864 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -292,6 +292,9 @@ importers: '@hono/zod-validator': specifier: ^0.7.2 version: 0.7.4(hono@4.10.3)(zod@3.25.76) + '@namehash/ens-referrals': + specifier: workspace:* + version: link:../../packages/ens-referrals '@opentelemetry/api': specifier: ^1.9.0 version: 1.9.0(patch_hash=4b2adeefaf7c22f9987d0a125d69cab900719bec7ed7636648bea6947107033a) @@ -765,6 +768,9 @@ importers: caip: specifier: 'catalog:' version: 1.1.1 + date-fns: + specifier: 'catalog:' + version: 4.1.0 zod: specifier: 'catalog:' version: 3.25.76