Skip to main content

Documentation Index

Fetch the complete documentation index at: https://docs.tryprofound.com/llms.txt

Use this file to discover all available pages before exploring further.

The Citation Share tile has two views: a daily share line (your owned-domain Citation Share over time, with the previous-period overlay) and a daily volume count. Both come from the same API call — group by ["date", "root_domain"], request count and citation_share, then derive each series from the response.
Don’t average daily rows to derive a period score — they’re computed differently. See Conventions & gotchas.

How this example works

  1. Group by ["date", "root_domain"]. Each row is one (day, domain) pair with its own count and citation_share.
  2. Derive both series in one pass over the response:
    • Daily volume = sum of count across all rows for each date.
    • Daily owned-domain share = sum of citation_share across rows whose root_domain is in your owned set, per date. (These values are already averaged per AI model by the API, the same as the headline tile.)
  3. Repeat for the prior window to get the overlay line.
import os
from datetime import date, timedelta
from urllib.parse import urlparse
from collections import defaultdict
from profound import Profound

# Authenticated API client. The key is read from the environment so it is
# never hard-coded in source; raises KeyError if PROFOUND_API_KEY is unset.
client = Profound(api_key=os.environ["PROFOUND_API_KEY"])

# What to fetch — replace with your own values.
CATEGORY_NAME = "<your-category-name>"  # must match an existing category name (case-insensitive)
DAYS          = 7  # window length in days; the prior window has the same length
INCLUSIVE_END = date(2026, 5, 11)    # the last day of your current window


def normalize(host_or_url):
    """Reduce a bare hostname or full URL to a comparable hostname.

    Strips the scheme and a *leading* "www." label so that, e.g.,
    "https://www.Example.com/" and "example.com" compare equal
    (urlparse lowercases the hostname for us).

    Args:
        host_or_url: A bare domain ("example.com") or a full URL.

    Returns:
        The normalized hostname, or "" when no hostname can be parsed.
    """
    p = urlparse(host_or_url if "://" in host_or_url else f"https://{host_or_url}")
    # removeprefix only drops a leading "www." — str.replace("www.", "")
    # would also mangle hosts such as "mywww.example.com". No trailing-slash
    # handling is needed: urlparse(...).hostname never contains a slash.
    return (p.hostname or "").removeprefix("www.")


def get_owned_domains(category_id):
    """Return the normalized domains of every asset flagged is_owned.

    Covers each owned asset's primary website plus all of its alternate
    domains, deduplicated into a set for O(1) membership tests later.
    """
    owned = set()
    for asset in client.organizations.categories.assets(category_id):
        if not asset.is_owned:
            continue
        candidates = [asset.website] if asset.website else []
        candidates.extend(asset.alternate_domains or [])
        owned.update(normalize(candidate) for candidate in candidates)
    return owned


def get_daily_citations(category_id, owned_domains, start, end):
    """Fetch one citations report and derive two daily series from it.

    A single API call grouped by (date, root_domain) yields both the
    total daily citation volume and the owned-domain share per day.

    Returns [(date, volume, owned_share), ...] sorted by date.
    """
    report = client.reports.citations(
        category_id=category_id,
        start_date=start.isoformat(),
        end_date=end.isoformat(),
        metrics=["count", "citation_share"],
        dimensions=["date", "root_domain"],
        date_interval="day",
        filters=[{"field": "prompt_type", "operator": "is", "value": "visibility"}],
        pagination={"limit": 50000, "offset": 0},
    )
    # Row cells are positional; the order is echoed back in info.query,
    # so resolve each column's index before scanning the data.
    metric_cols = report.info.query["metrics"]
    dim_cols = report.info.query["dimensions"]
    count_at = metric_cols.index("count")
    share_at = metric_cols.index("citation_share")
    date_at = dim_cols.index("date")
    domain_at = dim_cols.index("root_domain")

    # date -> [total volume, owned-domain share] accumulated in one pass.
    per_day = defaultdict(lambda: [0, 0.0])
    for row in report.data:
        bucket = per_day[row.dimensions[date_at]]
        bucket[0] += int(row.metrics[count_at])
        if normalize(row.dimensions[domain_at]) in owned_domains:
            bucket[1] += row.metrics[share_at]

    # ISO date strings sort chronologically, so a plain key sort suffices.
    return [(day, volume, share) for day, (volume, share) in sorted(per_day.items())]


def current_and_prior_windows(inclusive_end, days):
    """Build two adjacent, equal-length (start, end_exclusive) windows.

    The current window ends on `inclusive_end` (its exclusive end is the
    day after); the prior window butts up directly against its start.
    """
    one_day = timedelta(days=1)
    span = timedelta(days=days)
    current_start = inclusive_end + one_day - span
    current = (current_start, inclusive_end + one_day)
    prior = (current_start - span, current_start)
    return current, prior


def find_category_id(name):
    """Return the UUID of the first category whose name matches, ignoring case.

    Raises:
        ValueError: if no category in the organization has that name.
    """
    wanted = name.lower()
    found = next(
        (c.id for c in client.organizations.categories.list() if c.name.lower() == wanted),
        None,
    )
    if found is None:
        raise ValueError(f"No category named {name!r}")
    return found


# Resolve the category name to its UUID, then fetch both windows.
category_id = find_category_id(CATEGORY_NAME)
owned = get_owned_domains(category_id)
current, prior = current_and_prior_windows(INCLUSIVE_END, DAYS)

current_rows = get_daily_citations(category_id, owned, *current)
prior_rows = get_daily_citations(category_id, owned, *prior)


def _print_window(label, rows):
    """Print one window's daily (date, volume, share) rows under its label."""
    print(label)
    for day, volume, share in rows:
        print(f"  {day:<10} {volume:>8} {share:>13.1%}")


print(f"{'Date':<12} {'Volume':>8} {'Owned share':>14}")
_print_window("Current period:", current_rows)
_print_window("\nPrevious period:", prior_rows)

Common follow-ups

Headline Citation Share + delta

The single-number KPI above the chart, plus its period-over-period delta.

Citation Rank by domain

Every cited domain ranked by share, with the per-row pp delta column.