Skip to main content

Documentation Index

Fetch the complete documentation index at: https://docs.tryprofound.com/llms.txt

Use this file to discover all available pages before exploring further.

The line chart on every Visibility tile in the Profound app is one call with the date dimension added. Use date_interval to switch between day, week, and month buckets.
Don’t average daily rows to derive a period score — they’re computed differently. See Conventions & gotchas.

How this example works

  1. Add date to dimensions and set date_interval ("day" / "week" / "month"). One row per bucket. Different bucket sizes give different numbers — see Headline + daily.
  2. Filter by asset so each bucket is one row.
  3. Read positions from info.query instead of hardcoding them.
import os
from profound import Profound

# The client reads the API key from the environment; a missing
# PROFOUND_API_KEY raises KeyError here, before any request is made.
client = Profound(api_key=os.environ["PROFOUND_API_KEY"])

# What to fetch — replace with your own values.
CATEGORY_NAME = "<your-category-name>"
ASSET_NAME    = "<your-asset-name>"
START_DATE    = "2026-05-05"
END_DATE      = "2026-05-12"   # exclusive — returns data through 2026-05-11
DATE_INTERVAL = "day"          # or "week" / "month"


def get_visibility_over_time(category_id, asset_name, start_date, end_date, interval="day"):
    """Fetch one asset's Visibility Score per day/week/month bucket.

    Returns a list of (date, score) tuples in ascending date order,
    e.g. [("2026-05-05", 0.78), ("2026-05-06", 0.81), ...].
    Raises ValueError if the response is missing the requested columns.
    """
    report = client.reports.visibility(
        category_id=category_id,
        start_date=start_date,
        end_date=end_date,
        metrics=["visibility_score"],
        dimensions=["date"],
        date_interval=interval,
        filters=[{"field": "asset_name", "operator": "is", "value": asset_name}],
    )
    # Column positions are read back from the response's own query echo,
    # never hardcoded — the API controls the ordering.
    score_at = report.info.query["metrics"].index("visibility_score")
    date_at = report.info.query["dimensions"].index("date")

    series = []
    for row in report.data:
        series.append((row.dimensions[date_at], row.metrics[score_at]))
    # ISO date strings sort chronologically as plain strings.
    return sorted(series, key=lambda point: point[0])


# Helpers — translate human-readable names into the IDs the report API needs.

def find_category_id(name):
    """Return the UUID of the first category whose name matches *name*.

    Comparison is case-insensitive. Raises ValueError when nothing matches.
    """
    target = name.lower()  # hoisted so the loop compares against one value
    matches = (
        category.id
        for category in client.organizations.categories.list()
        if category.name.lower() == target
    )
    for category_id in matches:
        return category_id
    raise ValueError(f"No category named {name!r}")


def find_asset_name(category_id, name):
    """Resolve *name* to the asset's canonical spelling inside a category.

    Matching is case-insensitive; the returned string is exactly as the
    API stores it. Raises ValueError when no asset matches.
    """
    wanted = name.lower()
    hits = (
        asset.name
        for asset in client.organizations.categories.assets(category_id)
        if asset.name.lower() == wanted
    )
    # Sentinel instead of None so an unlikely falsy name can't be mistaken
    # for "not found".
    _missing = object()
    canonical = next(hits, _missing)
    if canonical is _missing:
        raise ValueError(f"No asset named {name!r} in this category")
    return canonical


# Resolve names → IDs, then run.
# Resolve names → IDs, then run.
category_id = find_category_id(CATEGORY_NAME)
asset_name  = find_asset_name(category_id, ASSET_NAME)

points = get_visibility_over_time(category_id, asset_name, START_DATE, END_DATE, DATE_INTERVAL)
for date, score in points:
    # Scores are fractions in [0, 1]; ":.1%" renders 0.78 as "78.0%".
    print(f"{date}  {score:.1%}")

Switching the bucket

Swap date_interval to roll the data up to coarser buckets:
| `date_interval` | What you get |
| --- | --- |
| `"day"` | One point per calendar day (Eastern Time) |
| `"week"` | One point per ISO week |
| `"month"` | One point per calendar month |
For the same date window, coarser buckets return fewer rows — a month-long window yields roughly 30 daily rows, 4–5 weekly rows, or a single monthly row.