diff --git a/.changeset/common-paws-grab.md b/.changeset/common-paws-grab.md
new file mode 100644
index 0000000000000..d4e8b57109303
--- /dev/null
+++ b/.changeset/common-paws-grab.md
@@ -0,0 +1,5 @@
+---
+"gradio": minor
+---
+
+feat:Adds a monitoring dashboard to Gradio apps that can be used to view usage
diff --git a/gradio/analytics_dashboard.py b/gradio/analytics_dashboard.py
new file mode 100644
index 0000000000000..bec476cd400fb
--- /dev/null
+++ b/gradio/analytics_dashboard.py
@@ -0,0 +1,91 @@
+import random
+import time
+
+import pandas as pd
+
+import gradio as gr
+
+data = {"data": {}}
+
+with gr.Blocks() as demo:
+    with gr.Row():
+        selected_function = gr.Dropdown(
+            ["All"],
+            value="All",
+            label="Endpoint",
+            info="Select the function to see analytics for, or 'All' for aggregate.",
+            scale=2,
+        )
+        demo.load(
+            lambda: gr.Dropdown(
+                choices=["All"]
+                + list({row["function"] for row in data["data"].values()})  # type: ignore
+            ),
+            None,
+            selected_function,
+        )
+        timespan = gr.Dropdown(
+            ["All Time", "24 hours", "1 hour", "10 minutes"],
+            value="All Time",
+            label="Timespan",
+            info="Duration to see data for.",
+        )
+    with gr.Group():
+        with gr.Row():
+            unique_users = gr.Label(label="Unique Users")
+            unique_requests = gr.Label(label="Unique Requests")
+            process_time = gr.Label(label="Avg Process Time")
+    plot = gr.BarPlot(
+        x="time",
+        y="count",
+        color="status",
+        title="Requests over Time",
+        y_title="Requests",
+        width=600,
+    )
+
+    @gr.on(
+        [demo.load, selected_function.change, timespan.change],
+        inputs=[selected_function, timespan],
+        outputs=[unique_users, unique_requests, process_time, plot],
+    )
+    def load_dfs(function, timespan):
+        df = pd.DataFrame(data["data"].values())
+        if df.empty:
+            return 0, 0, 0, gr.skip()
+        df["time"] = pd.to_datetime(df["time"], unit="s")
+        df_filtered = df if function == "All" else df[df["function"] == function]
+        if timespan != "All Time":
+            df_filtered = df_filtered[
+                df_filtered["time"] > pd.Timestamp.now() - pd.Timedelta(timespan)
+            ]
+
+        df_filtered["time"] = df_filtered["time"].dt.floor("min")
+        plot = df_filtered.groupby(["time", "status"]).size().reset_index(name="count")  # type: ignore
+        mean_process_time_for_success = df_filtered[df_filtered["status"] == "success"][
+            "process_time"
+        ].mean()
+
+        return (
+            df_filtered["session_hash"].nunique(),
+            df_filtered.shape[0],
+            round(mean_process_time_for_success, 2),
+            plot,
+        )
+
+
+if __name__ == "__main__":
+    data["data"] = {
+        random.randint(0, 1000000): {
+            "time": time.time() - random.randint(0, 60 * 60 * 24 * 3),
+            "status": random.choice(
+                ["success", "success", "failed", "processing", "queued"]
+            ),
+            "function": random.choice(["predict", "chat", "chat"]),
+            "process_time": random.randint(0, 10),
+            "session_hash": str(random.randint(0, 4)),
+        }
+        for r in range(random.randint(100, 200))
+    }
+
+    demo.launch()
diff --git a/gradio/blocks.py b/gradio/blocks.py
index eaa7b3389e3a4..4b467c0c99200 100644
--- a/gradio/blocks.py
+++ b/gradio/blocks.py
@@ -2178,6 +2178,7 @@ def launch(
         auth_dependency: Callable[[fastapi.Request], str | None] | None = None,
         max_file_size: str | int | None = None,
         _frontend: bool = True,
+        enable_monitoring: bool = False,
     ) -> tuple[FastAPI, str, str]:
         """
         Launches a simple web server that serves the demo. Can also be used to create a
@@ -2397,6 +2398,11 @@ def reverse(text):
         else:
             self.share = share

+        if enable_monitoring:
+            print(
+                f"Monitoring URL: {self.local_url}monitoring/{self.app.analytics_key}"
+            )
+
         # If running in a colab or not able to access localhost,
         # a shareable link must be created.
         if (
diff --git a/gradio/queueing.py b/gradio/queueing.py
index 70b645fd62537..dcb3d1ec69869 100644
--- a/gradio/queueing.py
+++ b/gradio/queueing.py
@@ -114,6 +114,7 @@ def __init__(
         self.default_concurrency_limit = self._resolve_concurrency_limit(
             default_concurrency_limit
         )
+        self.event_analytics: dict[str, dict[str, float | str | None]] = {}

     def start(self):
         self.active_jobs = [None] * self.max_thread_count
@@ -227,6 +228,13 @@
                 "Event not found in queue. If you are deploying this Gradio app with multiple replicas, please enable stickiness to ensure that all requests from the same user are routed to the same instance."
             ) from e
         event_queue.queue.append(event)
+        self.event_analytics[event._id] = {
+            "time": time.time(),
+            "status": "queued",
+            "process_time": None,
+            "function": fn.api_name,
+            "session_hash": body.session_hash,
+        }

         self.broadcast_estimations(event.concurrency_id, len(event_queue.queue) - 1)

@@ -294,6 +302,8 @@ async def start_processing(self) -> None:
                     event_queue.current_concurrency += 1
                     start_time = time.time()
                     event_queue.start_times_per_fn[events[0].fn].add(start_time)
+                    for event in events:
+                        self.event_analytics[event._id]["status"] = "processing"
                     process_event_task = run_coro_in_background(
                         self.process_events, events, batch, start_time
                     )
@@ -470,6 +480,7 @@
     ) -> None:
         awake_events: list[Event] = []
         fn = events[0].fn
+        success = False
         try:
             for event in events:
                 if event.alive:
@@ -587,16 +598,20 @@
             for e, event in enumerate(awake_events):
                 if batch and "data" in output:
                     output["data"] = list(zip(*response.get("data")))[e]
+                success = response is not None
                 self.send_message(
                     event,
                     ProcessCompletedMessage(
                         output=output,
-                        success=response is not None,
+                        success=success,
                     ),
                 )
             end_time = time.time()
             if response is not None:
-                self.process_time_per_fn[events[0].fn].add(end_time - begin_time)
+                duration = end_time - begin_time
+                self.process_time_per_fn[events[0].fn].add(duration)
+                for event in events:
+                    self.event_analytics[event._id]["process_time"] = duration
         except Exception as e:
             traceback.print_exc()
         finally:
@@ -620,6 +635,13 @@
                 # to start "from scratch"
                 await self.reset_iterators(event._id)

+                if event in awake_events:
+                    self.event_analytics[event._id]["status"] = (
+                        "success" if success else "failed"
+                    )
+                else:
+                    self.event_analytics[event._id]["status"] = "cancelled"
+
     async def reset_iterators(self, event_id: str):
         # Do the same thing as the /reset route
         app = self.server_app
diff --git a/gradio/routes.py b/gradio/routes.py
index 35120ed99e55a..7c8af3e264aae 100644
--- a/gradio/routes.py
+++ b/gradio/routes.py
@@ -164,6 +164,8 @@ def __init__(
     ):
         self.tokens = {}
         self.auth = None
+        self.analytics_key = secrets.token_urlsafe(16)
+        self.analytics_enabled = False
         self.blocks: gradio.Blocks | None = None
         self.state_holder = StateHolder()
         self.iterators: dict[str, AsyncIterator] = {}
@@ -1165,6 +1167,32 @@ def robots_txt():
            else:
                return "User-agent: *\nDisallow: "

+        @app.get("/monitoring")
+        async def analytics_login():
+            print(
+                f"Monitoring URL: {app.get_blocks().local_url}monitoring/{app.analytics_key}"
+            )
+            return HTMLResponse("See console for monitoring URL.")
+
@app.get("/monitoring/{key}") + async def analytics_dashboard(key: str): + if key == app.analytics_key: + analytics_url = f"/monitoring/{app.analytics_key}/dashboard" + if not app.analytics_enabled: + from gradio.analytics_dashboard import data + from gradio.analytics_dashboard import demo as dashboard + + mount_gradio_app(app, dashboard, path=analytics_url) + dashboard._queue.start() + analytics = app.get_blocks()._queue.event_analytics + data["data"] = analytics + app.analytics_enabled = True + return RedirectResponse( + url=analytics_url, status_code=status.HTTP_302_FOUND + ) + else: + raise HTTPException(status_code=403, detail="Invalid key.") + return app diff --git a/guides/03_additional-features/01_queuing.md b/guides/03_additional-features/01_queuing.md index 42146d8b6e72f..9297eddb4dcdc 100644 --- a/guides/03_additional-features/01_queuing.md +++ b/guides/03_additional-features/01_queuing.md @@ -13,3 +13,4 @@ This limits the number of requests processed for this event listener at a single See the [docs on queueing](/docs/gradio/interface#interface-queue) for more details on configuring the queuing parameters. +You can see analytics on the number and status of all requests processed by the queue by visiting the `/monitoring` endpoint of your app. This endpoint will print a secret URL to your console that links to the full analytics dashboard. \ No newline at end of file