From 4b41cd35d3de2cced545673b8be11cf9b8640e11 Mon Sep 17 00:00:00 2001 From: Philipp Wassibauer Date: Thu, 8 Feb 2024 20:00:35 +0100 Subject: [PATCH 01/20] Example fixs (#20) * use a simple public base query to make the example runnable * add to example * add GTM to enable amplitude * do amplitude over GTM --- api-reference/query-api/endpoint/execute-query.mdx | 10 +++++++++- mint.json | 6 +++--- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/api-reference/query-api/endpoint/execute-query.mdx b/api-reference/query-api/endpoint/execute-query.mdx index 527fba82..6a2d7c8a 100644 --- a/api-reference/query-api/endpoint/execute-query.mdx +++ b/api-reference/query-api/endpoint/execute-query.mdx @@ -24,11 +24,19 @@ openapi: 'POST /v1/query/{query_id}/execute' # setup Dune Python client dune = DuneClient.from_env() + query = QueryBase( + name="Sample Query", + query_id=1215383, + ) + result = dune.run_query( - query = 12345, # pass in query to run + query = query, performance = 'large' # optionally define which tier to run the execution on (default is "medium") ) + # go over the results returned + for row in result.result.rows: + print (row) # as an example we print the rows ``` diff --git a/mint.json b/mint.json index 22dd89b8..ba35f1d0 100644 --- a/mint.json +++ b/mint.json @@ -119,11 +119,11 @@ "linkedin": "https://www.linkedin.com/company/duneanalytics" }, "analytics": { - "amplitude": { - "apiKey": "f5f9693ef08393e7ab6106db82824661" - }, "ga4": { "measurementId": "G-H1G057R0KN" + }, + "gtm": { + "tagId": "GTM-P46XP3Z5" } } } From bfc24754bbc90c025b174b9b3b45086071d7d7a3 Mon Sep 17 00:00:00 2001 From: Miguel Filipe Date: Mon, 12 Feb 2024 16:39:23 +0000 Subject: [PATCH 02/20] query results API: add pagination related field --- api-reference/query-api/query-openapi.json | 72 ++++++++++++++++++++++ 1 file changed, 72 insertions(+) diff --git a/api-reference/query-api/query-openapi.json b/api-reference/query-api/query-openapi.json index e04175c9..8f745ed8 
100644 --- a/api-reference/query-api/query-openapi.json +++ b/api-reference/query-api/query-openapi.json @@ -316,6 +316,24 @@ }, "description": "unique identifier of the query" }, + { + "in": "query", + "name": "limit", + "required": false, + "schema": { + "type": "integer" + }, + "description": "limit number of rows to return. This together with 'offset' allows easy pagination through results in an incremental and efficient way." + }, + { + "in": "query", + "name": "offset", + "required": false, + "schema": { + "type": "integer" + }, + "description": "offset row number to start (inclusive, first row means offset=0) returning results from. This together with 'limit' allows easy pagination through results in an incremental and efficient way." + }, { "in": "query", "name": "parameters", @@ -437,6 +455,24 @@ }, "description": "unique identifier of the query" }, + { + "in": "query", + "name": "limit", + "required": false, + "schema": { + "type": "integer" + }, + "description": "limit number of rows to return. This together with 'offset' allows easy pagination through results in an incremental and efficient way." + }, + { + "in": "query", + "name": "offset", + "required": false, + "schema": { + "type": "integer" + }, + "description": "offset row number to start (inclusive, first row means offset=0) returning results from. This together with 'limit' allows easy pagination through results in an incremental and efficient way." + }, { "in": "query", "name": "parameters", @@ -558,6 +594,24 @@ }, "description": "unique identifier of the execution" }, + { + "in": "query", + "name": "limit", + "required": false, + "schema": { + "type": "integer" + }, + "description": "limit number of rows to return. This together with 'offset' allows easy pagination through results in an incremental and efficient way." 
+ }, + { + "in": "query", + "name": "offset", + "required": false, + "schema": { + "type": "integer" + }, + "description": "offset row number to start (inclusive, first row means offset=0) returning results from. This together with 'limit' allows easy pagination through results in an incremental and efficient way." + }, { "in": "query", "name": "ignore_max_datapoints_per_request", @@ -659,6 +713,24 @@ }, "description": "unique identifier of the execution" }, + { + "in": "query", + "name": "limit", + "required": false, + "schema": { + "type": "integer" + }, + "description": "limit number of rows to return. This together with 'offset' allows easy pagination through results in an incremental and efficient way." + }, + { + "in": "query", + "name": "offset", + "required": false, + "schema": { + "type": "integer" + }, + "description": "offset row number to start (inclusive, first row means offset=0) returning results from. This together with 'limit' allows easy pagination through results in an incremental and efficient way." 
+ }, { "in": "query", "name": "ignore_max_datapoints_per_request", From f6b4970f2ac1b3b5cd55caaf709765c5bf2305d6 Mon Sep 17 00:00:00 2001 From: Miguel Filipe Date: Mon, 12 Feb 2024 16:42:16 +0000 Subject: [PATCH 03/20] start adding query result new fields for pagination --- api-reference/query-api/query-openapi.json | 1 + 1 file changed, 1 insertion(+) diff --git a/api-reference/query-api/query-openapi.json b/api-reference/query-api/query-openapi.json index 8f745ed8..cdc2ffe0 100644 --- a/api-reference/query-api/query-openapi.json +++ b/api-reference/query-api/query-openapi.json @@ -836,6 +836,7 @@ "execution_id": "01HKZSJAW6N2MFVCBHA3R8S64X", "query_id": 1252207, "state": "QUERY_STATE_COMPLETED", + "is_execution_finished": true, "submitted_at": "2024-01-12T21:34:37.447476Z", "expires_at": "2024-04-11T21:34:55.737082Z", "execution_started_at": "2024-01-12T21:34:37.464387Z", From 2b4840f7222758b661e51b2c8f1aea864adcd4a4 Mon Sep 17 00:00:00 2001 From: agaperste Date: Mon, 12 Feb 2024 12:16:56 -0500 Subject: [PATCH 04/20] Capitalize start of the sentence --- api-reference/query-api/query-openapi.json | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/api-reference/query-api/query-openapi.json b/api-reference/query-api/query-openapi.json index cdc2ffe0..3f9e5b7e 100644 --- a/api-reference/query-api/query-openapi.json +++ b/api-reference/query-api/query-openapi.json @@ -323,7 +323,7 @@ "schema": { "type": "integer" }, - "description": "limit number of rows to return. This together with 'offset' allows easy pagination through results in an incremental and efficient way." + "description": "Limit number of rows to return. This together with 'offset' allows easy pagination through results in an incremental and efficient way." }, { "in": "query", @@ -332,7 +332,7 @@ "schema": { "type": "integer" }, - "description": "offset row number to start (inclusive, first row means offset=0) returning results from. 
This together with 'limit' allows easy pagination through results in an incremental and efficient way." + "description": "Offset row number to start (inclusive, first row means offset=0) returning results from. This together with 'limit' allows easy pagination through results in an incremental and efficient way." }, { "in": "query", @@ -462,7 +462,7 @@ "schema": { "type": "integer" }, - "description": "limit number of rows to return. This together with 'offset' allows easy pagination through results in an incremental and efficient way." + "description": "Limit number of rows to return. This together with 'offset' allows easy pagination through results in an incremental and efficient way." }, { "in": "query", @@ -471,7 +471,7 @@ "schema": { "type": "integer" }, - "description": "offset row number to start (inclusive, first row means offset=0) returning results from. This together with 'limit' allows easy pagination through results in an incremental and efficient way." + "description": "Offset row number to start (inclusive, first row means offset=0) returning results from. This together with 'limit' allows easy pagination through results in an incremental and efficient way." }, { "in": "query", @@ -601,7 +601,7 @@ "schema": { "type": "integer" }, - "description": "limit number of rows to return. This together with 'offset' allows easy pagination through results in an incremental and efficient way." + "description": "Limit number of rows to return. This together with 'offset' allows easy pagination through results in an incremental and efficient way." }, { "in": "query", @@ -610,7 +610,7 @@ "schema": { "type": "integer" }, - "description": "offset row number to start (inclusive, first row means offset=0) returning results from. This together with 'limit' allows easy pagination through results in an incremental and efficient way." + "description": "Offset row number to start (inclusive, first row means offset=0) returning results from. 
This together with 'limit' allows easy pagination through results in an incremental and efficient way." }, { "in": "query", @@ -720,7 +720,7 @@ "schema": { "type": "integer" }, - "description": "limit number of rows to return. This together with 'offset' allows easy pagination through results in an incremental and efficient way." + "description": "Limit number of rows to return. This together with 'offset' allows easy pagination through results in an incremental and efficient way." }, { "in": "query", @@ -729,7 +729,7 @@ "schema": { "type": "integer" }, - "description": "offset row number to start (inclusive, first row means offset=0) returning results from. This together with 'limit' allows easy pagination through results in an incremental and efficient way." + "description": "Offset row number to start (inclusive, first row means offset=0) returning results from. This together with 'limit' allows easy pagination through results in an incremental and efficient way." }, { "in": "query", From e519f708874c1dadbda7d97b174b558882310c73 Mon Sep 17 00:00:00 2001 From: Richard Keo Date: Mon, 12 Feb 2024 17:55:15 +0000 Subject: [PATCH 05/20] Update openapi spec for pagination This adds details about: - new fields added in the response - headers returned when requesting the results in CSV format --- api-reference/query-api/query-openapi.json | 108 +++++++++++++++------ 1 file changed, 76 insertions(+), 32 deletions(-) diff --git a/api-reference/query-api/query-openapi.json b/api-reference/query-api/query-openapi.json index 3f9e5b7e..19703648 100644 --- a/api-reference/query-api/query-openapi.json +++ b/api-reference/query-api/query-openapi.json @@ -319,7 +319,7 @@ { "in": "query", "name": "limit", - "required": false, + "required": true, "schema": { "type": "integer" }, @@ -362,7 +362,7 @@ "schema": { "type": "boolean" }, - "description": "Sometimes request results can be too large to return. By default allow_partial_results is set to false and a failed state is returned. 
" + "description": "Sometimes request results can be too large to return. By default allow_partial_results is set to false and a failed state is returned." } ], "responses": { @@ -458,7 +458,7 @@ { "in": "query", "name": "limit", - "required": false, + "required": true, "schema": { "type": "integer" }, @@ -507,6 +507,20 @@ "responses": { "200": { "description": "OK", + "headers": { + "x-dune-next-offset": { + "description": "Offset to use to retrieve the next page of results if available.", + "schema": { + "type": "integer" + } + }, + "x-dune-next-uri": { + "description": "URI to retrieve the next page of results if available.", + "schema": { + "type": "string" + } + } + }, "content": { "text/csv": { "schema": { @@ -597,7 +611,7 @@ { "in": "query", "name": "limit", - "required": false, + "required": true, "schema": { "type": "integer" }, @@ -716,7 +730,7 @@ { "in": "query", "name": "limit", - "required": false, + "required": true, "schema": { "type": "integer" }, @@ -753,6 +767,20 @@ "responses": { "200": { "description": "OK", + "headers": { + "x-dune-next-offset": { + "description": "Offset to use to retrieve the next page of results if available.", + "schema": { + "type": "integer" + } + }, + "x-dune-next-uri": { + "description": "URI to retrieve the next page of results if available.", + "schema": { + "type": "string" + } + } + }, "content": { "text/csv": { "schema": { @@ -841,6 +869,8 @@ "expires_at": "2024-04-11T21:34:55.737082Z", "execution_started_at": "2024-01-12T21:34:37.464387Z", "execution_ended_at": "2024-01-12T21:34:55.737081Z", + "next_offset": 5, + "next_uri": "https://api.dev.dune.com/api/v1/execution/01HKZSJAW6N2MFVCBHA3R8S64X/results?limit=5&offset=5", "result": { "metadata": { "column_names": [ @@ -850,9 +880,11 @@ "7 Days Volume", "24 Hours Volume" ], - "result_set_bytes": 440, - "total_row_count": 8, - "datapoint_count": 40, + "result_set_bytes": 310, + "row_count": 5, + "total_result_set_bytes": 4400, + "total_row_count": 300, + 
"datapoint_count": 25, "pending_time_millis": 16, "execution_time_millis": 18272 }, @@ -891,27 +923,6 @@ "Rank": 5, "Total Volume": 1598674364.7416763, "project": "looksrare" - }, - { - "24 Hours Volume": 19296.919182499998, - "7 Days Volume": 104063.2939935, - "Rank": 6, - "Total Volume": 304480783.11624366, - "project": "superrare" - }, - { - "24 Hours Volume": 10126.247481873, - "7 Days Volume": 161524.37162494668, - "Rank": 7, - "Total Volume": 223026861.49473178, - "project": "foundation" - }, - { - "24 Hours Volume": 109144.69140446292, - "7 Days Volume": 248139.50609401206, - "Rank": 8, - "Total Volume": 110818263.99090719, - "project": "sudoswap" } ] } @@ -932,6 +943,11 @@ "description": "The state of the query execution.", "example": "QUERY_STATE_COMPLETED" }, + "is_execution_finished": { + "type": "boolean", + "description": "Whether the state of the query execution is terminal. This can be used for polling purposes.", + "example": "QUERY_STATE_COMPLETED" + }, "submitted_at": { "type": "string", "format": "date-time", @@ -961,6 +977,16 @@ "format": "date-time", "description": "Timestamp of when the query execution was cancelled, if applicable." 
}, + "next_offset": { + "type": "integer", + "description": "Offset that can be used to retrieve the next page of results.", + "example": 32000 + }, + "next_uri": { + "type": "string", + "description": "URI that can be used to fetch the next page of results.", + "example": "https://api.dev.dune.com/api/v1/execution/01HKZSJAW6N2MFVCBHA3R8S64X/results?limit=5&offset=5" + }, "result": { "$ref": "#/components/schemas/QueryResultData" }, @@ -976,6 +1002,7 @@ "execution_id": "01HJ3EBNGZ8WT12KX8ZCARM5ES", "query_id": 3298913, "state": "QUERY_STATE_COMPLETED", + "is_execution_finished": true, "submitted_at": "2024-12-20T11:04:18.71989Z", "expires_at": "2025-03-19T11:04:19.423372Z", "execution_started_at": "2024-12-20T11:04:18.724658237Z", @@ -1011,6 +1038,11 @@ "description": "The state of the query execution.", "example": "QUERY_STATE_COMPLETED" }, + "is_execution_finished": { + "type": "boolean", + "description": "Whether the state of the query execution is terminal. This can be used for polling purposes.", + "example": "QUERY_STATE_COMPLETED" + }, "submitted_at": { "type": "string", "format": "date-time", @@ -1159,9 +1191,11 @@ "7 Days Volume", "24 Hours Volume" ], - "result_set_bytes": 440, - "total_row_count": 8, - "datapoint_count": 40, + "result_set_bytes": 280, + "row_count": 5, + "total_result_set_bytes": 4400, + "total_row_count": 400, + "datapoint_count": 25, "pending_time_millis": 16, "execution_time_millis": 18272 }, @@ -1178,6 +1212,16 @@ ] }, "result_set_bytes": { + "type": "integer", + "description": "Number of bytes in the result set for the current page of results.", + "example": 1541 + }, + "row_count": { + "type": "integer", + "description": "Number of rows in the result set for the current page of results.", + "example": 28 + }, + "total_result_set_bytes": { "type": "integer", "description": "Total number of bytes in the result set.", "example": 1541 From 27fb51ca2e146be04446dd5675c2e5f41089b5ff Mon Sep 17 00:00:00 2001 From: agaperste Date: Mon, 12 
Feb 2024 16:56:03 -0500 Subject: [PATCH 06/20] adding a page for pagination doc, most of the explanation done, pending some TODOs --- api-reference/overview/pagination.mdx | 116 +++++++++++++++++++++ api-reference/query-api/query-openapi.json | 2 +- mint.json | 1 + 3 files changed, 118 insertions(+), 1 deletion(-) create mode 100644 api-reference/overview/pagination.mdx diff --git a/api-reference/overview/pagination.mdx b/api-reference/overview/pagination.mdx new file mode 100644 index 00000000..37f51fc2 --- /dev/null +++ b/api-reference/overview/pagination.mdx @@ -0,0 +1,116 @@ +--- +title: Pagination +--- +All get results endpoints, including [get execution results](../query-api/endpoint/get-execution-result), [get execution results csv](../query-api/endpoint/get-execution-result-csv), [get query results](../query-api/endpoint/get-query-result), [get query results csv](../query-api/endpoint/get-query-result-csv), support pagination for efficient data retrieval. Pagination divides large datasets into smaller, manageable chunks, preventing overload and ensuring smooth performance. This allows users to navigate through the data easily, avoiding limit errors and streamlining their data fetching experience. + +To paginate through results: +1. Specify the `limit` parameter to determine the number of results per page. +2. Use the `offset` parameter to indicate the starting point for retrieving results. +3. Check the response headers for `x-dune-next-offset` and `x-dune-next-uri` to fetch the next page, if available. + + +### Pagination Parameters + +**`limit`** (Required) +- Type: Integer +- Description: Limits the number of rows returned per request. + +**`offset`** +- Type: Integer +- Description: Specifies the starting point (inclusive) from which to return results. +- **Default:** 0 (i.e., the first row) +- **Usage:** Together with `limit`, enables incremental and efficient pagination. 
+ +### Pagination in Response + +The following fields in the repsonse body are related to pagination and can be utilized when doing paginated get results request. + +**`x-dune-next-offset`** +- Type: Integer +- Description: Provides the offset to use for retrieving the next page of results, if available. + +**`x-dune-next-uri`** +- Type: String (URL) +- Description: Specifies the URI to retrieve the next page of results, if available. + + +If you pass in an invalid `limit` and `offset` parameter values, you will get an empty result set. For example, if there are only 10 rows of result data, and you pass in `offset=11`, you will **not** receive an error, but rather an empty result. + + + + + ```json + { + "execution_id": "01HKZSJAW6N2MFVCBHA3R8S64X", + "query_id": 1252207, + "state": "QUERY_STATE_COMPLETED", + "is_execution_finished": true, + "submitted_at": "2024-01-12T21:34:37.447476Z", + "expires_at": "2024-04-11T21:34:55.737082Z", + "execution_started_at": "2024-01-12T21:34:37.464387Z", + "execution_ended_at": "2024-01-12T21:34:55.737081Z", + "next_offset": 5, + "next_uri": "https://api.dev.dune.com/api/v1/execution/01HKZSJAW6N2MFVCBHA3R8S64X/results?limit=5&offset=5", + "result": { + "metadata": { + "column_names": [ + "Rank", + "project", + "Total Volume", + "7 Days Volume", + "24 Hours Volume" + ], + "result_set_bytes": 310, + "row_count": 5, + "total_result_set_bytes": 4400, + "total_row_count": 300, + "datapoint_count": 25, + "pending_time_millis": 16, + "execution_time_millis": 18272 + }, + "rows": [ + { + "24 Hours Volume": 8466988.095521685, + "7 Days Volume": 39146349.94906045, + "Rank": 1, + "Total Volume": 38382979226.241264, + "project": "opensea" + }, + { + "24 Hours Volume": 21838779.932517685, + "7 Days Volume": 162466310.33138418, + "Rank": 2, + "Total Volume": 8237501261.688846, + "project": "blur" + }, + { + "24 Hours Volume": 671472.4583508199, + "7 Days Volume": 4017792.3280940033, + "Rank": 3, + "Total Volume": 5461953781.154043, + 
"project": "x2y2" + }, + { + "24 Hours Volume": 1286407.4659000002, + "7 Days Volume": 5173193.591668, + "Rank": 4, + "Total Volume": 2679750259.5093665, + "project": "cryptopunks" + }, + { + "24 Hours Volume": 56173.5344355522, + "7 Days Volume": 554016.6849051005, + "Rank": 5, + "Total Volume": 1598674364.7416763, + "project": "looksrare" + } + ] + } + } + ``` + + + +TODO +- figure out pagination's relationship with allow_partial_results and write blurb about it +- once pagination page finalized, refer to this in all the get results endpoint pages \ No newline at end of file diff --git a/api-reference/query-api/query-openapi.json b/api-reference/query-api/query-openapi.json index 19703648..001fa4e5 100644 --- a/api-reference/query-api/query-openapi.json +++ b/api-reference/query-api/query-openapi.json @@ -1041,7 +1041,7 @@ "is_execution_finished": { "type": "boolean", "description": "Whether the state of the query execution is terminal. This can be used for polling purposes.", - "example": "QUERY_STATE_COMPLETED" + "example": true }, "submitted_at": { "type": "string", diff --git a/mint.json b/mint.json index ba35f1d0..4696f02b 100644 --- a/mint.json +++ b/mint.json @@ -62,6 +62,7 @@ "api-reference/overview/query-parameters", "api-reference/overview/what-is-execution-id", "api-reference/overview/rate-limits", + "api-reference/overview/pagination", "api-reference/overview/troubleshooting", "api-reference/overview/billing", "api-reference/overview/faq" From 1157b178b30a321d34fcddc35864eeef979ec147 Mon Sep 17 00:00:00 2001 From: agaperste Date: Tue, 13 Feb 2024 11:48:40 -0500 Subject: [PATCH 07/20] finished adding example request and reponse for pagination page, and adding some info explainer on partial results; updated the rest of the doc to link back to pagination page. 
--- api-reference/overview/pagination.mdx | 427 +++++++++++++++--- api-reference/overview/rate-limits.mdx | 4 +- .../endpoint/get-execution-result-csv.mdx | 10 +- .../endpoint/get-execution-result.mdx | 7 +- .../endpoint/get-query-result-csv.mdx | 7 +- .../query-api/endpoint/get-query-result.mdx | 7 +- api-reference/quickstart/query-eg.mdx | 4 + 7 files changed, 386 insertions(+), 80 deletions(-) diff --git a/api-reference/overview/pagination.mdx b/api-reference/overview/pagination.mdx index 37f51fc2..81db02ca 100644 --- a/api-reference/overview/pagination.mdx +++ b/api-reference/overview/pagination.mdx @@ -21,9 +21,162 @@ To paginate through results: - **Default:** 0 (i.e., the first row) - **Usage:** Together with `limit`, enables incremental and efficient pagination. + + + + ```bash + curl -X GET 'https://api.dune.com/api/v1/query/3426636/results?limit=5&offset=0' --H 'x-dune-api-key: {{api_key}}' + ``` + + + + ```python Python SDK + + # coming soon + + ``` + + + + ```python + import requests + + url = "https://api.dune.com/api/v1/query/{query_id}/results" + + headers = {"X-DUNE-API-KEY": ""} + + params = {"limit": 5, "offset": 0} # Define limit and offset parameters + + response = requests.request("GET", url, headers=headers, params=params) + + print(response.text) + + ``` + + + + ```javascript + + const options = { + method: 'GET', + headers: { + 'X-DUNE-API-KEY': '' + } + }; + + const queryParams = new URLSearchParams({limit: 5, offset: 0}); // Define limit and offset parameters + const url = `https://api.dune.com/api/v1/query/{query_id}/results?${queryParams}`; + + fetch(url, options) + .then(response => response.json()) + .then(response => console.log(response)) + .catch(err => console.error(err)); + + + ``` + + ```go Go + + package main + + import ( + "fmt" + "net/http" + "io/ioutil" + "net/url" + ) + + func main() { + url := "https://api.dune.com/api/v1/query/{query_id}/results" + + // Create query parameters + params := url.Values{} + params.Set("limit", 
"5") + params.Set("offset", "0") + + // Add parameters to URL + fullURL := fmt.Sprintf("%s?%s", url, params.Encode()) + + req, _ := http.NewRequest("GET", fullURL, nil) + + req.Header.Add("X-DUNE-API-KEY", "") + + res, _ := http.DefaultClient.Do(req) + + defer res.Body.Close() + body, _ := ioutil.ReadAll(res.Body) + + fmt.Println(res) + fmt.Println(string(body)) + } + ``` + + + + ```php + 5, + 'offset' => 0 + ]); + $url .= '?' . $queryParams; + + curl_setopt_array($curl, [ + CURLOPT_URL => $url, + CURLOPT_RETURNTRANSFER => true, + CURLOPT_ENCODING => "", + CURLOPT_MAXREDIRS => 10, + CURLOPT_TIMEOUT => 30, + CURLOPT_HTTP_VERSION => CURL_HTTP_VERSION_1_1, + CURLOPT_CUSTOMREQUEST => "GET", + CURLOPT_HTTPHEADER => [ + "X-DUNE-API-KEY: " + ], + ]); + + $response = curl_exec($curl); + $err = curl_error($curl); + + curl_close($curl); + + if ($err) { + echo "cURL Error #:" . $err; + } else { + echo $response; + } + ?> + ``` + + + + ```java + + import kong.unirest.HttpResponse; + import kong.unirest.Unirest; + + public class Main { + public static void main(String[] args) { + HttpResponse response = Unirest.get("https://api.dune.com/api/v1/query/{query_id}/results") + .header("X-DUNE-API-KEY", "") + .queryString("limit", 5) + .queryString("offset", 0) + .asString(); + + System.out.println(response.getBody()); + } + } + ``` + + + + ### Pagination in Response -The following fields in the repsonse body are related to pagination and can be utilized when doing paginated get results request. +The following fields in the repsonse body are related to pagination and can be utilized when doing paginated get results request. If they are available, you can use them to paginate the next page. If they are not avaialble, that means there is no more results to be fetched. 
**`x-dune-next-offset`** - Type: Integer @@ -34,83 +187,219 @@ The following fields in the repsonse body are related to pagination and can be u - Description: Specifies the URI to retrieve the next page of results, if available. -If you pass in an invalid `limit` and `offset` parameter values, you will get an empty result set. For example, if there are only 10 rows of result data, and you pass in `offset=11`, you will **not** receive an error, but rather an empty result. - - - - +If you pass in an invalid `limit` and `offset` parameter values, you will get an empty result set. For example, if there are only 10 rows of result data, and you pass in `offset=11`, you will **not** receive an error, but rather an empty result with metadata like this. + ```json - { - "execution_id": "01HKZSJAW6N2MFVCBHA3R8S64X", - "query_id": 1252207, - "state": "QUERY_STATE_COMPLETED", + "execution_id": "01HPF1299EV69Z08DBKW1B6MJR", + "query_id": 2616430, "is_execution_finished": true, - "submitted_at": "2024-01-12T21:34:37.447476Z", - "expires_at": "2024-04-11T21:34:55.737082Z", - "execution_started_at": "2024-01-12T21:34:37.464387Z", - "execution_ended_at": "2024-01-12T21:34:55.737081Z", - "next_offset": 5, - "next_uri": "https://api.dev.dune.com/api/v1/execution/01HKZSJAW6N2MFVCBHA3R8S64X/results?limit=5&offset=5", + "state": "QUERY_STATE_COMPLETED", + "submitted_at": "2024-02-12T16:05:40.270193Z", + "expires_at": "2024-05-12T16:05:40.654018Z", + "execution_started_at": "2024-02-12T16:05:40.312207906Z", + "execution_ended_at": "2024-02-12T16:05:40.654017085Z", "result": { + "rows": [], "metadata": { "column_names": [ - "Rank", - "project", - "Total Volume", - "7 Days Volume", - "24 Hours Volume" + "foo" ], - "result_set_bytes": 310, - "row_count": 5, - "total_result_set_bytes": 4400, - "total_row_count": 300, - "datapoint_count": 25, - "pending_time_millis": 16, - "execution_time_millis": 18272 - }, - "rows": [ - { - "24 Hours Volume": 8466988.095521685, - "7 Days Volume": 
39146349.94906045, - "Rank": 1, - "Total Volume": 38382979226.241264, - "project": "opensea" - }, - { - "24 Hours Volume": 21838779.932517685, - "7 Days Volume": 162466310.33138418, - "Rank": 2, - "Total Volume": 8237501261.688846, - "project": "blur" - }, - { - "24 Hours Volume": 671472.4583508199, - "7 Days Volume": 4017792.3280940033, - "Rank": 3, - "Total Volume": 5461953781.154043, - "project": "x2y2" - }, - { - "24 Hours Volume": 1286407.4659000002, - "7 Days Volume": 5173193.591668, - "Rank": 4, - "Total Volume": 2679750259.5093665, - "project": "cryptopunks" - }, - { - "24 Hours Volume": 56173.5344355522, - "7 Days Volume": 554016.6849051005, - "Rank": 5, - "Total Volume": 1598674364.7416763, - "project": "looksrare" + "row_count": 0, + "result_set_bytes": 0, + "total_row_count": 25, + "total_result_set_bytes": 28, + "datapoint_count": 0, + "pending_time_millis": 42, + "execution_time_millis": 341 } - ] - } } + } + ``` + + + + + + ```json + { + "execution_id": "01HPFJ7VSFXPTA8WPMDKBXE167", + "query_id": 3426636, + "is_execution_finished": true, + "state": "QUERY_STATE_COMPLETED", + "submitted_at": "2024-02-12T21:05:48.848069Z", + "expires_at": "2024-05-12T21:05:50.199443Z", + "execution_started_at": "2024-02-12T21:05:48.863094766Z", + "execution_ended_at": "2024-02-12T21:05:50.199442351Z", + "result": { + "rows": [ + { + "amount_usd": null, + "block_date": "2021-06-07 00:00:00.000 UTC", + "block_month": "2021-06-01T00:00:00Z", + "block_time": "2021-06-07 13:21:10.000 UTC", + "blockchain": "bnb", + "evt_index": 432, + "maker": null, + "project": "pancakeswap", + "project_contract_address": "0xa41e57459f09a126f358e118b693789d088ea8a0", + "taker": "0x88bf5a2e82510847e5dcbf33f44a9f611f1c1df5", + "token_bought_address": "0x85e76cbf4893c1fbcb34dcf1239a91ce2a4cf5a7", + "token_bought_amount": 2985.646787349244, + "token_bought_amount_raw": "2985646787349243786078", + "token_bought_symbol": "USDG", + "token_pair": "GMT-USDG", + "token_sold_address": 
"0x99e92123eb77bc8f999316f622e5222498438784", + "token_sold_amount": 192.8996462242313, + "token_sold_amount_raw": "192899646224231304314", + "token_sold_symbol": "GMT", + "tx_from": "0x88bf5a2e82510847e5dcbf33f44a9f611f1c1df5", + "tx_hash": "0xcee1e51083f28655fd9cc434238e2a243aa8f9bad20e717d145f246b5e73e231", + "tx_to": "0x10ed43c718714eb63d5aa57b78b54704e256024e" + }, + { + "amount_usd": 0.0038254393551350966, + "block_date": "2021-06-27 00:00:00.000 UTC", + "block_month": "2021-06-01T00:00:00Z", + "block_time": "2021-06-27 19:21:48.000 UTC", + "blockchain": "bnb", + "evt_index": 38, + "maker": null, + "project": "pancakeswap", + "project_contract_address": "0x73c6542f8a529bf7bf0ac27a1d232a8525748738", + "taker": "0x10ed43c718714eb63d5aa57b78b54704e256024e", + "token_bought_address": "0xbb4cdb9cbd36b01bd1cbaebf2de08d9173bc095c", + "token_bought_amount": 0.00001404037053195, + "token_bought_amount_raw": "14040370531950", + "token_bought_symbol": "WBNB", + "token_pair": null, + "token_sold_address": "0x3da1fd918a4c56b1cf6280ba37018c211db0d943", + "token_sold_amount": null, + "token_sold_amount_raw": "23274980840704048977780", + "token_sold_symbol": null, + "tx_from": "0xc0ffee00d263df1ecb44ece29f63a1e7479b7420", + "tx_hash": "0xa37ab410b4802e4d4aab950d1c3ce186abf4dbc98f7d7dbc54af17cf23519444", + "tx_to": "0x17f07d78e432b91ccfbed98f0617a83a4bfcc446" + }, + { + "amount_usd": 229.4023072060263, + "block_date": "2021-06-29 00:00:00.000 UTC", + "block_month": "2021-06-01T00:00:00Z", + "block_time": "2021-06-29 23:23:56.000 UTC", + "blockchain": "bnb", + "evt_index": 51, + "maker": null, + "project": "pancakeswap", + "project_contract_address": "0x58f876857a02d6762e0101bb5c46a8c1ed44dc16", + "taker": "0xd7d116d7535aa724f8be9c482d2c768bc425a23c", + "token_bought_address": "0xe9e7cea3dedca5984780bafc599bd69add087d56", + "token_bought_amount": 229.23954712756574, + "token_bought_amount_raw": "229239547127565734569", + "token_bought_symbol": "BUSD", + "token_pair": 
"BUSD-WBNB", + "token_sold_address": "0xbb4cdb9cbd36b01bd1cbaebf2de08d9173bc095c", + "token_sold_amount": 0.761872346852129, + "token_sold_amount_raw": "761872346852129018", + "token_sold_symbol": "WBNB", + "tx_from": "0xd7d116d7535aa724f8be9c482d2c768bc425a23c", + "tx_hash": "0x21429506dd0323b9ec40f4cf9314e58579aa25620bf95130f3a275a356ecd64c", + "tx_to": "0x10ed43c718714eb63d5aa57b78b54704e256024e" + }, + { + "amount_usd": 2.668233112736453, + "block_date": "2021-06-04 00:00:00.000 UTC", + "block_month": "2021-06-01T00:00:00Z", + "block_time": "2021-06-04 00:11:44.000 UTC", + "blockchain": "bnb", + "evt_index": 384, + "maker": null, + "project": "pancakeswap", + "project_contract_address": "0x16b9a82891338f9ba80e2d6970fdda79d1eb0dae", + "taker": "0x452c012e55f7a27d3c25caf15fddfc5d63004cd5", + "token_bought_address": "0x55d398326f99059ff775485246999027b3197955", + "token_bought_amount": 2.662657507914879, + "token_bought_amount_raw": "2662657507914878729", + "token_bought_symbol": "USDT", + "token_pair": "USDT-WBNB", + "token_sold_address": "0xbb4cdb9cbd36b01bd1cbaebf2de08d9173bc095c", + "token_sold_amount": 0.00632140891710662, + "token_sold_amount_raw": "6321408917106620", + "token_sold_symbol": "WBNB", + "tx_from": "0x452c012e55f7a27d3c25caf15fddfc5d63004cd5", + "tx_hash": "0x540ae87272f0fcce54595e7c2b67b9ef32cf2d0b5ccfa23c73a21727ae3350ba", + "tx_to": "0x10ed43c718714eb63d5aa57b78b54704e256024e" + }, + { + "amount_usd": 46.95293131078031, + "block_date": "2021-06-25 00:00:00.000 UTC", + "block_month": "2021-06-01T00:00:00Z", + "block_time": "2021-06-25 18:24:20.000 UTC", + "blockchain": "bnb", + "evt_index": 105, + "maker": null, + "project": "pancakeswap", + "project_contract_address": "0x446f87f15d9a9f15b39d1b24d1d6d7e606e9d32d", + "taker": "0x10ed43c718714eb63d5aa57b78b54704e256024e", + "token_bought_address": "0xbb4cdb9cbd36b01bd1cbaebf2de08d9173bc095c", + "token_bought_amount": 0.16652337675833562, + "token_bought_amount_raw": "166523376758335622", + 
"token_bought_symbol": "WBNB", + "token_pair": null, + "token_sold_address": "0x384f5a9b720349015a27251684c7a1510dd151ba", + "token_sold_amount": null, + "token_sold_amount_raw": "299011495590365081887", + "token_sold_symbol": null, + "tx_from": "0x60e028a06d04ecfd041ba415c3b8a63258a6506e", + "tx_hash": "0xf5e24e5024ff047210127c87cd12b475354ff84afc4bebc159066f9522daac73", + "tx_to": "0x10ed43c718714eb63d5aa57b78b54704e256024e" + } + ], + "metadata": { + "column_names": [ + "amount_usd", + "block_date", + "token_bought_symbol", + "token_pair", + "block_time", + "blockchain", + "evt_index", + "block_month", + "maker", + "project", + "project_contract_address", + "taker", + "token_bought_address", + "token_bought_amount", + "token_bought_amount_raw", + "token_sold_address", + "token_sold_amount", + "token_sold_amount_raw", + "token_sold_symbol", + "tx_from", + "tx_hash", + "tx_to" + ], + "row_count": 5, + "result_set_bytes": 0, + "total_row_count": 10, + "total_result_set_bytes": 5639, + "datapoint_count": 110, + "pending_time_millis": 15, + "execution_time_millis": 1336 + } + }, + "next_uri": "https://api.dune.com/api/v1/execution/01HPFJ7VSFXPTA8WPMDKBXE167/results?limit=5&offset=5", + "next_offset": 5 + } ``` + +**Data Return Limit** + +Dune imposes a 1GB data limit on query results for each query logic execution. If your query logic yields more than 1GB of data, the result will be truncated in storage. In such cases, pulling the result data with `allow_partial_results` set to false (the default) will trigger an error message: "error": "Partial Result, please request with 'allows_partial_results=true'". To retrieve partial results, you must pass the parameter `allow_partial_results=true`. + +So what? Related to pagination, this means that +- When query results exceed 1GB, set `allow_partial_results=true` to in addition to `limit` and `offset` parameters in order to pull partial result. 
+- For query results under 1GB, pagination with `limit` and `offset` can be used as usual. -TODO -- figure out pagination's relationship with allow_partial_results and write blurb about it -- once pagination page finalized, refer to this in all the get results endpoint pages \ No newline at end of file + \ No newline at end of file diff --git a/api-reference/overview/rate-limits.mdx b/api-reference/overview/rate-limits.mdx index a67f34c2..a0aeaae8 100644 --- a/api-reference/overview/rate-limits.mdx +++ b/api-reference/overview/rate-limits.mdx @@ -29,6 +29,4 @@ For example, on the Free plan, you have a low limit of 15 requests per minute an **Data Return Limit** -Currently, each API result call is limited to returning a maximum of 1GB of data. - -_Once we implement pagination, we plan to increase this data return limit._ \ No newline at end of file +Dune imposes a 1GB data limit on query results for each query logic execution. If your query logic yields more than 1GB of data, the result will be truncated in storage. In such cases, pulling the result data with `allow_partial_results` set to false (the default) will trigger an error message: "error": "Partial Result, please request with 'allows_partial_results=true'". To retrieve partial results, you must pass the parameter `allow_partial_results=true`. diff --git a/api-reference/query-api/endpoint/get-execution-result-csv.mdx b/api-reference/query-api/endpoint/get-execution-result-csv.mdx index e21dc375..336a2f63 100644 --- a/api-reference/query-api/endpoint/get-execution-result-csv.mdx +++ b/api-reference/query-api/endpoint/get-execution-result-csv.mdx @@ -7,9 +7,15 @@ You must pass the `execution_id` obtained from making an [execute query](execute Result returns the status, metadata, and query results (in CSV) from a query execution. - Results data from an execution are currently stored for 2 years. We may reduce this to something closer to 90 days in the future. 
This is visible on the API response on the “expires_at” field in the execution status and results body. + - There is currently a 1GB limit in how much data a single API result call can return, but there is a chance we reduce this overall or based on varying paid plan types. +- Results data from an execution are currently stored for 2 years. We may reduce this to something closer to 90 days in the future. This is visible on the API response on the “expires_at” field in the execution status and results body. + +- There is currently a 1GB limit in how much data a single API result call can return, but there is a chance we reduce this overall or based on varying paid plan types. + +- To paginate query results, please visit the [pagination page](../../overview/pagination) to get more info. + + If you are using the [Python SDK](https://github.com/duneanalytics/dune-client/tree/d2195b2a9577e2dcae5d2600cb3eddce20987f38), you can directly executes and fetches result in one function call, like below: diff --git a/api-reference/query-api/endpoint/get-execution-result.mdx b/api-reference/query-api/endpoint/get-execution-result.mdx index 13a5bc4a..ca1986c7 100644 --- a/api-reference/query-api/endpoint/get-execution-result.mdx +++ b/api-reference/query-api/endpoint/get-execution-result.mdx @@ -8,9 +8,12 @@ You must pass the `execution_id` obtained from making an [execute query](execute Result returns the status, metadata, and query results (in JSON) from a query execution. - Results data from an execution are currently stored for 2 years. We may reduce this to something closer to 90 days in the future. This is visible on the API response on the “expires_at” field in the execution status and results body. + +- Results data from an execution are currently stored for 2 years. We may reduce this to something closer to 90 days in the future. This is visible on the API response on the “expires_at” field in the execution status and results body. 
+- There is currently a 1GB limit in how much data a single API result call can return, but there is a chance we reduce this overall or based on varying paid plan types. +- To paginate query results, please visit the [pagination page](../../overview/pagination) to get more info. - There is currently a 1GB limit in how much data a single API result call can return, but there is a chance we reduce this overall or based on varying paid plan types. + If you are using the [Python SDK](https://github.com/duneanalytics/dune-client/tree/d2195b2a9577e2dcae5d2600cb3eddce20987f38), you can directly executes and fetches result in one function call, like below: diff --git a/api-reference/query-api/endpoint/get-query-result-csv.mdx b/api-reference/query-api/endpoint/get-query-result-csv.mdx index d2b066a9..f450bca1 100644 --- a/api-reference/query-api/endpoint/get-query-result-csv.mdx +++ b/api-reference/query-api/endpoint/get-query-result-csv.mdx @@ -9,9 +9,12 @@ Returns the latest execution id and results (in CSV) of that latest run, regardl The query specified must either be public or a query you have ownership of (you or a team you belong to have ownership). - This endpoint does NOT trigger execution but does [consume credits through datapoints](https://dune.com/pricing). + This endpoint does NOT trigger execution but does [consume credits through datapoints](https://dune.com/pricing). - There is currently a 1GB limit in how much data a single API result call can return, but there is a chance we reduce this overall or based on varying paid plan types. + +- There is currently a 1GB limit in how much data a single API result call can return, but there is a chance we reduce this overall or based on varying paid plan types. +- To paginate query results, please visit the [pagination page](../../overview/pagination) to get more info. 
+ If you are using the [Python SDK](https://github.com/duneanalytics/dune-client/tree/d2195b2a9577e2dcae5d2600cb3eddce20987f38), you can directly executes and fetches result in one function call, like below: diff --git a/api-reference/query-api/endpoint/get-query-result.mdx b/api-reference/query-api/endpoint/get-query-result.mdx index f6d86a52..58d5c126 100644 --- a/api-reference/query-api/endpoint/get-query-result.mdx +++ b/api-reference/query-api/endpoint/get-query-result.mdx @@ -9,9 +9,12 @@ Returns the latest execution id and results (in JSON) of that latest run, regard The query specified must either be public or a query you have ownership of (you or a team you belong to have ownership). - This endpoint does NOT trigger execution but does [consume credits through datapoints](https://dune.com/pricing). + This endpoint does NOT trigger execution but does [consume credits through datapoints](https://dune.com/pricing). - There is currently a 1GB limit in how much data a single API result call can return, but there is a chance we reduce this overall or based on varying paid plan types. + +- There is currently a 1GB limit in how much data a single API result call can return, but there is a chance we reduce this overall or based on varying paid plan types. +- To paginate query results, please visit the [pagination page](../../overview/pagination) to get more info. 
+ If you are using the [Python SDK](https://github.com/duneanalytics/dune-client/tree/d2195b2a9577e2dcae5d2600cb3eddce20987f38), you can directly executes and fetches result in one function call, like below: diff --git a/api-reference/quickstart/query-eg.mdx b/api-reference/quickstart/query-eg.mdx index f1f25aea..f6e91b53 100644 --- a/api-reference/quickstart/query-eg.mdx +++ b/api-reference/quickstart/query-eg.mdx @@ -82,6 +82,10 @@ You can choose to either get the latest query result without triggering an execu + + To paginate query results, please visit the [pagination page](../overview/pagination) to get more info. + + ```python From f0bb27de03cfba497392d8302df908277326824b Mon Sep 17 00:00:00 2001 From: agaperste Date: Tue, 13 Feb 2024 14:09:18 -0500 Subject: [PATCH 08/20] addressing PR comments --- api-reference/overview/pagination.mdx | 82 +++++++++++-------- api-reference/overview/rate-limits.mdx | 2 +- .../endpoint/get-execution-result-csv.mdx | 2 +- .../endpoint/get-execution-result.mdx | 2 +- .../endpoint/get-query-result-csv.mdx | 2 +- .../query-api/endpoint/get-query-result.mdx | 2 +- 6 files changed, 53 insertions(+), 39 deletions(-) diff --git a/api-reference/overview/pagination.mdx b/api-reference/overview/pagination.mdx index 81db02ca..e389a2d8 100644 --- a/api-reference/overview/pagination.mdx +++ b/api-reference/overview/pagination.mdx @@ -6,7 +6,7 @@ All get results endpoints, including [get execution results](../query-api/endpoi To paginate through results: 1. Specify the `limit` parameter to determine the number of results per page. 2. Use the `offset` parameter to indicate the starting point for retrieving results. -3. Check the response headers for `x-dune-next-offset` and `x-dune-next-uri` to fetch the next page, if available. +3. If you are fetching JSON response, check the response headers for `next_offset` and `next_uri` to fetch the next page, if available. 
If you are fetching CSV response, check the response headers for `x-dune-next-offset` and `x-dune-next-uri` to fetch the next page, if available. ### Pagination Parameters @@ -25,7 +25,7 @@ To paginate through results: ```bash - curl -X GET 'https://api.dune.com/api/v1/query/3426636/results?limit=5&offset=0' --H 'x-dune-api-key: {{api_key}}' + curl -X GET 'https://api.dune.com/api/v1/query/3426636/results?limit=5&offset=0' -H 'x-dune-api-key: {{api_key}}' ``` @@ -178,42 +178,56 @@ To paginate through results: The following fields in the repsonse body are related to pagination and can be utilized when doing paginated get results request. If they are available, you can use them to paginate the next page. If they are not avaialble, that means there is no more results to be fetched. -**`x-dune-next-offset`** -- Type: Integer -- Description: Provides the offset to use for retrieving the next page of results, if available. - -**`x-dune-next-uri`** -- Type: String (URL) -- Description: Specifies the URI to retrieve the next page of results, if available. + + + **`next_offset`** + - Type: Integer + - Description: Provides the offset to use for retrieving the next page of results, if available. + + **`next_uri`** + - Type: String (URL) + - Description: Specifies the URI to retrieve the next page of results, if available. + + + **`x-dune-next-offset`** + - Type: Integer + - Description: Provides the offset to use for retrieving the next page of results, if available. + + **`x-dune-next-uri`** + - Type: String (URL) + - Description: Specifies the URI to retrieve the next page of results, if available. + + If you pass in an invalid `limit` and `offset` parameter values, you will get an empty result set. For example, if there are only 10 rows of result data, and you pass in `offset=11`, you will **not** receive an error, but rather an empty result with metadata like this. 
```json - "execution_id": "01HPF1299EV69Z08DBKW1B6MJR", - "query_id": 2616430, - "is_execution_finished": true, - "state": "QUERY_STATE_COMPLETED", - "submitted_at": "2024-02-12T16:05:40.270193Z", - "expires_at": "2024-05-12T16:05:40.654018Z", - "execution_started_at": "2024-02-12T16:05:40.312207906Z", - "execution_ended_at": "2024-02-12T16:05:40.654017085Z", - "result": { - "rows": [], - "metadata": { - "column_names": [ - "foo" - ], - "row_count": 0, - "result_set_bytes": 0, - "total_row_count": 25, - "total_result_set_bytes": 28, - "datapoint_count": 0, - "pending_time_millis": 42, - "execution_time_millis": 341 - } - } - } + { + "execution_id": "01HPF1299EV69Z08DBKW1B6MJR", + "query_id": 2616430, + "is_execution_finished": true, + "state": "QUERY_STATE_COMPLETED", + "submitted_at": "2024-02-12T16:05:40.270193Z", + "expires_at": "2024-05-12T16:05:40.654018Z", + "execution_started_at": "2024-02-12T16:05:40.312207906Z", + "execution_ended_at": "2024-02-12T16:05:40.654017085Z", + "result": { + "rows": [], + "metadata": { + "column_names": [ + "foo" + ], + "row_count": 0, + "result_set_bytes": 0, + "total_row_count": 25, + "total_result_set_bytes": 28, + "datapoint_count": 0, + "pending_time_millis": 42, + "execution_time_millis": 341 + } + } + } ``` @@ -396,7 +410,7 @@ If you pass in an invalid `limit` and `offset` parameter values, you will get an **Data Return Limit** -Dune imposes a 1GB data limit on query results for each query logic execution. If your query logic yields more than 1GB of data, the result will be truncated in storage. In such cases, pulling the result data with `allow_partial_results` set to false (the default) will trigger an error message: "error": "Partial Result, please request with 'allows_partial_results=true'". To retrieve partial results, you must pass the parameter `allow_partial_results=true`. +Dune imposes a 1GB data limit on query results for each query execution. 
If your query yields more than 1GB of data, the result will be truncated in storage. In such cases, pulling the result data with `allow_partial_results` set to false (the default) will trigger an error message: "error": "Partial Result, please request with 'allows_partial_results=true'". If you wish to retrieve partial results, you can pass the parameter `allow_partial_results=true`. But please make sure you indeed want to fetch the truncated result. So what? Related to pagination, this means that - When query results exceed 1GB, set `allow_partial_results=true` to in addition to `limit` and `offset` parameters in order to pull partial result. diff --git a/api-reference/overview/rate-limits.mdx b/api-reference/overview/rate-limits.mdx index a0aeaae8..329c3cf0 100644 --- a/api-reference/overview/rate-limits.mdx +++ b/api-reference/overview/rate-limits.mdx @@ -29,4 +29,4 @@ For example, on the Free plan, you have a low limit of 15 requests per minute an **Data Return Limit** -Dune imposes a 1GB data limit on query results for each query logic execution. If your query logic yields more than 1GB of data, the result will be truncated in storage. In such cases, pulling the result data with `allow_partial_results` set to false (the default) will trigger an error message: "error": "Partial Result, please request with 'allows_partial_results=true'". To retrieve partial results, you must pass the parameter `allow_partial_results=true`. +Dune imposes a 1GB data limit on query results for each query execution. If your query yields more than 1GB of data, the result will be truncated in storage. In such cases, pulling the result data with `allow_partial_results` set to false (the default) will trigger an error message: "error": "Partial Result, please request with 'allows_partial_results=true'". To retrieve partial results, you must pass the parameter `allow_partial_results=true`. 
diff --git a/api-reference/query-api/endpoint/get-execution-result-csv.mdx b/api-reference/query-api/endpoint/get-execution-result-csv.mdx index 336a2f63..5c4e5e39 100644 --- a/api-reference/query-api/endpoint/get-execution-result-csv.mdx +++ b/api-reference/query-api/endpoint/get-execution-result-csv.mdx @@ -9,7 +9,7 @@ Result returns the status, metadata, and query results (in CSV) from a query exe -- Results data from an execution are currently stored for 2 years. We may reduce this to something closer to 90 days in the future. This is visible on the API response on the “expires_at” field in the execution status and results body. +- Results data from an execution are currently stored for 90 days. This is visible on the API response on the “expires_at” field in the execution status and results body. - There is currently a 1GB limit in how much data a single API result call can return, but there is a chance we reduce this overall or based on varying paid plan types. diff --git a/api-reference/query-api/endpoint/get-execution-result.mdx b/api-reference/query-api/endpoint/get-execution-result.mdx index ca1986c7..a774026a 100644 --- a/api-reference/query-api/endpoint/get-execution-result.mdx +++ b/api-reference/query-api/endpoint/get-execution-result.mdx @@ -9,7 +9,7 @@ Result returns the status, metadata, and query results (in JSON) from a query ex -- Results data from an execution are currently stored for 2 years. We may reduce this to something closer to 90 days in the future. This is visible on the API response on the “expires_at” field in the execution status and results body. +- Results data from an execution are stored for 90 days. This is visible on the API response on the “expires_at” field in the execution status and results body. - There is currently a 1GB limit in how much data a single API result call can return, but there is a chance we reduce this overall or based on varying paid plan types. 
- To paginate query results, please visit the [pagination page](../../overview/pagination) to get more info. diff --git a/api-reference/query-api/endpoint/get-query-result-csv.mdx b/api-reference/query-api/endpoint/get-query-result-csv.mdx index f450bca1..64f6d997 100644 --- a/api-reference/query-api/endpoint/get-query-result-csv.mdx +++ b/api-reference/query-api/endpoint/get-query-result-csv.mdx @@ -9,7 +9,7 @@ Returns the latest execution id and results (in CSV) of that latest run, regardl The query specified must either be public or a query you have ownership of (you or a team you belong to have ownership). - This endpoint does NOT trigger execution but does [consume credits through datapoints](https://dune.com/pricing). + This endpoint does NOT trigger an execution but does [consume credits through datapoints](https://dune.com/pricing). - There is currently a 1GB limit in how much data a single API result call can return, but there is a chance we reduce this overall or based on varying paid plan types. diff --git a/api-reference/query-api/endpoint/get-query-result.mdx b/api-reference/query-api/endpoint/get-query-result.mdx index 58d5c126..34c63e58 100644 --- a/api-reference/query-api/endpoint/get-query-result.mdx +++ b/api-reference/query-api/endpoint/get-query-result.mdx @@ -9,7 +9,7 @@ Returns the latest execution id and results (in JSON) of that latest run, regard The query specified must either be public or a query you have ownership of (you or a team you belong to have ownership). - This endpoint does NOT trigger execution but does [consume credits through datapoints](https://dune.com/pricing). + This endpoint does NOT trigger an execution but does [consume credits through datapoints](https://dune.com/pricing). - There is currently a 1GB limit in how much data a single API result call can return, but there is a chance we reduce this overall or based on varying paid plan types. 
From dddbba9bbc312fb7c091ce6d55e68895cf2e5675 Mon Sep 17 00:00:00 2001 From: agaperste Date: Tue, 13 Feb 2024 14:46:54 -0500 Subject: [PATCH 09/20] minor correction for json response --- api-reference/overview/pagination.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api-reference/overview/pagination.mdx b/api-reference/overview/pagination.mdx index e389a2d8..faa491a1 100644 --- a/api-reference/overview/pagination.mdx +++ b/api-reference/overview/pagination.mdx @@ -6,7 +6,7 @@ All get results endpoints, including [get execution results](../query-api/endpoi To paginate through results: 1. Specify the `limit` parameter to determine the number of results per page. 2. Use the `offset` parameter to indicate the starting point for retrieving results. -3. If you are fetching JSON response, check the response headers for `next_offset` and `next_uri` to fetch the next page, if available. If you are fetching CSV response, check the response headers for `x-dune-next-offset` and `x-dune-next-uri` to fetch the next page, if available. +3. If you are fetching JSON response, check for the fields `next_offset` and `next_uri` in the response body to fetch the next page, if available. If you are fetching CSV response, check the response headers for `X-Dune-Next-Offset` and `X-Dune-Next-Uri` to fetch the next page, if available. 
### Pagination Parameters From 112ad6ce4a282ce94556dbd2bf29877b5080ef10 Mon Sep 17 00:00:00 2001 From: Miguel Filipe Date: Thu, 15 Feb 2024 12:40:48 +0000 Subject: [PATCH 10/20] small improvements to pagination page --- api-reference/overview/pagination.mdx | 42 +++++++++++++++------------ 1 file changed, 23 insertions(+), 19 deletions(-) diff --git a/api-reference/overview/pagination.mdx b/api-reference/overview/pagination.mdx index faa491a1..cbd3104c 100644 --- a/api-reference/overview/pagination.mdx +++ b/api-reference/overview/pagination.mdx @@ -1,12 +1,12 @@ --- title: Pagination --- -All get results endpoints, including [get execution results](../query-api/endpoint/get-execution-result), [get execution results csv](../query-api/endpoint/get-execution-result-csv), [get query results](../query-api/endpoint/get-query-result), [get query results csv](../query-api/endpoint/get-query-result-csv), support pagination for efficient data retrieval. Pagination divides large datasets into smaller, manageable chunks, preventing overload and ensuring smooth performance. This allows users to navigate through the data easily, avoiding limit errors and streamlining their data fetching experience. +All API endpoints to read query results, including [get execution results](../query-api/endpoint/get-execution-result), [get execution results csv](../query-api/endpoint/get-execution-result-csv), [get query results](../query-api/endpoint/get-query-result), [get query results csv](../query-api/endpoint/get-query-result-csv), support pagination for efficient data retrieval. Pagination divides large datasets into smaller, manageable chunks, preventing overload and ensuring smooth performance. This allows users to navigate through the data easily, avoiding limit errors and streamlining their data fetching experience. To paginate through results: -1. Specify the `limit` parameter to determine the number of results per page. -2. 
Use the `offset` parameter to indicate the starting point for retrieving results. -3. If you are fetching JSON response, check for the fields `next_offset` and `next_uri` in the response body to fetch the next page, if available. If you are fetching CSV response, check the response headers for `X-Dune-Next-Offset` and `X-Dune-Next-Uri` to fetch the next page, if available. +1. Specify the `limit` parameter to determine the number of results you want to retrieve. +2. Use the `offset` parameter to indicate the starting offset point for retrieving results, default value is 0 for the first row. +3. If you are fetching JSON response, check for the fields `next_offset` and `next_uri` in the response body to fetch the next page, if available. If you are fetching CSV response, check the response headers for `X-Dune-Next-Offset` and `X-Dune-Next-Uri` to fetch the next page, if available. Always follow the provided offset or uri, the server is allowed to override the provided limit if it is too large (see section on Data Returned Limit) ### Pagination Parameters @@ -25,7 +25,7 @@ To paginate through results: ```bash - curl -X GET 'https://api.dune.com/api/v1/query/3426636/results?limit=5&offset=0' -H 'x-dune-api-key: {{api_key}}' + curl -X GET 'https://api.dune.com/api/v1/query/3426636/results?limit=1000' -H 'x-dune-api-key: {{api_key}}' ``` @@ -45,7 +45,7 @@ To paginate through results: headers = {"X-DUNE-API-KEY": ""} - params = {"limit": 5, "offset": 0} # Define limit and offset parameters + params = {"limit": 1000, "offset": 0} # Define limit and offset parameters response = requests.request("GET", url, headers=headers, params=params) @@ -64,7 +64,7 @@ To paginate through results: } }; - const queryParams = new URLSearchParams({limit: 5, offset: 0}); // Define limit and offset parameters + const queryParams = new URLSearchParams({limit: 1000, offset: 0}); // Define limit and offset parameters const url = 
`https://api.dune.com/api/v1/query/{query_id}/results?${queryParams}`; fetch(url, options) @@ -91,8 +91,7 @@ To paginate through results: // Create query parameters params := url.Values{} - params.Set("limit", "5") - params.Set("offset", "0") + params.Set("limit", "1000") // Add parameters to URL fullURL := fmt.Sprintf("%s?%s", url, params.Encode()) @@ -120,7 +119,7 @@ To paginate through results: $url = "https://api.dune.com/api/v1/query/{query_id}/results"; $queryParams = http_build_query([ - 'limit' => 5, + 'limit' => 1000, 'offset' => 0 ]); $url .= '?' . $queryParams; @@ -176,7 +175,7 @@ To paginate through results: ### Pagination in Response -The following fields in the repsonse body are related to pagination and can be utilized when doing paginated get results request. If they are available, you can use them to paginate the next page. If they are not avaialble, that means there is no more results to be fetched. +The following fields in the repsonse body are related to pagination and can be utilized when doing paginated get results request. If they are available, you can use them to paginate the next page. If they are not available, that means there are no more results to be fetched. @@ -186,7 +185,7 @@ The following fields in the repsonse body are related to pagination and can be u **`next_uri`** - Type: String (URL) - - Description: Specifies the URI to retrieve the next page of results, if available. + - Description: Specifies the complete URI to retrieve the next page of results, if available. **`x-dune-next-offset`** @@ -195,12 +194,12 @@ The following fields in the repsonse body are related to pagination and can be u **`x-dune-next-uri`** - Type: String (URL) - - Description: Specifies the URI to retrieve the next page of results, if available. + - Description: Specifies the complete URI to retrieve the next page of results, if available. -If you pass in an invalid `limit` and `offset` parameter values, you will get an empty result set. 
For example, if there are only 10 rows of result data, and you pass in `offset=11`, you will **not** receive an error, but rather an empty result with metadata like this. +If you pass in an invalid `offset` parameter value, you will get an empty result set. For example, if there are only 25 rows of result data, and you pass in `offset=30`, you will **not** receive an error, but rather an empty result with metadata like this. Note the response field `result.total_row_count`, indicating this result has only 25 rows. ```json { @@ -408,12 +407,17 @@ If you pass in an invalid `limit` and `offset` parameter values, you will get an -**Data Return Limit** +**Data Returned Limit ** +When using pagination, our intention is to use sizes that work well on mobile, with lower data and ram consumption. For this, and to avoid more work on the developer, when the client specifies a very large limit value (for example 500000 rows), instead of returning an error, the server will override this limit to a lower, safer value (for example 30000 rows) and *will always provide* the correct next `offset` and `limit` value to use on the next paginated requests. The exact maximum limit value is subject to change. -Dune imposes a 1GB data limit on query results for each query execution. If your query yields more than 1GB of data, the result will be truncated in storage. In such cases, pulling the result data with `allow_partial_results` set to false (the default) will trigger an error message: "error": "Partial Result, please request with 'allows_partial_results=true'". If you wish to retrieve partial results, you can pass the parameter `allow_partial_results=true`. But please make sure you indeed want to fetch the truncated result. + +**Data Size Limit ** + +Dune internally has a maximum query result size limit (which currently is 8GB, but subject to increase in the future). If your query yields more than 8GB of data, the result will be truncated in storage. 
In such cases, pulling the result data (using pagination) but without specifying `allow_partial_results` set to true will trigger an error message: "error": "Partial Result, please request with 'allows_partial_results=true'". If you wish to retrieve partial results, you can pass the parameter `allow_partial_results=true`. But please make sure you indeed want to fetch the truncated result. So what? Related to pagination, this means that -- When query results exceed 1GB, set `allow_partial_results=true` to in addition to `limit` and `offset` parameters in order to pull partial result. -- For query results under 1GB, pagination with `limit` and `offset` can be used as usual. +- For query results under 8GB, use the API as normal. +- When your query results exceed 8GB, in addition to `limit` and `offset` parameters in order to read the partial result (the first 8GB of data), set `allow_partial_results=true` +- You can use the (Get Status API)[https://dune.mintlify.app/api-reference/query-api/endpoint/get-execution-status] to check the size of your result, `result.result_set_size` - \ No newline at end of file + From d6f326fb62fb5c1885ea32f83367d0973e027580 Mon Sep 17 00:00:00 2001 From: Miguel Filipe Date: Thu, 15 Feb 2024 15:26:47 +0000 Subject: [PATCH 11/20] query execution API: a slew of small fixes. 
--- api-reference/query-api/query-openapi.json | 116 ++++++++++++++++----- 1 file changed, 90 insertions(+), 26 deletions(-) diff --git a/api-reference/query-api/query-openapi.json b/api-reference/query-api/query-openapi.json index 001fa4e5..aa68e45a 100644 --- a/api-reference/query-api/query-openapi.json +++ b/api-reference/query-api/query-openapi.json @@ -2,7 +2,7 @@ "openapi": "3.0.1", "info": { "title": "OpenAPI for Query and Execution API", - "description": "API for Querying and Executing Dune Data", + "description": "API for Querying and Reading Dune Data", "version": "1.0.0" }, "servers": [ @@ -24,6 +24,15 @@ "description": "API Key for accessing this service", "required": true }, + { + "in": "path", + "name": "api_key", + "schema": { + "type": "string" + }, + "description": "API Key for accessing this service, alternative to using the HTTP header X-DUNE-API-KEY.", + "required": false + }, { "in": "path", "name": "query_id", @@ -35,26 +44,26 @@ }, { "in": "query", - "name": "parameters", + "name": "performance", "required": false, - "explode": true, "schema": { "items": { - "$ref": "#/components/schemas/ParameterObject" + "$ref": "#/components/schemas/Performance" } }, - "description": "Query parameters in key-value pairs. Each parameter is an object consisting of keys such as 'key', 'type', 'value', and optionally 'enumOptions'. The API allows for partial submission of parameters. For example, if the query expects three parameters and you only pass in two, the third one will automatically use its default value as defined in the API. This feature enables you to customize the query execution according to your specific needs while providing sensible defaults for unspecified parameters." + "description": "The performance engine tier the execution will be run on. Can be either medium or large. Medium consumes 10 credits, and large consumes 20 credits, per run. Default is medium." 
}, { "in": "query", - "name": "performance", + "name": "parameters", "required": false, + "explode": true, "schema": { "items": { - "$ref": "#/components/schemas/Performance" + "$ref": "#/components/schemas/ParameterObject" } }, - "description": "Defines the engine the execution will be run on. Can be either medium or large tier. Medium consumes 10 credits per run and large consumes 20 credits per run. By default performance is medium." + "description": "SQL Query parameters in key-value pairs. Each parameter is an object consisting of keys such as 'key', 'type', 'value', and optionally 'enumOptions'. The API allows for partial submission of parameters. For example, if the query expects three parameters and you only pass in two, the third one will automatically use its default value as defined in the API. This feature enables you to parameterize the query execution according to your specific needs while providing sensible defaults for unspecified parameters." } ], "responses": { @@ -73,7 +82,7 @@ } }, "400": { - "description": "Bad Request - The request could not be understood by the server due to malformed syntax or validation failure.", + "description": "Bad Request - The request could not be processed by the server due to malformed syntax or validation failure.", "content": { "application/json": { "schema": { @@ -113,13 +122,13 @@ "type": "object" }, "example": { - "error": "Not allowed to execute query. Query is archived or an unsaved query" + "error": "Not allowed to execute query. Query is archived, unsaved or not enough permissions." } } } }, "500": { - "description": "Internal server error occurred. This usually happens due to unexpected issues in processing the request. It may include errors such as failure in core API execution, invalid query parameters, or issues with the customer data provided.", + "description": "Internal server error occurred. 
This usually happens due to unexpected issues in processing the request.", "content": { "application/json": { "schema": { @@ -144,6 +153,15 @@ "description": "API Key for accessing this service", "required": true }, + { + "in": "path", + "name": "api_key", + "schema": { + "type": "string" + }, + "description": "API Key for accessing this service, alternative to using the HTTP header X-DUNE-API-KEY.", + "required": false + }, { "in": "path", "name": "execution_id", @@ -205,7 +223,7 @@ } }, "500": { - "description": "Internal server error occurred. This usually happens due to unexpected issues in processing the request. It may include errors such as failure in core API execution, invalid query parameters, or issues with the customer data provided.", + "description": "Internal server error occurred. This usually happens due to unexpected issues in processing the request.", "content": { "application/json": { "schema": { @@ -230,6 +248,15 @@ "description": "API Key for accessing this service", "required": true }, + { + "in": "path", + "name": "api_key", + "schema": { + "type": "string" + }, + "description": "API Key for accessing this service, alternative to using the HTTP header X-DUNE-API-KEY.", + "required": false + }, { "in": "path", "name": "execution_id", @@ -282,7 +309,7 @@ } }, "500": { - "description": "Internal server error occurred. This usually happens due to unexpected issues in processing the request. It may include errors such as failure in core API execution, invalid query parameters, or issues with the customer data provided.", + "description": "Internal server error occurred. 
This usually happens due to unexpected issues in processing the request.", "content": { "application/json": { "schema": { @@ -307,6 +334,15 @@ "description": "API Key for accessing this service", "required": true }, + { + "in": "path", + "name": "api_key", + "schema": { + "type": "string" + }, + "description": "API Key for accessing this service, alternative to using the HTTP header X-DUNE-API-KEY.", + "required": false + }, { "in": "path", "name": "query_id", @@ -344,7 +380,7 @@ "$ref": "#/components/schemas/ParameterObject" } }, - "description": "Query parameters in key-value pairs. Each parameter is an object consisting of keys such as 'key', 'type', 'value', and optionally 'enumOptions'. The API allows for partial submission of parameters. For example, if the query expects three parameters and you only pass in two, the third one will automatically use its default value as defined in the API. This feature enables you to customize the query execution according to your specific needs while providing sensible defaults for unspecified parameters." + "description": "SQL Query parameters in key-value pairs. Each parameter is an object consisting of keys such as 'key', 'type', 'value', and optionally 'enumOptions'. The API allows for partial submission of parameters. For example, if the query expects three parameters and you only pass in two, the third one will automatically use its default value as defined in the API. This feature enables you to parameterize the query execution according to your specific needs while providing sensible defaults for unspecified parameters." }, { "in": "query", @@ -362,7 +398,7 @@ "schema": { "type": "boolean" }, - "description": "Sometimes request results can be too large to return. By default allow_partial_results is set to false and a failed state is returned." + "description": "This enables returning a query result that was too large and only a partial result is available. 
By default allow_partial_results is set to false and a failed state is returned." } ], "responses": { @@ -446,6 +482,15 @@ "description": "API Key for accessing this service", "required": true }, + { + "in": "path", + "name": "api_key", + "schema": { + "type": "string" + }, + "description": "API Key for accessing this service, alternative to using the HTTP header X-DUNE-API-KEY.", + "required": false + }, { "in": "path", "name": "query_id", @@ -483,7 +528,7 @@ "$ref": "#/components/schemas/ParameterObject" } }, - "description": "Query parameters in key-value pairs. Each parameter is an object consisting of keys such as 'key', 'type', 'value', and optionally 'enumOptions'. The API allows for partial submission of parameters. For example, if the query expects three parameters and you only pass in two, the third one will automatically use its default value as defined in the API. This feature enables you to customize the query execution according to your specific needs while providing sensible defaults for unspecified parameters." + "description": "SQL Query parameters in key-value pairs. Each parameter is an object consisting of keys such as 'key', 'type', 'value', and optionally 'enumOptions'. The API allows for partial submission of parameters. For example, if the query expects three parameters and you only pass in two, the third one will automatically use its default value as defined in the API. This feature enables you to parameterize the query execution according to your specific needs while providing sensible defaults for unspecified parameters." }, { "in": "query", @@ -501,7 +546,7 @@ "schema": { "type": "boolean" }, - "description": "Sometimes request results can be too large to return. By default allow_partial_results is set to false and a failed state is returned. " + "description": "This enables returning a query result that was too large and only a partial result is available. 
By default allow_partial_results is set to false and a failed state is returned." } ], "responses": { @@ -599,6 +644,15 @@ "description": "API Key for accessing this service", "required": true }, + { + "in": "path", + "name": "api_key", + "schema": { + "type": "string" + }, + "description": "API Key for accessing this service, alternative to using the HTTP header X-DUNE-API-KEY.", + "required": false + }, { "in": "path", "name": "execution_id", @@ -642,7 +696,7 @@ "schema": { "type": "boolean" }, - "description": "Sometimes request results can be too large to return. By default allow_partial_results is set to false and a failed state is returned. " + "description": "This enables returning a query result that was too large and only a partial result is available. By default allow_partial_results is set to false and a failed state is returned." } ], "responses": { @@ -718,6 +772,15 @@ "description": "API Key for accessing this service", "required": true }, + { + "in": "path", + "name": "api_key", + "schema": { + "type": "string" + }, + "description": "API Key for accessing this service, alternative to using the HTTP header X-DUNE-API-KEY.", + "required": false + }, { "in": "path", "name": "execution_id", @@ -761,7 +824,7 @@ "schema": { "type": "boolean" }, - "description": "Sometimes request results can be too large to return. By default allow_partial_results is set to false and a failed state is returned. " + "description": "This enables returning a query result that was too large and only a partial result is available. By default allow_partial_results is set to false and a failed state is returned." 
} ], "responses": { @@ -834,7 +897,7 @@ "type": "object" }, "example": { - "error": "Query state is not QUERY_STATE_CANCELLED, cannot provide CSV Result", + "error": "Query state is not QUERY_STATE_COMPLETED, cannot provide CSV Result", "errorDetails": null, "state": "QUERY_STATE_CANCELLED" } @@ -1025,7 +1088,7 @@ "properties": { "execution_id": { "type": "string", - "description": "Unique identifier for the execution of the query.", + "description": "Unique identifier for the execution of the query and corresponding result.", "example": "01HKZSJAW6N2MFVCBHA3R8S64X" }, "query_id": { @@ -1085,7 +1148,7 @@ "$ref": "#/components/schemas/ExecutionResultMetadata" }, "rows": { - "$ref": "#/components/schemas/QueryResultRow" + "$ref": "#/components/schemas/QueryResultRows" } } }, @@ -1105,9 +1168,9 @@ } } }, - "QueryResultRow": { + "QueryResultRows": { "type": "object", - "description": "A row is dictionary of key-value pairs returned by the query, each pair corresponding to a column", + "description": "A list of rows. A row is dictionary of key-value pairs returned by the query, each pair corresponding to a column", "example": [ { "24 Hours Volume": 8466988.095521685, @@ -1223,7 +1286,7 @@ }, "total_result_set_bytes": { "type": "integer", - "description": "Total number of bytes in the result set.", + "description": "Total number of bytes in the result set. This doesn't include the json representation overhead.", "example": 1541 }, "total_row_count": { @@ -1251,6 +1314,7 @@ "ParameterObject": { "type": "object", "required": ["key"], + "description": "SQL Query parameters in key-value pairs. Each parameter is an object consisting of keys such as 'key', 'type', 'value', and optionally 'enumOptions'. The API allows for partial submission of parameters. For example, if the query expects three parameters and you only pass in two, the third one will automatically use its default value as defined in the API. 
This feature enables you to parameterize the query execution according to your specific needs while providing sensible defaults for unspecified parameters.", "properties": { "key": { "type": "string", @@ -1312,7 +1376,7 @@ "Performance": { "type": "string", "enum": ["medium", "large"], - "description": "Defines the engine the execution will be run on. Can be either medium or large tier. Medium consumes 10 credits per run, and large consumes 20 credits per run. By default, performance is medium." + "description": "The performance engine tier the execution will be run on. Can be either medium or large. Medium consumes 10 credits, and large consumes 20 credits, per run. Default is medium." }, "UnauthorizedError": { "type": "object", From 138afa4755e7e680502cf532e4e9afcafd03ef4a Mon Sep 17 00:00:00 2001 From: Miguel Filipe Date: Thu, 15 Feb 2024 15:47:56 +0000 Subject: [PATCH 12/20] fix definition for queryParameters --- api-reference/query-api/query-openapi.json | 70 +++------------------- 1 file changed, 8 insertions(+), 62 deletions(-) diff --git a/api-reference/query-api/query-openapi.json b/api-reference/query-api/query-openapi.json index aa68e45a..0aad8a7d 100644 --- a/api-reference/query-api/query-openapi.json +++ b/api-reference/query-api/query-openapi.json @@ -63,7 +63,7 @@ "$ref": "#/components/schemas/ParameterObject" } }, - "description": "SQL Query parameters in key-value pairs. Each parameter is an object consisting of keys such as 'key', 'type', 'value', and optionally 'enumOptions'. The API allows for partial submission of parameters. For example, if the query expects three parameters and you only pass in two, the third one will automatically use its default value as defined in the API. This feature enables you to parameterize the query execution according to your specific needs while providing sensible defaults for unspecified parameters." + "description": "SQL Query parameters in key-value pairs. Each parameter is to be provided in key-value pairs. 
This enables you to execute a parameterized query with the provided values for your parameter keys. Partial submission of parameters is allowed. For example, if the query expects three parameters and you only pass in two, the third one will automatically use its default value as defined in the Query Parameter Editor page." } ], "responses": { @@ -380,7 +380,7 @@ "$ref": "#/components/schemas/ParameterObject" } }, - "description": "SQL Query parameters in key-value pairs. Each parameter is an object consisting of keys such as 'key', 'type', 'value', and optionally 'enumOptions'. The API allows for partial submission of parameters. For example, if the query expects three parameters and you only pass in two, the third one will automatically use its default value as defined in the API. This feature enables you to parameterize the query execution according to your specific needs while providing sensible defaults for unspecified parameters." + "description": "SQL Query parameters in key-value pairs. Each parameter is to be provided in key-value pairs. This enables you to execute a parameterized query with the provided values for your parameter keys. Partial submission of parameters is allowed. For example, if the query expects three parameters and you only pass in two, the third one will automatically use its default value as defined in the Query Parameter Editor page." }, { "in": "query", @@ -528,7 +528,7 @@ "$ref": "#/components/schemas/ParameterObject" } }, - "description": "SQL Query parameters in key-value pairs. Each parameter is an object consisting of keys such as 'key', 'type', 'value', and optionally 'enumOptions'. The API allows for partial submission of parameters. For example, if the query expects three parameters and you only pass in two, the third one will automatically use its default value as defined in the API. 
This feature enables you to parameterize the query execution according to your specific needs while providing sensible defaults for unspecified parameters." + "description": "SQL Query parameters in key-value pairs. Each parameter is to be provided in key-value pairs. This enables you to execute a parameterized query with the provided values for your parameter keys. Partial submission of parameters is allowed. For example, if the query expects three parameters and you only pass in two, the third one will automatically use its default value as defined in the Query Parameter Editor page." }, { "in": "query", @@ -1313,65 +1313,11 @@ }, "ParameterObject": { "type": "object", - "required": ["key"], - "description": "SQL Query parameters in key-value pairs. Each parameter is an object consisting of keys such as 'key', 'type', 'value', and optionally 'enumOptions'. The API allows for partial submission of parameters. For example, if the query expects three parameters and you only pass in two, the third one will automatically use its default value as defined in the API. This feature enables you to parameterize the query execution according to your specific needs while providing sensible defaults for unspecified parameters.", - "properties": { - "key": { - "type": "string", - "description": "The key name of the parameter." - }, - "description": { - "type": "string", - "description": "A brief description of the parameter." - }, - "value": { - "type": "string", - "description": "The default value used by this parameter during execution, format depends on the type." - }, - "values": { - "type": "array", - "items": { - "type": "string" - }, - "description": "An array of string values, used when multiple selections are allowed." - }, - "type": { - "type": "string", - "enum": ["number", "text", "datetime", "enum"], - "description": "The type of the parameter, determines the format of 'value(s)'. 'number': Numeric parameters, the value must be a number (e.g., '20'). 
'text': String parameters, value can be any text including hex 0x-prefixed values (e.g., '0xae2fc...'), an empty value defaults to an empty string. 'datetime': Date and time parameters, value must be in 'YYYY-MM-DD hh:mm:ss' format (e.g., '2021-12-31 23:59:59'). 'enum': Parameters with a specific list of values, 'EnumValues' field is mandatory, providing a JSON list of strings representing valid options, the 'value' must be one of these options (e.g., 'Option1')." - }, - "EnumValues": { - "type": "array", - "items": { - "type": "string" - }, - "description": "List of valid options for 'enum' type parameters." - }, - "isMultiselect": { - "type": "boolean", - "description": "Indicates if multiple selections are allowed for this parameter." - }, - "isFreeformAllowed": { - "type": "boolean", - "description": "Indicates if freeform input is allowed for this parameter." - }, - "enumFromResults": { - "$ref": "#/components/schemas/EnumFromResults" - } - } - }, - "EnumFromResults": { - "type": "object", - "properties": { - "query_id": { - "type": "integer", - "description": "The ID of the query to fetch results from." - }, - "columnName": { - "type": "string", - "description": "The column name to use from the query results." - } - } + "example": { + "parameterNameFoo": "value of parameterNameFoo", + "parameterNameBar": 420 + }, + "description": "SQL Query parameters in key-value pairs. Each parameter is to be provided in key-value pairs. This enables you to execute a parameterized query with the provided values for your parameter keys. Partial submission of parameters is allowed. For example, if the query expects three parameters and you only pass in two, the third one will automatically use its default value as defined in the Query Parameter Editor page." 
}, "Performance": { "type": "string", From 53040c7347998faf56ad8bbbb8074f59730206a4 Mon Sep 17 00:00:00 2001 From: Andrew <47720952+andrewhong5297@users.noreply.github.com> Date: Thu, 15 Feb 2024 10:58:41 -0500 Subject: [PATCH 13/20] Update query-parameters.mdx --- api-reference/overview/query-parameters.mdx | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/api-reference/overview/query-parameters.mdx b/api-reference/overview/query-parameters.mdx index 3e8f9065..bb8c35fb 100644 --- a/api-reference/overview/query-parameters.mdx +++ b/api-reference/overview/query-parameters.mdx @@ -9,7 +9,18 @@ There are four kinds of parameters you can use in a Dune query (these are not AP - date - enum (called a list in the UI) -For passing these parameters through the API request body, you can use the following format: +For passing these parameters through the API request body, you can use the following format for executions: + +```json +{ +"foo": "value", +"bar":1000, +"baz": "2020-12-01T01:20:30Z" +} +``` +Where "foo", "bar", and "baz" are three params in a query. If you leave one out, it goes with the default param valiue. + +For CRUD operations, you'll need to define the type and more: ```json [ @@ -46,4 +57,4 @@ If you are using bytearrays/binary (`0x1234...`), then you will still pass it as If you're using the Dune Python SDK, check out the [sdk doc page](/api-reference/overview/sdks) for an example. 
- \ No newline at end of file + From 2a7f45fe24dca53d9966339e225f8a1bb4e31e24 Mon Sep 17 00:00:00 2001 From: Miguel Filipe Date: Thu, 15 Feb 2024 16:46:29 +0000 Subject: [PATCH 14/20] apikey wording and query param optional field --- api-reference/crud/crud-openapi.json | 72 +++++++++++++++++++--- api-reference/query-api/query-openapi.json | 32 +++++----- api-reference/upload/upload-openapi.json | 11 +++- 3 files changed, 89 insertions(+), 26 deletions(-) diff --git a/api-reference/crud/crud-openapi.json b/api-reference/crud/crud-openapi.json index ff7835b7..c8b63801 100644 --- a/api-reference/crud/crud-openapi.json +++ b/api-reference/crud/crud-openapi.json @@ -1,8 +1,8 @@ { "openapi": "3.0.1", "info": { - "title": "OpenAPI for CRUD API", - "description": "API for create, read, update, and delete operations on Dune queries.", + "title": "OpenAPI for Query CRUD API", + "description": "API for Create, Read, Update, and Delete operations on Dune queries.", "version": "1.0.0" }, "servers": [ @@ -22,9 +22,18 @@ "schema": { "type": "string" }, - "description": "API Key for accessing this service", + "description": "API Key for the service", "required": true }, + { + "in": "path", + "name": "api_key", + "schema": { + "type": "string" + }, + "description": "API Key for the service, alternative to using the HTTP header X-DUNE-API-KEY.", + "required": false + }, { "in": "path", "name": "queryId", @@ -108,7 +117,7 @@ "schema": { "type": "string" }, - "description": "API Key for accessing this service", + "description": "API Key for the service", "required": true }, { @@ -223,8 +232,17 @@ "schema": { "type": "string" }, - "description": "API Key for accessing this service", + "description": "API Key for the service", "required": true + }, + { + "in": "path", + "name": "api_key", + "schema": { + "type": "string" + }, + "description": "API Key for the service, alternative to using the HTTP header X-DUNE-API-KEY.", + "required": false } ], "requestBody": { @@ -319,9 +337,18 @@ 
"schema": { "type": "string" }, - "description": "API Key for accessing this service", + "description": "API Key for the service", "required": true }, + { + "in": "path", + "name": "api_key", + "schema": { + "type": "string" + }, + "description": "API Key for the service, alternative to using the HTTP header X-DUNE-API-KEY.", + "required": false + }, { "in": "path", "name": "queryId", @@ -414,9 +441,18 @@ "schema": { "type": "string" }, - "description": "API Key for accessing this service", + "description": "API Key for the service", "required": true }, + { + "in": "path", + "name": "api_key", + "schema": { + "type": "string" + }, + "description": "API Key for the service, alternative to using the HTTP header X-DUNE-API-KEY.", + "required": false + }, { "in": "path", "name": "queryId", @@ -509,9 +545,18 @@ "schema": { "type": "string" }, - "description": "API Key for accessing this service", + "description": "API Key for the service", "required": true }, + { + "in": "path", + "name": "api_key", + "schema": { + "type": "string" + }, + "description": "API Key for the service, alternative to using the HTTP header X-DUNE-API-KEY.", + "required": false + }, { "in": "path", "name": "queryId", @@ -614,9 +659,18 @@ "schema": { "type": "string" }, - "description": "API Key for accessing this service", + "description": "API Key for the service", "required": true }, + { + "in": "path", + "name": "api_key", + "schema": { + "type": "string" + }, + "description": "API Key for the service, alternative to using the HTTP header X-DUNE-API-KEY.", + "required": false + }, { "in": "path", "name": "queryId", diff --git a/api-reference/query-api/query-openapi.json b/api-reference/query-api/query-openapi.json index 0aad8a7d..71272694 100644 --- a/api-reference/query-api/query-openapi.json +++ b/api-reference/query-api/query-openapi.json @@ -21,7 +21,7 @@ "schema": { "type": "string" }, - "description": "API Key for accessing this service", + "description": "API Key for the service", 
"required": true }, { @@ -30,7 +30,7 @@ "schema": { "type": "string" }, - "description": "API Key for accessing this service, alternative to using the HTTP header X-DUNE-API-KEY.", + "description": "API Key for the service, alternative to using the HTTP header X-DUNE-API-KEY.", "required": false }, { @@ -55,7 +55,7 @@ }, { "in": "query", - "name": "parameters", + "name": "query_parameters", "required": false, "explode": true, "schema": { @@ -150,7 +150,7 @@ "schema": { "type": "string" }, - "description": "API Key for accessing this service", + "description": "API Key for the service", "required": true }, { @@ -159,7 +159,7 @@ "schema": { "type": "string" }, - "description": "API Key for accessing this service, alternative to using the HTTP header X-DUNE-API-KEY.", + "description": "API Key for the service, alternative to using the HTTP header X-DUNE-API-KEY.", "required": false }, { @@ -245,7 +245,7 @@ "schema": { "type": "string" }, - "description": "API Key for accessing this service", + "description": "API Key for the service", "required": true }, { @@ -254,7 +254,7 @@ "schema": { "type": "string" }, - "description": "API Key for accessing this service, alternative to using the HTTP header X-DUNE-API-KEY.", + "description": "API Key for the service, alternative to using the HTTP header X-DUNE-API-KEY.", "required": false }, { @@ -331,7 +331,7 @@ "schema": { "type": "string" }, - "description": "API Key for accessing this service", + "description": "API Key for the service", "required": true }, { @@ -340,7 +340,7 @@ "schema": { "type": "string" }, - "description": "API Key for accessing this service, alternative to using the HTTP header X-DUNE-API-KEY.", + "description": "API Key for the service, alternative to using the HTTP header X-DUNE-API-KEY.", "required": false }, { @@ -372,7 +372,7 @@ }, { "in": "query", - "name": "parameters", + "name": "query_parameters", "required": false, "explode": true, "schema": { @@ -479,7 +479,7 @@ "schema": { "type": "string" 
}, - "description": "API Key for accessing this service", + "description": "API Key for the service", "required": true }, { @@ -488,7 +488,7 @@ "schema": { "type": "string" }, - "description": "API Key for accessing this service, alternative to using the HTTP header X-DUNE-API-KEY.", + "description": "API Key for the service, alternative to using the HTTP header X-DUNE-API-KEY.", "required": false }, { @@ -641,7 +641,7 @@ "schema": { "type": "string" }, - "description": "API Key for accessing this service", + "description": "API Key for the service", "required": true }, { @@ -650,7 +650,7 @@ "schema": { "type": "string" }, - "description": "API Key for accessing this service, alternative to using the HTTP header X-DUNE-API-KEY.", + "description": "API Key for the service, alternative to using the HTTP header X-DUNE-API-KEY.", "required": false }, { @@ -769,7 +769,7 @@ "schema": { "type": "string" }, - "description": "API Key for accessing this service", + "description": "API Key for the service", "required": true }, { @@ -778,7 +778,7 @@ "schema": { "type": "string" }, - "description": "API Key for accessing this service, alternative to using the HTTP header X-DUNE-API-KEY.", + "description": "API Key for the service, alternative to using the HTTP header X-DUNE-API-KEY.", "required": false }, { diff --git a/api-reference/upload/upload-openapi.json b/api-reference/upload/upload-openapi.json index dfbdfdf3..40bd271e 100644 --- a/api-reference/upload/upload-openapi.json +++ b/api-reference/upload/upload-openapi.json @@ -23,8 +23,17 @@ "schema": { "type": "string" }, - "description": "API Key for accessing this service", + "description": "API Key for the service", "required": true + }, + { + "in": "path", + "name": "api_key", + "schema": { + "type": "string" + }, + "description": "API Key for the service, alternative to using the HTTP header X-DUNE-API-KEY.", + "required": false } ], "requestBody": { From f1b2f4fb82445035f4b4725924c14f0bff23a6b4 Mon Sep 17 00:00:00 
2001 From: Miguel Filipe Date: Thu, 15 Feb 2024 16:49:24 +0000 Subject: [PATCH 15/20] other minor edits on the mdx files --- api-reference/overview/authentication.mdx | 2 +- api-reference/overview/introduction.mdx | 4 ++-- api-reference/overview/what-is-execution-id.mdx | 5 +++-- api-reference/query-api/endpoint/execute-query.mdx | 6 +++--- .../query-api/endpoint/get-execution-result-csv.mdx | 5 ++--- 5 files changed, 11 insertions(+), 11 deletions(-) diff --git a/api-reference/overview/authentication.mdx b/api-reference/overview/authentication.mdx index 4ab1df13..588e1eb2 100644 --- a/api-reference/overview/authentication.mdx +++ b/api-reference/overview/authentication.mdx @@ -5,7 +5,7 @@ title: Authentication The Dune API relies on API keys for authentication. Your API key grants access and determines billing details for private queries, so safeguard it diligently! ## Generate an API key -In order to generate a new API key, go to settings -> API -> create new API key. +In order to generate a new API key, go to settings -> API -> create new API key. - Dune has two types of account: `user` account and `team` account. A team can have many users. A user can join many teams. 
diff --git a/api-reference/overview/introduction.mdx b/api-reference/overview/introduction.mdx index 2374d854..55c04d0d 100644 --- a/api-reference/overview/introduction.mdx +++ b/api-reference/overview/introduction.mdx @@ -15,7 +15,7 @@ Dune's four core API offerings can be found below: Execute queries and get results in JSON or CSV form @@ -35,4 +35,4 @@ Dune's four core API offerings can be found below: > Push Dune data into your own webhooks, on a custom set schedule - \ No newline at end of file + diff --git a/api-reference/overview/what-is-execution-id.mdx b/api-reference/overview/what-is-execution-id.mdx index 5a86a745..629e02e0 100644 --- a/api-reference/overview/what-is-execution-id.mdx +++ b/api-reference/overview/what-is-execution-id.mdx @@ -8,8 +8,9 @@ Unlike a query ID, which identifies a specific query, an execution ID represents With an execution ID, you can monitor the execution's status using the [check status](../query-api//endpoint/get-execution-status) endpoint. The status can be one of the following: pending, success, or failure. -If an execution is successful, you can also retrieve its latest result (provided it hasn't expired). +If an execution is successful, you can get the result using [get result](../query-api/endpoint/get-execution-result). Results are saved and can be retrieved multiple times. +Results data from an execution are stored with an expiration date of 90 days (subject to change). This is visible on the API response on the “expires_at” field in the execution status and results json body (not on the CSV endpoint). Consider saving your execution ID in some cases to retrieve the result later without initiating a new execution. 
- \ No newline at end of file + diff --git a/api-reference/query-api/endpoint/execute-query.mdx b/api-reference/query-api/endpoint/execute-query.mdx index 6a2d7c8a..8fd94144 100644 --- a/api-reference/query-api/endpoint/execute-query.mdx +++ b/api-reference/query-api/endpoint/execute-query.mdx @@ -40,9 +40,9 @@ openapi: 'POST /v1/query/{query_id}/execute' ``` -If the query has parameters and you don't add them in your API call, it will just run the default params. You may add query parameters as part of the POST params data. +If the query has parameters and you don't add them in your API call, it will just run with the default params. You may add query parameters as part of the POST params data. -You can choose to include a `performance` parameter, by default it will use the "medium" performance tier which consumes 10 credits. "large" will use 20 credits and be faster. +You can choose to include a `performance` parameter, by default it will use the "medium" performance tier which consumes 10 credits. "large" will use 20 credits and are faster. Returns an execution_id associated with the triggered query execution and the state of the execution. @@ -181,4 +181,4 @@ HttpResponse response = Unirest.post("https://api.dune.com/api/v1/query/ .asString(); ``` - \ No newline at end of file + diff --git a/api-reference/query-api/endpoint/get-execution-result-csv.mdx b/api-reference/query-api/endpoint/get-execution-result-csv.mdx index 5c4e5e39..41b4d91d 100644 --- a/api-reference/query-api/endpoint/get-execution-result-csv.mdx +++ b/api-reference/query-api/endpoint/get-execution-result-csv.mdx @@ -9,8 +9,7 @@ Result returns the status, metadata, and query results (in CSV) from a query exe -- Results data from an execution are currently stored for 90 days. This is visible on the API response on the “expires_at” field in the execution status and results body. - +- Results data from an execution are stored with an expiration date of 90 days. 
This is visible on the API response on the “expires_at” field in the execution status and results json body (not on the CSV endpoint). - There is currently a 1GB limit in how much data a single API result call can return, but there is a chance we reduce this overall or based on varying paid plan types. - To paginate query results, please visit the [pagination page](../../overview/pagination) to get more info. @@ -169,4 +168,4 @@ HttpResponse response = Unirest.get("https://api.dune.com/api/v1/executi ``` - \ No newline at end of file + From 627b0739e9d9f86aa2a4fb021dc558912363f7fc Mon Sep 17 00:00:00 2001 From: Miguel Filipe Date: Thu, 15 Feb 2024 17:32:35 +0000 Subject: [PATCH 16/20] fix semantic typo --- api-reference/crud/crud-openapi.json | 12 ++++++------ api-reference/query-api/query-openapi.json | 14 +++++++------- api-reference/upload/upload-openapi.json | 2 +- 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/api-reference/crud/crud-openapi.json b/api-reference/crud/crud-openapi.json index c8b63801..cfd1bb2c 100644 --- a/api-reference/crud/crud-openapi.json +++ b/api-reference/crud/crud-openapi.json @@ -26,7 +26,7 @@ "required": true }, { - "in": "path", + "in": "query", "name": "api_key", "schema": { "type": "string" @@ -236,7 +236,7 @@ "required": true }, { - "in": "path", + "in": "query", "name": "api_key", "schema": { "type": "string" @@ -341,7 +341,7 @@ "required": true }, { - "in": "path", + "in": "query", "name": "api_key", "schema": { "type": "string" @@ -445,7 +445,7 @@ "required": true }, { - "in": "path", + "in": "query", "name": "api_key", "schema": { "type": "string" @@ -549,7 +549,7 @@ "required": true }, { - "in": "path", + "in": "query", "name": "api_key", "schema": { "type": "string" @@ -663,7 +663,7 @@ "required": true }, { - "in": "path", + "in": "query", "name": "api_key", "schema": { "type": "string" diff --git a/api-reference/query-api/query-openapi.json b/api-reference/query-api/query-openapi.json index 
71272694..d553ee9c 100644 --- a/api-reference/query-api/query-openapi.json +++ b/api-reference/query-api/query-openapi.json @@ -25,7 +25,7 @@ "required": true }, { - "in": "path", + "in": "query", "name": "api_key", "schema": { "type": "string" @@ -154,7 +154,7 @@ "required": true }, { - "in": "path", + "in": "query", "name": "api_key", "schema": { "type": "string" @@ -249,7 +249,7 @@ "required": true }, { - "in": "path", + "in": "query", "name": "api_key", "schema": { "type": "string" @@ -335,7 +335,7 @@ "required": true }, { - "in": "path", + "in": "query", "name": "api_key", "schema": { "type": "string" @@ -483,7 +483,7 @@ "required": true }, { - "in": "path", + "in": "query", "name": "api_key", "schema": { "type": "string" @@ -645,7 +645,7 @@ "required": true }, { - "in": "path", + "in": "query", "name": "api_key", "schema": { "type": "string" @@ -773,7 +773,7 @@ "required": true }, { - "in": "path", + "in": "query", "name": "api_key", "schema": { "type": "string" diff --git a/api-reference/upload/upload-openapi.json b/api-reference/upload/upload-openapi.json index 40bd271e..e6969d5c 100644 --- a/api-reference/upload/upload-openapi.json +++ b/api-reference/upload/upload-openapi.json @@ -27,7 +27,7 @@ "required": true }, { - "in": "path", + "in": "query", "name": "api_key", "schema": { "type": "string" From 8807926fd8ac137bb7ca5cbc824237a8e5b6e1db Mon Sep 17 00:00:00 2001 From: agaperste Date: Thu, 15 Feb 2024 12:40:09 -0500 Subject: [PATCH 17/20] formatting update --- api-reference/overview/pagination.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/api-reference/overview/pagination.mdx b/api-reference/overview/pagination.mdx index cbd3104c..885b2f22 100644 --- a/api-reference/overview/pagination.mdx +++ b/api-reference/overview/pagination.mdx @@ -407,17 +407,17 @@ If you pass in an invalid `offset` parameter value, you will get an empty result -**Data Returned Limit ** +**Data Returned Limit** When using pagination, our intention is 
to use sizes that work well on mobile, with lower data and ram consumption. For this, and to avoid more work on the developer, when the client specifies a very large limit value (for example 500000 rows), instead of returning an error, the server will override this limit to a lower, safer value (for example 30000 rows) and *will always provide* the correct next `offset` and `limit` value to use on the next paginated requests. The exact maximum limit value is subject to change. -**Data Size Limit ** +**Data Size Limit** Dune internally has a maximum query result size limit (which currently is 8GB, but subject to increase in the future). If your query yields more than 8GB of data, the result will be truncated in storage. In such cases, pulling the result data (using pagination) but without specifying `allow_partial_results` set to true will trigger an error message: "error": "Partial Result, please request with 'allows_partial_results=true'". If you wish to retrieve partial results, you can pass the parameter `allow_partial_results=true`. But please make sure you indeed want to fetch the truncated result. So what? Related to pagination, this means that - For query results under 8GB, use the API as normal. 
- When your query results exceed 8GB, in addition to `limit` and `offset` parameters in order to read the partial result (the first 8GB of data), set `allow_partial_results=true` -- You can use the (Get Status API)[https://dune.mintlify.app/api-reference/query-api/endpoint/get-execution-status] to check the size of your result, `result.result_set_size` +- You can use the [Get Status API](https://dune.mintlify.app/api-reference/query-api/endpoint/get-execution-status) to check the size of your result, `result.result_set_size` From c907dc3044a13e6b0e689b23d884831a78222eb9 Mon Sep 17 00:00:00 2001 From: Andrew <47720952+andrewhong5297@users.noreply.github.com> Date: Thu, 15 Feb 2024 10:58:41 -0500 Subject: [PATCH 18/20] Update query-parameters.mdx --- api-reference/overview/query-parameters.mdx | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/api-reference/overview/query-parameters.mdx b/api-reference/overview/query-parameters.mdx index 3e8f9065..bb8c35fb 100644 --- a/api-reference/overview/query-parameters.mdx +++ b/api-reference/overview/query-parameters.mdx @@ -9,7 +9,18 @@ There are four kinds of parameters you can use in a Dune query (these are not AP - date - enum (called a list in the UI) -For passing these parameters through the API request body, you can use the following format: +For passing these parameters through the API request body, you can use the following format for executions: + +```json +{ +"foo": "value", +"bar": 1000, +"baz": "2020-12-01T01:20:30Z" +} +``` +Where "foo", "bar", and "baz" are three params in a query. If you leave one out, it goes with the default param value. + +For CRUD operations, you'll need to define the type and more: ```json [ @@ -46,4 +57,4 @@ If you are using bytearrays/binary (`0x1234...`), then you will still pass it as If you're using the Dune Python SDK, check out the [sdk doc page](/api-reference/overview/sdks) for an example.
- \ No newline at end of file + From d2f9b2a2d5ce3a83404b679bd4203a7caacf671d Mon Sep 17 00:00:00 2001 From: Miguel Filipe Date: Thu, 15 Feb 2024 17:37:37 +0000 Subject: [PATCH 19/20] a few more fixes --- api-reference/overview/pagination.mdx | 19 ++++++++++--------- api-reference/query-api/query-openapi.json | 4 ++-- 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/api-reference/overview/pagination.mdx b/api-reference/overview/pagination.mdx index 885b2f22..bdcad82c 100644 --- a/api-reference/overview/pagination.mdx +++ b/api-reference/overview/pagination.mdx @@ -4,7 +4,7 @@ title: Pagination All API endpoints to read query results, including [get execution results](../query-api/endpoint/get-execution-result), [get execution results csv](../query-api/endpoint/get-execution-result-csv), [get query results](../query-api/endpoint/get-query-result), [get query results csv](../query-api/endpoint/get-query-result-csv), support pagination for efficient data retrieval. Pagination divides large datasets into smaller, manageable chunks, preventing overload and ensuring smooth performance. This allows users to navigate through the data easily, avoiding limit errors and streamlining their data fetching experience. To paginate through results: -1. Specify the `limit` parameter to determine the number of results you want to retrieve. +1. Specify the `limit` parameter to determine the number of results you want to retrieve. 2. Use the `offset` parameter to indicate the starting offset point for retrieving results, default value is 0 for the first row. 3. If you are fetching JSON response, check for the fields `next_offset` and `next_uri` in the response body to fetch the next page, if available. If you are fetching CSV response, check the response headers for `X-Dune-Next-Offset` and `X-Dune-Next-Uri` to fetch the next page, if available. 
Always follow the provided offset or uri, the server is allowed to override the provided limit if it is too large (see section on Data Returned Limit) @@ -24,11 +24,11 @@ To paginate through results: - ```bash - curl -X GET 'https://api.dune.com/api/v1/query/3426636/results?limit=1000' -H 'x-dune-api-key: {{api_key}}' + ```bash + curl -X GET 'https://api.dune.com/api/v1/query/3426636/results?limit=1000' -H 'x-dune-api-key: {{api_key}}' ``` - + ```python Python SDK @@ -55,7 +55,7 @@ To paginate through results: - ```javascript + ```javascript const options = { method: 'GET', @@ -112,7 +112,7 @@ To paginate through results: - ```php + ```php @@ -400,7 +400,7 @@ If you pass in an invalid `offset` parameter value, you will get an empty result "execution_time_millis": 1336 } }, - "next_uri": "https://api.dune.com/api/v1/execution/01HPFJ7VSFXPTA8WPMDKBXE167/results?limit=5&offset=5", + "next_uri": "https://api.dune.com/api/v1/execution/01HPFJ7VSFXPTA8WPMDKBXE167/results?limit=1000&offset=5", "next_offset": 5 } ``` @@ -408,6 +408,7 @@ If you pass in an invalid `offset` parameter value, you will get an empty result **Data Returned Limit** + When using pagination, our intention is to use sizes that work well on mobile, with lower data and ram consumption. For this, and to avoid more work on the developer, when the client specifies a very large limit value (for example 500000 rows), instead of returning an error, the server will override this limit to a lower, safer value (for example 30000 rows) and *will always provide* the correct next `offset` and `limit` value to use on the next paginated requests. The exact maximum limit value is subject to change. @@ -415,7 +416,7 @@ When using pagination, our intention is to use sizes that work well on mobile, w Dune internally has a maximum query result size limit (which currently is 8GB, but subject to increase in the future). If your query yields more than 8GB of data, the result will be truncated in storage. 
In such cases, pulling the result data (using pagination) but without specifying `allow_partial_results` set to true will trigger an error message: "error": "Partial Result, please request with 'allows_partial_results=true'". If you wish to retrieve partial results, you can pass the parameter `allow_partial_results=true`. But please make sure you indeed want to fetch the truncated result. -So what? Related to pagination, this means that +So what? Related to pagination, this means that - For query results under 8GB, use the API as normal. - When your query results exceed 8GB, in addition to `limit` and `offset` parameters in order to read the partial result (the first 8GB of data), set `allow_partial_results=true` - You can use the [Get Status API](https://dune.mintlify.app/api-reference/query-api/endpoint/get-execution-status) to check the size of your result, `result.result_set_size` diff --git a/api-reference/query-api/query-openapi.json b/api-reference/query-api/query-openapi.json index d553ee9c..df6447aa 100644 --- a/api-reference/query-api/query-openapi.json +++ b/api-reference/query-api/query-openapi.json @@ -933,7 +933,7 @@ "execution_started_at": "2024-01-12T21:34:37.464387Z", "execution_ended_at": "2024-01-12T21:34:55.737081Z", "next_offset": 5, - "next_uri": "https://api.dev.dune.com/api/v1/execution/01HKZSJAW6N2MFVCBHA3R8S64X/results?limit=5&offset=5", + "next_uri": "https://api.dev.dune.com/api/v1/execution/01HKZSJAW6N2MFVCBHA3R8S64X/results?limit=1000&offset=5", "result": { "metadata": { "column_names": [ @@ -1048,7 +1048,7 @@ "next_uri": { "type": "string", "description": "URI that can be used to fetch the next page of results.", - "example": "https://api.dev.dune.com/api/v1/execution/01HKZSJAW6N2MFVCBHA3R8S64X/results?limit=5&offset=5" + "example": "https://api.dev.dune.com/api/v1/execution/01HKZSJAW6N2MFVCBHA3R8S64X/results?limit=1000&offset=5" }, "result": { "$ref": "#/components/schemas/QueryResultData" From 
e99d138be1f9af64878f191d6f440f959cecf9ab Mon Sep 17 00:00:00 2001 From: agaperste Date: Thu, 15 Feb 2024 12:57:23 -0500 Subject: [PATCH 20/20] updating 1GB to 8GB; updating wording for pagination doc --- api-reference/overview/faq.mdx | 4 ---- api-reference/overview/rate-limits.mdx | 2 +- api-reference/overview/troubleshooting.mdx | 18 ------------------ .../endpoint/get-execution-result-csv.mdx | 5 ++--- .../endpoint/get-execution-result.mdx | 4 ++-- .../endpoint/get-query-result-csv.mdx | 4 ++-- .../query-api/endpoint/get-query-result.mdx | 4 ++-- 7 files changed, 9 insertions(+), 32 deletions(-) diff --git a/api-reference/overview/faq.mdx b/api-reference/overview/faq.mdx index 56d278a3..fab89c0e 100644 --- a/api-reference/overview/faq.mdx +++ b/api-reference/overview/faq.mdx @@ -54,10 +54,6 @@ Yes! The results storage period can be found on the API response on the “expires_at” field in the execution status and results body. -#### How much data can I retrieve in a single API result call? - -There is currently a ~1GB limit. The API does not currently return an explicit error upon hitting this limit but will instead fail (timeout) when attempting to retrieve the results. - #### How do I import Dune data into a google sheet? Use "Import Data" to import your CSV results into a google sheet using "api_key" as a param. (We advise against doing this any public document where your API key can be viewed and compromised.) diff --git a/api-reference/overview/rate-limits.mdx b/api-reference/overview/rate-limits.mdx index 329c3cf0..04508692 100644 --- a/api-reference/overview/rate-limits.mdx +++ b/api-reference/overview/rate-limits.mdx @@ -29,4 +29,4 @@ For example, on the Free plan, you have a low limit of 15 requests per minute an **Data Return Limit** -Dune imposes a 1GB data limit on query results for each query execution. If your query yields more than 1GB of data, the result will be truncated in storage. 
In such cases, pulling the result data with `allow_partial_results` set to false (the default) will trigger an error message: "error": "Partial Result, please request with 'allows_partial_results=true'". To retrieve partial results, you must pass the parameter `allow_partial_results=true`. +Dune internally has a maximum query result size limit (which currently is 8GB, but subject to increase in the future). If your query yields more than 8GB of data, the result will be truncated in storage. In such cases, pulling the result data (using pagination) but without specifying `allow_partial_results` set to true will trigger an error message: "error": "Partial Result, please request with 'allows_partial_results=true'". If you wish to retrieve partial results, you can pass the parameter `allow_partial_results=true`. But please make sure you indeed want to fetch the truncated result. diff --git a/api-reference/overview/troubleshooting.mdx b/api-reference/overview/troubleshooting.mdx index fabdd03d..18a584b3 100644 --- a/api-reference/overview/troubleshooting.mdx +++ b/api-reference/overview/troubleshooting.mdx @@ -26,24 +26,6 @@ For specific error code information, please refer to each of the endpoint itself ``` You did not input a valid API key. You can go generate a new key and make sure you save it in a safe place and paste the key over. -#### Result too large -```json -{ - "execution_id": "01HMF87Y5WW6NQ7VQ35Z691JER", - "query_id": 3362169, - "state": "QUERY_STATE_FAILED", - "submitted_at": "2024-01-18T21:39:41.885699Z", - "expires_at": "2024-04-17T21:40:15.146884Z", - "execution_started_at": "2024-01-18T21:39:41.905154Z", - "execution_ended_at": "2024-01-18T21:40:15.146883Z", - "error": { - "type": "FAILED_TYPE_RESULT_SIZE_EXCEEDED", - "message": "Result is too large and was truncated to 1.0GiB. Set allow_partial_results=true query parameter to get the partial result." - } -} -``` -The result for the query exceeds the 1GB limit. 
If you'd like to get partial, truncated result, please set `allow_partial_results` to true. - #### Permission error ```json diff --git a/api-reference/query-api/endpoint/get-execution-result-csv.mdx b/api-reference/query-api/endpoint/get-execution-result-csv.mdx index 41b4d91d..0fcd61be 100644 --- a/api-reference/query-api/endpoint/get-execution-result-csv.mdx +++ b/api-reference/query-api/endpoint/get-execution-result-csv.mdx @@ -10,9 +10,8 @@ Result returns the status, metadata, and query results (in CSV) from a query exe - Results data from an execution are stored with an expiration date of 90 days. This is visible on the API response on the “expires_at” field in the execution status and results json body (not on the CSV endpoint). -- There is currently a 1GB limit in how much data a single API result call can return, but there is a chance we reduce this overall or based on varying paid plan types. - -- To paginate query results, please visit the [pagination page](../../overview/pagination) to get more info. +- Dune internally has a maximum query result size limit (which currently is 8GB, but subject to increase in the future). If your query yields more than 8GB of data, the result will be truncated in storage. In such cases, pulling the result data (using pagination) but without specifying `allow_partial_results` set to true will trigger an error message: "error": "Partial Result, please request with 'allows_partial_results=true'". If you wish to retrieve partial results, you can pass the parameter `allow_partial_results=true`. But please make sure you indeed want to fetch the truncated result. +- We recommend reading about [Pagination](../../overview/pagination) to get the most out of the API and handle large results. 
diff --git a/api-reference/query-api/endpoint/get-execution-result.mdx b/api-reference/query-api/endpoint/get-execution-result.mdx index a774026a..a97b1fac 100644 --- a/api-reference/query-api/endpoint/get-execution-result.mdx +++ b/api-reference/query-api/endpoint/get-execution-result.mdx @@ -10,8 +10,8 @@ Result returns the status, metadata, and query results (in JSON) from a query ex - Results data from an execution are stored for 90 days. This is visible on the API response on the “expires_at” field in the execution status and results body. -- There is currently a 1GB limit in how much data a single API result call can return, but there is a chance we reduce this overall or based on varying paid plan types. -- To paginate query results, please visit the [pagination page](../../overview/pagination) to get more info. +- Dune internally has a maximum query result size limit (which currently is 8GB, but subject to increase in the future). If your query yields more than 8GB of data, the result will be truncated in storage. In such cases, pulling the result data (using pagination) but without specifying `allow_partial_results` set to true will trigger an error message: "error": "Partial Result, please request with 'allows_partial_results=true'". If you wish to retrieve partial results, you can pass the parameter `allow_partial_results=true`. But please make sure you indeed want to fetch the truncated result. +- We recommend reading about [Pagination](../../overview/pagination) to get the most out of the API and handle large results. 
diff --git a/api-reference/query-api/endpoint/get-query-result-csv.mdx b/api-reference/query-api/endpoint/get-query-result-csv.mdx index 64f6d997..ab7bcd16 100644 --- a/api-reference/query-api/endpoint/get-query-result-csv.mdx +++ b/api-reference/query-api/endpoint/get-query-result-csv.mdx @@ -12,8 +12,8 @@ The query specified must either be public or a query you have ownership of (you This endpoint does NOT trigger an execution but does [consume credits through datapoints](https://dune.com/pricing). -- There is currently a 1GB limit in how much data a single API result call can return, but there is a chance we reduce this overall or based on varying paid plan types. -- To paginate query results, please visit the [pagination page](../../overview/pagination) to get more info. +- Dune internally has a maximum query result size limit (which currently is 8GB, but subject to increase in the future). If your query yields more than 8GB of data, the result will be truncated in storage. In such cases, pulling the result data (using pagination) but without specifying `allow_partial_results` set to true will trigger an error message: "error": "Partial Result, please request with 'allows_partial_results=true'". If you wish to retrieve partial results, you can pass the parameter `allow_partial_results=true`. But please make sure you indeed want to fetch the truncated result. +- We recommend reading about [Pagination](../../overview/pagination) to get the most out of the API and handle large results. 
diff --git a/api-reference/query-api/endpoint/get-query-result.mdx b/api-reference/query-api/endpoint/get-query-result.mdx index 34c63e58..1c599db5 100644 --- a/api-reference/query-api/endpoint/get-query-result.mdx +++ b/api-reference/query-api/endpoint/get-query-result.mdx @@ -12,8 +12,8 @@ The query specified must either be public or a query you have ownership of (you This endpoint does NOT trigger an execution but does [consume credits through datapoints](https://dune.com/pricing). -- There is currently a 1GB limit in how much data a single API result call can return, but there is a chance we reduce this overall or based on varying paid plan types. -- To paginate query results, please visit the [pagination page](../../overview/pagination) to get more info. +- Dune internally has a maximum query result size limit (which currently is 8GB, but subject to increase in the future). If your query yields more than 8GB of data, the result will be truncated in storage. In such cases, pulling the result data (using pagination) but without specifying `allow_partial_results` set to true will trigger an error message: "error": "Partial Result, please request with 'allows_partial_results=true'". If you wish to retrieve partial results, you can pass the parameter `allow_partial_results=true`. But please make sure you indeed want to fetch the truncated result. +- We recommend reading about [Pagination](../../overview/pagination) to get the most out of the API and handle large results.