mina_producer_dashboard/archive/mod.rs

use postgres_types::ChainStatus;
use serde::{Deserialize, Serialize};
use sqlx::PgPool;

pub mod postgres_types;
pub mod raw_types;
pub mod watchdog;

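/// Wrapper around a `sqlx` Postgres connection pool used to query a Mina
/// archive database.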
#[derive(Debug, Clone)]
pub struct ArchiveConnector {
    pool: PgPool,
}

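/// Where the Postgres connection string comes from: an explicit URL or the
/// `DATABASE_URL` variable (read via `dotenvy` or the process environment).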
pub enum ArchiveUrl {
    Url(String),
    Env,
}

impl ArchiveConnector {
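    /// Connects to the archive database, resolving the connection string from
    /// `postgres_url`.
    ///
    /// Panics if no `DATABASE_URL` can be found (for [`ArchiveUrl::Env`]) or
    /// if the connection to the database fails.
    ///
    /// Usage sketch (assumes `DATABASE_URL` points at a reachable archive
    /// database):
    ///
    /// ```ignore
    /// let db = ArchiveConnector::connect(ArchiveUrl::Env).await;
    /// let latest_state_hash = db.get_latest_block().await.unwrap();
    /// ```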
    pub async fn connect(postgres_url: ArchiveUrl) -> Self {
        let db_url = match postgres_url {
            ArchiveUrl::Url(url) => url,
            ArchiveUrl::Env => {
                // Prefer DATABASE_URL from a .env file, falling back to the
                // process environment.
                if let Ok(url) = dotenvy::var("DATABASE_URL") {
                    url
                } else {
                    std::env::var("DATABASE_URL")
                        .expect("No db url found, check env var DATABASE_URL")
                }
            }
        };
        // TODO(adonagy): propagate the error instead of unwrapping
        let pool = PgPool::connect(&db_url).await.unwrap();

        Self { pool }
    }

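    /// Fetches all blocks produced by the given producer public key.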
    pub async fn _get_producer_blocks(&self, producer_pk: &str) -> Result<Vec<Block>, sqlx::Error> {
        sqlx::query_file_as!(
            Block,
            "src/archive/sql/query_producer_blocks.sql",
            producer_pk
        )
        .fetch_all(&self.pool)
        .await
    }

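    /// Fetches all blocks in the given slot range.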
    pub async fn get_blocks_in_slot_range(
        &self,
        start_slot: i64,
        finish_slot: i64,
    ) -> Result<Vec<Block>, sqlx::Error> {
        sqlx::query_file_as!(
            Block,
            "src/archive/sql/query_blocks_in_slot_range.sql",
            start_slot,
            finish_slot
        )
        .fetch_all(&self.pool)
        .await
    }

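    /// Fetches the blocks on the canonical chain within the given slot range,
    /// resolved relative to the best tip identified by `best_tip_hash`.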
    pub async fn get_canonical_chain(
        &self,
        start_slot: i64,
        finish_slot: i64,
        best_tip_hash: String,
    ) -> Result<Vec<Block>, sqlx::Error> {
        sqlx::query_file_as!(
            Block,
            "src/archive/sql/query_canonical_chain.sql",
            best_tip_hash,
            start_slot,
            finish_slot
        )
        .fetch_all(&self.pool)
        .await
    }

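    /// Fetches up to `limit` of the most recent canonical blocks, resolved
    /// relative to the best tip identified by `best_tip_hash`.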
    pub async fn get_last_canonical_blocks(
        &self,
        best_tip_hash: String,
        limit: i64,
    ) -> Result<Vec<Block>, sqlx::Error> {
        sqlx::query_file_as!(
            Block,
            "src/archive/sql/query_last_canonical_blocks.sql",
            best_tip_hash,
            limit
        )
        .fetch_all(&self.pool)
        .await
    }

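    /// Returns the state hash of the latest block stored in the archive.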
    pub async fn get_latest_block(&self) -> Result<StateHash, sqlx::Error> {
        let block = sqlx::query_file_as!(LatestBlock, "src/archive/sql/query_latest_block.sql")
            .fetch_one(&self.pool)
            .await?;

        Ok(block.state_hash)
    }
}

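/// State hash of a block, as returned by the archive.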
pub type StateHash = String;

/// Row type used by `get_latest_block` to read just the state hash column.
struct LatestBlock {
    state_hash: String,
}

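/// A block row as returned by the archive queries in this module.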
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct Block {
    id: i32,
    pub state_hash: String,
    pub height: i64,
    timestamp: String,
    pub chain_status: ChainStatus,
    pub creator_key: String,
    winner_key: String,
    global_slot_since_hard_fork: i64,
    global_slot_since_genesis: i64,
    parent_id: Option<i32>,
}

impl Block {
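    /// Global slot of the block since the last hard fork.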
    pub fn global_slot(&self) -> u32 {
        self.global_slot_since_hard_fork as u32
    }
}

#[cfg(test)]
mod test {
    use super::*;

    #[tokio::test]
    async fn test() {
        let db = ArchiveConnector::connect(ArchiveUrl::Env).await;

        let blocks = db
            ._get_producer_blocks("B62qkPpK6z4ktWjxcmFzM4cFWjWLzrjNh6USjUMiYGcF3YAVbdo2p4H")
            .await
            .unwrap();

        let pending = blocks
            .iter()
            .filter(|block| block.chain_status == ChainStatus::Pending)
            .collect::<Vec<_>>();

        println!("Pending blocks: {}", pending.len());
    }
}