// ingey/src/graph/pull_from_db.rs

use super::sql::QueryExecutor;
use crate::graph::node::RunnableNode;
use crate::graph::upload_to_db::DBType;
use async_trait::async_trait;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use sqlx::AnyPool;
use tiberius::Config;
use tokio_util::compat::TokioAsyncWriteCompatExt;

/// Pull data from the database using the node's query and write the results to a
/// CSV file that another node can consume.
async fn pull_from_db(executor: &mut impl QueryExecutor, node: &PullFromDBNode) {
    // TODO: run `node.query` with `node.parameters` against `executor` and write
    // the result set to `node.file_path` as CSV.
}
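
// A minimal sketch of what the export could look like, assuming the
// `QueryExecutor` trait (defined in `super::sql`, not shown here) can return
// the result set as rows of strings and that the `csv` crate is available.
// The `fetch_rows` method below is hypothetical:
//
//     let rows = executor.fetch_rows(&node.query, &node.parameters).await?;
//     let mut writer = csv::Writer::from_path(&node.file_path)?;
//     for row in rows {
//         writer.write_record(&row)?;
//     }
//     writer.flush()?;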

#[derive(Serialize, Deserialize, Clone, JsonSchema)]
pub struct PullFromDBNode {
    file_path: String,
    query: String,
    parameters: Vec<String>,
    db_type: DBType,
    connection_string: String,
}
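
// For illustration, the JSON shape this node deserializes from (field values
// are made up, and the exact string form of `db_type` depends on how DBType
// derives Deserialize in upload_to_db.rs, which is not shown here):
//
//     {
//         "file_path": "orders.csv",
//         "query": "SELECT * FROM orders WHERE region = @P1",
//         "parameters": ["EMEA"],
//         "db_type": "Mssql",
//         "connection_string": "jdbc:sqlserver://localhost:1433;database=sales"
//     }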

pub struct PullFromDBNodeRunner {
    pub pull_from_db_node: PullFromDBNode,
}

#[async_trait]
impl RunnableNode for PullFromDBNodeRunner {
    async fn run(&self) -> anyhow::Result<()> {
        let node = self.pull_from_db_node.clone();
        // TODO: Clean up connection/executor setup so it isn't repeated between upload and download.
        match node.db_type {
            DBType::Mssql => {
                let config = Config::from_jdbc_string(&node.connection_string)?;
                let tcp = tokio::net::TcpStream::connect(config.get_addr()).await?;
                tcp.set_nodelay(true)?;
                // tiberius expects a futures-io stream, so wrap the tokio stream with the compat adapter.
                let mut client = tiberius::Client::connect(config, tcp.compat_write()).await?;
                pull_from_db(&mut client, &node).await;
            }
            _ => {
                let mut pool = AnyPool::connect(&node.connection_string).await?;
                pull_from_db(&mut pool, &node).await;
            }
        }
        Ok(())
    }
}
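
// Example usage (sketch only; assumes `serde_json` is available and that a
// database matching the connection string is actually reachable):
//
//     let node: PullFromDBNode = serde_json::from_str(node_json)?;
//     let runner = PullFromDBNodeRunner { pull_from_db_node: node };
//     runner.run().await?;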