Apply a bunch of clippy lints

Piv
2023-08-01 18:21:47 +09:30
parent f992efc018
commit 2485e45026
8 changed files with 80 additions and 81 deletions
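The changes below are mostly mechanical clippy fixes: enum variants renamed from SCREAMING_SNAKE_CASE to CamelCase, &String parameters relaxed to &str (clippy::ptr_arg), else { if ... } collapsed to else if (clippy::collapsible_else_if), redundant struct field names and trailing returns removed, the C-ABI *_free functions marked unsafe (clippy::not_unsafe_ptr_arg_deref), an ignored serialize Result now propagated with ?, and the long csv::Reader argument list to create_products bundled into a CreateProductInputs struct (clippy::too_many_arguments). As a minimal sketch of the &String -> &str pattern, using hypothetical names that are not taken from this repository:

// Hypothetical example (not from this repo) of the clippy::ptr_arg fix:
// a trait method that took &String now takes &str, so callers can pass
// both owned Strings (via deref coercion) and string literals.
trait Validator {
    fn is_valid(&self, s: &str) -> bool;
}

struct NonEmpty;

impl Validator for NonEmpty {
    fn is_valid(&self, s: &str) -> bool {
        !s.trim().is_empty()
    }
}

fn main() {
    let owned = String::from("42");
    let v = NonEmpty;
    assert!(v.is_valid(&owned)); // &String coerces to &str
    assert!(v.is_valid("42"));   // literals work directly
}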

View File

@@ -1,7 +1,4 @@
-use sqlx::{
-    mssql::{MssqlConnectOptions, MssqlPoolOptions},
-    ConnectOptions,
-};
+use sqlx::mssql::MssqlPoolOptions;

 #[tokio::main]
 async fn main() -> anyhow::Result<()> {

View File

@@ -3,23 +3,23 @@ use std::{collections::HashMap, io::Read, str::FromStr};
 use crate::io::RecordSerializer;

 pub enum Comparator<T: PartialOrd> {
-    EQUAL(T),
-    NOT_EQUAL(T),
-    GREATER_THAN(T),
-    LESS_THAN(T),
-    IN(Vec<T>),
-    NOT_IN(Vec<T>),
+    Equal(T),
+    NotEqual(T),
+    GreaterThan(T),
+    LessThan(T),
+    In(Vec<T>),
+    NotIn(Vec<T>),
 }

 impl<T: PartialOrd> Comparator<T> {
     pub fn is_valid(&self, value: T) -> bool {
         match self {
-            Comparator::EQUAL(v) => value == *v,
-            Comparator::NOT_EQUAL(v) => value != *v,
-            Comparator::GREATER_THAN(v) => value > *v,
-            Comparator::LESS_THAN(v) => value < *v,
-            Comparator::IN(v) => v.contains(&value),
-            Comparator::NOT_IN(v) => !v.contains(&value),
+            Comparator::Equal(v) => value == *v,
+            Comparator::NotEqual(v) => value != *v,
+            Comparator::GreaterThan(v) => value > *v,
+            Comparator::LessThan(v) => value < *v,
+            Comparator::In(v) => v.contains(&value),
+            Comparator::NotIn(v) => !v.contains(&value),
         }
     }
 }
@@ -31,7 +31,7 @@ pub trait FieldName {
 pub trait DataValidator: FieldName {
     // Whether the given value is valid for the validator
-    fn is_valid(&self, s: &String) -> bool;
+    fn is_valid(&self, s: &str) -> bool;
 }

 pub struct FilterRule<T: PartialOrd> {
@@ -46,7 +46,7 @@ impl<T: PartialOrd> FieldName for FilterRule<T> {
 }

 impl<T: FromStr + PartialOrd> DataValidator for FilterRule<T> {
-    fn is_valid(&self, s: &String) -> bool {
+    fn is_valid(&self, s: &str) -> bool {
         s.parse().map_or(false, |f| self.comparator.is_valid(f))
     }
 }

View File

@@ -1,14 +1,8 @@
-use std::{
-    io::{Read, Seek, Write},
-    thread::current,
-};
+use std::io::{Read, Seek, Write};

 use anyhow::bail;
 use csv::Position;
-use rmp_serde::{
-    decode::{ReadReader, ReadRefReader, ReadSlice},
-    from_read, Deserializer, Serializer,
-};
+use rmp_serde::{decode::ReadReader, Deserializer, Serializer};
 use serde::{de::DeserializeOwned, Deserialize, Serialize};

 pub trait RecordSerializer {

View File

@@ -9,6 +9,7 @@ pub use self::overhead_allocation::*;
 mod products;
 pub use self::products::create_products;
+pub use self::products::CreateProductInputs;

 mod shared_models;
 pub use self::shared_models::*;
@@ -56,7 +57,7 @@ pub extern "C" fn move_money_from_text(
 }

 #[no_mangle]
-pub extern "C" fn move_money_from_text_free(s: *mut c_char) {
+pub unsafe extern "C" fn move_money_from_text_free(s: *mut c_char) {
     unsafe {
         if s.is_null() {
             return;
@@ -150,7 +151,7 @@ fn unwrap_c_char<'a>(s: *const c_char) -> &'a CStr {
 }

 #[no_mangle]
-pub extern "C" fn allocate_overheads_from_text_free(s: *mut c_char) {
+pub unsafe extern "C" fn allocate_overheads_from_text_free(s: *mut c_char) {
     unsafe {
         if s.is_null() {
             return;

View File

@@ -1,6 +1,7 @@
 use std::{fs::File, io::BufWriter, path::PathBuf};

 use clap::{Parser, Subcommand};
+use coster_rs::CreateProductInputs;

 #[derive(Parser)]
 #[command(name = "coster-rs")]
@@ -177,11 +178,13 @@ fn main() -> anyhow::Result<()> {
             output,
         } => coster_rs::create_products(
             &mut csv::Reader::from_path(definitions)?,
-            &mut csv::Reader::from_path(encounters)?,
-            &mut csv::Reader::from_path(services)?,
-            &mut csv::Reader::from_path(transfers)?,
-            &mut csv::Reader::from_path(procedures)?,
-            &mut csv::Reader::from_path(diagnoses)?,
+            CreateProductInputs {
+                encounters: csv::Reader::from_path(encounters)?,
+                services: csv::Reader::from_path(services)?,
+                transfers: csv::Reader::from_path(transfers)?,
+                procedures: csv::Reader::from_path(procedures)?,
+                diagnoses: csv::Reader::from_path(diagnoses)?,
+            },
             &mut csv::Writer::from_path(output)?,
             1000000,
         ),

View File

@@ -201,8 +201,7 @@ where
     let is_separator = movement_rule.apply == "-DIVIDER-";
     let from_accounts = if is_separator {
         HashSet::new()
-    } else {
-        if movement_rule.cost_output.is_some() {
+    } else if movement_rule.cost_output.is_some() {
             account_mappings
                 .iter()
                 .filter(|(_, account)| {
@@ -218,12 +217,10 @@ where
                 movement_rule.source_to_account,
                 &all_accounts_sorted,
             )
-        }
     };
     let to_accounts = if is_separator {
         HashSet::new()
-    } else {
-        if movement_rule.cost_output.is_some() {
+    } else if movement_rule.cost_output.is_some() {
             account_mappings
                 .iter()
                 .filter(|(_, account)| {
@@ -239,7 +236,6 @@ where
                 movement_rule.dest_to_account,
                 &all_accounts_sorted,
             )
-        }
     };
     let from_departments = if is_separator {
         HashSet::new()

View File

@@ -418,7 +418,7 @@ where
         initial_account_costs
             .into_iter()
             .map(|(account, total_cost)| AccountCost {
-                account: account,
+                account,
                 summed_department_costs: total_cost,
             })
             .collect(),
@@ -752,7 +752,7 @@ fn solve_reciprocal_with_from<T: ReciprocalAllocationSolver + Sync + Send>(
         .map(|(department, value)| MovedAmount {
             account: total_costs.account.clone(),
             cost_centre: department.clone(),
-            value: value,
+            value,
             from_cost_centre: department.clone(),
         })
         .filter(|cost| cost.value != 0_f64)

View File

@@ -1,9 +1,6 @@
-use core::panic;
 use std::{
     collections::HashMap,
     io::{Read, Write},
-    sync::mpsc,
-    thread,
 };

 use chrono::NaiveDateTime;
@@ -31,15 +28,26 @@ struct Product {
     source_allocated_amount: Option<f64>,
 }

+pub struct CreateProductInputs<E, S, T, P, Di>
+where
+    E: Read,
+    S: Read,
+    T: Read,
+    P: Read,
+    Di: Read,
+{
+    pub encounters: csv::Reader<E>,
+    pub services: csv::Reader<S>,
+    pub transfers: csv::Reader<T>,
+    pub procedures: csv::Reader<P>,
+    pub diagnoses: csv::Reader<Di>,
+}
+
 // TODO: Build from linked dataset is pretty hard, it potentially requires knowing everything about the previous year's
 // costing run (BSCO, Dataset_Encounter_Cache, etc).
 pub fn create_products<D, E, S, T, P, Di, O>(
     definitions: &mut csv::Reader<D>,
-    encounters: &mut csv::Reader<E>,
-    services: &mut csv::Reader<S>,
-    transfers: &mut csv::Reader<T>,
-    procedures: &mut csv::Reader<P>,
-    diagnoses: &mut csv::Reader<Di>,
+    product_inputs: CreateProductInputs<E, S, T, P, Di>,
     // TODO: Looks kind of bad, any other way around it? I'd rather not have to depend on crossbeam as well
     output: &mut csv::Writer<O>,
     // TODO: Default to 10 million or something sane
@@ -82,7 +90,7 @@ where
     // TODO: Try with and without rayon, should be able to help I think as we're going through so much data sequentially,
     // although we're still likely to be bottlenecked by just write-speed
-    let mut encounters = encounters;
+    let mut encounters = product_inputs.encounters;

     let headers = encounters.headers()?.clone();
     for encounter in encounters.records() {
@@ -105,9 +113,9 @@ where
                 }
                 let field = field.unwrap();
                 if filter.equal {
-                    return filter.value == *field;
+                    filter.value == *field
                 } else {
-                    return filter.value != *field;
+                    filter.value != *field
                 }
             }))
                 && (definition.constraints.is_empty()
@@ -130,7 +138,7 @@ where
         }
         // TODO: Generate the built service
-        output.serialize(Product::default());
+        output.serialize(Product::default())?;
     }

     // Now do the same with transfers, services, etc, referencing the encounter reader by using the