Skip to content
Merged
Show file tree
Hide file tree
Changes from 11 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

7 changes: 7 additions & 0 deletions crates/pgt_configuration/src/analyser/linter/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,12 @@ pub struct LinterConfiguration {
/// match these patterns.
#[partial(bpaf(hide))]
pub include: StringSet,

/// Default search path schemas for type checking.
/// Can be a list of schema names or glob patterns like ["private", "app_*"].
/// "public" will always be searched, and it will be searched last.
#[partial(bpaf(long("search_path")))]
pub search_path_patterns: StringSet,
}

impl LinterConfiguration {
Expand All @@ -43,6 +49,7 @@ impl Default for LinterConfiguration {
rules: Default::default(),
ignore: Default::default(),
include: Default::default(),
search_path_patterns: ["public".to_string()].into_iter().collect(),
}
}
}
Expand Down
2 changes: 2 additions & 0 deletions crates/pgt_typecheck/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,8 @@ version = "0.0.0"


[dependencies]
globset = "0.4.16"
itertools = { version = "0.14.0" }
pgt_console.workspace = true
pgt_diagnostics.workspace = true
pgt_query.workspace = true
Expand Down
49 changes: 49 additions & 0 deletions crates/pgt_typecheck/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,9 @@ mod typed_identifier;

pub use diagnostics::TypecheckDiagnostic;
use diagnostics::create_type_error;
use globset::Glob;
use itertools::Itertools;
use pgt_schema_cache::SchemaCache;
use sqlx::postgres::PgDatabaseError;
pub use sqlx::postgres::PgSeverity;
use sqlx::{Executor, PgPool};
Expand All @@ -17,6 +20,9 @@ pub struct TypecheckParams<'a> {
pub tree: &'a tree_sitter::Tree,
pub schema_cache: &'a pgt_schema_cache::SchemaCache,
pub identifiers: Vec<TypedIdentifier>,
/// Set of glob patterns that will be matched against the schemas in the database.
/// Each matching schema will be added to the search_path for the typecheck.
pub search_path_patterns: Vec<String>,
}

pub async fn check_sql(
Expand Down Expand Up @@ -49,6 +55,19 @@ pub async fn check_sql(
params.sql,
);

let mut search_path_schemas =
get_schemas_in_search_path(params.schema_cache, params.search_path_patterns);

if !search_path_schemas.is_empty() {
// Always include public if we have any schemas in search path
if !search_path_schemas.contains(&"public") {
search_path_schemas.push("public");
}

let search_path_query = format!("SET search_path TO {};", search_path_schemas.join(", "));
conn.execute(&*search_path_query).await?;
}

let res = conn.prepare(&prepared).await;

match res {
Expand All @@ -64,3 +83,33 @@ pub async fn check_sql(
Err(err) => Err(err),
}
}

/// Resolves the configured glob patterns against the schemas known to the
/// database and returns the names of every matching schema.
///
/// The outer iteration is over `glob_patterns` so that the configured pattern
/// order is preserved in the result; within one pattern, schemas appear in
/// `schema_cache` order. A schema matched by several patterns is kept only at
/// its first occurrence (`unique`).
///
/// Patterns that fail to compile as globs are silently skipped — callers get
/// no diagnostic for a malformed pattern. NOTE(review): consider surfacing
/// invalid patterns to the user instead of dropping them.
fn get_schemas_in_search_path(schema_cache: &SchemaCache, glob_patterns: Vec<String>) -> Vec<&str> {
    // iterate over glob_patterns on the outside to keep the order
    glob_patterns
        .iter()
        // Invalid globs are discarded; valid ones are turned into matchers.
        .filter_map(|pattern| Glob::new(pattern).ok())
        // Expand each pattern to the schemas it matches, without building an
        // intermediate Vec per pattern.
        .flat_map(|glob| {
            let matcher = glob.compile_matcher();
            schema_cache
                .schemas
                .iter()
                .filter(move |schema| matcher.is_match(schema.name.as_str()))
                .map(|schema| schema.name.as_str())
        })
        .unique()
        .collect()
}
1 change: 1 addition & 0 deletions crates/pgt_typecheck/tests/diagnostics.rs
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@ async fn test(name: &str, query: &str, setup: Option<&str>, test_db: &PgPool) {
ast: &root,
tree: &tree,
schema_cache: &schema_cache,
search_path_patterns: vec![],
identifiers: vec![],
})
.await;
Expand Down
5 changes: 5 additions & 0 deletions crates/pgt_workspace/src/settings.rs
Original file line number Diff line number Diff line change
Expand Up @@ -291,6 +291,7 @@ fn to_linter_settings(
rules: Some(conf.rules),
ignored_files: to_matcher(working_directory.clone(), Some(&conf.ignore))?,
included_files: to_matcher(working_directory.clone(), Some(&conf.include))?,
search_path_patterns: conf.search_path_patterns.into_iter().collect(),
})
}

Expand Down Expand Up @@ -388,6 +389,9 @@ pub struct LinterSettings {

/// List of included paths/files to match
pub included_files: Matcher,

/// Glob patterns for additional schemas to check when typechecking
pub search_path_patterns: Vec<String>,
}

impl Default for LinterSettings {
Expand All @@ -397,6 +401,7 @@ impl Default for LinterSettings {
rules: Some(pgt_configuration::analyser::linter::Rules::default()),
ignored_files: Matcher::empty(),
included_files: Matcher::empty(),
search_path_patterns: vec!["public".to_string()],
}
}
}
Expand Down
4 changes: 4 additions & 0 deletions crates/pgt_workspace/src/workspace/server.rs
Original file line number Diff line number Diff line change
Expand Up @@ -455,6 +455,7 @@ impl Workspace for WorkspaceServer {
let path_clone = params.path.clone();
let schema_cache = self.schema_cache.load(pool.clone())?;
let input = doc.iter(TypecheckDiagnosticsMapper).collect::<Vec<_>>();
let search_path_patterns = settings.linter.search_path_patterns.clone();

// Combined async context for both typecheck and plpgsql_check
let async_results = run_async(async move {
Expand All @@ -463,6 +464,8 @@ impl Workspace for WorkspaceServer {
let pool = pool.clone();
let path = path_clone.clone();
let schema_cache = Arc::clone(&schema_cache);
let search_path_patterns = search_path_patterns.clone();

async move {
let mut diagnostics = Vec::new();

Expand All @@ -474,6 +477,7 @@ impl Workspace for WorkspaceServer {
ast: &ast,
tree: &cst,
schema_cache: schema_cache.as_ref(),
search_path_patterns,
identifiers: sign
.map(|s| {
s.args
Expand Down
114 changes: 113 additions & 1 deletion crates/pgt_workspace/src/workspace/server.tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,8 @@ use std::sync::Arc;
use biome_deserialize::{Merge, StringSet};
use pgt_analyse::RuleCategories;
use pgt_configuration::{
PartialConfiguration, database::PartialDatabaseConfiguration, files::PartialFilesConfiguration,
PartialConfiguration, PartialLinterConfiguration, database::PartialDatabaseConfiguration,
files::PartialFilesConfiguration,
};
use pgt_diagnostics::Diagnostic;
use pgt_fs::PgTPath;
Expand Down Expand Up @@ -331,3 +332,114 @@ async fn test_positional_params(test_db: PgPool) {

assert_eq!(diagnostics.len(), 0, "Expected no diagnostic");
}

/// Verifies that the linter's `search_path_patterns` setting controls which
/// schemas end up on the Postgres `search_path` during typechecking.
///
/// Setup creates a `private` schema containing `get_user_id()`, then checks a
/// file that calls the function unqualified:
/// 1. Without any pattern configured, pulling diagnostics must yield exactly
///    one `typecheck` error (the function is not visible from the default
///    search path).
/// 2. After merging in the glob pattern `"pr*"` (which matches `private`),
///    pulling diagnostics on a fresh workspace must yield none.
#[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")]
async fn test_search_path_configuration(test_db: PgPool) {
    // Setup test schemas and functions
    let setup_sql = r#"
        create schema if not exists private;

        create or replace function private.get_user_id() returns integer as $$
            select 1;
        $$ language sql;
    "#;
    test_db.execute(setup_sql).await.expect("setup sql failed");

    let path_glob = PgTPath::new("test_glob.sql");
    // Unqualified call — only resolvable when "private" is on the search path.
    let file_content = r#"
        select get_user_id(); -- on private schema
    "#;

    // first check that the we get a valid typecheck
    // Base configuration: point the workspace at the per-test database; no
    // search-path patterns configured yet.
    let mut glob_conf = PartialConfiguration::init();
    glob_conf.merge_with(PartialConfiguration {
        db: Some(PartialDatabaseConfiguration {
            database: Some(
                test_db
                    .connect_options()
                    .get_database()
                    .unwrap()
                    .to_string(),
            ),
            ..Default::default()
        }),
        ..Default::default()
    });

    // without glob
    {
        let workspace =
            get_test_workspace(Some(glob_conf.clone())).expect("Unable to create test workspace");

        workspace
            .open_file(OpenFileParams {
                path: path_glob.clone(),
                content: file_content.into(),
                version: 1,
            })
            .expect("Unable to open test file");

        let diagnostics_glob = workspace
            .pull_diagnostics(crate::workspace::PullDiagnosticsParams {
                path: path_glob.clone(),
                categories: RuleCategories::all(),
                max_diagnostics: 100,
                only: vec![],
                skip: vec![],
            })
            .expect("Unable to pull diagnostics")
            .diagnostics;

        // Exactly one diagnostic expected: the unresolved function call.
        assert_eq!(
            diagnostics_glob.len(),
            1,
            "get_user_id() should not be found in search_path"
        );

        // yep, type error!
        assert_eq!(
            diagnostics_glob[0].category().map(|c| c.name()),
            Some("typecheck")
        );
    }

    // adding the glob
    glob_conf.merge_with(PartialConfiguration {
        linter: Some(PartialLinterConfiguration {
            // Adding glob pattern to match the "private" schema
            search_path_patterns: Some(StringSet::from_iter(vec!["pr*".to_string()])),
            ..Default::default()
        }),
        ..Default::default()
    }); // checking with the pattern should yield no diagnostics

    {
        // Fresh workspace so the merged configuration (with the pattern) is
        // the one used for typechecking.
        let workspace =
            get_test_workspace(Some(glob_conf.clone())).expect("Unable to create test workspace");

        workspace
            .open_file(OpenFileParams {
                path: path_glob.clone(),
                content: file_content.into(),
                version: 1,
            })
            .expect("Unable to open test file");

        let diagnostics_glob = workspace
            .pull_diagnostics(crate::workspace::PullDiagnosticsParams {
                path: path_glob.clone(),
                categories: RuleCategories::all(),
                max_diagnostics: 100,
                only: vec![],
                skip: vec![],
            })
            .expect("Unable to pull diagnostics")
            .diagnostics;

        assert_eq!(
            diagnostics_glob.len(),
            0,
            "Glob pattern should put private schema in search path"
        );
    }
}
11 changes: 11 additions & 0 deletions docs/schema.json
Original file line number Diff line number Diff line change
Expand Up @@ -223,6 +223,17 @@
"type": "null"
}
]
},
"searchPathPatterns": {
"description": "Default search path schemas for type checking. Can be a list of schema names or glob patterns like [\"private\", \"app_*\"]. \"public\" will always be searched, and it will be searched last.",
"anyOf": [
{
"$ref": "#/definitions/StringSet"
},
{
"type": "null"
}
]
}
},
"additionalProperties": false
Expand Down
4 changes: 4 additions & 0 deletions packages/@postgrestools/backend-jsonrpc/src/workspace.ts
Original file line number Diff line number Diff line change
Expand Up @@ -326,6 +326,10 @@ export interface PartialLinterConfiguration {
* List of rules
*/
rules?: Rules;
/**
* Default search path schemas for type checking. Can be a list of schema names or glob patterns like ["private", "app_*"]. "public" will always be searched, and it will be searched last.
*/
searchPathPatterns?: StringSet;
}
/**
* The configuration of the filesystem
Expand Down