From 53e34f0b085bb6adf7c30b31e495b922b359cd30 Mon Sep 17 00:00:00 2001
From: David Miguel
Date: Sun, 19 Jun 2022 13:36:32 +0100
Subject: [PATCH] Implementation of tokio-postgres (#1)

Adaptation of the sqlx adapter to tokio-postgres, using deadpool-postgres
as the pool manager.
---
 .github/dependabot.yml                |  18 +
 .github/workflows/ci.yml              | 106 ++++
 .github/workflows/coverage.yml        |  64 +++
 .github/workflows/release.yml         |  46 ++
 .gitignore                            |  23 +
 Cargo.toml                            |  34 ++
 LICENSE                               | 201 ++++++++
 README.md                             |  95 +++-
 docker-compose.yaml                   |  11 +
 examples/rbac_model.conf              |  14 +
 examples/rbac_policy.csv              |   5 +
 examples/rbac_with_domains_model.conf |  14 +
 examples/rbac_with_domains_policy.csv |   6 +
 sample.env                            |   2 +
 src/actions.rs                        | 680 ++++++++++++++++++++++++++
 src/adapter.rs                        | 601 +++++++++++++++++++++++
 src/error.rs                          |  51 ++
 src/lib.rs                            |  14 +
 src/models.rs                         |  40 ++
 19 files changed, 2024 insertions(+), 1 deletion(-)
 create mode 100644 .github/dependabot.yml
 create mode 100644 .github/workflows/ci.yml
 create mode 100644 .github/workflows/coverage.yml
 create mode 100644 .github/workflows/release.yml
 create mode 100644 .gitignore
 create mode 100644 Cargo.toml
 create mode 100644 LICENSE
 create mode 100644 docker-compose.yaml
 create mode 100644 examples/rbac_model.conf
 create mode 100644 examples/rbac_policy.csv
 create mode 100644 examples/rbac_with_domains_model.conf
 create mode 100644 examples/rbac_with_domains_policy.csv
 create mode 100644 sample.env
 create mode 100644 src/actions.rs
 create mode 100644 src/adapter.rs
 create mode 100644 src/error.rs
 create mode 100644 src/lib.rs
 create mode 100644 src/models.rs

diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 0000000..00a84b9
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,18 @@
+version: 2
+updates:
+- package-ecosystem: cargo
+  directory: "/"
+  schedule:
+    interval: daily
+    time: "13:00"
+  open-pull-requests-limit: 10
+  ignore:
+  - dependency-name: actix-rt
+    versions:
+    - 2.7.0
+  - dependency-name: casbin
+    versions:
+    - 2.0.9
+  - dependency-name: tokio
+    versions:
+    - 1.19.0
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
new file mode 100644
index 0000000..09de7ea
--- /dev/null
+++ b/.github/workflows/ci.yml
@@ -0,0 +1,106 @@
+name: CI
+
+on:
+  pull_request:
+  push:
+    branches:
+      - master
+
+jobs:
+  build:
+    name: Auto Build CI
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        os: [ ubuntu-latest, windows-latest, macOS-latest ]
+        rust: [ stable, beta, nightly ]
+
+    steps:
+      - name: Checkout Repository
+        uses: actions/checkout@master
+
+      - name: Install Rust toolchain
+        uses: actions-rs/toolchain@v1
+        with:
+          profile: minimal
+          toolchain: ${{ matrix.rust }}
+          components: rustfmt, clippy
+          override: true
+
+      - name: Setup PostgreSQL (for ubuntu)
+        if: matrix.os == 'ubuntu-latest'
+        run: |
+          sudo apt-get update
+          sudo apt-get install -y libpq-dev postgresql
+          echo "host all all 127.0.0.1/32 md5" | sudo tee -a /etc/postgresql/10/main/pg_hba.conf
+          sudo service postgresql restart && sleep 3
+          sudo -u postgres createuser casbin_rs
+          sudo -u postgres createdb casbin
+          sudo -u postgres psql -c "alter user casbin_rs with encrypted password 'casbin_rs'; grant all privileges on database casbin to casbin_rs;"
+          sudo service postgresql restart && sleep 3
+
+      - name: Setup PostgreSQL (for macOS)
+        if: matrix.os == 'macOS-latest'
+        run: |
+          brew update
+          pg_ctl -D /usr/local/var/postgres start
+          sleep 3
+          createuser casbin_rs
+          createdb casbin
+          psql postgres -c "alter user casbin_rs with encrypted password 'casbin_rs'; 
grant all privileges on database casbin to casbin_rs;" + + - name: Setup PostgreSQL (for windows) + if: matrix.os == 'windows-latest' + shell: cmd + run: | + choco install postgresql11 --force --params '/Password:root' + "C:\Program Files\PostgreSQL\11\bin\createuser" casbin_rs + "C:\Program Files\PostgreSQL\11\bin\createdb" casbin + "C:\Program Files\PostgreSQL\11\bin\psql" -c "alter user casbin_rs with encrypted password 'casbin_rs'; grant all privileges on database casbin to casbin_rs;" + + - name: Set environment variables (for windows) + if: matrix.os == 'windows-latest' + shell: bash + run: | + echo "C:\Program Files\PostgreSQL\11\bin" >> $GITHUB_PATH + echo "PQ_LIB_DIR=C:\Program Files\PostgreSQL\11\lib" >> $GITHUB_ENV + + - name: Create PostgresSQL Table + run: psql -c "CREATE TABLE IF NOT EXISTS casbin_rule ( + id SERIAL PRIMARY KEY, + ptype VARCHAR NOT NULL, + v0 VARCHAR NOT NULL, + v1 VARCHAR NOT NULL, + v2 VARCHAR NOT NULL, + v3 VARCHAR NOT NULL, + v4 VARCHAR NOT NULL, + v5 VARCHAR NOT NULL, + CONSTRAINT unique_key_sqlx_adapter UNIQUE(ptype, v0, v1, v2, v3, v4, v5) + );" postgres://casbin_rs:casbin_rs@127.0.0.1:5432/casbin + + - name: Cargo Build + uses: actions-rs/cargo@v1 + with: + command: build + + # PostgreSQL tests + # tokio + - name: Cargo Test For PostgreSQL,runtime-tokio + uses: actions-rs/cargo@v1 + env: + DATABASE_URL: postgres://casbin_rs:casbin_rs@localhost:5432/casbin + with: + command: test + args: --no-default-features --features runtime-tokio + + - name: Cargo Clippy + uses: actions-rs/cargo@v1 + with: + command: clippy + args: -- -D warnings + + - name: Cargo Fmt Check + uses: actions-rs/cargo@v1 + with: + command: fmt + args: --all -- --check diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml new file mode 100644 index 0000000..6760c31 --- /dev/null +++ b/.github/workflows/coverage.yml @@ -0,0 +1,64 @@ +name: Coverage + +on: + push: + branches: + - master + pull_request: + branches: + - master + +jobs: + cover: + name: Auto Codecov Coverage + runs-on: ubuntu-latest + + services: + postgres: + image: postgres:11 + env: + POSTGRES_USER: casbin_rs + POSTGRES_PASSWORD: casbin_rs + POSTGRES_DB: casbin + ports: + - 5432:5432 + options: --health-cmd pg_isready --health-interval 10s --health-timeout 5s --health-retries 5 + + steps: + - name: Checkout Repository + uses: actions/checkout@master + + - name: Install Rust toolchain + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + override: true + + - name: Install PostgreSQL Dependencies + run: sudo apt-get install libpq-dev postgresql-client + + - name: Create Table + run: psql postgres://casbin_rs:casbin_rs@127.0.0.1:5432/casbin -c "CREATE TABLE IF NOT EXISTS casbin_rule ( + id SERIAL PRIMARY KEY, + ptype VARCHAR NOT NULL, + v0 VARCHAR NOT NULL, + v1 VARCHAR NOT NULL, + v2 VARCHAR NOT NULL, + v3 VARCHAR NOT NULL, + v4 VARCHAR NOT NULL, + v5 VARCHAR NOT NULL, + CONSTRAINT unique_key_sqlx_adapter UNIQUE(ptype, v0, v1, v2, v3, v4, v5) + );" + + - name: Run cargo-tarpaulin + uses: actions-rs/tarpaulin@v0.1 + env: + DATABASE_URL: postgres://casbin_rs:casbin_rs@localhost:5432/casbin + with: + args: --avoid-cfg-tarpaulin --out Xml + + - name: Upload to codecov.io + uses: codecov/codecov-action@v1 + with: + token: ${{secrets.CODECOV_TOKEN}} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 0000000..faf6d16 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,46 @@ +name: Auto Release + +on: + push: + # Sequence 
of patterns matched against refs/tags
+    tags:
+      - "v*" # Push events to matching v*, i.e. v1.0, v20.15.10
+
+jobs:
+  release:
+    name: Auto Release by Tags
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout Repository
+        uses: actions/checkout@master
+
+      - name: Install Rust toolchain
+        uses: actions-rs/toolchain@v1
+        with:
+          profile: minimal
+          toolchain: stable
+          override: true
+
+      - name: Cargo Login
+        uses: actions-rs/cargo@v1
+        with:
+          command: login
+          args: -- ${{ secrets.CARGO_TOKEN }}
+
+      - name: Cargo Publish
+        uses: actions-rs/cargo@v1
+        with:
+          command: publish
+          args: --no-verify
+
+      - name: GitHub Release
+        id: create_release
+        uses: actions/create-release@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # This token is provided by Actions, you do not need to create your own token
+        with:
+          tag_name: ${{ github.ref }}
+          release_name: Release ${{ github.ref }}
+          draft: false
+          prerelease: false
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..0b2fe33
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,23 @@
+# Generated by Cargo
+# will have compiled files and executables
+/target
+
+# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
+# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
+Cargo.lock
+
+# These are backup files generated by rustfmt
+**/*.rs.bk
+
+# Ignore ENV files
+.env
+
+# Ignore IDE files
+.vscode/
+.idea/
+.DS_Store
+
+# Ignore sqlite DB files
+*.db
+*.db-wal
+*.db-shm
\ No newline at end of file
diff --git a/Cargo.toml b/Cargo.toml
new file mode 100644
index 0000000..6b425c4
--- /dev/null
+++ b/Cargo.toml
@@ -0,0 +1,34 @@
+[package]
+name = "tokio-postgres-adapter"
+version = "0.1.0"
+authors = ["Eason Chai ", "Cheng JIANG ", "David Miguel "]
+edition = "2021"
+license = "Apache-2.0"
+description = "tokio-postgres adapter for casbin-rs"
+homepage = "https://github.com/casbin-rs/postgres-adapter"
+readme = "README.md"
+
+[dependencies]
+casbin = { version = "2.0.9", default-features = false }
+async-trait = "0.1.56"
+dotenv = { version = "0.15.0", default-features = false }
+tokio = { version = "1.19.2", default-features = false, optional = true }
+tokio-postgres = { version = "0.7.6", default-features = false }
+deadpool-postgres = { version = "0.10.2", default-features = false }
+deadpool = { version = "0.9.5", default-features = false }
+futures = "0.3"
+
+[features]
+default = ["runtime-tokio"]
+runtime-tokio = ["casbin/runtime-tokio", "deadpool/rt_tokio_1"]
+
+[dev-dependencies]
+tokio = { version = "1.19.2", features = [ "full" ] }
+
+[profile.release]
+codegen-units = 1
+lto = true
+opt-level = 3
+
+[profile.dev]
+split-debuginfo = "unpacked"
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/README.md b/README.md
index eb846bc..1c4a996 100644
--- a/README.md
+++ b/README.md
@@ -1 +1,94 @@
-# postgres-adapter
\ No newline at end of file
+# postgres-adapter
+
+[![Crates.io](https://img.shields.io/crates/v/tokio-postgres.svg)](https://crates.io/crates/tokio-postgres)
+[![Docs](https://docs.rs/tokio-postgres/badge.svg)](https://docs.rs/tokio-postgres)
+[![Crates.io](https://img.shields.io/crates/v/deadpool-postgres.svg)](https://crates.io/crates/deadpool-postgres)
+[![Docs](https://docs.rs/deadpool-postgres/badge.svg)](https://docs.rs/deadpool-postgres)
+
+[![CI](https://github.com/casbin-rs/postgres-adapter/workflows/CI/badge.svg)](https://github.com/casbin-rs/postgres-adapter/actions)
+[![codecov](https://codecov.io/gh/casbin-rs/postgres-adapter/branch/master/graph/badge.svg)](https://codecov.io/gh/casbin-rs/postgres-adapter)
+
+
+postgres-adapter is an adapter for [Casbin-rs](https://github.com/casbin/casbin-rs) built on [tokio-postgres](https://github.com/sfackler/rust-postgres/tree/master/tokio-postgres) and [deadpool-postgres](https://github.com/bikeshedder/deadpool/tree/master/postgres). With this library, Casbin can load policies from and save policies to [PostgreSQL](https://www.postgresql.org/), fully asynchronously and with prepared statements.
+
+## Install
+
+Add it to `Cargo.toml`:
+
+```toml
+tokio-postgres-adapter = { version = "0.1.0" }
+tokio = "1.19.2"
+```
+
+## Configure
+
+1. Set up database environment
+
+    ```bash
+    #!/bin/bash
+
+    docker run -itd \
+        --restart always \
+        -e POSTGRES_USER=casbin_rs \
+        -e POSTGRES_PASSWORD=casbin_rs \
+        -e POSTGRES_DB=casbin \
+        -p 5432:5432 \
+        -v /srv/docker/postgresql:/var/lib/postgresql \
+        postgres:11;
+    ```
+
+2. Create table `casbin_rule`
+
+    ```bash
+    # PostgreSQL
+    psql postgres://casbin_rs:casbin_rs@127.0.0.1:5432/casbin -c "CREATE TABLE IF NOT EXISTS casbin_rule (
+        id SERIAL PRIMARY KEY,
+        ptype VARCHAR NOT NULL,
+        v0 VARCHAR NOT NULL,
+        v1 VARCHAR NOT NULL,
+        v2 VARCHAR NOT NULL,
+        v3 VARCHAR NOT NULL,
+        v4 VARCHAR NOT NULL,
+        v5 VARCHAR NOT NULL,
+        CONSTRAINT unique_key_sqlx_adapter UNIQUE(ptype, v0, v1, v2, v3, v4, v5)
+        );"
+    ```
+
+3. 
Configure `env`
+
+    Rename `sample.env` to `.env` and set `DATABASE_URL` and `POOL_SIZE` inside:
+
+    ```bash
+    DATABASE_URL=postgres://casbin_rs:casbin_rs@localhost:5432/casbin
+    POOL_SIZE=8
+    ```
+
+    Or export `DATABASE_URL` and `POOL_SIZE` directly:
+
+    ```bash
+    export DATABASE_URL=postgres://casbin_rs:casbin_rs@localhost:5432/casbin
+    export POOL_SIZE=8
+    ```
+
+
+## Example
+
+```rust
+use tokio_postgres_adapter::casbin::prelude::*;
+use tokio_postgres_adapter::casbin::Result;
+use tokio_postgres_adapter::TokioPostgresAdapter;
+use tokio_postgres::NoTls;
+
+#[tokio::main]
+async fn main() -> Result<()> {
+    let m = DefaultModel::from_file("examples/rbac_model.conf").await?;
+
+    let a = TokioPostgresAdapter::new("postgres://casbin_rs:casbin_rs@127.0.0.1:5432/casbin", 8, NoTls).await?;
+    let mut e = Enforcer::new(m, a).await?;
+
+    Ok(())
+}
+```
+
+## Features
+
+- `runtime-tokio` (default)
diff --git a/docker-compose.yaml b/docker-compose.yaml
new file mode 100644
index 0000000..fa909a9
--- /dev/null
+++ b/docker-compose.yaml
@@ -0,0 +1,11 @@
+version: "3.7"
+
+services:
+  postgres:
+    image: postgres:11-alpine
+    ports:
+      - "5432:5432"
+    environment:
+      POSTGRES_PASSWORD: casbin_rs
+      POSTGRES_USER: casbin_rs
+      POSTGRES_DB: casbin
diff --git a/examples/rbac_model.conf b/examples/rbac_model.conf
new file mode 100644
index 0000000..71159e3
--- /dev/null
+++ b/examples/rbac_model.conf
@@ -0,0 +1,14 @@
+[request_definition]
+r = sub, obj, act
+
+[policy_definition]
+p = sub, obj, act
+
+[role_definition]
+g = _, _
+
+[policy_effect]
+e = some(where (p.eft == allow))
+
+[matchers]
+m = g(r.sub, p.sub) && r.obj == p.obj && r.act == p.act
\ No newline at end of file
diff --git a/examples/rbac_policy.csv b/examples/rbac_policy.csv
new file mode 100644
index 0000000..f93d6df
--- /dev/null
+++ b/examples/rbac_policy.csv
@@ -0,0 +1,5 @@
+p, alice, data1, read
+p, bob, data2, write
+p, data2_admin, data2, read
+p, data2_admin, data2, write
+g, alice, data2_admin
\ No newline at end of file
diff --git a/examples/rbac_with_domains_model.conf b/examples/rbac_with_domains_model.conf
new file mode 100644
index 0000000..57c3721
--- /dev/null
+++ b/examples/rbac_with_domains_model.conf
@@ -0,0 +1,14 @@
+[request_definition]
+r = sub, dom, obj, act
+
+[policy_definition]
+p = sub, dom, obj, act
+
+[role_definition]
+g = _, _, _
+
+[policy_effect]
+e = some(where (p.eft == allow))
+
+[matchers]
+m = g(r.sub, p.sub, r.dom) && r.dom == p.dom && r.obj == p.obj && r.act == p.act
\ No newline at end of file
diff --git a/examples/rbac_with_domains_policy.csv b/examples/rbac_with_domains_policy.csv
new file mode 100644
index 0000000..a20ccd0
--- /dev/null
+++ b/examples/rbac_with_domains_policy.csv
@@ -0,0 +1,6 @@
+p, admin,domain1,data1,read
+p, admin,domain1,data1,write
+p, admin,domain2,data2,read
+p, admin,domain2,data2,write
+g, alice,admin,domain1
+g, bob,admin,domain2
diff --git a/sample.env b/sample.env
new file mode 100644
index 0000000..861c493
--- /dev/null
+++ b/sample.env
@@ -0,0 +1,2 @@
+DATABASE_URL=postgres://casbin_rs:casbin_rs@localhost:5432/casbin
+POOL_SIZE=8
diff --git a/src/actions.rs b/src/actions.rs
new file mode 100644
index 0000000..14d9c5a
--- /dev/null
+++ b/src/actions.rs
@@ -0,0 +1,680 @@
+#![allow(clippy::suspicious_else_formatting)]
+#![allow(clippy::toplevel_ref_arg)]
+use crate::models::{CasbinRule, NewCasbinRule};
+use crate::Error;
+use casbin::{error::AdapterError, Error as CasbinError, Filter, Result};
+use deadpool_postgres::Pool;
+use futures::try_join;
+use tokio_postgres::{types::Type, SimpleQueryMessage};
+
+pub 
type ConnectionPool = Pool; + +pub async fn new(conn: &ConnectionPool) -> Result> { + let client = conn.get().await.map_err(|err| { + CasbinError::from(AdapterError(Box::new(Error::PoolError(err)))) + })?; + + client.simple_query( + "CREATE TABLE IF NOT EXISTS casbin_rule ( + id SERIAL PRIMARY KEY, + ptype VARCHAR NOT NULL, + v0 VARCHAR NOT NULL, + v1 VARCHAR NOT NULL, + v2 VARCHAR NOT NULL, + v3 VARCHAR NOT NULL, + v4 VARCHAR NOT NULL, + v5 VARCHAR NOT NULL, + CONSTRAINT unique_key_pg_adapter UNIQUE(ptype, v0, v1, v2, v3, v4, v5) + ) + " + ) + .await + .map_err(|err| CasbinError::from(AdapterError(Box::new(Error::PostgresError(err))))) +} + +pub async fn remove_policy( + conn: &ConnectionPool, + pt: &str, + rule: Vec, +) -> Result { + let rule = normalize_casbin_rule(rule); + let client = conn.get().await.map_err(|err| { + CasbinError::from(AdapterError(Box::new(Error::PoolError(err)))) + })?; + + let stmt = client + .prepare_typed( + "DELETE FROM casbin_rule WHERE + ptype = $1 AND + v0 = $2 AND + v1 = $3 AND + v2 = $4 AND + v3 = $5 AND + v4 = $6 AND + v5 = $7 + RETURNING id", + &[ + Type::TEXT, + Type::TEXT, + Type::TEXT, + Type::TEXT, + Type::TEXT, + Type::TEXT, + Type::TEXT, + ], + ) + .await + .map_err(|err| { + CasbinError::from(AdapterError(Box::new(Error::PostgresError( + err, + )))) + })?; + client + .query_opt( + &stmt, + &[ + &pt, &rule[0], &rule[1], &rule[2], &rule[3], &rule[4], + &rule[5], + ], + ) + .await + .map(|s| s.is_some()) + .map_err(|err| { + CasbinError::from(AdapterError(Box::new(Error::PostgresError( + err, + )))) + }) +} + +pub async fn remove_policies( + conn: &ConnectionPool, + pt: &str, + rules: Vec>, +) -> Result { + let mut client = conn.get().await.map_err(|err| { + CasbinError::from(AdapterError(Box::new(Error::PoolError(err)))) + })?; + + let stmt = client + .prepare_typed( + "DELETE FROM casbin_rule WHERE + ptype = $1 AND + v0 = $2 AND + v1 = $3 AND + v2 = $4 AND + v3 = $5 AND + v4 = $6 AND + v5 = $7 + RETURNING id", + &[ + Type::TEXT, + Type::TEXT, + Type::TEXT, + Type::TEXT, + Type::TEXT, + Type::TEXT, + Type::TEXT, + ], + ) + .await + .map_err(|err| { + CasbinError::from(AdapterError(Box::new(Error::PostgresError( + err, + )))) + })?; + let transaction = client.transaction().await.map_err(|err| { + CasbinError::from(AdapterError(Box::new(Error::PostgresError(err)))) + })?; + for rule in rules { + let rule = normalize_casbin_rule(rule); + transaction + .query_one( + &stmt, + &[ + &pt, &rule[0], &rule[1], &rule[2], &rule[3], &rule[4], + &rule[5], + ], + ) + .await + .map(|s| s.is_empty()) + .map_err(|err| { + CasbinError::from(AdapterError(Box::new( + Error::PostgresError(err), + ))) + })?; + } + transaction.commit().await.map_err(|err| { + CasbinError::from(AdapterError(Box::new(Error::PostgresError(err)))) + })?; + Ok(true) +} + +pub async fn remove_filtered_policy( + conn: &ConnectionPool, + pt: &str, + field_index: usize, + field_values: Vec, +) -> Result { + let client = conn.get().await.map_err(|err| { + CasbinError::from(AdapterError(Box::new(Error::PoolError(err)))) + })?; + + let field_values = normalize_casbin_rule(field_values); + if field_index == 5 { + let stmt = client + .prepare_typed( + "DELETE FROM casbin_rule WHERE + ptype = $1 AND + (v5 is NULL OR v5 = $2) + RETURNING id", + &[Type::TEXT, Type::TEXT], + ) + .await + .map_err(|err| { + CasbinError::from(AdapterError(Box::new( + Error::PostgresError(err), + ))) + })?; + client + .query_opt(&stmt, &[&pt, &field_values[5]]) + .await + .map(|s| s.is_some()) + .map_err(|err| { + 
CasbinError::from(AdapterError(Box::new( + Error::PostgresError(err), + ))) + }) + } else if field_index == 4 { + let stmt = client + .prepare_typed( + "DELETE FROM casbin_rule WHERE + ptype = $1 AND + (v4 is NULL OR v4 = $2) AND + (v5 is NULL OR v5 = $3) + RETURNING id", + &[Type::TEXT, Type::TEXT, Type::TEXT], + ) + .await + .map_err(|err| { + CasbinError::from(AdapterError(Box::new( + Error::PostgresError(err), + ))) + })?; + client + .query_opt(&stmt, &[&pt, &field_values[4], &field_values[5]]) + .await + .map(|s| s.is_some()) + .map_err(|err| { + CasbinError::from(AdapterError(Box::new( + Error::PostgresError(err), + ))) + }) + } else if field_index == 3 { + let stmt = client + .prepare_typed( + "DELETE FROM casbin_rule WHERE + ptype = $1 AND + (v3 is NULL OR v3 = $2) AND + (v4 is NULL OR v4 = $3) AND + (v5 is NULL OR v5 = $4) + RETURNING id", + &[Type::TEXT, Type::TEXT, Type::TEXT, Type::TEXT], + ) + .await + .map_err(|err| { + CasbinError::from(AdapterError(Box::new( + Error::PostgresError(err), + ))) + })?; + client + .query_opt( + &stmt, + &[&pt, &field_values[3], &field_values[4], &field_values[5]], + ) + .await + .map(|s| s.is_some()) + .map_err(|err| { + CasbinError::from(AdapterError(Box::new( + Error::PostgresError(err), + ))) + }) + } else if field_index == 2 { + let stmt = client + .prepare_typed( + "DELETE FROM casbin_rule WHERE + ptype = $1 AND + (v2 is NULL OR v2 = $2) AND + (v3 is NULL OR v3 = $3) AND + (v4 is NULL OR v4 = $4) AND + (v5 is NULL OR v5 = $5) + RETURNING id", + &[Type::TEXT, Type::TEXT, Type::TEXT, Type::TEXT, Type::TEXT], + ) + .await + .map_err(|err| { + CasbinError::from(AdapterError(Box::new( + Error::PostgresError(err), + ))) + })?; + client + .query_opt( + &stmt, + &[ + &pt, + &field_values[2], + &field_values[3], + &field_values[4], + &field_values[5], + ], + ) + .await + .map(|s| s.is_some()) + .map_err(|err| { + CasbinError::from(AdapterError(Box::new( + Error::PostgresError(err), + ))) + }) + } else if field_index == 1 { + let stmt = client + .prepare_typed( + "DELETE FROM casbin_rule WHERE + ptype = $1 AND + (v1 is NULL OR v1 = $2) AND + (v2 is NULL OR v2 = $3) AND + (v3 is NULL OR v3 = $4) AND + (v4 is NULL OR v4 = $5) AND + (v5 is NULL OR v5 = $6) + RETURNING id", + &[ + Type::TEXT, + Type::TEXT, + Type::TEXT, + Type::TEXT, + Type::TEXT, + Type::TEXT, + ], + ) + .await + .map_err(|err| { + CasbinError::from(AdapterError(Box::new( + Error::PostgresError(err), + ))) + })?; + client + .query_opt( + &stmt, + &[ + &pt, + &field_values[1], + &field_values[2], + &field_values[3], + &field_values[4], + &field_values[5], + ], + ) + .await + .map(|s| s.is_some()) + .map_err(|err| { + CasbinError::from(AdapterError(Box::new( + Error::PostgresError(err), + ))) + }) + } else { + let stmt = client + .prepare_typed( + "DELETE FROM casbin_rule WHERE + ptype = $1 AND + (v0 is NULL OR v0 = $2) AND + (v1 is NULL OR v1 = $3) AND + (v2 is NULL OR v2 = $4) AND + (v3 is NULL OR v3 = $5) AND + (v4 is NULL OR v4 = $6) AND + (v5 is NULL OR v5 = $7) + RETURNING id", + &[ + Type::TEXT, + Type::TEXT, + Type::TEXT, + Type::TEXT, + Type::TEXT, + Type::TEXT, + Type::TEXT, + ], + ) + .await + .map_err(|err| { + CasbinError::from(AdapterError(Box::new( + Error::PostgresError(err), + ))) + })?; + + client + .query_opt( + &stmt, + &[ + &pt, + &field_values[0], + &field_values[1], + &field_values[2], + &field_values[3], + &field_values[4], + &field_values[5], + ], + ) + .await + .map(|s| s.is_some()) + .map_err(|err| { + CasbinError::from(AdapterError(Box::new( + 
Error::PostgresError(err), + ))) + }) + } +} + +pub(crate) async fn load_policy( + conn: &ConnectionPool, +) -> Result> { + let client = conn.get().await.map_err(|err| { + CasbinError::from(AdapterError(Box::new(Error::PoolError(err)))) + })?; + + let stmt = client + .prepare_typed("SELECT * FROM casbin_rule", &[]) + .await + .map_err(|err| { + CasbinError::from(AdapterError(Box::new(Error::PostgresError( + err, + )))) + })?; + let casbin_rule: Vec = client + .query(&stmt, &[]) + .await + .map_err(|err| { + CasbinError::from(AdapterError(Box::new(Error::PostgresError( + err, + )))) + })? + .into_iter() + .map(CasbinRule::from) + .collect(); + Ok(casbin_rule) +} + +pub(crate) async fn load_filtered_policy<'a>( + conn: &ConnectionPool, + filter: &Filter<'_>, +) -> Result> { + let client = conn.get().await.map_err(|err| { + CasbinError::from(AdapterError(Box::new(Error::PoolError(err)))) + })?; + + let (g_filter, p_filter) = filtered_where_values(filter); + + let stmt = client + .prepare_typed("SELECT * from casbin_rule WHERE ( + ptype LIKE 'g%' AND v0 LIKE $1 AND v1 LIKE $2 AND v2 LIKE $3 AND v3 LIKE $4 AND v4 LIKE $5 AND v5 LIKE $6 ) + OR ( + ptype LIKE 'p%' AND v0 LIKE $7 AND v1 LIKE $8 AND v2 LIKE $9 AND v3 LIKE $10 AND v4 LIKE $11 AND v5 LIKE $12 )", &[ + Type::TEXT, Type::TEXT, Type::TEXT, Type::TEXT, Type::TEXT, Type::TEXT, + Type::TEXT, Type::TEXT, Type::TEXT, Type::TEXT, Type::TEXT, Type::TEXT, + ]) + .await + .map_err(|err| { + CasbinError::from(AdapterError(Box::new(Error::PostgresError( + err, + )))) + })?; + let casbin_rule: Vec = client + .query( + &stmt, + &[ + &g_filter[0], + &g_filter[1], + &g_filter[2], + &g_filter[3], + &g_filter[4], + &g_filter[5], + &p_filter[0], + &p_filter[1], + &p_filter[2], + &p_filter[3], + &p_filter[4], + &p_filter[5], + ], + ) + .await + // .map(|s| s.into()) + .map_err(|err| { + CasbinError::from(AdapterError(Box::new(Error::PostgresError( + err, + )))) + })? 
+ .into_iter() + .map(CasbinRule::from) + .collect(); + + Ok(casbin_rule) +} + +fn filtered_where_values<'a>( + filter: &Filter<'a>, +) -> ([&'a str; 6], [&'a str; 6]) { + let mut g_filter: [&'a str; 6] = ["%", "%", "%", "%", "%", "%"]; + let mut p_filter: [&'a str; 6] = ["%", "%", "%", "%", "%", "%"]; + for (idx, val) in filter.g.iter().enumerate() { + if val != &"" { + g_filter[idx] = val; + } + } + for (idx, val) in filter.p.iter().enumerate() { + if val != &"" { + p_filter[idx] = val; + } + } + (g_filter, p_filter) +} + +pub(crate) async fn save_policy( + conn: &ConnectionPool, + rules: Vec>, +) -> Result<()> { + let mut client = conn.get().await.map_err(|err| { + CasbinError::from(AdapterError(Box::new(Error::PoolError(err)))) + })?; + let (stmt_delete, stmt_insert) = try_join!( + client.prepare_typed("DELETE FROM casbin_rule", &[]), + client.prepare_typed( + "INSERT INTO casbin_rule ( ptype, v0, v1, v2, v3, v4, v5 ) + VALUES ( $1, $2, $3, $4, $5, $6, $7 ) RETURNING id", + &[ + Type::TEXT, + Type::TEXT, + Type::TEXT, + Type::TEXT, + Type::TEXT, + Type::TEXT, + Type::TEXT, + ], + ), + ) + .map_err(|err| { + CasbinError::from(AdapterError(Box::new(Error::PostgresError(err)))) + })?; + + let transaction = client.transaction().await.map_err(|err| { + CasbinError::from(AdapterError(Box::new(Error::PostgresError(err)))) + })?; + + transaction.query(&stmt_delete, &[]).await.map_err(|err| { + CasbinError::from(AdapterError(Box::new(Error::PostgresError(err)))) + })?; + + for rule in rules { + transaction + .query_one( + &stmt_insert, + &[ + &rule.ptype, + &rule.v0, + &rule.v1, + &rule.v2, + &rule.v3, + &rule.v4, + &rule.v5, + ], + ) + .await + .map_err(|err| { + CasbinError::from(AdapterError(Box::new( + Error::PostgresError(err), + ))) + })?; + } + transaction.commit().await.map_err(|err| { + CasbinError::from(AdapterError(Box::new(Error::PostgresError(err)))) + })?; + Ok(()) +} + +pub(crate) async fn add_policy( + conn: &ConnectionPool, + rule: NewCasbinRule<'_>, +) -> Result { + let client = conn.get().await.map_err(|err| { + CasbinError::from(AdapterError(Box::new(Error::PoolError(err)))) + })?; + let stmt = client + .prepare_typed( + "INSERT INTO casbin_rule ( ptype, v0, v1, v2, v3, v4, v5 ) + VALUES ( $1, $2, $3, $4, $5, $6, $7 ) + RETURNING id", + &[ + Type::TEXT, + Type::TEXT, + Type::TEXT, + Type::TEXT, + Type::TEXT, + Type::TEXT, + Type::TEXT, + ], + ) + .await + .map_err(|err| { + CasbinError::from(AdapterError(Box::new(Error::PostgresError( + err, + )))) + })?; + client + .query_one( + &stmt, + &[ + &rule.ptype, + &rule.v0, + &rule.v1, + &rule.v2, + &rule.v3, + &rule.v4, + &rule.v5, + ], + ) + .await + .map_err(|err| { + CasbinError::from(AdapterError(Box::new(Error::PostgresError( + err, + )))) + })?; + + Ok(true) +} + +pub(crate) async fn add_policies( + conn: &ConnectionPool, + rules: Vec>, +) -> Result { + let mut client = conn.get().await.map_err(|err| { + CasbinError::from(AdapterError(Box::new(Error::PoolError(err)))) + })?; + + let stmt = client + .prepare_typed( + "INSERT INTO casbin_rule ( ptype, v0, v1, v2, v3, v4, v5 ) + VALUES ( $1, $2, $3, $4, $5, $6, $7 ) + RETURNING id", + &[ + Type::TEXT, + Type::TEXT, + Type::TEXT, + Type::TEXT, + Type::TEXT, + Type::TEXT, + Type::TEXT, + ], + ) + .await + .map_err(|err| { + CasbinError::from(AdapterError(Box::new(Error::PostgresError( + err, + )))) + })?; + + let transaction = client.transaction().await.map_err(|err| { + CasbinError::from(AdapterError(Box::new(Error::PostgresError(err)))) + })?; + for rule in rules { + 
transaction + .query_one( + &stmt, + &[ + &rule.ptype, + &rule.v0, + &rule.v1, + &rule.v2, + &rule.v3, + &rule.v4, + &rule.v5, + ], + ) + .await + .map_err(|err| { + CasbinError::from(AdapterError(Box::new( + Error::PostgresError(err), + ))) + })?; + } + transaction.commit().await.map_err(|err| { + CasbinError::from(AdapterError(Box::new(Error::PostgresError(err)))) + })?; + Ok(true) +} + +pub(crate) async fn clear_policy(conn: &ConnectionPool) -> Result<()> { + let mut client = conn.get().await.map_err(|err| { + CasbinError::from(AdapterError(Box::new(Error::PoolError(err)))) + })?; + + let stmt = client + .prepare_typed("DELETE FROM casbin_rule", &[]) + .await + .map_err(|err| { + CasbinError::from(AdapterError(Box::new(Error::PostgresError( + err, + )))) + })?; + + let transaction = client.transaction().await.map_err(|err| { + CasbinError::from(AdapterError(Box::new(Error::PostgresError(err)))) + })?; + transaction.query(&stmt, &[]).await.map_err(|err| { + CasbinError::from(AdapterError(Box::new(Error::PostgresError(err)))) + })?; + + transaction.commit().await.map_err(|err| { + CasbinError::from(AdapterError(Box::new(Error::PostgresError(err)))) + })?; + Ok(()) +} + +fn normalize_casbin_rule(mut rule: Vec) -> Vec { + rule.resize(6, String::new()); + rule +} diff --git a/src/adapter.rs b/src/adapter.rs new file mode 100644 index 0000000..9cd6645 --- /dev/null +++ b/src/adapter.rs @@ -0,0 +1,601 @@ +use async_trait::async_trait; +use casbin::{ + error::AdapterError, Adapter, Error as CasbinError, Filter, Model, Result, +}; +use dotenv::dotenv; +use std::{ + str::FromStr, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, +}; + +use tokio_postgres::config::Config as PgConfig; + +use tokio_postgres::{ + tls::{MakeTlsConnect, TlsConnect}, + Socket, +}; + +use crate::{error::*, models::*}; + +use crate::actions as adapter; + +#[derive(Clone)] +pub struct TokioPostgresAdapter { + pool: adapter::ConnectionPool, + is_filtered: Arc, +} + +impl<'a> TokioPostgresAdapter { + pub async fn new(url: U, pool_size: u32, tls: T) -> Result + where + U: Into, + T: MakeTlsConnect + Clone + Sync + Send + 'static, + T::Stream: Sync + Send, + T::TlsConnect: Sync + Send, + >::Future: Send, + { + dotenv().ok(); + let pg_config = PgConfig::from_str(&url.into()).map_err(|err| { + CasbinError::from(AdapterError(Box::new(Error::PostgresError( + err, + )))) + })?; + let mgr_config = deadpool_postgres::ManagerConfig { + recycling_method: deadpool_postgres::RecyclingMethod::Fast, + }; + let mgr = deadpool_postgres::Manager::from_config( + pg_config, tls, mgr_config, + ); + let pool = deadpool_postgres::Pool::builder(mgr) + .max_size(pool_size as usize) + .build() + .map_err(|err| { + CasbinError::from(AdapterError(Box::new(Error::BuildError( + err, + )))) + })?; + adapter::new(&pool).await.map(|_| Self { + pool, + is_filtered: Arc::new(AtomicBool::new(false)), + }) + } + + pub async fn new_with_pool(pool: adapter::ConnectionPool) -> Result { + adapter::new(&pool).await.map(|_| Self { + pool, + is_filtered: Arc::new(AtomicBool::new(false)), + }) + } + + pub(crate) fn save_policy_line( + &self, + ptype: &'a str, + rule: &'a [String], + ) -> Option> { + if ptype.trim().is_empty() || rule.is_empty() { + return None; + } + + let mut new_rule = NewCasbinRule { + ptype, + v0: "", + v1: "", + v2: "", + v3: "", + v4: "", + v5: "", + }; + + new_rule.v0 = &rule[0]; + + if rule.len() > 1 { + new_rule.v1 = &rule[1]; + } + + if rule.len() > 2 { + new_rule.v2 = &rule[2]; + } + + if rule.len() > 3 { + new_rule.v3 = &rule[3]; + } 
+ + if rule.len() > 4 { + new_rule.v4 = &rule[4]; + } + + if rule.len() > 5 { + new_rule.v5 = &rule[5]; + } + + Some(new_rule) + } + + pub(crate) fn load_policy_line( + &self, + casbin_rule: &CasbinRule, + ) -> Option> { + if casbin_rule.ptype.chars().next().is_some() { + return self.normalize_policy(casbin_rule); + } + None + } + + fn normalize_policy( + &self, + casbin_rule: &CasbinRule, + ) -> Option> { + let mut result = vec![ + &casbin_rule.v0, + &casbin_rule.v1, + &casbin_rule.v2, + &casbin_rule.v3, + &casbin_rule.v4, + &casbin_rule.v5, + ]; + + while let Some(last) = result.last() { + if last.is_empty() { + result.pop(); + } else { + break; + } + } + + if !result.is_empty() { + return Some(result.iter().map(|&x| x.to_owned()).collect()); + } + + None + } +} + +#[async_trait] +impl Adapter for TokioPostgresAdapter { + async fn load_policy(&self, m: &mut dyn Model) -> Result<()> { + let rules = adapter::load_policy(&self.pool).await?; + + for casbin_rule in &rules { + let rule = self.load_policy_line(casbin_rule); + + if let Some(ref sec) = + casbin_rule.ptype.chars().next().map(|x| x.to_string()) + { + if let Some(t1) = m.get_mut_model().get_mut(sec) { + if let Some(t2) = t1.get_mut(&casbin_rule.ptype) { + if let Some(rule) = rule { + t2.get_mut_policy().insert(rule); + } + } + } + } + } + + Ok(()) + } + + async fn load_filtered_policy<'a>( + &mut self, + m: &mut dyn Model, + f: Filter<'a>, + ) -> Result<()> { + let rules = adapter::load_filtered_policy(&self.pool, &f).await?; + self.is_filtered.store(true, Ordering::SeqCst); + + for casbin_rule in &rules { + if let Some(policy) = self.normalize_policy(casbin_rule) { + if let Some(ref sec) = + casbin_rule.ptype.chars().next().map(|x| x.to_string()) + { + if let Some(t1) = m.get_mut_model().get_mut(sec) { + if let Some(t2) = t1.get_mut(&casbin_rule.ptype) { + t2.get_mut_policy().insert(policy); + } + } + } + } + } + + Ok(()) + } + + async fn save_policy(&mut self, m: &mut dyn Model) -> Result<()> { + let mut rules = vec![]; + + if let Some(ast_map) = m.get_model().get("p") { + for (ptype, ast) in ast_map { + let new_rules = ast + .get_policy() + .into_iter() + .filter_map(|x| self.save_policy_line(ptype, x)); + + rules.extend(new_rules); + } + } + + if let Some(ast_map) = m.get_model().get("g") { + for (ptype, ast) in ast_map { + let new_rules = ast + .get_policy() + .into_iter() + .filter_map(|x| self.save_policy_line(ptype, x)); + + rules.extend(new_rules); + } + } + adapter::save_policy(&self.pool, rules).await + } + + async fn add_policy( + &mut self, + _sec: &str, + ptype: &str, + rule: Vec, + ) -> Result { + if let Some(new_rule) = self.save_policy_line(ptype, rule.as_slice()) { + return adapter::add_policy(&self.pool, new_rule).await; + } + + Ok(false) + } + + async fn add_policies( + &mut self, + _sec: &str, + ptype: &str, + rules: Vec>, + ) -> Result { + let new_rules = rules + .iter() + .filter_map(|x| self.save_policy_line(ptype, x)) + .collect::>(); + + adapter::add_policies(&self.pool, new_rules).await + } + + async fn remove_policy( + &mut self, + _sec: &str, + pt: &str, + rule: Vec, + ) -> Result { + adapter::remove_policy(&self.pool, pt, rule).await + } + + async fn remove_policies( + &mut self, + _sec: &str, + pt: &str, + rules: Vec>, + ) -> Result { + adapter::remove_policies(&self.pool, pt, rules).await + } + + async fn remove_filtered_policy( + &mut self, + _sec: &str, + pt: &str, + field_index: usize, + field_values: Vec, + ) -> Result { + if field_index <= 5 + && !field_values.is_empty() + && field_values.len() > 
field_index + { + adapter::remove_filtered_policy( + &self.pool, + pt, + field_index, + field_values, + ) + .await + } else { + Ok(false) + } + } + + async fn clear_policy(&mut self) -> Result<()> { + adapter::clear_policy(&self.pool).await + } + + fn is_filtered(&self) -> bool { + self.is_filtered.load(Ordering::SeqCst) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + fn to_owned(v: Vec<&str>) -> Vec { + v.into_iter().map(|x| x.to_owned()).collect() + } + + #[cfg_attr( + feature = "runtime-tokio", + tokio::test(flavor = "multi_thread") + )] + async fn test_create() { + use casbin::prelude::*; + + let m = DefaultModel::from_file("examples/rbac_model.conf") + .await + .unwrap(); + + let adapter = { + { + TokioPostgresAdapter::new( + "postgres://casbin_rs:casbin_rs@localhost:5432/casbin", + 8, + tokio_postgres::NoTls, + ) + .await + .unwrap() + } + }; + + assert!(Enforcer::new(m, adapter).await.is_ok()); + } + + #[cfg_attr( + feature = "runtime-tokio", + tokio::test(flavor = "multi_thread") + )] + async fn test_create_with_pool() { + use casbin::prelude::*; + + let m = DefaultModel::from_file("examples/rbac_model.conf") + .await + .unwrap(); + let pool = { + { + use deadpool_postgres::{ + Manager, ManagerConfig, Pool, RecyclingMethod, + }; + use tokio_postgres::NoTls; + + let pg_config = PgConfig::from_str( + &"postgres://casbin_rs:casbin_rs@localhost:5432/casbin", + ) + .expect("failed to parse url"); + let mgr_config = ManagerConfig { + recycling_method: RecyclingMethod::Fast, + }; + let mgr = Manager::from_config(pg_config, NoTls, mgr_config); + Pool::builder(mgr).max_size(8).build().unwrap() + } + }; + + let adapter = TokioPostgresAdapter::new_with_pool(pool).await.unwrap(); + + assert!(Enforcer::new(m, adapter).await.is_ok()); + } + + #[cfg_attr( + feature = "runtime-tokio", + tokio::test(flavor = "multi_thread") + )] + async fn test_adapter() { + use casbin::prelude::*; + + let file_adapter = FileAdapter::new("examples/rbac_policy.csv"); + + let m = DefaultModel::from_file("examples/rbac_model.conf") + .await + .unwrap(); + + let mut e = Enforcer::new(m, file_adapter).await.unwrap(); + let mut adapter = { + { + TokioPostgresAdapter::new( + "postgres://casbin_rs:casbin_rs@localhost:5432/casbin", + 8, + tokio_postgres::NoTls, + ) + .await + .unwrap() + } + }; + + assert!(adapter.save_policy(e.get_mut_model()).await.is_ok()); + + assert!(adapter + .remove_policy("", "p", to_owned(vec!["alice", "data1", "read"])) + .await + .unwrap()); + assert!(adapter + .remove_policy("", "p", to_owned(vec!["bob", "data2", "write"])) + .await + .is_ok()); + assert!(adapter + .remove_policy( + "", + "p", + to_owned(vec!["data2_admin", "data2", "read"]) + ) + .await + .is_ok()); + assert!(adapter + .remove_policy( + "", + "p", + to_owned(vec!["data2_admin", "data2", "write"]) + ) + .await + .is_ok()); + assert!(adapter + .remove_policy("", "g", to_owned(vec!["alice", "data2_admin"])) + .await + .is_ok()); + + assert!(adapter + .add_policy("", "p", to_owned(vec!["alice", "data1", "read"])) + .await + .is_ok()); + assert!(adapter + .add_policy("", "p", to_owned(vec!["bob", "data2", "write"])) + .await + .is_ok()); + assert!(adapter + .add_policy( + "", + "p", + to_owned(vec!["data2_admin", "data2", "read"]) + ) + .await + .is_ok()); + assert!(adapter + .add_policy( + "", + "p", + to_owned(vec!["data2_admin", "data2", "write"]) + ) + .await + .is_ok()); + + assert!(adapter + .remove_policies( + "", + "p", + vec![ + to_owned(vec!["alice", "data1", "read"]), + to_owned(vec!["bob", "data2", "write"]), + 
to_owned(vec!["data2_admin", "data2", "read"]), + to_owned(vec!["data2_admin", "data2", "write"]), + ] + ) + .await + .is_ok()); + + assert!(adapter + .add_policies( + "", + "p", + vec![ + to_owned(vec!["alice", "data1", "read"]), + to_owned(vec!["bob", "data2", "write"]), + to_owned(vec!["data2_admin", "data2", "read"]), + to_owned(vec!["data2_admin", "data2", "write"]), + ] + ) + .await + .is_ok()); + + assert!(adapter + .add_policy("", "g", to_owned(vec!["alice", "data2_admin"])) + .await + .is_ok()); + + assert!(adapter + .remove_policy("", "p", to_owned(vec!["alice", "data1", "read"])) + .await + .is_ok()); + assert!(adapter + .remove_policy("", "p", to_owned(vec!["bob", "data2", "write"])) + .await + .is_ok()); + assert!(adapter + .remove_policy( + "", + "p", + to_owned(vec!["data2_admin", "data2", "read"]) + ) + .await + .is_ok()); + assert!(adapter + .remove_policy( + "", + "p", + to_owned(vec!["data2_admin", "data2", "write"]) + ) + .await + .is_ok()); + assert!(adapter + .remove_policy("", "g", to_owned(vec!["alice", "data2_admin"])) + .await + .is_ok()); + + assert!(!adapter + .remove_policy( + "", + "g", + to_owned(vec!["alice", "data2_admin", "not_exists"]) + ) + .await + .unwrap()); + + assert!(adapter + .add_policy("", "g", to_owned(vec!["alice", "data2_admin"])) + .await + .is_ok()); + assert!(adapter + .add_policy("", "g", to_owned(vec!["alice", "data2_admin"])) + .await + .is_err()); + + assert!(!adapter + .remove_filtered_policy( + "", + "g", + 0, + to_owned(vec!["alice", "data2_admin", "not_exists"]), + ) + .await + .unwrap()); + + assert!(adapter + .remove_filtered_policy( + "", + "g", + 0, + to_owned(vec!["alice", "data2_admin"]) + ) + .await + .unwrap()); + + assert!(adapter + .add_policy( + "", + "g", + to_owned(vec!["alice", "data2_admin", "domain1", "domain2"]), + ) + .await + .is_ok()); + assert!(adapter + .remove_filtered_policy( + "", + "g", + 1, + to_owned(vec!["data2_admin", "domain1", "domain2"]), + ) + .await + .is_ok()); + + // shadow the previous enforcer + let mut e = Enforcer::new( + "examples/rbac_with_domains_model.conf", + "examples/rbac_with_domains_policy.csv", + ) + .await + .unwrap(); + + assert!(adapter.save_policy(e.get_mut_model()).await.is_ok()); + e.set_adapter(adapter).await.unwrap(); + + let filter = Filter { + p: vec!["", "domain1"], + g: vec!["", "", "domain1"], + }; + + e.load_filtered_policy(filter).await.unwrap(); + assert!(e.enforce(("alice", "domain1", "data1", "read")).unwrap()); + assert!(e.enforce(("alice", "domain1", "data1", "write")).unwrap()); + assert!(!e.enforce(("alice", "domain1", "data2", "read")).unwrap()); + assert!(!e.enforce(("alice", "domain1", "data2", "write")).unwrap()); + assert!(!e.enforce(("bob", "domain2", "data2", "read")).unwrap()); + assert!(!e.enforce(("bob", "domain2", "data2", "write")).unwrap()); + } +} diff --git a/src/error.rs b/src/error.rs new file mode 100644 index 0000000..310f164 --- /dev/null +++ b/src/error.rs @@ -0,0 +1,51 @@ +use deadpool_postgres::{BuildError, PoolError}; +use std::{error::Error as StdError, fmt}; +use tokio_postgres::Error as PostgresError; + +#[derive(Debug)] +pub enum Error { + PostgresError(PostgresError), + PoolError(PoolError), + BuildError(BuildError), +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + use Error::*; + + match self { + PostgresError(pg_err) => pg_err.fmt(f), + PoolError(pg_err) => pg_err.fmt(f), + BuildError(pg_err) => pg_err.fmt(f), + } + } +} + +impl StdError for Error { + fn source(&self) -> 
Option<&(dyn StdError + 'static)> { + use Error::*; + + match self { + PostgresError(pg_err) => Some(pg_err), + PoolError(pg_err) => Some(pg_err), + BuildError(pg_err) => Some(pg_err), + } + } +} + +impl From for Error { + fn from(pg_err: PostgresError) -> Self { + Error::PostgresError(pg_err) + } +} + +impl From for Error { + fn from(pg_err: PoolError) -> Self { + Error::PoolError(pg_err) + } +} +impl From for Error { + fn from(pg_err: BuildError) -> Self { + Error::BuildError(pg_err) + } +} diff --git a/src/lib.rs b/src/lib.rs new file mode 100644 index 0000000..b8b9e9e --- /dev/null +++ b/src/lib.rs @@ -0,0 +1,14 @@ +extern crate tokio_postgres; + +mod adapter; +mod error; + +#[macro_use] +mod models; + +mod actions; + +pub use casbin; + +pub use adapter::TokioPostgresAdapter; +pub use error::Error; diff --git a/src/models.rs b/src/models.rs new file mode 100644 index 0000000..3030e3f --- /dev/null +++ b/src/models.rs @@ -0,0 +1,40 @@ +use tokio_postgres::Row; + +#[allow(dead_code)] +#[derive(Debug)] +pub(crate) struct CasbinRule { + pub id: i32, + pub ptype: String, + pub v0: String, + pub v1: String, + pub v2: String, + pub v3: String, + pub v4: String, + pub v5: String, +} + +impl From for CasbinRule { + fn from(row: Row) -> Self { + Self { + id: row.get("id"), + ptype: row.get("ptype"), + v0: row.get("v0"), + v1: row.get("v1"), + v2: row.get("v2"), + v3: row.get("v3"), + v4: row.get("v4"), + v5: row.get("v5"), + } + } +} + +#[derive(Debug)] +pub(crate) struct NewCasbinRule<'a> { + pub ptype: &'a str, + pub v0: &'a str, + pub v1: &'a str, + pub v2: &'a str, + pub v3: &'a str, + pub v4: &'a str, + pub v5: &'a str, +}