Compare commits

34 Commits

SHA1 Message Date
beb9fd5465 Feat: add repository to dataplatform 2025-01-03 16:01:01 +01:00
78d6ac12bf Fix: remove recursive schemas for fs repository 2025-01-03 16:00:40 +01:00
350c03dbfe Fix: adapt to new Table form 2025-01-03 15:56:29 +01:00
e28ab332a7 feat: move fs_datacatalogue to fs_repository 2025-01-03 15:54:18 +01:00
fe780b96ef Refact: change method to access to schema an tables 2025-01-03 09:53:25 +01:00
c2813e5adb refact: move fs_files to raw_datas 2025-01-03 09:16:22 +01:00
f3036ca40d doc: explain concepts 2025-01-03 09:07:48 +01:00
86912c6d3f clean: remove makefile 2025-01-03 09:00:44 +01:00
646b3cfd92 feat: start datacatalogue 2025-01-03 08:59:54 +01:00
db14b4a49a Feat: activate uv 2025-01-03 08:59:38 +01:00
9d45625a5e Feat: move to models and add consume_flux 2025-01-03 08:46:15 +01:00
07fb92e2fa Feat: create fs_datacatalogue 2025-01-03 08:46:15 +01:00
88795fdad3 Feat: create datacatalogue with fs_datacatalogue 2025-01-03 08:46:15 +01:00
aa1ead5435 refact: move graph libs to own directory 2025-01-03 08:46:15 +01:00
c347deee85 Feat: test on pandas xlsx and ods file reader 2025-01-03 08:46:15 +01:00
5dfc1c9751 Feat: start testing fs_repository 2025-01-03 08:46:15 +01:00
7fc10128da Feat: test consume_flux 2025-01-03 08:46:15 +01:00
fe8f76245b Feat: start flux 2025-01-03 08:46:15 +01:00
d613bf00df Feat: add __init__ and mod function signature 2025-01-03 08:46:15 +01:00
8a03ba8329 refact: rename stage to repository 2025-01-03 08:46:15 +01:00
8774ec11e4 Feat: put table's callback and layout in factory 2025-01-03 08:46:15 +01:00
30913a2cea Feat: callback to toggle editing 2025-01-03 08:46:15 +01:00
159b4a8275 Feat: add navigation 2025-01-03 08:46:15 +01:00
3c1d275634 feat: organise router path 2025-01-03 08:46:15 +01:00
8313323ca1 feat: global design 2025-01-03 08:46:15 +01:00
12e5dce1b4 feat: add tailwindcss 2025-01-03 08:46:15 +01:00
2f25c219af feat: add recursive schema 2025-01-03 08:46:15 +01:00
13f80d8553 feat: add schema and table listing 2025-01-03 08:46:15 +01:00
a533443caf feat: init dashboard 2025-01-03 08:46:15 +01:00
226ce84dce Feat: add is_dag to Graph 2024-10-27 14:10:33 +01:00
9ff68cb285 Feat: add get functions on sources and targets 2024-10-27 13:48:37 +01:00
5c69bb5503 Feat: add pure graph 2024-10-07 06:17:01 +02:00
c90f407cfc Feat: init graphs 2024-10-07 06:09:01 +02:00
867747d748 Fix: filename 2024-10-07 06:08:52 +02:00
38 changed files with 930 additions and 150 deletions

Makefile (deleted)

@@ -1,66 +0,0 @@
DATA_BASE=./datas

PDF_BASE=$(DATA_BASE)/pdfs
PDF_YEARS=$(wildcard $(PDF_BASE)/*)

RAW_BASE=$(DATA_BASE)/raw
RAW_CRG=$(RAW_BASE)/CRG
RAW_CRG_YEARS=$(subst $(PDF_BASE), $(RAW_CRG), $(PDF_YEARS))

$(RAW_CRG)/%/: $(wildcard $(PDF_BASE)/%/*)
	echo $(wildcard $(PDF_BASE)/$*/*)
	@echo ----
	ls $(PDF_BASE)/$*/
	@echo ----
	echo $*
	@echo ----
	echo $^
	@echo ----
	echo $?

#./datas/raw/CRG/%:
#	pdf-oralia extract all --src $$year --dest $$(subst $$PDF_BASE, $$RAW_CRG, $$year)

# $(RAW_CRG_YEARS): $(PDF_PATHS)
# 	for year in $(PDF_PATHS); do \
# 		echo $$year; \
# 		echo $$(subst $$PDF_BASE, $$RAW_CRG, $$year); \
# 		echo "----"; \
# 	done;

extract_pdfs:
	for year in 2021 2022 2023 2024; do \
		mkdir -p $(RAW_CRG)/$$year/extracted;\
		pdf-oralia extract all --src $(PDF_BASE)/$$year/ --dest $(RAW_CRG)/$$year/extracted; \
		pdf-oralia join --src $(RAW_CRG)/$$year/extracted/ --dest $(RAW_CRG)/$$year/; \
	done

clean_raw:
	rm -rf ./PLESNA Compta SYSTEM/raw/**/*.csv

clean_built:
	rm -rf $(DATA_BASE)/staging/**/*.csv
	rm -rf $(DATA_BASE)/gold/**/*.csv
	rm -rf $(DATA_BASE)/datamart/**/*.csv
	rm -rf $(DATA_BASE)/datamart/**/*.xlsx

run_ingest:
	python -m scripts ingest

run_feature:
	python -m scripts feature

run_datamart:
	python -m scripts datamart

build: clean_built run_ingest run_feature run_datamart

clean_all: clean_built clean_raw

import_nextcloud:
	rsync -av ~/Nextcloud/PLESNA\ Compta\ SYSTEM/Histoire/ ./datas/Histoire

push_nextcloud:
	rsync -av ./datas/datamart/ ~/Nextcloud/PLESNA\ Compta\ SYSTEM/DataMart

README.md

@@ -1,5 +1,15 @@
# E(T)LT for Plesna
## Installation
## Concepts
- `dataplatform`: aggregation of a datacatalogue, a compute engine, and the DAG of transformations.
- `datacatalogue`: manages the content of the datastores.
- `datastore`: storage interface for the data.
- `compute`: processing engine for the fluxes.
- `graph/dag`: logical organisation of the fluxes and the data.
## Stages
- Raw: files kept as close to their original form as possible
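To make these concepts concrete, here is a minimal sketch (not part of this changeset) wiring together the pieces added below; the repository path and the schema/table names are illustrative assumptions:

from plesna.compute.consume_flux import consume_flux
from plesna.dataplatform import DataPlateform
from plesna.models.flux import Flux
from plesna.models.transformation import Transformation
from plesna.storage.repository.fs_repository import FSRepository

# Register a filesystem-backed repository on the platform.
dp = DataPlateform()
repo = FSRepository("demo", "./datas", "demo")  # "./datas" is an assumed path
dp.add_repository("demo", repo)

# A transformation receives the source/target Tables and returns metadata.
def copy(sources, targets, **extra):
    return {"read": list(sources), "written": list(targets)}

flux = Flux(
    sources={"in": repo.table("raw", "username.csv")},  # assumed schema/table
    targets={"out": repo.table("bronze", "username")},  # assumed schema/table
    transformation=Transformation(function=copy),
)
consume_flux(flux)  # -> FluxMetaData(data={"read": ["in"], "written": ["out"]})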

plesna/compute/consume_flux.py

@@ -0,0 +1,8 @@
from plesna.models.flux import Flux, FluxMetaData


def consume_flux(flux: Flux) -> FluxMetaData:
    metadata = flux.transformation.function(
        sources=flux.sources, targets=flux.targets, **flux.transformation.extra_kwrds
    )
    return FluxMetaData(data=metadata)

plesna/dataplatform.py

@@ -0,0 +1,27 @@
from plesna.graph.graph_set import GraphSet
from plesna.storage.repository.repository import Repository


class DataPlateformError(Exception):
    pass


class DataPlateform:
    def __init__(self):
        self._graphset = GraphSet()
        self._metadata_engine = ""
        self._transformations = {}
        self._repositories = {}

    def add_repository(self, name: str, repository: Repository):
        if name in self._repositories:
            raise DataPlateformError(f"The repository {name} already exists")
        self._repositories[name] = repository

    @property
    def repositories(self) -> list[str]:
        return list(self._repositories)

    def repository(self, name: str) -> Repository:
        return self._repositories[name]

plesna/graph/__init__.py

plesna/graph/graph.py

@@ -0,0 +1,98 @@
from functools import reduce

from pydantic import BaseModel


class Node(BaseModel):
    name: str
    infos: dict = {}

    def __hash__(self):
        return hash(self.name)


class Edge(BaseModel):
    arrow_name: str
    source: Node
    target: Node
    edge_kwrds: dict = {}


class Graph:
    def __init__(self, nodes: list[Node] = [], edges: list[Edge] = []):
        self._edges = []
        self._nodes = set()
        self.add_edges(edges)
        self.add_nodes(nodes)

    def add_node(self, node: Node):
        self._nodes.add(node)

    def add_nodes(self, nodes: list[Node]):
        for node in nodes:
            self.add_node(node)

    def add_edge(self, edge: Edge):
        self._edges.append(edge)
        self.add_node(edge.source)
        self.add_node(edge.target)

    def add_edges(self, edges: list[Edge]):
        for edge in edges:
            self.add_edge(edge)

    @property
    def nodes(self):
        return self._nodes

    @property
    def edges(self):
        return self._edges

    def get_edges_from(self, node: Node) -> list[Edge]:
        """Get all edges which have the node as source"""
        return [edge for edge in self._edges if edge.source == node]

    def get_edges_to(self, node: Node) -> list[Edge]:
        """Get all edges which have the node as target"""
        return [edge for edge in self._edges if edge.target == node]

    def get_direct_targets_from(self, node: Node) -> set[Node]:
        """Get direct nodes that are accessible from the node"""
        return set(edge.target for edge in self._edges if edge.source == node)

    def get_targets_from(self, node: Node) -> set[Node]:
        """Get all nodes that are accessible from the node

        If the graph has a loop, this recursion will not terminate (RecursionError)!
        """
        direct_targets = self.get_direct_targets_from(node)
        undirect_targets = [self.get_targets_from(n) for n in direct_targets]
        undirect_targets = reduce(lambda x, y: x.union(y), undirect_targets, set())
        return direct_targets.union(undirect_targets)

    def get_direct_sources_from(self, node: Node) -> set[Node]:
        """Get direct nodes that target the node"""
        return set(edge.source for edge in self._edges if edge.target == node)

    def get_sources_from(self, node: Node) -> set[Node]:
        """Get all nodes from which the node can be reached"""
        direct_sources = self.get_direct_sources_from(node)
        undirect_sources = [self.get_sources_from(n) for n in direct_sources]
        undirect_sources = reduce(lambda x, y: x.union(y), undirect_sources, set())
        return direct_sources.union(undirect_sources)

    def is_dag(self) -> bool:
        visited = set()
        for node in self._nodes:
            if node not in visited:
                try:
                    targets = self.get_targets_from(node)
                except RecursionError:
                    return False
                visited.update(targets)  # in-place: a bare union() discards its result
        return True
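Relying on RecursionError to detect cycles works, but it is fragile: the recursion limit, not the graph, decides the outcome. An iterative alternative, sketched here as a suggestion rather than part of this changeset, is Kahn's algorithm:

from collections import deque

def is_dag_kahn(graph: Graph) -> bool:
    """Cycle check via Kahn's algorithm: repeatedly drain zero-indegree nodes."""
    indegree = {node: 0 for node in graph.nodes}
    for edge in graph.edges:
        indegree[edge.target] += 1
    queue = deque(node for node, degree in indegree.items() if degree == 0)
    drained = 0
    while queue:
        node = queue.popleft()
        drained += 1
        for edge in graph.get_edges_from(node):
            indegree[edge.target] -= 1
            if indegree[edge.target] == 0:
                queue.append(edge.target)
    # Every node drains exactly when there is no cycle.
    return drained == len(indegree)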

plesna/graph/graph_set.py

@@ -0,0 +1,36 @@
from typing import Callable

from pydantic import BaseModel


class Node(BaseModel):
    name: str
    infos: dict = {}

    def __hash__(self):
        return hash(self.name)


class EdgeOnSet(BaseModel):
    arrow: Callable
    sources: dict[str, Node]
    targets: dict[str, Node]
    edge_kwrds: dict = {}


class GraphSet:
    def __init__(self):
        self._edges = []
        self._node_sets = set()

    def append(self, edge: EdgeOnSet):
        self._edges.append(edge)
        self._node_sets.add(frozenset(edge.sources.values()))
        self._node_sets.add(frozenset(edge.targets.values()))

    @property
    def node_sets(self):
        return self._node_sets

    def is_valid_dag(self):
        pass
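is_valid_dag is still a stub. One plausible implementation, an assumption on my part rather than the author's design, is to expand each EdgeOnSet into pairwise source->target edges and reuse Graph.is_dag:

from plesna.graph.graph import Edge, Graph
from plesna.graph.graph import Node as GraphNode

def is_valid_dag(graph_set: GraphSet) -> bool:
    """Flatten each EdgeOnSet into pairwise edges, then check the flat graph."""
    graph = Graph()
    for edge in graph_set._edges:  # GraphSet exposes no public edge accessor yet
        for source in edge.sources.values():
            for target in edge.targets.values():
                graph.add_edge(
                    Edge(
                        arrow_name=getattr(edge.arrow, "__name__", "arrow"),
                        source=GraphNode(name=source.name, infos=source.infos),
                        target=GraphNode(name=target.name, infos=target.infos),
                    )
                )
    return graph.is_dag()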

plesna/models/flux.py

@@ -0,0 +1,14 @@
from pydantic import BaseModel

from plesna.models.storage import Table
from plesna.models.transformation import Transformation


class Flux(BaseModel):
    sources: dict[str, Table]
    targets: dict[str, Table]
    transformation: Transformation


class FluxMetaData(BaseModel):
    data: dict

plesna/models/storage.py

@@ -0,0 +1,55 @@
from pydantic import BaseModel


class Schema(BaseModel):
    """Where multiple tables are stored

    id: unique identifier for the schema
    repo_id: id of the repo the schema belongs to
    name: name of the schema
    value: string which describes where to find the schema in the repository
    """

    id: str
    repo_id: str
    name: str
    value: str
    tables: list[str] = []


class Table(BaseModel):
    """Place where data with the same structure are stored

    id: unique identifier for the table
    repo_id: id of the repo the table belongs to
    schema_id: id of the schema the table belongs to
    name: the name of the table
    value: string which describes where to find the table in the storage system
    """

    id: str
    repo_id: str
    schema_id: str
    name: str
    value: str
    partitions: list[str] = []


class Partition(BaseModel):
    """Place where data are stored

    id: unique identifier for the partition
    repo_id: id of the repo the partition belongs to
    schema_id: id of the schema the partition belongs to
    table_id: id of the table the partition belongs to
    name: the name of the partition
    value: string which describes where to find the partition in the storage system
    """

    id: str
    repo_id: str
    schema_id: str
    table_id: str
    name: str
    value: str

plesna/models/transformation.py

@@ -0,0 +1,15 @@
from collections.abc import Callable

from pydantic import BaseModel


class Transformation(BaseModel):
    """
    The function has to accept at least two arguments: sources and targets.
    Other arguments are passed through extra_kwrds.
    The function has to return its metadata as a dict.
    """

    function: Callable
    extra_kwrds: dict = {}
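For illustration, a function matching this contract could look like the following (the names are made up, not part of this changeset):

def merge_tables(sources, targets, separator: str = ";", **extra):
    # Read from the source Tables, write to the target Tables...
    # ...then report what happened as a dict.
    return {"sources_used": list(sources), "separator": separator}

transfo = Transformation(function=merge_tables, extra_kwrds={"separator": ","})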


@@ -0,0 +1,24 @@
import abc

from plesna.models.storage import Schema


class DataCatalogue:
    def __init__(self):
        pass

    @property
    @abc.abstractmethod
    def schemas(self) -> list[str]:
        """List schema names"""
        raise NotImplementedError

    @abc.abstractmethod
    def schema(self, name: str) -> Schema:
        """Get the schema properties"""
        raise NotImplementedError

    @abc.abstractmethod
    def tables(self, schema: str) -> list[str]:
        """List table names in the schema"""
        raise NotImplementedError


@@ -0,0 +1,81 @@
from pathlib import Path

from pydantic import BaseModel, computed_field

from plesna.models.storage import Schema, Table

from .datacatalogue import DataCatalogue


class FakeSchema(BaseModel):
    name: str

    @computed_field
    @property
    def ref(self) -> Schema:
        # "fake" ids below are placeholders: this catalogue is not backed by a repo
        return Schema(
            id=str(self.name),
            repo_id="fake",
            name=self.name,
            value=str(self.name),
        )


class FakeTable(BaseModel):
    name: str
    data: dict[str, list]

    @computed_field
    @property
    def ref(self) -> Table:
        # "fake" ids below are placeholders: this catalogue is not backed by a repo
        return Table(
            id=str(self.name),
            repo_id="fake",
            schema_id="fake",
            name=self.name,
            value=str(self.name),
        )


class FakeDataCatalogue(DataCatalogue):
    """DataCatalogue based on dictionaries"""

    def __init__(self, name: str, basepath: str = "."):
        self.name = name
        self._basepath = Path(basepath)  # gives ls() a root to scan (was undefined)

    def ls(
        self, dir="", only_files=False, only_directories=False, recursive=False
    ) -> list[str]:
        dirpath = self._basepath / dir
        if only_files:
            return [
                str(f.relative_to(dirpath))
                for f in dirpath.iterdir()
                if not f.is_dir() and not f.name.startswith(".")
            ]
        if only_directories:
            if recursive:
                return [
                    str(d.relative_to(dirpath))
                    for d, _, _ in dirpath.walk()
                    if not d.name.startswith(".")
                ]
            return [
                str(f.relative_to(dirpath))
                for f in dirpath.iterdir()
                if f.is_dir() and not f.name.startswith(".")
            ]
        return [
            str(f.relative_to(dirpath))
            for f in dirpath.iterdir()
            if not f.name.startswith(".")
        ]

    def schemas(self) -> dict[str, FakeSchema]:
        """List schemas (sub directories within basepath)"""
        subdirectories = self.ls("", only_directories=True, recursive=True)
        return {str(path): FakeSchema(name=str(path)) for path in subdirectories}

    def tables(self, schema_id=".") -> dict[str, FakeTable]:
        """List tables in a schema (files in the directory)"""
        return {
            path: FakeTable(name=path, data={})
            for path in self.ls(schema_id, only_files=True)
        }

plesna/storage/repository/fs_repository.py

@@ -0,0 +1,152 @@
from pathlib import Path

from pydantic import BaseModel, computed_field

from plesna.models.storage import Partition, Schema, Table
from plesna.storage.repository.repository import Repository


class FSPartition(BaseModel):
    name: str
    path: Path

    @computed_field
    @property
    def ref(self) -> Partition:
        return Partition(
            id=str(self.path),
            repo_id=str(self.path.parent.parent.parent),
            schema_id=str(self.path.parent.parent),
            table_id=str(self.path.parent),
            name=self.name,
            value=str(self.path.absolute()),
        )


class FSTable(BaseModel):
    name: str
    path: Path
    is_partitionned: bool
    partitions: list[str] = []

    @computed_field
    @property
    def ref(self) -> Table:
        return Table(
            id=str(self.path),
            repo_id=str(self.path.parent.parent),
            schema_id=str(self.path.parent),
            name=self.name,
            value=str(self.path.absolute()),
            partitions=self.partitions,
        )


class FSSchema(BaseModel):
    name: str
    path: Path
    tables: list[str]

    @computed_field
    @property
    def ref(self) -> Schema:
        return Schema(
            id=str(self.path),
            repo_id=str(self.path.parent),
            name=self.name,
            value=str(self.path.absolute()),
            tables=self.tables,
        )


class FSRepository(Repository):
    """Repository based on a file-tree structure

    - first level: schemas
    - second level: tables
    - third level: partitions (the actual data)
    """

    def __init__(self, name: str, basepath: str, id: str):
        self._basepath = Path(basepath)
        self.name = name
        self.id = id

        assert self._basepath.exists()

    def ls(
        self, dir="", only_files=False, only_directories=False, recursive=False
    ) -> list[str]:
        """List files in dir

        :param dir: relative path from self._basepath
        :param only_files: if true return only files
        :param only_directories: if true return only directories
        :param recursive: if true, list content recursively
        :return: list of paths relative to self._basepath / dir
        """
        dirpath = self._basepath / dir

        if recursive:
            paths = dirpath.rglob("*")
        else:
            paths = dirpath.iterdir()

        # hidden entries are filtered on the entry name, not the full path
        if only_files:
            return [
                str(f.relative_to(dirpath))
                for f in paths
                if not f.is_dir() and not f.name.startswith(".")
            ]
        if only_directories:
            return [
                str(f.relative_to(dirpath))
                for f in paths
                if f.is_dir() and not f.name.startswith(".")
            ]
        return [str(f.relative_to(dirpath)) for f in paths if not f.name.startswith(".")]

    def schemas(self) -> list[str]:
        """List schemas (sub directories within basepath)"""
        subdirectories = self.ls("", only_directories=True)
        return [str(d) for d in subdirectories]

    def _schema(self, name: str) -> FSSchema:
        """Build the FSSchema for the named sub directory"""
        schema_path = self._basepath / name
        tables = self.ls(name)
        return FSSchema(name=name, path=schema_path, tables=tables)

    def schema(self, name: str) -> Schema:
        return self._schema(name).ref

    def _table(self, schema: str, name: str) -> FSTable:
        """Get infos on the table"""
        table_path = self._basepath / schema / name
        is_partitionned = table_path.is_dir()
        if is_partitionned:
            partitions = self.ls(f"{schema}/{name}", only_files=True)
        else:
            partitions = []
        return FSTable(
            name=name,
            path=table_path,
            is_partitionned=is_partitionned,
            partitions=partitions,
        )

    def table(self, schema: str, name: str) -> Table:
        return self._table(schema, name).ref

    def _partition(self, schema: str, table: str, partition: str) -> FSPartition:
        """Get infos on the partition"""
        partition_path = self._basepath / schema / table / partition
        return FSPartition(name=partition, path=partition_path)

    def partition(self, schema: str, table: str, partition: str) -> Partition:
        return self._partition(schema, table, partition).ref
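As a reading aid, here is the directory layout the three levels above imply, together with a short usage sketch (all names are illustrative assumptions, not from this changeset):

# basepath/
#   raw/                      <- schema
#     username.csv            <- unpartitioned table
#     events/                 <- partitioned table
#       2024.csv              <- partition
#       2025.csv              <- partition

repo = FSRepository("demo", "./datas", "demo")
repo.schemas()                 # ["raw", ...]
repo.table("raw", "events")    # Table(..., partitions=["2024.csv", "2025.csv"])
repo.partition("raw", "events", "2024.csv")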

plesna/storage/repository/repository.py

@@ -0,0 +1,38 @@
import abc

from plesna.models.storage import Partition, Schema, Table


class Repository:
    def __init__(self):
        pass

    @abc.abstractmethod
    def schemas(self) -> list[str]:
        """List schema names"""
        raise NotImplementedError

    @abc.abstractmethod
    def schema(self, name: str) -> Schema:
        """Get the schema properties"""
        raise NotImplementedError

    @abc.abstractmethod
    def tables(self, schema: str) -> list[str]:
        """List table names in the schema"""
        raise NotImplementedError

    @abc.abstractmethod
    def table(self, schema: str, name: str) -> Table:
        """Get the table properties"""
        raise NotImplementedError

    @abc.abstractmethod
    def partitions(self, schema: str, table: str) -> list[str]:
        """List partition names in the table"""
        raise NotImplementedError

    @abc.abstractmethod
    def partition(self, schema: str, table: str, partition: str) -> Partition:
        """Get the partition properties"""
        raise NotImplementedError


@@ -0,0 +1,43 @@
from plesna.compute.consume_flux import consume_flux
from plesna.models.flux import Flux
from plesna.models.storage import Table
from plesna.models.transformation import Transformation


def test_consume_flux():
    sources = {
        "src1": Table(
            id="src1", repo_id="test", schema_id="test", name="test", value="here"
        ),
        "src2": Table(
            id="src2", repo_id="test", schema_id="test", name="test", value="here"
        ),
    }
    targets = {
        "tgt1": Table(
            id="tgt1", repo_id="test", schema_id="test", name="test", value="this"
        ),
        "tgt2": Table(
            id="tgt2", repo_id="test", schema_id="test", name="test", value="that"
        ),
    }

    def func(sources, targets, **kwrds):
        return {
            "sources": len(sources),
            "targets": len(targets),
            "kwrds": len(kwrds),
        }

    flux = Flux(
        sources=sources,
        targets=targets,
        transformation=Transformation(function=func, extra_kwrds={"extra": "super"}),
    )

    meta = consume_flux(flux)
    assert meta.data == {
        "sources": 2,
        "targets": 2,
        "kwrds": 1,
    }


@@ -0,0 +1,74 @@
import shutil
from pathlib import Path

import pytest

from plesna.dataplatform import DataPlateform
from plesna.storage.repository.fs_repository import FSRepository

FIXTURE_DIR = Path(__file__).parent.parent / Path("raw_datas")


@pytest.fixture
def repository(tmp_path) -> FSRepository:
    raw_path = Path(tmp_path) / "raw"
    raw_path.mkdir()

    example_src = FIXTURE_DIR
    assert example_src.exists()

    recovery_loc = raw_path / "recovery"
    recovery_loc.mkdir()
    username_loc = raw_path / "username"
    username_loc.mkdir()
    salary_loc = raw_path / "salary"
    salary_loc.mkdir()

    for f in example_src.glob("*"):
        if "recovery" in str(f):
            shutil.copy(f, recovery_loc)
        elif "salary" in str(f):  # elif: recovery files must not land in username too
            shutil.copy(f, salary_loc)
        else:
            shutil.copy(f, username_loc)

    bronze_path = Path(tmp_path) / "bronze"
    bronze_path.mkdir()
    silver_path = Path(tmp_path) / "silver"
    silver_path.mkdir()

    return FSRepository("test", tmp_path, "test")


def test_add_repository(
    repository: FSRepository,
):
    dp = DataPlateform()
    dp.add_repository("test", repository)

    assert dp.repositories == ["test"]
    assert dp.repository("test") == repository


@pytest.fixture
def dataplatform(
    repository: FSRepository,
) -> DataPlateform:
    dp = DataPlateform()
    dp.add_repository("test", repository)
    return dp


def test_listing_content(dataplatform: DataPlateform):
    assert dataplatform.repository("test").schemas() == ["raw", "bronze", "silver"]
    assert dataplatform.repository("test").schema("raw").tables == [
        "recovery",
        "username",
        "salary",
    ]


def test_add_flux(dataplatform: DataPlateform):
    # dataplatform.add_flux()
    pass


@@ -0,0 +1,39 @@
from pathlib import Path

import pytest

from plesna.dataplatform import DataPlateform
from plesna.datastore.fs_datacatalogue import FSDataCatalogue

FIXTURE_DIR = Path(__file__).parent / Path("raw_data")


@pytest.fixture
def raw_catalogue(tmp_path):
    raw_path = Path(tmp_path) / "raw"
    return FSDataCatalogue(raw_path)


@pytest.fixture
def bronze_catalogue(tmp_path):
    bronze_path = Path(tmp_path) / "bronze"
    return FSDataCatalogue(bronze_path)


@pytest.fixture
def silver_catalogue(tmp_path):
    silver_path = Path(tmp_path) / "silver"
    return FSDataCatalogue(silver_path)


@pytest.fixture
def dataplateform(
    raw_catalogue: FSDataCatalogue,
    bronze_catalogue: FSDataCatalogue,
    silver_catalogue: FSDataCatalogue,
):
    dp = DataPlateform()
    dp.add_datacatalogue("raw", raw_catalogue)
    dp.add_datacatalogue("bronze", bronze_catalogue)
    dp.add_datacatalogue("silver", silver_catalogue)
    return dp

tests/graphs/__init__.py

tests/graphs/test_graph.py

@@ -0,0 +1,107 @@
import pytest

from plesna.graph.graph import Edge, Graph, Node


def test_append_nodes():
    nodeA = Node(name="A")
    nodeB = Node(name="B")

    graph = Graph()
    graph.add_node(nodeA)
    graph.add_node(nodeB)

    assert graph.nodes == {nodeA, nodeB}


def test_append_edges():
    nodeA = Node(name="A")
    nodeB = Node(name="B")
    nodeC = Node(name="C")
    edge1 = Edge(arrow_name="arrow", source=nodeA, target=nodeC)
    edge2 = Edge(arrow_name="arrow", source=nodeB, target=nodeC)

    graph = Graph()
    graph.add_edge(edge1)
    graph.add_edge(edge2)

    assert graph.nodes == {nodeA, nodeB, nodeC}


def test_init_edges_nodes():
    nodeA = Node(name="A")
    nodeB = Node(name="B")
    nodeC = Node(name="C")
    edge1 = Edge(arrow_name="arrow", source=nodeB, target=nodeC)

    graph = Graph()
    graph.add_node(nodeA)
    graph.add_edge(edge1)

    assert graph.nodes == {nodeA, nodeB, nodeC}


@pytest.fixture
def nodes():
    return {
        "A": Node(name="A"),
        "B": Node(name="B"),
        "C": Node(name="C"),
        "D": Node(name="D"),
    }


@pytest.fixture
def dag_edges(nodes):
    return {
        "1": Edge(arrow_name="arrow", source=nodes["A"], target=nodes["C"]),
        "2": Edge(arrow_name="arrow", source=nodes["B"], target=nodes["C"]),
        "3": Edge(arrow_name="arrow", source=nodes["C"], target=nodes["D"]),
    }


@pytest.fixture
def notdag_edges(nodes):
    return {
        "1": Edge(arrow_name="arrow", source=nodes["A"], target=nodes["C"]),
        "2": Edge(arrow_name="arrow", source=nodes["B"], target=nodes["C"]),
        "3": Edge(arrow_name="arrow", source=nodes["C"], target=nodes["D"]),
        "4": Edge(arrow_name="arrow", source=nodes["D"], target=nodes["B"]),
    }


def test_get_edges_from(nodes, dag_edges):
    edges = dag_edges
    graph = Graph(edges=edges.values())

    assert graph.get_edges_from(nodes["A"]) == [edges["1"]]


def test_get_targets_from(nodes, dag_edges):
    edges = dag_edges
    graph = Graph(edges=edges.values())

    assert graph.get_direct_targets_from(nodes["A"]) == set([nodes["C"]])
    assert graph.get_direct_targets_from(nodes["C"]) == set([nodes["D"]])
    assert graph.get_direct_targets_from(nodes["D"]) == set()

    assert graph.get_targets_from(nodes["A"]) == set([nodes["C"], nodes["D"]])


def test_get_sources_from(nodes, dag_edges):
    edges = dag_edges
    graph = Graph(edges=edges.values())

    assert graph.get_direct_sources_from(nodes["A"]) == set()
    assert graph.get_direct_sources_from(nodes["C"]) == set([nodes["A"], nodes["B"]])
    assert graph.get_direct_sources_from(nodes["D"]) == set([nodes["C"]])

    assert graph.get_sources_from(nodes["D"]) == set(
        [nodes["A"], nodes["B"], nodes["C"]]
    )


def test_valid_dag(dag_edges, notdag_edges):
    graph = Graph(edges=dag_edges.values())
    assert graph.is_dag()

    graph = Graph(edges=notdag_edges.values())
    assert not graph.is_dag()

tests/graphs/test_graph_set.py

@@ -0,0 +1,18 @@
from plesna.graph.graph_set import EdgeOnSet, GraphSet, Node


def test_init():
    nodeA = Node(name="A")
    nodeB = Node(name="B")
    nodeC = Node(name="C")

    def arrow(sources, targets):
        targets["C"].infos["res"] = sources["A"].name + sources["B"].name

    edge1 = EdgeOnSet(
        arrow=arrow, sources={"A": nodeA, "B": nodeB}, targets={"C": nodeC}
    )

    graph_set = GraphSet()
    graph_set.append(edge1)

    assert graph_set.node_sets == {frozenset([nodeA, nodeB]), frozenset([nodeC])}

tests/raw_datas/salary.pdf and two other binary fixture files (binary files not shown)

tests/raw_datas/username.csv

@@ -0,0 +1,7 @@
Username;Identifier;First name;Last name
booker12;9012;Rachel;Booker
grey07;2070;Laura;Grey
johnson81;4081;Craig;Johnson
jenkins46;9346;Mary;Jenkins
smith79;5079;Jamie;Smith


@@ -1,84 +0,0 @@
import shutil
from pathlib import Path

import pandas
import pytest

from dashboard.libs.repository.fs_repository import FSRepository

EXAMPLE_DIR = "./tests/repository/fs_examples/"


@pytest.fixture
def location(tmp_path):
    loc = tmp_path
    username_loc = loc / "username"
    username_loc.mkdir()
    salary_loc = loc / "salary"
    salary_loc.mkdir()
    example_src = Path(EXAMPLE_DIR)

    for f in example_src.glob("*"):
        if "username" in str(f):
            shutil.copy(f, username_loc)
        else:
            shutil.copy(f, salary_loc)
    return loc


def test_init(location):
    repo = FSRepository("example", location)
    assert repo.ls() == [
        "username",
        "salary",
    ]
    assert repo.schemas() == [
        ".",
        "username",
        "salary",
    ]
    assert repo.tables() == []
    assert repo.tables("username") == [
        "username.csv",
        "username-password-recovery-code.xlsx",
        "username-password-recovery-code.xls",
    ]
    assert repo.tables("salary") == ["salary.pdf"]


def test_read_csv(location):
    repo = FSRepository("example", location)
    username = repo.read("username.csv", "username", delimiter=";")
    assert list(username.columns) == [
        "Username",
        "Identifier",
        "First name",
        "Last name",
    ]
    assert len(username.index) == 5


def test_fake_read_xlsx(location):
    repo = FSRepository("example", location)
    df = pandas.read_excel(
        location / "username" / "username-password-recovery-code.xls"
    )
    print(df)


def test_read_xlsx(location):
    repo = FSRepository("example", location)
    username = repo.read("username-password-recovery-code.xls", "username")
    assert list(username.columns) == [
        "Username",
        "Identifier",
        "One-time password",
        "Recovery code",
        "First name",
        "Last name",
        "Department",
        "Location",
    ]
    assert len(username.index) == 5


@@ -0,0 +1,77 @@
import shutil
from pathlib import Path

import pytest

from plesna.models.storage import Schema
from plesna.storage.repository.fs_repository import FSRepository

FIXTURE_DIR = Path(__file__).parent.parent / Path("./raw_datas/")


@pytest.fixture
def location(tmp_path):
    loc = tmp_path
    username_loc = loc / "username"
    username_loc.mkdir()
    salary_loc = loc / "salary"
    salary_loc.mkdir()
    example_src = FIXTURE_DIR
    assert example_src.exists()

    for f in example_src.glob("*"):
        if "username" in str(f):
            shutil.copy(f, username_loc)
        else:
            shutil.copy(f, salary_loc)
    return loc


def test_init(location):
    repo = FSRepository("example", location, "example")
    assert repo.ls() == [
        "username",
        "salary",
    ]
    assert repo.ls(recursive=True) == [
        "username",
        "salary",
        "username/username.csv",
        "username/username-password-recovery-code.xlsx",
        "username/username-password-recovery-code.xls",
        "salary/salary.pdf",
    ]


@pytest.fixture
def repository(location) -> FSRepository:
    return FSRepository("example", location, "example")


def test_list_schema(location, repository):
    assert repository.schemas() == ["username", "salary"]

    assert repository.schema("username").name == "username"
    assert repository.schema("username").id == str(location / "username")
    assert repository.schema("username").repo_id == str(location)
    assert repository.schema("username").value == str(location / "username")


def test_list_tables_schema(repository):
    assert repository.schema("username").tables == [
        "username.csv",
        "username-password-recovery-code.xlsx",
        "username-password-recovery-code.xls",
    ]
    assert repository.schema("salary").tables == ["salary.pdf"]


def test_describe_table(location, repository):
    table = repository.table("username", "username.csv")

    assert table.id == str(location / "username" / "username.csv")
    assert table.repo_id == str(location)
    assert table.schema_id == str(location / "username")
    assert table.name == "username.csv"
    assert table.value == str(location / "username" / "username.csv")
    assert table.partitions == []

uv.lock (generated)

@@ -0,0 +1,7 @@
version = 1
requires-python = ">=3.13"
[[package]]
name = "plesna"
version = "0.1.0"
source = { virtual = "." }