Compare commits
15 commits: 2761c3ed7b ... v0.3.11
| SHA1 |
|---|
| 45d343d810 |
| 806227f202 |
| 7bf0c38883 |
| b15b059e2a |
| 48e75358ac |
| 132e37267b |
| f2bcf6241a |
| ec9cc19be5 |
| 0040dccd9a |
| b0333cddd8 |
| 406b89fea1 |
| 812d392720 |
| 6b77980e6c |
| 90c2d3689b |
| f9be31c090 |
.drone.yml (16 lines changed)

```diff
@@ -22,10 +22,18 @@ steps:
       PYPI_TOKEN:
         from_secret: pypi_token
     when:
       event:
         include:
           - tag
+  - name: Notify on matrix
+    image: plugins/matrix
+    environment:
+      MATRIX_ROOMID:
+        from_secret: MATRIX_ROOMID
+      MATRIX_ACCESSTOKEN:
+        from_secret: MATRIX_ACCESSTOKEN
+      MATRIX_USERID:
+        from_secret: MATRIX_USERID
+    settings:
+      homeserver: https://matrix.poneyworld.net
+      template: "Une nouvelle version (${DRONE_TAG}) de pdf-oralia est publiée!"
 
 # Déclencheur de la pipeline
 trigger:
```
```diff
@@ -45,7 +45,7 @@ def from_pdf(pdf):
     charge_tables = []
     patrimoie_tables = []
 
-    for page in pdf.pages:
+    for page_number, page in enumerate(pdf.pages):
         page_text = page.extract_text()
         date = extract_date(page_text)
         additionnal_fields = {
@@ -76,7 +76,7 @@ def from_pdf(pdf):
             pass
 
         else:
-            raise ValueError("Page non reconnu")
+            logging.warning(f"Page {page_number+1} non reconnu. Page ignorée.")
 
     df_charge = charge.table2df(recapitulatif_tables + charge_tables)
     df_loc = locataire.table2df(loc_tables)
```
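The second hunk replaces a hard failure with a logged skip: an unrecognized page no longer aborts the whole extraction. A toy sketch of the pattern, with an invented page list standing in for real PDF pages:

```python
import logging

# Unclassifiable pages are logged and skipped instead of raising
# ValueError; "???" stands in for a page no extractor recognizes.
pages = ["recapitulatif", "charge", "???", "locataire"]

for page_number, page in enumerate(pages):
    if page == "???":
        logging.warning(f"Page {page_number+1} non reconnu. Page ignorée.")
        continue
    print(f"processing page {page_number+1}: {page}")
```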
pdf_oralia/join.py (new file, 29 lines)
```diff
@@ -0,0 +1,29 @@
+import glob
+import logging
+
+import pandas as pd
+
+
+def join_excel(src, dest, file_pattern):
+    """Join every excel file in arc respecting file_pattern into on unique file in dist"""
+    filenames = list_files(src, file_pattern)
+    logging.debug(f"Concatenate {filenames}")
+    dfs = extract_dfs(filenames)
+    joined_df = pd.concat(dfs)
+    logging.debug(f"Writing joined excel to {dest}")
+    joined_df.to_excel(dest, index=False)
+    logging.debug(f"with {len(joined_df)} rows")
+
+
+def list_files(src, file_glob):
+    return list(glob.iglob(f"{src}/{file_glob}"))
+
+
+def extract_dfs(filenames):
+    dfs = []
+    for filename in filenames:
+        logging.debug(f"Extracting {filename}")
+        df = pd.read_excel(filename)
+        logging.debug(f"Found {len(df)} rows")
+        dfs.append(df)
+    return dfs
```
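For reference, a minimal sketch of calling the new helper directly; the paths and glob pattern below are examples, not taken from the repository:

```python
from pdf_oralia.join import join_excel

# Concatenate every *_charge.xlsx found in ./exports into one workbook.
join_excel("exports", "charge.xlsx", "*_charge.xlsx")
```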
```diff
@@ -17,6 +17,7 @@ DF_TYPES = {
     "annee": str,
     "lot": str,
 }
+DEFAULT_FOURNISSEUR = "ROSIER MODICA MOTTEROZ SA"
 
 
 def is_it(page_text):
@@ -31,7 +32,10 @@ def is_it(page_text):
 def get_lot(txt):
     """Return lot number from "RECAPITULATIF DES OPERATIONS" """
     regex = r"[BSM](\d+)(?=\s*-)"
-    result = re.findall(regex, txt)
+    try:
+        result = re.findall(regex, txt)
+    except TypeError:
+        return "*"
     if result:
         return "{:02d}".format(int(result[0]))
     return "*"
@@ -62,8 +66,8 @@ def extract(table, additionnal_fields: dict = {}):
         for k, v in additionnal_fields.items():
             r[k] = v
 
-        if "honoraire" in row[RECAPITULATIF_DES_OPERATIONS]:
-            r["Fournisseur"] = "IMI GERANCE"
+        if "honoraire" in row[RECAPITULATIF_DES_OPERATIONS].lower():
+            r["Fournisseur"] = DEFAULT_FOURNISSEUR
 
         extracted.append(r)
 
@@ -83,6 +87,5 @@ def table2df(tables):
     df = pd.concat(dfs)
 
     df["immeuble"] = df["immeuble"].apply(lambda x: x[0].capitalize())
-    print(df.columns)
     df["lot"] = df["RECAPITULATIF DES OPERATIONS"].apply(get_lot)
-    return df.astype(DF_TYPES, errors="ignore")
+    return df.astype(DF_TYPES)
```
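The try/except around re.findall guards against non-string input: re.findall raises TypeError when the cell text is None. A standalone copy of the patched helper, with invented sample inputs:

```python
import re

def get_lot(txt):
    """Return the zero-padded lot number found in txt, or "*"."""
    regex = r"[BSM](\d+)(?=\s*-)"
    try:
        result = re.findall(regex, txt)
    except TypeError:  # txt is None for an empty cell
        return "*"
    if result:
        return "{:02d}".format(int(result[0]))
    return "*"

print(get_lot("B7 - RUE EXEMPLE"))  # -> 07
print(get_lot(None))                # -> *
```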
```diff
@@ -1,3 +1,4 @@
+import numpy as np
 import pandas as pd
 
 DF_TYPES = {
@@ -33,7 +34,7 @@ def is_drop(row):
 
 
 def extract(table, additionnal_fields: dict = {}):
-    """Turn table to dictionary with additionnal fields"""
+    """Turn table to dictionary with additional fields"""
     extracted = []
     header = table[0]
     for row in table[1:]:
@@ -138,8 +139,6 @@ def join_row(table):
                }
            )
            joined.append(row)
-        else:
-            pass
 
     return joined
 
@@ -159,4 +158,9 @@ def table2df(tables):
     df["immeuble"] = df["immeuble"].apply(lambda x: x[0].capitalize())
     df["Type"] = df["Type"].apply(clean_type)
 
-    return df.astype(DF_TYPES, errors="ignore")
+    numeric_cols = [k for k, v in DF_TYPES.items() if v == float]
+    df[numeric_cols] = df[numeric_cols].replace("", np.nan)
+
+    df = df.drop(df[(df["Locataires"] == "") & (df["Période"] == "")].index)
+
+    return df.astype(DF_TYPES)
```
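Replacing empty strings with np.nan before the cast is what makes the stricter astype(DF_TYPES) viable: pandas cannot convert "" to float, while NaN casts cleanly. A toy demonstration with an invented column name:

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({"Loyer": ["100.5", ""]})
try:
    df.astype({"Loyer": float})
except ValueError as err:
    print("empty strings break the cast:", err)

# Same replacement as in table2df: "" becomes NaN, so the cast succeeds.
df["Loyer"] = df["Loyer"].replace("", np.nan)
print(df.astype({"Loyer": float}))
```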
```diff
@@ -5,29 +5,33 @@ from pathlib import Path
 import click
 
 from .extract import extract_save
-
-logging_config = dict(
-    version=1,
-    formatters={"f": {"format": "%(levelname)-8s %(name)-12s %(message)s"}},
-    handlers={
-        "h": {
-            "class": "logging.StreamHandler",
-            "formatter": "f",
-            "level": logging.DEBUG,
-        }
-    },
-    root={
-        "handlers": ["h"],
-        "level": logging.DEBUG,
-    },
-)
-
-dictConfig(logging_config)
+from .join import join_excel
 
 
 @click.group()
-def main():
-    pass
+@click.option("--debug/--no-debug", default=False)
+def main(debug):
+    if debug:
+        logging_level = logging.DEBUG
+    else:
+        logging_level = logging.INFO
+    logging_config = dict(
+        version=1,
+        formatters={"f": {"format": "%(levelname)-8s %(name)-12s %(message)s"}},
+        handlers={
+            "h": {
+                "class": "logging.StreamHandler",
+                "formatter": "f",
+                "level": logging_level,
+            }
+        },
+        root={
+            "handlers": ["h"],
+            "level": logging_level,
+        },
+    )
+
+    dictConfig(logging_config)
 
 
 @main.group()
@@ -64,5 +68,31 @@ def all(src, dest):
 @main.command()
 @click.option("--src", help="Tous les fichiers dans src", default="./")
 @click.option("--dest", help="Où mettre les fichiers produits", default="")
-def join(src, dest):
-    join_excel(src, dest, df_names=["charge", "locataire"])
+@click.option(
+    "--force",
+    help="Ecraser si le ficher destination existe.",
+    default=False,
+    is_flag=True,
+)
+def join(src, dest, force):
+    """Join tous les fichiers excel charge (resp locataire) de src dans un seul fichier charge.xlsx dans dist.
+
+    Exemple:
+
+        pdf-oralia join --src <dossier_source> --dest <dossier_destination>
+
+
+    """
+    dest_charge = f"{dest}/charge.xlsx"
+    if not force and Path(dest_charge).exists():
+        raise ValueError(f"The file {dest_charge} already exists")
+    dest_locataire = f"{dest}/locataire.xlsx"
+    if not force and Path(dest_locataire).exists():
+        raise ValueError(f"The file {dest_locataire} already exists")
+
+    if not Path(src).exists():
+        raise ValueError(f"The source directory ({src}) does not exists.")
+    join_excel(src, dest_charge, "*_charge.xlsx")
+    logging.info(f"Les données charges ont été concaténées dans {dest_charge}")
+    join_excel(src, dest_locataire, "*_locataire.xlsx")
+    logging.info(f"Les données locataires ont été concaténées dans {dest_locataire}")
```
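Because --debug is declared on the group, it goes before the subcommand on the command line (pdf-oralia --debug join ...). A sketch exercising the new options through click's test runner; the import path pdf_oralia.scripts is a guess, as the module name is not visible in the diff:

```python
from click.testing import CliRunner

from pdf_oralia.scripts import main  # hypothetical module path

runner = CliRunner()
result = runner.invoke(
    main, ["--debug", "join", "--src", "./exports", "--dest", "./out", "--force"]
)
print(result.exit_code, result.output)
```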
pyproject.toml

```diff
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "pdf-oralia"
-version = "dev"
+version = "0"
 description = ""
 authors = ["Bertrand Benjamin <benjamin.bertrand@opytex.org>"]
 readme = "README.md"
```