Compare commits: v0.2.6...48e75358ac (33 commits)
.drone.yml (29 lines changed)

@@ -11,25 +11,32 @@ steps:
   - name: build-and-publish
     image: python:3.11
     commands:
       - echo ${DRONE_TAG}
       - sed -i 's/version = "[^"]*"/version = "${DRONE_TAG}"/g' pyproject.toml
       - curl -sSL https://install.python-poetry.org | python3 -
       - export PATH="/root/.local/bin:$PATH"
       - poetry --version
       - poetry config pypi-token.pypi $PYPI_PASSWORD
       - poetry build
-      - poetry publish --username $PYPI_USERNAME --password $PYPI_PASSWORD
+      - poetry publish --username __token__ --password $PYPI_TOKEN
     environment:
-      PYPI_USERNAME:
-        from_secret: pypi_username
-      PYPI_PASSWORD:
-        from_secret: pypi_password
+      PYPI_TOKEN:
+        from_secret: pypi_token

     when:
       event:
         include:
           - tag
+  - name: Notify on matrix
+    image: plugins/matrix
+    environment:
+      MATRIX_ROOMID:
+        from_secret: MATRIX_ROOMID
+      MATRIX_ACCESSTOKEN:
+        from_secret: MATRIX_ACCESSTOKEN
+      MATRIX_USERID:
+        from_secret: MATRIX_USERID
+    settings:
+      homeserver: https://matrix.poneyworld.net
+      template: "Une nouvelle version (${DRONE_TAG}) de pdf-oralia est publiée!"

 # Déclencheur de la pipeline
 trigger:
   event:
-    exclude:
+    include:
       - tag
README.md (new file, 3 lines)

@@ -0,0 +1,3 @@
+# PDF AURALIA
+
+Extraction de fichiers de comptabilité en pdf vers xlsx.
pdf_oralia/extract.py

@@ -2,13 +2,11 @@ import logging
 from datetime import datetime
 from pathlib import Path

 import pandas as pd
 import pdfplumber

-from .extract_charge import extract_charge, extract_remise_com
-from .extract_locataire import extract_situation_loc
+from pdf_oralia.pages import charge, locataire, patrimoine, recapitulatif

-charge_table_settings = {
+extract_table_settings = {
     "vertical_strategy": "lines",
     "horizontal_strategy": "text",
 }

@@ -27,45 +25,63 @@ def extract_date(page_text):
     return datetime.strptime(words[-1], "%d/%m/%Y")


-def extract_from_pdf(pdf, charge_dest, location_dest):
-    """Build charge_dest and location_dest xlsx file from pdf"""
+def extract_building(page_text, buildings=["bloch", "marietton", "servient"]):
+    for building in buildings:
+        if building in page_text.lower():
+            return building
+    raise ValueError("Pas d'immeuble trouvé")
+
+
+def catch_malformed_table(tables):
+    if len(tables) == 2:
+        return tables[0] + tables[1]
+    return tables[0]
+
+
+def from_pdf(pdf):
+    """Build dataframes one about charges and another on loc"""
+    recapitulatif_tables = []
     loc_tables = []
-    charge_table = []
+    charge_tables = []
+    patrimoie_tables = []

-    df_1st_charge = extract_remise_com(
-        pdf.pages[0].extract_table(charge_table_settings)
-    )
-
-    for page in pdf.pages[1:]:
+    for page_number, page in enumerate(pdf.pages):
         page_text = page.extract_text()
-        situation_loc_line = [
-            l for l in page_text.split("\n") if "SITUATION DES LOCATAIRES" in l
-        ]
         date = extract_date(page_text)
-        mois = date.strftime("%m")
-        annee = date.strftime("%Y")
-        if situation_loc_line:
-            # mois, annee = situation_loc_line[0].split(" ")[-2:]
-            if loc_tables:
-                loc_tables.append(page.extract_table()[1:])
-            else:
-                loc_tables.append(page.extract_table())
+        additionnal_fields = {
+            "immeuble": extract_building(page_text),
+            "mois": date.strftime("%m"),
+            "annee": date.strftime("%Y"),
+        }

-        elif "RECAPITULATIF DES OPERATIONS" in page_text:
-            if charge_table:
-                charge_table += page.extract_table(charge_table_settings)[1:]
-            else:
-                charge_table = page.extract_table(charge_table_settings)
+        if recapitulatif.is_it(page_text):
+            table = page.extract_tables()[0]
+            extracted = recapitulatif.extract(table, additionnal_fields)
+            if extracted:
+                recapitulatif_tables.append(extracted)

-    df_charge = extract_charge(charge_table)
-    df_charge_with_1st = pd.concat([df_1st_charge, df_charge])
-    df_charge_with_1st.to_excel(charge_dest, sheet_name="Charges", index=False)
-    logging.info(f"{charge_dest} saved")
+        elif locataire.is_it(page_text):
+            tables = page.extract_tables(extract_table_settings)[1:]
+            table = catch_malformed_table(tables)
+            extracted = locataire.extract(table, additionnal_fields)
+            loc_tables.append(extracted)

-    df_loc = extract_situation_loc(loc_tables, mois=mois, annee=annee)
-    df_loc = df_loc.assign()
-    df_loc.to_excel(location_dest, sheet_name="Location", index=False)
-    logging.info(f"{location_dest} saved")
+        elif charge.is_it(page_text):
+            tables = page.extract_tables(extract_table_settings)[1:]
+            table = catch_malformed_table(tables)
+            extracted = charge.extract(table, additionnal_fields)
+            charge_tables.append(extracted)
+
+        elif patrimoine.is_it(page_text):
+            pass
+
+        else:
+            logging.warning(f"Page {page_number+1} non reconnu. Page ignorée.")
+
+    df_charge = charge.table2df(recapitulatif_tables + charge_tables)
+    df_loc = locataire.table2df(loc_tables)
+
+    return df_charge, df_loc


 def extract_save(pdf_file, dest):

@@ -75,4 +91,9 @@ def extract_save(pdf_file, dest):
     xls_locataire = Path(dest) / f"{pdf_file.stem.replace(' ', '_')}_locataire.xlsx"

     pdf = pdfplumber.open(pdf_file)
-    extract_from_pdf(pdf, xls_charge, xls_locataire)
+    df_charge, df_loc = from_pdf(pdf)
+
+    df_charge.to_excel(xls_charge, sheet_name="Charges", index=False)
+    logging.info(f"{xls_charge} saved")
+    df_loc.to_excel(xls_locataire, sheet_name="Location", index=False)
+    logging.info(f"{xls_locataire} saved")
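The refactor splits extraction from saving: from_pdf now returns the two dataframes and extract_save writes them. A minimal usage sketch of the new API as read from the diff above (the input file name is invented for illustration):

    import pdfplumber

    from pdf_oralia.extract import from_pdf

    # from_pdf no longer writes xlsx files itself; the caller decides where to save.
    with pdfplumber.open("compta_marietton_2023_03.pdf") as pdf:  # hypothetical file
        df_charge, df_loc = from_pdf(pdf)

    df_charge.to_excel("charge.xlsx", sheet_name="Charges", index=False)
    df_loc.to_excel("locataire.xlsx", sheet_name="Location", index=False)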
pdf_oralia/extract_charge.py (deleted, 68 lines)

@@ -1,68 +0,0 @@
-import logging
-
-import numpy as np
-import pandas as pd
-
-
-def get_lot(x):
-    """Return lot number from "RECAPITULATIF DES OPERATIONS" """
-    if x[:2].isdigit():
-        return x[:2]
-    if x[:1].isdigit():
-        return "0" + x[:1]
-    if x[:2] == "PC":
-        return "PC"
-    return ""
-
-
-def extract_charge(table):
-    """From pdfplumber table extract the charge dataframe"""
-    df = (
-        pd.DataFrame(table[1:], columns=table[0])
-        .replace("", np.nan)
-        .dropna(subset=["Débits", "Crédits"], how="all")
-    )
-
-    drop_index = df[
-        df["RECAPITULATIF DES OPERATIONS"].str.contains("TOTAUX", case=False)
-        | df["RECAPITULATIF DES OPERATIONS"].str.contains("Solde créditeur", case=False)
-        | df["RECAPITULATIF DES OPERATIONS"].str.contains("Solde débiteur", case=False)
-        | df["RECAPITULATIF DES OPERATIONS"].str.contains(
-            "Total des reglements locataires", case=False
-        )
-    ].index
-    df.drop(drop_index, inplace=True)
-
-    df[""].mask(
-        df["RECAPITULATIF DES OPERATIONS"].str.contains("honoraires", case=False),
-        "IMI GERANCE",
-        inplace=True,
-    )
-
-    df = df.assign(lot=df["RECAPITULATIF DES OPERATIONS"].map(get_lot))
-
-    df = df.astype(
-        {
-            "Débits": "float64",
-            "Crédits": "float64",
-            "Dont T.V.A.": "float64",
-            "Locatif": "float64",
-            "Déductible": "float64",
-        }
-    )
-
-    df.columns.values[0] = "Fournisseur"
-    return df
-
-
-def extract_remise_com(table):
-    """Extract "remise commercial" from first page"""
-    df = pd.DataFrame(table[1:], columns=table[0]).replace("", np.nan)
-    df = df[
-        df["RECAPITULATIF DES OPERATIONS"].str.contains(
-            "Remise commerciale gérance", case=False, na=False
-        )
-    ]
-
-    df.columns.values[0] = "Fournisseur"
-    return df
pdf_oralia/extract_locataire.py (deleted, 81 lines)

@@ -1,81 +0,0 @@
-import logging
-
-import pandas as pd
-
-
-def parse_above_loc(content):
-    row = {}
-    app, loc, *_ = content.split("\n")
-    app_ = app.split(" ")
-    row["lot"] = f"{int(app_[1]):02d}"
-    row["type"] = " ".join(app_[2:])
-    row["locataire"] = loc
-    return pd.Series(row)
-
-
-def join_row(last, next):
-    row = []
-    for i in range(len(last)):
-        if last[i] and next[i]:
-            row.append(f"{last[i]}\n{next[i]}")
-        elif last[i]:
-            row.append(last[i])
-        elif next[i]:
-            row.append(next[i])
-        else:
-            row.append("")
-    return row
-
-
-def join_tables(tables):
-
-    joined = tables[0]
-
-    for t in tables[1:]:
-        last_row = joined[-1]
-        if "Totaux" not in last_row[0]:
-            first_row = t[0]
-            joined_row = join_row(last_row, first_row)
-            joined = joined[:-1] + [joined_row] + t[1:]
-        else:
-            joined += t
-
-    return joined
-
-
-def extract_situation_loc(tables, mois, annee):
-    """From pdfplumber table extract locataire df"""
-    table = join_tables(tables)
-    try:
-        df = pd.DataFrame(table[1:], columns=table[0])
-    except IndexError:
-        print(table)
-    rows = []
-    for i, row in df[df["Locataires"] == "Totaux"].iterrows():
-        above_row_loc = df.iloc[i - 1]["Locataires"]
-        up_row = pd.concat(
-            [
-                row,
-                parse_above_loc(above_row_loc),
-            ]
-        )
-
-        rows.append(up_row)
-    df_cleaned = pd.concat(rows, axis=1).T
-    df_cleaned.drop(["Locataires", "", "Période"], axis=1, inplace=True)
-
-    df_cleaned = df_cleaned.astype(
-        {
-            "Loyers": "float64",
-            "Taxes": "float64",
-            "Provisions": "float64",
-            "Divers": "float64",
-            "Total": "float64",
-            "Réglés": "float64",
-            "Impayés": "float64",
-        },
-        errors="ignore",
-    )
-
-    df_cleaned = df_cleaned.assign(mois=mois, annee=annee)
-    return df_cleaned
pdf_oralia/join.py

@@ -1,30 +1,22 @@
 import logging
 from pathlib import Path
+import glob

 import pandas as pd


-def extract_excel_to_dfs(directory, df_names=["charge", "locataire"]):
-    p = Path(directory)
-    dfs = {name: [] for name in df_names}
+def join_excel(src, dest, file_pattern):
+    """Join every excel file in arc respecting file_pattern into on unique file in dist"""
+    filenames = list_files(src, file_pattern)
+    dfs = extract_dfs(filenames)
+    joined_df = pd.concat(dfs)
+    joined_df.to_excel(dest, index=False)

-    for file in p.glob("*.xlsx"):
-        year, month, immeuble, table = file.stem.split("_")
-        df = pd.read_excel(file, dtype={"lot": str}).assign(
-            annee=year, mois=month, immeuble=immeuble[:3]
-        )
-        dfs[table].append(df)

+def list_files(src, file_glob):
+    return list(glob.iglob(f"{src}/{file_glob}"))
+
+
+def extract_dfs(filenames):
+    dfs = []
+    for filename in filenames:
+        dfs.append(pd.read_excel(filename))
+    return dfs

-def join_excel(directory, dest, df_names=["charge", "locataire"]):
-    dfs = extract_excel_to_dfs(directory, df_names)
-    destinations = {}
-    for tablename, datas in dfs.items():
-        df = pd.concat(datas)
-        destination = Path(dest) / f"{tablename}.xlsx"
-        df.to_excel(destination, index=False)
-        destinations[tablename] = destination
-        logging.info(f"{destination} written")
-    return destinations
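The join_excel signature changed from a table-name list to an explicit glob pattern, so each call now produces exactly one output file. A hedged sketch (paths invented for illustration):

    from pdf_oralia.join import join_excel

    # Concatenate every *_charge.xlsx workbook found in ./xlsx into one file.
    join_excel("./xlsx", "./out/charge.xlsx", "*_charge.xlsx")

Note that the new version drops the per-file annee/mois/immeuble annotation the old loop derived from file names; those columns now travel inside each workbook, since extract.py sets them page by page via additionnal_fields.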
pdf_oralia/pages/__init__.py (new file, 1 line)

@@ -0,0 +1 @@
+from . import charge, locataire, patrimoine, recapitulatif
pdf_oralia/pages/charge.py (new file, 91 lines)

@@ -0,0 +1,91 @@
+import re
+
+import numpy as np
+import pandas as pd
+
+RECAPITULATIF_DES_OPERATIONS = 1
+DF_TYPES = {
+    "Fournisseur": str,
+    "RECAPITULATIF DES OPERATIONS": str,
+    "Débits": float,
+    "Crédits": float,
+    "Dont T.V.A.": float,
+    "Locatif": float,
+    "Déductible": float,
+    "immeuble": str,
+    "mois": str,
+    "annee": str,
+    "lot": str,
+}
+DEFAULT_FOURNISSEUR = "ROSIER MODICA MOTTEROZ SA"
+
+
+def is_it(page_text):
+    if (
+        "RECAPITULATIF DES OPERATIONS" in page_text
+        and "COMPTE RENDU DE GESTION" not in page_text
+    ):
+        return True
+    return False
+
+
+def get_lot(txt):
+    """Return lot number from "RECAPITULATIF DES OPERATIONS" """
+    regex = r"[BSM](\d+)(?=\s*-)"
+    try:
+        result = re.findall(regex, txt)
+    except TypeError:
+        return "*"
+    if result:
+        return "{:02d}".format(int(result[0]))
+    return "*"
+
+
+def keep_row(row):
+    return not any(
+        [
+            word.lower() in row[RECAPITULATIF_DES_OPERATIONS].lower()
+            for word in ["TOTAL", "TOTAUX", "Solde créditeur", "Solde débiteur"]
+        ]
+    )
+
+
+def extract(table, additionnal_fields: dict = {}):
+    """Turn table to dictionary with additional fields"""
+    extracted = []
+    header = table[0]
+    for row in table[1:]:
+        if keep_row(row):
+            r = dict()
+            for i, value in enumerate(row):
+                if header[i] == "":
+                    r["Fournisseur"] = value
+                else:
+                    r[header[i]] = value
+
+            for k, v in additionnal_fields.items():
+                r[k] = v
+
+            if "honoraire" in row[RECAPITULATIF_DES_OPERATIONS].lower():
+                r["Fournisseur"] = DEFAULT_FOURNISSEUR
+
+            extracted.append(r)
+
+    return extracted
+
+
+def table2df(tables):
+    dfs = []
+    for table in tables:
+        df = (
+            pd.DataFrame.from_records(table)
+            .replace("", np.nan)
+            .dropna(subset=["Débits", "Crédits"], how="all")
+        )
+        df["Fournisseur"] = df["Fournisseur"].fillna(method="ffill")
+        dfs.append(df)
+    df = pd.concat(dfs)
+
+    df["immeuble"] = df["immeuble"].apply(lambda x: x[0].capitalize())
+    df["lot"] = df["RECAPITULATIF DES OPERATIONS"].apply(get_lot)
+    return df.astype(DF_TYPES)
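Each pages/* module follows the same informal protocol: is_it(page_text) decides whether a page belongs to it, extract(table, additionnal_fields) turns one pdfplumber table into row dicts, and table2df(tables) builds the final typed dataframe. A minimal sketch of driving charge by hand (the table literal is invented, shaped like the header the code expects):

    from pdf_oralia.pages import charge

    table = [
        ["", "RECAPITULATIF DES OPERATIONS", "Débits", "Crédits",
         "Dont T.V.A.", "Locatif", "Déductible"],
        ["EDF", "B01 - Electricité", "100.0", "", "", "", ""],
    ]
    fields = {"immeuble": "bloch", "mois": "03", "annee": "2023", "lot": "01"}

    rows = charge.extract(table, fields)   # list of dicts, one per kept row
    df = charge.table2df([rows])           # typed dataframe, lot re-derived via get_lot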
pdf_oralia/pages/locataire.py (new file, 166 lines)

@@ -0,0 +1,166 @@
+import numpy as np
+import pandas as pd
+
+DF_TYPES = {
+    "Locataires": str,
+    "Période": str,
+    "Loyers": float,
+    "Taxes": float,
+    "Provisions": float,
+    "Divers": str,
+    "Total": float,
+    "Réglés": float,
+    "Impayés": float,
+    "immeuble": str,
+    "mois": str,
+    "annee": str,
+    "Lot": str,
+    "Type": str,
+}
+
+
+def is_it(page_text):
+    if "SITUATION DES LOCATAIRES" in page_text:
+        return True
+    return False
+
+
+def is_drop(row):
+    if "totaux" in row[0].lower():
+        return True
+    if not any(row):
+        return True
+    return False
+
+
+def extract(table, additionnal_fields: dict = {}):
+    """Turn table to dictionary with additional fields"""
+    extracted = []
+    header = table[0]
+    for row in table[1:]:
+        if not is_drop(row):
+            r = dict()
+            for i, value in enumerate(row):
+                if header[i] != "":
+                    r[header[i]] = value
+            for k, v in additionnal_fields.items():
+                r[k] = v
+            extracted.append(r)
+    return extracted
+
+
+def join_row(last, next):
+    row = {}
+    for key in last:
+        if last[key] == next[key]:
+            row[key] = last[key]
+        elif last[key] and next[key]:
+            row[key] = f"{last[key]}\n{next[key]}"
+        elif last[key]:
+            row[key] = last[key]
+        elif next[key]:
+            row[key] = next[key]
+        else:
+            row[key] = ""
+    return row
+
+
+def join_tables(tables):
+    joined = tables[0]
+
+    for t in tables[1:]:
+        last_row = joined[-1]
+        if "totaux" not in last_row["Locataires"].lower():
+            first_row = t[0]
+            joined_row = join_row(last_row, first_row)
+            joined = joined[:-1] + [joined_row] + t[1:]
+        else:
+            joined += t
+
+    return joined
+
+
+def parse_lot(string):
+    words = string.split(" ")
+    return {"Lot": "{:02d}".format(int(words[1])), "Type": " ".join(words[2:])}
+
+
+def clean_type(string):
+    if "appartement" in string.lower():
+        return string[-2:]
+    return string
+
+
+def join_row(table):
+    joined = []
+    for row in table:
+        if row["Locataires"].startswith("Lot"):
+            row.update(parse_lot(row["Locataires"]))
+            row["Locataires"] = ""
+            joined.append(row)
+        elif row["Locataires"] == "Rappel de Loyer":
+            last_row = joined[-1]
+            row.update(
+                {
+                    "Lot": last_row["Lot"],
+                    "Type": last_row["Type"],
+                    "Locataires": last_row["Locataires"],
+                    "Divers": "Rappel de Loyer",
+                }
+            )
+            joined.append(row)
+
+        elif row["Locataires"]:
+            last_row = joined.pop()
+            row_name = row["Locataires"].replace("\n", " ")
+            row.update({k: v for k, v in last_row.items() if v})
+            row["Locataires"] = last_row["Locataires"] + " " + row_name
+            joined.append(row)
+
+        else:
+            if row["Période"].startswith("Solde"):
+                last_row = joined.pop()
+                row.update(
+                    {
+                        "Lot": last_row["Lot"],
+                        "Type": last_row["Type"],
+                        "Locataires": last_row["Locataires"],
+                    }
+                )
+                joined.append(row)
+
+            elif row["Période"].startswith("Du"):
+                last_row = joined[-1]
+                row.update(
+                    {
+                        "Lot": last_row["Lot"],
+                        "Type": last_row["Type"],
+                        "Locataires": last_row["Locataires"],
+                    }
+                )
+                joined.append(row)
+
+    return joined
+
+
+def flat_tables(tables):
+    tables_flat = []
+    for table in tables:
+        tables_flat.extend(table)
+    return tables_flat
+
+
+def table2df(tables):
+    tables = flat_tables(tables)
+    joined = join_row(tables)
+    df = pd.DataFrame.from_records(joined)
+
+    df["immeuble"] = df["immeuble"].apply(lambda x: x[0].capitalize())
+    df["Type"] = df["Type"].apply(clean_type)
+
+    numeric_cols = [k for k, v in DF_TYPES.items() if v == float]
+    df[numeric_cols] = df[numeric_cols].replace("", np.nan)
+
+    df = df.drop(df[(df["Locataires"] == "") & (df["Période"] == "")].index)
+
+    return df.astype(DF_TYPES)
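The Lot/Type normalisation is the trickiest part of this module; a small illustration of the helpers on invented strings shaped like the "SITUATION DES LOCATAIRES" cells the code expects:

    from pdf_oralia.pages import locataire

    locataire.parse_lot("Lot 2 Appartement T2")
    # {'Lot': '02', 'Type': 'Appartement T2'}

    locataire.clean_type("Appartement T2")
    # 'T2' (only the last two characters survive when "appartement" appears)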
pdf_oralia/pages/patrimoine.py (new file, 4 lines)

@@ -0,0 +1,4 @@
+def is_it(page_text):
+    if "VOTRE PATRIMOINE" in page_text:
+        return True
+    return False
pdf_oralia/pages/recapitulatif.py (new file, 34 lines)

@@ -0,0 +1,34 @@
+import numpy as np
+import pandas as pd
+
+
+def is_it(page_text):
+    if "COMPTE RENDU DE GESTION" in page_text:
+        return True
+    return False
+
+
+def extract(table, additionnal_fields: dict = {}):
+    """Extract "remise commercial" from first page"""
+    extracted = []
+    header = table[0]
+    for row in table[1:]:
+        if "Remise commerciale gérance" in row:
+            r = dict()
+            for i, value in enumerate(row):
+                r[header[i]] = value
+            for k, v in additionnal_fields.items():
+                r[k] = v
+            extracted.append(r)
+
+    return extracted
+
+
+# df = pd.DataFrame(table[1:], columns=table[0]).replace("", np.nan)
+# df = df[
+#     df["RECAPITULATIF DES OPERATIONS"].str.contains(
+#         "Remise commerciale gérance", case=False, na=False
+#     )
+# ]
+#
+# df.columns.values[0] = "Fournisseur"
+# return df
CLI module (filename not shown in this view)

@@ -3,7 +3,6 @@ from logging.config import dictConfig
 from pathlib import Path

 import click
-import pandas as pd

 from .extract import extract_save
 from .join import join_excel

@@ -66,5 +65,29 @@ def all(src, dest):
 @main.command()
 @click.option("--src", help="Tous les fichiers dans src", default="./")
 @click.option("--dest", help="Où mettre les fichiers produits", default="")
-def join(src, dest):
-    join_excel(src, dest, df_names=["charge", "locataire"])
+@click.option(
+    "--force",
+    help="Ecraser si le ficher destination existe.",
+    default=False,
+    is_flag=True,
+)
+def join(src, dest, force):
+    """Join tous les fichiers excel charge (resp locataire) de src dans un seul fichier charge.xlsx dans dist.
+
+    Exemple:
+
+        pdf-oralia join --src <dossier_source> --dest <dossier_destination>
+
+    """
+    dest_charge = f"{dest}/charge.xlsx"
+    if not force and Path(dest_charge).exists():
+        raise ValueError(f"The file {dest_charge} already exists")
+    dest_locataire = f"{dest}/locataire.xlsx"
+    if not force and Path(dest_locataire).exists():
+        raise ValueError(f"The file {dest_locataire} already exists")
+
+    join_excel(src, dest_charge, "*_charge.xlsx")
+    logging.info(f"Les données charges ont été concaténées dans {dest_charge}")
+    join_excel(src, dest_locataire, "*_locataire.xlsx")
+    logging.info(f"Les données locataires ont été concaténées dans {dest_locataire}")
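A hedged way to exercise the reworked join command without installing the package entry point, using click's test runner. The group object is named main per the decorators above; the import path pdf_oralia.__main__ is an assumption, since the module's filename is not visible in this diff:

    from click.testing import CliRunner

    from pdf_oralia.__main__ import main  # assumed import path

    runner = CliRunner()
    # --force overwrites charge.xlsx / locataire.xlsx if they already exist in --dest
    result = runner.invoke(main, ["join", "--src", "./xlsx", "--dest", "./out", "--force"])
    print(result.output)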
poetry.lock (generated, 2387 lines changed; diff suppressed because it is too large)
pyproject.toml

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "pdf-oralia"
-version = "0.2.6"
+version = "0"
 description = ""
 authors = ["Bertrand Benjamin <benjamin.bertrand@opytex.org>"]
 readme = "README.md"

@@ -20,6 +20,7 @@ openpyxl = "^3.0.10"
 [tool.poetry.group.dev.dependencies]
 pre-commit = "^2.20.0"
 jupyter = "^1.0.0"
+tabulate = "^0.9.0"

 [build-system]
 requires = ["poetry-core"]