added codegen-scripts and updated libs
parent 1cf0560957
commit 7a2e95c126
Software/.gitignore (vendored, 1 addition)
@@ -5,3 +5,4 @@ data/
 .vscode/launch.json
 .vscode/ipch
 wifi_credentials.ini
+__pycache__
Software/codegen/dtcs.py (new file, 145 lines)
@@ -0,0 +1,145 @@
import os
import time
from jinja2 import Environment, FileSystemLoader
import json
import sys

import filechecksum as fcs

def build_dtcs():
    # Paths to the input and output files
    input_file = "src/dtc_defs.txt"
    output_file = "include/dtc_defs.h"
    json_output_file = "data_src/static/dtc_table.json"

    # Check whether the output directory exists; create it otherwise
    json_output_dir = os.path.dirname(json_output_file)
    if not os.path.exists(json_output_dir):
        os.makedirs(json_output_dir)

    # Multidimensional array holding the lines from the input file
    dtc_lines = []

    # Read and parse the input file
    with open(input_file, "r", encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith("#"):
                continue

            parts = line.split(";")
            if len(parts) == 5:
                num, dtc_name, dtc_severity, title, description = [part.strip() for part in parts]
                dtc_lines.append([int(num), dtc_name, dtc_severity, title, description])

    # Check for duplicates among the DTC numbers and DTC names
    num_set = set()
    dtc_name_set = set()
    duplicates = []

    for line in dtc_lines:
        num, dtc_name, _, _, _ = line
        if num in num_set:
            duplicates.append(f"DTC number {num} is a duplicate.")
        else:
            num_set.add(num)

        if dtc_name in dtc_name_set:
            duplicates.append(f"DTC name '{dtc_name}' is a duplicate.")
        else:
            dtc_name_set.add(dtc_name)

    if duplicates:
        for duplicate in duplicates:
            print(f"Error: {duplicate}")
        raise ValueError("Duplicate DTC Data detected")

    # Look for DTC_NO_DTC and DTC_LAST_DTC
    dtc_no_dtc_added = False
    dtc_last_dtc_line = None

    for line in dtc_lines:
        _, dtc_name, _, _, _ = line
        if dtc_name == "DTC_NO_DTC":
            dtc_no_dtc_added = True
        elif dtc_name == "DTC_LAST_DTC":
            dtc_last_dtc_line = line

    # Add a DTC for DTC_NO_DTC (if not already present)
    if not dtc_no_dtc_added:
        dtc_lines.insert(0, [0, "DTC_NO_DTC", "DTC_NONE", "No Error", "No Error"])

    # If DTC_LAST_DTC exists, remove it
    if dtc_last_dtc_line:
        dtc_lines.remove(dtc_last_dtc_line)

    # Add a DTC for DTC_LAST_DTC (with the highest number)
    if dtc_lines:
        highest_num = max([line[0] for line in dtc_lines])
    else:
        highest_num = 0

    dtc_lines.append([highest_num + 1, "DTC_LAST_DTC", "DTC_NONE", "Last Error", "Last Error"])

    # Sort the lines by number, ascending
    dtc_lines.sort(key=lambda x: x[0])

    checksum = fcs.calculate_checksum(dtc_lines)
    timestamp = int(time.time())

    if fcs.read_and_compare_checksum(output_file, checksum):
        print("No changes to the DTC header file required.")
    else:
        # Initialize the DTC_NAME_CONSTANT macros
        dtc_macros = []
        dtc_structs = []

        # Process the sorted lines
        for i, line in enumerate(dtc_lines):
            num, dtc_name, dtc_severity, title, description = line
            dtc_macros.append(f"#define {dtc_name:<30} {num}")
            comma = "," if i < len(dtc_lines) - 1 else " "
            dtc_structs.append(f"  {{ {dtc_name:<30}, {dtc_severity:<12} }}{comma} // {description}")

        env = Environment(loader=FileSystemLoader('codegen/templates', encoding='utf-8'))
        # Load the Jinja2 template from its file
        template = env.get_template('dtc_defs.h.j2')

        # Build a context dictionary with the required data
        context = {
            'timestamp_unix': timestamp,
            'timestamp': time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(timestamp)),
            'date': time.strftime('%d.%m.%Y', time.localtime(timestamp)),
            'dtc_macros': dtc_macros,    # pass the dtc_macros list directly
            'dtc_structs': dtc_structs,  # pass the dtc_structs list directly
            'checksum': checksum
        }

        # Render the template with these values to obtain the header text
        header_text = template.render(context)

        # Write the generated header text to the header file
        with open(output_file, "w", encoding='utf-8') as f:
            f.write(header_text)

        print(f"Header file written: {output_file}")

    if fcs.read_and_compare_json_checksum(json_output_file, checksum):
        print("No changes to the DTC JSON file required.")
    else:
        dtc_info = {
            "codegenerator_checksum": checksum,
            'timestamp': time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(timestamp)),
            "dtc_table_data": []
        }

        # Process the sorted lines
        for i, line in enumerate(dtc_lines):
            num, dtc_name, dtc_severity, title, description = line
            dtc_info["dtc_table_data"].append({"num": num, "title": title, "description": description})

        # Write the JSON file with UTF-8 encoding
        with open(json_output_file, 'w', encoding='utf-8') as json_f:
            json.dump(dtc_info, json_f, ensure_ascii=False, indent=4, separators=(',', ': '))

        print(f"JSON file written: {json_output_file}")
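
Note: build_dtcs() expects each non-comment line of src/dtc_defs.txt to carry five
';'-separated fields: number, DTC name, severity, title, description, with severity
being one of the DTCSeverity_t values from the template further below. A hypothetical
input line as a sketch (the name and texts are illustrative, not from this commit):

    # number; name; severity; title; description
    1; DTC_EXAMPLE_UNDERVOLTAGE; DTC_WARN; Undervoltage; Supply voltage below limit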
Software/codegen/filechecksum.py (new file, 45 lines)
@@ -0,0 +1,45 @@
import hashlib
import json

# Compute the SHA-256 checksum of the given data
def calculate_checksum(data):
    sha256 = hashlib.sha256()
    sha256.update(str(data).encode('utf-8'))
    return sha256.hexdigest()

# Read the checksum stored in a file and compare it against the expected one
def read_and_compare_checksum(file_path, expected_checksum):
    try:
        with open(file_path, 'r') as file:
            content = file.read()
            # Look for the line carrying the checksum
            checksum_line_start = content.find("// CODEGENERATOR_CHECKSUM:")
            if checksum_line_start != -1:
                # Extract the checksum from that line
                existing_checksum = content[checksum_line_start + len("// CODEGENERATOR_CHECKSUM:"):].strip()
                # Compare the checksums
                if existing_checksum == expected_checksum:
                    return True
    except FileNotFoundError:
        pass  # The file does not exist, which is fine

    return False

def read_and_compare_json_checksum(json_file_path, expected_checksum):
    try:
        with open(json_file_path, 'r') as json_file:
            # Load the JSON from the file
            data = json.load(json_file)

            # Check whether "codegenerator_checksum" is present in the JSON
            if "codegenerator_checksum" in data:
                existing_checksum = data["codegenerator_checksum"]

                # Compare the checksums
                if existing_checksum == expected_checksum:
                    return True
    except FileNotFoundError:
        pass  # The file does not exist, which is fine

    return False
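
These helpers implement the skip-regeneration logic used by dtcs.py and struct2json.py:
each generated file ends with a "// CODEGENERATOR_CHECKSUM: <hex>" marker, and
generation is skipped when the stored marker matches the checksum of the freshly
parsed data. A minimal usage sketch:

    checksum = calculate_checksum(dtc_lines)   # SHA-256 over str(dtc_lines)
    if not read_and_compare_checksum("include/dtc_defs.h", checksum):
        ...  # data changed: re-render the template, which embeds the new checksum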
Software/codegen/prepare_littlefs.py
@@ -6,9 +6,71 @@ import glob
 import shutil
 import gzip
 import os
+import subprocess
+import platform
 Import("env")
 Import("projenv")
+
+# Check the operating-system platform
+if platform.system() == "Windows":
+    # Set the paths to the tools for Windows
+    html_minifier_path = os.path.join(os.getenv("APPDATA"), "npm", "html-minifier.cmd")
+    uglifyjs_path = os.path.join(os.getenv("APPDATA"), "npm", "uglifyjs.cmd")
+    terser_path = os.path.join(os.getenv("APPDATA"), "npm", "terser.cmd")
+    cssnano_path = os.path.join(os.getenv("APPDATA"), "npm", "cssnano.cmd")
+elif platform.system() == "Linux":
+    # Set the names of the tools for Linux
+    html_minifier_path = "html-minifier"
+    uglifyjs_path = "uglifyjs"
+    terser_path = "terser"
+    cssnano_path = "cssnano"
+else:
+    # Conditions for further operating systems could be added here
+    raise Exception("Unsupported operating system")
+
+
+def minify_html(input_path, output_path):
+    subprocess.run([html_minifier_path, '--collapse-whitespace', '--remove-comments', input_path, '-o', output_path])
+
+def minify_js(input_path, output_path):
+    subprocess.run([terser_path, input_path, '-o', output_path, '-c', '-m'])
+
+def minify_css(input_path, output_path):
+    subprocess.run([cssnano_path, '--no-discardUnused', input_path, output_path])
+
+def process_file(src_path, dest_path):
+    _, file_extension = os.path.splitext(src_path)
+
+    # Extract the directory part of the destination path
+    dest_dir = os.path.dirname(dest_path)
+
+    # Create the directory and all of its parents if they do not exist
+    os.makedirs(dest_dir, exist_ok=True)
+
+    if file_extension.lower() == '.js':
+        minify_js(src_path, dest_path)
+    elif file_extension.lower() == '.css':
+        minify_css(src_path, dest_path)
+    elif file_extension.lower() in ['.html', '.htm']:
+        minify_html(src_path, dest_path)
+    else:
+        # Copy files that cannot be minified straight into the destination folder
+        shutil.copy2(src_path, dest_path)
+
+def strip_files(src_dir, dest_dir):
+    # Create the destination folder and all of its parents if they do not exist
+    os.makedirs(dest_dir, exist_ok=True)
+
+    # Walk all files and subdirectories in the source folder
+    for root, _, files in os.walk(src_dir):
+        for filename in files:
+            src_path = os.path.join(root, filename)
+            dest_path = os.path.relpath(src_path, src_dir)
+            dest_path = os.path.join(dest_dir, dest_path)
+
+            # Process files only (no subdirectories)
+            process_file(src_path, dest_path)
+
+
 def gzip_file(src_path, dst_path):
@@ -47,14 +109,16 @@ def gzip_webfiles(source, target, env):
     filetypes_to_gzip = ['.css', '.png', '.js', '.ico', '.woff2', '.json']
     print('\nGZIP: Starting gzip-Process for LittleFS-Image...\n')
     data_src_dir_path = os.path.join(env.get('PROJECT_DIR'), 'data_src')
+    data_temp_dir_path = os.path.join(env.get('PROJECT_DIR'), 'data_stripped')
+    strip_files(data_src_dir_path, data_temp_dir_path)
     data_dir_path = env.get('PROJECT_DATA_DIR')
     # check if data and datasrc exist. If the first exists and not the second, it renames it
-    if(os.path.exists(data_dir_path) and not os.path.exists(data_src_dir_path)):
+    if(os.path.exists(data_dir_path) and not os.path.exists(data_temp_dir_path)):
         print('GZIP: Directory "'+data_dir_path +
-              '" exists, "'+data_src_dir_path+'" is not found.')
+              '" exists, "'+data_temp_dir_path+'" is not found.')
         print('GZIP: Renaming "' + data_dir_path +
-              '" to "' + data_src_dir_path + '"')
-        os.rename(data_dir_path, data_src_dir_path)
+              '" to "' + data_temp_dir_path + '"')
+        os.rename(data_dir_path, data_temp_dir_path)
     # Delete the 'data' directory
     if(os.path.exists(data_dir_path)):
         print('GZIP: Deleting the "data" directory ' + data_dir_path)
@@ -67,27 +131,27 @@ def gzip_webfiles(source, target, env):
     files_to_copy = []
     files_to_gzip = []

-    all_data_src = getListOfFiles(data_src_dir_path)
+    all_data_src = getListOfFiles(data_temp_dir_path)
     for file in all_data_src:
         file_name, file_extension = os.path.splitext(file)
         print(file_name + " has filetype " + file_extension)
         if file_extension in filetypes_to_gzip:
             files_to_gzip.append(file)
         else:
-            filename_subdir = remove_prefix(file, data_src_dir_path)
+            filename_subdir = remove_prefix(file, data_temp_dir_path)
             files_to_copy.append(filename_subdir)

     for file in files_to_copy:
-        print('GZIP: Copying file from: ' + data_src_dir_path + file + ' to: ' + data_dir_path + file)
+        print('GZIP: Copying file from: ' + data_temp_dir_path + file + ' to: ' + data_dir_path + file)
         os.makedirs(os.path.dirname(data_dir_path + file), exist_ok=True)
-        shutil.copy(data_src_dir_path + file, data_dir_path + file)
+        shutil.copy(data_temp_dir_path + file, data_dir_path + file)
     # Compress and move files

     was_error = False
     try:
         for source_file_path in files_to_gzip:
             print('GZIP: compressing... ' + source_file_path)
-            filename_subdir = remove_prefix(source_file_path, data_src_dir_path)
+            filename_subdir = remove_prefix(source_file_path, data_temp_dir_path)
             target_file_path = data_dir_path + filename_subdir
             os.makedirs(os.path.dirname(target_file_path), exist_ok=True)
             print('GZIP: Compressed... ' + target_file_path)
@@ -100,6 +164,7 @@ def gzip_webfiles(source, target, env):
         print('GZIP: Failure/Incomplete.\n')
     else:
         print('GZIP: Compressed correctly.\n')
+    shutil.rmtree(data_temp_dir_path)

     return
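
The tool paths resolved at the top of this script point at globally installed npm
CLIs (html-minifier, terser, cssnano); uglifyjs_path is resolved as well, but the
hunks shown here only invoke terser. A small pre-flight check, as a sketch that is
not part of this commit:

    import shutil

    for tool in (html_minifier_path, terser_path, cssnano_path):
        if shutil.which(tool) is None:
            raise FileNotFoundError("minifier CLI not found: " + tool)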
Software/codegen/run_pre.py (new file, 8 lines)
@@ -0,0 +1,8 @@
Import("env")  # pylint: disable=undefined-variable
env.Execute("\"$PYTHONEXE\" -m pip install jinja2")

import struct2json
import dtcs

struct2json.struct2json()
dtcs.build_dtcs()
Software/codegen/struct2json.py (new file, 113 lines)
@@ -0,0 +1,113 @@
import os
import time
from jinja2 import Environment, FileSystemLoader
import re

import filechecksum as fcs

# Paths to the input and output files
input_file = "include/eeprom.h"
output_sourcefile = "src/struct2json.cpp"
output_headerfile = "include/struct2json.h"
# List of the variables/structs to look for
variable_names = ['ConfigData', 'PersistenceData']

def get_types(file_content, variable_names):
    result = {}

    # Remove comments to avoid unwanted interference
    file_content = re.sub(r'\/\*.*?\*\/', '', file_content, flags=re.DOTALL)
    file_content = re.sub(r'\/\/.*', '', file_content)

    for var_name in variable_names:
        # Build a regular expression to extract the variable's type
        pattern = re.compile(r'\b(?:extern\s+)?(\w+)\s+' + re.escape(var_name) + r'\s*;')
        match = pattern.search(file_content)

        if match:
            # Extract the type from the match
            type_match = match.group(1)
            result[var_name] = type_match

    return result

def extract_struct_fields(file_content, variable_types):
    result = {}

    # Remove comments to avoid unwanted interference
    file_content = re.sub(r'\/\*.*?\*\/', '', file_content, flags=re.DOTALL)
    file_content = re.sub(r'\/\/.*', '', file_content)

    for var_name, var_type in variable_types.items():
        # Build a regular expression to extract the struct definition
        pattern = re.compile(r'typedef\s+struct\s*{([^}]*)}\s*' + re.escape(var_type) + r'\s*;')
        match = pattern.search(file_content)

        if match:
            # Extract the fields from the match
            fields_match = re.findall(r'\b(\w+)\s+(\w+)(?:\[(\d+)\])?\s*;', match.group(1))
            if fields_match:
                result[var_name] = {'type': var_type, 'fields': {}}
                for field_type, field_name, array_size in fields_match:
                    if array_size:
                        result[var_name]['fields'][field_name] = {'type': field_type, 'size': int(array_size)}
                    else:
                        result[var_name]['fields'][field_name] = {'type': field_type}

    return result

def struct2json():
    # Check whether the output directories exist; create them otherwise
    output_dir_source = os.path.dirname(output_sourcefile)
    if not os.path.exists(output_dir_source):
        os.makedirs(output_dir_source)
    output_dir_header = os.path.dirname(output_headerfile)
    if not os.path.exists(output_dir_header):
        os.makedirs(output_dir_header)

    # Take a Unix timestamp
    timestamp = int(time.time())

    # Parse structs
    with open(input_file, 'r') as file:
        content = file.read()

    variable_types = get_types(content, variable_names)
    structs = extract_struct_fields(content, variable_types)
    checksum = fcs.calculate_checksum(structs)

    env = Environment(loader=FileSystemLoader('codegen/templates', encoding='utf-8'))
    # Load the Jinja2 templates from their files
    template_c = env.get_template('struct2json.cpp.j2')
    template_h = env.get_template('struct2json.h.j2')

    # Build a context dictionary with the required data
    context = {
        'timestamp_unix': timestamp,
        'timestamp': time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(timestamp)),
        'date': time.strftime('%d.%m.%Y', time.localtime(timestamp)),
        'structs': structs,
        'checksum': checksum
    }

    # Check whether the checksums match
    if fcs.read_and_compare_checksum(output_sourcefile, checksum):
        print("No changes to the source file required.")
    else:
        # Render the template with these values to obtain the source text
        source_text = template_c.render(context)
        # Write the generated source text to the source file
        with open(output_sourcefile, "w", encoding='utf-8') as f:
            f.write(source_text)
        print(f"Source file written: {output_sourcefile}")

    # Check whether the checksums match
    if fcs.read_and_compare_checksum(output_headerfile, checksum):
        print("No changes to the header file required.")
    else:
        # Render the template with these values to obtain the header text
        header_text = template_h.render(context)
        # Write the generated header text to the header file
        with open(output_headerfile, "w", encoding='utf-8') as f:
            f.write(header_text)
        print(f"Header file written: {output_headerfile}")
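
For reference, a sketch of eeprom.h content that the two regexes above would match;
the struct layout is hypothetical, only the ConfigData name and the typedef-struct
shape come from this commit:

    header = """
    typedef struct {
        uint16_t version;
        char device_name[16];
    } ConfigData_t;
    extern ConfigData_t ConfigData;
    """
    types = get_types(header, ['ConfigData'])   # {'ConfigData': 'ConfigData_t'}
    fields = extract_struct_fields(header, types)
    # fields['ConfigData']['fields'] ->
    # {'version': {'type': 'uint16_t'}, 'device_name': {'type': 'char', 'size': 16}}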
Software/codegen/templates/dtc_defs.h.j2 (new file, 54 lines)
@@ -0,0 +1,54 @@
/**
 * @file dtc_defs.h
 *
 * @brief Header file for Diagnostic Trouble Code (DTC) definitions in the DE-Timer application.
 *
 * This file contains definitions for Diagnostic Trouble Codes (DTC) in the DE-Timer project.
 * It includes enums for DTC active status, severity levels, and specific DTC codes.
 * The file also defines an array of DTC definitions and a timestamp indicating the generation time.
 *
 * @note This file is auto-generated by a script on {{ timestamp }}.
 *
 * @author Marcel Peterkau
 * @date {{ date }}
 */

#ifndef DTC_DEFS_H
#define DTC_DEFS_H

#include <stdint.h>

typedef uint32_t DTCNum_t;

typedef enum
{
    DTC_INACTIVE,
    DTC_ACTIVE,
    DTC_PREVIOUS
} DTCActive_t;

typedef enum
{
    DTC_NONE,
    DTC_INFO,
    DTC_WARN,
    DTC_CRITICAL
} DTCSeverity_t;

typedef struct {
    DTCNum_t code;
    DTCSeverity_t severity;
} DTC_t;

{% for dtc in dtc_macros -%}
{{ dtc }}
{% endfor %}
const DTC_t dtc_definitions[] = {
{% for struct in dtc_structs -%}
{{ struct }}
{% endfor -%}
};

#endif // DTC_DEFS_H

// CODEGENERATOR_CHECKSUM: {{ checksum }}
Software/codegen/templates/struct2json.cpp.j2 (new file, 25 lines)
@@ -0,0 +1,25 @@
/**
 * @file struct2json.cpp
 *
 * @brief Implementation file for converting structs to JSON objects.
 *
 * @note This file is auto-generated by a script on {{ timestamp }}.
 *
 * @author Marcel Peterkau
 * @date {{ date }}
 */

#include "struct2json.h"

{% for var_name, var_info in structs.items() -%}
void generateJsonObject_{{ var_name }}(JsonObject data)
{
    {% for field_name, field_type in var_info['fields'].items() -%}
    data["{{ field_name }}"] = {{ var_name }}.{{ field_name }};
    {% endfor -%}
}

{% endfor %}

// CODEGENERATOR_CHECKSUM: {{ checksum }}
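
As a sketch of the rendered output: for a hypothetical ConfigData struct with a
single 'version' field, this template would expand to roughly

    void generateJsonObject_ConfigData(JsonObject data)
    {
        data["version"] = ConfigData.version;
    }

followed by the trailing // CODEGENERATOR_CHECKSUM comment.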
Software/codegen/templates/struct2json.h.j2 (new file, 26 lines)
@@ -0,0 +1,26 @@
/**
 * @file struct2json.h
 *
 * @brief Header file for converting structs to JSON objects.
 *
 * @note This file is auto-generated by a script on {{ timestamp }}.
 *
 * @author Marcel Peterkau
 * @date {{ date }}
 */

#ifndef _STRUCT2JSON_H_
#define _STRUCT2JSON_H_

#include <Arduino.h>
#include <ArduinoJson.h>

#include "eeprom.h"

{% for var_name, var_info in structs.items() -%}
void generateJsonObject_{{ var_name }}(JsonObject data);
{% endfor %}

#endif /* _STRUCT2JSON_H_ */

// CODEGENERATOR_CHECKSUM: {{ checksum }}
Software/platformio.ini
@@ -25,11 +25,10 @@ upload_speed = 921600
 ; --auth=${wifi_cred.ota_password}

 build_flags=
-	!python git_rev_macro.py
+	!python codegen/git_rev_macro.py
 	-DATOMIC_FS_UPDATE
-	;-DFEATURE_ENABLE_WIFI_CLIENT
+	-DFEATURE_ENABLE_WIFI_CLIENT
 	;-DFEATURE_ENABLE_LORA
-	;-DCAPTIVE
 	-DFEATURE_ENABLE_UARTLORA
 	-DWIFI_AP_IP_GW=10,0,0,1
 	-DADMIN_PASSWORD=${wifi_cred.ota_password}
@@ -43,18 +42,22 @@ build_flags=
 
 board_build.filesystem = littlefs
 board_build.ldscript = eagle.flash.4m1m.ld
-extra_scripts = post:prepare_littlefs.py
+extra_scripts =
+	post:codegen/prepare_littlefs.py
+	pre:codegen/run_pre.py

 monitor_filters = esp8266_exception_decoder
 monitor_speed = 115200

+lib_ldf_mode = deep
 lib_deps =
 	;xreef/EByte LoRa E220 library@^1.0.6 ; made Lib local, due to changes for I2C-controller M0,M1-Pins
 	sstaub/Ticker@^4.4.0
 	robtillaart/PCF8574 @ ^0.3.7
 	adafruit/Adafruit INA219 @ ^1.1.1
 	akj7/TM1637 Driver @ ^2.1.2
-	me-no-dev/ESPAsyncTCP @ ^1.2.2
+	sstaub/Ticker @ ^4.4.0
-	robtillaart/I2C_EEPROM @ ^1.5.2
+	robtillaart/I2C_EEPROM @ ^1.8.2
+	esphome/ESPAsyncWebServer-esphome @ ^3.2.2
 	sandeepmistry/LoRa @ ^0.8.0
-	bblanchon/ArduinoJson @ ^6.19.4
+	bblanchon/ArduinoJson @ ^7.0.4