[Internal BoM] Added CSV aggregate

Related to #248
This commit is contained in:
Salvador E. Tropea 2022-09-13 09:25:14 -03:00
parent 1f1a56e5ac
commit 661677608e
15 changed files with 315 additions and 15 deletions

View File

@@ -19,6 +19,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - Option to change the title (similar to PCB Variant)
 - Render_3D: Options to disable some technical layers and control the
   silkscreen clipping. (#282)
+- Internal BoM: Now you can aggregate components using CSV files. (See #248)
 ### Fixed
 - Problems to compress netlists. (#287)

View File

@@ -1368,7 +1368,13 @@ Notes:
 - `level`: [number=0] Used to group columns. The XLSX output uses it to collapse columns.
 - `style`: [string='modern-blue'] Head style: modern-blue, modern-green, modern-red and classic.
 - `aggregate`: [list(dict)] Add components from other projects.
+  You can use CSV files; the first row must contain the names of the fields.
+  The `Reference` and `Value` columns are mandatory, and in most cases `Part` is also needed.
+  The `Part` column should contain the name/type of the component. This is important for
+  passive components (R, L, C, etc.). If this information isn't available, consider
+  configuring the grouping to exclude the `Part`.
   * Valid keys:
+    - `delimiter`: [string=','] Delimiter used for CSV files.
     - `file`: [string=''] Name of the schematic to aggregate.
     - `name`: [string=''] Name to identify this source. If empty we use the name of the schematic.
     - `number`: [number=1] Number of boards to build (components multiplier). Use negative to subtract.
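To make the requirements above concrete, here is a small, hypothetical validation helper. It is not part of KiBot's API; the column rules and the singular/plural reference fallback mirror the loader added in out_bom.py further down, and the function name is invented for illustration.

import csv

# Illustrative sketch only: check that a CSV intended for `aggregate` has the
# columns described above. `Reference` and `Value` are mandatory; `Part` is
# strongly recommended so passives (R, L, C, ...) group correctly.
def check_aggregate_csv(fname, delimiter=','):
    with open(fname, newline='') as f:
        header = [h.strip().lower() for h in next(csv.reader(f, delimiter=delimiter))]
    # The loader accepts both the singular and plural spelling of the reference column
    if 'reference' not in header and 'references' not in header:
        raise ValueError('Missing `Reference` column in {}'.format(fname))
    if 'value' not in header:
        raise ValueError('Missing `Value` column in {}'.format(fname))
    if 'part' not in header:
        print('Warning: no `Part` column, grouping may mix different component types')

# Example calls using the test data added by this commit:
# check_aggregate_csv('tests/data/merge_2.csv')                 # comma separated
# check_aggregate_csv('tests/data/merge_3.csv', delimiter=';')  # uses the `delimiter` option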

View File

@@ -107,10 +107,17 @@ outputs:
     type: 'bom'
     dir: 'Example/bom_dir'
     options:
-      # [list(dict)] Add components from other projects
+      # [list(dict)] Add components from other projects.
+      # You can use CSV files; the first row must contain the names of the fields.
+      # The `Reference` and `Value` columns are mandatory, and in most cases `Part` is also needed.
+      # The `Part` column should contain the name/type of the component. This is important for
+      # passive components (R, L, C, etc.). If this information isn't available, consider
+      # configuring the grouping to exclude the `Part`.
       aggregate:
+        # [string=','] Delimiter used for CSV files
+        - delimiter: ','
         # [string=''] Name of the schematic to aggregate
-        - file: ''
+          file: ''
         # [string=''] Name to identify this source. If empty we use the name of the schematic
           name: ''
         # [number=1] Number of boards to build (components multiplier). Use negative to subtract

View File

@@ -1040,6 +1040,15 @@ class SchematicComponent(object):
             return '{} ({})'.format(ref, self.name)
         return '{} ({} {})'.format(ref, self.name, self.value)
 
+    def split_ref(self, f=None):
+        m = SchematicComponent.ref_re.match(self.ref)
+        if not m:
+            if f:
+                raise SchFileError('Malformed component reference', self.ref, f)
+            else:
+                raise SchError('Malformed component reference `{}`'.format(self.ref))
+        self.ref_prefix, self.ref_suffix = m.groups()
+
     @staticmethod
     def load(f, project, sheet_path, sheet_path_h, libs, fields, fields_lc):
         # L lib:name reference
@@ -1132,10 +1141,7 @@
                 logger.warning(W_NOANNO + 'Component {} is not annotated'.format(comp))
                 comp.annotation_error = True
             # Separate the reference in its components
-            m = SchematicComponent.ref_re.match(comp.ref)
-            if not m:
-                raise SchFileError('Malformed component reference', comp.ref, f)
-            comp.ref_prefix, comp.ref_suffix = m.groups()
+            comp.split_ref(f)
             # Location in the project
             comp.sheet_path = sheet_path
             comp.sheet_path_h = sheet_path_h
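As an aside, the new split_ref() only factors out the prefix/number split that both schematic parsers were doing inline. A rough standalone sketch of what it computes; the real ref_re pattern is defined in v5_sch.py, the pattern below is an assumption for illustration:

import re

# Assumed shape of SchematicComponent.ref_re: a non-digit prefix followed by a number.
ref_re = re.compile(r'^(\D+)(\d+)$')

def split_ref(ref):
    m = ref_re.match(ref)
    if not m:
        # Mirrors the SchError/SchFileError paths in the method above
        raise ValueError('Malformed component reference `{}`'.format(ref))
    return m.groups()

print(split_ref('R10'))   # ('R', '10')
print(split_ref('C205'))  # ('C', '205')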

View File

@@ -988,10 +988,7 @@ class SchematicComponentV6(SchematicComponent):
     def set_ref(self, ref):
         self.ref = ref
         # Separate the reference in its components
-        m = SchematicComponent.ref_re.match(ref)
-        if not m:
-            raise SchError('Malformed component reference `{}`'.format(ref))
-        self.ref_prefix, self.ref_suffix = m.groups()
+        self.split_ref()
         self.set_field('Reference', ref)
 
     def set_value(self, value):

View File

@@ -224,6 +224,7 @@ W_NOTYET = '(W091) '
 W_NOMATCH = '(W092) '
 W_DOWNTOOL = '(W093) '
 W_NOPREFLIGHTS = '(W094) '
+W_NOPART = '(W095) '
 # Somehow arbitrary, the colors are real, but can be different
 PCB_MAT_COLORS = {'fr1': "937042", 'fr2': "949d70", 'fr3': "adacb4", 'fr4': "332B16", 'fr5': "6cc290"}
 PCB_FINISH_COLORS = {'hal': "8b898c", 'hasl': "8b898c", 'imag': "8b898c", 'enig': "cfb96e", 'enepig': "cfb96e",

View File

@@ -16,15 +16,17 @@ Dependencies:
     debian: python3-xlsxwriter
   downloader: python
 """
+import csv
+from copy import deepcopy
 import os
 import re
-from copy import deepcopy
 from .gs import GS
-from .misc import W_BADFIELD, W_NEEDSPCB, DISTRIBUTORS, IFILT_EXPAND_TEXT_VARS
+from .misc import W_BADFIELD, W_NEEDSPCB, DISTRIBUTORS, IFILT_EXPAND_TEXT_VARS, W_NOPART
 from .optionable import Optionable, BaseOptions
 from .registrable import RegOutput
 from .error import KiPlotConfigurationError
 from .kiplot import get_board_comps_data, load_any_sch
+from .kicad.v5_sch import SchematicComponent, SchematicField
 from .bom.columnlist import ColumnList, BoMError
 from .bom.bom import do_bom
 from .var_kibom import KiBoM
@@ -47,6 +49,20 @@ DEFAULT_ALIASES = [['r', 'r_small', 'res', 'resistor'],
                    ]
 
 
+class CompsFromCSV(object):
+    """ Class used to fake a schematic using a CSV file """
+    def __init__(self, fname, comps):
+        super().__init__()
+        self.revision = ''
+        self.date = GS.format_date('', fname, 'SCH')
+        self.title = os.path.basename(fname)
+        self.company = ''
+        self.comps = comps
+
+    def get_components(self):
+        return self.comps
+
+
 class BoMJoinField(Optionable):
     """ Fields to join """
     def __init__(self, field=None):
@@ -361,6 +377,8 @@ class Aggregate(Optionable):
             """ A prefix to add to all the references from this project """
             self.number = 1
             """ Number of boards to build (components multiplier). Use negative to subtract """
+            self.delimiter = ','
+            """ Delimiter used for CSV files """
 
     def config(self, parent):
         super().config(parent)
@@ -454,7 +472,12 @@ class BoMOptions(BaseOptions):
                 By default the field indicated in `fit_field`, the field used for variants and
                 the field `part` are excluded """
             self.aggregate = Aggregate
-            """ [list(dict)] Add components from other projects """
+            """ [list(dict)] Add components from other projects.
+                You can use CSV files; the first row must contain the names of the fields.
+                The `Reference` and `Value` columns are mandatory, and in most cases `Part` is also needed.
+                The `Part` column should contain the name/type of the component. This is important for
+                passive components (R, L, C, etc.). If this information isn't available, consider
+                configuring the grouping to exclude the `Part`. """
             self.ref_id = ''
             """ A prefix to add to all the references from this project. Used for multiple projects """
             self.source_by_id = False
@@ -683,6 +706,81 @@ class BoMOptions(BaseOptions):
         (self.columns_ce, self.column_levels_ce, self.column_comments_ce, self.column_rename_ce,
          self.join_ce) = self.process_columns_config(self.cost_extra_columns, valid_columns, extra_columns, add_all=False)
 
+    def load_csv(self, fname, project, delimiter):
+        """ Load components from a CSV file """
+        comps = []
+        logger.debug('Importing components from `{}`'.format(fname))
+        with open(fname) as csvfile:
+            reader = csv.reader(csvfile, delimiter=delimiter)
+            header = [x.lower() for x in next(reader)]
+            logger.debugl(1, '- CSV header {}'.format(header))
+            # The header must contain at least the reference and the value
+            ref_n = ColumnList.COL_REFERENCE_L
+            try:
+                ref_index = header.index(ref_n)
+            except ValueError:
+                try:
+                    ref_index = header.index(ref_n[:-1])
+                except ValueError:
+                    raise KiPlotConfigurationError('Missing `{}` in aggregated file `{}`'.format(ref_n, fname))
+            try:
+                val_index = header.index(ColumnList.COL_VALUE_L)
+            except ValueError:
+                raise KiPlotConfigurationError('Missing `{}` in aggregated file `{}`'.format(ColumnList.COL_VALUE_L, fname))
+            # Optional important fields:
+            fp_index = None
+            try:
+                fp_index = header.index(ColumnList.COL_FP_L)
+            except ValueError:
+                pass
+            ds_index = None
+            try:
+                ds_index = header.index(ColumnList.COL_DATASHEET_L)
+            except ValueError:
+                pass
+            pn_index = None
+            try:
+                pn_index = header.index(ColumnList.COL_PART_L)
+            except ValueError:
+                logger.warning(W_NOPART+'No `Part` specified, using `Value` instead, this can impact the grouping')
+            min_num = len(header)
+            for r in reader:
+                c = SchematicComponent()
+                c.unit = 0
+                c.project = project
+                c.lib = ''
+                c.sheet_path_h = '/'+project
+                for n, f in enumerate(r):
+                    number = None
+                    if n == ref_index:
+                        c.ref = c.f_ref = str(f)
+                        c.split_ref()
+                        number = 0
+                    elif n == val_index:
+                        c.value = str(f)
+                        if pn_index is None:
+                            c.name = str(f)
+                        number = 1
+                    elif n == fp_index:
+                        c.footprint = str(f)
+                        c.footprint_lib = None
+                        number = 2
+                    elif n == ds_index:
+                        c.datasheet = str(f)
+                        number = 3
+                    elif n == pn_index:
+                        c.name = str(f)
+                        number = -1
+                    fld = SchematicField()
+                    fld.number = min_num+n if number is None else number
+                    fld.value = str(f)
+                    fld.name = header[n]
+                    c.add_field(fld)
+                comps.append(c)
+                logger.debugl(2, '- Adding component {}'.format(c))
+        comps.sort(key=lambda g: g.ref)
+        return CompsFromCSV(fname, comps)
+
     def aggregate_comps(self, comps):
         self.qtys = {GS.sch_basename: self.number}
         for prj in self.aggregate:
@@ -691,7 +789,11 @@
             logger.debug('Adding components from project {} ({}) using reference id `{}`'.
                          format(prj.name, prj.file, prj.ref_id))
             self.qtys[prj.name] = prj.number
-            prj.sch = load_any_sch(prj.file, prj.name)
+            ext = os.path.splitext(prj.file)[1]
+            if ext == '.sch' or ext == '.kicad_sch':
+                prj.sch = load_any_sch(prj.file, prj.name)
+            else:
+                prj.sch = self.load_csv(prj.file, prj.name, prj.delimiter)
             new_comps = prj.sch.get_components()
             for c in new_comps:
                 c.ref = prj.ref_id+c.ref

tests/data/merge_2.csv Normal file
View File

@@ -0,0 +1,7 @@
Reference,Part,Value,Footprint
R1,R,10k,RC0805JR-0710KL
R2,R,1000,RC0805JR-071KL
R3,R,1000,RC0805JR-071KL
C1,C,10nF,GRM155R71E103KA01D
C2,C,1nF,GRM1555C1H102JA01D
R4,R,1000,RC0805JR-071KL

tests/data/merge_3.csv Normal file
View File

@@ -0,0 +1,6 @@
Part;Value;References;Footprint
R;10k;R1;RC0805JR-0710KL
R;10k;R2;RC0805JR-0710KL
R;10k;R3;RC0805JR-0710KL
R;10k;R4;RC0805JR-0710KL
R;1k;R5;RC0805JR-071KL
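This second sample file also shows why the docs above insist on a `Part` column: grouping by value alone can merge different component types. An illustrative sketch of that idea (plain csv module, not KiBot's actual grouping code) using the same ';'-delimited shape:

import csv
import io
from collections import defaultdict

# Data in the same shape as tests/data/merge_3.csv (';' delimited, see the new `delimiter` option)
data = """Part;Value;References;Footprint
R;10k;R1;RC0805JR-0710KL
R;10k;R2;RC0805JR-0710KL
R;1k;R5;RC0805JR-071KL
"""

groups = defaultdict(list)
for row in csv.DictReader(io.StringIO(data), delimiter=';'):
    # Group by (Part, Value): without a `Part` column, components of different types
    # sharing a value string would collapse into one group (the reason for W_NOPART).
    groups[(row['Part'], row['Value'])].append(row['References'])

for key, refs in groups.items():
    print(key, refs)   # ('R', '10k') ['R1', 'R2'] then ('R', '1k') ['R5']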

View File

@@ -1573,6 +1573,20 @@ def test_int_bom_merge_csv_1(test_dir):
     ctx.clean_up()
 
 
+def test_int_bom_merge_csv_2(test_dir):
+    prj = 'merge_1'
+    yaml = 'int_bom_merge_csv_2'
+    ctx = context.TestContextSCH(test_dir, prj, yaml, BOM_DIR)
+    ctx.run(extra_debug=True)
+    rows, header, info = ctx.load_csv(prj+'-bom.csv')
+    ref_column = header.index(REF_COLUMN_NAME)
+    check_kibom_test_netlist(rows, ref_column, 4, None, MERGED_COMPS)
+    src_column = header.index(SOURCE_BOM_COLUMN_NAME)
+    check_source(rows, 'A:R1', ref_column, src_column, MERGED_R1_SRC)
+    ctx.search_err(r'Stats for')
+    ctx.clean_up()
+
+
 def test_int_bom_merge_html_1(test_dir):
     prj = 'merge_1'
     yaml = 'int_bom_merge_html_1'
@@ -1589,6 +1603,20 @@ def test_int_bom_merge_html_1(test_dir):
     ctx.clean_up()
 
 
+def test_int_bom_merge_html_2(test_dir):
+    prj = 'merge_1'
+    yaml = 'int_bom_merge_html_2'
+    ctx = context.TestContextSCH(test_dir, prj, yaml, BOM_DIR)
+    ctx.run()
+    rows, header, info = ctx.load_html(prj+'-bom.html')
+    logging.debug(rows[0])
+    ref_column = header[0].index(REF_COLUMN_NAME)
+    check_kibom_test_netlist(rows[0], ref_column, 4, None, MERGED_COMPS)
+    src_column = header[0].index(SOURCE_BOM_COLUMN_NAME)
+    check_source(rows[0], 'A:R1', ref_column, src_column, MERGED_R1_SRC)
+    ctx.clean_up()
+
+
 def test_int_bom_merge_xlsx_1(test_dir):
     prj = 'merge_1'
     yaml = 'int_bom_merge_xlsx_1'
@@ -1604,6 +1632,19 @@ def test_int_bom_merge_xlsx_1(test_dir):
     ctx.clean_up()
 
 
+def test_int_bom_merge_xlsx_2(test_dir):
+    prj = 'merge_1'
+    yaml = 'int_bom_merge_xlsx_2'
+    ctx = context.TestContextSCH(test_dir, prj, yaml, BOM_DIR)
+    ctx.run()
+    rows, header, info = ctx.load_xlsx(prj+'-bom.xlsx')
+    ref_column = header.index(REF_COLUMN_NAME)
+    check_kibom_test_netlist(rows, ref_column, 4, None, MERGED_COMPS)
+    src_column = header.index(SOURCE_BOM_COLUMN_NAME)
+    check_source(rows, 'A:R1', ref_column, src_column, MERGED_R1_SRC)
+    ctx.clean_up()
+
+
 def test_int_bom_merge_xml_1(test_dir):
     prj = 'merge_1'
     yaml = 'int_bom_merge_xml_1'
@@ -1619,6 +1660,19 @@ def test_int_bom_merge_xml_1(test_dir):
     ctx.clean_up()
 
 
+def test_int_bom_merge_xml_2(test_dir):
+    prj = 'merge_1'
+    yaml = 'int_bom_merge_xml_2'
+    ctx = context.TestContextSCH(test_dir, prj, yaml, BOM_DIR)
+    ctx.run()
+    rows, header = ctx.load_xml(prj+'-bom.xml')
+    ref_column = header.index(REF_COLUMN_NAME)
+    check_kibom_test_netlist(rows, ref_column, 4, None, MERGED_COMPS)
+    src_column = header.index(SOURCE_BOM_COLUMN_NAME.replace(' ', '_'))
+    check_source(rows, 'A:R1', ref_column, src_column, MERGED_R1_SRC)
+    ctx.clean_up()
+
+
 def test_int_bom_subparts_1(test_dir):
     prj = 'subparts'
     ctx = context.TestContextSCH(test_dir, prj, 'int_bom_subparts_1')

View File

@@ -0,0 +1,21 @@
# Example KiBot config file
kibot:
  version: 1

outputs:
  - name: result
    comment: Test RAR compress
    type: compress
    options:
      output: 'test.%x'
      format: RAR
      files:
        - source: tests/board_samples/kicad_5/test_v5.*
          from_cwd: true
          dest: source
        - source: tests/board_samples/kicad_5/deeper.sch
          from_cwd: true
          dest: source
        - source: tests/board_samples/kicad_5/sub-sheet.sch
          from_cwd: true
          dest: source

View File

@@ -0,0 +1,23 @@
# Example KiBot config file
kibot:
  version: 1

outputs:
  - name: 'bom_csv'
    comment: "Bill of Materials in CSV format"
    type: bom
    dir: BoM
    options:
      format: CSV
      ref_id: 'A:'
      source_by_id: true
      use_alt: true
      aggregate:
        - file: tests/data/merge_2.csv
          name: 2nd project
          ref_id: 'B:'
          number: 2
        - file: tests/data/merge_3.csv
          ref_id: 'C:'
          delimiter: ';'
          number: 4

View File

@@ -0,0 +1,23 @@
# Example KiBot config file
kibot:
  version: 1

outputs:
  - name: 'bom_csv'
    comment: "Bill of Materials in CSV format"
    type: bom
    dir: BoM
    options:
      format: HTML
      ref_id: 'A:'
      source_by_id: true
      use_alt: true
      aggregate:
        - file: tests/data/merge_2.csv
          name: 2nd project
          ref_id: 'B:'
          number: 2
        - file: tests/data/merge_3.csv
          ref_id: 'C:'
          delimiter: ';'
          number: 4

View File

@@ -0,0 +1,23 @@
# Example KiBot config file
kibot:
  version: 1

outputs:
  - name: 'bom_csv'
    comment: "Bill of Materials in CSV format"
    type: bom
    dir: BoM
    options:
      format: XLSX
      ref_id: 'A:'
      source_by_id: true
      use_alt: true
      aggregate:
        - file: tests/data/merge_2.csv
          name: 2nd project
          ref_id: 'B:'
          number: 2
        - file: tests/data/merge_3.csv
          ref_id: 'C:'
          delimiter: ';'
          number: 4

View File

@@ -0,0 +1,23 @@
# Example KiBot config file
kibot:
  version: 1

outputs:
  - name: 'bom_csv'
    comment: "Bill of Materials in CSV format"
    type: bom
    dir: BoM
    options:
      format: XML
      ref_id: 'A:'
      source_by_id: true
      use_alt: true
      aggregate:
        - file: tests/data/merge_2.csv
          name: 2nd project
          ref_id: 'B:'
          number: 2
        - file: tests/data/merge_3.csv
          ref_id: 'C:'
          delimiter: ';'
          number: 4