From 17f5f07f20515b0dc93bbd64145d8996c3ec0e3f Mon Sep 17 00:00:00 2001 From: Amro Tork Date: Fri, 27 Jan 2023 07:29:25 +0200 Subject: [PATCH 01/71] - Adding klayout dualgate table. --- Makefile | 36 +- klayout/drc/README.md | 99 ++ klayout/drc/rule_decks/dualgate.drc | 87 ++ klayout/drc/rule_decks/main.drc | 1066 +++++++++++++++++ klayout/drc/run_drc.py | 709 +++++++++++ klayout/drc/testing/README.md | 22 + klayout/drc/testing/run_regression.py | 876 ++++++++++++++ klayout/drc/testing/testcases/README.md | 7 + .../drc/testing/testcases/unit/dualgate.gds | Bin 0 -> 25068 bytes requirements.txt | 7 +- 10 files changed, 2898 insertions(+), 11 deletions(-) create mode 100644 klayout/drc/README.md create mode 100644 klayout/drc/rule_decks/dualgate.drc create mode 100644 klayout/drc/rule_decks/main.drc create mode 100644 klayout/drc/run_drc.py create mode 100644 klayout/drc/testing/README.md create mode 100644 klayout/drc/testing/run_regression.py create mode 100644 klayout/drc/testing/testcases/README.md create mode 100644 klayout/drc/testing/testcases/unit/dualgate.gds diff --git a/Makefile b/Makefile index 05b2a340..aee23758 100644 --- a/Makefile +++ b/Makefile @@ -22,6 +22,9 @@ REQUIREMENTS_FILE := requirements.txt # https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html ENVIRONMENT_FILE := pdk_regression.yml +# Path to regression +KLAYOUT_TESTS := klayout/drc/testing/ + include third_party/make-env/conda.mk # Lint python code @@ -31,21 +34,36 @@ lint: | $(CONDA_ENV_PYTHON) ################################################################################ ## DRC Regression section ################################################################################ -# DRC main testing -test-DRC-main: | $(CONDA_ENV_PYTHON) - @$(IN_CONDA_ENV) klayout -v +#================================= +# ----- test-DRC_regression ------ +#================================= +.ONESHELL: +test-DRC-main : | $(CONDA_ENV_PYTHON) + @$(IN_CONDA_ENV) python3 $(KLAYOUT_TESTS)/run_regression.py + @echo "========== DRC-Regression is done ==========" + +.ONESHELL: +test-DRC-% : | $(CONDA_ENV_PYTHON) + @$(IN_CONDA_ENV) python3 $(KLAYOUT_TESTS)/run_regression.py --table=$* + @echo "========== Table DRC-Regression is done ==========" + +#================================= +# -------- test-DRC-switch ------- +#================================= + +# .ONESHELL: +# test-DRC-switch: | $(CONDA_ENV_PYTHON) +# @echo "========== DRC-Switch testing ==========" +# @$(IN_CONDA_ENV) python3 $(KLAYOUT_TESTS)/run_switch_checking.py && rm -rf pattern.csv -# DRC main testing -test-DRC-switch: | $(CONDA_ENV_PYTHON) - @$(IN_CONDA_ENV) klayout -v ################################################################################ -## DRC Regression section +## LVS Regression section ################################################################################ -# DRC main testing +# LVS main testing test-LVS-main: | $(CONDA_ENV_PYTHON) @$(IN_CONDA_ENV) klayout -v -# DRC main testing +# LVS main testing test-LVS-switch: | $(CONDA_ENV_PYTHON) @$(IN_CONDA_ENV) klayout -v diff --git a/klayout/drc/README.md b/klayout/drc/README.md new file mode 100644 index 00000000..000809a3 --- /dev/null +++ b/klayout/drc/README.md @@ -0,0 +1,99 @@ +# DRC Documentation + +Explains how to use the runset. 
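
The DRC regression that exercises these rule decks can also be driven from the repository `Makefile`; a minimal sketch (target names taken from the Makefile at the repo root, assuming the conda environment from `pdk_regression.yml` is installed):

```bash
# Full DRC regression over all rule tables.
make test-DRC-main

# Per-table regression via the test-DRC-% pattern rule,
# e.g. the dualgate table added by this patch:
make test-DRC-dualgate
```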

## Folder Structure

```text
📦drc
 ┣ 📦testing
 ┣ 📜GF180_MCU.lyp
 ┣ 📜README.md
 ┣ 📜gf_018mcu.drc
 ┣ 📜gf_018mcu_antenna.drc
 ┣ 📜gf_018mcu_density.drc
 ┗ 📜run_drc.py
```

## Rule Deck Usage

The `run_drc.py` script runs the GF180 technology DRC rule decks on a GDS file, with switches to select subsets of all checks.

### Requirements

Please make sure to define the `PDK_ROOT` and `PDK` environment variables before running. For example, to work with this repo, go to the `rules/klayout` directory and run:

```bash
export PDK_ROOT=`pwd`
export PDK="drc"
```

Also, please make sure to install the required Python packages listed in `../requirements.test.txt`:

```bash
pip install -r ../requirements.test.txt
```

### Switches

The list of switches used for running DRC:

1. **FEOL** : Default is on. Use it for checking Front End Of Line layers (wells, diffusion, polys, contacts).
2. **BEOL** : Default is on. Use it for checking Back End Of Line layers (metal layers, top metal layer, vias).
3. **GF180MCU**=A : Combined options of metal_level=3, mim_option=A, metal_top=30K, poly_res=1K, and mim_cap=2.
4. **GF180MCU**=B : Combined options of metal_level=4, mim_option=B, metal_top=11K, poly_res=1K, and mim_cap=2.
5. **GF180MCU**=C : Combined options of metal_level=5, mim_option=B, metal_top=9K, poly_res=1K, and mim_cap=2.
6. **connectivity** : Default is off. Use it for checking connectivity rules.
7. **DENSITY** : Default is off. Use it for checking density rules.
8. **DENSITY_only** : Default is off. Use it for checking density rules only.
9. **ANTENNA** : Default is off. Use it to turn on Antenna checks.
10. **ANTENNA_only** : Default is off. Use it to turn on Antenna checks only.
11. **OFFGRID** : Default is on. Use it for checking off-grid and acute-angle layers (on-grid of 0.005 um and angles of 45 deg. unless otherwise stated).

### Usage

```bash
    run_drc.py (--help| -h)
    run_drc.py (--path=<file_path>) (--gf180mcu=<combined_options>) [--topcell=<topcell_name>] [--thr=<thr_count>] [--run_mode=<run_mode>] [--no_feol] [--no_beol] [--connectivity] [--density] [--density_only] [--antenna] [--antenna_only] [--no_offgrid]
```

Example:

```bash
    python3 run_drc.py --path=testing/switch_checking/simple_por.gds.gz --thr=16 --run_mode=flat --gf180mcu=A --antenna --no_offgrid
```

### Options

`--help -h` Print this help message.

`--path=<file_path>` The input GDS file path.

`--gf180mcu=<combined_options>` Select combined options of metal_top, mim_option, and metal_level. Allowed values (A, B, C).
    gf180mcu=A: Select metal_top=30K mim_option=A metal_level=3LM
    gf180mcu=B: Select metal_top=11K mim_option=B metal_level=4LM
    gf180mcu=C: Select metal_top=9K mim_option=B metal_level=5LM

`--topcell=<topcell_name>` Topcell name to use.

`--thr=<thr_count>` The number of threads used in the run.

`--run_mode=<run_mode>` Select the KLayout mode. Allowed modes (flat, deep, tiling). [default: flat]

`--no_feol` Turn off FEOL rules from running.

`--no_beol` Turn off BEOL rules from running.

`--connectivity` Turn on connectivity rules.

`--density` Turn on Density rules.

`--density_only` Turn on Density rules only.

`--antenna` Turn on Antenna checks.

`--antenna_only` Turn on Antenna checks only.

`--no_offgrid` Turn off OFFGRID checking rules.

### **DRC Outputs**

Results will appear at the end of the run logs.

The result is a database file (`.lyrdb`) of all violations, written in the same directory as your design. You can view it on top of your layout using KLayout.
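
As a concrete end-to-end sketch (the file name and run directory below are illustrative; `run_drc.py` names each report `<layout>_<table>.lyrdb` inside a timestamped run directory):

```bash
# Run the main rule deck on a layout, with switch names as documented above.
python3 run_drc.py --path=my_design.gds --gf180mcu=B --run_mode=deep --thr=8

# Load the layout together with the generated markers database (-m) in
# KLayout's marker browser to step through the violations.
klayout my_design.gds -m drc_run_2023_01_27_07_29_25/my_design_main.lyrdb
```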
diff --git a/klayout/drc/rule_decks/dualgate.drc b/klayout/drc/rule_decks/dualgate.drc new file mode 100644 index 00000000..a31552c3 --- /dev/null +++ b/klayout/drc/rule_decks/dualgate.drc @@ -0,0 +1,87 @@ +################################################################################################ +# Copyright 2022 GlobalFoundries PDK Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +################################################################################################ + +if FEOL + #================================================ + #--------------------DUALGATE-------------------- + #================================================ + + # Rule DV.1: Min. Dualgate enclose DNWELL. is 0.5µm + logger.info("Executing rule DV.1") + dv1_l1 = dualgate.enclosing(dnwell, 0.5.um, euclidian).polygons(0.001) + dv1_l2 = dnwell.not_outside(dualgate).not(dualgate) + dv1_l = dv1_l1.or(dv1_l2) + dv1_l.output("DV.1", "DV.1 : Min. Dualgate enclose DNWELL. : 0.5µm") + dv1_l1.forget + dv1_l2.forget + dv1_l.forget + + # Rule DV.2: Min. Dualgate Space. Merge if Space is less than this design rule. is 0.44µm + logger.info("Executing rule DV.2") + dv2_l1 = dualgate.space(0.44.um, euclidian).polygons(0.001) + dv2_l1.output("DV.2", "DV.2 : Min. Dualgate Space. Merge if Space is less than this design rule. : 0.44µm") + dv2_l1.forget + + # Rule DV.3: Min. Dualgate to COMP space [unrelated]. is 0.24µm + logger.info("Executing rule DV.3") + dv3_l1 = dualgate.separation(comp.outside(dualgate), 0.24.um, euclidian).polygons(0.001) + dv3_l1.output("DV.3", "DV.3 : Min. Dualgate to COMP space [unrelated]. : 0.24µm") + dv3_l1.forget + + # rule DV.4 is not a DRC check + + # Rule DV.5: Min. Dualgate width. is 0.7µm + logger.info("Executing rule DV.5") + dv5_l1 = dualgate.width(0.7.um, euclidian).polygons(0.001) + dv5_l1.output("DV.5", "DV.5 : Min. Dualgate width. : 0.7µm") + dv5_l1.forget + + comp_dv = comp.not(pcomp.outside(nwell)) + # Rule DV.6: Min. Dualgate enclose COMP (except substrate tap). is 0.24µm + logger.info("Executing rule DV.6") + dv6_l1 = dualgate.enclosing(comp_dv, 0.24.um, euclidian).polygons(0.001) + dv6_l2 = comp_dv.not_outside(dualgate).not(dualgate) + dv6_l = dv6_l1.or(dv6_l2) + dv6_l.output("DV.6", "DV.6 : Min. Dualgate enclose COMP (except substrate tap). : 0.24µm") + dv6_l1.forget + dv6_l2.forget + dv6_l.forget + + # Rule DV.7: COMP (except substrate tap) can not be partially overlapped by Dualgate. + logger.info("Executing rule DV.7") + dv7_l1 = dualgate.not_outside(comp_dv).not(dualgate.covering(comp_dv)) + dv7_l1.output("DV.7", "DV.7 : COMP (except substrate tap) can not be partially overlapped by Dualgate.") + dv7_l1.forget + + comp_dv.forget + + # Rule DV.8: Min Dualgate enclose Poly2. is 0.4µm + logger.info("Executing rule DV.8") + dv8_l1 = dualgate.enclosing(poly2, 0.4.um, euclidian).polygons(0.001) + dv8_l2 = poly2.not_outside(dualgate).not(dualgate) + dv8_l = dv8_l1.or(dv8_l2) + dv8_l.output("DV.8", "DV.8 : Min Dualgate enclose Poly2. 
: 0.4µm") + dv8_l1.forget + dv8_l2.forget + dv8_l.forget + + # Rule DV.9: 3.3V and 5V/6V PMOS cannot be sitting inside same NWELL. + logger.info("Executing rule DV.9") + dv9_l1 = nwell.covering(pgate.and(dualgate)).covering(pgate.not_inside(v5_xtor).not_inside(dualgate)) + dv9_l1.output("DV.9", "DV.9 : 3.3V and 5V/6V PMOS cannot be sitting inside same NWELL.") + dv9_l1.forget +end #FEOL + diff --git a/klayout/drc/rule_decks/main.drc b/klayout/drc/rule_decks/main.drc new file mode 100644 index 00000000..2976275e --- /dev/null +++ b/klayout/drc/rule_decks/main.drc @@ -0,0 +1,1066 @@ +################################################################################################ +# Copyright 2022 GlobalFoundries PDK Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +################################################################################################ + +#=========================================================================================================================== +#------------------------------------------- GF 0.18um MCU DRC RULE DECK -------------------------------------------------- +#=========================================================================================================================== +require 'time' +require "logger" + +exec_start_time = Time.now + +logger = Logger.new(STDOUT) + +logger.formatter = proc do |severity, datetime, progname, msg| + "#{datetime}: Memory Usage (" + `pmap #{Process.pid} | tail -1`[10,40].strip + ") : #{msg} +" +end + +#================================================ +#----------------- FILE SETUP ------------------- +#================================================ + +# optional for a batch launch : klayout -b -r gf_018mcu.drc -rd input=design.gds -rd report=gp180_drc.lyrdb + +logger.info("Starting running GF180MCU Klayout DRC runset on %s" % [$input]) +logger.info("Ruby Version for klayout: %s" % [RUBY_VERSION]) + +if $input + if $topcell + source($input, $topcell) + else + source($input) + end +end + +if $table_name + table_name = $table_name +else + table_name = "main" +end + + +logger.info("Loading database to memory is complete.") + +if $report + logger.info("GF180MCU Klayout DRC runset output at: %s" % [$report]) + report("DRC Run Report at", $report) +else + logger.info("GF180MCU Klayout DRC runset output at default location." 
% [File.join(File.dirname(RBA::CellView::active.filename), "gf180_drc.lyrdb")]) + report("DRC Run Report at", File.join(File.dirname(RBA::CellView::active.filename), "gf180_drc.lyrdb")) +end + +#================================================ +#------------------ SWITCHES -------------------- +#================================================ +logger.info("Evaluate switches.") + +# connectivity rules +if $conn_drc == "true" + CONNECTIVITY_RULES = $conn_drc + logger.info("connectivity rules are enabled.") +else + CONNECTIVITY_RULES = false + logger.info("connectivity rules are disabled.") +end # connectivity rules + +# WEDGE +if $wedge == "false" + WEDGE = $wedge +else + WEDGE = "true" +end # WEDGE + +logger.info("Wedge enabled %s" % [WEDGE]) + +# BALL +if $ball == "false" + BALL = $ball +else + BALL = "true" +end # BALL + +logger.info("Ball enabled %s" % [BALL]) + +# GOLD +if $gold == "false" + GOLD = $gold +else + GOLD = "true" +end # GOLD + +logger.info("Gold enabled %s" % [GOLD]) + +if $mim_option + MIM_OPTION = $mim_option +else + MIM_OPTION = "B" +end + +logger.info("MIM Option selected %s" % [MIM_OPTION]) + +# OFFGRID +if $offgrid == "false" + OFFGRID = false +else + OFFGRID = true +end # OFFGRID + +logger.info("Offgrid enabled %s" % [OFFGRID]) + +if $thr + threads($thr) + logger.info("Number of threads to use %s" % [$thr]) +else + threads(%x("nproc")) + logger.info("Number of threads to use #{%x("nproc")}") +end + +#=== PRINT DETAILS === +if $verbose == "true" + logger.info("Verbose mode: #{$verbose}") + verbose(true) +else + verbose(false) + logger.info("Verbose mode: false") +end + +# === TILING MODE === +if $run_mode == "tiling" + tiles(500.um) + tile_borders(10.um) + logger.info("Tiling mode is enabled.") + +elsif $run_mode == "deep" + #=== HIER MODE === + deep + logger.info("deep mode is enabled.") +else + #=== FLAT MODE === + flat + logger.info("flat mode is enabled.") +end # run_mode + +# METAL_TOP +if $metal_top + METAL_TOP = $metal_top +else + METAL_TOP = "9K" +end # METAL_TOP + +logger.info("METAL_TOP Selected is %s" % [METAL_TOP]) + +# METAL_LEVEL +if $metal_level + METAL_LEVEL = $metal_level +else + METAL_LEVEL = "5LM" +end # METAL_LEVEL + +logger.info("METAL_STACK Selected is %s" % [METAL_LEVEL]) + +# FEOL +if $feol == "false" + FEOL = $feol + logger.info("FEOL is disabled.") +else + FEOL = "true" + logger.info("FEOL is enabled.") +end # FEOL + +# BEOL +if $beol == "false" + BEOL = $beol + logger.info("BEOL is disabled.") +else + BEOL = "true" + logger.info("BEOL is enabled.") +end # BEOL + +#================================================ +#------------- LAYERS DEFINITIONS --------------- +#================================================ +polygons_count = 0 +logger.info("Read in polygons from layers.") + +def get_polygons(l, d) + if $run_mode == "deep" + polygons(l, d) + else + polygons(l, d).merged + end +end + +comp = get_polygons(22 , 0 ) +count = comp.count() +logger.info("comp has %d polygons" % [count]) +polygons_count += count + +dnwell = get_polygons(12 , 0 ) +count = dnwell.count() +logger.info("dnwell has %d polygons" % [count]) +polygons_count += count + +nwell = get_polygons(21 , 0 ) +count = nwell.count() +logger.info("nwell has %d polygons" % [count]) +polygons_count += count + +lvpwell = get_polygons(204, 0 ) +count = lvpwell.count() +logger.info("lvpwell has %d polygons" % [count]) +polygons_count += count + +dualgate = get_polygons(55 , 0 ) +count = dualgate.count() +logger.info("dualgate has %d polygons" % [count]) +polygons_count += count + +poly2 
= get_polygons(30 , 0 ) +count = poly2.count() +logger.info("poly2 has %d polygons" % [count]) +polygons_count += count + +nplus = get_polygons(32 , 0 ) +count = nplus.count() +logger.info("nplus has %d polygons" % [count]) +polygons_count += count + +pplus = get_polygons(31 , 0 ) +count = pplus.count() +logger.info("pplus has %d polygons" % [count]) +polygons_count += count + +sab = get_polygons(49 , 0 ) +count = sab.count() +logger.info("sab has %d polygons" % [count]) +polygons_count += count + +esd = get_polygons(24 , 0 ) +count = esd.count() +logger.info("esd has %d polygons" % [count]) +polygons_count += count + +resistor = get_polygons(62 , 0 ) +count = resistor.count() +logger.info("resistor has %d polygons" % [count]) +polygons_count += count + +fhres = get_polygons(227, 0 ) +count = fhres.count() +logger.info("fhres has %d polygons" % [count]) +polygons_count += count + +fusetop = get_polygons(75 , 0 ) +count = fusetop.count() +logger.info("fusetop has %d polygons" % [count]) +polygons_count += count + +fusewindow_d = get_polygons(96 , 1 ) +count = fusewindow_d.count() +logger.info("fusewindow_d has %d polygons" % [count]) +polygons_count += count + +polyfuse = get_polygons(220, 0 ) +count = polyfuse.count() +logger.info("polyfuse has %d polygons" % [count]) +polygons_count += count + +mvsd = get_polygons(210, 0 ) +count = mvsd.count() +logger.info("mvsd has %d polygons" % [count]) +polygons_count += count + +mvpsd = get_polygons(11 , 39) +count = mvpsd.count() +logger.info("mvpsd has %d polygons" % [count]) +polygons_count += count + +nat = get_polygons(5 , 0 ) +count = nat.count() +logger.info("nat has %d polygons" % [count]) +polygons_count += count + +comp_dummy = get_polygons(22 , 4 ) +count = comp_dummy.count() +logger.info("comp_dummy has %d polygons" % [count]) +polygons_count += count + +poly2_dummy = get_polygons(30 , 4 ) +count = poly2_dummy.count() +logger.info("poly2_dummy has %d polygons" % [count]) +polygons_count += count + +schottky_diode = get_polygons(241, 0 ) +count = schottky_diode.count() +logger.info("schottky_diode has %d polygons" % [count]) +polygons_count += count + +zener = get_polygons(178, 0 ) +count = zener.count() +logger.info("zener has %d polygons" % [count]) +polygons_count += count + +res_mk = get_polygons(110, 5 ) +count = res_mk.count() +logger.info("res_mk has %d polygons" % [count]) +polygons_count += count + +opc_drc = get_polygons(124, 5 ) +count = opc_drc.count() +logger.info("opc_drc has %d polygons" % [count]) +polygons_count += count + +ndmy = get_polygons(111, 5 ) +count = ndmy.count() +logger.info("ndmy has %d polygons" % [count]) +polygons_count += count + +pmndmy = get_polygons(152, 5 ) +count = pmndmy.count() +logger.info("pmndmy has %d polygons" % [count]) +polygons_count += count + +v5_xtor = get_polygons(112, 1 ) +count = v5_xtor.count() +logger.info("v5_xtor has %d polygons" % [count]) +polygons_count += count + +cap_mk = get_polygons(117, 5 ) +count = cap_mk.count() +logger.info("cap_mk has %d polygons" % [count]) +polygons_count += count + +mos_cap_mk = get_polygons(166, 5 ) +count = mos_cap_mk.count() +logger.info("mos_cap_mk has %d polygons" % [count]) +polygons_count += count + +ind_mk = get_polygons(151, 5 ) +count = ind_mk.count() +logger.info("ind_mk has %d polygons" % [count]) +polygons_count += count + +diode_mk = get_polygons(115, 5 ) +count = diode_mk.count() +logger.info("diode_mk has %d polygons" % [count]) +polygons_count += count + +drc_bjt = get_polygons(127, 5 ) +count = drc_bjt.count() +logger.info("drc_bjt 
has %d polygons" % [count]) +polygons_count += count + +lvs_bjt = get_polygons(118, 5 ) +count = lvs_bjt.count() +logger.info("lvs_bjt has %d polygons" % [count]) +polygons_count += count + +mim_l_mk = get_polygons(117, 10) +count = mim_l_mk.count() +logger.info("mim_l_mk has %d polygons" % [count]) +polygons_count += count + +latchup_mk = get_polygons(137, 5 ) +count = latchup_mk.count() +logger.info("latchup_mk has %d polygons" % [count]) +polygons_count += count + +guard_ring_mk = get_polygons(167, 5 ) +count = guard_ring_mk.count() +logger.info("guard_ring_mk has %d polygons" % [count]) +polygons_count += count + +otp_mk = get_polygons(173, 5 ) +count = otp_mk.count() +logger.info("otp_mk has %d polygons" % [count]) +polygons_count += count + +mtpmark = get_polygons(122, 5 ) +count = mtpmark.count() +logger.info("mtpmark has %d polygons" % [count]) +polygons_count += count + +neo_ee_mk = get_polygons(88 , 17) +count = neo_ee_mk.count() +logger.info("neo_ee_mk has %d polygons" % [count]) +polygons_count += count + +sramcore = get_polygons(108, 5 ) +count = sramcore.count() +logger.info("sramcore has %d polygons" % [count]) +polygons_count += count + +lvs_rf = get_polygons(100, 5 ) +count = lvs_rf.count() +logger.info("lvs_rf has %d polygons" % [count]) +polygons_count += count + +lvs_drain = get_polygons(100, 7 ) +count = lvs_drain.count() +logger.info("lvs_drain has %d polygons" % [count]) +polygons_count += count + +ind_mk = get_polygons(151, 5 ) +count = ind_mk.count() +logger.info("ind_mk has %d polygons" % [count]) +polygons_count += count + +hvpolyrs = get_polygons(123, 5 ) +count = hvpolyrs.count() +logger.info("hvpolyrs has %d polygons" % [count]) +polygons_count += count + +lvs_io = get_polygons(119, 5 ) +count = lvs_io.count() +logger.info("lvs_io has %d polygons" % [count]) +polygons_count += count + +probe_mk = get_polygons(13 , 17) +count = probe_mk.count() +logger.info("probe_mk has %d polygons" % [count]) +polygons_count += count + +esd_mk = get_polygons(24 , 5 ) +count = esd_mk.count() +logger.info("esd_mk has %d polygons" % [count]) +polygons_count += count + +lvs_source = get_polygons(100, 8 ) +count = lvs_source.count() +logger.info("lvs_source has %d polygons" % [count]) +polygons_count += count + +well_diode_mk = get_polygons(153, 51) +count = well_diode_mk.count() +logger.info("well_diode_mk has %d polygons" % [count]) +polygons_count += count + +ldmos_xtor = get_polygons(226, 0 ) +count = ldmos_xtor.count() +logger.info("ldmos_xtor has %d polygons" % [count]) +polygons_count += count + +plfuse = get_polygons(125, 5 ) +count = plfuse.count() +logger.info("plfuse has %d polygons" % [count]) +polygons_count += count + +efuse_mk = get_polygons(80 , 5 ) +count = efuse_mk.count() +logger.info("efuse_mk has %d polygons" % [count]) +polygons_count += count + +mcell_feol_mk = get_polygons(11 , 17) +count = mcell_feol_mk.count() +logger.info("mcell_feol_mk has %d polygons" % [count]) +polygons_count += count + +ymtp_mk = get_polygons(86 , 17) +count = ymtp_mk.count() +logger.info("ymtp_mk has %d polygons" % [count]) +polygons_count += count + +dev_wf_mk = get_polygons(128, 17) +count = dev_wf_mk.count() +logger.info("dev_wf_mk has %d polygons" % [count]) +polygons_count += count + +comp_label = get_polygons(22 , 10) +count = comp_label.count() +logger.info("comp_label has %d polygons" % [count]) +polygons_count += count + +poly2_label = get_polygons(30 , 10) +count = poly2_label.count() +logger.info("poly2_label has %d polygons" % [count]) +polygons_count += count + +mdiode 
= get_polygons(116, 5 )
count = mdiode.count()
logger.info("mdiode has %d polygons" % [count])
polygons_count += count

contact = get_polygons(33 , 0 )
count = contact.count()
logger.info("contact has %d polygons" % [count])
polygons_count += count

metal1_drawn = get_polygons(34 , 0 )
count = metal1_drawn.count()
logger.info("metal1_drawn has %d polygons" % [count])
polygons_count += count

metal1_dummy = get_polygons(34 , 4 )
count = metal1_dummy.count()
logger.info("metal1_dummy has %d polygons" % [count])
polygons_count += count

metal1 = metal1_drawn + metal1_dummy

metal1_label = get_polygons(34 , 10)
count = metal1_label.count()
logger.info("metal1_label has %d polygons" % [count])
polygons_count += count

metal1_slot = get_polygons(34 , 3 )
count = metal1_slot.count()
logger.info("metal1_slot has %d polygons" % [count])
polygons_count += count

metal1_blk = get_polygons(34 , 5 )
count = metal1_blk.count()
logger.info("metal1_blk has %d polygons" % [count])
polygons_count += count

via1 = get_polygons(35 , 0 )
count = via1.count()
logger.info("via1 has %d polygons" % [count])
polygons_count += count


if METAL_LEVEL == "2LM"
    metal2_drawn = get_polygons(36 , 0 )
    count = metal2_drawn.count()
    logger.info("metal2_drawn has %d polygons" % [count])
    polygons_count += count

    metal2_dummy = get_polygons(36 , 4 )
    count = metal2_dummy.count()
    logger.info("metal2_dummy has %d polygons" % [count])
    polygons_count += count

    metal2 = metal2_drawn + metal2_dummy

    metal2_label = get_polygons(36 , 10)
    count = metal2_label.count()
    logger.info("metal2_label has %d polygons" % [count])
    polygons_count += count

    metal2_slot = get_polygons(36 , 3 )
    count = metal2_slot.count()
    logger.info("metal2_slot has %d polygons" % [count])
    polygons_count += count

    metal2_blk = get_polygons(36 , 5 )
    count = metal2_blk.count()
    logger.info("metal2_blk has %d polygons" % [count])
    polygons_count += count

    top_via = via1
    topmin1_via = contact
    top_metal = metal2
    topmin1_metal = metal1

else
    metal2_drawn = get_polygons(36 , 0 )
    count = metal2_drawn.count()
    logger.info("metal2_drawn has %d polygons" % [count])
    polygons_count += count

    metal2_dummy = get_polygons(36 , 4 )
    count = metal2_dummy.count()
    logger.info("metal2_dummy has %d polygons" % [count])
    polygons_count += count

    metal2 = metal2_drawn + metal2_dummy

    metal2_label = get_polygons(36 , 10)
    count = metal2_label.count()
    logger.info("metal2_label has %d polygons" % [count])
    polygons_count += count

    metal2_slot = get_polygons(36 , 3 )
    count = metal2_slot.count()
    logger.info("metal2_slot has %d polygons" % [count])
    polygons_count += count

    metal2_blk = get_polygons(36 , 5 )
    count = metal2_blk.count()
    logger.info("metal2_blk has %d polygons" % [count])
    polygons_count += count

    via2 = get_polygons(38 , 0 )
    count = via2.count()
    logger.info("via2 has %d polygons" % [count])
    polygons_count += count

    if METAL_LEVEL == "3LM"
        metal3_drawn = get_polygons(42 , 0 )
        count = metal3_drawn.count()
        logger.info("metal3_drawn has %d polygons" % [count])
        polygons_count += count

        metal3_dummy = get_polygons(42 , 4 )
        count = metal3_dummy.count()
        logger.info("metal3_dummy has %d polygons" % [count])
        polygons_count += count

        metal3 = metal3_drawn + metal3_dummy

        metal3_label = get_polygons(42 , 10)
        count = metal3_label.count()
        logger.info("metal3_label has %d polygons" % [count])
        polygons_count += count
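
        # The remaining metal3 helper layers are loaded with the same
        # count-and-log pattern (datatype 3: slot shapes, datatype 5: blockage marker).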
metal3_slot = get_polygons(42 , 3 ) + count = metal3_slot.count() + logger.info("metal3_slot has %d polygons" % [count]) + polygons_count += count + + metal3_blk = get_polygons(42 , 5 ) + count = metal3_blk.count() + logger.info("metal3_blk has %d polygons" % [count]) + polygons_count += count + + top_via = via2 + topmin1_via = via1 + top_metal = metal3 + topmin1_metal = metal2 + else + metal3_drawn = get_polygons(42 , 0 ) + count = metal3_drawn.count() + logger.info("metal3_drawn has %d polygons" % [count]) + polygons_count += count + + metal3_dummy = get_polygons(42 , 4 ) + count = metal3_dummy.count() + logger.info("metal3_dummy has %d polygons" % [count]) + polygons_count += count + + metal3 = metal3_drawn + metal3_dummy + + metal3_label = get_polygons(42 , 10) + count = metal3_label.count() + logger.info("metal3_label has %d polygons" % [count]) + polygons_count += count + + metal3_slot = get_polygons(42 , 3 ) + count = metal3_slot.count() + logger.info("metal3_slot has %d polygons" % [count]) + polygons_count += count + + metal3_blk = get_polygons(42 , 5 ) + count = metal3_blk.count() + logger.info("metal3_blk has %d polygons" % [count]) + polygons_count += count + + via3 = get_polygons(40 , 0 ) + + if METAL_LEVEL == "4LM" + metal4_drawn = get_polygons(46 , 0 ) + count = metal4_drawn.count() + logger.info("metal4_drawn has %d polygons" % [count]) + polygons_count += count + + metal4_dummy = get_polygons(46 , 4 ) + count = metal4_dummy.count() + logger.info("metal4_dummy has %d polygons" % [count]) + polygons_count += count + + metal4 = metal4_drawn + metal4_dummy + + metal4_label = get_polygons(46 , 10) + count = metal4_label.count() + logger.info("metal4_label has %d polygons" % [count]) + polygons_count += count + + metal4_slot = get_polygons(46 , 3 ) + count = metal4_slot.count() + logger.info("metal4_slot has %d polygons" % [count]) + polygons_count += count + + metal4_blk = get_polygons(46 , 5 ) + count = metal4_blk.count() + logger.info("metal4_blk has %d polygons" % [count]) + polygons_count += count + + top_via = via3 + topmin1_via = via2 + top_metal = metal4 + topmin1_metal = metal3 + else + metal4_drawn = get_polygons(46 , 0 ) + count = metal4_drawn.count() + logger.info("metal4_drawn has %d polygons" % [count]) + polygons_count += count + + metal4_dummy = get_polygons(46 , 4 ) + count = metal4_dummy.count() + logger.info("metal4_dummy has %d polygons" % [count]) + polygons_count += count + + metal4 = metal4_drawn + metal4_dummy + + metal4_label = get_polygons(46 , 10) + count = metal4_label.count() + logger.info("metal4_label has %d polygons" % [count]) + polygons_count += count + + metal4_slot = get_polygons(46 , 3 ) + count = metal4_slot.count() + logger.info("metal4_slot has %d polygons" % [count]) + polygons_count += count + + metal4_blk = get_polygons(46 , 5 ) + count = metal4_blk.count() + logger.info("metal4_blk has %d polygons" % [count]) + polygons_count += count + + via4 = get_polygons(41 , 0 ) + count = via4.count() + logger.info("via4 has %d polygons" % [count]) + polygons_count += count + + if METAL_LEVEL == "5LM" + metal5_drawn = get_polygons(81 , 0 ) + count = metal5_drawn.count() + logger.info("metal5_drawn has %d polygons" % [count]) + polygons_count += count + + metal5_dummy = get_polygons(81 , 4 ) + count = metal5_dummy.count() + logger.info("metal5_dummy has %d polygons" % [count]) + polygons_count += count + + metal5 = metal5_drawn + metal5_dummy + + metal5_label = get_polygons(81 , 10) + count = metal5_label.count() + logger.info("metal5_label has %d 
polygons" % [count]) + polygons_count += count + + metal5_slot = get_polygons(81 , 3 ) + count = metal5_slot.count() + logger.info("metal5_slot has %d polygons" % [count]) + polygons_count += count + + metal5_blk = get_polygons(81 , 5 ) + count = metal5_blk.count() + logger.info("metal5_blk has %d polygons" % [count]) + polygons_count += count + + top_via = via4 + topmin1_via = via3 + top_metal = metal5 + topmin1_metal = metal4 + else + ## 6LM + metal5_drawn = get_polygons(81 , 0 ) + count = metal5_drawn.count() + logger.info("metal5_drawn has %d polygons" % [count]) + polygons_count += count + + metal5_dummy = get_polygons(81 , 4 ) + count = metal5_dummy.count() + logger.info("metal5_dummy has %d polygons" % [count]) + polygons_count += count + + metal5 = metal5_drawn + metal5_dummy + + metal5_label = get_polygons(81 , 10) + count = metal5_label.count() + logger.info("metal5_label has %d polygons" % [count]) + polygons_count += count + + metal5_slot = get_polygons(81 , 3 ) + count = metal5_slot.count() + logger.info("metal5_slot has %d polygons" % [count]) + polygons_count += count + + metal5_blk = get_polygons(81 , 5 ) + count = metal5_blk.count() + logger.info("metal5_blk has %d polygons" % [count]) + polygons_count += count + + via5 = get_polygons(82 , 0 ) + count = via5.count() + logger.info("via5 has %d polygons" % [count]) + polygons_count += count + + + metaltop_drawn = get_polygons(53 , 0 ) + count = metaltop_drawn.count() + logger.info("metaltop_drawn has %d polygons" % [count]) + polygons_count += count + + metaltop_dummy = get_polygons(53 , 4 ) + count = metaltop_dummy.count() + logger.info("metaltop_dummy has %d polygons" % [count]) + polygons_count += count + + metaltop = metaltop_drawn + metaltop_dummy + + metaltop_label = get_polygons(53 , 10) + count = metaltop_label.count() + logger.info("metaltop_label has %d polygons" % [count]) + polygons_count += count + + metaltop_slot = get_polygons(53 , 3 ) + count = metaltop_slot.count() + logger.info("metaltop_slot has %d polygons" % [count]) + polygons_count += count + + metalt_blk = get_polygons(53 , 5 ) + count = metalt_blk.count() + logger.info("metalt_blk has %d polygons" % [count]) + polygons_count += count + + top_via = via5 + topmin1_via = via4 + top_metal = metaltop + topmin1_metal = metal5 + end + end + end +end + +pad = get_polygons(37 , 0 ) +count = pad.count() +logger.info("pad has %d polygons" % [count]) +polygons_count += count + +ubmpperi = get_polygons(183, 0 ) +count = ubmpperi.count() +logger.info("ubmpperi has %d polygons" % [count]) +polygons_count += count + +ubmparray = get_polygons(184, 0 ) +count = ubmparray.count() +logger.info("ubmparray has %d polygons" % [count]) +polygons_count += count + +ubmeplate = get_polygons(185, 0 ) +count = ubmeplate.count() +logger.info("ubmeplate has %d polygons" % [count]) +polygons_count += count + +metal1_res = get_polygons(110, 11) +count = metal1_res.count() +logger.info("metal1_res has %d polygons" % [count]) +polygons_count += count + +metal2_res = get_polygons(110, 12) +count = metal2_res.count() +logger.info("metal2_res has %d polygons" % [count]) +polygons_count += count + +metal3_res = get_polygons(110, 13) +count = metal3_res.count() +logger.info("metal3_res has %d polygons" % [count]) +polygons_count += count + +metal4_res = get_polygons(110, 14) +count = metal4_res.count() +logger.info("metal4_res has %d polygons" % [count]) +polygons_count += count + +metal5_res = get_polygons(110, 15) +count = metal5_res.count() +logger.info("metal5_res has %d polygons" % 
[count]) +polygons_count += count + +metal6_res = get_polygons(110, 16) +count = metal6_res.count() +logger.info("metal6_res has %d polygons" % [count]) +polygons_count += count + +pr_bndry = get_polygons(0 , 0 ) +count = pr_bndry.count() +logger.info("pr_bndry has %d polygons" % [count]) +polygons_count += count + +border = get_polygons(63 , 0 ) +count = border.count() +logger.info("border has %d polygons" % [count]) +polygons_count += count +logger.info("Total no. of polygons in the design is #{polygons_count}") + +logger.info("Starting deriving base layers.") +#===================================================== +#------------- BASE LAYERS DERIVATIONS --------------- +#===================================================== + +ncomp = comp.and(nplus) +pcomp = comp.and(pplus) +tgate = poly2.and(comp).not(res_mk) + +ngate = nplus.and(tgate) +nactive = ncomp.not(nwell) +nsd = nactive.interacting(ngate).not(ngate).not(res_mk) +ptap = pcomp.not(nwell).join(pcomp.and(lvpwell)).not(res_mk) + +pgate = pplus.and(tgate) +pactive = pcomp.and(nwell) +psd = pactive.interacting(pgate).not(pgate).not(res_mk) +ntap = ncomp.and(nwell).join(ncomp.and(dnwell).not(lvpwell)).not(res_mk) + +ngate_dn = ngate.and(lvpwell).and(dnwell) +ptap_dn = ptap.and(dnwell).outside(well_diode_mk) + +pgate_dn = pgate.and(dnwell).not(lvpwell) +ntap_dn = ntap.and(dnwell) + +psd_dn = pcomp.not(lvpwell).and(dnwell).interacting(pgate_dn).not(pgate_dn).not(res_mk) +nsd_dn = ncomp.and(dnwell).not(lvpwell).interacting(ngate_dn).not(ngate_dn).not(res_mk) + +natcompsd = (nat & comp.interacting(poly2)) - tgate + +# Gate +nom_gate = tgate.not(dualgate) +thick_gate = tgate.and(dualgate) + +ngate_56V = ngate.and(dualgate) +pgate_56V = pgate.and(dualgate) + +ngate_5V = ngate_56V.and(v5_xtor) +pgate_5V = pgate_56V.and(v5_xtor) + +ngate_6V = ngate_56V.not(v5_xtor) +pgate_6V = pgate_56V.not(v5_xtor) + +# DNWELL +dnwell_3p3v = dnwell.not_interacting(v5_xtor).not_interacting(dualgate) +dnwell_56v = dnwell.overlapping(dualgate) + +# LVPWELL +lvpwell_dn = lvpwell.interacting(dnwell) +lvpwell_out = lvpwell.not_interacting(dnwell) + +lvpwell_dn3p3v = lvpwell.and(dnwell_3p3v) +lvpwell_dn56v = lvpwell.and(dnwell_56v) + +# NWELL +nwell_dn = nwell.interacting(dnwell) +nwell_n_dn = nwell.not_interacting(dnwell) + +#================================================ +#------------- LAYERS CONNECTIONS --------------- +#================================================ + +if CONNECTIVITY_RULES + + logger.info("Construct connectivity for the design.") + + connect(dnwell, ncomp) + connect(ncomp, contact) + connect(pcomp, contact) + + connect(lvpwell_out, pcomp) + connect(lvpwell_dn, pcomp) + + connect(nwell, ncomp) + connect(natcompsd, contact) + connect(mvsd, ncomp) + connect(mvpsd, pcomp) + connect(contact, metal1) + connect(metal1, via1) + connect(via1, metal2) + if METAL_LEVEL != "2LM" + connect(metal2, via2) + connect(via2, metal3) + + if METAL_LEVEL != "3LM" + connect(metal3, via3) + connect(via3, metal4) + + if METAL_LEVEL != "4LM" + connect(metal4, via4) + connect(via4, metal5) + + if METAL_LEVEL != "5LM" + connect(metal5, via5) + connect(via5, metaltop) + end + end + end + end + +end #CONNECTIVITY_RULES + +#================================================ +#------------ PRE-DEFINED FUNCTIONS ------------- +#================================================ + +def conn_space(layer,conn_val,not_conn_val, mode) + if conn_val > not_conn_val + raise "ERROR : Wrong connectivity implementation" + end + connected_output = layer.space(conn_val.um, 
mode).polygons(0.001) + unconnected_errors_unfiltered = layer.space(not_conn_val.um, mode) + singularity_errors = layer.space(0.001.um) + # Filter out the errors arising from the same net + unconnected_errors = DRC::DRCLayer::new(self, RBA::EdgePairs::new) + unconnected_errors_unfiltered.data.each do |ep| + net1 = l2n_data.probe_net(layer.data, ep.first.p1) + net2 = l2n_data.probe_net(layer.data, ep.second.p1) + if !net1 || !net2 + puts "Should not happen ..." + elsif net1.circuit != net2.circuit || net1.cluster_id != net2.cluster_id + # unconnected + unconnected_errors.data.insert(ep) + end + end + unconnected_output = unconnected_errors.polygons.or(singularity_errors.polygons(0.001)) + return connected_output, unconnected_output +end + +def conn_separation(layer1, layer2, conn_val,not_conn_val, mode) + if conn_val > not_conn_val + raise "ERROR : Wrong connectivity implementation" + end + connected_output = layer1.separation(layer2, conn_val.um, mode).polygons(0.001) + unconnected_errors_unfiltered = layer1.separation(layer2, not_conn_val.um, mode) + # Filter out the errors arising from the same net + unconnected_errors = DRC::DRCLayer::new(self, RBA::EdgePairs::new) + unconnected_errors_unfiltered.data.each do |ep| + net1 = l2n_data.probe_net(layer1.data, ep.first.p1) + net2 = l2n_data.probe_net(layer2.data, ep.second.p1) + if !net1 || !net2 + puts "Should not happen ..." + elsif net1.circuit != net2.circuit || net1.cluster_id != net2.cluster_id + # unconnected + unconnected_errors.data.insert(ep) + end + end + unconnected_output = unconnected_errors.polygons(0.001) + return connected_output, unconnected_output +end + +# === IMPLICIT EXTRACTION === +if CONNECTIVITY_RULES + logger.info("Connectivity rules enabled, Netlist object will be generated.") + netlist +end #CONNECTIVITY_RULES + +# === LAYOUT EXTENT === +CHIP = extent.sized(0.0) +logger.info("Total area of the design is #{CHIP.area()} um^2.") + +#================================================ +#----------------- MAIN RUNSET ------------------ +#================================================ + +logger.info("Starting GF180MCU DRC rules.") +if FEOL + logger.info("Running all FEOL rules") +end #FEOL + +if BEOL + logger.info("Running all BEOL rules") +end #BEOL + + diff --git a/klayout/drc/run_drc.py b/klayout/drc/run_drc.py new file mode 100644 index 00000000..189d628c --- /dev/null +++ b/klayout/drc/run_drc.py @@ -0,0 +1,709 @@ +# Copyright 2022 GlobalFoundries PDK Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Run GlobalFoundries 180nm MCU DRC. + +Usage: + run_drc.py (--help| -h) + run_drc.py (--path=) (--variant=) [--verbose] [--table=]... [--mp=] [--run_dir=] [--topcell=] [--thr=] [--run_mode=] [--no_feol] [--no_beol] [--connectivity] [--density] [--density_only] [--antenna] [--antenna_only] [--no_offgrid] + +Options: + --help -h Print this help message. + --path= The input GDS file path. + --variant= Select combined options of metal_top, mim_option, and metal_level. Allowed values (A, B, C). 
+ variant=A: Select metal_top=30K mim_option=A metal_level=3LM + variant=B: Select metal_top=11K mim_option=B metal_level=4LM + variant=C: Select metal_top=9K mim_option=B metal_level=5LM + --topcell= Topcell name to use. + --table= Table name to use to run the rule deck. + --mp= Run the rule deck in parts in parallel to speed up the run. [default: 1] + --run_dir= Run directory to save all the results [default: pwd] + --thr= The number of threads used in run. + --run_mode= Select klayout mode Allowed modes (flat , deep, tiling). [default: flat] + --no_feol Turn off FEOL rules from running. + --no_beol Turn off BEOL rules from running. + --connectivity Turn on connectivity rules. + --density Turn on Density rules. + --density_only Turn on Density rules only. + --antenna Turn on Antenna checks. + --antenna_only Turn on Antenna checks only. + --no_offgrid Turn off OFFGRID checking rules. + --verbose Detailed rule execution log for debugging. +""" + +from docopt import docopt +import os +import xml.etree.ElementTree as ET +import logging +import klayout.db +import glob +from datetime import datetime +from subprocess import check_call +import shutil +import concurrent.futures +import traceback + + +def get_rules_with_violations(results_database): + """ + This function will find all the rules that has violated in a database. + + Parameters + ---------- + results_database : string or Path object + Path string to the results file + + Returns + ------- + set + A set that contains all rules in the database with violations + """ + + mytree = ET.parse(results_database) + myroot = mytree.getroot() + + all_violating_rules = set() + + for z in myroot[7]: # myroot[7] : List rules with viloations + all_violating_rules.add(f"{z[1].text}".replace("'", "")) + + return all_violating_rules + + +def check_drc_results(results_db_files: list): + """ + check_drc_results Checks the results db generated from run and report at the end if the DRC run failed or passed. + This function will exit with 1 if there are violations. + + Parameters + ---------- + results_db_files : list + A list of strings that represent paths to results databases of all the DRC runs. + """ + + if len(results_db_files) < 1: + logging.error("Klayout did not generate any rdb results. Please check run logs") + exit(1) + + full_violating_rules = set() + + for f in results_db_files: + violating_rules = get_rules_with_violations(f) + full_violating_rules.update(violating_rules) + + if len(full_violating_rules) > 0: + logging.error("Klayout DRC run is not clean.") + logging.error("Violated rules are : {}\n".format(str(full_violating_rules))) + exit(1) + else: + logging.info("Klayout DRC run is clean. GDS has no DRC violations.") + + +def get_results(rule_deck, rules, lyrdb, type): + + mytree = ET.parse(f"{lyrdb}_{type}_gf{arguments['--gf180mcu']}.lyrdb") + myroot = mytree.getroot() + + violated = [] + + for lrule in rules: + # Loop on database to get the violations of required rule + for z in myroot[7]: + if f"'{lrule}'" == f"{z[1].text}": + violated.append(lrule) + break + + lyrdb_clean = lyrdb.split("/")[-1] + + if len(violated) > 0: + logging.error( + f"\nTotal # of DRC violations in {rule_deck}.drc is {len(violated)}. Please check {lyrdb_clean}_{type}_gf{arguments['--gf180mcu']}.lyrdb file For more details" + ) + logging.info("Klayout GDS DRC Not Clean") + logging.info(f"Violated rules are : {violated}\n") + else: + logging.info( + f"\nCongratulations !!. 
No DRC Violations found in {lyrdb_clean} for {rule_deck}.drc rule deck with switch gf{arguments['--gf180mcu']}" + ) + logging.info("Klayout GDS DRC Clean\n") + + +def generate_drc_run_template(drc_dir: str, run_dir: str, run_tables_list: list = []): + """ + generate_drc_run_template will generate the template file to run drc in the run_dir path. + + Parameters + ---------- + drc_dir : str + Path string to the location where the DRC files would be found to get the list of the rule tables. + run_dir : str + Absolute path string to the run location where all the run output will be generated. + deck_name : str, optional + Name of the rule deck to use for generating the template, by default "" + run_tables_list : list, optional + list of target parts of the rule deck, if empty assume all of the rule tables found, by default [] + + Returns + ------- + str + Absolute path to the generated DRC file. + """ + if len(run_tables_list) < 1: + all_tables = [ + os.path.basename(f) + for f in glob.glob(os.path.join(drc_dir, "rule_decks", "*.drc")) + if "antenna" not in f + and "density" not in f + and "main" not in f + and "tail" not in f + ] + deck_name = "main" + elif len(run_tables_list) == 1: + deck_name = run_tables_list[0] + all_tables = ["{}.drc".format(run_tables_list[0])] + else: + all_tables = ["{}.drc".format(t) for t in run_tables_list] + deck_name = "main" + + logging.info( + "## Generating template with for the following rule tables: {}".format( + str(all_tables) + ) + ) + print(run_dir) + + all_tables.insert(0, "main.drc") + all_tables.append("tail.drc") + + gen_rule_deck_path = os.path.join(run_dir, "{}.drc".format(deck_name)) + with open(gen_rule_deck_path, "wb") as wfd: + for f in all_tables: + with open(os.path.join(drc_dir, "rule_decks", f), "rb") as fd: + shutil.copyfileobj(fd, wfd) + + return gen_rule_deck_path + + +def get_top_cell_names(gds_path): + """ + get_top_cell_names get the top cell names from the GDS file. + + Parameters + ---------- + gds_path : string + Path to the target GDS file. + + Returns + ------- + List of string + Names of the top cell in the layout. + """ + layout = klayout.db.Layout() + layout.read(gds_path) + top_cells = [t.name for t in layout.top_cells()] + + return top_cells + + +def get_list_of_tables(drc_dir: str): + """ + get_list_of_tables get the list of available tables in the drc + + Parameters + ---------- + drc_dir : str + Path to the DRC folder to get the list of tables from. + """ + return [ + os.path.basename(f).replace(".drc", "") + for f in glob.glob(os.path.join(drc_dir, "rule_decks", "*.drc")) + if "antenna" not in f + and "density" not in f + and "main" not in f + and "tail" not in f + ] + + +def get_run_top_cell_name(arguments, layout_path): + """ + get_run_top_cell_name Get the top cell name to use for running. If it's provided by the user, we use the user input. + If not, we get it from the GDS file. + + Parameters + ---------- + arguments : dict + Dictionary that holds the user inputs for the script generated by docopt. + layout_path : string + Path to the target layout. + + Returns + ------- + string + Name of the topcell to use in run. + + """ + + if arguments["--topcell"]: + topcell = arguments["--topcell"] + else: + layout_topcells = get_top_cell_names(layout_path) + if len(layout_topcells) > 1: + logging.error( + "## Layout has mutliple topcells. Please determine which topcell you want to run on." 
+ ) + exit(1) + else: + topcell = layout_topcells[0] + + return topcell + + +def generate_klayout_switches(arguments, layout_path): + """ + parse_switches Function that parse all the args from input to prepare switches for DRC run. + + Parameters + ---------- + arguments : dict + Dictionary that holds the arguments used by user in the run command. This is generated by docopt library. + layout_path : string + Path to the layout file that we will run DRC on. + + Returns + ------- + dict + Dictionary that represent all run switches passed to klayout. + """ + switches = dict() + + # No. of threads + thrCount = 2 if arguments["--thr"] == None else int(arguments["--thr"]) + switches["thr"] = str(int(thrCount)) + + if arguments["--run_mode"] in ["flat", "deep", "tiling"]: + switches["run_mode"] = arguments["--run_mode"] + else: + logging.error("Allowed klayout modes are (flat , deep , tiling) only") + exit() + + if arguments["--variant"] == "A": + switches["metal_top"] = "30K" + switches["mim_option"] = "A" + switches["metal_level"] = "3LM" + # switches = switches + f"-rd metal_top=30K -rd mim_option=A -rd metal_level=3LM " + elif arguments["--variant"] == "B": + switches["metal_top"] = "11K" + switches["mim_option"] = "B" + switches["metal_level"] = "4LM" + # switches = switches + f"-rd metal_top=11K -rd mim_option=B -rd metal_level=4LM " + elif arguments["--variant"] == "C": + switches["metal_top"] = "9K" + switches["mim_option"] = "B" + switches["metal_level"] = "5LM" + # switches = switches + f"-rd metal_top=9K -rd mim_option=B -rd metal_level=5LM " + else: + logging.error("variant switch allowed values are (A , B, C) only") + exit() + + if arguments["--verbose"]: + switches["verbose"] = "true" + else: + switches["verbose"] = "false" + + if arguments["--no_feol"]: + switches["feol"] = "false" + else: + switches["feol"] = "true" + + if arguments["--no_beol"]: + switches["beol"] = "false" + else: + switches["beol"] = "true" + + if arguments["--no_offgrid"]: + switches["offgrid"] = "false" + else: + switches["offgrid"] = "true" + + if arguments["--connectivity"]: + switches["conn_drc"] = "true" + else: + switches["conn_drc"] = "false" + + if arguments["--density"]: + switches["density"] = "true" + else: + switches["density"] = "false" + + switches["topcell"] = get_run_top_cell_name(arguments, layout_path) + switches["input"] = layout_path + + return switches + + +def check_klayout_version(): + """ + check_klayout_version checks klayout version and makes sure it would work with the DRC. + """ + # ======= Checking Klayout version ======= + klayout_v_ = os.popen("klayout -b -v").read() + klayout_v_ = klayout_v_.split("\n")[0] + klayout_v_list = [] + + if klayout_v_ == "": + logging.error("Klayout is not found. Please make sure klayout is installed.") + exit(1) + else: + klayout_v_list = [int(v) for v in klayout_v_.split(" ")[-1].split(".")] + + logging.info(f"Your Klayout version is: {klayout_v_}") + + if len(klayout_v_list) < 1 or len(klayout_v_list) > 3: + logging.error("Was not able to get klayout version properly.") + exit(1) + elif len(klayout_v_list) == 2: + if klayout_v_list[1] < 28: + logging.warning(f"Prerequisites at a minimum: KLayout 0.28.0") + logging.error( + "Using this klayout version has not been assesed in this development. Limits are unknown" + ) + exit(1) + elif len(klayout_v_list) == 3: + if klayout_v_list[1] < 28 : + logging.warning(f"Prerequisites at a minimum: KLayout 0.28.0") + logging.error( + "Using this klayout version has not been assesed in this development. 
Limits are unknown" + ) + exit(1) + + +def check_layout_path(layout_path): + """ + check_layout_type checks if the layout provided is GDS or OAS. Otherwise, kill the process. We only support GDS or OAS now. + + Parameters + ---------- + layout_path : string + string that represent the path of the layout. + + Returns + ------- + string + string that represent full absolute layout path. + """ + + if not os.path.isfile(layout_path): + logging.error("## GDS file path provided doesn't exist or not a file.") + exit(1) + + if not ".gds" in layout_path and not ".oas" in layout_path: + logging.error("## Layout is not in GDSII or OASIS format. Please use gds format.") + exit(1) + + return os.path.abspath(layout_path) + + +def build_switches_string(sws: dict): + """ + build_switches_string Build swtiches string from dictionary. + + Parameters + ---------- + sws : dict + Dictionary that holds the Antenna swithces. + """ + switches_str = "" + for k in sws: + switches_str += "-rd {}={} ".format(k, sws[k]) + + return switches_str + + +def run_check(drc_file: str, drc_name: str, path: str, run_dir: str, sws: dict): + """ + run_antenna_check run DRC check based on DRC file provided. + + Parameters + ---------- + drc_file : str + String that has the file full path to run. + path : str + String that holds the full path of the layout. + run_dir : str + String that holds the full path of the run location. + sws : dict + Dictionary that holds all switches that needs to be passed to the antenna checks. + + Returns + ------- + string + string that represent the path to the results output database for this run. + + """ + + ## Using print because of the multiprocessing + logging.info( + "Running Global Foundries 180nm MCU {} checks on design {} on cell {}:".format( + path, drc_name, sws["topcell"] + ) + ) + + layout_base_name = os.path.basename(path).split(".")[0] + new_sws = sws.copy() + report_path = os.path.join( + run_dir, "{}_{}.lyrdb".format(layout_base_name, drc_name) + ) + + new_sws["report"] = report_path + sws_str = build_switches_string(new_sws) + sws_str += f" -rd table_name={drc_name}" + log_file = os.path.join( + run_dir, "{}_{}.log".format(layout_base_name, drc_name) + ) + + run_str = f"klayout -b -r {drc_file} {sws_str}" + + check_call(run_str, shell=True) + + return report_path + + +def run_parallel_run( + arguments: dict, + rule_deck_full_path: str, + layout_path: str, + switches: dict, + drc_run_dir: str, +): + """ + run_single_processor run the drc checks as in a multi-processing. + + Parameters + ---------- + arguments : dict + Dictionary that holds the arguments passed to the run_drc script. + rule_deck_full_path : str + String that holds the path of the rule deck files. + layout_path : str + Path to the target layout. + switches : dict + Dictionary that holds all the switches that will be passed to klayout run. + drc_run_dir : str + Path to the run location. + """ + + list_rule_deck_files = dict() + + ## Run Antenna if required. + if arguments["--antenna"]: + drc_path = os.path.join(rule_deck_full_path, "rule_decks", "antenna.drc") + list_rule_deck_files["antenna"] = drc_path + + ## Run Density if required. 
+ if arguments["--density"]: + drc_path = os.path.join(rule_deck_full_path, "rule_decks", "density.drc") + list_rule_deck_files["density"] = drc_path + + ## list_res_db_files.append(run_check(drc_path, "antenna", layout_path, drc_run_dir, switches)) + if not arguments["--table"]: + list_of_tables = get_list_of_tables(rule_deck_full_path) + else: + list_of_tables = arguments["--table"] + + ## Generate run rule deck from template. + for t in list_of_tables: + drc_file = generate_drc_run_template(rule_deck_full_path, drc_run_dir, [t]) + list_rule_deck_files[t] = drc_file + + ## Run All DRC files. + list_res_db_files = [] + with concurrent.futures.ThreadPoolExecutor(max_workers=os.cpu_count()) as executor: + future_to_run_name = dict() + for n in list_rule_deck_files: + future_to_run_name[ + executor.submit( + run_check, + list_rule_deck_files[n], + n, + layout_path, + drc_run_dir, + switches, + ) + ] = n + + for future in concurrent.futures.as_completed(future_to_run_name): + run_name = future_to_run_name[future] + try: + list_res_db_files.append(future.result()) + except Exception as exc: + logging.error("%s generated an exception: %s" % (run_name, str(exc))) + traceback.print_exc() + + ## Check run + check_drc_results(list_res_db_files) + + +def run_single_processor( + arguments: dict, + rule_deck_full_path: str, + layout_path: str, + switches: dict, + drc_run_dir: str, +): + """ + run_single_processor run the drc checks as single run. + + Parameters + ---------- + arguments : dict + Dictionary that holds the arguments passed to the run_drc script. + rule_deck_full_path : str + String that holds the path of the rule deck files. + layout_path : str + Path to the target layout. + switches : dict + Dictionary that holds all the switches that will be passed to klayout run. + drc_run_dir : str + Path to the run location. + """ + + list_res_db_files = [] + + ## Run Antenna if required. + if arguments["--antenna"] or arguments["--antenna_only"]: + drc_path = os.path.join(rule_deck_full_path, "rule_decks", "antenna.drc") + list_res_db_files.append( + run_check(drc_path, "antenna", layout_path, drc_run_dir, switches) + ) + + if arguments["--antenna_only"]: + logging.info("## Completed running Antenna checks only.") + exit() + + ## Run Density if required. + if arguments["--density"] or arguments["--density_only"]: + drc_path = os.path.join(rule_deck_full_path, "rule_decks", "density.drc") + list_res_db_files.append( + run_check(drc_path, "density", layout_path, drc_run_dir, switches) + ) + + if arguments["--density_only"]: + logging.info("## Completed running density checks only.") + exit() + + ## Generate run rule deck from template. + if not arguments["--table"]: + drc_file = generate_drc_run_template(rule_deck_full_path, drc_run_dir) + else: + drc_file = generate_drc_run_template( + rule_deck_full_path, drc_run_dir, arguments["--table"] + ) + + ## Run Main DRC + list_res_db_files.append( + run_check(drc_file, "main", layout_path, drc_run_dir, switches) + ) + + ## Check run + check_drc_results(list_res_db_files) + + +def main(drc_run_dir: str, now_str: str, arguments: dict): + """ + main function to run the DRC. + + Parameters + ---------- + drc_run_dir : str + String with absolute path of the full run dir. + now_str : str + String with the run name for logs. + arguments : dict + Dictionary that holds the arguments used by user in the run command. This is generated by docopt library. 
+    """
+
+    # Check gds file existence
+    if not os.path.exists(arguments["--path"]):
+        logging.error("The input GDS file path doesn't exist, please recheck.")
+        exit(1)
+
+    rule_deck_full_path = os.path.dirname(os.path.abspath(__file__))
+
+    ## Check Klayout version
+    check_klayout_version()
+
+    ## Check if there was a layout provided.
+    if not arguments["--path"]:
+        logging.error("No provided gds file, please add one")
+        exit(1)
+
+    ## Check layout type
+    layout_path = arguments["--path"]
+    layout_path = check_layout_path(layout_path)
+
+    ## Get run switches
+    switches = generate_klayout_switches(arguments, layout_path)
+
+    if (
+        int(arguments["--mp"]) == 1
+        or arguments["--antenna_only"]
+        or arguments["--density_only"]
+    ):
+        run_single_processor(
+            arguments, rule_deck_full_path, layout_path, switches, drc_run_dir
+        )
+    else:
+        run_parallel_run(
+            arguments, rule_deck_full_path, layout_path, switches, drc_run_dir
+        )
+
+
+# ================================================================
+# -------------------------- MAIN --------------------------------
+# ================================================================
+
+if __name__ == "__main__":
+
+    # arguments
+    arguments = docopt(__doc__, version="RUN DRC: 1.0")
+
+    # logs format
+    now_str = datetime.utcnow().strftime("drc_run_%Y_%m_%d_%H_%M_%S")
+
+    if (
+        arguments["--run_dir"] == "pwd"
+        or arguments["--run_dir"] == ""
+        or arguments["--run_dir"] is None
+    ):
+        drc_run_dir = os.path.join(os.path.abspath(os.getcwd()), now_str)
+    else:
+        drc_run_dir = os.path.abspath(arguments["--run_dir"])
+
+    os.makedirs(drc_run_dir, exist_ok=True)
+
+    logging.basicConfig(
+        level=logging.DEBUG,
+        handlers=[
+            logging.FileHandler(os.path.join(drc_run_dir, "{}.log".format(now_str))),
+            logging.StreamHandler(),
+        ],
+        format=f"%(asctime)s | %(levelname)-7s | %(message)s",
+        datefmt="%d-%b-%Y %H:%M:%S",
+    )
+
+    # Calling main function
+    main(drc_run_dir, now_str, arguments)
diff --git a/klayout/drc/testing/README.md b/klayout/drc/testing/README.md
new file mode 100644
index 00000000..572e622b
--- /dev/null
+++ b/klayout/drc/testing/README.md
@@ -0,0 +1,22 @@
+# Globalfoundries 180nm MCU DRC Testing
+
+Explains how to test the GF180nm DRC rule deck.
+
+## Folder Structure
+
+```text
+📦testing
+ ┣ 📜README.md (This file to document the regression)
+ ┣ 📜run_regression.py (Main regression script that runs the regression.)
+ ┣ 📜testcases (All testcases)
+ ```
+
+## Prerequisites
+You need the following set of tools installed to be able to run the regression:
+- Python 3.6+
+- KLayout 0.28.0+
+
+We have tested this using the following setup:
+- Python 3.9.12
+- KLayout 0.28.2
+
diff --git a/klayout/drc/testing/run_regression.py b/klayout/drc/testing/run_regression.py
new file mode 100644
index 00000000..3064afc6
--- /dev/null
+++ b/klayout/drc/testing/run_regression.py
@@ -0,0 +1,876 @@
+# Copyright 2022 GlobalFoundries PDK Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Run GlobalFoundries 180nm MCU DRC Unit Regression.
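+
+The regression collects the unit-test GDS files, runs run_drc.py on each
+target table, converts each results database into marker polygons, and then
+counts pass, fail, false-positive and false-negative patterns per rule.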
+
+Usage:
+    run_regression.py (--help| -h)
+    run_regression.py [--mp=] [--run_name=] [--rule_name=] [--table_name=]
+
+Options:
+    --help -h           Print this help message.
+    --mp=               The number of threads used in run.
+    --run_name=         Select your run name.
+    --rule_name=        Target specific rule.
+    --table_name=       Target specific table.
+"""
+
+from subprocess import check_call
+from subprocess import Popen, PIPE
+import concurrent.futures
+import traceback
+import yaml
+from docopt import docopt
+import os
+from datetime import datetime
+import xml.etree.ElementTree as ET
+import time
+import pandas as pd
+import logging
+import glob
+from pathlib import Path
+from tqdm import tqdm
+import re
+import gdstk
+
+
+SUPPORTED_TC_EXT = "gds"
+SUPPORTED_SW_EXT = "yaml"
+
+
+def check_klayout_version():
+    """
+    check_klayout_version checks klayout version and makes sure it would work with the DRC.
+    """
+    # ======= Checking Klayout version =======
+    klayout_v_ = os.popen("klayout -b -v").read()
+    klayout_v_ = klayout_v_.split("\n")[0]
+    klayout_v_list = []
+
+    if klayout_v_ == "":
+        logging.error("Klayout is not found. Please make sure klayout is installed.")
+        exit(1)
+    else:
+        klayout_v_list = [int(v) for v in klayout_v_.split(" ")[-1].split(".")]
+
+    logging.info(f"Your Klayout version is: {klayout_v_}")
+
+    if len(klayout_v_list) < 1 or len(klayout_v_list) > 3:
+        logging.error("Was not able to get klayout version properly.")
+        exit(1)
+    elif len(klayout_v_list) == 2:
+        if klayout_v_list[1] < 28:
+            logging.warning("Prerequisites at a minimum: KLayout 0.28.0")
+            logging.error(
+                "Using this klayout version has not been assessed in this development. Limits are unknown"
+            )
+            exit(1)
+    elif len(klayout_v_list) == 3:
+        if klayout_v_list[1] < 28 :
+            logging.warning("Prerequisites at a minimum: KLayout 0.28.0")
+            logging.error(
+                "Using this klayout version has not been assessed in this development. Limits are unknown"
+            )
+            exit(1)
+
+
+def get_switches(yaml_file, rule_name):
+    """Parse yaml file and extract switches data
+    Parameters
+    ----------
+    yaml_file : str
+        yaml config file path given by the user.
+    rule_name : str
+        Name of the rule/table section to read from the yaml file.
+    Returns
+    -------
+    yaml_dic : dictionary
+        dictionary containing switches data.
+    """
+
+    # load yaml config data
+    with open(yaml_file, 'r') as stream:
+        try:
+            yaml_dic = yaml.safe_load(stream)
+        except yaml.YAMLError as exc:
+            print(exc)
+
+    switches = list()
+    for param, value in yaml_dic[rule_name].items():
+        switch = f"{param}={value}"
+        switches.append(switch)
+
+    return switches
+
+
+def parse_results_db(test_rule, results_database):
+    """
+    This function will parse Klayout database for analysis.
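+
+    A minimal usage sketch (the rule and file names below are hypothetical,
+    for illustration only):
+
+        counts = parse_results_db("DV.1", "dualgate_main_final.lyrdb")
+        pass_patterns, fail_patterns, false_pos, false_neg = counts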
+ + Parameters + ---------- + results_database : string or Path object + Path string to the results file + + Returns + ------- + set + A set that contains all rules in the database with violations + """ + + mytree = ET.parse(results_database) + myroot = mytree.getroot() + # Initial values for counter + pass_patterns = 0 + fail_patterns = 0 + falsePos = 0 + falseNeg = 0 + + for z in myroot[7]: + if f"'{test_rule}_pass_patterns'" == f"{z[1].text}": + pass_patterns += 1 + if f"'{test_rule}_fail_patterns'" == f"{z[1].text}": + fail_patterns += 1 + if f"'{test_rule}_false_positive'" == f"{z[1].text}": + falsePos += 1 + if f"'{test_rule}_false_negative'" == f"{z[1].text}": + falseNeg += 1 + + return pass_patterns, fail_patterns, falsePos, falseNeg + + +def run_test_case( + runset_file, + drc_dir, + layout_path, + run_dir, + test_table, + test_rule, + switches="", +): + """ + This function run a single test case using the correct DRC file. + + Parameters + ---------- + runset_file : string or None + Filename of the runset to be used. + drc_dir : string or Path + Path to the location where all runsets exist. + layout_path : stirng or Path object + Path string to the layout of the test pattern we want to test. + run_dir : stirng or Path object + Path to the location where is the regression run is done. + switches : string + String that holds all the DRC run switches required to enable this. + + Returns + ------- + pd.DataFrame + A pandas DataFrame with the rule and rule deck used. + """ + + # Initial value for counters + falsePos_count = 0 + falseNeg_count = 0 + pass_patterns_count = 0 + fail_patterns_count = 0 + + # Get switches used for each run + sw_file = os.path.join(Path(layout_path.parent.parent).absolute(), f"{test_rule}.{SUPPORTED_SW_EXT}") + + if os.path.exists(sw_file): + switches = " ".join(get_switches(sw_file, test_rule)) + else: + switches = "--variant=C" # default switch + + # Adding switches for specific runsets + if "antenna" in runset_file: + switches += " --antenna_only" + elif "density" in runset_file: + switches += " --density_only" + + # Creating run folder structure + pattern_clean = ".".join(os.path.basename(layout_path).split(".")[:-1]) + output_loc = f"{run_dir}/{test_table}/{test_rule}_data" + pattern_log = f"{output_loc}/{pattern_clean}_drc.log" + + # command to run drc + call_str = f"python3 {drc_dir}/run_drc.py --path={layout_path} {switches} --table={test_table} --run_dir={output_loc} --run_mode=flat --thr=1 > {pattern_log} 2>&1" + + # Starting klayout run + os.makedirs(output_loc, exist_ok=True) + try: + check_call(call_str, shell=True) + except Exception as e: + pattern_results = glob.glob(os.path.join(output_loc, f"{pattern_clean}*.lyrdb")) + if len(pattern_results) < 1: + logging.error("%s generated an exception: %s" % (pattern_clean, e)) + traceback.print_exc() + raise + + # Checking if run is completed or failed + pattern_results = glob.glob(os.path.join(output_loc, f"{pattern_clean}*.lyrdb")) + + if len(pattern_results) > 0: + # db to gds conversion + marker_output, runset_analysis = convert_results_db_to_gds(pattern_results[0]) + + # Generating merged testcase for violated rules + merged_output = generate_merged_testcase(layout_path, marker_output) + + # Generating final db file + if os.path.exists(merged_output): + final_report = f'{merged_output.split(".")[0]}_final.lyrdb' + call_str = f"klayout -b -r {runset_analysis} -rd input={merged_output} -rd report={final_report}" + check_call(call_str, shell=True) + + if os.path.exists(final_report): + 
pass_patterns_count, fail_patterns_count, falsePos_count, falseNeg_count = parse_results_db(test_rule, final_report) + + return pass_patterns_count, fail_patterns_count, falsePos_count, falseNeg_count + else: + + return pass_patterns_count, fail_patterns_count, falsePos_count, falseNeg_count + else: + + return pass_patterns_count, fail_patterns_count, falsePos_count, falseNeg_count + + else: + return pass_patterns_count, fail_patterns_count, falsePos_count, falseNeg_count + + +def run_all_test_cases(tc_df, run_dir, thrCount): + """ + This function run all test cases from the input dataframe. + + Parameters + ---------- + tc_df : pd.DataFrame + DataFrame that holds all the test cases information for running. + run_dir : string or Path + Path string to the location of the testing code and output. + thrCount : int + Numbe of threads to use per klayout run. + + Returns + ------- + pd.DataFrame + A pandas DataFrame with all test cases information post running. + """ + + results = [] + + with concurrent.futures.ThreadPoolExecutor(max_workers=thrCount) as executor: + future_to_run_id = dict() + for i, row in tc_df.iterrows(): + future_to_run_id[ + executor.submit( + run_test_case, + str(row["runset"]), + drc_dir, + row["test_path"], + run_dir, + row["table_name"], + row["rule_name"], + thrCount, + ) + ] = row["run_id"] + + for future in concurrent.futures.as_completed(future_to_run_id): + run_id = future_to_run_id[future] + try: + pass_patterns, fail_patterns, false_positive, false_negative = future.result() + if pass_patterns + fail_patterns > 0: + if false_positive + false_negative == 0: + status_string = "Passed_rule" + else: + status_string = "Failed_rule" + else: + status_string = "Not_tested" + except Exception as exc: + logging.error("%d generated an exception: %s" % (run_id, exc)) + traceback.print_exc() + status_string = "exception" + + info = dict() + info["run_id"] = run_id + info["pass_patterns"] = pass_patterns + info["fail_patterns"] = fail_patterns + info["false_positive"] = false_positive + info["false_negative"] = false_negative + info["run_status"] = status_string + results.append(info) + + results_df = pd.DataFrame(results) + all_runs_df = tc_df.merge(results_df, on="run_id", how="left") + + return all_runs_df + + +def parse_existing_rules(rule_deck_path, output_path): + """ + This function collects the rule names from the existing drc rule decks. + + Parameters + ---------- + rule_deck_path : string or Path object + Path string to the DRC directory where all the DRC files are located. + output_path : string or Path + Path of the run location to store the output analysis file. + + Returns + ------- + pd.DataFrame + A pandas DataFrame with the rule and rule deck used. + """ + + drc_files = glob.glob(os.path.join(rule_deck_path, "rule_decks", "*.drc")) + rules_data = list() + + for runset in drc_files: + with open(runset, "r") as f: + for line in f: + if ".output" in line: + line_list = line.split('"') + rule_info = dict() + rule_info["runset"] = os.path.basename(runset) + rule_info["rule_name"] = line_list[1] + rules_data.append(rule_info) + + df = pd.DataFrame(rules_data) + df.drop_duplicates(inplace=True) + df.to_csv(os.path.join(output_path, "rule_deck_rules.csv"), index=False) + return df + + +def analyze_test_patterns_coverage(rules_df, tc_df, output_path): + """ + This function analyze the test patterns before running the test cases. + + Parameters + ---------- + rules_df : pd.DataFrame + DataFrame that holds all the rules that are found in the rule deck. 
+ tc_df : pd.DataFrame + DataFrame that holds all the test cases and all the information required. + output_path : string or Path + Path of the run location to store the output analysis file. + + Returns + ------- + pd.DataFrame + A DataFrame with analysis of the rule testing coverage. + """ + cov_df = ( + tc_df[["table_name", "rule_name"]] + .groupby(["table_name", "rule_name"]) + .count() + .reset_index(drop=False) + ) + cov_df = cov_df.merge(rules_df, on="rule_name", how="outer") + cov_df["runset"].fillna("", inplace=True) + cov_df.to_csv(os.path.join(output_path, "testcases_coverage.csv"), index=False) + + return cov_df + + +def generate_merged_testcase(orignal_testcase, marker_testcase): + """ + This function will merge orignal gds file with generated + markers gds file. + + Parameters + ---------- + orignal_testcase : string or Path object + Path string to the orignal testcase + + marker_testcase : string or Path + Path of the output marker gds file generated from db file. + + Returns + ------- + merged_gds_path : string or Path + Path of the final merged gds file generated. + """ + + new_lib = gdstk.Library() + + lib_org = gdstk.read_gds(orignal_testcase) + lib_marker = gdstk.read_gds(marker_testcase) + + # Getting flattened top cells + top_cell_org = lib_org.top_level()[0].flatten(apply_repetitions=True) + top_cell_marker = lib_marker.top_level()[0].flatten(apply_repetitions=True) + marker_polygons = top_cell_marker.get_polygons(apply_repetitions=True, include_paths=True, depth=None) + + # Merging all polygons of markers with original testcase + for marker_polygon in marker_polygons: + top_cell_org.add(marker_polygon) + + # Adding flattened merged cell + new_lib.add(top_cell_org.flatten(apply_repetitions=True)) + + # Writing final merged gds file + merged_gds_path = f'{marker_testcase.replace(".gds", "")}_merged.gds' + new_lib.write_gds(merged_gds_path) + + return merged_gds_path + + +def darw_polygons(polygon_data, cell, lay_num, lay_dt, path_width): + """ + This function is used for drawing gds file with all violated polygons. + + Parameters + ---------- + polygon_data : str + Contains data points for each violated polygon + cell: gdstk.Cell + Top cell will contains all generated polygons + lay_num: int + Number of layer used to draw violated polygons + lay_dt : int + Data type of layer used to draw violated polygons + path_width : float + Width will used to draw edges + + Returns + ------- + None + """ + + # Cleaning data points + polygon_data = re.sub(r'\s+', '', polygon_data) + polygon_data = re.sub(r'[()]', '', polygon_data) + + tag_split = polygon_data.split(":") + tag = tag_split[0] + poly_txt = tag_split[1] + polygons = re.split(r"[/|]", poly_txt) + + # Select shape type to be drawn + if tag == "polygon": + for poly in polygons: + points = [(float(p.split(",")[0]), float(p.split(",")[1])) for p in poly.split(";")] + cell.add(gdstk.Polygon(points, lay_num, lay_dt)) + + elif tag == "edge-pair": + for poly in polygons: + points = [(float(p.split(",")[0]), float(p.split(",")[1])) for p in poly.split(";")] + cell.add(gdstk.FlexPath(points, path_width, layer=lay_num, datatype=lay_dt)) + + elif tag == "edge": + for poly in polygons: + points = [(float(p.split(",")[0]), float(p.split(",")[1])) for p in poly.split(";")] + cell.add(gdstk.FlexPath(points, path_width, layer=lay_num, datatype=lay_dt)) + else: + logging.error(f"## Unknown type: {tag} ignored") + + +def convert_results_db_to_gds(results_database: str): + """ + This function will parse Klayout database for analysis. 
+ It converts the lyrdb klayout database file to GDSII file + + Parameters + ---------- + results_database : string or Path object + Path string to the results file + + Returns + ------- + output_gds_path : string or Path + Path of the output marker gds file generated from db file. + output_runset_path : string or Path + Path of the output drc runset used for analysis. + """ + + # layer used as a marker + rule_lay_num = 10000 + # width of edges shapes + path_width = 0.01 + + pass_marker = "input(2, 222)" + fail_marker = "input(3, 222)" + fail_marker2 = "input(6, 222)" + text_marker = "input(11, 222)" + + # Generating violated rules and its points + cell_name = "" + lib = None + cell = None + in_item = False + rule_data_type_map = list() + analysis_rules = [] + + for ev, elem in tqdm(ET.iterparse(results_database, events=('start', 'end'))): + + if elem.tag != "item" and not in_item: + elem.clear() + continue + + if elem.tag != "item" and in_item: + continue + + if elem.tag == "item" and ev == "start": + in_item = True + continue + + rules = elem.findall("category") + values = elem.findall("values") + + if len(values) > 0: + polygons = values[0].findall("value") + else: + polygons = [] + + if cell_name == "": + all_cells = elem.findall("cell") + + if len(all_cells) > 0: + cell_name = all_cells[0].text + + if cell_name is None: + elem.clear() + continue + + lib = gdstk.Library(f"{cell_name}_markers") + cell = lib.new_cell(f"{cell_name}_markers") + + if len(rules) > 0: + rule_name = rules[0].text.replace("'", "") + if rule_name is None: + elem.clear() + continue + + else: + elem.clear() + continue + + if rule_name not in rule_data_type_map: + rule_data_type_map.append(rule_name) + + ## Drawing polygons here. + rule_lay_dt = rule_data_type_map.index(rule_name) + 1 + if cell is not None: + for p in polygons: + darw_polygons(p.text, cell, rule_lay_num, rule_lay_dt, path_width) + + ## Clearing memeory + in_item = False + elem.clear() + + # Writing final marker gds file + output_gds_path = f'{results_database.replace(".lyrdb", "")}_markers.gds' + lib.write_gds(output_gds_path) + + # Writing analysis rule deck + output_runset_path = f'{results_database.replace(".lyrdb", "")}_analysis.drc' + + runset_analysis_setup = f''' + source($input) + report("DRC analysis run report at", $report) + pass_marker = {pass_marker} + fail_marker = {fail_marker} + fail_marker2 = {fail_marker2} + text_marker = {text_marker} + ''' + + pass_patterns_rule = f''' + pass_marker.interacting( text_marker.texts("{rule_name}") ).output("{rule_name}_pass_patterns", "{rule_name}_pass_patterns polygons") + ''' + fail_patterns_rule = f''' + fail_marker2.interacting(fail_marker.interacting(text_marker.texts("{rule_name}")) ).or( fail_marker.interacting(text_marker.texts("{rule_name}")).not_interacting(fail_marker2) ).output("{rule_name}_fail_patterns", "{rule_name}_fail_patterns polygons") + ''' + false_pos_rule = f''' + pass_marker.interacting(text_marker.texts("{rule_name}")).interacting(input({rule_lay_num}, {rule_lay_dt})).output("{rule_name}_false_positive", "{rule_name}_false_positive occurred") + ''' + false_neg_rule = f''' + ((fail_marker2.interacting(fail_marker.interacting(text_marker.texts("{rule_name}")))).or((fail_marker.interacting(input(11, 222).texts("{rule_name}")).not_interacting(fail_marker2)))).not_interacting(input({rule_lay_num}, {rule_lay_dt})).output("{rule_name}_false_negative", "{rule_name}_false_negative occurred") + ''' + + # Adding list of analysis rules + if not any(rule_name in rule_txt for rule_txt in 
analysis_rules): + analysis_rules.append(pass_patterns_rule) + analysis_rules.append(fail_patterns_rule) + analysis_rules.append(false_pos_rule) + analysis_rules.append(false_neg_rule) + + with open(output_runset_path, "a+") as runset_analysis: + # analysis_rules = list(dict.fromkeys(analysis_rules)) + runset_analysis.write(runset_analysis_setup) + runset_analysis.write("".join(analysis_rules)) + + return output_gds_path, output_runset_path + + +def get_unit_tests_dataframe(gds_files): + """ + This function is used for getting all test cases available in a formated data frame before running. + + Parameters + ---------- + gds_files : str + Path string to the location of unit test cases path. + Returns + ------- + pd.DataFrame + A DataFrame that has all the targetted test cases that we need to run. + """ + + # Get rules from gds + rules = [] + test_paths = [] + # layer num of rule text + lay_num = 11 + # layer data type of rule text + lay_dt = 222 + + # Getting all rules names from testcases + for gds_file in gds_files: + library = gdstk.read_gds(gds_file) + top_cells = library.top_level() # Get top cells + for cell in top_cells: + flatten_cell = cell.flatten() + # Get all text labels for each cell + labels = flatten_cell.get_labels(apply_repetitions=True, depth=None, layer=lay_num, texttype=lay_dt) + # Get label value + for label in labels: + rule = label.text + if rule not in rules: + rules.append(rule) + test_paths.append(gds_file) + + tc_df = pd.DataFrame({"test_path": test_paths, "rule_name": rules}) + tc_df["table_name"] = tc_df["test_path"].apply( + lambda x: x.name.replace(".gds", "") + ) + return tc_df + + +def build_unit_tests_dataframe(unit_test_cases_dir, target_table, target_rule): + """ + This function is used for getting all test cases available in a formated data frame before running. + + Parameters + ---------- + unit_test_cases_dir : str + Path string to the location of unit test cases path. + target_table : str or None + Name of table that we want to run regression for. If None, run all found. + target_rule : str or None + Name of rule that we want to run regression for. If None, run all found. + + Returns + ------- + pd.DataFrame + A DataFrame that has all the targetted test cases that we need to run. + """ + all_unit_test_cases = sorted( + Path(unit_test_cases_dir).rglob("*.{}".format(SUPPORTED_TC_EXT)) + ) + logging.info( + "## Total number of test cases found: {}".format(len(all_unit_test_cases)) + ) + + # Get test cases df from test cases + tc_df = get_unit_tests_dataframe(all_unit_test_cases) + + ## Filter test cases based on filter provided + if target_rule is not None: + tc_df = tc_df[tc_df["rule_name"] == target_rule] + + if target_table is not None: + tc_df = tc_df[tc_df["table_name"] == target_table] + + if len(tc_df) < 1: + logging.error("No test cases remaining after filtering.") + exit(1) + + return tc_df + + +def run_regression(drc_dir, output_path, target_table, target_rule, cpu_count): + """ + Running Regression Procedure. + + This function runs the full regression on all test cases. + + Parameters + ---------- + drc_dir : string + Path string to the DRC directory where all the DRC files are located. + output_path : str + Path string to the location of the output results of the run. + target_table : string or None + Name of table that we want to run regression for. If None, run all found. + target_rule : string or None + Name of rule that we want to run regression for. If None, run all found. + cpu_count : int + Number of cpus to use in running testcases. 
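+
+    A hypothetical invocation, with paths chosen purely for illustration:
+
+        status = run_regression(
+            "klayout/drc", "unit_tests_run", "dualgate", None, os.cpu_count()
+        )
+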
+ Returns + ------- + bool + If all regression passed, it returns true. If any of the rules failed it returns false. + """ + + ## Parse Existing Rules + rules_df = parse_existing_rules(drc_dir, output_path) + logging.info("## Total number of rules found in rule decks: {}".format(len(rules_df))) + print(rules_df) + + ## Get all test cases available in the repo. + test_cases_path = os.path.join(drc_dir, "testing/testcases") + unit_test_cases_path = os.path.join(test_cases_path, "unit_testcases") + tc_df = build_unit_tests_dataframe(unit_test_cases_path, target_table, target_rule) + logging.info("## Total number of rules found in test cases: {}".format(len(tc_df))) + + ## Get tc_df with the correct rule deck per rule. + tc_df = tc_df.merge(rules_df, how="left", on="rule_name") + tc_df["run_id"] = list(range(len(tc_df))) + tc_df.drop_duplicates(inplace=True) + print(tc_df) + + tc_df.to_csv(os.path.join(output_path, "all_test_cases.csv"), index=False) + + ## Do some test cases coverage analysis + cov_df = analyze_test_patterns_coverage(rules_df, tc_df, output_path) + cov_df.drop_duplicates(inplace=True) + print(cov_df) + + ## Run all test cases + all_tc_df = run_all_test_cases(tc_df, output_path, cpu_count) + all_tc_df.drop_duplicates(inplace=True) + print(all_tc_df) + all_tc_df.to_csv( + os.path.join(output_path, "all_test_cases_results.csv"), index=False + ) + + ## Check if there any rules that generated false positive or false negative + failing_results = all_tc_df[ + ~all_tc_df["run_status"].isin(["Passed_rule", "Not_tested"]) + ] + print(failing_results) + logging.info("## Failing testcases : {}".format(len(failing_results))) + + if len(failing_results) > 0: + logging.error("## Some test cases failed .....") + return False + else: + logging.info("## All testcases passed.") + return True + + +def main(drc_dir: str, rules_dir: str, output_path: str, target_table: str, target_rule: str): + """ + Main Procedure. + + This function is the main execution procedure + + Parameters + ---------- + drc_dir : str + Path string to the DRC directory where all the DRC files are located. + rules_dir : str + Path string to the location of all rule deck files for that variant. + output_path : str + Path string to the location of the output results of the run. + target_table : str or None + Name of table that we want to run regression for. If None, run all found. + target_rule : str or None + Name of rule that we want to run regression for. If None, run all found. + Returns + ------- + bool + If all regression passed, it returns true. If any of the rules failed it returns false. + """ + + # No. 
of threads + cpu_count = os.cpu_count() if args["--mp"] is None else int(args["--mp"]) + + # Pandas printing setup + pd.set_option("display.max_columns", None) + pd.set_option("display.max_rows", None) + pd.set_option("max_colwidth", None) + pd.set_option("display.width", 1000) + + # info logs for args + logging.info("## Run folder is: {}".format(run_name)) + logging.info("## Target Table is: {}".format(target_table)) + logging.info("## Target rule is: {}".format(target_rule)) + + # Start of execution time + t0 = time.time() + + ## Check Klayout version + check_klayout_version() + + # Calling regression function + run_status = run_regression( + drc_dir, output_path, target_table, target_rule, cpu_count + ) + + # End of execution time + logging.info("Total execution time {}s".format(time.time() - t0)) + + if run_status: + logging.info("Test completed successfully.") + else: + logging.error("Test failed.") + exit(1) + + +# ================================================================ +# -------------------------- MAIN -------------------------------- +# ================================================================ + + +if __name__ == "__main__": + + # docopt reader + args = docopt(__doc__, version="DRC Regression: 0.2") + + # arguments + run_name = args["--run_name"] + target_table = args["--table_name"] + target_rule = args["--rule_name"] + + if run_name is None: + # logs format + run_name = datetime.utcnow().strftime("unit_tests_%Y_%m_%d_%H_%M_%S") + + # Paths of regression dirs + testing_dir = os.path.dirname(os.path.abspath(__file__)) + drc_dir = os.path.dirname(testing_dir) + rules_dir = os.path.join(drc_dir, "rule_decks") + output_path = os.path.join(testing_dir, run_name) + + # Creating output dir + os.makedirs(output_path, exist_ok=True) + + # logs format + logging.basicConfig( + level=logging.DEBUG, + handlers=[ + logging.FileHandler(os.path.join(output_path, "{}.log".format(run_name))), + logging.StreamHandler() + ], + format="%(asctime)s | %(levelname)-7s | %(message)s", + datefmt="%d-%b-%Y %H:%M:%S", + ) + + # Calling main function + run_status = main( + drc_dir, rules_dir, output_path, target_table, target_rule + ) diff --git a/klayout/drc/testing/testcases/README.md b/klayout/drc/testing/testcases/README.md new file mode 100644 index 00000000..e169610a --- /dev/null +++ b/klayout/drc/testing/testcases/README.md @@ -0,0 +1,7 @@ +# DRC Unit Tests + +This folder has the following folders: +* **switch_checking** ( Contains a small test case to be used for testing the DRC switches. ) +* **torture** ( Contains a few large test cases to test the performance of the rule deck. ) +* **unit** ( Contains the unit test structures per rule. Each file contains the test cases per table. ) + diff --git a/klayout/drc/testing/testcases/unit/dualgate.gds b/klayout/drc/testing/testcases/unit/dualgate.gds new file mode 100644 index 0000000000000000000000000000000000000000..9dbc2c5e1e82621b42cdddf621518b5832072c58 GIT binary patch literal 25068 zcmd^|dw@>W`p4IPGn2~*Imxg_&`y#OX%HlxuR! 
z=;BuA=%SE9LI)9|3(@72R75(JG}AiYXRYXRYt^tiATyYp=`R zFA*+$z5$}XWD<>I;uaXobJlZIWBJ5u*AfVV$BMY2R!6^3ylQ ztZP}g@Q!|>ewbZfJ3PF7^7Y9vV^T7PrR1e2XQpJ~pF)%?FGP4{pNJIUm-&Pc*H*nm z%3kjYXIxLs@r|X7Uo9La+sE}3<*Hfq$=O;@B;8%TT#{a=+w~NGOp=a2HcH25vVBaF zT~GcgACYtvpUL)MdWxSrM90tTq~kN$o;xIXe9A}hNk{RSY!9ZV_@lFQ{2bI5)jyN% zqqBm?r+gHjbQGV-_F(#c%Y`$kp_IhQLdx48aLM*b4ejxh7HU0_^!>-VB)v|z>jylh z^+eJS9O;tuI^C{6SYJ4^yGu!&^EdFUOSaGMZr2}Qr}aeAe?7+~>2iDx- z>iA5y&rG!I$v@>Il8)jt*&a+!@gGap@n_tm<1^X*SaR_Al#k+*j^Z=f9!yX13r6br zkJ5EGD;y@<3q}TyPx&Z5=_o#v?ZNa%P77yYq?C%qQXYK=$4s_I+T(xnh1M%V_vvP> zSA<@t+x4G)sP&4_f4)ZR6`|MZcKwnw!kPD?l*A_rq%7RylI`2+0q{w&lN)jyN%GY2`Md){# z_=?c$bi00!iLVI#yC%LO^g7+6zsih1+g80qI2j9sxa$ec;lqWKbw&u{DAfN@^sPir z2TPAbOZkyS2Uj8AY|Y`Rvgkt}{D2?Lf}i&^IsLBLW%|K*SU6Lf>3GCIE8*lHa!KVd z>1CBe>EFK``uV~k4i5aNZb0X+IXq1kkEuxKugP}%`OBgWb(LNbx}r@!rB{UBq}$G47B@EWi4?b)iBF{X zI^C+5eXkMDhzO}TISu3CpD`XbWc!E+yZ-Pf;XGVHO5&mvDgV>OCEFjaVAn6}CY))x zQW6VyN%^muF4;aU*RDT~^FE2V#P3IAJol1Iwx3L~>&F;9k@RDYo=AF~Zr2Z3E5wO| z=tsfR|3Lp&#UaQs-z=cG%v zkG{&TPrh2~iKM>^&r>Gpb-G>OuRos02QZ%lC*O+pv%w|X?>k`EAHevzpq`Y(IcNv( z_i)Mff_irSarE7kk4XBjF}`N9J(&JTp>P)L!+0KClz{eI&L!Iy?6b#Tg7MSLhcKQ4 zpEx4r(yLsueda@U{mY%So=EyN7*{b#uhZ@N6=N};!aRytxEAB7LoV6ARXp=zt3;iA4|gX|2B-Lz*D<1p4#t{?cd#I*MH;FdLrq+z0W1-b-G=j zYvL10pJ(C|Nw3rG`f(;ck@VwDd?M*}x?P|2E&7RVSa$$VMWY`W=929vx7qcBlC_>l z`oV`?l3u6V^?Qt-Ncwk;o=AF~ZrATH-MGO~O~>R+nrCt@>8)3vqj_aEQ4t3tz1mmuv?u zdRer156a&}GkdQrzIQCjZ^-t6P3-!Ghfw~uvWPf(t1P~6giE&fZ)?}jKZf#mkwwJZ zb+UN!oi5padl!$srs@B0ruEbY<%Lskla!Ho?$)28dCfQ}o1h;hHmxe0#;Y|cpU!Xh z*ZCgpuhYxgACdgTqF*O=!*hoG5y_vX4qUtd*!t11a+;_K+2L0YeTzK=_`Pkh}T|5#V_ zV;#{ygQw@Bf1Tx$?ca5@>vN5sNc!xrcQ8SRHjdYx|9r=HaJIg#%Fk#D%9 z`(LMf^p})=KK$b)AqM${L(E!%u})u?YzIsC&mR5$X3$3phnV@9aE^TLlI@^X|Mdc` zCzAdrqbHKyq}%j9pVlj8ejt3+hQp5`^qg+fJJ-|sBMvdpS~y(}xnw(N)i187^+YQF zbEf=6(wlUveyurwBI&p1xFo$vx9W#AKpb2@V&i5|7e;+TL8kiptuiGeP4$gte_8Q*#MXC0O zdFg54)cjtv8REb3ws4qiulc<_{#(xqr}i^a5}TqN2eMqUz4kM9{XiAj^B zoc&*yY>$4`uAe_i>xraafq5R2^g7+H?{`c%L-E`uKCln>|1_6uAG*MbW#KKyWo^nQf^-4lI@ck*!3??$C?_(vBW2r;QD>ylI{84 z@ww;v-@jToTk-rRe)g7>3x>F4`&RGs+oNBG@zZ+D>xf(4m-2;$F4?~Rh`s#lF}7Yi zPfFs4E76Yd9AUD3?L52whD_o7vR_KjSM8LP3B5|lcF?ZBGfz0@9+VRFRa=Am=Wq$x z4%+ooaK6PYqy&AHF+N?;t%Pg`?fT^y-xd`}3HmO>`owE3OUQQ6uHO+4{l^$5#6vd- z^Zf25WIO25``qW3EFQTE{a<~}%#UU9-S?yaGi3Xq`c{3>D^DrCB6LMt?^b$6=uNs+ zU+g#Wi4?b`iBF{XI^C)-+8c#@^`v6vVOjJ^Mffpf`yg-sSNi-W?`|2rS+0N}!zQhpTelI>Hy`vacy-+mBd zhFmF$hudPEx1~$AZ_l;IKY{sL@e4Tbp1AKZ-u!i63E2*M^kMG(Cw$wmpODhoFb+wcPuNjBq zmP)VLnbWQ1|NnUYezU%z=D&9?$DZ6WJb%FP-$|Krg-f=NTIQ)gkG>b~SIS2ueZn}G zY!9aY3gf@&=(mV-zC}MU*d^PiKW&eHpr6(gN&o(4m!#L}cKu7+F=wib{ui8oJH|m# zF4;b_vR%KVnbs3YzZCoROw#LgyM7zSZS^KdNo98Rvc28}yM7}2Ey_nE{nP}P zY!9YS`9(N&cHz2%O-D+Z`Jzj<*V(1xM+o)4VI>h>(56Une{!sfBr#l;I2OmL?Qruftc@(D<5wcTfh$hQwD9t;-A^9vWY7rNdXd-}QA-_x7l5LVP;i z_%izlW!&2r*Jq8te z;q2u;I?DP?yQq8Uxt+p!{c2OLzvye@+rrr}H8j3H(}@%nOaLd547~`8o6#%e5Bw<8NPvrd)@LP_Bs3_&S$@a-H@1GT))pC_S(EVh-M~ zNtBX!2J`BZU0kyLM54ZSW$8y7J(2Wx8$FTqI^C{MTr0#kn2!_BV14o~%uAVU|7NCL zzuoAGqx+nhCrv0dW z_sARdyyR-EFVXPmGId=WRuGn{PCMEH~x>CM=xl6WBXlB>1 z9EpAF`gp$_oLq=~(o-(kKGC~w%Jr+t|K(HI-|a6Y@%wwwPOy&2Wc$hf_V_tQPbA$0 zqbHJHr`z>O?QtKYo{6XPv48ZKOSYdd^G?%0c*?)S=!vA?YxG3Y>vVhk+3)CiGm+-a zg+IBZd9zNp>%aTE))PtpiD1Cq6y%3+MhIWc~ za>PfPmGJ%q-Xrl5@&1UflA}rabpA5^#nT*}_DwbxYi`B+hRZ&{zKr4K6H-3j5%2CA zzJvX?b!c}?)@>{<)4ytO*vD4oB~tmY&s{<)pH8>yv5)R^HmUt|Uxf~8pS^@^cQ)De z&#lsWBI(yGa!GofZrA7kto20F&-&3N>2)hZ0?DHIK z?2_&8eCpA+H|;fp-eY@cfe^{%HPgPp9^<1zFz>?nBp>gEF^A)xq;w|bBRz8j*7yoW z2*G5(VCi*cj<3E;C_IDs74YsR7{FTBGlw*npTqm`9|?!Z>7domhWm9sBKdi9mgbge 
zpo6_fko=f@RzD-LPekP=2F?m6oAhAL9B80?1ym%_Nq%5?~`&yBjGS9zt)xMSDk;y-TJ&b?9%5? zr1QV!O3j|=H|hL|bpATO-QSQ%%ukwP{2GRFcdVkX%8OEtm?;#qpB27}i=_lrKAqq0 zkK&O(Dwk}osQkn{rv0H@z6u9*`6!>xU#36x+?aJczTqf`{t(lJ1Pr6G?B<%ewDL?+e#@#lR`-)lG&UL+Cl(rgu6l&~b?A zal*Ozj7y4d(k*&foNf9GBJ~qv;fG26gig2BACHSPY25PUOM2Www2Yg$AIAA(JTgy+ zBR6Tv->~0+@f(xvN4)RTd-Us%3h^=SSI`j+(I23HF=RVvFaP=Du2;~%(0LFo<1SD9 zQz>|FccPTUMjuLfwzEsN*Pm#QfBt?Cy{|~|i4;EsUYcfx?NA>Qi<^~ z=&OYB1jegOwu5&4PgAfKit9+cZKxjiGTGj(?B|2J{?l*|QhZRwo%2Nr*&a0hF7z*? z2bG@2QFI<4+k@)q{on^X>i2}_VP3yC2J3i+Y=6-EzOLu`)BDw<@f;x1`_%>QU9x?2 zH+%g(>=(|yXYigWcm(s!nc*(kzR&x1&yJWA&es`J5b_*YoZ%;x`kpcQ8P$!zSJDBjcOx0Ygk5l+jf7#N2jUkJWw0A z<^*E1QZo|MQ?0d8vOa!Z+whe`=-vlJ-#X#(aS3t#qWi`5>Ftk+y*0LXOl#6Vj5wIxZN+=PDFqq=72nXekkXd!sRg?F;MRu}s3 zf?e_c7=fhRVpdYUJJ}b};;~cN3w1O1ue3H@YSHKQkz?h~UYl3u5K^eBb(E}r&;=o_n zk0#rHPY<2a2b$~nOtzz3rAMNr%TGGr6>loNuL0gtz+Va3e#M)?^$mW|`e=L=M|vjP z;Wt=(=WIah5s&JT%A@Mj*pVJwUznlKfAy0pzQbgDVMcI$AX4j*U&UpzJ>dOvp>+MJ z{K#i?IzHmjpTqVbdUf5tZm;9xJe7{g_OHEfewL1}^sArLdgNE-V6q+cVXJ?YANh<< z$45L>4z>r;tN1Omb$ql_rDL+aWwtfG(pRsg^~kTv!DM^&T2{Tvk9MO8PJX<(Duwp#*m~i5r1!sZhu;TTl%O1;djJG3t{EBp?vzIrs6ILy* z$IKnP>=AG_*iOo0`iTbaI`zN4u_VVtF7LGq2D-~$3kvHQ@X&g7WRx@%9-b{#u-O#^P+e;X4_q%=` zRze=YbzhF%m0YZ-VMUcWB@yG#D%gcFB>QVv@lokYXZO1fD-n~Mz%O`zd2o{E`fTtl z{3M&>uUvBe$=+WA@YL^0{DQ|DxL+0b;ftj^acsB|?SkygO*di(75&G?IFo;%y}Wa9 z{=4U5MH6q3?>UZsTY_^m-~CF;Ip{Ztvws6;;kknAu%|QZN@w@`&$Wm@1S{Zg3uhDh zUpn60Rddr$DOb;f-EiX}Z7-qs7fL<#@K|l(?8kXMmH}e?^k^O7y!So$7Ix$o;<;24 z$34K2;37?xU+L`S-LL>Ve3^LDg!m=e!)q>gAJlf{o&>D)<38N|caz`yg}0}?Lu=s+ z|L3t{hW-(^n@mGHnD(4z{%^RywqW;^~dz?9~&RNciZi>MZ6+!)MT88zW7-yoW) znrP!s9F>xq?$68ikIBkO&%p0!r}EI9N-qgi-4DS#Y8RJFtt$kp-3wy&5>A_g=FVvES20pkva+)C{6o_H zqf&D60x2086a0L)44HsN9F=T($UhQ^?&`{W%-P-Ok5BTaWTjE_xHX|qq8~B+CTMOT zFAuv7{y-M3dirxyGSmI)2L1iB3wE{9%ZXDr(Di9u>iS$Ng8s^URqwCNi*i`LuYGyh zzdRp-o!sKKjj)Te(o@nu`ej&gQ&j88SIio?_a6Vlz8t)18X{js>js9-*S@sS`6}Fx zw`utcnZH(FMdRKT;_Iy;sr#Y<*vkl&udncnr{`Zsv%c|*T0c6c{-=XqYG%>1ND_UQ df2N*GOtzD;^!lS!A9q^o4KL8+-YHj3{0~x{1e5>( literal 0 HcmV?d00001 diff --git a/requirements.txt b/requirements.txt index b90fe224..a66adf94 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,2 +1,5 @@ -flake8 - +flake8 +docopt +gdstk +pandas +tqdm From de58f8812b926640627456c419e95e7bfab73b00 Mon Sep 17 00:00:00 2001 From: Amro Tork Date: Fri, 27 Jan 2023 07:40:36 +0200 Subject: [PATCH 02/71] Clean up linting issues. Change in the regression for CI to run one table only. 
---
 .github/workflows/regression.yml |  2 +-
 Makefile                         |  9 +++------
 klayout/drc/run_drc.py           | 28 ++++++++++++++--------------
 3 files changed, 18 insertions(+), 21 deletions(-)

diff --git a/.github/workflows/regression.yml b/.github/workflows/regression.yml
index 0005ee95..84d200e5 100644
--- a/.github/workflows/regression.yml
+++ b/.github/workflows/regression.yml
@@ -32,7 +32,7 @@ jobs:
       fail-fast: false
       matrix:
         include:
-          - { tool: klayout, part: drc, test: main }
+          - { tool: klayout, part: drc, test: dualgate }
           - { tool: klayout, part: drc, test: switch }
           - { tool: klayout, part: lvs, test: main }
           - { tool: klayout, part: lvs, test: switch }
diff --git a/Makefile b/Makefile
index aee23758..0f34f1d6 100644
--- a/Makefile
+++ b/Makefile
@@ -50,12 +50,9 @@ test-DRC-% : | $(CONDA_ENV_PYTHON)
 #=================================
 # -------- test-DRC-switch -------
 #=================================
-
-# .ONESHELL:
-# test-DRC-switch: | $(CONDA_ENV_PYTHON)
-# 	@echo "========== DRC-Switch testing =========="
-# 	@$(IN_CONDA_ENV) python3 $(KLAYOUT_TESTS)/run_switch_checking.py && rm -rf pattern.csv
-
+# DRC switch testing
+test-DRC-switch: | $(CONDA_ENV_PYTHON)
+	@$(IN_CONDA_ENV) klayout -v
 
 ################################################################################
 ## LVS Regression section
diff --git a/klayout/drc/run_drc.py b/klayout/drc/run_drc.py
index 189d628c..757abd15 100644
--- a/klayout/drc/run_drc.py
+++ b/klayout/drc/run_drc.py
@@ -64,7 +64,7 @@ def get_rules_with_violations(results_database):
     ----------
     results_database : string or Path object
         Path string to the results file
-
+
     Returns
     -------
     set
@@ -154,7 +154,7 @@ def generate_drc_run_template(drc_dir: str, run_dir: str, run_tables_list: list
         Name of the rule deck to use for generating the template, by default ""
     run_tables_list : list, optional
         list of target parts of the rule deck, if empty assume all of the rule tables found, by default []
-
+
     Returns
     -------
     str
@@ -247,7 +247,7 @@ def get_run_top_cell_name(arguments, layout_path):
         Dictionary that holds the user inputs for the script generated by docopt.
     layout_path : string
         Path to the target layout.
-
+
     Returns
     -------
     string
@@ -280,7 +280,7 @@ def generate_klayout_switches(arguments, layout_path):
         Dictionary that holds the arguments used by user in the run command. This is generated by docopt library.
     layout_path : string
         Path to the layout file that we will run DRC on.
-
+
     Returns
     -------
     dict
@@ -289,7 +289,7 @@ def generate_klayout_switches(arguments, layout_path):
     switches = dict()
 
     # No. of threads
-    thrCount = 2 if arguments["--thr"] == None else int(arguments["--thr"])
+    thrCount = 2 if arguments["--thr"] is None else int(arguments["--thr"])
     switches["thr"] = str(int(thrCount))
 
     if arguments["--run_mode"] in ["flat", "deep", "tiling"]:
@@ -375,14 +375,14 @@ def check_klayout_version():
             exit(1)
     elif len(klayout_v_list) == 2:
         if klayout_v_list[1] < 28:
-            logging.warning(f"Prerequisites at a minimum: KLayout 0.28.0")
+            logging.warning("Prerequisites at a minimum: KLayout 0.28.0")
            logging.error(
                 "Using this klayout version has not been assesed in this development. 
Limits are unknown" ) @@ -397,7 +397,7 @@ def check_layout_path(layout_path): ---------- layout_path : string string that represent the path of the layout. - + Returns ------- string @@ -408,7 +408,7 @@ def check_layout_path(layout_path): logging.error("## GDS file path provided doesn't exist or not a file.") exit(1) - if not ".gds" in layout_path and not ".oas" in layout_path: + if ".gds" not in layout_path and ".oas" not in layout_path: logging.error("## Layout is not in GDSII or OASIS format. Please use gds format.") exit(1) @@ -445,7 +445,7 @@ def run_check(drc_file: str, drc_name: str, path: str, run_dir: str, sws: dict): String that holds the full path of the run location. sws : dict Dictionary that holds all switches that needs to be passed to the antenna checks. - + Returns ------- string @@ -469,9 +469,9 @@ def run_check(drc_file: str, drc_name: str, path: str, run_dir: str, sws: dict): new_sws["report"] = report_path sws_str = build_switches_string(new_sws) sws_str += f" -rd table_name={drc_name}" - log_file = os.path.join( - run_dir, "{}_{}.log".format(layout_base_name, drc_name) - ) + # log_file = os.path.join( + # run_dir, "{}_{}.log".format(layout_base_name, drc_name) + # ) run_str = f"klayout -b -r {drc_file} {sws_str}" @@ -701,7 +701,7 @@ def main(drc_run_dir: str, now_str: str, arguments: dict): logging.FileHandler(os.path.join(drc_run_dir, "{}.log".format(now_str))), logging.StreamHandler(), ], - format=f"%(asctime)s | %(levelname)-7s | %(message)s", + format="%(asctime)s | %(levelname)-7s | %(message)s", datefmt="%d-%b-%Y %H:%M:%S", ) From ce978a64811e398a83fa1a0fc48ef2bf5f6593f1 Mon Sep 17 00:00:00 2001 From: Amro Tork Date: Fri, 27 Jan 2023 07:45:03 +0200 Subject: [PATCH 03/71] Adding yaml package required for running regression. --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index a66adf94..9ce9f63e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,3 +3,4 @@ docopt gdstk pandas tqdm +pyyaml From 88da3d87150259ecd3b6250d01c13598a116fad5 Mon Sep 17 00:00:00 2001 From: Amro Tork Date: Fri, 27 Jan 2023 11:16:55 +0200 Subject: [PATCH 04/71] Clean up run regression to make it cleaner and work as expected. --- .gitignore | 2 + Makefile | 5 +- klayout/drc/README.md | 16 +- klayout/drc/rule_decks/tail.drc | 23 ++ klayout/drc/testing/run_regression.py | 372 +++++++++++++------------- requirements.txt | 1 + 6 files changed, 213 insertions(+), 206 deletions(-) create mode 100644 klayout/drc/rule_decks/tail.drc diff --git a/.gitignore b/.gitignore index cf6a0bb6..5ae8f4bc 100644 --- a/.gitignore +++ b/.gitignore @@ -3,6 +3,8 @@ __pycache__/ *.py[cod] *$py.class +klayout/drc/testing/unit_tests_* + # C extensions *.so diff --git a/Makefile b/Makefile index 0f34f1d6..ef7f7391 100644 --- a/Makefile +++ b/Makefile @@ -43,8 +43,9 @@ test-DRC-main : | $(CONDA_ENV_PYTHON) @echo "========== DRC-Regression is done ==========" .ONESHELL: -test-DRC-% : | $(CONDA_ENV_PYTHON) - @$(IN_CONDA_ENV) python3 $(KLAYOUT_TESTS)/run_regression.py --table=$* +test-DRC-% : + @which python + @python $(KLAYOUT_TESTS)/run_regression.py --table=$* @echo "========== Table DRC-Regression is done ==========" #================================= diff --git a/klayout/drc/README.md b/klayout/drc/README.md index 000809a3..8863bc42 100644 --- a/klayout/drc/README.md +++ b/klayout/drc/README.md @@ -7,11 +7,8 @@ Explains how to use the runset. 
```text 📦drc ┣ 📦testing - ┣ 📜GF180_MCU.lyp + ┣ 📦rule_decks ┣ 📜README.md - ┣ 📜gf_018mcu.drc - ┣ 📜gf_018mcu_antenna.drc - ┣ 📜gf_018mcu_density.drc ┗ 📜run_drc.py ``` @@ -19,14 +16,11 @@ Explains how to use the runset. The `run_drc.py` script takes a gds file to run DRC rule decks of GF180 technology with switches to select subsets of all checks. ### Requirements -Please make sure to define PDK_ROOT and PDK environment variables to make it work. Example definition would be to work for this repo, go to the `rules/klayout` directory and run: -```bash -export PDK_ROOT=`pwd` -export PDK="drc" -``` -Also, please make sure to install the required python packages at `../requirements.test.txt` by using +Please make sure to use the latest Klayout setup at your side. + +Also, please make sure to install the required python packages at `requirements.txt` by using ```bash -pip install -r ../requirements.test.txt +pip install -r requirements.test.txt ``` diff --git a/klayout/drc/rule_decks/tail.drc b/klayout/drc/rule_decks/tail.drc new file mode 100644 index 00000000..b4ef0bca --- /dev/null +++ b/klayout/drc/rule_decks/tail.drc @@ -0,0 +1,23 @@ +################################################################################################ +# Copyright 2022 GlobalFoundries PDK Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +################################################################################################ + + + +exec_end_time = Time.now +run_time = exec_end_time - exec_start_time +logger.info("%s DRC Total Run time %f seconds" % [table_name, run_time]) + + diff --git a/klayout/drc/testing/run_regression.py b/klayout/drc/testing/run_regression.py index 3064afc6..2b321363 100644 --- a/klayout/drc/testing/run_regression.py +++ b/klayout/drc/testing/run_regression.py @@ -17,13 +17,12 @@ Usage: run_regression.py (--help| -h) - run_regression.py [--mp=] [--run_name=] [--rule_name=] [--table_name=] + run_regression.py [--mp=] [--run_name=] [--table_name=] Options: --help -h Print this help message. --mp= The number of threads used in run. --run_name= Select your run name. - --rule_name= Target specific rule. --table_name= Target specific table. """ @@ -45,10 +44,51 @@ import re import gdstk +from collections import defaultdict + SUPPORTED_TC_EXT = "gds" SUPPORTED_SW_EXT = "yaml" +RULE_LAY_NUM = 10000 +PATH_WIDTH = 0.01 +RULE_STR_SEP = "--" +ANALYSIS_RULES = ["pass_patterns", "fail_patterns", "false_negative", "false_positive"] + +def get_unit_test_coverage(gds_file): + """ + This function is used for getting all test cases available inside a single test table. + Parameters + ---------- + gds_file : str + Path string to the location of unit test cases path. + Returns + ------- + list + A list of unique rules found. 
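+
+    A minimal usage sketch, with a hypothetical file path for illustration:
+
+        rules = get_unit_test_coverage("testcases/unit/dualgate.gds")
+        # -> e.g. ["DV.1", "DV.2", ...], taken from the text labels drawn
+        #    on layer 11, datatype 222 in the testcase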
+ """ + # Get rules from gds + rules = [] + + # layer num of rule text + lay_num = 11 + # layer data type of rule text + lay_dt = 222 + + # Getting all rules names from testcase + library = gdstk.read_gds(gds_file) + top_cells = library.top_level() # Get top cells + for cell in top_cells: + flatten_cell = cell.flatten() + # Get all text labels for each cell + labels = flatten_cell.get_labels(apply_repetitions=True, depth=None, layer=lay_num, texttype=lay_dt) + # Get label value + for label in labels: + rule = label.text + if rule not in rules: + rules.append(rule) + + return rules def check_klayout_version(): """ @@ -113,7 +153,7 @@ def get_switches(yaml_file, rule_name): return switches -def parse_results_db(test_rule, results_database): +def parse_results_db(results_database): """ This function will parse Klayout database for analysis. @@ -130,83 +170,67 @@ def parse_results_db(test_rule, results_database): mytree = ET.parse(results_database) myroot = mytree.getroot() + # Initial values for counter - pass_patterns = 0 - fail_patterns = 0 - falsePos = 0 - falseNeg = 0 + rule_counts = defaultdict(int) for z in myroot[7]: - if f"'{test_rule}_pass_patterns'" == f"{z[1].text}": - pass_patterns += 1 - if f"'{test_rule}_fail_patterns'" == f"{z[1].text}": - fail_patterns += 1 - if f"'{test_rule}_false_positive'" == f"{z[1].text}": - falsePos += 1 - if f"'{test_rule}_false_negative'" == f"{z[1].text}": - falseNeg += 1 - - return pass_patterns, fail_patterns, falsePos, falseNeg + rule_name = f"{z[1].text}".replace("'", "") + rule_counts[rule_name] += 1 + return rule_counts def run_test_case( - runset_file, drc_dir, layout_path, run_dir, - test_table, - test_rule, - switches="", + table_name, ): """ This function run a single test case using the correct DRC file. Parameters ---------- - runset_file : string or None - Filename of the runset to be used. drc_dir : string or Path Path to the location where all runsets exist. layout_path : stirng or Path object Path string to the layout of the test pattern we want to test. run_dir : stirng or Path object Path to the location where is the regression run is done. - switches : string - String that holds all the DRC run switches required to enable this. + table_name : string + Table name that we are running on. Returns ------- - pd.DataFrame - A pandas DataFrame with the rule and rule deck used. 
+ dict + A dict with all rule counts """ # Initial value for counters - falsePos_count = 0 - falseNeg_count = 0 - pass_patterns_count = 0 - fail_patterns_count = 0 + rule_counts = defaultdict(int) # Get switches used for each run - sw_file = os.path.join(Path(layout_path.parent.parent).absolute(), f"{test_rule}.{SUPPORTED_SW_EXT}") + sw_file = os.path.join(Path(layout_path.parent.parent).absolute(), f"{table_name}.{SUPPORTED_SW_EXT}") if os.path.exists(sw_file): - switches = " ".join(get_switches(sw_file, test_rule)) + switches = " ".join(get_switches(sw_file, table_name)) else: switches = "--variant=C" # default switch + # Adding switches for specific runsets - if "antenna" in runset_file: + if "antenna" in str(layout_path): switches += " --antenna_only" - elif "density" in runset_file: + elif "density" in str(layout_path): switches += " --density_only" # Creating run folder structure pattern_clean = ".".join(os.path.basename(layout_path).split(".")[:-1]) - output_loc = f"{run_dir}/{test_table}/{test_rule}_data" + output_loc = f"{run_dir}/{table_name}" pattern_log = f"{output_loc}/{pattern_clean}_drc.log" # command to run drc - call_str = f"python3 {drc_dir}/run_drc.py --path={layout_path} {switches} --table={test_table} --run_dir={output_loc} --run_mode=flat --thr=1 > {pattern_log} 2>&1" + call_str = f"python3 {drc_dir}/run_drc.py --path={layout_path} {switches} --table={table_name} --run_dir={output_loc} --run_mode=flat --thr=1 > {pattern_log} 2>&1" # Starting klayout run os.makedirs(output_loc, exist_ok=True) @@ -222,9 +246,12 @@ def run_test_case( # Checking if run is completed or failed pattern_results = glob.glob(os.path.join(output_loc, f"{pattern_clean}*.lyrdb")) + # Get list of rules covered in the test case + rules_tested = get_unit_test_coverage(layout_path) + if len(pattern_results) > 0: # db to gds conversion - marker_output, runset_analysis = convert_results_db_to_gds(pattern_results[0]) + marker_output, runset_analysis = convert_results_db_to_gds(pattern_results[0], rules_tested) # Generating merged testcase for violated rules merged_output = generate_merged_testcase(layout_path, marker_output) @@ -236,21 +263,16 @@ def run_test_case( check_call(call_str, shell=True) if os.path.exists(final_report): - pass_patterns_count, fail_patterns_count, falsePos_count, falseNeg_count = parse_results_db(test_rule, final_report) - - return pass_patterns_count, fail_patterns_count, falsePos_count, falseNeg_count + rule_counts = parse_results_db(final_report) + return rule_counts else: - - return pass_patterns_count, fail_patterns_count, falsePos_count, falseNeg_count + return rule_counts else: - - return pass_patterns_count, fail_patterns_count, falsePos_count, falseNeg_count - + return rule_counts else: - return pass_patterns_count, fail_patterns_count, falsePos_count, falseNeg_count + return rule_counts - -def run_all_test_cases(tc_df, run_dir, thrCount): +def run_all_test_cases(tc_df, drc_dir, run_dir, num_workers): """ This function run all test cases from the input dataframe. @@ -258,10 +280,12 @@ def run_all_test_cases(tc_df, run_dir, thrCount): ---------- tc_df : pd.DataFrame DataFrame that holds all the test cases information for running. + drc_dir : string or Path + Path string to the location of the drc runsets. run_dir : string or Path Path string to the location of the testing code and output. - thrCount : int - Numbe of threads to use per klayout run. + num_workers : int + Number of workers to use for running the regression. 
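+        Each KLayout run itself is launched with --thr=1 here, so this value
+        only controls how many test cases execute concurrently.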
Returns ------- @@ -269,53 +293,56 @@ def run_all_test_cases(tc_df, run_dir, thrCount): A pandas DataFrame with all test cases information post running. """ - results = [] + results_df_list = [] + tc_df["run_status"] = "no status" - with concurrent.futures.ThreadPoolExecutor(max_workers=thrCount) as executor: + with concurrent.futures.ThreadPoolExecutor(max_workers=num_workers) as executor: future_to_run_id = dict() for i, row in tc_df.iterrows(): future_to_run_id[ executor.submit( run_test_case, - str(row["runset"]), drc_dir, row["test_path"], run_dir, row["table_name"], - row["rule_name"], - thrCount, ) ] = row["run_id"] for future in concurrent.futures.as_completed(future_to_run_id): run_id = future_to_run_id[future] try: - pass_patterns, fail_patterns, false_positive, false_negative = future.result() - if pass_patterns + fail_patterns > 0: - if false_positive + false_negative == 0: - status_string = "Passed_rule" - else: - status_string = "Failed_rule" + rule_counts = future.result() + if rule_counts: + rule_counts_df = pd.DataFrame({"analysis_rule": rule_counts.keys(), "count": rule_counts.values()}) + rule_counts_df["rule_name"] = rule_counts_df["analysis_rule"].str.split(RULE_STR_SEP).str[0] + rule_counts_df["type"] = rule_counts_df["analysis_rule"].str.split(RULE_STR_SEP).str[1] + rule_counts_df.drop(columns=["analysis_rule"], inplace=True) + rule_counts_df["count"] = rule_counts_df["count"].astype(int) + rule_counts_df = rule_counts_df.pivot(index="rule_name", columns="type", values="count").fillna(0)\ + .reset_index(drop=False).rename(columns={"index": "rule_name"}) + for c in ANALYSIS_RULES: + if c not in rule_counts_df.columns: + rule_counts_df[c] = 0 + + rule_counts_df[ANALYSIS_RULES] = rule_counts_df[ANALYSIS_RULES].astype(int) + rule_counts_df = rule_counts_df[["rule_name"] + ANALYSIS_RULES] + results_df_list.append(rule_counts_df) + tc_df.loc[tc_df["run_id"] == run_id, "run_status"] = "completed" else: - status_string = "Not_tested" + tc_df.loc[tc_df["run_id"] == run_id, "run_status"] = "no output" + except Exception as exc: logging.error("%d generated an exception: %s" % (run_id, exc)) traceback.print_exc() - status_string = "exception" + tc_df.loc[tc_df["run_id"] == run_id, "run_status"] = "exception" - info = dict() - info["run_id"] = run_id - info["pass_patterns"] = pass_patterns - info["fail_patterns"] = fail_patterns - info["false_positive"] = false_positive - info["false_negative"] = false_negative - info["run_status"] = status_string - results.append(info) - - results_df = pd.DataFrame(results) - all_runs_df = tc_df.merge(results_df, on="run_id", how="left") + if len(results_df_list) > 0: + results_df = pd.concat(results_df_list) + else: + results_df = pd.DataFrame() - return all_runs_df + return results_df, tc_df def parse_existing_rules(rule_deck_path, output_path): @@ -344,7 +371,7 @@ def parse_existing_rules(rule_deck_path, output_path): if ".output" in line: line_list = line.split('"') rule_info = dict() - rule_info["runset"] = os.path.basename(runset) + rule_info["table_name"] = os.path.basename(runset).replace(".drc", "") rule_info["rule_name"] = line_list[1] rules_data.append(rule_info) @@ -428,7 +455,7 @@ def generate_merged_testcase(orignal_testcase, marker_testcase): return merged_gds_path -def darw_polygons(polygon_data, cell, lay_num, lay_dt, path_width): +def draw_polygons(polygon_data, cell, lay_num, lay_dt, path_width): """ This function is used for drawing gds file with all violated polygons. 
@@ -478,7 +505,7 @@ def darw_polygons(polygon_data, cell, lay_num, lay_dt, path_width): logging.error(f"## Unknown type: {tag} ignored") -def convert_results_db_to_gds(results_database: str): +def convert_results_db_to_gds(results_database: str, rules_tested: list): """ This function will parse Klayout database for analysis. It converts the lyrdb klayout database file to GDSII file @@ -487,6 +514,8 @@ def convert_results_db_to_gds(results_database: str): ---------- results_database : string or Path object Path string to the results file + rules_tested : list + List of strings that holds the rule names that are covered by the test case. Returns ------- @@ -496,23 +525,32 @@ def convert_results_db_to_gds(results_database: str): Path of the output drc runset used for analysis. """ - # layer used as a marker - rule_lay_num = 10000 - # width of edges shapes - path_width = 0.01 - + # Writing analysis rule deck pass_marker = "input(2, 222)" fail_marker = "input(3, 222)" fail_marker2 = "input(6, 222)" text_marker = "input(11, 222)" + output_runset_path = f'{results_database.replace(".lyrdb", "")}_analysis.drc' + + analysis_rules = [] + runset_analysis_setup = f''' + source($input) + report("DRC analysis run report at", $report) + pass_marker = {pass_marker} + fail_marker = {fail_marker} + fail_marker2 = {fail_marker2} + text_marker = {text_marker} + + ''' + analysis_rules.append(runset_analysis_setup) + # Generating violated rules and its points cell_name = "" lib = None cell = None in_item = False rule_data_type_map = list() - analysis_rules = [] for ev, elem in tqdm(ET.iterparse(results_database, events=('start', 'end'))): @@ -565,103 +603,64 @@ def convert_results_db_to_gds(results_database: str): rule_lay_dt = rule_data_type_map.index(rule_name) + 1 if cell is not None: for p in polygons: - darw_polygons(p.text, cell, rule_lay_num, rule_lay_dt, path_width) + draw_polygons(p.text, cell, RULE_LAY_NUM, rule_lay_dt, PATH_WIDTH) ## Clearing memeory in_item = False elem.clear() - # Writing final marker gds file + # Writing final marker gds file + if lib is not None: output_gds_path = f'{results_database.replace(".lyrdb", "")}_markers.gds' lib.write_gds(output_gds_path) + else: + logging.error("Failed to get any results in the lyrdb database.") + exit(1) - # Writing analysis rule deck - output_runset_path = f'{results_database.replace(".lyrdb", "")}_analysis.drc' - - runset_analysis_setup = f''' - source($input) - report("DRC analysis run report at", $report) - pass_marker = {pass_marker} - fail_marker = {fail_marker} - fail_marker2 = {fail_marker2} - text_marker = {text_marker} - ''' - + # Saving analysis rule deck. 
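+    # Each rule seen in the database gets four analysis checks, named
+    # <rule>--pass_patterns, <rule>--fail_patterns, <rule>--false_positive
+    # and <rule>--false_negative (RULE_STR_SEP supplies the "--" separator),
+    # matching the ANALYSIS_RULES list used when pivoting the counts.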
+ for r in rule_data_type_map: + rule_lay_dt = rule_data_type_map.index(rule_name) + 1 pass_patterns_rule = f''' - pass_marker.interacting( text_marker.texts("{rule_name}") ).output("{rule_name}_pass_patterns", "{rule_name}_pass_patterns polygons") + pass_marker.interacting( text_marker.texts("{r}") ).output("{r}{RULE_STR_SEP}pass_patterns", "{r}{RULE_STR_SEP}pass_patterns polygons") ''' fail_patterns_rule = f''' - fail_marker2.interacting(fail_marker.interacting(text_marker.texts("{rule_name}")) ).or( fail_marker.interacting(text_marker.texts("{rule_name}")).not_interacting(fail_marker2) ).output("{rule_name}_fail_patterns", "{rule_name}_fail_patterns polygons") + fail_marker2.interacting(fail_marker.interacting(text_marker.texts("{r}")) ).or( fail_marker.interacting(text_marker.texts("{r}")).not_interacting(fail_marker2) ).output("{r}{RULE_STR_SEP}fail_patterns", "{r}{RULE_STR_SEP}fail_patterns polygons") ''' false_pos_rule = f''' - pass_marker.interacting(text_marker.texts("{rule_name}")).interacting(input({rule_lay_num}, {rule_lay_dt})).output("{rule_name}_false_positive", "{rule_name}_false_positive occurred") + pass_marker.interacting(text_marker.texts("{r}")).interacting(input({RULE_LAY_NUM}, {rule_lay_dt})).output("{r}{RULE_STR_SEP}false_positive", "{r}{RULE_STR_SEP}false_positive occurred") ''' false_neg_rule = f''' - ((fail_marker2.interacting(fail_marker.interacting(text_marker.texts("{rule_name}")))).or((fail_marker.interacting(input(11, 222).texts("{rule_name}")).not_interacting(fail_marker2)))).not_interacting(input({rule_lay_num}, {rule_lay_dt})).output("{rule_name}_false_negative", "{rule_name}_false_negative occurred") + ((fail_marker2.interacting(fail_marker.interacting(text_marker.texts("{r}")))).or((fail_marker.interacting(input(11, 222).texts("{r}")).not_interacting(fail_marker2)))).not_interacting(input({RULE_LAY_NUM}, {rule_lay_dt})).output("{r}{RULE_STR_SEP}false_negative", "{r}{RULE_STR_SEP}false_negative occurred") ''' - # Adding list of analysis rules - if not any(rule_name in rule_txt for rule_txt in analysis_rules): - analysis_rules.append(pass_patterns_rule) - analysis_rules.append(fail_patterns_rule) - analysis_rules.append(false_pos_rule) - analysis_rules.append(false_neg_rule) + analysis_rules.append(pass_patterns_rule) + analysis_rules.append(fail_patterns_rule) + analysis_rules.append(false_pos_rule) + analysis_rules.append(false_neg_rule) + + for r in rules_tested: + if r in rule_data_type_map: + continue + + pass_patterns_rule = f''' + pass_marker.interacting( text_marker.texts("{r}") ).output("{r}{RULE_STR_SEP}pass_patterns", "{r}{RULE_STR_SEP}pass_patterns polygons") + ''' + fail_patterns_rule = f''' + fail_marker2.interacting(fail_marker.interacting(text_marker.texts("{r}")) ).or( fail_marker.interacting(text_marker.texts("{r}")).not_interacting(fail_marker2) ).output("{r}{RULE_STR_SEP}fail_patterns", "{r}{RULE_STR_SEP}fail_patterns polygons") + ''' + + analysis_rules.append(pass_patterns_rule) + analysis_rules.append(fail_patterns_rule) - with open(output_runset_path, "a+") as runset_analysis: - # analysis_rules = list(dict.fromkeys(analysis_rules)) - runset_analysis.write(runset_analysis_setup) + with open(output_runset_path, "w") as runset_analysis: runset_analysis.write("".join(analysis_rules)) return output_gds_path, output_runset_path -def get_unit_tests_dataframe(gds_files): +def build_tests_dataframe(unit_test_cases_dir, target_table): """ - This function is used for getting all test cases available in a formated data frame before running. 
- - Parameters - ---------- - gds_files : str - Path string to the location of unit test cases path. - Returns - ------- - pd.DataFrame - A DataFrame that has all the targetted test cases that we need to run. - """ - - # Get rules from gds - rules = [] - test_paths = [] - # layer num of rule text - lay_num = 11 - # layer data type of rule text - lay_dt = 222 - - # Getting all rules names from testcases - for gds_file in gds_files: - library = gdstk.read_gds(gds_file) - top_cells = library.top_level() # Get top cells - for cell in top_cells: - flatten_cell = cell.flatten() - # Get all text labels for each cell - labels = flatten_cell.get_labels(apply_repetitions=True, depth=None, layer=lay_num, texttype=lay_dt) - # Get label value - for label in labels: - rule = label.text - if rule not in rules: - rules.append(rule) - test_paths.append(gds_file) - - tc_df = pd.DataFrame({"test_path": test_paths, "rule_name": rules}) - tc_df["table_name"] = tc_df["test_path"].apply( - lambda x: x.name.replace(".gds", "") - ) - return tc_df - - -def build_unit_tests_dataframe(unit_test_cases_dir, target_table, target_rule): - """ - This function is used for getting all test cases available in a formated data frame before running. + This function is used for getting all test cases available in a formated dataframe before running. Parameters ---------- @@ -669,8 +668,6 @@ def build_unit_tests_dataframe(unit_test_cases_dir, target_table, target_rule): Path string to the location of unit test cases path. target_table : str or None Name of table that we want to run regression for. If None, run all found. - target_rule : str or None - Name of rule that we want to run regression for. If None, run all found. Returns ------- @@ -685,11 +682,10 @@ def build_unit_tests_dataframe(unit_test_cases_dir, target_table, target_rule): ) # Get test cases df from test cases - tc_df = get_unit_tests_dataframe(all_unit_test_cases) - - ## Filter test cases based on filter provided - if target_rule is not None: - tc_df = tc_df[tc_df["rule_name"] == target_rule] + tc_df = pd.DataFrame({"test_path": all_unit_test_cases}) + tc_df["table_name"] = tc_df["test_path"].apply( + lambda x: x.name.replace(".gds", "") + ) if target_table is not None: tc_df = tc_df[tc_df["table_name"] == target_table] @@ -698,10 +694,11 @@ def build_unit_tests_dataframe(unit_test_cases_dir, target_table, target_rule): logging.error("No test cases remaining after filtering.") exit(1) + tc_df["run_id"] = range(len(tc_df)) return tc_df -def run_regression(drc_dir, output_path, target_table, target_rule, cpu_count): +def run_regression(drc_dir, output_path, target_table, cpu_count): """ Running Regression Procedure. @@ -715,8 +712,6 @@ def run_regression(drc_dir, output_path, target_table, target_rule, cpu_count): Path string to the location of the output results of the run. target_table : string or None Name of table that we want to run regression for. If None, run all found. - target_rule : string or None - Name of rule that we want to run regression for. If None, run all found. cpu_count : int Number of cpus to use in running testcases. Returns @@ -732,25 +727,22 @@ def run_regression(drc_dir, output_path, target_table, target_rule, cpu_count): ## Get all test cases available in the repo. 
test_cases_path = os.path.join(drc_dir, "testing/testcases") - unit_test_cases_path = os.path.join(test_cases_path, "unit_testcases") - tc_df = build_unit_tests_dataframe(unit_test_cases_path, target_table, target_rule) - logging.info("## Total number of rules found in test cases: {}".format(len(tc_df))) - - ## Get tc_df with the correct rule deck per rule. - tc_df = tc_df.merge(rules_df, how="left", on="rule_name") - tc_df["run_id"] = list(range(len(tc_df))) - tc_df.drop_duplicates(inplace=True) - print(tc_df) - - tc_df.to_csv(os.path.join(output_path, "all_test_cases.csv"), index=False) + unit_test_cases_path = os.path.join(test_cases_path, "unit") + tc_df = build_tests_dataframe(unit_test_cases_path, target_table) + logging.info("## Total table gds files found: {}".format(len(tc_df))) - ## Do some test cases coverage analysis - cov_df = analyze_test_patterns_coverage(rules_df, tc_df, output_path) - cov_df.drop_duplicates(inplace=True) - print(cov_df) + # ## Do some test cases coverage analysis + # cov_df = analyze_test_patterns_coverage(rules_df, tc_df, output_path) + # cov_df.drop_duplicates(inplace=True) + # print(cov_df) ## Run all test cases - all_tc_df = run_all_test_cases(tc_df, output_path, cpu_count) + print(tc_df) + results_df, tc_df = run_all_test_cases(tc_df, drc_dir, output_path, cpu_count) + print(results_df) + print(tc_df) + + exit() all_tc_df.drop_duplicates(inplace=True) print(all_tc_df) all_tc_df.to_csv( @@ -772,7 +764,7 @@ def run_regression(drc_dir, output_path, target_table, target_rule, cpu_count): return True -def main(drc_dir: str, rules_dir: str, output_path: str, target_table: str, target_rule: str): +def main(drc_dir: str, output_path: str, target_table: str): """ Main Procedure. @@ -782,14 +774,10 @@ def main(drc_dir: str, rules_dir: str, output_path: str, target_table: str, targ ---------- drc_dir : str Path string to the DRC directory where all the DRC files are located. - rules_dir : str - Path string to the location of all rule deck files for that variant. output_path : str Path string to the location of the output results of the run. target_table : str or None Name of table that we want to run regression for. If None, run all found. - target_rule : str or None - Name of rule that we want to run regression for. If None, run all found. 
Returns ------- bool @@ -808,7 +796,6 @@ def main(drc_dir: str, rules_dir: str, output_path: str, target_table: str, targ # info logs for args logging.info("## Run folder is: {}".format(run_name)) logging.info("## Target Table is: {}".format(target_table)) - logging.info("## Target rule is: {}".format(target_rule)) # Start of execution time t0 = time.time() @@ -818,7 +805,7 @@ def main(drc_dir: str, rules_dir: str, output_path: str, target_table: str, targ # Calling regression function run_status = run_regression( - drc_dir, output_path, target_table, target_rule, cpu_count + drc_dir, output_path, target_table, cpu_count ) # End of execution time @@ -844,7 +831,6 @@ def main(drc_dir: str, rules_dir: str, output_path: str, target_table: str, targ # arguments run_name = args["--run_name"] target_table = args["--table_name"] - target_rule = args["--rule_name"] if run_name is None: # logs format @@ -872,5 +858,5 @@ def main(drc_dir: str, rules_dir: str, output_path: str, target_table: str, targ # Calling main function run_status = main( - drc_dir, rules_dir, output_path, target_table, target_rule + drc_dir, output_path, target_table ) diff --git a/requirements.txt b/requirements.txt index 9ce9f63e..9ea30e59 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,3 +4,4 @@ gdstk pandas tqdm pyyaml +klayout From 7983f16b655d0cab0094ce17dcaaf6411399fde3 Mon Sep 17 00:00:00 2001 From: Amro Tork Date: Fri, 27 Jan 2023 11:55:54 +0200 Subject: [PATCH 05/71] Complete regression script update to make sure it would generate error. --- klayout/drc/testing/run_regression.py | 71 +++++++++++++++++++-------- 1 file changed, 50 insertions(+), 21 deletions(-) diff --git a/klayout/drc/testing/run_regression.py b/klayout/drc/testing/run_regression.py index 2b321363..88720620 100644 --- a/klayout/drc/testing/run_regression.py +++ b/klayout/drc/testing/run_regression.py @@ -321,12 +321,15 @@ def run_all_test_cases(tc_df, drc_dir, run_dir, num_workers): rule_counts_df["count"] = rule_counts_df["count"].astype(int) rule_counts_df = rule_counts_df.pivot(index="rule_name", columns="type", values="count").fillna(0)\ .reset_index(drop=False).rename(columns={"index": "rule_name"}) + + rule_counts_df["table_name"] = tc_df.loc[tc_df["run_id"] == run_id, "table_name"].iloc[0] + for c in ANALYSIS_RULES: if c not in rule_counts_df.columns: rule_counts_df[c] = 0 rule_counts_df[ANALYSIS_RULES] = rule_counts_df[ANALYSIS_RULES].astype(int) - rule_counts_df = rule_counts_df[["rule_name"] + ANALYSIS_RULES] + rule_counts_df = rule_counts_df[["table_name", "rule_name"] + ANALYSIS_RULES] results_df_list.append(rule_counts_df) tc_df.loc[tc_df["run_id"] == run_id, "run_status"] = "completed" else: @@ -591,7 +594,6 @@ def convert_results_db_to_gds(results_database: str, rules_tested: list): if rule_name is None: elem.clear() continue - else: elem.clear() continue @@ -619,7 +621,7 @@ def convert_results_db_to_gds(results_database: str, rules_tested: list): # Saving analysis rule deck. 
for r in rule_data_type_map: - rule_lay_dt = rule_data_type_map.index(rule_name) + 1 + rule_lay_dt = rule_data_type_map.index(r) + 1 pass_patterns_rule = f''' pass_marker.interacting( text_marker.texts("{r}") ).output("{r}{RULE_STR_SEP}pass_patterns", "{r}{RULE_STR_SEP}pass_patterns polygons") ''' @@ -697,6 +699,36 @@ def build_tests_dataframe(unit_test_cases_dir, target_table): tc_df["run_id"] = range(len(tc_df)) return tc_df +def aggregate_results(tc_df: pd.DataFrame, results_df: pd.DataFrame, rules_df: pd.DataFrame): + """ + aggregate_results Aggregate the results for all runs. + + Parameters + ---------- + tc_df : pd.DataFrame + Dataframe that holds the information about the test cases. + results_df : pd.DataFrame + Dataframe that holds the information about the unit test rules. + rules_df : pd.DataFrame + Dataframe that holds the information about all the rules implemented in the rule deck. + + Returns + ------- + pd.DataFrame + A DataFrame that has all data analysis aggregated into one. + """ + df = results_df.merge(rules_df, how="outer", on=["table_name", "rule_name"]) + df[ANALYSIS_RULES] = df[ANALYSIS_RULES].fillna(0) + df = df.merge(tc_df[["table_name", "run_status"]], how="left", on="table_name") + + df["rule_status"] = "Passed" + df.loc[(df["false_negative"] > 0), "rule_status"] = "Rule Failed" + df.loc[(df["false_positive"] > 0), "rule_status"] = "Rule Failed" + df.loc[(df["pass_patterns"] < 1), "rule_status"] = "Rule Not Tested" + df.loc[(df["fail_patterns"] < 1), "rule_status"] = "Rule Not Tested" + df.loc[~(df["run_status"].isin(["completed"])), "rule_status"] = "Test Case Run Failed" + + return df def run_regression(drc_dir, output_path, target_table, cpu_count): """ @@ -723,38 +755,35 @@ def run_regression(drc_dir, output_path, target_table, cpu_count): ## Parse Existing Rules rules_df = parse_existing_rules(drc_dir, output_path) logging.info("## Total number of rules found in rule decks: {}".format(len(rules_df))) - print(rules_df) + logging.info("## Parsed Rules: \n" + str(rules_df)) ## Get all test cases available in the repo. test_cases_path = os.path.join(drc_dir, "testing/testcases") unit_test_cases_path = os.path.join(test_cases_path, "unit") tc_df = build_tests_dataframe(unit_test_cases_path, target_table) logging.info("## Total table gds files found: {}".format(len(tc_df))) + logging.info("## Found testcases: \n" + str(tc_df)) - # ## Do some test cases coverage analysis - # cov_df = analyze_test_patterns_coverage(rules_df, tc_df, output_path) - # cov_df.drop_duplicates(inplace=True) - # print(cov_df) - - ## Run all test cases - print(tc_df) + ## Run all test cases. results_df, tc_df = run_all_test_cases(tc_df, drc_dir, output_path, cpu_count) - print(results_df) - print(tc_df) + logging.info("## Testcases found results: \n" + str(results_df)) + logging.info("## Updated testcases: \n" + str(tc_df)) + + ## Aggregate all dataframes into one + df = aggregate_results(tc_df, results_df, rules_df) + df.drop_duplicates(inplace=True) + logging.info("## Final analysis table: \n" + str(df)) - exit() - all_tc_df.drop_duplicates(inplace=True) - print(all_tc_df) - all_tc_df.to_csv( + ## Generate error if there are any missing info or fails. 
+ df.to_csv( os.path.join(output_path, "all_test_cases_results.csv"), index=False ) ## Check if there any rules that generated false positive or false negative - failing_results = all_tc_df[ - ~all_tc_df["run_status"].isin(["Passed_rule", "Not_tested"]) + failing_results = df[ + ~df["rule_status"].isin(["Passed"]) ] - print(failing_results) - logging.info("## Failing testcases : {}".format(len(failing_results))) + logging.info("## Failing test cases: \n" + str(failing_results)) if len(failing_results) > 0: logging.error("## Some test cases failed .....") From 9c1d098c8838953c67fc42001fbdb38592ae3a45 Mon Sep 17 00:00:00 2001 From: Amro Tork Date: Fri, 27 Jan 2023 11:58:19 +0200 Subject: [PATCH 06/71] Update makefile. --- Makefile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index ef7f7391..1434fb97 100644 --- a/Makefile +++ b/Makefile @@ -43,9 +43,9 @@ test-DRC-main : | $(CONDA_ENV_PYTHON) @echo "========== DRC-Regression is done ==========" .ONESHELL: -test-DRC-% : - @which python - @python $(KLAYOUT_TESTS)/run_regression.py --table=$* +test-DRC-% : | $(CONDA_ENV_PYTHON) + @which python3 + @$(IN_CONDA_ENV) python3 $(KLAYOUT_TESTS)/run_regression.py --table=$* @echo "========== Table DRC-Regression is done ==========" #================================= From 91ff35812f3a365dc293608ebb2037a04c01bfee Mon Sep 17 00:00:00 2001 From: Amro Tork Date: Fri, 27 Jan 2023 12:55:39 +0200 Subject: [PATCH 07/71] Make code flake8 clean. --- klayout/drc/testing/run_regression.py | 34 +++++++++++++++++---------- 1 file changed, 21 insertions(+), 13 deletions(-) diff --git a/klayout/drc/testing/run_regression.py b/klayout/drc/testing/run_regression.py index 88720620..e1333c87 100644 --- a/klayout/drc/testing/run_regression.py +++ b/klayout/drc/testing/run_regression.py @@ -90,6 +90,7 @@ def get_unit_test_coverage(gds_file): return rules + def check_klayout_version(): """ check_klayout_version checks klayout version and makes sure it would work with the DRC. @@ -180,6 +181,7 @@ def parse_results_db(results_database): return rule_counts + def run_test_case( drc_dir, layout_path, @@ -217,7 +219,6 @@ def run_test_case( else: switches = "--variant=C" # default switch - # Adding switches for specific runsets if "antenna" in str(layout_path): switches += " --antenna_only" @@ -264,7 +265,7 @@ def run_test_case( if os.path.exists(final_report): rule_counts = parse_results_db(final_report) - return rule_counts + return rule_counts else: return rule_counts else: @@ -272,6 +273,7 @@ def run_test_case( else: return rule_counts + def run_all_test_cases(tc_df, drc_dir, run_dir, num_workers): """ This function run all test cases from the input dataframe. 
@@ -319,15 +321,19 @@ def run_all_test_cases(tc_df, drc_dir, run_dir, num_workers): rule_counts_df["type"] = rule_counts_df["analysis_rule"].str.split(RULE_STR_SEP).str[1] rule_counts_df.drop(columns=["analysis_rule"], inplace=True) rule_counts_df["count"] = rule_counts_df["count"].astype(int) - rule_counts_df = rule_counts_df.pivot(index="rule_name", columns="type", values="count").fillna(0)\ - .reset_index(drop=False).rename(columns={"index": "rule_name"}) + rule_counts_df = rule_counts_df.pivot(index="rule_name", + columns="type", + values="count") + rule_counts_df = rule_counts_df.fillna(0) + rule_counts_df = rule_counts_df.reset_index(drop=False) + rule_counts_df = rule_counts_df.rename(columns={"index": "rule_name"}) rule_counts_df["table_name"] = tc_df.loc[tc_df["run_id"] == run_id, "table_name"].iloc[0] for c in ANALYSIS_RULES: if c not in rule_counts_df.columns: rule_counts_df[c] = 0 - + rule_counts_df[ANALYSIS_RULES] = rule_counts_df[ANALYSIS_RULES].astype(int) rule_counts_df = rule_counts_df[["table_name", "rule_name"] + ANALYSIS_RULES] results_df_list.append(rule_counts_df) @@ -639,11 +645,11 @@ def convert_results_db_to_gds(results_database: str, rules_tested: list): analysis_rules.append(fail_patterns_rule) analysis_rules.append(false_pos_rule) analysis_rules.append(false_neg_rule) - + for r in rules_tested: if r in rule_data_type_map: continue - + pass_patterns_rule = f''' pass_marker.interacting( text_marker.texts("{r}") ).output("{r}{RULE_STR_SEP}pass_patterns", "{r}{RULE_STR_SEP}pass_patterns polygons") ''' @@ -699,6 +705,7 @@ def build_tests_dataframe(unit_test_cases_dir, target_table): tc_df["run_id"] = range(len(tc_df)) return tc_df + def aggregate_results(tc_df: pd.DataFrame, results_df: pd.DataFrame, rules_df: pd.DataFrame): """ aggregate_results Aggregate the results for all runs. @@ -711,7 +718,7 @@ def aggregate_results(tc_df: pd.DataFrame, results_df: pd.DataFrame, rules_df: p Dataframe that holds the information about the unit test rules. rules_df : pd.DataFrame Dataframe that holds the information about all the rules implemented in the rule deck. - + Returns ------- pd.DataFrame @@ -722,14 +729,15 @@ def aggregate_results(tc_df: pd.DataFrame, results_df: pd.DataFrame, rules_df: p df = df.merge(tc_df[["table_name", "run_status"]], how="left", on="table_name") df["rule_status"] = "Passed" - df.loc[(df["false_negative"] > 0), "rule_status"] = "Rule Failed" - df.loc[(df["false_positive"] > 0), "rule_status"] = "Rule Failed" - df.loc[(df["pass_patterns"] < 1), "rule_status"] = "Rule Not Tested" - df.loc[(df["fail_patterns"] < 1), "rule_status"] = "Rule Not Tested" - df.loc[~(df["run_status"].isin(["completed"])), "rule_status"] = "Test Case Run Failed" + df.loc[(df["false_negative"] > 0), "rule_status"] = "Rule Failed" + df.loc[(df["false_positive"] > 0), "rule_status"] = "Rule Failed" + df.loc[(df["pass_patterns"] < 1), "rule_status"] = "Rule Not Tested" + df.loc[(df["fail_patterns"] < 1), "rule_status"] = "Rule Not Tested" + df.loc[~(df["run_status"].isin(["completed"])), "rule_status"] = "Test Case Run Failed" return df + def run_regression(drc_dir, output_path, target_table, cpu_count): """ Running Regression Procedure. From 87f7d87bb369576eccd5575122a18328952ff3f9 Mon Sep 17 00:00:00 2001 From: Amro Tork Date: Fri, 27 Jan 2023 13:23:14 +0200 Subject: [PATCH 08/71] Dealing with results case. 
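
The aggregation previously assumed both input frames were non-empty, so a filtered run that parsed no rules or produced no results broke the final merge. A minimal standalone sketch of the guard added here (the helper name `merge_rule_frames` is invented for this note; the committed change lives inside `aggregate_results`):

```python
# Sketch only -- mirrors the branching added to aggregate_results().
import logging
import pandas as pd

ANALYSIS_RULES = ["pass_patterns", "fail_patterns", "false_negative", "false_positive"]

def merge_rule_frames(rules_df: pd.DataFrame, results_df: pd.DataFrame) -> pd.DataFrame:
    if len(rules_df) < 1 and len(results_df) < 1:
        # Nothing was parsed and nothing ran: abort the regression.
        logging.error("## There are no rules for analysis or run.")
        raise SystemExit(1)
    elif len(rules_df) < 1:
        return results_df
    elif len(results_df) < 1:
        df = rules_df.copy()
        for c in ANALYSIS_RULES:
            df[c] = 0  # no runs happened, so every analysis counter is zero
        return df
    # Normal case: the outer merge keeps rules that produced no results and
    # results whose rule is missing from the decks.
    return results_df.merge(rules_df, how="outer", on=["table_name", "rule_name"])
```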
--- klayout/drc/testing/run_regression.py | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/klayout/drc/testing/run_regression.py b/klayout/drc/testing/run_regression.py
index e1333c87..a68be091 100644
--- a/klayout/drc/testing/run_regression.py
+++ b/klayout/drc/testing/run_regression.py
@@ -724,7 +724,18 @@ def aggregate_results(tc_df: pd.DataFrame, results_df: pd.DataFrame, rules_df: p
     pd.DataFrame
         A DataFrame that has all data analysis aggregated into one.
     """
-    df = results_df.merge(rules_df, how="outer", on=["table_name", "rule_name"])
+    if len(rules_df) < 1 and len(results_df) < 1:
+        logging.error("## There are no rules for analysis or run.")
+        exit(1)
+    elif len(rules_df) < 1 and len(results_df) > 0:
+        df = results_df
+    elif len(rules_df) > 0 and len(results_df) < 1:
+        df = rules_df
+        for c in ANALYSIS_RULES:
+            df[c] = 0
+    else:
+        df = results_df.merge(rules_df, how="outer", on=["table_name", "rule_name"])
+
     df[ANALYSIS_RULES] = df[ANALYSIS_RULES].fillna(0)
     df = df.merge(tc_df[["table_name", "run_status"]], how="left", on="table_name")
 
From 60589c731d2cea857772aefa1f5dc636e13c7013 Mon Sep 17 00:00:00 2001
From: Amro Tork
Date: Fri, 27 Jan 2023 13:25:09 +0200
Subject: [PATCH 09/71] Make sure that CI fails in case of failure

---
 .github/workflows/regression.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/regression.yml b/.github/workflows/regression.yml
index 84d200e5..3ddd0cd0 100644
--- a/.github/workflows/regression.yml
+++ b/.github/workflows/regression.yml
@@ -29,7 +29,7 @@ jobs:
   regression:
     runs-on: ubuntu-latest
     strategy:
-      fail-fast: false
+      max-parallel: 8
       matrix:
         include:
           - { tool: klayout, part: drc, test: dualgate }

From 9caedda0d1bd832e3cc7800b52788acd17883416 Mon Sep 17 00:00:00 2001
From: Amro Tork
Date: Fri, 27 Jan 2023 13:27:18 +0200
Subject: [PATCH 10/71] Clean flake8 issue.

---
 klayout/drc/testing/run_regression.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/klayout/drc/testing/run_regression.py b/klayout/drc/testing/run_regression.py
index a68be091..77a37430 100644
--- a/klayout/drc/testing/run_regression.py
+++ b/klayout/drc/testing/run_regression.py
@@ -735,7 +735,7 @@ def aggregate_results(tc_df: pd.DataFrame, results_df: pd.DataFrame, rules_df: p
     else:
         df = results_df.merge(rules_df, how="outer", on=["table_name", "rule_name"])
-    
+
     df[ANALYSIS_RULES] = df[ANALYSIS_RULES].fillna(0)
     df = df.merge(tc_df[["table_name", "run_status"]], how="left", on="table_name")
 
From 141ccb6471a28a81cea35736016e538cb4812178 Mon Sep 17 00:00:00 2001
From: Amro Tork
Date: Fri, 27 Jan 2023 13:29:30 +0200
Subject: [PATCH 11/71] Remove echo to force issue.

---
 Makefile | 2 --
 1 file changed, 2 deletions(-)

diff --git a/Makefile b/Makefile
index 1434fb97..21f134b2 100644
--- a/Makefile
+++ b/Makefile
@@ -40,13 +40,11 @@ lint: | $(CONDA_ENV_PYTHON)
 .ONESHELL:
 test-DRC-main : | $(CONDA_ENV_PYTHON)
 	@$(IN_CONDA_ENV) python3 $(KLAYOUT_TESTS)/run_regression.py
-	@echo "========== DRC-Regression is done =========="
 
 .ONESHELL:
 test-DRC-% : | $(CONDA_ENV_PYTHON)
 	@which python3
 	@$(IN_CONDA_ENV) python3 $(KLAYOUT_TESTS)/run_regression.py --table=$*
-	@echo "========== Table DRC-Regression is done =========="
 
 #=================================
 # -------- test-DRC-switch -------

From 90909864667bccbd602864310022e75e9c233137 Mon Sep 17 00:00:00 2001
From: Amro Tork
Date: Fri, 27 Jan 2023 13:33:37 +0200
Subject: [PATCH 12/71] Allow all CI regression jobs to keep running when one fails.
--- .github/workflows/regression.yml | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/regression.yml b/.github/workflows/regression.yml
index 3ddd0cd0..0712bac4 100644
--- a/.github/workflows/regression.yml
+++ b/.github/workflows/regression.yml
@@ -29,7 +29,8 @@ jobs:
   regression:
     runs-on: ubuntu-latest
     strategy:
-      max-parallel: 8
+      max-parallel: 4
+      fail-fast: false
       matrix:
         include:
           - { tool: klayout, part: drc, test: dualgate }

From 03eb74b1cbc7da67e6096844ce1ff7859fdfbdbb Mon Sep 17 00:00:00 2001
From: Amro Tork
Date: Sun, 29 Jan 2023 11:40:54 +0200
Subject: [PATCH 13/71] Update regression to make sure it does proper filtering.

---
 klayout/drc/testing/run_regression.py | 17 ++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)

diff --git a/klayout/drc/testing/run_regression.py b/klayout/drc/testing/run_regression.py
index 77a37430..5185052b 100644
--- a/klayout/drc/testing/run_regression.py
+++ b/klayout/drc/testing/run_regression.py
@@ -43,6 +43,7 @@
 from tqdm import tqdm
 import re
 import gdstk
+import errno
 from collections import defaultdict
 
 
@@ -354,7 +355,7 @@ def run_all_test_cases(tc_df, drc_dir, run_dir, num_workers):
     return results_df, tc_df
 
 
-def parse_existing_rules(rule_deck_path, output_path):
+def parse_existing_rules(rule_deck_path, output_path, target_table=None):
     """
     This function collects the rule names from the existing drc rule decks.
 
@@ -364,6 +365,8 @@ def parse_existing_rules(rule_deck_path, output_path):
         Path string to the DRC directory where all the DRC files are located.
     output_path : string or Path
         Path of the run location to store the output analysis file.
+    target_table : string, optional
+        Name of the table to be tested.
 
     Returns
     -------
@@ -371,7 +374,15 @@ def parse_existing_rules(rule_deck_path, output_path):
        A pandas DataFrame with the rule and rule deck used.
    """

-    drc_files = glob.glob(os.path.join(rule_deck_path, "rule_decks", "*.drc"))
+    if target_table is None:
+        drc_files = glob.glob(os.path.join(rule_deck_path, "rule_decks", "*.drc"))
+    else:
+        table_rule_file = os.path.join(rule_deck_path, "rule_decks", f"{target_table}.drc")
+        if not os.path.isfile(table_rule_file):
+            raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), table_rule_file)
+
+        drc_files = [table_rule_file]
+
     rules_data = list()
 
     for runset in drc_files:
@@ -772,7 +783,7 @@ def run_regression(drc_dir, output_path, target_table, cpu_count):
     """
 
     ## Parse Existing Rules
-    rules_df = parse_existing_rules(drc_dir, output_path)
+    rules_df = parse_existing_rules(drc_dir, output_path, target_table)
     logging.info("## Total number of rules found in rule decks: {}".format(len(rules_df)))
     logging.info("## Parsed Rules: \n" + str(rules_df))
 
From d4808c793e9d5ba83c7fb69c6c91a31f432ad1af Mon Sep 17 00:00:00 2001
From: Amro Tork
Date: Sun, 29 Jan 2023 16:54:44 +0200
Subject: [PATCH 14/71] Moving switches parsing to make sure that the yaml file is at the same level as the gds file.
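
Before this change the runner looked for the switch file one directory level too high (`layout_path.parent.parent`), while the unit testcases keep their yaml next to the gds, so the lookup missed it. A small sketch of the corrected lookup, assuming a `dualgate.yaml` sitting beside `dualgate.gds` (file names and the helper name are examples only):

```python
# Sketch of the corrected switch-file lookup; not the committed code.
from pathlib import Path

import yaml

SUPPORTED_SW_EXT = "yaml"

def switches_for(layout_path: Path, table_name: str) -> list:
    # The yaml file now lives in the same directory as the gds file.
    sw_file = Path(layout_path.parent).absolute() / f"{table_name}.{SUPPORTED_SW_EXT}"
    if not sw_file.exists():
        return ["--variant=C"]  # default switch used by the test runner
    with open(sw_file) as f:
        yaml_dic = yaml.safe_load(f)
    return [f"{param}={value}" for param, value in yaml_dic[table_name].items()]
```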
--- klayout/drc/testing/run_regression.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/klayout/drc/testing/run_regression.py b/klayout/drc/testing/run_regression.py index 5185052b..6bdb9124 100644 --- a/klayout/drc/testing/run_regression.py +++ b/klayout/drc/testing/run_regression.py @@ -213,7 +213,7 @@ def run_test_case( rule_counts = defaultdict(int) # Get switches used for each run - sw_file = os.path.join(Path(layout_path.parent.parent).absolute(), f"{table_name}.{SUPPORTED_SW_EXT}") + sw_file = os.path.join(Path(layout_path.parent).absolute(), f"{table_name}.{SUPPORTED_SW_EXT}") if os.path.exists(sw_file): switches = " ".join(get_switches(sw_file, table_name)) From 90ed803a47a7737c0d75e7f65898122caf6b2eac Mon Sep 17 00:00:00 2001 From: Amro Tork <74936860+atorkmabrains@users.noreply.github.com> Date: Mon, 30 Jan 2023 08:11:50 +0200 Subject: [PATCH 15/71] Update README.md --- klayout/drc/README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/klayout/drc/README.md b/klayout/drc/README.md index 8863bc42..d92d2193 100644 --- a/klayout/drc/README.md +++ b/klayout/drc/README.md @@ -5,9 +5,9 @@ Explains how to use the runset. ## Folder Structure ```text -📦drc - ┣ 📦testing - ┣ 📦rule_decks +📁drc + ┣ 📁testing + ┣ 📁rule_decks ┣ 📜README.md ┗ 📜run_drc.py ``` From 2e50bd51e7ad14ec58476f3695ab950149bce688 Mon Sep 17 00:00:00 2001 From: Amro Tork <74936860+atorkmabrains@users.noreply.github.com> Date: Mon, 30 Jan 2023 08:12:11 +0200 Subject: [PATCH 16/71] Update README.md --- klayout/drc/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/klayout/drc/README.md b/klayout/drc/README.md index d92d2193..575293ab 100644 --- a/klayout/drc/README.md +++ b/klayout/drc/README.md @@ -20,7 +20,7 @@ Please make sure to use the latest Klayout setup at your side. Also, please make sure to install the required python packages at `requirements.txt` by using ```bash -pip install -r requirements.test.txt +pip install -r requirements.txt ``` From 91a8cf4915f8c1c541c6c3aaec4851441db0d166 Mon Sep 17 00:00:00 2001 From: Amro Tork <74936860+atorkmabrains@users.noreply.github.com> Date: Mon, 30 Jan 2023 08:14:00 +0200 Subject: [PATCH 17/71] Update README.md --- klayout/drc/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/klayout/drc/README.md b/klayout/drc/README.md index 575293ab..039918cf 100644 --- a/klayout/drc/README.md +++ b/klayout/drc/README.md @@ -16,7 +16,7 @@ Explains how to use the runset. The `run_drc.py` script takes a gds file to run DRC rule decks of GF180 technology with switches to select subsets of all checks. ### Requirements -Please make sure to use the latest Klayout setup at your side. +Please make sure to use the latest Klayout setup at your side. To install klayout, please refer to documentation at [klayout build](https://www.klayout.de/build.html). 
Also, please make sure to install the required python packages at `requirements.txt` by using ```bash From 279d74f393ea411a71f036b8b9759f9005cba05d Mon Sep 17 00:00:00 2001 From: Amro Tork <74936860+atorkmabrains@users.noreply.github.com> Date: Mon, 30 Jan 2023 08:21:51 +0200 Subject: [PATCH 18/71] Update README.md --- klayout/drc/README.md | 21 +++++---------------- 1 file changed, 5 insertions(+), 16 deletions(-) diff --git a/klayout/drc/README.md b/klayout/drc/README.md index 039918cf..99d01c4b 100644 --- a/klayout/drc/README.md +++ b/klayout/drc/README.md @@ -23,22 +23,11 @@ Also, please make sure to install the required python packages at `requirements. pip install -r requirements.txt ``` - -### Switches -The list of switches used for running DRC: - -1. **FEOL** : Default is on. Use it for checking Front End Of Line layers (wells, diffusion, polys, contacts). -2. **BEOL** : Default is on. Use it for checking Back End Of Line layers (metal layers, top metal layer, vias). -3. **BEOL** : Default is on. Use it for checking Back End Of Line layers (metal layers, top metal layer, vias). -4. **GF180MCU**=A : combined options of metal_level=3, mim_option=A, metal_top=30K, poly_res=1K, and mim_cap=2 -5. **GF180MCU**=B : combined options of metal_level=4, mim_option=B, metal_top=11K, poly_res=1K, and mim_cap=2 -6. **GF180MCU**=C : combined options of metal_level=5, mim_option=B, metal_top=9K, poly_res=1K, and mim_cap=2 -7. **connectivity** : Default is off. Use it for check connectivity rules. -8. **DENSITY** : Default is off. Use it for check density rules. -9. **DENSITY_only** : Default is off. Use it for check density rules only. -10. **ANTENNA** : Default is off. Use it to turn on Antenna checks. -11. **ANTENNA_only** : Default is off. Use it to turn on Antenna checks only. -12. **OFFGRID** : Default is on. Use it for checking off-grid and acute layers (ongrid of 0.005um and angles 45 deg. unless otherwise stated). +### Metal Stack Options +We have a list of metal stack options which corresponds to the following: +- **Option A** : combined options of metal_level=3, mim_option=A, metal_top=30K, poly_res=1K, and mim_cap=2 +- **Option B** : combined options of metal_level=4, mim_option=B, metal_top=11K, poly_res=1K, and mim_cap=2 +- **Option C** : combined options of metal_level=5, mim_option=B, metal_top=9K, poly_res=1K, and mim_cap=2 ### Usage From cd7a98f392644026a57625a79352ec193bff940b Mon Sep 17 00:00:00 2001 From: Amro Tork <74936860+atorkmabrains@users.noreply.github.com> Date: Mon, 30 Jan 2023 08:28:13 +0200 Subject: [PATCH 19/71] Update README.md --- klayout/drc/README.md | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/klayout/drc/README.md b/klayout/drc/README.md index 99d01c4b..865bd600 100644 --- a/klayout/drc/README.md +++ b/klayout/drc/README.md @@ -44,36 +44,36 @@ Example: ### Options -`--help -h` Print this help message. +- `--help -h` Print this help message. -`--path=` The input GDS file path. +- `--path=` The input GDS file path. -`--gf180mcu=` Select combined options of metal_top, mim_option, and metal_level. Allowed values (A, B, C). - gf180mcu=A: Select metal_top=30K mim_option=A metal_level=3LM - gf180mcu=B: Select metal_top=11K mim_option=B metal_level=4LM - gf180mcu=C: Select metal_top=9K mim_option=B metal_level=5LM +- `--gf180mcu=` Select combined options of metal_top, mim_option, and metal_level. Allowed values (A, B, C). 
+ - gf180mcu=A: Select metal_top=30K mim_option=A metal_level=3LM + - gf180mcu=B: Select metal_top=11K mim_option=B metal_level=4LM + - gf180mcu=C: Select metal_top=9K mim_option=B metal_level=5LM -`--topcell=` Topcell name to use. +- `--topcell=` Topcell name to use. -`--thr=` The number of threads used in run. +- `--thr=` The number of threads used in run. -`--run_mode=` Select klayout mode Allowed modes (flat , deep, tiling). [default: flat] +- `--run_mode=` Select klayout mode Allowed modes (flat , deep, tiling). [default: flat] -`--no_feol` Turn off FEOL rules from running. +- `--no_feol` Turn off FEOL rules from running. -`--no_beol` Turn off BEOL rules from running. +- `--no_beol` Turn off BEOL rules from running. -`--connectivity` Turn on connectivity rules. +- `--connectivity` Turn on connectivity rules. -`--density` Turn on Density rules. +- `--density` Turn on Density rules. -`--density_only` Turn on Density rules only. +- `--density_only` Turn on Density rules only. -`--antenna` Turn on Antenna checks. +- `--antenna` Turn on Antenna checks. -`--antenna_only` Turn on Antenna checks only. +- `--antenna_only` Turn on Antenna checks only. -`--no_offgrid` Turn off OFFGRID checking rules. +- `--no_offgrid` Turn off OFFGRID checking rules. ### **DRC Outputs** From e7b5b39ac04751fcba85d54c4b6b3a68a7ba1330 Mon Sep 17 00:00:00 2001 From: Amro Tork Date: Mon, 30 Jan 2023 11:01:02 +0200 Subject: [PATCH 20/71] Modify the deepnwell derived layers definitions based on findings from SAB. --- klayout/drc/rule_decks/main.drc | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/klayout/drc/rule_decks/main.drc b/klayout/drc/rule_decks/main.drc index 2976275e..7aca83e9 100644 --- a/klayout/drc/rule_decks/main.drc +++ b/klayout/drc/rule_decks/main.drc @@ -890,32 +890,38 @@ polygons_count += count logger.info("Total no. 
of polygons in the design is #{polygons_count}") logger.info("Starting deriving base layers.") + #===================================================== #------------- BASE LAYERS DERIVATIONS --------------- #===================================================== +dnwell_n = dnwell.not(lvpwell) +dnwell_p = dnwell.and(lvpwell) + +all_nwell = dnwell_n.join(nwell) + ncomp = comp.and(nplus) pcomp = comp.and(pplus) tgate = poly2.and(comp).not(res_mk) -ngate = nplus.and(tgate) -nactive = ncomp.not(nwell) +nactive = ncomp.not(all_nwell) +ngate = nactive.and(tgate) nsd = nactive.interacting(ngate).not(ngate).not(res_mk) -ptap = pcomp.not(nwell).join(pcomp.and(lvpwell)).not(res_mk) +ptap = pcomp.not(all_nwell).not(res_mk) -pgate = pplus.and(tgate) -pactive = pcomp.and(nwell) +pactive = pcomp.and(all_nwell) +pgate = pactive.and(tgate) psd = pactive.interacting(pgate).not(pgate).not(res_mk) -ntap = ncomp.and(nwell).join(ncomp.and(dnwell).not(lvpwell)).not(res_mk) +ntap = ncomp.and(all_nwell).not(res_mk) -ngate_dn = ngate.and(lvpwell).and(dnwell) -ptap_dn = ptap.and(dnwell).outside(well_diode_mk) +ngate_dn = ngate.and(dnwell_p) +ptap_dn = ptap.and(dnwell_p).outside(well_diode_mk) -pgate_dn = pgate.and(dnwell).not(lvpwell) -ntap_dn = ntap.and(dnwell) +pgate_dn = pgate.and(dnwell_n) +ntap_dn = ntap.and(dnwell_n) -psd_dn = pcomp.not(lvpwell).and(dnwell).interacting(pgate_dn).not(pgate_dn).not(res_mk) -nsd_dn = ncomp.and(dnwell).not(lvpwell).interacting(ngate_dn).not(ngate_dn).not(res_mk) +psd_dn = pcomp.and(dnwell_n).interacting(pgate_dn).not(pgate_dn).not(res_mk) +nsd_dn = ncomp.and(dnwell_p).interacting(ngate_dn).not(ngate_dn).not(res_mk) natcompsd = (nat & comp.interacting(poly2)) - tgate From c80ddb0d4b123e5feb04961534f2cbb4ba61a391 Mon Sep 17 00:00:00 2001 From: Amro Tork Date: Mon, 30 Jan 2023 11:08:39 +0200 Subject: [PATCH 21/71] Clean up the flake8. --- klayout/drc/testing/run_regression.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/klayout/drc/testing/run_regression.py b/klayout/drc/testing/run_regression.py index 6bdb9124..c6bf8372 100644 --- a/klayout/drc/testing/run_regression.py +++ b/klayout/drc/testing/run_regression.py @@ -380,7 +380,7 @@ def parse_existing_rules(rule_deck_path, output_path, target_table=None): table_rule_file = os.path.join(rule_deck_path, "rule_decks", f"{target_table}.drc") if not os.path.isfile(table_rule_file): raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), table_rule_file) - + drc_files = [table_rule_file] rules_data = list() From 74edb5b6976c9905a100274091868a46022efc79 Mon Sep 17 00:00:00 2001 From: Amro Tork Date: Mon, 30 Jan 2023 11:24:45 +0200 Subject: [PATCH 22/71] Considering the case if the rule is not implemented. 
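
Rules parsed from the decks now carry an `in_rule_deck = 1` marker; after the outer merge with the run results, a missing marker (NaN filled with 0) means a testcase exercises a rule that no deck implements. A toy example of the idea (rule names and counts are invented):

```python
# Toy illustration of the "Rule Not Implemented" flag.
import pandas as pd

rules_df = pd.DataFrame(
    {"table_name": ["dualgate"], "rule_name": ["DV.1"], "in_rule_deck": [1]}
)
results_df = pd.DataFrame(
    {
        "table_name": ["dualgate", "dualgate"],
        "rule_name": ["DV.1", "DV.99"],  # DV.99 exists only in the testcase gds
        "pass_patterns": [3, 1],
        "fail_patterns": [2, 1],
        "false_positive": [0, 0],
        "false_negative": [0, 0],
    }
)

df = results_df.merge(rules_df, how="outer", on=["table_name", "rule_name"])
df["in_rule_deck"] = df["in_rule_deck"].fillna(0)
df["rule_status"] = "Passed"
df.loc[df["in_rule_deck"] < 1, "rule_status"] = "Rule Not Implemented"
print(df[["rule_name", "in_rule_deck", "rule_status"]])
# DV.99 has no .output() in any deck, so it is reported as
# "Rule Not Implemented" instead of silently passing.
```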
--- klayout/drc/testing/run_regression.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/klayout/drc/testing/run_regression.py b/klayout/drc/testing/run_regression.py index c6bf8372..59ece3a8 100644 --- a/klayout/drc/testing/run_regression.py +++ b/klayout/drc/testing/run_regression.py @@ -393,6 +393,7 @@ def parse_existing_rules(rule_deck_path, output_path, target_table=None): rule_info = dict() rule_info["table_name"] = os.path.basename(runset).replace(".drc", "") rule_info["rule_name"] = line_list[1] + rule_info["in_rule_deck"] = 1 rules_data.append(rule_info) df = pd.DataFrame(rules_data) @@ -748,6 +749,7 @@ def aggregate_results(tc_df: pd.DataFrame, results_df: pd.DataFrame, rules_df: p df = results_df.merge(rules_df, how="outer", on=["table_name", "rule_name"]) df[ANALYSIS_RULES] = df[ANALYSIS_RULES].fillna(0) + df["in_rule_deck"] = df["in_rule_deck"].fillna(0) df = df.merge(tc_df[["table_name", "run_status"]], how="left", on="table_name") df["rule_status"] = "Passed" @@ -755,6 +757,7 @@ def aggregate_results(tc_df: pd.DataFrame, results_df: pd.DataFrame, rules_df: p df.loc[(df["false_positive"] > 0), "rule_status"] = "Rule Failed" df.loc[(df["pass_patterns"] < 1), "rule_status"] = "Rule Not Tested" df.loc[(df["fail_patterns"] < 1), "rule_status"] = "Rule Not Tested" + df.loc[(df["in_rule_deck"] < 1), "rule_status"] = "Rule Not Implemented" df.loc[~(df["run_status"].isin(["completed"])), "rule_status"] = "Test Case Run Failed" return df From e318be650f105e88dd24dc10391dfb0ddc58553d Mon Sep 17 00:00:00 2001 From: Amro Tork Date: Mon, 30 Jan 2023 14:06:26 +0200 Subject: [PATCH 23/71] Adding dualgate svg. --- .../drc/testing/testcases/unit/dualgate.svg | 345 ++++++++++++++++++ 1 file changed, 345 insertions(+) create mode 100644 klayout/drc/testing/testcases/unit/dualgate.svg diff --git a/klayout/drc/testing/testcases/unit/dualgate.svg b/klayout/drc/testing/testcases/unit/dualgate.svg new file mode 100644 index 00000000..1bc3d877 --- /dev/null +++ b/klayout/drc/testing/testcases/unit/dualgate.svg @@ -0,0 +1,345 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +DV.6 +DV.6 +DV.6 +DV.6 +DV.6 +DV.6 +DV.6 +DV.6 +DV.7 +DV.8 +DV.8 +DV.8 +DV.8 +DV.8 +DV.8 +DV.8 +DV.8 +DV.8 +DV.8 +DV.8 +DV.8 +DV.9 +DV.9 +DV.5 +DV.5 +DV.5 +DV.5 +DV.3 +DV.3 +DV.3 +DV.3 +DV.3 +DV.2 +DV.2 +DV.2 +DV.2 +DV.1 +DV.1 +DV.1 +DV.1 +DV.1 +DV.1 +Hole +Singular +Cor2Edge +Intersect +Basic +Touch +Angle45 +PriEnclSec +LIMITATION DEPENDENCIES: priWidth > secOvlpPriSide, priWidth>secOvlpPriTopBot, +DV.8 +Cor2Edge +Basic +Angle45 +Outside +LIMITATION DEPENDENCIES: priWidth > secOvlpPriSide, priWidth>secOvlpPriTopBot, +5 ERRs +2 ERRs +DV.5 Min. width = 0.7 +DV.1 Min. overlap of DNWELL for MV Area = 0.5 +MASK 6J DV +DF.2.6 +DV.3 Space to unrelated active = 0.24 +DV.2 MV space = 0.44 +3 ERRs +5 ERRs +4 ERRs +DV.6 Min. 
overlap of COMP for DV_2 outside DNWELL = 0.24
DV.7 COMP cannot be partially overlap by DV2_D
DV.9 LV and MV PMOS can not be sitting inside same NWELL
1 ERRs

\ No newline at end of file

From ce8dfdb97b243061b76eec9deb5c5a3f477671c3 Mon Sep 17 00:00:00 2001
From: Amro Tork
Date: Tue, 31 Jan 2023 07:49:44 +0200
Subject: [PATCH 24/71] Remove the comment of the switches.

---
 klayout/drc/run_drc.py | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/klayout/drc/run_drc.py b/klayout/drc/run_drc.py
index 757abd15..f2e5c353 100644
--- a/klayout/drc/run_drc.py
+++ b/klayout/drc/run_drc.py
@@ -302,17 +302,14 @@ def generate_klayout_switches(arguments, layout_path):
         switches["metal_top"] = "30K"
         switches["mim_option"] = "A"
         switches["metal_level"] = "3LM"
-        # switches = switches + f"-rd metal_top=30K -rd mim_option=A -rd metal_level=3LM "
     elif arguments["--variant"] == "B":
         switches["metal_top"] = "11K"
         switches["mim_option"] = "B"
         switches["metal_level"] = "4LM"
-        # switches = switches + f"-rd metal_top=11K -rd mim_option=B -rd metal_level=4LM "
     elif arguments["--variant"] == "C":
         switches["metal_top"] = "9K"
         switches["mim_option"] = "B"
         switches["metal_level"] = "5LM"
-        # switches = switches + f"-rd metal_top=9K -rd mim_option=B -rd metal_level=5LM "
     else:
         logging.error("variant switch allowed values are (A , B, C) only")
         exit()

From 18b0b4912dc7544c8f9b84617180780cc02010dd Mon Sep 17 00:00:00 2001
From: Amro Tork
Date: Tue, 31 Jan 2023 07:51:12 +0200
Subject: [PATCH 25/71] Add exit status 1 for bad variant.

---
 klayout/drc/run_drc.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/klayout/drc/run_drc.py b/klayout/drc/run_drc.py
index f2e5c353..4d0fcc13 100644
--- a/klayout/drc/run_drc.py
+++ b/klayout/drc/run_drc.py
@@ -312,7 +312,7 @@ def generate_klayout_switches(arguments, layout_path):
         switches["metal_level"] = "5LM"
     else:
         logging.error("variant switch allowed values are (A , B, C) only")
-        exit()
+        exit(1)

From 38947c3c0902a9ae2f6028bea0f949d69bab98dd Mon Sep 17 00:00:00 2001
From: Amro Tork
Date: Tue, 31 Jan 2023 07:55:46 +0200
Subject: [PATCH 26/71] Add klayout version string in error message.

---
 klayout/drc/run_drc.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/klayout/drc/run_drc.py b/klayout/drc/run_drc.py
index 4d0fcc13..91142e45 100644
--- a/klayout/drc/run_drc.py
+++ b/klayout/drc/run_drc.py
@@ -360,7 +360,7 @@ def check_klayout_version():
     klayout_v_list = []
 
     if klayout_v_ == "":
-        logging.error("Klayout is not found. Please make sure klayout is installed.")
+        logging.error(f"Klayout is not found. Please make sure klayout is installed. Current version: {klayout_v_}")
         exit(1)
     else:
         klayout_v_list = [int(v) for v in klayout_v_.split(" ")[-1].split(".")]

From ea1012e56e2ba260d7d6c0136c5aea0164283d60 Mon Sep 17 00:00:00 2001
From: Amro Tork
Date: Tue, 31 Jan 2023 07:57:59 +0200
Subject: [PATCH 27/71] Moving version message to the bottom.
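
The "Your Klayout version is ..." banner used to be printed before the sanity checks, so a rejected version still produced a reassuring log line. A condensed sketch of the reordered check (messages abbreviated; the bounds follow the 0.28.0 minimum this script enforces):

```python
# Condensed sketch of check_klayout_version() after the reorder.
import logging
from subprocess import check_output

def check_klayout_version():
    klayout_v_ = check_output("klayout -v", shell=True).decode().strip()
    if klayout_v_ == "":
        logging.error("Klayout is not found. Please make sure klayout is installed.")
        raise SystemExit(1)
    klayout_v_list = [int(v) for v in klayout_v_.split(" ")[-1].split(".")]
    if len(klayout_v_list) < 2 or len(klayout_v_list) > 3 or klayout_v_list[1] < 28:
        logging.error("Prerequisites at a minimum: KLayout 0.28.0")
        raise SystemExit(1)
    # Success banner only after every validation above has passed.
    logging.info(f"Your Klayout version is: {klayout_v_}")
```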
--- klayout/drc/run_drc.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/klayout/drc/run_drc.py b/klayout/drc/run_drc.py index 91142e45..61a68cda 100644 --- a/klayout/drc/run_drc.py +++ b/klayout/drc/run_drc.py @@ -365,10 +365,8 @@ def check_klayout_version(): else: klayout_v_list = [int(v) for v in klayout_v_.split(" ")[-1].split(".")] - logging.info(f"Your Klayout version is: {klayout_v_}") - if len(klayout_v_list) < 1 or len(klayout_v_list) > 3: - logging.error("Was not able to get klayout version properly.") + logging.error(f"Was not able to get klayout version properly. Current version: {klayout_v_}") exit(1) elif len(klayout_v_list) == 2: if klayout_v_list[1] < 28: @@ -384,6 +382,8 @@ def check_klayout_version(): "Using this klayout version has not been assesed in this development. Limits are unknown" ) exit(1) + + logging.info(f"Your Klayout version is: {klayout_v_}") def check_layout_path(layout_path): From 2eb8999b8f3b67af8cb004e6831143755f89a6cd Mon Sep 17 00:00:00 2001 From: Amro Tork Date: Tue, 31 Jan 2023 08:00:32 +0200 Subject: [PATCH 28/71] Merging the klayout version conditions --- klayout/drc/run_drc.py | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/klayout/drc/run_drc.py b/klayout/drc/run_drc.py index 61a68cda..86b7c693 100644 --- a/klayout/drc/run_drc.py +++ b/klayout/drc/run_drc.py @@ -368,20 +368,13 @@ def check_klayout_version(): if len(klayout_v_list) < 1 or len(klayout_v_list) > 3: logging.error(f"Was not able to get klayout version properly. Current version: {klayout_v_}") exit(1) - elif len(klayout_v_list) == 2: + elif len(klayout_v_list) >= 2 and len(klayout_v_list) <= 3: if klayout_v_list[1] < 28: logging.warning("Prerequisites at a minimum: KLayout 0.28.0") logging.error( "Using this klayout version has not been assesed in this development. Limits are unknown" ) exit(1) - elif len(klayout_v_list) == 3: - if klayout_v_list[1] < 28 : - logging.warning("Prerequisites at a minimum: KLayout 0.28.0") - logging.error( - "Using this klayout version has not been assesed in this development. Limits are unknown" - ) - exit(1) logging.info(f"Your Klayout version is: {klayout_v_}") From 4cad41e69c05e28457d55dbdd1751557dd2c4572 Mon Sep 17 00:00:00 2001 From: Amro Tork Date: Tue, 31 Jan 2023 08:03:23 +0200 Subject: [PATCH 29/71] Change the version check to error. --- klayout/drc/run_drc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/klayout/drc/run_drc.py b/klayout/drc/run_drc.py index 86b7c693..e89b0665 100644 --- a/klayout/drc/run_drc.py +++ b/klayout/drc/run_drc.py @@ -370,7 +370,7 @@ def check_klayout_version(): exit(1) elif len(klayout_v_list) >= 2 and len(klayout_v_list) <= 3: if klayout_v_list[1] < 28: - logging.warning("Prerequisites at a minimum: KLayout 0.28.0") + logging.error("Prerequisites at a minimum: KLayout 0.28.0") logging.error( "Using this klayout version has not been assesed in this development. Limits are unknown" ) From dcc0e3280e1eceb40f28b2b74873cbad82ae2385 Mon Sep 17 00:00:00 2001 From: Amro Tork Date: Tue, 31 Jan 2023 08:05:02 +0200 Subject: [PATCH 30/71] Providing gds file path with error. 
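
For reference, a sketch of the validator with the offending path interpolated into the messages (the format-check message gets the same treatment in a following commit):

```python
# Sketch of check_layout_path() with the path included in the errors.
import logging
import os

def check_layout_path(layout_path: str) -> str:
    if not os.path.isfile(layout_path):
        logging.error(f"## GDS file path {layout_path} provided doesn't exist or is not a file.")
        raise SystemExit(1)
    if ".gds" not in layout_path and ".oas" not in layout_path:
        logging.error(f"## Layout {layout_path} is not in GDSII or OASIS format. Please use gds format.")
        raise SystemExit(1)
    return os.path.abspath(layout_path)
```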
--- klayout/drc/run_drc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/klayout/drc/run_drc.py b/klayout/drc/run_drc.py index e89b0665..caeae6cf 100644 --- a/klayout/drc/run_drc.py +++ b/klayout/drc/run_drc.py @@ -395,7 +395,7 @@ def check_layout_path(layout_path): """ if not os.path.isfile(layout_path): - logging.error("## GDS file path provided doesn't exist or not a file.") + logging.error(f"## GDS file path {layout_path} provided doesn't exist or not a file.") exit(1) if ".gds" not in layout_path and ".oas" not in layout_path: From 5f14c444b8161c310717da175f5d655d7d82b87a Mon Sep 17 00:00:00 2001 From: Amro Tork Date: Tue, 31 Jan 2023 08:10:31 +0200 Subject: [PATCH 31/71] Multiple fixes in one commit. --- klayout/drc/run_drc.py | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/klayout/drc/run_drc.py b/klayout/drc/run_drc.py index caeae6cf..9fdcacc0 100644 --- a/klayout/drc/run_drc.py +++ b/klayout/drc/run_drc.py @@ -105,7 +105,7 @@ def check_drc_results(results_db_files: list): if len(full_violating_rules) > 0: logging.error("Klayout DRC run is not clean.") - logging.error("Violated rules are : {}\n".format(str(full_violating_rules))) + logging.error(f"Violated rules are : {str(full_violating_rules)}\n") exit(1) else: logging.info("Klayout DRC run is clean. GDS has no DRC violations.") @@ -399,7 +399,7 @@ def check_layout_path(layout_path): exit(1) if ".gds" not in layout_path and ".oas" not in layout_path: - logging.error("## Layout is not in GDSII or OASIS format. Please use gds format.") + logging.error(f"## Layout {layout_path} is not in GDSII or OASIS format. Please use gds format.") exit(1) return os.path.abspath(layout_path) @@ -414,11 +414,7 @@ def build_switches_string(sws: dict): sws : dict Dictionary that holds the Antenna swithces. """ - switches_str = "" - for k in sws: - switches_str += "-rd {}={} ".format(k, sws[k]) - - return switches_str + return ' '.join(f'-rd {k}={v}' for k,v in sws.items()) def run_check(drc_file: str, drc_name: str, path: str, run_dir: str, sws: dict): From d76f45aadd3d02a439770b9877bfff805f6e6db9 Mon Sep 17 00:00:00 2001 From: Amro Tork Date: Tue, 31 Jan 2023 08:12:46 +0200 Subject: [PATCH 32/71] Remove unused procedure. --- klayout/drc/run_drc.py | 29 ----------------------------- 1 file changed, 29 deletions(-) diff --git a/klayout/drc/run_drc.py b/klayout/drc/run_drc.py index 9fdcacc0..580bb2e2 100644 --- a/klayout/drc/run_drc.py +++ b/klayout/drc/run_drc.py @@ -111,35 +111,6 @@ def check_drc_results(results_db_files: list): logging.info("Klayout DRC run is clean. GDS has no DRC violations.") -def get_results(rule_deck, rules, lyrdb, type): - - mytree = ET.parse(f"{lyrdb}_{type}_gf{arguments['--gf180mcu']}.lyrdb") - myroot = mytree.getroot() - - violated = [] - - for lrule in rules: - # Loop on database to get the violations of required rule - for z in myroot[7]: - if f"'{lrule}'" == f"{z[1].text}": - violated.append(lrule) - break - - lyrdb_clean = lyrdb.split("/")[-1] - - if len(violated) > 0: - logging.error( - f"\nTotal # of DRC violations in {rule_deck}.drc is {len(violated)}. Please check {lyrdb_clean}_{type}_gf{arguments['--gf180mcu']}.lyrdb file For more details" - ) - logging.info("Klayout GDS DRC Not Clean") - logging.info(f"Violated rules are : {violated}\n") - else: - logging.info( - f"\nCongratulations !!. 
No DRC Violations found in {lyrdb_clean} for {rule_deck}.drc rule deck with switch gf{arguments['--gf180mcu']}" - ) - logging.info("Klayout GDS DRC Clean\n") - - def generate_drc_run_template(drc_dir: str, run_dir: str, run_tables_list: list = []): """ generate_drc_run_template will generate the template file to run drc in the run_dir path. From a3e01247250d7826d29c267c88f74ac4c1d03931 Mon Sep 17 00:00:00 2001 From: Amro Tork Date: Tue, 31 Jan 2023 08:15:42 +0200 Subject: [PATCH 33/71] Clean up logging. --- klayout/drc/run_drc.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/klayout/drc/run_drc.py b/klayout/drc/run_drc.py index 580bb2e2..8465ad3c 100644 --- a/klayout/drc/run_drc.py +++ b/klayout/drc/run_drc.py @@ -149,11 +149,9 @@ def generate_drc_run_template(drc_dir: str, run_dir: str, run_tables_list: list deck_name = "main" logging.info( - "## Generating template with for the following rule tables: {}".format( - str(all_tables) - ) + f"## Generating template with for the following rule tables: {all_tables}" ) - print(run_dir) + logging.info(f"## Your run dir located at: {run_dir}") all_tables.insert(0, "main.drc") all_tables.append("tail.drc") From 6659ae2a115862a088a7216769befdfa1521869e Mon Sep 17 00:00:00 2001 From: Amro Tork Date: Tue, 31 Jan 2023 08:28:41 +0200 Subject: [PATCH 34/71] Update the selection of the file. --- klayout/drc/run_drc.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/klayout/drc/run_drc.py b/klayout/drc/run_drc.py index 8465ad3c..287ba959 100644 --- a/klayout/drc/run_drc.py +++ b/klayout/drc/run_drc.py @@ -198,10 +198,7 @@ def get_list_of_tables(drc_dir: str): return [ os.path.basename(f).replace(".drc", "") for f in glob.glob(os.path.join(drc_dir, "rule_decks", "*.drc")) - if "antenna" not in f - and "density" not in f - and "main" not in f - and "tail" not in f + if all(t not in f for t in ('antenna', 'density', 'main', 'tail')) ] From e826996a644d7641dad309a340f875c5d42104af Mon Sep 17 00:00:00 2001 From: Amro Tork Date: Tue, 31 Jan 2023 08:46:32 +0200 Subject: [PATCH 35/71] Fix the fix path issue. --- klayout/drc/run_drc.py | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/klayout/drc/run_drc.py b/klayout/drc/run_drc.py index 287ba959..d52a35a8 100644 --- a/klayout/drc/run_drc.py +++ b/klayout/drc/run_drc.py @@ -572,7 +572,7 @@ def run_single_processor( check_drc_results(list_res_db_files) -def main(drc_run_dir: str, now_str: str, arguments: dict): +def main(drc_run_dir: str, arguments: dict): """ main function to run the DRC. @@ -580,18 +580,15 @@ def main(drc_run_dir: str, now_str: str, arguments: dict): ---------- drc_run_dir : str String with absolute path of the full run dir. - now_str : str - String with the run name for logs. arguments : dict Dictionary that holds the arguments used by user in the run command. This is generated by docopt library. 
""" # Check gds file existance - if os.path.exists(arguments["--path"]): - pass - else: - logging.error("The input GDS file path doesn't exist, please recheck.") - exit() + if not os.path.exists(arguments["--path"]): + file_path = arguments["--path"] + logging.error(f"The input GDS file path {file_path} doesn't exist, please recheck.") + exit(1) rule_deck_full_path = os.path.dirname(os.path.abspath(__file__)) @@ -658,4 +655,4 @@ def main(drc_run_dir: str, now_str: str, arguments: dict): ) # Calling main function - main(drc_run_dir, now_str, arguments) + main(drc_run_dir, arguments) From 8c1ce8761e022afac062441e6b30f02f5e702452 Mon Sep 17 00:00:00 2001 From: Amro Tork Date: Tue, 31 Jan 2023 09:27:50 +0200 Subject: [PATCH 36/71] Adding intentional pass condition for reporting. --- klayout/drc/testing/run_regression.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/klayout/drc/testing/run_regression.py b/klayout/drc/testing/run_regression.py index 59ece3a8..43493df3 100644 --- a/klayout/drc/testing/run_regression.py +++ b/klayout/drc/testing/run_regression.py @@ -147,12 +147,7 @@ def get_switches(yaml_file, rule_name): except yaml.YAMLError as exc: print(exc) - switches = list() - for param, value in yaml_dic[rule_name].items(): - switch = f"{param}={value}" - switches.append(switch) - - return switches + return [f'{param}={value}' for param, value in yaml_dic[rule_name].items()] def parse_results_db(results_database): @@ -752,12 +747,18 @@ def aggregate_results(tc_df: pd.DataFrame, results_df: pd.DataFrame, rules_df: p df["in_rule_deck"] = df["in_rule_deck"].fillna(0) df = df.merge(tc_df[["table_name", "run_status"]], how="left", on="table_name") - df["rule_status"] = "Passed" + df["rule_status"] = "Unknown" df.loc[(df["false_negative"] > 0), "rule_status"] = "Rule Failed" df.loc[(df["false_positive"] > 0), "rule_status"] = "Rule Failed" df.loc[(df["pass_patterns"] < 1), "rule_status"] = "Rule Not Tested" df.loc[(df["fail_patterns"] < 1), "rule_status"] = "Rule Not Tested" df.loc[(df["in_rule_deck"] < 1), "rule_status"] = "Rule Not Implemented" + + pass_cond = (df["pass_patterns"] > 0) & (df["fail_patterns"] > 0) & \ + (df["false_negative"] < 1) & (df["false_positive"] < 1) & \ + (df["in_rule_deck"] > 0) + + df.loc[pass_cond, "rule_status"] = "Rule Not Tested" df.loc[~(df["run_status"].isin(["completed"])), "rule_status"] = "Test Case Run Failed" return df From 6e0718cfbfc323c8bfa92c70bf06f005148aa121 Mon Sep 17 00:00:00 2001 From: Amro Tork <74936860+atorkmabrains@users.noreply.github.com> Date: Tue, 31 Jan 2023 10:09:12 +0200 Subject: [PATCH 37/71] Update README.md --- klayout/drc/testing/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/klayout/drc/testing/README.md b/klayout/drc/testing/README.md index 572e622b..254cfb5e 100644 --- a/klayout/drc/testing/README.md +++ b/klayout/drc/testing/README.md @@ -5,10 +5,10 @@ Explains how to test GF180nm DRC rule deck. ## Folder Structure ```text -📦testing +📁testing ┣ 📜README.md (This file to document the regression) ┣ 📜run_regression.py (Main regression script that runs the regression.) - ┣ 📜testcases (All testcases) + ┣ 📁testcases (All testcases) ``` ## Prerequisites From eddd7d4724820c32a0c2a74895e2eb3a84de318e Mon Sep 17 00:00:00 2001 From: Amro Tork Date: Tue, 31 Jan 2023 09:29:48 +0200 Subject: [PATCH 38/71] Clean up version checking. 
---
 klayout/drc/testing/run_regression.py | 14 +++-----------
 1 file changed, 3 insertions(+), 11 deletions(-)

diff --git a/klayout/drc/testing/run_regression.py b/klayout/drc/testing/run_regression.py
index 43493df3..4136f20a 100644
--- a/klayout/drc/testing/run_regression.py
+++ b/klayout/drc/testing/run_regression.py
@@ -112,21 +112,13 @@ def check_klayout_version():
     if len(klayout_v_list) < 1 or len(klayout_v_list) > 3:
         logging.error("Was not able to get klayout version properly.")
         exit(1)
-    elif len(klayout_v_list) == 2:
+    elif len(klayout_v_list) >= 2 and len(klayout_v_list) <= 3:
         if klayout_v_list[1] < 28:
-            logging.warning("Prerequisites at a minimum: KLayout 0.28.0")
+            logging.error("Prerequisites at a minimum: KLayout 0.28.0")
             logging.error(
                 "Using this klayout version has not been assesed in this development. Limits are unknown"
             )
             exit(1)
-    elif len(klayout_v_list) == 3:
-        if klayout_v_list[1] < 28 :
-            logging.warning("Prerequisites at a minimum: KLayout 0.28.0")
-            logging.error(
-                "Using this klayout version has not been assesed in this development. Limits are unknown"
-            )
-            exit(1)
-

 def get_switches(yaml_file, rule_name):
     """Parse yaml file and extract switches data
@@ -753,7 +745,7 @@ def aggregate_results(tc_df: pd.DataFrame, results_df: pd.DataFrame, rules_df: p
     df.loc[(df["pass_patterns"] < 1), "rule_status"] = "Rule Not Tested"
     df.loc[(df["fail_patterns"] < 1), "rule_status"] = "Rule Not Tested"
     df.loc[(df["in_rule_deck"] < 1), "rule_status"] = "Rule Not Implemented"
-
+
     pass_cond = (df["pass_patterns"] > 0) & (df["fail_patterns"] > 0) & \
         (df["false_negative"] < 1) & (df["false_positive"] < 1) & \
         (df["in_rule_deck"] > 0)

From a5f9900c357dfa2cb8c44857e149eb13a7696f84 Mon Sep 17 00:00:00 2001
From: Amro Tork
Date: Tue, 31 Jan 2023 12:51:56 +0200
Subject: [PATCH 39/71] Clean up run regression and run drc to make them work
 properly for our regression

---
 klayout/drc/run_drc.py                |   4 -
 klayout/drc/testing/run_regression.py | 102 ++++++++++++++------------
 2 files changed, 56 insertions(+), 50 deletions(-)

diff --git a/klayout/drc/run_drc.py b/klayout/drc/run_drc.py
index d52a35a8..8d0a2a6d 100644
--- a/klayout/drc/run_drc.py
+++ b/klayout/drc/run_drc.py
@@ -421,12 +421,8 @@ def run_check(drc_file: str, drc_name: str, path: str, run_dir: str, sws: dict):
         new_sws["report"] = report_path
         sws_str = build_switches_string(new_sws)
         sws_str += f" -rd table_name={drc_name}"
-        # log_file = os.path.join(
-        #     run_dir, "{}_{}.log".format(layout_base_name, drc_name)
-        # )

     run_str = f"klayout -b -r {drc_file} {sws_str}"
-
     check_call(run_str, shell=True)

     return report_path

diff --git a/klayout/drc/testing/run_regression.py b/klayout/drc/testing/run_regression.py
index 4136f20a..303f7604 100644
--- a/klayout/drc/testing/run_regression.py
+++ b/klayout/drc/testing/run_regression.py
@@ -53,7 +53,7 @@
 RULE_LAY_NUM = 10000
 PATH_WIDTH = 0.01
 RULE_STR_SEP = "--"
-ANALYSIS_RULES = ["pass_patterns", "fail_patterns", "false_negative", "false_positive"]
+ANALYSIS_RULES = ["pass_patterns", "fail_patterns", "false_negative", "false_positive", "not_tested"]

@@ -154,7 +154,7 @@ def parse_results_db(results_database):
     Returns
     -------
     set
-        A set that contains all rules in the database with violations
+        A set that contains all rules in the database with or without violations
     """

     mytree = ET.parse(results_database)
@@ -163,6 +163,12 @@ def parse_results_db(results_database):
     # Initial values for counter
     rule_counts = defaultdict(int)

+    # Get the list of all rules that ran
regardless it generated output or not + for z in myroot[5]: + rule_name = f"{z[0].text}" + rule_counts[rule_name] = 0 + + # Count rules with violations. for z in myroot[7]: rule_name = f"{z[1].text}".replace("'", "") rule_counts[rule_name] += 1 @@ -230,8 +236,16 @@ def run_test_case( if len(pattern_results) < 1: logging.error("%s generated an exception: %s" % (pattern_clean, e)) traceback.print_exc() - raise - + raise Exception('Failed DRC run.') + + # dumping log into output to make CI have the log + if os.path.isfile(pattern_log): + logging.info("# Dumping drc run output log:") + with open(pattern_log, "r") as f: + for line in f: + line = line.strip() + logging.info(f"{line}") + # Checking if run is completed or failed pattern_results = glob.glob(os.path.join(output_loc, f"{pattern_clean}*.lyrdb")) @@ -248,9 +262,29 @@ def run_test_case( # Generating final db file if os.path.exists(merged_output): final_report = f'{merged_output.split(".")[0]}_final.lyrdb' - call_str = f"klayout -b -r {runset_analysis} -rd input={merged_output} -rd report={final_report}" - check_call(call_str, shell=True) + analysis_log = f'{merged_output.split(".")[0]}_analysis.log' + call_str = f"klayout -b -r {runset_analysis} -rd input={merged_output} -rd report={final_report} > {analysis_log} 2>&1" + + failed_analysis_step = False + + try: + check_call(call_str, shell=True) + except Exception as e: + failed_analysis_step = True + logging.error("%s generated an exception: %s" % (pattern_clean, e)) + traceback.print_exc() + # dumping log into output to make CI have the log + if os.path.isfile(analysis_log): + logging.info("# Dumping analysis run output log:") + with open(analysis_log, "r") as f: + for line in f: + line = line.strip() + logging.info(f"{line}") + + if failed_analysis_step: + raise Exception('Failed DRC analysis run.') + if os.path.exists(final_report): rule_counts = parse_results_db(final_report) return rule_counts @@ -388,38 +422,6 @@ def parse_existing_rules(rule_deck_path, output_path, target_table=None): df.to_csv(os.path.join(output_path, "rule_deck_rules.csv"), index=False) return df - -def analyze_test_patterns_coverage(rules_df, tc_df, output_path): - """ - This function analyze the test patterns before running the test cases. - - Parameters - ---------- - rules_df : pd.DataFrame - DataFrame that holds all the rules that are found in the rule deck. - tc_df : pd.DataFrame - DataFrame that holds all the test cases and all the information required. - output_path : string or Path - Path of the run location to store the output analysis file. - - Returns - ------- - pd.DataFrame - A DataFrame with analysis of the rule testing coverage. - """ - cov_df = ( - tc_df[["table_name", "rule_name"]] - .groupby(["table_name", "rule_name"]) - .count() - .reset_index(drop=False) - ) - cov_df = cov_df.merge(rules_df, on="rule_name", how="outer") - cov_df["runset"].fillna("", inplace=True) - cov_df.to_csv(os.path.join(output_path, "testcases_coverage.csv"), index=False) - - return cov_df - - def generate_merged_testcase(orignal_testcase, marker_testcase): """ This function will merge orignal gds file with generated @@ -550,6 +552,8 @@ def convert_results_db_to_gds(results_database: str, rules_tested: list): fail_marker2 = {fail_marker2} text_marker = {text_marker} + full_chip = extent.sized(0.0) + ''' analysis_rules.append(runset_analysis_setup) @@ -627,6 +631,9 @@ def convert_results_db_to_gds(results_database: str, rules_tested: list): # Saving analysis rule deck. 
for r in rule_data_type_map: rule_lay_dt = rule_data_type_map.index(r) + 1 + rule_layer_name = f'rule_{r.replace(".", "_")}' + rule_layer = f'{rule_layer_name} = input({RULE_LAY_NUM}, {rule_lay_dt})' + pass_patterns_rule = f''' pass_marker.interacting( text_marker.texts("{r}") ).output("{r}{RULE_STR_SEP}pass_patterns", "{r}{RULE_STR_SEP}pass_patterns polygons") ''' @@ -634,16 +641,21 @@ def convert_results_db_to_gds(results_database: str, rules_tested: list): fail_marker2.interacting(fail_marker.interacting(text_marker.texts("{r}")) ).or( fail_marker.interacting(text_marker.texts("{r}")).not_interacting(fail_marker2) ).output("{r}{RULE_STR_SEP}fail_patterns", "{r}{RULE_STR_SEP}fail_patterns polygons") ''' false_pos_rule = f''' - pass_marker.interacting(text_marker.texts("{r}")).interacting(input({RULE_LAY_NUM}, {rule_lay_dt})).output("{r}{RULE_STR_SEP}false_positive", "{r}{RULE_STR_SEP}false_positive occurred") + pass_marker.interacting(text_marker.texts("{r}")).interacting({rule_layer_name}).output("{r}{RULE_STR_SEP}false_positive", "{r}{RULE_STR_SEP}false_positive occurred") ''' false_neg_rule = f''' - ((fail_marker2.interacting(fail_marker.interacting(text_marker.texts("{r}")))).or((fail_marker.interacting(input(11, 222).texts("{r}")).not_interacting(fail_marker2)))).not_interacting(input({RULE_LAY_NUM}, {rule_lay_dt})).output("{r}{RULE_STR_SEP}false_negative", "{r}{RULE_STR_SEP}false_negative occurred") + ((fail_marker2.interacting(fail_marker.interacting(text_marker.texts("{r}")))).or((fail_marker.interacting(input(11, 222).texts("{r}")).not_interacting(fail_marker2)))).not_interacting({rule_layer_name}).output("{r}{RULE_STR_SEP}false_negative", "{r}{RULE_STR_SEP}false_negative occurred") ''' - + rule_not_tested = f''' + full_chip.not_interacting({rule_layer_name}).output("{r}{RULE_STR_SEP}not_tested", "{r}{RULE_STR_SEP}not_tested occurred") + ''' + + analysis_rules.append(rule_layer) analysis_rules.append(pass_patterns_rule) analysis_rules.append(fail_patterns_rule) analysis_rules.append(false_pos_rule) analysis_rules.append(false_neg_rule) + analysis_rules.append(rule_not_tested) for r in rules_tested: if r in rule_data_type_map: @@ -742,17 +754,15 @@ def aggregate_results(tc_df: pd.DataFrame, results_df: pd.DataFrame, rules_df: p df["rule_status"] = "Unknown" df.loc[(df["false_negative"] > 0), "rule_status"] = "Rule Failed" df.loc[(df["false_positive"] > 0), "rule_status"] = "Rule Failed" - df.loc[(df["pass_patterns"] < 1), "rule_status"] = "Rule Not Tested" - df.loc[(df["fail_patterns"] < 1), "rule_status"] = "Rule Not Tested" + df.loc[(df["not_tested"] > 0), "rule_status"] = "Rule Not Tested" df.loc[(df["in_rule_deck"] < 1), "rule_status"] = "Rule Not Implemented" + df.loc[~(df["run_status"].isin(["completed"])), "rule_status"] = "Test Case Run Failed" pass_cond = (df["pass_patterns"] > 0) & (df["fail_patterns"] > 0) & \ (df["false_negative"] < 1) & (df["false_positive"] < 1) & \ (df["in_rule_deck"] > 0) - df.loc[pass_cond, "rule_status"] = "Rule Not Tested" - df.loc[~(df["run_status"].isin(["completed"])), "rule_status"] = "Test Case Run Failed" - + df.loc[pass_cond, "rule_status"] = "Passed" return df From 167ed093a8242ca1530f8edba9cfa36985617302 Mon Sep 17 00:00:00 2001 From: Amro Tork Date: Tue, 31 Jan 2023 12:53:03 +0200 Subject: [PATCH 40/71] Clean up code with black. 
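
Running black is a layout-only change; no behavior in run_drc.py or
run_regression.py should move. A quick, hedged way to convince yourself that a
formatting-only commit is safe is to compare the ASTs of the old and new
sources; the file paths below are placeholders, not part of this change:

```python
import ast

def same_logic(before_src: str, after_src: str) -> bool:
    """True when two sources parse to the same AST, i.e. the diff is cosmetic."""
    return ast.dump(ast.parse(before_src)) == ast.dump(ast.parse(after_src))

with open("run_drc.py.orig") as f_old, open("run_drc.py") as f_new:
    assert same_logic(f_old.read(), f_new.read()), "diff is not formatting-only"
```

Comments and whitespace are the only things this check cannot see, and those
are exactly what black is allowed to rewrite.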
--- klayout/drc/run_drc.py | 26 ++-- klayout/drc/testing/run_regression.py | 196 ++++++++++++++++---------- 2 files changed, 140 insertions(+), 82 deletions(-) diff --git a/klayout/drc/run_drc.py b/klayout/drc/run_drc.py index 8d0a2a6d..84d7cd93 100644 --- a/klayout/drc/run_drc.py +++ b/klayout/drc/run_drc.py @@ -198,7 +198,7 @@ def get_list_of_tables(drc_dir: str): return [ os.path.basename(f).replace(".drc", "") for f in glob.glob(os.path.join(drc_dir, "rule_decks", "*.drc")) - if all(t not in f for t in ('antenna', 'density', 'main', 'tail')) + if all(t not in f for t in ("antenna", "density", "main", "tail")) ] @@ -326,13 +326,17 @@ def check_klayout_version(): klayout_v_list = [] if klayout_v_ == "": - logging.error(f"Klayout is not found. Please make sure klayout is installed. Current version: {klayout_v_}") + logging.error( + f"Klayout is not found. Please make sure klayout is installed. Current version: {klayout_v_}" + ) exit(1) else: klayout_v_list = [int(v) for v in klayout_v_.split(" ")[-1].split(".")] if len(klayout_v_list) < 1 or len(klayout_v_list) > 3: - logging.error(f"Was not able to get klayout version properly. Current version: {klayout_v_}") + logging.error( + f"Was not able to get klayout version properly. Current version: {klayout_v_}" + ) exit(1) elif len(klayout_v_list) >= 2 and len(klayout_v_list) <= 3: if klayout_v_list[1] < 28: @@ -341,7 +345,7 @@ def check_klayout_version(): "Using this klayout version has not been assesed in this development. Limits are unknown" ) exit(1) - + logging.info(f"Your Klayout version is: {klayout_v_}") @@ -361,11 +365,15 @@ def check_layout_path(layout_path): """ if not os.path.isfile(layout_path): - logging.error(f"## GDS file path {layout_path} provided doesn't exist or not a file.") + logging.error( + f"## GDS file path {layout_path} provided doesn't exist or not a file." + ) exit(1) if ".gds" not in layout_path and ".oas" not in layout_path: - logging.error(f"## Layout {layout_path} is not in GDSII or OASIS format. Please use gds format.") + logging.error( + f"## Layout {layout_path} is not in GDSII or OASIS format. Please use gds format." + ) exit(1) return os.path.abspath(layout_path) @@ -380,7 +388,7 @@ def build_switches_string(sws: dict): sws : dict Dictionary that holds the Antenna swithces. """ - return ' '.join(f'-rd {k}={v}' for k,v in sws.items()) + return " ".join(f"-rd {k}={v}" for k, v in sws.items()) def run_check(drc_file: str, drc_name: str, path: str, run_dir: str, sws: dict): @@ -583,7 +591,9 @@ def main(drc_run_dir: str, arguments: dict): # Check gds file existance if not os.path.exists(arguments["--path"]): file_path = arguments["--path"] - logging.error(f"The input GDS file path {file_path} doesn't exist, please recheck.") + logging.error( + f"The input GDS file path {file_path} doesn't exist, please recheck." 
+ ) exit(1) rule_deck_full_path = os.path.dirname(os.path.abspath(__file__)) diff --git a/klayout/drc/testing/run_regression.py b/klayout/drc/testing/run_regression.py index 303f7604..293ec8ac 100644 --- a/klayout/drc/testing/run_regression.py +++ b/klayout/drc/testing/run_regression.py @@ -53,7 +53,13 @@ RULE_LAY_NUM = 10000 PATH_WIDTH = 0.01 RULE_STR_SEP = "--" -ANALYSIS_RULES = ["pass_patterns", "fail_patterns", "false_negative", "false_positive", "not_tested"] +ANALYSIS_RULES = [ + "pass_patterns", + "fail_patterns", + "false_negative", + "false_positive", + "not_tested", +] def get_unit_test_coverage(gds_file): @@ -82,7 +88,9 @@ def get_unit_test_coverage(gds_file): for cell in top_cells: flatten_cell = cell.flatten() # Get all text labels for each cell - labels = flatten_cell.get_labels(apply_repetitions=True, depth=None, layer=lay_num, texttype=lay_dt) + labels = flatten_cell.get_labels( + apply_repetitions=True, depth=None, layer=lay_num, texttype=lay_dt + ) # Get label value for label in labels: rule = label.text @@ -120,6 +128,7 @@ def check_klayout_version(): ) exit(1) + def get_switches(yaml_file, rule_name): """Parse yaml file and extract switches data Parameters @@ -133,13 +142,13 @@ def get_switches(yaml_file, rule_name): """ # load yaml config data - with open(yaml_file, 'r') as stream: + with open(yaml_file, "r") as stream: try: yaml_dic = yaml.safe_load(stream) except yaml.YAMLError as exc: print(exc) - return [f'{param}={value}' for param, value in yaml_dic[rule_name].items()] + return [f"{param}={value}" for param, value in yaml_dic[rule_name].items()] def parse_results_db(results_database): @@ -206,7 +215,9 @@ def run_test_case( rule_counts = defaultdict(int) # Get switches used for each run - sw_file = os.path.join(Path(layout_path.parent).absolute(), f"{table_name}.{SUPPORTED_SW_EXT}") + sw_file = os.path.join( + Path(layout_path.parent).absolute(), f"{table_name}.{SUPPORTED_SW_EXT}" + ) if os.path.exists(sw_file): switches = " ".join(get_switches(sw_file, table_name)) @@ -236,8 +247,8 @@ def run_test_case( if len(pattern_results) < 1: logging.error("%s generated an exception: %s" % (pattern_clean, e)) traceback.print_exc() - raise Exception('Failed DRC run.') - + raise Exception("Failed DRC run.") + # dumping log into output to make CI have the log if os.path.isfile(pattern_log): logging.info("# Dumping drc run output log:") @@ -245,7 +256,7 @@ def run_test_case( for line in f: line = line.strip() logging.info(f"{line}") - + # Checking if run is completed or failed pattern_results = glob.glob(os.path.join(output_loc, f"{pattern_clean}*.lyrdb")) @@ -254,7 +265,9 @@ def run_test_case( if len(pattern_results) > 0: # db to gds conversion - marker_output, runset_analysis = convert_results_db_to_gds(pattern_results[0], rules_tested) + marker_output, runset_analysis = convert_results_db_to_gds( + pattern_results[0], rules_tested + ) # Generating merged testcase for violated rules merged_output = generate_merged_testcase(layout_path, marker_output) @@ -264,7 +277,7 @@ def run_test_case( final_report = f'{merged_output.split(".")[0]}_final.lyrdb' analysis_log = f'{merged_output.split(".")[0]}_analysis.log' call_str = f"klayout -b -r {runset_analysis} -rd input={merged_output} -rd report={final_report} > {analysis_log} 2>&1" - + failed_analysis_step = False try: @@ -281,10 +294,10 @@ def run_test_case( for line in f: line = line.strip() logging.info(f"{line}") - + if failed_analysis_step: - raise Exception('Failed DRC analysis run.') - + raise Exception("Failed DRC analysis 
run.") + if os.path.exists(final_report): rule_counts = parse_results_db(final_report) return rule_counts @@ -338,26 +351,43 @@ def run_all_test_cases(tc_df, drc_dir, run_dir, num_workers): try: rule_counts = future.result() if rule_counts: - rule_counts_df = pd.DataFrame({"analysis_rule": rule_counts.keys(), "count": rule_counts.values()}) - rule_counts_df["rule_name"] = rule_counts_df["analysis_rule"].str.split(RULE_STR_SEP).str[0] - rule_counts_df["type"] = rule_counts_df["analysis_rule"].str.split(RULE_STR_SEP).str[1] + rule_counts_df = pd.DataFrame( + { + "analysis_rule": rule_counts.keys(), + "count": rule_counts.values(), + } + ) + rule_counts_df["rule_name"] = ( + rule_counts_df["analysis_rule"].str.split(RULE_STR_SEP).str[0] + ) + rule_counts_df["type"] = ( + rule_counts_df["analysis_rule"].str.split(RULE_STR_SEP).str[1] + ) rule_counts_df.drop(columns=["analysis_rule"], inplace=True) rule_counts_df["count"] = rule_counts_df["count"].astype(int) - rule_counts_df = rule_counts_df.pivot(index="rule_name", - columns="type", - values="count") + rule_counts_df = rule_counts_df.pivot( + index="rule_name", columns="type", values="count" + ) rule_counts_df = rule_counts_df.fillna(0) rule_counts_df = rule_counts_df.reset_index(drop=False) - rule_counts_df = rule_counts_df.rename(columns={"index": "rule_name"}) + rule_counts_df = rule_counts_df.rename( + columns={"index": "rule_name"} + ) - rule_counts_df["table_name"] = tc_df.loc[tc_df["run_id"] == run_id, "table_name"].iloc[0] + rule_counts_df["table_name"] = tc_df.loc[ + tc_df["run_id"] == run_id, "table_name" + ].iloc[0] for c in ANALYSIS_RULES: if c not in rule_counts_df.columns: rule_counts_df[c] = 0 - rule_counts_df[ANALYSIS_RULES] = rule_counts_df[ANALYSIS_RULES].astype(int) - rule_counts_df = rule_counts_df[["table_name", "rule_name"] + ANALYSIS_RULES] + rule_counts_df[ANALYSIS_RULES] = rule_counts_df[ + ANALYSIS_RULES + ].astype(int) + rule_counts_df = rule_counts_df[ + ["table_name", "rule_name"] + ANALYSIS_RULES + ] results_df_list.append(rule_counts_df) tc_df.loc[tc_df["run_id"] == run_id, "run_status"] = "completed" else: @@ -398,9 +428,13 @@ def parse_existing_rules(rule_deck_path, output_path, target_table=None): if target_table is None: drc_files = glob.glob(os.path.join(rule_deck_path, "rule_decks", "*.drc")) else: - table_rule_file = os.path.join(rule_deck_path, "rule_decks", f"{target_table}.drc") + table_rule_file = os.path.join( + rule_deck_path, "rule_decks", f"{target_table}.drc" + ) if not os.path.isfile(table_rule_file): - raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), table_rule_file) + raise FileNotFoundError( + errno.ENOENT, os.strerror(errno.ENOENT), table_rule_file + ) drc_files = [table_rule_file] @@ -412,7 +446,9 @@ def parse_existing_rules(rule_deck_path, output_path, target_table=None): if ".output" in line: line_list = line.split('"') rule_info = dict() - rule_info["table_name"] = os.path.basename(runset).replace(".drc", "") + rule_info["table_name"] = os.path.basename(runset).replace( + ".drc", "" + ) rule_info["rule_name"] = line_list[1] rule_info["in_rule_deck"] = 1 rules_data.append(rule_info) @@ -422,6 +458,7 @@ def parse_existing_rules(rule_deck_path, output_path, target_table=None): df.to_csv(os.path.join(output_path, "rule_deck_rules.csv"), index=False) return df + def generate_merged_testcase(orignal_testcase, marker_testcase): """ This function will merge orignal gds file with generated @@ -449,7 +486,9 @@ def generate_merged_testcase(orignal_testcase, marker_testcase): # 
Getting flattened top cells top_cell_org = lib_org.top_level()[0].flatten(apply_repetitions=True) top_cell_marker = lib_marker.top_level()[0].flatten(apply_repetitions=True) - marker_polygons = top_cell_marker.get_polygons(apply_repetitions=True, include_paths=True, depth=None) + marker_polygons = top_cell_marker.get_polygons( + apply_repetitions=True, include_paths=True, depth=None + ) # Merging all polygons of markers with original testcase for marker_polygon in marker_polygons: @@ -488,8 +527,8 @@ def draw_polygons(polygon_data, cell, lay_num, lay_dt, path_width): """ # Cleaning data points - polygon_data = re.sub(r'\s+', '', polygon_data) - polygon_data = re.sub(r'[()]', '', polygon_data) + polygon_data = re.sub(r"\s+", "", polygon_data) + polygon_data = re.sub(r"[()]", "", polygon_data) tag_split = polygon_data.split(":") tag = tag_split[0] @@ -499,17 +538,26 @@ def draw_polygons(polygon_data, cell, lay_num, lay_dt, path_width): # Select shape type to be drawn if tag == "polygon": for poly in polygons: - points = [(float(p.split(",")[0]), float(p.split(",")[1])) for p in poly.split(";")] + points = [ + (float(p.split(",")[0]), float(p.split(",")[1])) + for p in poly.split(";") + ] cell.add(gdstk.Polygon(points, lay_num, lay_dt)) elif tag == "edge-pair": for poly in polygons: - points = [(float(p.split(",")[0]), float(p.split(",")[1])) for p in poly.split(";")] + points = [ + (float(p.split(",")[0]), float(p.split(",")[1])) + for p in poly.split(";") + ] cell.add(gdstk.FlexPath(points, path_width, layer=lay_num, datatype=lay_dt)) elif tag == "edge": for poly in polygons: - points = [(float(p.split(",")[0]), float(p.split(",")[1])) for p in poly.split(";")] + points = [ + (float(p.split(",")[0]), float(p.split(",")[1])) + for p in poly.split(";") + ] cell.add(gdstk.FlexPath(points, path_width, layer=lay_num, datatype=lay_dt)) else: logging.error(f"## Unknown type: {tag} ignored") @@ -544,7 +592,7 @@ def convert_results_db_to_gds(results_database: str, rules_tested: list): output_runset_path = f'{results_database.replace(".lyrdb", "")}_analysis.drc' analysis_rules = [] - runset_analysis_setup = f''' + runset_analysis_setup = f""" source($input) report("DRC analysis run report at", $report) pass_marker = {pass_marker} @@ -554,7 +602,7 @@ def convert_results_db_to_gds(results_database: str, rules_tested: list): full_chip = extent.sized(0.0) - ''' + """ analysis_rules.append(runset_analysis_setup) # Generating violated rules and its points @@ -564,7 +612,7 @@ def convert_results_db_to_gds(results_database: str, rules_tested: list): in_item = False rule_data_type_map = list() - for ev, elem in tqdm(ET.iterparse(results_database, events=('start', 'end'))): + for ev, elem in tqdm(ET.iterparse(results_database, events=("start", "end"))): if elem.tag != "item" and not in_item: elem.clear() @@ -632,24 +680,24 @@ def convert_results_db_to_gds(results_database: str, rules_tested: list): for r in rule_data_type_map: rule_lay_dt = rule_data_type_map.index(r) + 1 rule_layer_name = f'rule_{r.replace(".", "_")}' - rule_layer = f'{rule_layer_name} = input({RULE_LAY_NUM}, {rule_lay_dt})' + rule_layer = f"{rule_layer_name} = input({RULE_LAY_NUM}, {rule_lay_dt})" - pass_patterns_rule = f''' + pass_patterns_rule = f""" pass_marker.interacting( text_marker.texts("{r}") ).output("{r}{RULE_STR_SEP}pass_patterns", "{r}{RULE_STR_SEP}pass_patterns polygons") - ''' - fail_patterns_rule = f''' + """ + fail_patterns_rule = f""" fail_marker2.interacting(fail_marker.interacting(text_marker.texts("{r}")) ).or( 
fail_marker.interacting(text_marker.texts("{r}")).not_interacting(fail_marker2) ).output("{r}{RULE_STR_SEP}fail_patterns", "{r}{RULE_STR_SEP}fail_patterns polygons") - ''' - false_pos_rule = f''' + """ + false_pos_rule = f""" pass_marker.interacting(text_marker.texts("{r}")).interacting({rule_layer_name}).output("{r}{RULE_STR_SEP}false_positive", "{r}{RULE_STR_SEP}false_positive occurred") - ''' - false_neg_rule = f''' + """ + false_neg_rule = f""" ((fail_marker2.interacting(fail_marker.interacting(text_marker.texts("{r}")))).or((fail_marker.interacting(input(11, 222).texts("{r}")).not_interacting(fail_marker2)))).not_interacting({rule_layer_name}).output("{r}{RULE_STR_SEP}false_negative", "{r}{RULE_STR_SEP}false_negative occurred") - ''' - rule_not_tested = f''' + """ + rule_not_tested = f""" full_chip.not_interacting({rule_layer_name}).output("{r}{RULE_STR_SEP}not_tested", "{r}{RULE_STR_SEP}not_tested occurred") - ''' - + """ + analysis_rules.append(rule_layer) analysis_rules.append(pass_patterns_rule) analysis_rules.append(fail_patterns_rule) @@ -661,12 +709,12 @@ def convert_results_db_to_gds(results_database: str, rules_tested: list): if r in rule_data_type_map: continue - pass_patterns_rule = f''' + pass_patterns_rule = f""" pass_marker.interacting( text_marker.texts("{r}") ).output("{r}{RULE_STR_SEP}pass_patterns", "{r}{RULE_STR_SEP}pass_patterns polygons") - ''' - fail_patterns_rule = f''' + """ + fail_patterns_rule = f""" fail_marker2.interacting(fail_marker.interacting(text_marker.texts("{r}")) ).or( fail_marker.interacting(text_marker.texts("{r}")).not_interacting(fail_marker2) ).output("{r}{RULE_STR_SEP}fail_patterns", "{r}{RULE_STR_SEP}fail_patterns polygons") - ''' + """ analysis_rules.append(pass_patterns_rule) analysis_rules.append(fail_patterns_rule) @@ -702,9 +750,7 @@ def build_tests_dataframe(unit_test_cases_dir, target_table): # Get test cases df from test cases tc_df = pd.DataFrame({"test_path": all_unit_test_cases}) - tc_df["table_name"] = tc_df["test_path"].apply( - lambda x: x.name.replace(".gds", "") - ) + tc_df["table_name"] = tc_df["test_path"].apply(lambda x: x.name.replace(".gds", "")) if target_table is not None: tc_df = tc_df[tc_df["table_name"] == target_table] @@ -717,7 +763,9 @@ def build_tests_dataframe(unit_test_cases_dir, target_table): return tc_df -def aggregate_results(tc_df: pd.DataFrame, results_df: pd.DataFrame, rules_df: pd.DataFrame): +def aggregate_results( + tc_df: pd.DataFrame, results_df: pd.DataFrame, rules_df: pd.DataFrame +): """ aggregate_results Aggregate the results for all runs. 
@@ -756,11 +804,17 @@ def aggregate_results(tc_df: pd.DataFrame, results_df: pd.DataFrame, rules_df: p df.loc[(df["false_positive"] > 0), "rule_status"] = "Rule Failed" df.loc[(df["not_tested"] > 0), "rule_status"] = "Rule Not Tested" df.loc[(df["in_rule_deck"] < 1), "rule_status"] = "Rule Not Implemented" - df.loc[~(df["run_status"].isin(["completed"])), "rule_status"] = "Test Case Run Failed" - - pass_cond = (df["pass_patterns"] > 0) & (df["fail_patterns"] > 0) & \ - (df["false_negative"] < 1) & (df["false_positive"] < 1) & \ - (df["in_rule_deck"] > 0) + df.loc[ + ~(df["run_status"].isin(["completed"])), "rule_status" + ] = "Test Case Run Failed" + + pass_cond = ( + (df["pass_patterns"] > 0) + & (df["fail_patterns"] > 0) + & (df["false_negative"] < 1) + & (df["false_positive"] < 1) + & (df["in_rule_deck"] > 0) + ) df.loc[pass_cond, "rule_status"] = "Passed" return df @@ -790,7 +844,9 @@ def run_regression(drc_dir, output_path, target_table, cpu_count): ## Parse Existing Rules rules_df = parse_existing_rules(drc_dir, output_path, target_table) - logging.info("## Total number of rules found in rule decks: {}".format(len(rules_df))) + logging.info( + "## Total number of rules found in rule decks: {}".format(len(rules_df)) + ) logging.info("## Parsed Rules: \n" + str(rules_df)) ## Get all test cases available in the repo. @@ -811,14 +867,10 @@ def run_regression(drc_dir, output_path, target_table, cpu_count): logging.info("## Final analysis table: \n" + str(df)) ## Generate error if there are any missing info or fails. - df.to_csv( - os.path.join(output_path, "all_test_cases_results.csv"), index=False - ) + df.to_csv(os.path.join(output_path, "all_test_cases_results.csv"), index=False) ## Check if there any rules that generated false positive or false negative - failing_results = df[ - ~df["rule_status"].isin(["Passed"]) - ] + failing_results = df[~df["rule_status"].isin(["Passed"])] logging.info("## Failing test cases: \n" + str(failing_results)) if len(failing_results) > 0: @@ -869,9 +921,7 @@ def main(drc_dir: str, output_path: str, target_table: str): check_klayout_version() # Calling regression function - run_status = run_regression( - drc_dir, output_path, target_table, cpu_count - ) + run_status = run_regression(drc_dir, output_path, target_table, cpu_count) # End of execution time logging.info("Total execution time {}s".format(time.time() - t0)) @@ -915,13 +965,11 @@ def main(drc_dir: str, output_path: str, target_table: str): level=logging.DEBUG, handlers=[ logging.FileHandler(os.path.join(output_path, "{}.log".format(run_name))), - logging.StreamHandler() + logging.StreamHandler(), ], format="%(asctime)s | %(levelname)-7s | %(message)s", datefmt="%d-%b-%Y %H:%M:%S", ) # Calling main function - run_status = main( - drc_dir, output_path, target_table - ) + run_status = main(drc_dir, output_path, target_table) From 4c502f8e4df4dad13e05daedc6f92c1fc6b37e65 Mon Sep 17 00:00:00 2001 From: Amro Tork Date: Tue, 31 Jan 2023 13:19:05 +0200 Subject: [PATCH 41/71] Cover the case if there is only fail patterns for the rule. 
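
A rule that has fail patterns drawn for it but no pass patterns was never
exercised from the clean side, so it must not fall through to "Passed".
Because the rule_status masks are applied in sequence and the last matching
assignment wins, the extra pass_patterns guard has to come after the failure
checks. A toy frame (synthetic data, not the real regression output)
illustrating the precedence:

```python
import pandas as pd

df = pd.DataFrame(
    {"pass_patterns": [3, 0], "fail_patterns": [2, 4], "false_negative": [0, 0]}
)
df["rule_status"] = "Unknown"
df.loc[df["false_negative"] > 0, "rule_status"] = "Rule Failed"
# The guard added here: no pass patterns at all means the rule was never
# tested from the clean side, whatever the fail patterns say.
df.loc[df["pass_patterns"] < 1, "rule_status"] = "Rule Not Tested"
print(df["rule_status"].tolist())  # ['Unknown', 'Rule Not Tested']
```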
--- klayout/drc/testing/run_regression.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/klayout/drc/testing/run_regression.py b/klayout/drc/testing/run_regression.py index 293ec8ac..6ea61b12 100644 --- a/klayout/drc/testing/run_regression.py +++ b/klayout/drc/testing/run_regression.py @@ -802,7 +802,7 @@ def aggregate_results( df["rule_status"] = "Unknown" df.loc[(df["false_negative"] > 0), "rule_status"] = "Rule Failed" df.loc[(df["false_positive"] > 0), "rule_status"] = "Rule Failed" - df.loc[(df["not_tested"] > 0), "rule_status"] = "Rule Not Tested" + df.loc[(df["not_tested"] > 0) | (df["pass_patterns"] < 1), "rule_status"] = "Rule Not Tested" df.loc[(df["in_rule_deck"] < 1), "rule_status"] = "Rule Not Implemented" df.loc[ ~(df["run_status"].isin(["completed"])), "rule_status" From 35b40da033bbacb2159aa5096414279cabdbd81c Mon Sep 17 00:00:00 2001 From: Amro Tork Date: Tue, 31 Jan 2023 13:27:58 +0200 Subject: [PATCH 42/71] Make sure to check if the counts of pass or fail patterns is incorrect. --- klayout/drc/testing/run_regression.py | 1 + 1 file changed, 1 insertion(+) diff --git a/klayout/drc/testing/run_regression.py b/klayout/drc/testing/run_regression.py index 6ea61b12..25a83a12 100644 --- a/klayout/drc/testing/run_regression.py +++ b/klayout/drc/testing/run_regression.py @@ -803,6 +803,7 @@ def aggregate_results( df.loc[(df["false_negative"] > 0), "rule_status"] = "Rule Failed" df.loc[(df["false_positive"] > 0), "rule_status"] = "Rule Failed" df.loc[(df["not_tested"] > 0) | (df["pass_patterns"] < 1), "rule_status"] = "Rule Not Tested" + df.loc[(df["fail_patterns"] < 1) | (df["pass_patterns"] < 1), "rule_status"] = "Rule Not Tested" df.loc[(df["in_rule_deck"] < 1), "rule_status"] = "Rule Not Implemented" df.loc[ ~(df["run_status"].isin(["completed"])), "rule_status" From 50f671b55d546ab5bd6624ae9798d46a8a85cec4 Mon Sep 17 00:00:00 2001 From: Amro Tork Date: Tue, 31 Jan 2023 13:34:22 +0200 Subject: [PATCH 43/71] Make sure to raise an exception even if partial database exist. --- klayout/drc/testing/run_regression.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/klayout/drc/testing/run_regression.py b/klayout/drc/testing/run_regression.py index 25a83a12..7b2c524a 100644 --- a/klayout/drc/testing/run_regression.py +++ b/klayout/drc/testing/run_regression.py @@ -242,12 +242,10 @@ def run_test_case( os.makedirs(output_loc, exist_ok=True) try: check_call(call_str, shell=True) - except Exception as e: - pattern_results = glob.glob(os.path.join(output_loc, f"{pattern_clean}*.lyrdb")) - if len(pattern_results) < 1: - logging.error("%s generated an exception: %s" % (pattern_clean, e)) - traceback.print_exc() - raise Exception("Failed DRC run.") + except Exception as e: + logging.error("%s generated an exception: %s" % (pattern_clean, e)) + traceback.print_exc() + raise Exception("Failed DRC run.") # dumping log into output to make CI have the log if os.path.isfile(pattern_log): From 48fe5ed96bf64aadc4eabb16167578aa6a1e3f15 Mon Sep 17 00:00:00 2001 From: Amro Tork Date: Tue, 31 Jan 2023 13:37:39 +0200 Subject: [PATCH 44/71] Change back the regression. 
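
This restores the tolerant behavior: KLayout may exit non-zero on a testcase
and still leave a usable partial results database behind, and only a missing
database should abort the table run. A sketch of the pattern being reverted
to, using the same names as the script; treat it as an outline rather than
the exact hunk:

```python
import glob
import os
from subprocess import check_call

def run_drc_tolerant(call_str: str, output_loc: str, pattern_clean: str) -> None:
    """Treat a failing KLayout call as fatal only when no .lyrdb was produced."""
    try:
        check_call(call_str, shell=True)
    except Exception:
        partial = glob.glob(os.path.join(output_loc, f"{pattern_clean}*.lyrdb"))
        if not partial:  # nothing usable was written, so the run truly failed
            raise Exception("Failed DRC run.")
```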
--- klayout/drc/testing/run_regression.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/klayout/drc/testing/run_regression.py b/klayout/drc/testing/run_regression.py index 7b2c524a..25a83a12 100644 --- a/klayout/drc/testing/run_regression.py +++ b/klayout/drc/testing/run_regression.py @@ -242,10 +242,12 @@ def run_test_case( os.makedirs(output_loc, exist_ok=True) try: check_call(call_str, shell=True) - except Exception as e: - logging.error("%s generated an exception: %s" % (pattern_clean, e)) - traceback.print_exc() - raise Exception("Failed DRC run.") + except Exception as e: + pattern_results = glob.glob(os.path.join(output_loc, f"{pattern_clean}*.lyrdb")) + if len(pattern_results) < 1: + logging.error("%s generated an exception: %s" % (pattern_clean, e)) + traceback.print_exc() + raise Exception("Failed DRC run.") # dumping log into output to make CI have the log if os.path.isfile(pattern_log): From 22f9d3aa103937297d8562e58bbf5f5e1065c753 Mon Sep 17 00:00:00 2001 From: Amro Tork Date: Tue, 31 Jan 2023 13:54:10 +0200 Subject: [PATCH 45/71] Add more rules for case where the run fails. --- klayout/drc/testing/run_regression.py | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/klayout/drc/testing/run_regression.py b/klayout/drc/testing/run_regression.py index 25a83a12..bf4cd645 100644 --- a/klayout/drc/testing/run_regression.py +++ b/klayout/drc/testing/run_regression.py @@ -705,10 +705,15 @@ def convert_results_db_to_gds(results_database: str, rules_tested: list): analysis_rules.append(false_neg_rule) analysis_rules.append(rule_not_tested) + rule_lay_dt = len(rule_data_type_map) + 1 + for r in rules_tested: if r in rule_data_type_map: continue + rule_layer_name = f'rule_{r.replace(".", "_")}' + rule_layer = f"{rule_layer_name} = input({RULE_LAY_NUM}, {rule_lay_dt})" + pass_patterns_rule = f""" pass_marker.interacting( text_marker.texts("{r}") ).output("{r}{RULE_STR_SEP}pass_patterns", "{r}{RULE_STR_SEP}pass_patterns polygons") """ @@ -716,8 +721,23 @@ def convert_results_db_to_gds(results_database: str, rules_tested: list): fail_marker2.interacting(fail_marker.interacting(text_marker.texts("{r}")) ).or( fail_marker.interacting(text_marker.texts("{r}")).not_interacting(fail_marker2) ).output("{r}{RULE_STR_SEP}fail_patterns", "{r}{RULE_STR_SEP}fail_patterns polygons") """ + false_pos_rule = f""" + pass_marker.interacting(text_marker.texts("{r}")).interacting({rule_layer_name}).output("{r}{RULE_STR_SEP}false_positive", "{r}{RULE_STR_SEP}false_positive occurred") + """ + false_neg_rule = f""" + ((fail_marker2.interacting(fail_marker.interacting(text_marker.texts("{r}")))).or((fail_marker.interacting(input(11, 222).texts("{r}")).not_interacting(fail_marker2)))).not_interacting({rule_layer_name}).output("{r}{RULE_STR_SEP}false_negative", "{r}{RULE_STR_SEP}false_negative occurred") + """ + rule_not_tested = f""" + full_chip.not_interacting({rule_layer_name}).output("{r}{RULE_STR_SEP}not_tested", "{r}{RULE_STR_SEP}not_tested occurred") + """ + analysis_rules.append(pass_patterns_rule) analysis_rules.append(fail_patterns_rule) + analysis_rules.append(false_pos_rule) + analysis_rules.append(false_neg_rule) + analysis_rules.append(rule_not_tested) + + rule_lay_dt += 1 with open(output_runset_path, "w") as runset_analysis: runset_analysis.write("".join(analysis_rules)) From 75cc940fd8f28c3738ef6351303af2cffce80b17 Mon Sep 17 00:00:00 2001 From: Amro Tork Date: Tue, 31 Jan 2023 13:57:38 +0200 Subject: [PATCH 46/71] Add more rules for case where the 
run fails. --- klayout/drc/testing/run_regression.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/klayout/drc/testing/run_regression.py b/klayout/drc/testing/run_regression.py index bf4cd645..1d508875 100644 --- a/klayout/drc/testing/run_regression.py +++ b/klayout/drc/testing/run_regression.py @@ -731,6 +731,7 @@ def convert_results_db_to_gds(results_database: str, rules_tested: list): full_chip.not_interacting({rule_layer_name}).output("{r}{RULE_STR_SEP}not_tested", "{r}{RULE_STR_SEP}not_tested occurred") """ + analysis_rules.append(rule_layer) analysis_rules.append(pass_patterns_rule) analysis_rules.append(fail_patterns_rule) analysis_rules.append(false_pos_rule) @@ -822,8 +823,12 @@ def aggregate_results( df["rule_status"] = "Unknown" df.loc[(df["false_negative"] > 0), "rule_status"] = "Rule Failed" df.loc[(df["false_positive"] > 0), "rule_status"] = "Rule Failed" - df.loc[(df["not_tested"] > 0) | (df["pass_patterns"] < 1), "rule_status"] = "Rule Not Tested" - df.loc[(df["fail_patterns"] < 1) | (df["pass_patterns"] < 1), "rule_status"] = "Rule Not Tested" + df.loc[ + (df["not_tested"] > 0) | (df["pass_patterns"] < 1), "rule_status" + ] = "Rule Not Tested" + df.loc[ + (df["fail_patterns"] < 1) | (df["pass_patterns"] < 1), "rule_status" + ] = "Rule Not Tested" df.loc[(df["in_rule_deck"] < 1), "rule_status"] = "Rule Not Implemented" df.loc[ ~(df["run_status"].isin(["completed"])), "rule_status" From f5a84b53be1ec8de6ea5c32a294a1e89fa23b67a Mon Sep 17 00:00:00 2001 From: Amro Tork Date: Tue, 31 Jan 2023 14:12:34 +0200 Subject: [PATCH 47/71] Consider rule syntax exception. --- klayout/drc/testing/run_regression.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/klayout/drc/testing/run_regression.py b/klayout/drc/testing/run_regression.py index 1d508875..b4def2bd 100644 --- a/klayout/drc/testing/run_regression.py +++ b/klayout/drc/testing/run_regression.py @@ -834,6 +834,13 @@ def aggregate_results( ~(df["run_status"].isin(["completed"])), "rule_status" ] = "Test Case Run Failed" + df.loc[ + (df["not_tested"] > 0) | (df["pass_patterns"] < 1), "rule_status" + ] = "Rule Not Tested" + + rule_exp_cond = ((df["fail_patterns"] > 0) & (df["false_negative"] > 0) & (df["not_test"] > 0)) + df.loc[rule_exp_cond, "rule_status"] = "Rule Syntax Exception" + pass_cond = ( (df["pass_patterns"] > 0) & (df["fail_patterns"] > 0) From 3600070c577b3688bcc4700f100ef427c0f63300 Mon Sep 17 00:00:00 2001 From: Amro Tork Date: Tue, 31 Jan 2023 14:13:34 +0200 Subject: [PATCH 48/71] Consider rule syntax exception. --- klayout/drc/testing/run_regression.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/klayout/drc/testing/run_regression.py b/klayout/drc/testing/run_regression.py index b4def2bd..17f7e9a9 100644 --- a/klayout/drc/testing/run_regression.py +++ b/klayout/drc/testing/run_regression.py @@ -838,7 +838,7 @@ def aggregate_results( (df["not_tested"] > 0) | (df["pass_patterns"] < 1), "rule_status" ] = "Rule Not Tested" - rule_exp_cond = ((df["fail_patterns"] > 0) & (df["false_negative"] > 0) & (df["not_test"] > 0)) + rule_exp_cond = ((df["fail_patterns"] > 0) & (df["false_negative"] > 0) & (df["not_tested"] > 0)) df.loc[rule_exp_cond, "rule_status"] = "Rule Syntax Exception" pass_cond = ( From 325ec2254e20b04a29bfaf6f9426ea98b5280001 Mon Sep 17 00:00:00 2001 From: Amro Tork Date: Tue, 31 Jan 2023 14:53:18 +0200 Subject: [PATCH 49/71] Fix typo. 
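
Besides the spelling fix ("mutliple" -> "multiple"), the message points at a
real usability trap: a GDS with several top cells cannot be checked without an
explicit topcell. For context, a minimal way to reproduce the situation with
gdstk, which the regression scripts already depend on; the file name is a
placeholder:

```python
import gdstk

lib = gdstk.read_gds("my_layout.gds")
top_names = [cell.name for cell in lib.top_level()]
if len(top_names) > 1:
    # This is the case run_drc.py reports, pointing the user at --topcell.
    print(f"Layout has multiple topcells {top_names}; pass --topcell=<name>")
```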
--- klayout/drc/run_drc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/klayout/drc/run_drc.py b/klayout/drc/run_drc.py index 84d7cd93..dc833b5d 100644 --- a/klayout/drc/run_drc.py +++ b/klayout/drc/run_drc.py @@ -227,7 +227,7 @@ def get_run_top_cell_name(arguments, layout_path): layout_topcells = get_top_cell_names(layout_path) if len(layout_topcells) > 1: logging.error( - "## Layout has mutliple topcells. Please determine which topcell you want to run on." + "## Layout has multiple topcells. Please determine which topcell you want to run on." ) exit(1) else: From 6d7eca6db38560bc4de083a7674158a4605543de Mon Sep 17 00:00:00 2001 From: Amro Tork Date: Tue, 31 Jan 2023 15:59:32 +0200 Subject: [PATCH 50/71] Adding hint to --topcell switch. --- klayout/drc/run_drc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/klayout/drc/run_drc.py b/klayout/drc/run_drc.py index dc833b5d..5f67b7e2 100644 --- a/klayout/drc/run_drc.py +++ b/klayout/drc/run_drc.py @@ -227,7 +227,7 @@ def get_run_top_cell_name(arguments, layout_path): layout_topcells = get_top_cell_names(layout_path) if len(layout_topcells) > 1: logging.error( - "## Layout has multiple topcells. Please determine which topcell you want to run on." + "## Layout has multiple topcells. Please use --topcell to determine which topcell you want to run on." ) exit(1) else: From 08f92a4e8f13a0539aed5d50bb828bca9abad64b Mon Sep 17 00:00:00 2001 From: Amro Tork Date: Tue, 31 Jan 2023 16:06:41 +0200 Subject: [PATCH 51/71] Removed the commented line about list_res_db_files. --- klayout/drc/run_drc.py | 1 - 1 file changed, 1 deletion(-) diff --git a/klayout/drc/run_drc.py b/klayout/drc/run_drc.py index 5f67b7e2..68372859 100644 --- a/klayout/drc/run_drc.py +++ b/klayout/drc/run_drc.py @@ -472,7 +472,6 @@ def run_parallel_run( drc_path = os.path.join(rule_deck_full_path, "rule_decks", "density.drc") list_rule_deck_files["density"] = drc_path - ## list_res_db_files.append(run_check(drc_path, "antenna", layout_path, drc_run_dir, switches)) if not arguments["--table"]: list_of_tables = get_list_of_tables(rule_deck_full_path) else: From fffbc356677a2a6be14a759160a37e06a045180a Mon Sep 17 00:00:00 2001 From: Amro Tork <74936860+atorkmabrains@users.noreply.github.com> Date: Wed, 1 Feb 2023 06:30:15 +0200 Subject: [PATCH 52/71] Update README.rst --- README.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.rst b/README.rst index a9208ed9..374a4db6 100644 --- a/README.rst +++ b/README.rst @@ -1,3 +1,5 @@ +[![code linting](https://github.com/efabless/globalfoundries-pdk-libs-gf180mcu_fd_pv/actions/workflows/linting.yml/badge.svg)](https://github.com/efabless/globalfoundries-pdk-libs-gf180mcu_fd_pv/actions/workflows/linting.yml) + GlobalFoundries 180nm MCU Physical Verification library ======================================================= From ee005a3812d6f4e9fb38bdf6526d307f6bd27efa Mon Sep 17 00:00:00 2001 From: Amro Tork <74936860+atorkmabrains@users.noreply.github.com> Date: Wed, 1 Feb 2023 06:30:55 +0200 Subject: [PATCH 53/71] Update README.rst --- README.rst | 2 -- 1 file changed, 2 deletions(-) diff --git a/README.rst b/README.rst index 374a4db6..a9208ed9 100644 --- a/README.rst +++ b/README.rst @@ -1,5 +1,3 @@ -[![code linting](https://github.com/efabless/globalfoundries-pdk-libs-gf180mcu_fd_pv/actions/workflows/linting.yml/badge.svg)](https://github.com/efabless/globalfoundries-pdk-libs-gf180mcu_fd_pv/actions/workflows/linting.yml) - GlobalFoundries 180nm MCU Physical Verification library 
======================================================= From 9b848367d3dab7a94d82e449dc4ffc6c375fed37 Mon Sep 17 00:00:00 2001 From: Amro Tork <74936860+atorkmabrains@users.noreply.github.com> Date: Wed, 1 Feb 2023 06:33:30 +0200 Subject: [PATCH 54/71] Update README.rst --- README.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/README.rst b/README.rst index a9208ed9..7358862c 100644 --- a/README.rst +++ b/README.rst @@ -1,3 +1,4 @@ +![code linting](https://github.com/efabless/globalfoundries-pdk-libs-gf180mcu_fd_pv/actions/workflows/linting.yml/badge.svg)(https://github.com/efabless/globalfoundries-pdk-libs-gf180mcu_fd_pv/actions/workflows/linting.yml) GlobalFoundries 180nm MCU Physical Verification library ======================================================= From 2448465b2ccba08286a8f03106b5413c97ede596 Mon Sep 17 00:00:00 2001 From: Amro Tork <74936860+atorkmabrains@users.noreply.github.com> Date: Wed, 1 Feb 2023 06:33:50 +0200 Subject: [PATCH 55/71] Update README.rst --- README.rst | 1 - 1 file changed, 1 deletion(-) diff --git a/README.rst b/README.rst index 7358862c..a9208ed9 100644 --- a/README.rst +++ b/README.rst @@ -1,4 +1,3 @@ -![code linting](https://github.com/efabless/globalfoundries-pdk-libs-gf180mcu_fd_pv/actions/workflows/linting.yml/badge.svg)(https://github.com/efabless/globalfoundries-pdk-libs-gf180mcu_fd_pv/actions/workflows/linting.yml) GlobalFoundries 180nm MCU Physical Verification library ======================================================= From f18b43169d883b36d2b4a699d0a624016f7df1d7 Mon Sep 17 00:00:00 2001 From: FaragElsayed2 Date: Wed, 1 Feb 2023 08:47:48 +0200 Subject: [PATCH 56/71] Adding DV.7 pass pattern , Fixing natcomp def, Adding new svg for dualgate --- klayout/drc/rule_decks/main.drc | 4 +- .../drc/testing/testcases/unit/dualgate.gds | Bin 25068 -> 24258 bytes .../drc/testing/testcases/unit/dualgate.svg | 646 +++++++++--------- 3 files changed, 327 insertions(+), 323 deletions(-) diff --git a/klayout/drc/rule_decks/main.drc b/klayout/drc/rule_decks/main.drc index 7aca83e9..c809df0b 100644 --- a/klayout/drc/rule_decks/main.drc +++ b/klayout/drc/rule_decks/main.drc @@ -923,7 +923,7 @@ ntap_dn = ntap.and(dnwell_n) psd_dn = pcomp.and(dnwell_n).interacting(pgate_dn).not(pgate_dn).not(res_mk) nsd_dn = ncomp.and(dnwell_p).interacting(ngate_dn).not(ngate_dn).not(res_mk) -natcompsd = (nat & comp.interacting(poly2)) - tgate +natcomp = nat.and(comp) # Gate nom_gate = tgate.not(dualgate) @@ -969,7 +969,7 @@ if CONNECTIVITY_RULES connect(lvpwell_dn, pcomp) connect(nwell, ncomp) - connect(natcompsd, contact) + connect(natcomp, contact) connect(mvsd, ncomp) connect(mvpsd, pcomp) connect(contact, metal1) diff --git a/klayout/drc/testing/testcases/unit/dualgate.gds b/klayout/drc/testing/testcases/unit/dualgate.gds index 9dbc2c5e1e82621b42cdddf621518b5832072c58..f7622ae2ebf15fcda126574f3af3194b8884e2ba 100644 GIT binary patch literal 24258 zcmd^{dz_C&`^V=NYh#sj$l)83JWH%y=ff_$Sf`D3E@`dBYVCT~QWD9jB}oV6R4FBj zPEUm5p@(v;AE8tvhp3Qawcow%{65#reeLYGcD^3l>-X2sAHDltGoQI;=DzQ_XReuh zMTjU?Y=Efl6f1d36cdgJ72%>$!T%Sb#X`ErcMziD4JF#e%xd24(>Knyj9quv(#3=N zi0UQmy0Rf5&4)A_5<4y_ZA4O5iYOi`L`a!p;x-X-Q!yb#c-b4I`$m1?bhxD1@eAp$ zC@vf(+dEtm#mkAp_0P`HdLrqb8)-;-oo?5Y|7fZG$9${(Gua+3?RqMo@)1c#{+VnK zrYHaHOKbn_Pig=8WP9o0{z*^%Nk{(k$@U<6^56Cr?Y}Mhi~2vGY`-PAf6|kG(vkms zvOS3Yr9Q&xx<|UiZY!nxa$`fbcim(6zvO3031G_5C+esLE=((81) zzQ*^$x$nGmi9>cvx8_HNY`^ckU0u>)Q2H zKIJ2lj{Gy(9!yXE?+MlZ?>V9U=acQB!Tpn-{F9FS=acP0^yGhFMeTne`iuHMpKPxf z+&}5bKk3MSKG_~bU#YZkhVPUval~@)lp))P@3i}`aEsOxNnhcTA?bCxU0<$_))Prz 
zP8yP4r`z?3Hwq_tkaUT|nt>+`*`7Seu5UkD>xra~jyEK|PPgmHZ_;q>e`p8opUL*5 z;dVWhPx*+XBmYdc2h)@PA(`6${XMmRCfkQ(2KP_-$Uo`GKa=gj^yGi=WbOYxx(?`n zCff&34(^}wk$=*WeAOY==YdG+64S~`w_7xra~uVF}foo?56 z>Y?>S(s#bokn}p;uJ3~JzH3$ZAzp$1Dh0^)s&;)?Um-Shl`iq4deSY^#E|V9y4v-j zE47|T`Wt5$l3u6V^^bb~iKL(6`6rTIr`z=tJ^w_~KjQf(l3u6V^@}$Pv3?WYd*Fse zcwa6tWc&I}cKr+8wVp`&1@#R{uhZ@N`ksFx>F@OX6G^Yr?fSZ&esq<{KJAtK93|Kn{9IXyhaT8^w1hjQ9T#fV3wy!8O|VN%v? zXvpc|Z3-=i=EMFGS9N`ek%y!|ZM7lQL#NyIRS!eoLHdc2<)y#IHbb_Dcd+Y4C9Nlt z{syXtN7Czbt3Kz{G3eV$#mF);=a-$(dt`ffTdUr`wuTUQ{wx)nY>@s9C4^wI{m!55 z`W~Zo{fSiXUbwzYs=rRR>m$7WBa*I}*MCIP>vXF=Z}J}1enqr9Z|WM=enqt3OSkG} z8ugkHATsPu}^d+Anv-VV<{k^JxR{1eH)PPgjag-3-GwMHs-FC*Po-W3j$?NMv& z`m!$wr`P+^C5|j5-6|st+1~4YyFPAsYWP88rc73Va@!Z2Nabq>i z=g@8@+t-(}>nC}7BIzc3dLrp{x?R6uyAbOpV;lrOZh&#sXUO(-f$wu3kJNhjc27?v zeSJ?)B)v`#@L#*&dV2ZS!fErFbcwOGu%5%ZiplmipV{@#;yp_Fh@^XNogv$U=@(VU z`*#x7O<=bitmmc}vc2mhyZ;(b31`4o%;&-TaUayeyqd}O0bA|*8!>;Td_>Zf&M;(q zFn!rCF`s@I_b)i&H|bVxZpilG58M6sUXS@)Bk2-{&6I8*%&VAeAKb{UZ;$yCeurHjRszq7 zAM+{jqw$zeJ!Z)E^?tiPbfDG~Nq^%3L(=PXyZ%wnKauoPJpV+}>vX$*qUWDT`bRwf zMAGYYyMFN)>((81) zzP?v~BI)n+>Q5xSP7lx*SL4qS70=5#I#Gza2c=@`Eb0GuRUw#cuY1t0Kk~Q`wU5ah zV%w3@zom>J+iM@Q>%V(e2;VuGLu`Mq^#8{(WV`R2RiF1@66(KKDz=^=WxpFye~)ah zyVtHCHyZUnEc1wMM@!lNsv+BJAGYhqPC)&Cl6l1TNm35HV90jgPXYSM-uT~!;;HLv z32|zq=EVWvDIqTTq`Uc7!_$rN`wo-u)JS`Ii&yDQaK6XwRqxBEUK39LjR~_7(^1`jC5s(`TP_ ziNg>_sJ+aP?S1yy^{ru{AuJHgmDr4s3+pT z!jSFj1LIc#^{?aUiKK7f>4~J*>307e8w&ALW$6;nKP}zv8HQ~Csj^)kK2+<8q>p;d zkn}p;t}h>=^+eKF`@@j*I^C|n=b+XTNk4SIA?bCxRiEo-E4?D=ZjZ>357}7YV(*yK36nNkGht3e9-VUkQs+aU<-Yx`_?ezlZD*^gSSD@c1{lu2D zrT@0ohHS65)9OFR)p|wJmujo^MCiSAyZ#vN7s^L$IYs7NAiYPn2l4NJ|EjJ(G4dPf z--z}zss1|M>fhhlYd?|d-*1m0>2-S?IY!DRcgBUb&}`iunV zvECr&jF9g14-MIVG;qCA@cwh_juhh9BE+e{EAZdCy&>C=EwcMBvqgv#JuvQqmr;+J zvklpPqK92S`XUw$MzwuJxOpBH-@wML4ZS|%h+owg_^~>i7XWK-?8^FU0r2Ek*L$+_5XxC41 zh4X1cy!XMwgQYt^*^up@Hni&pB0jYJEA$_D2=mZ1^aGRa+rP5wTdshLEAalxo6r~I-!`e(a)!*y#yo<__Ilf_ zdU@AirB{Sbwu@4FMd-bBt3Ge5=buP^zx4bQ$-hpw>SaZ|FY0ZTiY=#0S?6_>{6C?RTBU7{77<6F=`R-Iz^=Y+n|5{|D$lsDtshmvo7K=s(?R$o8YXto3*McM{_G zQOujcE7&g@akC-Yj~}(`qZ2Xy;`t$7!uXp&>n)FLKN0x+6s&*lmEA)8@SRlrZ8hQ@ zZwkR=`w!pQ{g)gpoUosyOZ1(UZpBXw*&Y_SUJUU6`${!_=MsGwzr`3sjNi)sr}4X> z-mUgL)-Tw1C5~u?c*ABxw)Y9#ZwSy=!E^Uab;OCm<%gx)5ND8>Y=5S@wf(t&ga~KH zK&f~L^VO1A$1&NyW1v;dJij>&zeZgBwA8!^(*6gN z?We8!+y}QSz2-%k``|vM*F25iD&4BTx@DcxYhILBw|t`Xny2wwrQ7sElwLu4j~BuB zN$-*E_WJ*SY`;%ke{@sX8>IWpR>aE_G-F@D`>QeHu;QhcsK4_;{DGd-i`AoOtuf`Zr9hqxf9AqBwej(hHMX}Z}&RR53R+#2^^M( z@w=QM+wWa#_um`m1}Gnq^nF$vvOSo-YDg+;QI1x43?NWjy=! 
zQ9tGX`t>=-yTSjZqU*07f&a=y*897hfWCOq^~o^~m?|-X44E|RvExLX< z*3lJ0J$=;m`|tgXaLV6WbbT4bgUihpT&%}iBzXF!*Wdnm({TFD!Tr*W;hpFsp$IT7Q$J=<5$G>_g|MX!g+kV=f4R0sIt&E zDZ2j7lEQf|zUcZD!-ezNa!=pn`rAJZ=N`647hQi3;t@M}{4Ik2Hn{(H&MmtCD&IoC zrpWr-obk}VQgnUZc_GfSe=q)VJ@c1Jm{T+Sm->m<*sh9`;?)6_0+W*oF z?0;-6x_(oKzo);e^L6)M!TF>6Ip&`~%<<~)%|Ao+{IycS^-stPLVW0A{}SAU_`%H` z4cY#otDoP(^bw+OB{;tsoq&-$oBrX2J{ctN9mhy!+QQp=@JwEBi#Y>4cQ+1rCs0YG|m%j z!#Q}cdpyqRrx~)nOJICBz<mj&v(JOMO+W!$C0=mOAXn+&RZY<>3XXEtK;d3q;KHqiKN%*cK-wI_ zhP1xd=~jKN|2d^sBz=j8wVnvQmu}U&4bruLBI%niFeLvvJwP8;@cCGcdCaa2(j^{z z6X#p13+F4G6Ip|O9_H?;h=1Q}^6lDCXnEZ!uFx9y&+O%z3$SlEc#&|JbLM0JNC<~H z59c^(Ux~@O*~<$pUtO>4YPudos^3#T8&W%Tx>Zl-1)M!2)wuy@|8aGmAfIgC6L?;U z`%C$6k9|DkbBUzy($J9Y!SweZ(|RK5M;tXIy-v5*->uO~>xrbV7j8&;oo?5s;W|fS zKZ}@%_rhbi-b}Vf2kvJDwEqso0TVx$E^!F<1)Jg=E0gVspWFS{d=K}-EbJqLON%3( z|BfNs7t9LKH}~$>G~8eQ;iH9U@SbL~ROwG|D+IIgllYz?UI=F7IO!jmg!2j>!(Z3w zOv*=kCYRgby+Zw~?>V|1hX41apBOP!`g>j#BI;G??|ssc(;EbyL+5s=a+=^lpnSy0 zH*xMiPU~CrHRSYQ46 z_o7~ud0$YyJaT%2)z)%kl{F}5l~in68{eti1^p5!Yi%^-^aiU6El0h#*EGkz*%XXJ z;IDYDKb|4P@1fG&`jz327?-vjF!_#8DYU#;8kb7rKCOU#U}7cgXO(SgSo39^WBOG% z%!;4jyT(B#e}%yPVZM&4+~oI#Q}I{n5^s4+x+BXQ`Y;a|1%J%S*uPF~VDeY|wb1&i z_P>CAMruEi+W#`*P1OE3h8a@(UmI%jS=z74n;s#Y&-UT`2zaoQbZ2!E&bNawuiR(2 zaZVGQC&c@S$-3;q?@LwROq^Sx{v@79zfCBs`*CUm!;?!8U%$!Z z3)Y`|x?x^iQo6+RF*t9!5am3DbEdx-p1M)Ghki2oPLwS4deeJFeNRQtJAGG0;|YCN z#bmo>98h`z9pxjEF2oy0h-?p{&-t;x%BL8$K<1n!y+`Q1bbI~Rp45JbjW0_7`nHDT zU#DC3d6STz#up-uH`C(`*=`wksQqroNkZ+j5DS}!>2#}qcP8e0D^5toU2kAM6fFdk z?JG{$^%Wk49?v&%*B-31atztNVu4*B{=Tk1v9NiZPPgiFb20BbT~8`rcucy*$76ls zk?p7JS@r51C(Re>JSX}8^F9~Z|FloV;3+#J`$% zQ9hb~ksjYu<&*8f^mU%ay6s1-H^5}X30fn*&SZN+;5p}l?RSSS#Xc|IL&W}=7ZyCX z&h|Eizi(9ghwc^5i#>2Y7hHjL?dT5;+5Tb=>-BTnAJzRwr1p3H)R5Y*)9v~Oj|*r1 zCahP$S8l_3x6y`dpC9<0a6tQe_r?1f{YI?0SGs-i9%8b+Lf|>M0Dbk1!g(I+6XGj> z;QHa57L)DI&)4-2({TaJi}Uwci|bv{Vc6BFtD?ePGSa+L?e^T=1|fGd5mq%MF*SX} zxU{6qYn9(<2^1Yk(J>=4B5v4-Yx&5@dsm1$QruKEBtAVWC39>_a+bFmx7I7?#1?qz zB}!Ed>5w!w6$@Imdbj5HSHaj>w>RbQlQAxN`EjsTDElBhpeLqpqn~lb~vZ zR@|ML8ke4&mY9-ktyQSH7CFDZDnzwOqIXsNb)g>dePa5=C-n5i#@!v)Gd8Yg$N0F! 
zR=zQrsr^%jWsUT;^PyuB#;1)z@rkL!Qo{3-+L?qt8DlzRWQAM(;@l4Jm3OEp^uI8A zH!Z=tJp7Co>kQoM{_8!Iw`~sE+U`28ao)r?py>WzUoTl{Bl4$SNB_$v?V&s0UW1<`{-RfVZe?Lm+JGVsZnQZ@lRB%1=Q#r-Tlu&x6y>yP#-f5@-&Otw$#Y}Kp!A)g|2T#h2!ub~&YRkR-WtMc!W z?V!~^`4Qi5R(>fz>GR3AfZRBzHNLa)kWvi(~6g&np2f97gElkE!wzZ58V{gprD zQ-n^{pUL(hdR1@a=XNMP>dobN>DKlaWv8W;H-4!|#yeSvB+f`E6r~(8eM^BD7j(FLwqaIbVM--5DtV5U$5`l>ZH`&vcaYE&6dL$~kld#Fzfx zL};q~N@p*3_*;l%;k`E;fAOoz&C*R-g?0|b?|#Cm7KX?*PIOnnbsUigdl^lYU+L`S z&iWP+6Ra?@v$0ad>?j*2qc6Opd8rs8Q!{bGofv_1M1E$^Ksq8-T4yh}UsKcr*R5|y zeLd)RHC!J>T(e3BozhDfr{7#A#FYy8jfnG=je5Ry zc@%g>h|74+h$_F**~@+MuyER8+*JHXx=&oiZ;$N}Q6)Pwx(;?q_uzL^FMr^895C+8 zx{UD*?axL(AH;p1-5ui=Dl|jcc=CLOS|I>+JQd{k#yrT)~&r;F;0b z!Cz~5cCfbR^8i-#xS!9Q)b?Ml*voAdEu6clD?i29aA##HD6JL%4 z@tnPM0plf}v!$`%CfFAtBK836n4b;%x5@wFMSHm|+G3}yG9uzV&<`QfrQZvG^EesT zsRn*~TrgML^I6$m?vtB^)3-04A3X1UC7vfd|NVP|mB2R;Il_Ix?AHgsF>dx-f&5Bm zFZU_DZwJ&sesJIz>CVGW$3WbtPfrrgpbUJ$a0h;GgMB~pWx`(HTJ@4Amv~ zb_j@Xe+&-5eCKAQmw;XA?B&kH4#K6gc+X;I=5hzTm!Z2dA6zfQZ|$)@MIhz!84x>@ zSE4mlexZrrEjA38RJtj(~`#cGKTwNd-jj(-rYAmBh%NTpD!jeCCNwS{wb1@*W%$OWXQrg#FKZ9;B_(I2j!#j|iirHL`s?GnL={zD1a-<- zRV4LFr1!Tzujh_A9f5E7T2p(PUh8`2_ehMdV85RF9Pi4RfAgLbXJ4ZdBCk~k)u(@8 zRuQsNv{8M~>@W9B$Alhtt2-vP-;fAjh7PLnokabFZY~&gEqM3nSaa?lh!e`m-d)+; zr%FstN>9(o@(oS#jY-PPN=-^jo8aR+b?5}#Oc6t3uT?Id7IEl7ujVa%-TV2H(udJS zyt_w2q7NQ?o{O=mSy_l%`%=>>BIp~NG&;qn9)%ztEY}BPg4pl_^=^{_z3WBUa?~o- qumAqj9WGs0nFAIp|AP?a=H`>_ltJ~~we<1lwcg`j^d(VFaq+*yUlYLq literal 25068 zcmd^|dw@>W`p4IPGn2~*Imxg_&`y#OX%HlxuR! z=;BuA=%SE9LI)9|3(@72R75(JG}AiYXRYXRYt^tiATyYp=`R zFA*+$z5$}XWD<>I;uaXobJlZIWBJ5u*AfVV$BMY2R!6^3ylQ ztZP}g@Q!|>ewbZfJ3PF7^7Y9vV^T7PrR1e2XQpJ~pF)%?FGP4{pNJIUm-&Pc*H*nm z%3kjYXIxLs@r|X7Uo9La+sE}3<*Hfq$=O;@B;8%TT#{a=+w~NGOp=a2HcH25vVBaF zT~GcgACYtvpUL)MdWxSrM90tTq~kN$o;xIXe9A}hNk{RSY!9ZV_@lFQ{2bI5)jyN% zqqBm?r+gHjbQGV-_F(#c%Y`$kp_IhQLdx48aLM*b4ejxh7HU0_^!>-VB)v|z>jylh z^+eJS9O;tuI^C{6SYJ4^yGu!&^EdFUOSaGMZr2}Qr}aeAe?7+~>2iDx- z>iA5y&rG!I$v@>Il8)jt*&a+!@gGap@n_tm<1^X*SaR_Al#k+*j^Z=f9!yX13r6br zkJ5EGD;y@<3q}TyPx&Z5=_o#v?ZNa%P77yYq?C%qQXYK=$4s_I+T(xnh1M%V_vvP> zSA<@t+x4G)sP&4_f4)ZR6`|MZcKwnw!kPD?l*A_rq%7RylI`2+0q{w&lN)jyN%GY2`Md){# z_=?c$bi00!iLVI#yC%LO^g7+6zsih1+g80qI2j9sxa$ec;lqWKbw&u{DAfN@^sPir z2TPAbOZkyS2Uj8AY|Y`Rvgkt}{D2?Lf}i&^IsLBLW%|K*SU6Lf>3GCIE8*lHa!KVd z>1CBe>EFK``uV~k4i5aNZb0X+IXq1kkEuxKugP}%`OBgWb(LNbx}r@!rB{UBq}$G47B@EWi4?b)iBF{X zI^C+5eXkMDhzO}TISu3CpD`XbWc!E+yZ-Pf;XGVHO5&mvDgV>OCEFjaVAn6}CY))x zQW6VyN%^muF4;aU*RDT~^FE2V#P3IAJol1Iwx3L~>&F;9k@RDYo=AF~Zr2Z3E5wO| z=tsfR|3Lp&#UaQs-z=cG%v zkG{&TPrh2~iKM>^&r>Gpb-G>OuRos02QZ%lC*O+pv%w|X?>k`EAHevzpq`Y(IcNv( z_i)Mff_irSarE7kk4XBjF}`N9J(&JTp>P)L!+0KClz{eI&L!Iy?6b#Tg7MSLhcKQ4 zpEx4r(yLsueda@U{mY%So=EyN7*{b#uhZ@N6=N};!aRytxEAB7LoV6ARXp=zt3;iA4|gX|2B-Lz*D<1p4#t{?cd#I*MH;FdLrq+z0W1-b-G=j zYvL10pJ(C|Nw3rG`f(;ck@VwDd?M*}x?P|2E&7RVSa$$VMWY`W=929vx7qcBlC_>l z`oV`?l3u6V^?Qt-Ncwk;o=AF~ZrATH-MGO~O~>R+nrCt@>8)3vqj_aEQ4t3tz1mmuv?u zdRer156a&}GkdQrzIQCjZ^-t6P3-!Ghfw~uvWPf(t1P~6giE&fZ)?}jKZf#mkwwJZ zb+UN!oi5padl!$srs@B0ruEbY<%Lskla!Ho?$)28dCfQ}o1h;hHmxe0#;Y|cpU!Xh z*ZCgpuhYxgACdgTqF*O=!*hoG5y_vX4qUtd*!t11a+;_K+2L0YeTzK=_`Pkh}T|5#V_ zV;#{ygQw@Bf1Tx$?ca5@>vN5sNc!xrcQ8SRHjdYx|9r=HaJIg#%Fk#D%9 z`(LMf^p})=KK$b)AqM${L(E!%u})u?YzIsC&mR5$X3$3phnV@9aE^TLlI@^X|Mdc` zCzAdrqbHKyq}%j9pVlj8ejt3+hQp5`^qg+fJJ-|sBMvdpS~y(}xnw(N)i187^+YQF zbEf=6(wlUveyurwBI&p1xFo$vx9W#AKpb2@V&i5|7e;+TL8kiptuiGeP4$gte_8Q*#MXC0O zdFg54)cjtv8REb3ws4qiulc<_{#(xqr}i^a5}TqN2eMqUz4kM9{XiAj^B zoc&*yY>$4`uAe_i>xraafq5R2^g7+H?{`c%L-E`uKCln>|1_6uAG*MbW#KKyWo^nQf^-4lI@ck*!3??$C?_(vBW2r;QD>ylI{84 
[remainder of the git binary delta for klayout/drc/testing/testcases/unit/dualgate.gds omitted: machine-encoded payload, not human-readable]

diff --git a/klayout/drc/testing/testcases/unit/dualgate.svg b/klayout/drc/testing/testcases/unit/dualgate.svg
index 1bc3d877..8eeb2475 100644
--- a/klayout/drc/testing/testcases/unit/dualgate.svg
+++ b/klayout/drc/testing/testcases/unit/dualgate.svg
[SVG markup diff omitted: the dualgate unit-testcase figure is regenerated with its DV rule labels reordered.
 Recoverable annotations: MASK 6J DV; DF.2.6; DV.1 Min. overlap of DNWELL for MV Area = 0.5;
 DV.2 MV space = 0.44; DV.3 Space to unrelated active = 0.24; DV.5 Min. width = 0.7;
 DV.6 Min. overlap of COMP for DV_2 outside DNWELL = 0.24; DV.7 COMP cannot be partially overlapped by DV2_D;
 DV.9 LV and MV PMOS cannot sit inside the same NWELL; per-rule error counts (1-5 ERRs) and test-pattern
 tags (Hole, Singular, Cor2Edge, Intersect, Basic, Touch, Angle45, PriEnclSec, Outside).]

From 773e5d355305916f5c4e9b56b73d7869ff1055b7 Mon Sep 17 00:00:00 2001
From: Amro Tork
Date: Wed, 1 Feb 2023 12:27:40 +0200
Subject: [PATCH 57/71] Fix logging of verbose mode.

---
 klayout/drc/rule_decks/main.drc | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/klayout/drc/rule_decks/main.drc b/klayout/drc/rule_decks/main.drc
index c809df0b..972e514c 100644
--- a/klayout/drc/rule_decks/main.drc
+++ b/klayout/drc/rule_decks/main.drc
@@ -130,12 +130,11 @@ else
 end
 
 #=== PRINT DETAILS ===
+logger.info("Verbose mode: #{$verbose}")
 if $verbose == "true"
-  logger.info("Verbose mode: #{$verbose}")
   verbose(true)
 else
   verbose(false)
-  logger.info("Verbose mode: false")
 end
 
 # === TILING MODE ===

From 75c95c89c1d15320686e9b1c5d026b5acc4e90df Mon Sep 17 00:00:00 2001
From: Amro Tork
Date: Wed, 1 Feb 2023 12:30:57 +0200
Subject: [PATCH 58/71] Cleaning up the get_polygon function.

---
 klayout/drc/rule_decks/main.drc | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/klayout/drc/rule_decks/main.drc b/klayout/drc/rule_decks/main.drc
index 972e514c..76c65c25 100644
--- a/klayout/drc/rule_decks/main.drc
+++ b/klayout/drc/rule_decks/main.drc
@@ -196,11 +196,8 @@ polygons_count = 0
 logger.info("Read in polygons from layers.")
 
 def get_polygons(l, d)
-  if $run_mode == "deep"
-    polygons(l, d)
-  else
-    polygons(l, d).merged
-  end
+  ps = polygons(l, d)
+  return $run_mode == "deep" ? ps : ps.merged
 end
 
 comp = get_polygons(22 , 0 )

From fc02994de961e9886c07bc6986bea3b18339e437 Mon Sep 17 00:00:00 2001
From: Amro Tork
Date: Wed, 1 Feb 2023 12:34:36 +0200
Subject: [PATCH 59/71] Fix the metal2 dummy addition.
---
 klayout/drc/rule_decks/main.drc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/klayout/drc/rule_decks/main.drc b/klayout/drc/rule_decks/main.drc
index 76c65c25..8803c8c6 100644
--- a/klayout/drc/rule_decks/main.drc
+++ b/klayout/drc/rule_decks/main.drc
@@ -539,7 +539,7 @@ if METAL_LEVEL == "2LM"
   logger.info("metal2_dummy has %d polygons" % [count])
   polygons_count += count
 
-  metal2 = metal2_drawn + metal2_drawn
+  metal2 = metal2_drawn + metal2_dummy
 
   metal2_label = get_polygons(36 , 10)
   count = metal2_label.count()

From 7fa2159ce72cc15cdffef06f91a608b3736ac8af Mon Sep 17 00:00:00 2001
From: Amro Tork
Date: Wed, 1 Feb 2023 12:47:43 +0200
Subject: [PATCH 60/71] Update to get the number of processors from inside ruby.

---
 klayout/drc/rule_decks/main.drc | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/klayout/drc/rule_decks/main.drc b/klayout/drc/rule_decks/main.drc
index 8803c8c6..8d7f2f10 100644
--- a/klayout/drc/rule_decks/main.drc
+++ b/klayout/drc/rule_decks/main.drc
@@ -19,6 +19,7 @@
 #===========================================================================================================================
 require 'time'
 require "logger"
+require 'etc'
 
 exec_start_time = Time.now
 
@@ -123,11 +124,11 @@
 if $thr
   threads($thr)
-  logger.info("Number of threads to use %s" % [$thr])
 else
-  threads(%x("nproc"))
-  logger.info("Number of threads to use #{%x("nproc")}")
+  $thr = Etc.nprocessors
+  threads($thr)
 end
+logger.info("Number of threads to use %s" % [$thr])
 
 #=== PRINT DETAILS ===

From 53bdede64d4bb6b287b9dfedfc65a11e60717416 Mon Sep 17 00:00:00 2001
From: FaragElsayed2
Date: Wed, 1 Feb 2023 12:51:55 +0200
Subject: [PATCH 61/71] Updating actions to run drc regression

---
 .github/workflows/regression.yml | 56 ++++++++++++++++++++++++++++++--
 1 file changed, 54 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/regression.yml b/.github/workflows/regression.yml
index 0712bac4..6b7678f0 100644
--- a/.github/workflows/regression.yml
+++ b/.github/workflows/regression.yml
@@ -26,15 +26,67 @@ on:
   workflow_dispatch:
 
 jobs:
-  regression:
+  build_drc-matrix:
+    runs-on: ubuntu-latest
+    outputs:
+      drc_table: ${{ steps.set-matrix.outputs.drc_table }}
+    steps:
+      - uses: actions/checkout@v3
+      - id: set-matrix
+        run: |
+          cd klayout/drc/testing
+          drc_table=`echo '[' ; find testcases/unit/ -iname '*.gds' | tr '\n' ','|sed -e 's/^/\"/'|sed -e 's/,$/\"]/'|sed -e 's/,/\", \"/g'`
+          drc_table="${drc_table//'testcases/unit/'/}"; drc_table="${drc_table//'"", '/}"; drc_table="${drc_table//'.gds'/}";
+          drc_table=`echo $drc_table | jq -c .`
+          echo $drc_table
+          echo "drc_table=$drc_table" >>$GITHUB_OUTPUT
+
+  drc_regression:
+    needs: build_drc-matrix
+    runs-on: ubuntu-latest
+    strategy:
+      max-parallel: 4
+      fail-fast: false
+      matrix:
+        part: [drc]
+        test: ${{ fromJson(needs.build_drc-matrix.outputs.drc_table) }}
+
+    name: ${{ matrix.part }} | ${{ matrix.test }}
+
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          submodules: 'recursive'
+      - name: Testing ${{ matrix.part }} for ${{ matrix.test }}
+        run: |
+          make test-"$(python -c 'print("${{ matrix.part }}".upper())')"-${{ matrix.test }}
+
+  drc_switch:
     runs-on: ubuntu-latest
     strategy:
       max-parallel: 4
       fail-fast: false
       matrix:
         include:
-          - { tool: klayout, part: drc, test: dualgate }
           - { tool: klayout, part: drc, test: switch }
+
+    name: ${{ matrix.part }} | ${{ matrix.test }}
+
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          submodules: 'recursive'
+      - name: Testing ${{ matrix.part }} for ${{ matrix.test }}
+        run: |
+          make test-"$(python -c 'print("${{ matrix.part }}".upper())')"-${{ matrix.test }}
+
+  lvs_regression:
+    runs-on: ubuntu-latest
+    strategy:
+      max-parallel: 4
+      fail-fast: false
+      matrix:
+        include:
           - { tool: klayout, part: lvs, test: main }
           - { tool: klayout, part: lvs, test: switch }
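The `build_drc-matrix` shell pipeline above only turns the unit-testcase file names under `testcases/unit/` into a compact JSON array (e.g. `["dualgate"]`) that `drc_regression` then consumes via `fromJson`. A minimal Python sketch of the same transformation, shown here only to untangle the `sed` chain (not part of the workflow; the sorted order is an assumption, `find` does not sort):

```python
import json
from pathlib import Path

# Same idea as the find/tr/sed/jq pipeline: collect testcases/unit/*.gds,
# drop the directory prefix and the .gds suffix, emit a compact JSON array.
tables = sorted(p.stem for p in Path("testcases/unit").glob("*.gds"))
print(json.dumps(tables, separators=(",", ":")))  # e.g. ["dualgate"]
```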
From 6a694f790754f113f74313f2cdefc4561db768d1 Mon Sep 17 00:00:00 2001
From: Amro Tork
Date: Wed, 1 Feb 2023 13:58:19 +0200
Subject: [PATCH 62/71] Adding link to documentation to refer to the rule that
 is not implemented.

---
 klayout/drc/rule_decks/dualgate.drc | 1 +
 1 file changed, 1 insertion(+)

diff --git a/klayout/drc/rule_decks/dualgate.drc b/klayout/drc/rule_decks/dualgate.drc
index a31552c3..871a7a6e 100644
--- a/klayout/drc/rule_decks/dualgate.drc
+++ b/klayout/drc/rule_decks/dualgate.drc
@@ -42,6 +42,7 @@ if FEOL
   dv3_l1.forget
 
   # rule DV.4 is not a DRC check
+  # Refer to: https://gf180mcu-pdk.readthedocs.io/en/latest/physical_verification/design_manual/drm_07_07.html
 
   # Rule DV.5: Min. Dualgate width. is 0.7µm
   logger.info("Executing rule DV.5")

From 32bad21a191cb38e80a1518432ef6b2b58ccf512 Mon Sep 17 00:00:00 2001
From: FaragElsayed2
Date: Wed, 1 Feb 2023 19:13:04 +0200
Subject: [PATCH 63/71] Updating regression to catch extremely short error
 edges

---
 klayout/drc/testing/run_regression.py | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/klayout/drc/testing/run_regression.py b/klayout/drc/testing/run_regression.py
index 17f7e9a9..55468264 100644
--- a/klayout/drc/testing/run_regression.py
+++ b/klayout/drc/testing/run_regression.py
@@ -44,7 +44,7 @@ import re
 import gdstk
 import errno
-
+import numpy as np
 from collections import defaultdict
 
@@ -547,9 +547,14 @@ def draw_polygons(polygon_data, cell, lay_num, lay_dt, path_width):
     elif tag == "edge-pair":
         for poly in polygons:
             points = [
-                (float(p.split(",")[0]), float(p.split(",")[1]))
+                [float(p.split(",")[0]), float(p.split(",")[1])]
                 for p in poly.split(";")
             ]
+            dist = np.sqrt( ( (points[0][0]) - (points[1][0]) )**2 + ( (points[0][1]) - (points[1][1]) )**2 )
+            # Adding condition for extremely small edge length
+            ## to generate a path to be drawn
+            if dist < path_width:
+                points[1][0] = points[0][0] + 2*path_width
             cell.add(gdstk.FlexPath(points, path_width, layer=lay_num, datatype=lay_dt))
 
     elif tag == "edge":

From dfda31755ceae74b7426c9fa559650760a0db31e Mon Sep 17 00:00:00 2001
From: FaragElsayed2
Date: Wed, 1 Feb 2023 19:22:35 +0200
Subject: [PATCH 64/71] Cleaning regression script

---
 klayout/drc/testing/run_regression.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/klayout/drc/testing/run_regression.py b/klayout/drc/testing/run_regression.py
index 55468264..43a3208b 100644
--- a/klayout/drc/testing/run_regression.py
+++ b/klayout/drc/testing/run_regression.py
@@ -550,11 +550,11 @@ def draw_polygons(polygon_data, cell, lay_num, lay_dt, path_width):
                 [float(p.split(",")[0]), float(p.split(",")[1])]
                 for p in poly.split(";")
             ]
-            dist = np.sqrt( ( (points[0][0]) - (points[1][0]) )**2 + ( (points[0][1]) - (points[1][1]) )**2 )
+            dist = np.sqrt(((points[0][0]) - (points[1][0]))**2 + ((points[0][1]) - (points[1][1]))**2)
             # Adding condition for extremely small edge length
             ## to generate a path to be drawn
             if dist < path_width:
-                points[1][0] = points[0][0] + 2*path_width
+                points[1][0] = points[0][0] + 2 * path_width
             cell.add(gdstk.FlexPath(points, path_width, layer=lay_num, datatype=lay_dt))
 
     elif tag == "edge":
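For context on the two regression patches above: KLayout can report an edge-pair violation whose two reference points (nearly) coincide, and `gdstk.FlexPath` cannot extrude a path with no extent, so the added guard stretches any edge shorter than `path_width` into a visible horizontal stub. A standalone sketch of that logic (hypothetical helper with illustrative values, not code from `run_regression.py`):

```python
import math

def widen_degenerate_edge(points, path_width):
    """Stretch a (near) zero-length error edge so it renders as a path."""
    (x0, y0), (x1, y1) = points
    if math.hypot(x1 - x0, y1 - y0) < path_width:
        # Too short to draw: extend the second point into a horizontal stub.
        points[1] = [x0 + 2 * path_width, y1]
    return points

print(widen_degenerate_edge([[1.0, 1.0], [1.0, 1.0]], 0.01))
# -> [[1.0, 1.0], [1.02, 1.0]]
```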
From f5d7fb675069185c8ba532806147f7b23dcdbbda Mon Sep 17 00:00:00 2001
From: Amro Tork
Date: Thu, 2 Feb 2023 06:39:09 +0200
Subject: [PATCH 65/71] Changing the check for 6LM

---
 klayout/drc/rule_decks/main.drc | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/klayout/drc/rule_decks/main.drc b/klayout/drc/rule_decks/main.drc
index 8d7f2f10..2b622d99 100644
--- a/klayout/drc/rule_decks/main.drc
+++ b/klayout/drc/rule_decks/main.drc
@@ -754,8 +754,7 @@ else
     topmin1_via = via3
     top_metal = metal5
     topmin1_metal = metal4
-  else
-    ## 6LM
+  elsif METAL_LEVEL == "6LM"
     metal5_drawn = get_polygons(81 , 0 )
     count = metal5_drawn.count()
     logger.info("metal5_drawn has %d polygons" % [count])
@@ -820,6 +819,9 @@ else
     topmin1_via = via4
     top_metal = metaltop
     topmin1_metal = metal5
+  else
+    logger.errors("Unknown metal stack %s" % [METAL_LEVEL])
+    raise
   end
 end

From d6fd77865b13be853f82d2bd5ee9a9c897fed530 Mon Sep 17 00:00:00 2001
From: Amro Tork
Date: Thu, 2 Feb 2023 06:42:37 +0200
Subject: [PATCH 66/71] Add logger error for the connectivity check.

---
 klayout/drc/rule_decks/main.drc | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/klayout/drc/rule_decks/main.drc b/klayout/drc/rule_decks/main.drc
index 2b622d99..2cae0f9d 100644
--- a/klayout/drc/rule_decks/main.drc
+++ b/klayout/drc/rule_decks/main.drc
@@ -820,7 +820,7 @@ else
     top_metal = metaltop
     topmin1_metal = metal5
   else
-    logger.errors("Unknown metal stack %s" % [METAL_LEVEL])
+    logger.error("Unknown metal stack %s" % [METAL_LEVEL])
     raise
   end
 end
@@ -1013,7 +1013,7 @@ def conn_space(layer,conn_val,not_conn_val, mode)
     net1 = l2n_data.probe_net(layer.data, ep.first.p1)
     net2 = l2n_data.probe_net(layer.data, ep.second.p1)
     if !net1 || !net2
-      puts "Should not happen ..."
+      logger.error("Connectivity check encountered 2 nets that don't exist. Potential issue in klayout...")
     elsif net1.circuit != net2.circuit || net1.cluster_id != net2.cluster_id
       # unconnected
       unconnected_errors.data.insert(ep)
@@ -1035,7 +1035,7 @@ def conn_separation(layer1, layer2, conn_val,not_conn_val, mode)
     net1 = l2n_data.probe_net(layer1.data, ep.first.p1)
     net2 = l2n_data.probe_net(layer2.data, ep.second.p1)
    if !net1 || !net2
-      puts "Should not happen ..."
+      logger.error("Connectivity check encountered 2 nets that don't exist. Potential issue in klayout...")
     elsif net1.circuit != net2.circuit || net1.cluster_id != net2.cluster_id
       # unconnected
       unconnected_errors.data.insert(ep)

From 8aab0c181974ef10574529553b560816cde7872b Mon Sep 17 00:00:00 2001
From: Amro Tork
Date: Thu, 2 Feb 2023 07:27:16 +0200
Subject: [PATCH 67/71] Fix the report location string.

---
 klayout/drc/rule_decks/main.drc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/klayout/drc/rule_decks/main.drc b/klayout/drc/rule_decks/main.drc
index 2cae0f9d..eae8eb8f 100644
--- a/klayout/drc/rule_decks/main.drc
+++ b/klayout/drc/rule_decks/main.drc
@@ -60,7 +60,7 @@ if $report
   logger.info("GF180MCU Klayout DRC runset output at: %s" % [$report])
   report("DRC Run Report at", $report)
 else
-  logger.info("GF180MCU Klayout DRC runset output at default location." % [File.join(File.dirname(RBA::CellView::active.filename), "gf180_drc.lyrdb")])
+  logger.info("GF180MCU Klayout DRC runset output at default location: %s" % [File.join(File.dirname(RBA::CellView::active.filename), "gf180_drc.lyrdb")])
   report("DRC Run Report at", File.join(File.dirname(RBA::CellView::active.filename), "gf180_drc.lyrdb"))
 end

From 9ca4c200f32ea8a0a1e2ea54a3c55e6b71c1e5a4 Mon Sep 17 00:00:00 2001
From: Amro Tork
Date: Thu, 2 Feb 2023 07:28:04 +0200
Subject: [PATCH 68/71] Remove klayout direct run command.

---
 klayout/drc/rule_decks/main.drc | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/klayout/drc/rule_decks/main.drc b/klayout/drc/rule_decks/main.drc
index eae8eb8f..0dd8d206 100644
--- a/klayout/drc/rule_decks/main.drc
+++ b/klayout/drc/rule_decks/main.drc
@@ -33,9 +33,6 @@ end
 #================================================
 #----------------- FILE SETUP -------------------
 #================================================
-
-# optional for a batch launch : klayout -b -r gf_018mcu.drc -rd input=design.gds -rd report=gp180_drc.lyrdb
-
 logger.info("Starting running GF180MCU Klayout DRC runset on %s" % [$input])
 logger.info("Ruby Version for klayout: %s" % [RUBY_VERSION])

From 9677658d80ce6df632aab5210e0363e5296c7a74 Mon Sep 17 00:00:00 2001
From: Amro Tork
Date: Thu, 2 Feb 2023 07:37:07 +0200
Subject: [PATCH 69/71] Updating path resolution in the main drc.

---
 klayout/drc/rule_decks/main.drc | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/klayout/drc/rule_decks/main.drc b/klayout/drc/rule_decks/main.drc
index 0dd8d206..e575e6c1 100644
--- a/klayout/drc/rule_decks/main.drc
+++ b/klayout/drc/rule_decks/main.drc
@@ -57,8 +57,10 @@
 if $report
   logger.info("GF180MCU Klayout DRC runset output at: %s" % [$report])
   report("DRC Run Report at", $report)
 else
-  logger.info("GF180MCU Klayout DRC runset output at default location: %s" % [File.join(File.dirname(RBA::CellView::active.filename), "gf180_drc.lyrdb")])
-  report("DRC Run Report at", File.join(File.dirname(RBA::CellView::active.filename), "gf180_drc.lyrdb"))
+  layout_dir = Pathname.new(RBA::CellView::active.filename).parent.realpath
+  report_path = layout_dir.join("gf180_drc.lyrdb").to_s
+  logger.info("GF180MCU Klayout DRC runset output at default location: %s" % report_path)
+  report("DRC Run Report at", report_path)
 end

From 9693bc9b73c8a8ce41dab05876380af42645e310 Mon Sep 17 00:00:00 2001
From: FaragElsayed2
Date: Thu, 2 Feb 2023 08:03:03 +0200
Subject: [PATCH 70/71] Updating regression to catch extremely short error
 edges

---
 klayout/drc/testing/run_regression.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/klayout/drc/testing/run_regression.py b/klayout/drc/testing/run_regression.py
index 43a3208b..c935b723 100644
--- a/klayout/drc/testing/run_regression.py
+++ b/klayout/drc/testing/run_regression.py
@@ -563,6 +563,11 @@ def draw_polygons(polygon_data, cell, lay_num, lay_dt, path_width):
                 (float(p.split(",")[0]), float(p.split(",")[1]))
                 for p in poly.split(";")
             ]
+            dist = np.sqrt(((points[0][0]) - (points[1][0]))**2 + ((points[0][1]) - (points[1][1]))**2)
+            # Adding condition for extremely small edge length
+            ## to generate a path to be drawn
+            if dist < path_width:
+                points[1][0] = points[0][0] + 2 * path_width
             cell.add(gdstk.FlexPath(points, path_width, layer=lay_num, datatype=lay_dt))
     else:
         logging.error(f"## Unknown type: {tag} ignored")

From 28944442b3f7119530218a59beb1d607fc257c35 Mon Sep 17 00:00:00 2001
From: FaragElsayed2
Date: Thu, 2 Feb 2023 08:12:13 +0200
Subject: [PATCH 71/71] Fixing an issue in catching extremely short error
 edges

---
 klayout/drc/testing/run_regression.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/klayout/drc/testing/run_regression.py b/klayout/drc/testing/run_regression.py
index c935b723..a3ba601c 100644
--- a/klayout/drc/testing/run_regression.py
+++ b/klayout/drc/testing/run_regression.py
@@ -560,7 +560,7 @@ def draw_polygons(polygon_data, cell, lay_num, lay_dt, path_width):
     elif tag == "edge":
         for poly in polygons:
             points = [
-                (float(p.split(",")[0]), float(p.split(",")[1]))
+                [float(p.split(",")[0]), float(p.split(",")[1])]
                 for p in poly.split(";")
             ]
             dist = np.sqrt(((points[0][0]) - (points[1][0]))**2 + ((points[0][1]) - (points[1][1]))**2)
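PATCH 70 and PATCH 71 together document a classic Python pitfall: the `edge` branch first built its coordinate pairs as tuples, which are immutable, so the in-place widening `points[1][0] = ...` raised a `TypeError` the first time a short edge was hit. A two-case illustration (standalone, not taken from the script):

```python
points = [(0.0, 0.0), (0.0, 0.0)]  # tuple points, as before the fix
try:
    points[1][0] = 0.02            # the in-place widening step
except TypeError as exc:
    print(exc)                     # 'tuple' object does not support item assignment

points = [[0.0, 0.0], [0.0, 0.0]]  # list points, as after the fix
points[1][0] = 0.02                # mutable: works in place
print(points)                      # [[0.0, 0.0], [0.02, 0.0]]
```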