diff --git a/.github/workflows/regression.yml b/.github/workflows/regression.yml
index 0005ee95..6b7678f0 100644
--- a/.github/workflows/regression.yml
+++ b/.github/workflows/regression.yml
@@ -26,14 +26,67 @@ on:
   workflow_dispatch:
 
 jobs:
-  regression:
+  build-drc-matrix:
+    runs-on: ubuntu-latest
+    outputs:
+      drc_table: ${{ steps.set-matrix.outputs.drc_table }}
+    steps:
+      - uses: actions/checkout@v3
+      - id: set-matrix
+        run: |
+          cd klayout/drc/testing
+          drc_table=`echo '[' ; find testcases/unit/ -iname '*.gds' | tr '\n' ','|sed -e 's/^/\"/'|sed -e 's/,$/\"]/'|sed -e 's/,/\", \"/g'`
+          drc_table="${drc_table//'testcases/unit/'/}"; drc_table="${drc_table//'"", '/}"; drc_table="${drc_table//'.gds'/}";
+          drc_table=`echo $drc_table | jq -c .`
+          echo $drc_table
+          echo "drc_table=$drc_table" >>$GITHUB_OUTPUT
+
+  drc_regression:
+    needs: build-drc-matrix
+    runs-on: ubuntu-latest
+    strategy:
+      max-parallel: 4
+      fail-fast: false
+      matrix:
+        part: [drc]
+        test: ${{ fromJson(needs.build-drc-matrix.outputs.drc_table) }}
+
+    name: ${{ matrix.part }} | ${{ matrix.test }}
+
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          submodules: 'recursive'
+      - name: Testing ${{ matrix.part }} for ${{ matrix.test }}
+        run: |
+          make test-"$(python -c 'print("${{ matrix.part }}".upper())')"-${{ matrix.test }}
+
+  drc_switch:
     runs-on: ubuntu-latest
     strategy:
+      max-parallel: 4
       fail-fast: false
       matrix:
         include:
-          - { tool: klayout, part: drc, test: main }
           - { tool: klayout, part: drc, test: switch }
+
+    name: ${{ matrix.part }} | ${{ matrix.test }}
+
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          submodules: 'recursive'
+      - name: Testing ${{ matrix.part }} for ${{ matrix.test }}
+        run: |
+          make test-"$(python -c 'print("${{ matrix.part }}".upper())')"-${{ matrix.test }}
+
+  lvs_regression:
+    runs-on: ubuntu-latest
+    strategy:
+      max-parallel: 4
+      fail-fast: false
+      matrix:
+        include:
           - { tool: klayout, part: lvs, test: main }
           - { tool: klayout, part: lvs, test: switch }
diff --git a/.gitignore b/.gitignore
index cf6a0bb6..5ae8f4bc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,6 +3,8 @@ __pycache__/
 *.py[cod]
 *$py.class
 
+klayout/drc/testing/unit_tests_*
+
 # C extensions
 *.so
diff --git a/Makefile b/Makefile
index 05b2a340..21f134b2 100644
--- a/Makefile
+++ b/Makefile
@@ -22,6 +22,9 @@ REQUIREMENTS_FILE := requirements.txt
 # https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html
 ENVIRONMENT_FILE := pdk_regression.yml
 
+# Path to the KLayout DRC regression tests
+KLAYOUT_TESTS := klayout/drc/testing
+
 include third_party/make-env/conda.mk
 
 # Lint python code
@@ -31,21 +34,32 @@ lint: | $(CONDA_ENV_PYTHON)
 ################################################################################
 ## DRC Regression section
 ################################################################################
-# DRC main testing
-test-DRC-main: | $(CONDA_ENV_PYTHON)
-	@$(IN_CONDA_ENV) klayout -v
+#=================================
+# ----- test-DRC-regression ------
+#=================================
+.ONESHELL:
+test-DRC-main : | $(CONDA_ENV_PYTHON)
+	@$(IN_CONDA_ENV) python3 $(KLAYOUT_TESTS)/run_regression.py
 
-# DRC main testing
+.ONESHELL:
+test-DRC-% : | $(CONDA_ENV_PYTHON)
+	@$(IN_CONDA_ENV) python3 $(KLAYOUT_TESTS)/run_regression.py --table=$*
+
+#=================================
+# -------- test-DRC-switch -------
+#=================================
+# DRC switch testing
 test-DRC-switch: | $(CONDA_ENV_PYTHON)
 	@$(IN_CONDA_ENV) klayout -v
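+# Example: the test-DRC-% pattern rule above maps any table name onto a
+# regression run; e.g. "make test-DRC-dualgate" expands to:
+#   python3 $(KLAYOUT_TESTS)/run_regression.py --table=dualgate
+# (assuming a dualgate testcase exists under $(KLAYOUT_TESTS)/testcases/unit/).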
 ################################################################################
-## DRC Regression section
+## LVS Regression section
 ################################################################################
-# DRC main testing
+# LVS main testing
 test-LVS-main: | $(CONDA_ENV_PYTHON)
 	@$(IN_CONDA_ENV) klayout -v
 
-# DRC main testing
+# LVS main testing
 test-LVS-switch: | $(CONDA_ENV_PYTHON)
 	@$(IN_CONDA_ENV) klayout -v
diff --git a/klayout/drc/README.md b/klayout/drc/README.md
new file mode 100644
index 00000000..865bd600
--- /dev/null
+++ b/klayout/drc/README.md
@@ -0,0 +1,82 @@
+# DRC Documentation
+
+This document explains how to use the GF180MCU DRC runset.
+
+## Folder Structure
+
+```text
+📁drc
+ ┣ 📁testing
+ ┣ 📁rule_decks
+ ┣ 📜README.md
+ ┗ 📜run_drc.py
+```
+
+## Rule Deck Usage
+
+The `run_drc.py` script takes a GDS file and runs the GF180MCU DRC rule decks, with switches to select subsets of the checks.
+
+### Requirements
+
+Please make sure you have a recent KLayout installed. For installation instructions, please refer to the [KLayout build documentation](https://www.klayout.de/build.html).
+
+Also, please make sure to install the required Python packages listed in `requirements.txt`:
+
+```bash
+pip install -r requirements.txt
+```
+
+### Metal Stack Options
+
+The available metal stack options correspond to the following:
+
+- **Option A** : combined options of metal_level=3, mim_option=A, metal_top=30K, poly_res=1K, and mim_cap=2
+- **Option B** : combined options of metal_level=4, mim_option=B, metal_top=11K, poly_res=1K, and mim_cap=2
+- **Option C** : combined options of metal_level=5, mim_option=B, metal_top=9K, poly_res=1K, and mim_cap=2
+
+### Usage
+
+```bash
+    run_drc.py (--help| -h)
+    run_drc.py (--path=<file_path>) (--variant=<combined_options>) [--topcell=<topcell_name>] [--thr=<thr>] [--run_mode=<run_mode>] [--no_feol] [--no_beol] [--connectivity] [--density] [--density_only] [--antenna] [--antenna_only] [--no_offgrid]
+```
+
+Example:
+
+```bash
+    python3 run_drc.py --path=testing/switch_checking/simple_por.gds.gz --thr=16 --run_mode=flat --variant=A --antenna --no_offgrid
+```
+
+### Options
+
+- `--help -h`                    Print this help message.
+
+- `--path=<file_path>`           The input GDS file path.
+
+- `--variant=<combined_options>` Select combined options of metal_top, mim_option, and metal_level. Allowed values (A, B, C).
+    - variant=A: Select metal_top=30K mim_option=A metal_level=3LM
+    - variant=B: Select metal_top=11K mim_option=B metal_level=4LM
+    - variant=C: Select metal_top=9K mim_option=B metal_level=5LM
+
+- `--topcell=<topcell_name>`     Topcell name to use.
+
+- `--thr=<thr>`                  The number of threads used in the run.
+
+- `--run_mode=<run_mode>`        Select KLayout mode. Allowed modes (flat, deep, tiling). [default: flat]
+
+- `--no_feol`                    Turn off FEOL rules from running.
+
+- `--no_beol`                    Turn off BEOL rules from running.
+
+- `--connectivity`               Turn on connectivity rules.
+
+- `--density`                    Turn on Density rules.
+
+- `--density_only`               Turn on Density rules only.
+
+- `--antenna`                    Turn on Antenna checks.
+
+- `--antenna_only`               Turn on Antenna checks only.
+
+- `--no_offgrid`                 Turn off OFFGRID checking rules.
+
+### **DRC Outputs**
+
+A summary of the results appears at the end of the run logs.
+
+The result is a database file (`.lyrdb`) containing all violations, written to the same directory as your design. You can view it on top of your layout using KLayout.
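+
+For example, to open the results database over the input layout in the KLayout
+GUI (a minimal sketch; `my_design.gds` is a placeholder name, and
+`gf180_drc.lyrdb` is the default report written next to the design):
+
+```bash
+klayout my_design.gds -m gf180_drc.lyrdb
+```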
diff --git a/klayout/drc/rule_decks/dualgate.drc b/klayout/drc/rule_decks/dualgate.drc new file mode 100644 index 00000000..871a7a6e --- /dev/null +++ b/klayout/drc/rule_decks/dualgate.drc @@ -0,0 +1,88 @@ +################################################################################################ +# Copyright 2022 GlobalFoundries PDK Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +################################################################################################ + +if FEOL + #================================================ + #--------------------DUALGATE-------------------- + #================================================ + + # Rule DV.1: Min. Dualgate enclose DNWELL. is 0.5µm + logger.info("Executing rule DV.1") + dv1_l1 = dualgate.enclosing(dnwell, 0.5.um, euclidian).polygons(0.001) + dv1_l2 = dnwell.not_outside(dualgate).not(dualgate) + dv1_l = dv1_l1.or(dv1_l2) + dv1_l.output("DV.1", "DV.1 : Min. Dualgate enclose DNWELL. : 0.5µm") + dv1_l1.forget + dv1_l2.forget + dv1_l.forget + + # Rule DV.2: Min. Dualgate Space. Merge if Space is less than this design rule. is 0.44µm + logger.info("Executing rule DV.2") + dv2_l1 = dualgate.space(0.44.um, euclidian).polygons(0.001) + dv2_l1.output("DV.2", "DV.2 : Min. Dualgate Space. Merge if Space is less than this design rule. : 0.44µm") + dv2_l1.forget + + # Rule DV.3: Min. Dualgate to COMP space [unrelated]. is 0.24µm + logger.info("Executing rule DV.3") + dv3_l1 = dualgate.separation(comp.outside(dualgate), 0.24.um, euclidian).polygons(0.001) + dv3_l1.output("DV.3", "DV.3 : Min. Dualgate to COMP space [unrelated]. : 0.24µm") + dv3_l1.forget + + # rule DV.4 is not a DRC check + # Refer to: https://gf180mcu-pdk.readthedocs.io/en/latest/physical_verification/design_manual/drm_07_07.html + + # Rule DV.5: Min. Dualgate width. is 0.7µm + logger.info("Executing rule DV.5") + dv5_l1 = dualgate.width(0.7.um, euclidian).polygons(0.001) + dv5_l1.output("DV.5", "DV.5 : Min. Dualgate width. : 0.7µm") + dv5_l1.forget + + comp_dv = comp.not(pcomp.outside(nwell)) + # Rule DV.6: Min. Dualgate enclose COMP (except substrate tap). is 0.24µm + logger.info("Executing rule DV.6") + dv6_l1 = dualgate.enclosing(comp_dv, 0.24.um, euclidian).polygons(0.001) + dv6_l2 = comp_dv.not_outside(dualgate).not(dualgate) + dv6_l = dv6_l1.or(dv6_l2) + dv6_l.output("DV.6", "DV.6 : Min. Dualgate enclose COMP (except substrate tap). : 0.24µm") + dv6_l1.forget + dv6_l2.forget + dv6_l.forget + + # Rule DV.7: COMP (except substrate tap) can not be partially overlapped by Dualgate. + logger.info("Executing rule DV.7") + dv7_l1 = dualgate.not_outside(comp_dv).not(dualgate.covering(comp_dv)) + dv7_l1.output("DV.7", "DV.7 : COMP (except substrate tap) can not be partially overlapped by Dualgate.") + dv7_l1.forget + + comp_dv.forget + + # Rule DV.8: Min Dualgate enclose Poly2. 
is 0.4µm
+  logger.info("Executing rule DV.8")
+  dv8_l1 = dualgate.enclosing(poly2, 0.4.um, euclidian).polygons(0.001)
+  dv8_l2 = poly2.not_outside(dualgate).not(dualgate)
+  dv8_l = dv8_l1.or(dv8_l2)
+  dv8_l.output("DV.8", "DV.8 : Min Dualgate enclose Poly2. : 0.4µm")
+  dv8_l1.forget
+  dv8_l2.forget
+  dv8_l.forget
+
+  # Rule DV.9: 3.3V and 5V/6V PMOS cannot be sitting inside same NWELL.
+  logger.info("Executing rule DV.9")
+  dv9_l1 = nwell.covering(pgate.and(dualgate)).covering(pgate.not_inside(v5_xtor).not_inside(dualgate))
+  dv9_l1.output("DV.9", "DV.9 : 3.3V and 5V/6V PMOS cannot be sitting inside same NWELL.")
+  dv9_l1.forget
+end #FEOL
+
diff --git a/klayout/drc/rule_decks/main.drc b/klayout/drc/rule_decks/main.drc
new file mode 100644
index 00000000..e575e6c1
--- /dev/null
+++ b/klayout/drc/rule_decks/main.drc
@@ -0,0 +1,1070 @@
+################################################################################################
+# Copyright 2022 GlobalFoundries PDK Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+################################################################################################
+
+#===========================================================================================================================
+#------------------------------------------- GF 0.18um MCU DRC RULE DECK --------------------------------------------------
+#===========================================================================================================================
+require 'time'
+require 'logger'
+require 'etc'
+require 'pathname'
+
+exec_start_time = Time.now
+
+logger = Logger.new(STDOUT)
+
+logger.formatter = proc do |severity, datetime, progname, msg|
+  "#{datetime}: Memory Usage (" + `pmap #{Process.pid} | tail -1`[10,40].strip + ") : #{msg}
+"
+end
+
+#================================================
+#----------------- FILE SETUP -------------------
+#================================================
+logger.info("Starting GF180MCU KLayout DRC runset on %s" % [$input])
+logger.info("Ruby version for KLayout: %s" % [RUBY_VERSION])
+
+if $input
+  if $topcell
+    source($input, $topcell)
+  else
+    source($input)
+  end
+end
+
+if $table_name
+  table_name = $table_name
+else
+  table_name = "main"
+end
+
+logger.info("Loading database to memory is complete.")
+
+if $report
+  logger.info("GF180MCU KLayout DRC runset output at: %s" % [$report])
+  report("DRC Run Report at", $report)
+else
+  layout_dir = Pathname.new(RBA::CellView::active.filename).parent.realpath
+  report_path = layout_dir.join("gf180_drc.lyrdb").to_s
+  logger.info("GF180MCU KLayout DRC runset output at default location: %s" % report_path)
+  report("DRC Run Report at", report_path)
+end
+
+#================================================
+#------------------ SWITCHES --------------------
+#================================================
+logger.info("Evaluate switches.")
+
+# connectivity rules
+if $conn_drc == "true"
+  CONNECTIVITY_RULES = $conn_drc
+  logger.info("connectivity rules are enabled.")
+else
+  CONNECTIVITY_RULES = false
+  logger.info("connectivity rules are disabled.")
+end # connectivity rules
+
+# WEDGE
+if $wedge == "false"
+  WEDGE = $wedge
+else
+  WEDGE = "true"
+end # WEDGE
+
+logger.info("Wedge enabled %s" % [WEDGE])
+
+# BALL
+if $ball == "false"
+  BALL = $ball
+else
+  BALL = "true"
+end # BALL
+
+logger.info("Ball enabled %s" % [BALL])
+
+# GOLD
+if $gold == "false"
+  GOLD = $gold
+else
+  GOLD = "true"
+end # GOLD
+
+logger.info("Gold enabled %s" % [GOLD])
+
+if $mim_option
+  MIM_OPTION = $mim_option
+else
+  MIM_OPTION = "B"
+end
+
+logger.info("MIM Option selected %s" % [MIM_OPTION])
+
+# OFFGRID
+if $offgrid == "false"
+  OFFGRID = false
+else
+  OFFGRID = true
+end # OFFGRID
+
+logger.info("Offgrid enabled %s" % [OFFGRID])
+
+if $thr
+  thr = $thr
+else
+  thr = Etc.nprocessors
+end
+threads(thr)
+logger.info("Number of threads to use %s" % [thr])
+
+#=== PRINT DETAILS ===
+logger.info("Verbose mode: #{$verbose}")
+if $verbose == "true"
+  verbose(true)
+else
+  verbose(false)
+end
+
+# === TILING MODE ===
+if $run_mode == "tiling"
+  tiles(500.um)
+  tile_borders(10.um)
+  logger.info("Tiling mode is enabled.")
+
+elsif $run_mode == "deep"
+  #=== HIER MODE ===
+  deep
+  logger.info("deep mode is enabled.")
+else
+  #=== FLAT MODE ===
+  flat
+  logger.info("flat mode is enabled.")
+end # run_mode
+
+# METAL_TOP
+if $metal_top
+  METAL_TOP = $metal_top
+else
+  METAL_TOP = "9K"
+end # METAL_TOP
+
+logger.info("METAL_TOP Selected is %s" % [METAL_TOP])
+
+# METAL_LEVEL
+if $metal_level
+  METAL_LEVEL = $metal_level
+else
+  METAL_LEVEL = "5LM"
+end # METAL_LEVEL
+
+logger.info("METAL_LEVEL Selected is %s" % [METAL_LEVEL])
+
+# FEOL
+if $feol == "false"
+  FEOL = $feol
+  logger.info("FEOL is disabled.")
+else
+  FEOL = "true"
+  logger.info("FEOL is enabled.")
+end # FEOL
+
+# BEOL
+if $beol == "false"
+  BEOL = $beol
+  logger.info("BEOL is disabled.")
+else
+  BEOL = "true"
+  logger.info("BEOL is enabled.")
+end # BEOL
+
+#================================================
+#------------- LAYERS DEFINITIONS ---------------
+#================================================
+polygons_count = 0
+logger.info("Read in polygons from layers.")
+
+def get_polygons(l, d)
+  ps = polygons(l, d)
+  return $run_mode == "deep" ?
ps : ps.merged +end + +comp = get_polygons(22 , 0 ) +count = comp.count() +logger.info("comp has %d polygons" % [count]) +polygons_count += count + +dnwell = get_polygons(12 , 0 ) +count = dnwell.count() +logger.info("dnwell has %d polygons" % [count]) +polygons_count += count + +nwell = get_polygons(21 , 0 ) +count = nwell.count() +logger.info("nwell has %d polygons" % [count]) +polygons_count += count + +lvpwell = get_polygons(204, 0 ) +count = lvpwell.count() +logger.info("lvpwell has %d polygons" % [count]) +polygons_count += count + +dualgate = get_polygons(55 , 0 ) +count = dualgate.count() +logger.info("dualgate has %d polygons" % [count]) +polygons_count += count + +poly2 = get_polygons(30 , 0 ) +count = poly2.count() +logger.info("poly2 has %d polygons" % [count]) +polygons_count += count + +nplus = get_polygons(32 , 0 ) +count = nplus.count() +logger.info("nplus has %d polygons" % [count]) +polygons_count += count + +pplus = get_polygons(31 , 0 ) +count = pplus.count() +logger.info("pplus has %d polygons" % [count]) +polygons_count += count + +sab = get_polygons(49 , 0 ) +count = sab.count() +logger.info("sab has %d polygons" % [count]) +polygons_count += count + +esd = get_polygons(24 , 0 ) +count = esd.count() +logger.info("esd has %d polygons" % [count]) +polygons_count += count + +resistor = get_polygons(62 , 0 ) +count = resistor.count() +logger.info("resistor has %d polygons" % [count]) +polygons_count += count + +fhres = get_polygons(227, 0 ) +count = fhres.count() +logger.info("fhres has %d polygons" % [count]) +polygons_count += count + +fusetop = get_polygons(75 , 0 ) +count = fusetop.count() +logger.info("fusetop has %d polygons" % [count]) +polygons_count += count + +fusewindow_d = get_polygons(96 , 1 ) +count = fusewindow_d.count() +logger.info("fusewindow_d has %d polygons" % [count]) +polygons_count += count + +polyfuse = get_polygons(220, 0 ) +count = polyfuse.count() +logger.info("polyfuse has %d polygons" % [count]) +polygons_count += count + +mvsd = get_polygons(210, 0 ) +count = mvsd.count() +logger.info("mvsd has %d polygons" % [count]) +polygons_count += count + +mvpsd = get_polygons(11 , 39) +count = mvpsd.count() +logger.info("mvpsd has %d polygons" % [count]) +polygons_count += count + +nat = get_polygons(5 , 0 ) +count = nat.count() +logger.info("nat has %d polygons" % [count]) +polygons_count += count + +comp_dummy = get_polygons(22 , 4 ) +count = comp_dummy.count() +logger.info("comp_dummy has %d polygons" % [count]) +polygons_count += count + +poly2_dummy = get_polygons(30 , 4 ) +count = poly2_dummy.count() +logger.info("poly2_dummy has %d polygons" % [count]) +polygons_count += count + +schottky_diode = get_polygons(241, 0 ) +count = schottky_diode.count() +logger.info("schottky_diode has %d polygons" % [count]) +polygons_count += count + +zener = get_polygons(178, 0 ) +count = zener.count() +logger.info("zener has %d polygons" % [count]) +polygons_count += count + +res_mk = get_polygons(110, 5 ) +count = res_mk.count() +logger.info("res_mk has %d polygons" % [count]) +polygons_count += count + +opc_drc = get_polygons(124, 5 ) +count = opc_drc.count() +logger.info("opc_drc has %d polygons" % [count]) +polygons_count += count + +ndmy = get_polygons(111, 5 ) +count = ndmy.count() +logger.info("ndmy has %d polygons" % [count]) +polygons_count += count + +pmndmy = get_polygons(152, 5 ) +count = pmndmy.count() +logger.info("pmndmy has %d polygons" % [count]) +polygons_count += count + +v5_xtor = get_polygons(112, 1 ) +count = v5_xtor.count() 
+logger.info("v5_xtor has %d polygons" % [count]) +polygons_count += count + +cap_mk = get_polygons(117, 5 ) +count = cap_mk.count() +logger.info("cap_mk has %d polygons" % [count]) +polygons_count += count + +mos_cap_mk = get_polygons(166, 5 ) +count = mos_cap_mk.count() +logger.info("mos_cap_mk has %d polygons" % [count]) +polygons_count += count + +ind_mk = get_polygons(151, 5 ) +count = ind_mk.count() +logger.info("ind_mk has %d polygons" % [count]) +polygons_count += count + +diode_mk = get_polygons(115, 5 ) +count = diode_mk.count() +logger.info("diode_mk has %d polygons" % [count]) +polygons_count += count + +drc_bjt = get_polygons(127, 5 ) +count = drc_bjt.count() +logger.info("drc_bjt has %d polygons" % [count]) +polygons_count += count + +lvs_bjt = get_polygons(118, 5 ) +count = lvs_bjt.count() +logger.info("lvs_bjt has %d polygons" % [count]) +polygons_count += count + +mim_l_mk = get_polygons(117, 10) +count = mim_l_mk.count() +logger.info("mim_l_mk has %d polygons" % [count]) +polygons_count += count + +latchup_mk = get_polygons(137, 5 ) +count = latchup_mk.count() +logger.info("latchup_mk has %d polygons" % [count]) +polygons_count += count + +guard_ring_mk = get_polygons(167, 5 ) +count = guard_ring_mk.count() +logger.info("guard_ring_mk has %d polygons" % [count]) +polygons_count += count + +otp_mk = get_polygons(173, 5 ) +count = otp_mk.count() +logger.info("otp_mk has %d polygons" % [count]) +polygons_count += count + +mtpmark = get_polygons(122, 5 ) +count = mtpmark.count() +logger.info("mtpmark has %d polygons" % [count]) +polygons_count += count + +neo_ee_mk = get_polygons(88 , 17) +count = neo_ee_mk.count() +logger.info("neo_ee_mk has %d polygons" % [count]) +polygons_count += count + +sramcore = get_polygons(108, 5 ) +count = sramcore.count() +logger.info("sramcore has %d polygons" % [count]) +polygons_count += count + +lvs_rf = get_polygons(100, 5 ) +count = lvs_rf.count() +logger.info("lvs_rf has %d polygons" % [count]) +polygons_count += count + +lvs_drain = get_polygons(100, 7 ) +count = lvs_drain.count() +logger.info("lvs_drain has %d polygons" % [count]) +polygons_count += count + +ind_mk = get_polygons(151, 5 ) +count = ind_mk.count() +logger.info("ind_mk has %d polygons" % [count]) +polygons_count += count + +hvpolyrs = get_polygons(123, 5 ) +count = hvpolyrs.count() +logger.info("hvpolyrs has %d polygons" % [count]) +polygons_count += count + +lvs_io = get_polygons(119, 5 ) +count = lvs_io.count() +logger.info("lvs_io has %d polygons" % [count]) +polygons_count += count + +probe_mk = get_polygons(13 , 17) +count = probe_mk.count() +logger.info("probe_mk has %d polygons" % [count]) +polygons_count += count + +esd_mk = get_polygons(24 , 5 ) +count = esd_mk.count() +logger.info("esd_mk has %d polygons" % [count]) +polygons_count += count + +lvs_source = get_polygons(100, 8 ) +count = lvs_source.count() +logger.info("lvs_source has %d polygons" % [count]) +polygons_count += count + +well_diode_mk = get_polygons(153, 51) +count = well_diode_mk.count() +logger.info("well_diode_mk has %d polygons" % [count]) +polygons_count += count + +ldmos_xtor = get_polygons(226, 0 ) +count = ldmos_xtor.count() +logger.info("ldmos_xtor has %d polygons" % [count]) +polygons_count += count + +plfuse = get_polygons(125, 5 ) +count = plfuse.count() +logger.info("plfuse has %d polygons" % [count]) +polygons_count += count + +efuse_mk = get_polygons(80 , 5 ) +count = efuse_mk.count() +logger.info("efuse_mk has %d polygons" % [count]) +polygons_count += count + +mcell_feol_mk = 
get_polygons(11 , 17) +count = mcell_feol_mk.count() +logger.info("mcell_feol_mk has %d polygons" % [count]) +polygons_count += count + +ymtp_mk = get_polygons(86 , 17) +count = ymtp_mk.count() +logger.info("ymtp_mk has %d polygons" % [count]) +polygons_count += count + +dev_wf_mk = get_polygons(128, 17) +count = dev_wf_mk.count() +logger.info("dev_wf_mk has %d polygons" % [count]) +polygons_count += count + +comp_label = get_polygons(22 , 10) +count = comp_label.count() +logger.info("comp_label has %d polygons" % [count]) +polygons_count += count + +poly2_label = get_polygons(30 , 10) +count = poly2_label.count() +logger.info("poly2_label has %d polygons" % [count]) +polygons_count += count + +mdiode = get_polygons(116, 5 ) +count = mdiode.count() +logger.info("mdiode has %d polygons" % [count]) +polygons_count += count + +contact = get_polygons(33 , 0 ) +count = contact.count() +logger.info("contact has %d polygons" % [count]) +polygons_count += count + +metal1_drawn = get_polygons(34 , 0 ) +count = metal1_drawn.count() +logger.info("metal1_drawn has %d polygons" % [count]) +polygons_count += count + +metal1_dummy = get_polygons(34 , 4 ) +count = metal1_dummy.count() +logger.info("metal1_dummy has %d polygons" % [count]) +polygons_count += count + +metal1 = metal1_drawn + metal1_dummy + +metal1_label = get_polygons(34 , 10) +count = metal1_label.count() +logger.info("metal1_label has %d polygons" % [count]) +polygons_count += count + +metal1_slot = get_polygons(34 , 3 ) +count = metal1_slot.count() +logger.info("metal1_slot has %d polygons" % [count]) +polygons_count += count + +metal1_blk = get_polygons(34 , 5 ) +count = metal1_blk.count() +logger.info("metal1_blk has %d polygons" % [count]) +polygons_count += count + +via1 = get_polygons(35 , 0 ) +count = via1.count() +logger.info("via1 has %d polygons" % [count]) +polygons_count += count + + +if METAL_LEVEL == "2LM" + metal2_drawn = get_polygons(36 , 0 ) + count = metal2_drawn.count() + logger.info("metal2_drawn has %d polygons" % [count]) + polygons_count += count + + metal2_dummy = get_polygons(36 , 4 ) + count = metal2_dummy.count() + logger.info("metal2_dummy has %d polygons" % [count]) + polygons_count += count + + metal2 = metal2_drawn + metal2_dummy + + metal2_label = get_polygons(36 , 10) + count = metal2_label.count() + logger.info("metal2_label has %d polygons" % [count]) + polygons_count += count + + metal2_slot = get_polygons(36 , 3 ) + count = metal2_slot.count() + logger.info("metal2_slot has %d polygons" % [count]) + polygons_count += count + + metal2_blk = get_polygons(36 , 5 ) + count = metal2_blk.count() + logger.info("metal2_blk has %d polygons" % [count]) + polygons_count += count + + top_via = via1 + topmin1_via = contact + top_metal = metal2 + topmin1_metal = metal1 + +else + metal2_drawn = get_polygons(36 , 0 ) + count = metal2_drawn.count() + logger.info("metal2_drawn has %d polygons" % [count]) + polygons_count += count + + metal2_dummy = get_polygons(36 , 4 ) + count = metal2_dummy.count() + logger.info("metal2_dummy has %d polygons" % [count]) + polygons_count += count + + metal2 = metal2_drawn + metal2_dummy + + metal2_label = get_polygons(36 , 10) + count = metal2_label.count() + logger.info("metal2_label has %d polygons" % [count]) + polygons_count += count + + metal2_slot = get_polygons(36 , 3 ) + count = metal2_slot.count() + logger.info("metal2_slot has %d polygons" % [count]) + polygons_count += count + + metal2_blk = get_polygons(36 , 5 ) + count = metal2_blk.count() + logger.info("metal2_blk has %d 
polygons" % [count]) + polygons_count += count + + via2 = get_polygons(38 , 0 ) + count = via2.count() + logger.info("via2 has %d polygons" % [count]) + polygons_count += count + + if METAL_LEVEL == "3LM" + metal3_drawn = get_polygons(42 , 0 ) + count = metal3_drawn.count() + logger.info("metal3_drawn has %d polygons" % [count]) + polygons_count += count + + metal3_dummy = get_polygons(42 , 4 ) + count = metal3_dummy.count() + logger.info("metal3_dummy has %d polygons" % [count]) + polygons_count += count + + metal3 = metal3_drawn + metal3_dummy + + metal3_label = get_polygons(42 , 10) + count = metal3_label.count() + logger.info("metal3_label has %d polygons" % [count]) + polygons_count += count + + metal3_slot = get_polygons(42 , 3 ) + count = metal3_slot.count() + logger.info("metal3_slot has %d polygons" % [count]) + polygons_count += count + + metal3_blk = get_polygons(42 , 5 ) + count = metal3_blk.count() + logger.info("metal3_blk has %d polygons" % [count]) + polygons_count += count + + top_via = via2 + topmin1_via = via1 + top_metal = metal3 + topmin1_metal = metal2 + else + metal3_drawn = get_polygons(42 , 0 ) + count = metal3_drawn.count() + logger.info("metal3_drawn has %d polygons" % [count]) + polygons_count += count + + metal3_dummy = get_polygons(42 , 4 ) + count = metal3_dummy.count() + logger.info("metal3_dummy has %d polygons" % [count]) + polygons_count += count + + metal3 = metal3_drawn + metal3_dummy + + metal3_label = get_polygons(42 , 10) + count = metal3_label.count() + logger.info("metal3_label has %d polygons" % [count]) + polygons_count += count + + metal3_slot = get_polygons(42 , 3 ) + count = metal3_slot.count() + logger.info("metal3_slot has %d polygons" % [count]) + polygons_count += count + + metal3_blk = get_polygons(42 , 5 ) + count = metal3_blk.count() + logger.info("metal3_blk has %d polygons" % [count]) + polygons_count += count + + via3 = get_polygons(40 , 0 ) + + if METAL_LEVEL == "4LM" + metal4_drawn = get_polygons(46 , 0 ) + count = metal4_drawn.count() + logger.info("metal4_drawn has %d polygons" % [count]) + polygons_count += count + + metal4_dummy = get_polygons(46 , 4 ) + count = metal4_dummy.count() + logger.info("metal4_dummy has %d polygons" % [count]) + polygons_count += count + + metal4 = metal4_drawn + metal4_dummy + + metal4_label = get_polygons(46 , 10) + count = metal4_label.count() + logger.info("metal4_label has %d polygons" % [count]) + polygons_count += count + + metal4_slot = get_polygons(46 , 3 ) + count = metal4_slot.count() + logger.info("metal4_slot has %d polygons" % [count]) + polygons_count += count + + metal4_blk = get_polygons(46 , 5 ) + count = metal4_blk.count() + logger.info("metal4_blk has %d polygons" % [count]) + polygons_count += count + + top_via = via3 + topmin1_via = via2 + top_metal = metal4 + topmin1_metal = metal3 + else + metal4_drawn = get_polygons(46 , 0 ) + count = metal4_drawn.count() + logger.info("metal4_drawn has %d polygons" % [count]) + polygons_count += count + + metal4_dummy = get_polygons(46 , 4 ) + count = metal4_dummy.count() + logger.info("metal4_dummy has %d polygons" % [count]) + polygons_count += count + + metal4 = metal4_drawn + metal4_dummy + + metal4_label = get_polygons(46 , 10) + count = metal4_label.count() + logger.info("metal4_label has %d polygons" % [count]) + polygons_count += count + + metal4_slot = get_polygons(46 , 3 ) + count = metal4_slot.count() + logger.info("metal4_slot has %d polygons" % [count]) + polygons_count += count + + metal4_blk = get_polygons(46 , 5 ) + count = 
metal4_blk.count() + logger.info("metal4_blk has %d polygons" % [count]) + polygons_count += count + + via4 = get_polygons(41 , 0 ) + count = via4.count() + logger.info("via4 has %d polygons" % [count]) + polygons_count += count + + if METAL_LEVEL == "5LM" + metal5_drawn = get_polygons(81 , 0 ) + count = metal5_drawn.count() + logger.info("metal5_drawn has %d polygons" % [count]) + polygons_count += count + + metal5_dummy = get_polygons(81 , 4 ) + count = metal5_dummy.count() + logger.info("metal5_dummy has %d polygons" % [count]) + polygons_count += count + + metal5 = metal5_drawn + metal5_dummy + + metal5_label = get_polygons(81 , 10) + count = metal5_label.count() + logger.info("metal5_label has %d polygons" % [count]) + polygons_count += count + + metal5_slot = get_polygons(81 , 3 ) + count = metal5_slot.count() + logger.info("metal5_slot has %d polygons" % [count]) + polygons_count += count + + metal5_blk = get_polygons(81 , 5 ) + count = metal5_blk.count() + logger.info("metal5_blk has %d polygons" % [count]) + polygons_count += count + + top_via = via4 + topmin1_via = via3 + top_metal = metal5 + topmin1_metal = metal4 + elsif METAL_LEVEL == "6LM" + metal5_drawn = get_polygons(81 , 0 ) + count = metal5_drawn.count() + logger.info("metal5_drawn has %d polygons" % [count]) + polygons_count += count + + metal5_dummy = get_polygons(81 , 4 ) + count = metal5_dummy.count() + logger.info("metal5_dummy has %d polygons" % [count]) + polygons_count += count + + metal5 = metal5_drawn + metal5_dummy + + metal5_label = get_polygons(81 , 10) + count = metal5_label.count() + logger.info("metal5_label has %d polygons" % [count]) + polygons_count += count + + metal5_slot = get_polygons(81 , 3 ) + count = metal5_slot.count() + logger.info("metal5_slot has %d polygons" % [count]) + polygons_count += count + + metal5_blk = get_polygons(81 , 5 ) + count = metal5_blk.count() + logger.info("metal5_blk has %d polygons" % [count]) + polygons_count += count + + via5 = get_polygons(82 , 0 ) + count = via5.count() + logger.info("via5 has %d polygons" % [count]) + polygons_count += count + + + metaltop_drawn = get_polygons(53 , 0 ) + count = metaltop_drawn.count() + logger.info("metaltop_drawn has %d polygons" % [count]) + polygons_count += count + + metaltop_dummy = get_polygons(53 , 4 ) + count = metaltop_dummy.count() + logger.info("metaltop_dummy has %d polygons" % [count]) + polygons_count += count + + metaltop = metaltop_drawn + metaltop_dummy + + metaltop_label = get_polygons(53 , 10) + count = metaltop_label.count() + logger.info("metaltop_label has %d polygons" % [count]) + polygons_count += count + + metaltop_slot = get_polygons(53 , 3 ) + count = metaltop_slot.count() + logger.info("metaltop_slot has %d polygons" % [count]) + polygons_count += count + + metalt_blk = get_polygons(53 , 5 ) + count = metalt_blk.count() + logger.info("metalt_blk has %d polygons" % [count]) + polygons_count += count + + top_via = via5 + topmin1_via = via4 + top_metal = metaltop + topmin1_metal = metal5 + else + logger.error("Unknown metal stack %s" % [METAL_LEVEL]) + raise + end + end + end +end + +pad = get_polygons(37 , 0 ) +count = pad.count() +logger.info("pad has %d polygons" % [count]) +polygons_count += count + +ubmpperi = get_polygons(183, 0 ) +count = ubmpperi.count() +logger.info("ubmpperi has %d polygons" % [count]) +polygons_count += count + +ubmparray = get_polygons(184, 0 ) +count = ubmparray.count() +logger.info("ubmparray has %d polygons" % [count]) +polygons_count += count + +ubmeplate = get_polygons(185, 
0 ) +count = ubmeplate.count() +logger.info("ubmeplate has %d polygons" % [count]) +polygons_count += count + +metal1_res = get_polygons(110, 11) +count = metal1_res.count() +logger.info("metal1_res has %d polygons" % [count]) +polygons_count += count + +metal2_res = get_polygons(110, 12) +count = metal2_res.count() +logger.info("metal2_res has %d polygons" % [count]) +polygons_count += count + +metal3_res = get_polygons(110, 13) +count = metal3_res.count() +logger.info("metal3_res has %d polygons" % [count]) +polygons_count += count + +metal4_res = get_polygons(110, 14) +count = metal4_res.count() +logger.info("metal4_res has %d polygons" % [count]) +polygons_count += count + +metal5_res = get_polygons(110, 15) +count = metal5_res.count() +logger.info("metal5_res has %d polygons" % [count]) +polygons_count += count + +metal6_res = get_polygons(110, 16) +count = metal6_res.count() +logger.info("metal6_res has %d polygons" % [count]) +polygons_count += count + +pr_bndry = get_polygons(0 , 0 ) +count = pr_bndry.count() +logger.info("pr_bndry has %d polygons" % [count]) +polygons_count += count + +border = get_polygons(63 , 0 ) +count = border.count() +logger.info("border has %d polygons" % [count]) +polygons_count += count +logger.info("Total no. of polygons in the design is #{polygons_count}") + +logger.info("Starting deriving base layers.") + +#===================================================== +#------------- BASE LAYERS DERIVATIONS --------------- +#===================================================== + +dnwell_n = dnwell.not(lvpwell) +dnwell_p = dnwell.and(lvpwell) + +all_nwell = dnwell_n.join(nwell) + +ncomp = comp.and(nplus) +pcomp = comp.and(pplus) +tgate = poly2.and(comp).not(res_mk) + +nactive = ncomp.not(all_nwell) +ngate = nactive.and(tgate) +nsd = nactive.interacting(ngate).not(ngate).not(res_mk) +ptap = pcomp.not(all_nwell).not(res_mk) + +pactive = pcomp.and(all_nwell) +pgate = pactive.and(tgate) +psd = pactive.interacting(pgate).not(pgate).not(res_mk) +ntap = ncomp.and(all_nwell).not(res_mk) + +ngate_dn = ngate.and(dnwell_p) +ptap_dn = ptap.and(dnwell_p).outside(well_diode_mk) + +pgate_dn = pgate.and(dnwell_n) +ntap_dn = ntap.and(dnwell_n) + +psd_dn = pcomp.and(dnwell_n).interacting(pgate_dn).not(pgate_dn).not(res_mk) +nsd_dn = ncomp.and(dnwell_p).interacting(ngate_dn).not(ngate_dn).not(res_mk) + +natcomp = nat.and(comp) + +# Gate +nom_gate = tgate.not(dualgate) +thick_gate = tgate.and(dualgate) + +ngate_56V = ngate.and(dualgate) +pgate_56V = pgate.and(dualgate) + +ngate_5V = ngate_56V.and(v5_xtor) +pgate_5V = pgate_56V.and(v5_xtor) + +ngate_6V = ngate_56V.not(v5_xtor) +pgate_6V = pgate_56V.not(v5_xtor) + +# DNWELL +dnwell_3p3v = dnwell.not_interacting(v5_xtor).not_interacting(dualgate) +dnwell_56v = dnwell.overlapping(dualgate) + +# LVPWELL +lvpwell_dn = lvpwell.interacting(dnwell) +lvpwell_out = lvpwell.not_interacting(dnwell) + +lvpwell_dn3p3v = lvpwell.and(dnwell_3p3v) +lvpwell_dn56v = lvpwell.and(dnwell_56v) + +# NWELL +nwell_dn = nwell.interacting(dnwell) +nwell_n_dn = nwell.not_interacting(dnwell) + +#================================================ +#------------- LAYERS CONNECTIONS --------------- +#================================================ + +if CONNECTIVITY_RULES + + logger.info("Construct connectivity for the design.") + + connect(dnwell, ncomp) + connect(ncomp, contact) + connect(pcomp, contact) + + connect(lvpwell_out, pcomp) + connect(lvpwell_dn, pcomp) + + connect(nwell, ncomp) + connect(natcomp, contact) + connect(mvsd, ncomp) + connect(mvpsd, 
pcomp)
+
+  connect(contact, metal1)
+  connect(metal1, via1)
+  connect(via1, metal2)
+  if METAL_LEVEL != "2LM"
+    connect(metal2, via2)
+    connect(via2, metal3)
+
+    if METAL_LEVEL != "3LM"
+      connect(metal3, via3)
+      connect(via3, metal4)
+
+      if METAL_LEVEL != "4LM"
+        connect(metal4, via4)
+        connect(via4, metal5)
+
+        if METAL_LEVEL != "5LM"
+          connect(metal5, via5)
+          connect(via5, metaltop)
+        end
+      end
+    end
+  end
+
+end #CONNECTIVITY_RULES
+
+#================================================
+#------------ PRE-DEFINED FUNCTIONS -------------
+#================================================
+
+def conn_space(layer, conn_val, not_conn_val, mode)
+  if conn_val > not_conn_val
+    raise "ERROR : Wrong connectivity implementation"
+  end
+  connected_output = layer.space(conn_val.um, mode).polygons(0.001)
+  unconnected_errors_unfiltered = layer.space(not_conn_val.um, mode)
+  singularity_errors = layer.space(0.001.um)
+  # Filter out the errors arising from the same net
+  unconnected_errors = DRC::DRCLayer::new(self, RBA::EdgePairs::new)
+  unconnected_errors_unfiltered.data.each do |ep|
+    net1 = l2n_data.probe_net(layer.data, ep.first.p1)
+    net2 = l2n_data.probe_net(layer.data, ep.second.p1)
+    if !net1 || !net2
+      logger.error("Connectivity check encountered two nets that don't exist. Potential issue in klayout...")
+    elsif net1.circuit != net2.circuit || net1.cluster_id != net2.cluster_id
+      # unconnected
+      unconnected_errors.data.insert(ep)
+    end
+  end
+  unconnected_output = unconnected_errors.polygons.or(singularity_errors.polygons(0.001))
+  return connected_output, unconnected_output
+end
+
+def conn_separation(layer1, layer2, conn_val, not_conn_val, mode)
+  if conn_val > not_conn_val
+    raise "ERROR : Wrong connectivity implementation"
+  end
+  connected_output = layer1.separation(layer2, conn_val.um, mode).polygons(0.001)
+  unconnected_errors_unfiltered = layer1.separation(layer2, not_conn_val.um, mode)
+  # Filter out the errors arising from the same net
+  unconnected_errors = DRC::DRCLayer::new(self, RBA::EdgePairs::new)
+  unconnected_errors_unfiltered.data.each do |ep|
+    net1 = l2n_data.probe_net(layer1.data, ep.first.p1)
+    net2 = l2n_data.probe_net(layer2.data, ep.second.p1)
+    if !net1 || !net2
+      logger.error("Connectivity check encountered two nets that don't exist. Potential issue in klayout...")
+    elsif net1.circuit != net2.circuit || net1.cluster_id != net2.cluster_id
+      # unconnected
+      unconnected_errors.data.insert(ep)
+    end
+  end
+  unconnected_output = unconnected_errors.polygons(0.001)
+  return connected_output, unconnected_output
+end
+
+# === IMPLICIT EXTRACTION ===
+if CONNECTIVITY_RULES
+  logger.info("Connectivity rules enabled, Netlist object will be generated.")
+  netlist
+end #CONNECTIVITY_RULES
+
+# === LAYOUT EXTENT ===
+CHIP = extent.sized(0.0)
+logger.info("Total area of the design is #{CHIP.area()} um^2.")
+
+#================================================
+#----------------- MAIN RUNSET ------------------
+#================================================
+
+logger.info("Starting GF180MCU DRC rules.")
+if FEOL
+  logger.info("Running all FEOL rules")
+end #FEOL
+
+if BEOL
+  logger.info("Running all BEOL rules")
+end #BEOL
+
diff --git a/klayout/drc/rule_decks/tail.drc b/klayout/drc/rule_decks/tail.drc
new file mode 100644
index 00000000..b4ef0bca
--- /dev/null
+++ b/klayout/drc/rule_decks/tail.drc
@@ -0,0 +1,23 @@
+################################################################################################
+# Copyright 2022 GlobalFoundries PDK Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+################################################################################################
+
+
+exec_end_time = Time.now
+run_time = exec_end_time - exec_start_time
+logger.info("%s DRC Total Run time %f seconds" % [table_name, run_time])
+
diff --git a/klayout/drc/run_drc.py b/klayout/drc/run_drc.py
new file mode 100644
index 00000000..68372859
--- /dev/null
+++ b/klayout/drc/run_drc.py
@@ -0,0 +1,663 @@
+# Copyright 2022 GlobalFoundries PDK Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Run GlobalFoundries 180nm MCU DRC.
+
+Usage:
+    run_drc.py (--help| -h)
+    run_drc.py (--path=<file_path>) (--variant=<combined_options>) [--verbose] [--table=<table_name>]... [--mp=<num_cores>] [--run_dir=<run_dir_path>] [--topcell=<topcell_name>] [--thr=<thr>] [--run_mode=<run_mode>] [--no_feol] [--no_beol] [--connectivity] [--density] [--density_only] [--antenna] [--antenna_only] [--no_offgrid]
+
+Options:
+    --help -h                       Print this help message.
+    --path=<file_path>              The input GDS file path.
+    --variant=<combined_options>    Select combined options of metal_top, mim_option, and metal_level. Allowed values (A, B, C).
+                                    variant=A: Select metal_top=30K mim_option=A metal_level=3LM
+                                    variant=B: Select metal_top=11K mim_option=B metal_level=4LM
+                                    variant=C: Select metal_top=9K  mim_option=B metal_level=5LM
+    --topcell=<topcell_name>        Topcell name to use.
+    --table=<table_name>            Table name to use to run the rule deck.
+    --mp=<num_cores>                Run the rule deck in parts in parallel to speed up the run. [default: 1]
+    --run_dir=<run_dir_path>        Run directory to save all the results [default: pwd]
+    --thr=<thr>                     The number of threads used in the run.
+    --run_mode=<run_mode>           Select KLayout mode. Allowed modes (flat, deep, tiling). [default: flat]
+    --no_feol                       Turn off FEOL rules from running.
+    --no_beol                       Turn off BEOL rules from running.
+    --connectivity                  Turn on connectivity rules.
+    --density                       Turn on Density rules.
+    --density_only                  Turn on Density rules only.
+    --antenna                       Turn on Antenna checks.
+    --antenna_only                  Turn on Antenna checks only.
+    --no_offgrid                    Turn off OFFGRID checking rules.
+    --verbose                       Detailed rule execution log for debugging.
+"""
+
+from docopt import docopt
+import os
+import xml.etree.ElementTree as ET
+import logging
+import klayout.db
+import glob
+from datetime import datetime
+from subprocess import check_call
+import shutil
+import concurrent.futures
+import traceback
+
+
+def get_rules_with_violations(results_database):
+    """
+    This function will find all the rules that have violations in a database.
+
+    Parameters
+    ----------
+    results_database : string or Path object
+        Path string to the results file
+
+    Returns
+    -------
+    set
+        A set that contains all rules in the database with violations
+    """
+
+    mytree = ET.parse(results_database)
+    myroot = mytree.getroot()
+
+    all_violating_rules = set()
+
+    for z in myroot[7]:  # myroot[7] : List of rules with violations
+        all_violating_rules.add(f"{z[1].text}".replace("'", ""))
+
+    return all_violating_rules
+
+
+def check_drc_results(results_db_files: list):
+    """
+    check_drc_results checks the results databases generated by the run and reports at the end whether the DRC run failed or passed.
+    This function will exit with 1 if there are violations.
+
+    Parameters
+    ----------
+    results_db_files : list
+        A list of strings that represent paths to results databases of all the DRC runs.
+    """
+
+    if len(results_db_files) < 1:
+        logging.error("Klayout did not generate any rdb results. Please check run logs")
+        exit(1)
+
+    full_violating_rules = set()
+
+    for f in results_db_files:
+        violating_rules = get_rules_with_violations(f)
+        full_violating_rules.update(violating_rules)
+
+    if len(full_violating_rules) > 0:
+        logging.error("Klayout DRC run is not clean.")
+        logging.error(f"Violated rules are : {str(full_violating_rules)}\n")
+        exit(1)
+    else:
+        logging.info("Klayout DRC run is clean. GDS has no DRC violations.")
+
+
+def generate_drc_run_template(drc_dir: str, run_dir: str, run_tables_list: list = []):
+    """
+    generate_drc_run_template will generate the template file to run drc in the run_dir path.
+
+    Parameters
+    ----------
+    drc_dir : str
+        Path string to the location where the DRC files would be found to get the list of the rule tables.
+    run_dir : str
+        Absolute path string to the run location where all the run output will be generated.
+    run_tables_list : list, optional
+        List of target parts of the rule deck; if empty, all of the rule tables found are used. By default [].
+
+    Returns
+    -------
+    str
+        Absolute path to the generated DRC file.
+ """ + if len(run_tables_list) < 1: + all_tables = [ + os.path.basename(f) + for f in glob.glob(os.path.join(drc_dir, "rule_decks", "*.drc")) + if "antenna" not in f + and "density" not in f + and "main" not in f + and "tail" not in f + ] + deck_name = "main" + elif len(run_tables_list) == 1: + deck_name = run_tables_list[0] + all_tables = ["{}.drc".format(run_tables_list[0])] + else: + all_tables = ["{}.drc".format(t) for t in run_tables_list] + deck_name = "main" + + logging.info( + f"## Generating template with for the following rule tables: {all_tables}" + ) + logging.info(f"## Your run dir located at: {run_dir}") + + all_tables.insert(0, "main.drc") + all_tables.append("tail.drc") + + gen_rule_deck_path = os.path.join(run_dir, "{}.drc".format(deck_name)) + with open(gen_rule_deck_path, "wb") as wfd: + for f in all_tables: + with open(os.path.join(drc_dir, "rule_decks", f), "rb") as fd: + shutil.copyfileobj(fd, wfd) + + return gen_rule_deck_path + + +def get_top_cell_names(gds_path): + """ + get_top_cell_names get the top cell names from the GDS file. + + Parameters + ---------- + gds_path : string + Path to the target GDS file. + + Returns + ------- + List of string + Names of the top cell in the layout. + """ + layout = klayout.db.Layout() + layout.read(gds_path) + top_cells = [t.name for t in layout.top_cells()] + + return top_cells + + +def get_list_of_tables(drc_dir: str): + """ + get_list_of_tables get the list of available tables in the drc + + Parameters + ---------- + drc_dir : str + Path to the DRC folder to get the list of tables from. + """ + return [ + os.path.basename(f).replace(".drc", "") + for f in glob.glob(os.path.join(drc_dir, "rule_decks", "*.drc")) + if all(t not in f for t in ("antenna", "density", "main", "tail")) + ] + + +def get_run_top_cell_name(arguments, layout_path): + """ + get_run_top_cell_name Get the top cell name to use for running. If it's provided by the user, we use the user input. + If not, we get it from the GDS file. + + Parameters + ---------- + arguments : dict + Dictionary that holds the user inputs for the script generated by docopt. + layout_path : string + Path to the target layout. + + Returns + ------- + string + Name of the topcell to use in run. + + """ + + if arguments["--topcell"]: + topcell = arguments["--topcell"] + else: + layout_topcells = get_top_cell_names(layout_path) + if len(layout_topcells) > 1: + logging.error( + "## Layout has multiple topcells. Please use --topcell to determine which topcell you want to run on." + ) + exit(1) + else: + topcell = layout_topcells[0] + + return topcell + + +def generate_klayout_switches(arguments, layout_path): + """ + parse_switches Function that parse all the args from input to prepare switches for DRC run. + + Parameters + ---------- + arguments : dict + Dictionary that holds the arguments used by user in the run command. This is generated by docopt library. + layout_path : string + Path to the layout file that we will run DRC on. + + Returns + ------- + dict + Dictionary that represent all run switches passed to klayout. + """ + switches = dict() + + # No. 
of threads + thrCount = 2 if arguments["--thr"] is None else int(arguments["--thr"]) + switches["thr"] = str(int(thrCount)) + + if arguments["--run_mode"] in ["flat", "deep", "tiling"]: + switches["run_mode"] = arguments["--run_mode"] + else: + logging.error("Allowed klayout modes are (flat , deep , tiling) only") + exit() + + if arguments["--variant"] == "A": + switches["metal_top"] = "30K" + switches["mim_option"] = "A" + switches["metal_level"] = "3LM" + elif arguments["--variant"] == "B": + switches["metal_top"] = "11K" + switches["mim_option"] = "B" + switches["metal_level"] = "4LM" + elif arguments["--variant"] == "C": + switches["metal_top"] = "9K" + switches["mim_option"] = "B" + switches["metal_level"] = "5LM" + else: + logging.error("variant switch allowed values are (A , B, C) only") + exit(1) + + if arguments["--verbose"]: + switches["verbose"] = "true" + else: + switches["verbose"] = "false" + + if arguments["--no_feol"]: + switches["feol"] = "false" + else: + switches["feol"] = "true" + + if arguments["--no_beol"]: + switches["beol"] = "false" + else: + switches["beol"] = "true" + + if arguments["--no_offgrid"]: + switches["offgrid"] = "false" + else: + switches["offgrid"] = "true" + + if arguments["--connectivity"]: + switches["conn_drc"] = "true" + else: + switches["conn_drc"] = "false" + + if arguments["--density"]: + switches["density"] = "true" + else: + switches["density"] = "false" + + switches["topcell"] = get_run_top_cell_name(arguments, layout_path) + switches["input"] = layout_path + + return switches + + +def check_klayout_version(): + """ + check_klayout_version checks klayout version and makes sure it would work with the DRC. + """ + # ======= Checking Klayout version ======= + klayout_v_ = os.popen("klayout -b -v").read() + klayout_v_ = klayout_v_.split("\n")[0] + klayout_v_list = [] + + if klayout_v_ == "": + logging.error( + f"Klayout is not found. Please make sure klayout is installed. Current version: {klayout_v_}" + ) + exit(1) + else: + klayout_v_list = [int(v) for v in klayout_v_.split(" ")[-1].split(".")] + + if len(klayout_v_list) < 1 or len(klayout_v_list) > 3: + logging.error( + f"Was not able to get klayout version properly. Current version: {klayout_v_}" + ) + exit(1) + elif len(klayout_v_list) >= 2 and len(klayout_v_list) <= 3: + if klayout_v_list[1] < 28: + logging.error("Prerequisites at a minimum: KLayout 0.28.0") + logging.error( + "Using this klayout version has not been assesed in this development. Limits are unknown" + ) + exit(1) + + logging.info(f"Your Klayout version is: {klayout_v_}") + + +def check_layout_path(layout_path): + """ + check_layout_type checks if the layout provided is GDS or OAS. Otherwise, kill the process. We only support GDS or OAS now. + + Parameters + ---------- + layout_path : string + string that represent the path of the layout. + + Returns + ------- + string + string that represent full absolute layout path. + """ + + if not os.path.isfile(layout_path): + logging.error( + f"## GDS file path {layout_path} provided doesn't exist or not a file." + ) + exit(1) + + if ".gds" not in layout_path and ".oas" not in layout_path: + logging.error( + f"## Layout {layout_path} is not in GDSII or OASIS format. Please use gds format." + ) + exit(1) + + return os.path.abspath(layout_path) + + +def build_switches_string(sws: dict): + """ + build_switches_string Build swtiches string from dictionary. + + Parameters + ---------- + sws : dict + Dictionary that holds the Antenna swithces. 
+ """ + return " ".join(f"-rd {k}={v}" for k, v in sws.items()) + + +def run_check(drc_file: str, drc_name: str, path: str, run_dir: str, sws: dict): + """ + run_antenna_check run DRC check based on DRC file provided. + + Parameters + ---------- + drc_file : str + String that has the file full path to run. + path : str + String that holds the full path of the layout. + run_dir : str + String that holds the full path of the run location. + sws : dict + Dictionary that holds all switches that needs to be passed to the antenna checks. + + Returns + ------- + string + string that represent the path to the results output database for this run. + + """ + + ## Using print because of the multiprocessing + logging.info( + "Running Global Foundries 180nm MCU {} checks on design {} on cell {}:".format( + path, drc_name, sws["topcell"] + ) + ) + + layout_base_name = os.path.basename(path).split(".")[0] + new_sws = sws.copy() + report_path = os.path.join( + run_dir, "{}_{}.lyrdb".format(layout_base_name, drc_name) + ) + + new_sws["report"] = report_path + sws_str = build_switches_string(new_sws) + sws_str += f" -rd table_name={drc_name}" + + run_str = f"klayout -b -r {drc_file} {sws_str}" + check_call(run_str, shell=True) + + return report_path + + +def run_parallel_run( + arguments: dict, + rule_deck_full_path: str, + layout_path: str, + switches: dict, + drc_run_dir: str, +): + """ + run_single_processor run the drc checks as in a multi-processing. + + Parameters + ---------- + arguments : dict + Dictionary that holds the arguments passed to the run_drc script. + rule_deck_full_path : str + String that holds the path of the rule deck files. + layout_path : str + Path to the target layout. + switches : dict + Dictionary that holds all the switches that will be passed to klayout run. + drc_run_dir : str + Path to the run location. + """ + + list_rule_deck_files = dict() + + ## Run Antenna if required. + if arguments["--antenna"]: + drc_path = os.path.join(rule_deck_full_path, "rule_decks", "antenna.drc") + list_rule_deck_files["antenna"] = drc_path + + ## Run Density if required. + if arguments["--density"]: + drc_path = os.path.join(rule_deck_full_path, "rule_decks", "density.drc") + list_rule_deck_files["density"] = drc_path + + if not arguments["--table"]: + list_of_tables = get_list_of_tables(rule_deck_full_path) + else: + list_of_tables = arguments["--table"] + + ## Generate run rule deck from template. + for t in list_of_tables: + drc_file = generate_drc_run_template(rule_deck_full_path, drc_run_dir, [t]) + list_rule_deck_files[t] = drc_file + + ## Run All DRC files. + list_res_db_files = [] + with concurrent.futures.ThreadPoolExecutor(max_workers=os.cpu_count()) as executor: + future_to_run_name = dict() + for n in list_rule_deck_files: + future_to_run_name[ + executor.submit( + run_check, + list_rule_deck_files[n], + n, + layout_path, + drc_run_dir, + switches, + ) + ] = n + + for future in concurrent.futures.as_completed(future_to_run_name): + run_name = future_to_run_name[future] + try: + list_res_db_files.append(future.result()) + except Exception as exc: + logging.error("%s generated an exception: %s" % (run_name, str(exc))) + traceback.print_exc() + + ## Check run + check_drc_results(list_res_db_files) + + +def run_single_processor( + arguments: dict, + rule_deck_full_path: str, + layout_path: str, + switches: dict, + drc_run_dir: str, +): + """ + run_single_processor run the drc checks as single run. 
+
+    Parameters
+    ----------
+    arguments : dict
+        Dictionary that holds the arguments passed to the run_drc script.
+    rule_deck_full_path : str
+        String that holds the path of the rule deck files.
+    layout_path : str
+        Path to the target layout.
+    switches : dict
+        Dictionary that holds all the switches that will be passed to klayout run.
+    drc_run_dir : str
+        Path to the run location.
+    """
+
+    list_res_db_files = []
+
+    ## Run Antenna if required.
+    if arguments["--antenna"] or arguments["--antenna_only"]:
+        drc_path = os.path.join(rule_deck_full_path, "rule_decks", "antenna.drc")
+        list_res_db_files.append(
+            run_check(drc_path, "antenna", layout_path, drc_run_dir, switches)
+        )
+
+        if arguments["--antenna_only"]:
+            logging.info("## Completed running Antenna checks only.")
+            exit()
+
+    ## Run Density if required.
+    if arguments["--density"] or arguments["--density_only"]:
+        drc_path = os.path.join(rule_deck_full_path, "rule_decks", "density.drc")
+        list_res_db_files.append(
+            run_check(drc_path, "density", layout_path, drc_run_dir, switches)
+        )
+
+        if arguments["--density_only"]:
+            logging.info("## Completed running density checks only.")
+            exit()
+
+    ## Generate run rule deck from template.
+    if not arguments["--table"]:
+        drc_file = generate_drc_run_template(rule_deck_full_path, drc_run_dir)
+    else:
+        drc_file = generate_drc_run_template(
+            rule_deck_full_path, drc_run_dir, arguments["--table"]
+        )
+
+    ## Run Main DRC
+    list_res_db_files.append(
+        run_check(drc_file, "main", layout_path, drc_run_dir, switches)
+    )
+
+    ## Check run
+    check_drc_results(list_res_db_files)
+
+
+def main(drc_run_dir: str, arguments: dict):
+    """
+    main function to run the DRC.
+
+    Parameters
+    ----------
+    drc_run_dir : str
+        String with absolute path of the full run dir.
+    arguments : dict
+        Dictionary that holds the arguments used by user in the run command. This is generated by docopt library.
+    """
+
+    # Check GDS file existence
+    if not os.path.exists(arguments["--path"]):
+        file_path = arguments["--path"]
+        logging.error(
+            f"The input GDS file path {file_path} doesn't exist, please recheck."
+        )
+        exit(1)
+
+    rule_deck_full_path = os.path.dirname(os.path.abspath(__file__))
+
+    ## Check Klayout version
+    check_klayout_version()
+
+
+    ## Check layout type
+    layout_path = arguments["--path"]
+    layout_path = check_layout_path(layout_path)
+
+    ## Get run switches
+    switches = generate_klayout_switches(arguments, layout_path)
+
+    if (
+        int(arguments["--mp"]) == 1
+        or arguments["--antenna_only"]
+        or arguments["--density_only"]
+    ):
+        run_single_processor(
+            arguments, rule_deck_full_path, layout_path, switches, drc_run_dir
+        )
+    else:
+        run_parallel_run(
+            arguments, rule_deck_full_path, layout_path, switches, drc_run_dir
+        )
+
+
+# ================================================================
+# -------------------------- MAIN --------------------------------
+# ================================================================
+
+if __name__ == "__main__":
+
+    # arguments
+    arguments = docopt(__doc__, version="RUN DRC: 1.0")
+
+    # logs format
+    now_str = datetime.utcnow().strftime("drc_run_%Y_%m_%d_%H_%M_%S")
+
+    if (
+        arguments["--run_dir"] == "pwd"
+        or arguments["--run_dir"] == ""
+        or arguments["--run_dir"] is None
+    ):
+        drc_run_dir = os.path.join(os.path.abspath(os.getcwd()), now_str)
+    else:
+        drc_run_dir = os.path.abspath(arguments["--run_dir"])
+
+    os.makedirs(drc_run_dir, exist_ok=True)
+
+    logging.basicConfig(
+        level=logging.DEBUG,
+        handlers=[
+            logging.FileHandler(os.path.join(drc_run_dir, "{}.log".format(now_str))),
+            logging.StreamHandler(),
+        ],
+        format="%(asctime)s | %(levelname)-7s | %(message)s",
+        datefmt="%d-%b-%Y %H:%M:%S",
+    )
+
+    # Calling main function
+    main(drc_run_dir, arguments)
diff --git a/klayout/drc/testing/README.md b/klayout/drc/testing/README.md
new file mode 100644
index 00000000..254cfb5e
--- /dev/null
+++ b/klayout/drc/testing/README.md
@@ -0,0 +1,22 @@
+# GlobalFoundries 180nm MCU DRC Testing
+
+Explains how to test the GF180nm DRC rule deck.
+
+## Folder Structure
+
+```text
+📁testing
+ ┣ 📜README.md (This file documents the regression.)
+ ┣ 📜run_regression.py (Main regression script that runs the regression.)
+ ┣ 📁testcases (All testcases)
+ ```
+
+## Prerequisites
+You need the following set of tools installed to be able to run the regression:
+- Python 3.6+
+- KLayout 0.28.0+
+
+We have tested this using the following setup:
+- Python 3.9.12
+- KLayout 0.28.2
+
diff --git a/klayout/drc/testing/run_regression.py b/klayout/drc/testing/run_regression.py
new file mode 100644
index 00000000..a3ba601c
--- /dev/null
+++ b/klayout/drc/testing/run_regression.py
@@ -0,0 +1,1018 @@
+# Copyright 2022 GlobalFoundries PDK Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Run GlobalFoundries 180nm MCU DRC Unit Regression.
+
+Usage:
+    run_regression.py (--help| -h)
+    run_regression.py [--mp=<num>] [--run_name=<run_name>] [--table_name=<table_name>]
+
+Options:
+    --help -h                    Print this help message.
+    --mp=<num>                   The number of threads used in the run.
+    --run_name=<run_name>        Select your run name.
+    --table_name=<table_name>    Target a specific table.
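+
+Example:
+    # A typical local run, limited to the dualgate table with 4 threads:
+    python3 run_regression.py --mp=4 --table_name=dualgate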
+""" + +from subprocess import check_call +from subprocess import Popen, PIPE +import concurrent.futures +import traceback +import yaml +from docopt import docopt +import os +from datetime import datetime +import xml.etree.ElementTree as ET +import time +import pandas as pd +import logging +import glob +from pathlib import Path +from tqdm import tqdm +import re +import gdstk +import errno +import numpy as np +from collections import defaultdict + + +SUPPORTED_TC_EXT = "gds" +SUPPORTED_SW_EXT = "yaml" +RULE_LAY_NUM = 10000 +PATH_WIDTH = 0.01 +RULE_STR_SEP = "--" +ANALYSIS_RULES = [ + "pass_patterns", + "fail_patterns", + "false_negative", + "false_positive", + "not_tested", +] + + +def get_unit_test_coverage(gds_file): + """ + This function is used for getting all test cases available inside a single test table. + Parameters + ---------- + gds_file : str + Path string to the location of unit test cases path. + Returns + ------- + list + A list of unique rules found. + """ + # Get rules from gds + rules = [] + + # layer num of rule text + lay_num = 11 + # layer data type of rule text + lay_dt = 222 + + # Getting all rules names from testcase + library = gdstk.read_gds(gds_file) + top_cells = library.top_level() # Get top cells + for cell in top_cells: + flatten_cell = cell.flatten() + # Get all text labels for each cell + labels = flatten_cell.get_labels( + apply_repetitions=True, depth=None, layer=lay_num, texttype=lay_dt + ) + # Get label value + for label in labels: + rule = label.text + if rule not in rules: + rules.append(rule) + + return rules + + +def check_klayout_version(): + """ + check_klayout_version checks klayout version and makes sure it would work with the DRC. + """ + # ======= Checking Klayout version ======= + klayout_v_ = os.popen("klayout -b -v").read() + klayout_v_ = klayout_v_.split("\n")[0] + klayout_v_list = [] + + if klayout_v_ == "": + logging.error("Klayout is not found. Please make sure klayout is installed.") + exit(1) + else: + klayout_v_list = [int(v) for v in klayout_v_.split(" ")[-1].split(".")] + + logging.info(f"Your Klayout version is: {klayout_v_}") + + if len(klayout_v_list) < 1 or len(klayout_v_list) > 3: + logging.error("Was not able to get klayout version properly.") + exit(1) + elif len(klayout_v_list) >= 2 or len(klayout_v_list) <= 3: + if klayout_v_list[1] < 28: + logging.error("Prerequisites at a minimum: KLayout 0.28.0") + logging.error( + "Using this klayout version has not been assesed in this development. Limits are unknown" + ) + exit(1) + + +def get_switches(yaml_file, rule_name): + """Parse yaml file and extract switches data + Parameters + ---------- + yaml_file : str + yaml config file path given py the user. + Returns + ------- + yaml_dic : dictionary + dictionary containing switches data. + """ + + # load yaml config data + with open(yaml_file, "r") as stream: + try: + yaml_dic = yaml.safe_load(stream) + except yaml.YAMLError as exc: + print(exc) + + return [f"{param}={value}" for param, value in yaml_dic[rule_name].items()] + + +def parse_results_db(results_database): + """ + This function will parse Klayout database for analysis. 
+
+    Parameters
+    ----------
+    results_database : string or Path object
+        Path string to the results file.
+
+    Returns
+    -------
+    dict
+        Mapping of every rule in the database to its violation count (0 if clean).
+    """
+
+    mytree = ET.parse(results_database)
+    myroot = mytree.getroot()
+
+    # Initial values for the counters
+    rule_counts = defaultdict(int)
+
+    # Get the list of all rules that ran, regardless of whether they generated
+    # output (myroot[5] is the categories section of the lyrdb report).
+    for z in myroot[5]:
+        rule_name = f"{z[0].text}"
+        rule_counts[rule_name] = 0
+
+    # Count rules with violations (myroot[7] is the items section).
+    for z in myroot[7]:
+        rule_name = f"{z[1].text}".replace("'", "")
+        rule_counts[rule_name] += 1
+
+    return rule_counts
+
+
+def run_test_case(
+    drc_dir,
+    layout_path,
+    run_dir,
+    table_name,
+):
+    """
+    This function runs a single test case using the correct DRC file.
+
+    Parameters
+    ----------
+    drc_dir : string or Path
+        Path to the location where all runsets exist.
+    layout_path : string or Path object
+        Path string to the layout of the test pattern we want to test.
+    run_dir : string or Path object
+        Path to the location where the regression run is done.
+    table_name : string
+        Table name that we are running on.
+
+    Returns
+    -------
+    dict
+        A dict with all rule counts for this test case.
+    """
+
+    # Initial value for the counters
+    rule_counts = defaultdict(int)
+
+    # Get switches used for each run
+    sw_file = os.path.join(
+        Path(layout_path.parent).absolute(), f"{table_name}.{SUPPORTED_SW_EXT}"
+    )
+
+    if os.path.exists(sw_file):
+        switches = " ".join(get_switches(sw_file, table_name))
+    else:
+        switches = "--variant=C"  # default switch
+
+    # Adding switches for specific runsets
+    if "antenna" in str(layout_path):
+        switches += " --antenna_only"
+    elif "density" in str(layout_path):
+        switches += " --density_only"
+
+    # Creating run folder structure
+    pattern_clean = ".".join(os.path.basename(layout_path).split(".")[:-1])
+    output_loc = f"{run_dir}/{table_name}"
+    pattern_log = f"{output_loc}/{pattern_clean}_drc.log"
+
+    # Command to run the DRC
+    call_str = f"python3 {drc_dir}/run_drc.py --path={layout_path} {switches} --table={table_name} --run_dir={output_loc} --run_mode=flat --thr=1 > {pattern_log} 2>&1"
+
+    # Starting klayout run
+    os.makedirs(output_loc, exist_ok=True)
+    try:
+        check_call(call_str, shell=True)
+    except Exception as e:
+        pattern_results = glob.glob(os.path.join(output_loc, f"{pattern_clean}*.lyrdb"))
+        if len(pattern_results) < 1:
+            logging.error("%s generated an exception: %s" % (pattern_clean, e))
+            traceback.print_exc()
+            raise Exception("Failed DRC run.")
+
+    # Dumping the log into the output so CI keeps it
+    if os.path.isfile(pattern_log):
+        logging.info("# Dumping drc run output log:")
+        with open(pattern_log, "r") as f:
+            for line in f:
+                line = line.strip()
+                logging.info(f"{line}")
+
+    # Checking whether the run completed or failed
+    pattern_results = glob.glob(os.path.join(output_loc, f"{pattern_clean}*.lyrdb"))
+
+    # Get list of rules covered in the test case
+    rules_tested = get_unit_test_coverage(layout_path)
+
+    if len(pattern_results) > 0:
+        # db to gds conversion
+        marker_output, runset_analysis = convert_results_db_to_gds(
+            pattern_results[0], rules_tested
+        )
+
+        # Generating merged testcase for violated rules
+        merged_output = generate_merged_testcase(layout_path, marker_output)
+
+        # Generating final db file
+        if os.path.exists(merged_output):
+            final_report = f'{merged_output.split(".")[0]}_final.lyrdb'
+            analysis_log = f'{merged_output.split(".")[0]}_analysis.log'
+            call_str = f"klayout -b -r {runset_analysis} -rd input={merged_output} -rd report={final_report} > {analysis_log} 2>&1"
+
+            failed_analysis_step = False
+
+            try:
+                check_call(call_str, shell=True)
+            except Exception as e:
+                failed_analysis_step = True
+                logging.error("%s generated an exception: %s" % (pattern_clean, e))
+                traceback.print_exc()
+
+            # Dumping the log into the output so CI keeps it
+            if os.path.isfile(analysis_log):
+                logging.info("# Dumping analysis run output log:")
+                with open(analysis_log, "r") as f:
+                    for line in f:
+                        line = line.strip()
+                        logging.info(f"{line}")
+
+            if failed_analysis_step:
+                raise Exception("Failed DRC analysis run.")
+
+            if os.path.exists(final_report):
+                rule_counts = parse_results_db(final_report)
+
+    # Empty counts are returned when any intermediate output is missing.
+    return rule_counts
+
+
+def run_all_test_cases(tc_df, drc_dir, run_dir, num_workers):
+    """
+    This function runs all test cases from the input dataframe.
+
+    Parameters
+    ----------
+    tc_df : pd.DataFrame
+        DataFrame that holds all the test cases information for running.
+    drc_dir : string or Path
+        Path string to the location of the drc runsets.
+    run_dir : string or Path
+        Path string to the location of the testing code and output.
+    num_workers : int
+        Number of workers to use for running the regression.
+
+    Returns
+    -------
+    tuple of pd.DataFrame
+        The per-rule results DataFrame and the updated test-case DataFrame.
+    """
+
+    results_df_list = []
+    tc_df["run_status"] = "no status"
+
+    with concurrent.futures.ThreadPoolExecutor(max_workers=num_workers) as executor:
+        future_to_run_id = dict()
+        for i, row in tc_df.iterrows():
+            future_to_run_id[
+                executor.submit(
+                    run_test_case,
+                    drc_dir,
+                    row["test_path"],
+                    run_dir,
+                    row["table_name"],
+                )
+            ] = row["run_id"]
+
+        for future in concurrent.futures.as_completed(future_to_run_id):
+            run_id = future_to_run_id[future]
+            try:
+                rule_counts = future.result()
+                if rule_counts:
+                    rule_counts_df = pd.DataFrame(
+                        {
+                            "analysis_rule": rule_counts.keys(),
+                            "count": rule_counts.values(),
+                        }
+                    )
+                    rule_counts_df["rule_name"] = (
+                        rule_counts_df["analysis_rule"].str.split(RULE_STR_SEP).str[0]
+                    )
+                    rule_counts_df["type"] = (
+                        rule_counts_df["analysis_rule"].str.split(RULE_STR_SEP).str[1]
+                    )
+                    rule_counts_df.drop(columns=["analysis_rule"], inplace=True)
+                    rule_counts_df["count"] = rule_counts_df["count"].astype(int)
+                    rule_counts_df = rule_counts_df.pivot(
+                        index="rule_name", columns="type", values="count"
+                    )
+                    rule_counts_df = rule_counts_df.fillna(0)
+                    rule_counts_df = rule_counts_df.reset_index(drop=False)
+                    rule_counts_df = rule_counts_df.rename(
+                        columns={"index": "rule_name"}
+                    )
+
+                    rule_counts_df["table_name"] = tc_df.loc[
+                        tc_df["run_id"] == run_id, "table_name"
+                    ].iloc[0]
+
+                    for c in ANALYSIS_RULES:
+                        if c not in rule_counts_df.columns:
+                            rule_counts_df[c] = 0
+
+                    rule_counts_df[ANALYSIS_RULES] = rule_counts_df[
+                        ANALYSIS_RULES
+                    ].astype(int)
+                    rule_counts_df = rule_counts_df[
+                        ["table_name", "rule_name"] + ANALYSIS_RULES
+                    ]
+                    results_df_list.append(rule_counts_df)
+                    tc_df.loc[tc_df["run_id"] == run_id, "run_status"] = "completed"
+                else:
+                    tc_df.loc[tc_df["run_id"] == run_id, "run_status"] = "no output"
+
+            except Exception as exc:
+                logging.error("%d generated an exception: %s" % (run_id, exc))
+                traceback.print_exc()
+                tc_df.loc[tc_df["run_id"] == run_id, "run_status"] = "exception"
+
+    if len(results_df_list) > 0:
+        results_df = pd.concat(results_df_list)
+    else:
+        results_df = pd.DataFrame()
+
+    return results_df, tc_df
+
+
+def parse_existing_rules(rule_deck_path, output_path, target_table=None):
+    """
+    This function collects the rule names from the existing DRC rule decks.
+
+    Parameters
+    ----------
+    rule_deck_path : string or Path object
+        Path string to the DRC directory where all the DRC files are located.
+    output_path : string or Path
+        Path of the run location to store the output analysis file.
+    target_table : string, optional
+        Name of the table to be tested.
+
+    Returns
+    -------
+    pd.DataFrame
+        A pandas DataFrame with the rule and rule deck used.
+    """
+
+    if target_table is None:
+        drc_files = glob.glob(os.path.join(rule_deck_path, "rule_decks", "*.drc"))
+    else:
+        table_rule_file = os.path.join(
+            rule_deck_path, "rule_decks", f"{target_table}.drc"
+        )
+        if not os.path.isfile(table_rule_file):
+            raise FileNotFoundError(
+                errno.ENOENT, os.strerror(errno.ENOENT), table_rule_file
+            )
+
+        drc_files = [table_rule_file]
+
+    rules_data = list()
+
+    for runset in drc_files:
+        with open(runset, "r") as f:
+            for line in f:
+                if ".output" in line:
+                    line_list = line.split('"')
+                    rule_info = dict()
+                    rule_info["table_name"] = os.path.basename(runset).replace(
+                        ".drc", ""
+                    )
+                    rule_info["rule_name"] = line_list[1]
+                    rule_info["in_rule_deck"] = 1
+                    rules_data.append(rule_info)
+
+    df = pd.DataFrame(rules_data)
+    df.drop_duplicates(inplace=True)
+    df.to_csv(os.path.join(output_path, "rule_deck_rules.csv"), index=False)
+    return df
+
+
+def generate_merged_testcase(original_testcase, marker_testcase):
+    """
+    This function merges the original gds file with the generated
+    markers gds file.
+
+    Parameters
+    ----------
+    original_testcase : string or Path object
+        Path string to the original testcase.
+
+    marker_testcase : string or Path
+        Path of the output marker gds file generated from the db file.
+
+    Returns
+    -------
+    merged_gds_path : string or Path
+        Path of the final merged gds file generated.
+    """
+
+    new_lib = gdstk.Library()
+
+    lib_org = gdstk.read_gds(original_testcase)
+    lib_marker = gdstk.read_gds(marker_testcase)
+
+    # Getting flattened top cells
+    top_cell_org = lib_org.top_level()[0].flatten(apply_repetitions=True)
+    top_cell_marker = lib_marker.top_level()[0].flatten(apply_repetitions=True)
+    marker_polygons = top_cell_marker.get_polygons(
+        apply_repetitions=True, include_paths=True, depth=None
+    )
+
+    # Merging all marker polygons with the original testcase
+    for marker_polygon in marker_polygons:
+        top_cell_org.add(marker_polygon)
+
+    # Adding flattened merged cell
+    new_lib.add(top_cell_org.flatten(apply_repetitions=True))
+
+    # Writing the final merged gds file
+    merged_gds_path = f'{marker_testcase.replace(".gds", "")}_merged.gds'
+    new_lib.write_gds(merged_gds_path)
+
+    return merged_gds_path
+
+
+def draw_polygons(polygon_data, cell, lay_num, lay_dt, path_width):
+    """
+    This function draws a gds file with all violated polygons.
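+    The polygon_data string follows the KLayout results-database value format,
+    e.g. "polygon: (x1,y1;x2,y2;...)" or "edge-pair: (...)/(...)"; whitespace
+    and parentheses are stripped before parsing.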
+
+    Parameters
+    ----------
+    polygon_data : str
+        Contains the data points for each violated polygon.
+    cell : gdstk.Cell
+        Top cell that will contain all generated polygons.
+    lay_num : int
+        Layer number used to draw the violated polygons.
+    lay_dt : int
+        Layer data type used to draw the violated polygons.
+    path_width : float
+        Width used to draw edge markers.
+
+    Returns
+    -------
+    None
+    """
+
+    # Cleaning data points
+    polygon_data = re.sub(r"\s+", "", polygon_data)
+    polygon_data = re.sub(r"[()]", "", polygon_data)
+
+    tag_split = polygon_data.split(":")
+    tag = tag_split[0]
+    poly_txt = tag_split[1]
+    polygons = re.split(r"[/|]", poly_txt)
+
+    # Select the shape type to be drawn
+    if tag == "polygon":
+        for poly in polygons:
+            points = [
+                (float(p.split(",")[0]), float(p.split(",")[1]))
+                for p in poly.split(";")
+            ]
+            cell.add(gdstk.Polygon(points, lay_num, lay_dt))
+
+    elif tag in ("edge-pair", "edge"):
+        for poly in polygons:
+            points = [
+                [float(p.split(",")[0]), float(p.split(",")[1])]
+                for p in poly.split(";")
+            ]
+            dist = np.sqrt(
+                (points[0][0] - points[1][0]) ** 2
+                + (points[0][1] - points[1][1]) ** 2
+            )
+            # For extremely short edges, stretch the segment so that a
+            # visible path can still be drawn.
+            if dist < path_width:
+                points[1][0] = points[0][0] + 2 * path_width
+            cell.add(
+                gdstk.FlexPath(points, path_width, layer=lay_num, datatype=lay_dt)
+            )
+
+    else:
+        logging.error(f"## Unknown type: {tag} ignored")
+
+
+def convert_results_db_to_gds(results_database: str, rules_tested: list):
+    """
+    This function parses the KLayout results database for analysis.
+    It converts the lyrdb klayout database file to a GDSII file.
+
+    Parameters
+    ----------
+    results_database : string or Path object
+        Path string to the results file.
+    rules_tested : list
+        List of strings that holds the rule names that are covered by the test case.
+
+    Returns
+    -------
+    output_gds_path : string or Path
+        Path of the output marker gds file generated from the db file.
+    output_runset_path : string or Path
+        Path of the output drc runset used for analysis.
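+
+    Notes
+    -----
+    Markers are drawn on layer RULE_LAY_NUM (10000) with a distinct datatype
+    per violated rule, so the generated analysis deck can select each rule's
+    markers individually.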
+ """ + + # Writing analysis rule deck + pass_marker = "input(2, 222)" + fail_marker = "input(3, 222)" + fail_marker2 = "input(6, 222)" + text_marker = "input(11, 222)" + + output_runset_path = f'{results_database.replace(".lyrdb", "")}_analysis.drc' + + analysis_rules = [] + runset_analysis_setup = f""" + source($input) + report("DRC analysis run report at", $report) + pass_marker = {pass_marker} + fail_marker = {fail_marker} + fail_marker2 = {fail_marker2} + text_marker = {text_marker} + + full_chip = extent.sized(0.0) + + """ + analysis_rules.append(runset_analysis_setup) + + # Generating violated rules and its points + cell_name = "" + lib = None + cell = None + in_item = False + rule_data_type_map = list() + + for ev, elem in tqdm(ET.iterparse(results_database, events=("start", "end"))): + + if elem.tag != "item" and not in_item: + elem.clear() + continue + + if elem.tag != "item" and in_item: + continue + + if elem.tag == "item" and ev == "start": + in_item = True + continue + + rules = elem.findall("category") + values = elem.findall("values") + + if len(values) > 0: + polygons = values[0].findall("value") + else: + polygons = [] + + if cell_name == "": + all_cells = elem.findall("cell") + + if len(all_cells) > 0: + cell_name = all_cells[0].text + + if cell_name is None: + elem.clear() + continue + + lib = gdstk.Library(f"{cell_name}_markers") + cell = lib.new_cell(f"{cell_name}_markers") + + if len(rules) > 0: + rule_name = rules[0].text.replace("'", "") + if rule_name is None: + elem.clear() + continue + else: + elem.clear() + continue + + if rule_name not in rule_data_type_map: + rule_data_type_map.append(rule_name) + + ## Drawing polygons here. + rule_lay_dt = rule_data_type_map.index(rule_name) + 1 + if cell is not None: + for p in polygons: + draw_polygons(p.text, cell, RULE_LAY_NUM, rule_lay_dt, PATH_WIDTH) + + ## Clearing memeory + in_item = False + elem.clear() + + # Writing final marker gds file + if lib is not None: + output_gds_path = f'{results_database.replace(".lyrdb", "")}_markers.gds' + lib.write_gds(output_gds_path) + else: + logging.error("Failed to get any results in the lyrdb database.") + exit(1) + + # Saving analysis rule deck. 
+    # Rules that produced markers in the results database.
+    for r in rule_data_type_map:
+        rule_lay_dt = rule_data_type_map.index(r) + 1
+        rule_layer_name = f'rule_{r.replace(".", "_")}'
+        rule_layer = f"{rule_layer_name} = input({RULE_LAY_NUM}, {rule_lay_dt})"
+
+        pass_patterns_rule = f"""
+        pass_marker.interacting( text_marker.texts("{r}") ).output("{r}{RULE_STR_SEP}pass_patterns", "{r}{RULE_STR_SEP}pass_patterns polygons")
+        """
+        fail_patterns_rule = f"""
+        fail_marker2.interacting(fail_marker.interacting(text_marker.texts("{r}")) ).or( fail_marker.interacting(text_marker.texts("{r}")).not_interacting(fail_marker2) ).output("{r}{RULE_STR_SEP}fail_patterns", "{r}{RULE_STR_SEP}fail_patterns polygons")
+        """
+        false_pos_rule = f"""
+        pass_marker.interacting(text_marker.texts("{r}")).interacting({rule_layer_name}).output("{r}{RULE_STR_SEP}false_positive", "{r}{RULE_STR_SEP}false_positive occurred")
+        """
+        false_neg_rule = f"""
+        ((fail_marker2.interacting(fail_marker.interacting(text_marker.texts("{r}")))).or((fail_marker.interacting(text_marker.texts("{r}")).not_interacting(fail_marker2)))).not_interacting({rule_layer_name}).output("{r}{RULE_STR_SEP}false_negative", "{r}{RULE_STR_SEP}false_negative occurred")
+        """
+        rule_not_tested = f"""
+        full_chip.not_interacting({rule_layer_name}).output("{r}{RULE_STR_SEP}not_tested", "{r}{RULE_STR_SEP}not_tested occurred")
+        """
+
+        analysis_rules.append(rule_layer)
+        analysis_rules.append(pass_patterns_rule)
+        analysis_rules.append(fail_patterns_rule)
+        analysis_rules.append(false_pos_rule)
+        analysis_rules.append(false_neg_rule)
+        analysis_rules.append(rule_not_tested)
+
+    # Tested rules that never appeared in the results database get fresh
+    # datatypes after the mapped ones.
+    rule_lay_dt = len(rule_data_type_map) + 1
+
+    for r in rules_tested:
+        if r in rule_data_type_map:
+            continue
+
+        rule_layer_name = f'rule_{r.replace(".", "_")}'
+        rule_layer = f"{rule_layer_name} = input({RULE_LAY_NUM}, {rule_lay_dt})"
+
+        pass_patterns_rule = f"""
+        pass_marker.interacting( text_marker.texts("{r}") ).output("{r}{RULE_STR_SEP}pass_patterns", "{r}{RULE_STR_SEP}pass_patterns polygons")
+        """
+        fail_patterns_rule = f"""
+        fail_marker2.interacting(fail_marker.interacting(text_marker.texts("{r}")) ).or( fail_marker.interacting(text_marker.texts("{r}")).not_interacting(fail_marker2) ).output("{r}{RULE_STR_SEP}fail_patterns", "{r}{RULE_STR_SEP}fail_patterns polygons")
+        """
+        false_pos_rule = f"""
+        pass_marker.interacting(text_marker.texts("{r}")).interacting({rule_layer_name}).output("{r}{RULE_STR_SEP}false_positive", "{r}{RULE_STR_SEP}false_positive occurred")
+        """
+        false_neg_rule = f"""
+        ((fail_marker2.interacting(fail_marker.interacting(text_marker.texts("{r}")))).or((fail_marker.interacting(text_marker.texts("{r}")).not_interacting(fail_marker2)))).not_interacting({rule_layer_name}).output("{r}{RULE_STR_SEP}false_negative", "{r}{RULE_STR_SEP}false_negative occurred")
+        """
+        rule_not_tested = f"""
+        full_chip.not_interacting({rule_layer_name}).output("{r}{RULE_STR_SEP}not_tested", "{r}{RULE_STR_SEP}not_tested occurred")
+        """
+
+        analysis_rules.append(rule_layer)
+        analysis_rules.append(pass_patterns_rule)
+        analysis_rules.append(fail_patterns_rule)
+        analysis_rules.append(false_pos_rule)
+        analysis_rules.append(false_neg_rule)
+        analysis_rules.append(rule_not_tested)
+
+        rule_lay_dt += 1
+
+    with open(output_runset_path, "w") as runset_analysis:
+        runset_analysis.write("".join(analysis_rules))
+
+    return output_gds_path, output_runset_path
+
+
+def build_tests_dataframe(unit_test_cases_dir, target_table):
+    """
+    This function is used for getting all test cases available in a formatted
+    dataframe before running.
+
+    Parameters
+    ----------
+    unit_test_cases_dir : str
+        Path string to the location of the unit test cases.
+    target_table : str or None
+        Name of the table that we want to run the regression for. If None, run all found.
+
+    Returns
+    -------
+    pd.DataFrame
+        A DataFrame that has all the targeted test cases that we need to run.
+    """
+    all_unit_test_cases = sorted(
+        Path(unit_test_cases_dir).rglob("*.{}".format(SUPPORTED_TC_EXT))
+    )
+    logging.info(
+        "## Total number of test cases found: {}".format(len(all_unit_test_cases))
+    )
+
+    # Get the test cases dataframe
+    tc_df = pd.DataFrame({"test_path": all_unit_test_cases})
+    tc_df["table_name"] = tc_df["test_path"].apply(lambda x: x.name.replace(".gds", ""))
+
+    if target_table is not None:
+        tc_df = tc_df[tc_df["table_name"] == target_table]
+
+    if len(tc_df) < 1:
+        logging.error("No test cases remaining after filtering.")
+        exit(1)
+
+    tc_df["run_id"] = range(len(tc_df))
+    return tc_df
+
+
+def aggregate_results(
+    tc_df: pd.DataFrame, results_df: pd.DataFrame, rules_df: pd.DataFrame
+):
+    """
+    aggregate_results aggregates the results of all runs.
+
+    Parameters
+    ----------
+    tc_df : pd.DataFrame
+        Dataframe that holds the information about the test cases.
+    results_df : pd.DataFrame
+        Dataframe that holds the information about the unit test results.
+    rules_df : pd.DataFrame
+        Dataframe that holds the information about all the rules implemented in the rule deck.
+
+    Returns
+    -------
+    pd.DataFrame
+        A DataFrame with all data analysis aggregated into one.
+    """
+    if len(rules_df) < 1 and len(results_df) < 1:
+        logging.error("## There are no rules for analysis or run.")
+        exit(1)
+    elif len(rules_df) < 1 and len(results_df) > 0:
+        df = results_df
+    elif len(rules_df) > 0 and len(results_df) < 1:
+        df = rules_df
+        for c in ANALYSIS_RULES:
+            df[c] = 0
+    else:
+        df = results_df.merge(rules_df, how="outer", on=["table_name", "rule_name"])
+
+    df[ANALYSIS_RULES] = df[ANALYSIS_RULES].fillna(0)
+    df["in_rule_deck"] = df["in_rule_deck"].fillna(0)
+    df = df.merge(tc_df[["table_name", "run_status"]], how="left", on="table_name")
+
+    df["rule_status"] = "Unknown"
+    df.loc[(df["false_negative"] > 0), "rule_status"] = "Rule Failed"
+    df.loc[(df["false_positive"] > 0), "rule_status"] = "Rule Failed"
+    df.loc[
+        (df["not_tested"] > 0) | (df["pass_patterns"] < 1), "rule_status"
+    ] = "Rule Not Tested"
+    df.loc[
+        (df["fail_patterns"] < 1) | (df["pass_patterns"] < 1), "rule_status"
+    ] = "Rule Not Tested"
+    df.loc[(df["in_rule_deck"] < 1), "rule_status"] = "Rule Not Implemented"
+    df.loc[
+        ~(df["run_status"].isin(["completed"])), "rule_status"
+    ] = "Test Case Run Failed"
+
+    # Re-applied so that "Rule Not Tested" takes precedence over the
+    # run-status override above.
+    df.loc[
+        (df["not_tested"] > 0) | (df["pass_patterns"] < 1), "rule_status"
+    ] = "Rule Not Tested"
+
+    rule_exp_cond = (
+        (df["fail_patterns"] > 0) & (df["false_negative"] > 0) & (df["not_tested"] > 0)
+    )
+    df.loc[rule_exp_cond, "rule_status"] = "Rule Syntax Exception"
+
+    pass_cond = (
+        (df["pass_patterns"] > 0)
+        & (df["fail_patterns"] > 0)
+        & (df["false_negative"] < 1)
+        & (df["false_positive"] < 1)
+        & (df["in_rule_deck"] > 0)
+    )
+
+    df.loc[pass_cond, "rule_status"] = "Passed"
+    return df
+
+
+def run_regression(drc_dir, output_path, target_table, cpu_count):
+    """
+    Running Regression Procedure.
+
+    This function runs the full regression on all test cases.
+
+    Parameters
+    ----------
+    drc_dir : string
+        Path string to the DRC directory where all the DRC files are located.
+    output_path : str
+        Path string to the location of the output results of the run.
+    target_table : string or None
+        Name of the table that we want to run the regression for. If None, run all found.
+    cpu_count : int
+        Number of cpus to use in running testcases.
+
+    Returns
+    -------
+    bool
+        True if the whole regression passed; False if any rule failed.
+    """
+
+    ## Parse existing rules
+    rules_df = parse_existing_rules(drc_dir, output_path, target_table)
+    logging.info(
+        "## Total number of rules found in rule decks: {}".format(len(rules_df))
+    )
+    logging.info("## Parsed Rules: \n" + str(rules_df))
+
+    ## Get all test cases available in the repo.
+    test_cases_path = os.path.join(drc_dir, "testing/testcases")
+    unit_test_cases_path = os.path.join(test_cases_path, "unit")
+    tc_df = build_tests_dataframe(unit_test_cases_path, target_table)
+    logging.info("## Total table gds files found: {}".format(len(tc_df)))
+    logging.info("## Found testcases: \n" + str(tc_df))
+
+    ## Run all test cases.
+    results_df, tc_df = run_all_test_cases(tc_df, drc_dir, output_path, cpu_count)
+    logging.info("## Testcases found results: \n" + str(results_df))
+    logging.info("## Updated testcases: \n" + str(tc_df))
+
+    ## Aggregate all dataframes into one
+    df = aggregate_results(tc_df, results_df, rules_df)
+    df.drop_duplicates(inplace=True)
+    logging.info("## Final analysis table: \n" + str(df))
+
+    ## Dump the full results for inspection.
+    df.to_csv(os.path.join(output_path, "all_test_cases_results.csv"), index=False)
+
+    ## Check whether any rules generated a false positive or false negative
+    failing_results = df[~df["rule_status"].isin(["Passed"])]
+    logging.info("## Failing test cases: \n" + str(failing_results))
+
+    if len(failing_results) > 0:
+        logging.error("## Some test cases failed.")
+        return False
+    else:
+        logging.info("## All testcases passed.")
+        return True
+
+
+def main(drc_dir: str, output_path: str, target_table: str):
+    """
+    Main Procedure.
+
+    This function is the main execution procedure.
+
+    Parameters
+    ----------
+    drc_dir : str
+        Path string to the DRC directory where all the DRC files are located.
+    output_path : str
+        Path string to the location of the output results of the run.
+    target_table : str or None
+        Name of the table that we want to run the regression for. If None, run all found.
+
+    Returns
+    -------
+    None
+        Exits the process with a non-zero status code if the regression fails.
+    """
+
+    # Number of threads
+    # args and run_name are parsed at module level in the __main__ block below.
+    cpu_count = os.cpu_count() if args["--mp"] is None else int(args["--mp"])
+
+    # Pandas printing setup
+    pd.set_option("display.max_columns", None)
+    pd.set_option("display.max_rows", None)
+    pd.set_option("display.max_colwidth", None)
+    pd.set_option("display.width", 1000)
+
+    # Info logs for args
+    logging.info("## Run folder is: {}".format(run_name))
+    logging.info("## Target Table is: {}".format(target_table))
+
+    # Start of execution time
+    t0 = time.time()
+
+    ## Check Klayout version
+    check_klayout_version()
+
+    # Calling the regression function
+    run_status = run_regression(drc_dir, output_path, target_table, cpu_count)
+
+    # End of execution time
+    logging.info("Total execution time {}s".format(time.time() - t0))
+
+    if run_status:
+        logging.info("Test completed successfully.")
+    else:
+        logging.error("Test failed.")
+        exit(1)
+
+
+# ================================================================
+# -------------------------- MAIN --------------------------------
+# ================================================================
+
+
+if __name__ == "__main__":
+
+    # docopt reader
+    args = docopt(__doc__, version="DRC Regression: 0.2")
+
+    # arguments
+    run_name = args["--run_name"]
+    target_table = args["--table_name"]
+
+    if run_name is None:
+        # logs format
+        run_name = datetime.utcnow().strftime("unit_tests_%Y_%m_%d_%H_%M_%S")
+
+    # Paths of regression dirs
+    testing_dir = os.path.dirname(os.path.abspath(__file__))
+    drc_dir = os.path.dirname(testing_dir)
+    rules_dir = os.path.join(drc_dir, "rule_decks")
+    output_path = os.path.join(testing_dir, run_name)
+
+    # Creating output dir
+    os.makedirs(output_path, exist_ok=True)
+
+    # logs format
+    logging.basicConfig(
+        level=logging.DEBUG,
+        handlers=[
+            logging.FileHandler(os.path.join(output_path, "{}.log".format(run_name))),
+            logging.StreamHandler(),
+        ],
+        format="%(asctime)s | %(levelname)-7s | %(message)s",
+        datefmt="%d-%b-%Y %H:%M:%S",
+    )
+
+    # Calling main function
+    main(drc_dir, output_path, target_table)
diff --git a/klayout/drc/testing/testcases/README.md b/klayout/drc/testing/testcases/README.md
new file mode 100644
index 00000000..e169610a
--- /dev/null
+++ b/klayout/drc/testing/testcases/README.md
@@ -0,0 +1,7 @@
+# DRC Unit Tests
+
+This folder has the following folders:
+* **switch_checking** ( Contains a small test case to be used for testing the DRC switches. )
+* **torture** ( Contains a few large test cases to test the performance of the rule deck. )
+* **unit** ( Contains the unit test structures per rule. Each file contains the test cases of one table. )
+
diff --git a/klayout/drc/testing/testcases/unit/dualgate.gds b/klayout/drc/testing/testcases/unit/dualgate.gds
new file mode 100644
index 00000000..f7622ae2
Binary files /dev/null and b/klayout/drc/testing/testcases/unit/dualgate.gds differ
diff --git a/klayout/drc/testing/testcases/unit/dualgate.svg b/klayout/drc/testing/testcases/unit/dualgate.svg
new file mode 100644
index 00000000..8eeb2475
--- /dev/null
+++ b/klayout/drc/testing/testcases/unit/dualgate.svg
@@ -0,0 +1,349 @@
[SVG markup not reproduced: a 349-line preview of the dualgate unit-test layout. It labels the rule markers DV.1-DV.9 (e.g. "DV.2 MV space = 0.44", "DV.5 Min. width = 0.7") with per-rule error counts and test-pattern tags such as Basic, Touch, Angle45, Hole, Singular, Cor2Edge, Intersect, and Outside.]
diff --git a/requirements.txt b/requirements.txt
index b90fe224..9ea30e59 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,2 +1,7 @@
-flake8
-
+flake8
+docopt
+gdstk
+pandas
+tqdm
+pyyaml
+klayout