Unverified commit c71becce authored by Donn, committed by GitHub

Rewrite `run_designs.py` (#760)

+ run_designs.py now exits with code 2 if any design fails
~ run_designs.py rewritten using Click; the command-line API has changed slightly
~ renamed `addComputedStatistics` to `add_computed_statistics`
- removed `make regression_test`
parent 96670268
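As a rough sketch of how a caller can use the new exit-status contract described above (the tag, thread count, and design name are illustrative, not part of this change):

```python
import subprocess
import sys

# Exit code 2 now means "run_designs.py finished, but at least one design failed";
# any other non-zero code is still treated as a hard error.
result = subprocess.run(
    ["python3", "run_designs.py", "--tag", "smoke", "--threads", "2", "spm"]
)
if result.returncode == 2:
    print("at least one design failed")
elif result.returncode != 0:
    sys.exit(result.returncode)
```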
@@ -127,6 +127,10 @@ if os.getenv("GITHUB_ACTIONS") != "true":
     print("Environment variables required: \"PDK_ROOT\"")
     exit(os.EX_CONFIG)
+if os.getenv("OPENLANE_IMAGE_NAME") is None:
+    print("Environment variables required: \"OPENLANE_IMAGE_NAME\"")
+    exit(os.EX_CONFIG)
 origin = os.getenv("REPO_URL")
 repo = Repo("Openlane", origin)
......
@@ -48,6 +48,7 @@ group = subprocess.check_output(["id", "-g", username]).decode("utf8")[:-1]
 docker_command = [
     "docker", "run",
     "-v", f"{os.path.realpath(gh.root)}:/openlane",
+    "-v", f"{os.path.realpath(gh.root)}/designs:/openlane/install",
     "-v", f"{gh.pdk}:{gh.pdk}",
     # "-u", f"{user}:{group}",
     "-e", f"PDK_ROOT={gh.pdk}",
@@ -56,19 +57,23 @@ docker_command = [
     shlex.join([
         "python3",
         "run_designs.py",
-        "--tarList", "all",
+        "--tar_list", "all",
         "--disable_timestamp",
-        "--designs",
-    ] + design_list + [
         "--tag", test_name,
         "--threads", str(threads_used),
         "--print_rem", "30",
-        "--benchmark", os.path.join("regression_results", "benchmark_results", "SW_HD.csv")
-    ])
+        "--benchmark", os.path.join("regression_results", "benchmark_results", "SW_HD.csv"),
+        "--show_output"
+    ] + design_list)
 ]
 print(f"Running {shlex.join(docker_command)} in {os.getenv('PWD')}")
-subprocess.run(docker_command, check=True)
+try:
+    subprocess.run(docker_command, check=True)
+except subprocess.CalledProcessError as e:
+    if e.returncode != 2:
+        raise e
 cat = lambda x: print(open(x).read())
@@ -77,8 +82,6 @@ results_folder = os.path.join(gh.root, "regression_results", test_name)
 print("Verbose differences within the benchmark:")
 for report in glob.glob(os.path.join(results_folder, f"{test_name}*.rpt")):
     cat(report)
-print("Full report:")
-cat(os.path.join(results_folder, f"{test_name}.csv"))
 design_test_report = os.path.join(results_folder, f"{test_name}.rpt.yml")
 if not os.path.exists(design_test_report):
......
@@ -10,7 +10,6 @@ just a few small guidelines you need to follow.
 - `make fastest_test_set`: runs the same test set that the basic CI uses, which will be used to evaluate your Pull Request.
 - See [this](./regression_results/README.md) for custom test sets (check the `-b` flag).
 - `make test`: tests the flow against one design, `$TEST_DESIGN`. The default is `spm`.
-- `make regression_test`: tests the flow against all available designs, compares the resulting statistics with the benchmark results, and produces a human-readable report and summary. This can take hours and is only really recommended for major changes.
 ## Code reviews
......
@@ -19,7 +19,7 @@ PDK_ROOT ?= $(shell pwd)/pdks
 DOCKER_OPTIONS = $(shell python3 ./env.py docker-config)
 ifneq (,$(DOCKER_SWAP)) # Set to -1 for unlimited
 DOCKER_OPTIONS += --memory-swap=$(DOCKER_SWAP)
 endif
 ifneq (,$(DOCKER_MEMORY))
 DOCKER_OPTIONS += --memory=$(DOCKER_MEMORY)
@@ -195,21 +195,6 @@ mount:
     cd $(OPENLANE_DIR) && \
     $(ENV_START) -ti $(OPENLANE_IMAGE_NAME)
-MISC_REGRESSION_ARGS=
-.PHONY: regression regression_test
-regression_test: MISC_REGRESSION_ARGS=--benchmark $(BENCHMARK)
-regression_test: regression
-regression:
-    cd $(OPENLANE_DIR) && \
-    $(ENV_COMMAND) sh -c "\
-        python3 run_designs.py\
-        --defaultTestSet\
-        --tag $(REGRESSION_TAG)\
-        --threads $(THREADS)\
-        --print $(PRINT_REM_DESIGNS_TIME)\
-        $(MISC_REGRESSION_ARGS)\
-        "
 DLTAG=custom_design_List
 .PHONY: test_design_list fastest_test_set extended_test_set
 fastest_test_set: DESIGN_LIST=$(shell cat ./.github/test_sets/fastest_test_set)
@@ -222,11 +207,11 @@ test_design_list:
     cd $(OPENLANE_DIR) && \
     $(ENV_COMMAND) sh -c "\
         python3 run_designs.py\
-        --designs $(DESIGN_LIST)\
         --tag $(DLTAG)\
         --threads $(THREADS)\
         --print_rem $(PRINT_REM_DESIGNS_TIME)\
         --benchmark $(BENCHMARK)\
+        $(DESIGN_LIST)\
         "
 .PHONY: test
......
@@ -86,19 +86,6 @@ This should produce a clean run for the spm. The final layout will be generated
 If everything is okay, you can skip forward to [running OpenLane](#running-openlane).
-## Running the Regression Test
-To run the regression test, which tests the flow against all available designs under [./designs/](./designs/) and compares the results with the benchmark, run the following command:
-```bash
-make regression_test
-```
-Your results will be compared with: [sky130_fd_sc_hd](https://github.com/The-OpenROAD-Project/OpenLane/blob/master/regression_results/benchmark_results/SW_HD.csv).
-After running, you'll find a new directory under [./regression_results/](./regression_results); it contains all the reports you need to know whether the run was successful. Check [this](./regression_results/README.md#output) for more details.
-**Note**: if `flow_status` is `flow_failed`, the design failed. Any statistics reported after the point of failure are reported as `-1` as well.
 ## Updating OpenLane
 If you already have the repo locally, there is no need to re-clone it. You can directly run the following:
@@ -325,7 +312,7 @@ OpenLane provides `run_designs.py`, a script that can do multiple runs in a para
 Also, it can be used to test the flow by running it against several designs using their best configurations. For example, the following runs spm, xtea, md5, and aes256 using their default configuration files `config.tcl`:
 ```
-python3 run_designs.py --designs spm xtea md5 aes256 --tag test --threads 3
+python3 run_designs.py --tag test --threads 3 spm xtea md5 aes256
 ```
 For more information on how to run this script, refer to this [file][21].
......
@@ -8,7 +8,7 @@ OpenLane provides `run_designs.py`, a script that can do multiple runs in a para
 Also, it can be used to test the flow by running it against several designs using their best configurations. For example, the following runs spm, xtea, des, and aes256 using their default configuration files `config.tcl`:
 ```
-python3 run_designs.py --designs spm xtea des aes256 --tag test --threads 3
+python3 run_designs.py --tag test --threads 3 spm xtea des aes256
 ```
 ## Default Test Set Results:
@@ -22,14 +22,6 @@ You can view the results of the run against some designs (more [here](#usage)) a
 **Note**: `flow_failed` under `flow_status` implies that the run failed.
-To replicate these sheets, run the following command inside the Docker container after setting the proper standard cell library in [../configuration/general.tcl](../configuration/general.tcl):
-```bash
-python3 run_designs.py --defaultTestSet --htmlExtract
-```
-You can control the run by adding more of the flags in this [section](#command-line-arguments).
 ## Usage
 - The list of flags that can be used with run_designs.py is described in [Command line arguments](#command-line-arguments). Check [columns_defintions.md][21] for more details on the reported configuration parameters.
@@ -126,160 +118,6 @@ The script can be used in two ways
 ## Command line arguments
-| Argument | Description |
-| :--- | :--- |
-| `--designs` / `-d design1 design2 design3 ...` (Required) | Specifies the designs to run. Similar to the argument of `./flow.tcl -design`. |
-| `--defaultTestSet` / `-dts` (Boolean) | Ignores the designs flag and runs the default design test set, consisting of all designs under the `../designs/` directory. Default: `False` |
-| `--excluded_designs` / `-e design1 design2 design3 ...` (Optional) | Specifies the designs to exclude from the run. Useful with `--defaultTestSet`. |
-| `--regression` / `-r <file>` (Optional) | Creates configuration files from the parameters in `<file>` and runs them on each design. The generated configuration files are based on each design's default config file, `designs/<design>/config.tcl`, plus the parameters passed in `<file>` (the regression/exploration/configuration script described above). If not specified, none is used and the designs run against their default/specified configs. |
-| `--tag` / `-t <name>` (Optional) | Appends a tag to the log files in `regression_results/` and to the configuration files generated when passing `--regression`. Default: `regression` |
-| `--threads` / `-th <number>` (Optional) | Number of threads. Default: `5` |
-| `--config` / `-c <config>` (Optional) | Defines the configuration file to be used in non-regression mode. Default: `config` |
-| `--configuration_parameters` / `-cp <file>` (Optional) | `<file>` contains the configuration parameters to be printed in the CSV report, given as comma-separated configuration names. If not specified, the default configuration list is used. If followed by "all", all configurations are reported. |
-| `--append_configurations` / `-app` (Boolean) | Whether to print the added configuration_parameters in addition to the defaults. Default: `False` |
-| `--clean` / `-cl` (Boolean) | Whether to delete the tmp directory of each design and move merged_unpadded to the results directory. Default: `False` |
-| `--tar` / `-tar <list>` (Optional) | Lists subdirectories or files under the run directory to be compressed into a `{design}_{tag}.tar.gz` under the runs directory. If the flag is followed by "all", the whole directory is compressed. |
-| `--delete` / `-dl` (Boolean) | Whether to delete the run directory after completion and after reporting the results in the CSV. If used with `--tar`, the compressed files are not deleted because they are placed outside the run directory. Default: `False` |
-| `--benchmark` / `-b <file>` (Optional) | If provided, this run is tested against (compared to) the given benchmark `<file>`. Check the output section above for details of the reported results. |
-| `--print_rem` / `-p <number>` (Optional) | If a `<number>` greater than 0 is provided, a list of the remaining designs is printed to the terminal every `<number>` seconds. |
-| `--disable_timestamp` / `-dt` (Boolean) | If enabled, the output files and tags will not contain the appended timestamp. Default: `False` |
-| `--show_output` / `-so` (Boolean) | If enabled, the full output log from running `./flow.tcl` is displayed in the terminal in real time. If more than one design or more than one configuration is running at the same time, this flag is ignored and no live output is displayed. Default: `False` |
+`python3 ./run_designs.py --help`
 [21]: ./columns_defintions.md
run_designs.py 100644 → 100755
This diff is collapsed.
@@ -96,7 +96,7 @@ class ConfigHandler:
         config = config_coded.decode(sys.getfilesystemencoding()).strip()
         config = config.split("##")
         config = list(filter(None, config))
-        config = [element.strip("{}") for element in config]
+        config = [element.strip('{}"') for element in config]
         return config
     @staticmethod
......
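A minimal sketch of what the widened `strip` set above changes, using hypothetical values of the kind a Tcl config dump might contain (the surrounding ConfigHandler code is not shown in this diff):

```python
# Hypothetical "##"-delimited values after splitting and filtering.
raw = ['{example_path}', '"quoted_value"', '{"8"}']

old = [element.strip("{}") for element in raw]   # old behaviour: braces removed, quotes kept
new = [element.strip('{}"') for element in raw]  # new behaviour: braces and quotes removed

print(old)  # ['example_path', '"quoted_value"', '"8"']
print(new)  # ['example_path', 'quoted_value', '8']
```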
@@ -48,7 +48,7 @@ def cli(design, design_name, tag, run_path, output_file, man_report):
     f.write(report)
     # Adding Extra Attributes computed from configs and reported statistics
-    utils.addComputedStatistics(output_file)
+    utils.add_computed_statistics(output_file)
     # Tracking Magic DRC, LVS, Antenna Logs:
     run_path = run_path or utils.get_run_path(design, tag)
......
@@ -34,20 +34,16 @@ def get_design_path(design):
     return None
 def get_run_path(design, tag):
-    DEFAULT_PATH = os.path.join(
+    return os.path.join(
         get_design_path(design),
-        'runs/{tag}/'.format(
-            tag=tag
-        )
+        "runs",
+        tag
     )
-    return DEFAULT_PATH
 def get_design_name(design, config):
     design_path= get_design_path(design=design)
     if design_path is None:
-        print("{design} not found, skipping...".format(design=design))
-        return "[INVALID]: design path doesn't exist"
+        return ("Design path not found", None)
     config_file = "{design_path}/{config}.tcl".format(
         design_path=design_path,
         config=config,
@@ -58,13 +54,13 @@ def get_design_name(design, config):
         config_file_opener.close()
         pattern = re.compile(r'\s*?set ::env\(DESIGN_NAME\)\s*?(\S+)\s*')
         for name in re.findall(pattern, configs):
-            return name.replace("\"","")
+            return (None, name.strip('"{}'))
-        return "[INVALID]: design name doesn't exist inside the config file!"
+        return ("Invalid configuration file", None)
     except OSError:
-        return "[INVALID]: design config doesn't exist"
+        return ("Configuration file not found", None)
-# addComputedStatistics adds: CellPerMMSquaredOverCoreUtil, suggested_clock_period, and suggested_clock_frequency to a report.csv
-def addComputedStatistics(filename):
+# add_computed_statistics adds: CellPerMMSquaredOverCoreUtil, suggested_clock_period, and suggested_clock_frequency to a report.csv
+def add_computed_statistics(filename):
     data = pd.read_csv(filename)
     df = pd.DataFrame(data)
......
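`get_design_name` now returns an `(error, design_name)` tuple instead of a string that may or may not be an error marker. Below is a sketch of how a caller might consume it; the real call sites live in the collapsed `run_designs.py` diff and may differ, and `resolve_design` is a hypothetical helper:

```python
import utils

def resolve_design(design, config="config"):
    # error is one of "Design path not found", "Invalid configuration file",
    # "Configuration file not found", or None on success.
    error, design_name = utils.get_design_name(design, config)
    if error is not None:
        print(f"{design}: {error}, skipping...")
        return None
    return design_name
```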