# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing each execution in each language."""
import os
import pytest
from babelcode import code_generator
from babelcode import data_types
from babelcode.data_types.prediction import Prediction
from babelcode.data_types.question import Question
from babelcode.execution import execute_code
from babelcode.schema_parsing import parsing
from tests import utils as testing_utils
def setup_module(_):
"""Setup the environment so execution is allowed."""
os.environ['ALLOW_EXECUTION'] = 'true'
def teardown_module(_):
"""Disable execution on teardown."""
os.environ['ALLOW_EXECUTION'] = 'false'
@pytest.mark.parametrize('lang_name', testing_utils.LANGS_TO_TEST)
class TestLanguageExecution(testing_utils.BaseLanguageTestingClass):
"""Unit-tests for language execution."""
def _make_schema(self, params, return_type):
return {'params': params, 'return': {'type': return_type}}
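# Illustrative example (not part of the test suite): the helper mirrors the
# raw question schema format, e.g.
#   self._make_schema([{'name': 'arg0', 'type': 'boolean'}], 'string')
# returns:
#   {'params': [{'name': 'arg0', 'type': 'boolean'}],
#    'return': {'type': 'string'}}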
def _setup_test(self, lang_name):
super()._setup_test(lang_name)
self.prompt_translator = self.lang.make_prompt_translator()
def make_executable_code(
self,
tmp_path,
question,
code_return_value,
use_type_annotations: bool = True,
):
"""Helper function to make the temporary code for execution.
Args:
tmp_path: Path to the temporary directory.
question: The question to test.
code_return_value: The value the code should return.
use_type_annotations: Use type annotations in the signature.
Returns:
The path to the written code and the prediction code.
"""
# Get the schema so we can get the return type of the expected outputs.
schema, inputs = parsing.parse_schema_and_input_order(
self.schema_spec, question.schema)
return_value = self.literal_translator.convert_var_to_literal(
schema[data_types.EXPECTED_KEY_NAME], code_return_value)
out_code_path = tmp_path.joinpath(f'code.{self.lang.file_ext}')
with out_code_path.open('w') as f:
input_code = self.lang_spec.func_template_path.read_text()
signature = self.prompt_translator.translate_signature_with_docstring(
'Python',
'Test break /**/ // */ -- """ #',
'test',
'Solution',
schema,
inputs,
use_type_annotations,
)
input_code = input_code.replace('FN_SIGNATURE', signature)
input_code = input_code.replace('RETURN_VALUE', return_value)
code = code_generator.generate_code_for_question(
question,
schema,
input_order=inputs,
literal_translator=self.literal_translator,
prompt_translator=self.prompt_translator,
template_map=self.template_map,
)
code = code.replace('PLACEHOLDER_CODE_BODY', input_code)
code = code.replace('PLACEHOLDER_FN_NAME', self.lang_spec['entry_point'])
code = code.replace('PLACEHOLDER_CLS_NAME',
self.lang_spec.get('entry_cls', ''))
print(code)
f.write(code)
return out_code_path, input_code
@pytest.mark.parametrize('is_correct', [True, False],
ids=['correct', 'wrong'])
def test_map_equality(self, lang_name, is_correct, tmp_path):
self._setup_test(lang_name)
question = Question(
qid=0,
title='testing',
schema=self._make_schema(
[
{
'name': 'arg0',
'type': 'boolean'
},
],
'map<string;list<double>>',
),
test_list=[
dict(idx=0, inputs={'arg0': True}, outputs={'A': [1.234567]}),
],
entry_fn_name='test',
)
if is_correct:
output = {'A': [1.234567]}
else:
output = {'A': [1.234549]}
code_path, pred_code = self.make_executable_code(tmp_path, question, output)
result = execute_code(
prediction=Prediction('1', '0', lang_name, pred_code, code_path),
commands=self.lang.command_fn(code_path),
)
print(result.stderr)
assert not result.timed_out
assert result.return_code == 0
assert result.stdout == f'TEST-0...{"PASSED" if is_correct else "FAILED"}\n'
assert not result.stderr
@pytest.mark.parametrize('is_correct', [True, False],
ids=['correct', 'wrong'])
def test_list_equality(self, lang_name, is_correct, tmp_path):
self._setup_test(lang_name)
question = Question(
qid=0,
title='testing',
schema=self._make_schema([{
'name': 'arg0',
'type': 'boolean'
}], 'list<map<string;boolean>>'),
test_list=[
dict(idx=0, inputs={'arg0': True}, outputs=[{
'A': False
}]),
],
entry_fn_name='test',
)
if is_correct:
output = [{'A': False}]
else:
output = [{'A': True}]
code_path, pred_code = self.make_executable_code(tmp_path, question, output)
result = execute_code(
prediction=Prediction('1', '0', lang_name, pred_code, code_path),
commands=self.lang.command_fn(code_path),
)
assert not result.timed_out
assert result.return_code == 0
assert result.stdout == f'TEST-0...{"PASSED" if is_correct else "FAILED"}\n'
assert not result.stderr
@pytest.mark.parametrize('is_correct', [True, False],
ids=['correct', 'wrong'])
def test_set_equality(self, lang_name, is_correct, tmp_path):
self._setup_test(lang_name)
question = Question(
qid=0,
title='testing',
schema=self._make_schema([{
'name': 'arg0',
'type': 'set<string>'
}], 'set<integer>'),
test_list=[
dict(idx=0, inputs={'arg0': ['1', '2']}, outputs=[1, 2, 2]),
],
entry_fn_name='test',
)
if is_correct:
output = [2, 1]
else:
output = [3, 2]
code_path, pred_code = self.make_executable_code(tmp_path, question, output)
result = execute_code(
prediction=Prediction('1', '0', lang_name, pred_code, code_path),
commands=self.lang.command_fn(code_path),
)
assert not result.timed_out
assert result.return_code == 0
assert result.stdout == f'TEST-0...{"PASSED" if is_correct else "FAILED"}\n'
assert not result.stderr
@pytest.mark.parametrize(
'type_str',
['set<integer>', 'map<string;integer>', 'list<string>', 'string'],
)
def test_null_equality(self, lang_name, type_str, tmp_path):
self._setup_test(lang_name)
question = Question(
qid=0,
title='testing',
schema=self._make_schema([{
'name': 'arg0',
'type': type_str
}], type_str),
test_list=[
dict(idx=0, inputs={'arg0': None}, outputs=None),
],
entry_fn_name='test',
)
code_path, pred_code = self.make_executable_code(tmp_path, question, None)
result = execute_code(
prediction=Prediction('1', '0', lang_name, pred_code, code_path),
commands=self.lang.command_fn(code_path),
)
assert not result.timed_out
assert result.return_code == 0
assert result.stdout == 'TEST-0...PASSED\n'
assert not result.stderr
@pytest.mark.parametrize('is_correct', [True, False],
ids=['correct', 'wrong'])
@pytest.mark.parametrize(
'primitive',
[('string', '1,2'), ('character', '1')],
ids=['string', 'char'],
)
def test_primitive_equality(self, lang_name, is_correct, primitive, tmp_path):
self._setup_test(lang_name)
primitive_type, primitive_value = primitive
question = Question(
qid=0,
title='testing',
schema=self._make_schema([{
'name': 'arg0',
'type': 'boolean'
}], primitive_type),
test_list=[
dict(idx=0, inputs={'arg0': True}, outputs='2'),
],
entry_fn_name='test',
)
code_path, pred_code = self.make_executable_code(
tmp_path, question, '2' if is_correct else primitive_value)
result = execute_code(
prediction=Prediction('1', '0', lang_name, pred_code, code_path),
commands=self.lang.command_fn(code_path),
)
assert not result.timed_out
assert result.return_code == 0
assert result.stdout == f'TEST-0...{"PASSED" if is_correct else "FAILED"}\n'
assert not result.stderr
@pytest.mark.parametrize('is_correct', [True, False],
ids=['correct', 'wrong'])
def test_float_equality(self, lang_name, is_correct, tmp_path):
self._setup_test(lang_name)
question = Question(
qid=0,
title='testing',
schema=self._make_schema([{
'name': 'arg0',
'type': 'boolean'
}], 'float'),
test_list=[
dict(idx=0, inputs={'arg0': True}, outputs=float('0.0001202')),
],
entry_fn_name='test',
)
code_path, pred_code = self.make_executable_code(
tmp_path,
question,
float('0.0001203') if is_correct else float('0.0001302'),
)
result = execute_code(
prediction=Prediction('1', '0', lang_name, pred_code, code_path),
commands=self.lang.command_fn(code_path),
)
assert not result.timed_out
assert result.return_code == 0
assert result.stdout == f'TEST-0...{"PASSED" if is_correct else "FAILED"}\n'
assert not result.stderr
@pytest.mark.parametrize('is_correct', [True, False],
ids=['correct', 'wrong'])
def test_double_equality(self, lang_name, is_correct, tmp_path):
self._setup_test(lang_name)
question = Question(
qid=0,
title='testing',
schema=self._make_schema([{
'name': 'arg0',
'type': 'boolean'
}], 'double'),
test_list=[
dict(idx=0, inputs={'arg0': True}, outputs=float('0.0000001202')),
],
entry_fn_name='test',
)
code_path, pred_code = self.make_executable_code(
tmp_path,
question,
float('0.0000001203') if is_correct else float('0.0000001302'),
)
result = execute_code(
prediction=Prediction('1', '0', lang_name, pred_code, code_path),
commands=self.lang.command_fn(code_path),
)
assert not result.timed_out
assert result.return_code == 0
assert result.stdout == f'TEST-0...{"PASSED" if is_correct else "FAILED"}\n'
assert not result.stderr
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for non-language code generator functionality."""
import copy
from typing import Any, Dict, Optional, Union
import pytest
from babelcode import data_types
from babelcode import schema_parsing
SchemaType = schema_parsing.SchemaType
# Define these test cases up here to not clutter up the parameterization.
SCHEMA_TYPE_TCS = {}
SCHEMA_TYPE_TCS['primitive'] = {
'str': 'integer',
'schema': SchemaType(type_str='integer'),
}
SCHEMA_TYPE_TCS['primitive_ds_single'] = {
'str':
'string[]',
'schema':
SchemaType(type_str='list', elements=[SchemaType(type_str='string')]),
}
SCHEMA_TYPE_TCS['primitive_nested_ds'] = {
'str':
'list<tuple<boolean>>',
'schema':
SchemaType(
type_str='list',
elements=[SchemaType('list', elements=[SchemaType('boolean')])],
),
}
SCHEMA_TYPE_TCS['double_brace'] = {
'str':
'integer[][]',
'schema':
SchemaType(
type_str='list',
elements=[SchemaType('list', elements=[SchemaType('integer')])],
),
}
SCHEMA_TYPE_TCS['map_nested_list'] = {
'str':
'map<string;list<integer>>',
'schema':
SchemaType(
type_str='map',
key_type=SchemaType('string'),
elements=[
SchemaType(type_str='list', elements=[SchemaType('integer')])
],
),
}
SCHEMA_TYPE_TCS['nested_tuple'] = {
'str':
'tuple<tuple<string|string>|tuple<integer>>',
'schema':
SchemaType(
type_str='tuple',
elements=[
SchemaType(
type_str='list',
elements=[
SchemaType('string'),
],
),
SchemaType(type_str='list', elements=[SchemaType('integer')]),
],
),
}
class TestSchemaType:
@pytest.mark.parametrize('schema_name', list(SCHEMA_TYPE_TCS))
def test_from_generic_type_str(self, schema_name: str):
"""Test parsing the generic type strings."""
input_str = SCHEMA_TYPE_TCS[schema_name]['str']
expected = SCHEMA_TYPE_TCS[schema_name]['schema']
result = SchemaType.from_generic_type_string(input_str)
assert result == expected
def test_depth(self):
"""Test getting depth of schema type."""
schema_type = SchemaType.from_generic_type_string(
'map<string;list<integer>>')
assert schema_type.depth == 2
@pytest.mark.parametrize(
['left', 'right', 'expected_str'],
[
['float', 'double', 'double'],
['integer', 'double', 'double'],
['float', 'integer', 'float'],
['integer', 'long', 'long'],
['long', 'double', 'double'],
['list<double>', 'list<float>', 'list<double>'],
['map<string;float>', 'map<string;double>', 'map<string;double>'],
['tuple<float|double>', 'tuple<double|float>', 'list<double>'],
['string', 'integer', None],
['string', 'character', 'string'],
['list<character>', 'list<string>', 'list<string>'],
],
)
def test_reconcile_type(left: str, right: str, expected_str: Optional[str]):
"""Test reconciliation of types."""
left = SchemaType.from_generic_type_string(left)
right = SchemaType.from_generic_type_string(right)
result = schema_parsing.reconcile_type(left, right)
if expected_str is not None:
assert result.to_generic() == expected_str
expected = SchemaType.from_generic_type_string(expected_str)
else:
expected = None
assert result == expected
@pytest.mark.parametrize(
['type_str', 'value'],
[
('list<list<map<string;integer>>>', [[{
'A': 1
}]]),
('list<integer>', []),
('string', ''),
],
ids=['list_list_map', 'null_list', 'empty_string'],
)
def test_validate_correct_type_valid(type_str: str, value: Any):
"""Test basic validation where no changes occur."""
schema = SchemaType.from_generic_type_string(type_str)
result = schema_parsing.validate_correct_type(schema, copy.deepcopy(value))
# Make sure no changes happen
assert result == value
@pytest.mark.parametrize(
['type_str', 'value', 'expected'],
[
('list<integer>', None, []),
('map<integer;integer>', {
'1': 1
}, {
1: 1
}),
('double', 1, 1.0),
],
ids=['empty_list', 'cast_int_key', 'int_to_float'],
)
def test_validate_correct_type_conversions(type_str, value, expected):
"""Test validation when the value must be modified."""
schema = SchemaType.from_generic_type_string(type_str)
result = schema_parsing.validate_correct_type(schema, copy.deepcopy(value))
# Make sure the values were converted as expected.
assert result == expected
@pytest.mark.parametrize(
['type_str', 'value'],
[
('list<integer,integer>', [1, 1]),
('integer', 'String'),
('integer', None),
('set<integer>', {'hello'}),
('map<string;integer>', {
(1,): 1
}),
('map<string;integer>', {
'hello': 'hello'
}),
('list<integer>', [1, 'hello']),
],
ids=[
'multiple_elements',
'incorrect_type',
'non_null',
'set_invalid_element',
'map_invalid_key',
'map_invalid_element',
'list_multiple_types',
],
)
def test_validate_correct_type_invalid(type_str: str, value: Any):
"""Test cases where validating types should fail."""
schema = SchemaType.from_generic_type_string(type_str)
with pytest.raises(schema_parsing.SchemaTypeError):
schema_parsing.validate_correct_type(schema, value)
@pytest.mark.parametrize('iterable_type', ['set', 'list'])
def test_validate_correct_type_convert_iterable(iterable_type: str):
"""Test that the value conversion works for lists."""
type_str = f'{iterable_type}<float>'
schema = SchemaType.from_generic_type_string(type_str)
result = schema_parsing.validate_correct_type(schema, [1, 1.0, 2.0, 3])
assert result == [1.0, 1.0, 2.0, 3.0]
@pytest.mark.parametrize(
['key_type', 'expected'],
[('string', {
'1': 2.0,
'3': 4.0
}), ('integer', {
1: 2.0,
3: 4.0
})],
ids=['str_keys', 'int_keys'],
)
def test_validate_correct_type_convert_dict(key_type: str,
expected: Dict[Union[str, int],
Union[int, float]]):
"""Test that the value conversion works for dicts."""
type_str = f'map<{key_type};float>'
schema = SchemaType.from_generic_type_string(type_str)
result = schema_parsing.validate_correct_type(schema, {'1': 2, 3: 4.0})
assert result == expected
@pytest.mark.parametrize(
['left', 'right', 'expected'],
[
['list<null>', 'list<integer>', True],
['list<null>', 'list<list<integer>>', True],
['map<string;integer>', 'map<integer;string>', False],
['integer', 'boolean', False],
['map<string;integer>', 'map<string;integer>', True],
['tuple<string|integer>', 'tuple<string>', False],
['string', 'null', True],
['null', 'string', True],
],
)
def test_generic_equal(left: str, right: str, expected: bool):
"""Test the generic equivalence function."""
left = SchemaType.from_generic_type_string(left)
right = SchemaType.from_generic_type_string(right)
assert schema_parsing.is_generic_equal(left, right) == expected
assert schema_parsing.is_generic_equal(right, left) == expected
# Testing that types are generically equal to themselves.
assert schema_parsing.is_generic_equal(left, left)
assert schema_parsing.is_generic_equal(right, right)
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the SchemaType class and functionality."""
# Because of how pytest fixtures work, this error will be incorrectly triggered,
# so disable it for the file here. Pytest Fixture docs:
# https://docs.pytest.org/en/6.2.x/fixture.html
# pylint:disable=redefined-outer-name
from typing import Any, Dict
import pytest
from babelcode import data_types
from babelcode import schema_parsing
PRIMITIVE_MAP = {
'boolean': 'Bool',
'integer': 'Int',
'character': 'Char',
'float': 'Float',
'double': 'Double',
'long': 'Long',
'string': 'String',
}
@pytest.fixture(scope='module')
def lang_spec():
"""Dummy specifications for testing."""
yield schema_parsing.LanguageSchemaSpec(
name='Testing',
primitive_lang_map=PRIMITIVE_MAP,
format_list_type=lambda t: f'vector({t})',
format_map_type=lambda k, v: f'map({k},{v})',
format_set_type=lambda t: f'set({t})',
)
# Use the Any type here as depth is not known.
def make_schema_type(
type_str: str, language_type: Dict[str, Any]) -> schema_parsing.SchemaType:
"""Helper function to make the schema type."""
schema_type = schema_parsing.SchemaType.from_generic_type_string(type_str)
# Helper function to recursively set the language type.
def recurse_set_lang_type(
current_type: schema_parsing.SchemaType,
lang_type: Dict[str, Any]) -> schema_parsing.SchemaType:
current_type.lang_type = lang_type[data_types.EXPECTED_KEY_NAME]
if 'elements' in lang_type:
for i, element in enumerate(lang_type['elements']):
current_type.elements[i] = recurse_set_lang_type(
current_type.elements[i], element)
if 'key_type' in lang_type:
current_type.key_type = recurse_set_lang_type(current_type.key_type,
lang_type['key_type'])
return current_type
return recurse_set_lang_type(schema_type, language_type)
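# Illustrative example (mirrors the fixtures below): for a primitive type,
#   make_schema_type('string', {data_types.EXPECTED_KEY_NAME: 'String'})
# yields the SchemaType parsed from 'string' with its lang_type set to
# 'String'.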
@pytest.mark.parametrize(['input_type', 'expected'],
list(PRIMITIVE_MAP.items()))
def test_parse_schema_and_input_order_primitives(
input_type: str, expected: str,
lang_spec: schema_parsing.LanguageSchemaSpec):
"""Test that parsing the schema for primitives."""
input_schema = {
'params': [{
'name': 'arg0',
'type': input_type
},],
'return': {
'type': input_type
},
}
parsed_schema, input_order = schema_parsing.parse_schema_and_input_order(
lang_spec, input_schema)
expected_schema = {
'arg0':
make_schema_type(input_type,
{data_types.EXPECTED_KEY_NAME: expected}),
data_types.EXPECTED_KEY_NAME:
make_schema_type(input_type,
{data_types.EXPECTED_KEY_NAME: expected}),
}
assert parsed_schema == expected_schema
assert input_order == ['arg0']
# Define these up here for ease of use.
LIST_LANG_TYPE = {
data_types.EXPECTED_KEY_NAME: 'vector(String)',
'elements': [{
data_types.EXPECTED_KEY_NAME: 'String'
}],
}
LIST_MAP_LANG_TYPE = {
data_types.EXPECTED_KEY_NAME:
'vector(map(String,Int))',
'elements': [{
data_types.EXPECTED_KEY_NAME: 'map(String,Int)',
'elements': [{
data_types.EXPECTED_KEY_NAME: 'Int'
}],
'key_type': {
data_types.EXPECTED_KEY_NAME: 'String'
},
}],
}
SET_LANG_TYPE = {
data_types.EXPECTED_KEY_NAME: 'set(String)',
'elements': [{
data_types.EXPECTED_KEY_NAME: 'String'
}],
}
@pytest.mark.parametrize(
['input_type', 'expected'],
[
['list<string>', LIST_LANG_TYPE],
['list<map<string;integer>>', LIST_MAP_LANG_TYPE],
['set<string>', SET_LANG_TYPE],
],
ids=['list', 'list_map', 'set'],
)
def test_parse_schema_and_input_order_data_structures(input_type, expected,
lang_spec):
"""Test that parsing the schema for data structures."""
input_schema = {
'params': [{
'name': 'arg0',
'type': input_type
},],
'return': {
'type': input_type
},
}
parsed_schema, input_order = schema_parsing.parse_schema_and_input_order(
lang_spec, input_schema)
expected_schema = {
'arg0': make_schema_type(input_type, expected),
data_types.EXPECTED_KEY_NAME: make_schema_type(input_type, expected),
}
assert parsed_schema == expected_schema
assert input_order == ['arg0']
def test_parse_schema_and_input_order_mixed_types(lang_spec):
"""Test that parsing the schema when there are multiple types."""
input_schema = {
'params': [
{
'name': 'arg0',
'type': 'map<string;boolean>'
},
{
'name': 'arg1',
'type': 'long'
},
],
'return': {
'type': 'float'
},
}
parsed_schema, input_order = schema_parsing.parse_schema_and_input_order(
lang_spec, input_schema)
expected_schema = {
'arg0':
make_schema_type(
'map<string;boolean>',
{
data_types.EXPECTED_KEY_NAME: 'map(String,Bool)',
'key_type': {
data_types.EXPECTED_KEY_NAME: 'String'
},
'elements': [{
data_types.EXPECTED_KEY_NAME: 'Bool'
}],
},
),
'arg1':
make_schema_type('long', {data_types.EXPECTED_KEY_NAME: 'Long'}),
data_types.EXPECTED_KEY_NAME:
make_schema_type('float', {data_types.EXPECTED_KEY_NAME: 'Float'}),
}
assert parsed_schema == expected_schema
assert input_order == ['arg0', 'arg1']
def test_parse_schema_and_input_order_unsupported(lang_spec):
"""Test that an error is raised when an unsupported type is passed."""
input_schema = {
'params': [{
'name': 'arg0',
'type': 'tuple<string|integer>'
},],
'return': {
'type': 'float'
},
}
with pytest.raises(schema_parsing.SchemaTypeError):
_ = schema_parsing.parse_schema_and_input_order(lang_spec, input_schema)
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Makes the validation predictions for testing that the literals can be parsed correctly."""
import collections
import json
import pathlib
import sys
from absl import app
from absl import flags
PROJECT_ROOT = pathlib.Path().parent
if str(pathlib.Path(__file__).parents[1]) not in sys.path:
sys.path.insert(0, str(pathlib.Path(__file__).parents[1]))
from babelcode import QUESTION_DATA_KEYS
from babelcode import data_types
from babelcode import languages
from babelcode import schema_parsing
_NAME = flags.DEFINE_string('name',
None,
required=True,
help="Name of the dataset this is for validating.")
_TEST_CODE_PATH = flags.DEFINE_string('problem_code_path',
None,
required=True,
help="Path to where the problem code is.")
_OUT_PATH = flags.DEFINE_string('output_path',
None,
required=True,
help="Path to save.")
_ADD_TEST_DATA = flags.DEFINE_bool("add_test_data",
False,
help="Add Testing data to the predictions")
_ONLY_LANG = flags.DEFINE_string(
"language",
default=None,
help="Only Make validation predictions in this language.")
def make_pred_dict(qid, code, lang):
return {'qid': qid, 'code': code, 'language': lang}
def group_by_lang(generator):
out = collections.defaultdict(dict)
for line in generator:
out[line['language']][line['qid']] = line
return out
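# Illustrative shape (assumed minimal line contents): given JSONL lines such
# as {'language': 'Python', 'qid': '1', ...}, group_by_lang produces
#   {'Python': {'1': {'language': 'Python', 'qid': '1', ...}}, ...}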
def main(_):
ds_name = _NAME.value
print(f'Making validation predictions for {ds_name}')
fixtures_path = PROJECT_ROOT.joinpath('test_fixtures', 'language_data')
test_code = pathlib.Path(_TEST_CODE_PATH.value)
prompt_info = group_by_lang(
map(json.loads,
test_code.joinpath('prompt_info.jsonl').open()))
question_info = group_by_lang(
map(json.loads,
test_code.joinpath('testing_code.jsonl').open()))
assert set(prompt_info.keys()) == set(question_info.keys())
print(f'Languages found: {list(prompt_info)}')
validation_preds = []
print('Checking for golden predictions')
golden_path = PROJECT_ROOT.joinpath('data', 'golden_predictions',
f'{ds_name}.jsonl')
if golden_path.exists():
print('Golden Predictions found')
golden_predictions = group_by_lang(map(json.loads, golden_path.open()))
for lang, preds in golden_predictions.items():
print(f'Replacing validation preds for {lang} with golden...')
for qid, pred in preds.items():
validation_preds.append(pred)
if qid in prompt_info[lang]:
prompt_info[lang].pop(qid)
for lang, prompt_map in prompt_info.items():
if _ONLY_LANG.value and lang != _ONLY_LANG.value:
continue
if not prompt_map:
continue
print(f'Generating {len(prompt_map)} validation predictions for {lang}...')
language = languages.LanguageRegistry.get_language(lang)
translator = language.make_literal_translator()
func_template = fixtures_path.joinpath(lang,
'func_template.txt').read_text()
question_map = question_info[lang]
language_spec = schema_parsing.LanguageSchemaSpecRegistry.get_lang_spec(
lang)
for qid, prompt in prompt_map.items():
question_data = question_map[qid]
schema, _ = schema_parsing.parse_schema_and_input_order(
language_spec, question_data['schema'])
return_type = schema[data_types.EXPECTED_KEY_NAME]
return_value = question_data['test_list'][0]['outputs']
return_code = translator.convert_var_to_literal(return_type, return_value)
signature = prompt['signature_with_docstring'] or prompt['signature']
input_code = func_template.replace('FN_SIGNATURE', signature)
input_code = input_code.replace('RETURN_VALUE', return_code)
p_dict = make_pred_dict(qid, input_code, lang)
if _ADD_TEST_DATA.value:
for k in QUESTION_DATA_KEYS:
p_dict[k] = question_data[k]
validation_preds.append(p_dict)
out_path = pathlib.Path(_OUT_PATH.value)
if not out_path.exists():
out_path.mkdir(parents=True)
print(f'Saving {len(validation_preds)} to {out_path}')
with out_path.joinpath(f'{ds_name}.jsonl').open('w') as f:
f.write('\n'.join(map(json.dumps, validation_preds)))
if __name__ == "__main__":
app.run(main)
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Makes error code from a evaluation result for debugging."""
import argparse
import collections
import json
import random
import shutil
import sys
from pathlib import Path
PROJECT_ROOT = Path().parent
if str(Path(__file__).parents[1]) not in sys.path:
sys.path.insert(0, str(Path(__file__).parents[1]))
from babelcode.languages import LanguageRegistry
defaultdict = collections.defaultdict
parser = argparse.ArgumentParser(
description='Makes error code from an evaluation result for debugging.')
parser.add_argument('dataset', help='Name of the dataset.')
parser.add_argument('pred_results', type=Path, help='Prediction Results File.')
parser.add_argument('output_path', type=Path, help='Path to save results.')
parser.add_argument('--n_per_question',
'-n',
type=int,
default=1,
help='Number of predictions to generate per question.')
parser.add_argument('--num_questions',
'-q',
type=int,
default=-1,
help='Max number of questions to print.')
parser.add_argument('--seed', type=int)
parser.add_argument(
'--include_failed',
default='',
help='Comma-separated list of languages for which to include predictions '
'that failed tests.'
)
OUTCOME_TO_WRITE = ['Had Error', 'Had Runtime Error', 'Timed Out']
def main(dataset, pred_results, output_path: Path, num_to_save_per_question,
num_questions_to_print, lang_include_failed):
dataset_path = PROJECT_ROOT.joinpath('data', 'problem_code', dataset,
'testing_code.jsonl')
testing_code = {
f'{l["language"]}/{l["qid"]}': l
for l in map(json.loads, dataset_path.open())
}
lang_include_failed = lang_include_failed.split(',')
pred_failures_by_question = defaultdict(lambda: defaultdict(dict))
for l in map(json.loads, pred_results.open()):
outcome = l['outcome']
if outcome not in OUTCOME_TO_WRITE:
if outcome != 'Failed Tests':
continue
else:
if l['language'] not in lang_include_failed:
continue
lang, qid = l['language'], l['qid']
if outcome not in pred_failures_by_question[lang][qid]:
pred_failures_by_question[lang][qid][outcome] = []
pred_failures_by_question[lang][qid][outcome].append(l)
print((
f'{sum(map(len,pred_failures_by_question.values()))} questions found with'
+ f' an error across {len(pred_failures_by_question)} languages'))
output_path = output_path.joinpath(dataset)
if output_path.exists():
shutil.rmtree(output_path)
output_path.mkdir(parents=True)
for lang_name, question_errors in pred_failures_by_question.items():
print('\n' + '=' * 80)
print(lang_name)
lang_path = output_path.joinpath(lang_name)
lang_path.mkdir()
language = LanguageRegistry.get_language(lang_name)
qids_to_print = list(question_errors)
if num_questions_to_print != -1:
qids_to_print = random.sample(qids_to_print,
k=min(len(qids_to_print),
num_questions_to_print))
for qid in qids_to_print:
outcomes = question_errors[qid]
print(f'{qid=}')
print('# Outcomes by type:')
for outcome in OUTCOME_TO_WRITE:
print(f'\t{outcome} = {len(outcomes.get(outcome,[]))}')
test_code_data = testing_code[f'{lang_name}/{qid}']
test_code = test_code_data['test_code']
test_code = test_code.replace('PLACEHOLDER_FN_NAME',
test_code_data['entry_fn_name'])
test_code = test_code.replace('PLACEHOLDER_CLS_NAME',
test_code_data['entry_cls_name'])
for outcome, q_list in outcomes.items():
to_save = random.sample(q_list,
k=min(num_to_save_per_question, len(q_list)))
for p in to_save:
filename = f'{p["qid"]}_{p["id"]}_{p["outcome"].replace(" ","_")}'
print(f'Saving {filename}')
with lang_path.joinpath(f'stderr.{filename}').open('w') as f:
f.write(p['stderr'])
with lang_path.joinpath(f'{filename}.{language.file_ext}').open(
'w') as f:
code = test_code.replace('PLACEHOLDER_CODE_BODY', p['code'])
f.write(code)
if __name__ == '__main__':
args = parser.parse_args()
random.seed(args.seed)
main(args.dataset, args.pred_results, args.output_path, args.n_per_question,
args.num_questions, args.include_failed)
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Makes the data files for use in HF datasets. """
import argparse
import collections
import json
import shutil
from pathlib import Path
import sys
try:
from babelcode import languages
except ImportError:
sys.path.append(str(Path(__file__).parent))
from babelcode import languages
parser = argparse.ArgumentParser("Make HF Datasets")
parser.add_argument("data_path", type=str, help="Path to the parsed datasets.")
parser.add_argument("output_path", type=str, help="Path to save to.")
SUPPORTED_DS = {"human_eval", "mbpp", "tp3", "transcoder"}
PROMPT_KEYS_TO_KEEP = {
"signature", "signature_with_docstring", "text", "entry_fn_name",
"entry_cls_name", "arguments"
}
CODE_KEYS_TO_KEEP = {"title", "test_code", "test_list", "test_case_ids"}
RAW_QUESTION_DIR = Path("data/raw_datasets")
def main(args):
data_path = Path(args.data_path)
out_path = Path(args.output_path)
shutil.rmtree(out_path, ignore_errors=True)
out_path.mkdir()
print(f"Creating HF Datasets from parsed datasets located at '{data_path}'")
for dir_found in data_path.glob("*"):
code_map = collections.defaultdict(dict)
prompt_map = collections.defaultdict(dict)
print(f"Parsing {dir_found}")
ds_name = dir_found.stem
if ds_name not in SUPPORTED_DS:
print(f"{ds_name} is not supported...")
continue
raw_question_dir = RAW_QUESTION_DIR.joinpath(f'{ds_name}_questions.jsonl')
question_solutions = {}
for l in map(json.loads, raw_question_dir.open()):
question_solutions[l['id']] = {'solution_python': l['solution']}
if 'other_lang_solutions' in l:
for lang, s in l['other_lang_solutions'].items():
if lang == "C++":
lang = 'cpp'
question_solutions[l['id']][f'solution_{lang}'] = s
for line in map(json.loads,
dir_found.joinpath("testing_code.jsonl").open()):
code_map[line['language']][line['qid']] = line
for line in map(json.loads, dir_found.joinpath("prompt_info.jsonl").open()):
prompt_map[line['language']][line['qid']] = line
assert set(prompt_map.keys()) == set(code_map.keys())
out = []
for language in prompt_map.keys():
prompts = prompt_map[language]
codes = code_map[language]
assert set(codes.keys()) == set(prompts.keys())
for q in codes.keys():
lang = languages.LanguageRegistry.get_language(language)
command = lang.command_fn(Path("__FILENAME__"))
q_dict = {"qid": q, "language": language, "extension": lang.file_ext}
q_dict['commands'] = [c.command for c in command]
q_dict['timeouts'] = [c.timeout for c in command]
for k in PROMPT_KEYS_TO_KEEP:
q_dict[k] = prompts[q][k]
for k in CODE_KEYS_TO_KEEP:
q_dict[k] = codes[q][k]
q_dict.update(question_solutions[q])
out.append(q_dict)
print(f"Found {len(out)} questions")
with out_path.joinpath(f'{ds_name}.jsonl').open('w') as f:
for p in out:
f.write(json.dumps(p) + '\n')
if __name__ == "__main__":
main(parser.parse_args())
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metric functions."""
import collections
import copy
from typing import Any, Dict, List, Tuple
import gin
import numpy as np
from absl import logging
from babelcode import data_types
from babelcode.utils import set_seed
QuestionResult = data_types.QuestionResult
PredictionResult = data_types.PredictionResult
PredictionOutcome = data_types.PredictionOutcome
def pass_at_k_estimator(n, c, k):
"""Pass@K estimator from https://arxiv.org/abs/2107.03374.
Args:
n: total number of samples
c: number of correct samples
k: k in pass@k
Returns:
Estimated pass@k value.
"""
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))
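# Worked example (illustrative numbers): with n=5 samples, c=2 correct, and
# k=2, the estimator computes
#   1 - prod(1 - 2 / [4, 5]) = 1 - 0.5 * 0.6 = 0.7
# i.e. an estimated 70% chance that at least one of 2 randomly drawn samples
# passes. When n - c < k, every possible subset contains a passing sample, so
# the function short-circuits to 1.0.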
def pass_at_k_subsampling(pred_results: np.ndarray, k: int,
rng: np.random.Generator) -> bool:
"""Pass@k metric done by sampling a subset of the total results.
Args:
pred_results: An array with N elements for each prediction with a bool
value if the prediction passed or not.
k: Number of predictions to check.
rng: The numpy random generator.
Returns:
Whether at least one program in the sampled subset passed its tests.
"""
if len(pred_results) == 1:
return pred_results[0] > 0
# Sample a subset of the prediction results without replacement.
subset = rng.choice(pred_results, size=k, replace=False)
# If at least 1 prediction in the subset passes, count that as a pass.
return subset.sum() > 0
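# Minimal usage sketch (illustrative values): with three predictions of which
# only one passed,
#   rng = np.random.default_rng(0)
#   pass_at_k_subsampling(np.array([False, False, True]), k=2, rng=rng)
# returns True only if the passing prediction landed in the sampled pair.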
def calculate_subsampling_passes(passing_vals: np.ndarray, k_val: int,
seed: int, subsampling_rounds: int,
subsampling_iter_per_round: int,
shuffle: bool) -> np.ndarray:
"""Calculates the subsampling pass@k means and variances.
Args:
passing_vals: The array of bools for if the prediction passed.
k_val: The k value to calculate.
seed: The seed.
subsampling_rounds: Number of subsampling rounds.
subsampling_iter_per_round: Number of iterations per round.
shuffle: Shuffle the prediction results at the start of each round.
Returns:
The array of results from subsampling.
"""
logging.info(
'Calculating subsampling passes for k=%d and passing_vals.shape=%s',
k_val, passing_vals.shape)
logging.info('Doing %d rounds with %d iterations per round.',
subsampling_rounds, subsampling_iter_per_round)
# The subsampled pass@k for each round.
# (num subsampling rounds, num iterations per round)
subsampling_pass_at_k_vals = []
set_seed(seed)
rng = np.random.default_rng(seed)
for round_num in range(subsampling_rounds):
if shuffle:
rng.shuffle(passing_vals)
round_subset_results = []
for iteration_num in range(subsampling_iter_per_round):
iteration_results = []
# Go over each question and determine if at least 1 prediction in the
# sampled subset passed.
for i in range(passing_vals.shape[0]):
had_passing_subset = pass_at_k_subsampling(passing_vals[i],
k=k_val,
rng=rng)
iteration_results.append(had_passing_subset)
logging.debug('Finished iteration %d of round %d', iteration_num,
round_num)
logging.debug('Got pass@k results of: %s', iteration_results)
# Calculate the subsampled pass@k for the questions.
pass_at_k = float(np.mean(iteration_results)) * 100
round_subset_results.append(pass_at_k)
subsampling_pass_at_k_vals.append(round_subset_results)
return np.array(subsampling_pass_at_k_vals)
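# Note on the returned shape: the array is
# (subsampling_rounds, subsampling_iter_per_round); e.g. the defaults of 10
# rounds with 10 iterations each yield a 10x10 array whose mean and variance
# become the reported subsampling pass@k statistics.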
def calculate_question_aggregate_metrics(
question_results: Dict[str, QuestionResult],
tracked_attributes: List[str]) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""Calculate aggregate metrics for the question results.
Args:
question_results: The dict of qids to question results.
tracked_attributes: The attributes of PredictionResult to track.
Returns:
Returns the main dict of results for the whole set of questions, and the
individual question results.
"""
def calculate_question_metric(question: QuestionResult):
out = {
'qid': question.id,
'language': question.lang,
'num_predictions': question.num_predictions
}
for outcome in PredictionOutcome:
out[str(outcome)] = question.count_result(outcome)
# Certain metrics, such as runtimes and test cases passed, are only
# tracked for predictions that passed.
tracked_stats = {k: [] for k in tracked_attributes + ['num_tc_passed']}
num_total_tests_passed = collections.Counter()
for i in range(len(question)):
try:
p = question.get_vals_for_idx(i)
except IndexError as e:
logging.error('%d is too big for question %s with len(question)=%d', i,
question.id, len(question))
raise e
if p[PredictionOutcome.PASSED]:
for attr_name in tracked_stats.keys():
tracked_stats[attr_name].append(p[attr_name])
num_total_tests_passed[p['num_tc_passed']] += 1
for k, v in tracked_stats.items():
out[f'{k}_median'] = np.median(v) if v else None
out[f'{k}_mean'] = np.mean(v) if v else None
num_passed_n_total_tests = {}
for tc_count in range(question.num_test_cases + 1):
num_passed_n_total_tests[str(tc_count)] = num_total_tests_passed[tc_count]
out['num_passed_N_total_tests'] = num_passed_n_total_tests
out['num_results_by_test'] = question.specific_test_results
return out
logging.info('Calculating question aggregate metrics.')
outcome_counts = collections.Counter()
net_metrics = {
'num_predictions': 0,
'questions_passed': 0,
'num_questions': len(question_results)
}
individual_question_metrics = []
for question_result in question_results.values():
q_metrics = calculate_question_metric(question_result)
for outcome in PredictionOutcome:
outcome_counts[str(outcome)] += q_metrics[str(outcome)]
net_metrics['num_predictions'] += q_metrics['num_predictions']
net_metrics['questions_passed'] += question_result.has_result(
PredictionOutcome.PASSED)
individual_question_metrics.append(q_metrics)
net_metrics['questions_passed_pct'] = ( # type: ignore
net_metrics['questions_passed'] / len(question_results)) * 100
return {**net_metrics, **outcome_counts}, individual_question_metrics
def calculate_pass_metrics(
question_results: Dict[str, QuestionResult],
seed: int,
k_vals: List[int],
num_preds_per_question: int,
subsampling_rounds: int,
subsampling_iter_per_round: int,
shuffle: bool,
):
"""Calculates the pass@k metrics with the codex estimator and subsampling.
Args:
question_results: The mapping of question id to question result.
seed: The seed to use for randomization.
k_vals: The k values to report.
num_preds_per_question: The number of predictions per question.
subsampling_rounds: The number of rounds for subsampling.
subsampling_iter_per_round: The number of iterations to do for each round.
shuffle: If true, shuffle the set of results (by question) at the start of
each subsampling round.
Returns:
The pass@k metrics. Those calculated with the codex estimator are prefixed
with "estimate_" and those calculated via subsampling are prefixed with
"subsampling_".
"""
logging.info('Calculating pass@k values for %d questions.',
len(question_results))
logging.debug('Creating the passing values nested arrays')
pass_val_arrays = []
for question in question_results.values():
if len(question) < num_preds_per_question:
logging.debug('Padding %s', question.id)
pass_arr = question.padded(PredictionOutcome.PASSED,
num_preds_per_question, False)
else:
pass_arr = question.results[PredictionOutcome.PASSED]
pass_val_arrays.append(pass_arr)
passed_counts = list(map(sum, pass_val_arrays))
out = {}
logging.debug('Setting seed=%d', seed)
for k in k_vals:
if k > num_preds_per_question:
logging.warning(
'Skipping k=%d because num_preds_per_question=%d (not enough predictions)',
k, num_preds_per_question)
out[f'estimate_pass@{k}'] = None # type: ignore
out[f'subsampling_pass@{k}'] = None # type: ignore
out[f'subsampling_pass@{k}_var'] = None # type: ignore
continue
estimator_values = []
for results_for_question in passed_counts:
estimator_values.append(
pass_at_k_estimator(n=num_preds_per_question,
c=results_for_question,
k=k))
# Cast to float for later serialization.
estimated_pass_at_k = float(np.mean(estimator_values) * 100)
out[f'estimate_pass@{k}'] = estimated_pass_at_k # type: ignore
subsampled_pass_at_k = calculate_subsampling_passes(
np.array(pass_val_arrays),
k,
subsampling_rounds=subsampling_rounds,
subsampling_iter_per_round=subsampling_iter_per_round,
seed=seed,
shuffle=shuffle)
out[f'subsampling_pass@{k}'] = np.mean(subsampled_pass_at_k)
out[f'subsampling_pass@{k}_var'] = float(subsampled_pass_at_k.var())
return out
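# Illustrative result shape (assuming k_vals=[1] and enough predictions; the
# numbers are made up):
#   {'estimate_pass@1': 43.2,
#    'subsampling_pass@1': 42.9,
#    'subsampling_pass@1_var': 1.7}
# Any k larger than num_preds_per_question maps to None for all three keys.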
def _group_execution_results_by_question(
raw_results: List[data_types.ExecutionResult], question_data: Dict[str, Any]
) -> Tuple[int, Dict[str, data_types.PredictionResult]]:
"""Groups the raw execution results into a set of prediction results by question id.
Args:
raw_results: The list of raw execution results.
question_data: The information for each question.
Returns:
The number of predictions run and the grouped prediction results.
"""
preds_by_question = collections.defaultdict(list)
num_predictions_ran = len(raw_results)
# Define this variable here for later debugging of potential parsing errors.
num_parsed = 0
for result in raw_results:
pred_result = PredictionResult.from_execution_result(
result, question_data[result.prediction.qid])
preds_by_question[pred_result.qid].append(pred_result)
num_parsed += 1
if num_parsed % 5000 == 0:
logging.info('Parsed %d results', num_parsed)
logging.info('Finished parsing %d (expected %d) for %d questions', num_parsed,
num_predictions_ran, len(preds_by_question))
return num_predictions_ran, dict(preds_by_question)
def _create_question_results(
preds_by_question: Dict[str, data_types.PredictionResult],
question_data: Dict[str, Any], tracked_attributes: List[str],
num_preds_per_question: int
) -> Tuple[int, Dict[str, data_types.QuestionResult]]:
"""Creates the question results for the predictions grouped by question.
Args:
preds_by_question: Predictions grouped by question id.
question_data: The underlying question data.
tracked_attributes: The attributes of PredictionResult to keep track of.
num_preds_per_question: The number of predictions per question.
Returns:
The maximum number of predictions found for a single question (used for
padding), and the mapping of question id to question result.
"""
question_results = {}
max_preds = num_preds_per_question
logging.info('Creating %d question results...', len(preds_by_question))
completed = 0
for qid, preds in preds_by_question.items():
question_results[qid] = QuestionResult.from_pred_results(
qid=qid,
lang=preds[0].lang,
num_test_cases=len(question_data[qid]['test_case_ids']),
pred_result_list=preds,
tracked_attributes=tracked_attributes)
max_preds = max(max_preds, len(preds))
completed += 1
if completed % 2500 == 0:
logging.info('%d/%d finished.', completed, len(preds_by_question))
logging.debug('max_preds=%d', max_preds)
return max_preds, question_results
@gin.configurable('metrics',
denylist=['raw_results', 'question_data', 'seed', 'runtime'])
def calculate_metrics_from_raw_results(
raw_results: List[data_types.ExecutionResult],
question_data: Dict[str, Any],
runtime: str,
seed: int,
k_vals: List[int],
num_preds_per_question: int,
tracked_pred_attrs: List[str],
subsampling_rounds: int = 10,
subsampling_iter_per_round: int = 10,
shuffle: bool = True,
include_outcome_pct: bool = True):
"""Calculate metrics from the raw execution results.
Args:
raw_results (List[data_types.ExecutionResult]): The raw execution results.
question_data (Dict[str, Any]): The question specific data.
runtime (str): The net runtime of the entire execution.
seed (int): The seed to use.
k_vals (List[int]): The k values to use for pass@k
num_preds_per_question (int): The number of predictions per question. If
more are found, or some questions have too few predictions, their passed
counts are padded with False for the pass@k calculation.
tracked_pred_attrs (List[str]): The list of PredictionResult attributes to
track and report mean and median for.
subsampling_rounds (int, optional): Number of rounds for the subsampling
pass@k values.
subsampling_iter_per_round: Number of iterations per round for the
subsampling pass@k values.
shuffle (bool, optional): Shuffle before subsampling. Defaults to True.
include_outcome_pct (bool, optional): Include percentages for each outcome.
Defaults to True.
Returns:
The metrics dict for the raw results.
"""
logging.info('Calculating metrics from %d results', len(raw_results))
logging.debug('%d number of predictions per question.',
num_preds_per_question)
num_predictions_ran, preds_by_question = _group_execution_results_by_question(
raw_results=raw_results, question_data=question_data)
max_preds, question_results = _create_question_results(
preds_by_question=preds_by_question,
question_data=question_data,
tracked_attributes=tracked_pred_attrs,
num_preds_per_question=num_preds_per_question)
# Calculate overall metrics
net_metrics, question_metrics = calculate_question_aggregate_metrics(
question_results=question_results, tracked_attributes=tracked_pred_attrs)
if include_outcome_pct:
# Calculate the pct stats for all of the prediction outcomes excluding
# passed, as that is equal to the estimate pass@1 value.
logging.info('Calculating percentages for prediction outcomes...')
for outcome in PredictionOutcome:
if outcome == PredictionOutcome.PASSED:
continue
new_key = f'{str(outcome)}_pct'
net_metrics[new_key] = net_metrics[str( # type:ignore
outcome)] / num_predictions_ran * 100
pass_metrics = calculate_pass_metrics(
question_results=question_results,
seed=seed,
num_preds_per_question=max_preds,
k_vals=k_vals,
subsampling_rounds=subsampling_rounds,
subsampling_iter_per_round=subsampling_iter_per_round,
shuffle=shuffle)
net_metrics = {**net_metrics, **pass_metrics}
net_metrics['total_runtime'] = runtime
prediction_results = []
for p in preds_by_question.values():
prediction_results.extend(p)
return net_metrics, question_metrics, prediction_results
@gin.configurable(denylist=['lang_metrics', 'question_metrics', 'language'])
def format_output_metrics(
lang_metrics: Dict[str, Dict[str, Any]],
question_metrics: List[Dict[str, Any]],
language: str,
question_key_format: str = '{language}/question/{question_id}',
language_key_format: str = '{language}/{field}',
):
"""Format the language metrics and questions metrics for saving.
Args:
lang_metrics (Dict[str, Dict[str, Any]]): The language overall metrics.
question_metrics (List[Dict[str, Any]]): The question specific metrics.
language (str): The name of the language.
question_key_format (str, optional): The formatting template to use for
the question keys. Must have language and question_id in it. Defaults to
'{language}/question/{question_id}'.
language_key_format (str, optional): The formatting template to use for
the overall key. Must have language and field in it. Defaults to
'{language}/{field}'.
Returns:
The dict with the language metrics and the question metrics using their
respective key format.
"""
# Create the initial output dict from the language metrics dict.
formatted_metrics = {
language_key_format.format(language=language, field='metrics'):
lang_metrics
}
for question_result in question_metrics:
key = question_key_format.format(language=language,
question_id=question_result['qid'])
formatted_metrics[key] = question_result
return formatted_metrics
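# Illustrative output (default key templates, language='Python'):
#   {'Python/metrics': {...overall language metrics...},
#    'Python/question/1': {...metrics for qid 1...}}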
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for executing a set of commands to evaluate a code prediction."""
import collections
import contextlib
import copy
import datetime
import gc
import json
import multiprocessing as mp
import os
import pathlib
import signal
import subprocess
from typing import List
import gin
import psutil
from absl import logging
from babelcode.data_types.command import Command
from babelcode.data_types.prediction import Prediction
from babelcode.data_types.result_types import ExecutionResult
from babelcode.languages import Language
from babelcode.utils import convert_timedelta_to_milliseconds
from babelcode.utils import format_timedelta_str
class UnknownLangError(Exception):
"""Exception for when an unknown language is found."""
class PredTimeoutError(Exception):
"""Timeout error for running commands."""
@contextlib.contextmanager
def time_limit(seconds: float):
"""Sets a time limit."""
def signal_handler(signum, frame):
raise PredTimeoutError('Timed out!')
signal.signal(signal.SIGALRM, signal_handler)
signal.setitimer(signal.ITIMER_REAL, seconds)
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL, 0)
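# Minimal usage sketch (illustrative; long_running_call is hypothetical):
#   try:
#     with time_limit(2.0):
#       long_running_call()
#   except PredTimeoutError:
#     pass  # the call exceeded the 2 second budget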
CommandResult = collections.namedtuple(
'CommandResult',
['return_code', 'runtime', 'max_memory_used', 'outputs', 'timed_out'],
)
def set_limits():
"""Sets limits and other info before execution."""
p = psutil.Process(os.getpid())
p.nice(19)
def safe_execute(command: Command,
cwd: pathlib.Path,
timeout_buffer: float = 0.005) -> CommandResult:
"""Executes a single command safely.
Args:
command: The command to run.
cwd: The working directory to run it in.
timeout_buffer: A buffer to use for timeout.
Returns:
The result of executing the command.
"""
timed_out = False
return_code = -1
runtime = command.timeout
outputs = (None, None)
start_time = datetime.datetime.now()
execution_process = subprocess.Popen( # pylint: disable=subprocess-popen-preexec-fn
command.command,
cwd=cwd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
# preexec_fn=set_limits,
)
pid = execution_process.pid
process = psutil.Process(pid)
memory_used = process.memory_info().rss
try:
with time_limit(command.timeout + timeout_buffer):
while execution_process.poll() is None:
memory_used = max(memory_used, process.memory_info().rss)
outputs = execution_process.communicate()
runtime = datetime.datetime.now() - start_time
return_code = execution_process.returncode
except PredTimeoutError:
timed_out = True
runtime = datetime.timedelta(seconds=command.timeout)
finally:
execution_process.kill()
return CommandResult(
return_code=return_code,
runtime=runtime,
max_memory_used=memory_used,
outputs=outputs,
timed_out=timed_out,
)
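# Minimal usage sketch (illustrative; the Command constructor arguments are
# assumptions based on how its fields are used here):
#   cmd = Command(command=['python', 'code.py'], timeout=10)
#   result = safe_execute(cmd, cwd=pathlib.Path('/tmp'))
#   if result.timed_out or result.return_code != 0:
#     ...  # treat as a failed execution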
def execute_code(prediction: Prediction,
commands: List[Command]) -> ExecutionResult:
"""Execute a file of code.
Args:
prediction: The Prediction to execute.
commands: The commands to run.
Returns:
The execution result.
"""
if os.getenv('ALLOW_EXECUTION', 'false') != 'true':
raise ValueError('EXECUTION IS NOT ALLOWED IN THIS ENVIRONMENT')
failed = False
file_path = prediction.file_path
cwd = file_path.parent if file_path.is_file() else file_path
cwd = str(cwd.resolve().absolute())
last_ran_command = -1
start_time = datetime.datetime.utcnow()
finished_time = None
command_runtimes = [None] * len(commands)
command_memory_used = [None] * len(commands)
for i, command in enumerate(commands):
last_ran_command = i
command_result = safe_execute(command, cwd)
command_memory_used[i] = command_result.max_memory_used
command_runtimes[i] = convert_timedelta_to_milliseconds(
command_result.runtime)
if command_result.timed_out:
net_runtime = sum(filter(lambda t: t, command_runtimes))
return ExecutionResult(
prediction=prediction,
commands=commands,
stdout='',
stderr='',
return_code=0,
net_runtime=net_runtime,
last_ran_command_idx=i,
timed_out=True,
command_runtimes=command_runtimes,
command_memory=command_memory_used,
)
finished_time = datetime.datetime.utcnow()
stdout, stderr = command_result.outputs
# If there was an error running the commands, stop the iteration.
if command_result.return_code != 0:
failed = True
break
# Elapsed time
if finished_time is None:
elapsed_time = None
else:
# Convert the timedelta to milliseconds for ease of use.
total_time = finished_time - start_time
elapsed_time = convert_timedelta_to_milliseconds(total_time)
# We assume that the last process result is the one we care about. So only
# take the stdout and stderr from those.
return ExecutionResult(
prediction=prediction,
commands=commands,
stdout=stdout.decode('utf-8', errors='ignore'),
stderr=stderr.decode('utf-8', errors='ignore'),
return_code=command_result.return_code,
net_runtime=elapsed_time,
last_ran_command_idx=last_ran_command,
had_error=failed,
command_runtimes=command_runtimes,
timed_out=False,
command_memory=command_memory_used,
)
# Wrapper to allow serialized multiprocessing with mp.pool.
def exec_wrapper(arg_list):
"""Execution wrapper to make execution work with multiprocessing.
Args:
arg_list: The list of args.
Returns:
The execution result.
"""
prediction, commands = arg_list
return execute_code(prediction=prediction, commands=commands)
def execution_results_writer(
result_queue: mp.JoinableQueue,
execution_results_file: pathlib.Path,
runtime_results_file: pathlib.Path,
):
"""Listens to a result queue and writes the runtimes and results to disk."""
execution_fd = execution_results_file.open('a')
runtime_fd = runtime_results_file.open('a')
written_records = 0
while True:
if result_queue.empty():
continue
execution_results = result_queue.get()
if execution_results is None:
logging.debug('Execution Saver Got Poison Pill, exiting...')
runtime_fd.close()
execution_fd.close()
result_queue.task_done()
return
is_execution_result, result = execution_results
if is_execution_result:
result: ExecutionResult
execution_fd.write(json.dumps(result.to_dict()) + '\n')
else:
runtime_fd.write(json.dumps(result) + '\n')
result_queue.task_done()
written_records += 1
if written_records % 1000 == 0:
logging.info('Wrote %d records', written_records)
@gin.configurable('execution', denylist=['predictions', 'lang', 'output_dir'])
def execute_predictions(
predictions: List[Prediction],
lang: Language,
output_dir: pathlib.Path,
num_workers: int = 1,
update_freq: int = 250,
max_task_per_child: int = 1,
garbage_collection_freq: int = 500,
):
"""Execute a list of predictions in a specific language.
Args:
predictions: List of predictions.
lang: The language the code is written in.
output_dir: The output directory.
num_workers: The number of workers to use.
update_freq: Frequency of updates.
max_task_per_child: The maximum tasks ran per child before it is killed.
garbage_collection_freq: How often to run garbage collection.
Returns:
The array of raw execution results and the total runtime.
"""
if os.getenv('ALLOW_EXECUTION', 'false') != 'true':
raise ValueError('EXECUTION IS NOT ALLOWED IN THIS ENVIRONMENT')
logging.info('Evaluating %d predictions in %s', len(predictions), lang.name)
# Make the arguments to submit to the ThreadPoolExecutor. Do it here so we
# can have a progress bar as well.
executor_args = []
logging.debug('Creating args for executor with %d predictions',
len(predictions))
failed = 0
for prediction in predictions:
if not prediction.file_path.exists():
logging.error('Got prediction %s that does not exist', prediction)
failed += 1
continue
executor_args.append((prediction, lang.command_fn(prediction.file_path)))
  logging.info('%d/%d predictions could not be executed', failed,
               len(predictions))
execution_results_file = output_dir.joinpath(
f'{lang.name}_execution_results.jsonl')
runtime_results_file = output_dir.joinpath(
f'{lang.name}_runtime_tracking.jsonl')
execution_results_fd = execution_results_file.open('a')
runtime_results_fd = runtime_results_file.open('a')
logging.info('Starting %d workers...', num_workers)
num_to_complete = len(executor_args)
num_completed = 0
start_time = batch_time = datetime.datetime.utcnow()
results = []
time_writing = 0
batch_writing = 0
last_written_idx = 0
summary_result_tracking = collections.Counter()
batch_cpu_used = []
batch_mem_used = []
with mp.Pool(num_workers, maxtasksperchild=max_task_per_child) as pool:
for result in pool.imap_unordered(exec_wrapper, executor_args):
num_completed += 1
      # Simple tracking of metrics as execution progresses.
had_error = result.had_error or result.return_code != 0
summary_result_tracking['Had Error'] += had_error
summary_result_tracking['Timed Out'] += result.timed_out
summary_result_tracking['Executed'] += not (result.timed_out or had_error)
results.append(result)
batch_cpu_used.append(psutil.cpu_percent())
batch_mem_used.append(psutil.virtual_memory().percent)
# Update stats
if num_completed % update_freq == 0:
# Calculate The Overall rate
pct_done = num_completed / num_to_complete * 100
current_time = datetime.datetime.utcnow()
elapsed = current_time - start_time
if elapsed.total_seconds() == 0:
rate = num_completed
else:
rate = num_completed / elapsed.total_seconds()
# Calculate The Batch rate
batch_elapsed = current_time - batch_time
if batch_elapsed.total_seconds() == 0:
batch_rate = update_freq
else:
batch_rate = update_freq / batch_elapsed.total_seconds()
rate_msg = (f'{num_completed:,} ({pct_done:0.2f}%) done in'
f' {format_timedelta_str(elapsed)}')
logging.info(rate_msg)
cpu_usage = max(batch_cpu_used)
memory_usage = max(batch_mem_used)
summary_str = [
'CPU Used = %-6s' % f'{cpu_usage:0.2f}',
'RAM Used = %-6s' % f'{memory_usage:0.2f}',
]
batch_cpu_used = []
batch_mem_used = []
for k, v in summary_result_tracking.items():
value_str = f'{v:,}'
summary_str.append(f'{k:>10}={value_str:<8}')
logging.info(' | '.join(summary_str))
logging.info(
'Programs/Second: Batch %-7s | Overall %-7s',
f'{batch_rate:.2f}',
f'{rate:.2f}',
)
# Write the results to the results file.
start_write = datetime.datetime.utcnow()
for i in range(last_written_idx, len(results)):
execution_results_fd.write(json.dumps(results[i].to_dict()) + '\n')
batch_writing = (datetime.datetime.utcnow() -
start_write).total_seconds()
last_written_idx = len(results)
# Log the time spent writing.
time_writing += batch_writing
        logging.info(
            '%s time spent writing.',
            f'{time_writing / max(elapsed.total_seconds(), 1e-9):.2%}',
        )
logging.debug(
'Size of Execution Results: %s KB',
f'{round(execution_results_file.stat().st_size/1024,2):.2f}',
)
batch_metrics = {
'now': datetime.datetime.utcnow().isoformat(),
'completed': num_completed,
'pct_done': pct_done,
'net_elapsed': elapsed.total_seconds(),
'net_rate': rate,
'batch_elapsed': batch_elapsed.total_seconds(),
'batch_rate': batch_rate,
'cpu_used': cpu_usage,
'memory_used': memory_usage,
'time_writing': time_writing,
'batch_writing': batch_writing,
}
# Save the batch runtime metrics
write_runtime = datetime.datetime.utcnow()
runtime_results_fd.write(json.dumps(batch_metrics) + '\n')
elapsed_write_runtime = (datetime.datetime.utcnow() -
write_runtime).total_seconds()
logging.debug('Spent %-6f writing runtime batch', elapsed_write_runtime)
batch_time = datetime.datetime.utcnow()
if num_completed % garbage_collection_freq == 0:
logging.debug('Running garbage collection as num_completed=%d',
num_completed)
gc.collect()
# Cleanup pool
pool.close()
pool.terminate()
logging.debug('Saving Remaining')
for i in range(last_written_idx, len(results)):
execution_results_fd.write(json.dumps(results[i].to_dict()) + '\n')
total_time = datetime.datetime.utcnow() - start_time
logging.info('Finished executing %d in %s', num_to_complete, total_time)
logging.debug('Got %d results back, expected %d', num_completed,
len(executor_args))
execution_results_fd.close()
runtime_results_fd.close()
return results, format_timedelta_str(total_time)
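# A minimal usage sketch (hypothetical names; assumes gin has been configured
# and `my_language` is a `Language` obtained elsewhere):
#
#   os.environ['ALLOW_EXECUTION'] = 'true'
#   results, runtime = execute_predictions(
#       predictions=my_predictions,  # List[Prediction]
#       lang=my_language,
#       output_dir=pathlib.Path('outputs'))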
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Driver functions for different modules.
These functions are primarily here to avoid circular imports while also allowing
them to be testable.
"""
import collections
import json
import pathlib
import shutil
import tempfile
from typing import Any, Callable, Dict, List, Optional, Tuple
import gin
from absl import logging
from jinja2 import Template
from tqdm import tqdm
from babelcode import code_generator
from babelcode import data_types
from babelcode import execution
from babelcode import languages
from babelcode import metrics as metrics_module
from babelcode import schema_parsing
from babelcode import translation
from babelcode import utils
ObfuscationFnType = Callable[[data_types.Question], data_types.Question]
Prediction = data_types.Prediction
@gin.configurable('prompt_generation',
allowlist=['force_type_annotations', 'obfuscation_fn'])
def generate_prompt_info(
original_question: data_types.Question,
language: languages.Language,
    prompt_translator: Optional[translation.PromptTranslator] = None,
    template_map: Optional[Dict[str, Template]] = None,
force_type_annotations: bool = False,
obfuscation_fn: ObfuscationFnType = code_generator.pass_through_obfuscate,
) -> Dict[str, Any]:
"""Generates the prompt information for a question.
Args:
original_question: The question to generate the testing code for.
    language: The language to use.
prompt_translator: The prompt translator to use.
template_map: The mapping of templates to use.
    force_type_annotations: Force type annotations.
    obfuscation_fn: Callable that takes in a question and returns a question.
      Can use this to obfuscate variables.
Returns:
The dict of prompting information.
"""
question = obfuscation_fn(original_question)
language_schema_spec = (
schema_parsing.LanguageSchemaSpecRegistry.get_lang_spec(language.name))
schema, input_order = schema_parsing.parse_schema_and_input_order(
language_spec=language_schema_spec, raw_schema=question.schema)
signature = prompt_translator.translate_signature(
question.entry_fn_name,
entry_cls_name=question.entry_cls_name,
schema=schema,
input_order=input_order,
use_type_annotation=question.use_type_annotation
or force_type_annotations,
)
signature_with_docstring = description = None
if original_question.text:
# Replace the original fn name with the obfuscated one
text = original_question.text.replace(original_question.entry_fn_name,
question.entry_fn_name)
text = text.replace(original_question.entry_cls_name,
question.entry_cls_name)
signature_with_docstring = (
prompt_translator.translate_signature_with_docstring(
'Python',
text,
question.entry_fn_name,
entry_cls_name=question.entry_cls_name,
schema=schema,
input_order=input_order,
use_type_annotation=question.use_type_annotation
or force_type_annotations,
))
description = prompt_translator.translate_prompt('Python', text,
question.entry_fn_name)
entry_fn_name = prompt_translator.translate_entry_function_name(
question.entry_fn_name)
entry_cls_name = prompt_translator.translate_entry_cls_name(
question.entry_cls_name)
return {
'qid': question.qid,
'signature': signature,
'signature_with_docstring': signature_with_docstring,
'text': description,
'header': template_map['HEADER'].render(),
'entry_fn_name': entry_fn_name,
'entry_cls_name': entry_cls_name,
'arguments': input_order,
}
def _generate_question_code(
question: data_types.Question,
schema: Dict[str, schema_parsing.SchemaType],
input_order: List[str],
literal_translator: translation.LiteralTranslator,
prompt_translator: translation.PromptTranslator,
template_map: Dict[str, Template],
) -> Dict[str, Any]:
"""Generates the code for a specific question.
Args:
question: The question to generate the testing code for.
    schema: The parsed schema of the question.
input_order: The order of inputs.
literal_translator: The literal translator to use.
prompt_translator: The prompt translator to use.
template_map: The mapping of templates to use.
Returns:
The dictionary with testing code.
"""
out_dict = question.to_dict()
out_dict['test_code'] = code_generator.generate_code_for_question(
question=question,
parsed_schema=schema,
input_order=input_order,
literal_translator=literal_translator,
template_map=template_map,
prompt_translator=prompt_translator,
)
out_dict['entry_fn_name'] = prompt_translator.translate_entry_function_name(
question.entry_fn_name)
out_dict['entry_cls_name'] = prompt_translator.translate_entry_cls_name(
question.entry_cls_name)
return out_dict
ALLOWED_ERRORS = (
data_types.QuestionParsingError,
data_types.QuestionValidationError,
data_types.IOPairError,
schema_parsing.SchemaTypeError,
)
# Helper function to generate the code in a given language
def generate_code_for_questions(questions: List[data_types.Question],
lang: languages.Language):
"""Generate Code in a specified language.
Args:
questions: The list of questions.
lang: The language to use.
Returns:
The problem code and the failures.
"""
logging.info('Generating %s code', lang.name)
failures = []
out = []
logging.debug('Initializing literal translator.')
literal_translator = lang.make_literal_translator()
prompt_translator = lang.make_prompt_translator()
template_map = code_generator.load_template_map(lang.make_template_map())
logging.debug('Getting language schema specification.')
language_schema_spec = (
schema_parsing.LanguageSchemaSpecRegistry.get_lang_spec(lang.name))
for question in tqdm(questions, desc='Generating Code'):
logging.debug('Generating code for %s', question.qid)
try:
schema, input_order = schema_parsing.parse_schema_and_input_order(
language_spec=language_schema_spec, raw_schema=question.schema)
except ALLOWED_ERRORS as e:
logging.debug('%s failed to parse schema because of %s', question.qid,
str(e))
failures.append((question, e))
continue
try:
question_dict = _generate_question_code(
question=question,
schema=schema,
input_order=input_order,
literal_translator=literal_translator,
template_map=template_map,
prompt_translator=prompt_translator,
)
prompt_dict = generate_prompt_info(
original_question=question,
language=lang,
template_map=template_map,
prompt_translator=prompt_translator,
)
except ALLOWED_ERRORS as e:
      logging.debug('%s failed to generate code because of %s', question.qid,
                    str(e))
failures.append((question, e))
continue
out.append((question_dict, prompt_dict))
fail_msg = f'{len(failures)}/{len(questions)} failed for {lang.name}'
print(fail_msg)
logging.info(fail_msg)
return out, failures
def setup_language_code_dirs(
out_dir: pathlib.Path,
lang: languages.Language,
predictions: Dict[str, Any],
question_mapping: Dict[str, Dict[str, str]],
force_question_entry: bool,
):
"""Setup the directories for each of the questions.
Args:
out_dir: Path to write dirs to.
lang: The language to write each question in.
predictions: The predictions for this language.
question_mapping: The mapping of questions to their information.
force_question_entry: Force using the default question entry points
instead of those specified by the predictions.
Raises:
KeyError: Duplicated prediction ids.
Returns:
The dict of `Prediction` objects with their full testing code created
and the read question info.
"""
# Do an ID->Question data mapping so we can align with the predictions.
out = {}
for key, pred_dict in tqdm(predictions.items(), desc='Creating Dirs'):
# Get the prediction and question data.
qid = key.split('/')[0]
try:
question = question_mapping[qid]
except KeyError:
logging.warning('Could not find %s', qid)
continue
file_name = key.replace('/', '_')
q_path = out_dir.joinpath(file_name)
q_path.mkdir()
code_path = q_path.joinpath(f'{file_name}.{lang.file_ext}')
if force_question_entry:
pred_dict['entry_fn_name'] = question['entry_fn_name']
if pred_dict.get('entry_cls_name', None) is not None:
pred_dict['entry_cls_name'] = question['entry_cls_name']
prediction = Prediction.from_dict(pred_dict,
file_path=code_path,
default_language=lang.name)
with code_path.open('w', encoding='utf-8') as f:
code = question['test_code'].replace('PLACEHOLDER_CODE_BODY',
prediction.code)
entry_fn_name = prediction.entry_fn_name or question['entry_fn_name']
entry_cls_name = prediction.entry_cls_name or question['entry_cls_name']
code = code.replace('PLACEHOLDER_FN_NAME', entry_fn_name)
code = code.replace('PLACEHOLDER_CLS_NAME', entry_cls_name)
f.write(code)
pred_key = f'{prediction.qid}/{prediction.id}'
if pred_key in out:
logging.error('Prediction %s already exists', pred_key)
raise KeyError('Duplicate predictions')
out[pred_key] = prediction
return out
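# Example layout (hypothetical): a Python prediction with key '12/0' is written
# to `out_dir/12_0/12_0.py`, with PLACEHOLDER_CODE_BODY, PLACEHOLDER_FN_NAME,
# and PLACEHOLDER_CLS_NAME in the question's test code replaced by the
# prediction's code and entry names.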
@gin.configurable(
'tensorboard_metrics',
denylist=['results', 'question_results', 'step', 'summary_writer', 'lang'],
)
def write_metrics_to_tb(
results,
question_results,
step,
summary_writer,
lang,
overall_metrics: List[str],
question_metrics: List[str],
):
"""Writes the metrics to a tensorboard."""
utils.write_to_tb_writer(
{
k: v for k, v in results.items() if k in overall_metrics
},
summary_writer,
step,
lang,
)
utils.write_to_tb_writer(
{
q['qid']: {
k: v for k, v in q.items() if k in question_metrics
} for q in question_results
},
summary_writer,
step,
f'{lang}_questions',
)
def load_progress_from_dir(dir_path: pathlib.Path) -> Dict[str, Any]:
"""Loads progress of an evaluation run from a directory.
Args:
dir_path: The directory to load progress from.
Returns:
    A dict mapping each language to its execution results from prior runs.
"""
logging.info('Loading progress from %s', dir_path)
out = collections.defaultdict(dict)
found_results = 0
for exec_file in dir_path.glob('*_execution_results.jsonl'):
logging.info('Loading execution results from %s', exec_file)
for lang, results in data_types.read_execution_results_from_file(
exec_file).items():
out[lang].update(results)
found_results += len(results)
logging.info('Found %d execution results across %d languages:', found_results,
len(out))
for k, v in out.items():
logging.info('\t%-10s: %s', k, len(v))
return out
def _process_results(
lang: languages.Language,
raw_results: List[data_types.ExecutionResult],
total_runtime: str,
question_mapping: Dict[str, Dict[str, str]],
step: int,
seed: int,
summary_writer=None,
) -> Tuple[Dict[str, Any], List[Dict[str, Any]]]:
"""Processes the results from executing code."""
metrics_tuple = metrics_module.calculate_metrics_from_raw_results(
raw_results=raw_results, # type: ignore
question_data=question_mapping,
runtime=total_runtime,
seed=seed,
k_vals=gin.REQUIRED, # type: ignore
num_preds_per_question=gin.REQUIRED, # type: ignore
subsampling_rounds=gin.REQUIRED, # type: ignore
subsampling_iter_per_round=gin.REQUIRED, # type: ignore
shuffle=gin.REQUIRED, # type: ignore
include_outcome_pct=gin.REQUIRED,
) # type: ignore
metrics, question_results, pred_results = metrics_tuple
if summary_writer is not None:
write_metrics_to_tb(
metrics,
question_results,
step,
summary_writer,
lang.name,
overall_metrics=gin.REQUIRED,
question_metrics=gin.REQUIRED,
)
logging.info('Adding metadata to %d questions', len(question_results))
for i in range(len(question_results)):
qid = question_results[i]['qid']
meta = question_mapping[qid].get('metadata', {})
for k in list(meta.keys()):
if k in question_results[i]:
logging.warning(
'Question %s has metadata key %s that is used for a metric.',
qid,
k,
)
meta.pop(k)
question_results[i].update({
'title': question_mapping[qid]['title'],
**meta
})
metrics = metrics_module.format_output_metrics(
lang_metrics=metrics,
question_metrics=question_results,
language=lang.name,
)
return metrics, pred_results
def execute_bc_predictions(
lang: languages.Language,
question_mapping: Dict[str, Dict[str, str]],
raw_predictions: Dict[str, Any],
output_path: pathlib.Path,
debug_dir_path: pathlib.Path,
seed: int,
step: int,
force_question_entry: bool,
executed_predictions: Dict[str, Any],
summary_writer=None,
):
"""Evaluates the predictions from a single language."""
def run_executions_in_dir(used_dir_path):
logging.debug('Writing %s code to %s', lang.name, used_dir_path)
if force_question_entry:
logging.info('Force Use Question Entry is Enabled.')
else:
logging.info('Force Use Question Entry is disabled.')
removed = 0
to_remove = []
for k in executed_predictions:
if k in raw_predictions:
logging.debug('%s prediction is already completed, removing', k)
raw_predictions.pop(k)
removed += 1
else:
to_remove.append(k)
logging.debug('Removing %d keys that were not found...', len(to_remove))
for k in to_remove:
executed_predictions.pop(k)
logging.info(
'Skipping %d already executed predictions out of %d',
removed,
len(raw_predictions) + removed,
)
if raw_predictions:
predictions = setup_language_code_dirs(
used_dir_path,
lang=lang,
predictions=raw_predictions,
question_mapping=question_mapping,
force_question_entry=force_question_entry,
)
# Execute the predictions for the specific language.
raw_results, total_runtime = execution.execute_predictions(
list(predictions.values()), lang, output_path)
raw_results += list(executed_predictions.values())
else:
logging.info('All predictions have been executed, evaluating...')
raw_results = list(executed_predictions.values())
total_runtime = '00:00:00'
return _process_results(
lang=lang,
raw_results=raw_results,
total_runtime=total_runtime,
question_mapping=question_mapping,
step=step,
seed=seed,
summary_writer=summary_writer,
)
if debug_dir_path is not None:
debug_dir_path = pathlib.Path(debug_dir_path, lang.name)
if debug_dir_path.exists():
shutil.rmtree(debug_dir_path)
debug_dir_path.mkdir(parents=True)
return run_executions_in_dir(debug_dir_path)
else:
# Use a temporary directory so that we can write without worry.
with tempfile.TemporaryDirectory() as temp_dir:
return run_executions_in_dir(pathlib.Path(temp_dir)) |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Init for main package."""
from babelcode import code_generator
from babelcode import data_types
from babelcode import languages
from babelcode import schema_parsing
from babelcode import translation
from babelcode import utils
from babelcode.drivers import execute_bc_predictions
from babelcode.drivers import generate_code_for_questions
from babelcode.drivers import generate_prompt_info
from babelcode.drivers import load_progress_from_dir
QUESTION_DATA_KEYS = {
"test_code", "entry_fn_name", "entry_cls_name", "qid", "language",
"test_case_ids"
}
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for handling the generation of code."""
import pathlib
from typing import Dict, List, Union
import gin
import jinja2
from absl import logging
from babelcode import data_types
from babelcode import schema_parsing
from babelcode import translation
SchemaType = schema_parsing.SchemaType
SchemaMapType = schema_parsing.SchemaMapType
Question = data_types.Question
# Set a list of required templates that must be present.
REQUIRED_TEMPLATES = ['MAIN', 'HEADER', 'EVALUATION']
@gin.configurable(denylist=['question'])
def pass_through_obfuscate(question: Question) -> Question:
"""Returns the question passed."""
return question
@gin.configurable(denylist=['question'])
def naive_obfuscation(
question: Question,
function_name: str = 'model_prediction',
cls_name: str = 'Prediction',
arg_template: str = 'arg{idx}',
force_type_annotation: bool = False,
) -> Question:
"""Obfuscates a question by naively replacing variables and function name.
Args:
question: The question to obfuscate.
function_name: The name of the function to use.
cls_name: The name of the class to use.
arg_template: The template to use for the argument names. Must have `{idx}`.
force_type_annotation: Force type annotation for the question.
Returns:
The obfuscated question.
"""
logging.info('Naively obfuscating %s', question.qid)
new_question = question.copy()
new_arg_names = {
v['name']: arg_template.format(idx=i)
for i, v in enumerate(question.schema['params'])
}
new_question.change_var_names(new_arg_names)
new_question.entry_fn_name = function_name
new_question.entry_cls_name = cls_name
if force_type_annotation:
new_question.use_type_annotation = True
return new_question
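# A sketch of the renaming (hypothetical question with params 'nums' and
# 'target'):
#
#   obfuscated = naive_obfuscation(question)
#   obfuscated.entry_fn_name   # -> 'model_prediction'
#   obfuscated.entry_cls_name  # -> 'Prediction'
#   [p['name'] for p in obfuscated.schema['params']]  # -> ['arg0', 'arg1']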
def _determine_question_requirements(
question: Question,
schema: SchemaMapType,
double_precision: float,
float_precision: float,
) -> Dict[str, Union[str, float, bool]]:
"""Determines the question specifc requirements needed for generating the code.
Args:
question: The question the requirements are for.
schema: The schema of the question.
double_precision: The precision to use for double evaluation.
float_precision: The precision to use for float evaluation.
Returns:
A dict of requirements for the question.
"""
_ = question
question_requirements = {
'evaluation_method': 'default',
'precision': float_precision,
'use_float': True,
}
return_type = schema[data_types.EXPECTED_KEY_NAME]
if return_type.type_str == 'double' or return_type.type_str == 'float':
question_requirements['evaluation_method'] = 'float'
if return_type.type_str == 'double':
question_requirements['precision'] = double_precision
question_requirements['use_float'] = False
return question_requirements
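# For example (a sketch under the default precisions): a question whose return
# type is 'double' yields {'evaluation_method': 'float', 'precision':
# double_precision, 'use_float': False}, while a non-floating return type
# keeps {'evaluation_method': 'default', 'precision': float_precision,
# 'use_float': True}.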
def load_template_map(
template_map: Dict[str, pathlib.Path]) -> Dict[str, jinja2.Template]:
"""Loads the Jinja template maps.
Args:
template_map: The mapping of string to paths for the templates.
Returns:
The map of template names to loaded templates.
Raises:
    KeyError: If the required templates are missing.
"""
logging.info('Template map is %s', template_map)
# Make sure that the required templates are present.
for template_name in REQUIRED_TEMPLATES:
if template_name not in template_map:
raise KeyError(f'Missing required template "{template_name}"')
# Load the actual files as Jinja templates
templates = {}
for name, path in template_map.items():
logging.info('Loading template "%s" located at "%s"', name, path)
templates[name] = jinja2.Template(path.read_text(),
undefined=jinja2.StrictUndefined)
return templates
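# A minimal usage sketch (hypothetical template paths):
#
#   templates = load_template_map({
#       'MAIN': pathlib.Path('templates/main.tmpl'),
#       'HEADER': pathlib.Path('templates/header.tmpl'),
#       'EVALUATION': pathlib.Path('templates/evaluation.tmpl'),
#   })
#   header_code = templates['HEADER'].render()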
def generate_code_for_question(
question: Question,
parsed_schema: Dict[str, SchemaType],
input_order: List[str],
literal_translator: translation.LiteralTranslator,
prompt_translator: translation.PromptTranslator,
template_map: Dict[str, jinja2.Template],
float_precision: float = 1e-6,
double_precision: float = 1e-9,
debug: bool = False,
) -> str:
"""Generates code for a question.
Args:
question: The question to use.
parsed_schema: The schema for the question parsed in the given language.
input_order: The ordering of the inputs.
literal_translator: The literal translator class to use.
prompt_translator: The prompt translator class to use.
template_map: The map of templates to use.
float_precision: Float precision to use for evaluation.
double_precision: Double precision to use for evaluation.
debug: Debug mode. Default is False.
Returns:
The code to test the question.
"""
question_requirements = _determine_question_requirements(
question, parsed_schema, double_precision, float_precision)
logging.debug('question_requirements=%s', question_requirements)
test_cases = []
for io_pair in question.test_list:
test_cases.append(
literal_translator.generate_test_case_literals(
qid=question.qid,
io_pair=io_pair,
underlying_schema=parsed_schema,
input_order=input_order,
))
# Make the signature to use with the driver function.
signature, params, _ = prompt_translator.translate_type_signature(
schema=parsed_schema, input_order=input_order, use_type_annotation=False)
precision = question_requirements['precision']
type_str = 'float' if question_requirements['use_float'] else 'double'
precision = literal_translator.convert_primitive_fn(
SchemaType(type_str=type_str), precision)
evaluation_kwargs = {
'evaluation_method': question_requirements['evaluation_method'],
'precision': precision,
}
evaluation_code = template_map['EVALUATION'].render(**evaluation_kwargs)
header_code = template_map['HEADER'].render()
# Setup the template args for use with the jinja template.
template_args = {
'params': params,
'signature': signature,
'test_cases': test_cases,
'return_type': parsed_schema[data_types.EXPECTED_KEY_NAME].lang_type,
'debug': debug,
'text': question.text,
'evaluation_function': evaluation_code,
'header': header_code,
}
main_code = template_map['MAIN'].render(**template_args)
return main_code
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Result Data Types."""
import collections
import dataclasses
import enum
import json
import pathlib
import re
from typing import Any, Dict, List, Optional, Union
from absl import logging
from babelcode.data_types.command import Command
from babelcode.data_types.prediction import Prediction
class PredictionOutcome(enum.Enum):
"""Enum for holding potential outcomes for a prediction."""
PASSED = 'Passed'
TIMED_OUT = 'Timed Out'
HAD_ERROR = 'Had Error'
HAD_RUNTIME_ERROR = 'Had Runtime Error'
FAILED_TEST = 'Failed Tests'
def __str__(self) -> str:
return self.value
@dataclasses.dataclass()
class ExecutionResult:
"""The raw result of executing a prediction through the command line.
Attributes:
prediction: The prediction executed.
    commands: The commands used to execute the prediction.
stdout: The standard output of the last command ran.
stderr: The standard error of the last command ran.
return_code: The return code of the last command ran.
net_runtime: The overall runtime.
last_ran_command_idx: The index of the last command ran.
command_runtimes: The array of runtimes for each command.
command_memory: The array of memory used for each command.
had_error: If there was an error.
timed_out: If the prediction timed out.
all_commands_ran: Did all commands run.
"""
prediction: Prediction
commands: List[Command]
stdout: str
stderr: str
return_code: int
net_runtime: Optional[float]
last_ran_command_idx: int
command_runtimes: List[Optional[float]]
command_memory: List[Optional[int]]
had_error: bool = False
timed_out: bool = False
all_commands_ran: bool = False
def to_dict(self):
"""Serializes to a dictionary."""
self_dict = dataclasses.asdict(self)
# Need to do special serialization of Prediction.
self_dict.pop('prediction')
self_dict['commands'] = [c.to_dict() for c in self.commands]
prediction_dict = self.prediction.to_dict()
return {**prediction_dict, **self_dict}
@classmethod
def from_dict(cls, execution_dict: Dict[str, Any]) -> 'ExecutionResult':
"""Creates an execution result from a dict."""
pred_keys = [
'id',
'qid',
'lang',
'code',
'entry_fn_name',
'entry_cls_name',
'file_path',
]
pred_dict = {k: execution_dict.pop(k) for k in pred_keys}
file_path = pathlib.Path(pred_dict.pop('file_path')).absolute()
commands = execution_dict.pop('commands')
commands = [Command.from_dict(c) for c in commands]
return cls(
prediction=Prediction.from_dict(pred_dict, file_path,
pred_dict['lang']),
commands=commands,
**execution_dict,
)
def __post_init__(self):
self.all_commands_ran = self.last_ran_command_idx + 1 == len(self.commands)
@property
def key(self):
return f'{self.prediction.qid}/{self.prediction.id}'
def read_execution_results_from_file(
file_path,) -> Dict[str, Dict[str, ExecutionResult]]:
"""Yiels execution results from a file."""
parsed = collections.defaultdict(dict)
line_num = 0
# Execution results could have been corrupted if interrupted during saving,
# so we check if they are parsable, and if not discard them.
for line in file_path.open():
line_num += 1
try:
parsed_line = json.loads(line)
result = ExecutionResult.from_dict(parsed_line)
parsed[result.prediction.lang][result.key] = result
except json.JSONDecodeError:
logging.info('Line %d had json error.', line_num)
continue
except (KeyError, TypeError) as e:
logging.info('Line %d had key error %s', line_num, e)
continue
logging.info('Read %d lines.', line_num)
  # Rewrite the file so that any corrupted entries are dropped.
with file_path.open('w') as f:
for result_dict in parsed.values():
for result in result_dict.values():
f.write(json.dumps(result.to_dict()) + '\n')
return parsed
GET_TC_REGEX = re.compile(r'^TEST-(.+)\.\.\.(.+)$', flags=re.MULTILINE)
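# Example of the stdout lines this regex parses (hypothetical output):
#
#   GET_TC_REGEX.findall('TEST-0...PASSED\nTEST-1...FAILED')
#   # -> [('0', 'PASSED'), ('1', 'FAILED')]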
@dataclasses.dataclass
class PredictionResult:
"""The parsed result for a given prediction.
Attributes:
qid: The question the prediction is for.
id: The id of the prediction.
lang: The language of the prediction.
code: The code of the prediction.
outcome: The outcome of the evaluation.
test_case_results: The results for each test case.
num_tc_passed: The number of test cases passed.
num_tc: The number of total test cases.
all_commands_ran: Did all commands run.
final_command_runtime: The runtime of the last command.
final_command_memory: The memory of the last command.
net_runtime: The overall runtime.
command_runtimes: The array of runtimes for each command.
command_memory: The array of memory used for each command.
stderr: The standard error from executing the prediction.
"""
qid: str
id: str
lang: str
code: str
outcome: PredictionOutcome
test_case_results: Dict[str, str]
num_tc_passed: int
num_tc: int
all_commands_ran: bool
final_command_runtime: float
final_command_memory: int
net_runtime: Optional[float]
command_runtimes: List[Optional[float]]
command_memory: List[Optional[int]]
stderr: str
@classmethod
def from_execution_result(
cls, execution_result: ExecutionResult,
question_info: Dict[str, Any]) -> 'PredictionResult':
"""Create a prediction execution_result from an Execution Result.
Args:
execution_result: The execution execution_result dict.
question_info: The question information.
Returns:
A new PredictionResult
"""
outcome = PredictionOutcome.PASSED
if execution_result.return_code != 0:
outcome = PredictionOutcome.HAD_ERROR
elif execution_result.had_error:
outcome = PredictionOutcome.HAD_ERROR
elif execution_result.timed_out:
outcome = PredictionOutcome.TIMED_OUT
elif not execution_result.stdout:
outcome = PredictionOutcome.HAD_ERROR
test_cases_results = {}
failed_a_test_case = False
missing_test_case = False
had_runtime_error = False
num_passed = 0
for match in GET_TC_REGEX.findall(execution_result.stdout):
idx, test_result = match
if idx in question_info['test_case_ids']:
test_cases_results[idx] = test_result
for tid in question_info['test_case_ids']:
tc_result = test_cases_results.get(tid, 'MISSING')
if tc_result == 'MISSING':
test_cases_results[tid] = 'MISSING'
missing_test_case = True
elif tc_result == 'FAILED':
failed_a_test_case = True
elif tc_result != 'PASSED':
had_runtime_error = True
else:
num_passed += 1
if outcome == PredictionOutcome.PASSED:
# We only ever need to add in 'MISSING' if there was an error for some
# reason.
if missing_test_case:
outcome = PredictionOutcome.HAD_ERROR
elif had_runtime_error:
outcome = PredictionOutcome.HAD_RUNTIME_ERROR
elif failed_a_test_case:
# We only want to do more fail checking in the case that we do not
# already have a failure.
outcome = PredictionOutcome.FAILED_TEST
last_command = execution_result.last_ran_command_idx
last_runtime = execution_result.command_runtimes[last_command]
last_memory_used = execution_result.command_memory[last_command]
return cls(
qid=execution_result.prediction.qid,
id=execution_result.prediction.id,
lang=execution_result.prediction.lang,
code=execution_result.prediction.code,
outcome=outcome,
test_case_results=test_cases_results,
num_tc_passed=num_passed,
num_tc=len(test_cases_results),
all_commands_ran=execution_result.all_commands_ran,
net_runtime=execution_result.net_runtime,
stderr=execution_result.stderr,
command_runtimes=execution_result.command_runtimes,
command_memory=execution_result.command_memory,
final_command_runtime=last_runtime,
final_command_memory=last_memory_used,
)
def to_dict(self):
out = dataclasses.asdict(self)
out['language'] = out.pop('lang')
# For json conversion later.
out['outcome'] = out['outcome'].value
return out
RESERVED_ATTRIBUTES = [
'num_tc_passed',
'outcome',
'test_case_results',
]
@dataclasses.dataclass
class QuestionResult:
"""Class to hold aggregate results for a single question.
Attributes:
id: The question id this is for.
lang: The language this is for.
num_test_cases: The number of test cases in the question.
    num_predictions: The number of predictions seen.
tracked_attributes: The attributes of PredictionResult to track.
specific_test_results: The counters for each test case.
results: The overall aggregate results.
"""
id: str
lang: str
num_test_cases: int
num_predictions: int = 0
tracked_attributes: List[str] = dataclasses.field(default_factory=list)
specific_test_results: Dict[str, Dict[str, int]] = dataclasses.field(
default_factory=lambda: collections.defaultdict(collections.Counter))
results: Dict[str, List[int]] = dataclasses.field(
default_factory=lambda: collections.defaultdict(list))
  def __post_init__(self) -> None:
"""Remove reserved attrs from those tracked."""
for k in RESERVED_ATTRIBUTES:
if k in self.tracked_attributes:
self.tracked_attributes.remove(k)
def __len__(self) -> int:
"""Gets the number of predictions."""
return self.num_predictions
def get_vals_for_idx(self, idx: int) -> Dict[str, Any]:
"""Gets the results for an prediction idx."""
out = {}
for m, v in self.results.items():
try:
out[m] = v[idx]
except IndexError as e:
raise IndexError(f'Result {m} had index error with {idx=}') from e
return out
def update_with_result(self, pred_result: PredictionResult) -> None:
"""Updates the Question result with a prediction result.
Args:
      pred_result: The prediction result to add.
"""
self.num_predictions += 1
for outcome in PredictionOutcome:
self.results[outcome].append(pred_result.outcome == outcome)
for attr_name in self.tracked_attributes:
self.results[attr_name].append(getattr(pred_result, attr_name))
self.results['num_tc_passed'].append(pred_result.num_tc_passed)
for test_idx in range(self.num_test_cases):
test_result = pred_result.test_case_results.get(str(test_idx), 'MISSING')
self.specific_test_results[str(test_idx)][test_result] += 1
def count_result(self, name: Union[str, PredictionOutcome]) -> int:
"""Gets the number of results with name."""
return sum(self.results[name])
def has_result(self, name: Union[str, PredictionOutcome]) -> bool:
"""Checks if there is a result with the name."""
return bool(self.count_result(name))
def padded(self, name: Union[str, PredictionOutcome], max_len: int,
value: Any) -> List[Any]:
"""Pads the result with a value.
Args:
name: The name of the result to pad.
      max_len: The length to pad to.
      value: The value to pad with.
    Returns:
      The result list for `name` padded to `max_len` with `value`.
return self.results[name] + [
value for _ in range(max_len - self.num_predictions)
]
@classmethod
def from_pred_results(
cls,
qid: str,
lang: str,
num_test_cases: int,
pred_result_list: List[PredictionResult],
tracked_attributes: List[str],
) -> 'QuestionResult':
"""Create a QuestionResult from a list of pred results.
Args:
qid: The question id.
lang: The language.
num_test_cases: The number of test cases for the question.
pred_result_list: The list of prediction results.
tracked_attributes: The list of attributes of PredictionResult to track.
Returns:
The question result object.
"""
out = cls(
id=qid,
lang=lang,
num_test_cases=num_test_cases,
tracked_attributes=tracked_attributes,
)
# Update the results with the list of pred results.
for pred_result in pred_result_list:
out.update_with_result(pred_result=pred_result)
return out
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command data type."""
import dataclasses
from typing import Any, Dict, List
@dataclasses.dataclass
class Command:
"""The bash command to execute along with the timeout to use with it.
The command needs to be formatted as a list of arguments for it to work with
the subprocess shell. For example the command `rm -rf dir` would be passed as
`["rm","-rf","dir"]`.
Attributes:
command: The string command to run.
timeout: The timeout to use with the command. Defaults to 10.
"""
command: List[str]
timeout: int = 10
def to_dict(self) -> Dict[str, Any]:
"""Converts a command to a dict."""
return dataclasses.asdict(self)
@classmethod
def from_dict(cls, command_dict: Dict[str, Any]) -> 'Command':
"""Converts a dict to a command."""
return cls(**command_dict)
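# Example: the shell command `python3 main.py --debug` with a 30 second timeout
# would be represented as:
#
#   cmd = Command(command=['python3', 'main.py', '--debug'], timeout=30)
#   cmd.to_dict()
#   # -> {'command': ['python3', 'main.py', '--debug'], 'timeout': 30}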
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Question data type."""
import copy
import dataclasses
import json
import pathlib
from typing import Any, Dict, List, Optional, Tuple, Union
from absl import logging
from babelcode import utils
class IOPairError(Exception):
"""Error when parsing or using an IO Pair."""
class QuestionParsingError(Exception):
"""Error with parsing a question."""
class QuestionValidationError(Exception):
"""Error with validating a question."""
# The required keys for a question dict.
REQUIRED_KEYS = ['qid', 'title', 'schema', 'test_list', 'entry_fn_name']
EXPECTED_KEY_NAME = 'EXPECTED_OUTPUT_TYPE'
@dataclasses.dataclass
class Question:
"""Dataclass for a programming question from a dataset.
Attributes:
    qid: The question id.
    title: The title of the question.
    schema: The schema for the question.
    test_list: The test cases for the question.
entry_fn_name: The default entry function name to use.
entry_cls_name: The default entry class name to use for the question if the
language requires it (i.e. Java)
text: The natural language description for the question.
allow_arbitrary_order: Allow results to be arbitrary ordered.
use_type_annotation: Use type annotation when generating prompts.
metadata: The metadata dict for the question.
challenge_test_list: The list of challenge test cases.
solutions: The mapping of languages to the solution code, if it exists.
"""
qid: str
title: str
schema: Dict[str, Union[List[Dict[str, str]], Dict[str, str]]]
# The full type would be something along the lines of:
# `List[Dict[str,Union[bool,float,str,int,List[Union[bool,float,str,int,Dict...]]]]]`.
# As types such as Lists and Maps can be nested to depth `n`, there is not
# really a good type to put but `Any`.
test_list: List[Dict[str, utils.TestValueType]]
entry_fn_name: str
entry_cls_name: str = 'Solution'
text: Optional[str] = None
allow_arbitrary_order: bool = False
use_type_annotation: bool = False
metadata: Dict[str, Any] = dataclasses.field(default_factory=dict)
challenge_test_list: List[Dict[str, utils.TestValueType]] = dataclasses.field(
default_factory=list)
solutions: Optional[Dict[str, str]] = dataclasses.field(default_factory=dict)
def __len__(self):
return len(self.test_list)
def __iter__(self):
for test in self.test_list:
yield test
@classmethod
def from_dict(cls,
input_dict: Dict[str, Any],
allow_arbitrary_order: bool = False) -> 'Question':
"""Create a question object from a dictionary.
Args:
input_dict: The dictionary to create a question for.
allow_arbitrary_order: Use arbitrary ordering for checking
Raises:
QuestionParsingError: if the question cannot be parsed from the dict.
Returns:
The parsed question object.
"""
missing_keys = [k for k in REQUIRED_KEYS if k not in input_dict]
if missing_keys:
raise QuestionParsingError(f'Missing required keys: {missing_keys}')
qid = str(input_dict['qid'])
title = input_dict['title']
logging.info('Creating Question with id %s and title "%s"', qid, title)
raw_schema = input_dict['schema']
if isinstance(raw_schema, dict):
missing_keys = [k for k in ['params', 'return'] if k not in raw_schema]
if missing_keys:
raise QuestionParsingError(
f'Question {qid} is missing keys {missing_keys}')
for i, arg in enumerate(raw_schema['params']):
missing_keys = [k for k in ['name', 'type'] if k not in arg]
if missing_keys:
raise QuestionParsingError(
f'Argument {i} of Question {qid} is missing keys {missing_keys}')
if 'type' not in raw_schema['return']:
raise QuestionParsingError(
f'Question {qid} is missing "type" key in return.')
else:
      raise QuestionParsingError(
          f'"schema" must be a dict, not {type(raw_schema).__name__}.')
test_list = input_dict['test_list']
return cls(
qid=qid,
title=title,
schema=raw_schema,
test_list=test_list,
allow_arbitrary_order=allow_arbitrary_order,
entry_fn_name=input_dict['entry_fn_name'],
entry_cls_name=input_dict.get('entry_cls_name', 'Solution'),
text=input_dict.get('text', None),
use_type_annotation=input_dict.get('use_type_annotation', False),
metadata=input_dict.get('metadata', {}),
challenge_test_list=input_dict.get('challenge_test_list', []),
solutions=input_dict.get('solutions', {}),
)
def __str__(self) -> str:
"""Converts question to a minimal string."""
return f'{self.qid}: {self.title}'
def to_dict(self) -> Dict[str, Any]:
"""Converts the question to a dict."""
self_dict = dataclasses.asdict(self)
self_dict['test_case_ids'] = [str(t['idx']) for t in self.test_list]
return self_dict
def copy(self) -> 'Question':
"""Copies the question to a new object."""
return Question(
qid=self.qid,
title=self.title,
schema=copy.deepcopy(self.schema),
test_list=copy.deepcopy(self.test_list),
entry_fn_name=self.entry_fn_name,
entry_cls_name=self.entry_cls_name,
text=self.text,
metadata=copy.deepcopy(self.metadata),
challenge_test_list=copy.deepcopy(self.challenge_test_list),
allow_arbitrary_order=self.allow_arbitrary_order,
use_type_annotation=self.use_type_annotation,
)
def change_var_names(self, name_map: Dict[str, str]) -> None:
"""Changes the variable name by updating the tests and schema.
Args:
name_map: The mapping of old names to new names.
Raises:
QuestionValidationError: If the old name is missing.
"""
def _update_test(test_dict):
for old_name, new_name in name_map.items():
if old_name not in test_dict['inputs']:
raise QuestionValidationError(
            f'Test case {test_dict["idx"]} in question {self.qid} does not'
f' have input {old_name}')
test_dict['inputs'][new_name] = test_dict['inputs'].pop(old_name)
return test_dict
for i in range(len(self.test_list)):
self.test_list[i] = _update_test(self.test_list[i])
for i in range(len(self.challenge_test_list)):
self.challenge_test_list[i] = _update_test(self.challenge_test_list[i])
logging.info('Renaming %s variables with map=%s', self.qid, name_map)
found = set()
for i, arg in enumerate(self.schema['params']):
if arg['name'] in name_map:
old_name = arg['name']
self.schema['params'][i]['name'] = name_map[old_name]
found.add(old_name)
    if found != set(name_map.keys()):
      raise QuestionValidationError(
          f'Could not find variable(s) {set(name_map.keys()).difference(found)}'
      )
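# A minimal sketch of constructing a Question (hypothetical schema and tests):
#
#   question = Question.from_dict({
#       'qid': '1',
#       'title': 'Add One',
#       'schema': {'params': [{'name': 'x', 'type': 'integer'}],
#                  'return': {'type': 'integer'}},
#       'test_list': [{'idx': 0, 'inputs': {'x': 1}, 'outputs': 2}],
#       'entry_fn_name': 'add_one',
#   })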
def read_input_questions(
input_path: pathlib.Path,
) -> Tuple[List[Question], List[Tuple[Dict[str, Any], Exception]]]:
"""Read and parse questions from an input file.
  This reads and parses the questions from a given json lines file. If it fails
  to parse a given line, it adds that line and the error to the list of failed
  lines and returns them along with the successfully parsed questions.
Args:
input_path: The path the to the questions json lines file.
Raises:
json.JSONDecodeError: The line is not valid JSON.
Returns:
The list of questions.
"""
logging.info('Reading questions from file "%s"', input_path)
found_questions = []
failed_line_dicts = []
for line_number, raw_line in enumerate(input_path.open('r')):
# Because we are reading a json lines file, we check to try-except the
# decoding of the line so we can provide better debugging information to the
# user. Otherwise, using map(json.loads,file) would say that the line number
# where the error occurred is always 1 as it only ever sees a single line at
# a time.
try:
line = json.loads(raw_line)
except json.JSONDecodeError as e:
logging.exception('Line %s is not valid JSON for reason "%s"',
line_number, e)
      raise json.JSONDecodeError(
          f'Invalid JSON line: {line_number}, error={e}',
          doc=raw_line,
          pos=line_number,
      )
try:
found_questions.append(Question.from_dict(line, False))
    # In the case a line is missing keys, we do not want to cause the entire
    # program to fail, so we add it to the list of lines that failed.
    # `Question.from_dict` raises a `QuestionParsingError` when a required key
    # is missing, which is easier to catch here in one place than checking
    # every required key with `if x in line ... else raise` at each call site.
except QuestionParsingError as e:
failed_line_dicts.append((line, e))
logging.warning('Line %s failed to parse with reason %s', line_number, e)
return found_questions, failed_line_dicts
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Init for data types."""
from babelcode.data_types.command import Command
from babelcode.data_types.prediction import Prediction
from babelcode.data_types.question import EXPECTED_KEY_NAME
from babelcode.data_types.question import IOPairError
from babelcode.data_types.question import Question
from babelcode.data_types.question import QuestionParsingError
from babelcode.data_types.question import QuestionValidationError
from babelcode.data_types.question import read_input_questions
from babelcode.data_types.result_types import ExecutionResult
from babelcode.data_types.result_types import PredictionOutcome
from babelcode.data_types.result_types import PredictionResult
from babelcode.data_types.result_types import QuestionResult
from babelcode.data_types.result_types import read_execution_results_from_file
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Prediction data type."""
import dataclasses
import pathlib
from typing import Dict, Optional, Union
@dataclasses.dataclass(frozen=True)
class Prediction:
"""A prediction program to run for a given question.
  Each prediction gets its own object that stores attributes about it, such as
  where the raw code is located and the prediction id. It is frozen so that it
  can never be changed later in the pipeline.
Attributes:
id: The id of this prediction.
qid: The id of the question this prediction is for.
lang: The name of the language this prediction is in.
code: The predicted program.
file_path: The file path where the full testing code along with the program
are saved.
entry_fn_name: If this is not None, the name of the function to use as the
entry point for the prediction.
entry_cls_name: If this is not None, the class name to use as the entry
point for this prediction.
"""
id: str
qid: str
lang: str
code: str
file_path: pathlib.Path
entry_fn_name: Optional[str] = None
entry_cls_name: Optional[str] = None
@classmethod
def from_dict(cls, pred_dict: Dict[str, Union[str, int]],
file_path: pathlib.Path, default_language: str) -> 'Prediction':
"""Creates a prediction object from a dictionary.
Args:
pred_dict: The dictionary for the prediction.
file_path: The path where the full testing code is located at.
default_language: If no language is present in the dictionary, use this
language.
Returns:
The created prediction object.
"""
return cls(id=str(pred_dict['id']),
qid=str(pred_dict['qid']),
lang=pred_dict.get('language', default_language),
code=pred_dict['code'],
entry_fn_name=pred_dict.get('entry_fn_name', None),
entry_cls_name=pred_dict.get('entry_cls_name', None),
file_path=file_path)
def to_dict(self) -> Dict[str, Union[str, int, None]]:
"""Serializes the prediction to a dict.
Returns:
The dictionary version of the prediction with file_path serialized
properly.
"""
self_dict = dataclasses.asdict(self)
# pathlib.Path is not JSON serializable
self_dict['file_path'] = str(self.file_path.resolve().absolute())
return self_dict
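# A minimal sketch of a prediction round trip (hypothetical values):
#
#   pred = Prediction.from_dict(
#       {'id': '0', 'qid': '12', 'code': 'def solve(): ...'},
#       file_path=pathlib.Path('preds/12_0.py'),
#       default_language='Python')
#   pred.to_dict()['file_path']  # absolute path string, JSON serializable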
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Initialization of the Code Generation module."""
from babelcode.translation.literal_translator import LiteralTranslator
from babelcode.translation.primitive_translator import convert_float
from babelcode.translation.primitive_translator import convert_string
from babelcode.translation.primitive_translator import make_primitive_translator
from babelcode.translation.prompt_translator import PromptTranslator
from babelcode.translation.utils import escape_cpp_like_comment_chars
from babelcode.translation.utils import escape_triple_quotes
from babelcode.translation.utils import format_cpp_like_docstring
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for language specific prompt+signature translation."""
import re
from typing import Callable, Dict, List, Optional, Tuple
import jinja2
from babelcode import data_types
from babelcode import schema_parsing
from babelcode import utils
SchemaMapType = schema_parsing.SchemaMapType
SchemaType = schema_parsing.SchemaType
SchemaValueType = schema_parsing.SchemaValueType
Question = data_types.Question
class PromptTranslator:
"""Base prompt translator class for translating signatures and prompts.
Attributes:
lang_name: The name of the language.
naming_convention: The naming convention to use.
escape_fn: Callable that takes in a string and replaces language specific
characters with "\\{CHARACTER}".
"""
def __init__(self,
lang_name: str,
naming_convention: utils.NamingConvention,
escape_fn: Optional[Callable[[str], str]] = None) -> None:
"""Initialize the PromptTranslator class."""
self.naming_convention = naming_convention
self.lang_name = lang_name
self.escape_fn = escape_fn or (lambda s: s)
@property
def word_replacement_map(self) -> Dict[str, str]:
"""The mapping of source language specific words to the target language.
    For example, the C++ translator would want to change the word 'list' to
    'vector'; thus the word_replacement_map would be {'vector': ['list']}.
    The values must be a list of words, with the key being the word to
    replace them with. Casing does not matter.
Returns:
Dict[str, str]: The mapping of words to replace in the prompt.
"""
raise NotImplementedError
@property
def signature_template(self) -> str:
"""The jinja template to create a function signature.
The template must have the following inputs:
- `entry_fn_name`: The name of the function.
- `signature`: The argument signature for the function.
- `return_type`: The return type of the function.
- `docstring`: The location of the docstring with respect to the function.
This must also handle the case when docstring is None.
The optional arguments are:
- `entry_cls_name`: The name of the entry class.
- `params`: The list of parameter names.
Returns:
The string jinja2 template for creating a signature.
"""
raise NotImplementedError
def clean_docstring_for_lang(self, docstring: str) -> str:
"""Cleans the docstring for a given language.
By "cleaning", we mean removing/escaping characters that could cause errors.
Args:
docstring (str): The raw docstring.
Returns:
str: The cleaned docstring for the language.
"""
raise NotImplementedError()
def translate_entry_function_name(
self,
entry_fn_name: str,
) -> str:
"""Translates the function name to the proper convention.
Args:
entry_fn_name (str): The original function name.
Returns:
str: The function name with proper formatting.
"""
return utils.format_str_with_convention(self.naming_convention,
entry_fn_name)
def translate_entry_cls_name(self, entry_cls_name: str) -> str:
"""Translates the name of the entry class for a language.
Args:
entry_cls_name (str): The name of the entry class.
Returns:
The translated entry name.
"""
return entry_cls_name
def format_docstring_for_lang(self, docstring: str) -> str:
"""Formats a docstring to a language's syntax.
Args:
docstring (str): The original docstring.
Returns:
The formatted docstring.
"""
raise NotImplementedError()
def format_signature(self, signature_args: List[str]) -> str:
return ', '.join(signature_args)
def translate_signature_argument_to_lang(self, arg_name: str,
arg_type: SchemaType,
use_type_annotation: bool) -> str:
"""Translates a single argument name of a function signature to a language.
Args:
arg_name (str): The name of the argument.
arg_type (SchemaType): Its generic type.
use_type_annotation (bool): If the language does not require type
annotations, this flag forces the use of them.
Returns:
The formatted argument for the signature.
"""
raise NotImplementedError()
def translate_argument_name_to_lang(self, arg_name: str) -> str:
"""Translates a single argument name to a language.
    This differs from translating a signature argument, as it is used for bare
    parameter names, e.g. in languages like Haskell where the signature
    arguments and their types are declared separately.
Args:
arg_name (str): The name of the argument.
Returns:
The translated argument.
"""
return arg_name
def translate_signature_returns_to_lang(self, return_type: SchemaType,
use_type_annotation: bool) -> str:
"""Translates the return type of a signature for a language.
Args:
return_type (SchemaType): The generic type the function returns.
use_type_annotation (bool): If the language does not require type
annotations, this flag forces the use of them.
Returns:
The translated return type.
"""
raise NotImplementedError()
def translate_prompt(self, source_language: str, prompt: str,
entry_fn_name: str) -> str:
"""Translates a prompt to a language.
This function replaces words from the word replacement map and replaces any
examples with the function name present to the proper casing for the
language.
    For example, if the language uses Pascal casing, a prompt containing
    "entry_fn(...)" will become "EntryFn(...)".
Args:
source_language (str): The source language of the prompt. This is used
for replacing specific language names (i.e. Python) with the name of
the language this translator is for.
prompt (str): The prompt to translate.
entry_fn_name (str): The name of the entry function from the prompt.
Returns:
The translated prompt.
"""
new_entry_name = self.translate_entry_function_name(entry_fn_name)
prompt = prompt.replace(entry_fn_name, new_entry_name)
formatting_functions = [str.title, str.lower, str.upper]
# First replace all of the source language occurrences in the prompt
for format_fn in formatting_functions:
prompt = prompt.replace(format_fn(source_language),
format_fn(self.lang_name))
# Then go through the list of words to replace and replace them.
replace_regex = r'( ?(?:a|an)?(?:^| )(?:__WORDS__)s?)(?=[ ,.])'
for format_fn in formatting_functions:
for target, original in self.word_replacement_map.items():
regex_words = '|'.join(map(format_fn, original))
new_word = format_fn(target)
        needs_an = any(
            new_word.lower().startswith(v) for v in ['a', 'e', 'i', 'o', 'u'])
word_regex = replace_regex.replace('__WORDS__', regex_words)
matches = set(re.findall(word_regex, prompt))
for found in sorted(list(matches), key=len, reverse=True):
replacement_word = new_word
if found.startswith('a ') or found.startswith('an '):
replacement_word = f'a{"n" if needs_an else ""} {new_word}'
elif found.startswith(' '):
replacement_word = f' {new_word}'
if found.endswith('s') and not replacement_word.endswith('s'):
replacement_word += 's'
prompt = prompt.replace(found, replacement_word)
return self.clean_docstring_for_lang(prompt)
  def translate_type_signature(
      self, schema: SchemaMapType, input_order: List[str],
      use_type_annotation: bool) -> Tuple[str, List[str], str]:
    """Translates the type signature for a function's arguments and return.
    Args:
      schema (SchemaMapType): The schema of the function.
      input_order (List[str]): The order of arguments.
      use_type_annotation (bool): Use type annotations for this function.
    Returns:
      The formatted signature string, the list of argument names, and the
      translated return type.
    """
signature = []
arguments = []
for arg_name in input_order:
arguments.append(self.translate_argument_name_to_lang(arg_name))
signature.append(
self.translate_signature_argument_to_lang(arg_name, schema[arg_name],
use_type_annotation))
return_type = self.translate_signature_returns_to_lang(
schema[data_types.EXPECTED_KEY_NAME], use_type_annotation)
return self.format_signature(signature), arguments, return_type
def translate_signature(self,
entry_fn_name: str,
entry_cls_name: str,
schema: SchemaMapType,
input_order: List[str],
use_type_annotation: bool,
docstring: Optional[str] = None) -> str:
"""Translates an entire signature.
Args:
entry_fn_name (str): The name of the entry function.
entry_cls_name (str): The name of the entry class.
schema (SchemaMapType): The schema for the function.
input_order (List[str]): The order of arguments.
use_type_annotation (bool): Use type annotation for this function.
docstring (Optional[str], optional): If passed, this will be used for a
docstring. Defaults to None.
Returns:
The translated signature rendered from the template.
"""
# First translate the entry function and class.
entry_fn_name = self.translate_entry_function_name(entry_fn_name)
entry_cls_name = self.translate_entry_cls_name(entry_cls_name)
signature, arguments, return_type = self.translate_type_signature(
schema, input_order, use_type_annotation)
    # The template itself is responsible for handling a None docstring;
    # StrictUndefined makes any other missing template variable raise
    # immediately.
    template = jinja2.Template(self.signature_template,
                               undefined=jinja2.StrictUndefined)
return template.render(entry_fn_name=entry_fn_name,
entry_cls_name=entry_cls_name,
signature=signature,
return_type=return_type,
params=arguments,
docstring=docstring)
def translate_signature_with_docstring(self, source_language: str,
docstring: str, entry_fn_name: str,
entry_cls_name: str,
schema: SchemaMapType,
input_order: List[str],
use_type_annotation: bool) -> str:
"""Translates the signature and prompt as a docstring.
Args:
source_language (str): The source language of the prompt.
      docstring (str): The prompt text to translate and embed as the
        docstring.
entry_fn_name (str): The name of the entry function.
entry_cls_name (str): The name of the entry class.
schema (SchemaMapType): The schema for the function.
input_order (List[str]): The order of arguments.
use_type_annotation (bool): Use type annotation for this function.
Returns:
The translated signature with docstring.
"""
docstring = self.translate_prompt(source_language=source_language,
prompt=docstring,
entry_fn_name=entry_fn_name)
docstring = docstring.replace('\\', '\\\\')
docstring = self.escape_fn(docstring)
docstring = self.format_docstring_for_lang(docstring)
return self.translate_signature(entry_fn_name=entry_fn_name,
entry_cls_name=entry_cls_name,
schema=schema,
input_order=input_order,
use_type_annotation=use_type_annotation,
docstring=docstring)
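# A minimal illustrative sketch (hypothetical, not part of the library) of how
# a concrete subclass fills in the abstract pieces. The template syntax and
# replacement words below are invented for demonstration only.
#   class ExamplePromptTranslator(PromptTranslator):
#     @property
#     def word_replacement_map(self):
#       return {'vector': ['list', 'array']}
#     @property
#     def signature_template(self):
#       return ('{%- if docstring is not none -%}{{ docstring }}\n{%- endif -%}'
#               'fn {{ entry_fn_name }}({{ signature }}) {{ return_type }}')
#     def clean_docstring_for_lang(self, docstring):
#       return docstring
#     def format_docstring_for_lang(self, docstring):
#       return '\n'.join(f'// {line}' for line in docstring.splitlines())
#     def translate_signature_argument_to_lang(self, arg_name, arg_type,
#                                              use_type_annotation):
#       return arg_name
#     def translate_signature_returns_to_lang(self, return_type,
#                                             use_type_annotation):
#       return str(return_type)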
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for translation."""
def escape_triple_quotes(seq: str) -> str:
"""Escapes the triple quotes sequence."""
return seq.replace('"""', '\\"\\"\\"')
def escape_cpp_like_comment_chars(seq: str) -> str:
"""Escapes the characters for C++ like comments."""
seq = seq.replace('*/', '\\*/')
seq = seq.replace('/*', '/\\*')
seq = seq.replace('/', '\\/')
return seq
def format_cpp_like_docstring(docstring: str, join_seq: str = '\n') -> str:
  """Formats a docstring as a C++ style block comment.
  Args:
    docstring: The docstring to format.
    join_seq: The sequence used to join the comment lines.
  Returns:
    The docstring wrapped in a '/** ... */' block.
  """
  out = ['/**']
  for line in docstring.splitlines(False):
    out.append(f' * {line}')
  out.append(' */')
  return join_seq.join(out)
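# Illustrative example (not part of the library): a two-line docstring becomes
# a C++ block comment.
#   format_cpp_like_docstring('Adds one.\nReturns an int.')
#   # -> '/**\n * Adds one.\n * Returns an int.\n */'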
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of the primitive translator functions."""
import functools
from typing import Callable, Dict, Optional
from absl import logging
from babelcode import data_types
from babelcode import schema_parsing
SchemaType = schema_parsing.SchemaType
SchemaValueType = schema_parsing.SchemaValueType
ConvertFunctionType = Callable[[SchemaValueType], str]
def convert_string(value: SchemaValueType,
wrap_char: str = '"',
escape_fn: Optional[Callable[[str], str]] = None) -> str:
"""Converts a string to a language in a generic manner.
  This function is declared outside of make_primitive_translator so that
  other language implementations can make use of it if they only need to make
  minor changes.
Args:
value: The value to convert.
wrap_char: the char to wrap the result in.
escape_fn: Callable that takes in a string and replaces language specific
characters with "\\{CHARACTER}".
Returns:
The string code for the literal value of the value in a given
language.
"""
  if value is None:
    return f'{wrap_char}{wrap_char}'
  # Use repr to escape characters that could cause issues when generating the
  # literal tests, then strip the quotes that repr adds.
  str_value = repr(str(value))[1:-1]
  str_value = str_value.replace(wrap_char, '\\' + wrap_char)
# Some languages (Go for example) do not natively support \' in strings, so
# instead, replace those escaped characters with the unescaped version.
if wrap_char == '"':
str_value = str_value.replace('\\\'', '\'')
else:
str_value = str_value.replace('\\"', '"')
if escape_fn is not None:
str_value = escape_fn(str_value)
return f'{wrap_char}{str_value}{wrap_char}'
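# Illustrative examples (not part of the library):
#   convert_string('abc')   # -> '"abc"'
#   convert_string(None)    # -> '""'
#   convert_string('a', wrap_char="'")  # -> "'a'"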
def convert_float(value: SchemaValueType, suffix: str = '') -> str:
"""Converts a value to a float string with special handling of ints.
Args:
value (SchemaValueType): The value to convert.
suffix: The suffix to add at the end of the converted float.
Returns:
The float as a string.
"""
if isinstance(value, int):
value = float(value)
return f'{str(value)}{suffix}'
def make_primitive_translator(
    type_specific_overrides: Dict[str, ConvertFunctionType],  # pytype: ignore
    escape_fn: Optional[Callable[[str], str]] = None  # pytype: ignore
) -> Callable[[SchemaType, SchemaValueType], str]:
"""Creates the callable that will serve as the primitive converter.
Args:
type_specific_overrides: Any overrides for a specific primitive type.
escape_fn: The escape function to use for converting strings.
Returns:
Callable[[SchemaType, SchemaValueType], str]: The primitive converter
callable to use.
"""
logging.info('Making primitive translator...')
if escape_fn is not None:
logging.info('Escape function will be used.')
else:
logging.info('No escape function passed.')
def generic_convert(value: SchemaValueType) -> str:
"""Generic conversion function to convert a value to the literal string.
Args:
value: The value to convert.
Returns:
The string code for the literal value of the value in a given
language.
"""
return str(value)
convert_mapping = {}
string_converter = functools.partial(convert_string, escape_fn=escape_fn)
char_converter = functools.partial(convert_string,
wrap_char='\'',
escape_fn=escape_fn)
special_default_conversions = {
'string': string_converter,
'character': char_converter,
'boolean': lambda t: 'true' if t else 'false',
'float': convert_float,
'double': convert_float
}
  for generic in schema_parsing.PRIMITIVE_TYPES:
    # Check whether there is a language-specific override for this type.
if generic in type_specific_overrides:
logging.info('Override found for "%s"', generic)
convert_mapping[generic] = type_specific_overrides[generic]
else:
logging.info('Using default "%s" converter', generic)
convert_mapping[generic] = special_default_conversions.get(
generic, generic_convert)
  def primitive_converter(schema: SchemaType, value: SchemaValueType) -> str:
    """Converts a primitive value using the registered converter."""
if schema.type_str not in convert_mapping:
raise data_types.IOPairError(
f'{schema.type_str} is not a valid primitive')
return convert_mapping[schema.type_str](value)
return primitive_converter
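# Illustrative usage sketch (not part of the library): build a converter with
# a hypothetical boolean override, then convert primitive values. This assumes
# SchemaType.from_generic_type_string yields the matching type_str.
#   convert = make_primitive_translator(
#       {'boolean': lambda v: 'True' if v else 'False'})
#   int_schema = schema_parsing.SchemaType.from_generic_type_string('integer')
#   convert(int_schema, 3)  # -> '3'
#   bool_schema = schema_parsing.SchemaType.from_generic_type_string('boolean')
#   convert(bool_schema, True)  # -> 'True'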
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of the base class for translating objects to literal code representation.
"""
import json
from typing import Any, Callable, Dict, List
from absl import logging
from babelcode import data_types
from babelcode import schema_parsing
from babelcode import utils
SchemaType = schema_parsing.SchemaType
NamingConvention = utils.NamingConvention
class LiteralTranslator:
"""Base class for translating literals to specific languages."""
  def __init__(self, lang_name: str, naming_convention: utils.NamingConvention,
               convert_primitive_fn: Callable[[SchemaType, Any], str]):
    """Initializes the literal translator.
    Args:
      lang_name: The name of the language.
      naming_convention: The naming convention to use.
      convert_primitive_fn: Callable that converts a primitive value, given
        its schema type, to the literal code string.
    """
    self.lang_name = lang_name
    self.naming_convention = naming_convention
    self.convert_primitive_fn = convert_primitive_fn
def generate_test_case_literals(self, qid: str, io_pair: Dict[str, Any],
underlying_schema: Dict[str, Any],
input_order: List[str]) -> Dict[str, Any]:
"""Generates the code to initialize each argument and expected value for a test case.
Args:
      qid: The question ID.
io_pair: The IO pair to generate for.
underlying_schema: The parsed RAW schema to use.
input_order: The ordering of the parameter input.
Returns:
A dictionary with the idx, inputs, and the expected literal.
"""
def convert_with_exception_handling(arg_name, generic_type, value):
try:
return self.convert_var_to_literal(generic_type=generic_type,
value=value)
# We want to provide extra debugging info when there is an error raised
# so that we can figure out what caused it.
except Exception as e:
logging.debug('Argument "%s" from %s had unexpected error %s', arg_name,
qid, e)
logging.debug('IO Pair is %s', json.dumps(io_pair))
logging.debug('Underlying schema is %s', underlying_schema)
raise e
input_literals = []
for var in input_order:
var_type = underlying_schema[var]
var_value = schema_parsing.validate_correct_type(
var_type, io_pair['inputs'].get(var))
input_literals.append(
convert_with_exception_handling(var, var_type, var_value))
expected_literal = convert_with_exception_handling(
data_types.EXPECTED_KEY_NAME,
generic_type=underlying_schema[data_types.EXPECTED_KEY_NAME],
value=io_pair['outputs'])
return {
'idx': io_pair['idx'],
'inputs': input_literals,
'outputs': expected_literal
}
  ################################################################
  # Functions that subclasses will likely need to override.      #
  ################################################################
def format_list(self, generic_type: SchemaType,
list_values: List[Any]) -> str:
"""Formats the list of values to the code to initialize the list.
Args:
generic_type: The underlying schema type for the list.
list_values: The list of strings that are the literal initialization code
for each element of the list.
Returns:
The code to initialize a list object in the current language.
"""
# Some languages require the generic_type to initialize the list.
_ = generic_type
return f'[{", ".join(list_values)}]'
def format_set(self, generic_type: SchemaType, set_values: List[str]) -> str:
"""Formats the list of values to the code to initialize the set.
Args:
      generic_type: The underlying schema type for the set.
set_values: The list of strings that are the literal initialization code
for each element of the set.
Returns:
The code to initialize a set object in the current language.
"""
# Some languages require the generic_type to create the set.
_ = generic_type
return f'set([{", ".join(set_values)}])'
def format_map(self, key_type: SchemaType, value_type: SchemaType,
entries: List[str]) -> str:
"""Formats the map with keys and entries to the code to initialize the map.
We include the `key_type` and `value_type` for languages that require them
to initialize the map(i.e. Golang).
Args:
key_type: The SchemaType of the key_type.
value_type: The SchemaType of the value.
entries: The list of code to initialize the entries.
Returns:
The code to initialize an map object in the current language.
"""
# Some languages require either or both of the types for the keys and
# values.
_ = key_type
_ = value_type
return '{' + ', '.join(entries) + '}'
def format_map_entry(self, key: str, value: str) -> str:
"""Formats a single map entry to the literal code.
Args:
key: The code to initialize the key.
value: The code to initialize the value.
Returns:
The code to make the single entry.
"""
return f'{key}: {value}'
################################################################
# Functions to convert values to their literal representation. #
################################################################
def convert_var_to_literal(self, generic_type: SchemaType, value: Any) -> str:
"""Converts a variable to its literal string representation.
Args:
generic_type: The generic schema type of the variable.
value: The value of the variable.
Returns:
The literal string in the respective language.
    Raises:
      data_types.IOPairError: If the generic_type is not supported, or the
        value is None and the type does not allow it.
"""
# If the type is a leaf type, then we convert directly to a literal.
if (generic_type.type_str in schema_parsing.PRIMITIVE_TYPES
or generic_type.is_leaf()):
# Convert the value to null instead of empty.
if schema_parsing.allows_null(generic_type.type_str) and not value:
value = None
return self.convert_primitive_fn(generic_type, value)
if generic_type.type_str in ['list', 'set']:
return self.convert_array_like_type(generic_type, value,
generic_type.type_str == 'set')
elif generic_type.type_str == 'map':
return self.convert_map(generic_type, value)
else:
raise data_types.IOPairError(
f'{generic_type} is not a supported type by {self.lang_name}')
def convert_array_like_type(self, generic_type: SchemaType, nested_value: Any,
use_format_set: bool) -> str:
"""Converts a list to a string in a language.
Args:
generic_type: The underlying type of the object.
nested_value: The nested object to convert.
use_format_set: Use the format set method.
Raises:
data_types.QuestionValidationError: Error occurred in conversion
data_types.IOPairError: Error with the IO pair.
Returns:
The nested object literal string.
"""
    format_fn = self.format_list
    target_type = 'list'
    if use_format_set:
      target_type = 'set'
      format_fn = self.format_set
      try:
        # Deduplicate the set values. Converting back to a list afterwards
        # lets a single recursive function handle both lists and sets.
        nested_value = list(set(nested_value)) if nested_value else []
      except TypeError as e:
        raise data_types.QuestionValidationError(
            'Could not convert nested values to set') from e
def convert_nested(current_type, nested_list):
if current_type.type_str != target_type:
return self.convert_var_to_literal(current_type, nested_list)
if not current_type.elements:
raise data_types.IOPairError(
f'{current_type} does not have child but {nested_list} is nested')
out = []
for v in nested_list:
out.append(convert_nested(current_type.elements[0], v))
return format_fn(current_type, out)
if not nested_value:
return format_fn(generic_type, [])
return convert_nested(generic_type, nested_value)
def convert_map(self, generic_type: SchemaType, map_value: Dict[Any,
Any]) -> str:
"""Converts a dictionary to the language specific map code.
Args:
generic_type: The generic_type of the map.
map_value: The raw dict value.
Raises:
data_types.IOPairError: Error with the IO pair.
Returns:
The string with the literal code.
"""
    # Validate the schema first so malformed map types raise a descriptive
    # error instead of an IndexError.
    if not generic_type.elements:
      raise data_types.IOPairError(
          f'{generic_type} does not have type value but {map_value} is nested')
    if generic_type.key_type is None:
      raise data_types.IOPairError(
          f'{generic_type} does not have key_type value but {map_value} is nested'
      )
    # Format the empty dict value specifically.
    if not map_value:
      return self.format_map(generic_type.key_type, generic_type.elements[0],
                             [])
    entries = []
    for key, value in map_value.items():
      # Get the string values for the key and value to make the entry.
      key_str = self.convert_var_to_literal(
          generic_type.key_type,  # type: ignore
          key)
      value_str = self.convert_var_to_literal(generic_type.elements[0], value)
      entries.append(self.format_map_entry(key_str, value_str))
return self.format_map(generic_type.key_type, generic_type.elements[0],
entries)
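# Illustrative example (not part of the library): with the base formatting
# methods and a repr-based primitive converter, a map literal renders in a
# Python-like syntax.
#   lt = LiteralTranslator('Example', NamingConvention.SNAKE_CASE,
#                          lambda schema, value: repr(value))
#   map_type = SchemaType.from_generic_type_string('map<string;integer>')
#   lt.convert_map(map_type, {'a': 1})  # -> "{'a': 1}"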
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Init for the dataset conversion module."""
from babelcode.dataset_conversion.question_parsing import POTENTIAL_ERROR_TYPES
from babelcode.dataset_conversion.question_parsing import parse_question_dict
from babelcode.dataset_conversion.utils import PRIMITIVE_TYPES_TO_GENERIC
from babelcode.dataset_conversion.utils import AnnotationError
from babelcode.dataset_conversion.utils import convert_to_source
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for converting a question from a dataset to the correct format."""
import ast
import collections
import dataclasses
from typing import Any, Dict, List, Optional, Tuple
from absl import logging
from babelcode import data_types
from babelcode import schema_parsing
from babelcode import utils as bc_utils
from babelcode.dataset_conversion import assertion_parsing
from babelcode.dataset_conversion import utils
PROJECT_ROOT = bc_utils.PROJECT_ROOT
# List of reserved keywords across languages; not exhaustive.
RESERVED_KEYWORDS = frozenset(
    PROJECT_ROOT.joinpath(
        'data', 'reserved_keywords.txt').read_text().splitlines(False) +
    [data_types.EXPECTED_KEY_NAME])
# Define the list of potentially raised errors for use in try except.
POTENTIAL_ERROR_TYPES = (
utils.AnnotationError,
assertion_parsing.AssertionToSchemaError,
data_types.IOPairError,
assertion_parsing.LiteralParsingError,
data_types.QuestionValidationError,
data_types.QuestionParsingError,
)
def _convert_type_annotation_to_schema(type_annotation: ast.AST) -> str:
"""Converts a type annotation string to the generic values.
Args:
type_annotation: The node to convert.
Raises:
utils.AnnotationError: The type annotation is not valid.
Returns:
The generic type string.
"""
if isinstance(type_annotation, ast.Name):
if type_annotation.id in utils.PRIMITIVE_TYPES_TO_GENERIC:
return utils.PRIMITIVE_TYPES_TO_GENERIC[type_annotation.id]
raise utils.AnnotationError(f'{type_annotation.id} cannot be a leaf node')
if not isinstance(type_annotation, ast.Subscript):
raise utils.AnnotationError(
'type_annotation must be either a Subscript or Name, '
f'got {type(type_annotation).__name__}')
  # The subscript's slice can be a tuple, a name, or a nested subscript.
  value_node = type_annotation.slice
  if isinstance(value_node, ast.Tuple):
    children = value_node.elts
  elif isinstance(value_node, (ast.Name, ast.Subscript)):
    children = [value_node]
  else:
    raise utils.AnnotationError(
        'Subscripts must be either a tuple, name, or nested subscript')
children = list(map(_convert_type_annotation_to_schema, children))
node_name = type_annotation.value.id
if node_name in utils.PRIMITIVE_TYPES_TO_GENERIC:
raise utils.AnnotationError('Primitives must be leaf nodes.')
if node_name == 'Dict':
if len(children) != 2:
raise utils.AnnotationError(
f'Dicts must have 2 children, found {len(children)}')
node_name = 'map'
delimiter = ';'
elif node_name == 'Tuple':
node_name = 'tuple'
delimiter = '|'
else:
if len(children) != 1:
raise utils.AnnotationError(
f'{node_name} must have 1 child, found {len(children)}')
if node_name in ['List', 'Set']:
node_name = node_name.lower()
delimiter = ','
return f'{node_name}<{delimiter.join(children)}>'
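# Illustrative example (not part of the library): the annotation
# Dict[str, List[int]] converts to the generic string
# 'map<string;list<integer>>'.
#   fn = ast.parse('def f(x: Dict[str, List[int]]): ...').body[0]
#   _convert_type_annotation_to_schema(fn.args.args[0].annotation)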
@dataclasses.dataclass
class PotentialType:
"""Dataclass for potential types to clean up code.
Attributes:
schema: The schema type.
generic_str: The generic string for the schema type.
n: The number of occurrences found.
depth: The depth of the type.
"""
schema: schema_parsing.SchemaType
generic_str: str
n: int
depth: int
def _determine_type_to_keep(left: PotentialType,
right: PotentialType) -> PotentialType:
"""Determines which of the two potential types to keep.
Args:
left (PotentialType): Option 1.
right (PotentialType): Option 2
  Returns:
    The deeper of the two types, preferring the type whose generic string does
    not contain null.
  """
replace_potential_type = False
if left.depth > right.depth:
replace_potential_type = True
  if 'null' in right.generic_str and left.depth >= right.depth:
    # Prefer the non-null (left) type when it is at least as deep.
    replace_potential_type = True
if replace_potential_type:
left.n = right.n + 1
return left
else:
right.n += 1
return right
def _get_final_schema_type(qid: str, arg_name: str,
                           schema_list: List[PotentialType],
                           found_type: Optional[str]) -> str:
  """Determines the final generic type for a given argument.
  Args:
    qid (str): The question id.
    arg_name (str): The argument name.
    schema_list (List[PotentialType]): The list of schema types found.
    found_type (Optional[str]): The type found through annotations, if any.
  Raises:
    IOPairError: If there is an unfixable error with either test case.
  Returns:
    The generic type string to use.
  """
non_null_schemas = list(
filter(lambda s: 'null' not in s.generic_str, schema_list))
if len(non_null_schemas) > 1:
logging.error('qid=%s Found more than one potential schema type for %s',
qid, arg_name)
logging.debug('Schema types found %s',
list(map(lambda s: s.generic_str, schema_list)))
logging.debug(
'qid=%s has inconsistent types used in test cases for %s, %s',
qid,
arg_name,
','.join([t.generic_str for t in non_null_schemas]),
)
raise data_types.IOPairError('Inconsistent Types found')
if found_type is None:
if not non_null_schemas:
logging.error('qid=%s Could not find any schema type for %s', qid,
arg_name)
logging.debug('Input types are: %s', [t.generic_str for t in schema_list])
raise data_types.IOPairError('No Non-Null types found')
return non_null_schemas[0].generic_str
else:
if non_null_schemas:
found_schema_type = schema_parsing.SchemaType.from_generic_type_string(
found_type)
potential_schema_type = non_null_schemas[0].schema
potential_schema_str = non_null_schemas[0].generic_str
if 'null' in found_type:
return potential_schema_str
if not schema_parsing.is_generic_equal(potential_schema_type,
found_schema_type):
reconcile_result = schema_parsing.reconcile_type(
potential_schema_type, found_schema_type)
if reconcile_result is not None:
new_type = reconcile_result.to_generic()
logging.debug(
'Reconciled %s and %s to %s',
potential_schema_str,
found_type,
new_type,
)
return new_type
logging.error(
('qid=%s has non equal and non reconcilable types. found_type=%s'
' != potential_schema_str=%s'),
qid,
found_type,
potential_schema_str,
)
raise data_types.IOPairError('Non equal and non reconcilable types')
return found_type
def _consolidate_type(
    arg_name: str,
    potential_type: PotentialType,
    existing_type_list: List[PotentialType],
) -> List[PotentialType]:
"""Consolidate a new type into a list of existing types.
Args:
arg_name (str): The argument this is for.
potential_type (PotentialType): The new type.
existing_type_list (List[PotentialType]): The list of found types.
Returns:
The updated list of found types.
"""
schema_type_str = potential_type.generic_str
schema = potential_type.schema
for j, existing_type in enumerate(existing_type_list):
logging.debug(
'Evaluating if schema_type_str=%s == existing_type.generic_str=%s',
schema_type_str,
existing_type.generic_str,
)
if schema_parsing.is_generic_equal(schema, existing_type.schema):
logging.debug('They are equal, so determining the type to keep.')
existing_type_list[j] = _determine_type_to_keep(potential_type,
existing_type)
return existing_type_list
reconcile_result = schema_parsing.reconcile_type(schema,
existing_type.schema)
if reconcile_result is not None:
existing_type_list[j] = PotentialType(
schema=reconcile_result,
generic_str=reconcile_result.to_generic(),
n=potential_type.n + existing_type.n,
depth=reconcile_result.depth,
)
return existing_type_list
logging.debug(
'Adding new potential type schema_type_str=%s to arg_name=%s',
schema_type_str,
arg_name,
)
existing_type_list.append(potential_type)
return existing_type_list
def consolidate_schema_from_test_cases(
    qid: str,
    test_cases: Dict[int, Dict[str, Any]],
    found_args: List[str],
    found_arg_types: Dict[str, Optional[str]],
    return_type: Optional[str],
) -> Tuple[Dict[str, str], str]:
"""Consolidates the schema with the found and parsed types.
Using the found types from the type annotations and the types parsed from the
assert statements, we need to consolidate them and get the final types.
Args:
qid: The question id this is for.
    test_cases: The mapping of test case index to test case dict.
found_args: The list of arguments found.
found_arg_types: The types of arguments found from the annotations.
return_type: The type found for the return type.
Raises:
schema_parsing.SchemaTypeError: If there is an error with the schema.
data_types.IOPairError: If there is an error with the test case.
Returns:
The map of argument name to generic type string and the type string of the
return.
"""
# Keep track of schema types by argument idx and depth
arg_schema_types_found = collections.defaultdict(list)
expected_schema = []
expected_number_of_args = len(found_args)
  # Consolidate the schemas found across every test case.
for tc_id, tc in test_cases.items():
logging.debug('Validating tc_id=%s...', tc_id)
if len(tc['inputs']) != expected_number_of_args:
logging.error(
('Test case tc_id=%s of qid=%s did not have the correct number of'
' inputs'),
tc_id,
qid,
)
logging.error('Expected %s, got %d', expected_number_of_args,
len(tc['inputs']))
logging.debug('Expected arguments are %s', found_args)
logging.debug('Test case is: %s', tc)
raise data_types.IOPairError('Incorrect number of inputs')
# Go through the schema and add the types found and their depth
logging.debug('Parsing the param types')
for i, (schema_type_str, depth) in enumerate(tc['schema']['params']):
try:
schema = schema_parsing.SchemaType.from_generic_type_string(
schema_type_str)
except schema_parsing.SchemaTypeError as e:
logging.error(
'qid=%s tc_id=%s had invalid schema type string schema_type_str=%s',
qid,
tc_id,
schema_type_str,
)
logging.error('Message was %s', e)
raise e
potential_type = PotentialType(schema, schema_type_str, 1, depth)
arg_schema_types_found[i] = _consolidate_type(found_args[i],
potential_type,
arg_schema_types_found[i])
# Add the potential return type to the list of expected schemas.
logging.debug('Parsing the return type')
rtr_str = tc['schema']['returns'][0]
try:
parsed_schema_type = schema_parsing.SchemaType.from_generic_type_string(
rtr_str)
except schema_parsing.SchemaTypeError as e:
logging.error(tc['schema'])
raise e
potential_expected_type = PotentialType(parsed_schema_type, rtr_str, 1,
tc['schema']['returns'][1])
expected_schema = _consolidate_type(data_types.EXPECTED_KEY_NAME,
potential_expected_type,
expected_schema)
# Go through and assert that only one schema type was found per argument.
for arg_idx, schemas_found in arg_schema_types_found.items():
if arg_idx >= len(found_args):
logging.error('arg_idx=%d is > len(found_args)=%d', arg_idx,
len(found_args))
raise data_types.IOPairError('Found arg idx too large.')
found_arg_types[found_args[arg_idx]] = _get_final_schema_type(
qid,
found_args[arg_idx],
schemas_found,
found_arg_types[found_args[arg_idx]],
)
return_type = _get_final_schema_type(qid, 'return', expected_schema,
return_type)
return found_arg_types, return_type
def get_arguments_from_solution(
    qid: str, solution: str, entry_fn_name: str
) -> Tuple[List[str], Dict[str, Optional[str]], Optional[str]]:
"""Get the names of arguments from a given solution.
Args:
qid: The question id this is for.
solution: The solution to the problem.
entry_fn_name: The name of the function we are searching for.
Raises:
data_types.QuestionValidationError: If there is an error with the
solution tree.
data_types.QuestionParsingError: If there was an error parsing the
question.
Returns:
The argument order and any types found through annotations.
"""
solution_tree = ast.parse(solution)
target_function = None
for b in solution_tree.body:
if isinstance(b, ast.FunctionDef):
if b.name == entry_fn_name:
logging.debug('Found target function with entry_fn_name=%s',
entry_fn_name)
target_function = b
break
# The target function must be present.
if target_function is None:
logging.error('Unable to find entry function "%s"', entry_fn_name)
raise data_types.QuestionValidationError('Unable to find entry function')
# Default values are not allowed yet
if target_function.args.defaults:
raise data_types.QuestionValidationError('Defaults are not supported yet')
# Get arguments and their types if present
arg_order = []
arg_types = {}
  # Counts occurrences of each argument name so duplicates can be renamed.
  dupe_arg_counts = collections.Counter()
return_type = None
target_function: ast.FunctionDef
logging.debug('Looking for annotations...')
if target_function.returns is not None:
try:
return_type = _convert_type_annotation_to_schema(target_function.returns)
logging.debug('Found return type of "%s"', return_type)
except utils.AnnotationError as e:
logging.warning('Failed to parse return annotation for "%s"', qid)
logging.debug('Return type error was %s', e)
argument_nodes = target_function.args.args
if any(
getattr(target_function.args, v)
for v in ['posonlyargs', 'vararg', 'kwonlyargs']):
raise data_types.QuestionValidationError(
'Unsupported argument types in the solution')
if not argument_nodes:
raise data_types.QuestionParsingError('No arguments')
for arg in argument_nodes:
arg_name = arg.arg.lower()
logging.debug('Found argument "%s" at position %d', arg_name,
len(arg_order))
    if dupe_arg_counts[arg_name] > 0:
      # This name has already been deduplicated, so use the next suffix.
      dupe_arg_counts[arg_name] += 1
      arg_name = f'{arg_name}{dupe_arg_counts[arg_name]}'
    elif arg_name in arg_types:
      # First duplicate found: rename the original occurrence to '{name}0',
      # then suffix this one with the next index.
      arg_types[f'{arg_name}0'] = arg_types.pop(arg_name)
      for i in range(len(arg_order)):
        if arg_order[i] == arg_name:
          arg_order[i] = f'{arg_name}0'
          break
      dupe_arg_counts[arg_name] += 1
      arg_name = f'{arg_name}{dupe_arg_counts[arg_name]}'
arg_order.append(arg_name)
if arg.annotation is not None:
try:
arg_types[arg_name] = _convert_type_annotation_to_schema(arg.annotation)
logging.debug('%s has type %s', arg_name, arg_types[arg_name])
except utils.AnnotationError as e:
logging.warning('failed to parse annotation for %s', arg_name)
logging.debug('Error for %s was %s', arg_name, e)
arg_types[arg_name] = None
else:
arg_types[arg_name] = None
# Go through and change the argument names if they are a reserved keyword.
for i, arg in enumerate(arg_order):
if arg in RESERVED_KEYWORDS:
logging.info('Changing argument "%s" as it is a reserved keyword', arg)
new_arg_name = f'{arg}_arg{i}'
arg_order[i] = new_arg_name
arg_types[new_arg_name] = arg_types.pop(arg)
logging.info('New argument name is "%s"', new_arg_name)
return arg_order, arg_types, return_type
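# Illustrative example (not part of the library): duplicate argument names
# (after lowercasing) are renamed with numeric suffixes, assuming neither name
# is in the reserved keywords list.
#   get_arguments_from_solution(
#       'q0', 'def f(x: int, X: int):\n  return x', 'f')
#   # -> (['x0', 'x1'], {'x0': 'integer', 'x1': 'integer'}, None)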
def parse_question_dict(qid: str, testing_code: str, solution: str,
entry_fn_name: str) -> Dict[str, Any]:
"""Parse the schema and test cases for a given question.
First looks at the function specified by `entry_fn_name` to determine the
argument names and their ordering. Then checks to see if there are
annotations. If there are no annotations, we then use the raw values in the
test case to determine the types.
Args:
qid (str): The qid of the question
testing_code (str): The testing code.
solution (str): The solution.
entry_fn_name (str): The name of the function in the solution to use for
gathering both argument names and types.
  Raises:
    data_types.IOPairError: The question had an error in the test cases that
      made it impossible to parse.
    data_types.QuestionValidationError: If the function defined by
      entry_fn_name could not be found or uses unsupported features.
Returns:
A dict with the schema, entry points, and the parsed test list.
"""
# First find and parse the target function body to determine argument name
# and order. Also check if there are annotations to use for the schema.
logging.info('Parsing question dict for question "%s"...', qid)
arg_order, arg_types, return_type = get_arguments_from_solution(
qid, solution, entry_fn_name)
  uses_type_annotation = any(v is not None for v in arg_types.values())
logging.debug('Argument order is %s', arg_order)
logging.debug('qid=%s has argument types of %s', qid, arg_types)
visitor = assertion_parsing.AssertionToSchemaVisitor(entry_fn_name)
try:
visitor.visit(ast.parse(testing_code))
except data_types.IOPairError as e:
logging.error('Failed to parse test cases from qid=%s', qid)
if str(e) == 'Imports are not supported':
# Special handling of this error so that we can explicitly mark them for
# manual fixing.
logging.error('Imports found in testing code to qid=%s', qid)
raise data_types.QuestionValidationError(
'Requires import(s) to generate tests') from e
raise e
if not visitor.test_cases:
logging.error('qid=%s does not contain any test cases', qid)
logging.debug('testing_code=%s', testing_code)
raise data_types.QuestionParsingError('No test cases were found')
logging.debug('Found %d test cases in qid=%s', len(visitor.test_cases), qid)
argument_types, return_type = consolidate_schema_from_test_cases(
qid,
test_cases=visitor.test_cases,
found_args=arg_order,
found_arg_types=arg_types,
return_type=return_type,
)
logging.debug('Final types for qid=%s are %s', qid, argument_types)
logging.debug('Final return type for qid=%s is %s', qid, return_type)
parameters = []
for v in arg_order:
parameters.append({'name': v, 'type': argument_types[v]})
schema = {'params': parameters, 'return': {'type': return_type}}
test_cases = []
for k, v in visitor.test_cases.items():
new_test_case = {'idx': k, 'outputs': v['outputs']}
new_test_case['inputs'] = {
arg: value for arg, value in zip(arg_order, v['inputs'])
}
test_cases.append(new_test_case)
return {
'schema': schema,
'test_list': test_cases,
'entry_fn_name': entry_fn_name,
'use_type_annotation': uses_type_annotation,
}
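# Illustrative sketch (not part of the library) of parsing a toy question:
#   result = parse_question_dict(
#       qid='q0',
#       testing_code='assert add(1, 2) == 3',
#       solution='def add(a: int, b: int) -> int:\n  return a + b',
#       entry_fn_name='add')
#   result['schema']['return']        # -> {'type': 'integer'}
#   result['test_list'][0]['inputs']  # -> {'a': 1, 'b': 2}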
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for the dataset conversion module."""
import ast
# Dict to hold mapping of primitives to their generic form
PRIMITIVE_TYPES_TO_GENERIC = {
'int': 'integer',
'str': 'string',
'bool': 'boolean',
'float': 'float'
}
class AnnotationError(Exception):
"""Invalid type annotation."""
def convert_to_source(node: ast.AST) -> str:
"""Convert an AST node to its respective source code.
Args:
node (ast.AST): The node to convert.
Returns:
The raw code.
"""
return ast.unparse(node).rstrip()
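# Illustrative example (not part of the library); note that ast.unparse
# requires Python 3.9+.
#   convert_to_source(ast.parse('x = 1').body[0])  # -> 'x = 1'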
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for parsing assertion statements."""
import ast
import re
from typing import List, Tuple, Union
from absl import logging
from babelcode import schema_parsing
from babelcode.dataset_conversion import utils
# The Python AST library provides a visitor class that recursively traverses
# the tree. To use it, you override the visit_<NodeName> methods. Those names
# trigger a pylint invalid-name error, but there is no way around it.
# pylint: disable=invalid-name
DOUBLE_REGEX = re.compile(r'.[0-9]{7,}')
UNSUPPORTED_TYPES = (
ast.Assign,
ast.With,
ast.For,
ast.While,
ast.FunctionDef,
ast.ClassDef,
ast.AnnAssign,
ast.ListComp,
ast.Lambda,
ast.DictComp,
ast.GeneratorExp,
)
def _write_error_logs(node, msg):
"""Write logging messages for when raising an error.
Args:
node: The ast node the error occurred at.
msg: The message of the error.
"""
source_code = utils.convert_to_source(node)
logging.error('Test case error: "%s"', msg)
logging.debug('Source code=%s', source_code)
logging.debug('AST Tree for error was %s', ast.dump(node))
class LiteralParsingError(Exception):
"""Error for if a literal is not parsable."""
class LiteralParser(ast.NodeVisitor):
"""Visitor for parsing literal objects in python.
Attributes:
depth: The current depth.
schema_type: The current schematype.
value: The current value
"""
def __init__(self):
"""Initializes the visitor."""
self.depth = 0
self.schema_type = None
self.value = None
def _make_error(self, node: ast.AST, msg: str) -> LiteralParsingError:
"""Writes debugging information and creates the error.
Args:
node: The current AST Node.
msg: The message to send.
Returns:
The LiteralParsingError to raise.
"""
_write_error_logs(node, msg)
logging.debug('value=%s', self.value)
logging.debug('schema_type=%s', self.schema_type)
logging.debug('depth=%d', self.depth)
return LiteralParsingError(msg)
def set_attributes(self, value: schema_parsing.SchemaValueType,
schema_type: str, depth: int):
"""Sets the class attributes for recursion.
Args:
value: The new self.value.
schema_type: The new self.schema_type.
depth: The new self.depth.
"""
self.depth = depth
self.schema_type = schema_type
self.value = value
def validate_types(self, schema_types: List[str]) -> str:
"""Validates that only a single type was found.
Args:
schema_types (List[str]): List of types found.
    Raises:
      LiteralParsingError: If more than one schema type is found.
Returns:
str: The single schema type found.
"""
    # Check whether any found type mentions a numeric or textual primitive;
    # if so, reconcile equivalent types (e.g. integer vs. long).
consolidate_type_found = False
for v in schema_types:
for t in ['long', 'integer', 'float', 'double', 'string', 'character']:
if t in v:
consolidate_type_found = True
break
if consolidate_type_found:
consolidated_schema_types = []
for t in schema_types:
new_type = schema_parsing.SchemaType.from_generic_type_string(t)
if not consolidated_schema_types:
consolidated_schema_types.append(new_type)
continue
to_keep = []
for other in consolidated_schema_types:
result = schema_parsing.reconcile_type(new_type, other)
if result is not None:
new_type = result
else:
to_keep.append(other)
del consolidated_schema_types
consolidated_schema_types = [*to_keep, new_type]
schema_types = list(
map(lambda t: t.to_generic(), consolidated_schema_types))
unique_types = len(set(schema_types))
# No types found, then it must be null.
if unique_types == 0:
return 'null'
# We expect 1 unique type per literal value node.
elif unique_types != 1:
raise LiteralParsingError(f'Expecting one type, got {unique_types}')
return schema_types[0]
def visit_Constant(self, node: ast.Constant):
"""Handles the constant node.
Args:
node (ast.Constant): The constant/literal node.
Raises:
LiteralParsingError: If the type of the node's value is not a primitive.
"""
self.value = node.value
self.schema_type = type(self.value).__name__
if self.value is None:
self.schema_type = 'null'
return
if self.schema_type not in utils.PRIMITIVE_TYPES_TO_GENERIC:
raise LiteralParsingError(
f'{self.schema_type} is a constant but not a primitive')
self.schema_type = utils.PRIMITIVE_TYPES_TO_GENERIC[self.schema_type]
# Check for doubles here.
if self.schema_type == 'float':
self.value = float(self.value)
if DOUBLE_REGEX.search(str(self.value)):
self.schema_type = 'double'
elif self.schema_type == 'integer':
if len(str(self.value)) > 9:
self.schema_type = 'long'
# Check for single character values.
if self.schema_type == 'string':
if len(self.value) == 1:
self.schema_type = 'character'
def _get_children(self, children_nodes: List[ast.AST],
starting_depth: int) -> Tuple[List[str], List[str], int]:
"""Gets the children types, values, and maximum depth of a given node.
Args:
children_nodes: The children nodes to traverse.
starting_depth: The starting depth.
Returns:
The values found, the schema types found, and the max depth.
"""
children_types = []
children_values = []
max_depth = starting_depth
for child in children_nodes:
self.depth = starting_depth
self.visit(child)
schema_type = schema_parsing.SchemaType.from_generic_type_string(
self.schema_type)
      has_equal_type = False
      type_to_replace = None
      for i, (k, v) in enumerate(children_types):
        if (schema_parsing.is_generic_equal(schema_type, v)
            and not schema_type.is_leaf()):
          has_equal_type = True
          # Replace the stored type when it contains null but the new, equal
          # type does not (tuple types keep their null entries).
          null_only_in_existing = ('null' in k
                                   and 'null' not in self.schema_type)
          if null_only_in_existing and 'tuple' not in self.schema_type:
            type_to_replace = i
          break
      if type_to_replace is not None:
        children_types[type_to_replace] = (self.schema_type, schema_type)
      elif not has_equal_type:
        children_types.append((self.schema_type, schema_type))
children_values.append(self.value)
max_depth = max(self.depth, max_depth)
return children_values, [v[0] for v in children_types], max_depth
def _handle_list(self, node: Union[ast.List, ast.Set], type_name: str):
"""Parses a list or set node.
Args:
node (Union[ast.List,ast.Set]): The node to parse
type_name (str): The type calling this function
"""
# Lists and Sets have the same attributes to query, so we can do
# those in one swoop.
self.depth += 1
children_values, children_types, max_depth = self._get_children(
node.elts, self.depth)
type_found = self.validate_types(children_types)
# Need to map the list of children values to floats to match the type.
if type_found in ['float', 'double']:
children_values = list(map(float, children_values))
self.set_attributes(
depth=max_depth,
schema_type=f'{type_name}<{type_found}>',
value=children_values,
)
def visit_List(self, node: ast.List) -> None:
"""Handles the list node.
Args:
node: The list node to parse.
"""
self._handle_list(node, 'list')
def visit_Set(self, node: ast.Set) -> None:
"""Handles the set node.
Args:
node: The current node.
"""
self._handle_list(node, 'set')
def visit_Dict(self, node: ast.Dict) -> None:
"""Handles the dict node.
Args:
node: The dictionary node.
Raises:
LiteralParsingError: If there are parsing issues with the dictionary.
"""
self.depth += 1
depth = self.depth
key_values, key_type, _ = self._get_children(node.keys, depth)
key_type = self.validate_types(key_type)
if key_type == 'character':
key_type = 'string'
elif key_type not in ['integer', 'string', 'boolean']:
# In the case a dictionary is actually an empty set
if key_type == 'null':
self.set_attributes(value={}, schema_type='set<null>', depth=depth)
return
raise self._make_error(node,
f'Dictionary keys cannot be of type {key_type}')
children_values, children_type, max_depth = self._get_children(
node.values, depth)
children_type = self.validate_types(children_type)
if len(children_values) != len(key_values):
raise LiteralParsingError(
'Dicts require the keys and children values have the same length.')
# Need to map the list of children values to floats to match the type.
if children_type in ['float', 'double']:
children_values = list(map(float, children_values))
schema = f'map<{key_type};{children_type}>'
self.set_attributes(
value={
k: v for k, v in zip(key_values, children_values)
},
schema_type=schema,
depth=max_depth,
)
def visit_UnaryOp(self, node: ast.UnaryOp) -> None:
"""Handles the unary op node.
Args:
node: The unary operation node.
Raises:
LiteralParsingError: if there was an error trying to apply the unary
operation to the value.
"""
depth = self.depth
children_value, children_type, max_depth = self._get_children(
[node.operand], depth)
children_type = self.validate_types(children_type)
if len(children_value) != 1:
logging.warning('Found unary op with more than 1 child value')
logging.debug('source code: %s', utils.convert_to_source(node))
children_value = children_value[0]
if isinstance(node.op, ast.USub):
try:
children_value = -1 * children_value
except ValueError as e:
raise self._make_error(node, 'Could not apply -1 * node') from e
elif isinstance(node.op, ast.Not):
children_value = not children_value
children_type = 'boolean'
else:
raise self._make_error(node,
f'Unsupported unary op {type(node.op).__name__}')
self.set_attributes(children_value, children_type, max_depth)
def visit_Tuple(self, node: ast.Tuple) -> None:
"""Handles the tuple node.
Args:
node: The tuple node to parse.
"""
self.depth += 1
children_values, children_types, max_depth = self._get_children(
node.elts, self.depth)
    if children_types:
      child_type_str = '|'.join(children_types)
    else:
      child_type_str = 'null'
    schema_type = f'tuple<{child_type_str}>'
    self.set_attributes(children_values, schema_type, max_depth)
def generic_visit(self, node: ast.AST) -> None:
"""Raises an error for unsupported ast types.
Args:
node: The current node.
Raises:
LiteralParsingError: If this function is called, it means an unsupported
type was encountered.
"""
    # _make_error logs the node's source code for debugging before raising.
raise self._make_error(node, f'{type(node).__name__} is not supported')
class AssertionToSchemaError(Exception):
"""Error for when parsing schema from an assertion fails."""
class AssertionToSchemaVisitor(ast.NodeVisitor):
"""Node visitor for getting test case values from an assertion."""
  def __init__(self, target_fn_name: str) -> None:
    """Initializes the visitor with the target function name."""
    self.target_fn = target_fn_name
self.test_cases = {}
self._input_schema = []
self._input_arguments = []
def _make_error(self, node, msg):
_write_error_logs(node, msg)
return AssertionToSchemaError(msg)
def _parse_literal(self, node):
visitor = LiteralParser()
visitor.visit(node)
return visitor.value, visitor.schema_type, visitor.depth
def visit_Assert(self, node: ast.Assert) -> None:
"""Handles the assertion AST node.
Args:
node: The assertion AST node.
Raises:
AssertionToSchemaError: If the assertion is not in the `assert f(x) == y`
format.
"""
self._input_arguments = []
self._input_schema = []
logging.debug('Found new test case at %s', utils.convert_to_source(node))
test_node = node.test
# Handling the case of assert f(Arguments) == Value
if isinstance(test_node, ast.Compare):
if not isinstance(test_node.ops[0], ast.Eq):
raise self._make_error(test_node, 'Only == is supported for operators')
if not isinstance(test_node.left, ast.Call):
raise self._make_error(
test_node, 'Only calls on the left side are currently supported')
self.visit(test_node.left)
if len(test_node.comparators) != 1:
raise self._make_error(test_node,
'The right hand side must be a single value')
output, output_type, depth = self._parse_literal(test_node.comparators[0])
output_schema = (output_type, depth)
# Handling the case of assert not f(arguments)
elif isinstance(test_node, ast.UnaryOp):
if not isinstance(test_node.op, ast.Not):
raise self._make_error(test_node,
'Only "not" is supported for unary operators')
output = False
output_schema = ('boolean', 0)
if not isinstance(test_node.operand, ast.Call):
raise self._make_error(test_node,
'When using "not", the operand must be a call')
self.visit(test_node.operand)
# Handling the case of assert f(Arguments)
elif isinstance(test_node, ast.Call):
self.visit(test_node)
output = True
output_schema = ('boolean', 0)
else:
raise self._make_error(
test_node,
f'Unexpected type of {type(test_node).__name__} for test call',
)
logging.debug('Adding test case with idx=%d', len(self.test_cases))
logging.debug('Input schema=%s', self._input_schema)
logging.debug('output schema=%s', output_schema)
self.test_cases[len(self.test_cases)] = {
'inputs': self._input_arguments,
'outputs': output,
'schema': {
'params': self._input_schema,
'returns': output_schema
},
}
def visit_Call(self, node: ast.Call) -> None:
"""Handles the call AST node.
Args:
node: The call AST Node.
Raises:
AssertionToSchemaError: If the call is not parsable.
"""
if not isinstance(node.func, ast.Name):
raise self._make_error(
node, 'The calling function must be a name (i.e. not an attribute)')
    if node.func.id != self.target_fn:
      # Not the target function, so recurse into its arguments in case the
      # target call is nested inside.
      for arg_node in node.args:
        self.visit(arg_node)
      return
    if self._input_arguments:
      raise self._make_error(
          node, 'Multiple non-nested function calls are not yet supported')
for arg_node in node.args:
arg_value, arg_type, arg_depth = self._parse_literal(arg_node)
self._input_schema.append((arg_type, arg_depth))
self._input_arguments.append(arg_value)
if not self._input_arguments:
raise self._make_error(node, 'Calls with no arguments are not supported.')
def generic_visit(self, node: ast.AST):
"""Override the generic visit to restrict what types are allowed.
Args:
node (ast.AST): The current Node
Raises:
AssertionToSchemaError: Unable to parse the node.
"""
if isinstance(node, UNSUPPORTED_TYPES):
logging.warning('"%s" is not a supported type', type(node).__name__)
logging.debug('source_code: %s', utils.convert_to_source(node))
return
if isinstance(node, (ast.Import, ast.ImportFrom)):
raise AssertionToSchemaError('Imports are not supported')
# Manually call visit_Assert here so we can catch AssertionToSchemaErrors
# and print out the calling node's source code for debugging.
if isinstance(node, ast.Assert):
try:
self.visit_Assert(node)
except AssertionToSchemaError as e:
logging.debug(
'Assertion Code that caused error: %s',
utils.convert_to_source(node),
)
raise e
else:
super().generic_visit(node)
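# Illustrative example (not part of the library): extracting a test case from
# a single assert statement.
#   visitor = AssertionToSchemaVisitor('add')
#   visitor.visit(ast.parse('assert add(1, 2) == 3'))
#   visitor.test_cases[0]['inputs']   # -> [1, 2]
#   visitor.test_cases[0]['outputs']  # -> 3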
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities Module."""
from pathlib import Path
from babelcode.utils.file_utils import *
from babelcode.utils.metric_utils import write_to_tb_writer
from babelcode.utils.naming_convention import NamingConvention
from babelcode.utils.naming_convention import format_str_with_convention
from babelcode.utils.utilities import TestValueType
from babelcode.utils.utilities import convert_timedelta_to_milliseconds
from babelcode.utils.utilities import format_timedelta_str
from babelcode.utils.utilities import set_seed
PROJECT_ROOT = (Path(__file__) / ".." / ".." / "..").resolve()
FIXTURES_PATH = PROJECT_ROOT / "test_fixtures"
TEMP_EXECUTION_PATH = PROJECT_ROOT / "tmp_execution_results"
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for metrics."""
import tensorflow as tf
def write_to_tb_writer(metrics, writer, step, prefix="eval"):
"""Writes metrics to a tensorboard writer."""
if writer is None:
return
with writer.as_default():
for met, value in metrics.items():
name = f"{prefix}/{met}"
if isinstance(value, (float, int)):
tf.summary.scalar(name, value, step=step)
elif isinstance(value, dict):
write_to_tb_writer(value, writer, step=step, prefix=name)
elif isinstance(value, str):
tf.summary.text(name, value, step=step)
else:
continue
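# Example (an illustrative sketch, not part of the original module; the
# metric names and log directory are assumptions):
#
#   writer = tf.summary.create_file_writer('/tmp/tb_logs')
#   write_to_tb_writer({'accuracy': 0.5, 'per_lang': {'Python': 0.7}},
#                      writer, step=0)
#   # Records eval/accuracy = 0.5 and, via recursion on the nested dict,
#   # eval/per_lang/Python = 0.7.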
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""General Utility File."""
import json
import pathlib
from typing import Any, Callable, Dict, Optional
from absl import logging
__all__ = ["setup_logging", "jsonl_file_to_map"]
def setup_logging(file_name: str,
debug: bool,
log_path: Optional[pathlib.Path] = None):
"""Sets up logging."""
log_path = log_path or pathlib.Path("logs")
log_path = log_path.joinpath(file_name)
log_path.mkdir(parents=True, exist_ok=True)
if debug:
logging.set_verbosity(logging.DEBUG)
else:
logging.set_verbosity(logging.INFO)
logging.get_absl_handler().use_absl_log_file(file_name, log_path)
def jsonl_file_to_map(
file_path: pathlib.Path,
mapping_fn: Callable[[Dict[str, Any]], str]) -> Dict[str, Dict[str, Any]]:
"""Reads a json lines file to a dictionary mapping.
Args:
file_path: The File to read.
    mapping_fn: A callable applied to each parsed line to produce the key
      under which that line is stored.
Returns:
    A dict mapping `mapping_fn(line)` to `line` for each line in
    the file.
"""
return {mapping_fn(line): line for line in map(json.loads, file_path.open())}
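# Example (an illustrative sketch, not part of the original module; the file
# name and the 'qid' field are assumptions):
#
#   questions = jsonl_file_to_map(pathlib.Path('questions.jsonl'),
#                                 lambda line: line['qid'])
#   # A line such as {"qid": "1", "title": "..."} is now questions['1'].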
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Naming convention to support conversions to different styles."""
import enum
import re
from typing import List, Tuple
class NamingConvention(enum.Enum):
"""Enum for holding thee valid naming conventions."""
SNAKE_CASE = 'SNAKE_CASE'
CAMEL_CASE = 'CAMEL_CASE'
PASCAL_CASE = 'PASCAL_CASE'
ALLOWED_NAMING_CONVENTIONS = {v.value: v for v in NamingConvention}
def _tokenize_name(name: str) -> List[str]:
"""Tokenizes a name by splitting on different casing and underscores.
Args:
name: The name to tokenize.
Returns:
The list of tokens found.
"""
tokens = re.sub(r'((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z])|(?<=_)[a-zA-Z0-9])',
r' \1', name).split()
return [t.replace('_', '') for t in tokens]
def convert_to_snake(tokens: List[str]) -> str:
"""Converts name to snake_case.
Args:
tokens: The tokens of the name.
Returns:
The name in snake_case.
"""
return '_'.join(map(lambda t: t.lower(), tokens))
def _get_first_and_remaining_tokens(tokens: List[str]) -> Tuple[str, List[str]]:
"""Gets the first token and list of remaining."""
first, *remaining = tokens
if not first:
first, *remaining = remaining
return first, remaining
def convert_to_camel(tokens: List[str]) -> str:
"""Converts a name to camelCase.
Args:
tokens: The tokens of the name.
Returns:
The name in camelCase.
"""
first, remaining = _get_first_and_remaining_tokens(tokens)
if not remaining:
return first.lower()
return ''.join([first.lower(), *map(str.title, remaining)])
def convert_to_pascal(tokens: List[str]) -> str:
"""Converts a name to PascalCase.
Args:
tokens: The tokens of the name.
Returns:
The name in PascalCase.
"""
first, remaining = _get_first_and_remaining_tokens(tokens)
if not remaining:
return first.title()
return ''.join([first.title(), *map(str.title, remaining)])
CONVENTION_TO_CONVERT = {
NamingConvention.SNAKE_CASE: convert_to_snake,
NamingConvention.CAMEL_CASE: convert_to_camel,
NamingConvention.PASCAL_CASE: convert_to_pascal,
}
def format_str_with_convention(convention: NamingConvention, seq: str) -> str:
"""Formats string for a specific convention.
Args:
convention (NamingConvention): Convention to use.
seq (str): The sequence to format.
Returns:
The formatted sequence.
"""
tokens = _tokenize_name(seq)
convention_fn = CONVENTION_TO_CONVERT[convention]
return convention_fn(tokens)
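# Example (an illustrative sketch, not part of the original module):
#
#   >>> format_str_with_convention(NamingConvention.CAMEL_CASE, 'parse_schema')
#   'parseSchema'
#   >>> format_str_with_convention(NamingConvention.PASCAL_CASE, 'parseSchema')
#   'ParseSchema'
#   >>> format_str_with_convention(NamingConvention.SNAKE_CASE, 'ParseSchema')
#   'parse_schema'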
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""General Utilities that are not large enough to warrant their own file."""
import datetime
import random
from typing import Mapping, Sequence, Union
import numpy as np
TestValueType = Union[
str,
int,
float,
bool,
Sequence['TestValueType'],
Mapping[Union[str, int], 'TestValueType'],
]
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
def convert_timedelta_to_milliseconds(delta: datetime.timedelta) -> float:
"""Converts a timedelta to milliseconds.
Args:
delta: the time delta
Returns:
The milliseconds.
"""
return delta.total_seconds() * 1000
def format_timedelta_str(td: datetime.timedelta) -> str:
"""Formats a timedelta into a readable string."""
hours, remainder = divmod(td.total_seconds(), 3600)
minutes, seconds = divmod(remainder, 60)
return '{:02}:{:02}:{:02}'.format(int(hours), int(minutes), int(seconds))
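# Example (an illustrative sketch, not part of the original module):
#
#   >>> format_timedelta_str(datetime.timedelta(hours=1, minutes=2, seconds=5))
#   '01:02:05'
#   >>> convert_timedelta_to_milliseconds(datetime.timedelta(seconds=1.5))
#   1500.0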
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Initialize and register all languages."""
from babelcode.languages import lang_implementations
from babelcode.languages.language import Language
from babelcode.languages.language import LanguageRegistry
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base implementations of language functionality."""
import dataclasses
import pathlib
from typing import Any, Callable, Dict, List, Optional
from absl import logging
from babelcode import data_types
from babelcode import schema_parsing
from babelcode import translation
from babelcode import utils
__all__ = ['Language', 'LanguageRegistry']
BASE_TEMPLATE_DIRECTORY = utils.PROJECT_ROOT.joinpath('templates')
SchemaType = schema_parsing.SchemaType
SchemaValueType = schema_parsing.SchemaValueType
SchemaMapType = schema_parsing.SchemaMapType
DEFAULT_TEMPLATES = {
'MAIN': 'main.txt',
'EVALUATION': 'evaluation.txt',
'HEADER': 'header.txt'
}
@dataclasses.dataclass(frozen=True)
class Language:
"""The base language class.
Attributes:
name: The name of the language.
file_ext: The string file extension used by this language.
template_mapping: The mapping of template names to their file name. This
      MUST have a 'MAIN' entry.
literal_translator_cls: The LiteralTranslator class for this language.
command_fn: The callable that takes in a filepath and returns the list of
string commands to run for this language.
primitive_conversion_mapping: The overwrites needed for language specific
primitive to code conversions. If you add an override to the map, then the
value must be a callable that takes in a schema type and corresponding
value, then returns the language specific code to convert it to the
literal code.
prompt_translator_cls: The prompt translator class for this language.
naming_convention: The naming convention to use for this language.
escape_fn: Callable that takes in a string and replaces language specific
characters with their escaped versions.
"""
name: str
file_ext: str
literal_translator_cls: translation.LiteralTranslator
command_fn: Callable[[pathlib.Path], List[data_types.Command]]
primitive_conversion_mapping: Dict[str, Callable[[SchemaValueType], str]]
prompt_translator_cls: translation.PromptTranslator
naming_convention: utils.NamingConvention = utils.NamingConvention.SNAKE_CASE
template_mapping: Dict[str, str] = dataclasses.field(default_factory=dict)
escape_fn: Optional[Callable[[str], str]] = None
def __post_init__(self):
"""Adds default templates to the template mapping if they do not exist."""
for k, v in DEFAULT_TEMPLATES.items():
if k not in self.template_mapping:
self.template_mapping[k] = v
def __str__(self) -> str:
"""String representation of the language."""
return self.name
def make_primitive_translator(self) -> Callable[[SchemaType, Any], str]:
"""Initializes and returns the primitive translator function."""
logging.info('Making primitive translator for %s', self.name)
return translation.make_primitive_translator(
type_specific_overrides=self.primitive_conversion_mapping,
escape_fn=self.escape_fn)
def make_literal_translator(self) -> translation.LiteralTranslator:
"""Initializes and returns the literal translator class."""
logging.info('Making literal translator for %s', self.name)
return self.literal_translator_cls(
lang_name=self.name,
naming_convention=self.naming_convention,
convert_primitive_fn=self.make_primitive_translator())
def make_template_map(self) -> Dict[str, pathlib.Path]:
"""Returns the template mapping with the full paths."""
logging.info('Making template map for %s', self.name)
lang_template_dir = BASE_TEMPLATE_DIRECTORY.joinpath(self.name)
out = {}
for name, fn in self.template_mapping.items():
out[name] = lang_template_dir.joinpath(fn)
return out
def make_prompt_translator(self) -> translation.PromptTranslator:
"""Initializes the prompt translator class."""
logging.info('Making prompt translator for %s', self.name)
return self.prompt_translator_cls(self.name, self.naming_convention,
self.escape_fn)
class LanguageRegistry:
"""Language registry for mapping language names to their objects."""
_REGISTRY = {}
_FILE_EXT_TO_LANG = {}
@classmethod
def register_language(cls, language: Language) -> None:
"""Registers a language under the language's name."""
cls._REGISTRY[language.name] = language
cls._FILE_EXT_TO_LANG[language.file_ext] = language.name
@classmethod
def get_language(cls, language: str) -> Language:
"""Gets the language from a name."""
return cls._REGISTRY[language]
@classmethod
def list_languages(cls) -> List[str]:
"""Lists the registered languages."""
return list(cls._REGISTRY)
@classmethod
def get_lang_from_ext(cls, file_ext: str) -> Language:
"""Gets the language based on the file extension."""
return cls._REGISTRY[cls._FILE_EXT_TO_LANG[file_ext]]
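# Example (an illustrative sketch, not part of the original module; it assumes
# the lang_implementations package has already registered a 'Python' language
# with file extension 'py'):
#
#   >>> lang = LanguageRegistry.get_language('Python')
#   >>> lang.file_ext
#   'py'
#   >>> LanguageRegistry.get_lang_from_ext('py') is lang
#   True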
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lua Specific Classes and Functions."""
from typing import Dict, List
from babelcode import data_types
from babelcode import schema_parsing
from babelcode import translation
from babelcode import utils
from babelcode.languages import language
SchemaType = schema_parsing.SchemaType
SchemaMapType = schema_parsing.SchemaMapType
SchemaValueType = schema_parsing.SchemaValueType
class LuaLiteralTranslator(translation.LiteralTranslator):
"""The Lua generator."""
def format_list(self, generic_type: SchemaType,
list_values: List[str]) -> str:
"""Formats a list for Lua."""
_ = generic_type
return '{' + ', '.join(list_values) + '}'
def format_map(self, key_type: SchemaType, value_type: SchemaType,
entries: List[str]) -> str:
"""Formats a map for Lua."""
_ = key_type
_ = value_type
return '{' + ', '.join(entries) + '}'
  def format_map_entry(self, key: str, value: str) -> str:
    """Formats a map entry for Lua."""
    return f'[{key}]={value}'
def format_set(self, generic_type: SchemaType, set_values: List[str]) -> str:
"""Formats a set for Lua."""
_ = generic_type
return '{' + ', '.join(map(lambda v: f'[{v}]=true', set_values)) + '}'
class LuaPromptTranslator(translation.PromptTranslator):
"""The Lua prompt translator."""
@property
def word_replacement_map(self) -> Dict[str, str]:
"""The Lua words to replace."""
return {
'array': ['vector', 'list'],
'table': ['map', 'dict', 'dictionary', 'dictionaries'],
}
@property
def signature_template(self) -> str:
"""The Lua signature template."""
return '\n'.join([
'{%- if docstring is not none -%}{{docstring~"\n"}}{%- endif -%}',
'function {{entry_fn_name}}({{signature}})'
])
def clean_docstring_for_lang(self, docstring: str) -> str:
"""Cleans a docstring for Lua."""
return docstring.replace('--', '\\-\\-')
def format_docstring_for_lang(self, docstring: str) -> str:
"""Formats a docstring for Lua."""
out = []
for line in docstring.splitlines(False):
out.append(f'-- {line}')
return '\n'.join(out)
def translate_signature_argument_to_lang(self, arg_name: str,
arg_type: SchemaType,
use_type_annotation: bool) -> str:
"""Translates the signature argument to Lua."""
_ = arg_type
_ = use_type_annotation
return arg_name
def translate_signature_returns_to_lang(self, return_type: SchemaType,
use_type_annotation: bool) -> str:
"""Translates the signature return to Lua."""
_ = use_type_annotation
return return_type.lang_type
language.LanguageRegistry.register_language(
language.Language(
name='Lua',
file_ext='lua',
literal_translator_cls=LuaLiteralTranslator,
command_fn=lambda fp: [data_types.Command(['lua', fp.name])],
primitive_conversion_mapping={},
prompt_translator_cls=LuaPromptTranslator,
naming_convention=utils.NamingConvention.SNAKE_CASE))
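# Example outputs (an illustrative sketch, not part of the original module;
# `t` stands in for the SchemaType arguments, which these methods ignore):
#
#   format_list(t, ['1', '2', '3'])  -> '{1, 2, 3}'
#   format_map_entry('"k"', '1')     -> '["k"]=1'
#   format_set(t, ['1', '2'])        -> '{[1]=true, [2]=true}'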
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Java Specific Classes and Functions."""
from typing import Dict, List
from babelcode import data_types
from babelcode import schema_parsing
from babelcode import translation
from babelcode import utils
from babelcode.languages import language
SchemaType = schema_parsing.SchemaType
class JavaLiteralTranslator(translation.LiteralTranslator):
"""The Java generator."""
def format_list(self, generic_type: SchemaType,
list_values: List[str]) -> str:
"""Formats a list for Java."""
_ = generic_type
return f'new ArrayList<>(Arrays.asList({", ".join(list_values)}))'
def format_map(self, key_type: SchemaType, value_type: SchemaType,
entries: List[str]) -> str:
"""Formats a map for Java."""
_ = key_type, value_type
return f'Map.ofEntries({", ".join(entries)})'
def format_map_entry(self, key: str, value: str) -> str:
"""Formats a single entry for a map."""
return f'entry({key}, {value})'
def format_set(self, generic_type: SchemaType, set_values: List[str]):
"""Formats a set for Java."""
_ = generic_type
return f'new HashSet<>(Arrays.asList({", ".join(set_values)}))'
class JavaPromptTranslator(translation.PromptTranslator):
"""The Java prompt translator."""
@property
def word_replacement_map(self) -> Dict[str, str]:
"""The Java words to replace."""
return {
'array': ['vector', 'list'],
'map': ['dict', 'dictionary', 'dictionaries'],
}
@property
def signature_template(self) -> str:
"""The Java signature template."""
return '\n'.join([
'class {{entry_cls_name}} {',
'{% if docstring is not none -%}{{docstring}}{%- endif %}',
' public {{return_type}} {{entry_fn_name}}({{signature}}) {'
])
def clean_docstring_for_lang(self, docstring: str) -> str:
"""Cleans and translates a docstring for Java."""
return translation.escape_cpp_like_comment_chars(docstring)
def format_docstring_for_lang(self, docstring: str) -> str:
"""Formats a docstring for Java."""
# Manually add in the tab for formatting.
return ' ' + translation.format_cpp_like_docstring(docstring,
join_seq='\n ')
def translate_signature_argument_to_lang(self, arg_name: str,
arg_type: SchemaType,
use_type_annotation: bool) -> str:
"""Translates the signature argument to Java."""
_ = use_type_annotation
return f'{arg_type.lang_type} {arg_name}'
def translate_signature_returns_to_lang(self, return_type: SchemaType,
use_type_annotation: bool) -> str:
"""Translates the signature return to Java."""
_ = use_type_annotation
return return_type.lang_type
language.LanguageRegistry.register_language(
language.Language(name='Java',
file_ext='java',
literal_translator_cls=JavaLiteralTranslator,
command_fn=lambda fp:
[data_types.Command(['java', fp.name], timeout=15)],
primitive_conversion_mapping={
'float': lambda v: translation.convert_float(v, 'f'),
'long': lambda v: f'{v}L'
},
prompt_translator_cls=JavaPromptTranslator,
naming_convention=utils.NamingConvention.CAMEL_CASE))
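# Example outputs (an illustrative sketch, not part of the original module;
# `t` stands in for the unused SchemaType arguments):
#
#   format_list(t, ['1', '2'])           -> 'new ArrayList<>(Arrays.asList(1, 2))'
#   format_map_entry('"k"', '1')         -> 'entry("k", 1)'
#   format_map(t, t, ['entry("k", 1)'])  -> 'Map.ofEntries(entry("k", 1))'
#   # The primitive overrides render longs with an 'L' suffix, e.g. 5 -> '5L'.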
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""R Specific Classes and Functions."""
from typing import Dict, List
from babelcode import schema_parsing
from babelcode import translation
from babelcode import utils
from babelcode.data_types.command import Command
from babelcode.languages import language
SchemaMapType = schema_parsing.SchemaMapType
SchemaType = schema_parsing.SchemaType
SchemaValueType = schema_parsing.SchemaValueType
class RLiteralTranslator(translation.LiteralTranslator):
"""The R generator."""
def format_map(self, key_type: SchemaType, value_type: SchemaType,
entries: List[str]) -> str:
"""Formats a map for R."""
_ = key_type
_ = value_type
return 'list(' + ', '.join(entries) + ')'
def format_map_entry(self, key: str, value: str) -> str:
"""Formats a single entry for a map."""
return f'{key} = {value}'
def format_set(self, generic_type: SchemaType, set_values: List[str]):
"""Formats a set for R."""
_ = generic_type
return f'list({", ".join(set_values)})'
def format_list(self, generic_type: SchemaType,
list_values: List[str]) -> str:
"""Formats a list for R."""
_ = generic_type
return f'list({", ".join(list_values)})'
class RPromptTranslator(translation.PromptTranslator):
"""The R prompt translator."""
@property
def word_replacement_map(self) -> Dict[str, str]:
"""The R words to replace."""
return {
'list': ['array'],
}
@property
def signature_template(self) -> str:
"""The R signature template."""
return '\n'.join([
'{%- if docstring is not none -%}{{docstring~"\n"}}{%- endif -%}',
'{{entry_fn_name}} <- function({{signature}}) {',
])
def clean_docstring_for_lang(self, docstring: str) -> str:
"""Translates a docstring for R."""
return docstring
def format_docstring_for_lang(self, docstring: str) -> str:
"""Formats a docstring for R."""
return '\n'.join(map(lambda v: f'# {v}', docstring.splitlines(False)))
def translate_signature_argument_to_lang(self, arg_name: str,
arg_type: SchemaType,
use_type_annotation: bool) -> str:
"""Translates the signature argument to R."""
_ = use_type_annotation
return arg_name
def translate_signature_returns_to_lang(self, return_type: SchemaType,
use_type_annotation: bool) -> str:
"""Translates the signature return to R."""
_ = use_type_annotation
return ''
language.LanguageRegistry.register_language(
language.Language(
name='R',
file_ext='r',
literal_translator_cls=RLiteralTranslator,
command_fn=lambda fp: [Command(['Rscript', fp.name], timeout=10)],
primitive_conversion_mapping={
'boolean': lambda v: 'TRUE' if v else 'FALSE',
'integer': lambda v: f'{v}L',
'long': lambda v: f'{v}L'
},
prompt_translator_cls=RPromptTranslator,
naming_convention=utils.NamingConvention.SNAKE_CASE,
))
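# Example outputs (an illustrative sketch, not part of the original module;
# `t` stands in for the unused SchemaType arguments):
#
#   format_map_entry('a', '1L')   -> 'a = 1L'
#   format_list(t, ['1L', '2L'])  -> 'list(1L, 2L)'
#   # The primitive overrides render True as 'TRUE' and the integer 3 as '3L'.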
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TypeScript Specific Classes and Functions."""
import pathlib
from typing import Dict, List
from babelcode import data_types
from babelcode import schema_parsing
from babelcode import translation
from babelcode import utils
from babelcode.languages.language import Language
from babelcode.languages.language import LanguageRegistry
SchemaType = schema_parsing.SchemaType
SchemaValueType = schema_parsing.SchemaValueType
class TSLiteralTranslator(translation.LiteralTranslator):
"""The TypeScript generator."""
def format_map(self, key_type: SchemaType, value_type: SchemaType,
entries: List[str]) -> str:
"""Formats a map for TypeScript."""
_ = key_type
_ = value_type
return '{' + ', '.join(entries) + '}'
def format_map_entry(self, key: str, value: str) -> str:
"""Formats a map entry for TypeScript."""
return f'{key}: {value}'
def format_set(self, generic_type: SchemaType, set_values: List[str]) -> str:
"""Formats a set for TypeScript."""
type_str = generic_type.elements[0].lang_type
return f'new Set<{type_str}>([{", ".join(set_values)}])'
def format_list(self, generic_type: SchemaType,
list_values: List[SchemaValueType]) -> str:
"""Format an array or TypeScript."""
_ = generic_type.elements[0].lang_type
return f'[{", ".join(list_values)}]'
class TSPromptTranslator(translation.PromptTranslator):
"""The TypeScript prompt translator."""
@property
def word_replacement_map(self) -> Dict[str, str]:
return {
'array': ['vector', 'list'],
'map': ['dict', 'dictionary', 'dictionaries'],
}
@property
def signature_template(self) -> str:
return '\n'.join([
'{%- if docstring is not none -%}{{docstring~"\n"}}{%- endif -%}',
'function {{entry_fn_name}}({{signature}}){{return_type}} {'
])
def clean_docstring_for_lang(self, docstring: str) -> str:
return translation.escape_cpp_like_comment_chars(docstring)
def format_docstring_for_lang(self, docstring: str) -> str:
return translation.format_cpp_like_docstring(docstring)
def translate_signature_argument_to_lang(self, arg_name: str,
arg_type: SchemaType,
use_type_annotation: bool) -> str:
_ = use_type_annotation
return f'{arg_name}: {arg_type.lang_type}'
def translate_signature_returns_to_lang(self, return_type: SchemaType,
use_type_annotation: bool) -> str:
_ = use_type_annotation
return f': {return_type.lang_type}'
def make_commands(file_path: pathlib.Path) -> List[data_types.Command]:
"""Makes the command to run a TypeScript source file.
Args:
file_path: The path to the file to run.
Returns:
The list of commands to run.
"""
return [
data_types.Command([
'tsc', '--target', 'es2020', '--lib', 'es5,dom,es2015,es2020',
file_path.name
],
timeout=15),
data_types.Command(['node', f'{file_path.stem}.js'])
]
LanguageRegistry.register_language(
Language(name='TypeScript',
file_ext='ts',
literal_translator_cls=TSLiteralTranslator,
command_fn=make_commands,
primitive_conversion_mapping={},
prompt_translator_cls=TSPromptTranslator,
naming_convention=utils.NamingConvention.CAMEL_CASE))
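# Example (an illustrative sketch, not part of the original module; the file
# path q1/code.ts is assumed): make_commands produces two commands that are
# run in order, first compiling with tsc and then running the emitted JS:
#
#   tsc --target es2020 --lib es5,dom,es2015,es2020 code.ts   (timeout=15)
#   node code.js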
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python3 Specific Classes and Functions."""
from typing import Dict
from babelcode import schema_parsing
from babelcode import translation
from babelcode import utils
from babelcode.data_types.command import Command
from babelcode.languages import language
SchemaMapType = schema_parsing.SchemaMapType
SchemaType = schema_parsing.SchemaType
SchemaValueType = schema_parsing.SchemaValueType
class Py3LiteralTranslator(translation.LiteralTranslator):
"""The Python3 generator."""
class Py3PromptTranslator(translation.PromptTranslator):
"""The Python prompt translator."""
@property
def word_replacement_map(self) -> Dict[str, str]:
"""The Python words to replace."""
return {
'list': ['array', 'vector'],
'dictionary': ['map'],
}
@property
def signature_template(self) -> str:
"""The Python signature template."""
return '\n'.join([
'def {{entry_fn_name}}({{signature}}){{return_type}}:',
'{%- if docstring is not none -%}{{"\n"~docstring}}{%- endif -%}'
])
def clean_docstring_for_lang(self, docstring: str) -> str:
"""Translates a docstring for Python."""
return translation.escape_triple_quotes(docstring)
def format_docstring_for_lang(self, docstring: str) -> str:
"""Formats a docstring for Python."""
out = []
for i, line in enumerate(docstring.splitlines(False)):
prefix = ' '
if i == 0:
prefix += '"""'
out.append(f'{prefix}{line}')
out.append(' """')
return '\n'.join(out)
def translate_signature_argument_to_lang(self, arg_name: str,
arg_type: SchemaType,
use_type_annotation: bool) -> str:
"""Translates the signature argument to Python."""
if use_type_annotation:
return f'{arg_name}: {arg_type.lang_type}'
return arg_name
def translate_signature_returns_to_lang(self, return_type: SchemaType,
use_type_annotation: bool) -> str:
"""Translates the signature return to Python."""
if use_type_annotation:
return f' -> {return_type.lang_type}'
return ''
language.LanguageRegistry.register_language(
language.Language(
name='Python',
file_ext='py',
literal_translator_cls=Py3LiteralTranslator,
command_fn=lambda fp: [Command(['python', fp.name], timeout=10)],
primitive_conversion_mapping={'boolean': str},
prompt_translator_cls=Py3PromptTranslator,
naming_convention=utils.NamingConvention.SNAKE_CASE))
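# Example outputs (an illustrative sketch, not part of the original module;
# `t` is a SchemaType whose lang_type is assumed to be 'List[int]'):
#
#   translate_signature_argument_to_lang('arg0', t, True)   -> 'arg0: List[int]'
#   translate_signature_argument_to_lang('arg0', t, False)  -> 'arg0'
#   translate_signature_returns_to_lang(t, True)            -> ' -> List[int]'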
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Init file for the language implementations."""
from babelcode.languages.lang_implementations import cpp
from babelcode.languages.lang_implementations import csharp
from babelcode.languages.lang_implementations import dart
from babelcode.languages.lang_implementations import go
from babelcode.languages.lang_implementations import haskell
from babelcode.languages.lang_implementations import java
from babelcode.languages.lang_implementations import javascript
from babelcode.languages.lang_implementations import julia
from babelcode.languages.lang_implementations import kotlin
from babelcode.languages.lang_implementations import lua
from babelcode.languages.lang_implementations import php
from babelcode.languages.lang_implementations import py3
from babelcode.languages.lang_implementations import r
from babelcode.languages.lang_implementations import rust
from babelcode.languages.lang_implementations import scala
from babelcode.languages.lang_implementations import typescript
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dart Specific classes and functions."""
import pathlib
from typing import Dict, List
from babelcode import data_types
from babelcode import schema_parsing
from babelcode import translation
from babelcode import utils
from babelcode.languages import language
SchemaMapType = schema_parsing.SchemaMapType
SchemaType = schema_parsing.SchemaType
SchemaValueType = schema_parsing.SchemaValueType
def make_commands(file_path: pathlib.Path) -> List[data_types.Command]:
"""Makes the command to run a Dart source file.
Args:
file_path: The path to the file to run.
Returns:
The list of commands to run.
"""
return [
data_types.Command(['dart', 'run', file_path.name]),
]
class DartLiteralTranslator(translation.LiteralTranslator):
"""The Dart generator."""
def format_list(self, generic_type: SchemaType,
list_values: List[str]) -> str:
"""Formats a list for Dart."""
if not list_values:
return f'<{generic_type.elements[0].lang_type}>[]'
return f'[{", ".join(list_values)}]'
def format_map(self, key_type: SchemaType, value_type: SchemaType,
entries: List[str]):
"""Formats a map for Dart."""
if not entries:
key_str = key_type.lang_type
val_str = value_type.lang_type
return f'<{key_str},{val_str}>' + '{}'
return '{' + ', '.join(entries) + '}'
def format_map_entry(self, key: str, value: str) -> str:
"""Formats a map entry for Dart."""
return f'{key}: {value}'
def format_set(self, generic_type: SchemaType, set_values: List[str]) -> str:
"""Formats a set for Dart."""
if not set_values:
return f'<{generic_type.elements[0].lang_type}>' + '{}'
return '{' + ', '.join(set_values) + '}'
class DartPromptTranslator(translation.PromptTranslator):
"""The Dart prompt translator."""
@property
def word_replacement_map(self) -> Dict[str, str]:
"""The Dart words to replace."""
return {
'vector': ['array', 'list'],
'map': ['dict', 'dictionary', 'dictionaries'],
}
@property
def signature_template(self) -> str:
"""The Dart signature template."""
return '\n'.join([
'{%- if docstring is not none -%}{{docstring~"\n"}}{%- endif -%}',
'{{return_type}}{{entry_fn_name}}({{signature}}) {',
])
def clean_docstring_for_lang(self, docstring: str) -> str:
"""Cleans a docstring for Dart."""
return translation.escape_cpp_like_comment_chars(docstring)
def format_docstring_for_lang(self, docstring: str) -> str:
"""Formats a docstring for Dart."""
out = []
    for line in docstring.splitlines():
      out.append(f'/// {line}')
return '\n'.join(out)
def translate_signature_argument_to_lang(self, arg_name: str,
arg_type: SchemaType,
use_type_annotation: bool) -> str:
"""Translates the signature argument to Dart."""
if use_type_annotation:
return f'{arg_type.lang_type} {arg_name}'
return arg_name
def translate_signature_returns_to_lang(self, return_type: SchemaType,
use_type_annotation: bool) -> str:
"""Translates the signature return to Dart."""
if use_type_annotation:
return f'{return_type.lang_type} '
return ''
language.LanguageRegistry.register_language(
language.Language(
name='Dart',
file_ext='dart',
literal_translator_cls=DartLiteralTranslator,
command_fn=make_commands,
primitive_conversion_mapping={},
prompt_translator_cls=DartPromptTranslator,
naming_convention=utils.NamingConvention.CAMEL_CASE,
escape_fn=lambda s: s.replace('$', '\\$'),
))
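# Example outputs (an illustrative sketch, not part of the original module;
# `t` is a SchemaType whose element lang_type is assumed to be 'int'; note
# the explicit element type emitted for empty collections):
#
#   format_list(t, [])          -> '<int>[]'
#   format_list(t, ['1', '2'])  -> '[1, 2]'
#   format_set(t, [])           -> '<int>{}'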
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""C++ Specific classes and functions."""
import pathlib
from typing import Dict, List
from babelcode import data_types
from babelcode import schema_parsing
from babelcode import translation
from babelcode import utils
from babelcode.languages import language
SchemaMapType = schema_parsing.SchemaMapType
SchemaType = schema_parsing.SchemaType
SchemaValueType = schema_parsing.SchemaValueType
def make_cpp_commands(file_path: pathlib.Path) -> List[data_types.Command]:
"""Makes the command to run a C++ source file.
Args:
file_path: The path to the file to run.
Returns:
The list of commands to run.
"""
return [
data_types.Command(['g++', file_path.name, '-o', 'main.exe'], timeout=10),
data_types.Command(['./main.exe'])
]
class CPPLiteralTranslator(translation.LiteralTranslator):
"""The C++ generator."""
def format_list(self, generic_type: SchemaType,
list_values: List[str]) -> str:
"""Formats a list for C++."""
_ = generic_type
return '{' + ', '.join(list_values) + '}'
def format_map(self, key_type: SchemaType, value_type: SchemaType,
entries: List[str]):
"""Formats a map for C++."""
_ = key_type
_ = value_type
return '{' + ', '.join(entries) + '}'
def format_map_entry(self, key: str, value: str) -> str:
"""Formats a map entry for C++."""
return '{' + f'{key}, {value}' + '}'
def format_set(self, generic_type: SchemaType, set_values: List[str]) -> str:
"""Formats a set for C++."""
_ = generic_type
return '{' + ', '.join(set_values) + '}'
class CPPPromptTranslator(translation.PromptTranslator):
"""The C++ prompt translator."""
@property
def word_replacement_map(self) -> Dict[str, str]:
"""The C++ words to replace."""
return {
'vector': ['array', 'list'],
'map': ['dict', 'dictionary', 'dictionaries'],
}
@property
def signature_template(self) -> str:
"""The C++ signature template."""
return '\n'.join([
'{%- if docstring is not none -%}{{docstring~"\n"}}{%- endif -%}',
'{{return_type}} {{entry_fn_name}}({{signature}}) {'
])
def clean_docstring_for_lang(self, docstring: str) -> str:
"""Cleans a docstring for C++."""
return translation.escape_cpp_like_comment_chars(docstring)
def format_docstring_for_lang(self, docstring: str) -> str:
"""Formats a docstring for C++."""
return translation.format_cpp_like_docstring(docstring)
def translate_signature_argument_to_lang(self, arg_name: str,
arg_type: SchemaType,
use_type_annotation: bool) -> str:
"""Translates the signature argument to C++."""
_ = use_type_annotation
return f'{arg_type.lang_type} {arg_name}'
def translate_signature_returns_to_lang(self, return_type: SchemaType,
use_type_annotation: bool) -> str:
"""Translates the signature return to C++."""
_ = use_type_annotation
return return_type.lang_type
language.LanguageRegistry.register_language(
language.Language(name='C++',
file_ext='cpp',
literal_translator_cls=CPPLiteralTranslator,
command_fn=make_cpp_commands,
primitive_conversion_mapping={},
prompt_translator_cls=CPPPromptTranslator,
naming_convention=utils.NamingConvention.CAMEL_CASE))
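# Example (an illustrative sketch, not part of the original module; the file
# path q1/code.cpp is assumed): make_cpp_commands compiles the source and
# then runs the produced binary:
#
#   g++ code.cpp -o main.exe   (timeout=10)
#   ./main.exe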
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""C# Specific Classes and Functions."""
import pathlib
from typing import Callable, Dict, List
from babelcode import data_types
from babelcode import schema_parsing
from babelcode import translation
from babelcode import utils
from babelcode.languages import language
SchemaType = schema_parsing.SchemaType
class CSharpLiteralTranslator(translation.LiteralTranslator):
"""The C# generator."""
def format_list(self, generic_type: SchemaType,
list_values: List[str]) -> str:
"""Convert the list of values to the code to initialize the list.
Args:
generic_type: The underlying schema type for the list.
list_values: The list of code for each element.
Returns:
The code to initialize an array object in the current language.
"""
return (f'new List<{generic_type.elements[0].lang_type}>' + '{' +
', '.join(list_values) + '}')
def format_map(self, key_type: SchemaType, value_type: SchemaType,
entries: List[str]) -> str:
"""Format the map with keys and entries to the code to initialize the map.
We include the `key_type` and `value_type` for languages that require them
    to initialize the map (e.g., Golang).
Args:
key_type: The SchemaType of the key_type.
value_type: The SchemaType of the value.
entries: The list of code to initialize the entries.
Returns:
      The code to initialize a map object in the current language.
"""
type_str = f'new Dictionary<{key_type.lang_type}, {value_type.lang_type}>'
return type_str + '{' + ', '.join(entries) + '}'
def format_map_entry(self, key: str, value: str) -> str:
"""Format a single map entry to the literal code.
Args:
      key: The code to initialize the key.
value: The code to initialize the value.
Returns:
The code to make the single entry.
"""
return '{' + key + ', ' + value + '}'
def format_set(self, generic_type: SchemaType, set_values: List[str]):
return (f'new HashSet<{generic_type.elements[0].lang_type}>' + '{' +
', '.join(set_values) + '}')
def make_argument_signature(schema: Dict[str, SchemaType],
input_order: List[str]) -> str:
"""Make the argument signature for the language.
Args:
schema: The mapping of variables to their types.
input_order: The order of variables for the arguments.
Returns:
The string argument signature for the language.
"""
return ', '.join([f'{schema[v].lang_type} {v}' for v in input_order])
class CSharpPromptTranslator(translation.PromptTranslator):
"""The C# prompt translator."""
def __init__(
self,
lang_name: str,
naming_convention: utils.NamingConvention,
escape_fn: Callable,
):
_ = lang_name
super().__init__('C#', naming_convention, escape_fn)
@property
def word_replacement_map(self) -> Dict[str, str]:
return {
'list': ['array', 'vector'],
'dictionary': ['map'],
}
@property
def signature_template(self) -> str:
return '\n'.join([
'class {{entry_cls_name}} {',
'{% if docstring is not none -%}{{docstring}}{%- endif %}',
' public {{return_type}} {{entry_fn_name}}({{signature}}) {',
])
def clean_docstring_for_lang(self, docstring: str) -> str:
return translation.escape_cpp_like_comment_chars(docstring)
def format_docstring_for_lang(self, docstring: str) -> str:
return ' ' + translation.format_cpp_like_docstring(docstring,
join_seq='\n ')
def translate_signature_argument_to_lang(self, arg_name: str,
arg_type: SchemaType,
use_type_annotation: bool) -> str:
_ = use_type_annotation
return f'{arg_type.lang_type} {arg_name}'
def translate_signature_returns_to_lang(self, return_type: SchemaType,
use_type_annotation: bool) -> str:
_ = use_type_annotation
return return_type.lang_type
def make_commands(file_path: pathlib.Path) -> List[data_types.Command]:
"""Makes the command to run a C# source file.
Args:
file_path: The path to the file to run.
Returns:
The list of commands to run.
"""
return [
data_types.Command(
[
'mono-csc',
'-r:System.Web.dll',
'-r:System.Web.Extensions.dll',
file_path.name,
'-o',
'main.exe',
],
timeout=10,
),
data_types.Command(['mono', 'main.exe']),
]
language.LanguageRegistry.register_language(
language.Language(
name='CSharp',
file_ext='cs',
literal_translator_cls=CSharpLiteralTranslator,
command_fn=make_commands,
primitive_conversion_mapping={
'float': lambda v: translation.convert_float(v, 'f'),
'double': lambda v: translation.convert_float(v, 'm'),
},
prompt_translator_cls=CSharpPromptTranslator,
naming_convention=utils.NamingConvention.PASCAL_CASE,
))
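# Example outputs (an illustrative sketch, not part of the original module;
# `t` is a SchemaType whose element lang_type is assumed to be 'int'):
#
#   format_list(t, ['1', '2'])    -> 'new List<int>{1, 2}'
#   format_map_entry('"k"', '1')  -> '{"k", 1}'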
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Golang Specific Classes and Functions."""
import pathlib
from typing import Any, Dict, List
from babelcode import data_types
from babelcode import schema_parsing
from babelcode import translation
from babelcode import utils
from babelcode.languages import language
SchemaType = schema_parsing.SchemaType
class GoLiteralTranslator(translation.LiteralTranslator):
"""The Golang generator."""
def format_list(self, generic_type: SchemaType,
list_values: List[str]) -> str:
"""Formats a list for Golang."""
_ = generic_type
return '{' + ', '.join(list_values) + '}'
def convert_array_like_type(self, generic_type: SchemaType,
list_value: List[Any],
use_format_set: bool) -> str:
"""Converts an array like with the Golang specific formatting."""
result = super().convert_array_like_type(generic_type, list_value,
use_format_set)
return f'{generic_type.lang_type}{result}'
def format_map(self, key_type: SchemaType, value_type: SchemaType,
entries: List[str]) -> str:
"""Formats a map for Golang."""
entry_str = '{' + ', '.join(entries) + '}'
return f'map[{key_type.lang_type}]{value_type.lang_type}{entry_str}'
def format_map_entry(self, key: str, value: str) -> str:
"""Formats a map entry for Golang."""
return f'{key}: {value}'
def format_set(self, generic_type: SchemaType, set_values: List[str]) -> str:
"""Formats a set for Golang."""
_ = generic_type
return '{' + ', '.join(map(lambda v: f'{v}: true', set_values)) + '}'
def make_commands(file_path: pathlib.Path) -> List[data_types.Command]:
"""Makes the command to run a Golang source file.
Args:
file_path: The path to the file to run.
Returns:
The list of commands to run.
"""
return [
data_types.Command(['go', 'build', '-o', 'main.exe', file_path.name]),
data_types.Command(['./main.exe'])
]
class GoPromptTranslator(translation.PromptTranslator):
"""The Golang prompt translator."""
@property
def word_replacement_map(self) -> Dict[str, str]:
"""The Golang words to replace."""
return {
'array': ['vector', 'list'],
'map': ['dict', 'dictionary', 'dictionaries'],
}
@property
def signature_template(self) -> str:
"""The Golang signature template."""
return '\n'.join([
'{%- if docstring is not none -%}{{docstring~"\n"}}{%- endif -%}',
'func {{entry_fn_name}}({{signature}}) {{return_type}} {'
])
def clean_docstring_for_lang(self, docstring: str) -> str:
"""Translates a docstring for Golang."""
return docstring.replace('//', '\\/\\/')
def format_docstring_for_lang(self, docstring: str) -> str:
"""Formats a docstring for Golang."""
out = []
for line in docstring.splitlines(False):
out.append(f'// {line}')
return '\n'.join(out)
def translate_signature_argument_to_lang(self, arg_name: str,
arg_type: SchemaType,
use_type_annotation: bool) -> str:
"""Translates the signature argument to Golang."""
_ = use_type_annotation
return f'{arg_name} {arg_type.lang_type}'
def translate_signature_returns_to_lang(self, return_type: SchemaType,
use_type_annotation: bool) -> str:
"""Translates the signature return to Golang."""
_ = use_type_annotation
return return_type.lang_type
language.LanguageRegistry.register_language(
language.Language(name='Go',
file_ext='go',
literal_translator_cls=GoLiteralTranslator,
command_fn=make_commands,
primitive_conversion_mapping={},
prompt_translator_cls=GoPromptTranslator,
naming_convention=utils.NamingConvention.CAMEL_CASE))
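# Example outputs (an illustrative sketch, not part of the original module;
# `k` and `v` are SchemaTypes whose lang_types are assumed to be 'string' and
# 'int', while `t` stands in for the unused SchemaType argument):
#
#   format_map(k, v, ['"a": 1'])  -> 'map[string]int{"a": 1}'
#   format_set(t, ['1', '2'])     -> '{1: true, 2: true}'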
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Kotlin Specific classes and functions."""
import pathlib
from typing import Dict, List
from babelcode import data_types
from babelcode import schema_parsing
from babelcode import translation
from babelcode import utils
from babelcode.languages import language
SchemaMapType = schema_parsing.SchemaMapType
SchemaType = schema_parsing.SchemaType
SchemaValueType = schema_parsing.SchemaValueType
def make_commands(file_path: pathlib.Path) -> List[data_types.Command]:
"""Makes the kotlin commands to run source code."""
return [
data_types.Command(
['kotlinc', '-script', file_path.name, '-no-reflect', '-nowarn'],
timeout=30),
# data_types.Command(['java', '-jar', 'solution.jar'], timeout=15)
]
class KotlinLiteralTranslator(translation.LiteralTranslator):
"""The Kotlin generator."""
def format_list(self, generic_type: SchemaType,
list_values: List[str]) -> str:
"""Formats a list for Kotlin."""
_ = generic_type
return f'arrayListOf({", ".join(list_values)})'
def format_map(self, key_type: SchemaType, value_type: SchemaType,
entries: List[str]):
"""Formats a map for Kotlin."""
_ = key_type, value_type
return f'mapOf({", ".join(entries)})'
def format_map_entry(self, key: str, value: str) -> str:
"""Formats a map entry for Kotlin."""
return f'{key} to {value}'
def format_set(self, generic_type: SchemaType, set_values: List[str]) -> str:
"""Formats a set for Kotlin."""
return f'hashSetOf({", ".join(set_values)})'
class KotlinPromptTranslator(translation.PromptTranslator):
"""The Kotlin prompt translator."""
@property
def word_replacement_map(self) -> Dict[str, str]:
"""The Kotlin words to replace."""
return {
'array': ['vector', 'list'],
'map': ['dict', 'dictionary', 'dictionaries'],
}
@property
def signature_template(self) -> str:
"""The Kotlin signature template."""
return '\n'.join([
'{%- if docstring is not none -%}{{docstring~"\n"}}{%- endif -%}',
'fun {{entry_fn_name}}({{signature}}): {{return_type}} {'
])
def clean_docstring_for_lang(self, docstring: str) -> str:
"""Cleans a docstring for Kotlin."""
return translation.escape_cpp_like_comment_chars(docstring)
def format_docstring_for_lang(self, docstring: str) -> str:
return translation.format_cpp_like_docstring(docstring)
def translate_signature_argument_to_lang(self, arg_name: str,
arg_type: SchemaType,
use_type_annotation: bool) -> str:
"""Translates the signature argument to Kotlin."""
_ = use_type_annotation
return f'{arg_name}: {arg_type.lang_type}'
def translate_signature_returns_to_lang(self, return_type: SchemaType,
use_type_annotation: bool) -> str:
"""Translates the signature return to Kotlin."""
_ = use_type_annotation
return return_type.lang_type
language.LanguageRegistry.register_language(
language.Language(name='Kotlin',
file_ext='kts',
literal_translator_cls=KotlinLiteralTranslator,
command_fn=make_commands,
primitive_conversion_mapping={
'float': lambda v: translation.convert_float(v, 'f')
},
prompt_translator_cls=KotlinPromptTranslator,
naming_convention=utils.NamingConvention.CAMEL_CASE,
escape_fn=lambda s: s.replace('$', '\\$')))
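# Example outputs (an illustrative sketch, not part of the original module;
# `t` stands in for the unused SchemaType arguments):
#
#   format_list(t, ['1', '2'])    -> 'arrayListOf(1, 2)'
#   format_map_entry('"k"', '1')  -> '"k" to 1'
#   format_set(t, ['1', '2'])     -> 'hashSetOf(1, 2)'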
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PHP Specific Classes and Functions."""
import functools
from typing import Dict, List
from babelcode import data_types
from babelcode import schema_parsing
from babelcode import translation
from babelcode import utils
from babelcode.languages import language
SchemaType = schema_parsing.SchemaType
SchemaMapType = schema_parsing.SchemaMapType
SchemaValueType = schema_parsing.SchemaValueType
class PHPLiteralTranslator(translation.LiteralTranslator):
"""The PHP generator."""
def format_list(self, generic_type: SchemaType,
list_values: List[str]) -> str:
_ = generic_type
return f'array({", ".join(list_values)})'
def format_map(self, key_type: SchemaType, value_type: SchemaType,
entries: List[str]):
_ = key_type
_ = value_type
return f'array({", ".join(entries)})'
  def format_map_entry(self, key: str, value: str):
    return f'{key} => {value}'
def format_set(self, generic_type: SchemaType, set_values: List[str]):
_ = generic_type
return 'array(' + ', '.join(map(lambda v: f'{v} => true', set_values)) + ')'
class PHPPromptTranslator(translation.PromptTranslator):
"""The PHP prompt translator."""
@property
def word_replacement_map(self) -> Dict[str, str]:
return {
        'array': ['vector', 'list', 'map', 'dict', 'dictionary',
                  'dictionaries'],
}
@property
def signature_template(self) -> str:
return '\n'.join([
'{%- if docstring is not none -%}{{docstring~"\n"}}{%- endif -%}',
'function {{entry_fn_name}}({{signature}}) {'
])
def clean_docstring_for_lang(self, docstring: str) -> str:
return translation.escape_cpp_like_comment_chars(docstring)
def format_docstring_for_lang(self, docstring: str) -> str:
out = ['/**']
for line in docstring.splitlines(False):
prefix = '* '
out.append(f'{prefix}{line}')
out.append('*/')
return '\n'.join(out)
def translate_signature_argument_to_lang(self, arg_name: str,
arg_type: SchemaType,
use_type_annotation: bool) -> str:
    _ = arg_type, use_type_annotation
    return f'${arg_name}'
def translate_signature_returns_to_lang(self, return_type: SchemaType,
use_type_annotation: bool) -> str:
_ = use_type_annotation
return return_type.lang_type
def translate_argument_name_to_lang(self, arg_name: str) -> str:
return f'${arg_name}'
def convert_string(v, wrap_char='"'):
v = translation.convert_string(v, wrap_char=wrap_char)
return v.replace('$', '\\$')
convert_char = functools.partial(convert_string, wrap_char='\'')
language.LanguageRegistry.register_language(
language.Language(
name='PHP',
file_ext='php',
literal_translator_cls=PHPLiteralTranslator,
command_fn=lambda fp: [data_types.Command(['php', fp.name])],
primitive_conversion_mapping={},
prompt_translator_cls=PHPPromptTranslator,
naming_convention=utils.NamingConvention.CAMEL_CASE,
escape_fn=lambda s: s.replace('$', '\\$')))
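# Example outputs (an illustrative sketch, not part of the original module;
# `t` stands in for the unused SchemaType arguments):
#
#   format_map_entry('"k"', '1')             -> '"k" => 1'
#   format_set(t, ['1', '2'])                -> 'array(1 => true, 2 => true)'
#   translate_argument_name_to_lang('arg0')  -> '$arg0'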
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Haskell Specific Classes and Functions."""
import pathlib
from typing import Dict, List
from babelcode import data_types
from babelcode import schema_parsing
from babelcode import translation
from babelcode import utils
from babelcode.languages import language
SchemaType = schema_parsing.SchemaType
SchemaMapType = schema_parsing.SchemaMapType
class HaskellLiteralTranslator(translation.LiteralTranslator):
"""The Haskell generator."""
def format_list(self, generic_type: SchemaType,
list_values: List[str]) -> str:
"""Formats a list for Haskell."""
_ = generic_type
return f'[{", ".join(list_values)}]'
def format_map(self, key_type: SchemaType, value_type: SchemaType,
entries: List[str]) -> str:
"""Formats a map for Haskell."""
_ = key_type
_ = value_type
return f'Map.fromList [{", ".join(entries)}]'
def format_map_entry(self, key: str, value: str) -> str:
"""Formats a map entry for Haskell."""
return f'({key}, {value})'
def format_set(self, generic_type: SchemaType, set_values: List[str]) -> str:
"""Formats a set for Haskell."""
_ = generic_type
return f'Set.fromList [{", ".join(set_values)}]'
def make_commands(file_path: pathlib.Path) -> List[data_types.Command]:
"""Makes the command to run a Haskell source file.
Args:
file_path: The path to the file to run.
Returns:
The list of commands to run.
"""
return [
data_types.Command(['ghc', '-o', 'main.exe', file_path.name]),
data_types.Command(['./main.exe'])
]
class HaskellPromptTranslator(translation.PromptTranslator):
"""The Haskell prompt translator."""
@property
def word_replacement_map(self) -> Dict[str, str]:
"""The Haskell prompt translator."""
return {
'list': ['vector', 'array'],
'map': ['dict', 'dictionary', 'dictionaries'],
}
@property
def signature_template(self) -> str:
"""The Haskell signature template."""
return '\n'.join([
'{%- if docstring is not none -%}{{docstring~"\n"}}{%- endif -%}',
'{{entry_fn_name}} :: {{signature}} -> {{return_type}}',
'{{entry_fn_name}} {{params|join(" ")}} = '
])
def clean_docstring_for_lang(self, docstring: str) -> str:
"""Cleans the docstring for Haskell."""
return docstring.replace('--', '\\-\\-')
def format_docstring_for_lang(self, docstring: str) -> str:
"""Formats a docstring for Haskell."""
out = []
for i, line in enumerate(docstring.splitlines(False)):
if i == 0:
prefix = '-- |'
else:
prefix = '--'
out.append(f'{prefix} {line}')
return '\n'.join(out)
def translate_signature_argument_to_lang(self, arg_name: str,
arg_type: SchemaType,
use_type_annotation: bool) -> str:
"""Translates the signature return to Haskell."""
_ = arg_name
_ = use_type_annotation
return f'{arg_type.lang_type}'
def format_signature(self, signature_args: List[str]) -> str:
"""Formats the signature for Haskell."""
return ' -> '.join(signature_args)
def translate_signature_returns_to_lang(self, return_type: SchemaType,
use_type_annotation: bool) -> str:
"""Translates the signature return to Haskell."""
_ = use_type_annotation
return return_type.lang_type
def translate_argument_name_to_lang(self, arg_name: str) -> str:
return utils.format_str_with_convention(utils.NamingConvention.SNAKE_CASE,
arg_name)
language.LanguageRegistry.register_language(
language.Language(name='Haskell',
file_ext='hs',
literal_translator_cls=HaskellLiteralTranslator,
command_fn=make_commands,
primitive_conversion_mapping={
'boolean': lambda v: 'True' if v else 'False'
},
prompt_translator_cls=HaskellPromptTranslator,
naming_convention=utils.NamingConvention.CAMEL_CASE))
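# Example (an illustrative sketch, not part of the original module; the entry
# function name 'solve' is an assumption): with argument types 'Int' and
# '[Int]' and return type 'Bool', format_signature joins the types with
# ' -> ', so the first rendered signature line reads:
#
#   solve :: Int -> [Int] -> Bool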
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Javascript Specific Classes and Functions."""
from typing import Dict, List
from babelcode import data_types
from babelcode import schema_parsing
from babelcode import translation
from babelcode import utils
from babelcode.languages import language
SchemaType = schema_parsing.SchemaType
class JSLiteralTranslator(translation.LiteralTranslator):
"""The Javascript generator."""
def format_map(self, key_type: SchemaType, value_type: SchemaType,
entries: List[str]) -> str:
"""Formats a map for Javascript."""
_ = value_type
_ = key_type
return '{' + ', '.join(entries) + '}'
def format_map_entry(self, key: str, value: str) -> str:
"""Formats a map entry for Javascript."""
return f'{key}: {value}'
def format_set(self, generic_type: SchemaType, set_values: List[str]) -> str:
"""Formats a set for Javascript."""
_ = generic_type
return f'new Set([{", ".join(set_values)}])'
class JSPromptTranslator(translation.PromptTranslator):
"""The Javascript prompt translator."""
@property
  def word_replacement_map(self) -> Dict[str, List[str]]:
"""The Javascript words to replace."""
return {
'array': ['vector', 'list'],
'map': ['dict', 'dictionary', 'dictionaries'],
}
@property
def signature_template(self) -> str:
"""The Javascript signature template."""
return '\n'.join([
'{%- if docstring is not none -%}{{docstring~"\n"}}{%- endif -%}',
'function {{entry_fn_name}}({{signature}}) {'
])
def clean_docstring_for_lang(self, docstring: str) -> str:
"""Translates a docstring for Javascript."""
return translation.escape_cpp_like_comment_chars(docstring)
def format_docstring_for_lang(self, docstring: str) -> str:
"""Formats a docstring for Javascript."""
return translation.format_cpp_like_docstring(docstring)
def translate_signature_argument_to_lang(self, arg_name: str,
arg_type: SchemaType,
use_type_annotation: bool) -> str:
"""Translates the signature argument to Javascript."""
return arg_name
def translate_signature_returns_to_lang(self, return_type: SchemaType,
use_type_annotation: bool) -> str:
"""Translates the signature return to Javascript."""
_ = use_type_annotation
return f': {return_type.lang_type}'
language.LanguageRegistry.register_language(
language.Language(
name='Javascript',
file_ext='js',
literal_translator_cls=JSLiteralTranslator,
command_fn=lambda fp: [data_types.Command(['node', fp.name])],
primitive_conversion_mapping={},
prompt_translator_cls=JSPromptTranslator,
naming_convention=utils.NamingConvention.CAMEL_CASE))
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Julia Specific Classes and Functions."""
import functools
from typing import Dict, List
from babelcode import schema_parsing
from babelcode import translation
from babelcode import utils
from babelcode.data_types.command import Command
from babelcode.languages import language
SchemaMapType = schema_parsing.SchemaMapType
SchemaType = schema_parsing.SchemaType
SchemaValueType = schema_parsing.SchemaValueType
class JuliaLiteralTranslator(translation.LiteralTranslator):
"""The Julia generator."""
def format_map(self, key_type: SchemaType, value_type: SchemaType,
entries: List[str]) -> str:
"""Formats a map for Julia."""
if not entries:
return 'Dict{' + key_type.lang_type + ',' + value_type.lang_type + '}()'
    return f'Dict({",".join(entries)})'
def format_map_entry(self, key: str, value: str) -> str:
"""Formats a single entry for a map."""
return f'{key} => {value}'
  def format_set(self, generic_type: SchemaType, set_values: List[str]) -> str:
"""Formats a set for Julia."""
if not set_values:
return generic_type.lang_type + '()'
return f'Set([{", ".join(set_values)}])'
def format_list(self, generic_type: SchemaType,
list_values: List[str]) -> str:
"""Formats a list for Julia."""
if not list_values:
return f'{generic_type.lang_type}(undef,0)'
return f'[{", ".join(list_values)}]'
class JuliaPromptTranslator(translation.PromptTranslator):
"""The Julia prompt translator."""
@property
  def word_replacement_map(self) -> Dict[str, List[str]]:
"""The Julia words to replace."""
return {
'vector': ['array', 'list'],
'dictionary': ['map'],
}
@property
def signature_template(self) -> str:
"""The Julia signature template."""
return '\n'.join([
'{%- if docstring is not none -%}{{docstring~"\n"}}{%- endif -%}',
'function {{entry_fn_name}}({{signature}}){{return_type}}',
])
def clean_docstring_for_lang(self, docstring: str) -> str:
"""Cleans and translates a docstring for Julia."""
return translation.escape_triple_quotes(docstring)
def format_docstring_for_lang(self, docstring: str) -> str:
"""Formats a docstring for Julia."""
return f'"""\n{docstring}\n"""'
def translate_signature_argument_to_lang(self, arg_name: str,
arg_type: SchemaType,
use_type_annotation: bool) -> str:
"""Translates the signature argument to Julia."""
if use_type_annotation:
return f'{arg_name}::{arg_type.lang_type}'
return arg_name
def translate_signature_returns_to_lang(self, return_type: SchemaType,
use_type_annotation: bool) -> str:
"""Translates the signature return to Julia."""
if use_type_annotation:
return f'::{return_type.lang_type}'
return ''
language.LanguageRegistry.register_language(
language.Language(
name='Julia',
file_ext='jl',
literal_translator_cls=JuliaLiteralTranslator,
command_fn=lambda fp: [Command(['julia', fp.name], timeout=10)],
primitive_conversion_mapping={},
prompt_translator_cls=JuliaPromptTranslator,
naming_convention=utils.NamingConvention.SNAKE_CASE,
escape_fn=lambda s: s.replace('$', '\\$')))
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Scala Specific classes and functions."""
import pathlib
from typing import Dict, List
from babelcode import data_types
from babelcode import schema_parsing
from babelcode import translation
from babelcode import utils
from babelcode.languages import language
SchemaMapType = schema_parsing.SchemaMapType
SchemaType = schema_parsing.SchemaType
SchemaValueType = schema_parsing.SchemaValueType
def make_commands(file_path: pathlib.Path) -> List[data_types.Command]:
"""Makes the Scala commands to run source code."""
return [
data_types.Command(['scalac', '-d', 'evaluation.jar', file_path.name],
timeout=15),
      data_types.Command(
          ['scala', '-d', 'evaluation.jar', 'QuestionEvaluator']),
]
class ScalaLiteralTranslator(translation.LiteralTranslator):
"""The Scala generator."""
def format_list(self, generic_type: SchemaType,
list_values: List[str]) -> str:
"""Formats a list for Scala."""
_ = generic_type
return f'List({", ".join(list_values)})'
  def format_map(self, key_type: SchemaType, value_type: SchemaType,
                 entries: List[str]) -> str:
"""Formats a map for Scala."""
_ = key_type, value_type
return f'HashMap({", ".join(entries)})'
def format_map_entry(self, key: str, value: str) -> str:
"""Formats a map entry for Scala."""
return f'{key} -> {value}'
  def format_set(self, generic_type: SchemaType, set_values: List[str]) -> str:
    """Formats a set for Scala."""
    _ = generic_type
    return f'HashSet({", ".join(set_values)})'
class ScalaPromptTranslator(translation.PromptTranslator):
"""The Scala prompt translator."""
@property
  def word_replacement_map(self) -> Dict[str, List[str]]:
"""The Scala words to replace."""
return {
'array': ['vector'],
'map': ['dict', 'dictionary', 'dictionaries'],
}
@property
def signature_template(self) -> str:
"""The Scala signature template."""
return '\n'.join([
'{%- if docstring is not none -%}{{docstring~"\n"}}{%- endif -%}',
'def {{entry_fn_name}}({{signature}}){{return_type}} = {',
])
def clean_docstring_for_lang(self, docstring: str) -> str:
"""Cleans a docstring for Scala."""
return translation.escape_cpp_like_comment_chars(docstring)
  def format_docstring_for_lang(self, docstring: str) -> str:
    """Formats a docstring for Scala."""
    return translation.format_cpp_like_docstring(docstring)
def translate_signature_argument_to_lang(self, arg_name: str,
arg_type: SchemaType,
use_type_annotation: bool) -> str:
"""Translates the signature argument to Scala."""
_ = use_type_annotation
return f'{arg_name}: {arg_type.lang_type}'
def translate_signature_returns_to_lang(self, return_type: SchemaType,
use_type_annotation: bool) -> str:
"""Translates the signature return to Scala."""
if use_type_annotation:
return f': {return_type.lang_type}'
return ''
language.LanguageRegistry.register_language(
language.Language(
name='Scala',
file_ext='scala',
literal_translator_cls=ScalaLiteralTranslator,
command_fn=make_commands,
primitive_conversion_mapping={
'float': lambda v: translation.convert_float(v, 'F'),
'long': lambda v: f'{v}L',
},
prompt_translator_cls=ScalaPromptTranslator,
naming_convention=utils.NamingConvention.CAMEL_CASE,
escape_fn=lambda s: s,
))
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rust Specific Classes and Functions."""
import pathlib
from typing import Callable, Dict, List, Optional
from babelcode import data_types
from babelcode import schema_parsing
from babelcode import translation
from babelcode import utils
from babelcode.languages import language
SchemaMapType = schema_parsing.SchemaMapType
SchemaType = schema_parsing.SchemaType
SchemaValueType = schema_parsing.SchemaValueType
class RustLiteralTranslator(translation.LiteralTranslator):
"""The Rust generator."""
def format_list(self, generic_type: SchemaType,
list_values: List[str]) -> str:
"""Formats a list for Rust."""
_ = generic_type
return f'Vec::from([{", ".join(list_values)}])'
  def format_map(self, key_type: SchemaType, value_type: SchemaType,
                 entries: List[str]) -> str:
"""Formats a map for Rust."""
_ = key_type
_ = value_type
entry_str = '[' + ', '.join(entries) + ']'
return f'HashMap::from({entry_str})'
def format_map_entry(self, key: str, value: str) -> str:
"""Formats a map entry for Rust."""
return f'({key}, {value})'
def format_set(self, generic_type: SchemaType, set_values: List[str]) -> str:
"""Formats a set for Rust."""
_ = generic_type
return f'Vec::from([{", ".join(set_values)}]).into_iter().collect()'
def make_commands(file_path: pathlib.Path) -> List[data_types.Command]:
"""Makes the command to run a Rust source file.
Args:
file_path: The path to the file to run.
Returns:
The list of commands to run.
"""
executable_name = f'./{file_path.stem}.exe'
return [
data_types.Command(['rustc', file_path.name, '-o', executable_name]),
data_types.Command([executable_name])
]
def _convert_string(value: SchemaValueType,
wrap_char: str = '"',
escape_fn: Optional[Callable[[str], str]] = None) -> str:
"""Converts a string to Rust specific format."""
value = translation.convert_string(value, wrap_char, escape_fn=escape_fn)
return f'{value}.to_string()'
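# A minimal sketch of the helper above, assuming translation.convert_string
# wraps the value in the given quote character; run directly, this should
# print something like '"abc".to_string()'.
if __name__ == '__main__':
  print(_convert_string('abc'))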
class RustPromptTranslator(translation.PromptTranslator):
"""The Rust prompt translator."""
@property
  def word_replacement_map(self) -> Dict[str, List[str]]:
"""The Rust words to replace."""
return {
'vector': ['vec', 'list'],
'set': ['dict', 'dictionary', 'dictionaries'],
}
@property
def signature_template(self) -> str:
"""The Rust signature template."""
return '\n'.join([
'{%- if docstring is not none -%}{{docstring~"\n"}}{%- endif -%}',
'pub fn {{entry_fn_name}}({{signature}}) -> {{return_type}} {'
])
def clean_docstring_for_lang(self, docstring: str) -> str:
"""Cleans a docstring for Rust."""
return docstring.replace('///', '\\/\\/\\/')
def format_docstring_for_lang(self, docstring: str) -> str:
"""Formats a docstring for Rust."""
    out = []
    for line in docstring.splitlines():
      out.append(f'/// {line}')
    return '\n'.join(out)
def translate_signature_argument_to_lang(self, arg_name: str,
arg_type: SchemaType,
use_type_annotation: bool) -> str:
"""Translates the signature argument to Rust."""
_ = use_type_annotation
return f'{arg_name}: {arg_type.lang_type}'
def translate_signature_returns_to_lang(self, return_type: SchemaType,
use_type_annotation: bool) -> str:
"""Translates the signature return to Rust."""
_ = use_type_annotation
return return_type.lang_type
language.LanguageRegistry.register_language(
language.Language(name='Rust',
file_ext='rs',
literal_translator_cls=RustLiteralTranslator,
command_fn=make_commands,
primitive_conversion_mapping={'string': _convert_string},
prompt_translator_cls=RustPromptTranslator,
naming_convention=utils.NamingConvention.SNAKE_CASE))
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes and Functions for SchemaTypes."""
import dataclasses
import json
from typing import List, Optional, Sequence
from absl import logging
from babelcode.schema_parsing import utils
def _split_tuple_children(child_type_str: str) -> List[str]:
"""Helper function to split the children elements for a tuple.
Args:
child_type_str: The generic children string. From a generic tuple string,
this is `tuple<CHILDREN TYPE STRING>`. Each child element is split by
`|`.
Raises:
utils.SchemaTypeError: If the schema type is malformed.
Returns:
The list of unparsed generic type strings for each child element of a
tuple. This helper function does not convert these generic type strings
      into `SchemaType` objects; it only tokenizes the input child type
string.
"""
out = []
current_element_characters = []
# Counter for the number of open '<' characters
num_open = 0
for character in child_type_str:
if character == '|' and num_open == 0:
out.append(''.join(current_element_characters))
current_element_characters = []
else:
if character == '<':
num_open += 1
elif character == '>':
num_open -= 1
if num_open < 0:
raise utils.SchemaTypeError(
f'{child_type_str} has uneven ">" characters')
current_element_characters.append(character)
if num_open != 0:
raise utils.SchemaTypeError(f'{child_type_str} has uneven ">" characters')
if current_element_characters:
out.append(''.join(current_element_characters))
return out
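# A minimal sketch of the tokenizer above, assuming this module is run
# directly. '|' characters nested inside '<>' do not split the string.
if __name__ == '__main__':
  print(_split_tuple_children('integer|list<string>|map<string;integer>'))
  # -> ['integer', 'list<string>', 'map<string;integer>']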
@dataclasses.dataclass
class SchemaType:
"""Generic representation of a type either used in the arguments or returns of a question.
For every question, the solution must have both inputs and an output. To
enable translation of these test cases to multiple languages, we need a
generic representation. The `SchemaType` class fufills this need. A generic
type string can either be a leaf (i.e. int, str) or have 'children'
elements (i.e. list of lists, map). A types children are contained wrapped
with `<>`. For the a `map`, the format of the generic type string is
`map<TYPE OF KEYS;TYPE OF VALUES>. As `tuple` can have multiple types for
its children, the format of the generic type string is
`tuple<TYPE OF ELEMENT 1, TYPE OF ELEMENT 2...>`. `list` and `set` both
follow the grammar of `TYPE NAME<TYPE OF ELEMENTS>`.
Attributes:
type_str: The generic string representing the type of this node.
lang_type: The language specific type string.
elements: The types of the children elements for all data structure types.
key_type: The type of the key values for a map. Only map types are allowed
to have this value.
"""
type_str: str
lang_type: str = 'NOT_SET'
elements: List['SchemaType'] = dataclasses.field(default_factory=list)
key_type: Optional['SchemaType'] = None
def __post_init__(self):
"""Performs simple validation after the class is initialized."""
if self.key_type is not None and not self.elements:
raise utils.SchemaTypeError('key_type is set without a value_type')
if self.type_str in utils.PRIMITIVE_DATA_STRUCTURES and not self.elements:
logging.error('Data structure type "%s" does not have proper values',
self.type_str)
raise utils.SchemaTypeError('data structure type must have a value')
def is_leaf(self) -> bool:
"""Is the type a leaf.
Returns:
bool: True if there are no children elements. Else False.
"""
    # Explicitly compare the length to 0; checking truthiness via
    # bool(self.elements) does not work here.
return len(self.elements) == 0 # pylint: disable=g-explicit-length-test
@classmethod
def from_generic_type_string(cls, type_str: str) -> 'SchemaType':
"""Converts a generic type string to a SchemaType.
Args:
type_str: The generic type string
Raises:
utils.SchemaTypeError: If there is a grammar error with the schema type
string.
ValueError: If an unexpected error occurs.
Returns:
The parsed SchemaType Object.
"""
open_count = type_str.count('<')
if open_count != type_str.count('>'):
raise utils.SchemaTypeError(
f'"{type_str}" does not have same number of <>')
if '[]' in type_str:
if open_count != 0:
raise utils.SchemaTypeError(f'"{type_str}" has both [] and <>')
return cls(type_str='list',
elements=[
cls.from_generic_type_string(type_str.replace('[]', '', 1))
])
if open_count == 0:
out = cls(type_str=type_str)
if not out.is_leaf():
raise ValueError(f'{out} had an error')
return out
    # We are looking for the format TYPE_NAME<...>: split on the first '<',
    # then rejoin and remove the trailing '>' character.
type_name, *children_types = type_str.split('<')
children_types = '<'.join(children_types)
if children_types[-1] != '>':
raise utils.SchemaTypeError(f'{children_types} has extra after last >')
children_types = children_types[:-1]
# If it is a map, it must have key and value types.
if type_name == 'map':
try:
key_value, value = children_types.split(';', 1)
except ValueError as e:
raise utils.SchemaTypeError(
f'Expected map to be in the form map<TYPE_1;TYPE_2>, but got {type_str}'
) from e
return cls(type_str=type_name,
key_type=cls.from_generic_type_string(key_value),
elements=[cls.from_generic_type_string(value)])
# Tuples must be handled by themselves as they can have multiple children
# types (and even more nested types). Therefore we must handle them
    # separately from the other default data types.
elif type_name == 'tuple':
# Split the children types by |
all_children = _split_tuple_children(children_types)
logging.debug('Found "tuple" in string "%s" with children=%s', type_str,
all_children)
# If all of the tuple elements are of the same type, convert them to a
# list of one type to support more languages.
if len(set(all_children)) == 1:
logging.debug('Children are of same type, converting to list.')
return cls(type_str='list',
elements=[cls.from_generic_type_string(all_children[0])])
return cls(type_str=type_name,
elements=list(map(cls.from_generic_type_string, all_children)))
# If not a special type, make the type from the children types string.
else:
return cls(type_str=type_name,
elements=[cls.from_generic_type_string(children_types)])
def to_generic(self) -> str:
"""Converts the SchemaType to a generic string."""
generic_elements = [e.to_generic() for e in self.elements]
if self.type_str in ['list', 'tuple', 'set']:
delim = '|' if self.type_str == 'tuple' else ','
return f'{self.type_str}<{delim.join(generic_elements)}>'
elif self.type_str == 'map':
return f'map<{self.key_type.to_generic()};{generic_elements[0]}>'
else:
return self.type_str
@classmethod
def copy(cls, original: 'SchemaType') -> 'SchemaType':
"""Copy a SchemaType.
Args:
original: The original type to copy.
Returns:
The copy of the original.
"""
new_elts = [SchemaType.copy(elt) for elt in original.elements]
if original.key_type is not None:
new_key = SchemaType.copy(original.key_type)
else:
new_key = None
return cls(original.type_str,
original.lang_type,
elements=new_elts,
key_type=new_key)
@property
def depth(self) -> int:
"""Maximum depth for the type."""
if self.is_leaf():
return 0
max_depth_of_subtree = 0
for v in self.elements:
max_depth_of_subtree = max(max_depth_of_subtree, v.depth)
# Add 1 as this node must have descendants.
return max_depth_of_subtree + 1
  def pprint(self) -> str:
    """Pretty-prints the SchemaType as indented JSON."""
    return json.dumps(dataclasses.asdict(self), indent=True)
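# A minimal round-trip sketch for SchemaType, assuming this module is run
# directly: parsing a generic type string and converting back is lossless.
if __name__ == '__main__':
  parsed = SchemaType.from_generic_type_string('map<string;list<integer>>')
  print(parsed.to_generic())  # map<string;list<integer>>
  print(parsed.depth)  # 2: the map holds a list, which holds integer leaves.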
def validate_correct_type(
schema_type: SchemaType,
value: utils.SchemaValueType,
chain: Optional[List[str]] = None) -> utils.SchemaValueType:
"""Validates that a value has the correct underlying value from the schema.
Args:
schema_type: The type to validate against.
value: The value to test against.
chain: The chain calling this, for debugging. Defaults to None.
Raises:
utils.SchemaTypeError: The value is not valid for the given type.
Returns:
The value with the correct type.
"""
def check_iterable_only_has_single_type(
value: Sequence[utils.SchemaValueType]):
if len(set(type(v).__name__ for v in value)) > 1:
raise utils.SchemaTypeError(
f'{schema_type.type_str} does not support multiple element types.')
chain = chain or []
expected_type = utils.GENERIC_TO_PYTHON_TYPE.get(schema_type.type_str, None)
if expected_type is None:
raise utils.SchemaTypeError(f'Unknown type {schema_type.type_str}')
# Some types (i.e. integer, float) do not support having null values in some
# languages. To standardize across all languages, we explicitly do not allow
# these cases to be None.
type_allows_null = utils.allows_null(schema_type.type_str)
if not type_allows_null and value is None:
logging.info('Got %s from %s', type(value).__name__, value)
logging.info('Chain is %s', chain)
raise utils.SchemaTypeError(f'{schema_type.type_str} does not support null')
# If the type allows null values, and the value is None, set to the empty
# type for simplified type validation.
elif value is None and type_allows_null:
value = expected_type()
if not isinstance(value, expected_type):
    # For later translation, we need to ensure that the value is a pure
    # float, so we convert integer values to floats.
if expected_type == float and isinstance(value, int):
value = float(value)
else:
      logging.info('Found value with mismatched types')
logging.info('Expected %s', schema_type)
logging.info('Got %s from %s', expected_type.__name__, value)
logging.info('Chain is %s', chain)
raise utils.SchemaTypeError(
f'Value is not of type {expected_type.__name__}')
if schema_type.type_str in ['list', 'set']:
# Sets and lists are represented internally as a list due to JSON.
new_values = []
for v in value:
new_values.append(
validate_correct_type(schema_type.elements[0], v,
[*chain, schema_type.type_str]))
value = new_values
check_iterable_only_has_single_type(value)
elif schema_type.type_str == 'map':
# Make sure the values in the map are the correct types.
for k, v in value.items():
value[k] = validate_correct_type(schema_type.elements[0], v,
[*chain, schema_type.type_str])
    # Because the question data is parsed from a raw JSON line, it must
    # conform to JSON standards, so keys cannot be ints. Therefore, this
    # section only checks that the keys are strings and, when the expected
    # key type is an int, that they can be converted to ints.
new_key_map = {}
expected_key_type = utils.GENERIC_TO_PYTHON_TYPE[
schema_type.key_type.type_str]
for k in value.keys():
if not isinstance(k, (str, int)):
logging.error('Chain is %s', ', '.join(chain))
logging.error('Raw value is %s', k)
raise utils.SchemaTypeError('Raw key is not a string or int')
if expected_key_type == int:
# Make sure it can be cast to int
try:
new_key = int(k)
except ValueError as e:
logging.error('Chain is %s', ', '.join(chain))
logging.error('Raw value is %s', k)
raise utils.SchemaTypeError(
'Key is expected to be an int, but could not convert the string to int.'
) from e
else:
# Not an int, so no need to worry about casting.
new_key = str(k)
# Check for duplicate keys after the type conversion.
if new_key in new_key_map:
raise utils.SchemaTypeError(f'Duplicate key {new_key} found in value')
new_key_map[new_key] = k
# Go through and update the keys.
for new_key, old_key in new_key_map.items():
value[new_key] = value.pop(old_key)
check_iterable_only_has_single_type(list(value.values()))
check_iterable_only_has_single_type(list(value.keys()))
return value
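# A minimal validation sketch, assuming this module is run directly: ints are
# coerced to floats for 'double', and list elements are checked recursively.
if __name__ == '__main__':
  print(validate_correct_type(SchemaType.from_generic_type_string('double'), 3))
  print(validate_correct_type(
      SchemaType.from_generic_type_string('list<integer>'), [1, 2, 3]))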
def is_generic_equal(left: SchemaType, right: SchemaType) -> bool:
"""Check if the generic schemas are equal.
For two types to be "generically" equal, they must have the same subtree
string OR either is a leaf node with null as the generic type.
Args:
left: left hand schema type
right: right hand schema type
Returns:
True if they are generically equal, otherwise false.
"""
  # If either is a leaf, they are equal only if one type_str is 'null' or the
  # type strings match.
if left.is_leaf() or right.is_leaf():
if left.type_str == 'null' or right.type_str == 'null':
return True
return left.type_str == right.type_str
if left.type_str != right.type_str:
return False
if left.type_str == 'map':
    # If either key type is None there is a bigger underlying issue, but for
    # our purposes the two types are simply not equal.
if left.key_type is None or right.key_type is None:
return False
if not is_generic_equal(left.key_type, right.key_type):
return False
if len(left.elements) != len(right.elements):
return False
for left_elem, right_elem in zip(left.elements, right.elements):
if not is_generic_equal(left_elem, right_elem):
return False
return True
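# A minimal sketch of generic equality, assuming this module is run directly:
# a 'null' leaf matches any element type.
if __name__ == '__main__':
  print(is_generic_equal(SchemaType.from_generic_type_string('list<null>'),
                         SchemaType.from_generic_type_string('list<integer>')))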
def is_reconcilable(schema_type: SchemaType) -> bool:
"""Can the type be reconciled to fix a difference.
Args:
schema_type: The type to check.
Returns:
True if it can be otherwise false.
"""
if schema_type.type_str in utils.RECONCILABLE_TYPES:
return True
return False
def reconcile_type(left: SchemaType, right: SchemaType) -> Optional[SchemaType]:
"""Reconcile two types.
To reconcile two types means to "downgrade" a leaf type if there is a
  collision. For example, we can downgrade a 'float' to a 'double', an
  'integer' to a 'float', or a 'character' to a 'string'.
Args:
left: The first SchemaType
right: The second SchemaType
  Raises:
    utils.SchemaTypeError: If there is an invalid SchemaType or the language
      types are already set.
Returns:
The copy of the new type if reconciled, otherwise None.
"""
  # Do not allow reconciling types whose language types are already set.
if left.lang_type != 'NOT_SET' or right.lang_type != 'NOT_SET':
raise utils.SchemaTypeError('Cannot reconcile types with a lang type')
if left.is_leaf() or right.is_leaf():
# If only one is leaf, return None
if not left.is_leaf() or not right.is_leaf():
return None
if is_reconcilable(left) or is_reconcilable(right):
left_target_types = utils.RECONCILABLE_TYPES.get(left.type_str, set())
right_target_types = utils.RECONCILABLE_TYPES.get(right.type_str, set())
if right.type_str in left_target_types:
# If the right type is the target, return a copy of it.
return SchemaType.from_generic_type_string(right.to_generic())
elif left.type_str in right_target_types:
# The case where it is reconcilable and other is the old type, return
# left
return SchemaType.from_generic_type_string(left.to_generic())
# Otherwise, neither can be reconciled, return None
return None
else:
return None
# The data structures are not the same.
if left.type_str != right.type_str:
return None
if len(left.elements) != len(right.elements):
return None
if left.key_type != right.key_type:
return None
new_elements = []
for element, other_element in zip(left.elements, right.elements):
new_element = reconcile_type(element, other_element)
if new_element is None:
return None
new_elements.append(new_element)
new_type = SchemaType.copy(left)
new_type.elements = new_elements
return SchemaType.from_generic_type_string(new_type.to_generic())
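# A minimal reconciliation sketch, assuming this module is run directly:
# 'float' is downgraded to 'double' when the two collide inside a list.
if __name__ == '__main__':
  reconciled = reconcile_type(
      SchemaType.from_generic_type_string('list<float>'),
      SchemaType.from_generic_type_string('list<double>'))
  print(reconciled.to_generic())  # list<double>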
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for parsing schemas."""
from typing import Any, Dict, List, Tuple
from babelcode import data_types
from babelcode.schema_parsing import languages
from babelcode.schema_parsing import schema_type
from babelcode.schema_parsing import utils
SchemaType = schema_type.SchemaType
LanguageSchemaSpec = languages.LanguageSchemaSpec
SchemaMapType = utils.SchemaMapType
def parse_language_schema(language_spec: LanguageSchemaSpec,
generic_schema: SchemaMapType) -> SchemaMapType:
"""Parses the generic schema into a language specific one.
Args:
language_spec: The language specific schema specification.
generic_schema: The mapping of argument name to the SchemaType object
without the language type set.
Raises:
utils.SchemaTypeError: A SchemaType is not supported in the current language
Returns:
The mapping of argument name to the schema types, now with the language
type set.
"""
# Helper function for recursing.
def convert_schema_type(s_type: SchemaType) -> SchemaType:
if s_type.is_leaf():
if s_type.type_str not in language_spec.primitive_lang_map:
raise utils.SchemaTypeError(
f'Leaf type "{s_type.type_str}" is not supported by'
f' {language_spec.name}')
s_type.lang_type = language_spec.primitive_lang_map[s_type.type_str]
else:
s_type.elements = list(map(convert_schema_type, s_type.elements))
if s_type.type_str in ['list', 'set']:
if s_type.type_str == 'list':
format_fn = language_spec.format_list_type
else:
format_fn = language_spec.format_set_type
s_type.lang_type = format_fn(s_type.elements[0].lang_type)
elif s_type.type_str == 'map':
s_type.key_type = convert_schema_type(s_type.key_type)
key_type = s_type.key_type.lang_type
value_type = s_type.elements[0].lang_type
s_type.lang_type = language_spec.format_map_type(key_type, value_type)
else:
raise utils.SchemaTypeError(
f'{s_type} is not supported by {language_spec.name}')
return s_type
for name in generic_schema:
generic_schema[name] = convert_schema_type(generic_schema[name])
return generic_schema
def parse_schema_and_input_order(
language_spec: LanguageSchemaSpec,
    raw_schema: Dict[str, Any]) -> Tuple[SchemaMapType, List[str]]:
  """Parses the language-specific schema and input order from a raw schema.
Args:
language_spec: The language schema spec to use.
raw_schema: The raw schema.
Returns:
The schema and the input order.
Raises:
data_types.IOPairError: if there is an error with parsing the schema.
"""
schema = {}
param_ordering = []
for var_dict in raw_schema['params']:
name = var_dict['name']
param_ordering.append(name)
schema[name] = SchemaType.from_generic_type_string(var_dict['type'])
schema[data_types.EXPECTED_KEY_NAME] = SchemaType.from_generic_type_string(
raw_schema['return']['type'])
schema = parse_language_schema(language_spec, schema)
  # Validate that the language schema has been fully updated. A missing
  # language type should never happen; this is purely a sanity check.
missing_types = list(
filter(lambda t: schema[t].lang_type == 'NOT_SET', schema))
if missing_types:
raise data_types.IOPairError(
f'{missing_types} are missing a language type.')
return schema, param_ordering
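# A minimal end-to-end sketch, assuming the package __init__ has registered
# the built-in specs and this module is run directly; the 'arg0' schema is
# purely illustrative.
if __name__ == '__main__':
  spec = languages.LanguageSchemaSpecRegistry.get_lang_spec('Python')
  raw = {
      'params': [{'name': 'arg0', 'type': 'list<integer>'}],
      'return': {'type': 'boolean'},
  }
  schema, input_order = parse_schema_and_input_order(spec, raw)
  print(input_order)  # ['arg0']
  print(schema['arg0'].lang_type)  # List[int]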
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Initialize the schema parsing module and register all of the languages."""
from babelcode.schema_parsing.languages import LanguageSchemaSpec
from babelcode.schema_parsing.languages import LanguageSchemaSpecRegistry
from babelcode.schema_parsing.parsing import parse_language_schema
from babelcode.schema_parsing.parsing import parse_schema_and_input_order
from babelcode.schema_parsing.schema_type import SchemaType
from babelcode.schema_parsing.schema_type import is_generic_equal
from babelcode.schema_parsing.schema_type import reconcile_type
from babelcode.schema_parsing.schema_type import validate_correct_type
from babelcode.schema_parsing.utils import PRIMITIVE_DATA_STRUCTURES
from babelcode.schema_parsing.utils import PRIMITIVE_TYPES
from babelcode.schema_parsing.utils import PRIMITIVE_WITH_NULL
from babelcode.schema_parsing.utils import RECONCILABLE_TYPES
from babelcode.schema_parsing.utils import SchemaMapType
from babelcode.schema_parsing.utils import SchemaTypeError
from babelcode.schema_parsing.utils import SchemaValueType
from babelcode.schema_parsing.utils import allows_null
def _register_specs():
"""Register the different language specifications.
This is done here so that, when the library is used, it automatically
registers all of the implemented languages.
"""
# pylint: disable=g-import-not-at-top
import inspect
from babelcode.schema_parsing import languages
# pylint: enable=g-import-not-at-top
for name, make_spec_fn in inspect.getmembers(languages, inspect.isfunction):
lang_spec = make_spec_fn()
if not isinstance(lang_spec, LanguageSchemaSpec):
raise TypeError(
f'{name} must return a LanguageSchemaSpec, instead got {type(lang_spec).__name__}'
)
LanguageSchemaSpecRegistry.register_language(lang_spec,
allow_overwrite=True)
_register_specs()
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for schema parsing."""
from typing import Mapping, Sequence, Union
from babelcode import utils
PRIMITIVE_TYPES = [
'string', 'integer', 'boolean', 'float', 'double', 'character', 'long'
]
PRIMITIVE_WITH_NULL = ['string', 'character']
PRIMITIVE_DATA_STRUCTURES = ['list', 'map', 'tuple', 'set']
RECONCILABLE_TYPES = {
'float': {'double'},
'integer': {'long', 'float', 'double'},
'long': {'double'},
'character': {'string'}
}
GENERIC_TO_PYTHON_TYPE = {
'list': list,
'integer': int,
'long': int,
'float': float,
'double': float,
'boolean': bool,
'string': str,
# Internally, sets are kept as lists due to the fact that JSON does not
    # support sets. It is significantly easier to keep them as lists in the
# framework so that we can use indexing on them and list specific methods.
# This cuts down on the amount of code and special functions needed to
# handle the two.
'set': list,
'map': dict,
'tuple': tuple,
'character': str
}
def allows_null(type_str: str) -> bool:
"""Is the type string allowed to be NULL."""
return type_str in PRIMITIVE_WITH_NULL + PRIMITIVE_DATA_STRUCTURES
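# A minimal sketch, assuming this module is run directly: strings may be null
# in every supported language, but integers may not.
if __name__ == '__main__':
  print(allows_null('string'), allows_null('integer'))  # True False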
class SchemaTypeError(Exception):
"""Error raised when a SchemaType is not valid."""
SchemaValueType = Union[str, int, float, bool, Sequence['SchemaValueType'],
Mapping[Union[str, int], 'SchemaValueType']]
SchemaMapType = Mapping[str, SchemaValueType]
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementations for each language's schema specification."""
import dataclasses
from typing import Callable, Dict, List
from babelcode.schema_parsing import utils
@dataclasses.dataclass
class LanguageSchemaSpec:
"""Specification for a language specific schema.
Attributes:
name: Name to register this spec under.
primitive_lang_map: The dictionary mapping the primitive types defined in
the `schema_parsing.py` file to the corresponding type in the current
language.
format_list_type: A callable that takes in a single string that represents
the type of the elements of the list. Returns the list type string for a
list with the element types.
format_map_type: A callable that takes in `key_type` and `value_type`
strings and returns the language type for a map.
format_set_type: A callable that takes in a single string that represents
the type of the elements of the set. Returns the set type string for a
list with the element types.
"""
name: str
primitive_lang_map: Dict[str, str]
format_list_type: Callable[[str], str]
format_map_type: Callable[[str, str], str]
format_set_type: Callable[[str], str]
class LanguageSchemaSpecRegistry:
"""The registry of language specifications."""
_REGISTRY = {}
@classmethod
def register_language(cls,
language_spec: LanguageSchemaSpec,
allow_overwrite: bool = False):
"""Registers a language specification.
Args:
language_spec: The language specification to register.
allow_overwrite: Allow overwriting existing registered.
Raises:
KeyError: The language specification is already registered.
"""
if language_spec.name in cls._REGISTRY and not allow_overwrite:
raise KeyError(f'{language_spec.name} already registered')
cls._REGISTRY[language_spec.name] = language_spec
@classmethod
def get_lang_spec(cls, language: str) -> LanguageSchemaSpec:
"""Gets the language specification."""
return cls._REGISTRY[language]
@classmethod
def list_languages(cls) -> List[str]:
"""Lists the registered languages."""
return list(cls._REGISTRY)
################################################################
# Language Schema Spec methods. #
################################################################
def make_cpp_spec() -> LanguageSchemaSpec:
"""Makes the C++ schema spec."""
primitive_map = {
'boolean': 'bool',
'integer': 'int',
'character': 'char',
'float': 'float',
'double': 'double',
'long': 'long long',
'string': 'string',
}
return LanguageSchemaSpec(
name='C++',
primitive_lang_map=primitive_map,
format_list_type=lambda t: f'vector<{t}>',
format_map_type=lambda k, v: f'map<{k},{v}>',
format_set_type=lambda t: f'set<{t}>',
)
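# A minimal sketch, assuming this module is run directly: the format callables
# compose, so nested generic types map onto nested C++ templates.
if __name__ == '__main__':
  cpp = make_cpp_spec()
  inner = cpp.format_map_type(cpp.primitive_lang_map['string'],
                              cpp.primitive_lang_map['integer'])
  print(cpp.format_list_type(inner))  # vector<map<string,int>>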
def make_go_spec() -> LanguageSchemaSpec:
"""Makes golang spec."""
primitive_map = {
'boolean': 'bool',
'integer': 'int',
'character': 'rune',
'float': 'float64',
'double': 'float64',
'long': 'int64',
'string': 'string',
}
return LanguageSchemaSpec(
name='Go',
primitive_lang_map=primitive_map,
format_list_type=lambda t: f'[]{t}',
format_map_type=lambda k, v: f'map[{k}]{v}',
format_set_type=lambda t: f'map[{t}]bool',
)
def make_java_spec() -> LanguageSchemaSpec:
"""Makes java spec."""
primitive_map = {
'boolean': 'Boolean',
'integer': 'Integer',
'character': 'Character',
'float': 'Float',
'double': 'Double',
'long': 'Long',
'string': 'String',
}
return LanguageSchemaSpec(
name='Java',
primitive_lang_map=primitive_map,
format_list_type=lambda t: f'ArrayList<{t}>',
format_map_type=lambda k, v: f'Map<{k}, {v}>',
format_set_type=lambda t: f'HashSet<{t}>',
)
def make_js_spec() -> LanguageSchemaSpec:
"""Makes js spec."""
primitive_map = {
'boolean': 'Boolean',
'integer': 'Number',
'character': 'String',
'float': 'Number',
'double': 'Number',
'long': 'Number',
'string': 'String',
}
return LanguageSchemaSpec(
name='Javascript',
primitive_lang_map=primitive_map,
format_list_type=lambda _: 'Array',
format_map_type=lambda *_: 'Map',
format_set_type=lambda _: 'Set',
)
def make_julia_spec() -> LanguageSchemaSpec:
"""Makes Julia spec."""
primitive_map = {
'boolean': 'Bool',
'integer': 'Int64',
'character': 'Char',
'float': 'Float64',
'double': 'Float64',
'long': 'Int64',
'string': 'String',
}
return LanguageSchemaSpec(
name='Julia',
primitive_lang_map=primitive_map,
format_list_type=lambda t: 'Vector{' + t + '}',
format_map_type=lambda k, v: 'Dict{' + k + ', ' + v + '}',
format_set_type=lambda t: 'Set{' + t + '}',
)
def make_kotlin_spec() -> LanguageSchemaSpec:
"""Makes Kotlin spec."""
primitive_map = {
'boolean': 'Boolean',
'integer': 'Int',
'character': 'Char',
'float': 'Float',
'double': 'Double',
'long': 'Long',
'string': 'String',
}
return LanguageSchemaSpec(
name='Kotlin',
primitive_lang_map=primitive_map,
format_list_type=lambda t: f'ArrayList<{t}>',
format_map_type=lambda k, v: f'Map<{k}, {v}>',
format_set_type=lambda t: f'MutableSet<{t}>',
)
def make_lua_spec() -> LanguageSchemaSpec:
"""Makes the lua spec."""
primitive_map = {
'boolean': 'boolean',
'integer': 'number',
'character': 'string',
'float': 'number',
'double': 'number',
'long': 'number',
'string': 'string',
}
return LanguageSchemaSpec(
name='Lua',
primitive_lang_map=primitive_map,
format_list_type=lambda _: 'array',
format_map_type=lambda *_: 'table',
format_set_type=lambda _: 'table',
)
def make_php_spec() -> LanguageSchemaSpec:
"""Makes the PHP spec."""
primitive_map = {
'boolean': 'boolean',
'integer': 'number',
'character': 'string',
'float': 'number',
'double': 'number',
'long': 'number',
'string': 'string',
}
return LanguageSchemaSpec(
name='PHP',
primitive_lang_map=primitive_map,
format_list_type=lambda _: 'array',
format_map_type=lambda *_: 'array',
format_set_type=lambda _: 'array',
)
def make_python_spec() -> LanguageSchemaSpec:
"""Make the python spec."""
primitive_map = {
'boolean': 'bool',
'integer': 'int',
'character': 'str',
'float': 'float',
'double': 'float',
'long': 'int',
'string': 'str',
}
return LanguageSchemaSpec(
name='Python',
primitive_lang_map=primitive_map,
format_list_type=lambda t: f'List[{t}]',
format_map_type=lambda k, v: f'Dict[{k}, {v}]',
format_set_type=lambda t: f'Set[{t}]',
)
def make_rust_spec() -> LanguageSchemaSpec:
"""Makes the rust spec."""
primitive_map = {
'boolean': 'bool',
'integer': 'i32',
'character': 'char',
'float': 'f32',
'double': 'f64',
'long': 'i64',
'string': 'String',
}
return LanguageSchemaSpec(
name='Rust',
primitive_lang_map=primitive_map,
format_list_type=lambda t: f'Vec<{t}>',
format_map_type=lambda k, v: f'HashMap<{k}, {v}>',
format_set_type=lambda t: f'HashSet<{t}>',
)
def make_haskell_spec() -> LanguageSchemaSpec:
"""Makes the haskell spec."""
primitive_map = {
'boolean': 'Bool',
'integer': 'Integer',
'character': 'Char',
'float': 'Double',
'double': 'Double',
'long': 'Integer',
'string': 'String',
}
return LanguageSchemaSpec(
name='Haskell',
primitive_lang_map=primitive_map,
format_list_type=lambda t: f'[{t}]',
format_map_type=lambda k, v: f'Map.Map {k} {v}',
format_set_type=lambda t: f'Set.Set {t}',
)
def make_csharp_spec() -> LanguageSchemaSpec:
"""Makes C# spec."""
primitive_map = {
'boolean': 'bool',
'integer': 'int',
'character': 'char',
'float': 'float',
'double': 'decimal',
'long': 'long',
'string': 'string',
}
return LanguageSchemaSpec(
name='CSharp',
primitive_lang_map=primitive_map,
format_list_type=lambda t: f'List<{t}>',
format_map_type=lambda k, v: f'Dictionary<{k}, {v}>',
format_set_type=lambda t: f'HashSet<{t}>',
)
def make_typescript_spec() -> LanguageSchemaSpec:
"""Makes Typescript spec."""
primitive_map = {
'boolean': 'boolean',
'integer': 'number',
'character': 'string',
'float': 'number',
'double': 'number',
'long': 'number',
'string': 'string',
}
return LanguageSchemaSpec(
name='TypeScript',
primitive_lang_map=primitive_map,
format_list_type=lambda k: f'Array<{k}>',
format_map_type=lambda k, v: f'Record<{k},{v}>',
format_set_type=lambda v: f'Set<{v}>',
)
def make_scala_spec() -> LanguageSchemaSpec:
"""Makes Scala spec."""
primitive_map = {
'boolean': 'Boolean',
'integer': 'Int',
'character': 'Char',
'float': 'Float',
'double': 'Double',
'long': 'Long',
'string': 'String',
}
return LanguageSchemaSpec(
name='Scala',
primitive_lang_map=primitive_map,
format_list_type=lambda k: f'List[{k}]',
format_map_type=lambda k, v: f'HashMap[{k}, {v}]',
format_set_type=lambda v: f'HashSet[{v}]',
)
def make_r_spec() -> LanguageSchemaSpec:
"""Makes R spec."""
primitive_map = {
'boolean': 'logical',
'integer': 'integer',
'character': 'character',
'float': 'numeric',
'double': 'numeric',
'long': 'integer',
'string': 'character',
}
def convert_map(key, value):
"""R Does not allow integer keys."""
if key == 'integer':
raise utils.SchemaTypeError('R does not support integer key values.')
return f'list[{key}, {value}]'
return LanguageSchemaSpec(
name='R',
primitive_lang_map=primitive_map,
format_list_type=lambda k: f'list[{k}]',
format_map_type=convert_map,
format_set_type=lambda v: f'list[{v}]',
)
def make_dart_spec() -> LanguageSchemaSpec:
"""Makes dart spec."""
primitive_map = {
'boolean': 'bool',
'integer': 'int',
'character': 'String',
'float': 'double',
'double': 'double',
'long': 'int',
'string': 'String',
}
return LanguageSchemaSpec(
name='Dart',
primitive_lang_map=primitive_map,
format_list_type=lambda k: f'List<{k}>',
format_map_type=lambda k, v: f'Map<{k}, {v}>',
format_set_type=lambda v: f'Set<{v}>',
)
|
# coding=utf-8
# Copyright 2021 The Circuit Training Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pytest fixtures."""
import sys
from absl import flags
import pytest
@pytest.fixture(scope='session', autouse=True)
def parse_flags():
"""Triggers flags to be parsed before tests are executed.
Without this fixture FLAGs are not parsed and an error that flags are being
used before they are parsed is thrown.
"""
# Only pass the first item, because pytest flags shouldn't be parsed as absl
# flags.
flags.FLAGS(sys.argv[:1])
|
# coding=utf-8
# Copyright 2021 The Circuit Training Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build, test, and install circuit training."""
import argparse
import codecs
import datetime
import os
import sys
from setuptools import find_packages
from setuptools import setup
from setuptools.dist import Distribution
# Enables importing version.py directly by adding its path to sys.path.
version_path = os.path.join(os.path.dirname(__file__), 'circuit_training')
sys.path.append(version_path)
import version as circuit_training_version # pylint: disable=g-import-not-at-top
# Default versions of the tf-agents dependency.
TF_AGENTS = 'tf-agents[reverb]'
TF_AGENTS_NIGHTLY = 'tf-agents-nightly[reverb]'
class BinaryDistribution(Distribution):
def has_ext_modules(self):
return True
class SetupToolsHelper(object):
"""Helper to execute `setuptools.setup()`."""
def __init__(self, release=True, tf_agents_override=None):
"""Initialize SetupToolsHelper class.
Args:
release: True to do a release build. False for a nightly build.
tf_agents_override: Set to override the tf_agents dependency.
"""
self.release = release
self.tf_agents_override = tf_agents_override
def _get_version(self):
"""Returns the version and project name to associate with the build."""
if self.release:
project_name = 'circuit_training'
version = circuit_training_version.__rel_version__
else:
project_name = 'circuit_training-nightly'
version = circuit_training_version.__dev_version__
version += datetime.datetime.now().strftime('%Y%m%d')
return version, project_name
def _get_tf_agents_packages(self):
"""Returns required tf_agents package."""
if self.release:
tf_agents_version = TF_AGENTS
else:
tf_agents_version = TF_AGENTS_NIGHTLY
    # Overrides the required version if tf_agents_override is set.
if self.tf_agents_override:
tf_agents_version = self.tf_agents_override
return [tf_agents_version]
def run_setup(self):
# Builds the long description from the README.
root_path = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(root_path, 'README.md'),
encoding='utf-8') as f:
long_description = f.read()
version, project_name = self._get_version()
setup(
name=project_name,
version=version,
description=('Circuit Training'),
long_description=long_description,
long_description_content_type='text/markdown',
author='Google LLC',
author_email='[email protected]',
url='https://github.com/google-research/circuit_training',
license='Apache 2.0',
packages=find_packages(),
include_package_data=True,
install_requires=self._get_tf_agents_packages(),
extras_require={
'testing': self._get_tf_agents_packages(),
},
distclass=BinaryDistribution,
python_requires='>=3',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
keywords='google-research reinforcement learning circuit training',
)
if __name__ == '__main__':
# Hide argparse help so `setuptools.setup` help prints. This pattern is an
# improvement over using `sys.argv` and then `sys.argv.remove`, which also
# did not provide help about custom arguments.
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument(
'--release',
action='store_true',
help='Pass as true to do a release build')
parser.add_argument(
'--tf-agents-version',
type=str,
default=None,
help='Overrides version of TF-Agents required')
FLAGS, unparsed = parser.parse_known_args()
# Go forward with only non-custom flags.
sys.argv.clear()
# Downstream `setuptools.setup` expects args to start at the second element.
unparsed.insert(0, 'foo')
sys.argv.extend(unparsed)
setup_tools_helper = SetupToolsHelper(
release=FLAGS.release,
tf_agents_override=FLAGS.tf_agents_version)
setup_tools_helper.run_setup()
|
# coding=utf-8
# Copyright 2021 The Circuit Training Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define Circuit Training version information."""
# We follow Semantic Versioning (https://semver.org/)
_MAJOR_VERSION = '0'
_MINOR_VERSION = '1'
_PATCH_VERSION = '0'
# When building releases, we can update this value on the release branch to
# reflect the current release candidate ('rc0', 'rc1') or, finally, the official
# stable release (indicated by `_REL_SUFFIX = ''`). Outside the context of a
# release branch, the current version is by default assumed to be a
# 'development' version, labeled 'dev'.
_DEV_SUFFIX = 'dev'
_REL_SUFFIX = 'rc0'
# Example, '0.10.0rc0'
__version__ = '.'.join([
_MAJOR_VERSION,
_MINOR_VERSION,
_PATCH_VERSION,
])
__dev_version__ = '{}.{}'.format(__version__, _DEV_SUFFIX)
__rel_version__ = '{}{}'.format(__version__, _REL_SUFFIX)
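# A minimal sketch, assuming this module is run directly, of the version
# strings assembled above.
if __name__ == '__main__':
  print(__version__)  # 0.1.0
  print(__dev_version__)  # 0.1.0.dev
  print(__rel_version__)  # 0.1.0rc0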
|
# coding=utf-8
# Copyright 2021 The Circuit Training Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# coding=utf-8
# Copyright 2021 The Circuit Training Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Eval job using a variable container to fetch the weights of the policy."""
import collections
import os
import statistics
import time
from typing import Text
from absl import flags
from absl import logging
from tf_agents.experimental.distributed import reverb_variable_container
from tf_agents.metrics import py_metric
from tf_agents.metrics import py_metrics
from tf_agents.policies import greedy_policy # pylint: disable=unused-import
from tf_agents.policies import py_tf_eager_policy
from tf_agents.train import actor
from tf_agents.train import learner
from tf_agents.train.utils import train_utils
from tf_agents.trajectories import trajectory
from tf_agents.utils import common
flags.DEFINE_string('netlist_file', '',
'File path to the netlist file.')
flags.DEFINE_string('init_placement', '',
'File path to the init placement file.')
flags.DEFINE_string('root_dir', os.getenv('TEST_UNDECLARED_OUTPUTS_DIR'),
'Root directory for writing logs/summaries/checkpoints.')
flags.DEFINE_string('variable_container_server_address', None,
'Variable container server address.')
flags.DEFINE_integer(
'global_seed', 111,
'Used in env and weight initialization, does not impact action sampling.')
FLAGS = flags.FLAGS
class InfoMetric(py_metric.PyStepMetric):
"""Observer for graphing the environment info metrics."""
def __init__(
self,
env,
info_metric_key: Text,
buffer_size: int = 1,
name: Text = 'InfoMetric',
):
"""Observer reporting TensorBoard metrics at the end of each episode.
Args:
env: environment.
info_metric_key: a string key from the environment info to report,
e.g. wirelength, density, congestion.
buffer_size: size of the buffer for calculating the aggregated metrics.
name: name of the observer object.
"""
super(InfoMetric, self).__init__(name + '_' + info_metric_key)
self._env = env
self._info_metric_key = info_metric_key
self._buffer = collections.deque(maxlen=buffer_size)
def call(self, traj: trajectory.Trajectory):
"""Report the requested metrics at the end of each episode."""
# We collect the metrics from the info from the environment instead.
# The traj argument is kept to be compatible with the actor/learner API
# for metrics.
del traj
if self._env.done:
metric_value = self._env.get_info()[self._info_metric_key]
self._buffer.append(metric_value)
def result(self):
return statistics.mean(self._buffer)
def reset(self):
self._buffer.clear()
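# A minimal usage sketch (hypothetical `env`; not part of the module): with
# the default buffer_size=1, result() reports the metric of the most recent
# finished episode, while a larger buffer averages the last N episodes.
#
#   wirelength_metric = InfoMetric(env, 'wirelength', buffer_size=8)
#   # The actor calls wirelength_metric(traj) each step; after episodes end,
#   # wirelength_metric.result() is the mean over up to the last 8 episodes.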
def evaluate(root_dir, variable_container_server_address, create_env_fn):
"""Evaluates greedy policy."""
# Create the path for the serialized greedy policy.
policy_saved_model_path = os.path.join(root_dir,
learner.POLICY_SAVED_MODEL_DIR,
learner.GREEDY_POLICY_SAVED_MODEL_DIR)
saved_model_pb_path = os.path.join(policy_saved_model_path, 'saved_model.pb')
try:
    # Wait for the greedy policy to be output by the learner (timeout after
    # 2 days), then load it.
train_utils.wait_for_file(
saved_model_pb_path, sleep_time_secs=2, num_retries=86400)
policy = py_tf_eager_policy.SavedModelPyTFEagerPolicy(
policy_saved_model_path, load_specs_from_pbtxt=True)
except TimeoutError as e:
# If the greedy policy does not become available during the wait time of
# the call `wait_for_file`, that probably means the learner is not running.
logging.error('Could not get the file %s. Exiting.', saved_model_pb_path)
raise e
# Create the variable container.
train_step = train_utils.create_train_step()
model_id = common.create_variable('model_id')
# Create the environment.
env = create_env_fn(train_step=train_step)
variables = {
reverb_variable_container.POLICY_KEY: policy.variables(),
reverb_variable_container.TRAIN_STEP_KEY: train_step,
'model_id': model_id,
}
variable_container = reverb_variable_container.ReverbVariableContainer(
variable_container_server_address,
table_names=[reverb_variable_container.DEFAULT_TABLE])
variable_container.update(variables)
# Create the evaluator actor.
info_metrics = [
InfoMetric(env, 'wirelength'),
InfoMetric(env, 'congestion'),
InfoMetric(env, 'density'),
]
eval_actor = actor.Actor(
env,
policy,
train_step,
episodes_per_run=1,
summary_dir=os.path.join(root_dir, learner.TRAIN_DIR, 'eval'),
metrics=[
py_metrics.NumberOfEpisodes(),
py_metrics.EnvironmentSteps(),
py_metrics.AverageReturnMetric(
name='eval_episode_return', buffer_size=1),
py_metrics.AverageEpisodeLengthMetric(buffer_size=1),
] + info_metrics,
name='performance')
# Run the experience evaluation loop.
while True:
eval_actor.run()
variable_container.update(variables)
logging.info('Evaluating using greedy policy at step: %d',
train_step.numpy())
# Write out summaries at the end of each evaluation iteration. This way,
# we can look at the wirelength, density and congestion metrics more
# frequently.
eval_actor.write_metric_summaries()
time.sleep(20)
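# A minimal wiring sketch for this eval job (hypothetical address and factory;
# the real binary builds create_env_fn via functools.partial around
# environment.create_circuit_environment):
#
#   evaluate(
#       root_dir='/tmp/circuit_training/111',
#       variable_container_server_address='localhost:8008',
#       create_env_fn=my_create_env_fn)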
|
# coding=utf-8
# Copyright 2021 The Circuit Training Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library for PPO collect job."""
import os
from absl import flags
from absl import logging
import reverb
from tf_agents.experimental.distributed import reverb_variable_container
from tf_agents.policies import py_tf_eager_policy
from tf_agents.replay_buffers import reverb_utils
from tf_agents.train import actor
from tf_agents.train import learner
from tf_agents.train.utils import train_utils
from tf_agents.utils import common
flags.DEFINE_string('netlist_file', '',
'File path to the netlist file.')
flags.DEFINE_string('init_placement', '',
'File path to the init placement file.')
flags.DEFINE_string('root_dir', os.getenv('TEST_UNDECLARED_OUTPUTS_DIR'),
'Root directory for writing logs/summaries/checkpoints.')
flags.DEFINE_string('replay_buffer_server_address', None,
'Replay buffer server address.')
flags.DEFINE_string('variable_container_server_address', None,
'Variable container server address.')
flags.DEFINE_integer(
'task_id', 0, 'Identifier of the collect task. Must be unique in a job.')
flags.DEFINE_integer(
'write_summaries_task_threshold', 1,
    'Only collect jobs with a task ID smaller than this value write '
    'summaries.')
flags.DEFINE_integer(
'max_sequence_length', 134,
'The sequence length for Reverb replay buffer. Depends on the environment.')
flags.DEFINE_integer(
'global_seed', 111,
'Used in env and weight initialization, does not impact action sampling.')
FLAGS = flags.FLAGS
def collect(task,
root_dir,
replay_buffer_server_address,
variable_container_server_address,
create_env_fn,
max_sequence_length,
write_summaries_task_threshold=1):
"""Collects experience using a policy updated after every episode."""
# Create the environment.
env = create_env_fn()
# Create the path for the serialized collect policy.
policy_saved_model_path = os.path.join(root_dir,
learner.POLICY_SAVED_MODEL_DIR,
learner.COLLECT_POLICY_SAVED_MODEL_DIR)
saved_model_pb_path = os.path.join(policy_saved_model_path, 'saved_model.pb')
try:
    # Wait for the collect policy to be output by the learner (timeout after
    # 2 days), then load it.
train_utils.wait_for_file(
saved_model_pb_path, sleep_time_secs=2, num_retries=86400)
policy = py_tf_eager_policy.SavedModelPyTFEagerPolicy(
policy_saved_model_path, load_specs_from_pbtxt=True)
except TimeoutError as e:
# If the collect policy does not become available during the wait time of
# the call `wait_for_file`, that probably means the learner is not running.
logging.error('Could not get the file %s. Exiting.', saved_model_pb_path)
raise e
# Create the variable container.
train_step = train_utils.create_train_step()
model_id = common.create_variable('model_id')
variables = {
reverb_variable_container.POLICY_KEY: policy.variables(),
reverb_variable_container.TRAIN_STEP_KEY: train_step,
'model_id': model_id,
}
variable_container = reverb_variable_container.ReverbVariableContainer(
variable_container_server_address,
table_names=[reverb_variable_container.DEFAULT_TABLE])
variable_container.update(variables)
# Create the replay buffer observer for collect jobs.
observers = [
reverb_utils.ReverbAddEpisodeObserver(
reverb.Client(replay_buffer_server_address),
table_name=['training_table'],
max_sequence_length=max_sequence_length,
priority=model_id)
]
# Write metrics only if the task ID of the current job is below the limit.
summary_dir = None
metrics = []
if task < write_summaries_task_threshold:
summary_dir = os.path.join(root_dir, learner.TRAIN_DIR, str(task))
metrics = actor.collect_metrics(1)
# Create the collect actor.
collect_actor = actor.Actor(
env,
policy,
train_step,
episodes_per_run=1,
summary_dir=summary_dir,
metrics=metrics,
observers=observers)
# Run the experience collection loop.
while True:
collect_actor.run()
variable_container.update(variables)
logging.info('Collecting at step: %d', train_step.numpy())
logging.info('Collecting at model_id: %d', model_id.numpy())
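# A minimal wiring sketch (hypothetical addresses and env factory; mirrors how
# the collect binary invokes this library):
#
#   collect(
#       task=0,
#       root_dir='/tmp/circuit_training/111',
#       replay_buffer_server_address='localhost:8000',
#       variable_container_server_address='localhost:8008',
#       create_env_fn=my_create_env_fn,
#       max_sequence_length=134)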
|
# coding=utf-8
# Copyright 2021 The Circuit Training Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing the data collection."""
import os
from absl import flags
from absl import logging
from absl.testing import parameterized
from circuit_training.environment import environment
from circuit_training.learning import agent
import numpy as np
import tensorflow.compat.v2 as tf
from tf_agents.drivers import py_driver
from tf_agents.policies import py_tf_eager_policy
from tf_agents.specs import array_spec
from tf_agents.train.utils import spec_utils
from tf_agents.train.utils import train_utils
from tf_agents.trajectories import policy_step
from tf_agents.trajectories import time_step as ts
from tf_agents.utils import test_utils
FLAGS = flags.FLAGS
_TESTDATA_DIR = ('circuit_training/'
'environment/test_data/sample_clustered')
class _ValidateTimeStepObserver(object):
"""Observer that validates the time steps and collects episode lengths."""
def __init__(self, test_case, time_step_spec, action_step_spec):
self._test_case = test_case
self._time_step_spec = time_step_spec
self._action_step_spec = action_step_spec
self._current_len = 0
self._episode_lengths = []
@property
def episode_lengths(self):
return self._episode_lengths
def __call__(self, trajectory):
# Check the time step spec.
time_step = ts.TimeStep(
trajectory.step_type,
reward=trajectory.reward,
discount=trajectory.discount,
observation=trajectory.observation)
logging.info('Time step: %s', time_step)
logging.info('Time spec: %s', self._time_step_spec)
self._test_case.assertTrue(
array_spec.check_arrays_nest(time_step, self._time_step_spec))
# Check the action step spec.
action_step = policy_step.PolicyStep(
action=trajectory.action, info=trajectory.policy_info)
logging.info('Action step: %s', action_step.action)
logging.info('Action spec: %s', self._action_step_spec)
self._test_case.assertTrue(
array_spec.check_arrays_nest(action_step.action,
self._action_step_spec))
# Update episode length statistics.
    logging.info('Index of trajectory within the episode: %s',
                 self._current_len)
if trajectory.is_last():
self._episode_lengths.append(self._current_len)
self._current_len = 0
else:
self._current_len += 1
class CollectTest(parameterized.TestCase, test_utils.TestCase):
@parameterized.named_parameters(
('_default', tf.distribute.get_strategy()),
('_one_device', tf.distribute.OneDeviceStrategy('/cpu:0')),
('_mirrored',
tf.distribute.MirroredStrategy(devices=('/cpu:0', '/cpu:1'))))
def test_collect_with_newly_initialized_ppo_collect_policy(self, strategy):
# Create the environment.
env = environment.create_circuit_environment(
netlist_file=os.path.join(FLAGS.test_srcdir, _TESTDATA_DIR,
'netlist.pb.txt'),
init_placement=os.path.join(FLAGS.test_srcdir, _TESTDATA_DIR,
'initial.plc'))
observation_tensor_spec, action_tensor_spec, time_step_tensor_spec = (
spec_utils.get_tensor_specs(env))
# Create the agent whose collect policy is being tested.
with strategy.scope():
train_step = train_utils.create_train_step()
tf_agent = agent.create_circuit_ppo_grl_agent(train_step,
observation_tensor_spec,
action_tensor_spec,
time_step_tensor_spec,
strategy)
tf_agent.initialize()
    # Create and run the driver, and check the data in an observer that
    # asserts against the specs.
validate_time_step = _ValidateTimeStepObserver(
test_case=self,
time_step_spec=env.time_step_spec(),
action_step_spec=env.action_spec())
driver = py_driver.PyDriver(
env,
py_tf_eager_policy.PyTFEagerPolicy(tf_agent.collect_policy),
observers=[validate_time_step],
max_episodes=10)
driver.run(env.reset())
# Make sure that environment steps were taken.
self.assertLen(validate_time_step.episode_lengths, 10)
episode_lens = np.array(validate_time_step.episode_lengths, dtype=np.int32)
# Check if at least one of the rollouts took more than one step to ensure
# that the time step validation has seen data.
self.assertTrue(np.any(episode_lens > 1))
logging.info('Observed episode lengths: %s', episode_lens)
if __name__ == '__main__':
test_utils.main()
|
# coding=utf-8
# Copyright 2021 The Circuit Training Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sample training with distributed collection using a variable container."""
import functools
import os
import random
from absl import app
from absl import flags
from absl import logging
from circuit_training.environment import environment
from circuit_training.learning import train_ppo_lib
import numpy as np
import tensorflow as tf
from tf_agents.system import system_multiprocessing as multiprocessing
from tf_agents.train.utils import strategy_utils
FLAGS = flags.FLAGS
def main(_):
logging.info('global seed=%d', FLAGS.global_seed)
np.random.seed(FLAGS.global_seed)
random.seed(FLAGS.global_seed)
tf.random.set_seed(FLAGS.global_seed)
root_dir = os.path.join(FLAGS.root_dir, str(FLAGS.global_seed))
strategy = strategy_utils.get_strategy(FLAGS.tpu, FLAGS.use_gpu)
create_env_fn = functools.partial(
environment.create_circuit_environment,
netlist_file=FLAGS.netlist_file,
init_placement=FLAGS.init_placement,
global_seed=FLAGS.global_seed)
use_model_tpu = bool(FLAGS.tpu)
batch_size = int(FLAGS.global_batch_size / strategy.num_replicas_in_sync)
logging.info('global batch_size=%d', FLAGS.global_batch_size)
logging.info('per-replica batch_size=%d', batch_size)
train_ppo_lib.train(
root_dir=root_dir,
strategy=strategy,
replay_buffer_server_address=FLAGS.replay_buffer_server_address,
variable_container_server_address=FLAGS.variable_container_server_address,
create_env_fn=create_env_fn,
sequence_length=FLAGS.sequence_length,
per_replica_batch_size=batch_size,
num_iterations=FLAGS.num_iterations,
num_episodes_per_iteration=FLAGS.num_episodes_per_iteration,
use_model_tpu=use_model_tpu,
)
if __name__ == '__main__':
flags.mark_flags_as_required([
'root_dir',
'replay_buffer_server_address',
'variable_container_server_address',
])
multiprocessing.handle_main(functools.partial(app.run, main))
|
# coding=utf-8
# Copyright 2021 The Circuit Training Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection job using a variable container for policy updates."""
import functools
import os
from absl import app
from absl import flags
from circuit_training.environment import environment
from circuit_training.learning import ppo_collect_lib
from tf_agents.system import system_multiprocessing as multiprocessing
FLAGS = flags.FLAGS
def main(_):
root_dir = os.path.join(FLAGS.root_dir, str(FLAGS.global_seed))
create_env_fn = functools.partial(
environment.create_circuit_environment,
netlist_file=FLAGS.netlist_file,
init_placement=FLAGS.init_placement,
global_seed=FLAGS.global_seed,
)
ppo_collect_lib.collect(
task=FLAGS.task_id,
root_dir=root_dir,
      replay_buffer_server_address=FLAGS.replay_buffer_server_address,
variable_container_server_address=FLAGS.variable_container_server_address,
create_env_fn=create_env_fn,
max_sequence_length=FLAGS.max_sequence_length,
write_summaries_task_threshold=FLAGS.write_summaries_task_threshold,
)
if __name__ == '__main__':
flags.mark_flags_as_required([
'root_dir', 'replay_buffer_server_address',
'variable_container_server_address'
])
multiprocessing.handle_main(functools.partial(app.run, main))
|
# coding=utf-8
# Copyright 2021 The Circuit Training Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sample training with distributed collection using a variable container."""
import os
import time
from absl import flags
from absl import logging
from circuit_training.learning import agent
from circuit_training.learning import learner as learner_lib
import reverb
import tensorflow as tf
from tf_agents.experimental.distributed import reverb_variable_container
from tf_agents.replay_buffers import reverb_replay_buffer
from tf_agents.train import learner as actor_learner
from tf_agents.train import triggers
from tf_agents.train.utils import spec_utils
from tf_agents.train.utils import train_utils
from tf_agents.utils import common
flags.DEFINE_string('netlist_file', '',
'File path to the netlist file.')
flags.DEFINE_string('init_placement', '',
'File path to the init placement file.')
flags.DEFINE_string('root_dir', os.getenv('TEST_UNDECLARED_OUTPUTS_DIR'),
'Root directory for writing logs/summaries/checkpoints.')
flags.DEFINE_string('replay_buffer_server_address', None,
'Replay buffer server address.')
flags.DEFINE_string('variable_container_server_address', None,
'Variable container server address.')
flags.DEFINE_integer('num_iterations', 10000,
                     'Total number of train/eval iterations to perform.')
flags.DEFINE_integer(
'sequence_length', 134,
    'The sequence length to estimate shuffle size. Depends on the '
    'environment. Max horizon = T translates to sequence_length T+1 because '
    'of the additional boundary step (last -> first).')
flags.DEFINE_integer(
'num_episodes_per_iteration', 1024,
'This is the number of episodes we train on in each iteration.')
flags.DEFINE_integer(
'global_batch_size', 1024,
'Global batch size across all replicas.')
flags.DEFINE_integer(
'global_seed', 111,
'Used in env and weight initialization, does not impact action sampling.')
FLAGS = flags.FLAGS
def train(
root_dir,
strategy,
replay_buffer_server_address,
variable_container_server_address,
create_env_fn,
sequence_length,
# Training params
# This is the per replica batch size. The global batch size can be computed
# by this number multiplied by the number of replicas (8 in the case of 2x2
# TPUs).
per_replica_batch_size=32,
num_epochs=4,
num_iterations=10000,
# This is the number of episodes we train on in each iteration.
    # num_episodes_per_iteration * episode_length * num_epochs =
# global_step (number of gradient updates) * per_replica_batch_size *
# num_replicas.
num_episodes_per_iteration=1024,
use_model_tpu=False):
"""Trains a PPO agent."""
# Get the specs from the environment.
env = create_env_fn()
observation_tensor_spec, action_tensor_spec, time_step_tensor_spec = (
spec_utils.get_tensor_specs(env))
# Create the agent.
with strategy.scope():
train_step = train_utils.create_train_step()
model_id = common.create_variable('model_id')
logging.info('Using GRL agent networks.')
static_features = env.wrapped_env().get_static_obs()
tf_agent = agent.create_circuit_ppo_grl_agent(
train_step,
observation_tensor_spec,
action_tensor_spec,
time_step_tensor_spec,
strategy,
static_features=static_features,
use_model_tpu=use_model_tpu)
tf_agent.initialize()
# Create the policy saver which saves the initial model now, then it
# periodically checkpoints the policy weights.
saved_model_dir = os.path.join(root_dir, actor_learner.POLICY_SAVED_MODEL_DIR)
save_model_trigger = triggers.PolicySavedModelTrigger(
saved_model_dir,
tf_agent,
train_step,
start=-num_episodes_per_iteration,
interval=num_episodes_per_iteration)
# Create the variable container.
variables = {
reverb_variable_container.POLICY_KEY: tf_agent.collect_policy.variables(),
reverb_variable_container.TRAIN_STEP_KEY: train_step,
'model_id': model_id,
}
variable_container = reverb_variable_container.ReverbVariableContainer(
variable_container_server_address,
table_names=[reverb_variable_container.DEFAULT_TABLE])
variable_container.push(variables)
# Create the replay buffer.
reverb_replay_train = reverb_replay_buffer.ReverbReplayBuffer(
tf_agent.collect_data_spec,
sequence_length=None,
table_name='training_table',
server_address=replay_buffer_server_address)
# Initialize the dataset.
def experience_dataset_fn():
get_dtype = lambda x: x.dtype
get_shape = lambda x: (None,) + x.shape
shapes = tf.nest.map_structure(get_shape, tf_agent.collect_data_spec)
dtypes = tf.nest.map_structure(get_dtype, tf_agent.collect_data_spec)
dataset = reverb.TrajectoryDataset(
server_address=replay_buffer_server_address,
table='training_table',
dtypes=dtypes,
shapes=shapes,
# Menger uses learner_iterations_per_call (256). Using 8 here instead
# because we do not need that much data in the buffer (they have to be
        # filtered out for the next iteration anyway). The rule of thumb is
# 2-3x batch_size.
max_in_flight_samples_per_worker=8,
num_workers_per_iterator=-1,
max_samples_per_stream=-1,
rate_limiter_timeout_ms=-1,
)
def broadcast_info(info_traj):
      # Assumes that the first element of traj is shaped
      # (sequence_length, ...); we extract this length.
info, traj = info_traj
first_elem = tf.nest.flatten(traj)[0]
length = first_elem.shape[0] or tf.shape(first_elem)[0]
info = tf.nest.map_structure(lambda t: tf.repeat(t, [length]), info)
return reverb.ReplaySample(info, traj)
dataset = dataset.map(broadcast_info)
return dataset
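  # A worked sketch of broadcast_info: Reverb yields one SampleInfo per
  # sampled item while the trajectory carries a time dimension, so a scalar
  # info field such as 7 paired with a length-3 trajectory becomes [7, 7, 7]
  # via tf.repeat, letting downstream code index info per timestep.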
# Create the learner.
learning_triggers = [
save_model_trigger,
triggers.StepPerSecondLogTrigger(train_step, interval=1000),
]
def per_sequence_fn(sample):
# At this point, each sample data contains a sequence of trajectories.
data, info = sample.data, sample.info
data = tf_agent.preprocess_sequence(data)
return data, info
learner = learner_lib.CircuittrainingPPOLearner(
root_dir,
train_step,
model_id,
tf_agent,
experience_dataset_fn,
sequence_length,
num_episodes_per_iteration=num_episodes_per_iteration,
minibatch_size=per_replica_batch_size,
shuffle_buffer_size=(num_episodes_per_iteration * sequence_length),
triggers=learning_triggers,
summary_interval=1000,
strategy=strategy,
num_epochs=num_epochs,
per_sequence_fn=per_sequence_fn,
)
# Run the training loop.
for i in range(num_iterations):
step_val = train_step.numpy()
logging.info('Training. Iteration: %d', i)
start_time = time.time()
learner.run()
num_steps = train_step.numpy() - step_val
run_time = time.time() - start_time
logging.info('Steps per sec: %s', num_steps / run_time)
logging.info('Pushing variables at model_id: %d', model_id.numpy())
variable_container.push(variables)
logging.info('clearing replay buffer')
reverb_replay_train.clear()
|
# coding=utf-8
# Copyright 2021 The Circuit Training Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# coding=utf-8
# Copyright 2021 The Circuit Training Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main binary to launch a stand alone Reverb RB server."""
import os
from absl import app
from absl import flags
from circuit_training.learning import ppo_reverb_server_lib
FLAGS = flags.FLAGS
def main(_):
  # Append the global seed to the root directory.
root_dir = os.path.join(FLAGS.root_dir, str(FLAGS.global_seed))
ppo_reverb_server_lib.start_reverb_server(root_dir,
FLAGS.replay_buffer_capacity,
FLAGS.port)
if __name__ == '__main__':
flags.mark_flags_as_required(['root_dir', 'port'])
app.run(main)
|
# coding=utf-8
# Copyright 2021 The Circuit Training Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for agent and network creation."""
import os
from absl import flags
from absl import logging
from circuit_training.environment import environment
from circuit_training.learning import agent
from circuit_training.utils import test_utils
import tensorflow as tf
from tf_agents.specs import tensor_spec
from tf_agents.train.utils import spec_utils
from tf_agents.train.utils import strategy_utils
from tf_agents.train.utils import train_utils
from tf_agents.trajectories import time_step as ts
from tf_agents.trajectories import trajectory
FLAGS = flags.FLAGS
_TESTDATA_DIR = ('circuit_training/'
'environment/test_data/sample_clustered')
def create_test_circuit_env():
env = environment.create_circuit_environment(
netlist_file=os.path.join(
FLAGS.test_srcdir, _TESTDATA_DIR, 'netlist.pb.txt'),
init_placement=os.path.join(
FLAGS.test_srcdir, _TESTDATA_DIR, 'initial.plc'))
return env
class AgentTest(test_utils.TestCase):
def test_value_network_grl(self):
"""GRL value network outputs the expected shape."""
env = create_test_circuit_env()
observation_tensor_spec, action_tensor_spec, _ = (
spec_utils.get_tensor_specs(env))
logging.info('action_tensor_spec: %s', action_tensor_spec)
time_step_tensor_spec = ts.time_step_spec(observation_tensor_spec)
train_step = train_utils.create_train_step()
strategy = strategy_utils.get_strategy(tpu=False, use_gpu=False)
static_features = env.get_static_obs()
grl_agent = agent.create_circuit_ppo_grl_agent(
train_step,
observation_tensor_spec,
action_tensor_spec,
time_step_tensor_spec,
strategy,
static_features=static_features)
batch_size = 4
# Check that value prediction outputs the correct shape (B, ).
sample_time_steps = tensor_spec.sample_spec_nest(
time_step_tensor_spec, outer_dims=(batch_size,))
value_outputs, _ = grl_agent.collect_policy.apply_value_network(
sample_time_steps.observation,
sample_time_steps.step_type,
value_state=(),
training=False)
self.assertEqual(value_outputs.shape, (batch_size,))
def test_train_grl(self):
"""GRL training does not fail on arbitrary data."""
env = create_test_circuit_env()
observation_tensor_spec, action_tensor_spec, _ = (
spec_utils.get_tensor_specs(env))
logging.info('action_tensor_spec: %s', action_tensor_spec)
time_step_tensor_spec = ts.time_step_spec(observation_tensor_spec)
train_step = train_utils.create_train_step()
strategy = strategy_utils.get_strategy(tpu=False, use_gpu=False)
static_features = env.get_static_obs()
grl_agent = agent.create_circuit_ppo_grl_agent(
train_step,
observation_tensor_spec,
action_tensor_spec,
time_step_tensor_spec,
strategy,
static_features=static_features)
batch_size = 4
sample_time_steps = tensor_spec.sample_spec_nest(
time_step_tensor_spec, outer_dims=(batch_size, 1))
sample_actions = tensor_spec.sample_spec_nest(
action_tensor_spec, outer_dims=(batch_size, 1))
sample_policy_info = {
'dist_params': {
'logits':
tf.ones_like(
sample_time_steps.observation['mask'],
dtype=tf.dtypes.float32)
},
'value_prediction': tf.constant([[0.2]] * batch_size),
'return': tf.constant([[0.2]] * batch_size),
'advantage': tf.constant([[0.2]] * batch_size),
}
sample_experience = trajectory.Trajectory(
sample_time_steps.step_type, sample_time_steps.observation,
sample_actions, sample_policy_info, sample_time_steps.step_type,
sample_time_steps.reward, sample_time_steps.discount)
    # Check that training completes one iteration.
grl_agent.train(sample_experience)
if __name__ == '__main__':
test_utils.main()
|
# coding=utf-8
# Copyright 2021 The Circuit Training Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""circuit training agent definition and utility functions."""
from typing import Optional, Text, Tuple
from absl import logging
from circuit_training.model import model
import tensorflow as tf
from tf_agents.agents.ppo import ppo_agent
from tf_agents.agents.ppo import ppo_utils
from tf_agents.networks import network
from tf_agents.trajectories import time_step as ts
from tf_agents.typing import types
from tf_agents.utils import common
from tf_agents.utils import eager_utils
from tf_agents.utils import nest_utils
from tf_agents.utils import object_identity
from tf_agents.utils import value_ops
def _normalize_advantages(advantages, axes=(0,), variance_epsilon=1e-8):
adv_mean, adv_var = tf.nn.moments(x=advantages, axes=axes, keepdims=True)
normalized_advantages = ((advantages - adv_mean) /
(tf.sqrt(adv_var) + variance_epsilon))
return normalized_advantages
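# A worked sketch of the normalization above (hypothetical values): for
# advantages tf.constant([1., 2., 3.]) and axes=(0,), the mean is 2.0 and the
# variance is 2/3, so the output is approximately [-1.2247, 0.0, 1.2247].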
class CircuitPPOAgent(ppo_agent.PPOAgent):
"""A PPO Agent for circuit training aligned with Menger.
  Major differences between this and ppo_agent.PPOAgent:
  - Loss aggregation uses reduce_mean instead of common.aggregate_losses,
    which handles aggregation across multiple accelerator cores.
  - Value bootstrapping uses the second-to-last observation, instead of the
    last one. This is likely temporary, for aligning with Menger.
  - The additional time dimension ([B, 1, ...]) was squeezed at the beginning,
    which eventually leads to different behavior when generating the action
    distribution. b/202055908 tracks the work on fully understanding and
    documenting this.
  - Normalization is done manually as opposed to `tf.nn.batch_normalization`,
    which leads to different results in TPU setups.
"""
def __init__(self,
time_step_spec: ts.TimeStep,
action_spec: types.NestedTensorSpec,
optimizer: Optional[types.Optimizer] = None,
actor_net: Optional[network.Network] = None,
value_net: Optional[network.Network] = None,
importance_ratio_clipping: types.Float = 0.2,
discount_factor: types.Float = 1.0,
entropy_regularization: types.Float = 0.01,
value_pred_loss_coef: types.Float = 0.5,
gradient_clipping: Optional[types.Float] = 1.0,
value_clipping: Optional[types.Float] = None,
check_numerics: bool = False,
debug_summaries: bool = False,
summarize_grads_and_vars: bool = False,
train_step_counter: Optional[tf.Variable] = None,
aggregate_losses_across_replicas=False,
loss_scaling_factor=1.,
name: Optional[Text] = 'PPOClipAgent'):
"""Creates a PPO Agent implementing the clipped probability ratios.
Args:
time_step_spec: A `TimeStep` spec of the expected time_steps.
action_spec: A nest of BoundedTensorSpec representing the actions.
optimizer: Optimizer to use for the agent.
actor_net: A function actor_net(observations, action_spec) that returns
tensor of action distribution params for each observation. Takes nested
observation and returns nested action.
value_net: A function value_net(time_steps) that returns value tensor from
neural net predictions for each observation. Takes nested observation
and returns batch of value_preds.
importance_ratio_clipping: Epsilon in clipped, surrogate PPO objective.
For more detail, see explanation at the top of the doc.
discount_factor: Discount factor for return computation.
entropy_regularization: Coefficient for entropy regularization loss term.
value_pred_loss_coef: Multiplier for value prediction loss to balance with
policy gradient loss.
gradient_clipping: Norm length to clip gradients. Default: no clipping.
value_clipping: Difference between new and old value predictions are
clipped to this threshold. Value clipping could be helpful when training
very deep networks. Default: no clipping.
check_numerics: If true, adds tf.debugging.check_numerics to help find NaN
/ Inf values. For debugging only.
debug_summaries: A bool to gather debug summaries.
summarize_grads_and_vars: If true, gradient summaries will be written.
train_step_counter: An optional counter to increment every time the train
op is run. Defaults to the global_step.
      aggregate_losses_across_replicas: Only applicable to setups using
        multiple replicas. Defaults to aggregating across multiple cores using
        common.aggregate_losses. If set to `False`, use `reduce_mean`
        directly, which is faster but may impact learning results.
loss_scaling_factor: the multiplier for scaling the loss, oftentimes
1/num_replicas_in_sync.
name: The name of this agent. All variables in this module will fall under
that name. Defaults to the class name.
Raises:
ValueError: If the actor_net is not a DistributionNetwork.
"""
self._loss_scaling_factor = loss_scaling_factor
self._use_tpu = bool(tf.config.list_logical_devices('TPU'))
super(CircuitPPOAgent, self).__init__(
time_step_spec,
action_spec,
optimizer,
actor_net,
value_net,
importance_ratio_clipping=importance_ratio_clipping,
discount_factor=discount_factor,
entropy_regularization=entropy_regularization,
value_pred_loss_coef=value_pred_loss_coef,
gradient_clipping=gradient_clipping,
value_clipping=value_clipping,
check_numerics=check_numerics,
debug_summaries=debug_summaries,
summarize_grads_and_vars=summarize_grads_and_vars,
train_step_counter=train_step_counter,
name=name,
aggregate_losses_across_replicas=aggregate_losses_across_replicas,
# Epochs are set through the tf.Data pipeline outside of the agent.
num_epochs=1,
# Value and advantages are computed as part of the data pipeline, this
# is set to False for all setups using minibatching and PPOLearner.
compute_value_and_advantage_in_train=False,
# Skips GAE, TD lambda returns, rewards and observations normalization.
use_gae=False,
use_td_lambda_return=False,
normalize_rewards=False,
normalize_observations=False,
update_normalizers_in_train=False,
# Skips log probability clipping and L2 losses.
log_prob_clipping=0.0,
policy_l2_reg=0.,
value_function_l2_reg=0.,
shared_vars_l2_reg=0.,
# Skips parameters used for the adaptive KL loss penalty version of PPO.
kl_cutoff_factor=0.0,
kl_cutoff_coef=0.0,
initial_adaptive_kl_beta=0.0,
adaptive_kl_target=0.0,
adaptive_kl_tolerance=0.0)
def compute_return_and_advantage(
self, next_time_steps: ts.TimeStep,
value_preds: types.Tensor) -> Tuple[types.Tensor, types.Tensor]:
"""Compute the Monte Carlo return and advantage.
Args:
next_time_steps: batched tensor of TimeStep tuples after action is taken.
value_preds: Batched value prediction tensor. Should have one more entry
in time index than time_steps, with the final value corresponding to the
value prediction of the final state.
Returns:
tuple of (return, advantage), both are batched tensors.
"""
discounts = next_time_steps.discount * tf.constant(
self._discount_factor, dtype=tf.float32)
rewards = next_time_steps.reward
# TODO(b/202226773): Move debugging to helper function for clarity.
if self._debug_summaries:
# Summarize rewards before they get normalized below.
# TODO(b/171573175): remove the condition once histograms are
# supported on TPUs.
if not self._use_tpu:
tf.compat.v2.summary.histogram(
name='rewards', data=rewards, step=self.train_step_counter)
tf.compat.v2.summary.scalar(
name='rewards_mean',
data=tf.reduce_mean(rewards),
step=self.train_step_counter)
# Normalize rewards if self._reward_normalizer is defined.
if self._reward_normalizer:
rewards = self._reward_normalizer.normalize(
rewards, center_mean=False, clip_value=self._reward_norm_clipping)
if self._debug_summaries:
# TODO(b/171573175): remove the condition once histograms are
# supported on TPUs.
if not self._use_tpu:
tf.compat.v2.summary.histogram(
name='rewards_normalized',
data=rewards,
step=self.train_step_counter)
tf.compat.v2.summary.scalar(
name='rewards_normalized_mean',
data=tf.reduce_mean(rewards),
step=self.train_step_counter)
    # Make the discount 0.0 at the end of each episode to restart the
    # cumulative sum at each episode boundary.
episode_mask = common.get_episode_mask(next_time_steps)
discounts *= episode_mask
# Compute Monte Carlo returns. Data from incomplete trajectories, not
# containing the end of an episode will also be used, with a bootstrapped
# estimation from the last value.
    # Note that when a trajectory driver is used and the final step is
    # terminal, the bootstrapped estimate will not be used, as it will be
    # multiplied by zero (the discount on the last step).
# TODO(b/202055908): Use -1 instead to bootstrap from the last step, once
# we verify that it has no negative impact on learning.
final_value_bootstrapped = value_preds[:, -2]
returns = value_ops.discounted_return(
rewards,
discounts,
time_major=False,
final_value=final_value_bootstrapped)
# TODO(b/171573175): remove the condition once histograms are
# supported on TPUs.
if self._debug_summaries and not self._use_tpu:
tf.compat.v2.summary.histogram(
name='returns', data=returns, step=self.train_step_counter)
# Compute advantages.
advantages = self.compute_advantages(rewards, returns, discounts,
value_preds)
    # TODO(b/171573175): remove the condition once histograms are
# supported on TPUs.
if self._debug_summaries and not self._use_tpu:
tf.compat.v2.summary.histogram(
name='advantages', data=advantages, step=self.train_step_counter)
# Return TD-Lambda returns if both use_td_lambda_return and use_gae.
if self._use_td_lambda_return:
if not self._use_gae:
logging.warning('use_td_lambda_return was True, but use_gae was '
'False. Using Monte Carlo return.')
else:
returns = tf.add(
advantages, value_preds[:, :-1], name='td_lambda_returns')
return returns, advantages
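  # A worked sketch of the bootstrapping above (hypothetical numbers): with
  # rewards [[1., 1.]], discounts [[1., 0.]] and value_preds [[v0, v1, v2]],
  # the bootstrap value is v1 (value_preds[:, -2]); the final discount of 0
  # zeroes that term, giving Monte Carlo returns [[2., 1.]].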
def _train(self, experience, weights):
experience = self._as_trajectory(experience)
if self._compute_value_and_advantage_in_train:
processed_experience = self._preprocess(experience)
else:
processed_experience = experience
def squeeze_time_dim(t):
return tf.squeeze(t, axis=[1])
processed_experience = tf.nest.map_structure(squeeze_time_dim,
processed_experience)
valid_mask = ppo_utils.make_trajectory_mask(processed_experience)
masked_weights = valid_mask
if weights is not None:
masked_weights *= weights
# Reconstruct per-timestep policy distribution from stored distribution
# parameters.
old_action_distribution_parameters = (
processed_experience.policy_info['dist_params'])
old_actions_distribution = (
ppo_utils.distribution_from_spec(
self._action_distribution_spec,
old_action_distribution_parameters,
legacy_distribution_network=isinstance(
self._actor_net, network.DistributionNetwork)))
# Compute log probability of actions taken during data collection, using the
# collect policy distribution.
old_act_log_probs = common.log_probability(
old_actions_distribution, processed_experience.action,
self._action_spec)
# TODO(b/171573175): remove the condition once histograms are
# supported on TPUs.
if self._debug_summaries and not self._use_tpu:
actions_list = tf.nest.flatten(processed_experience.action)
show_action_index = len(actions_list) != 1
for i, single_action in enumerate(actions_list):
action_name = ('actions_{}'.format(i)
if show_action_index else 'actions')
tf.compat.v2.summary.histogram(
name=action_name, data=single_action, step=self.train_step_counter)
time_steps = ts.TimeStep(
step_type=processed_experience.step_type,
reward=processed_experience.reward,
discount=processed_experience.discount,
observation=processed_experience.observation)
actions = processed_experience.action
returns = processed_experience.policy_info['return']
advantages = processed_experience.policy_info['advantage']
normalized_advantages = _normalize_advantages(
advantages, variance_epsilon=1e-8)
# TODO(b/171573175): remove the condition once histograms are
# supported on TPUs.
if self._debug_summaries and not self._use_tpu:
tf.compat.v2.summary.histogram(
name='advantages_normalized',
data=normalized_advantages,
step=self.train_step_counter)
old_value_predictions = processed_experience.policy_info[
'value_prediction']
batch_size = nest_utils.get_outer_shape(time_steps, self._time_step_spec)[0]
loss_info = None # TODO(b/123627451): Remove.
variables_to_train = list(
object_identity.ObjectIdentitySet(self._actor_net.trainable_weights +
self._value_net.trainable_weights))
# Sort to ensure tensors on different processes end up in same order.
variables_to_train = sorted(variables_to_train, key=lambda x: x.name)
with tf.GradientTape(watch_accessed_variables=False) as tape:
tape.watch(variables_to_train)
loss_info = self.get_loss(
time_steps,
actions,
old_act_log_probs,
returns,
normalized_advantages,
old_action_distribution_parameters,
masked_weights,
self.train_step_counter,
self._debug_summaries,
old_value_predictions=old_value_predictions,
training=True)
# Scales the loss, often set to 1/num_replicas, which results in using
# the average loss across all of the replicas for backprop.
scaled_loss = loss_info.loss * self._loss_scaling_factor
grads = tape.gradient(scaled_loss, variables_to_train)
if self._gradient_clipping > 0:
grads, _ = tf.clip_by_global_norm(grads, self._gradient_clipping)
# Tuple is used for py3, where zip is a generator producing values once.
grads_and_vars = tuple(zip(grads, variables_to_train))
# If summarize_gradients, create functions for summarizing both
# gradients and variables.
if self._summarize_grads_and_vars and self._debug_summaries:
eager_utils.add_gradients_summaries(grads_and_vars,
self.train_step_counter)
eager_utils.add_variables_summaries(grads_and_vars,
self.train_step_counter)
self._optimizer.apply_gradients(grads_and_vars)
self.train_step_counter.assign_add(1)
    # TODO(b/1613650790): Move this logic to PPOKLPenaltyAgent.
if self._initial_adaptive_kl_beta > 0:
# After update epochs, update adaptive kl beta, then update observation
# normalizer and reward normalizer.
policy_state = self._collect_policy.get_initial_state(batch_size)
# Compute the mean kl from previous action distribution.
kl_divergence = self._kl_divergence(
time_steps, old_action_distribution_parameters,
self._collect_policy.distribution(time_steps, policy_state).action)
self.update_adaptive_kl_beta(kl_divergence)
if self.update_normalizers_in_train:
self.update_observation_normalizer(time_steps.observation)
self.update_reward_normalizer(processed_experience.reward)
loss_info = tf.nest.map_structure(tf.identity, loss_info)
with tf.name_scope('Losses/'):
tf.compat.v2.summary.scalar(
name='policy_gradient_loss',
data=loss_info.extra.policy_gradient_loss,
step=self.train_step_counter)
tf.compat.v2.summary.scalar(
name='value_estimation_loss',
data=loss_info.extra.value_estimation_loss,
step=self.train_step_counter)
tf.compat.v2.summary.scalar(
name='l2_regularization_loss',
data=loss_info.extra.l2_regularization_loss,
step=self.train_step_counter)
tf.compat.v2.summary.scalar(
name='entropy_regularization_loss',
data=loss_info.extra.entropy_regularization_loss,
step=self.train_step_counter)
tf.compat.v2.summary.scalar(
name='kl_penalty_loss',
data=loss_info.extra.kl_penalty_loss,
step=self.train_step_counter)
total_abs_loss = (
tf.abs(loss_info.extra.policy_gradient_loss) +
tf.abs(loss_info.extra.value_estimation_loss) +
tf.abs(loss_info.extra.entropy_regularization_loss) +
tf.abs(loss_info.extra.l2_regularization_loss) +
tf.abs(loss_info.extra.kl_penalty_loss))
tf.compat.v2.summary.scalar(
name='total_abs_loss',
data=total_abs_loss,
step=self.train_step_counter)
with tf.name_scope('LearningRate/'):
learning_rate = ppo_utils.get_learning_rate(self._optimizer)
tf.compat.v2.summary.scalar(
name='learning_rate',
data=learning_rate,
step=self.train_step_counter)
# TODO(b/171573175): remove the condition once histograms are
# supported on TPUs.
if self._summarize_grads_and_vars and not self._use_tpu:
with tf.name_scope('Variables/'):
all_vars = (
self._actor_net.trainable_weights +
self._value_net.trainable_weights)
for var in all_vars:
tf.compat.v2.summary.histogram(
name=var.name.replace(':', '_'),
data=var,
step=self.train_step_counter)
return loss_info
def create_circuit_ppo_grl_agent(
train_step: tf.Variable, observation_tensor_spec: types.NestedTensorSpec,
action_tensor_spec: types.NestedTensorSpec,
time_step_tensor_spec: types.TimeStep, strategy: tf.distribute.Strategy,
static_features=None,
use_model_tpu=False,
unrolled=True,
**kwargs) -> CircuitPPOAgent:
"""Creates a PPO agent using the GRL networks."""
grl_shared_net = model.GrlModel(
observation_tensor_spec,
action_tensor_spec,
static_features=static_features,
use_model_tpu=use_model_tpu,
unrolled=unrolled)
grl_actor_net = model.GrlPolicyModel(grl_shared_net, observation_tensor_spec,
action_tensor_spec)
grl_value_net = model.GrlValueModel(observation_tensor_spec, grl_shared_net)
return CircuitPPOAgent(
time_step_tensor_spec,
action_tensor_spec,
optimizer=tf.keras.optimizers.Adam(learning_rate=4e-4, epsilon=1e-5),
actor_net=grl_actor_net,
value_net=grl_value_net,
value_pred_loss_coef=0.5,
entropy_regularization=0.01,
importance_ratio_clipping=0.2,
discount_factor=1.0,
gradient_clipping=1.0,
debug_summaries=False,
train_step_counter=train_step,
value_clipping=None,
aggregate_losses_across_replicas=False,
loss_scaling_factor=1. / float(strategy.num_replicas_in_sync),
**kwargs)
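# A minimal usage sketch (hypothetical specs; mirrors the call sites in
# train_ppo_lib and the agent tests):
#
#   strategy = tf.distribute.get_strategy()
#   with strategy.scope():
#     train_step = train_utils.create_train_step()  # tf_agents train utils
#     grl_agent = create_circuit_ppo_grl_agent(
#         train_step, observation_tensor_spec, action_tensor_spec,
#         time_step_tensor_spec, strategy)
#     grl_agent.initialize()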
|
# coding=utf-8
# Copyright 2021 The Circuit Training Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Eval job using a variable container to fetch the weights of the policy."""
import functools
import os
from absl import app
from absl import flags
from circuit_training.environment import environment
from circuit_training.learning import eval_lib
from tf_agents.policies import greedy_policy # pylint: disable=unused-import
from tf_agents.system import system_multiprocessing as multiprocessing
# TODO(b/211519018): Remove after the optimal placement can be written in GCS.
flags.DEFINE_string('output_placement_save_dir', '',
                    'File path to the output placement directory. If not '
                    'set, defaults to root_dir/global_seed.')
flags.DEFINE_bool(
    'cd_finetune', False, 'Runs coordinate descent to fine-tune macro '
    'orientations. Intended to run in eval only, not in training.')
FLAGS = flags.FLAGS
def main(_):
root_dir = os.path.join(FLAGS.root_dir, str(FLAGS.global_seed))
if FLAGS.output_placement_save_dir:
output_plc_file = os.path.join(
FLAGS.output_placement_save_dir, 'rl_opt_placement.plc')
else:
output_plc_file = os.path.join(root_dir, 'rl_opt_placement.plc')
create_env_fn = functools.partial(
environment.create_circuit_environment,
netlist_file=FLAGS.netlist_file,
init_placement=FLAGS.init_placement,
is_eval=True,
save_best_cost=True,
output_plc_file=output_plc_file,
global_seed=FLAGS.global_seed,
cd_finetune=FLAGS.cd_finetune
)
eval_lib.evaluate(
root_dir=root_dir,
variable_container_server_address=FLAGS.variable_container_server_address,
create_env_fn=create_env_fn,
)
if __name__ == '__main__':
flags.mark_flags_as_required(
['root_dir', 'variable_container_server_address'])
multiprocessing.handle_main(functools.partial(app.run, main))
|
# coding=utf-8
# Copyright 2021 The Circuit Training Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library and util functions for reverb server."""
import os
from absl import flags
from absl import logging
import reverb
import tensorflow as tf
from tf_agents.experimental.distributed import reverb_variable_container
from tf_agents.policies import py_tf_eager_policy
from tf_agents.specs import tensor_spec
from tf_agents.train import learner
from tf_agents.train.utils import train_utils
from tf_agents.utils import common
flags.DEFINE_string('root_dir', os.getenv('TEST_UNDECLARED_OUTPUTS_DIR'),
'Root directory for writing logs/summaries/checkpoints.')
flags.DEFINE_integer('replay_buffer_capacity', 1024,
                     'Capacity of the replay buffer table. Set this to a '
                     'value larger than num_episodes_per_iteration.')
flags.DEFINE_integer('port', None, 'Port to start the server on.')
flags.DEFINE_integer(
'global_seed', 111,
'Used in env and weight initialization, does not impact action sampling.')
FLAGS = flags.FLAGS
def start_reverb_server(root_dir, replay_buffer_capacity, port):
"""todo."""
collect_policy_saved_model_path = os.path.join(
root_dir, learner.POLICY_SAVED_MODEL_DIR,
learner.COLLECT_POLICY_SAVED_MODEL_DIR)
saved_model_pb_path = os.path.join(collect_policy_saved_model_path,
'saved_model.pb')
try:
    # Wait for the collect policy to be output by the learner (timeout after
    # 2 days), then load it.
train_utils.wait_for_file(
saved_model_pb_path, sleep_time_secs=2, num_retries=86400)
collect_policy = py_tf_eager_policy.SavedModelPyTFEagerPolicy(
collect_policy_saved_model_path, load_specs_from_pbtxt=True)
except TimeoutError as e:
# If the collect policy does not become available during the wait time of
# the call `wait_for_file`, that probably means the learner is not running.
logging.error('Could not get the file %s. Exiting.', saved_model_pb_path)
raise e
# Create the signature for the variable container holding the policy weights.
train_step = train_utils.create_train_step()
model_id = common.create_variable('model_id')
variables = {
reverb_variable_container.POLICY_KEY: collect_policy.variables(),
reverb_variable_container.TRAIN_STEP_KEY: train_step,
'model_id': model_id,
}
variable_container_signature = tf.nest.map_structure(
lambda variable: tf.TensorSpec(variable.shape, dtype=variable.dtype),
variables)
logging.info('Signature of variables: \n%s', variable_container_signature)
# Create the signature for the replay buffer holding observed experience.
replay_buffer_signature = tensor_spec.from_spec(
collect_policy.collect_data_spec)
replay_buffer_signature = tensor_spec.add_outer_dim(replay_buffer_signature)
logging.info('Signature of experience: \n%s', replay_buffer_signature)
  # Create and start the replay buffer and variable container server.
# TODO(b/159130813): Optionally turn the reverb server pieces into a library.
server = reverb.Server(
tables=[
# The remover does not matter because we clear the table at the end
# of each global step. We assume that the table is large enough to
# contain the data collected from one step (otherwise some data will
# be dropped).
reverb.Table( # Replay buffer storing experience for training.
name='training_table',
sampler=reverb.selectors.MaxHeap(),
remover=reverb.selectors.MinHeap(),
# Menger sets this to 8, but empirically 1 learns better
# consistently.
rate_limiter=reverb.rate_limiters.MinSize(1),
max_size=replay_buffer_capacity,
max_times_sampled=1,
signature=replay_buffer_signature,
),
reverb.Table( # Variable container storing policy parameters.
name=reverb_variable_container.DEFAULT_TABLE,
sampler=reverb.selectors.Fifo(),
remover=reverb.selectors.Fifo(),
rate_limiter=reverb.rate_limiters.MinSize(1),
max_size=1,
max_times_sampled=0,
signature=variable_container_signature,
),
],
port=port)
server.wait()
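# A sketch of how the two tables interact (matching the collect and learner
# libraries in this package): collect jobs write whole episodes into
# 'training_table' with the current model_id as the sample priority; the
# learner samples each item at most once (max_times_sampled=1), filters out
# stale model_ids, and clears the table after every iteration. The variable
# container table keeps exactly one (the latest) copy of the policy weights
# (max_size=1).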
|
# coding=utf-8
# Copyright 2021 The Circuit Training Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility to create circuit learner."""
from typing import Callable, List, Optional, Text, Tuple
from absl import logging
import tensorflow as tf
from tf_agents.agents.ppo import ppo_agent
from tf_agents.train import interval_trigger
from tf_agents.train import learner
from tf_agents.typing import types
class CircuittrainingPPOLearner(object):
"""Manages all the learning details needed.
These include:
* Using distribution strategies correctly
* Summaries
* Checkpoints
* Minimizing entering/exiting TF context:
    Especially in the case of TPUs, scheduling a single TPU program to
perform multiple train steps is critical for performance.
* Generalizes the train call to be done correctly across CPU, GPU, or TPU
executions managed by DistributionStrategies. This uses `strategy.run` and
then makes sure to do a reduce operation over the `LossInfo` returned by
the agent.
"""
def __init__(
self,
root_dir: Text,
train_step: tf.Variable,
model_id: tf.Variable,
agent: ppo_agent.PPOAgent,
experience_dataset_fn: Callable[..., tf.data.Dataset],
sequence_length: int,
num_episodes_per_iteration: int,
minibatch_size: int,
shuffle_buffer_size: int,
num_epochs: int = 1,
triggers: Optional[List[interval_trigger.IntervalTrigger]] = None,
checkpoint_interval: int = 100000,
summary_interval: int = 1000,
strategy: Optional[tf.distribute.Strategy] = None,
per_sequence_fn: Optional[
Callable[[Tuple[types.NestedTensor, types.ReverbSampleInfo]],
Tuple[types.NestedTensor, types.ReverbSampleInfo]]] = None,
):
"""Initializes a CircuittrainingPPOLearner instance.
Args:
root_dir: Main directory path where checkpoints, saved_models, and
summaries will be written to.
train_step: a scalar tf.int64 `tf.Variable` which will keep track of the
number of train steps. This is used for artifacts created like
summaries, or outputs in the root_dir.
model_id: a scalar tf.int64 `tf.Variable` which will keep track of the
number of learner iterations / policy updates.
agent: `ppo_agent.PPOAgent` instance to train with. Note that
update_normalizers_in_train should be set to `False`, otherwise a
        ValueError will be raised. We do not update normalizers in the agent
        again because we already update them in the learner. When mini
        batching is enabled, compute_value_and_advantage_in_train should be
        set to False, and preprocessing should be done in the data pipeline,
        as part of `replay_buffer.as_dataset`.
experience_dataset_fn: a function that will create an instance of a
tf.data.Dataset used to sample experience for training. Each element in
the dataset is a (Trajectory, SampleInfo) pair.
sequence_length: Fixed sequence length for elements in the dataset. Used
for calculating how many iterations of minibatches to use for training.
      num_episodes_per_iteration: The number of episodes to sample for
        training. If fewer episodes than this exist in the dataset, the
        learner will wait for more data to be added, or until the reverb
        timeout is reached.
      minibatch_size: The minibatch size. The dataset used for training is
        shaped `[minibatch_size, 1, ...]`. If None, full sequences will be
        fed into the agent. Set this parameter to None for RNN networks,
        which require full sequences.
shuffle_buffer_size: The buffer size for shuffling the trajectories before
splitting them into mini batches. Only required when mini batch learning
is enabled (minibatch_size is set). Otherwise it is ignored. Commonly
set to a number 1-3x the episode length of your environment.
      num_epochs: The number of times to go through the same sequences
        (epochs).
triggers: List of callables of the form `trigger(train_step)`. After every
`run` call every trigger is called with the current `train_step` value
as an np scalar.
checkpoint_interval: Number of train steps in between checkpoints. Note
these are placed into triggers and so a check to generate a checkpoint
only occurs after every `run` call. Set to -1 to disable (this is not
recommended, because it means that if the pipeline gets preempted, all
        previous progress is lost). This only takes care of checkpointing the
        training process. Policies must be explicitly exported through
triggers.
      summary_interval: Number of train steps in between summaries. Note these
        are placed into triggers and so a check to generate a summary only
        occurs after every `run` call.
strategy: (Optional) `tf.distribute.Strategy` to use during training.
      per_sequence_fn: (Optional) Sequence-wise preprocessing; pass in
        `agent.preprocess` for advantage calculation. This operation happens
        after `take()` and before rebatching.
    Raises:
      ValueError: If `agent._compute_value_and_advantage_in_train` is set to
        `True`; preprocessing must be done as part of the data pipeline when
        mini batching is enabled.
"""
strategy = strategy or tf.distribute.get_strategy()
self._agent = agent
self._minibatch_size = minibatch_size
self._shuffle_buffer_size = shuffle_buffer_size
self._num_epochs = num_epochs
self._experience_dataset_fn = experience_dataset_fn
self._num_episodes_per_iteration = num_episodes_per_iteration
    # Tracks the number of times learner.run() has been called.
    # This is used for filtering out data generated by older models to ensure
    # the on-policyness of the algorithm.
self._model_id = model_id
self._sequence_length = sequence_length
self._per_sequence_fn = per_sequence_fn
self._generic_learner = learner.Learner(
root_dir,
train_step,
agent,
after_train_strategy_step_fn=None,
triggers=triggers,
checkpoint_interval=checkpoint_interval,
summary_interval=summary_interval,
use_kwargs_in_agent_train=False,
strategy=strategy)
self.num_replicas = strategy.num_replicas_in_sync
self._create_datasets(strategy)
self._steps_per_iter = self._get_train_steps_per_iteration()
logging.info('train steps per iteration: %d', self._steps_per_iter)
def _create_datasets(self, strategy):
"""Create the training dataset and iterator."""
def _filter_invalid_episodes(sample):
      # Filter out (1) off-policy samples and (2) infeasible placements with
      # shorter episode lengths than expected.
data, sample_info = sample.data, sample.info
data_model_id = tf.cast(
tf.reduce_min(sample_info.priority), dtype=tf.int64)
# TODO(b/203585138): remove filter by sequence length once variable
# lengths episodes are supported.
return tf.math.logical_and(
tf.math.equal(tf.size(data.discount), self._sequence_length),
tf.math.equal(self._model_id, data_model_id))
def _make_dataset(_):
# `experience_dataset_fn` returns a tf.Dataset. Each item is a (Trajectory
# , SampleInfo) tuple, and the Trajectory represents one single episode
# of a fixed sequence length. The Trajectory dimensions are [1, T, ...].
train_dataset = self._experience_dataset_fn()
train_dataset = train_dataset.filter(_filter_invalid_episodes)
train_dataset = train_dataset.take(self._num_episodes_per_iteration)
if self._per_sequence_fn:
train_dataset = train_dataset.map(
self._per_sequence_fn,
num_parallel_calls=tf.data.AUTOTUNE,
deterministic=False)
# We take num_episodes_per_iteration, repeat for `num_epochs` times and
# exhaust this data in the current learner run. The next time learner
# runs, new batches of data will be sampled, cached and repeated.
# This is enabled by the `Counter().flat_map()` trick below.
      # We unbatch the dataset shaped [B, T, ...] to a new dataset that
      # contains individual elements.
      # Note that we unbatch across the time dimension, which could result
      # in mini batches that contain subsets from more than one sequence.
      # The PPO agent can handle mini batches across episode boundaries.
train_dataset = train_dataset.unbatch()
train_dataset = train_dataset.batch(1, drop_remainder=True).cache()
      # Ideally we would train on num_episodes_per_iteration episodes if all
      # have the max sequence_length; with shorter episodes we would train on
      # an equivalent number of steps:
      # num_samples = self._num_episodes_per_iteration * self._sequence_length
      # Make sure we have enough samples to train on.
      # train_dataset = train_dataset.take(num_samples).cache()
train_dataset = train_dataset.shuffle(self._shuffle_buffer_size)
train_dataset = train_dataset.repeat(self._num_epochs)
train_dataset = train_dataset.batch(
self._minibatch_size, drop_remainder=True)
options = tf.data.Options()
options.experimental_deterministic = False
options.experimental_optimization.parallel_batch = True
train_dataset = train_dataset.with_options(options)
return train_dataset
def make_dataset(_):
return tf.data.experimental.Counter().flat_map(_make_dataset)
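    # Note (illustrative): `Counter().flat_map(_make_dataset)` rebuilds the
    # inner dataset whenever the previous one is exhausted, so each
    # learner.run() consumes a fresh sample of num_episodes_per_iteration
    # episodes instead of replaying a cached epoch.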
with strategy.scope():
if strategy.num_replicas_in_sync > 1:
self._train_dataset = (
strategy.distribute_datasets_from_function(make_dataset))
else:
self._train_dataset = make_dataset(0)
self._train_iterator = iter(self._train_dataset)
def _get_train_steps_per_iteration(self):
"""Number of train steps each time learner.run() is called."""
# We exhaust all num_episodes_per_iteration taken from Reverb in this setup.
# Here we assume that there's only 1 episode per batch, and each episode is
# of the fixed sequence length.
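    # Worked example (hypothetical numbers): with
    # num_episodes_per_iteration=16, sequence_length=128, num_epochs=2 and
    # minibatch_size=64, we get int(16 * 128 * 2 / 64) = 64 mini batches;
    # with 2 replicas in sync this yields 32 train steps per iteration.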
num_mini_batches = int(self._num_episodes_per_iteration *
self._sequence_length * self._num_epochs /
self._minibatch_size)
train_steps = int(num_mini_batches / self.num_replicas)
return train_steps
def run(self):
"""Train `num_episodes_per_iteration` repeating for `num_epochs` of iterations.
Returns:
The total loss computed before running the final step.
"""
loss_info = self._generic_learner.run(self._steps_per_iter,
self._train_iterator)
self._model_id.assign_add(1)
return loss_info
@property
def train_step_numpy(self):
"""The current train_step.
Returns:
The current `train_step`. Note this will return a scalar numpy array which
holds the `train_step` value when this was called.
"""
return self._generic_learner.train_step_numpy
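# A minimal usage sketch (illustrative only; `make_ppo_agent` and
# `make_reverb_dataset_fn` are hypothetical helpers, not part of this module):
#
#   learner_obj = CircuittrainingPPOLearner(
#       root_dir='/tmp/train',
#       train_step=tf.Variable(0, dtype=tf.int64),
#       model_id=tf.Variable(0, dtype=tf.int64),
#       agent=make_ppo_agent(),
#       experience_dataset_fn=make_reverb_dataset_fn(),
#       sequence_length=134,
#       num_episodes_per_iteration=16,
#       minibatch_size=64,
#       shuffle_buffer_size=256)
#   for _ in range(10):
#     loss_info = learner_obj.run()  # Bumps model_id for on-policy filtering.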
|
# coding=utf-8
# Copyright 2021 The Circuit Training Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utility functions for testing."""
from absl import flags
import gin
import tensorflow as tf
flags.DEFINE_multi_string('test_gin_bindings', None, 'Gin bindings.')
FLAGS = flags.FLAGS
class TestCase(tf.test.TestCase):
"""Base class for TF-Agents unit tests."""
def setUp(self):
super(TestCase, self).setUp()
tf.compat.v1.enable_resource_variables()
gin.clear_config()
gin.parse_config(FLAGS.test_gin_bindings)
def tearDown(self):
gin.clear_config()
super(TestCase, self).tearDown()
# Main function so that users of `test_utils.TestCase` can also call
# `test_utils.main()`.
def main():
tf.test.main()
|
# coding=utf-8
# Copyright 2021 The Circuit Training Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# coding=utf-8
# Copyright 2021 The Circuit Training Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Tests for circuit training model.
# Internal circuit training docs link.
"""
import os
from absl import flags
from absl.testing import parameterized
from circuit_training.environment import environment
from circuit_training.model import model as grl_model
from circuit_training.utils import test_utils
import tensorflow as tf
from tf_agents.environments import suite_gym
from tf_agents.environments import tf_py_environment
import tf_agents.specs.tensor_spec as tensor_spec
from tf_agents.train.utils import strategy_utils
from tf_agents.trajectories import time_step as ts
flags.DEFINE_enum('strategy_type', 'cpu', [
'tpu', 'gpu', 'cpu'
], ('Distribution Strategy type to use for training. `tpu` uses TPUStrategy '
    'for running on TPUs (1x1), `gpu` uses GPUs with a single host.'))
flags.DEFINE_integer(
'global_batch_size', 64, 'Defines the global batch size. '
    'Note that on a 1x1 TPU the per-TensorCore batch size will be 32.')
flags.DEFINE_integer('dataset_repeat', 16,
                     'Defines the number of dataset repeats.')
FLAGS = flags.FLAGS
_TESTDATA_DIR = ('circuit_training/'
'environment/test_data')
class ActorModelTest(test_utils.TestCase, parameterized.TestCase):
def setUp(self):
super(ActorModelTest, self).setUp()
block_name = 'sample_clustered'
netlist_file = os.path.join(FLAGS.test_srcdir, _TESTDATA_DIR, block_name,
'netlist.pb.txt')
init_placement = os.path.join(FLAGS.test_srcdir, _TESTDATA_DIR, block_name,
'initial.plc')
env = environment.create_circuit_environment(
netlist_file=netlist_file, init_placement=init_placement)
tf_env = tf_py_environment.TFPyEnvironment(suite_gym.wrap_env(env))
self._input_tensors_spec = tf_env.observation_spec()
self._output_tensors_spec = tf_env.action_spec()
if FLAGS.strategy_type == 'tpu':
resolver = tf.distribute.cluster_resolver.TPUClusterResolver('')
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
self._strategy = tf.distribute.experimental.TPUStrategy(resolver)
elif FLAGS.strategy_type == 'gpu':
self._strategy = strategy_utils.get_strategy(tpu=None, use_gpu=True)
else:
self._strategy = strategy_utils.get_strategy(tpu=None, use_gpu=False)
with self._strategy.scope():
shared_network = grl_model.GrlModel(
input_tensors_spec=self._input_tensors_spec,
output_tensors_spec=None,
name='grl_model')
self._value_model = grl_model.GrlValueModel(
input_tensors_spec=self._input_tensors_spec,
shared_network=shared_network,
name='value_model')
self._optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)
self._value_model.create_variables()
def test_backwards_pass(self):
observation_spec = self._input_tensors_spec
time_step_spec = ts.time_step_spec(observation_spec)
outer_dims = (FLAGS.global_batch_size,)
time_step = tensor_spec.sample_spec_nest(
time_step_spec, outer_dims=outer_dims)
# TPU on forge has two cores (1x1).
# The batch defined here represents the global batch size.
# Will be evenly divided between the two cores.
dataset = tf.data.Dataset.from_tensor_slices(time_step.observation).repeat(
FLAGS.dataset_repeat).batch(FLAGS.global_batch_size)
dist_dataset = self._strategy.experimental_distribute_dataset(dataset)
with self._strategy.scope():
def _step_fn(x):
with tf.GradientTape() as tape:
value, _ = self._value_model(x, training=True)
loss = tf.math.reduce_sum(value)
grads = tape.gradient(loss, self._value_model.trainable_variables)
grads_and_vars = tuple(
zip(grads, self._value_model.trainable_variables))
self._optimizer.apply_gradients(grads_and_vars)
@tf.function
def _iter_fn(x):
self._strategy.run(_step_fn, args=(x,))
for x in dist_dataset:
_iter_fn(x)
if __name__ == '__main__':
test_utils.main()
|
# coding=utf-8
# Copyright 2021 The Circuit Training Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""New circuittraining Model for generalization."""
import sys
from typing import Dict, Optional, Text, Union, Callable, Tuple
from circuit_training.environment import observation_config
import gin
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
# Reimplements internal function
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/framework/smart_cond.py.
def smart_cond(pred: Union[bool, tf.Tensor],
true_fn: Callable[[], tf.Tensor],
false_fn: Callable[[], tf.Tensor],
name: Optional[Text] = None) -> tf.Tensor:
"""Return either `true_fn()` if predicate `pred` is true else `false_fn()`.
If `pred` is a bool or has a constant value, we return either `true_fn()`
or `false_fn()`, otherwise we use `tf.cond` to dynamically route to both.
Arguments:
pred: A scalar determining whether to return the result of `true_fn` or
`false_fn`.
true_fn: The callable to be performed if pred is true.
false_fn: The callable to be performed if pred is false.
name: Optional name prefix when using `tf.cond`.
Returns:
Tensors returned by the call to either `true_fn` or `false_fn`.
Raises:
TypeError: If `true_fn` or `false_fn` is not callable.
"""
if not callable(true_fn):
raise TypeError('`true_fn` must be callable.')
if not callable(false_fn):
raise TypeError('`false_fn` must be callable.')
pred_value = tf.get_static_value(pred)
if isinstance(pred, tf.Tensor) or pred_value is None:
return tf.cond(pred, true_fn=true_fn, false_fn=false_fn, name=name)
if pred_value:
return true_fn()
else:
return false_fn()
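def _smart_cond_example() -> tf.Tensor:
  """Illustrative sketch only; not used by the model code below.
  With a static Python bool, `smart_cond` calls a single branch directly;
  with a tensor predicate it falls back to `tf.cond`.
  """
  x = tf.constant(1.0)
  static_result = smart_cond(True, lambda: x + 1.0, lambda: x - 1.0)  # 2.0
  dynamic_result = smart_cond(
      tf.greater(x, 0.0), lambda: x + 1.0, lambda: x - 1.0)  # tf.cond path.
  return static_result + dynamic_result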
@gin.configurable
class CircuitTrainingModel(tf.keras.layers.Layer):
"""GCN-based model for circuit training."""
EPSILON = 1E-6
def __init__(
self,
static_features: Optional[Dict[Text, np.ndarray]] = None,
num_gcn_layers: int = 3,
edge_fc_layers: int = 1,
gcn_node_dim: int = 8,
dirichlet_alpha: float = 0.1,
policy_noise_weight: float = 0.0,
seed: int = 0,
):
"""Builds the circuit training model.
Args:
static_features: Optional static features that are invariant across steps
on the same netlist, such as netlist metadata and the adj graphs. If not
provided, use the input features in the call method.
num_gcn_layers: Number of GCN layers.
edge_fc_layers: Number of fully connected layers in the GCN kernel.
gcn_node_dim: Node embedding dimension.
dirichlet_alpha: Dirichlet concentration value.
policy_noise_weight: Weight of the noise added to policy.
seed: Seed for sampling noise.
"""
super(CircuitTrainingModel, self).__init__()
self._num_gcn_layers = num_gcn_layers
self._gcn_node_dim = gcn_node_dim
self._dirichlet_alpha = dirichlet_alpha
self._policy_noise_weight = policy_noise_weight
self._seed = seed
self._static_features = static_features
self._observation_config = observation_config.ObservationConfig()
seed = tfp.util.SeedStream(self._seed, salt='kernel_initializer_seed')
kernel_initializer = tf.keras.initializers.glorot_uniform(seed=seed() %
sys.maxsize)
self._metadata_encoder = tf.keras.Sequential([
tf.keras.layers.Dense(
self._gcn_node_dim, kernel_initializer=kernel_initializer),
tf.keras.layers.ReLU(),
],
name='metadata_encoder')
self._feature_encoder = tf.keras.Sequential([
tf.keras.layers.Dense(
self._gcn_node_dim, kernel_initializer=kernel_initializer),
tf.keras.layers.ReLU(),
],
name='feature_encoder')
# Edge-centric GCN layers.
def create_edge_fc(name=None) -> tf.keras.layers.Layer:
seq = tf.keras.Sequential(name=name)
for _ in range(edge_fc_layers):
seq.add(
tf.keras.layers.Dense(
self._gcn_node_dim, kernel_initializer=kernel_initializer))
seq.add(tf.keras.layers.ReLU())
return seq
self._edge_fc_list = [
create_edge_fc(name='edge_fc_%d' % i)
for i in range(self._num_gcn_layers)
]
# Dot-product attention layer, a.k.a. Luong-style attention [1].
# [1] Luong, et al, 2015.
self._attention_layer = tf.keras.layers.Attention(name='attention_layer')
self._attention_query_layer = tf.keras.layers.Dense(
self._gcn_node_dim,
name='attention_query_layer',
kernel_initializer=kernel_initializer)
self._attention_key_layer = tf.keras.layers.Dense(
self._gcn_node_dim,
name='attention_key_layer',
kernel_initializer=kernel_initializer)
self._attention_value_layer = tf.keras.layers.Dense(
self._gcn_node_dim,
name='attention_value_layer',
kernel_initializer=kernel_initializer)
self._value_head = tf.keras.Sequential([
tf.keras.layers.Dense(32, kernel_initializer=kernel_initializer),
tf.keras.layers.ReLU(),
tf.keras.layers.Dense(8, kernel_initializer=kernel_initializer),
tf.keras.layers.ReLU(),
tf.keras.layers.Dense(1, kernel_initializer=kernel_initializer),
],
name='value_head')
    # GAN-like deconv layers to generate the policy image.
# See figures in http://shortn/_9HCSFwasnu.
self._policy_location_head = tf.keras.Sequential(
[
tf.keras.layers.Dense(
(self._observation_config.max_grid_size // 16 *
self._observation_config.max_grid_size // 16 * 32),
kernel_initializer=kernel_initializer),
# 128/16*128/16*32 = 8*8*32
tf.keras.layers.ReLU(),
tf.keras.layers.Reshape(
target_shape=(self._observation_config.max_grid_size // 16,
self._observation_config.max_grid_size // 16,
32)),
# 8x8x32
tf.keras.layers.Conv2DTranspose(
filters=16,
kernel_size=3,
strides=2,
padding='same',
kernel_initializer=kernel_initializer),
# 16x16x16
tf.keras.layers.ReLU(),
tf.keras.layers.Conv2DTranspose(
filters=8,
kernel_size=3,
strides=2,
padding='same',
kernel_initializer=kernel_initializer),
# 32x32x8
tf.keras.layers.ReLU(),
tf.keras.layers.Conv2DTranspose(
filters=4,
kernel_size=3,
strides=2,
padding='same',
kernel_initializer=kernel_initializer),
# 64x64x4
tf.keras.layers.ReLU(),
tf.keras.layers.Conv2DTranspose(
filters=2,
kernel_size=3,
strides=2,
padding='same',
kernel_initializer=kernel_initializer),
# 128x128x2
tf.keras.layers.ReLU(),
# No activation.
tf.keras.layers.Conv2DTranspose(
filters=1,
kernel_size=3,
strides=1,
padding='same',
kernel_initializer=kernel_initializer),
# 128x128x1
tf.keras.layers.Flatten()
],
name='policy_location_head')
def _scatter_count(self, edge_h: tf.Tensor,
indices: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
"""Aggregate (reduce sum) edge embeddings to nodes.
Args:
edge_h: A [-1, #edges, h] tensor of edge embeddings.
indices: A [-1, #edges] tensor of node index of an edge (sparse adjacency
indices).
Returns:
      A [-1, #nodes, h] tensor of aggregated node embeddings and a
      [-1, #nodes, h] tensor of edge counts per node for finding the mean.
"""
batch = tf.shape(edge_h)[0]
num_items = tf.shape(edge_h)[1]
    num_latents = edge_h.shape[2]
    h_node = tf.zeros(
        [batch, self._observation_config.max_num_nodes, num_latents])
count_edge = tf.zeros_like(h_node)
count = tf.ones_like(edge_h)
b_indices = tf.tile(
tf.expand_dims(tf.range(0, tf.cast(batch, dtype=tf.int32)), -1),
[1, num_items])
idx = tf.stack([b_indices, indices], axis=-1)
h_node = tf.tensor_scatter_nd_add(h_node, idx, edge_h)
count_edge = tf.tensor_scatter_nd_add(count_edge, idx, count)
return h_node, count_edge
def gather_to_edges(
self, h_nodes: tf.Tensor, sparse_adj_i: tf.Tensor,
sparse_adj_j: tf.Tensor,
sparse_adj_weight: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
"""Gathers node embeddings to edges.
    For each edge, there are two node embeddings; they are concatenated
    together with the edge weight. The output is masked with 0 for edges with
    no weight.
Args:
h_nodes: A [-1, #node, h] tensor of node embeddings.
sparse_adj_i: A [-1, #edges] tensor for the 1st node index of an edge.
sparse_adj_j: A [-1, #edges] tensor for the 2nd node index of an edge.
sparse_adj_weight: A [-1, #edges] tensor for the weight of an edge. 0 for
fake padded edges.
Returns:
      Two [-1, #edges, 2*h+1] tensors of edge embeddings, one per direction.
"""
h_edges_1 = tf.gather(h_nodes, sparse_adj_i, batch_dims=1)
h_edges_2 = tf.gather(h_nodes, sparse_adj_j, batch_dims=1)
h_edges_12 = tf.concat([h_edges_1, h_edges_2, sparse_adj_weight], axis=-1)
h_edges_21 = tf.concat([h_edges_2, h_edges_1, sparse_adj_weight], axis=-1)
mask = tf.broadcast_to(
tf.not_equal(sparse_adj_weight, 0.0), tf.shape(h_edges_12))
h_edges_i_j = tf.where(mask, h_edges_12, tf.zeros_like(h_edges_12))
h_edges_j_i = tf.where(mask, h_edges_21, tf.zeros_like(h_edges_21))
return h_edges_i_j, h_edges_j_i
def scatter_to_nodes(self, h_edges: tf.Tensor, sparse_adj_i: tf.Tensor,
sparse_adj_j: tf.Tensor) -> tf.Tensor:
"""Scatters edge embeddings to nodes via mean aggregation.
For each node, it aggregates the embeddings of all the connected edges by
averaging them.
Args:
h_edges: A [-1, #edges, h] tensor of edge embeddings.
sparse_adj_i: A [-1, #edges] tensor for the 1st node index of an edge.
sparse_adj_j: A [-1, #edges] tensor for the 2nd node index of an edge.
Returns:
A [-1, #node, h] tensor of node embeddings.
"""
h_nodes_1, count_1 = self._scatter_count(h_edges, sparse_adj_i)
h_nodes_2, count_2 = self._scatter_count(h_edges, sparse_adj_j)
return (h_nodes_1 + h_nodes_2) / (count_1 + count_2 + self.EPSILON)
def self_attention(self,
h_current_node: tf.Tensor,
h_nodes: tf.Tensor,
training: bool = False) -> tf.Tensor:
"""Returns self-attention wrt to the current node.
Args:
h_current_node: A [-1, 1, h] tensor of the current node embedding.
h_nodes: A [-1, #nodes, h] tensor of all node embeddings.
training: Set in the training mode.
Returns:
A [-1, h] tensor of the weighted average of the node embeddings where
the weight is the attention score with respect to the current node.
"""
query = self._attention_query_layer(h_current_node, training=training)
values = self._attention_value_layer(h_nodes, training=training)
keys = self._attention_key_layer(h_nodes, training=training)
h_attended = self._attention_layer([query, values, keys], training=training)
h_attended = tf.squeeze(h_attended, axis=1)
return h_attended
def add_noise(self, logits: tf.Tensor) -> tf.Tensor:
"""Adds a non-trainable dirichlet noise to the policy."""
seed = tfp.util.SeedStream(self._seed, salt='noise_seed')
probs = tf.nn.softmax(logits)
alphas = tf.fill(tf.shape(probs), self._dirichlet_alpha)
dirichlet_distribution = tfp.distributions.Dirichlet(alphas)
noise = dirichlet_distribution.sample(seed=seed() % sys.maxsize)
noised_probs = ((1.0 - self._policy_noise_weight) * probs +
(self._policy_noise_weight) * noise)
noised_logit = tf.math.log(noised_probs + self.EPSILON)
return noised_logit
def _get_static_input(self, static_feature_key: Text,
inputs: Dict[Text, tf.Tensor]) -> tf.Tensor:
"""Returns the tensor for a particular static feature.
Args:
static_feature_key: a feature key defined in
observation_config.STATIC_OBSERVATIONS
inputs: the dictionary of input features.
Returns:
A tensor for the static feature.
"""
if self._static_features:
      # For the online single-netlist training, replicate the feature by the
      # batch size, picking an arbitrary non-static feature to get the dynamic
      # batch dimension at runtime.
num_batches_dim = tf.shape(inputs['current_node'])[0]
return tf.tile(
tf.expand_dims(self._static_features[static_feature_key], 0),
[num_batches_dim, 1])
else:
      # For the offline multi-netlist training, read the static feature from
      # the inputs.
return inputs[static_feature_key]
def call(self,
inputs: tf.Tensor,
training: bool = False,
is_eval: bool = False) -> Tuple[Dict[Text, tf.Tensor], tf.Tensor]:
# Netlist metadata.
netlist_metadata_inputs = [
self._get_static_input(key, inputs)
for key in observation_config.NETLIST_METADATA
]
# Graph.
sparse_adj_weight = self._get_static_input('sparse_adj_weight', inputs)
sparse_adj_i = tf.cast(
self._get_static_input('sparse_adj_i', inputs), dtype=tf.int32)
sparse_adj_j = tf.cast(
self._get_static_input('sparse_adj_j', inputs), dtype=tf.int32)
# Node features.
node_types = self._get_static_input('node_types', inputs)
is_node_placed = tf.cast(inputs['is_node_placed'], dtype=tf.float32)
macros_w = self._get_static_input('macros_w', inputs)
macros_h = self._get_static_input('macros_h', inputs)
locations_x = inputs['locations_x']
locations_y = inputs['locations_y']
# Current node.
current_node = tf.cast(inputs['current_node'], dtype=tf.int32)
is_hard_macro = tf.cast(
tf.math.equal(node_types, observation_config.HARD_MACRO),
dtype=tf.float32)
is_soft_macro = tf.cast(
tf.math.equal(node_types, observation_config.SOFT_MACRO),
dtype=tf.float32)
is_port_cluster = tf.cast(
tf.math.equal(node_types, observation_config.PORT_CLUSTER),
dtype=tf.float32)
netlist_metadata = tf.concat(netlist_metadata_inputs, axis=1)
h_metadata = self._metadata_encoder(netlist_metadata, training=training)
h_nodes = tf.stack([
locations_x,
locations_y,
macros_w,
macros_h,
is_hard_macro,
is_soft_macro,
is_port_cluster,
is_node_placed,
],
axis=2)
h_nodes = self._feature_encoder(h_nodes, training=training)
# Edge-centric GCN
#
# Here, we are using a modified version of Graph Convolutional Network
# (GCN)[1] that focuses on edge properties rather than node properties.
# In this modified GCN, the features of neighbouring nodes are
# mixed together to create edge features. Then, edge features are
# aggregated on the connected nodes to create the output node embedding.
# The GCN message passing happens indirectly between neighbouring nodes
# through the mixing on the edges.
#
# Edge-centric GCN for Circuit Training
#
# The nodes of the circuit training observation graph are hard macros,
# soft macros, and port clusters and the edges are the wires between them.
    # The intuition behind using an edge-centric GCN is that the wirelength
    # and congestion costs (reward signals) depend on properties of the wires
    # (edges) and not of the macros.
    # This architecture has shown promising results on supervised graph
    # regression for predicting wirelength and congestion, and we hope it
    # performs well in the reinforcement learning setting for predicting
    # value and policy.
#
    # An alternative approach is applying the original GCN to the line graph
    # of the circuit graph (see https://en.wikipedia.org/wiki/Line_graph).
# Nodes of the line graph correspond to the edges of the original graph.
# However, the adjacency matrix of the line graph will be prohibitively
# large and can't be readily processed by GCN.
#
# See figures in http://shortn/_j1NsgZBqAr for edge-centric GCN.
#
# [1] Kipf and Welling, 2016.
sparse_adj_weight = tf.expand_dims(
sparse_adj_weight, axis=-1, name='sparse_adj_weight')
for i in range(self._num_gcn_layers):
# For bi-directional graph.
h_edges_i_j, h_edges_j_i = self.gather_to_edges(
h_nodes=h_nodes,
sparse_adj_i=sparse_adj_i,
sparse_adj_j=sparse_adj_j,
sparse_adj_weight=sparse_adj_weight)
h_edges = (self._edge_fc_list[i](h_edges_i_j, training=training) +
self._edge_fc_list[i](h_edges_j_i, training=training)) / 2.0
h_nodes_new = self.scatter_to_nodes(h_edges, sparse_adj_i, sparse_adj_j)
# Skip connection.
h_nodes = h_nodes_new + h_nodes
observation_hiddens = []
observation_hiddens.append(h_metadata)
h_all_edges = tf.reduce_mean(h_edges, axis=1)
observation_hiddens.append(h_all_edges)
h_current_node = tf.gather(h_nodes, current_node, batch_dims=1)
h_attended = self.self_attention(h_current_node, h_nodes, training=training)
observation_hiddens.append(h_attended)
h_current_node = tf.squeeze(h_current_node, axis=1)
observation_hiddens.append(h_current_node)
h = tf.concat(observation_hiddens, axis=1)
location_logits = self._policy_location_head(h, training=training)
    # smart_cond avoids using tf.cond when the condition value is static.
logits = {
'location':
smart_cond(is_eval, lambda: location_logits,
lambda: self.add_noise(location_logits)),
}
value = self._value_head(h, training=training)
return logits, value
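def _toy_scatter_mean_example() -> tf.Tensor:
  """Illustrative sketch of the edge-to-node mean aggregation above.
  Not used by the model; mirrors the `_scatter_count`/`scatter_to_nodes`
  pattern on a toy graph with 3 nodes and 2 edges that both touch node 0.
  """
  edge_h = tf.constant([[[1.0, 1.0], [3.0, 3.0]]])  # [B=1, #edges=2, h=2]
  indices = tf.constant([[0, 0]])  # Node index per edge.
  h_node = tf.zeros([1, 3, 2])  # [B, #nodes, h]
  b_idx = tf.tile(tf.expand_dims(tf.range(1), -1), [1, 2])  # Batch indices.
  idx = tf.stack([b_idx, indices], axis=-1)  # [B, #edges, 2]
  summed = tf.tensor_scatter_nd_add(h_node, idx, edge_h)
  counts = tf.tensor_scatter_nd_add(
      tf.zeros_like(h_node), idx, tf.ones_like(edge_h))
  return summed / (counts + 1e-6)  # Node 0 -> [2.0, 2.0]; nodes 1, 2 -> 0.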
|
# coding=utf-8
# Copyright 2021 The Circuit Training Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for circuit_training.model.model_lib."""
from absl import flags
from absl import logging
from circuit_training.environment import observation_config
from circuit_training.model import model_lib
from circuit_training.utils import test_utils
import tensorflow as tf
from tf_agents.train.utils import strategy_utils
flags.DEFINE_enum('strategy_type', 'tpu', [
'tpu', 'gpu', 'cpu'
], ('Distribution Strategy type to use for training. `tpu` uses TPUStrategy '
    'for running on TPUs (1x1), `gpu` uses GPUs with a single host.'))
FLAGS = flags.FLAGS
def make_strategy():
if FLAGS.strategy_type == 'tpu':
resolver = tf.distribute.cluster_resolver.TPUClusterResolver('')
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
return tf.distribute.experimental.TPUStrategy(resolver)
elif FLAGS.strategy_type == 'gpu':
return strategy_utils.get_strategy(tpu=None, use_gpu=True)
else:
return strategy_utils.get_strategy(tpu=None, use_gpu=False)
class ModelTest(test_utils.TestCase):
def test_extract_feature(self):
config = observation_config.ObservationConfig()
static_features = config.observation_space.sample()
strategy = make_strategy()
with strategy.scope():
test_model = model_lib.CircuitTrainingModel(
static_features=static_features)
@tf.function
def forward():
obs = config.observation_space.sample()
obs = tf.nest.map_structure(lambda x: tf.expand_dims(x, 0), obs)
return test_model(obs)
per_replica_result = strategy.run(forward)
logits, value = strategy.reduce('MEAN', per_replica_result, axis=None)
logging.info('logits: %s', logits)
logging.info('value: %s', value)
self.assertAllEqual(logits['location'].shape, (1, config.max_grid_size**2))
self.assertAllEqual(value.shape, (1, 1))
def test_backwards_pass(self):
config = observation_config.ObservationConfig()
static_features = config.observation_space.sample()
strategy = make_strategy()
with strategy.scope():
test_model = model_lib.CircuitTrainingModel(
static_features=static_features)
optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
obs = config.observation_space.sample()
obs = tf.nest.map_structure(lambda x: tf.expand_dims(x, 0), obs)
@tf.function
def loss_fn(x, training=False):
logits, value = test_model(x, training=training)
loss = (
tf.math.reduce_sum(logits['location']) + tf.math.reduce_sum(value))
return loss
def train_step(obs):
with tf.GradientTape() as tape:
loss = loss_fn(obs, training=True)
grads = tape.gradient(loss, test_model.trainable_variables)
optimizer.apply_gradients(zip(grads, test_model.trainable_variables))
return loss
@tf.function
def loss_fn_run(obs):
loss = strategy.run(loss_fn, args=(obs,))
return strategy.reduce('MEAN', loss, axis=None)
@tf.function
def train_step_run(obs):
strategy.run(train_step, args=(obs,))
# Gather variables and loss before training
initial_loss = loss_fn_run(obs).numpy()
initial_weights = [v.numpy() for v in test_model.trainable_variables]
# Run one train step
train_step_run(obs)
# Re-compute the loss
current_loss = loss_fn_run(obs).numpy()
current_weights = [v.numpy() for v in test_model.trainable_variables]
# Verify loss and weights have changed.
self.assertNotAllClose(initial_weights, current_weights)
self.assertNotAlmostEqual(initial_loss, current_loss)
if __name__ == '__main__':
test_utils.main()
|
# coding=utf-8
# Copyright 2021 The Circuit Training Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# coding=utf-8
# Copyright 2021 The Circuit Training Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Circtuittraining GRL Model."""
from typing import Optional, Text
from absl import logging
from circuit_training.model import model_lib
import gin
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tf_agents.networks import network
from tf_agents.specs import distribution_spec
import tf_agents.specs.tensor_spec as tensor_spec
from tf_agents.typing import types
from tf_agents.utils import nest_utils
@gin.configurable(module='circuittraining.models')
class GrlModel(network.Network):
"""Circuit GRL Model used as part of the canonical version."""
def __init__(self,
input_tensors_spec: types.NestedTensorSpec,
output_tensors_spec: types.NestedTensorSpec,
name: Optional[Text] = None,
state_spec=(),
policy_noise_weight: float = 0.0,
static_features=None,
use_model_tpu=True,
unrolled=True):
super(GrlModel, self).__init__(
input_tensor_spec=input_tensors_spec, state_spec=state_spec, name=name)
# TODO(esonghori): Remove use_model_tpu.
del use_model_tpu
if static_features:
logging.info('Static features are passed to the model construction.')
self._model = model_lib.CircuitTrainingModel(
policy_noise_weight=policy_noise_weight,
static_features=static_features)
def call(self, inputs, network_state=()):
logits, value = self._model(inputs)
return {'logits': logits, 'value': value}, network_state
@gin.configurable(module='circuittraining.models')
class GrlPolicyModel(network.DistributionNetwork):
"""Circuit GRL Model."""
def __init__(self, shared_network: network.Network,
input_tensors_spec: types.NestedTensorSpec,
output_tensors_spec: types.NestedTensorSpec,
name: Optional[Text] = 'GrlPolicyModel'):
super(GrlPolicyModel, self).__init__(
input_tensor_spec=input_tensors_spec,
state_spec=(),
output_spec=output_tensors_spec,
name=name)
self._input_tensors_spec = input_tensors_spec
self._shared_network = shared_network
self._output_tensors_spec = output_tensors_spec
n_unique_actions = np.unique(output_tensors_spec.maximum -
output_tensors_spec.minimum + 1)
input_param_spec = {
'logits':
tensor_spec.TensorSpec(
shape=n_unique_actions,
dtype=tf.float32,
name=name + '_logits')
}
self._output_dist_spec = distribution_spec.DistributionSpec(
tfp.distributions.Categorical,
input_param_spec,
sample_spec=output_tensors_spec,
dtype=output_tensors_spec.dtype)
@property
def output_spec(self):
return self._output_dist_spec
@property
def distribution_tensor_spec(self):
return self._output_dist_spec
def call(self, inputs, step_types=None, network_state=()):
outer_rank = nest_utils.get_outer_rank(inputs, self._input_tensors_spec)
if outer_rank == 0:
inputs = tf.nest.map_structure(lambda x: tf.reshape(x, (1, -1)), inputs)
model_out, _ = self._shared_network(inputs)
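    # Note (illustrative): masked-out (invalid) grid cells receive a large
    # negative logit (~-4.3e9) below, so the resulting Categorical
    # distribution assigns them effectively zero probability.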
paddings = tf.ones_like(inputs['mask'], dtype=tf.float32) * (-2.**32 + 1)
masked_logits = tf.where(
tf.cast(inputs['mask'], tf.bool), model_out['logits']['location'],
paddings)
output_dist = self._output_dist_spec.build_distribution(
logits=masked_logits)
return output_dist, network_state
@gin.configurable(module='circuittraining.models')
class GrlValueModel(network.Network):
"""Circuit GRL Model."""
def __init__(self, input_tensors_spec: types.NestedTensorSpec,
shared_network: network.Network, name: Optional[Text] = None):
super(GrlValueModel, self).__init__(
input_tensor_spec=input_tensors_spec, state_spec=(), name=name)
self._input_tensors_spec = input_tensors_spec
self._shared_network = shared_network
def call(self, inputs, step_types=None, network_state=()):
outer_rank = nest_utils.get_outer_rank(inputs,
self._input_tensors_spec)
if outer_rank == 0:
inputs = tf.nest.map_structure(lambda x: tf.reshape(x, (1, -1)), inputs)
model_out, _ = self._shared_network(inputs)
def squeeze_value_dim(value):
# Make value_prediction's shape from [B, T, 1] to [B, T].
return tf.squeeze(value, -1)
return squeeze_value_dim(model_out['value']), network_state
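# A minimal wiring sketch (illustrative; the specs come from a TF-Agents
# environment, as in circuit_training/model/model_test.py):
#
#   shared = GrlModel(input_tensors_spec=obs_spec, output_tensors_spec=None)
#   actor_net = GrlPolicyModel(shared, obs_spec, action_spec)
#   value_net = GrlValueModel(obs_spec, shared)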
|
# coding=utf-8
# Copyright 2021 The Circuit Training Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PlacementCost client class."""
import json
import socket
import subprocess
import tempfile
from typing import Any, Text
from absl import flags
from absl import logging
flags.DEFINE_string('plc_wrapper_main', 'plc_wrapper_main',
'Path to plc_wrapper_main binary.')
FLAGS = flags.FLAGS
class PlacementCost(object):
"""PlacementCost object wrapper."""
BUFFER_LEN = 1024 * 1024
MAX_RETRY = 10
def __init__(self,
netlist_file: Text,
macro_macro_x_spacing: float = 0.0,
macro_macro_y_spacing: float = 0.0) -> None:
"""Creates a PlacementCost client object.
    It creates a subprocess by calling plc_wrapper_main and communicates with
    it over an `AF_UNIX` channel.
Args:
netlist_file: Path to the netlist proto text file.
macro_macro_x_spacing: Macro-to-macro x spacing in microns.
macro_macro_y_spacing: Macro-to-macro y spacing in microns.
"""
if not FLAGS.plc_wrapper_main:
raise ValueError('FLAGS.plc_wrapper_main should be specified.')
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
address = tempfile.NamedTemporaryFile().name
self.sock.bind(address)
self.sock.listen(1)
args = [
FLAGS.plc_wrapper_main, #
'--uid=',
'--gid=',
f'--pipe_address={address}',
f'--netlist_file={netlist_file}',
f'--macro_macro_x_spacing={macro_macro_x_spacing}',
f'--macro_macro_y_spacing={macro_macro_y_spacing}',
]
self.process = subprocess.Popen([str(a) for a in args])
self.conn, _ = self.sock.accept()
# See circuit_training/environment/plc_client_test.py for the supported APIs.
def __getattr__(self, name) -> Any:
# snake_case to PascalCase.
name = name.replace('_', ' ').title().replace(' ', '')
def f(*args) -> Any:
json_args = json.dumps({'name': name, 'args': args})
self.conn.send(json_args.encode('utf-8'))
json_ret = b''
retry = 0
      # The stream from the unix socket can be incomplete after a single call
      # to `recv` for large (200kb+) return values, e.g. GetMacroAdjacency.
      # The loop retries until the returned value is valid JSON. When the
      # host is under load, ~10 retries have been needed. Adding a sleep did
      # not seem to make a difference; it only added latency. b/210838186
while True:
part = self.conn.recv(PlacementCost.BUFFER_LEN)
json_ret += part
if len(part) < PlacementCost.BUFFER_LEN:
json_str = json_ret.decode('utf-8')
try:
output = json.loads(json_str)
break
except json.decoder.JSONDecodeError as e:
            logging.warning('JSONDecode Error for %s \n %s', name, e)
if retry < PlacementCost.MAX_RETRY:
logging.info('Looking for more data for %s on connection:%s/%s',
name, retry, PlacementCost.MAX_RETRY)
retry += 1
else:
raise e
if isinstance(output, dict):
if 'ok' in output and not output['ok']: # Status::NotOk
raise ValueError(
f"Error in calling {name} with {args}: {output['message']}.")
elif '__tuple__' in output: # Tuple
output = tuple(output['items'])
return output
return f
def __del__(self) -> None:
self.conn.close()
self.process.kill()
self.process.wait()
self.sock.close()
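# Usage sketch (illustrative; requires the plc_wrapper_main binary on the
# host): attribute access is proxied over the socket, with snake_case names
# converted to PascalCase RPCs, e.g. `plc.get_cost()` sends
# {"name": "GetCost", "args": []} and returns the decoded JSON value.
#
#   plc = PlacementCost('/path/to/netlist.pb.txt')
#   cost = plc.get_cost()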
|
# coding=utf-8
# Copyright 2021 The Circuit Training Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for placement_util_non_prod."""
import os
from absl import flags
from circuit_training.environment import placement_util
from circuit_training.utils import test_utils
# Internal gfile dependencies
FLAGS = flags.FLAGS
TEST_FILE_BODY = r"""# Placement file for Circuit Training
# Source input file(s) : /input/netlist.pb.txt
# This file : /this/file/initial.plc
# Original initial placement : /original/initial.plc
# Columns : 30 Rows : 20
# Width : 3000.0 Height : 2000.0
# Project : viperfish
# Block : fp_test_1
# Blockage : 0.0 100.0 3000.0 300.0 1.0
# Blockage : 3000.0 0.0 500.0 2000.0 1.0
"""
class MockPlacementCost(object):
"""A Mock class of PlacementCost for testing."""
def __init__(self):
self.node_type = ['PORT', 'PORT', 'MACRO_PIN', 'MACRO_PIN', 'MACRO']
self._fix_node_coord = [False] * len(self.node_type)
def get_node_type(self, node: int):
if node >= len(self.node_type):
return None
return self.node_type[node]
def fix_node_coord(self, node: int):
self._fix_node_coord[node] = True
def get_grid_num_columns_rows(self):
return (10, 12)
def get_canvas_width_height(self):
return (100.0, 120.0)
def get_routes_per_micron(self):
return (1.0, 2.0)
def get_macro_routing_allocation(self):
return (3.0, 4.0)
def get_congestion_smooth_range(self):
return 2.0
def get_source_filename(self):
return '/source/filename'
def get_area(self):
return 10
def get_wirelength(self):
return 11.0
def get_cost(self):
return 12.0
def get_congestion_cost(self):
return 13.0
def get_density_cost(self):
return 14.0
def get_project_name(self):
return 'project'
def get_block_name(self):
return 'block'
def get_overlap_threshold(self):
return 1e-6
def get_blockages(self):
return [[0, 0, 10.0, 10.0], [0, 20.0, 10.0, 30.0]]
def get_ref_node_id(self, node_id):
del node_id
return -1
def is_node_soft_macro(self, node_id):
del node_id
return False
def save_placement(self, filename, info):
print(info)
with open(filename, 'wt') as f:
for l in info.split('\n'):
f.write('# ' + l + '\n')
class PlacementUtilTest(test_utils.TestCase):
def test_mock_plc_get_node_type(self):
plc = MockPlacementCost()
self.assertEqual(list(placement_util.nodes_of_types(plc, ['PORT'])), [0, 1])
self.assertEqual(
list(placement_util.nodes_of_types(plc, ['MACRO_PIN'])), [2, 3])
self.assertEqual(list(placement_util.nodes_of_types(plc, ['MACRO'])), [4])
self.assertEqual(
list(placement_util.nodes_of_types(plc, ['PORT', 'MACRO'])), [0, 1, 4])
self.assertEmpty(list(placement_util.nodes_of_types(plc, ['BAD_TYPE'])))
def test_mock_plc_fix_port_coordinates(self):
plc = MockPlacementCost()
placement_util.fix_port_coordinates(plc)
self.assertTrue(plc._fix_node_coord[0])
self.assertTrue(plc._fix_node_coord[1])
self.assertFalse(plc._fix_node_coord[2])
self.assertFalse(plc._fix_node_coord[3])
def test_sample_file_extract_attribute(self):
tempfile = self.create_tempfile().full_path
with open(tempfile, 'wt') as f:
f.write(TEST_FILE_BODY)
self.assertEqual(
placement_util.extract_attribute_from_comments('Block', [tempfile]),
'fp_test_1')
self.assertEqual(
placement_util.extract_attribute_from_comments('Project', [tempfile]),
'viperfish')
self.assertIsNone(
        placement_util.extract_attribute_from_comments('Unknown_Attribute',
[tempfile]))
def test_sample_file_extract_parameters(self):
tempfile = self.create_tempfile().full_path
with open(tempfile, 'wt') as f:
f.write(TEST_FILE_BODY)
sizes = placement_util.extract_sizes_from_comments([tempfile])
self.assertLen(sizes, 4)
canvas_width, canvas_height, grid_cols, grid_rows = sizes
self.assertEqual(canvas_width, 3000.0)
self.assertEqual(canvas_height, 2000.0)
self.assertEqual(grid_cols, 30)
self.assertEqual(grid_rows, 20)
def test_sample_file_get_blockages(self):
tempfile = self.create_tempfile().full_path
with open(tempfile, 'wt') as f:
f.write(TEST_FILE_BODY)
blockages = placement_util.get_blockages_from_comments([tempfile])
self.assertLen(blockages, 2)
self.assertEqual(blockages[0], [0.0, 100.0, 3000.0, 300.0, 1.0])
self.assertEqual(blockages[1], [3000.0, 0.0, 500.0, 2000.0, 1.0])
def test_save_placement(self):
filename = os.path.join(self.create_tempdir(), 'placement.plc')
plc = MockPlacementCost()
placement_util.save_placement(plc, filename, 'user_comments')
sizes = placement_util.extract_sizes_from_comments([filename])
self.assertEqual(sizes, (100.0, 120.0, 10, 12))
self.assertEqual(
placement_util.extract_attribute_from_comments('Area', [filename]),
'10')
self.assertEqual(
placement_util.extract_attribute_from_comments('Wirelength',
[filename]), '11')
self.assertEqual(
placement_util.extract_attribute_from_comments('Wirelength cost',
[filename]), '12')
self.assertEqual(
placement_util.extract_attribute_from_comments('Congestion cost',
[filename]), '13')
self.assertEqual(
placement_util.extract_attribute_from_comments('Density cost',
[filename]), '14')
self.assertEqual(
placement_util.extract_attribute_from_comments('Project', [filename]),
'project')
self.assertEqual(
placement_util.extract_attribute_from_comments('Block', [filename]),
'block')
self.assertEqual(
placement_util.extract_attribute_from_comments('Smoothing factor',
[filename]), '2')
self.assertEqual(
placement_util.extract_attribute_from_comments('Overlap threshold',
[filename]), '1e-06')
self.assertEqual(
placement_util.get_blockages_from_comments([filename]),
[[0, 0, 10.0, 10.0], [0, 20.0, 10.0, 30.0]])
def test_sample_netlist_create_plc(self):
"""Test creating placement cost with sample netlist.
# Internal circuit training docs link.
"""
test_netlist_dir = ('circuit_training/'
'environment/test_data/macro_tiles_10x10')
netlist_file = os.path.join(FLAGS.test_srcdir, test_netlist_dir,
'netlist.pb.txt')
init_placement = os.path.join(FLAGS.test_srcdir, test_netlist_dir,
'initial.plc')
plc = placement_util.create_placement_cost(
netlist_file=netlist_file, init_placement=init_placement)
self.assertEqual(plc.get_canvas_width_height(), (1200, 1200))
self.assertEqual(plc.get_grid_num_columns_rows(), (20, 20))
self.assertEqual(plc.get_project_name(), 'circuit_training')
self.assertEqual(plc.get_block_name(), 'macro_tiles_10x10')
self.assertEqual(plc.get_routes_per_micron(), (70.33, 74.51))
self.assertEqual(plc.get_macro_routing_allocation(), (51.79, 51.79))
self.assertEqual(plc.get_congestion_smooth_range(), 2.0)
self.assertEqual(plc.get_overlap_threshold(), 4e-3)
self.assertFalse(plc.get_canvas_boundary_check())
self.assertGreater(plc.get_cost(), 0.0)
def test_sample_netlist_run_fd(self):
"""Test running FD on a sample netlist.
# Internal circuit training docs link.
"""
test_netlist_dir = ('circuit_training/'
'environment/test_data/sample_clustered')
netlist_file = os.path.join(FLAGS.test_srcdir, test_netlist_dir,
'netlist.pb.txt')
init_placement = os.path.join(FLAGS.test_srcdir, test_netlist_dir,
'initial.plc')
plc = placement_util.create_placement_cost(
netlist_file=netlist_file, init_placement=init_placement)
self.assertGreater(plc.get_cost(), 0.0)
placement_util.fd_placement_schedule(plc)
self.assertGreater(plc.get_cost(), 0.0)
if __name__ == '__main__':
test_utils.main()
|
# coding=utf-8
# Copyright 2021 The Circuit Training Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for circuit_training.environment.observation_config."""
from circuit_training.environment import observation_config
from circuit_training.utils import test_utils
import gin
class ObservationConfigTest(test_utils.TestCase):
def test_gin_binding_set_configs(self):
bindings = """
ObservationConfig.max_num_edges = 10
ObservationConfig.max_num_nodes = 11
ObservationConfig.max_grid_size = 12
"""
gin.parse_config(bindings)
config = observation_config.ObservationConfig()
self.assertEqual(config.max_num_edges, 10)
self.assertEqual(config.max_num_nodes, 11)
self.assertEqual(config.max_grid_size, 12)
def test_flatten_unflatten(self):
config = observation_config.ObservationConfig()
obs = config.observation_space.sample()
flatten_static_obs = observation_config.flatten_static(obs)
self.assertLen(flatten_static_obs.shape, 1)
static_tf_obs = observation_config.to_dict_static(flatten_static_obs)
np_obs = {k: static_tf_obs[k].numpy() for k in static_tf_obs}
for k in np_obs:
self.assertAllEqual(obs[k], np_obs[k])
flatten_dynamic_obs = observation_config.flatten_dynamic(obs)
dynamic_tf_obs = observation_config.to_dict_dynamic(flatten_dynamic_obs)
np_obs = {k: dynamic_tf_obs[k].numpy() for k in dynamic_tf_obs}
for k in np_obs:
self.assertAllEqual(obs[k], np_obs[k])
flatten_all_obs = observation_config.flatten_all(obs)
all_tf_obs = observation_config.to_dict_all(flatten_all_obs)
np_obs = {k: all_tf_obs[k].numpy() for k in all_tf_obs}
for k in np_obs:
self.assertAllEqual(obs[k], np_obs[k])
def test_observation_ordering(self):
static_observations = (
'normalized_num_edges',
'normalized_num_hard_macros',
'normalized_num_soft_macros',
'normalized_num_port_clusters',
'horizontal_routes_per_micron',
'vertical_routes_per_micron',
'macro_horizontal_routing_allocation',
'macro_vertical_routing_allocation',
'grid_cols',
'grid_rows',
'sparse_adj_i',
'sparse_adj_j',
'sparse_adj_weight',
'edge_counts',
'macros_w',
'macros_h',
'node_types',
)
dynamic_observations = (
'locations_x',
'locations_y',
'is_node_placed',
'current_node',
'mask',
)
all_observations = static_observations + dynamic_observations
# Make sure iterating order is only changed when we are deliberately
# modifying the keys in the feature set. The relative ordering is important
# because flatten/unflattening to/from a tensor is done by tf.split(). If
# ordering is different, the state will be not encoded the same way across
# training experiments/ evaluation runs.
for expected, actual in zip(static_observations,
observation_config.STATIC_OBSERVATIONS):
self.assertEqual(expected, actual)
for expected, actual in zip(dynamic_observations,
observation_config.DYNAMIC_OBSERVATIONS):
self.assertEqual(expected, actual)
for expected, actual in zip(all_observations,
observation_config.ALL_OBSERVATIONS):
self.assertEqual(expected, actual)
if __name__ == '__main__':
test_utils.main()
|
# coding=utf-8
# Copyright 2021 The Circuit Training Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An example and simple binary to create and call plc client."""
from typing import Sequence
from absl import app
from absl import flags
from circuit_training.environment import plc_client
flags.DEFINE_string("netlist_file", None, "Path to the input netlist file.")
flags.mark_flags_as_required([
"netlist_file",
])
FLAGS = flags.FLAGS
def main(argv: Sequence[str]) -> None:
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
plc = plc_client.PlacementCost(netlist_file=FLAGS.netlist_file)
print("get_cost:", plc.get_cost())
print("get_congestion_cost:", plc.get_congestion_cost())
print("get_density_cost:", plc.get_density_cost())
hard_macro_indices = [
m for m in plc.get_macro_indices() if not plc.is_node_soft_macro(m)
]
print("hard_macro_indices:", hard_macro_indices)
print("get_node_mask:", plc.get_node_mask(hard_macro_indices[0]))
if __name__ == "__main__":
app.run(main)
|
# coding=utf-8
# Copyright 2021 The Circuit Training Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""A placer that implements coordinate descent algorithm.
The placer can start from a scratch (i.e., empty grid), or from an existing node
locations specified by --init_placement.
The algorithm runs for a given number of epochs (iterations).
For each iteartion, for each node by a given --cd_node_order, place the node
greedily on the best grid location.
If --cd_use_stdcell_placer is True, place hard macros greedily first,
then followed by stdcell placer to place all stdcells.
When --cd_epochs=1, this algorithm is equivalent to greedy algorithm.
Example usage:
python circuit_training/environment/coordinate_descent_placer_main.py
--netlist_file "/path/to/netlist.pb.txt"
--init_placement "/path/to/initial_placement.plc"
"""
import functools
from absl import app
from absl import flags
from circuit_training.environment import coordinate_descent_placer
from circuit_training.environment import environment
from circuit_training.environment import placement_util
import numpy as np
flags.DEFINE_string('netlist_file', None, 'Path to netlist file.')
flags.DEFINE_string('init_placement', None, 'Path to initial placement file.')
flags.DEFINE_string('cd_output_dir', '/tmp/cd', 'CD output dir.')
flags.DEFINE_string('cd_placement_filename', 'cd', 'CD placement filename.')
FLAGS = flags.FLAGS
def main(_):
np.random.seed(FLAGS.seed)
plc = placement_util.create_placement_cost(FLAGS.netlist_file,
FLAGS.init_placement)
if not FLAGS.cd_use_init_location:
plc.unplace_all_nodes()
cost_fn = functools.partial(
environment.cost_fn,
wirelength_weight=1.0,
density_weight=0.1,
congestion_weight=0.1)
placer = coordinate_descent_placer.CoordinateDescentPlacer(plc, cost_fn)
placer.place()
placer.save_placement(FLAGS.cd_output_dir,
f'{FLAGS.cd_placement_filename}.plc')
print(f'Final CD placement can be found at {FLAGS.cd_output_dir}')
if __name__ == '__main__':
app.run(main)
|
# coding=utf-8
# Copyright 2021 The Circuit Training Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# coding=utf-8
# Copyright 2021 The Circuit Training Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A collection of non-prod utility functions for placement.
All the dependencies in this files should be non-prod.
"""
import datetime
import re
import textwrap
from typing import Dict, Iterator, List, Optional, Text, Tuple
from absl import logging
from circuit_training.environment import plc_client
import numpy as np
# Internal gfile dependencies
def nodes_of_types(plc: plc_client.PlacementCost,
type_list: List[Text]) -> Iterator[int]:
"""Yields the index of a node of certain types."""
i = 0
while True:
node_type = plc.get_node_type(i)
if not node_type:
break
if node_type in type_list:
yield i
i += 1
def extract_attribute_from_comments(attribute: Text,
filenames: List[Text]) -> Optional[Text]:
"""Parses the files' comments section, tries to extract the attribute.
Args:
attribute: attribute to look for (case sensetive).
filenames: List of protobuf file or a plc file.
Returns:
Attribute name string, or None if not found.
"""
for filename in filenames:
if filename:
f = filename.split(',')[0]
if f:
with open(f, 'rt') as infile:
for line in infile:
if line.startswith('#'):
match = re.search(fr'{attribute} : ([-\w]+)', line)
if match:
return match.group(1)
else:
# Do not parse the rest of the file, since all the comments are at
# the top.
break
return None
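# Hedged usage sketch (not part of the original module): netlist and .plc
# files carry metadata as leading '#' comment lines, e.g. "# Block : ariane".
# With such a file at the hypothetical path below, the call returns 'ariane':
#   extract_attribute_from_comments('Block', ['/path/to/initial.plc'])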
def get_blockages_from_comments(
filenames: List[Text]) -> Optional[List[List[float]]]:
"""Returns list of blockages if they exist in the file's comments section."""
for filename in filenames:
if not filename:
continue
blockages = []
# Read the first file if filename is comma separated list.
# Expected blockage info line format is:
# "# Blockage : <float> <float> <float> <float> <float>"
# where first four float numbers correspond to minx, miny, maxx, maxy of
# the rectangular region, the fifth one is the blockage rate. It's usually
# set to 1.
    try:
      with open(filename.split(',')[0], 'rt') as infile:
for line in infile:
if line.startswith('# Blockage : '):
blockages.append([float(x) for x in line.split()[3:8]])
elif not line.startswith('#'):
break
except OSError:
      logging.error('Could not read file %s.', filename)
if blockages:
return blockages
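# The helper below is an illustrative sketch, not part of the original API: it
# isolates the line-parsing step of get_blockages_from_comments so the expected
# blockage comment format is easy to see in isolation.
def _demo_parse_blockage_line(line: Text) -> List[float]:
  """Extracts the five floats from a line like '# Blockage : 0 0 50 50 1'."""
  # Tokens 0..2 are '#', 'Blockage', ':'; tokens 3..7 are minx, miny, maxx,
  # maxy, and the blockage rate.
  return [float(x) for x in line.split()[3:8]]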
def extract_sizes_from_comments(
filenames: List[Text]) -> Optional[Tuple[float, float, int, int]]:
"""Parses the file's comments section, tries to extract canvas/grid sizes.
Args:
filenames: A list of netlist (.pb.txt) or placement (.plc) files.
Returns:
Tuple of canvas_width, canvas_height, grid_cols, grid_rows
"""
for filename in filenames:
if not filename:
continue
canvas_width, canvas_height = None, None
grid_cols, grid_rows = None, None
with open(filename, 'rt') as infile:
for line in infile:
if line.startswith('#'):
fp_re = re.search(
r'FP bbox: \{([\d\.]+) ([\d\.]+)\} \{([\d\.]+) ([\d\.]+)\}', line)
if fp_re:
canvas_width = float(fp_re.group(3))
canvas_height = float(fp_re.group(4))
continue
plc_wh = re.search(r'Width : ([\d\.]+) Height : ([\d\.]+)', line)
if plc_wh:
canvas_width = float(plc_wh.group(1))
canvas_height = float(plc_wh.group(2))
continue
plc_cr = re.search(r'Columns : ([\d]+) Rows : ([\d]+)', line)
if plc_cr:
grid_cols = int(plc_cr.group(1))
grid_rows = int(plc_cr.group(2))
else:
# Do not parse the rest of the file, since all the comments are at the
# top.
break
if canvas_width and canvas_height and grid_cols and grid_rows:
return canvas_width, canvas_height, grid_cols, grid_rows
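# Example comment lines recognized by the parser above (illustrative values):
#   '# FP bbox: {0.0 0.0} {500.0 400.0}' -> canvas_width=500.0, height=400.0
#   '# Columns : 20 Rows : 10'           -> grid_cols=20, grid_rows=10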
def fix_port_coordinates(plc: plc_client.PlacementCost):
"""Find all ports and fix their coordinates.
Args:
plc: the placement cost object.
"""
for node in nodes_of_types(plc, ['PORT']):
plc.fix_node_coord(node)
# The routing capacities are calculated based on the public information about
# 7nm technology (https://en.wikichip.org/wiki/7_nm_lithography_process)
# with an arbitrary, yet reasonable, assumption of 18% of the tracks for
# the power grids.
def create_placement_cost(
netlist_file: Text,
init_placement: Optional[Text] = None,
overlap_threshold: float = 4e-3,
congestion_smooth_range: int = 2,
# TODO(b/211039937): Increase macro spacing to 3-5um, after matching the
# performance for Ariane.
macro_macro_x_spacing: float = 0.1,
macro_macro_y_spacing: float = 0.1,
boundary_check: bool = False,
horizontal_routes_per_micron: float = 70.33,
vertical_routes_per_micron: float = 74.51,
macro_horizontal_routing_allocation: float = 51.79,
macro_vertical_routing_allocation: float = 51.79,
) -> plc_client.PlacementCost:
"""Creates a placement_cost object.
Args:
netlist_file: Path to the netlist proto text file.
    init_placement: Path to the initial placement .plc file.
    overlap_threshold: Used for macro overlap detection.
    congestion_smooth_range: Smoothing factor used for congestion estimation.
      Congestion is distributed to this many neighboring columns/rows.
    macro_macro_x_spacing: Macro-to-macro x spacing in microns.
    macro_macro_y_spacing: Macro-to-macro y spacing in microns.
    boundary_check: Do a boundary check during node placement.
    horizontal_routes_per_micron: Horizontal route capacity per micron.
    vertical_routes_per_micron: Vertical route capacity per micron.
macro_horizontal_routing_allocation: Macro horizontal routing allocation.
macro_vertical_routing_allocation: Macro vertical routing allocation.
Returns:
A PlacementCost object.
"""
if not netlist_file:
raise ValueError('netlist_file should be provided.')
block_name = extract_attribute_from_comments('Block',
[init_placement, netlist_file])
if not block_name:
logging.warning(
'block_name is not set. '
'Please add the block_name in:\n%s\nor in:\n%s', netlist_file,
init_placement)
plc = plc_client.PlacementCost(
netlist_file,
macro_macro_x_spacing,
macro_macro_y_spacing)
blockages = get_blockages_from_comments([netlist_file, init_placement])
if blockages:
for blockage in blockages:
plc.create_blockage(*blockage)
sizes = extract_sizes_from_comments([netlist_file, init_placement])
if sizes:
canvas_width, canvas_height, grid_cols, grid_rows = sizes
if canvas_width and canvas_height and grid_cols and grid_rows:
plc.set_canvas_size(canvas_width, canvas_height)
plc.set_placement_grid(grid_cols, grid_rows)
plc.set_project_name('circuit_training')
plc.set_block_name(block_name or 'unset_block')
plc.set_routes_per_micron(horizontal_routes_per_micron,
vertical_routes_per_micron)
plc.set_macro_routing_allocation(macro_horizontal_routing_allocation,
macro_vertical_routing_allocation)
plc.set_congestion_smooth_range(congestion_smooth_range)
plc.set_overlap_threshold(overlap_threshold)
plc.set_canvas_boundary_check(boundary_check)
plc.make_soft_macros_square()
if init_placement:
plc.restore_placement(init_placement)
fix_port_coordinates(plc)
return plc
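# Minimal usage sketch (the paths are hypothetical, not from this module):
#   plc = create_placement_cost(
#       netlist_file='/path/to/netlist.pb.txt',
#       init_placement='/path/to/initial.plc')
# Blockages, canvas size, and grid size are read from the files' comments
# sections when present; otherwise the plc defaults are kept.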
def get_node_type_counts(plc: plc_client.PlacementCost) -> Dict[Text, int]:
"""Returns number of each type of nodes in the netlist.
Args:
plc: the placement cost object.
Returns:
Number of each type of node in a dict.
"""
counts = {
'MACRO': 0,
'STDCELL': 0,
'PORT': 0,
'MACRO_PIN': 0,
'SOFT_MACRO': 0,
'HARD_MACRO': 0,
'SOFT_MACRO_PIN': 0,
'HARD_MACRO_PIN': 0
}
for node_index in nodes_of_types(plc,
['MACRO', 'STDCELL', 'PORT', 'MACRO_PIN']):
node_type = plc.get_node_type(node_index)
counts[node_type] += 1
if node_type == 'MACRO':
if plc.is_node_soft_macro(node_index):
counts['SOFT_MACRO'] += 1
else:
counts['HARD_MACRO'] += 1
if node_type == 'MACRO_PIN':
ref_id = plc.get_ref_node_id(node_index)
if plc.is_node_soft_macro(ref_id):
counts['SOFT_MACRO_PIN'] += 1
else:
counts['HARD_MACRO_PIN'] += 1
return counts
def make_blockage_text(plc: plc_client.PlacementCost) -> Text:
ret = ''
for blockage in plc.get_blockages():
ret += 'Blockage : {}\n'.format(' '.join([str(b) for b in blockage]))
return ret
def save_placement(plc: plc_client.PlacementCost,
filename: Text,
                   user_comments: Text = ''):
"""Saves the placement file with some information in the comments section."""
cols, rows = plc.get_grid_num_columns_rows()
width, height = plc.get_canvas_width_height()
hor_routes, ver_routes = plc.get_routes_per_micron()
hor_macro_alloc, ver_macro_alloc = plc.get_macro_routing_allocation()
smooth = plc.get_congestion_smooth_range()
info = textwrap.dedent("""\
Placement file for Circuit Training
Source input file(s) : {src_filename}
This file : {filename}
Date : {date}
Columns : {cols} Rows : {rows}
Width : {width:.3f} Height : {height:.3f}
Area : {area}
Wirelength : {wl:.3f}
Wirelength cost : {wlc:.4f}
Congestion cost : {cong:.4f}
Density cost : {density:.4f}
Project : {project}
Block : {block_name}
Routes per micron, hor : {hor_routes:.3f} ver : {ver_routes:.3f}
Routes used by macros, hor : {hor_macro_alloc:.3f} ver : {ver_macro_alloc:.3f}
Smoothing factor : {smooth}
Overlap threshold : {overlap_threshold}
""".format(
src_filename=plc.get_source_filename(),
filename=filename,
date=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
cols=cols,
rows=rows,
width=width,
height=height,
area=plc.get_area(),
wl=plc.get_wirelength(),
wlc=plc.get_cost(),
cong=plc.get_congestion_cost(),
density=plc.get_density_cost(),
project=plc.get_project_name(),
block_name=plc.get_block_name(),
hor_routes=hor_routes,
ver_routes=ver_routes,
hor_macro_alloc=hor_macro_alloc,
ver_macro_alloc=ver_macro_alloc,
smooth=smooth,
overlap_threshold=plc.get_overlap_threshold()))
info += '\n' + make_blockage_text(plc) + '\n'
info += '\nCounts of node types:\n'
node_type_counts = get_node_type_counts(plc)
for node_type in sorted(node_type_counts):
info += '{:<15} : {:>9}\n'.format(node_type + 's',
node_type_counts[node_type])
if user_comments:
info += '\nUser comments:\n' + user_comments + '\n'
info += '\nnode_index x y orientation fixed'
return plc.save_placement(filename, info)
def fd_placement_schedule(plc: plc_client.PlacementCost,
num_steps: Tuple[int, ...] = (100, 100, 100),
io_factor: float = 1.0,
move_distance_factors: Tuple[float,
...] = (1.0, 1.0, 1.0),
attract_factor: Tuple[float,
...] = (100.0, 1.0e-3, 1.0e-5),
repel_factor: Tuple[float, ...] = (0.0, 1.0e6, 1.0e7),
use_current_loc: bool = False,
move_macros: bool = False) -> None:
"""A placement schedule that uses force directed method.
Args:
plc: The plc object.
num_steps: Number of steps of the force-directed algorithm during each call.
io_factor: I/O attract factor.
move_distance_factors: Maximum distance relative to canvas size that a node
can move in a single step of the force-directed algorithm.
    attract_factor: The spring constants between two connected nodes in the
      force-directed algorithm. The FD algorithm will be called as many times
      as the length of this list; repel_factor must have the same length.
    repel_factor: The repellent factor for spreading the nodes to avoid
      congestion in the force-directed algorithm.
use_current_loc: If true, use the current location as the initial location.
move_macros: If true, also move the macros.
"""
assert len(num_steps) == len(move_distance_factors)
assert len(num_steps) == len(repel_factor)
assert len(num_steps) == len(attract_factor)
canvas_size = max(plc.get_canvas_width_height())
max_move_distance = [
f * canvas_size / s for s, f in zip(num_steps, move_distance_factors)
]
move_stdcells = True
log_scale_conns = False
use_sizes = False
plc.optimize_stdcells(use_current_loc, move_stdcells, move_macros,
log_scale_conns, use_sizes, io_factor, num_steps,
max_move_distance, attract_factor, repel_factor)
def get_ordered_node_indices(mode, plc, exclude_fixed_nodes=True):
"""Returns an ordering of node indices according to the specified mode.
Args:
    mode: Node ordering mode.
    plc: Placement cost object.
    exclude_fixed_nodes: Whether fixed nodes should be excluded.
Returns:
Node indices sorted according to the mode.
"""
macro_indices = plc.get_macro_indices()
hard_macro_indices = [
m for m in macro_indices if not plc.is_node_soft_macro(m)
]
soft_macro_indices = [m for m in macro_indices if plc.is_node_soft_macro(m)]
def macro_area(idx):
w, h = plc.get_node_width_height(idx)
return w * h
if mode == 'descending_size_macro_first':
ordered_indices = (
sorted(hard_macro_indices, key=macro_area)[::-1] +
sorted(soft_macro_indices, key=macro_area)[::-1])
elif mode == 'random':
np.random.shuffle(macro_indices)
ordered_indices = macro_indices
elif mode == 'random_macro_first':
np.random.shuffle(hard_macro_indices)
ordered_indices = hard_macro_indices + soft_macro_indices
else:
raise ValueError('{} is an unsupported node placement mode.'.format(mode))
if exclude_fixed_nodes:
ordered_indices = [m for m in ordered_indices if not plc.is_node_fixed(m)]
return ordered_indices
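# Hedged example of the ordering modes above (plc is assumed to describe a
# small netlist): 'descending_size_macro_first' returns hard macros sorted by
# area, largest first, followed by soft macros sorted the same way; 'random'
# shuffles all macros; 'random_macro_first' shuffles only the hard macros and
# keeps them ahead of the soft macros.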
|
# coding=utf-8
# Copyright 2021 The Circuit Training Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for circuit_training.environment.observation_extractor."""
import os
from absl import flags
from absl import logging
from circuit_training.environment import observation_config
from circuit_training.environment import observation_extractor
from circuit_training.environment import placement_util
from circuit_training.utils import test_utils
import gin
import numpy as np
FLAGS = flags.FLAGS
class ObservationExtractorTest(test_utils.TestCase):
"""Tests for the ObservationExtractor.
# Internal circuit training docs link.
"""
def setUp(self):
super(ObservationExtractorTest, self).setUp()
bindings = """
ObservationConfig.max_num_edges = 8
ObservationConfig.max_num_nodes = 6
ObservationConfig.max_grid_size = 10
"""
gin.parse_config(bindings)
self._observation_config = observation_config.ObservationConfig()
# Macros name : M0, M1, Grp_2
# Order in plc.get_macro_indices(): 0, 1, 2
# Edges: (0, 1), (0, 2)
test_netlist_dir = ('circuit_training/'
'environment/test_data/sample_clustered')
netlist_file = os.path.join(FLAGS.test_srcdir, test_netlist_dir,
'netlist.pb.txt')
plc = placement_util.create_placement_cost(
netlist_file=netlist_file, init_placement='')
plc.set_canvas_size(300, 200)
plc.set_placement_grid(9, 4)
plc.unplace_all_nodes()
    # Manually adds I/O port locations; this step is not needed for real
    # netlists.
plc.update_node_coords('P0', 0.5, 100) # Left
plc.update_node_coords('P1', 150, 199.5) # Top
plc.update_port_sides()
plc.snap_ports_to_edges()
self.extractor = observation_extractor.ObservationExtractor(plc=plc)
def test_static_features(self):
static_obs = self.extractor.get_static_features()
logging.info('static observation: %s', static_obs)
self.assertEqual(static_obs['normalized_num_edges'], 5.0 / 8.0)
self.assertEqual(static_obs['normalized_num_hard_macros'], 2.0 / 6.0)
self.assertEqual(static_obs['normalized_num_soft_macros'], 1.0 / 6.0)
self.assertEqual(static_obs['normalized_num_port_clusters'], 2.0 / 6.0)
self.assertAllClose(static_obs['macros_w'],
np.asarray([120., 80., 0., 0., 0., 0.]) / 300.0)
self.assertAllClose(static_obs['macros_h'],
np.asarray([120., 40., 0., 0., 0., 0.]) / 200.0)
self.assertAllEqual(static_obs['node_types'], [1, 1, 2, 3, 3, 0])
self.assertAllEqual(static_obs['sparse_adj_i'], [0, 0, 1, 1, 2, 0, 0, 0])
self.assertAllEqual(static_obs['sparse_adj_j'], [2, 3, 2, 4, 3, 0, 0, 0])
# Graph Description:
# 0->2, 0->3, 1->2, 1->4, 2->3
# Node 0: two edges
# Node 1: two edges
# Node 2: three edges
# Node 3: two edges
# Node 4: one edge
# The last zero in the array is due to the value of `max_num_nodes`.
self.assertAllEqual(static_obs['edge_counts'], [2, 2, 3, 2, 1, 0])
self.assertAllClose(static_obs['sparse_adj_weight'],
[1.0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0])
def test_initial_dynamic_features(self):
mask = np.zeros(
self._observation_config.max_grid_size *
self._observation_config.max_grid_size,
dtype=np.float32)
dynamic_obs = self.extractor.get_dynamic_features(
previous_node_index=-1, current_node_index=0, mask=mask)
logging.info('dynamic observation: %s', dynamic_obs)
    # Unplaced nodes default to the center of the canvas.
self.assertAllClose(dynamic_obs['locations_x'],
np.asarray([150., 150., 150., 0., 150., 0.0]) / 300.0)
self.assertAllClose(dynamic_obs['locations_y'],
np.asarray([100., 100., 100., 125., 200., 0.0]) / 200.0)
self.assertAllEqual(dynamic_obs['is_node_placed'], [0, 0, 0, 1, 1, 0])
self.assertAllClose(dynamic_obs['mask'],
[0] * (self._observation_config.max_grid_size *
self._observation_config.max_grid_size))
self.assertAllClose(dynamic_obs['current_node'], [0])
def test_initial_all_features(self):
mask = np.zeros(
self._observation_config.max_grid_size *
self._observation_config.max_grid_size,
dtype=np.float32)
all_obs = self.extractor.get_all_features(
previous_node_index=-1, current_node_index=0, mask=mask)
logging.info('All observation: %s', all_obs)
self.assertEqual(all_obs['normalized_num_edges'], 5.0 / 8.0)
self.assertEqual(all_obs['normalized_num_hard_macros'], 2.0 / 6.0)
self.assertEqual(all_obs['normalized_num_soft_macros'], 1.0 / 6.0)
self.assertEqual(all_obs['normalized_num_port_clusters'], 2.0 / 6.0)
self.assertAllClose(all_obs['macros_w'],
np.asarray([120., 80., 0., 0., 0., 0.]) / 300.0)
self.assertAllClose(all_obs['macros_h'],
np.asarray([120., 40., 0., 0., 0., 0.]) / 200.0)
self.assertAllEqual(all_obs['node_types'], [1, 1, 2, 3, 3, 0])
self.assertAllEqual(all_obs['sparse_adj_i'], [0, 0, 1, 1, 2, 0, 0, 0])
self.assertAllEqual(all_obs['sparse_adj_j'], [2, 3, 2, 4, 3, 0, 0, 0])
self.assertAllClose(all_obs['sparse_adj_weight'],
[1.0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0])
self.assertAllClose(all_obs['locations_x'],
np.asarray([150., 150., 150., 0., 150., 0.0]) / 300.0)
self.assertAllClose(all_obs['locations_y'],
np.asarray([100., 100., 100., 125., 200., 0.0]) / 200.0)
self.assertAllEqual(all_obs['is_node_placed'], [0, 0, 0, 1, 1, 0])
self.assertAllClose(all_obs['mask'],
[0] * (self._observation_config.max_grid_size *
self._observation_config.max_grid_size))
self.assertAllClose(all_obs['current_node'], [0])
obs_space = self._observation_config.observation_space
self.assertTrue(obs_space.contains(all_obs))
def test_all_features_after_step(self):
self.extractor.plc.update_node_coords('M0', 100, 120)
mask = np.zeros(
self._observation_config.max_grid_size *
self._observation_config.max_grid_size,
dtype=np.float32)
all_obs = self.extractor.get_all_features(
previous_node_index=0, current_node_index=1, mask=mask)
self.assertAllClose(all_obs['locations_x'],
np.asarray([100., 150., 150., 0., 150., 0.0]) / 300.0)
self.assertAllClose(all_obs['locations_y'],
np.asarray([120., 100., 100., 125., 200., 0.0]) / 200.0)
self.assertAllEqual(all_obs['is_node_placed'], [1, 0, 0, 1, 1, 0])
self.assertAllClose(all_obs['current_node'], [1])
self.extractor.plc.update_node_coords('M1', 200, 150)
all_obs = self.extractor.get_all_features(
previous_node_index=1, current_node_index=2, mask=mask)
self.assertAllClose(all_obs['locations_x'],
np.asarray([100., 200., 150., 0., 150., 0.0]) / 300.0)
self.assertAllClose(all_obs['locations_y'],
np.asarray([120., 150., 100., 125., 200., 0.0]) / 200.0)
self.assertAllEqual(all_obs['is_node_placed'], [1, 1, 0, 1, 1, 0])
self.assertAllClose(all_obs['current_node'], [2])
if __name__ == '__main__':
test_utils.main()
|
# coding=utf-8
# Copyright 2021 The Circuit Training Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Circuit training Environmnet with gin config."""
import datetime
import math
import os
from typing import Any, Dict, Text, Tuple, Optional
from absl import logging
from circuit_training.environment import coordinate_descent_placer as cd_placer
from circuit_training.environment import observation_config
from circuit_training.environment import observation_extractor
from circuit_training.environment import placement_util
import gin
import gym
import numpy as np
import tensorflow as tf
from tf_agents.environments import suite_gym
from tf_agents.environments import wrappers
ObsType = Dict[Text, np.ndarray]
class InfeasibleActionError(ValueError):
"""An infeasible action were passed to the env."""
def __init__(self, action, mask):
"""Initialize an infeasible action error.
Args:
action: Infeasible action that was performed.
mask: The mask associated with the current observation. mask[action] is
`0` for infeasible actions.
"""
ValueError.__init__(self, action, mask)
self.action = action
self.mask = mask
def __str__(self):
return 'Infeasible action (%s) when the mask is (%s)' % (self.action,
self.mask)
@gin.configurable
def cost_fn(plc,
wirelength_weight: float = 1.0,
density_weight: float = 1.0,
congestion_weight: float = 0.5):
"""Returns the RL cost.
Args:
plc: Placement cost object.
wirelength_weight: Weight of wirelength in the reward function.
density_weight: Weight of density in the reward function.
    congestion_weight: Weight of congestion in the reward function, used only
      for legalizing the placement in the greedy std cell placer.
  Returns:
    The RL cost.
  Raises:
    ValueError: When the cost mode is not supported.
  Note: we found the default congestion and density weights more stable.
"""
proxy_cost = 0.0
wirelength = -1.0
congestion = -1.0
density = -1.0
if wirelength_weight > 0.0:
wirelength = plc.get_cost()
proxy_cost += wirelength_weight * wirelength
if congestion_weight > 0.0:
congestion = plc.get_congestion_cost()
proxy_cost += congestion_weight * congestion
if density_weight > 0.0:
density = plc.get_density_cost()
proxy_cost += density_weight * density
info = {
'wirelength': wirelength,
'congestion': congestion,
'density': density,
}
return proxy_cost, info
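# A minimal sketch (not part of the original module) of how the weighted proxy
# cost above combines its three components. `_FakePlc` is a hypothetical
# stand-in for plc_client.PlacementCost, used only for this illustration.
def _demo_cost_fn():
  """Illustrates cost_fn with fixed, made-up component costs."""
  class _FakePlc(object):
    def get_cost(self):  # Wirelength cost.
      return 0.2
    def get_congestion_cost(self):
      return 0.6
    def get_density_cost(self):
      return 0.4
  cost, info = cost_fn(
      _FakePlc(), wirelength_weight=1.0, density_weight=0.1,
      congestion_weight=0.1)
  # cost == 1.0 * 0.2 + 0.1 * 0.6 + 0.1 * 0.4, approximately 0.3.
  return cost, info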
@gin.configurable
class CircuitEnv(object):
"""Defines the CircuitEnv class."""
INFEASIBLE_REWARD = -1.0
def __init__(self,
netlist_file: Text = '',
init_placement: Text = '',
std_cell_placer_mode: Text = 'fd',
global_seed: int = 0,
is_eval: bool = False,
save_best_cost: bool = False,
output_plc_file: Text = '',
make_soft_macros_square: bool = True,
cd_finetune: bool = False,
train_step: Optional[tf.Variable] = None):
"""Creates a CircuitEnv.
Args:
netlist_file: Path to the input netlist file.
      init_placement: Path to the input initial placement file, used to read
        the grid and canvas size.
std_cell_placer_mode: Options for fast std cells placement: `fd` (uses the
force-directed algorithm).
global_seed: Global seed for initializing env features. This seed
should be the same across actors. Not used currently.
is_eval: If set, save the final placement in output_dir.
      save_best_cost: Boolean, if set, saves the placement if its cost is
        better than the previously saved placement.
output_plc_file: The path to save the final placement.
make_soft_macros_square: If True, make the shape of soft macros square
before using analytical std cell placers like FD.
cd_finetune: If True, runs coordinate descent to finetune macro
orientations. Supposed to run in eval only, not training.
train_step: A tf.Variable indicating the training step, only used for
saving plc files in the evaluation.
"""
del global_seed
if not netlist_file:
raise ValueError('netlist_file must be provided.')
self.netlist_file = netlist_file
self._std_cell_placer_mode = std_cell_placer_mode
self._is_eval = is_eval
self._save_best_cost = save_best_cost
self._output_plc_file = output_plc_file
self._output_plc_dir = os.path.dirname(output_plc_file)
self._make_soft_macros_square = make_soft_macros_square
self._cd_finetune = cd_finetune
self._train_step = train_step
self._plc = placement_util.create_placement_cost(
netlist_file=netlist_file, init_placement=init_placement)
# We call ObservationExtractor before unplace_all_nodes, so we use the
    # initial placement in the static features (location_x and location_y).
# This results in better placements.
self._observation_config = observation_config.ObservationConfig()
self._observation_extractor = observation_extractor.ObservationExtractor(
plc=self._plc)
if self._make_soft_macros_square:
# It is better to make the shape of soft macros square before using
# analytical std cell placers like FD.
self._plc.make_soft_macros_square()
self._grid_cols, self._grid_rows = self._plc.get_grid_num_columns_rows()
self._canvas_width, self._canvas_height = self._plc.get_canvas_width_height(
)
self._hard_macro_indices = [
m for m in self._plc.get_macro_indices()
if not self._plc.is_node_soft_macro(m)
]
self._num_hard_macros = len(self._hard_macro_indices)
self._sorted_node_indices = placement_util.get_ordered_node_indices(
mode='descending_size_macro_first', plc=self._plc)
self._sorted_soft_macros = self._sorted_node_indices[self._num_hard_macros:]
# Generate a map from actual macro_index to its position in
# self.macro_indices. Needed because node adjacency matrix is in the same
# node order of plc.get_macro_indices.
self._macro_index_to_pos = {}
for i, macro_index in enumerate(self._plc.get_macro_indices()):
self._macro_index_to_pos[macro_index] = i
self._plc.unplace_all_nodes()
self._saved_cost = np.inf
self._current_actions = []
self._current_node = 0
self._done = False
self._current_mask = self._get_mask()
@property
def observation_space(self) -> gym.spaces.Space:
"""Env Observation space."""
return self._observation_config.observation_space
@property
def action_space(self) -> gym.spaces.Space:
return gym.spaces.Discrete(self._observation_config.max_grid_size**2)
@property
def environment_name(self) -> Text:
return self.netlist_file
def get_static_obs(self):
"""Get the static observation for the environment.
Static observations are invariant across steps on the same netlist, such as
    netlist metadata and the adjacency graphs. This should only be used for
generalized RL.
Returns:
Numpy array representing the observation
"""
return self._observation_extractor.get_static_features()
def _get_mask(self) -> np.ndarray:
"""Gets the node mask for the current node.
Returns:
List of 0s and 1s indicating if action is feasible or not.
"""
if self._done:
mask = np.zeros(self._observation_config.max_grid_size**2, dtype=np.int32)
else:
node_index = self._sorted_node_indices[self._current_node]
mask = np.asarray(self._plc.get_node_mask(node_index), dtype=np.int32)
mask = np.reshape(mask, [self._grid_rows, self._grid_cols])
mask = np.pad(
mask, ((0, self._observation_config.max_grid_size - self._grid_rows),
(0, self._observation_config.max_grid_size - self._grid_cols)),
mode='constant',
constant_values=0)
return np.reshape(
mask, (self._observation_config.max_grid_size**2,)).astype(np.int32)
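  # Sketch of the padding above (illustrative sizes): a 4x9 feasibility mask is
  # reshaped to (4, 9), zero-padded on the bottom/right to
  # (max_grid_size, max_grid_size), and flattened, so every out-of-canvas cell
  # of the padded action space is marked infeasible.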
def _get_obs(self) -> ObsType:
"""Returns the observation."""
if self._current_node > 0:
previous_node_sorted = self._sorted_node_indices[self._current_node - 1]
previous_node_index = self._macro_index_to_pos[previous_node_sorted]
else:
previous_node_index = -1
if self._current_node < self._num_hard_macros:
current_node_sorted = self._sorted_node_indices[self._current_node]
current_node_index = self._macro_index_to_pos[current_node_sorted]
else:
current_node_index = 0
return self._observation_extractor.get_all_features(
previous_node_index=previous_node_index,
current_node_index=current_node_index,
mask=self._current_mask)
def _rl_cost(self) -> float:
"""Returns the cost for RL."""
return cost_fn(self._plc)[0]
def _run_cd(self):
"""Runs coordinate descent to finetune the current placement."""
# CD only modifies macro orientation.
# Plc modified by CD will be reset at the end of the episode.
cd = cd_placer.CoordinateDescentPlacer(
plc=self._plc,
cost_fn=cost_fn,
use_stdcell_placer=True,
optimize_only_orientation=True)
cd.place()
def _save_placement(self, cost: float) -> None:
"""Saves the current placement.
Args:
cost: the current placement cost.
Raises:
IOError: If we cannot write the placement to file.
"""
if not self._save_best_cost or (cost < self._saved_cost and
(math.fabs(cost - self._saved_cost) /
(cost) > 5e-3)):
user_comments = ''
if self._train_step:
user_comments = f'Train step : {self._train_step.numpy()}'
placement_util.save_placement(self._plc, self._output_plc_file,
user_comments)
ts = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
ppo_snapshot_file = os.path.join(
self._output_plc_dir,
f'snapshot_ppo_opt_placement_timestamp_{ts}_cost_{cost:.4f}.plc')
placement_util.save_placement(self._plc, ppo_snapshot_file, user_comments)
self._saved_cost = cost
# Only runs CD if this is the best RL placement seen so far.
if self._cd_finetune:
self._run_cd()
cost = self._rl_cost()
cd_snapshot_file = os.path.join(
self._output_plc_dir,
f'snapshot_ppo_cd_placement_timestamp_{ts}_cost_{cost:.4f}.plc')
placement_util.save_placement(self._plc, cd_snapshot_file,
user_comments)
cd_plc_file = os.path.join(self._output_plc_dir, 'ppo_cd_placement.plc')
placement_util.save_placement(self._plc, cd_plc_file, user_comments)
def reset(self) -> ObsType:
"""Restes the environment.
Returns:
An initial observation.
"""
self._plc.unplace_all_nodes()
self._current_actions = []
self._current_node = 0
self._done = False
self._current_mask = self._get_mask()
return self._get_obs()
def translate_to_original_canvas(self, action: int) -> int:
"""Translates a raw location to real one in the original canvas."""
a_i = action // self._observation_config.max_grid_size
a_j = action % self._observation_config.max_grid_size
if a_i >= self._grid_rows or a_j >= self._grid_cols:
raise InfeasibleActionError(action, self._current_mask)
else:
action = a_i * self._grid_cols + a_j
return action
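    # Worked example for the translation above (illustrative sizes): with
    # max_grid_size=128, grid_rows=4 and grid_cols=9, raw action 130 decodes to
    # a_i=1, a_j=2 on the padded canvas, which maps to 1 * 9 + 2 = 11 on the
    # original 9x4 grid.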
def place_node(self, node_index: int, action: int) -> None:
self._plc.place_node(node_index, self.translate_to_original_canvas(action))
def step(self, action: int) -> Tuple[ObsType, float, bool, Any]:
"""Steps the environment.
Args:
action: The action to take (should be a list of size 1).
Returns:
observation, reward, done, and info.
Raises:
RuntimeError: action taken after episode was done
InfeasibleActionError: bad action taken (action is not in feasible
actions)
"""
if self._done:
raise RuntimeError('Action taken after episode is done.')
action = int(action)
self._current_actions.append(action)
if self._current_mask[action] == 0:
raise InfeasibleActionError(action, self._current_mask)
node_index = self._sorted_node_indices[self._current_node]
self.place_node(node_index, action)
self._current_node += 1
self._done = (self._current_node == self._num_hard_macros)
self._current_mask = self._get_mask()
if not self._done and not np.any(self._current_mask):
logging.info('Actions took before becoming infeasible: %s',
self._current_actions)
info = {
'wirelength': -1.0,
'congestion': -1.0,
'density': -1.0,
}
return self.reset(), self.INFEASIBLE_REWARD, True, info
if self._done:
if self._std_cell_placer_mode == 'fd':
placement_util.fd_placement_schedule(self._plc)
else:
raise ValueError('%s is not a supported std_cell_placer_mode.' %
(self._std_cell_placer_mode))
# Only evaluates placement cost when all nodes are placed.
# All samples in the episode receive the same reward equal to final cost.
# This is realized by setting intermediate steps cost as zero, and
# propagate the final cost with discount factor set to 1 in replay buffer.
if self._done:
cost, cost_info = cost_fn(self._plc)
reward = -cost
info = {
'wirelength': cost_info['wirelength'],
'congestion': cost_info['congestion'],
'density': cost_info['density'],
}
if self._is_eval:
self._save_placement(cost)
else:
reward = 0.0
info = {
'wirelength': -1,
'congestion': -1,
'density': -1,
}
return self._get_obs(), reward, self._done, info
def create_circuit_environment(*args, **kwarg) -> wrappers.ActionClipWrapper:
"""Create an `CircuitEnv` wrapped as a Gym environment.
Args:
*args: Arguments.
**kwarg: keyworded Arguments.
Returns:
PyEnvironment used for training.
"""
env = CircuitEnv(*args, **kwarg)
return wrappers.ActionClipWrapper(suite_gym.wrap_env(env))
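# Minimal usage sketch (the netlist and placement paths are hypothetical):
#   env = create_circuit_environment(
#       netlist_file='/path/to/netlist.pb.txt',
#       init_placement='/path/to/initial.plc')
# The returned environment clips out-of-range actions and can be wrapped in a
# tf_py_environment.TFPyEnvironment for training.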
|
# coding=utf-8
# Copyright 2021 The Circuit Training Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for circuit_training.environment."""
import os
from absl import flags
from circuit_training.environment import environment
from circuit_training.utils import test_utils
import gin
import numpy as np
import tensorflow as tf
from tf_agents import specs
from tf_agents.drivers import py_driver
from tf_agents.environments import tf_py_environment
from tf_agents.policies import random_py_policy
from tf_agents.specs import array_spec
from tf_agents.train.utils import train_utils
from tf_agents.trajectories import policy_step
from tf_agents.trajectories import time_step as ts
# Internal gfile dependencies
FLAGS = flags.FLAGS
def random_action(mask):
valid_actions, = np.nonzero(mask.flatten())
if len(valid_actions): # pylint: disable=g-explicit-length-test
return np.random.choice(valid_actions)
  # If there is no valid choice, then `0` is returned, which results in an
  # infeasible action ending the episode.
  return 0
class _RandomValidCircuitPolicy(random_py_policy.RandomPyPolicy):
"""Policy wrapper for the function `random_action(mask)` above."""
def _action(self, time_step, policy_state):
valid_random_action = random_action(time_step.observation['mask'])
return policy_step.PolicyStep(
action=valid_random_action, state=policy_state)
class _ValidateTimeStepObserver(object):
"""Observer that validates the time steps and collects episode lengths."""
def __init__(self, test_case, time_step_spec):
self._test_case = test_case
self._time_step_spec = time_step_spec
self._current_len = 0
self._episode_lengths = []
@property
def episode_lengths(self):
return self._episode_lengths
def __call__(self, trajectory):
time_step = ts.TimeStep(
trajectory.step_type,
reward=trajectory.reward,
discount=trajectory.discount,
observation=trajectory.observation)
if trajectory.is_last():
self._episode_lengths.append(self._current_len)
self._current_len = 0
else:
self._current_len += 1
self._test_case.assertTrue(
array_spec.check_arrays_nest(time_step, self._time_step_spec))
def infeasible_action(mask):
return np.random.choice(np.nonzero(1 - mask.flatten())[0])
class EnvironmentTest(test_utils.TestCase):
"""Tests for the Environment.
# Internal circuit training docs link.
"""
def test_create_and_obs_space(self):
test_netlist_dir = ('circuit_training/'
'environment/test_data/sample_clustered')
netlist_file = os.path.join(FLAGS.test_srcdir, test_netlist_dir,
'netlist.pb.txt')
init_placement = os.path.join(FLAGS.test_srcdir, test_netlist_dir,
'initial.plc')
env = environment.CircuitEnv(
netlist_file=netlist_file, init_placement=init_placement)
obs = env.reset()
self.assertTrue(env.observation_space.contains(obs))
done = False
while not done:
action = random_action(obs['mask'])
obs, reward, done, _ = env.step(action)
self.assertTrue(env.observation_space.contains(obs))
self.assertIsInstance(reward, float)
self.assertIsInstance(done, bool)
def test_save_file_train_step(self):
test_netlist_dir = ('circuit_training/'
'environment/test_data/sample_clustered')
netlist_file = os.path.join(FLAGS.test_srcdir, test_netlist_dir,
'netlist.pb.txt')
init_placement = os.path.join(FLAGS.test_srcdir, test_netlist_dir,
'initial.plc')
output_dir = self.create_tempdir()
output_plc_file = os.path.join(output_dir, 'ppo_opt_placement.plc')
output_cd_file = os.path.join(output_dir, 'ppo_cd_placement.plc')
train_step = train_utils.create_train_step()
train_step.assign(1234)
env = environment.CircuitEnv(
netlist_file=netlist_file,
init_placement=init_placement,
is_eval=True,
save_best_cost=True,
output_plc_file=output_plc_file,
cd_finetune=True,
train_step=train_step)
obs = env.reset()
done = False
while not done:
action = random_action(obs['mask'])
obs, _, done, _ = env.step(action)
self.assertTrue(os.path.exists(output_plc_file))
with open(output_plc_file) as f:
self.assertIn('Train step : 1234', f.read())
self.assertTrue(os.path.exists(output_cd_file))
with open(output_cd_file) as f:
self.assertIn('Train step : 1234', f.read())
def test_action_space(self):
bindings = """
ObservationConfig.max_grid_size = 128
"""
gin.parse_config(bindings)
test_netlist_dir = ('circuit_training/'
'environment/test_data/sample_clustered')
netlist_file = os.path.join(FLAGS.test_srcdir, test_netlist_dir,
'netlist.pb.txt')
init_placement = os.path.join(FLAGS.test_srcdir, test_netlist_dir,
'initial.plc')
env = environment.CircuitEnv(
netlist_file=netlist_file, init_placement=init_placement)
self.assertEqual(env.action_space.shape, ())
self.assertTrue(env.action_space.contains(0))
self.assertTrue(env.action_space.contains(128**2 - 1))
self.assertFalse(env.action_space.contains(128**2))
mask = env.reset()['mask']
self.assertTrue(mask[0])
self.assertTrue(mask[1 * 128 + 1]) # (1, 1)
# Outside of the canvas:
self.assertFalse(mask[2 * 128 + 2]) # (2, 2)
  def test_infeasible(self):
test_netlist_dir = ('circuit_training/'
'environment/test_data/sample_clustered')
netlist_file = os.path.join(FLAGS.test_srcdir, test_netlist_dir,
'netlist.pb.txt')
init_placement = os.path.join(FLAGS.test_srcdir, test_netlist_dir,
'initial.plc')
env = environment.CircuitEnv(
netlist_file=netlist_file,
init_placement=init_placement,
)
obs = env.reset()
action = random_action(obs['mask'])
obs, _, _, _ = env.step(action)
action = infeasible_action(obs['mask'])
with self.assertRaises(environment.InfeasibleActionError):
env.step(action)
def test_wrap_tfpy_environment(self):
bindings = """
ObservationConfig.max_grid_size = 128
"""
gin.parse_config(bindings)
test_netlist_dir = ('circuit_training/'
'environment/test_data/sample_clustered')
netlist_file = os.path.join(FLAGS.test_srcdir, test_netlist_dir,
'netlist.pb.txt')
init_placement = os.path.join(FLAGS.test_srcdir, test_netlist_dir,
'initial.plc')
env = environment.create_circuit_environment(
netlist_file=netlist_file,
init_placement=init_placement,
)
tf_env = tf_py_environment.TFPyEnvironment(env)
spec = tf_env.action_spec()
self.assertEqual(type(spec), specs.BoundedTensorSpec)
self.assertEqual(spec.dtype, tf.int64)
self.assertEqual(spec.shape, ())
self.assertEqual(spec.minimum, 0)
self.assertEqual(spec.maximum, 128 * 128 - 1)
self.assertEqual(spec.name, 'action')
  def test_validate_circuit_env(self):
test_netlist_dir = ('circuit_training/'
'environment/test_data/sample_clustered')
netlist_file = os.path.join(FLAGS.test_srcdir, test_netlist_dir,
'netlist.pb.txt')
init_placement = os.path.join(FLAGS.test_srcdir, test_netlist_dir,
'initial.plc')
env = environment.create_circuit_environment(
netlist_file=netlist_file,
init_placement=init_placement,
)
# Create a Python policy that provides *valid* random actions.
time_step_spec = env.time_step_spec()
valid_random_policy = _RandomValidCircuitPolicy(
time_step_spec=time_step_spec, action_spec=env.action_spec())
# Create an observer that asserts that the time steps are valid given the
# time step spec of the environment.
validate_time_step = _ValidateTimeStepObserver(
test_case=self, time_step_spec=time_step_spec)
    # Create and run a driver to validate the observed time steps.
driver = py_driver.PyDriver(
env,
valid_random_policy,
observers=[validate_time_step],
max_episodes=10)
driver.run(env.reset())
# Make sure that environment steps were taken.
self.assertLen(validate_time_step.episode_lengths, 10)
episode_lens = np.array(validate_time_step.episode_lengths, dtype=np.int32)
# Check if at least one of the rollouts took more than one step to ensure
# that the time step validation has seen data.
self.assertTrue(np.any(episode_lens > 1))
if __name__ == '__main__':
test_utils.main()
|
# coding=utf-8
# Copyright 2021 The Circuit Training Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Coordinate descent placer library."""
import os
import time
from typing import Callable, Dict, Optional, List, Text, Tuple
from absl import logging
from circuit_training.environment import placement_util
from circuit_training.environment import plc_client
import numpy as np
NS_ORIENTATIONS = ['N', 'FN', 'S', 'FS']
EW_ORIENTATIONS = ['E', 'FE', 'W', 'FW']
class CoordinateDescentPlacer(object):
"""Coordinate descent algorithm to place nodes."""
def __init__(self,
plc: plc_client.PlacementCost,
cost_fn: Callable[[plc_client.PlacementCost],
Tuple[float, Dict[Text, float]]],
epochs: int = 10,
use_stdcell_placer: bool = False,
stdcell_placer: Text = 'fd',
node_order: Text = 'random',
accept_bad_stdcell_moves: bool = False,
stdcell_place_every_n_macros: int = 10,
optimize_only_orientation: bool = False,
cell_search_prob: float = 1.0,
k_distance_bounded_search: bool = True,
k_distance_bound: Optional[int] = None) -> None:
"""Creates a CoordinateDescentPlacer.
Args:
plc: The placement cost object.
cost_fn: The cost function that gets the plc and returns cost and info.
      epochs: Number of epochs (iterations) in the coordinate descent
        algorithm.
use_stdcell_placer: If True, places stdcells using stdcell placer.
      stdcell_placer: Standard cell placer.
      node_order: Order of nodes to place using coordinate descent. One of
        random, descending_size_macro_first, or random_macro_first.
accept_bad_stdcell_moves: If True, accept stdcell moves even if it leads
to a higher cost.
stdcell_place_every_n_macros: Run stdcell placement for every n macros. If
None, run stdcell placement once after all macros are placed.
optimize_only_orientation: If True, only search for best orientation of
the hard macros.
      cell_search_prob: The probability to include a neighborhood cell in the
        search. When it is 1.0, the search descends in the steepest direction.
      k_distance_bounded_search: If True, only search the best locations
        within k grid distance of the currently placed location. Does not
        apply to the FD stdcell placer.
      k_distance_bound: If k_distance_bounded_search is True, only search
        within a neighborhood of at most k_distance_bound grid distance. If
        not specified, it is set to max(cols, rows) // 3.
"""
self.plc = plc
self.cost_fn = cost_fn
self._epochs = epochs
self._node_order = node_order
self._stdcell_place_every_n_macros = stdcell_place_every_n_macros
self._cell_search_prob = cell_search_prob
self._cols, self._rows = self.plc.get_grid_num_columns_rows()
self._k_distance_bound = k_distance_bound or max(self._cols,
self._rows) // 3
self._use_stdcell_placer = use_stdcell_placer
self._stdcell_placer = stdcell_placer
self._accept_bad_stdcell_moves = accept_bad_stdcell_moves
self._optimize_only_orientation = optimize_only_orientation
self._k_distance_bounded_search = k_distance_bounded_search
if self._cell_search_prob < 0 or self._cell_search_prob > 1:
raise ValueError(f'{self._cell_search_prob} should be between 0 and 1.')
# Turn off incremental cost calculation if placing stdcells.
if self._use_stdcell_placer:
plc.set_use_incremental_cost(False)
# Get legal node orientations.
self._node_to_ori = {}
for node in self.plc.get_macro_indices():
if not self.plc.is_node_soft_macro(node):
# TODO(wenjiej): Find orientation when a node is not placed initially.
# Needed only when running CD from an empty grid.
assert self.plc.is_node_placed(node)
cur_ori = self.plc.get_macro_orientation(node)
if cur_ori in NS_ORIENTATIONS:
self._node_to_ori[node] = NS_ORIENTATIONS
elif cur_ori in EW_ORIENTATIONS:
self._node_to_ori[node] = EW_ORIENTATIONS
else:
raise ValueError(f'Unexpected orientation {cur_ori} for node {node}.')
if self._use_stdcell_placer:
plc.allow_hard_macros_over_std_cells(True)
# If node order is random, will shuffle node orders for each iteration.
self._ordered_node_indices = placement_util.get_ordered_node_indices(
self._node_order, self.plc)
# Exclude fixed macros with pre-determined locations.
self._ordered_node_indices = [
m for m in self._ordered_node_indices if not self.plc.is_node_fixed(m)
]
self._soft_macro_indices = [
m for m in self._ordered_node_indices if self.plc.is_node_soft_macro(m)
]
if self._use_stdcell_placer:
# Only include hard macros in self._ordered_node_indices.
self._ordered_node_indices = [
i for i in self._ordered_node_indices
if not self.plc.is_node_soft_macro(i)
]
logging.info('Total number of ordered nodes: %d',
len(self._ordered_node_indices))
logging.info('ordered_node_indices: %s', self._ordered_node_indices)
logging.info('Cost of initial placement: %s', self.report_cost())
def find_best_location(self, node: int, mask: List[int],
locations: List[int]) -> Optional[int]:
"""Given a soft macro, search the best location."""
best_loc = None
best_cost = float('inf')
for loc in locations:
assert mask[loc] == 1
self.plc.place_node(node, loc)
new_cost, _ = self.cost_fn(self.plc)
self.plc.unplace_node(node)
if new_cost < best_cost:
best_loc = loc
best_cost = new_cost
return best_loc
def find_best_location_orientation(
self, node: int, locations: List[int],
orientations: List[Text]) -> Tuple[Optional[int], Optional[Text]]:
"""Given a hard macro, search the best location and orientation."""
assert orientations
best_loc = None
best_ori = None
best_cost = float('inf')
for loc in locations:
for ori in orientations:
self.plc.place_node(node, loc)
self.plc.update_macro_orientation(node, ori)
new_cost, _ = self.cost_fn(self.plc)
self.plc.unplace_node(node)
if new_cost < best_cost:
best_loc = loc
best_ori = ori
best_cost = new_cost
return best_loc, best_ori
def find_best_orientation(self, node: int,
orientations: List[Text]) -> Optional[Text]:
"""Given a hard macro, search the best orientation."""
assert orientations
best_ori = None
best_cost = float('inf')
for ori in orientations:
self.plc.update_macro_orientation(node, ori)
new_cost, _ = self.cost_fn(self.plc)
if new_cost < best_cost:
best_ori = ori
best_cost = new_cost
return best_ori
def _get_row_col_from_cell(self, cell: int) -> Tuple[int, int]:
return cell // self._cols, cell % self._cols
def _get_cell_from_row_col(self, row: int, col: int) -> int:
return int(row * self._cols + col)
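  # Worked example for the two converters above (illustrative grid): with
  # self._cols == 9, cell 21 maps to (row, col) == (2, 3), and
  # _get_cell_from_row_col(2, 3) == 21.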
def _k_distance_bounded_locations(self, curr: int, k: int,
locations: List[int]) -> List[int]:
"""Find k grid distance bounded locations from current cell."""
curr_row, curr_col = self._get_row_col_from_cell(curr)
bounded = []
for c in locations:
if c == curr:
# Always include current location to search.
bounded.append(c)
continue
row, col = self._get_row_col_from_cell(c)
if abs(row - curr_row) + abs(col - curr_col) <= k:
if np.random.random() <= self._cell_search_prob:
bounded.append(c)
return bounded
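  # The bound above uses Manhattan distance: with k == 2 and the current cell
  # at (row, col) == (5, 5), cell (6, 6) is at distance 2 and may be kept
  # (subject to _cell_search_prob), while (7, 7) at distance 4 is excluded.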
def place_node(self, node: int) -> None:
"""Given a node, greedily place the node on the best location wrt cost."""
if not self.plc.is_node_soft_macro(node):
orientations = self._node_to_ori[node]
if self._optimize_only_orientation:
        # Placing and unplacing macros causes weird problems in FD.
# See cl/316830807. Avoid unplacing for orientation optimization.
best_ori = self.find_best_orientation(node, orientations)
self.plc.update_macro_orientation(node, best_ori)
return
# Unplace the node from its current location to prepare placing node.
curr_cell = self.plc.get_grid_cell_of_node(node)
self.plc.unplace_node(node)
mask = self.plc.get_node_mask(node)
locations = [i for i, m in enumerate(mask) if m > 0]
if not locations:
# FD or DP are run between macro moves (_stdcell_place_every_n_macros).
# They may place stdcells in a way that invalidates prior macro locations.
# Stay with previous macro locations in this case.
locations = [curr_cell]
logging.info(
'Cannot find feasible locations for node %d. '
'Use its current location %d.', node, curr_cell)
if self._k_distance_bounded_search:
k = self._k_distance_bound
# Increase search scope until there is at least one feasible location.
while True:
bounded = self._k_distance_bounded_locations(curr_cell, k, locations)
if bounded:
locations = bounded
break
else:
k += self._k_distance_bound
if self.plc.is_node_soft_macro(node):
best_loc = self.find_best_location(node, mask, locations)
self.plc.place_node(node, best_loc)
else:
best_loc, best_ori = self.find_best_location_orientation(
node, locations, orientations)
self.plc.place_node(node, best_loc)
self.plc.update_macro_orientation(node, best_ori)
def place_stdcells(self) -> None:
"""Place stdcells."""
logging.info('Place stdcells using %s', self._stdcell_placer)
old_cost, _ = self.cost_fn(self.plc)
old_coordinates = [
self.plc.get_node_location(m) for m in self._soft_macro_indices
]
if self._stdcell_placer == 'fd':
# Use default FD schedule.
# Use current stdcell location to incrementally change stdcell locations
# between iterations.
placement_util.fd_placement_schedule(self.plc, use_current_loc=True)
else:
raise ValueError(
f'stdcell placer {self._stdcell_placer} is not supported')
new_cost, _ = self.cost_fn(self.plc)
if new_cost > old_cost and not self._accept_bad_stdcell_moves:
logging.info('Bad stdcell placement moves not accepted.')
# Revert to old node coordinates.
for i, (x, y) in enumerate(old_coordinates):
self.plc.update_node_coords(self._soft_macro_indices[i], x, y)
def optimize(self, epoch: int) -> None:
"""Performs one iteration (epoch) of coordinate descent on all nodes."""
logging.info('Starts optimization in epoch %d.', epoch)
start_time = time.time()
node_indices = self._ordered_node_indices
if self._node_order == 'random':
np.random.shuffle(node_indices)
for i, node in enumerate(node_indices):
if i % 25 == 0:
logging.info('Number of nodes placed by CD: %d', i)
self.place_node(node)
if (self._use_stdcell_placer and self._stdcell_place_every_n_macros and
(i + 1) % self._stdcell_place_every_n_macros == 0):
self.place_stdcells()
# Always run stdcell placement after all macros are placed.
if self._use_stdcell_placer:
self.place_stdcells()
logging.info('One iteration of coordinate descent takes %f seconds.',
(time.time() - start_time))
def report_cost(self) -> Text:
proxy_cost, info = self.cost_fn(self.plc)
wirelength = info['wirelength']
congestion = info['congestion']
density = info['density']
    return ('(Objective cost, wirelength, congestion, density): ' +
            '({:.4f}, {:.4f}, {:.4f}, {:.4f})'.format(proxy_cost, wirelength,
                                                      congestion, density))
def place(self) -> None:
"""Place all nodes using coordinate descent algorithm for some iterations."""
# Run stdcell placement at the beginning of the optimization loop if needed.
# Use stdcell locations from initial placement.
if self._use_stdcell_placer:
self.place_stdcells()
prev_cost, _ = self.cost_fn(self.plc)
for i in range(self._epochs):
self.optimize(i)
logging.info('Cost after %d epochs: %s', i + 1, self.report_cost())
curr_cost, _ = self.cost_fn(self.plc)
if (prev_cost - curr_cost) / prev_cost < 1e-3:
break
prev_cost = curr_cost
def save_placement(self, output_dir: Text, plc_filename: Text) -> None:
"""Saves a placement with current plc."""
proxy_cost, info = self.cost_fn(self.plc)
wirelength = info['wirelength']
congestion = info['congestion']
density = info['density']
plc_filename_with_cost = 'cost_{:.4f}_w_{:.4f}_c_{:.4f}_d_{:.4f}_{}'.format(
proxy_cost, wirelength, congestion, density, plc_filename)
output_plc_file = os.path.join(output_dir, plc_filename_with_cost)
placement_util.save_placement(self.plc, output_plc_file)
# TODO(wenjiej): Enable saving plc view.
# placement_util.save_as_svg(self.plc, f'{output_plc_file}.svg')
|
# coding=utf-8
# Copyright 2021 The Circuit Training Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for coordinate_descent_placer."""
import functools
import os
import random
from absl import flags
from absl import logging
from absl.testing import parameterized
from circuit_training.environment import coordinate_descent_placer
from circuit_training.environment import environment
from circuit_training.environment import placement_util
from circuit_training.utils import test_utils
import numpy as np
FLAGS = flags.FLAGS
class CoordinateDescentPlacerTest(parameterized.TestCase, test_utils.TestCase):
def setUp(self):
super(CoordinateDescentPlacerTest, self).setUp()
random.seed(666)
np.random.seed(666)
def _create_plc(self, block_name):
test_netlist_dir = ('circuit_training/'
'environment/test_data')
test_netlist_dir = os.path.join(FLAGS.test_srcdir, test_netlist_dir,
block_name)
netlist_file = os.path.join(test_netlist_dir, 'netlist.pb.txt')
init_placement = os.path.join(test_netlist_dir, 'initial.plc')
return placement_util.create_placement_cost(netlist_file, init_placement)
def _randomize_placement(self, plc):
plc.unplace_all_nodes()
grid_cols, grid_rows = plc.get_grid_num_columns_rows()
grid_size = grid_rows * grid_cols
macros = plc.get_macro_indices()
num_macros = len(macros)
# Sample random locations for all nodes.
locations = random.sample(list(range(grid_size)), num_macros)
self.assertLen(locations, len(macros))
for i, m in enumerate(macros):
plc.place_node(m, locations[i])
# TODO(wenjiej): Add a FD test for blocks that have stdcells.
@parameterized.parameters(
('macro_tiles_10x10', True),
('macro_tiles_10x10', False),
)
def test_cd(self, block_name, optimize_only_orientation):
plc = self._create_plc(block_name)
cost_fn = functools.partial(
environment.cost_fn,
wirelength_weight=1.0,
density_weight=0.1,
congestion_weight=0.1)
cd_placer = coordinate_descent_placer.CoordinateDescentPlacer(
plc,
cost_fn,
epochs=3,
k_distance_bound=3,
stdcell_place_every_n_macros=10,
use_stdcell_placer=False,
optimize_only_orientation=optimize_only_orientation,
)
self._randomize_placement(plc)
before_cd_cost = cost_fn(plc)[0]
cd_placer.place()
after_cd_cost = cost_fn(plc)[0]
logging.info('before_cd_cost: %f', before_cd_cost)
logging.info('after_cd_cost: %f', after_cd_cost)
self.assertLess(after_cd_cost, before_cd_cost)
if __name__ == '__main__':
test_utils.main()
|
# coding=utf-8
# Copyright 2021 The Circuit Training Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This class extracts features from observations."""
from typing import Dict, Text, Tuple
from circuit_training.environment import observation_config
from circuit_training.environment import plc_client
import gin
import numpy as np
@gin.configurable
class ObservationExtractor(object):
"""Extracts observation features from plc."""
EPSILON = 1E-6
def __init__(self,
plc: plc_client.PlacementCost,
default_location_x: float = 0.5,
default_location_y: float = 0.5):
self.plc = plc
self._observation_config = observation_config.ObservationConfig()
self._default_location_x = default_location_x
self._default_location_y = default_location_y
self.width, self.height = self.plc.get_canvas_width_height()
self.num_cols, self.num_rows = self.plc.get_grid_num_columns_rows()
self.grid_width = self.width / self.num_cols
self.grid_height = self.height / self.num_rows
    # Since there can be too many I/O ports for the model to process
    # individually, ports located in the same grid cell are clustered
    # together.
self.adj_vec, grid_cell_of_clustered_ports_vec = self.plc.get_macro_and_clustered_port_adjacency(
)
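    # adj_vec is the flattened (num_nodes x num_nodes) symmetric adjacency
    # matrix over macros and clustered ports; its shape is asserted in
    # _extract_macro_and_port_adj_matrix below.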
self.clustered_port_locations_vec = [
self._get_clustered_port_locations(i)
for i in grid_cell_of_clustered_ports_vec
]
# Extract static features.
self._features = self._extract_static_features()
def _extract_static_features(self) -> Dict[Text, np.ndarray]:
"""Static features that are invariant across training steps."""
features = dict()
self._extract_num_macros(features)
self._extract_technology_info(features)
self._extract_node_types(features)
self._extract_macro_size(features)
self._extract_macro_and_port_adj_matrix(features)
self._extract_canvas_size(features)
self._extract_grid_size(features)
self._extract_initial_node_locations(features)
self._extract_normalized_static_features(features)
return features
def _extract_normalized_static_features(
self, features: Dict[Text, np.ndarray]) -> None:
"""Normalizes static features."""
self._add_netlist_metadata(features)
self._normalize_adj_matrix(features)
self._pad_adj_matrix(features)
self._pad_macro_static_features(features)
self._normalize_macro_size_by_canvas(features)
self._normalize_grid_size(features)
self._normalize_locations_by_canvas(features)
    self._replace_unplaced_node_location(features)
self._pad_macro_dynamic_features(features)
def _extract_num_macros(self, features: Dict[Text, np.ndarray]) -> None:
features['num_macros'] = np.asarray([len(self.plc.get_macro_indices())
]).astype(np.int32)
def _extract_technology_info(self, features: Dict[Text, np.ndarray]) -> None:
"""Extracts Technology-related information."""
routing_resources = {
'horizontal_routes_per_micron':
self.plc.get_routes_per_micron()[0],
'vertical_routes_per_micron':
self.plc.get_routes_per_micron()[1],
'macro_horizontal_routing_allocation':
self.plc.get_macro_routing_allocation()[0],
        'macro_vertical_routing_allocation':
            self.plc.get_macro_routing_allocation()[1],
}
for k in routing_resources:
features[k] = np.asarray([routing_resources[k]]).astype(np.float32)
def _extract_initial_node_locations(self, features: Dict[Text,
np.ndarray]) -> None:
"""Extracts initial node locations."""
locations_x = []
locations_y = []
is_node_placed = []
for macro_idx in self.plc.get_macro_indices():
x, y = self.plc.get_node_location(macro_idx)
locations_x.append(x)
locations_y.append(y)
is_node_placed.append(1 if self.plc.is_node_placed(macro_idx) else 0)
for x, y in self.clustered_port_locations_vec:
locations_x.append(x)
locations_y.append(y)
is_node_placed.append(1)
features['locations_x'] = np.asarray(locations_x).astype(np.float32)
features['locations_y'] = np.asarray(locations_y).astype(np.float32)
features['is_node_placed'] = np.asarray(is_node_placed).astype(np.int32)
def _extract_node_types(self, features: Dict[Text, np.ndarray]) -> None:
"""Extracts node types."""
types = []
for macro_idx in self.plc.get_macro_indices():
if self.plc.is_node_soft_macro(macro_idx):
types.append(observation_config.SOFT_MACRO)
else:
types.append(observation_config.HARD_MACRO)
for _ in range(len(self.clustered_port_locations_vec)):
types.append(observation_config.PORT_CLUSTER)
features['node_types'] = np.asarray(types).astype(np.int32)
def _extract_macro_size(self, features: Dict[Text, np.ndarray]) -> None:
"""Extracts macro sizes."""
macros_w = []
macros_h = []
for macro_idx in self.plc.get_macro_indices():
if self.plc.is_node_soft_macro(macro_idx):
# Width and height of soft macros are set to zero.
width = 0
height = 0
else:
width, height = self.plc.get_node_width_height(macro_idx)
macros_w.append(width)
macros_h.append(height)
for _ in range(len(self.clustered_port_locations_vec)):
macros_w.append(0)
macros_h.append(0)
features['macros_w'] = np.asarray(macros_w).astype(np.float32)
features['macros_h'] = np.asarray(macros_h).astype(np.float32)
def _extract_macro_and_port_adj_matrix(
self, features: Dict[Text, np.ndarray]) -> None:
"""Extracts adjacency matrix."""
num_nodes = len(self.plc.get_macro_indices()) + len(
self.clustered_port_locations_vec)
assert num_nodes * num_nodes == len(self.adj_vec)
sparse_adj_i = []
sparse_adj_j = []
sparse_adj_weight = []
edge_counts = np.zeros((self._observation_config.max_num_nodes,),
dtype=np.int32)
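    # Traverse only the upper triangle (j > i): the adjacency matrix is
    # symmetric, so each undirected edge is recorded once while degrees are
    # accumulated for both endpoints.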
for i in range(num_nodes):
for j in range(i + 1, num_nodes):
weight = self.adj_vec[i + num_nodes * j]
if weight > 0:
sparse_adj_i.append(i)
sparse_adj_j.append(j)
sparse_adj_weight.append(weight)
edge_counts[i] += 1
edge_counts[j] += 1
features['sparse_adj_i'] = np.asarray(sparse_adj_i).astype(np.int32)
features['sparse_adj_j'] = np.asarray(sparse_adj_j).astype(np.int32)
features['sparse_adj_weight'] = np.asarray(sparse_adj_weight).astype(
np.float32)
features['edge_counts'] = edge_counts
def _extract_canvas_size(self, features: Dict[Text, np.ndarray]) -> None:
features['canvas_width'] = np.asarray([self.width])
features['canvas_height'] = np.asarray([self.height])
def _extract_grid_size(self, features: Dict[Text, np.ndarray]) -> None:
features['grid_cols'] = np.asarray([self.num_cols]).astype(np.float32)
features['grid_rows'] = np.asarray([self.num_rows]).astype(np.float32)
def _get_clustered_port_locations(
self, grid_cell_index: int) -> Tuple[float, float]:
"""Returns clustered port locations.
This function returns an approximation location of the ports in a grid
cell. Depending on the cell location in the canvas, the approximation
differs.
Args:
grid_cell_index: The index of the grid cell where the cluster port is
located.
Returns:
A tuple of float: Approximate x, y location of the port cluster in the
grid cell in the same unit as canvas width and height (micron).
"""
col = grid_cell_index % self.num_cols
row = grid_cell_index // self.num_cols
if col == 0 and row == 0:
return 0, 0
elif col == 0 and row == self.num_rows - 1:
return 0, self.height
elif col == self.num_cols - 1 and row == 0:
return self.width, 0
elif col == self.num_cols - 1 and row == self.num_rows - 1:
return self.width, self.height
elif col == 0:
return 0, (row + 0.5) * self.grid_height
elif col == self.num_cols - 1:
return self.width, (row + 0.5) * self.grid_height
elif row == 0:
return (col + 0.5) * self.grid_width, 0
elif row == self.num_rows - 1:
return (col + 0.5) * self.grid_width, self.height
else:
return (col + 0.5) * self.grid_width, (row + 0.5) * self.grid_height
def _add_netlist_metadata(self, features: Dict[Text, np.ndarray]) -> None:
"""Adds netlist metadata info."""
features['normalized_num_edges'] = np.asarray([
np.sum(features['sparse_adj_weight']) /
self._observation_config.max_num_edges
]).astype(np.float32)
features['normalized_num_hard_macros'] = np.asarray([
np.sum(
np.equal(features['node_types'],
observation_config.HARD_MACRO).astype(np.float32)) /
self._observation_config.max_num_nodes
]).astype(np.float32)
features['normalized_num_soft_macros'] = np.asarray([
np.sum(
np.equal(features['node_types'],
observation_config.SOFT_MACRO).astype(np.float32)) /
self._observation_config.max_num_nodes
]).astype(np.float32)
features['normalized_num_port_clusters'] = np.asarray([
np.sum(
np.equal(features['node_types'],
observation_config.PORT_CLUSTER).astype(np.float32)) /
self._observation_config.max_num_nodes
]).astype(np.float32)
def _normalize_adj_matrix(self, features: Dict[Text, np.ndarray]) -> None:
"""Normalizes adj matrix weights."""
mean_weight = np.mean(features['sparse_adj_weight'])
features['sparse_adj_weight'] = (
features['sparse_adj_weight'] /
(mean_weight + ObservationExtractor.EPSILON)).astype(np.float32)
def _pad_1d_tensor(self, tensor: np.ndarray, pad_size: int) -> np.ndarray:
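    # E.g. _pad_1d_tensor(np.array([1, 2]), 4) -> array([1, 2, 0, 0]).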
return np.pad(
tensor, (0, pad_size - tensor.shape[0]),
mode='constant',
constant_values=0)
def _pad_adj_matrix(self, features: Dict[Text, np.ndarray]) -> None:
"""Pads indices and weights with zero to make their shape known."""
for var in ['sparse_adj_i', 'sparse_adj_j', 'sparse_adj_weight']:
features[var] = self._pad_1d_tensor(
features[var], self._observation_config.max_num_edges)
def _pad_macro_static_features(self, features: Dict[Text,
np.ndarray]) -> None:
"""Pads macro features to make their shape knwon."""
for var in [
'macros_w',
'macros_h',
'node_types',
]:
features[var] = self._pad_1d_tensor(
features[var], self._observation_config.max_num_nodes)
def _pad_macro_dynamic_features(self, features: Dict[Text,
np.ndarray]) -> None:
"""Pads macro features to make their shape knwon."""
for var in [
'locations_x',
'locations_y',
'is_node_placed',
]:
features[var] = self._pad_1d_tensor(
features[var], self._observation_config.max_num_nodes)
def _normalize_grid_size(self, features: Dict[Text, np.ndarray]) -> None:
features['grid_cols'] = (features['grid_cols'] /
self._observation_config.max_grid_size).astype(
np.float32)
features['grid_rows'] = (features['grid_rows'] /
self._observation_config.max_grid_size).astype(
np.float32)
def _normalize_macro_size_by_canvas(self, features: Dict[Text,
np.ndarray]) -> None:
"""Normalizes macro sizes with the canvas size."""
features['macros_w'] = (
features['macros_w'] /
(features['canvas_width'] + ObservationExtractor.EPSILON)).astype(
np.float32)
features['macros_h'] = (
features['macros_h'] /
(features['canvas_height'] + ObservationExtractor.EPSILON)).astype(
np.float32)
def _normalize_locations_by_canvas(self, features: Dict[Text,
np.ndarray]) -> None:
"""Normalizes locations with the canvas size."""
features['locations_x'] = (
features['locations_x'] /
(features['canvas_width'] + ObservationExtractor.EPSILON)).astype(
np.float32)
features['locations_y'] = (
features['locations_y'] /
(features['canvas_height'] + ObservationExtractor.EPSILON)).astype(
np.float32)
  def _replace_unplaced_node_location(self, features: Dict[Text,
                                                           np.ndarray]) -> None:
    """Replaces the location of unplaced macros with a constant."""
is_node_placed = np.equal(features['is_node_placed'], 1)
features['locations_x'] = np.where(
is_node_placed,
features['locations_x'],
self._default_location_x * np.ones_like(features['locations_x']),
).astype(np.float32)
features['locations_y'] = np.where(
is_node_placed,
features['locations_y'],
self._default_location_y * np.ones_like(features['locations_y']),
).astype(np.float32)
def get_static_features(self) -> Dict[Text, np.ndarray]:
return {
key: self._features[key]
for key in observation_config.STATIC_OBSERVATIONS
}
def get_initial_features(self) -> Dict[Text, np.ndarray]:
return {
key: self._features[key]
for key in observation_config.INITIAL_OBSERVATIONS
}
def _update_dynamic_features(self, previous_node_index: int,
current_node_index: int,
mask: np.ndarray) -> None:
if previous_node_index >= 0:
x, y = self.plc.get_node_location(
self.plc.get_macro_indices()[previous_node_index])
self._features['locations_x'][previous_node_index] = x / (
self.width + ObservationExtractor.EPSILON)
self._features['locations_y'][previous_node_index] = y / (
self.height + ObservationExtractor.EPSILON)
self._features['is_node_placed'][previous_node_index] = 1
self._features['mask'] = mask.astype(np.int32)
self._features['current_node'] = np.asarray([current_node_index
]).astype(np.int32)
def get_dynamic_features(self, previous_node_index: int,
current_node_index: int,
mask: np.ndarray) -> Dict[Text, np.ndarray]:
self._update_dynamic_features(previous_node_index, current_node_index, mask)
return {
key: self._features[key]
for key in observation_config.DYNAMIC_OBSERVATIONS
if key in self._features
}
def get_all_features(self, previous_node_index: int, current_node_index: int,
mask: np.ndarray) -> Dict[Text, np.ndarray]:
features = self.get_static_features()
features.update(
self.get_dynamic_features(
previous_node_index=previous_node_index,
current_node_index=current_node_index,
mask=mask))
return features
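
# Usage sketch (illustrative only; assumes an already-constructed
# plc_client.PlacementCost instance `plc` and a valid `mask` array):
#
#   extractor = ObservationExtractor(plc=plc)
#   static_obs = extractor.get_static_features()
#   all_obs = extractor.get_all_features(
#       previous_node_index=-1, current_node_index=0, mask=mask)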
|
# coding=utf-8
# Copyright 2021 The Circuit Training Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A class to store the observation shape and sizes."""
from typing import Dict, List, Text, Tuple, Union
import gin
import gym
import numpy as np
import tensorflow as tf
TensorType = Union[np.ndarray, tf.Tensor]
FeatureKeyType = Union[List[Text], Tuple[Text, ...]]
HARD_MACRO = 1
SOFT_MACRO = 2
PORT_CLUSTER = 3
NETLIST_METADATA = (
'normalized_num_edges',
'normalized_num_hard_macros',
'normalized_num_soft_macros',
'normalized_num_port_clusters',
'horizontal_routes_per_micron',
'vertical_routes_per_micron',
'macro_horizontal_routing_allocation',
'macro_vertical_routing_allocation',
'grid_cols',
'grid_rows',
)
GRAPH_ADJACENCY_MATRIX = ('sparse_adj_i', 'sparse_adj_j', 'sparse_adj_weight',
'edge_counts')
NODE_STATIC_FEATURES = (
'macros_w',
'macros_h',
'node_types',
)
STATIC_OBSERVATIONS = (
NETLIST_METADATA + GRAPH_ADJACENCY_MATRIX + NODE_STATIC_FEATURES)
INITIAL_DYNAMIC_OBSERVATIONS = (
'locations_x',
'locations_y',
'is_node_placed',
)
DYNAMIC_OBSERVATIONS = (
'locations_x',
'locations_y',
'is_node_placed',
'current_node',
'mask',
)
ALL_OBSERVATIONS = STATIC_OBSERVATIONS + DYNAMIC_OBSERVATIONS
INITIAL_OBSERVATIONS = STATIC_OBSERVATIONS + INITIAL_DYNAMIC_OBSERVATIONS
@gin.configurable
class ObservationConfig(object):
"""A class that contains shared configs for observation."""
# The default numbers are the maximum number of nodes, edges, and grid size
# on a set of TPU blocks.
# Large numbers may cause GPU/TPU OOM during training.
def __init__(self,
max_num_nodes: int = 4700,
max_num_edges: int = 28400,
max_grid_size: int = 128):
self.max_num_edges = max_num_edges
self.max_num_nodes = max_num_nodes
self.max_grid_size = max_grid_size
@property
def observation_space(self) -> gym.spaces.Space:
"""Env Observation space."""
return gym.spaces.Dict({
'normalized_num_edges':
gym.spaces.Box(low=0, high=1, shape=(1,)),
'normalized_num_hard_macros':
gym.spaces.Box(low=0, high=1, shape=(1,)),
'normalized_num_soft_macros':
gym.spaces.Box(low=0, high=1, shape=(1,)),
'normalized_num_port_clusters':
gym.spaces.Box(low=0, high=1, shape=(1,)),
'horizontal_routes_per_micron':
gym.spaces.Box(low=0, high=100, shape=(1,)),
'vertical_routes_per_micron':
gym.spaces.Box(low=0, high=100, shape=(1,)),
'macro_horizontal_routing_allocation':
gym.spaces.Box(low=0, high=100, shape=(1,)),
'macro_vertical_routing_allocation':
gym.spaces.Box(low=0, high=100, shape=(1,)),
'sparse_adj_weight':
gym.spaces.Box(low=0, high=100, shape=(self.max_num_edges,)),
'sparse_adj_i':
gym.spaces.Box(
low=0,
high=self.max_num_nodes - 1,
shape=(self.max_num_edges,),
dtype=np.int32),
'sparse_adj_j':
gym.spaces.Box(
low=0,
high=self.max_num_nodes - 1,
shape=(self.max_num_edges,),
dtype=np.int32),
'edge_counts':
gym.spaces.Box(
low=0,
high=self.max_num_edges - 1,
shape=(self.max_num_nodes,),
dtype=np.int32),
'node_types':
gym.spaces.Box(
low=0, high=3, shape=(self.max_num_nodes,), dtype=np.int32),
'is_node_placed':
gym.spaces.Box(
low=0, high=1, shape=(self.max_num_nodes,), dtype=np.int32),
'macros_w':
gym.spaces.Box(low=0, high=1, shape=(self.max_num_nodes,)),
'macros_h':
gym.spaces.Box(low=0, high=1, shape=(self.max_num_nodes,)),
'locations_x':
gym.spaces.Box(low=0, high=1, shape=(self.max_num_nodes,)),
'locations_y':
gym.spaces.Box(low=0, high=1, shape=(self.max_num_nodes,)),
'grid_cols':
gym.spaces.Box(low=0, high=1, shape=(1,)),
'grid_rows':
gym.spaces.Box(low=0, high=1, shape=(1,)),
'current_node':
gym.spaces.Box(
low=0, high=self.max_num_nodes - 1, shape=(1,), dtype=np.int32),
'mask':
gym.spaces.Box(
low=0, high=1, shape=(self.max_grid_size**2,), dtype=np.int32),
})
def _to_dict(
flatten_obs: TensorType,
keys: FeatureKeyType) -> Dict[Text, TensorType]:
obs_space = ObservationConfig().observation_space
splits = [obs_space[k].shape[0] for k in keys]
splitted_obs = tf.split(flatten_obs, splits, axis=-1)
return {k: o for o, k in zip(splitted_obs, keys)}
def _flatten(dict_obs: Dict[Text, TensorType],
keys: FeatureKeyType) -> TensorType:
out = [np.asarray(dict_obs[k]) for k in keys]
return np.concatenate(out, axis=-1)
def flatten_static(dict_obs: Dict[Text, TensorType]) -> TensorType:
return _flatten(dict_obs=dict_obs, keys=STATIC_OBSERVATIONS)
def flatten_dynamic(dict_obs: Dict[Text, TensorType]) -> TensorType:
return _flatten(dict_obs=dict_obs, keys=DYNAMIC_OBSERVATIONS)
def flatten_all(dict_obs: Dict[Text, TensorType]) -> TensorType:
return _flatten(dict_obs=dict_obs, keys=ALL_OBSERVATIONS)
def flatten_initial(dict_obs: Dict[Text, TensorType]) -> TensorType:
return _flatten(dict_obs=dict_obs, keys=INITIAL_OBSERVATIONS)
def to_dict_static(flatten_obs: TensorType) -> Dict[Text, TensorType]:
"""Convert the flattend numpy array of static observations back to a dict.
Args:
flatten_obs: a numpy array of static observations.
Returns:
A dict representation of the observations.
"""
return _to_dict(flatten_obs=flatten_obs, keys=STATIC_OBSERVATIONS)
def to_dict_dynamic(flatten_obs: TensorType) -> Dict[Text, TensorType]:
"""Convert the flattend numpy array of dynamic observations back to a dict.
Args:
flatten_obs: a numpy array of dynamic observations.
Returns:
A dict representation of the observations.
"""
return _to_dict(flatten_obs=flatten_obs, keys=DYNAMIC_OBSERVATIONS)
def to_dict_all(flatten_obs: TensorType) -> Dict[Text, TensorType]:
"""Convert the flattend numpy array of observations back to a dict.
Args:
flatten_obs: a numpy array of observations.
Returns:
A dict representation of the observations.
"""
return _to_dict(flatten_obs=flatten_obs, keys=ALL_OBSERVATIONS)
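
# Round-trip sketch (illustrative only; assumes `obs` is a dict of numpy
# observations matching ObservationConfig().observation_space):
#
#   flat = flatten_static(obs)        # 1-D array of concatenated features
#   restored = to_dict_static(flat)   # dict keyed by STATIC_OBSERVATIONS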
|
# coding=utf-8
# Copyright 2021 The Circuit Training Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for plc_client."""
import os
from absl import flags
from circuit_training.environment import plc_client
from circuit_training.utils import test_utils
import gin
FLAGS = flags.FLAGS
MACRO_ADJACENCY = [
0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0,
3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0,
0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0,
0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 0,
0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0,
0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0,
0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0,
0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3,
0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0,
3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3,
0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0,
3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0,
0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0,
0, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0,
0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0,
0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0,
0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0,
0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3,
0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0,
3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0,
0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0,
0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 0,
0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0,
0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0,
0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0,
0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0,
0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3,
0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0,
3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0,
0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0,
0, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0,
0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0,
0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0,
0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0,
0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0,
0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3,
0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0,
3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0,
0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0,
0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 0,
0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0,
0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0,
0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0,
0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3,
0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0,
3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3,
0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0,
3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0,
0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0,
0, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0,
0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0,
0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0,
0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0,
0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3,
0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0
]
class PlcClientTest(test_utils.TestCase):
"""Tests for the PlcClient.
# Internal circuit training docs link.
"""
def test_plc_client(self):
test_netlist_dir = ('circuit_training/'
'environment/test_data/macro_tiles_10x10')
netlist_file = os.path.join(FLAGS.test_srcdir, test_netlist_dir,
'netlist.pb.txt')
plc = plc_client.PlacementCost(netlist_file)
self.assertAlmostEqual(plc.get_cost(), 0.007745966692414834)
self.assertEqual(plc.get_area(), 250000)
self.assertEqual(plc.get_wirelength(), 5400)
self.assertAlmostEqual(plc.get_congestion_cost(), 1.124405186246434)
self.assertAlmostEqual(plc.get_density_cost(), 0.3570806661517036)
self.assertTrue(plc.set_canvas_size(1200.0, 1200.0))
self.assertEqual(plc.get_canvas_width_height(), (1200.0, 1200.0))
self.assertTrue(plc.set_placement_grid(20, 20))
self.assertEqual(plc.get_grid_num_columns_rows(), (20, 20))
self.assertEqual(plc.get_macro_indices(), [
0, 13, 26, 39, 52, 65, 78, 91, 104, 117, 130, 143, 156, 169, 182, 195,
208, 221, 234, 247, 260, 273, 286, 299, 312, 325, 338, 351, 364, 377,
390, 403, 416, 429, 442, 455, 468, 481, 494, 507, 520, 533, 546, 559,
572, 585, 598, 611, 624, 637, 650, 663, 676, 689, 702, 715, 728, 741,
754, 767, 780, 793, 806, 819, 832, 845, 858, 871, 884, 897, 910, 923,
936, 949, 962, 975, 988, 1001, 1014, 1027, 1040, 1053, 1066, 1079, 1092,
1105, 1118, 1131, 1144, 1157, 1170, 1183, 1196, 1209, 1222, 1235, 1248,
1261, 1274, 1287
])
plc.set_project_name('circuit_training')
self.assertEqual(plc.get_project_name(), 'circuit_training')
plc.set_block_name('macro_tiles_10x10')
self.assertEqual(plc.get_block_name(), 'macro_tiles_10x10')
plc.set_routes_per_micron(1.0, 2.0)
self.assertEqual(plc.get_routes_per_micron(), (1.0, 2.0))
plc.set_congestion_smooth_range(2.0)
self.assertEqual(plc.get_congestion_smooth_range(), 2.0)
plc.set_overlap_threshold(2.0)
self.assertEqual(plc.get_overlap_threshold(), 2.0)
plc.set_canvas_boundary_check(False)
self.assertFalse(plc.get_canvas_boundary_check())
plc.set_macro_routing_allocation(3.0, 4.0)
self.assertEqual(plc.get_macro_routing_allocation(), (3.0, 4.0))
self.assertFalse(plc.is_node_soft_macro(13))
self.assertEqual(
plc.get_node_mask(13), [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1
])
self.assertEqual(
plc.get_node_mask('M_R0_C1'), [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1
])
self.assertEqual(plc.get_node_type(9), 'MACRO_PIN')
plc.make_soft_macros_square()
self.assertEqual(plc.get_macro_adjacency(), MACRO_ADJACENCY)
self.assertFalse(plc.is_node_fixed(13))
plc.unplace_all_nodes()
self.assertTrue(plc.place_node(13, 180))
self.assertTrue(plc.place_node('M_R0_C1', 230))
initial_placement = os.path.join(FLAGS.test_srcdir, test_netlist_dir,
'initial.plc')
self.assertTrue(plc.restore_placement(initial_placement))
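    # The lists below appear to parameterize three stages of the stdcell
    # optimizer: per-stage step counts, maximum move distances, and
    # attract/repel factors.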
use_current_loc = False
move_stdcells = True
move_macros = False
log_scale_conns = False
use_sizes = False
io_factor = 1.0
num_steps = [100, 100, 100]
max_move_distance = [1200.0, 1200.0, 1200.0]
attract_factor = [100, 1e-3, 1e-5]
repel_factor = [0, 1e6, 1e7]
self.assertTrue(
plc.optimize_stdcells(use_current_loc, move_stdcells, move_macros,
log_scale_conns, use_sizes, io_factor, num_steps,
max_move_distance, attract_factor, repel_factor))
self.assertTrue(plc.update_node_coords(13, 50.0, 50.0))
self.assertTrue(plc.update_node_coords(13, 50.0, 50.0, True))
self.assertTrue(plc.update_node_coords('M_R0_C1', 50.0, 50.0))
self.assertTrue(plc.fix_node_coord(13))
self.assertTrue(plc.fix_node_coord('M_R0_C1'))
plc.update_port_sides()
plc.snap_ports_to_edges()
self.assertEqual(plc.get_macro_and_clustered_port_adjacency(),
(MACRO_ADJACENCY, []))
self.assertEqual(plc.get_node_location(13), (50.0, 50.0))
self.assertTrue(plc.is_node_placed(13))
self.assertEqual(plc.get_node_location(13), (50.0, 50.0))
self.assertEqual(plc.get_grid_cell_of_node(13), 0)
self.assertTrue(plc.update_macro_orientation(13, 'S'))
self.assertTrue(plc.update_macro_orientation('M_R0_C1', 'S'))
self.assertEqual(plc.get_macro_orientation(13), 'S')
self.assertTrue(plc.unfix_node_coord(13))
self.assertTrue(plc.unplace_node(13))
self.assertFalse(plc.is_node_placed(13))
self.assertEqual(plc.get_source_filename(), netlist_file)
self.assertEqual(plc.get_blockages(), [])
self.assertEqual(plc.get_ref_node_id(13), -1)
initial_placement = os.path.join(self.create_tempdir(), 'initial.plc')
self.assertTrue(plc.save_placement(initial_placement, 'Info'))
self.assertTrue(os.path.exists(initial_placement))
if __name__ == '__main__':
test_utils.main()
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup script for BLEURT.
This script will allow pip-installing BLEURT as a Python module.
"""
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
install_requires = [
"pandas", "numpy", "scipy", "tensorflow", "tf-slim>=1.1", "sentencepiece"
]
setuptools.setup(
name="BLEURT", # Replace with your own username
version="0.0.2",
author="The Google AI Language Team",
description="The BLEURT metric for Natural Language Generation.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/google-research/bleurt",
packages=setuptools.find_packages(),
package_data={
"bleurt": ["test_checkpoint/*", "test_checkpoint/variables/*"]
},
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
],
license="Apache 2.0",
python_requires=">=3",
install_requires=install_requires)
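# Typical installation, run from the repository root (a sketch; the exact
# invocation may vary with your environment):
#
#   pip install .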
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data tokenization, encoding and serialization library."""
import collections
from bleurt.lib import tokenizers
import numpy as np
import pandas as pd
import tensorflow.compat.v1 as tf
flags = tf.flags
logging = tf.logging
FLAGS = flags.FLAGS
flags.DEFINE_string(
"vocab_file", None, "Vocabulary file for WordPiece tokenization. "
"Overridden if `sentence_piece_model` is specified.")
flags.DEFINE_bool(
"do_lower_case", None,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models. "
"Overridden if `sentence_piece_model` is specified.")
flags.DEFINE_string(
"sentence_piece_model", None,
"Path to SentencePiece model, without `.model` extension. This flag "
"will override `vocab_file` and `do_lower_case`.")
def _truncate_seq_pair(tokens_ref, tokens_cand, max_length):
"""Truncates a sequence pair in place to the maximum length."""
while True:
total_length = len(tokens_ref) + len(tokens_cand)
if total_length <= max_length:
break
if len(tokens_ref) > len(tokens_cand):
tokens_ref.pop()
else:
tokens_cand.pop()
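# Illustrative walk-through (not part of the library): tokens are popped from
# the tail of whichever list is currently longer, so both sequences shrink
# toward equal length.
#
#   tokens_ref, tokens_cand = ["a", "b", "c"], ["x", "y", "z"]
#   _truncate_seq_pair(tokens_ref, tokens_cand, max_length=4)
#   # tokens_ref == ["a", "b"], tokens_cand == ["x", "y"]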
def encode_example(reference, candidate, tokenizer, max_seq_length):
"""Tokenization and encoding of an example rating.
Args:
reference: reference sentence.
candidate: candidate sentence.
tokenizer: instance of lib.tokenizers.Tokenizer.
max_seq_length: maximum length of BLEURT's input after tokenization.
Returns:
    input_ids: concatenated token ids of the reference and candidate.
input_mask: binary mask to separate the input from the padding.
segment_ids: binary mask to separate the sentences.
"""
# Tokenizes, truncates and concatenates the sentences, as in:
# bert/run_classifier.py
tokens_ref = tokenizer.tokenize(reference)
tokens_cand = tokenizer.tokenize(candidate)
_truncate_seq_pair(tokens_ref, tokens_cand, max_seq_length - 3)
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_ref:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
for token in tokens_cand:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
return input_ids, input_mask, segment_ids
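# Usage sketch for encode_example (illustrative; assumes `tokenizer` was built
# with tokenizers.create_tokenizer):
#
#   ids, mask, segs = encode_example(
#       "the cat sat", "a cat sat", tokenizer, max_seq_length=16)
#   # ids  : [CLS] + reference tokens + [SEP] + candidate tokens + [SEP],
#   #        right-padded with zeros up to max_seq_length.
#   # mask : 1 over the real tokens, 0 over the padding.
#   # segs : 0 over the [CLS]/reference span, 1 over the candidate span and
#   #        its [SEP], 0 over the padding.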
def serialize_example(reference,
candidate,
tokenizer,
max_seq_length,
score=None):
"""Encodes a pair of sentences into a serialized tf.Example.
Args:
reference: reference sentence.
candidate: candidate sentence.
tokenizer: BERT-style WordPiece tokenizer.
max_seq_length: maximum length of BLEURT's input after tokenization.
score: [optional] float that indicates the score to be modelled.
Returns:
A serialized tf.Example object.
"""
def _create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
def _create_float_feature(values):
f = tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))
return f
input_ids, input_mask, segment_ids = encode_example(reference, candidate,
tokenizer, max_seq_length)
# Creates the TFExample.
features = collections.OrderedDict()
features["input_ids"] = _create_int_feature(input_ids)
features["input_mask"] = _create_int_feature(input_mask)
features["segment_ids"] = _create_int_feature(segment_ids)
if score is not None:
features["score"] = _create_float_feature([score])
# Serializes and returns.
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
return tf_example.SerializeToString()
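# Usage sketch (illustrative): serializing one labeled pair for a TFRecord.
#
#   serialized = serialize_example(
#       "An apple a day.", "An apple every day.", tokenizer,
#       max_seq_length=128, score=0.9)
#   # `serialized` is a bytes object that a TFRecordWriter can write
#   # directly, as encode_and_serialize does below.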
def serialize_raw_example(reference, candidate):
"""Serializes a pair of sentences into a tf.Example without tokenization.
Args:
reference: reference sentence.
candidate: candidate sentence.
Returns:
A serialized tf.Example object.
"""
def _bytes_feature(value):
"""Returns a bytes_list from a string / byte."""
if isinstance(value, type(tf.constant(0))):
value = value.numpy()
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
if isinstance(reference, str):
reference = reference.encode("utf-8")
if isinstance(candidate, str):
candidate = candidate.encode("utf-8")
features = collections.OrderedDict()
features["references"] = _bytes_feature(reference)
features["candidates"] = _bytes_feature(candidate)
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
return tf_example.SerializeToString()
def encode_batch(references, candidates, tokenizer, max_seq_length):
"""Encodes a batch of sentence pairs to be fed to a BLEURT checkpoint.
Args:
references: list of reference sentences.
candidates: list of candidate sentences.
tokenizer: BERT-style WordPiece tokenizer.
max_seq_length: maximum length of BLEURT's input after tokenization.
Returns:
    A triplet (input_ids, input_mask, segment_ids) of numpy arrays with dtype
    np.int64 and shape (n_sentences, max_seq_length).
"""
encoded_examples = []
for ref, cand in zip(references, candidates):
triplet = encode_example(ref, cand, tokenizer, max_seq_length)
example = np.stack(triplet)
encoded_examples.append(example)
stacked_examples = np.stack(encoded_examples)
assert stacked_examples.shape == (len(encoded_examples), 3, max_seq_length)
return (stacked_examples[:, 0, :], stacked_examples[:, 1, :],
stacked_examples[:, 2, :])
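# Shape sketch (illustrative):
#
#   input_ids, input_mask, segment_ids = encode_batch(
#       ["ref one", "ref two"], ["cand one", "cand two"], tokenizer,
#       max_seq_length=128)
#   # Each of the three arrays has shape (2, 128) and dtype np.int64.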
def encode_and_serialize(input_file, output_file, vocab_file, do_lower_case,
sp_model, max_seq_length):
"""Encodes and serializes a set of ratings in JSON format."""
  assert tf.io.gfile.exists(input_file), \
      "Could not find file {}.".format(input_file)
logging.info("Reading data...")
with tf.io.gfile.GFile(input_file, "r") as f:
examples_df = pd.read_json(f, lines=True)
for col in ["reference", "candidate", "score"]:
assert col in examples_df.columns, \
"field {} not found in input file!".format(col)
n_records = len(examples_df)
logging.info("Read {} examples.".format(n_records))
logging.info("Encoding and writing TFRecord file...")
tokenizer = tokenizers.create_tokenizer(
vocab_file=vocab_file, do_lower_case=do_lower_case, sp_model=sp_model)
with tf.python_io.TFRecordWriter(output_file) as writer:
iterator_id, iterator_cycle = 0, max(int(n_records / 10), 1)
for record in examples_df.itertuples(index=False):
iterator_id += 1
if iterator_id % iterator_cycle == 0:
logging.info("Writing example %d of %d", iterator_id, n_records)
tf_example = serialize_example(
record.reference,
record.candidate,
tokenizer,
max_seq_length,
score=record.score)
writer.write(tf_example)
logging.info("Done writing {} tf examples.".format(n_records))
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils to read and write from BLEURT checkpoints."""
import json
import os
import tensorflow.compat.v1 as tf
flags = tf.flags
logging = tf.logging
FLAGS = flags.FLAGS
CONFIG_FILE = "bleurt_config.json"
WEIGHTS_FILE = os.path.join("variables", "variables")
def get_bleurt_params_from_flags_or_ckpt():
"""Reads BLEURT's parameters from either flags or a json config file."""
logging.info("Reading BLEURT parameters")
if FLAGS.init_bleurt_checkpoint:
logging.info("Reading paramter from BLEURT checkpoint: {}".format(
FLAGS.init_bleurt_checkpoint))
config = read_bleurt_config(FLAGS.init_bleurt_checkpoint)
logging.info("Reads parameters from flags.")
vocab_file = config["vocab_file"]
do_lower_case = config["do_lower_case"]
sp_model = config["sp_model"]
max_seq_length = config["max_seq_length"]
bert_config_file = config["bert_config_file"]
init_checkpoint = config["tf_checkpoint_variables"]
dynamic_seq_length = config["dynamic_seq_length"]
# The following test gives the user the option to override `max_seq_length`.
# This should only be used during fine-tuning.
if FLAGS.max_seq_length:
logging.warning("Overriding `max_seq_length`. This could have unintended"
" consequences.")
max_seq_length = FLAGS.max_seq_length
else:
logging.info("Reads parameters from flags.")
assert ((FLAGS.vocab_file and FLAGS.do_lower_case is not None) or
FLAGS.sentence_piece_model), \
("Missing tokenization information. Please specify `vocab file` and "
"`do_lower_case` or `sentence_piece_model`")
vocab_file = FLAGS.vocab_file
do_lower_case = FLAGS.do_lower_case
sp_model = FLAGS.sentence_piece_model
assert FLAGS.max_seq_length, "max_seq_length missing"
max_seq_length = FLAGS.max_seq_length
assert FLAGS.bert_config_file, "config_file missing"
bert_config_file = FLAGS.bert_config_file
assert FLAGS.init_checkpoint, "init_checkpoint missing"
init_checkpoint = FLAGS.init_checkpoint
dynamic_seq_length = FLAGS.dynamic_seq_length
return {
"vocab_file": vocab_file,
"do_lower_case": do_lower_case,
"sp_model": sp_model,
"max_seq_length": max_seq_length,
"bert_config_file": bert_config_file,
"init_checkpoint": init_checkpoint,
"dynamic_seq_length": dynamic_seq_length
}
def read_bleurt_config(path):
"""Reads and checks config file from a BLEURT checkpoint."""
assert tf.io.gfile.exists(path), \
"Could not find BLEURT checkpoint {}".format(path)
config_path = os.path.join(path, CONFIG_FILE)
assert tf.io.gfile.exists(config_path), \
("Could not find BLEURT config file {}. Are you sure {}"
" is a valid checkpoint?").format(config_path, path)
logging.info("Config file found, reading.")
with tf.io.gfile.GFile(config_path, "r") as f:
raw_config = f.read()
bleurt_config = json.loads(raw_config)
logging.info("Will load checkpoint {}".format(bleurt_config["name"]))
logging.info("Loads full paths and checks that files exists.")
for k in bleurt_config:
v = bleurt_config[k]
logging.info("... {}:{}".format(k, v))
if not isinstance(v, str):
continue
if k.endswith("_file") or k.endswith("_dir"):
fname = os.path.join(path, bleurt_config[k])
assert tf.io.gfile.exists(fname), "File {} missing.".format(fname)
bleurt_config[k] = fname
if k == "sp_model":
fname = os.path.join(path, bleurt_config[k] + ".model")
assert tf.io.gfile.exists(fname), "File {} missing.".format(fname)
fname = os.path.join(path, bleurt_config[k] + ".vocab")
assert tf.io.gfile.exists(fname), "File {} missing.".format(fname)
bleurt_config[k] = os.path.join(path, bleurt_config[k])
bleurt_config["chkpt_dir"] = path
bleurt_config["tf_checkpoint_variables"] = os.path.join(path, WEIGHTS_FILE)
  # Necessary for backward compatibility with models that were trained before
  # SentencePiece was introduced.
if "sp_model" not in bleurt_config:
bleurt_config["sp_model"] = None
  # Necessary for backward compatibility with models that were trained before
  # UniformBatchScoring was introduced.
if "dynamic_seq_length" not in bleurt_config:
bleurt_config["dynamic_seq_length"] = False
return bleurt_config
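# Usage sketch (illustrative): loading the config shipped with a checkpoint.
#
#   config = read_bleurt_config("/path/to/bleurt_checkpoint")
#   # File-valued entries are resolved to absolute paths, and
#   # config["tf_checkpoint_variables"] points at variables/variables.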
def finalize_bleurt_checkpoint(tf_export_path):
"""Makes a BLEURT checkpoint from A TF Estimator export."""
logging.info("Finalizing BLEURT checkpoint.")
assert tf.io.gfile.exists(tf_export_path), "SavedModel export not found!"
bleurt_params = get_bleurt_params_from_flags_or_ckpt()
# Creates local copies of auxiliary files--BERT config, vocab file, etc.
bert_config_file = os.path.join(tf_export_path, "bert_config.json")
tf.io.gfile.copy(
bleurt_params["bert_config_file"], bert_config_file, overwrite=True)
if bleurt_params["vocab_file"]:
vocab_copy_loc = os.path.join(tf_export_path, "vocab.txt")
tf.io.gfile.copy(
bleurt_params["vocab_file"], vocab_copy_loc, overwrite=True)
vocab_val = "vocab.txt"
do_lower_case_val = bleurt_params["do_lower_case"]
sp_model_val = None
elif bleurt_params["sp_model"]:
sp_copy_loc = os.path.join(tf_export_path, "sent_piece.model")
tf.io.gfile.copy(
bleurt_params["sp_model"] + ".model", sp_copy_loc, overwrite=True)
vocab_copy_loc = os.path.join(tf_export_path, "sent_piece.vocab")
tf.io.gfile.copy(
bleurt_params["sp_model"] + ".vocab", vocab_copy_loc, overwrite=True)
vocab_val = None
do_lower_case_val = None
sp_model_val = "sent_piece"
bleurt_config = {
"name": FLAGS.bleurt_checkpoint_name,
"bert_config_file": "bert_config.json",
"max_seq_length": bleurt_params["max_seq_length"],
"vocab_file": vocab_val,
"do_lower_case": do_lower_case_val,
"sp_model": sp_model_val,
"dynamic_seq_length": bleurt_params["dynamic_seq_length"]
}
config_string = json.dumps(bleurt_config)
config_file = os.path.join(tf_export_path, "bleurt_config.json")
with tf.io.gfile.GFile(config_file, "w+") as f:
f.write(config_string)
logging.info("BLEURT checkpoint created.")
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Tests for finetuning function."""
import os
import tempfile
from bleurt import checkpoint as checkpoint_lib
from bleurt import finetune
from bleurt import score
import tensorflow.compat.v1 as tf
flags = tf.flags
FLAGS = flags.FLAGS
references = [
"An apple a day keeps the doctor away.",
"An apple a day keeps the doctor away."
]
candidates = [
"An apple a day keeps the doctor away.",
"An apple a day keeps doctors away."
]
ref_scores = [0.910811, 0.771989]
# Utils to get paths to static files.
def get_test_checkpoint():
pkg = os.path.abspath(__file__)
pkg, _ = os.path.split(pkg)
ckpt = os.path.join(pkg, "test_checkpoint")
assert tf.io.gfile.exists(ckpt)
return ckpt
def get_test_data():
pkg = os.path.abspath(__file__)
pkg, _ = os.path.split(pkg)
train_file = os.path.join(pkg, "test_data", "ratings_train.jsonl")
dev_file = os.path.join(pkg, "test_data", "ratings_dev.jsonl")
assert tf.io.gfile.exists(train_file)
assert tf.io.gfile.exists(dev_file)
return train_file, dev_file
class FinetuneTest(tf.test.TestCase):
def setUp(self):
# Saves default FLAG values.
super(FinetuneTest, self).setUp()
self._old_flags_val = (FLAGS.init_bleurt_checkpoint, FLAGS.model_dir,
FLAGS.num_train_steps, FLAGS.serialized_train_set,
FLAGS.serialized_dev_set, FLAGS.init_checkpoint,
FLAGS.bert_config_file, FLAGS.vocab_file,
FLAGS.max_seq_length, FLAGS.do_lower_case,
FLAGS.dynamic_seq_length)
def tearDown(self):
# Restores default FLAG values.
(FLAGS.init_bleurt_checkpoint, FLAGS.model_dir, FLAGS.num_train_steps,
FLAGS.serialized_train_set, FLAGS.serialized_dev_set,
FLAGS.init_checkpoint, FLAGS.bert_config_file, FLAGS.vocab_file,
FLAGS.max_seq_length, FLAGS.do_lower_case, FLAGS.dynamic_seq_length
) = self._old_flags_val
super(FinetuneTest, self).tearDown()
def test_finetune_and_predict(self):
checkpoint = get_test_checkpoint()
train_file, dev_file = get_test_data()
with tempfile.TemporaryDirectory() as model_dir:
# Sets new flags.
FLAGS.init_checkpoint = os.path.join(checkpoint, "variables", "variables")
FLAGS.bert_config_file = os.path.join(checkpoint, "bert_config.json")
FLAGS.vocab_file = os.path.join(checkpoint, "vocab.txt")
FLAGS.do_lower_case = True
FLAGS.dynamic_seq_length = True
FLAGS.max_seq_length = 512
FLAGS.model_dir = model_dir
FLAGS.num_train_steps = 1
FLAGS.learning_rate = 0.00000000001
FLAGS.serialized_train_set = os.path.join(model_dir, "train.tfrecord")
FLAGS.serialized_dev_set = os.path.join(model_dir, "dev.tfrecord")
# Runs 1 training step.
export = finetune.run_finetuning_pipeline(train_file, dev_file)
# Checks if the pipeline produced a valid BLEURT checkpoint.
self.assertTrue(tf.io.gfile.exists(export))
config = checkpoint_lib.read_bleurt_config(export)
      self.assertIsInstance(config, dict)
# Runs a prediction.
scorer = score.LengthBatchingBleurtScorer(export)
scores = scorer.score(references=references, candidates=candidates)
self.assertLen(scores, 2)
self.assertAllClose(scores, ref_scores)
if __name__ == "__main__":
tf.test.main()
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Tests for scoring API functionality."""
import os
import tempfile
from bleurt import score_files
import tensorflow.compat.v1 as tf
tf.enable_eager_execution()
flags = tf.flags
FLAGS = flags.FLAGS
ref_scores = [0.926138, 0.247466, -0.935921, -1.053069]
def get_test_checkpoint():
pkg = os.path.abspath(__file__)
pkg, _ = os.path.split(pkg)
ckpt = os.path.join(pkg, "test_checkpoint")
assert tf.io.gfile.exists(ckpt)
return ckpt
def get_test_data():
pkg = os.path.abspath(__file__)
pkg, _ = os.path.split(pkg)
sentence_pairs_file = os.path.join(pkg, "test_data", "sentence_pairs.jsonl")
references_file = os.path.join(pkg, "test_data", "references")
candidates_file = os.path.join(pkg, "test_data", "candidates")
assert tf.io.gfile.exists(sentence_pairs_file)
assert tf.io.gfile.exists(references_file)
assert tf.io.gfile.exists(candidates_file)
return sentence_pairs_file, references_file, candidates_file
def get_scores_from_scores_file(scores_file):
assert tf.io.gfile.exists(scores_file)
  with tf.io.gfile.GFile(scores_file, "r") as file:
scores = [float(line) for line in file]
return scores
class ScoreFilesTest(tf.test.TestCase):
def setUp(self):
# Saves default FLAG values.
super(ScoreFilesTest, self).setUp()
self._old_flags_val = (FLAGS.sentence_pairs_file, FLAGS.candidate_file,
FLAGS.reference_file, FLAGS.scores_file,
FLAGS.bleurt_checkpoint, FLAGS.bleurt_batch_size)
def tearDown(self):
# Restores default FLAG values.
(FLAGS.sentence_pairs_file, FLAGS.candidate_file, FLAGS.reference_file,
FLAGS.scores_file, FLAGS.bleurt_checkpoint,
FLAGS.bleurt_batch_size) = self._old_flags_val
super(ScoreFilesTest, self).tearDown()
def test_json_generator_empty(self):
    # Tests that an AssertionError is raised when an empty
    # file path is passed to the generator.
FLAGS.bleurt_checkpoint = get_test_checkpoint()
with self.assertRaises(AssertionError):
generator = score_files._json_generator("")
score_files.score_files(generator, FLAGS.bleurt_checkpoint)
def test_text_generator_empty(self):
    # Tests that an AssertionError is raised when an empty
    # file path is passed to the generator.
FLAGS.bleurt_checkpoint = get_test_checkpoint()
with self.assertRaises(AssertionError):
generator = score_files._text_generator("", "")
score_files.score_files(generator, FLAGS.bleurt_checkpoint)
def test_score_files_sentence_pairs(self):
    # Tests specifying a JSONL file of sentence pairs generates accurate scores.
checkpoint = get_test_checkpoint()
sentence_pairs_file, _, _ = get_test_data()
with tempfile.TemporaryDirectory() as temp_dir:
FLAGS.scores_file = os.path.join(temp_dir, "scores")
generator = score_files._json_generator(sentence_pairs_file)
score_files.score_files(generator, checkpoint)
self.assertTrue(tf.io.gfile.exists(FLAGS.scores_file))
scores = get_scores_from_scores_file(FLAGS.scores_file)
self.assertLen(scores, 4)
self.assertAllClose(scores, ref_scores)
def test_score_files_text(self):
# Tests specifying two text files for candidates
# and references generates accurate scores.
checkpoint = get_test_checkpoint()
_, reference_file, candidate_file = get_test_data()
with tempfile.TemporaryDirectory() as temp_dir:
FLAGS.scores_file = os.path.join(temp_dir, "scores")
generator = score_files._text_generator(reference_file, candidate_file)
score_files.score_files(generator, checkpoint)
self.assertTrue(tf.io.gfile.exists(FLAGS.scores_file))
scores = get_scores_from_scores_file(FLAGS.scores_file)
self.assertLen(scores, 4)
self.assertAllClose(scores, ref_scores)
def test_score_diff_sentence_pairs(self):
    # Tests sentence pairs where the numbers of candidates
    # and references differ.
checkpoint = get_test_checkpoint()
with tempfile.TemporaryDirectory() as temp_dir:
FLAGS.sentence_pairs_file = os.path.join(temp_dir, "sentence_pairs.jsonl")
with tf.io.gfile.GFile(FLAGS.sentence_pairs_file, "w+") as sentence_pairs:
sentence_pairs.write("{\"candidate\": \"sashimi\"}")
with self.assertRaises(AssertionError):
generator = score_files._json_generator(FLAGS.sentence_pairs_file)
score_files.score_files(generator, checkpoint)
def test_score_diff_text_files(self):
    # Tests two text files where the numbers of candidates
    # and references differ.
checkpoint = get_test_checkpoint()
with tempfile.TemporaryDirectory() as temp_dir:
FLAGS.reference_file = os.path.join(temp_dir, "references")
FLAGS.candidate_file = os.path.join(temp_dir, "candidates")
with tf.io.gfile.GFile(FLAGS.reference_file, "w+") as references:
references.write("nigiri\nshrimp tempura\ntonkatsu")
with tf.io.gfile.GFile(FLAGS.candidate_file, "w+") as candidates:
candidates.write("ramen\nfish")
with self.assertRaises(AssertionError):
generator = score_files._text_generator(FLAGS.reference_file,
FLAGS.candidate_file)
score_files.score_files(generator, checkpoint)
def test_sentence_pairs_consume_buffer(self):
# Tests specifying a number of sentence pairs that
# exceeds BLEURT batch size, requiring a call to _consume_buffer.
checkpoint = get_test_checkpoint()
sentence_pairs_file, _, _ = get_test_data()
with tempfile.TemporaryDirectory() as temp_dir:
FLAGS.bleurt_batch_size = 1
FLAGS.scores_file = os.path.join(temp_dir, "scores")
generator = score_files._json_generator(sentence_pairs_file)
score_files.score_files(generator, checkpoint)
scores = get_scores_from_scores_file(FLAGS.scores_file)
self.assertLen(scores, 4)
self.assertAllClose(scores, ref_scores)
def test_text_consume_buffer(self):
# Tests specifying a number of candidate and reference pairs that
# exceeds BLEURT batch size, requiring a call to _consume_buffer.
checkpoint = get_test_checkpoint()
_, reference_file, candidate_file = get_test_data()
with tempfile.TemporaryDirectory() as temp_dir:
FLAGS.bleurt_batch_size = 2
FLAGS.scores_file = os.path.join(temp_dir, "scores")
generator = score_files._text_generator(reference_file, candidate_file)
score_files.score_files(generator, checkpoint)
scores = get_scores_from_scores_file(FLAGS.scores_file)
self.assertLen(scores, 4)
self.assertAllClose(scores, ref_scores)
def test_score_empty_candidate_and_reference_text(self):
# Tests scoring text files with an empty candidate and reference.
checkpoint = get_test_checkpoint()
with tempfile.TemporaryDirectory() as temp_dir:
FLAGS.reference_file = os.path.join(temp_dir, "references")
FLAGS.candidate_file = os.path.join(temp_dir, "candidates")
FLAGS.scores_file = os.path.join(temp_dir, "scores")
with tf.io.gfile.GFile(FLAGS.reference_file, "w+") as references:
references.write("\n")
with tf.io.gfile.GFile(FLAGS.candidate_file, "w+") as candidates:
candidates.write("\n")
generator = score_files._text_generator(FLAGS.reference_file,
FLAGS.candidate_file)
score_files.score_files(generator, checkpoint)
scores = get_scores_from_scores_file(FLAGS.scores_file)
self.assertLen(scores, 1)
self.assertAllClose(scores, [0.679957])
def test_score_empty_reference_and_candidate_pair(self):
# Tests scoring sentence pairs with empty candidate and empty reference.
checkpoint = get_test_checkpoint()
with tempfile.TemporaryDirectory() as temp_dir:
FLAGS.sentence_pairs_file = os.path.join(temp_dir, "sentence_pairs.jsonl")
FLAGS.scores_file = os.path.join(temp_dir, "scores")
with tf.io.gfile.GFile(FLAGS.sentence_pairs_file, "w+") as sentence_pairs:
sentence_pairs.write("{\"candidate\": \"\", \"reference\": \"\"}")
generator = score_files._json_generator(FLAGS.sentence_pairs_file)
score_files.score_files(generator, checkpoint)
scores = get_scores_from_scores_file(FLAGS.scores_file)
self.assertLen(scores, 1)
self.assertAllClose(scores, [0.679957])
if __name__ == "__main__":
tf.test.main()
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BLEURT scoring library."""
import itertools
from bleurt import score as score_lib
import pandas as pd
import tensorflow as tf
flags = tf.compat.v1.flags
logging = tf.compat.v1.logging
FLAGS = flags.FLAGS
flags.DEFINE_string(
"sentence_pairs_file", None,
"Path to a JSONL file that contains sentence pairs. Each JSON record must "
"contain the fields `reference` and `candidate`. Overrides `candidate_file`"
" and `reference_file` flags if specified.")
flags.DEFINE_string(
"candidate_file", None,
"Path to candidates text file, with one candidate sentence "
"per line.")
flags.DEFINE_string(
"reference_file", None,
"Path to reference text file, with one reference sentence "
"per line.")
flags.DEFINE_string(
"scores_file", None,
"[optional] Path where the scores will be written. Will use standard "
"output if unspecified.")
flags.DEFINE_string("bleurt_checkpoint", None,
"[optional] Path to BLEURT checkpoint.")
flags.DEFINE_integer("bleurt_batch_size", 16,
"Number of sentence pairs per batch.")
flags.DEFINE_integer(
"read_buffer_size", 100000,
"Number of lines to read at a time from the input files. "
"Increase or decrase to ajust memory consumption.")
flags.DEFINE_bool(
"batch_same_length", False,
"Enables dynamic batching to speed up inference."
" [experimental feature]")
def _json_generator(sentence_pairs_file):
"""Yields a generator for iterating from a single JSONL file."""
assert tf.io.gfile.exists(
sentence_pairs_file), "Sentence pairs file {} not found".format(
sentence_pairs_file)
with tf.io.gfile.GFile(sentence_pairs_file, "r") as pairs_file:
ratings_df = pd.read_json(pairs_file, lines=True)
for _, row in ratings_df.iterrows():
assert row.get("reference") is not None, (
"Reference sentence not found, are you sure the JSON record "
"contains a 'reference' field?")
assert row.get("candidate") is not None, (
"Candidate sentence not found, are you sure the JSON record "
"contains a 'candidate' field?")
yield row.get("reference"), row.get("candidate")
def _text_generator(reference_file, candidate_file):
"""Yields a generator for iterating from two text files."""
assert tf.io.gfile.exists(
reference_file), "Reference file {} not found".format(reference_file)
assert tf.io.gfile.exists(
candidate_file), "Candidate file {} not found".format(candidate_file)
with tf.io.gfile.GFile(reference_file, "r") as ref_file:
with tf.io.gfile.GFile(candidate_file, "r") as cand_file:
for ref_sentence, cand_sentence in itertools.zip_longest(
ref_file, cand_file, fillvalue=None):
assert ref_sentence is not None, (
"Reference sentence not found, are you sure that the files have "
"the same size?")
assert cand_sentence is not None, (
"Candidate sentence not found, are you sure that the files have "
"the same size?")
yield ref_sentence, cand_sentence
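# Usage sketch (illustrative): the two files must have the same number of
# lines; itertools.zip_longest plus the assertions above turn a length
# mismatch into an AssertionError instead of silently dropping sentences.
#
#   gen = _text_generator("references.txt", "candidates.txt")
#   first_ref, first_cand = next(gen)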
def score_files(generator, bleurt_checkpoint):
"""Computes BLEURT scores from a sentence pairs generator.
Requires that a JSONL file containing both candidate and reference
sentences or two individual candidate and reference text files be specified,
with the former overriding the latter if both flags are specified.
Args:
generator: A generator yielding reference and candidate sentences.
bleurt_checkpoint: BLEURT checkpoint used for scoring.
"""
ref_buffer = []
cand_buffer = []
scores_buffer = []
if not FLAGS.batch_same_length:
scorer = score_lib.BleurtScorer(bleurt_checkpoint)
else:
logging.warning(
"Enabling same length batching. BEWARE: this is an experimental "
"feature.")
scorer = score_lib.LengthBatchingBleurtScorer(bleurt_checkpoint)
def _consume_buffer():
scores = scorer.score(
references=ref_buffer,
candidates=cand_buffer,
batch_size=FLAGS.bleurt_batch_size)
del ref_buffer[:]
del cand_buffer[:]
scores_buffer.extend(scores)
logging.info("Computing BLEURT scores...")
for ref_sentence, cand_sentence in generator:
ref_buffer.append(ref_sentence)
cand_buffer.append(cand_sentence)
if len(ref_buffer) >= FLAGS.read_buffer_size:
_consume_buffer()
if ref_buffer:
_consume_buffer()
logging.info("BLEURT scores computed.")
if FLAGS.scores_file:
logging.info("Writing to disk.")
with tf.io.gfile.GFile(FLAGS.scores_file, "w+") as score_file:
for s in scores_buffer:
score_file.write("{}\n".format(str(s)))
else:
for s in scores_buffer:
print("{}".format(str(s)))
logging.info("Done.")
def check_flags_and_score():
"""Creates a file reader and runs model."""
assert FLAGS.sentence_pairs_file or (
FLAGS.reference_file and FLAGS.candidate_file
), ("Reference and candidate files not found, please specify a JSONL file or "
"two text files.")
if FLAGS.sentence_pairs_file:
sentence_pairs_generator = _json_generator(FLAGS.sentence_pairs_file)
else:
sentence_pairs_generator = _text_generator(FLAGS.reference_file,
FLAGS.candidate_file)
score_files(sentence_pairs_generator, FLAGS.bleurt_checkpoint)
def main(_):
logging.info("Running BLEURT scoring.")
check_flags_and_score()
if __name__ == "__main__":
tf.compat.v1.app.run()
|